repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
marcusdiaz/BitcoinUnlimited | qa/rpc-tests/test_framework/bignum.py | 123 | 1929 | #!/usr/bin/env python3
#
# bignum.py
#
# This file is copied from python-bitcoinlib.
#
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Bignum routines"""
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
    """Return the number of bytes needed to hold integer v in big-endian
    form, plus one optional extension byte when have_ext is True."""
    return (v.bit_length() + 7) // 8 + (1 if have_ext else 0)
def bn2bin(v):
    """Serialize integer v to its minimal big-endian byte representation.

    Returns a bytearray; zero serializes to an empty bytearray.
    """
    byte_count = (v.bit_length() + 7) // 8
    # Emit the most significant byte first by shifting from the top down.
    return bytearray(
        (v >> (8 * shift)) & 0xff for shift in reversed(range(byte_count)))
def bin2bn(s):
    """Deserialize big-endian bytes into a non-negative integer.

    Args:
        s: a bytes-like object or iterable of ints (big-endian order).

    Returns:
        The integer value; an empty input yields 0.
    """
    # int.from_bytes replaces the previous manual shift-and-or fold and
    # avoids the ambiguous single-letter accumulator name `l`.
    return int.from_bytes(s, 'big')
def bn2mpi(v):
    """Serialize integer v to big-endian MPI format.

    Layout: a 4-byte big-endian length, then the magnitude bytes.  The sign
    is carried in the high bit of the first payload byte; when the magnitude
    exactly fills its bytes an extra leading 0x00 extension byte is emitted
    so the sign bit has somewhere to live.
    """
    bits = v.bit_length()
    # An extension byte is needed when the magnitude occupies a whole
    # number of bytes exactly (top bit of the first byte is in use).
    have_ext = bits > 0 and (bits & 0x07) == 0
    neg = v < 0
    if neg:
        v = -v
    payload = bytearray()
    if have_ext:
        payload.append(0)
    payload += bn2bin(v)
    if neg:
        # Set the sign bit on the very first payload byte (extension byte
        # when present, otherwise the leading magnitude byte).
        payload[0] |= 0x80
    return struct.pack(b">I", bn_bytes(v, have_ext)) + payload
def mpi2bn(s):
    """Parse a big-endian MPI blob (4-byte BE length + payload) into an int.

    Returns None on malformed input (truncated header or a payload whose
    length disagrees with the declared size).  The sign is read from the
    high bit of the first payload byte.
    """
    if len(s) < 4:
        return None
    declared_len = struct.unpack(b">I", bytes(s[:4]))[0]
    if len(s) != declared_len + 4:
        return None
    if declared_len == 0:
        return 0

    payload = bytearray(s[4:])
    negative = bool(payload[0] & 0x80)
    # Strip the sign bit before folding the magnitude.
    payload[0] &= 0x7f

    value = 0
    for byte in payload:
        value = (value << 8) | byte
    return -value if negative else value
# bitcoin-specific little endian format, with implicit size
def mpi2vch(s):
    """Convert MPI format (4-byte BE size prefix + BE payload) to bitcoin's
    little-endian variable-length format: drop the size header, reverse."""
    return s[4:][::-1]
def bn2vch(v):
    """Convert integer v to bitcoin's little-endian byte-vector format."""
    return bytes(mpi2vch(bn2mpi(v)))
def vch2mpi(s):
    """Convert a bitcoin little-endian byte vector back to MPI format by
    prepending a 4-byte big-endian size and reversing the payload bytes."""
    header = struct.pack(b">I", len(s))  # size
    return header + s[::-1]  # reverse string, converting LE->BE
def vch2bn(s):
    """Convert a bitcoin little-endian byte vector to an integer."""
    return mpi2bn(vch2mpi(s))
| mit |
nttks/edx-platform | openedx/core/djangoapps/content/course_overviews/management/commands/generate_course_overview.py | 7 | 1699 | """
Command to load course overviews.
"""
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Generates and caches course overviews.

    Example usage:
        $ ./manage.py lms generate_course_overview --all --settings=devstack
        $ ./manage.py lms generate_course_overview 'edX/DemoX/Demo_Course' --settings=devstack
    """
    args = '<course_id course_id ...>'
    help = 'Generates and stores course overview for one or more courses.'

    def add_arguments(self, parser):
        """
        Add arguments to the command parser.
        """
        parser.add_argument(
            '--all',
            action='store_true',
            dest='all',
            default=False,
            help='Generate course overview for all courses.',
        )

    def handle(self, *args, **options):
        """Resolve the requested course keys and generate their overviews."""
        course_keys = []
        if options['all']:
            # --all: pull every course id known to the modulestore.
            course_keys = [course.id for course in modulestore().get_courses()]
        else:
            if len(args) < 1:
                raise CommandError('At least one course or --all must be specified.')
            try:
                course_keys = [CourseKey.from_string(arg) for arg in args]
            except InvalidKeyError:
                # NOTE(review): log.fatal only logs; execution continues and
                # falls through to the empty-list check below -- confirm
                # whether a hard failure was intended here.
                log.fatal('Invalid key specified.')
        if not course_keys:
            # NOTE(review): same caveat -- get_select_courses is still called
            # with an empty list after this log line.
            log.fatal('No courses specified.')
        # Generating the overviews is a side effect of this query/cache call.
        CourseOverview.get_select_courses(course_keys)
| agpl-3.0 |
stelligent/ansible-modules-extras | cloud/amazon/ec2_eni.py | 24 | 14258 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_eni
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
description:
- Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID is provided, an attempt is made to update the existing ENI. By passing 'None' as the instance_id, an ENI can be detached from an instance.
version_added: "2.0"
author: Rob White, wimnat [at] gmail.com, @wimnat
options:
eni_id:
description:
- The ID of the ENI
required: false
default: null
instance_id:
description:
- Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'.
required: false
default: null
private_ip_address:
description:
- Private IP address.
required: false
default: null
subnet_id:
description:
- ID of subnet in which to create the ENI. Only required when state=present.
required: true
description:
description:
- Optional description of the ENI.
required: false
default: null
security_groups:
description:
- List of security groups associated with the interface. Only used when state=present.
required: false
default: null
state:
description:
- Create or delete ENI.
required: false
default: present
choices: [ 'present', 'absent' ]
device_index:
description:
- The index of the device for the network interface attachment on the instance.
required: false
default: 0
force_detach:
description:
- Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent.
required: false
default: no
delete_on_termination:
description:
- Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation.
required: false
source_dest_check:
description:
- By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation.
required: false
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an ENI. As no security group is defined, ENI will be created in default security group
- ec2_eni:
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI and attach it to an instance
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Destroy an ENI, detaching it from any instance if necessary
- ec2_eni:
eni_id: eni-xxxxxxx
force_detach: yes
state: absent
# Update an ENI
- ec2_eni:
eni_id: eni-xxxxxxx
description: "My new description"
state: present
# Detach an ENI from an instance
- ec2_eni:
eni_id: eni-xxxxxxx
instance_id: None
state: present
### Delete an interface on termination
# First create the interface
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
register: eni
# Modify the interface to enable the delete_on_terminaton flag
- ec2_eni:
eni_id: {{ "eni.interface.id" }}
delete_on_termination: true
'''
import time
import xml.etree.ElementTree as ET
import re
try:
import boto.ec2
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_error_message(xml_string):
    """Return the text of the first <Message> element found anywhere in an
    AWS error XML response, or None when no such element exists."""
    matches = ET.fromstring(xml_string).findall('.//Message')
    if matches:
        return matches[0].text
    return None
def get_eni_info(interface):
    """Build a plain dict of facts describing a boto ENI object.

    Includes identity, subnet/VPC, addressing, security groups and -- when
    the interface is attached -- a nested 'attachment' dict.
    """
    info = {
        'id': interface.id,
        'subnet_id': interface.subnet_id,
        'vpc_id': interface.vpc_id,
        'description': interface.description,
        'owner_id': interface.owner_id,
        'status': interface.status,
        'mac_address': interface.mac_address,
        'private_ip_address': interface.private_ip_address,
        'source_dest_check': interface.source_dest_check,
        'groups': dict((group.id, group.name) for group in interface.groups),
    }

    attachment = interface.attachment
    if attachment is not None:
        info['attachment'] = {
            'attachment_id': attachment.id,
            'instance_id': attachment.instance_id,
            'device_index': attachment.device_index,
            'status': attachment.status,
            'attach_time': attachment.attach_time,
            'delete_on_termination': attachment.delete_on_termination,
        }

    return info
def wait_for_eni(eni, status):
    """Poll the ENI every 3 seconds until it reaches the given state.

    Args:
        eni: boto network interface object (refreshed via eni.update()).
        status: 'attached' or 'detached'.

    NOTE(review): there is no timeout or retry cap -- if the requested
    state is never reached this loops forever.
    """
    while True:
        time.sleep(3)
        eni.update()
        # If the status is detached we just need attachment to disappear
        if eni.attachment is None:
            if status == "detached":
                break
        else:
            if status == "attached" and eni.attachment.status == "attached":
                break
def create_eni(connection, module):
    """Create an ENI (reusing an identical existing one for idempotency)
    and optionally attach it to an instance.

    Exits the module via exit_json with the change flag and ENI facts, or
    fail_json on AWS errors.
    """
    instance_id = module.params.get("instance_id")
    # The literal string 'None' is this module's documented sentinel for
    # "no instance / detach".
    if instance_id == 'None':
        instance_id = None
        # NOTE(review): do_detach is never read in this function.
        do_detach = True
    else:
        do_detach = False
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    changed = False

    try:
        # Reuse an existing ENI with identical parameters, if any.
        eni = compare_eni(connection, module)
        if eni is None:
            eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
            if instance_id is not None:
                try:
                    eni.attach(instance_id, device_index)
                except BotoServerError as ex:
                    # Roll back the freshly created ENI before re-raising.
                    eni.delete()
                    raise
            # Wait to allow creation / attachment to finish
            # NOTE(review): this waits for "attached" even when no
            # instance_id was given -- see wait_for_eni; confirm intended.
            wait_for_eni(eni, "attached")
            eni.update()
            changed = True
    except BotoServerError as e:
        module.fail_json(msg=get_error_message(e.args[2]))

    module.exit_json(changed=changed, interface=get_eni_info(eni))
def modify_eni(connection, module):
    """Modify an existing ENI's attributes and its attachment state.

    Updates description, security groups, source/dest check and the
    delete-on-termination flag when they differ from current values, then
    attaches or detaches the interface as requested.  Exits the module via
    exit_json (with ENI facts) or fail_json on AWS errors.

    Args:
        connection: boto EC2 connection.
        module: AnsibleModule carrying the task parameters.
    """
    eni_id = module.params.get("eni_id")
    instance_id = module.params.get("instance_id")
    # The literal string 'None' is the documented way to request detachment.
    if instance_id == 'None':
        instance_id = None
        do_detach = True
    else:
        do_detach = False
    device_index = module.params.get("device_index")
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    force_detach = module.params.get("force_detach")
    source_dest_check = module.params.get("source_dest_check")
    delete_on_termination = module.params.get("delete_on_termination")
    changed = False

    try:
        # Get the eni with the eni_id specified
        eni_result_set = connection.get_all_network_interfaces(eni_id)
        eni = eni_result_set[0]
        if description is not None and eni.description != description:
            connection.modify_network_interface_attribute(
                eni.id, "description", description)
            changed = True
        if security_groups is not None:
            # Compare order-insensitively so a reordered list is not a change.
            if sorted(get_sec_group_list(eni.groups)) != sorted(security_groups):
                connection.modify_network_interface_attribute(
                    eni.id, "groupSet", security_groups)
                changed = True
        if source_dest_check is not None and eni.source_dest_check != source_dest_check:
            connection.modify_network_interface_attribute(
                eni.id, "sourceDestCheck", source_dest_check)
            changed = True
        if delete_on_termination is not None:
            # This flag lives on the attachment, so the ENI must be attached.
            if eni.attachment is not None:
                if eni.attachment.delete_on_termination is not delete_on_termination:
                    connection.modify_network_interface_attribute(
                        eni.id, "deleteOnTermination", delete_on_termination,
                        eni.attachment.id)
                    changed = True
            else:
                module.fail_json(msg="Can not modify delete_on_termination as the interface is not attached")
        if eni.attachment is not None and instance_id is None and do_detach is True:
            eni.detach(force_detach)
            wait_for_eni(eni, "detached")
            changed = True
        else:
            if instance_id is not None:
                eni.attach(instance_id, device_index)
                wait_for_eni(eni, "attached")
                changed = True
    except BotoServerError as e:
        # Fixed: a stray `print e` debug statement stood here; anything
        # written to stdout corrupts the JSON protocol Ansible uses to talk
        # to modules, so it has been removed.
        module.fail_json(msg=get_error_message(e.args[2]))

    eni.update()
    module.exit_json(changed=changed, interface=get_eni_info(eni))
def delete_eni(connection, module):
    """Delete an ENI, optionally force-detaching it from its instance first.

    Exits the module with changed=True on success.  A "does not exist" AWS
    error is treated as already deleted (changed=False); any other AWS
    error fails the module.
    """
    eni_id = module.params.get("eni_id")
    force_detach = module.params.get("force_detach")

    try:
        eni_result_set = connection.get_all_network_interfaces(eni_id)
        eni = eni_result_set[0]
        if force_detach is True:
            # Detach first (if attached), then delete once detachment settles.
            if eni.attachment is not None:
                eni.detach(force_detach)
                # Wait to allow detachment to finish
                wait_for_eni(eni, "detached")
                eni.update()
            eni.delete()
            changed = True
        else:
            eni.delete()
            changed = True
        module.exit_json(changed=changed)
    except BotoServerError as e:
        msg = get_error_message(e.args[2])
        # An already-missing ENI is success from an idempotency standpoint.
        regex = re.compile('The networkInterface ID \'.*\' does not exist')
        if regex.search(msg) is not None:
            module.exit_json(changed=False)
        else:
            module.fail_json(msg=get_error_message(e.args[2]))
def compare_eni(connection, module):
    """Look for an existing ENI matching the requested parameters.

    Returns:
        The matching boto ENI object, or None when no ENI with the same
        subnet, private IP, description and security groups exists.
    """
    eni_id = module.params.get("eni_id")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')

    try:
        all_eni = connection.get_all_network_interfaces(eni_id)

        for eni in all_eni:
            remote_security_groups = get_sec_group_list(eni.groups)
            # NOTE(review): group lists are compared unsorted here, unlike
            # the sorted comparison in modify_eni -- a mere ordering
            # difference will miss a match; confirm intended.
            if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups):
                return eni

    except BotoServerError as e:
        module.fail_json(msg=get_error_message(e.args[2]))

    return None
def get_sec_group_list(groups):
    """Return the byte-encoded IDs of the given boto security group objects."""
    return [group.id.encode() for group in groups]
def main():
    """Module entry point.

    Builds the argument spec, connects to EC2 in the requested region and
    dispatches to create/modify/delete based on `state` and `eni_id`.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            eni_id = dict(default=None),
            instance_id = dict(default=None),
            private_ip_address = dict(),
            subnet_id = dict(),
            description = dict(),
            security_groups = dict(type='list'),
            device_index = dict(default=0, type='int'),
            state = dict(default='present', choices=['present', 'absent']),
            force_detach = dict(default='no', type='bool'),
            source_dest_check = dict(default=None, type='bool'),
            delete_on_termination = dict(default=None, type='bool')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        # NOTE(review): Python 2-only except syntax, and StandardError does
        # not exist under Python 3 -- this module targets Python 2.
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    state = module.params.get("state")
    eni_id = module.params.get("eni_id")

    if state == 'present':
        if eni_id is None:
            # No ENI given: create one; the subnet is mandatory for creation.
            if module.params.get("subnet_id") is None:
                module.fail_json(msg="subnet_id must be specified when state=present")
            create_eni(connection, module)
        else:
            modify_eni(connection, module)
    elif state == 'absent':
        if eni_id is None:
            module.fail_json(msg="eni_id must be specified")
        else:
            delete_eni(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
| gpl-3.0 |
ostree/plaso | plaso/lib/lexer.py | 2 | 15495 | # -*- coding: utf-8 -*-
"""An LL(1) lexer. This lexer is very tolerant of errors and can resync.
This lexer is originally copied from the GRR project:
https://code.google.com/p/grr
"""
import logging
import re
from plaso.lib import errors
class Token(object):
    """One lexer rule: when the current state matches state_regex and the
    input matches regex, run the listed actions and optionally move to
    next_state."""

    def __init__(self, state_regex, regex, actions, next_state, flags=re.I):
        """Initializes the token object.

        Args:
          state_regex: regular expression the lexer's current state must
              match for this rule to be considered.
          regex: regular expression tried against the input buffer.
          actions: comma-separated names of Lexer methods to call on a
              match, or None/empty for no actions.
          next_state: state to transition to after a match, or None.
          flags: additional re flags (case-insensitive by default).
        """
        combined_flags = re.DOTALL | re.M | re.S | re.U | flags
        self.state_regex = re.compile(state_regex, combined_flags)
        self.regex = re.compile(regex, combined_flags)
        self.re_str = regex
        self.next_state = next_state
        # Split the action spec into a list of method names (empty if none).
        self.actions = actions.split(',') if actions else []
class Lexer(object):
    """A generic feed lexer.

    Matches the Token rules in `tokens` against an internal buffer,
    invoking callback methods named by each matching token's actions and
    transitioning between named states.
    """

    # Sentinel returned by a callback to skip the remaining callbacks.
    _CONTINUE_STATE = 'CONTINUE'
    _INITIAL_STATE = 'INITIAL'

    # Returned by NextToken when no rule matched and a byte was discarded.
    _ERROR_TOKEN = 'Error'

    # A list of Token() instances.
    tokens = []

    def __init__(self, data=''):
        """Initializes the lexer object.

        Args:
          data: optional initial data to be processed by the lexer.
        """
        super(Lexer, self).__init__()
        self.buffer = data
        self.error = 0
        self.flags = 0
        self.processed = 0
        self.processed_buffer = ''
        self.state = self._INITIAL_STATE
        self.state_stack = []
        self.verbose = 0

    def NextToken(self):
        """Fetch the next token by trying to match any of the regexes in order."""
        current_state = self.state
        for token in self.tokens:
            # Does the rule apply to us?
            if not token.state_regex.match(current_state):
                continue

            # Try to match the rule
            m = token.regex.match(self.buffer)
            if not m:
                continue

            # The match consumes the data off the buffer (the handler can put it back
            # if it likes)
            # TODO: using joins might be more efficient here.
            self.processed_buffer += self.buffer[:m.end()]
            self.buffer = self.buffer[m.end():]
            self.processed += m.end()

            next_state = token.next_state
            for action in token.actions:
                # Is there a callback to handle this action?
                callback = getattr(self, action, self.Default)

                # Allow a callback to skip other callbacks.
                try:
                    possible_next_state = callback(string=m.group(0), match=m)
                    if possible_next_state == self._CONTINUE_STATE:
                        continue
                    # Override the state from the Token
                    elif possible_next_state:
                        next_state = possible_next_state
                except errors.ParseError as exception:
                    self.Error(exception)

            # Update the next state
            if next_state:
                self.state = next_state

            return token

        # Check that we are making progress - if we are too full, we assume we are
        # stuck.  Discard one byte so a later call can resynchronize.
        self.Error(u'Expected {0:s}'.format(self.state))
        self.processed_buffer += self.buffer[:1]
        self.buffer = self.buffer[1:]
        return self._ERROR_TOKEN

    def Close(self):
        """A convenience function to force us to parse all the data."""
        while self.NextToken():
            if not self.buffer:
                return

    def Default(self, **kwarg):
        """The default callback handler (used when an action has no method)."""
        logging.debug(u'Default handler: {0:s}'.format(kwarg))

    def Empty(self):
        """Returns a boolean indicating if the buffer is empty."""
        return not self.buffer

    def Error(self, message=None, weight=1):
        """Log an error down.

        Args:
          message: optional error message.
          weight: optional error weight (added to the running error count).
        """
        logging.debug(u'Error({0:d}): {1:s}'.format(weight, message))
        # Keep a count of errors
        self.error += weight

    def PushState(self, **unused_kwargs):
        """Push the current state on the state stack."""
        logging.debug(u'Storing state {0:s}'.format(repr(self.state)))
        self.state_stack.append(self.state)

    def PopState(self, **unused_kwargs):
        """Pop the previous state from the stack."""
        try:
            self.state = self.state_stack.pop()
            logging.debug(u'Returned state to {0:s}'.format(self.state))
            return self.state
        except IndexError:
            self.Error(
                u'Tried to pop the state but failed - possible recursion error')

    def Feed(self, data):
        """Feed the buffer with data.

        Args:
          data: data to be processed by the lexer.
        """
        self.buffer = ''.join([self.buffer, data])

    def PushBack(self, string='', **unused_kwargs):
        """Push the match back on the stream.

        Args:
          string: optional data.

        NOTE(review): when string is empty, the slice below is [:-0] which
        evaluates to '' and clears processed_buffer entirely -- confirm
        callers never pass an empty string.
        """
        self.buffer = string + self.buffer
        self.processed_buffer = self.processed_buffer[:-len(string)]
class SelfFeederMixIn(Lexer):
    """This mixin is used to make a lexer which feeds itself.

    Note that self.file_object must be the file object we read from.
    """

    # TODO: fix this, file object either needs to be set or not passed here.
    def __init__(self, file_object=None):
        """Initializes the lexer feeder min object.

        Args:
          file_object: Optional file-like object. The default is None.
        """
        super(SelfFeederMixIn, self).__init__()
        self.file_object = file_object

    # NOTE(review): this override changes the base-class Feed signature
    # (data -> size) and returns the byte count instead of None.
    def Feed(self, size=512):
        """Feed data into the buffer.

        Args:
          size: optional data size to read form the file-like object.

        Returns:
          The number of bytes actually read (0 at end of file).
        """
        data = self.file_object.read(size)
        Lexer.Feed(self, data)
        return len(data)

    def NextToken(self):
        """Retrieves the next token.

        Returns:
          The next token (instance of Token) or None.
        """
        # If we don't have enough data - feed ourselves: We assume
        # that we must have at least one sector in our buffer.
        if len(self.buffer) < 512:
            if self.Feed() == 0 and not self.buffer:
                return

        return Lexer.NextToken(self)
class Expression(object):
    """A single node of the parsed filter AST.

    Carries an attribute name, a comparison operator and a bounded list of
    arguments.
    """

    attribute = None
    args = None
    operator = None

    # The expected number of args
    number_of_args = 1

    def __init__(self):
        """Initializes the expression with an empty argument list."""
        self.args = []

    def __str__(self):
        """Return a string representation of the expression."""
        return 'Expression: ({0:s}) ({1:s}) {2:s}'.format(
            self.attribute, self.operator, self.args)

    def SetAttribute(self, attribute):
        """Store the attribute this expression operates on."""
        self.attribute = attribute

    def SetOperator(self, operator):
        """Store the comparison operator."""
        self.operator = operator

    def AddArg(self, arg):
        """Append an argument, enforcing the expected argument count.

        Args:
          arg: The argument to add (string).

        Returns:
          True when the expression now holds all of its expected arguments,
          False otherwise.

        Raises:
          ParseError: when more arguments arrive than expected.
        """
        self.args.append(arg)
        if len(self.args) > self.number_of_args:
            raise errors.ParseError(u'Too many args for this expression.')
        return len(self.args) == self.number_of_args

    def Compile(self, unused_filter_implementation):
        """Abstract: compile this expression against a filter implementation."""
        raise NotImplementedError(
            u'{0:s} does not implement Compile.'.format(self.__class__.__name__))

    # TODO: rename this function to GetTreeAsString or equivalent.
    def PrintTree(self, depth=''):
        """Return a one-line textual rendering of this node."""
        return u'{0:s} {1:s}'.format(depth, self)
class BinaryExpression(Expression):
    """An expression which takes two other expressions."""

    def __init__(self, operator='', part=None):
        """Initializes the binary expression object.

        Args:
          operator: optional operator string (e.g. 'and', '||').
          part: optional initial operand to seed the args list with.
        """
        # Call the base class FIRST: Expression.__init__ resets self.args to
        # an empty list, so appending `part` before the super call (as the
        # previous version did) silently discarded it.
        super(BinaryExpression, self).__init__()
        self.operator = operator
        if part:
            self.args.append(part)

    def __str__(self):
        """Return a string representation of the binary expression."""
        return 'Binary Expression: {0:s} {1:s}'.format(
            self.operator, [str(x) for x in self.args])

    def AddOperands(self, lhs, rhs):
        """Add an operand.

        Args:
          lhs: left-hand side expression (instance of Expression).
          rhs: right-hand side expression (instance of Expression).

        Raises:
          ParseError: if either operand is not an Expression.
        """
        if isinstance(lhs, Expression) and isinstance(rhs, Expression):
            self.args = [lhs, rhs]
        else:
            raise errors.ParseError(
                u'Expected expression, got {0:s} {1:s} {2:s}'.format(
                    lhs, self.operator, rhs))

    # TODO: rename this function to GetTreeAsString or equivalent.
    def PrintTree(self, depth=''):
        """Return an indented multi-line rendering of this subtree."""
        result = u'{0:s}{1:s}\n'.format(depth, self.operator)
        for part in self.args:
            result += u'{0:s}-{1:s}\n'.format(depth, part.PrintTree(depth + ' '))
        return result

    def Compile(self, filter_implementation):
        """Compile the binary expression into a filter object."""
        operator = self.operator.lower()
        if operator == 'and' or operator == '&&':
            method = 'AndFilter'
        elif operator == 'or' or operator == '||':
            method = 'OrFilter'
        else:
            raise errors.ParseError(
                u'Invalid binary operator {0:s}'.format(operator))

        args = [x.Compile(filter_implementation) for x in self.args]
        return getattr(filter_implementation, method)(*args)
class IdentityExpression(Expression):
    """An Expression which always evaluates to True."""

    def Compile(self, filter_implementation):
        """Compile the expression.

        Args:
          filter_implementation: the filter object factory.

        Returns:
          A filter object that matches everything (IdentityFilter).
        """
        return filter_implementation.IdentityFilter()
class SearchParser(Lexer):
    """This parser can parse the mini query language and build an AST.

    Examples of valid syntax:
      filename contains "foo" and (size > 100k or date before "2011-10")
      date between 2011 and 2010
      files older than 1 year
    """

    # Classes instantiated for leaf and binary AST nodes; subclasses may
    # override these to plug in their own expression types.
    expression_cls = Expression
    binary_expression_cls = BinaryExpression

    tokens = [
        # Double quoted string
        Token('STRING', '"', 'PopState,StringFinish', None),
        Token('STRING', r'\\(.)', 'StringEscape', None),
        Token('STRING', r'[^\\"]+', 'StringInsert', None),

        # Single quoted string
        Token('SQ_STRING', '\'', 'PopState,StringFinish', None),
        Token('SQ_STRING', r'\\(.)', 'StringEscape', None),
        Token('SQ_STRING', r'[^\\\']+', 'StringInsert', None),

        # TODO: Implement a unary not operator.
        # The first thing we see in the initial state takes up to the ATTRIBUTE
        Token('INITIAL', r'(and|or|\&\&|\|\|)', 'BinaryOperator', None),
        Token('INITIAL', r'[^\s\(\)]', 'PushState,PushBack', 'ATTRIBUTE'),
        Token('INITIAL', r'\(', 'BracketOpen', None),
        Token('INITIAL', r'\)', 'BracketClose', None),

        Token('ATTRIBUTE', r'[\w._0-9]+', 'StoreAttribute', 'OPERATOR'),

        Token('OPERATOR', r'[a-z0-9<>=\-\+\!\^\&%]+', 'StoreOperator',
              'ARG_LIST'),
        Token('OPERATOR', r'(!=|[<>=])', 'StoreSpecialOperator', 'ARG_LIST'),
        Token('ARG_LIST', r'[^\s\'"]+', 'InsertArg', None),

        # Start a string.
        Token('.', '"', 'PushState,StringStart', 'STRING'),
        Token('.', '\'', 'PushState,StringStart', 'SQ_STRING'),

        # Skip whitespace.
        Token('.', r'\s+', None, None),
    ]

    def __init__(self, data):
        """Initializes the search parser object.

        Args:
          data: the filter string to be parsed.
        """
        # Holds expression
        self.current_expression = self.expression_cls()
        self.filter_string = data

        # The token stack
        self.stack = []
        Lexer.__init__(self, data)

    def BinaryOperator(self, string=None, **unused_kwargs):
        """Set the binary operator."""
        self.stack.append(self.binary_expression_cls(string))

    def BracketOpen(self, **unused_kwargs):
        """Define an open bracket."""
        self.stack.append('(')

    def BracketClose(self, **unused_kwargs):
        """Close the bracket."""
        self.stack.append(')')

    def StringStart(self, **unused_kwargs):
        """Initialize the string."""
        self.string = ''

    def StringEscape(self, string, match, **unused_kwargs):
        """Escape backslashes found inside a string quote.

        Backslashes followed by anything other than ['"rnbt] will just be included
        in the string.

        Args:
          string: The string that matched.
          match: the match object (instance of re.MatchObject).
                 Where match.group(1) contains the escaped code.
        """
        if match.group(1) in '\'"rnbt':
            # NOTE(review): the 'string_escape' codec exists only under
            # Python 2; this line fails on Python 3 -- confirm the target
            # runtime before porting.
            self.string += string.decode('string_escape')
        else:
            self.string += string

    def StringInsert(self, string='', **unused_kwargs):
        """Add to the string."""
        self.string += string

    def StringFinish(self, **unused_kwargs):
        """Finish the string operation."""
        # A quoted string may terminate either an attribute name or an
        # argument; dispatch on the state the string started in.
        if self.state == 'ATTRIBUTE':
            return self.StoreAttribute(string=self.string)

        elif self.state == 'ARG_LIST':
            return self.InsertArg(string=self.string)

    def StoreAttribute(self, string='', **unused_kwargs):
        """Store the attribute.

        Raises:
          ParseError: when the attribute cannot be set on the expression.
        """
        logging.debug(u'Storing attribute {0:s}'.format(repr(string)))

        # TODO: Update the expected number_of_args
        try:
            self.current_expression.SetAttribute(string)
        except AttributeError:
            raise errors.ParseError(u'Invalid attribute \'{0:s}\''.format(string))

        return 'OPERATOR'

    def StoreOperator(self, string='', **unused_kwargs):
        """Store the operator."""
        logging.debug(u'Storing operator {0:s}'.format(repr(string)))
        self.current_expression.SetOperator(string)

    def InsertArg(self, string='', **unused_kwargs):
        """Insert an arg to the current expression."""
        logging.debug(u'Storing Argument {0:s}'.format(string))

        # This expression is complete
        if self.current_expression.AddArg(string):
            self.stack.append(self.current_expression)
            self.current_expression = self.expression_cls()
            return self.PopState()

    def _CombineBinaryExpressions(self, operator):
        """Combine binary expressions."""
        # Scan for <Expression> <BinaryExpression(op)> <Expression> triples
        # and fold them into the middle node.
        for i in range(1, len(self.stack)-1):
            item = self.stack[i]
            if (isinstance(item, BinaryExpression) and item.operator == operator and
                    isinstance(self.stack[i-1], Expression) and
                    isinstance(self.stack[i+1], Expression)):
                lhs = self.stack[i-1]
                rhs = self.stack[i+1]

                self.stack[i].AddOperands(lhs, rhs)
                self.stack[i-1] = None
                self.stack[i+1] = None

        # NOTE(review): under Python 3 filter() returns an iterator, which
        # would break the indexed access above on the next pass -- this
        # code assumes Python 2 (filter returns a list).
        self.stack = filter(None, self.stack)

    def _CombineParenthesis(self):
        """Combine parenthesis."""
        # Collapse '(' <Expression> ')' runs down to the bare expression.
        for i in range(len(self.stack)-2):
            if (self.stack[i] == '(' and self.stack[i+2] == ')' and
                    isinstance(self.stack[i+1], Expression)):
                self.stack[i] = None
                self.stack[i+2] = None

        self.stack = filter(None, self.stack)

    def Reduce(self):
        """Reduce the token stack into an AST."""
        # Check for sanity
        if self.state != 'INITIAL':
            self.Error(u'Premature end of expression')

        length = len(self.stack)
        while length > 1:
            # Precedence order
            self._CombineParenthesis()
            self._CombineBinaryExpressions('and')
            self._CombineBinaryExpressions('or')

            # No change
            if len(self.stack) == length:
                break
            length = len(self.stack)

        if length != 1:
            self.Error(u'Illegal query expression')

        return self.stack[0]

    def Error(self, message=None, unused_weight=1):
        """Raise an error message.

        Raises:
          ParseError: always, with position context from the lexer buffers.
        """
        # NOTE(review): {1:s} is fed an int (the processed-buffer length);
        # Python 2's format handling tolerates this, Python 3 does not.
        raise errors.ParseError(
            u'{0:s} in position {1:s}: {2:s} <----> {3:s} )'.format(
                message, len(self.processed_buffer), self.processed_buffer,
                self.buffer))

    def Parse(self):
        """Parse.

        Returns:
          The root expression of the AST, or an IdentityExpression when the
          filter string is empty.
        """
        if not self.filter_string:
            return IdentityExpression()

        self.Close()
        return self.Reduce()
| apache-2.0 |
mostaphaRoudsari/Honeybee | src/Honeybee_Constant Schedule.py | 1 | 9723 | #
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Antonello Di Nunzio <antonellodinunzio@gmail.com> and Chris Mackey <Chris@MackeyArchitecture.com>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to generate a schedule with a constant value or a schedule with 24 values that repeat in the same 24-hour pattern every day.
-
Provided by Ladybug 0.0.62
Args:
_value: A value or list of 24 values that will be repeated for every day of the year.
_scheduleName: A text string representing a name for the schedule that this component will create. This name should be unique among the schedules in your Grasshopper document to ensure that you do not overwrite other schedules.
_schedTypeLimits_: A text string from the scheduleTypeLimits output of the "Honeybee_Call From EP Schedule Library" component. This value represents the units of the schedule input values. The default is "Fractional" for a schedule with values that range between 0 and 1. Other common inputs include "Temperature", "On/Off", and "ActivityLevel".
Returns:
readMe!: ...
schedule: The name of the schedule that has been written to the memory of the GH document. Connect this to any shcedule input of a Honeybee component to assign the schedule.
weekSched: The name of the weekly schedule that has been written to the memory of the GH document. If your final intended annual schedule is seasonal (composed of different weekly schedules), you can use this output with the "Honeybee_Seasonal Schedule" to create such schedules.
schedIDFText: The text needed to tell EnergyPlus how to run the schedule. If you are done creating/editing a shcedule with this component, you may want to make your GH document smaller by internalizing this IDF text and using the "Honeybee_Add To EnergyPlus Library" component to add the schedule to the memory the next time you open the GH file. Then you can delete this component.
"""
ghenv.Component.Name = "Honeybee_Constant Schedule"
ghenv.Component.NickName = 'ConstantSchedule'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "07 | Energy | Schedule"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "0"
except: pass
import scriptcontext as sc
import Grasshopper.Kernel as gh
def IFDstrFromDayVals(dayValues, schName, daytype, schTypeLims):
    """Build an EnergyPlus Schedule:Day:Interval IDF string from hourly values.

    Runs of consecutive identical hourly values are collapsed into a single
    "until time" entry so that the generated IDF stays compact.  The final
    hour is always written with a terminating semicolon at 24:00.
    """
    lines = [
        'Schedule:Day:Interval,',
        '\t' + schName + ' Day Schedule - ' + daytype + ', !- Name',
        '\t' + schTypeLims + ', !- Schedule Type Limits Name',
        '\tNo, !- Interpolate to Timestep',
    ]
    intervalNum = 1
    lastHour = len(dayValues) - 1
    for hour, hourVal in enumerate(dayValues):
        if hour == lastHour:
            # Close out the day: the last field ends with ';' instead of ','.
            lines.append('\t24:00, !- Time %d {hh:mm}' % intervalNum)
            lines.append('\t%s; !- Value Until Time %d' % (hourVal, intervalNum))
        elif hourVal == dayValues[hour + 1]:
            # Same value continues into the next hour; merge into one interval.
            continue
        else:
            lines.append('\t%d:00, !- Time %d {hh:mm}' % (hour + 1, intervalNum))
            lines.append('\t%s, !- Value Until Time %d' % (hourVal, intervalNum))
            intervalNum += 1
    return '\n'.join(lines) + '\n'
def IFDstrForWeek(daySchedName, schName):
    """Build a Schedule:Week:Daily IDF string applying one day schedule to all day types."""
    dayTypes = ('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday',
                'Friday', 'Saturday', 'Holiday', 'SummerDesignDay',
                'WinterDesignDay', 'CustomDay1')
    idfStr = 'Schedule:Week:Daily,\n' + \
        '\t' + schName + ' Week Schedule' + ', !- Name\n'
    for dayType in dayTypes:
        idfStr += '\t' + daySchedName + ', !- ' + dayType + ' Schedule:Day Name\n'
    # The final field (CustomDay2) terminates the object with a semicolon.
    idfStr += '\t' + daySchedName + '; !- CustomDay2 Schedule:Day Name\n'
    return idfStr
def IFDstrForYear(weekSchedName, schName, schTypeLims):
    """Build a Schedule:Year IDF string covering Jan 1 - Dec 31 with one week schedule."""
    fields = [
        (schName, 'Name'),
        (schTypeLims, 'Schedule Type Limits Name'),
        (weekSchedName, 'Schedule:Week Name'),
        ('1', 'Start Month 1'),
        ('1', 'Start Day 1'),
        ('12', 'End Month'),
    ]
    idfStr = 'Schedule:Year,\n'
    for value, label in fields:
        idfStr += '\t' + value + ', !- ' + label + '\n'
    # Last field terminates the object with a semicolon.
    idfStr += '\t' + '31' + '; !- End Day\n'
    return idfStr
def main(values, schedName, schedTypeLimits):
# Import the classes.
lb_preparation = sc.sticky["ladybug_Preparation"]()
hb_EPObjectsAux = sc.sticky["honeybee_EPObjectsAUX"]()
scheduleTypeLimitsLib = sc.sticky["honeybee_ScheduleTypeLimitsLib"].keys()
# Generate a schedule IDF string if writeSchedule_ is set to True.
daySchedCollection = []
daySchNameCollect = []
schedIDFStrs = []
daySchedNames = []
# Get the type limits for the schedule.
if schedTypeLimits == None:
schTypeLims = 'Fractional'
else:
schTypeLims = schedTypeLimits
if schTypeLims.upper() == 'TEMPERATURE':
schTypeLims = 'TEMPERATURE 1'
if schTypeLims.upper() not in scheduleTypeLimitsLib:
warning = "Can't find the connected _schedTypeLimits_ '" + schTypeLims + "' in the Honeybee EP Schedule Library."
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
return -1
# Write out text strings for the daily schedules
if len(values) == 1:
values = [values[0] for x in range(24)]
elif len(values) == 24:
pass
else:
warning = "_value must be either a single value or a list of 24 values for each hour of the day."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
schedIDFStrs.append(IFDstrFromDayVals(values, schedName, 'Constant', schTypeLims))
daySchName = schedName + ' Day Schedule - ' + 'Constant'
# Write out text strings for the weekly values.
schedIDFStrs.append(IFDstrForWeek(daySchName, schedName))
weekSchedName = schedName + ' Week Schedule'
# Write out text for the annual values.
schedIDFStrs.append(IFDstrForYear(schedName + ' Week Schedule', schedName, schTypeLims))
yearSchedName = schedName
# Write all of the schedules to the memory of the GH document.
for EPObject in schedIDFStrs:
added, name = hb_EPObjectsAux.addEPObjectToLib(EPObject, overwrite = True)
return yearSchedName, weekSchedName, schedIDFStrs
w = gh.GH_RuntimeMessageLevel.Warning
#If Honeybee or Ladybug is not flying or is an older version, give a warning.
initCheck = True
#Ladybug check.
if not sc.sticky.has_key('ladybug_release') == True:
initCheck = False
print "You should first let Ladybug fly..."
ghenv.Component.AddRuntimeMessage(w, "You should first let Ladybug fly...")
else:
try:
if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): initCheck = False
if sc.sticky['ladybug_release'].isInputMissing(ghenv.Component): initCheck = False
except:
initCheck = False
warning = "You need a newer version of Ladybug to use this compoent." + \
"Use updateLadybug component to update userObjects.\n" + \
"If you have already updated userObjects drag Ladybug_Ladybug component " + \
"into canvas and try again."
ghenv.Component.AddRuntimeMessage(w, warning)
#Honeybee check.
if not sc.sticky.has_key('honeybee_release') == True:
initCheck = False
print "You should first let Honeybee fly..."
ghenv.Component.AddRuntimeMessage(w, "You should first let Honeybee fly...")
else:
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): initCheck = False
if sc.sticky['honeybee_release'].isInputMissing(ghenv.Component): initCheck = False
except:
initCheck = False
warning = "You need a newer version of Honeybee to use this compoent." + \
"Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
ghenv.Component.AddRuntimeMessage(w, warning)
#Check the data to make sure it is the correct type
if initCheck == True:
result = main(_value, _scheduleName, _schedTypeLimits_)
if result != -1:
schedule, weekSched, schedIDFText = result
print '\nscheduleValues generated!'
| gpl-3.0 |
cherez/youtube-dl | youtube_dl/extractor/vimeo.py | 5 | 28337 | # encoding: utf-8
from __future__ import unicode_literals
import json
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_urlparse,
)
from ..utils import (
encode_dict,
ExtractorError,
InAdvancePagedList,
int_or_none,
RegexNotFoundError,
sanitized_Request,
smuggle_url,
std_headers,
unified_strdate,
unsmuggle_url,
urlencode_postdata,
unescapeHTML,
)
class VimeoBaseInfoExtractor(InfoExtractor):
    """Shared login and CSRF-token helpers for all Vimeo extractors."""
    _NETRC_MACHINE = 'vimeo'
    # Subclasses set this to True when the page cannot be fetched anonymously.
    _LOGIN_REQUIRED = False
    _LOGIN_URL = 'https://vimeo.com/log_in'
    def _login(self):
        """Log in to vimeo.com with the configured credentials.

        A no-op when no credentials are available, unless the extractor
        declares _LOGIN_REQUIRED, in which case an ExtractorError is raised.
        """
        (username, password) = self._get_login_info()
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return
        self.report_login()
        # Fetch the login page first to obtain the CSRF token and vuid cookie.
        webpage = self._download_webpage(self._LOGIN_URL, None, False)
        token, vuid = self._extract_xsrft_and_vuid(webpage)
        data = urlencode_postdata(encode_dict({
            'action': 'login',
            'email': username,
            'password': password,
            'service': 'vimeo',
            'token': token,
        }))
        login_request = sanitized_Request(self._LOGIN_URL, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_request.add_header('Referer', self._LOGIN_URL)
        # The vuid cookie must be set before the POST or the login is rejected.
        self._set_vimeo_cookie('vuid', vuid)
        self._download_webpage(login_request, None, False, 'Wrong login info')
    def _extract_xsrft_and_vuid(self, webpage):
        """Return the (xsrft CSRF token, vuid) pair scraped from *webpage*."""
        xsrft = self._search_regex(
            r'xsrft\s*[=:]\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
            webpage, 'login token', group='xsrft')
        vuid = self._search_regex(
            r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
            webpage, 'vuid', group='vuid')
        return xsrft, vuid
    def _set_vimeo_cookie(self, name, value):
        """Set a cookie scoped to the vimeo.com domain."""
        self._set_cookie('vimeo.com', name, value)
class VimeoIE(VimeoBaseInfoExtractor):
    """Information extractor for vimeo.com."""
    # _VALID_URL matches Vimeo URLs
    _VALID_URL = r'''(?x)
        https?://
        (?:(?:www|(?P<player>player))\.)?
        vimeo(?P<pro>pro)?\.com/
        (?!channels/[^/?#]+/?(?:$|[?#])|album/)
        (?:.*?/)?
        (?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
        (?:videos?/)?
        (?P<id>[0-9]+)
        /?(?:[?&].*)?(?:[#].*)?$'''
    IE_NAME = 'vimeo'
    _TESTS = [
        {
            'url': 'http://vimeo.com/56015672#at=0',
            'md5': '8879b6cc097e987f02484baf890129e5',
            'info_dict': {
                'id': '56015672',
                'ext': 'mp4',
                'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                'description': 'md5:2d3305bad981a06ff79f027f19865021',
                'upload_date': '20121220',
                'uploader_id': 'user7108434',
                'uploader': 'Filippo Valsorda',
                'duration': 10,
            },
        },
        {
            'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
            'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
            'note': 'Vimeo Pro video (#1197)',
            'info_dict': {
                'id': '68093876',
                'ext': 'mp4',
                'uploader_id': 'openstreetmapus',
                'uploader': 'OpenStreetMap US',
                'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
                'description': 'md5:fd69a7b8d8c34a4e1d2ec2e4afd6ec30',
                'duration': 1595,
            },
        },
        {
            'url': 'http://player.vimeo.com/video/54469442',
            'md5': '619b811a4417aa4abe78dc653becf511',
            'note': 'Videos that embed the url in the player page',
            'info_dict': {
                'id': '54469442',
                'ext': 'mp4',
                'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
                'uploader': 'The BLN & Business of Software',
                'uploader_id': 'theblnbusinessofsoftware',
                'duration': 3610,
                'description': None,
            },
        },
        {
            'url': 'http://vimeo.com/68375962',
            'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
            'note': 'Video protected with password',
            'info_dict': {
                'id': '68375962',
                'ext': 'mp4',
                'title': 'youtube-dl password protected test video',
                'upload_date': '20130614',
                'uploader_id': 'user18948128',
                'uploader': 'Jaime Marquínez Ferrándiz',
                'duration': 10,
                'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people\u2026',
            },
            'params': {
                'videopassword': 'youtube-dl',
            },
        },
        {
            'url': 'http://vimeo.com/channels/keypeele/75629013',
            'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
            'note': 'Video is freely available via original URL '
                    'and protected with password when accessed via http://vimeo.com/75629013',
            'info_dict': {
                'id': '75629013',
                'ext': 'mp4',
                'title': 'Key & Peele: Terrorist Interrogation',
                'description': 'md5:8678b246399b070816b12313e8b4eb5c',
                'uploader_id': 'atencio',
                'uploader': 'Peter Atencio',
                'upload_date': '20130927',
                'duration': 187,
            },
        },
        {
            'url': 'http://vimeo.com/76979871',
            'note': 'Video with subtitles',
            'info_dict': {
                'id': '76979871',
                'ext': 'mp4',
                'title': 'The New Vimeo Player (You Know, For Videos)',
                'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
                'upload_date': '20131015',
                'uploader_id': 'staff',
                'uploader': 'Vimeo Staff',
                'duration': 62,
            }
        },
        {
            # from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
            'url': 'https://player.vimeo.com/video/98044508',
            'note': 'The js code contains assignments to the same variable as the config',
            'info_dict': {
                'id': '98044508',
                'ext': 'mp4',
                'title': 'Pier Solar OUYA Official Trailer',
                'uploader': 'Tulio Gonçalves',
                'uploader_id': 'user28849593',
            },
        },
        {
            'url': 'https://vimeo.com/109815029',
            'note': 'Video not completely processed, "failed" seed status',
            'only_matching': True,
        },
        {
            'url': 'https://vimeo.com/groups/travelhd/videos/22439234',
            'only_matching': True,
        },
    ]
    @staticmethod
    def _extract_vimeo_url(url, webpage):
        """Return the URL of a Vimeo player embedded in *webpage*, or None.

        Used by other extractors to hand embedded clips off to this one.
        The referring page URL is smuggled along because some embeds are
        restricted to the embedding domain.
        """
        # Look for embedded (iframe) Vimeo player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/.+?)\1', webpage)
        if mobj:
            player_url = unescapeHTML(mobj.group('url'))
            surl = smuggle_url(player_url, {'Referer': url})
            return surl
        # Look for embedded (swf embed) Vimeo player
        mobj = re.search(
            r'<embed[^>]+?src="((?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
        if mobj:
            return mobj.group(1)
    def _verify_video_password(self, url, video_id, webpage):
        """POST the --video-password to a password-protected video page.

        Returns the unlocked webpage; raises ExtractorError when no password
        was supplied or the password is wrong.
        """
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
        token, vuid = self._extract_xsrft_and_vuid(webpage)
        data = urlencode_postdata(encode_dict({
            'password': password,
            'token': token,
        }))
        if url.startswith('http://'):
            # vimeo only supports https now, but the user can give an http url
            url = url.replace('http://', 'https://')
        password_request = sanitized_Request(url + '/password', data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        password_request.add_header('Referer', url)
        self._set_vimeo_cookie('vuid', vuid)
        return self._download_webpage(
            password_request, video_id,
            'Verifying the password', 'Wrong password')
    def _verify_player_video_password(self, url, video_id):
        """POST the --video-password to a player.vimeo.com check endpoint.

        Returns the JSON response (the unlocked player config).
        """
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        data = urlencode_postdata(encode_dict({'password': password}))
        pass_url = url + '/check-password'
        password_request = sanitized_Request(pass_url, data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        return self._download_json(
            password_request, video_id,
            'Verifying the password',
            'Wrong password')
    def _real_initialize(self):
        # Log in (when credentials are configured) before any extraction.
        self._login()
    def _real_extract(self, url):
        """Download the video page, locate the player config JSON, and build
        the info dict (formats, metadata, subtitles) for a single clip."""
        # Embedding extractors smuggle the Referer of the embedding page.
        url, data = unsmuggle_url(url)
        headers = std_headers
        if data is not None:
            headers = headers.copy()
            headers.update(data)
        if 'Referer' not in headers:
            headers['Referer'] = url
        # Extract ID from URL
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        orig_url = url
        if mobj.group('pro') or mobj.group('player'):
            url = 'https://player.vimeo.com/video/' + video_id
        else:
            url = 'https://vimeo.com/' + video_id
        # Retrieve video webpage to extract further information
        request = sanitized_Request(url, None, headers)
        try:
            webpage = self._download_webpage(request, video_id)
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                errmsg = ee.cause.read()
                if b'Because of its privacy settings, this video cannot be played here' in errmsg:
                    raise ExtractorError(
                        'Cannot download embed-only video without embedding '
                        'URL. Please call youtube-dl with the URL of the page '
                        'that embeds this video.',
                        expected=True)
            raise
        # Now we begin extracting as much information as we can from what we
        # retrieved. First we extract the information common to all extractors,
        # and latter we extract those that are Vimeo specific.
        self.report_extraction(video_id)
        vimeo_config = self._search_regex(
            r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));', webpage,
            'vimeo config', default=None)
        if vimeo_config:
            # A "failed" seed status means the upload never finished processing.
            seed_status = self._parse_json(vimeo_config, video_id).get('seed_status', {})
            if seed_status.get('state') == 'failed':
                raise ExtractorError(
                    '%s said: %s' % (self.IE_NAME, seed_status['title']),
                    expected=True)
        # Extract the config JSON
        try:
            try:
                config_url = self._html_search_regex(
                    r' data-config-url="(.+?)"', webpage,
                    'config URL', default=None)
                if not config_url:
                    # Sometimes new react-based page is served instead of old one that require
                    # different config URL extraction approach (see
                    # https://github.com/rg3/youtube-dl/pull/7209)
                    vimeo_clip_page_config = self._search_regex(
                        r'vimeo\.clip_page_config\s*=\s*({.+?});', webpage,
                        'vimeo clip page config')
                    config_url = self._parse_json(
                        vimeo_clip_page_config, video_id)['player']['config_url']
                config_json = self._download_webpage(config_url, video_id)
                config = json.loads(config_json)
            except RegexNotFoundError:
                # For pro videos or player.vimeo.com urls
                # We try to find out to which variable is assigned the config dic
                m_variable_name = re.search('(\w)\.video\.id', webpage)
                if m_variable_name is not None:
                    config_re = r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))
                else:
                    config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
                config = self._search_regex(config_re, webpage, 'info section',
                                            flags=re.DOTALL)
                config = json.loads(config)
        except Exception as e:
            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')
            if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
                # Retry once through the password flow; the marker prevents
                # an infinite retry loop when the password is rejected.
                if data and '_video_password_verified' in data:
                    raise ExtractorError('video password verification failed!')
                self._verify_video_password(url, video_id, webpage)
                return self._real_extract(
                    smuggle_url(url, {'_video_password_verified': 'verified'}))
            else:
                raise ExtractorError('Unable to extract info section',
                                     cause=e)
        else:
            if config.get('view') == 4:
                config = self._verify_player_video_password(url, video_id)
        # Extract title
        video_title = config["video"]["title"]
        # Extract uploader and uploader_id
        video_uploader = config["video"]["owner"]["name"]
        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None
        # Extract video thumbnail
        video_thumbnail = config["video"].get("thumbnail")
        if video_thumbnail is None:
            # Fall back to the widest entry of the "thumbs" mapping
            # (keys appear to be pixel widths as strings).
            video_thumbs = config["video"].get("thumbs")
            if video_thumbs and isinstance(video_thumbs, dict):
                _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
        # Extract video description
        video_description = self._html_search_regex(
            r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
            webpage, 'description', default=None)
        if not video_description:
            video_description = self._html_search_meta(
                'description', webpage, default=None)
        if not video_description and mobj.group('pro'):
            # For VimeoPro, the description only lives on the original page.
            orig_webpage = self._download_webpage(
                orig_url, video_id,
                note='Downloading webpage for description',
                fatal=False)
            if orig_webpage:
                video_description = self._html_search_meta(
                    'description', orig_webpage, default=None)
        if not video_description and not mobj.group('player'):
            self._downloader.report_warning('Cannot find video description')
        # Extract video duration
        video_duration = int_or_none(config["video"].get("duration"))
        # Extract upload date
        video_upload_date = None
        mobj = re.search(r'<time[^>]+datetime="([^"]+)"', webpage)
        if mobj is not None:
            video_upload_date = unified_strdate(mobj.group(1))
        try:
            view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
            like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
            comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
        except RegexNotFoundError:
            # This info is only available in vimeo.com/{id} urls
            view_count = None
            like_count = None
            comment_count = None
        formats = []
        config_files = config['video'].get('files') or config['request'].get('files', {})
        for f in config_files.get('progressive', []):
            video_url = f.get('url')
            if not video_url:
                continue
            formats.append({
                'url': video_url,
                'format_id': 'http-%s' % f.get('quality'),
                'width': int_or_none(f.get('width')),
                'height': int_or_none(f.get('height')),
                'fps': int_or_none(f.get('fps')),
                'tbr': int_or_none(f.get('bitrate')),
            })
        m3u8_url = config_files.get('hls', {}).get('url')
        if m3u8_url:
            m3u8_formats = self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', 'm3u8_native', 0, 'hls', fatal=False)
            if m3u8_formats:
                formats.extend(m3u8_formats)
        # Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
        # at the same time without actual units specified. This lead to wrong sorting.
        self._sort_formats(formats, field_preference=('height', 'width', 'fps', 'format_id'))
        subtitles = {}
        text_tracks = config['request'].get('text_tracks')
        if text_tracks:
            for tt in text_tracks:
                subtitles[tt['lang']] = [{
                    'ext': 'vtt',
                    'url': 'https://vimeo.com' + tt['url'],
                }]
        return {
            'id': video_id,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': video_upload_date,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'duration': video_duration,
            'formats': formats,
            'webpage_url': url,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'subtitles': subtitles,
        }
class VimeoChannelIE(VimeoBaseInfoExtractor):
    """Extractor for Vimeo channel pages; also the base class for the other
    paged-listing extractors (user, album, group, watch later)."""
    IE_NAME = 'vimeo:channel'
    _VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
    # Presence of a rel="next" link means another listing page follows.
    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
    _TITLE = None
    _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
    _TESTS = [{
        'url': 'https://vimeo.com/channels/tributes',
        'info_dict': {
            'id': 'tributes',
            'title': 'Vimeo Tributes',
        },
        'playlist_mincount': 25,
    }]
    def _page_url(self, base_url, pagenum):
        """Return the URL (or Request) for listing page *pagenum* (1-based)."""
        return '%s/videos/page:%d/' % (base_url, pagenum)
    def _extract_list_title(self, webpage):
        """Return the playlist title, either a fixed _TITLE or scraped via _TITLE_RE."""
        return self._TITLE or self._html_search_regex(self._TITLE_RE, webpage, 'list title')
    def _login_list_password(self, page_url, list_id, webpage):
        """Unlock a password-protected listing page.

        Returns *webpage* unchanged when no password form is present,
        otherwise POSTs the --video-password and returns the unlocked page.
        """
        login_form = self._search_regex(
            r'(?s)<form[^>]+?id="pw_form"(.*?)</form>',
            webpage, 'login form', default=None)
        if not login_form:
            return webpage
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True)
        fields = self._hidden_inputs(login_form)
        token, vuid = self._extract_xsrft_and_vuid(webpage)
        fields['token'] = token
        fields['password'] = password
        post = urlencode_postdata(encode_dict(fields))
        password_path = self._search_regex(
            r'action="([^"]+)"', login_form, 'password URL')
        password_url = compat_urlparse.urljoin(page_url, password_path)
        password_request = sanitized_Request(password_url, post)
        password_request.add_header('Content-type', 'application/x-www-form-urlencoded')
        # Both cookies must be set before the POST for the unlock to stick.
        self._set_vimeo_cookie('vuid', vuid)
        self._set_vimeo_cookie('xsrft', token)
        return self._download_webpage(
            password_request, list_id,
            'Verifying the password', 'Wrong password')
    def _title_and_entries(self, list_id, base_url):
        """Generator that first yields the list title, then one url_result
        per clip, walking listing pages until no rel="next" link remains."""
        for pagenum in itertools.count(1):
            page_url = self._page_url(base_url, pagenum)
            webpage = self._download_webpage(
                page_url, list_id,
                'Downloading page %s' % pagenum)
            if pagenum == 1:
                webpage = self._login_list_password(page_url, list_id, webpage)
                yield self._extract_list_title(webpage)
            for video_id in re.findall(r'id="clip_(\d+?)"', webpage):
                yield self.url_result('https://vimeo.com/%s' % video_id, 'Vimeo')
            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break
    def _extract_videos(self, list_id, base_url):
        """Build a playlist result from the title-then-entries generator."""
        title_and_entries = self._title_and_entries(list_id, base_url)
        # The first yielded item is the list title; the rest are entries.
        list_title = next(title_and_entries)
        return self.playlist_result(title_and_entries, list_id, list_title)
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        return self._extract_videos(channel_id, 'https://vimeo.com/channels/%s' % channel_id)
class VimeoUserIE(VimeoChannelIE):
    """Extract all videos from a Vimeo user page (vimeo.com/<name>/videos)."""
    IE_NAME = 'vimeo:user'
    _VALID_URL = r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
    _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
    _TESTS = [{
        'url': 'https://vimeo.com/nkistudio/videos',
        'info_dict': {
            'title': 'Nki',
            'id': 'nkistudio',
        },
        'playlist_mincount': 66,
    }]
    def _real_extract(self, url):
        """Delegate to the channel-style paging logic for this user's videos."""
        user_name = re.match(self._VALID_URL, url).group('name')
        return self._extract_videos(
            user_name, 'https://vimeo.com/%s' % user_name)
class VimeoAlbumIE(VimeoChannelIE):
    """Extract all videos from a Vimeo album (vimeo.com/album/<id>)."""
    IE_NAME = 'vimeo:album'
    _VALID_URL = r'https://vimeo\.com/album/(?P<id>\d+)'
    _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
    _TESTS = [{
        'url': 'https://vimeo.com/album/2632481',
        'info_dict': {
            'id': '2632481',
            'title': 'Staff Favorites: November 2013',
        },
        'playlist_mincount': 13,
    }, {
        'note': 'Password-protected album',
        'url': 'https://vimeo.com/album/3253534',
        'info_dict': {
            'title': 'test',
            'id': '3253534',
        },
        'playlist_count': 1,
        'params': {
            'videopassword': 'youtube-dl',
        }
    }]
    def _page_url(self, base_url, pagenum):
        """Album listings paginate as <base>/page:<n>/ (no /videos/ segment)."""
        return '{0}/page:{1}/'.format(base_url, pagenum)
    def _real_extract(self, url):
        album_id = self._match_id(url)
        album_url = 'https://vimeo.com/album/%s' % album_id
        return self._extract_videos(album_id, album_url)
class VimeoGroupsIE(VimeoAlbumIE):
    """Extract all videos from a Vimeo group (vimeo.com/groups/<name>)."""
    IE_NAME = 'vimeo:group'
    _VALID_URL = r'https://vimeo\.com/groups/(?P<name>[^/]+)(?:/(?!videos?/\d+)|$)'
    _TESTS = [{
        'url': 'https://vimeo.com/groups/rolexawards',
        'info_dict': {
            'id': 'rolexawards',
            'title': 'Rolex Awards for Enterprise',
        },
        'playlist_mincount': 73,
    }]
    def _extract_list_title(self, webpage):
        """Group pages expose their title via OpenGraph metadata."""
        return self._og_search_title(webpage)
    def _real_extract(self, url):
        group_name = re.match(self._VALID_URL, url).group('name')
        return self._extract_videos(
            group_name, 'https://vimeo.com/groups/%s' % group_name)
class VimeoReviewIE(InfoExtractor):
    """Handle review pages by delegating the clip to the main Vimeo extractor."""
    IE_NAME = 'vimeo:review'
    IE_DESC = 'Review pages on vimeo'
    _VALID_URL = r'https://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
        'md5': 'c507a72f780cacc12b2248bb4006d253',
        'info_dict': {
            'id': '75524534',
            'ext': 'mp4',
            'title': "DICK HARDWICK 'Comedian'",
            'uploader': 'Richard Hardwick',
        }
    }, {
        'note': 'video player needs Referer',
        'url': 'https://vimeo.com/user22258446/review/91613211/13f927e053',
        'md5': '6295fdab8f4bf6a002d058b2c6dce276',
        'info_dict': {
            'id': '91613211',
            'ext': 'mp4',
            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
            'uploader': 'DevWeek Events',
            'duration': 2773,
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }]
    def _real_extract(self, url):
        """Hand the review's clip off to the Vimeo extractor via its player URL."""
        video_id = re.match(self._VALID_URL, url).group('id')
        return self.url_result(
            'https://player.vimeo.com/player/%s' % video_id, 'Vimeo', video_id)
class VimeoWatchLaterIE(VimeoChannelIE):
    """Extract the logged-in user's "Watch Later" list."""
    IE_NAME = 'vimeo:watchlater'
    IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
    _VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
    _TITLE = 'Watch Later'
    _LOGIN_REQUIRED = True
    _TESTS = [{
        'url': 'https://vimeo.com/watchlater',
        'only_matching': True,
    }]
    def _real_initialize(self):
        # The watch-later list only exists for an authenticated session.
        self._login()
    def _page_url(self, base_url, pagenum):
        request = sanitized_Request('%s/page:%d/' % (base_url, pagenum))
        # Ask for the AJAX variant of the page: the full HTML page does not
        # carry the clip ids, but the partial response does.
        request.add_header('X-Requested-With', 'XMLHttpRequest')
        return request
    def _real_extract(self, url):
        return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
class VimeoLikesIE(InfoExtractor):
    """Extract the list of videos a Vimeo user has liked, as a paged playlist."""
    _VALID_URL = r'https://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
    IE_NAME = 'vimeo:likes'
    IE_DESC = 'Vimeo user likes'
    _TEST = {
        'url': 'https://vimeo.com/user755559/likes/',
        'playlist_mincount': 293,
        "info_dict": {
            'id': 'user755559_likes',
            "description": "See all the videos urza likes",
            "title": 'Videos urza likes',
        },
    }
    def _real_extract(self, url):
        user_id = self._match_id(url)
        webpage = self._download_webpage(url, user_id)
        # The pagination footer reveals the total number of listing pages.
        page_count = self._int(
            self._search_regex(
                r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
                    .*?</a></li>\s*<li\s+class="pagination_next">
                ''', webpage, 'page count'),
            'page count', fatal=True)
        PAGE_SIZE = 12
        title = self._html_search_regex(
            r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
        description = self._html_search_meta('description', webpage)
        def _get_page(idx):
            """Yield url results for likes page *idx* (0-based); pages are
            fetched lazily by the InAdvancePagedList below."""
            page_url = 'https://vimeo.com/user%s/likes/page:%d/sort:date' % (
                user_id, idx + 1)
            webpage = self._download_webpage(
                page_url, user_id,
                note='Downloading page %d/%d' % (idx + 1, page_count))
            video_list = self._search_regex(
                r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
                webpage, 'video content')
            paths = re.findall(
                r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
            for path in paths:
                yield {
                    '_type': 'url',
                    'url': compat_urlparse.urljoin(page_url, path),
                }
        pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
        return {
            '_type': 'playlist',
            'id': 'user%s_likes' % user_id,
            'title': title,
            'description': description,
            'entries': pl,
        }
| unlicense |
jorik041/dfvfs | tests/vfs/tsk_file_entry.py | 2 | 5833 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the file entry implementation using the SleuthKit (TSK)."""
import os
import unittest
from dfvfs.path import os_path_spec
from dfvfs.path import tsk_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import tsk_file_entry
from dfvfs.vfs import tsk_file_system
class TSKFileEntryTest(unittest.TestCase):
"""The unit test for the SleuthKit (TSK) file entry object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = os.path.join(u'test_data', u'ímynd.dd')
self._os_path_spec = os_path_spec.OSPathSpec(location=test_file)
self._tsk_path_spec = tsk_path_spec.TSKPathSpec(
location=u'/', parent=self._os_path_spec)
self._file_system = tsk_file_system.TSKFileSystem(self._resolver_context)
self._file_system.Open(path_spec=self._tsk_path_spec)
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._file_system.Close()
def testIntialize(self):
"""Test the initialize functionality."""
file_entry = tsk_file_entry.TSKFileEntry(
self._resolver_context, self._file_system, self._tsk_path_spec)
self.assertNotEqual(file_entry, None)
def testGetFileEntryByPathSpec(self):
"""Test the get entry by path specification functionality."""
path_spec = tsk_path_spec.TSKPathSpec(inode=15, parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertNotEqual(file_entry, None)
def testGetLinkedFileEntry(self):
"""Test the get linked file entry functionality."""
path_spec = tsk_path_spec.TSKPathSpec(
inode=13, location=u'/a_link', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertNotEqual(file_entry, None)
linked_file_entry = file_entry.GetLinkedFileEntry()
self.assertNotEqual(linked_file_entry, None)
self.assertEqual(linked_file_entry.name, u'another_file')
def testGetParentFileEntry(self):
"""Test the get parent file entry functionality."""
path_spec = tsk_path_spec.TSKPathSpec(
inode=16, location=u'/a_directory/another_file',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertNotEqual(file_entry, None)
parent_file_entry = file_entry.GetParentFileEntry()
self.assertNotEqual(parent_file_entry, None)
self.assertEqual(parent_file_entry.name, u'a_directory')
def testGetStat(self):
"""Test the get stat functionality."""
path_spec = tsk_path_spec.TSKPathSpec(
inode=16, location=u'/a_directory/another_file',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
stat_object = file_entry.GetStat()
self.assertNotEqual(stat_object, None)
self.assertEqual(stat_object.type, stat_object.TYPE_FILE)
def testIsFunctions(self):
"""Test the Is? functionality."""
path_spec = tsk_path_spec.TSKPathSpec(
inode=16, location=u'/a_directory/another_file',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertFalse(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertFalse(file_entry.IsDirectory())
self.assertTrue(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
path_spec = tsk_path_spec.TSKPathSpec(
inode=12, location=u'/a_directory',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertFalse(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertTrue(file_entry.IsDirectory())
self.assertFalse(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
path_spec = tsk_path_spec.TSKPathSpec(
location=u'/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertTrue(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertTrue(file_entry.IsDirectory())
self.assertFalse(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
def testSubFileEntries(self):
  """Tests iterating the sub file entries of the root directory."""
  path_spec = tsk_path_spec.TSKPathSpec(
      location=u'/', parent=self._os_path_spec)
  file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
  self.assertNotEqual(file_entry, None)

  self.assertEqual(file_entry.number_of_sub_file_entries, 5)

  # Note that passwords.txt~ is currently ignored by dfvfs, since
  # its directory entry has no pytsk3.TSK_FS_META object.
  expected_names = sorted([
      u'a_directory',
      u'a_link',
      u'lost+found',
      u'passwords.txt',
      u'$OrphanFiles'])

  names = sorted(
      sub_file_entry.name
      for sub_file_entry in file_entry.sub_file_entries)

  self.assertEqual(len(names), len(expected_names))
  self.assertEqual(names, expected_names)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
treeeferrrr/frogcast | app/lib/pbr/hooks/files.py | 98 | 3681 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from pbr import find_package
from pbr.hooks import base
def get_manpath():
    """Return the prefix-relative directory where man pages are installed.

    Defaults to ``share/man`` but uses plain ``man`` when the install
    prefix already contains a ``man`` entry.
    """
    if os.path.exists(os.path.join(sys.prefix, 'man')):
        # This works around a bug with install where it expects every node
        # in the relative data directory to be an actual directory, since at
        # least Debian derivatives (and probably other platforms as well)
        # like to symlink Unixish /usr/local/man to /usr/local/share/man.
        return 'man'
    return 'share/man'


def get_man_section(section):
    """Return the man directory for *section*, e.g. ``share/man/man1``."""
    return os.path.join(get_manpath(), 'man{0}'.format(section))
class FilesConfig(base.BaseConfig):
    """Handles the ``[files]`` section of the setup configuration."""

    section = 'files'

    def __init__(self, config, name):
        super(FilesConfig, self).__init__(config)
        self.name = name
        self.data_files = self.config.get('data_files', '')

    def save(self):
        """Write the (possibly rewritten) data_files entry back."""
        self.config['data_files'] = self.data_files
        super(FilesConfig, self).save()

    def expand_globs(self):
        """Replace trailing-``*`` data_files lines with explicit file lists.

        A line of the form ``target = source/*`` is expanded, via os.walk,
        into one ``<dir> =`` line per directory under ``source`` followed by
        the files found in that directory.
        """
        expanded = []
        for line in self.data_files.split("\n"):
            if '=' in line and line.rstrip().endswith('*'):
                (target, source_glob) = line.split('=')
                # Drop the trailing '*' to get the directory prefix to walk.
                source_prefix = source_glob.strip()[:-1]
                target = target.strip()
                if not target.endswith(os.path.sep):
                    target += os.path.sep
                for (dirpath, dirnames, fnames) in os.walk(source_prefix):
                    expanded.append(
                        "%s = " % dirpath.replace(source_prefix, target))
                    expanded.extend(
                        [" %s" % os.path.join(dirpath, f) for f in fnames])
            else:
                expanded.append(line)
        self.data_files = "\n".join(expanded)

    def add_man_path(self, man_path):
        """Append a new ``<man_path> =`` target line to data_files."""
        self.data_files = "%s\n%s =" % (self.data_files, man_path)

    def add_man_page(self, man_page):
        """Append a man page file to the current data_files target."""
        self.data_files = "%s\n %s" % (self.data_files, man_page)

    def get_man_sections(self):
        """Group configured man pages by their section number (last char)."""
        man_sections = dict()
        for manpage in self.pbr_config['manpages'].split():
            page = manpage.strip()
            man_sections.setdefault(page[-1], []).append(page)
        return man_sections

    def hook(self):
        """Expand package directories, globs and man page declarations."""
        packages = self.config.get('packages', self.name).strip()
        package_list = []
        for pkg in packages.split("\n"):
            if os.path.isdir(pkg.strip()):
                package_list.append(
                    find_package.smart_find_packages(pkg.strip()))
        self.config['packages'] = "\n".join(package_list)

        self.expand_globs()

        if 'manpages' in self.pbr_config:
            for (section, pages) in self.get_man_sections().items():
                self.add_man_path(get_man_section(section))
                for page in pages:
                    self.add_man_page(page)
| apache-2.0 |
jeanlinux/calibre | src/calibre/ebooks/metadata/epub.py | 13 | 12254 | #!/usr/bin/env python2
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''Read meta information from epub files'''
import os, re, posixpath
from cStringIO import StringIO
from contextlib import closing
from future_builtins import map
from calibre.utils.zipfile import ZipFile, BadZipfile, safe_replace
from calibre.utils.localunzip import LocalZipFile
from calibre.ebooks.BeautifulSoup import BeautifulStoneSoup
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.metadata.opf2 import OPF
from calibre.ptempfile import TemporaryDirectory, PersistentTemporaryFile
from calibre import CurrentDir, walk
from calibre.constants import isosx
from calibre.utils.localization import lang_as_iso639_1
class EPubException(Exception):
    """Base class for all EPUB processing errors."""


class OCFException(EPubException):
    """Raised for errors at the OCF container layer."""


class ContainerException(OCFException):
    """Raised when META-INF/container.xml is malformed."""
class Container(dict):
    """Maps media-types to root file paths, parsed from container.xml."""

    def __init__(self, stream=None):
        # Allow constructing an empty container when no stream is given.
        if not stream:
            return
        tree = BeautifulStoneSoup(stream.read())
        root = tree.find(name=re.compile(r'container$', re.I))
        if not root:
            raise OCFException("<container> element missing")
        if root.get('version', None) != '1.0':
            raise EPubException("unsupported version of OCF")
        rootfile_list = root.find(re.compile(r'rootfiles$', re.I))
        if not rootfile_list:
            raise EPubException("<rootfiles/> element missing")
        for entry in rootfile_list.findAll(re.compile(r'rootfile$', re.I)):
            try:
                self[entry['media-type']] = entry['full-path']
            except KeyError:
                raise EPubException("<rootfile/> element malformed")
class OCF(object):
    """Abstract base class describing the OCF (EPUB container) layout."""

    MIMETYPE = 'application/epub+zip'
    CONTAINER_PATH = 'META-INF/container.xml'
    ENCRYPTION_PATH = 'META-INF/encryption.xml'

    def __init__(self):
        # Concrete readers (zip/dir based) must be used instead.
        raise NotImplementedError('Abstract base class')
class Encryption(object):
    """Parses META-INF/encryption.xml and answers per-file DRM queries."""

    # These algorithms are font obfuscation, not DRM, so files using them
    # are not reported as encrypted.
    OBFUSCATION_ALGORITHMS = frozenset([
        'http://ns.adobe.com/pdf/enc#RC',
        'http://www.idpf.org/2008/embedding'])

    def __init__(self, raw):
        from lxml import etree
        self.entries = {}
        self.root = etree.fromstring(raw) if raw else None
        if self.root is None:
            return
        for method in self.root.xpath(
                'descendant::*[contains(name(), "EncryptionMethod")]'):
            algorithm = method.get('Algorithm', '')
            references = method.getparent().xpath(
                'descendant::*[contains(name(), "CipherReference")]')
            if not references:
                continue
            uri = references[0].get('URI', '')
            if uri and algorithm:
                self.entries[uri] = algorithm

    def is_encrypted(self, uri):
        """Return True when *uri* is encrypted with a non-obfuscation algorithm."""
        algo = self.entries.get(uri, None)
        return algo is not None and algo not in self.OBFUSCATION_ALGORITHMS
class OCFReader(OCF):
    # Shared initialisation for the zip- and directory-backed readers.
    # Subclasses must provide self.open() and self.root before this runs.

    def __init__(self):
        # A wrong or absent 'mimetype' member is common in real-world EPUBs,
        # so it only produces a warning and parsing continues.
        try:
            mimetype = self.open('mimetype').read().rstrip()
            if mimetype != OCF.MIMETYPE:
                print 'WARNING: Invalid mimetype declaration', mimetype
        except:
            print 'WARNING: Epub doesn\'t contain a mimetype declaration'

        # container.xml maps media-types to root files; without it the
        # EPUB cannot be read at all.
        try:
            with closing(self.open(OCF.CONTAINER_PATH)) as f:
                self.container = Container(f)
        except KeyError:
            raise EPubException("missing OCF container.xml file")
        self.opf_path = self.container[OPF.MIMETYPE]
        # Parse the OPF package document (metadata lives here).
        try:
            with closing(self.open(self.opf_path)) as f:
                self.opf = OPF(f, self.root, populate_spine=False)
        except KeyError:
            raise EPubException("missing OPF package file")
        # encryption.xml is optional: fall back to an empty Encryption.
        try:
            with closing(self.open(self.ENCRYPTION_PATH)) as f:
                self.encryption_meta = Encryption(f.read())
        except:
            self.encryption_meta = Encryption(None)
class OCFZipReader(OCFReader):
    """OCF reader backed by a zip archive (or an already-open archive)."""

    def __init__(self, stream, mode='r', root=None):
        if isinstance(stream, (LocalZipFile, ZipFile)):
            # Caller handed us an archive object directly.
            self.archive = stream
        else:
            try:
                self.archive = ZipFile(stream, mode=mode)
            except BadZipfile:
                raise EPubException("not a ZIP .epub OCF container")
        self.root = root
        if self.root is None:
            # Derive the root from the stream's file name when possible.
            name = getattr(stream, 'name', False)
            if name:
                self.root = os.path.abspath(os.path.dirname(name))
            else:
                self.root = os.getcwdu()
        super(OCFZipReader, self).__init__()

    def open(self, name, mode='r'):
        """Return a file-like object for the archive member *name*."""
        if isinstance(self.archive, LocalZipFile):
            return self.archive.open(name)
        return StringIO(self.archive.read(name))
def get_zip_reader(stream, root=None):
    """Build an OCFZipReader, falling back to LocalZipFile on failure."""
    try:
        archive = ZipFile(stream, mode='r')
    except:
        stream.seek(0)
        archive = LocalZipFile(stream)
    return OCFZipReader(archive, root=root)
class OCFDirReader(OCFReader):
    # OCF reader over an EPUB that has been unpacked into a directory.

    def __init__(self, path):
        self.root = path
        super(OCFDirReader, self).__init__()

    # NOTE: the method name 'open' mirrors OCFZipReader.open (the OCFReader
    # base class calls self.open()); the builtin open() used in the body is
    # unaffected by the method name.
    def open(self, path, *args, **kwargs):
        return open(os.path.join(self.root, path), *args, **kwargs)
def render_cover(opf, opf_path, zf, reader=None):
    # Render the first spine item (usually the HTML cover page) to a raster
    # image. Returns None when there is no spine item, it is encrypted, or
    # it cannot be extracted.
    from calibre.ebooks import render_html_svg_workaround
    from calibre.utils.logging import default_log
    cpage = opf.first_spine_item()
    if not cpage:
        return
    if reader is not None and reader.encryption_meta.is_encrypted(cpage):
        return
    # Extract the whole archive into a temp dir so relative resources
    # (CSS, images, fonts) referenced by the cover page resolve.
    with TemporaryDirectory('_epub_meta') as tdir:
        with CurrentDir(tdir):
            zf.extractall()
            opf_path = opf_path.replace('/', os.sep)
            # Spine hrefs are relative to the OPF's directory.
            cpage = os.path.join(tdir, os.path.dirname(opf_path), cpage)
            if not os.path.exists(cpage):
                return
            if isosx:
                # On OS X trying to render a HTML cover which uses embedded
                # fonts more than once in the same process causes a crash in Qt
                # so be safe and remove the fonts as well as any @font-face
                # rules
                for f in walk('.'):
                    if os.path.splitext(f)[1].lower() in ('.ttf', '.otf'):
                        os.remove(f)
                ffpat = re.compile(br'@font-face.*?{.*?}',
                        re.DOTALL|re.IGNORECASE)
                # Strip @font-face rules from the cover page in place.
                with open(cpage, 'r+b') as f:
                    raw = f.read()
                    f.truncate(0)
                    f.seek(0)
                    raw = ffpat.sub(b'', raw)
                    f.write(raw)
                from calibre.ebooks.chardet import xml_to_unicode
                raw = xml_to_unicode(raw,
                        strip_encoding_pats=True, resolve_entities=True)[0]
                from lxml import html
                # Also strip @font-face rules from any linked resources
                # (typically stylesheets) referenced by the cover page.
                for link in html.fromstring(raw).xpath('//link'):
                    href = link.get('href', '')
                    if href:
                        path = os.path.join(os.path.dirname(cpage), href)
                        if os.path.exists(path):
                            with open(path, 'r+b') as f:
                                raw = f.read()
                                f.truncate(0)
                                f.seek(0)
                                raw = ffpat.sub(b'', raw)
                                f.write(raw)
            return render_html_svg_workaround(cpage, default_log)
def get_cover(opf, opf_path, stream, reader=None):
    """Return the cover image bytes for an EPUB.

    Prefers the raster cover declared in the OPF; falls back to rendering
    the first spine item via render_cover().
    """
    raster_cover = opf.raster_cover
    stream.seek(0)
    try:
        archive = ZipFile(stream)
    except:
        stream.seek(0)
        archive = LocalZipFile(stream)
    if raster_cover:
        # Cover href is relative to the OPF's directory inside the zip.
        cover_path = posixpath.normpath(posixpath.join(
            posixpath.dirname(opf_path), raster_cover))
        if reader is not None and \
                reader.encryption_meta.is_encrypted(cover_path):
            return
        try:
            member = archive.getinfo(cover_path)
        except:
            # Declared cover not present in the archive; fall through to
            # rendering the first spine item below.
            pass
        else:
            fobj = archive.open(member)
            data = fobj.read()
            fobj.close()
            archive.close()
            return data
    return render_cover(opf, opf_path, archive, reader=reader)
def get_metadata(stream, extract_cover=True):
    """ Return metadata as a :class:`Metadata` object """
    stream.seek(0)
    reader = get_zip_reader(stream)
    mi = reader.opf.to_book_metadata()
    if extract_cover:
        try:
            cover_bytes = get_cover(
                reader.opf, reader.opf_path, stream, reader=reader)
            if cover_bytes is not None:
                mi.cover_data = ('jpg', cover_bytes)
        except:
            # Cover extraction is best effort; metadata is still returned.
            import traceback
            traceback.print_exc()
    mi.timestamp = None
    return mi


def get_quick_metadata(stream):
    """Like :func:`get_metadata` but skips cover extraction for speed."""
    return get_metadata(stream, False)
def _write_new_cover(new_cdata, cpath):
    """Write *new_cdata* to a persistent temp file sharing cpath's extension."""
    from calibre.utils.magick.draw import save_cover_data_to
    extension = os.path.splitext(cpath)[1]
    tmp_cover = PersistentTemporaryFile(suffix=extension)
    tmp_cover.close()
    save_cover_data_to(new_cdata, tmp_cover.name)
    return tmp_cover
def normalize_languages(opf_languages, mi_languages):
    ' Preserve original country codes and use 2-letter lang codes where possible '
    from calibre.spell import parse_lang_code

    def parse(code):
        try:
            return parse_lang_code(code)
        except ValueError:
            return None

    parsed_opf = [x for x in (parse(c) for c in opf_languages) if x]
    # Remember which country code the OPF used for each language so it can
    # be re-attached when the incoming metadata omits it.
    cc_map = {x.langcode: x.countrycode for x in parsed_opf}
    parsed_mi = [x for x in (parse(c) for c in mi_languages) if x]

    def norm(code):
        lc = code.langcode
        cc = code.countrycode or cc_map.get(lc, None)
        # Prefer the 2-letter ISO 639-1 code when one exists.
        lc = lang_as_iso639_1(lc) or lc
        if cc:
            lc += '-' + cc
        return lc

    return [norm(x) for x in parsed_mi]
def update_metadata(opf, mi, apply_null=False, update_timestamp=False, force_identifiers=False):
    """Apply the metadata in *mi* to the parsed OPF object *opf*."""
    # Never propagate structural fields from the metadata object.
    for attr in ('guide', 'toc', 'manifest', 'spine'):
        setattr(mi, attr, None)
    if mi.languages:
        mi.languages = normalize_languages(
            list(opf.raw_languages) or [], mi.languages)

    opf.smart_update(mi, apply_null=apply_null)
    if getattr(mi, 'uuid', None):
        opf.application_id = mi.uuid
    if apply_null or force_identifiers:
        opf.set_identifiers(mi.get_identifiers())
    else:
        # Merge new identifiers over existing ones, dropping empty entries.
        merged = opf.get_identifiers()
        merged.update(mi.get_identifiers())
        opf.set_identifiers({k: v for k, v in merged.iteritems() if k and v})
    if update_timestamp and mi.timestamp is not None:
        opf.timestamp = mi.timestamp
def set_metadata(stream, mi, apply_null=False, update_timestamp=False, force_identifiers=False):
    # Write the metadata in *mi* back into the EPUB *stream* in place,
    # optionally replacing the raster cover image inside the archive.
    stream.seek(0)
    reader = get_zip_reader(stream, root=os.getcwdu())
    raster_cover = reader.opf.raster_cover
    mi = MetaInformation(mi)
    new_cdata = None
    # Maps archive paths to replacement file objects for safe_replace().
    replacements = {}
    # Prefer in-memory cover data; fall back to reading mi.cover from disk.
    try:
        new_cdata = mi.cover_data[1]
        if not new_cdata:
            raise Exception('no cover')
    except:
        try:
            new_cdata = open(mi.cover, 'rb').read()
        except:
            pass
    new_cover = cpath = None
    if new_cdata and raster_cover:
        try:
            cpath = posixpath.join(posixpath.dirname(reader.opf_path),
                    raster_cover)
            # Only replace unencrypted covers with a known raster extension.
            cover_replacable = not reader.encryption_meta.is_encrypted(cpath) and \
                    os.path.splitext(cpath)[1].lower() in ('.png', '.jpg', '.jpeg')
            if cover_replacable:
                new_cover = _write_new_cover(new_cdata, cpath)
                replacements[cpath] = open(new_cover.name, 'rb')
        except:
            # Cover replacement is best effort; metadata is still written.
            import traceback
            traceback.print_exc()
    update_metadata(reader.opf, mi, apply_null=apply_null,
                    update_timestamp=update_timestamp,
                    force_identifiers=force_identifiers)
    newopf = StringIO(reader.opf.render())
    # Rewrite the OPF (and any cover replacement) inside the archive.
    if isinstance(reader.archive, LocalZipFile):
        reader.archive.safe_replace(reader.container[OPF.MIMETYPE], newopf,
            extra_replacements=replacements)
    else:
        safe_replace(stream, reader.container[OPF.MIMETYPE], newopf,
            extra_replacements=replacements)
    # Clean up the temporary replacement cover file, best effort.
    try:
        if cpath is not None:
            replacements[cpath].close()
            os.remove(replacements[cpath].name)
    except:
        pass
| gpl-3.0 |
lehinevych/cfme_tests | artifactor/plugins/video.py | 8 | 2973 | """ Video plugin for Artifactor
Add a stanza to the artifactor config like this,
artifactor:
log_dir: /home/username/outdir
per_run: test #test, run, None
overwrite: True
plugins:
video:
enabled: True
plugin: video
quality: 10
display: ":99"
"""
from artifactor import ArtifactorBasePlugin
import os
from utils.video import Recorder
class Video(ArtifactorBasePlugin):
    """Artifactor plugin that records an ogv screen capture per test.

    Reads ``quality`` and ``display`` from the plugin configuration and
    starts/stops a Recorder around every test.
    """

    class Test(object):
        # Per-test recording state: the recorder handle and a running flag.
        def __init__(self, ident):
            self.ident = ident
            self.in_progress = False
            self.recorder = None

    def plugin_initialize(self):
        """Register the artifactor hooks this plugin responds to."""
        self.register_plugin_hook('start_test', self.start_test)
        self.register_plugin_hook('finish_test', self.finish_test)
        self.register_plugin_hook('finish_session', self.finish_session)

    def configure(self):
        """Read recording quality and X display from the plugin config."""
        self.configured = True
        self.tests = {}
        self.quality = self.data.get('quality', '10')
        self.display = self.data.get('display', ':0')

    @ArtifactorBasePlugin.check_configured
    def start_test(self, artifact_path, test_name, test_location, slaveid):
        """Start a recording for a test; refuses to start twice."""
        test_ident = "{}/{}".format(test_location, test_name)
        if test_ident in self.tests:
            if self.tests[test_ident].in_progress:
                print("Test already running, can't start another")
                return None
        else:
            self.tests[test_ident] = self.Test(test_ident)
        self.tests[test_ident].in_progress = True
        artifacts = []
        # NOTE(review): self.ident presumably comes from ArtifactorBasePlugin
        # and names this plugin instance -- confirm it is unique per test dir.
        os_filename = os.path.join(artifact_path, self.ident + ".ogv")
        # Remove a stale recording from a previous run of the same test.
        if os.path.isfile(os_filename):
            os.remove(os_filename)
        artifacts.append(os_filename)
        try:
            self.tests[test_ident].recorder = Recorder(
                os_filename, display=self.display, quality=self.quality)
            self.tests[test_ident].recorder.start()
        except Exception as e:
            # A failed recorder start is logged but must not fail the test.
            print(e)
        for filename in artifacts:
            self.fire_hook('filedump', test_location=test_location, test_name=test_name,
                           description="Video recording", file_type="video",
                           contents="", display_glyph="camera", dont_write=True,
                           os_filename=filename,
                           group_id="misc-artifacts", slaveid=slaveid)

    @ArtifactorBasePlugin.check_configured
    def finish_test(self, artifact_path, test_name, test_location):
        """Stop the recorder for a finished test and drop its state."""
        test_ident = "{}/{}".format(test_location, test_name)
        try:
            self.tests[test_ident].recorder.stop()
        except Exception as e:
            print(e)
        del self.tests[test_ident]

    def finish_session(self):
        """Stop any recorders still running when the session ends.

        Bug fix: the original iterated ``self.tests`` directly, which yields
        the key *strings*; ``key.recorder`` raised AttributeError and the
        blanket except swallowed it, so recorders were never actually stopped
        here. Iterate the Test values instead, with the try inside the loop
        so one failing recorder does not prevent stopping the rest.
        """
        for test in self.tests.values():
            try:
                test.recorder.stop()
            except Exception as e:
                print(e)
| gpl-2.0 |
nilmini20s/gem5-2016-08-13 | ext/ply/example/calcdebug/calc.py | 165 | 2306 | # -----------------------------------------------------------------------------
# calc.py
#
# This example shows how to run the parser in a debugging mode
# with output routed to a logging object.
# -----------------------------------------------------------------------------
import sys
sys.path.insert(0,"../..")

# Python 3 compatibility: alias raw_input to input.
if sys.version_info[0] >= 3:
    raw_input = input

# Token names used by the lexer; the single-character operators are
# declared via the 'literals' list instead of named tokens.
tokens = (
    'NAME','NUMBER',
    )

literals = ['=','+','-','*','/', '(',')']

# Tokens

# Identifier: a letter or underscore followed by alphanumerics/underscores.
# (In ply, the regex for a simple token is the string value itself.)
t_NAME    = r'[a-zA-Z_][a-zA-Z0-9_]*'

def t_NUMBER(t):
    r'\d+'
    # The docstring above is the token's regex (ply convention); the body
    # converts the matched text to an int.
    t.value = int(t.value)
    return t

# Spaces and tabs are skipped.
t_ignore = " \t"

def t_newline(t):
    r'\n+'
    # Track line numbers for error reporting.
    t.lexer.lineno += t.value.count("\n")

def t_error(t):
    # Report and skip a single illegal character, then continue lexing.
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)

# Build the lexer
import ply.lex as lex
lex.lex()
# Parsing rules

# Operator precedence, lowest to highest. UMINUS is a fictitious token used
# only (via %prec) to give unary minus higher precedence than binary ops.
precedence = (
    ('left','+','-'),
    ('left','*','/'),
    ('right','UMINUS'),
    )

# dictionary of names (variable bindings made by assignment statements)
names = { }
# NOTE: in ply, each rule function's docstring IS the grammar production;
# it must not be edited casually.

def p_statement_assign(p):
    'statement : NAME "=" expression'
    # Bind the evaluated expression to the identifier.
    names[p[1]] = p[3]

def p_statement_expr(p):
    'statement : expression'
    # A bare expression statement: evaluate and print the result.
    print(p[1])
def p_expression_binop(p):
    '''expression : expression '+' expression
                  | expression '-' expression
                  | expression '*' expression
                  | expression '/' expression'''
    # The docstring above is the grammar production (ply convention).
    left, op, right = p[1], p[2], p[3]
    if op == '+':
        p[0] = left + right
    elif op == '-':
        p[0] = left - right
    elif op == '*':
        p[0] = left * right
    elif op == '/':
        p[0] = left / right
# NOTE: each docstring below is the grammar production (ply convention).

def p_expression_uminus(p):
    "expression : '-' expression %prec UMINUS"
    p[0] = -p[2]

def p_expression_group(p):
    "expression : '(' expression ')'"
    p[0] = p[2]

def p_expression_number(p):
    "expression : NUMBER"
    p[0] = p[1]

def p_expression_name(p):
    "expression : NAME"
    try:
        p[0] = names[p[1]]
    except LookupError:
        # Undefined variables evaluate to 0 after printing a warning.
        print("Undefined name '%s'" % p[1])
        p[0] = 0

def p_error(p):
    if p:
        print("Syntax error at '%s'" % p.value)
    else:
        # p is None when the error occurs at end of input.
        print("Syntax error at EOF")
import ply.yacc as yacc
yacc.yacc()

import logging
# Route parser debug output to a log file instead of stderr; this is the
# point of the "calcdebug" example.
logging.basicConfig(
    level=logging.INFO,
    filename="parselog.txt"
)

# Read-eval-print loop; each line is parsed with debug logging enabled.
while 1:
    try:
        s = raw_input('calc > ')
    except EOFError:
        break
    if not s: continue
    yacc.parse(s,debug=logging.getLogger())
| bsd-3-clause |
mushtaqak/edx-platform | lms/djangoapps/shoppingcart/migrations/0027_add_invoice_history.py | 102 | 22387 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # South schema migration: create the InvoiceHistory table, which
        # stores timestamped JSON snapshots of Invoice rows for auditing.
        # Adding model 'InvoiceHistory'
        db.create_table('shoppingcart_invoicehistory', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
            ('invoice', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shoppingcart.Invoice'])),
            ('snapshot', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('shoppingcart', ['InvoiceHistory'])
    def backwards(self, orm):
        # Reverse migration: drop the table created in forwards().
        # Deleting model 'InvoiceHistory'
        db.delete_table('shoppingcart_invoicehistory')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 8, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregcodeitem': {
'Meta': {'object_name': 'CourseRegCodeItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.courseregcodeitemannotation': {
'Meta': {'object_name': 'CourseRegCodeItemAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 8, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'}),
'invoice_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCodeInvoiceItem']", 'null': 'True'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_order'", 'null': 'True', 'to': "orm['shoppingcart.Order']"})
},
'shoppingcart.courseregistrationcodeinvoiceitem': {
'Meta': {'object_name': 'CourseRegistrationCodeInvoiceItem', '_ormbases': ['shoppingcart.InvoiceItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'invoiceitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.InvoiceItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.donation': {
'Meta': {'object_name': 'Donation', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'general'", 'max_length': '32'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.donationconfiguration': {
'Meta': {'object_name': 'DonationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_line_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'})
},
'shoppingcart.invoicehistory': {
'Meta': {'object_name': 'InvoiceHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']"}),
'snapshot': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'})
},
'shoppingcart.invoiceitem': {
'Meta': {'object_name': 'InvoiceItem'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']"}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'})
},
'shoppingcart.invoicetransaction': {
'Meta': {'object_name': 'InvoiceTransaction'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']"}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_modified_by_user'", 'to': "orm['auth.User']"}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '32'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_type': ('django.db.models.fields.CharField', [], {'default': "'personal'", 'max_length': '32'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']", 'null': 'True'}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 8, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 |
sebrandon1/nova | nova/tests/unit/api/openstack/compute/test_cloudpipe.py | 3 | 7072 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid as uuid_lib
from oslo_utils import timeutils
from webob import exc
from nova.api.openstack.compute import cloudpipe as cloudpipe_v21
from nova.compute import utils as compute_utils
import nova.conf
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network
from nova.tests.unit import matchers
from nova import utils
# Shortcut to the global nova configuration object (used by the fakes below).
CONF = nova.conf.CONF

# Random identifiers generated once per test run and shared by all fakes in
# this module so controller responses can be compared against known values.
project_id = str(uuid_lib.uuid4().hex)
uuid = str(uuid_lib.uuid4())
def fake_vpn_instance():
    """Build the canned VPN instance returned by the stubbed compute API."""
    attrs = {
        'id': 7,
        'image_ref': CONF.cloudpipe.vpn_image_id,
        'vm_state': 'active',
        'created_at': timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
        'uuid': uuid,
        'project_id': project_id,
    }
    return objects.Instance(**attrs)
def compute_api_get_all_empty(context, search_opts=None):
    """Stub for compute_api.get_all reporting that no instances exist."""
    instances = []
    return instances
def compute_api_get_all(context, search_opts=None):
    """Stub for compute_api.get_all reporting one canned VPN instance."""
    instances = [fake_vpn_instance()]
    return instances
def utils_vpn_ping(addr, port, timeout=0.05, session_id=None):
    """Stub for utils.vpn_ping that always reports the endpoint as alive.

    The signature mirrors the real helper so it can be installed via
    stubs.Set without the callers noticing.
    """
    return True
class CloudpipeTestV21(test.NoDBTestCase):
    """Unit tests for the v2.1 os-cloudpipe API controller.

    The compute API, network API and ``utils.vpn_ping`` are stubbed out in
    setUp so the controller can be exercised without a database or real
    services.
    """
    # Module under test; a class attribute so a subclass could point at a
    # different API version.
    cloudpipe = cloudpipe_v21
    url = '/v2/fake/os-cloudpipe'

    def setUp(self):
        super(CloudpipeTestV21, self).setUp()
        self.controller = self.cloudpipe.CloudpipeController()
        # Default stub: no cloudpipe instance exists yet.  Tests needing a
        # pre-existing instance replace this with compute_api_get_all.
        self.stubs.Set(self.controller.compute_api, "get_all",
                       compute_api_get_all_empty)
        # Pretend the VPN endpoint always answers pings.
        self.stubs.Set(utils, 'vpn_ping', utils_vpn_ping)
        self.req = fakes.HTTPRequest.blank('')

    def test_cloudpipe_list_no_network(self):
        # An instance without network info is still listed, but without
        # ip/port/state details.
        def fake_get_nw_info_for_instance(instance):
            return {}
        self.stubs.Set(compute_utils, "get_nw_info_for_instance",
                       fake_get_nw_info_for_instance)
        self.stubs.Set(self.controller.compute_api, "get_all",
                       compute_api_get_all)
        res_dict = self.controller.index(self.req)
        response = {'cloudpipes': [{'project_id': project_id,
                                    'instance_id': uuid,
                                    'created_at': '1981-10-20T00:00:00Z'}]}
        self.assertEqual(res_dict, response)

    def test_cloudpipe_list(self):
        # With network info present the listing includes internal/public
        # addresses and the running state.
        def network_api_get(context, network_id):
            self.assertEqual(context.project_id, project_id)
            return {'vpn_public_address': '127.0.0.1',
                    'vpn_public_port': 22}
        def fake_get_nw_info_for_instance(instance):
            return fake_network.fake_get_instance_nw_info(self)
        self.stubs.Set(compute_utils, "get_nw_info_for_instance",
                       fake_get_nw_info_for_instance)
        self.stubs.Set(self.controller.network_api, "get",
                       network_api_get)
        self.stubs.Set(self.controller.compute_api, "get_all",
                       compute_api_get_all)
        res_dict = self.controller.index(self.req)
        response = {'cloudpipes': [{'project_id': project_id,
                                    'internal_ip': '192.168.1.100',
                                    'public_ip': '127.0.0.1',
                                    'public_port': 22,
                                    'state': 'running',
                                    'instance_id': uuid,
                                    'created_at': '1981-10-20T00:00:00Z'}]}
        self.assertThat(res_dict, matchers.DictMatches(response))

    def test_cloudpipe_create(self):
        def launch_vpn_instance(context):
            return ([fake_vpn_instance()], 'fake-reservation')
        self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
                       launch_vpn_instance)
        body = {'cloudpipe': {'project_id': project_id}}
        res_dict = self.controller.create(self.req, body=body)
        response = {'instance_id': uuid}
        self.assertEqual(res_dict, response)

    def test_cloudpipe_create_no_networks(self):
        # NoMoreNetworks raised by the launcher must surface as HTTP 400.
        def launch_vpn_instance(context):
            raise exception.NoMoreNetworks
        self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
                       launch_vpn_instance)
        body = {'cloudpipe': {'project_id': project_id}}
        req = fakes.HTTPRequest.blank(self.url)
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.create, req, body=body)

    def test_cloudpipe_create_already_running(self):
        # When a cloudpipe instance already exists, create() must return it
        # instead of launching a second one.
        def launch_vpn_instance(*args, **kwargs):
            self.fail("Method should not have been called")
        self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
                       launch_vpn_instance)
        self.stubs.Set(self.controller.compute_api, "get_all",
                       compute_api_get_all)
        body = {'cloudpipe': {'project_id': project_id}}
        req = fakes.HTTPRequest.blank(self.url)
        res_dict = self.controller.create(req, body=body)
        response = {'instance_id': uuid}
        self.assertEqual(res_dict, response)

    def test_cloudpipe_create_with_bad_project_id_failed(self):
        # project_id is schema-validated; a dotted id must be rejected.
        body = {'cloudpipe': {'project_id': 'bad.project.id'}}
        req = fakes.HTTPRequest.blank(self.url)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)
class CloudpipePolicyEnforcementV21(test.NoDBTestCase):
    """Verify every os-cloudpipe action is guarded by its policy rule."""

    def setUp(self):
        super(CloudpipePolicyEnforcementV21, self).setUp()
        self.controller = cloudpipe_v21.CloudpipeController()
        self.req = fakes.HTTPRequest.blank('')

    def _common_policy_check(self, func, *arg, **kwarg):
        # Install a rule the request context can never satisfy, then check
        # that the controller call is rejected with the expected
        # PolicyNotAuthorized message.
        rule_name = "os_compute_api:os-cloudpipe"
        rule = {rule_name: "project:non_fake"}
        self.policy.set_rules(rule)
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, func, *arg, **kwarg)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_list_policy_failed(self):
        self._common_policy_check(self.controller.index, self.req)

    def test_create_policy_failed(self):
        body = {'cloudpipe': {'project_id': uuid}}
        self._common_policy_check(self.controller.create, self.req, body=body)

    def test_update_policy_failed(self):
        body = {"configure_project": {'vpn_ip': '192.168.1.1',
                                      'vpn_port': 2000}}
        self._common_policy_check(
            self.controller.update, self.req, uuid, body=body)
| apache-2.0 |
Thoshh/wapad | lib/python2.7/site-packages/django/conf/locale/fr/formats.py | 504 | 1454 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Output formats (Django date-format syntax).
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday

# Input formats (Python strftime syntax), tried in order when parsing
# user-supplied values; the dotted variants cover Swiss French (fr_CH).
DATE_INPUT_FORMATS = [
    '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
    '%d.%m.%Y', '%d.%m.%y', # Swiss (fr_CH), '25.10.2006', '25.10.06'
    # '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
]
DATETIME_INPUT_FORMATS = [
    '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M', # '25/10/2006 14:30'
    '%d/%m/%Y', # '25/10/2006'
    '%d.%m.%Y %H:%M:%S', # Swiss (fr_CH), '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f', # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30'
    '%d.%m.%Y', # Swiss (fr_CH), '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| mit |
aymeric-spiga/planetoplot | modules/dev/planetoplot_v1/myscript.py | 1 | 11456 | def getparseroptions(parser):
### I/O
parser.add_option('-f', '--file', action='append',dest='file', type="string", default=None, help='[NEEDED] filename. Append: different figures. Comma-separated: same figure (+ possible --operation). Regex OK: use -f "foo*" DONT FORGET QUOTES "" !!!!')
parser.add_option('-L', '--large', action='store_true',dest='monster', default=False, help='speedy version for large files (EXPERIMENTAL)')
parser.add_option('--seevar', action='store_true',dest='seevar', default=False, help='display the list of variables in the file')
parser.add_option('-t', '--target', action='store',dest='tgt', type="string", default=None, help='destination folder')
parser.add_option('-S', '--save', action='store',dest='save', type="string", default="gui", help='save mode (gui,png,eps,svg,pdf,txt,html,avi) [gui]')
parser.add_option('-d', '--display',action='store_false',dest='display', default=True, help='do not pop up created images')
parser.add_option('-O','--output', action='store',dest='out', type="string", default=None, help='output file name')
parser.add_option('--rate', action='store' ,dest='rate', type="int", default=None, help='output is a movie along Time dimension [None]')
parser.add_option('--quality', action='store_true',dest='quality', default=False,help='For movie mode: improves movie quality.(slower)')
### WHAT I PLOT
parser.add_option('-v', '--var', action='append',dest='var', type="string", default=None, help='variable color-shaded')
parser.add_option('-w', '--with', action='store',dest='var2', type="string", default=None, help='variable contoured')
parser.add_option('-a', '--anomaly',action='store_true',dest='anomaly', default=False, help='compute and plot relative anomaly in %')
parser.add_option('--mult', action='store',dest='mult', type="float", default=1., help='multiplicative factor to plotted field (2718=log)')
parser.add_option('--add', action='store',dest='add', type="float", default=0., help='additive factor to plotted field')
parser.add_option('-m', '--min', action='append',dest='vmin', type="float", default=None, help='bounding minimum value [min]')
parser.add_option('-M', '--max', action='append',dest='vmax', type="float", default=None, help='bounding maximum value [max]')
parser.add_option('-H', '--hole', action='store_true',dest='hole', default=False, help='holes above max and below min')
parser.add_option('--nolow', action='store_true',dest='nolow', default=False, help='do not plot low |values| [False]')
parser.add_option('--redope', action='store',dest='redope', type="string", default=None, help='REDuce OPErators: mint,maxt for the moment [None]')
### VERTICAL INTERPOLATION
parser.add_option('-l', '--level', action='store',dest='lvl', type="string", default="0", help='level / start,stop,step (-i 2: p,Pa)(-i 3,4: z,km) [0]')
parser.add_option('-i', '--interp', action='store',dest='itp', type="int", default=None, help='interpolation (2: p, 3: z-amr, 4:z-als, -1)')
parser.add_option('--intas', action='store',dest='intas', type="string", default=None, help='specify "mcs" or "tes" for gcm P interpolation grid')
parser.add_option('-N', '--no-api', action='store_true',dest='nocall', default=False, help='do not recreate api file')
### GENERIC GRAPHICS SETTINGS
parser.add_option('-c', '--color', action='store',dest='clb', type="string", default=None, help='change colormapS (also: nobar,onebar,def)')
parser.add_option('--trycol', action='store_true',dest='trycol', default=False, help='try 9 typical color palette')
parser.add_option('--nocolorb', action='store_true',dest='nocolorb', default=False, help='no color bar please')
parser.add_option('--div', action='store',dest='ndiv', type="int", default=10, help='number of divisions in colorbar [10]')
parser.add_option('--title', action='store',dest='zetitle', type="string", default="fill",help='customize the whole title')
parser.add_option('-T', '--tiled', action='store_true',dest='tile', default=False, help='draw a tiled plot (3D) or add crosses (1D)')
parser.add_option('--res', action='store',dest='res', type="float", default=200., help='resolution for png outputs. --save png needed. [200.]')
parser.add_option('--trans', action='store',dest='trans', type="float", default=1., help='shaded plot transparency, 0 to 1 (=opaque) [1]')
parser.add_option('--area', action='store',dest='area', type="string", default=None, help='area on the map to be plot [None]')
parser.add_option('--xlabel', action='store',dest='xlab', type="string", default=None, help='customize the x-axis label')
parser.add_option('--ylabel', action='store',dest='ylab', type="string", default=None, help='customize the y-axis label')
parser.add_option('--labels', action='store',dest='labels', type="string", default=None, help='customize 1D curve labels. Str comma-separated. [None]')
parser.add_option('--lstyle', action='store',dest='linestyle', type="string", default=None, help='customize 1D curve linestyles. Str comma-separ. [None]')
### SPECIFIC FOR MAPPING [MAPMODE 1]
parser.add_option('-p', '--proj', action='store',dest='proj', type="string", default=None, help='projection')
parser.add_option('-b', '--back', action='store',dest='back', type="string", default=None, help='background image [None]')
parser.add_option('-W', '--winds', action='store_true',dest='winds', default=False, help='wind vectors [False]')
parser.add_option('--facwind', action='store',dest='facwind', type="float", default=1, help='wind vectors magnifying factor [1]')
parser.add_option('-s', '--stride', action='store',dest='ste', type="int", default=3, help='stride vectors [3]')
parser.add_option('-z', '--zoom', action='store',dest='zoom', type="float", default=None, help='zoom factor in %')
parser.add_option('--blat', action='store',dest='blat', type="int", default=None, help='reference lat (or bounding lat for stere) [computed]')
parser.add_option('--blon', action='store',dest='blon', type="int", default=None, help='reference lon [computed]')
parser.add_option('--mark', action='append',dest='mark', type="string", default=None, help='superimpose a crossmark at given lon,lat [None]')
parser.add_option('--finddevil', action='store_true',dest='mdevil', default=False, help='superimpose a crossmark where the steepest dust devil is [False]')
### SPECIFIC FOR SLICING [MAPMODE 0]
parser.add_option('--lat', action='append',dest='slat', type="string", default=None, help='slices along lat. 2 comma-separated values: averaging')
parser.add_option('--lon', action='append',dest='slon', type="string", default=None, help='slices along lon. 2 comma-separated values: averaging')
parser.add_option('--vert', action='append',dest='svert', type="string", default=None, help='slices along vert. 2 comma-separated values: averaging')
parser.add_option('--column', action='store_true',dest='column', default=False,help='changes --vert z1,z2 from MEAN to INTEGRATE along z')
parser.add_option('--time', action='append',dest='stime', type="string", default=None, help='slices along time. 2 comma-separated values: averaging')
parser.add_option('--xmax', action='store',dest='xmax', type="float", default=None, help='max value for x-axis in contour-plots [max(xaxis)]')
parser.add_option('--ymax', action='store',dest='ymax', type="float", default=None, help='max value for y-axis in contour-plots [max(yaxis)]')
parser.add_option('--xmin', action='store',dest='xmin', type="float", default=None, help='min value for x-axis in contour-plots [min(xaxis)]')
parser.add_option('--ymin', action='store',dest='ymin', type="float", default=None, help='min value for y-axis in contour-plots [min(yaxis)]')
parser.add_option('--inverty', action='store_true',dest='inverty', default=False,help='force decreasing values along y-axis (e.g. p-levels)')
parser.add_option('--logx', action='store_true',dest='logx', default=False,help='set x-axis to logarithmic')
parser.add_option('--logy', action='store_true',dest='logy', default=False,help='set y-axis to logarithmic')
parser.add_option('--axtime', action='store',dest='axtime', type="string", default=None, help='choose "ls","sol","lt" for time ref (1D or --time)')
### OPERATIONS BETWEEN FILES
parser.add_option('--operation', action='store',dest='operat', type="string", default=None, help='operation to perform on input files given through -f. "+" or "-" acts on each input file by adding or substracting the ref file specified through --fref. "cat" acts on all input files in-a-row. "add_var" "sub_var" "mul_var" "div_var" acts on two variables (add _only to get only operation plot). "-_histo" will add an histogram plot for the "-" operation.')
parser.add_option('--fref', action='store',dest='fref', type="string", default=None, help='reference namefile for the --operation option.')
parser.add_option('--mope', action='store',dest='vminope', type="float", default=0., help='bounding minimum value for inter-file operation')
parser.add_option('--Mope', action='store',dest='vmaxope', type="float", default=0., help='bounding maximum value for inter-file operation')
parser.add_option('--titleref', action='store',dest='titref', type="string", default="fill", help='title for the reference file. [title of fig (1)]')
### SPECIAL
parser.add_option('--tsat', action='store_true',dest='tsat', default=False,help='convert temperature field T in Tsat-T using pressure')
parser.add_option('--stream', action='store_true',dest='stream', default=False,help='plot streamlines from streamfunction.e for vert/lat slices.')
parser.add_option('--analysis', action='store' ,dest='analysis', default=None ,help='Analysis operation. histo, density (kernel distribution estimate, with gaussian kernel only for the moment (many other distributions are available and can be added)), histodensity (overplot of both density and histo), fft. (currently fft works only on the z-axis, i.e. spatial Fourier Transform, and yields spectrum amplitude. To get enough bandwith, use API with a step of 10m on your data. Note that if you use --time A,B, the result will be the mean of FT at each timestep, and not the FT of the mean. The same apply to --lon and --lat, but does not apply to histo and density (for which arrays are flattened -> no mean).) [None]')
return parser
| gpl-2.0 |
Changaco/oh-mainline | vendor/packages/Django/django/utils/archive.py | 229 | 6935 | """
Based on "python-archive" -- http://pypi.python.org/pypi/python-archive/
Copyright (c) 2010 Gary Wilson Jr. <gary.wilson@gmail.com> and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import shutil
import tarfile
import zipfile
from django.utils import six
class ArchiveException(Exception):
    """
    Base exception class for all archive errors raised by this module.
    """
class UnrecognizedArchiveFormat(ArchiveException):
    """
    Error raised when the passed file is not a recognized archive format
    (no extension match, or a file object without a usable name).
    """
def extract(path, to_path=''):
    """Unpack the tar or zip archive at *path* into the directory *to_path*.

    The archive handle is always closed, even if extraction fails.
    """
    archive = Archive(path)
    try:
        archive.extract(to_path)
    finally:
        archive.close()
class Archive(object):
    """
    The external API class that encapsulates an archive implementation.

    Delegates to a TarArchive or ZipArchive chosen from the file's
    extension; usable as a context manager.
    """
    def __init__(self, file):
        # Resolve the implementation class from the name's extension, then
        # instantiate it with the original path or file object.
        self._archive = self._archive_cls(file)(file)

    @staticmethod
    def _archive_cls(file):
        # *file* may be a path string or an open file object; either way a
        # name with a recognizable extension is required.
        cls = None
        if isinstance(file, six.string_types):
            filename = file
        else:
            try:
                filename = file.name
            except AttributeError:
                raise UnrecognizedArchiveFormat(
                    "File object not a recognized archive format.")
        base, tail_ext = os.path.splitext(filename.lower())
        cls = extension_map.get(tail_ext)
        if not cls:
            # Second splitext pass handles compound extensions such as
            # ".tar.gz" whose last component alone is not in the map.
            base, ext = os.path.splitext(base)
            cls = extension_map.get(ext)
        if not cls:
            raise UnrecognizedArchiveFormat(
                "Path not a recognized archive format: %s" % filename)
        return cls

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always release the underlying archive handle.
        self.close()

    def extract(self, to_path=''):
        self._archive.extract(to_path)

    def list(self):
        self._archive.list()

    def close(self):
        self._archive.close()
class BaseArchive(object):
    """
    Base Archive class.  Implementations should inherit this class and
    provide extract()/list(); the path helpers below are shared.
    """
    def split_leading_dir(self, path):
        """Split *path* on its first separator into (head, remainder).

        Strips any leading slashes/backslashes first.  A path without a
        separator yields ``(path, '')``.
        """
        path = str(path)
        path = path.lstrip('/').lstrip('\\')
        slash = path.find('/')
        backslash = path.find('\\')
        if slash != -1 and (backslash == -1 or slash < backslash):
            return path.split('/', 1)
        if backslash != -1:
            return path.split('\\', 1)
        return path, ''

    def has_leading_dir(self, paths):
        """
        Returns true if all the paths have the same leading path name
        (i.e., everything is in one subdirectory in an archive)
        """
        common_prefix = None
        for entry in paths:
            head, _ = self.split_leading_dir(entry)
            if not head:
                return False
            if common_prefix is None:
                common_prefix = head
            elif head != common_prefix:
                return False
        return True

    def extract(self):
        raise NotImplementedError

    def list(self):
        raise NotImplementedError
class TarArchive(BaseArchive):
    """Archive implementation backed by the stdlib ``tarfile`` module."""

    def __init__(self, file):
        self._archive = tarfile.open(file)

    def list(self, *args, **kwargs):
        """Print the archive's table of contents (see TarFile.list)."""
        self._archive.list(*args, **kwargs)

    def extract(self, to_path):
        """Extract all members below *to_path*.

        A common leading directory shared by every member is stripped;
        corrupt members (e.g. bad symlinks) are reported and skipped.
        """
        # note: python<=2.5 doesnt seem to know about pax headers, filter them
        members = [member for member in self._archive.getmembers()
                   if member.name != 'pax_global_header']
        # Fix: compare member *names*, not TarInfo objects.  Passing the
        # objects made has_leading_dir() stringify their reprs, so a shared
        # leading directory was essentially never detected for tarballs.
        leading = self.has_leading_dir(x.name for x in members)
        for member in members:
            name = member.name
            if leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            if member.isdir():
                if filename and not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                # Fix: pre-bind so the finally clause cannot hit a
                # NameError when extractfile() raises before assignment.
                extracted = None
                try:
                    extracted = self._archive.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    print("In the tar file %s the member %s is invalid: %s" %
                          (name, member.name, exc))
                else:
                    dirname = os.path.dirname(filename)
                    if dirname and not os.path.exists(dirname):
                        os.makedirs(dirname)
                    with open(filename, 'wb') as outfile:
                        shutil.copyfileobj(extracted, outfile)
                finally:
                    if extracted:
                        extracted.close()

    def close(self):
        self._archive.close()
class ZipArchive(BaseArchive):
    """Archive implementation backed by the stdlib ``zipfile`` module."""

    def __init__(self, file):
        self._archive = zipfile.ZipFile(file)

    def list(self, *args, **kwargs):
        """Print the archive's table of contents (see ZipFile.printdir)."""
        self._archive.printdir(*args, **kwargs)

    def extract(self, to_path):
        """Extract every entry below *to_path*, stripping a leading
        directory shared by all entries."""
        entries = self._archive.namelist()
        strip_leading = self.has_leading_dir(entries)
        for entry in entries:
            # Read with the original entry name; the stripped name is only
            # used for the on-disk destination path.
            payload = self._archive.read(entry)
            target = entry
            if strip_leading:
                target = self.split_leading_dir(entry)[1]
            filename = os.path.join(to_path, target)
            parent = os.path.dirname(filename)
            if parent and not os.path.exists(parent):
                os.makedirs(parent)
            if filename.endswith(('/', '\\')):
                # Entry denotes a directory.
                if not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                with open(filename, 'wb') as outfile:
                    outfile.write(payload)

    def close(self):
        self._archive.close()
# Maps lowercased file extensions to the Archive implementation that can
# unpack them; consulted by Archive._archive_cls.  Compound extensions such
# as ".tar.gz" are matched there via a second os.path.splitext pass.
extension_map = {
    '.tar': TarArchive,
    '.tar.bz2': TarArchive,
    '.tar.gz': TarArchive,
    '.tgz': TarArchive,
    '.tz2': TarArchive,
    '.zip': ZipArchive,
}
| agpl-3.0 |
styxit/CouchPotatoServer | libs/sqlalchemy/util/topological.py | 18 | 2603 | # util/topological.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Topological sorting algorithms."""
from sqlalchemy.exc import CircularDependencyError
from sqlalchemy import util
__all__ = ['sort', 'sort_as_subsets', 'find_cycles']
def sort_as_subsets(tuples, allitems):
    """Yield sets of items in dependency order.

    Each yielded set contains items whose (parent, child) prerequisites in
    ``tuples`` have all been emitted by earlier sets.  Raises
    CircularDependencyError when no progress can be made.
    """
    predecessors = util.defaultdict(set)
    for parent, child in tuples:
        predecessors[child].add(parent)

    remaining = set(allitems)
    while remaining:
        ready = set(
            node for node in list(remaining)
            if not remaining.intersection(predecessors[node])
        )
        if not ready:
            raise CircularDependencyError(
                "Circular dependency detected.",
                find_cycles(tuples, allitems),
                _gen_edges(predecessors)
            )
        remaining.difference_update(ready)
        yield ready
def sort(tuples, allitems):
    """sort the given list of items by dependency.

    'tuples' is a list of tuples representing a partial ordering.
    Items within each dependency level are yielded in arbitrary order.
    """
    for level in sort_as_subsets(tuples, allitems):
        for item in level:
            yield item
def find_cycles(tuples, allitems):
    """Return the set of nodes participating in at least one dependency
    cycle among the (parent, child) edges in ``tuples``."""
    # straight from gvr with some mods
    edges = util.defaultdict(set)
    for parent, child in tuples:
        edges[parent].add(child)
    nodes_to_test = set(edges)
    output = set()
    # we'd like to find all nodes that are
    # involved in cycles, so we do the full
    # pass through the whole thing for each
    # node in the original list.
    # we can go just through parent edge nodes.
    # if a node is only a child and never a parent,
    # by definition it can't be part of a cycle. same
    # if it's not in the edges at all.
    for node in nodes_to_test:
        stack = [node]
        # `todo` tracks nodes not yet expanded in this depth-first pass.
        todo = nodes_to_test.difference(stack)
        while stack:
            top = stack[-1]
            for node in edges[top]:
                if node in stack:
                    # A child points back into the current stack: everything
                    # from its first occurrence onward forms a cycle.
                    cyc = stack[stack.index(node):]
                    todo.difference_update(cyc)
                    output.update(cyc)
                if node in todo:
                    # Descend into an unexplored child.
                    stack.append(node)
                    todo.remove(node)
                    break
            else:
                # No unexplored children left under `top`: backtrack.
                node = stack.pop()
    return output
def _gen_edges(edges):
return set([
(right, left)
for left in edges
for right in edges[left]
])
| gpl-3.0 |
bmcage/stickproject | stick/utils/expmeas_201104.py | 1 | 12888 | #
# Copyright (C) 2011 Benny Malengier <bm@cage.ugent.be>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from __future__ import division
"""
This package contains experimental data, usefull for plotting.
Experiments 201104 from Department Organic Chemistry
Experiments 201106 "
"""
#---------------------------------------------------------------
#
# System imports
#
#---------------------------------------------------------------
import os
import numpy
import time
#---------------------------------------------------------------
#
# local imports
#
#---------------------------------------------------------------
#---------------------------------------------------------------
#
# Constants
#
#---------------------------------------------------------------
LE = 'Liquid extraction'
PER = 'Permethrine'
DEET = 'DEET'
LE_1stPEAK = 3 #mL used for first peak
LE_2ndPEAK = 2 #mL used for second peak
LE_1stPEAKextra = 20 #mL used for first peak for extra exp
LE_2ndPEAKextra = 20 #mL used for second peak for extra exp
EXTRA = ['35','36'] #samples that use LE extra
PERIOD = ['march2011', 'midjune2011', 'endjune2011']
PERIOD2DATE = { #begin to end date (weeknumber, year)
'march2011': [(9,2011), (11,2011)],
'midjune2011': [(25,2011), (25,2011)], #15 weeks later
'endjune2011': [(26,2011), (26,2011)],
}
LE_HEADERS = ('Compound', 'Sample', 'Weight g', 'Peak 1st LE', 'Peak 2nd LE', 'period')
LE_1 = ( (DEET, 9, 0.0436, 0, 0, 'midjune2011'),
(DEET, 10, 0.0564, 0, 0, 'midjune2011'),
(DEET, 18, 0.0515, 0, 0, 'midjune2011'),
)
LE_A5 = ( (DEET, 4, 0.045, 39304700, 2551721, 'march2011'),
(DEET, 5, 0.048, 35528900, 3361885, 'march2011'),
(DEET, 6, 0.048, 32224500, 2649236, 'march2011'),
(PER, 4, 0.045, 4496200, 619155, 'march2011'),
(PER, 5, 0.048, 6477600, 367440, 'march2011'),
(PER, 6, 0.048, 3203100, 606709, 'march2011'),
(DEET, 19, 0.0476, 290499, 871726, 'endjune2011'),
(DEET, 20, 0.0529, 297978, 692612, 'endjune2011'),
(DEET, 21, 0.0515, 272392, 840634, 'endjune2011'),
(DEET, 22, 0.0554, 279433, 856935, 'endjune2011'),
(PER, 19, 0.0476, 11528690, 362629, 'endjune2011'),
(PER, 20, 0.0529, 12301786, 277688, 'endjune2011'),
(PER, 21, 0.0515, 11689620, 365262, 'endjune2011'),
(PER, 22, 0.0554, 13294682, 326642, 'endjune2011'),
(DEET, 35, 0.0447, 32982, 0, 'endjune2011'),
(PER, 35, 0.0447, 1267861, 3550, 'endjune2011'),
)
LE_C5 = ( (DEET, 1, 0.063, 1446700, 357011, 'march2011'),
(DEET, 2, 0.059, 3437700, 643069, 'march2011'),
(DEET, 3, 0.055, 3243400, 590819, 'march2011'),
(PER, 1, 0.063, 8854400, 1193029, 'march2011'),
(PER, 2, 0.059, 7332300, 974129, 'march2011'),
(PER, 3, 0.055, 7305000, 823552, 'march2011'),
(DEET, 23, 0.0528, 1466525, 32925, 'endjune2011'),
(DEET, 24, 0.061, 1438981, 41870, 'endjune2011'),
(DEET, 25, 0.0541, 646714, 12923, 'endjune2011'),
(DEET, 26, 0.047, 578745, 17730, 'endjune2011'),
(PER, 23, 0.0528, 12329718, 169823, 'endjune2011'),
(PER, 24, 0.061, 12376012, 210634, 'endjune2011'),
(PER, 25, 0.0541, 12541304, 121612, 'endjune2011'),
(PER, 26, 0.047, 12656640, 170574, 'endjune2011'),
)
LE_C3 = ( (DEET, 7, 0.061, 24436400,1857128, 'march2011'),
(DEET, 8, 0.061, 24059000,2568056, 'march2011'),
(DEET, 9, 0.062, 36600600,21992888, 'march2011'),
(DEET, 15, 0.0457, 828062,62087, 'midjune2011'),
(DEET, 16, 0.0496, 792785,52594, 'midjune2011'),
(DEET, 17, 0.0456, 387493,1793852, 'endjune2011'),
(PER, 15, 0.0457, 131076,169592, 'midjune2011'),
(PER, 16, 0.0496, 132592,0, 'midjune2011'),
(PER, 17, 0.0456, 109862,0, 'endjune2011'),
)
# Liquid-extraction peak areas for textile A3 (DEET-only, foulard applied).
# NOTE(review): the four midjune2011 PER rows below all carry sample id 11,
# while their weights (0.045, 0.0494, 0.0439, 0.0476) match the DEET samples
# 11-14 above — ids 12/13/14 were probably intended; confirm against the lab
# records. Also note rows 1 and 3 share the same 2nd-peak value (2896294).
LE_A3 = ( (DEET, 10, 0.051, 36613200,2896294, 'march2011'),
          (DEET, 11, 0.049, 26140500, 2143200, 'march2011'),
          (DEET, 12, 0.068, 62181000, 2896294, 'march2011'),
          (DEET, 11, 0.045, 796834, 53678, 'midjune2011'),
          (DEET, 12, 0.0494, 810956, 51353, 'midjune2011'),
          (DEET, 13, 0.0439, 603561, 45985, 'midjune2011'),
          (DEET, 14, 0.0476, 527676, 45453, 'midjune2011'),
          (PER, 11, 0.045, 465270, 11367, 'midjune2011'),
          (PER, 11, 0.0494, 909316, 10470, 'midjune2011'),
          (PER, 11, 0.0439, 303554, 4118, 'midjune2011'),
          (PER, 11, 0.0476, 317038, 4352, 'midjune2011'),
        )
LE_C2 = ( (DEET, 16, 0.053, 9957200,791148, 'march2011'),
(DEET, 17, 0.064, 20680900,1806349, 'march2011'),
(DEET, 18, 0.072, 13272600,1018263, 'march2011'),
(PER, 16, 0.053, 9037400,1234738, 'march2011'),
(PER, 17, 0.064, 14296600,1663628, 'march2011'),
(PER, 18, 0.072, 14850600,1928021, 'march2011'),
(PER, 5, 0.0498, 19794606,540734, 'midjune2011'),
(PER, 6, 0.0544, 15702738,543919, 'endjune2011'),
(PER, 7, 0.0544, 15077898,334366, 'endjune2011'),
(PER, 8, 0.0447, 12612794,259176, 'endjune2011'),
)
LE_A2 = ( (DEET, 19, 0.042, 8676100,502327, 'march2011'),
(DEET, 20, 0.049, 5261600,260656, 'march2011'),
(DEET, 21, 0.057, 2133600,284562, 'march2011'),
(PER, 19, 0.042, 5273700,436837, 'march2011'),
(PER, 20, 0.049, 4885400,479561, 'march2011'),
(PER, 21, 0.057, 3656000,756018, 'march2011'),
(PER, 1, 0.0566, 18905485, 469107, 'midjune2011'),
(PER, 2, 0.0564, 17929555, 569509, 'endjune2011'),
(PER, 3, 0.0515, 21990415, 651812, 'endjune2011'),
(PER, 4, 0.0455, 20142143, 615055, 'endjune2011'),
)
LE_A6 = ( (DEET, 27, 0.0569, 1670529,69932, 'endjune2011'),
(DEET, 28, 0.0484, 1456575,69502, 'endjune2011'),
(DEET, 29, 0.0488, 1777403,67690, 'endjune2011'),
(DEET, 30, 0.0564, 1818404,51930, 'endjune2011'),
(PER, 27, 0.0569, 20377690,587595, 'endjune2011'),
(PER, 28, 0.0484, 17634452,462370, 'endjune2011'),
(PER, 29, 0.0488, 26968453,742955, 'endjune2011'),
(PER, 30, 0.0564, 25484190,479484, 'endjune2011'),
)
LE_C6 = ( (DEET, 22, 0.049, 109706800,3305107, 'march2011'),
(DEET, 23, 0.058, 193078400,11983694, 'march2011'),
(DEET, 24, 0.05, 155041100,6057975, 'march2011'),
(PER, 22, 0.049, 21092900,839111, 'march2011'),
(PER, 23, 0.058, 6047500,1058782, 'march2011'),
(PER, 24, 0.05, 12701100,573436, 'march2011'),
(DEET, 31, 0.0472, 2791424,110614, 'endjune2011'),
(DEET, 32, 0.0517, 2839494,151767, 'endjune2011'),
(DEET, 33, 0.0487, 2800502,99872, 'endjune2011'),
(DEET, 34, 0.0478, 2620122,119003, 'endjune2011'),
(PER, 31, 0.0472, 10213386,258442, 'endjune2011'),
(PER, 32, 0.0517, 10011091,411195, 'endjune2011'),
(PER, 33, 0.0487, 8962571,149877, 'endjune2011'),
(PER, 34, 0.0478, 8089477,213776, 'endjune2011'),
(DEET, 36, 0.0506, 103010,0, 'endjune2011'),
(PER, 36, 0.0506, 790418,0, 'endjune2011'),
)
SAMPLES = {
'1' : (LE_1, 'Untreated sample'),
'A5': (LE_A5, 'A5 - DEET+Per, Foulard'),
'A3': (LE_A3, 'A3 - DEET, Foulard'),
'A2': (LE_A2, 'A2 - Per, Foulard'),
'A6': (LE_A6, 'A6 - Per+DEET, Foulard'),
'C5': (LE_C5, 'C5 - DEET+Per, PBA'),
'C3': (LE_C3, 'C3 - DEET, PBA'),
'C2': (LE_C2, 'C2 - Per, PBA'),
'C6': (LE_C6, 'C6 - Per+DEET, PBA'),
}
#---------------------------------------------------------------
#
# Local functions
#
#---------------------------------------------------------------
# Calibration curves: each converts a measured peak area to a concentration
# in microg/mL via (peak_area / slope / 1000), the slope being specific to
# the compound and to when the calibration was performed.
# NOTE: the 'march2011' measurement period is paired with the May 2011
# calibration — see period2calib below.
def calib_DEET_may2011(x):
    """Convert a DEET peak area to microg/mL (May 2011 calibration)."""
    return x / 1416.6 / 1000.
def calib_DEET_midjune2011(x):
    """Convert a DEET peak area to microg/mL (mid June 2011 calibration)."""
    return x / 1139.8 / 1000.
def calib_DEET_endjune2011(x):
    """Convert a DEET peak area to microg/mL (end June 2011 calibration)."""
    return x / 811.17 / 1000.
def calib_PER_may2011(x):
    """Convert a permethrine peak area to microg/mL (May 2011 calibration)."""
    return x / 221.06 / 1000.
def calib_PER_midjune2011(x):
    """Convert a permethrine peak area to microg/mL (mid June 2011 calibration)."""
    return x / 533.7 / 1000.
def calib_PER_endjune2011(x):
    """Convert a permethrine peak area to microg/mL (end June 2011 calibration)."""
    return x / 404.25 / 1000.
def period2calib(period, compound):
    """Return the calibration function for a measurement period/compound.

    ``period`` is one of the PERIOD strings, ``compound`` is DEET or PER.
    Returns None (as the original if/elif ladder did) for any combination
    that has no calibration.
    """
    dispatch = {
        ('march2011', DEET): calib_DEET_may2011,
        ('march2011', PER): calib_PER_may2011,
        ('midjune2011', DEET): calib_DEET_midjune2011,
        ('midjune2011', PER): calib_PER_midjune2011,
        ('endjune2011', DEET): calib_DEET_endjune2011,
        ('endjune2011', PER): calib_PER_endjune2011,
    }
    return dispatch.get((period, compound))
def comp_weight(xlst, type=None, time=None):
    """Convert measured peak areas to concentrations.

    ``xlst`` is a sequence of peak areas; ``type`` is the compound (DEET or
    PER) and ``time`` the measurement period. The result is an array of
    calibrated concentrations (the original comment said ng/mL, the
    calibration docstrings say microg/mL — units to be confirmed).
    """
    calibrate = period2calib(time, type)
    return calibrate(numpy.array(xlst))
def comp_content_per_gramtextile(ledata):
    """Compute compound content per gram of textile from an LE data row.

    ``ledata`` is a (compound, sample, weight, peak1, peak2, period) tuple
    as stored in the LE_* tables. Returns a pair:
    (total content in microg per gram of textile, efficiency of the first
    liquid-extraction peak), or (0., 0.) when nothing was detected.
    """
    compound, sample, fabric_weight = ledata[0], ledata[1], ledata[2]
    peak_areas = numpy.array([ledata[3], ledata[4]], float)
    amounts = comp_weight(peak_areas, compound, ledata[5])
    # Scale each peak's concentration by the solvent volume used for it;
    # samples listed in EXTRA were extracted with the larger volumes.
    if str(sample) in EXTRA:
        first_volume, second_volume = LE_1stPEAKextra, LE_2ndPEAKextra
    else:
        first_volume, second_volume = LE_1stPEAK, LE_2ndPEAK
    amounts[0] *= first_volume
    amounts[1] *= second_volume
    total = amounts[0] + amounts[1]
    if total == 0.:
        return 0., 0.
    return total / fabric_weight, amounts[0] / total
#---------------------------------------------------------------
#
# Classes
#
#---------------------------------------------------------------
#---------------------------------------------------------------
def main(detail=False):
    """Print the average compound content (microg per gram of textile)
    for every sample in SAMPLES, grouped per measurement period.

    If *detail* is True, each individual measurement is printed as well.
    (Python 2 script: uses print statements.)
    """
    samples = SAMPLES.keys()
    samples.sort()
    for key in samples:
        data = SAMPLES[key]
        print 'Values for sample: %s' % data[1]
        # Running sum and count per period, kept separately per compound.
        avgDEET = {};
        nrDEET = {}
        avgPER = {};
        nrPER = {}
        for ledata in data[0]:
            result = comp_content_per_gramtextile(ledata)
            if detail:
                print '    Sample %02d: eff 1stLE %f; content %s = %f microg/g'\
                        ' Period %s' % (
                    ledata[1], result[1], ledata[0], result[0], ledata[5])
            if ledata[0] == DEET:
                if ledata[5] not in avgDEET:
                    avgDEET[ledata[5]] = 0.
                    nrDEET[ledata[5]] = 0
                avgDEET[ledata[5]] += result[0]
                nrDEET[ledata[5]] += 1
            elif ledata[0] == PER:
                if ledata[5] not in avgPER:
                    avgPER[ledata[5]] = 0.
                    nrPER[ledata[5]] = 0
                avgPER[ledata[5]] += result[0]
                nrPER[ledata[5]] += 1
        # NOTE(review): the loops below rebind 'key', shadowing the outer
        # sample key; harmless only because 'key' is not used afterwards.
        for key in nrDEET:
            print '    Avg DEET= %f microg/g in period %s' % \
                    (avgDEET[key]/nrDEET[key], key)
        for key in nrPER:
            print '    Avg PERM= %f microg/g in period %s' % \
                    (avgPER[key]/nrPER[key], key)
        # The two June periods are only one week apart, so also report
        # their combined average when both are present.
        if ('midjune2011' in nrDEET) and ('endjune2011' in nrDEET):
            print '    Avg DEET= %f microg/g in June' % \
                    ((avgDEET['midjune2011']+avgDEET['endjune2011'])/ \
                     (nrDEET['midjune2011']+nrDEET['endjune2011']))
        if ('midjune2011' in nrPER) and ('endjune2011' in nrPER):
            print '    Avg PER= %f microg/g in June' % \
                    ((avgPER['midjune2011']+avgPER['endjune2011'])/ \
                     (nrPER['midjune2011']+nrPER['endjune2011']))
if __name__ == '__main__':
main(detail=False) | gpl-2.0 |
XENON1T/cax | docs/conf.py | 1 | 8427 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cax documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import cax
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Copying All XENON1T'
copyright = u'2016, Christopher Tunnell'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cax.__version__
# The full version, including alpha/beta/rc tags.
release = cax.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'caxdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'cax.tex',
u'Copying All XENON1T Documentation',
u'Christopher Tunnell', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cax',
u'Copying All XENON1T Documentation',
[u'Christopher Tunnell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cax',
u'Copying All XENON1T Documentation',
u'Christopher Tunnell',
'cax',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| isc |
luowei/InputMethodWords | wubi_words2sqlite.py | 1 | 1386 | #!/usr/bin/python
# coding=utf-8
# Load the wubi input-method dictionary from wubi_words.txt into an SQLite
# table 'wubi_words'. NOTE: Python 2 script (print statements, unicode()).
__author__ = 'luowei'
import sqlite3
import re
import codecs
conn = sqlite3.connect('test.db')
print "Opened database successfully"
# Recreate the table from scratch on every run.
conn.execute("DROP TABLE IF EXISTS wubi_words")
conn.execute('''CREATE TABLE wubi_words (id INTEGER PRIMARY KEY NOT NULL ,
    code VARCHAR,words VARCHAR, count INTEGER DEFAULT 0,
    `num` VARCHAR(10) DEFAULT NULL)''')
print "create table successfully"
rows = []
conn.text_factory = str
# Tip: shell command to detect a file's encoding: file -i (linux) or file -I (osx)
# Read the txt source file (stored as UTF-16LE).
with codecs.open('wubi_words.txt', 'r','utf-16le') as f:
# with open('wubi_words.txt', 'r') as f:
    for line in f:
        # line = line.decode('utf-16le')
        line = line.strip()
        line = unicode(line).encode('utf-8')
        line = line.strip()
        if line:
            # Split on ',' or '='; a valid line yields three fields
            # (presumably count, code, words — TODO confirm against the
            # wubi_words.txt format), then a constant num of 1 is appended.
            data = re.split(',|=',line)
            if len(data) == 3:
                data.extend([1])
                # print data
                # conn.execute("INSERT INTO wubi_words(count,code,words,`num`) VALUES (?,?,?,1)",data)
                rows.append(data)
            else:
                # Malformed line: report it and skip.
                print '================:',data
# Bulk-insert the collected rows into the database.
conn.executemany("INSERT INTO wubi_words(count,code,words,`num`) VALUES (?,?,?,?)",rows)
conn.commit()
print "insert data successfully"
conn.close() | apache-2.0 |
pyblish/pyblish-win | lib/Python27/Lib/idlelib/idle_test/test_textview.py | 28 | 2848 | '''Test the functions and main class method of textView.py.'''
import unittest
import os
from test.test_support import requires
from Tkinter import Tk
from idlelib import textView as tv
from idlelib.idle_test.mock_idle import Func
from idlelib.idle_test.mock_tk import Mbox
orig_mbox = tv.tkMessageBox
class textviewClassTest(unittest.TestCase):
    """Tests for the TextViewer class itself.

    The window-management methods that would make the dialog modal are
    replaced by Func mocks, so constructing a viewer neither grabs input
    nor blocks waiting for the window.
    """
    @classmethod
    def setUpClass(cls):
        requires('gui')
        cls.root = Tk()
        cls.TV = TV = tv.TextViewer
        # Each Func records whether it was called, replacing the real
        # Toplevel methods for the duration of this class's tests.
        TV.transient = Func()
        TV.grab_set = Func()
        TV.wait_window = Func()
    @classmethod
    def tearDownClass(cls):
        cls.root.destroy()
        TV = cls.TV
        del cls.root, cls.TV
        # Remove the mocks so the class attributes fall back to the real
        # inherited methods.
        del TV.transient, TV.grab_set, TV.wait_window
    def setUp(self):
        # Reset the mocks' recorded call state before every test.
        TV = self.TV
        TV.transient.__init__()
        TV.grab_set.__init__()
        TV.wait_window.__init__()
    def test_init_modal(self):
        # Default construction is modal: all three calls must happen.
        TV = self.TV
        view = TV(self.root, 'Title', 'test text')
        self.assertTrue(TV.transient.called)
        self.assertTrue(TV.grab_set.called)
        self.assertTrue(TV.wait_window.called)
        view.Ok()
    def test_init_nonmodal(self):
        # modal=False must skip all three window-management calls.
        TV = self.TV
        view = TV(self.root, 'Title', 'test text', modal=False)
        self.assertFalse(TV.transient.called)
        self.assertFalse(TV.grab_set.called)
        self.assertFalse(TV.wait_window.called)
        view.Ok()
    def test_ok(self):
        # Ok() must destroy the window; destroy is mocked per-instance.
        view = self.TV(self.root, 'Title', 'test text', modal=False)
        view.destroy = Func()
        view.Ok()
        self.assertTrue(view.destroy.called)
        del view.destroy # unmask real function
        view.destroy
class textviewTest(unittest.TestCase):
    """Tests for the module-level view_text and view_file helpers."""
    @classmethod
    def setUpClass(cls):
        requires('gui')
        cls.root = Tk()
        # Replace tkMessageBox with the mock so error dialogs (e.g. for a
        # missing file) do not block the test run.
        tv.tkMessageBox = Mbox
    @classmethod
    def tearDownClass(cls):
        cls.root.destroy()
        del cls.root
        # Restore the real messagebox module saved at import time.
        tv.tkMessageBox = orig_mbox
    def test_view_text(self):
        # If modal True, tkinter will error with 'can't invoke "event" command'
        view = tv.view_text(self.root, 'Title', 'test text', modal=False)
        self.assertIsInstance(view, tv.TextViewer)
    def test_view_file(self):
        # Viewing this very test file must show its contents.
        test_dir = os.path.dirname(__file__)
        testfile = os.path.join(test_dir, 'test_textview.py')
        view = tv.view_file(self.root, 'Title', testfile, modal=False)
        self.assertIsInstance(view, tv.TextViewer)
        self.assertIn('Test', view.textView.get('1.0', '1.end'))
        view.Ok()
        # Mock messagebox will be used and view_file will not return anything
        testfile = os.path.join(test_dir, '../notthere.py')
        view = tv.view_file(self.root, 'Title', testfile, modal=False)
        self.assertIsNone(view)
if __name__ == '__main__':
unittest.main(verbosity=2)
| lgpl-3.0 |
wqshi/test | scipy_central/comments/admin.py | 5 | 1212 | from django.utils.translation import ugettext_lazy as _
from django.contrib.comments.models import CommentFlag
from django.contrib.comments.admin import CommentsAdmin
from django.contrib import admin
from scipy_central.comments.models import SpcComment
class SpcCommentAdmin(CommentsAdmin):
    """
    Admin interface for SpcComment objects.

    Extends Django's built-in CommentsAdmin, adding the site-specific
    'rest_comment' field to the Content section of the change form.
    """
    list_display = CommentsAdmin.list_display
    fieldsets = (
        (None,
           {'fields': ('content_type', 'object_pk', 'site')}
        ),
        (_('Content'),
           # 'rest_comment' is the SpcComment-specific addition on top of
           # the stock comment fields.
           {'fields': ('user', 'user_name', 'user_email', 'user_url', 'comment', 'rest_comment')}
        ),
        (_('Metadata'),
           {'fields': ('submit_date', 'ip_address', 'is_public', 'is_removed')}
        ),
    )
class SpcCommentFlagAdmin(admin.ModelAdmin):
    """
    Admin interface for comment flags (django.contrib's CommentFlag),
    listing newest flags first and allowing search by flagger, comment
    author and date.
    """
    list_display = ('flag', 'user', 'comment', 'flag_date')
    search_fields = ['user__username', 'comment__user__username', 'flag_date']
    list_filter = ['flag_date']
    ordering = ['-flag_date']
# Register the customized admin classes with the default admin site.
admin.site.register(SpcComment, SpcCommentAdmin)
admin.site.register(CommentFlag, SpcCommentFlagAdmin)
| bsd-3-clause |
anselme333/NCFCC | NFD/docs/conf.py | 11 | 9075 | # -*- coding: utf-8 -*-
#
# NFD - Named Data Networking Forwarding Daemon documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 6 19:58:22 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'redmine_issue',
]
def addExtensionIfExists(extension):
    """Enable a Sphinx extension only if its module can be imported.

    Appends *extension* to the module-level ``extensions`` list when the
    import succeeds; otherwise writes installation instructions to stderr
    so the documentation build can continue with degraded output.
    """
    try:
        __import__(extension)
    except ImportError:
        # Typo fixed ("in not" -> "is not"); PyPI package names use dashes
        # where the module path uses dots, hence the replace() below.
        sys.stderr.write("Extension '%s' is not available. "
                         "Some documentation may not build correctly.\n" % extension)
        sys.stderr.write("To install, use \n"
                         "  sudo pip install %s\n" % extension.replace('.', '-'))
    else:
        # Register only once we know Sphinx will be able to load it; the
        # append itself cannot raise ImportError, so keep it out of 'try'.
        extensions.append(extension)
addExtensionIfExists('sphinxcontrib.doxylink')
if os.getenv('GOOGLE_ANALYTICS', None):
addExtensionIfExists('sphinxcontrib.googleanalytics')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NFD - Named Data Networking Forwarding Daemon'
copyright = u'2014, Named Data Networking Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
html_theme = 'named_data_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['./']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = ".html"
# Output file base name for HTML help builder.
htmlhelp_basename = 'nfd-docs'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'nfd-docs.tex', u'NFD - Named Data Networking Forwarding Daemon Documentation',
u'Named Data Networking Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('manpages/nfd', 'nfd', u'Named Data Networking Forwarding Daemon', None, 1),
('manpages/ndn-autoconfig-server', 'ndn-autoconfig-server',
u'NFD Auto-configuration Server', None, 1),
('manpages/ndn-autoconfig', 'ndn-autoconfig',
u'NFD Auto-configuration Client', None, 1),
('manpages/nfdc', 'nfdc',
u'NFD utility to manipulate the forwarding table (FIB)', None, 1),
('manpages/ndn-tlv-peek', 'ndn-tlv-peek', u'NFD consumer', None, 1),
('manpages/ndn-tlv-poke', 'ndn-tlv-poke', u'NFD producer', None, 1),
('manpages/nfd-autoreg', 'nfd-autoreg', u'NFD Auto-registration Server', None, 1),
('manpages/nfd-status-http-server', 'nfd-status-http-server',
u'NFD status HTTP server', None, 1),
('manpages/nfd-status', 'nfd-status', u'Command-line utility to show NFD status', None, 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# Map Doxygen tag files to their generated HTML trees for the doxylink
# Sphinx extension (e.g. :NFD:`...` roles resolve against NFD.tag).
doxylink = {
  'NFD' : ('NFD.tag', 'doxygen/'),
}
# Enable Google Analytics only when an ID is supplied via the environment.
if os.getenv('GOOGLE_ANALYTICS', None):
    googleanalytics_id = os.environ['GOOGLE_ANALYTICS']
    googleanalytics_enabled = True
# Base URL used by the redmine issue-reference extension.
redmine_project_url = "http://redmine.named-data.net/"
| gpl-3.0 |
GuillaumeBadi/Python-App-Engine | lib/requests/packages/chardet/latin1prober.py | 1778 | 5232 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
# Number of bigram frequency categories (0 = illegal .. 3 = very likely)
# tallied by Latin1Prober.
FREQ_CAT_NUM = 4
# Coarse character classes used to index Latin1_CharToClass /
# Latin1ClassModel below.
UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
    """Detects windows-1252 ("Latin-1") text via character-class bigrams.

    Each byte is mapped to one of eight coarse classes
    (``Latin1_CharToClass``); consecutive class pairs are scored against
    ``Latin1ClassModel``. A pair scored 0 is impossible in windows-1252
    and rules the encoding out immediately. (Python 2 era chardet code.)
    """
    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()
    def reset(self):
        # Pretend the previous character was an ordinary one so the first
        # real character is scored against a neutral predecessor.
        self._mLastCharClass = OTH
        # One counter per frequency category (index 0 is never incremented;
        # a freq of 0 aborts the prober instead).
        self._mFreqCounter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)
    def get_charset_name(self):
        return "windows-1252"
    def feed(self, aBuf):
        # Plain English-letter runs carry no signal for this prober.
        aBuf = self.filter_with_english_letters(aBuf)
        for c in aBuf:
            charClass = Latin1_CharToClass[wrap_ord(c)]
            # 2-D model flattened row-major: row = previous class.
            freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
                                    + charClass]
            if freq == 0:
                # Impossible bigram for windows-1252: give up.
                self._mState = eNotMe
                break
            self._mFreqCounter[freq] += 1
            self._mLastCharClass = charClass
        return self.get_state()
    def get_confidence(self):
        if self.get_state() == eNotMe:
            return 0.01
        total = sum(self._mFreqCounter)
        if total < 0.01:
            confidence = 0.0
        else:
            # "Very likely" pairs boost confidence; "very unlikely" pairs
            # are penalized heavily (20x).
            confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
                          / total)
        if confidence < 0.0:
            confidence = 0.0
        # lower the confidence of latin1 so that other more accurate
        # detector can take priority.
        confidence = confidence * 0.73
        return confidence
| apache-2.0 |
gklyne/annalist | src/annalist_root/annalist/models/recordfield.py | 1 | 10166 | """
Annalist record field description
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import os
import os.path
import shutil
from django.conf import settings
from annalist import layout
from annalist.identifiers import ANNAL, RDFS
from annalist.models.entity import Entity
from annalist.models.entitydata import EntityData
from annalist.models.recordgroup import RecordGroup, RecordGroup_migration
from annalist.util import (
split_type_entity_id, extract_entity_id, make_type_entity_id
)
from annalist.exceptions import Annalist_Error
class RecordField(EntityData):
    """A field description record belonging to an Annalist collection.

    Wraps the stored JSON description of a view/list field, and layers on
    field-specific format migrations plus pre/post save processing hooks.
    """

    _entitytype = ANNAL.CURIE.Field
    _entitytypeid = layout.FIELD_TYPEID
    _entityroot = layout.COLL_FIELD_PATH
    _entityview = layout.COLL_FIELD_VIEW
    _entityfile = layout.FIELD_META_FILE

    def __init__(self, parent, field_id):
        """
        Initialize a new RecordField object, without metadata (yet).

        parent      is the parent collection to which the field belongs.
        field_id    the local identifier for the record field
        """
        # assert altparent, "RecordField instantiated with no altparent"
        super(RecordField, self).__init__(parent, field_id)
        self._parent = parent
        # log.debug("RecordField %s"%(field_id))
        return

    def get_property_uri(self):
        """
        Return field's property URI
        """
        # Placeholder value makes a missing property URI obvious downstream.
        return self.get(ANNAL.CURIE.property_uri, "@@undefined_property_uri@@")

    def _migrate_filenames(self):
        """
        Override EntityData method
        """
        # No stored-filename migrations apply to field records.
        return None

    def _map_entity_field_enum_val(self, entitydata, key, type_id, old_enum_val, new_enum_val):
        """
        Map enumerated value of specified type

        If entitydata[key] is a type+id reference of type `type_id` with id
        `old_enum_val`, rewrite it to reference `new_enum_val` instead.
        """
        if key in entitydata:
            type_id_here, enum_val_here = split_type_entity_id(entitydata[key])
            if type_id_here == type_id and enum_val_here == old_enum_val:
                entitydata[key] = make_type_entity_id(type_id, new_enum_val)
        return entitydata

    def _migrate_values(self, entitydata):
        """
        Field description entity format migration method.
        The specification for this method is that it returns an entitydata value
        which is a copy of the supplied entitydata with format migrations applied.
        NOTE: implementations are free to apply migrations in-place. The resulting
        entitydata should be exactly as the supplied data *should* appear in storage
        to conform to the current format of the data. The migration function should
        be idempotent; i.e.
            x._migrate_values(x._migrate_values(e)) == x._migrate_values(e)
        """
        field_id = entitydata[ANNAL.CURIE.id]
        # Rename superseded property keys to their current equivalents.
        migration_map = (
            [ (ANNAL.CURIE.options_typeref, ANNAL.CURIE.field_ref_type )
            , (ANNAL.CURIE.restrict_values, ANNAL.CURIE.field_ref_restriction)
            , (ANNAL.CURIE.target_field, ANNAL.CURIE.field_ref_field )
            , (ANNAL.CURIE.field_target_type, ANNAL.CURIE.field_value_type )
            ])
        entitydata = self._migrate_values_map_field_names(migration_map, entitydata)
        # Fix up enumerated values to use new enumeration type names
        field_enum_types = (
            [ (ANNAL.CURIE.field_render_type, "_enum_render_type")
            , (ANNAL.CURIE.field_value_mode, "_enum_value_mode")
            ])
        for fkey, ftype in field_enum_types:
            if fkey in entitydata and entitydata[fkey]:
                entitydata[fkey] = make_type_entity_id(
                    ftype, extract_entity_id(entitydata[fkey])
                )
        # If comment and no tooltip, create tooltip and update comment
        if (RDFS.CURIE.comment in entitydata) and (ANNAL.CURIE.tooltip not in entitydata):
            label = entitydata.get(RDFS.CURIE.label, "Field '%s'"%field_id)
            comment = entitydata[RDFS.CURIE.comment]
            entitydata[ANNAL.CURIE.tooltip] = comment
            # Old comment becomes a Markdown-style heading plus body.
            entitydata[RDFS.CURIE.comment] = "# %s\r\n\r\n%s"%(label, comment)
        # If reference to field group, copy group field list inline
        # (field groups are deprecated; their field lists are inlined here).
        if ANNAL.CURIE.group_ref in entitydata:
            group_type_id, group_id = split_type_entity_id(
                entitydata[ANNAL.CURIE.group_ref], default_type_id=layout.GROUP_TYPEID
                )
            if group_id != "":
                log.info("Migrating group reference %s in field %s"%(group_id, field_id))
                group_obj = RecordGroup_migration.load(self._parent, group_id)
                if not group_obj:
                    msg = (
                        "Failed to load group '%s' for field '%s' in collection '%s'"%
                        (group_id, field_id, self._parent.get_id())
                        )
                    log.warning(msg)
                    self.set_error(msg)
                    # raise Annalist_Error(msg)
                else:
                    field_value_type = entitydata[ANNAL.CURIE.field_value_type]
                    group_entity_type = group_obj[ANNAL.CURIE.group_entity_type]
                    if field_value_type and group_entity_type and field_value_type != group_entity_type:
                        log.warning(
                            "Group %s entity type %s differs from field %s value type %s"%
                            (group_id, group_entity_type, field_id, field_value_type)
                            )
                    entitydata[ANNAL.CURIE.field_fields] = group_obj[ANNAL.CURIE.group_fields]
                    del entitydata[ANNAL.CURIE.group_ref]
        # Default render type to "Text"
        if ANNAL.CURIE.field_render_type not in entitydata:
            entitydata[ANNAL.CURIE.field_render_type] = "_enum_render_type/Text"
        # Migrate changed render type names
        entitydata = self._map_entity_field_enum_val(
            entitydata, ANNAL.CURIE.field_render_type, "_enum_render_type",
            "RepeatGroup", "Group_Seq"
            )
        entitydata = self._map_entity_field_enum_val(
            entitydata, ANNAL.CURIE.field_render_type, "_enum_render_type",
            "RepeatGroupRow", "Group_Seq_Row"
            )
        entitydata = self._map_entity_field_enum_val(
            entitydata, ANNAL.CURIE.field_render_type, "_enum_render_type",
            "Slug", "EntityRef"
            )
        # Calculate mode from other fields if not defined
        val_render = entitydata[ANNAL.CURIE.field_render_type]
        ref_type = entitydata.get(ANNAL.CURIE.field_ref_type, None)
        ref_field = entitydata.get(ANNAL.CURIE.field_ref_field, None)
        if ANNAL.CURIE.field_value_mode in entitydata:
            val_mode = entitydata[ANNAL.CURIE.field_value_mode]
        else:
            val_mode = "Value_direct"
            if ref_type and ref_field:
                val_mode = "Value_field"
            elif val_render == "RefMultifield":
                val_mode = "Value_entity"
            elif val_render == "URIImport":
                val_mode = "Value_import"
            elif val_render == "FileUpload":
                val_mode = "Value_upload"
            entitydata[ANNAL.CURIE.field_value_mode] = val_mode
        # Consistency checks (log-only: migration never rejects data here)
        if val_mode == "Value_field":
            if ( not (ref_type and ref_field) ):
                log.warning(
                    "RecordField %s: val_mode 'Value_field' requires values for %s and %s"%
                    (field_id, ANNAL.CURIE.field_ref_type, ANNAL.CURIE.field_ref_field)
                    )
        elif val_mode == "Value_entity":
            if not ref_type:
                log.warning(
                    "RecordField %s: val_mode 'Value_entity' requires value for %s"%
                    (field_id, ANNAL.CURIE.field_ref_type)
                    )
            if ref_field:
                log.warning(
                    "RecordField %s: val_mode 'Value_entity' should not define value for %s"%
                    (field_id, ANNAL.CURIE.field_ref_field)
                    )
        # Return result
        return entitydata

    def _pre_save_processing(self, entitydata):
        """
        Pre-save value processing.
        This method is called just before a value is saved to fill in or update
        any values that were not specified in the form input.
        The specification for this method is that it returns an entitydata value
        which is a copy of the supplied entitydata with any data updates applied.
        NOTE: implementations are free to apply updates in-place. The resulting
        entitydata should be exactly as the supplied data *should* appear in storage.
        The update function should be idempotent; i.e.
            x._pre_save_processing(x._pre_save_processing(e)) == x._pre_save_processing(e)
        """
        # Default the property URI to the field id when none was supplied.
        if not entitydata.get(ANNAL.CURIE.property_uri, None):
            entitydata[ANNAL.CURIE.property_uri] = entitydata[ANNAL.CURIE.id]
        return entitydata

    def _post_update_processing(self, entitydata, post_update_flags):
        """
        Post-update processing.
        This method is called when a RecordField entity has been created or updated.
        It invokes the containing collection method to regenerate the JSON LD context
        for the collection to which the field belongs.
        """
        self._parent.cache_add_field(self)
        self._parent.generate_coll_jsonld_context(flags=post_update_flags)
        return entitydata

    def _post_remove_processing(self, post_update_flags):
        """
        Post-remove processing.
        This method is called when a RecordField entity has been removed.
        """
        # Drop the removed field from the collection's field cache.
        self._parent.cache_remove_field(self.get_id())
        return
# End.
| mit |
njwilson23/rasterio | tests/test_vfs.py | 1 | 2816 | import logging
import sys
import pytest
import rasterio
from rasterio.profiles import default_gtiff_profile
from rasterio.vfs import parse_path, vsi_path
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
# parse_path returns a (path, archive, scheme) 3-tuple throughout;
# vsi_path turns that 3-tuple into a GDAL /vsi* path string.
def test_parse_path_with_vfs():
    """Correctly parse path with legacy vfs parameter"""
    assert parse_path('foo.tif', vfs='zip://tests/data/files.zip') == (
        'foo.tif', 'tests/data/files.zip', 'zip')


def test_parse_path_zip():
    """Correctly parse VFS scheme URL"""
    assert parse_path('zip://tests/data/files.zip!foo.tif') == (
        'foo.tif', 'tests/data/files.zip', 'zip')


def test_parse_path_file_scheme():
    """Correctly parse file:// URL"""
    assert parse_path('file://foo.tif') == (
        'foo.tif', None, 'file')


def test_parse_path_file():
    """Correctly parse an ordinary filesystem path"""
    assert parse_path('/foo.tif') == (
        '/foo.tif', None, None)


def test_parse_unknown_scheme():
    """Raise exception for unknown VFS scheme"""
    with pytest.raises(ValueError):
        parse_path('http://foo.tif')


def test_vsi_path_scheme():
    """Correctly make a vsi path"""
    assert vsi_path(
        'foo.tif', 'tests/data/files.zip', 'zip') == '/vsizip/tests/data/files.zip/foo.tif'
def test_vsi_path_file():
    """Correctly make an ordinary file path from a parsed file:// URL"""
    assert vsi_path(
        'foo.tif', None, 'file') == 'foo.tif'
def test_vsi_path_file_plain():
    """Correctly make an ordinary file path from a plain file path."""
    # Renamed: this function previously reused the name test_vsi_path_file,
    # shadowing the definition immediately above it, so only one of the two
    # identically-named tests was ever collected and run by pytest.
    assert vsi_path(
        'foo.tif', None, 'file') == 'foo.tif'
def test_read_vfs_zip():
    # Open a dataset inside a zip archive via the zip:// scheme.
    with rasterio.open(
            'zip://tests/data/files.zip!/RGB.byte.tif') as src:
        assert src.name == 'zip://tests/data/files.zip!/RGB.byte.tif'
        assert src.count == 3


def test_read_vfs_file():
    # Open a dataset via an explicit file:// scheme.
    with rasterio.open(
            'file://tests/data/RGB.byte.tif') as src:
        assert src.name == 'file://tests/data/RGB.byte.tif'
        assert src.count == 3


def test_read_vfs_zip_cmp_array():
    # Pixels read through zip:// must match a direct file:// read.
    with rasterio.open(
            'zip://tests/data/files.zip!/RGB.byte.tif') as src:
        zip_arr = src.read()
    with rasterio.open(
            'file://tests/data/RGB.byte.tif') as src:
        file_arr = src.read()
    # Compare pickled byte strings: a cheap whole-array equality check.
    assert zip_arr.dumps() == file_arr.dumps()


def test_read_vfs_none():
    # A plain path (no scheme) still opens normally.
    with rasterio.open(
            'tests/data/RGB.byte.tif') as src:
        assert src.name == 'tests/data/RGB.byte.tif'
        assert src.count == 3


@pytest.mark.parametrize('mode', ['r+', 'w'])
def test_update_vfs(tmpdir, mode):
    """VFS datasets can not be created or updated"""
    with pytest.raises(TypeError):
        _ = rasterio.open(
            'zip://{0}'.format(tmpdir), mode,
            **default_gtiff_profile(
                count=1, width=1, height=1))
| bsd-3-clause |
kastnerkyle/pylearn2 | pylearn2/config/yaml_parse.py | 1 | 19376 | """Support code for YAML parsing of experiment descriptions."""
import yaml
from pylearn2.utils import serial
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.string_utils import preprocess
from pylearn2.utils.call_check import checked_call
from pylearn2.utils.string_utils import match
from collections import namedtuple
import logging
import warnings
is_initialized = False
additional_environ = None
logger = logging.getLogger(__name__)
# Lightweight container for initial YAML evaluation.
#
# This is intended as a robust, forward-compatible intermediate representation
# for either internal consumption or external consumption by another tool e.g.
# hyperopt.
#
# We've included a slot for positionals just in case, though they are
# unsupported by the instantiation mechanism as yet.
BaseProxy = namedtuple('BaseProxy', ['callable', 'positionals',
'keywords', 'yaml_src'])
class Proxy(BaseProxy):
    """
    An intermediate representation between initial YAML parse and object
    instantiation.
    Parameters
    ----------
    callable : callable
        The function/class to call to instantiate this node.
    positionals : iterable
        Placeholder for future support for positional arguments (`*args`).
    keywords : dict-like
        A mapping from keywords to arguments (`**kwargs`), which may be
        `Proxy`s or `Proxy`s nested inside `dict` or `list` instances.
        Keys must be strings that are valid Python variable names.
    yaml_src : str
        The YAML source that created this node, if available.
    Notes
    -----
    This is intended as a robust, forward-compatible intermediate
    representation for either internal consumption or external consumption
    by another tool e.g. hyperopt.
    This particular class mainly exists to override `BaseProxy`'s `__hash__`
    (to avoid hashing unhashable namedtuple elements).
    """
    # Keep instances __dict__-free, like the namedtuple base class.
    __slots__ = []
    def __hash__(self):
        """
        Return a hash based on the object ID (to avoid hashing unhashable
        namedtuple elements).
        """
        # id() is stable for the object's lifetime, which is all the
        # bindings cache in _instantiate needs for anchor/alias identity.
        return hash(id(self))
class ObjectProxy(Proxy):
    """
    API compatibility wrapper for deprecated `ObjectProxy` class.

    Parameters
    ----------
    cls : callable
        See `callable` in `Proxy` docstring.
    kwds : dict
        See `keywords` in `Proxy` docstring.
    yaml_src : str
        See `yaml_src` in `Proxy` docstring.

    .. warning::
        Deprecated, to be removed as of January 1, 2015.
    """
    def __new__(_cls, cls, kwds, yaml_src):
        # Bug fix: Proxy is a namedtuple, whose fields can only be supplied
        # through __new__. The previous __init__-based construction never
        # delivered the four fields to the namedtuple __new__, so creating
        # an ObjectProxy raised TypeError. (That __init__ also called
        # self._warn() without the required message argument.)
        warnings.warn("ObjectProxy is deprecated. Switch to `Proxy`. "
                      "`ObjectProxy` will be removed on or after "
                      "January 10, 2015.", stacklevel=2)
        obj = super(ObjectProxy, _cls).__new__(
            _cls, callable=cls, positionals=(),
            keywords=kwds, yaml_src=yaml_src)
        # Cache slot for instantiate(). Unlike Proxy, this subclass defines
        # no __slots__, so instances have a __dict__ and plain attribute
        # assignment works. Initializing here fixes the AttributeError that
        # instantiate() previously hit on first use.
        obj.instance = None
        return obj

    def _warn(self, s):
        """
        Issue a templated warning message about deprecation of this interface.

        Parameters
        ----------
        s : str
            Prefix string for the warning message.
        """
        warnings.warn("%s is deprecated. Switch to `Proxy`. "
                      "`ObjectProxy` will be removed on or after "
                      "January 10, 2015." % s, stacklevel=2)

    def __getitem__(self, key):
        """
        .. warning::
            Deprecated, to be removed as of January 1, 2015.
        """
        self._warn("ObjectProxy.__getitem__")
        return self.kwds[key]

    def __setitem__(self, key, value):
        """
        .. warning::
            Deprecated, to be removed as of January 1, 2015.
        """
        self._warn("ObjectProxy.__setitem__")
        self.kwds[key] = value

    def __iter__(self):
        """
        .. warning::
            Deprecated, to be removed as of January 1, 2015.
        """
        self._warn("ObjectProxy.__iter__")
        return self.kwds.__iter__()

    def keys(self):
        """
        .. warning::
            Deprecated, to be removed as of January 1, 2015.
        """
        self._warn("ObjectProxy.keys")
        return list(self.kwds)

    @property
    def kwds(self):
        """
        .. warning::
            Deprecated, to be removed as of January 1, 2015.
        """
        self._warn("ObjectProxy.kwds")
        return self.keywords

    @property
    def cls(self):
        """
        .. warning::
            Deprecated, to be removed as of January 1, 2015.
        """
        self._warn("ObjectProxy.cls")
        return self.callable

    def instantiate(self):
        """
        .. warning::
            Deprecated, to be removed as of January 1, 2015.
        Instantiate this object with the supplied parameters in `self.kwds`,
        or if already instantiated, return the cached instance.
        """
        self._warn("ObjectProxy.instantiate")
        if self.instance is None:
            self.instance = checked_call(self.cls, self.kwds)
        try:
            self.instance.yaml_src = self.yaml_src
        except AttributeError:
            pass
        return self.instance
def do_not_recurse(value):
    """
    Identity marker used by the YAML instantiation machinery.

    Wraps an unpickled object so that the instantiation parser knows not
    to expand it recursively (the parser recognizes this function object
    as a sentinel). At runtime it is a pure no-op.

    Parameters
    ----------
    value : object
        Object to pass through untouched.

    Returns
    -------
    object
        The very same `value` that was passed in.
    """
    return value
def _instantiate_proxy_tuple(proxy, bindings=None):
    """
    Helper function for `_instantiate` that handles objects of the `Proxy`
    class.
    Parameters
    ----------
    proxy : Proxy object
        A `Proxy` object that.
    bindings : dict, opitonal
        A dictionary mapping previously instantiated `Proxy` objects
        to their instantiated values.
    Returns
    -------
    obj : object
        The result object from recursively instantiating the object DAG.
    """
    if proxy in bindings:
        # YAML anchors/aliases map to the same Proxy object; reuse the
        # already-built instance so object identity is preserved.
        return bindings[proxy]
    else:
        # Respect do_not_recurse by just un-packing it (same as calling).
        if proxy.callable == do_not_recurse:
            obj = proxy.keywords['value']
        else:
            # TODO: add (requested) support for positionals (needs to be added
            # to checked_call also).
            if len(proxy.positionals) > 0:
                raise NotImplementedError('positional arguments not yet '
                                          'supported in proxy instantiation')
            # Depth-first: instantiate all keyword values before the call.
            kwargs = dict((k, _instantiate(v, bindings))
                          for k, v in proxy.keywords.iteritems())
            obj = checked_call(proxy.callable, kwargs)
        try:
            obj.yaml_src = proxy.yaml_src
        except AttributeError:  # Some classes won't allow this.
            pass
        bindings[proxy] = obj
        return bindings[proxy]
def instantiate_all(graph):
    """
    .. warning::
        Deprecated, to be removed as of January 1, 2015.
    API compatibility wrapper for deprecated `instantiate_all`.
    Parameters
    ----------
    graph : object
        A `Proxy` object or list/dict/literal. Strings are run through
        `preprocess`.
    Returns
    -------
    obj : object
        The result object from recursively instantiating the object DAG.
    """
    # Bug fix: this compatibility wrapper previously had no executable body
    # (only a docstring), so it always returned None. It now warns about the
    # deprecation and delegates to the internal `_instantiate`.
    warnings.warn("instantiate_all is deprecated. Use `load(..., "
                  "instantiate=True)` instead. instantiate_all will be "
                  "removed on or after January 10, 2015.", stacklevel=2)
    return _instantiate(graph)
def _instantiate(proxy, bindings=None):
    """
    Instantiate a (hierarchy of) Proxy object(s).
    Parameters
    ----------
    proxy : object
        A `Proxy` object or list/dict/literal. Strings are run through
        `preprocess`.
    bindings : dict, opitonal
        A dictionary mapping previously instantiated `Proxy` objects
        to their instantiated values.
    Returns
    -------
    obj : object
        The result object from recursively instantiating the object DAG.
    Notes
    -----
    This should not be considered part of the stable, public API.
    """
    if bindings is None:
        bindings = {}
    if isinstance(proxy, Proxy):
        return _instantiate_proxy_tuple(proxy, bindings)
    elif isinstance(proxy, dict):
        # Recurse on the keys too, for backward compatibility.
        # Is the key instantiation feature ever actually used, by anyone?
        return dict((_instantiate(k, bindings), _instantiate(v, bindings))
                    for k, v in proxy.iteritems())
    elif isinstance(proxy, list):
        return [_instantiate(v, bindings) for v in proxy]
    # In the future it might be good to consider a dict argument that provides
    # a type->callable mapping for arbitrary transformations like this.
    elif isinstance(proxy, basestring):
        # ${VAR} substitution happens here, on every string leaf.
        return preprocess(proxy)
    else:
        # Any other literal (numbers, None, tuples, ...) passes through.
        return proxy
def load(stream, environ=None, instantiate=True, **kwargs):
    """
    Loads a YAML configuration from a string or file-like object.
    Parameters
    ----------
    stream : str or object
        Either a string containing valid YAML or a file-like object
        supporting the .read() interface.
    environ : dict, optional
        A dictionary used for ${FOO} substitutions in addition to
        environment variables. If a key appears both in `os.environ`
        and this dictionary, the value in this dictionary is used.
    instantiate : bool, optional
        If `False`, do not actually instantiate the objects but instead
        produce a nested hierarchy of `Proxy` objects.
    Returns
    -------
    graph : dict or object
        The dictionary or object (if the top-level element specified
        a Python object to instantiate), or a nested hierarchy of
        `Proxy` objects.
    Notes
    -----
    Other keyword arguments are passed on to `yaml.load`.
    """
    global is_initialized
    global additional_environ
    if not is_initialized:
        initialize()
    # Stashed in a module global so the !pkl: constructor callback can see
    # the caller-supplied substitutions during the yaml.load call below.
    additional_environ = environ
    if isinstance(stream, basestring):
        string = stream
    else:
        string = stream.read()
    proxy_graph = yaml.load(string, **kwargs)
    if instantiate:
        return _instantiate(proxy_graph)
    else:
        return proxy_graph
def load_path(path, environ=None, instantiate=True, **kwargs):
    """
    Convenience function for loading a YAML configuration from a file.
    Parameters
    ----------
    path : str
        The path to the file to load on disk.
    environ : dict, optional
        A dictionary used for ${FOO} substitutions in addition to
        environment variables. If a key appears both in `os.environ`
        and this dictionary, the value in this dictionary is used.
    instantiate : bool, optional
        If `False`, do not actually instantiate the objects but instead
        produce a nested hierarchy of `Proxy` objects.
    Returns
    -------
    graph : dict or object
        The dictionary or object (if the top-level element specified
        a Python object to instantiate), or a nested hierarchy of
        `Proxy` objects.
    Notes
    -----
    All remaining keyword arguments are forwarded to `yaml.load` via
    `load`.
    """
    with open(path, 'r') as config_file:
        content = config_file.read()
    # Guard against the odd case where the file content comes back as
    # unicode instead of a byte string (see 03f238c6d); downstream code
    # expects exactly `str` here, so `basestring` would be too permissive.
    if not isinstance(content, str):
        raise AssertionError("Expected content to be of type str, got " +
                             str(type(content)))
    return load(content, instantiate=instantiate, environ=environ, **kwargs)
def try_to_import(tag_suffix):
    """
    Import and return the object named by a fully-qualified dotted path.

    Parameters
    ----------
    tag_suffix : str
        Dotted name such as "pylearn2.models.mlp.MLP"; everything before
        the final component is treated as the module to import.

    Returns
    -------
    obj : object
        The object obtained by importing the containing module and
        evaluating `tag_suffix`.

    Notes
    -----
    Uses exec/eval on the tag suffix, so tag names must come from trusted
    YAML only. On failure, considerable effort is spent constructing a
    helpful error message (which prefix of the path imports, or which
    attribute name was probably misspelled).
    """
    components = tag_suffix.split('.')
    modulename = '.'.join(components[:-1])
    try:
        exec('import %s' % modulename)
    except ImportError, e:
        # We know it's an ImportError, but is it an ImportError related to
        # this path,
        # or did the module we're importing have an unrelated ImportError?
        # and yes, this test can still have false positives, feel free to
        # improve it
        pieces = modulename.split('.')
        str_e = str(e)
        found = True in [piece.find(str(e)) != -1 for piece in pieces]
        if found:
            # The yaml file is probably to blame.
            # Report the problem with the full module path from the YAML
            # file
            reraise_as(ImportError("Could not import %s; ImportError was %s" %
                                   (modulename, str_e)))
        else:
            # Try importing successively longer prefixes of the path to
            # pinpoint exactly where the import chain breaks.
            pcomponents = components[:-1]
            assert len(pcomponents) >= 1
            j = 1
            while j <= len(pcomponents):
                modulename = '.'.join(pcomponents[:j])
                try:
                    exec('import %s' % modulename)
                except Exception:
                    base_msg = 'Could not import %s' % modulename
                    if j > 1:
                        modulename = '.'.join(pcomponents[:j - 1])
                        base_msg += ' but could import %s' % modulename
                    reraise_as(ImportError(base_msg + '. Original exception: '
                                           + str(e)))
                j += 1
    try:
        obj = eval(tag_suffix)
    except AttributeError, e:
        try:
            # Try to figure out what the wrong field name was
            # If we fail to do it, just fall back to giving the usual
            # attribute error
            pieces = tag_suffix.split('.')
            module = '.'.join(pieces[:-1])
            field = pieces[-1]
            candidates = dir(eval(module))
            msg = ('Could not evaluate %s. ' % tag_suffix +
                   'Did you mean ' + match(field, candidates) + '? ' +
                   'Original error was ' + str(e))
        except Exception:
            warnings.warn("Attempt to decipher AttributeError failed")
            reraise_as(AttributeError('Could not evaluate %s. ' % tag_suffix +
                                      'Original error was ' + str(e)))
        reraise_as(AttributeError(msg))
    return obj
def initialize():
    """
    Initialize the configuration system by installing YAML handlers.
    Automatically done on first call to load() specified in this file.
    """
    global is_initialized
    # Add the custom multi-constructor
    # NOTE: registration is global to the yaml module, so these handlers
    # affect every subsequent yaml.load in the process.
    yaml.add_multi_constructor('!obj:', multi_constructor_obj)
    yaml.add_multi_constructor('!pkl:', multi_constructor_pkl)
    yaml.add_multi_constructor('!import:', multi_constructor_import)
    yaml.add_constructor('!import', constructor_import)
    yaml.add_constructor("!float", constructor_float)
    is_initialized = True
###############################################################################
# Callbacks used by PyYAML
def multi_constructor_obj(loader, tag_suffix, node):
    """
    Callback used by PyYAML when a "!obj:" tag is encountered.
    See PyYAML documentation for details on the call signature.
    """
    yaml_src = yaml.serialize(node)
    # First pass only validates the mapping (duplicate keys raise here);
    # its return value is intentionally discarded.
    construct_mapping(node)
    mapping = loader.construct_mapping(node)
    assert hasattr(mapping, 'keys')
    assert hasattr(mapping, 'values')
    for key in mapping.keys():
        if not isinstance(key, basestring):
            message = "Received non string object (%s) as " \
                      "key in mapping." % str(key)
            raise TypeError(message)
    if '.' not in tag_suffix:
        # NOTE: eval() on the tag suffix -- YAML input must be trusted.
        # TODO: I'm not sure how this was ever working without eval().
        callable = eval(tag_suffix)
    else:
        callable = try_to_import(tag_suffix)
    rval = Proxy(callable=callable, yaml_src=yaml_src, positionals=(),
                 keywords=mapping)
    return rval


def multi_constructor_pkl(loader, tag_suffix, node):
    """
    Callback used by PyYAML when a "!pkl:" tag is encountered.

    Unpickles the named file and wraps the result with `do_not_recurse`
    so later instantiation leaves it untouched.
    """
    global additional_environ
    if tag_suffix != "" and tag_suffix != u"":
        raise AssertionError('Expected tag_suffix to be "" but it is "'
                             + tag_suffix +
                             '": Put space between !pkl: and the filename.')
    mapping = loader.construct_yaml_str(node)
    obj = serial.load(preprocess(mapping, additional_environ))
    proxy = Proxy(callable=do_not_recurse, positionals=(),
                  keywords={'value': obj}, yaml_src=yaml.serialize(node))
    return proxy


def multi_constructor_import(loader, tag_suffix, node):
    """
    Callback used by PyYAML when a "!import:" tag is encountered.
    """
    if '.' not in tag_suffix:
        raise yaml.YAMLError("!import: tag suffix contains no '.'")
    return try_to_import(tag_suffix)


def constructor_import(loader, node):
    """
    Callback used by PyYAML when a "!import <str>" tag is encountered.
    This tag expects a (quoted) string as argument.
    """
    value = loader.construct_scalar(node)
    if '.' not in value:
        raise yaml.YAMLError("import tag suffix contains no '.'")
    return try_to_import(value)
def constructor_float(loader, node):
    """
    PyYAML callback for the "!float <str>" tag.

    The tag payload is read as a scalar string and converted with
    ``float``, so values such as "2.5" or "1e-3" are accepted.
    """
    raw = loader.construct_scalar(node)
    return float(raw)
def construct_mapping(node, deep=False):
# This is a modified version of yaml.BaseConstructor.construct_mapping
# in which a repeated key raises a ConstructorError
if not isinstance(node, yaml.nodes.MappingNode):
const = yaml.constructor
message = "expected a mapping node, but found"
raise const.ConstructorError(None, None,
"%s %s " % (message, node.id),
node.start_mark)
mapping = {}
constructor = yaml.constructor.BaseConstructor()
for key_node, value_node in node.value:
key = constructor.construct_object(key_node, deep=False)
try:
hash(key)
except TypeError, exc:
const = yaml.constructor
reraise_as(const.ConstructorError("while constructing a mapping",
node.start_mark,
"found unacceptable key (%s)" %
(exc, key_node.start_mark)))
if key in mapping:
const = yaml.constructor
raise const.ConstructorError("while constructing a mapping",
node.start_mark,
"found duplicate key (%s)" % key)
value = constructor.construct_object(value_node, deep=False)
mapping[key] = value
return mapping
if __name__ == "__main__":
    initialize()
    # Demonstration of how to specify objects, reference them
    # later in the configuration, etc.
    yamlfile = """{
        "corruptor" : !obj:pylearn2.corruption.GaussianCorruptor &corr {
            "corruption_level" : 0.9
        },
        "dae" : !obj:pylearn2.models.autoencoder.DenoisingAutoencoder {
            "nhid" : 20,
            "nvis" : 30,
            "act_enc" : null,
            "act_dec" : null,
            "tied_weights" : true,
            # we could have also just put the corruptor definition here
            "corruptor" : *corr
        }
    }"""
    # Bug fix: use this module's load() (yaml.load + _instantiate) rather
    # than raw yaml.load(). With the custom constructors installed by
    # initialize(), yaml.load returns un-instantiated Proxy objects, so
    # `loaded['dae'].corruptor` below would raise AttributeError.
    loaded = load(yamlfile)
    logger.info(loaded)
    # These two things should be the same object
    assert loaded['corruptor'] is loaded['dae'].corruptor
| bsd-3-clause |
MountainWei/nova | nova/network/api.py | 41 | 23187 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from nova import exception
from nova.i18n import _LI
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova import policy
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def wrap_check_policy(func):
    """Decorator enforcing the policy named after the wrapped method.

    The policy action is ``network:<method name>``; enforcement is skipped
    entirely when the API object was constructed with
    ``skip_policy_check=True``.
    """
    @functools.wraps(func)
    def wrapped(self, context, *args, **kwargs):
        if not self.skip_policy_check:
            check_policy(context, func.__name__)
        return func(self, context, *args, **kwargs)
    return wrapped
def check_policy(context, action):
    """Enforce the 'network:<action>' policy rule for this request context."""
    scope = {'project_id': context.project_id,
             'user_id': context.user_id}
    policy.enforce(context, 'network:%s' % action, scope)
class API(base_api.NetworkAPI):
    """API for doing networking via the nova-network network manager.
    This is a pluggable module - other implementations do networking via
    other services (such as Neutron).
    """
    def __init__(self, **kwargs):
        """Set up the RPC client and the local floating IP manager."""
        self.network_rpcapi = network_rpcapi.NetworkAPI()
        helper = utils.ExceptionHelper
        # NOTE(vish): this local version of floating_manager has to convert
        # ClientExceptions back since they aren't going over rpc.
        self.floating_manager = helper(floating_ips.LocalManager())
        super(API, self).__init__(**kwargs)
    @wrap_check_policy
    def get_all(self, context):
        """Get all the networks.
        If it is an admin user then api will return all the
        networks. If it is a normal user and nova Flat or FlatDHCP
        networking is being used then api will return all
        networks. Otherwise api will only return the networks which
        belong to the user's project.
        """
        if "nova.network.manager.Flat" in CONF.network_manager:
            project_only = "allow_none"
        else:
            project_only = True
        try:
            return objects.NetworkList.get_all(context,
                                               project_only=project_only)
        except exception.NoNetworksFound:
            return []
    @wrap_check_policy
    def get(self, context, network_uuid):
        """Get a network identified by its UUID."""
        return objects.Network.get_by_uuid(context, network_uuid)
    @wrap_check_policy
    def create(self, context, **kwargs):
        """Create one or more networks via the network manager."""
        return self.network_rpcapi.create_networks(context, **kwargs)
    @wrap_check_policy
    def delete(self, context, network_uuid):
        """Delete a network; raises NetworkInUse if still tied to a project."""
        network = self.get(context, network_uuid)
        if network.project_id is not None:
            raise exception.NetworkInUse(network_id=network_uuid)
        return self.network_rpcapi.delete_network(context, network_uuid, None)
    @wrap_check_policy
    def disassociate(self, context, network_uuid):
        """Drop both the host and project association of a network."""
        network = self.get(context, network_uuid)
        objects.Network.disassociate(context, network.id,
                                     host=True, project=True)
    @wrap_check_policy
    def get_fixed_ip(self, context, id):
        """Get a fixed IP by its database id."""
        return objects.FixedIP.get_by_id(context, id)
    @wrap_check_policy
    def get_fixed_ip_by_address(self, context, address):
        """Get a fixed IP by its address."""
        return objects.FixedIP.get_by_address(context, address)
    @wrap_check_policy
    def get_floating_ip(self, context, id):
        """Get a floating IP by id; raises InvalidID for non-integer ids."""
        if not strutils.is_int_like(id):
            raise exception.InvalidID(id=id)
        return objects.FloatingIP.get_by_id(context, id)
    @wrap_check_policy
    def get_floating_ip_pools(self, context):
        """List the names of the configured floating IP pools."""
        return objects.FloatingIP.get_pool_names(context)
    @wrap_check_policy
    def get_floating_ip_by_address(self, context, address):
        """Get a floating IP by its address."""
        return objects.FloatingIP.get_by_address(context, address)
    @wrap_check_policy
    def get_floating_ips_by_project(self, context):
        """List the floating IPs allocated to the context's project."""
        return objects.FloatingIPList.get_by_project(context,
                                                     context.project_id)
    @wrap_check_policy
    def get_instance_id_by_floating_address(self, context, address):
        """Return the UUID of the instance a floating address points at,
        or None when the address has no fixed IP association.
        """
        fixed_ip = objects.FixedIP.get_by_floating_address(context, address)
        if fixed_ip is None:
            return None
        else:
            return fixed_ip.instance_uuid
    @wrap_check_policy
    def get_vifs_by_instance(self, context, instance):
        """Return the instance's virtual interfaces, each annotated with
        the UUID of its network (net_uuid) when one is attached.
        """
        vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
                                                                 instance.uuid)
        for vif in vifs:
            if vif.network_id is not None:
                network = objects.Network.get_by_id(context, vif.network_id,
                                                    project_only='allow_none')
                vif.net_uuid = network.uuid
        return vifs
    @wrap_check_policy
    def get_vif_by_mac_address(self, context, mac_address):
        """Return the virtual interface with the given MAC, annotated with
        its network UUID (net_uuid) when one is attached.
        """
        vif = objects.VirtualInterface.get_by_address(context,
                                                      mac_address)
        if vif.network_id is not None:
            network = objects.Network.get_by_id(context, vif.network_id,
                                                project_only='allow_none')
            vif.net_uuid = network.uuid
        return vif
    @wrap_check_policy
    def allocate_floating_ip(self, context, pool=None):
        """Adds (allocates) a floating ip to a project from a pool."""
        return self.floating_manager.allocate_floating_ip(context,
            context.project_id, False, pool)
    @wrap_check_policy
    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Removes (deallocates) a floating ip with address from a project."""
        return self.floating_manager.deallocate_floating_ip(context, address,
            affect_auto_assigned)
    def disassociate_and_release_floating_ip(self, context, instance,
                                             floating_ip):
        """Removes (deallocates) and deletes the floating ip.
        This api call was added to allow this to be done in one operation
        if using neutron.
        """
        address = floating_ip['address']
        if floating_ip.get('fixed_ip_id'):
            try:
                self.disassociate_floating_ip(context, instance, address)
            except exception.FloatingIpNotAssociated:
                msg = ("Floating ip %s has already been disassociated, "
                       "perhaps by another concurrent action.") % address
                LOG.debug(msg)
        # release ip from project
        return self.release_floating_ip(context, address)
    @wrap_check_policy
    @base_api.refresh_cache
    def associate_floating_ip(self, context, instance,
                              floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associates a floating ip with a fixed ip.
        Ensures floating ip is allocated to the project in context.
        Does not verify ownership of the fixed ip. Caller is assumed to have
        checked that the instance is properly owned.
        """
        orig_instance_uuid = self.floating_manager.associate_floating_ip(
            context, floating_address, fixed_address, affect_auto_assigned)
        if orig_instance_uuid:
            msg_dict = dict(address=floating_address,
                            instance_id=orig_instance_uuid)
            LOG.info(_LI('re-assign floating IP %(address)s from '
                         'instance %(instance_id)s'), msg_dict)
            orig_instance = objects.Instance.get_by_uuid(context,
                                                         orig_instance_uuid)
            # purge cached nw info for the original instance
            base_api.update_instance_cache_with_nw_info(self, context,
                                                        orig_instance)
    @wrap_check_policy
    @base_api.refresh_cache
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociates a floating ip from fixed ip it is associated with."""
        return self.floating_manager.disassociate_floating_ip(context, address,
            affect_auto_assigned)
    @wrap_check_policy
    @base_api.refresh_cache
    def allocate_for_instance(self, context, instance, vpn,
                              requested_networks, macs=None,
                              security_groups=None,
                              dhcp_options=None):
        """Allocates all network structures for an instance.
        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param vpn: A boolean, if True, indicate a vpn to access the instance.
        :param requested_networks: A dictionary of requested_networks,
            Optional value containing network_id, fixed_ip, and port_id.
        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
        :param security_groups: None or security groups to allocate for
            instance.
        :param dhcp_options: None or a set of key/value pairs that should
            determine the DHCP BOOTP response, eg. for PXE booting an instance
            configured with the baremetal hypervisor. It is expected that these
            are already formatted for the neutron v2 api.
            See nova/virt/driver.py:dhcp_options_for_instance for an example.
        :returns: network info as from get_instance_nw_info() below
        """
        # NOTE(vish): We can't do the floating ip allocation here because
        #             this is called from compute.manager which shouldn't
        #             have db access so we do it on the other side of the
        #             rpc.
        flavor = instance.get_flavor()
        args = {}
        args['vpn'] = vpn
        args['requested_networks'] = requested_networks
        args['instance_id'] = instance.uuid
        args['project_id'] = instance.project_id
        args['host'] = instance.host
        args['rxtx_factor'] = flavor['rxtx_factor']
        args['macs'] = macs
        args['dhcp_options'] = dhcp_options
        nw_info = self.network_rpcapi.allocate_for_instance(context, **args)
        return network_model.NetworkInfo.hydrate(nw_info)
    @wrap_check_policy
    def deallocate_for_instance(self, context, instance,
                                requested_networks=None):
        """Deallocates all network structures related to instance."""
        # NOTE(vish): We can't do the floating ip deallocation here because
        #             this is called from compute.manager which shouldn't
        #             have db access so we do it on the other side of the
        #             rpc.
        if not isinstance(instance, obj_base.NovaObject):
            instance = objects.Instance._from_db_object(context,
                objects.Instance(), instance)
        self.network_rpcapi.deallocate_for_instance(context, instance=instance,
            requested_networks=requested_networks)
    # NOTE(danms): Here for neutron compatibility
    def allocate_port_for_instance(self, context, instance, port_id,
                                   network_id=None, requested_ip=None):
        raise NotImplementedError()
    # NOTE(danms): Here for neutron compatibility
    def deallocate_port_for_instance(self, context, instance, port_id):
        raise NotImplementedError()
    # NOTE(danms): Here for neutron compatibility
    def list_ports(self, *args, **kwargs):
        raise NotImplementedError()
    # NOTE(danms): Here for neutron compatibility
    def show_port(self, *args, **kwargs):
        raise NotImplementedError()
    @wrap_check_policy
    @base_api.refresh_cache
    def add_fixed_ip_to_instance(self, context, instance, network_id):
        """Adds a fixed ip to instance from specified network."""
        flavor = instance.get_flavor()
        args = {'instance_id': instance.uuid,
                'rxtx_factor': flavor['rxtx_factor'],
                'host': instance.host,
                'network_id': network_id}
        nw_info = self.network_rpcapi.add_fixed_ip_to_instance(
            context, **args)
        return network_model.NetworkInfo.hydrate(nw_info)
    @wrap_check_policy
    @base_api.refresh_cache
    def remove_fixed_ip_from_instance(self, context, instance, address):
        """Removes a fixed ip from instance from specified network."""
        flavor = instance.get_flavor()
        args = {'instance_id': instance.uuid,
                'rxtx_factor': flavor['rxtx_factor'],
                'host': instance.host,
                'address': address}
        nw_info = self.network_rpcapi.remove_fixed_ip_from_instance(
            context, **args)
        return network_model.NetworkInfo.hydrate(nw_info)
    @wrap_check_policy
    def add_network_to_project(self, context, project_id, network_uuid=None):
        """Force adds another network to a project."""
        self.network_rpcapi.add_network_to_project(context, project_id,
                                                   network_uuid)
    @wrap_check_policy
    def associate(self, context, network_uuid, host=base_api.SENTINEL,
                  project=base_api.SENTINEL):
        """Associate or disassociate host or project to network.
        Passing None for host or project clears that association; passing
        base_api.SENTINEL (the default) leaves it untouched.
        """
        network = self.get(context, network_uuid)
        if host is not base_api.SENTINEL:
            if host is None:
                objects.Network.disassociate(context, network.id,
                                             host=True, project=False)
            else:
                network.host = host
                network.save()
        if project is not base_api.SENTINEL:
            if project is None:
                objects.Network.disassociate(context, network.id,
                                             host=False, project=True)
            else:
                objects.Network.associate(context, project,
                                          network_id=network.id, force=True)
    @wrap_check_policy
    def get_instance_nw_info(self, context, instance, **kwargs):
        """Returns all network info related to an instance."""
        return super(API, self).get_instance_nw_info(context, instance,
                                                     **kwargs)
    def _get_instance_nw_info(self, context, instance, **kwargs):
        """Returns all network info related to an instance."""
        flavor = instance.get_flavor()
        args = {'instance_id': instance.uuid,
                'rxtx_factor': flavor['rxtx_factor'],
                'host': instance.host,
                'project_id': instance.project_id}
        nw_info = self.network_rpcapi.get_instance_nw_info(context, **args)
        return network_model.NetworkInfo.hydrate(nw_info)
    @wrap_check_policy
    def validate_networks(self, context, requested_networks, num_instances):
        """validate the networks passed at the time of creating
        the server.
        Return the number of instances that can be successfully allocated
        with the requested network configuration.
        """
        if requested_networks:
            self.network_rpcapi.validate_networks(context,
                                                  requested_networks)
        # Neutron validation checks and returns how many of num_instances
        # instances can be supported by the quota. For Nova network
        # this is part of the subsequent quota check, so we just return
        # the requested number in this case.
        return num_instances
    def create_pci_requests_for_sriov_ports(self, context,
                                            pci_requests,
                                            requested_networks):
        """Check requested networks for any SR-IOV port request.
        Create a PCI request object for each SR-IOV port, and add it to the
        pci_requests object that contains a list of PCI request object.
        """
        # This is NOOP for Nova network since it doesn't support SR-IOV.
        pass
    @wrap_check_policy
    def get_dns_domains(self, context):
        """Returns a list of available dns domains.
        These can be used to create DNS entries for floating ips.
        """
        return self.network_rpcapi.get_dns_domains(context)
    @wrap_check_policy
    def add_dns_entry(self, context, address, name, dns_type, domain):
        """Create specified DNS entry for address."""
        args = {'address': address,
                'name': name,
                'dns_type': dns_type,
                'domain': domain}
        return self.network_rpcapi.add_dns_entry(context, **args)
    @wrap_check_policy
    def modify_dns_entry(self, context, name, address, domain):
        """Modify the specified DNS entry for address."""
        args = {'address': address,
                'name': name,
                'domain': domain}
        return self.network_rpcapi.modify_dns_entry(context, **args)
    @wrap_check_policy
    def delete_dns_entry(self, context, name, domain):
        """Delete the specified dns entry."""
        args = {'name': name, 'domain': domain}
        return self.network_rpcapi.delete_dns_entry(context, **args)
    @wrap_check_policy
    def delete_dns_domain(self, context, domain):
        """Delete the specified dns domain."""
        return self.network_rpcapi.delete_dns_domain(context, domain=domain)
    @wrap_check_policy
    def get_dns_entries_by_address(self, context, address, domain):
        """Get entries for address and domain."""
        args = {'address': address, 'domain': domain}
        return self.network_rpcapi.get_dns_entries_by_address(context, **args)
    @wrap_check_policy
    def get_dns_entries_by_name(self, context, name, domain):
        """Get entries for name and domain."""
        args = {'name': name, 'domain': domain}
        return self.network_rpcapi.get_dns_entries_by_name(context, **args)
    @wrap_check_policy
    def create_private_dns_domain(self, context, domain, availability_zone):
        """Create a private DNS domain with nova availability zone."""
        args = {'domain': domain, 'av_zone': availability_zone}
        return self.network_rpcapi.create_private_dns_domain(context, **args)
    @wrap_check_policy
    def create_public_dns_domain(self, context, domain, project=None):
        """Create a public DNS domain with optional nova project."""
        args = {'domain': domain, 'project': project}
        return self.network_rpcapi.create_public_dns_domain(context, **args)
    @wrap_check_policy
    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        """Setup or teardown the network structures on hosts related to
        instance.
        """
        host = host or instance.host
        # NOTE(tr3buchet): host is passed in cases where we need to setup
        # or teardown the networks on a host which has been migrated to/from
        # and instance.host is not yet or is no longer equal to the host
        # being set up or torn down.
        args = {'instance_id': instance.id,
                'host': host,
                'teardown': teardown}
        self.network_rpcapi.setup_networks_on_host(context, **args)
    def _get_multi_addresses(self, context, instance):
        """Return (multi_host, floating_addresses) for the instance.
        Returns (False, []) when the instance has no fixed IPs; otherwise
        multi_host is taken from the first fixed IP's network.
        """
        try:
            fixed_ips = objects.FixedIPList.get_by_instance_uuid(
                context, instance.uuid)
        except exception.FixedIpNotFoundForInstance:
            return False, []
        addresses = []
        for fixed in fixed_ips:
            for floating in fixed.floating_ips:
                addresses.append(floating.address)
        return fixed_ips[0].network.multi_host, addresses
    @wrap_check_policy
    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance."""
        flavor = instance.get_flavor()
        args = dict(
            instance_uuid=instance.uuid,
            rxtx_factor=flavor['rxtx_factor'],
            project_id=instance.project_id,
            source_compute=migration['source_compute'],
            dest_compute=migration['dest_compute'],
            floating_addresses=None,
        )
        multi_host, addresses = self._get_multi_addresses(context, instance)
        if multi_host:
            args['floating_addresses'] = addresses
            args['host'] = migration['source_compute']
        self.network_rpcapi.migrate_instance_start(context, **args)
    @wrap_check_policy
    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance."""
        flavor = instance.get_flavor()
        args = dict(
            instance_uuid=instance.uuid,
            rxtx_factor=flavor['rxtx_factor'],
            project_id=instance.project_id,
            source_compute=migration['source_compute'],
            dest_compute=migration['dest_compute'],
            floating_addresses=None,
        )
        multi_host, addresses = self._get_multi_addresses(context, instance)
        if multi_host:
            args['floating_addresses'] = addresses
            args['host'] = migration['dest_compute']
        self.network_rpcapi.migrate_instance_finish(context, **args)
    def setup_instance_network_on_host(self, context, instance, host):
        """Setup network for specified instance on host."""
        self.migrate_instance_finish(context, instance,
                                     {'source_compute': None,
                                      'dest_compute': host})
    def cleanup_instance_network_on_host(self, context, instance, host):
        """Cleanup network for specified instance on host."""
        self.migrate_instance_start(context, instance,
                                    {'source_compute': host,
                                     'dest_compute': None})
| apache-2.0 |
mvaled/sentry | src/sentry/south_migrations/0083_migrate_dupe_groups.py | 5 | 39056 | # -*- coding: utf-8 -*-
from __future__ import print_function
import six
from south.v2 import DataMigration
class Migration(DataMigration):
def forwards(self, orm):
from django.db.models import F
from collections import defaultdict
from sentry.db.models import create_or_update
from sentry.utils.query import RangeQuerySetWrapper
# We don't fully merge results because it's simply not worth it
for group in RangeQuerySetWrapper(orm['sentry.Group'].objects.all()):
# could be already migrated
if not orm['sentry.Group'].objects.filter(id=group.id).exists():
continue
matches = list(
orm['sentry.Group'].objects.exclude(id=group.id)
.filter(checksum=group.checksum, project=group.project)
)
if not matches:
continue
print("Merging duplicate events for %r" % (group, ))
updates = defaultdict(int)
updates.update(
{
'first_seen': group.first_seen,
'last_seen': group.last_seen,
'active_at': group.active_at,
}
)
tag_updates = defaultdict(lambda: defaultdict(int))
counts = defaultdict(lambda: defaultdict(int))
for other in matches:
# migrate events first
orm['sentry.Event'].objects.filter(group=other).update(group=group)
updates['times_seen'] += other.times_seen
updates['users_seen'] += other.users_seen
updates['time_spent_total'] += other.time_spent_total
updates['time_spent_count'] += other.time_spent_count
for datecol in ('active_at', 'last_seen', 'first_seen'):
val = getattr(other, datecol)
if val and updates[datecol]:
updates[datecol] = max(val, updates[datecol])
elif val:
updates[datecol] = val
# determine missing tags
for tag in RangeQuerySetWrapper(
orm['sentry.MessageFilterValue'].objects.filter(group=other)
):
key = tag_updates[(tag.key, tag.value)]
key['times_seen'] += other.times_seen
for datecol in ('last_seen', 'first_seen'):
val = getattr(other, datecol)
if val and updates[datecol]:
updates[datecol] = max(val, updates[datecol])
elif val:
updates[datecol] = val
# determine counts
for count in RangeQuerySetWrapper(
orm['sentry.MessageCountByMinute'].objects.filter(group=other)
):
key = counts[count.date]
key['times_seen'] += count.times_seen
key['time_spent_total'] += count.time_spent_total
key['time_spent_count'] += count.time_spent_count
# migrate tags
for (key, value), data in six.iteritems(tag_updates):
defaults = {
'times_seen': F('times_seen') + data['times_seen'],
}
if 'last_seen' in data:
defaults['last_seen'] = data['last_seen']
if 'first_seen' in data:
defaults['first_seen'] = data['first_seen']
create_or_update(
orm['sentry.MessageFilterValue'],
project=group.project,
group=group,
key=key,
value=value,
values=values,
)
orm['sentry.MessageFilterValue'].objects.filter(group__in=matches).delete()
# migrate counts
for date, data in six.iteritems(counts):
create_or_update(
orm['sentry.MessageCountByMinute'],
project=group.project,
group=group,
date=date,
values={
'times_seen': F('times_seen') + data['times_seen'],
'time_spent_total': F('time_spent_total') + data['time_spent_total'],
'time_spent_count': F('time_spent_count') + data['time_spent_count'],
}
)
orm['sentry.MessageCountByMinute'].objects.filter(group__in=matches).delete()
orm['sentry.Group'].objects.filter(id=group.id).update(
times_seen=F('times_seen') + updates['times_seen'],
users_seen=F('users_seen') + updates['user_seen'],
time_spent_total=F('time_spent_total') + updates['time_spent_total'],
time_spent_count=F('time_spent_count') + updates['time_spent_count'],
last_seen=updates['last_seen'],
first_seen=updates['first_seen'],
active_at=updates['active_at'],
)
for other in matches:
other.delete()
    def backwards(self, orm):
        """No-op: merging duplicate groups is destructive and irreversible."""
models = {
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('django.db.models.fields.AutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '30'
})
},
'contenttypes.contenttype': {
'Meta': {
'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"
},
'app_label': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'model': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '100'
})
},
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.affecteduserbygroup': {
'Meta': {
'unique_together': "(('project', 'tuser', 'group'),)",
'object_name': 'AffectedUserByGroup'
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'tuser': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.TrackedUser']",
'null': 'True'
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments':
('django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'null': 'True'
}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'server_name': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'site': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'time_spent': ('django.db.models.fields.FloatField', [], {
'null': 'True'
})
},
'sentry.filterkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'FilterKey'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
})
},
'sentry.filtervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'FilterValue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments':
('django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'null': 'True'
}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
),
'users_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.messagecountbyminute': {
'Meta': {
'unique_together': "(('project', 'group', 'date'),)",
'object_name': 'MessageCountByMinute'
},
'date': ('django.db.models.fields.DateTimeField', [], {
'db_index': 'True'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.messagefiltervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'MessageFilterValue'
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.messageindex': {
'Meta': {
'unique_together': "(('column', 'value', 'object_id'),)",
'object_name': 'MessageIndex'
},
'column': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {
'unique_together': "(('team', 'email'),)",
'object_name': 'PendingTeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'pending_member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '50'
})
},
'sentry.project': {
'Meta': {
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'owner': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_owned_project_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'slug': (
'django.db.models.fields.SlugField', [], {
'max_length': '50',
'unique': 'True',
'null': 'True'
}
),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']",
'null': 'True'
}
)
},
'sentry.projectcountbyminute': {
'Meta': {
'unique_together': "(('project', 'date'),)",
'object_name': 'ProjectCountByMinute'
},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
),
'user_added': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'keys_added_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {
'unique_together': "(('project', 'group'),)",
'object_name': 'SearchDocument'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_changed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
})
},
'sentry.searchtoken': {
'Meta': {
'unique_together': "(('document', 'field', 'token'),)",
'object_name': 'SearchToken'
},
'document': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'token_set'",
'to': "orm['sentry.SearchDocument']"
}
),
'field':
('django.db.models.fields.CharField', [], {
'default': "'text'",
'max_length': '64'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
}),
'token': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
},
'sentry.team': {
'Meta': {
'object_name': 'Team'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'owner':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
})
},
'sentry.teammember': {
'Meta': {
'unique_together': "(('team', 'user'),)",
'object_name': 'TeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '50'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_teammember_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.trackeduser': {
'Meta': {
'unique_together': "(('project', 'ident'),)",
'object_name': 'TrackedUser'
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'groups': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Group']",
'through': "orm['sentry.AffectedUserByGroup']",
'symmetrical': 'False'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'num_events': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
symmetrical = True
| bsd-3-clause |
whaleygeek/mb_sdcard | src/microbit/auto_serial/serial/tools/list_ports_osx.py | 141 | 6489 | #!/usr/bin/env python
# portable serial port access with python
#
# This is a module that gathers a list of serial ports including details on OSX
#
# code originally from https://github.com/makerbot/pyserial/tree/master/serial/tools
# with contributions from cibomahto, dgs3, FarMcKon, tedbrandston
# and modifications by cliechti
#
# this is distributed under a free software license, see license.txt
# List all of the callout devices in OS/X by querying IOKit.
# See the following for a reference of how to do this:
# http://developer.apple.com/library/mac/#documentation/DeviceDrivers/Conceptual/WorkingWSerial/WWSerial_SerialDevs/SerialDevices.html#//apple_ref/doc/uid/TP30000384-CIHGEAFD
# More help from darwin_hid.py
# Also see the 'IORegistryExplorer' for an idea of what we are actually searching
import ctypes
from ctypes import util
import re
# Load the two macOS frameworks we talk to via ctypes.
iokit = ctypes.cdll.LoadLibrary(ctypes.util.find_library('IOKit'))
cf = ctypes.cdll.LoadLibrary(ctypes.util.find_library('CoreFoundation'))
# Global constants exported by the frameworks.
kIOMasterPortDefault = ctypes.c_void_p.in_dll(iokit, "kIOMasterPortDefault")
kCFAllocatorDefault = ctypes.c_void_p.in_dll(cf, "kCFAllocatorDefault")
# CFStringEncoding value for MacRoman (matches the .encode("mac_roman") calls below).
kCFStringEncodingMacRoman = 0
# Declare argument/return types so ctypes passes 64-bit pointers correctly
# instead of truncating them to the default c_int.
iokit.IOServiceMatching.restype = ctypes.c_void_p
iokit.IOServiceGetMatchingServices.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
iokit.IOServiceGetMatchingServices.restype = ctypes.c_void_p
iokit.IORegistryEntryGetParentEntry.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
iokit.IORegistryEntryCreateCFProperty.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint32]
iokit.IORegistryEntryCreateCFProperty.restype = ctypes.c_void_p
iokit.IORegistryEntryGetPath.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
iokit.IORegistryEntryGetPath.restype = ctypes.c_void_p
iokit.IORegistryEntryGetName.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
iokit.IORegistryEntryGetName.restype = ctypes.c_void_p
iokit.IOObjectGetClass.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
iokit.IOObjectGetClass.restype = ctypes.c_void_p
iokit.IOObjectRelease.argtypes = [ctypes.c_void_p]
cf.CFStringCreateWithCString.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int32]
cf.CFStringCreateWithCString.restype = ctypes.c_void_p
cf.CFStringGetCStringPtr.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
cf.CFStringGetCStringPtr.restype = ctypes.c_char_p
cf.CFNumberGetValue.argtypes = [ctypes.c_void_p, ctypes.c_uint32, ctypes.c_void_p]
cf.CFNumberGetValue.restype = ctypes.c_void_p
def get_string_property(device_t, property):
    """ Search the given device for the specified string property

    @param device_t Device to search
    @param property String to search for.
    @return Python string containing the value, or None if not found.
    """
    # Build a CFString key for the registry lookup.
    cf_key = cf.CFStringCreateWithCString(
        kCFAllocatorDefault,
        property.encode("mac_roman"),
        kCFStringEncodingMacRoman
    )
    # NULL container means the property does not exist on this entry.
    container = iokit.IORegistryEntryCreateCFProperty(
        device_t,
        cf_key,
        kCFAllocatorDefault,
        0
    )
    if not container:
        return None
    return cf.CFStringGetCStringPtr(container, 0)
def get_int_property(device_t, property):
    """ Search the given device for the specified integer property

    @param device_t Device to search
    @param property Name of the integer property to search for.
    @return int value of the property, or 0 if it was not found.
    """
    key = cf.CFStringCreateWithCString(
        kCFAllocatorDefault,
        property.encode("mac_roman"),
        kCFStringEncodingMacRoman
    )
    CFContainer = iokit.IORegistryEntryCreateCFProperty(
        device_t,
        key,
        kCFAllocatorDefault,
        0
    )
    # Decode the CFNumber into a C uint16 (type code 2 == kCFNumberSInt16Type).
    # If the property is missing the buffer stays zero-initialized, preserving
    # the historical "return 0 when not found" behavior callers rely on.
    number = ctypes.c_uint16()
    if CFContainer:
        cf.CFNumberGetValue(CFContainer, 2, ctypes.byref(number))
    return number.value
def IORegistryEntryGetName(device):
    # Returns the IOKit class name of the given registry entry (e.g.
    # 'IOUSBDevice').
    # NOTE(review): despite this wrapper's name it calls IOObjectGetClass,
    # not the native IORegistryEntryGetName. GetParentDeviceByType below
    # compares against class names like "IOUSBDevice", so this looks
    # intentional -- confirm before renaming or changing.
    pathname = ctypes.create_string_buffer(100) # TODO: Is this ok?
    iokit.IOObjectGetClass(
        device,
        ctypes.byref(pathname)
    )
    return pathname.value
def GetParentDeviceByType(device, parent_type):
    """ Find the first parent of a device that implements the parent_type
        @param IOService Service to inspect
        @return Pointer to the parent type, or None if it was not found.
    """
    # First, try to walk up the IOService tree to find a parent of this device that is a IOUSBDevice.
    # The comparison is by IOKit class name (see IORegistryEntryGetName above).
    while IORegistryEntryGetName(device) != parent_type:
        parent = ctypes.c_void_p()
        response = iokit.IORegistryEntryGetParentEntry(
            device,
            "IOService".encode("mac_roman"),
            ctypes.byref(parent)
        )
        # If we weren't able to find a parent for the device, we're done.
        # (non-zero response == kern_return_t error from IOKit)
        if response != 0:
            return None
        device = parent
    return device
def GetIOServicesByType(service_type):
    """Return a list of IOKit service handles whose class matches
    *service_type* (e.g. 'IOSerialBSDClient')."""
    iterator = ctypes.c_void_p()
    iokit.IOServiceGetMatchingServices(
        kIOMasterPortDefault,
        iokit.IOServiceMatching(service_type),
        ctypes.byref(iterator)
    )
    # Drain the iterator; a NULL service marks the end.
    found = []
    while iokit.IOIteratorIsValid(iterator):
        service = iokit.IOIteratorNext(iterator)
        if not service:
            break
        found.append(service)
    iokit.IOObjectRelease(iterator)
    return found
def comports():
    """Return a list of [device, description, hwid] triples, one per serial
    port registered with IOKit.

    For USB-backed ports the description is the USB product name and the
    hwid encodes the vendor/product IDs and serial number; for all other
    ports both fields are 'n/a'.
    """
    # Scan for all iokit serial ports
    services = GetIOServicesByType('IOSerialBSDClient')
    ports = []
    for service in services:
        info = []
        # First, add the callout device file.
        info.append(get_string_property(service, "IOCalloutDevice"))
        # If the serial port is implemented by a USB device, walk up to it
        # and report its product name, VID:PID and serial number.
        usb_device = GetParentDeviceByType(service, "IOUSBDevice")
        if usb_device != None:
            info.append(get_string_property(usb_device, "USB Product Name"))
            info.append(
                "USB VID:PID=%x:%x SNR=%s"%(
                get_int_property(usb_device, "idVendor"),
                get_int_property(usb_device, "idProduct"),
                get_string_property(usb_device, "USB Serial Number"))
                )
        else:
            info.append('n/a')
            info.append('n/a')
        ports.append(info)
    return ports
# test
if __name__ == '__main__':
    # Manual smoke test: print every detected port. The parenthesized form
    # emits one pre-formatted string, so it behaves identically under both
    # Python 2 (print statement) and Python 3 (print function), unlike the
    # previous Python-2-only `print "..." % (...)` statement.
    for port, desc, hwid in sorted(comports()):
        print("%s: %s [%s]" % (port, desc, hwid))
| mit |
qtekfun/htcDesire820Kernel | external/qemu/offset_layout.py | 102 | 3163 | #!/usr/bin/python
import re
import sys
import getopt
# Pixel offsets applied to the display, phone buttons, and soft keyboard;
# all default to 0 (unchanged) and are overwritten by ParseArgs().
DX = DY = PX = PY = KX = KY = 0
# Matches a section-opening line such as "  dpad-up {".
_RE_LINE = re.compile("^\s*(?P<keyword>[\w-]+)\s+{\s*$")
# Matches an "x NNN" / "y NNN" coordinate line, capturing the surrounding
# whitespace so the line can be reassembled with the same layout.
_RE_XY = re.compile("^(?P<start>\s*)(?P<xy>[x|y]\s+)(?P<num>\d+)(?P<end>\s*)$")
def main():
    # Parse the command-line offsets into the module globals, then rewrite
    # the layout read from stdin onto stdout.
    ParseArgs()
    ParseInput()
def Usage():
    # Print usage help to stderr and exit non-zero.
    # (Python 2 `print >>` syntax: this script targets Python 2 only.)
    print >>sys.stderr, """
Usage: %s --dx offset-x-display --dy offset-y-display --px offset-x-phone-buttons --py offset-y-phone-buttons --kx offset-x-keyboard --ky offset-y-keyboard < layout > layout2.
Unspecified offsets default to 0 (unchanged).
Reads from stdin, outputs to stdout.
Phone buttons: soft-left/top/righ/bottom, home, dpad, dial, power, etc.
Keyboard is the soft keyboard.
If your shell doesn't let you use negative integers, use _ for minus sign,
i.e. --dx _40 --dy _42 for <-40,-42).
""" % (sys.argv[0])
    sys.exit(1)
def ParseArgs():
    """Parse command-line options into the global offset variables.

    A leading '_' in a value stands for a minus sign (e.g. --dx _40 == -40)
    so shells that mangle negative numbers can still pass them. Unknown
    options or getopt errors print usage and exit.
    """
    global DX, DY, PX, PY, KX, KY
    try:
        options, args = getopt.getopt(sys.argv[1:], "", ["dx=", "dy=", "px=", "py=", "kx=", "ky="])
        for opt, value in options:
            if opt in ["--dx"]:
                DX = int(value.replace("_", "-"))
            elif opt in ["--dy"]:
                DY = int(value.replace("_", "-"))
            elif opt in ["--px"]:
                PX = int(value.replace("_", "-"))
            elif opt in ["--py"]:
                PY = int(value.replace("_", "-"))
            elif opt in ["--kx"]:
                KX = int(value.replace("_", "-"))
            elif opt in ["--ky"]:
                KY = int(value.replace("_", "-"))
            else:
                Usage()
    # Was `except getopt.error, msg:` -- Python-2-only syntax that bound a
    # name which was never used; the bare form parses under Python 2 and 3.
    except getopt.error:
        Usage()
def ParseInput():
    # Stream the layout file from stdin to stdout, shifting every "x N" /
    # "y N" coordinate line by the offset appropriate for the section we
    # are currently inside (display, phone button, or soft-keyboard key).
    global DX, DY, PX, PY, KX, KY
    # Section names treated as physical phone buttons (offset by PX/PY).
    PHONE = [ "soft-left", "home", "back", "dpad-up", "dpad-down", "dpad-left", "dpad-right", "dpad-center", "phone-dial", "phone-hangup", "power", "volume-up", "volume-down" ]
    # Named soft-keyboard keys; single alphanumeric characters also count.
    KEYBOARD = [ "DEL", "CAP", "CAP2", "PERIOD", "ENTER", "ALT", "SYM", "AT", "SPACE", "SLASH", "COMMA", "ALT2" ]
    mode = None
    while True:
        line = sys.stdin.readline()
        if not line:
            return
        m_line = _RE_LINE.match(line)
        if m_line:
            # A section-opening line ("keyword {"): track what we entered.
            keyword = m_line.group("keyword")
            if keyword in ["display", "button"]:
                mode = keyword
                is_phone = False
                is_keyboard = False
                print >>sys.stderr, "Mode:", mode
            else:
                if mode == "button" and "{" in line:
                    # Classify this button sub-section once on entry.
                    is_phone = keyword in PHONE
                    is_keyboard = (len(keyword) == 1 and keyword.isalnum())
                    if not is_keyboard:
                        is_keyboard = keyword in KEYBOARD
                # NOTE(review): _RE_LINE only matches lines ending in "{",
                # so this "}" branch looks unreachable -- confirm against a
                # real layout file before relying on it.
                elif "}" in line:
                    is_phone = False
                    is_keyboard = False
                    if mode == "display":
                        mode = None
        else:
            m_xy = _RE_XY.match(line)
            if m_xy:
                # Pick the offset pair for the current section kind.
                x = 0
                y = 0
                if mode == "display":
                    x = DX
                    y = DY
                elif mode == "button" and is_phone:
                    x = PX
                    y = PY
                elif mode == "button" and is_keyboard:
                    x = KX
                    y = KY
                if x or y:
                    # Rebuild the line with the shifted coordinate, keeping
                    # the original surrounding whitespace intact.
                    d = m_xy.groupdict()
                    n = int(d["num"])
                    if d["xy"].startswith("x"):
                        n += x
                    else:
                        n += y
                    d["num"] = n
                    line = "%(start)s%(xy)s%(num)s%(end)s" % d
        sys.stdout.write(line)
# Script entry point: filter the stdin layout to stdout with the offsets.
if __name__ == "__main__":
    main()
| gpl-2.0 |
99cloud/keystone_register | horizon/test/urls.py | 9 | 1458 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
URL patterns for testing Horizon views.
"""
from django.conf.urls.defaults import patterns, url, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
import horizon
# Root URLconf used only by the Horizon test suite: mounts all of Horizon,
# a login view rendered with the test template, the stock contrib.auth
# URLs, and a QUnit test-runner page for the JavaScript tests.
urlpatterns = patterns('',
    url(r'', include(horizon.urls)),
    url(r"auth/login/", "django.contrib.auth.views.login",
        {'template_name': "auth/login.html"},
        name='login'),
    url(r'auth/', include('django.contrib.auth.urls')),
    url(r'^qunit/$',
        TemplateView.as_view(template_name="horizon/qunit.html"),
        name='qunit_tests')
)
# Serve static assets when running under the test server.
urlpatterns += staticfiles_urlpatterns()
trunca/enigma2 | lib/python/Components/Renderer/ODChannelNumber.py | 4 | 1385 | from Components.VariableText import VariableText
from enigma import eLabel, iServiceInformation, eServiceReference, eServiceCenter
from Renderer import Renderer
#
# from vali, adapted for openpli, adapted for dmm
#
class ODChannelNumber(Renderer, VariableText):
	"""Renderer showing the 1-based position of the current service within
	the flattened list of all TV-bouquet channels, or '---' if the service
	is not found in any bouquet."""

	def __init__(self):
		Renderer.__init__(self)
		VariableText.__init__(self)
		# Flat list of channel names across all TV bouquets, built once.
		self.list = []
		self.getList()

	GUI_WIDGET = eLabel

	def changed(self, what):
		service = self.source.service
		info = service and service.info()
		if info is None:
			self.text = ""
			return
		# Strip the DVB emphasis control characters from the service name.
		name = info.getName().replace('\xc2\x86', '').replace('\xc2\x87', '')
		if name in self.list:
			# BUGFIX: the previous `for idx in range(1, len(self.list))`
			# scan checked indices 0..len-2 only, so the *last* channel in
			# the list was never numbered. index() covers every position
			# and returns the first match, matching the original intent.
			self.text = str(self.list.index(name) + 1)
		else:
			self.text = '---'

	def getList(self):
		serviceHandler = eServiceCenter.getInstance()
		services = serviceHandler.list(eServiceReference('1:7:1:0:0:0:0:0:0:0:(type == 1) || (type == 17) || (type == 195) || (type == 25) FROM BOUQUET "bouquets.tv" ORDER BY bouquet'))
		bouquets = services and services.getContent("SN", True)
		for bouquet in bouquets:
			services = serviceHandler.list(eServiceReference(bouquet[0]))
			channels = services and services.getContent("SN", True)
			for channel in channels:
				# Skip marker entries (type 1:64:) and strip emphasis chars.
				if not channel[0].startswith("1:64:"):
					self.list.append(channel[1].replace('\xc2\x86', '').replace('\xc2\x87', ''))
| gpl-2.0 |
gaeun/open-event-orga-server | app/api/notifications.py | 1 | 1798 | from flask.ext.restplus import Namespace
from app.helpers.data import DataManager
from app.helpers.data_getter import DataGetter
from app.models.notifications import Notification as NotificationModel
from .helpers import custom_fields as fields
from .helpers.helpers import (
can_create,
requires_auth
)
from .helpers.utils import PAGINATED_MODEL, ServiceDAO, \
POST_RESPONSES
from .helpers.utils import Resource
api = Namespace('notifications', description='Notifications', path='/')
# Serialized representation of a Notification record.
NOTIFICATION = api.model('Notification', {
    'id': fields.Integer(required=True),
    'email': fields.String(required=True),
    'title': fields.String(),
    'message': fields.String(),
    'action': fields.String(),
    'received_at': fields.DateTime(),
})
# Paginated wrapper around NOTIFICATION for list endpoints.
NOTIFICATION_PAGINATED = api.clone('NotificationPaginated', PAGINATED_MODEL, {
    'results': fields.List(fields.Nested(NOTIFICATION))
})
# POST payload schema: identical to NOTIFICATION minus the server-assigned id.
NOTIFICATION_POST = api.clone('NotificationPost', NOTIFICATION)
del NOTIFICATION_POST['id']
class NotificationDAO(ServiceDAO):
version_key = 'notifications_ver'
def create_user_notify(self, payload):
user = DataGetter.get_user_by_email(payload['email'])
DataManager().create_user_notification(user, payload['action'], payload['title'], payload['message'])
return user
DAO = NotificationDAO(NotificationModel, NOTIFICATION_POST)
@api.route('/events/<int:event_id>/notifications')
class UserNotifications(Resource):
    @requires_auth
    @can_create(DAO)
    @api.doc('create_user_notification', responses=POST_RESPONSES)
    @api.marshal_with(NOTIFICATION)
    @api.expect(NOTIFICATION_POST)
    def post(self, event_id):
        """Create user notification"""
        # NOTE(review): event_id comes from the route but is not used when
        # creating the notification -- confirm whether it should be recorded.
        return DAO.create_user_notify(
            self.api.payload,
        ), 201
| gpl-3.0 |
e-mission/e-mission-server | emission/analysis/plotting/geojson/geojson_feature_converter.py | 2 | 15279 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from builtins import *
import logging
import geojson as gj
import copy
import attrdict as ad
import pandas as pd
import emission.storage.timeseries.abstract_timeseries as esta
import emission.net.usercache.abstract_usercache as enua
import emission.storage.timeseries.timequery as estt
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.storage.decorations.section_queries as esds
import emission.storage.decorations.timeline as esdtl
import emission.core.wrapper.location as ecwl
import emission.core.wrapper.cleanedsection as ecwcs
import emission.core.wrapper.entry as ecwe
import emission.core.common as ecc
# TODO: Move this to the section_features class instead
import emission.analysis.intake.cleaning.location_smoothing as eaicl
import emission.analysis.config as eac
def _del_non_derializable(prop_dict, extra_keys):
for key in extra_keys:
if key in prop_dict:
del prop_dict[key]
def _stringify_foreign_key(prop_dict, key_names):
for key_name in key_names:
if hasattr(prop_dict, key_name):
setattr(prop_dict, key_name, str(getattr(prop_dict,key_name)))
def location_to_geojson(location):
    """
    Converts a location wrapper object into geojson format.
    This is pretty easy - it is a point.
    Since we have other properties that we care about, we make it a feature.
    Then, all the other stuff goes directly into the properties since the wrapper is a dict too!
    :param location: the location object
    :return: a geojson version of the location. the object is of type "Feature".
    """
    try:
        ret_feature = gj.Feature()
        ret_feature.id = str(location.get_id())
        # The stored loc is already a geojson point; reuse it as the geometry.
        ret_feature.geometry = location.data.loc
        ret_feature.properties = copy.copy(location.data)
        ret_feature.properties["feature_type"] = "location"
        # Drop loc from the properties now that it lives in the geometry.
        _del_non_derializable(ret_feature.properties, ["loc"])
        return ret_feature
    except Exception as e:
        # Log the offending object before re-raising so failures are traceable.
        logging.exception(("Error while converting object %s" % location))
        raise e
def place_to_geojson(place):
    """Convert a place wrapper object into a geojson 'Feature'.

    A place is a single point: its stored location becomes the geometry and
    the rest of the wrapper data is copied into the feature's properties.
    """
    feature = gj.Feature()
    feature.id = str(place.get_id())
    feature.geometry = place.data.location
    props = copy.copy(place.data)
    props["feature_type"] = "place"
    feature.properties = props
    # The location is now the geometry, so drop it from the properties.
    _del_non_derializable(feature.properties, ["location"])
    return feature
def stop_to_geojson(stop):
    """Convert a stop wrapper object into a geojson 'Feature'.

    The geometry is a two-point LineString from the stop's enter location
    to its exit location; all remaining wrapper data goes into the
    feature's properties.
    """
    feature = gj.Feature()
    feature.id = str(stop.get_id())
    geometry = gj.LineString()
    geometry.coordinates = [stop.data.enter_loc.coordinates, stop.data.exit_loc.coordinates]
    feature.geometry = geometry
    feature.properties = copy.copy(stop.data)
    feature.properties["feature_type"] = "stop"
    # NOTE(review): stops store enter_loc/exit_loc, so "location" is likely
    # never present here -- the pop is a harmless no-op in that case.
    _del_non_derializable(feature.properties, ["location"])
    return feature
def section_to_geojson(section, tl):
    """
    This is the trickiest part of the visualization.
    The section is basically a collection of points with a line through them.
    So the representation is a feature in which one feature which is the line, and one feature collection which is the set of point features.
    :param section: the section to be converted
    :return: a feature collection which is the geojson version of the section
    """
    ts = esta.TimeSeries.get_time_series(section.user_id)
    entry_it = ts.find_entries(["analysis/recreated_location"],
                               esda.get_time_query_for_trip_like(
                                   "analysis/cleaned_section",
                                   section.get_id()))
    # TODO: Decide whether we want to use Rewrite to use dataframes throughout instead of python arrays.
    # dataframes insert nans. We could use fillna to fill with default values, but if we are not actually
    # using dataframe features here, it is unclear how much that would help.
    feature_array = []
    section_location_entries = [ecwe.Entry(entry) for entry in entry_it]
    if len(section_location_entries) != 0:
        logging.debug("first element in section_location_array = %s" % section_location_entries[0])

        # If the last recreated location does not (approximately) match the
        # section's declared end point, try to patch the gap with the raw
        # filtered location recorded at the section's end timestamp.
        if not ecc.compare_rounded_arrays(section.data.end_loc.coordinates,
                                          section_location_entries[-1].data.loc.coordinates,
                                          digits=4):
            logging.info("section_location_array[-1].data.loc %s != section.data.end_loc %s even after df.ts fix, filling gap" % \
                    (section_location_entries[-1].data.loc, section.data.end_loc))
            # Config-gated hard failure for debugging pipelines that want to
            # catch this inconsistency instead of silently patching it.
            if eac.get_config()["output.conversion.validityAssertions"]:
                assert(False)
            last_loc_doc = ts.get_entry_at_ts("background/filtered_location", "data.ts", section.data.end_ts)
            if last_loc_doc is None:
                logging.warning("can't find entry to patch gap, leaving gap")
            else:
                last_loc_entry = ecwe.Entry(last_loc_doc)
                logging.debug("Adding new entry %s to fill the end point gap between %s and %s"
                              % (last_loc_entry.data.loc, section_location_entries[-1].data.loc,
                                 section.data.end_loc))
                section_location_entries.append(last_loc_entry)

    points_line_feature = point_array_to_line(section_location_entries)
    points_line_feature.id = str(section.get_id())
    points_line_feature.properties.update(copy.copy(section.data))
    # Update works on dicts, convert back to a section object to make the modes
    # work properly
    points_line_feature.properties = ecwcs.Cleanedsection(points_line_feature.properties)

    points_line_feature.properties["feature_type"] = "section"

    # When the pipeline is configured to expose inferred sections, report the
    # inferred mode for this cleaned section instead of the sensed one.
    if eac.get_section_key_for_analysis_results() == esda.INFERRED_SECTION_KEY:
        ise = esds.cleaned2inferred_section(section.user_id, section.get_id())
        if ise is not None:
            logging.debug("mapped cleaned section %s -> inferred section %s" %
                          (section.get_id(), ise.get_id()))
            logging.debug("changing mode from %s -> %s" %
                          (points_line_feature.properties.sensed_mode, ise.data.sensed_mode))
            points_line_feature.properties["sensed_mode"] = str(ise.data.sensed_mode)
        else:
            points_line_feature.properties["sensed_mode"] = str(points_line_feature.properties.sensed_mode)
    else:
        points_line_feature.properties["sensed_mode"] = str(points_line_feature.properties.sensed_mode)

    _del_non_derializable(points_line_feature.properties, ["start_loc", "end_loc"])

    # feature_array.append(gj.FeatureCollection(points_feature_array))
    feature_array.append(points_line_feature)

    return gj.FeatureCollection(feature_array)
def incident_to_geojson(incident):
    """Convert a manual incident report into a geojson point 'Feature'."""
    feature = gj.Feature()
    feature.id = str(incident.get_id())
    point = gj.Point()
    point.coordinates = incident.data.loc.coordinates
    feature.geometry = point
    feature.properties = copy.copy(incident.data)
    feature.properties["feature_type"] = "incident"
    # The location is now the geometry, so drop it from the properties.
    _del_non_derializable(feature.properties, ["loc"])
    return feature
def geojson_incidents_in_range(user_id, start_ts, end_ts):
    """Return geojson features for every manual/incident entry recorded for
    user_id between start_ts and end_ts, drawn from both the processed
    timeseries and the not-yet-processed user cache."""
    INCIDENT_KEY = "manual/incident"
    time_query = estt.TimeQuery("data.ts", start_ts, end_ts)
    ts = esta.TimeSeries.get_time_series(user_id)
    uc = enua.UserCache.getUserCache(user_id)
    docs = list(ts.find_entries([INCIDENT_KEY], time_query=time_query))
    docs += list(uc.getMessage([INCIDENT_KEY], time_query))
    return [incident_to_geojson(ecwe.Entry(doc)) for doc in docs]
def point_array_to_line(point_array):
    """Build a geojson LineString Feature from a list of location entries.

    The line's coordinates come from each entry's loc; two parallel arrays
    are stored in the feature's properties: 'times' (epoch seconds) and
    'timestamps' (epoch milliseconds, rounded to int).
    """
    coords = []
    times = []
    timestamps = []
    for entry in point_array:
        coords.append(entry.data.loc.coordinates)
        times.append(entry.data.ts)
        timestamps.append(int(round(entry.data.ts * 1000)))
    line = gj.LineString()
    line.coordinates = coords
    feature = gj.Feature()
    feature.geometry = line
    feature.properties = {"times": times, "timestamps": timestamps}
    return feature
def trip_to_geojson(trip, tl):
    """
    Trips are the main focus of our current visualization, so they are most complex.
    Each trip is represented as a feature collection with the following features:
    - two features for the start and end places
    - features for each stop in the trip
    - features for each section in the trip
    - features for any incidents reported during the trip's time window
    Untracked trips are skipped entirely (returns None); see the comment block below.
    :param trip: the trip object to be converted
    :param tl: the timeline used to retrieve related objects
    :return: the geojson version of the trip, or None for untracked trips
    """
    feature_array = []
    # Start and end places come from the caller's timeline, not the per-trip one
    curr_start_place = tl.get_object(trip.data.start_place)
    curr_end_place = tl.get_object(trip.data.end_place)
    start_place_geojson = place_to_geojson(curr_start_place)
    start_place_geojson["properties"]["feature_type"] = "start_place"
    feature_array.append(start_place_geojson)
    end_place_geojson = place_to_geojson(curr_end_place)
    end_place_geojson["properties"]["feature_type"] = "end_place"
    feature_array.append(end_place_geojson)
    # The per-trip timeline yields the stops ("places") and sections ("trips")
    # that make up this trip's interior
    trip_tl = esdt.get_cleaned_timeline_for_trip(trip.user_id, trip.get_id())
    stops = trip_tl.places
    for stop in stops:
        feature_array.append(stop_to_geojson(stop))
    for i, section in enumerate(trip_tl.trips):
        section_gj = section_to_geojson(section, tl)
        feature_array.append(section_gj)
    trip_geojson = gj.FeatureCollection(features=feature_array, properties=trip.data)
    trip_geojson.id = str(trip.get_id())
    # NOTE(review): feature_array is extended *after* being passed to
    # FeatureCollection — this only adds the incidents to the output if
    # FeatureCollection keeps a reference to the list rather than copying
    # it. Presumably that is the case with this geojson library; confirm
    # before upgrading the dependency.
    feature_array.extend(geojson_incidents_in_range(trip.user_id,
                                                    curr_start_place.data.exit_ts,
                                                    curr_end_place.data.enter_ts))
    if trip.metadata.key == esda.CLEANED_UNTRACKED_KEY:
        # trip_geojson.properties["feature_type"] = "untracked"
        # Since the "untracked" type is not correctly handled on the phone, we just
        # skip these trips until
        # https://github.com/e-mission/e-mission-phone/issues/118
        # is fixed
        # TODO: Once it is fixed, re-introduce the first line in this block
        # and remove the None check in get_geojson_for_timeline
        return None
    else:
        trip_geojson.properties["feature_type"] = "trip"
    return trip_geojson
def get_geojson_for_ts(user_id, start_ts, end_ts):
    """Build the geojson timeline for a user over a raw timestamp range."""
    timeline = esdtl.get_cleaned_timeline(user_id, start_ts, end_ts)
    timeline.fill_start_end_places()
    return get_geojson_for_timeline(user_id, timeline)
def get_geojson_for_dt(user_id, start_local_dt, end_local_dt):
    """Build the geojson timeline for a user over a local-datetime range."""
    logging.debug("Getting geojson for %s -> %s" % (start_local_dt, end_local_dt))
    timeline = esdtl.get_cleaned_timeline_from_dt(user_id, start_local_dt, end_local_dt)
    timeline.fill_start_end_places()
    return get_geojson_for_timeline(user_id, timeline)
def get_geojson_for_timeline(user_id, tl):
    """
    Convert every trip in the timeline to its geojson representation.

    tl represents the "timeline" object that is queried for the trips and
    locations; user_id is carried for context only.

    :return: a list of geojson feature collections, one per convertible trip
    """
    geojson_list = []
    for trip in tl.trips:
        try:
            trip_geojson = trip_to_geojson(trip, tl)
            # trip_to_geojson returns None for untracked trips, which are
            # deliberately skipped (see the TODO there)
            if trip_geojson is not None:
                geojson_list.append(trip_geojson)
        except Exception as e:
            logging.exception("Found error %s while processing trip %s" % (e, trip))
            # Bare `raise` preserves the original traceback; the previous
            # `raise e` restarted the traceback from this frame.
            raise
    logging.debug("trip count = %d, geojson count = %d" %
                  (len(tl.trips), len(geojson_list)))
    return geojson_list
def get_all_points_for_range(user_id, key, start_ts, end_ts):
    """Fetch every entry of *key* written in [start_ts, end_ts] for the user
    and render the result as point + line features."""
    import emission.storage.timeseries.timequery as estt
    time_query = estt.TimeQuery("metadata.write_ts", start_ts, end_ts)
    timeseries = esta.TimeSeries.get_time_series(user_id)
    entries = [ecwe.Entry(doc) for doc in timeseries.find_entries([key], time_query)]
    return get_feature_list_for_point_array(entries)
def get_feature_list_for_point_array(points_array):
    """Convert raw location entries into a FeatureCollection holding the
    individual point features plus a single line connecting them.

    :param points_array: list of location entries (ecwe.Entry)
    :return: a geojson FeatureCollection
    """
    points_feature_array = [location_to_geojson(le) for le in points_array]
    # Report via logging for consistency with the rest of this module
    # (was a stray `print`, which bypassed the configured log handlers).
    logging.debug("Found %d features from %d points",
                  len(points_feature_array), len(points_array))
    feature_array = []
    feature_array.append(gj.FeatureCollection(points_feature_array))
    feature_array.append(point_array_to_line(points_array))
    feature_coll = gj.FeatureCollection(feature_array)
    return feature_coll
def get_feature_list_from_df(loc_time_df, ts="ts", latitude="latitude", longitude="longitude", fmt_time="fmt_time"):
    """
    Render a location dataframe as point + line geojson features.

    Input DF should have columns called "ts", "latitude" and "longitude", or
    the corresponding columns can be passed in using the ts, latitude and
    longitude parameters.
    """
    entries = get_location_entry_list_from_df(
        loc_time_df, ts, latitude, longitude, fmt_time)
    return get_feature_list_for_point_array(entries)
def get_location_entry_list_from_df(loc_time_df, ts="ts", latitude="latitude", longitude="longitude", fmt_time="fmt_time"):
    """Wrap each dataframe row as a synthetic background/location entry.

    The dataframe index is used as the entry id, and a geojson Point is
    built from the (longitude, latitude) pair of each row.
    """
    entries = []
    for idx, row in loc_time_df.iterrows():
        loc_dict = {
            "latitude": row[latitude],
            "longitude": row[longitude],
            "ts": row[ts],
            "_id": str(idx),
            "fmt_time": row[fmt_time],
            "loc": gj.Point(coordinates=[row[longitude], row[latitude]]),
        }
        entries.append(ecwe.Entry.create_entry(
            "dummy_user", "background/location", ecwl.Location(loc_dict)))
    return entries
| bsd-3-clause |
nikste/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/dask_io.py | 138 | 3844 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow dask.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
try:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
allowed_classes = (dd.Series, dd.DataFrame)
HAS_DASK = True
except ImportError:
HAS_DASK = False
def _add_to_index(df, start):
"""New dask.dataframe with values added to index of each subdataframe."""
df = df.copy()
df.index += start
return df
def _get_divisions(df):
"""Number of rows in each sub-dataframe."""
lengths = df.map_partitions(len).compute()
divisions = np.cumsum(lengths).tolist()
divisions.insert(0, 0)
return divisions
def _construct_dask_df_with_divisions(df):
  """Construct the new task graph and make a new dask.dataframe around it.

  Each partition's task shifts that partition's index by its cumulative
  row offset, so the result has a well-defined global integer index.
  """
  from toolz import merge  # pylint: disable=g-import-not-at-top
  divisions = _get_divisions(df)
  # pylint: disable=protected-access
  name = 'csv-index' + df._name
  dsk = {}
  for i in range(df.npartitions):
    dsk[(name, i)] = (_add_to_index, (df._name, i), divisions[i])
  # pylint: enable=protected-access

  if isinstance(df, dd.DataFrame):
    return dd.DataFrame(merge(dsk, df.dask), name, df.columns, divisions)
  elif isinstance(df, dd.Series):
    return dd.Series(merge(dsk, df.dask), name, df.name, divisions)
def extract_dask_data(data):
  """Extract data from dask.Series or dask.DataFrame for predictors.

  Given a distributed dask.DataFrame or dask.Series containing columns or
  names for one or more predictors, returns a single dask.DataFrame or
  dask.Series with a globally consistent index that can be iterated over.

  Args:
    data: A distributed dask.DataFrame or dask.Series.

  Returns:
    A dask.DataFrame or dask.Series that can be iterated over. Anything that
    is not a dask.DataFrame/dask.Series is returned unchanged.
  """
  if not isinstance(data, allowed_classes):
    return data
  return _construct_dask_df_with_divisions(data)
def extract_dask_labels(labels):
  """Extract data from dask.Series or dask.DataFrame for labels.

  Given a distributed dask.DataFrame or dask.Series containing exactly one
  column or name, this operation returns a single dask.DataFrame or dask.Series
  that can be iterated over.

  Args:
    labels: A distributed dask.DataFrame or dask.Series with exactly one
            column or name.

  Returns:
    A dask.DataFrame or dask.Series that can be iterated over.
    If the supplied argument is neither a dask.DataFrame nor a dask.Series this
    operation returns it without modification.

  Raises:
    ValueError: If the supplied dask.DataFrame contains more than one column.
  """
  if isinstance(labels, dd.DataFrame):
    # Only a DataFrame can actually carry multiple label columns.
    if len(labels.columns) > 1:
      raise ValueError('Only one column for labels is allowed.')
    return _construct_dask_df_with_divisions(labels)
  elif isinstance(labels, dd.Series):
    # A Series is inherently single-column. The previous code applied
    # len() to the Series *name* (a string), wrongly raising ValueError
    # for any series whose name was longer than one character.
    return _construct_dask_df_with_divisions(labels)
  else:
    return labels
| apache-2.0 |
jmhsi/justin_tinker | lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.py | 1323 | 1775 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """Trie emulation backed by a dict plus a sorted key list.

    Prefix queries use binary search over the sorted keys; a one-entry
    cache of the last (prefix -> key-range) lookup speeds up the common
    case of successively longer prefixes.
    """

    def __init__(self, data):
        if not all(isinstance(x, text_type) for x in data.keys()):
            raise TypeError("All keys must be strings")

        self._data = data
        self._keys = sorted(data.keys())
        # Cache of the last prefix queried and the (lo, hi) slice of
        # self._keys that could match it.
        self._cachestr = ""
        self._cachepoints = (0, len(data))

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        """Return the set of keys starting with *prefix* (all keys if None/"")."""
        if prefix is None or prefix == "" or not self._keys:
            return set(self._keys)

        # If the new prefix extends the cached one, the matching keys can
        # only lie within the cached range, so narrow the binary search.
        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            start = i = bisect_left(self._keys, prefix, lo, hi)
        else:
            start = i = bisect_left(self._keys, prefix)

        keys = set()
        if start == len(self._keys):
            return keys

        # Bounds check added: without it, a prefix matched by every key up
        # to the end of the list (e.g. keys(["abc"], prefix="a")) walked
        # past the end and raised IndexError.
        while i < len(self._keys) and self._keys[i].startswith(prefix):
            keys.add(self._keys[i])
            i += 1

        self._cachestr = prefix
        self._cachepoints = (start, i)

        return keys

    def has_keys_with_prefix(self, prefix):
        """Return True if any key starts with *prefix*."""
        if prefix in self._data:
            return True

        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            i = bisect_left(self._keys, prefix, lo, hi)
        else:
            i = bisect_left(self._keys, prefix)

        if i == len(self._keys):
            return False

        # The first key >= prefix matches iff it starts with the prefix.
        return self._keys[i].startswith(prefix)
| apache-2.0 |
wikimedia/operations-debs-python-diamond | src/collectors/kvm/test/testkvm.py | 8 | 3172 | #!/usr/bin/python
# coding=utf-8
################################################################################
import os
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
StringIO # workaround for pyflakes issue #13
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from kvm import KVMCollector
################################################################################
class TestKVMCollector(CollectorTestCase):
    """Unit tests for KVMCollector using the fixture files in ./fixtures/."""
    def setUp(self):
        # 10-second interval: the derivative-based metrics below are scaled
        # by this interval
        config = get_collector_config('KVMCollector', {
            'interval': 10,
        })
        self.collector = KVMCollector(config, None)
        # Point the collector at the checked-in fixture files instead of
        # the real /sys or /proc location
        self.collector.PROC = os.path.dirname(__file__) + '/fixtures/'
    def test_import(self):
        self.assertTrue(KVMCollector)
    @patch('os.access', Mock(return_value=True))
    @patch.object(Collector, 'publish')
    def test_should_work_with_synthetic_data(self, publish_mock):
        # First collect reads all-zero synthetic counters via a patched
        # open(); this primes the derivative calculation, so nothing is
        # published yet
        patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
            '0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0'
            + '\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n'
        )))
        patch_open.start()
        self.collector.collect()
        patch_open.stop()
        self.assertPublishedMany(publish_mock, {})
        # Second collect reads the real fixture values; published metrics
        # are presumably the per-interval deltas from the zeroed first
        # sample divided by the interval — TODO confirm against
        # KVMCollector.collect()
        self.collector.collect()
        metrics = {
            'efer_reload': 0.000000,
            'exits': 1436135848.000000,
            'fpu_reload': 121764903.500000,
            'halt_exits': 544586282.600000,
            'halt_wakeup': 235093451.400000,
            'host_state_reload': 801854250.600000,
            'hypercalls': 0.000000,
            'insn_emulation': 1314391264.700000,
            'insn_emulation_fail': 0.000000,
            'invlpg': 0.000000,
            'io_exits': 248822813.200000,
            'irq_exits': 701647108.400000,
            'irq_injections': 986654069.600000,
            'irq_window': 162240965.200000,
            'largepages': 351789.400000,
            'mmio_exits': 20169.400000,
            'mmu_cache_miss': 1643.300000,
            'mmu_flooded': 0.000000,
            'mmu_pde_zapped': 0.000000,
            'mmu_pte_updated': 0.000000,
            'mmu_pte_write': 11144.000000,
            'mmu_recycled': 0.000000,
            'mmu_shadow_zapped': 384.700000,
            'mmu_unsync': 0.000000,
            'nmi_injections': 0.000000,
            'nmi_window': 0.000000,
            'pf_fixed': 355636.100000,
            'pf_guest': 0.000000,
            'remote_tlb_flush': 111.200000,
            'request_irq': 0.000000,
            'signal_exits': 0.000000,
            'tlb_flush': 0.000000,
        }
        # Record this run as the documentation example for the collector
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
| mit |
srossross/stable.world | stable_world/interact/bucket_configs/words.py | 1 | 18634 | # flake8: noqa
adjectives = ['average', 'big', 'colossal', 'fat', 'giant', 'gigantic', 'great', 'huge', 'immense', 'large', 'little', 'long', 'mammoth', 'massive', 'miniature', 'petite', 'puny', 'short', 'small', 'tall', 'tiny', 'boiling', 'breezy', 'broken', 'bumpy', 'chilly', 'cold', 'cool', 'creepy', 'crooked', 'cuddly', 'curly', 'damaged', 'damp', 'dirty', 'dry', 'dusty', 'filthy', 'flaky', 'fluffy', 'wet', 'broad', 'chubby', 'crooked', 'curved', 'deep', 'flat', 'high', 'hollow', 'low', 'narrow', 'round', 'shallow', 'skinny', 'square', 'steep', 'straight', 'wide', 'ancient', 'brief', 'early', 'fast', 'late', 'long', 'modern', 'old', 'old-fashioned', 'quick', 'rapid', 'short', 'slow', 'swift', 'young', 'abundant', 'empty', 'few', 'heavy', 'light', 'many', 'numerous', 'Sound', 'cooing', 'deafening', 'faint', 'harsh', 'high-pitched', 'hissing', 'hushed', 'husky', 'loud', 'melodic', 'moaning', 'mute', 'noisy', 'purring', 'quiet', 'raspy', 'resonant', 'screeching', 'shrill', 'silent', 'soft', 'squealing', 'thundering', 'voiceless', 'whispering', 'bitter', 'delicious', 'fresh', 'juicy', 'ripe', 'rotten', 'salty', 'sour', 'spicy', 'stale', 'sticky', 'strong', 'sweet', 'tasteless', 'tasty', 'thirsty', 'fluttering', 'fuzzy', 'greasy', 'grubby', 'hard', 'hot', 'icy', 'loose', 'melted', 'plastic', 'prickly', 'rainy', 'rough', 'scattered', 'shaggy', 'shaky', 'sharp', 'shivering', 'silky', 'slimy', 'slippery', 'smooth', 'soft', 'solid', 'steady', 'sticky', 'tender', 'tight', 'uneven', 'weak', 'wet', 'wooden', 'afraid', 'angry', 'annoyed', 'anxious', 'arrogant', 'ashamed', 'awful', 'bad', 'bewildered', 'bored', 'combative', 'condemned', 'confused', 'creepy', 'cruel', 'dangerous', 'defeated', 'defiant', 'depressed', 'disgusted', 'disturbed', 'eerie', 'embarrassed', 'envious', 'evil', 'fierce', 'foolish', 'frantic', 'frightened', 'grieving', 'helpless', 'homeless', 'hungry', 'hurt', 'ill', 'jealous', 'lonely', 'mysterious', 'naughty', 'nervous', 'obnoxious', 'outrageous', 'panicky', 
'repulsive', 'scary', 'scornful', 'selfish', 'sore', 'tense', 'terrible', 'thoughtless', 'tired', 'troubled', 'upset', 'uptight', 'weary', 'wicked', 'worried', 'agreeable', 'amused', 'brave', 'calm', 'charming', 'cheerful', 'comfortable', 'cooperative', 'courageous', 'delightful', 'determined', 'eager', 'elated', 'enchanting', 'encouraging', 'energetic', 'enthusiastic', 'excited', 'exuberant', 'fair', 'faithful', 'fantastic', 'fine', 'friendly', 'funny', 'gentle', 'glorious', 'good', 'happy', 'healthy', 'helpful', 'hilarious', 'jolly', 'joyous', 'kind', 'lively', 'lovely', 'lucky', 'obedient', 'perfect', 'pleasant', 'proud', 'relieved', 'silly', 'smiling', 'splendid', 'successful', 'thoughtful', 'victorious', 'vivacious', 'witty', 'wonderful', 'zealous', 'zany', 'other', 'good', 'new', 'old', 'great', 'high', 'small', 'different', 'large', 'local', 'social', 'important', 'long', 'young', 'national', 'british', 'right', 'early', 'possible', 'big', 'little', 'political', 'able', 'late', 'general', 'full', 'far', 'low', 'public', 'available', 'bad', 'main', 'sure', 'clear', 'major', 'economic', 'only', 'likely', 'real', 'black', 'particular', 'international', 'special', 'difficult', 'certain', 'open', 'whole', 'white', 'free', 'short', 'easy', 'strong', 'european', 'central', 'similar', 'human', 'common', 'necessary', 'single', 'personal', 'hard', 'private', 'poor', 'financial', 'wide', 'foreign', 'simple', 'recent', 'concerned', 'american', 'various', 'close', 'fine', 'english', 'wrong', 'present', 'royal', 'natural', 'individual', 'nice', 'french', 'following', 'current', 'modern', 'labour', 'legal', 'happy', 'final', 'red', 'normal', 'serious', 'previous', 'total', 'prime', 'significant', 'industrial', 'sorry', 'dead', 'specific', 'appropriate', 'top', 'soviet', 'basic', 'military', 'original', 'successful', 'aware', 'hon', 'popular', 'heavy', 'professional', 'direct', 'dark', 'cold', 'ready', 'green', 'useful', 'effective', 'western', 'traditional', 'scottish', 
'german', 'independent', 'deep', 'interesting', 'considerable', 'involved', 'physical', 'left', 'hot', 'existing', 'responsible', 'complete', 'medical', 'blue', 'extra', 'past', 'male', 'interested', 'fair', 'essential', 'beautiful', 'civil', 'primary', 'obvious', 'future', 'environmental', 'positive', 'senior', 'nuclear', 'annual', 'relevant', 'huge', 'rich', 'commercial', 'safe', 'regional', 'practical', 'official', 'separate', 'key', 'chief', 'regular', 'due', 'additional', 'active', 'powerful', 'complex', 'standard', 'impossible', 'light', 'warm', 'middle', 'fresh', 'sexual', 'front', 'domestic', 'actual', 'united', 'technical', 'ordinary', 'cheap', 'strange', 'internal', 'excellent', 'quiet', 'soft', 'potential', 'northern', 'religious', 'quick', 'very', 'famous', 'cultural', 'proper', 'broad', 'joint', 'formal', 'limited', 'conservative', 'lovely', 'usual', 'ltd', 'unable', 'rural', 'initial', 'substantial', 'christian', 'bright', 'average', 'leading', 'reasonable', 'immediate', 'suitable', 'equal', 'detailed', 'working', 'overall', 'female', 'afraid', 'democratic', 'growing', 'sufficient', 'scientific', 'eastern', 'correct', 'inc', 'irish', 'expensive', 'educational', 'mental', 'dangerous', 'critical', 'increased', 'familiar', 'unlikely', 'double', 'perfect', 'slow', 'tiny', 'dry', 'historical', 'thin', 'daily', 'southern', 'increasing', 'wild', 'alone', 'urban', 'empty', 'married', 'narrow', 'liberal', 'supposed', 'upper', 'apparent', 'tall', 'busy', 'bloody', 'prepared', 'russian', 'moral', 'careful', 'clean', 'attractive', 'japanese', 'vital', 'thick', 'alternative', 'fast', 'ancient', 'elderly', 'rare', 'external', 'capable', 'brief', 'wonderful', 'grand', 'typical', 'entire', 'grey', 'constant', 'vast', 'surprised', 'ideal', 'terrible', 'academic', 'funny', 'minor', 'pleased', 'severe', 'ill', 'corporate', 'negative', 'permanent', 'weak', 'brown', 'fundamental', 'odd', 'crucial', 'inner', 'used', 'criminal', 'contemporary', 'sharp', 'sick', 'near', 
'roman', 'massive', 'unique', 'secondary', 'parliamentary', 'african', 'unknown', 'subsequent', 'angry', 'alive', 'guilty', 'lucky', 'enormous', 'well', 'communist', 'yellow', 'unusual', 'net', 'long-term', 'tough', 'dear', 'extensive', 'glad', 'remaining', 'agricultural', 'alright', 'healthy', 'italian', 'principal', 'tired', 'efficient', 'comfortable', 'chinese', 'relative', 'friendly', 'conventional', 'willing', 'sudden', 'proposed', 'voluntary', 'slight', 'valuable', 'dramatic', 'golden', 'temporary', 'federal', 'keen', 'flat', 'silent', 'indian', 'video-taped', 'worried', 'pale', 'statutory', 'welsh', 'dependent', 'firm', 'wet', 'competitive', 'armed', 'radical', 'outside', 'acceptable', 'sensitive', 'living', 'pure', 'global', 'emotional', 'sad', 'secret', 'rapid', 'adequate', 'fixed', 'sweet', 'administrative', 'wooden', 'remarkable', 'comprehensive', 'surprising', 'solid', 'rough', 'mere', 'mass', 'brilliant', 'maximum', 'absolute', 'tory', 'electronic', 'visual', 'electric', 'cool', 'spanish', 'literary', 'continuing', 'supreme', 'chemical', 'genuine', 'exciting', 'written', 'stupid', 'advanced', 'extreme', 'classical', 'fit', 'favourite', 'socialist', 'widespread', 'confident', 'straight', 'catholic', 'proud', 'numerous', 'opposite', 'distinct', 'mad', 'helpful', 'given', 'disabled', 'consistent', 'anxious', 'nervous', 'awful', 'stable', 'constitutional', 'satisfied', 'conscious', 'developing', 'strategic', 'holy', 'smooth', 'dominant', 'remote', 'theoretical', 'outstanding', 'pink', 'pretty', 'clinical', 'minimum', 'honest', 'impressive', 'related', 'residential', 'extraordinary', 'plain', 'visible', 'accurate', 'distant', 'still', 'greek', 'complicated', 'musical', 'precise', 'gentle', 'broken', 'live', 'silly', 'fat', 'tight', 'monetary', 'round', 'psychological', 'violent', 'unemployed', 'inevitable', 'junior', 'sensible', 'grateful', 'pleasant', 'dirty', 'structural', 'welcome', 'so-called', 'deaf']
nouns = ['richard', 'decryption', 'bangladesh', 'pony', 'futon', 'karate', 'oboe', 'fireplace', 'cribbage', 'vise', 'shack', 'rat', 'cellar', 'interloper', 'rediscovery', 'magician', 'bonnet', 'session', 'policeman', 'jackal', 'ashtray', 'form', 'discount', 'manservant', 'damage', 'bijou', 'bassinet', 'blouse', 'brome', 'tough-guy', 'space', 'beauty', 'arrow', 'yurt', 'responsibility', 'draw', 'edge', 'link', 'elephant', 'visor', 'crew', 'commercial', 'train', 'football', 'regret', 'bend', 'fatigues', 'december', 'till', 'chinese', 'vane', 'forgery', 'stocking', 'deformation', 'mint', 'geriatrician', 'recess', 'recommendation', 'definition', 'iraq', 'barometer', 'partner', 'king', 'person', 'accident', 'care', 'dragon', 'cowbell', 'strawberry', 'rethinking', 'attenuation', 'birdcage', 'review', 'winter', 'sabre', 'evidence', 'eggplant', 'ease', 'typhoon', 'arch-rival', 'floozie', 'frazzle', 'feature', 'lady', 'disgust', 'blade', 'gauge', 'diadem', 'octet', 'earmuffs', 'caption', 'ecumenist', 'second', 'mantua', 'coal', 'satisfaction', 'microlending', 'honoree', 'hospice', 'shallot', 'landform', 'pantsuit', 'north', 'drawing', 'manx', 'ear', 'analog', 'usher', 'tummy', 'theism', 'tangerine', 'bondsman', 'mantle', 'soil', 'composer', 'spectacle', 'bugle', 'pamphlet', 'apron', 'screw', 'sloth', 'sector', 'empowerment', 'sympathy', 'puffin', 'hops', 'effective', 'breakpoint', 'foot', 'summer', 'grey', 'cymbals', 'chastity', 'cotton', 'cash', 'cob', 'movie', 'yam', 'lighting', 'extreme', 'committee', 'zinc', 'bangle', 'original', 'inventory', 'health', 'crook', 'menu', 'phrase', 'catamaran', 'arm', 'godmother', 'scrip', 'compulsion', 'mark', 'use', 'trailer', 'nondisclosure', 'future', 'cashier', 'shovel', 'comradeship', 'airfare', 'gram', 'batter', 'tablecloth', 'bowling', 'fiddle', 'junker', 'tandem', 'chivalry', 'shopper', 'body', 'engineering', 'cousin', 'classroom', 'quiver', 'sky', 'canvas', 'tram', 'alcove', 'jewel', 'criteria', 'menorah', 'minister', 'pelt', 
'polish', 'rub', 'sugar', 'capricorn', 'croissant', 'pitch', 'adapter', 'collision', 'michael', 'cloud', 'alibi', 'casino', 'sponge', 'octagon', 'rate', 'jury', 'dictaphone', 'pin', 'bongo', 'fundraising', 'august', 'playground', 'year', 'armor', 'sell', 'initial', 'peony', 'meal', 'plywood', 'retina', 'balloon', 'mechanic', 'rocker', 'tenement', 'block', 'temperature', 'hexagon', 'deer', 'babe', 'angora', 'hive', 'lead', 'purple', 'tear', 'gem', 'fur', 'crystallography', 'apparatus', 'oxford', 'pink', 'pudding', 'resource', 'industry', 'lever', 'mukluk', 'demand', 'almanac', 'paperback', 'wool', 'number', 'pilgrimage', 'production', 'liner', 'pasta', 'enquiry', 'activity', 'moustache', 'change', 'marxism', 'cherries', 'coonskin', 'crash', 'language', 'mousse', 'libra', 'outrigger', 'impress', 'sonnet', 'sweets', 'slider', 'aluminum', 'pvc', 'cappelletti', 'bracket', 'custard', 'tree', 'mistake', 'education', 'altitude', 'legume', 'shoulder', 'cocoa', 'bather', 'desert', 'perspective', 'interviewer', 'violet', 'calculation', 'underground', 'bunghole', 'character', 'shock', 'charge', 'material', 'atm', 'bass', 'tepee', 'patrol', 'cultivator', 'oldie', 'motorboat', 'hot', 'netbook', 'knuckle', 'september', 'cup', 'carnation', 'beyond', 'bag', 'porter', 'crib', 'great-grandmother', 'beach', 'heartwood', 'address', 'attraction', 'conference', 'oval', 'pancake', 'poland', 'backpack', 'alloy', 'contrary', 'bird', 'rectangle', 'pail', 'acoustic', 'dentist', 'downgrade', 'prelude', 'canteen', 'tomato', 'trapdoor', 'sleep', 'low', 'airmail', 'moth', 'consul', 'conversation', 'passion', 'eyeliner', 'carbon', 'ottoman', 'inspection', 'lizard', 'recruit', 'fly', 'well', 'command', 'party', 'goodbye', 'drama', 'mouser', 'moment', 'tutu', 'luttuce', 'pocket', 'volcano', 'bagpipes', 'bacon', 'clerk', 'pine', 'peach', 'water', 'cupboard', 'choice', 'television', 'sunday', 'sale', 'fratricide', 'sustainment', 'title', 'cement', 'publisher', 'editorial', 'cupola', 'elbow', 'nerve', 
'vanity', 'knight', 'whorl', 'soda', 'malaysia', 'control', 'format', 'tank-top', 'trolley', 'funeral', 'former', 'diploma', 'pseudoscience', 'cesspool', 'net', 'marketing', 'umbrella', 'policy', 'cauliflower', 'offence', 'apple', 'find', 'netball', 'councilor', 'lion', 'onion', 'overclocking', 'chive', 'tanker', 'scarecrow', 'laborer', 'escape', 'savior', 'mezzanine', 'freight', 'music', 'shoestring', 'artificer', 'blackboard', 'riddle', 'pinto', 'monastery', 'west', 'hurry', 'fruit', 'ink', 'slash', 'hydrant', 'frost', 'noise', 'unblinking', 'replace', 'spacing', 'weasel', 'in-laws', 'friday', 'ride', 'trick', 'alpenhorn', 'sage', 'tabby', 'guitar', 'forestry', 'agreement', 'peak', 'pocket-watch', 'cameo', 'pen', 'gosling', 'save', 'grassland', 'packet', 'dog', 'sarah', 'kneejerk', 'possibility', 'maybe', 'cherry', 'misplacement', 'caravan', 'shred', 'bake', 'meaning', 'roller', 'problem', 'reception', 'pressurisation', 'design', 'chocolate', 'destiny', 'mailbox', 'cot', 'disease', 'toque', 'swimming', 'conspirator', 'corduroy', 'sleuth', 'potential', 'dark', 'pearl', 'gearshift', 'laparoscope', 'goal', 'cent', 'panda', 'bar', 'john', 'cottage', 'squid', 'curtain', 'vegetable', 'rope', 'insurgence', 'garment', 'submarine', 'butter', 'constellation', 'slippers', 'view', 'humidity', 'freighter', 'explanation', 'abolishment', 'difficulty', 'sun', 'dashboard', 'foot-rest', 'clasp', 'apartment', 'dugout', 'leg', 'college', 'heavy', 'work', 'litigation', 'raincoat', 'locket', 'procedure', 'seat', 'necklace', 'thigh', 'canoe', 'inlay', 'chess', 'father-in-law', 'effacement', 'basis', 'anklet', 'self', 'call', 'finance', 'dedication', 'spy', 'congressman', 'refrigerator', 'wrinkle', 'mist', 'understanding', 'depression', 'celebration', 'hyena', 'waterfall', 'eyelid', 'variety', 'crowd', 'emery', 'bungalow', 'espadrille', 'fishbone', 'philosophy', 'revolve', 'sycamore', 'toenail', 'harbor', 'bonsai', 'sweater', 'act', 'master', 'checkroom', 'beginner', 'recording', 
'helen', 'portfolio', 'earthquake', 'click', 'gold', 'river', 'spume', 'lung', 'underneath', 'dogsled', 'historian', 'shoehorn', 'metronome', 'shoemaker', 'grain', 'cravat', 'sort', 'bottom', 'shofar', 'chandelier', 'output', 'lettuce', 'lily', 'currency', 'club', 'case', 'hat', 'vineyard', 'astrolabe', 'pad', 'transportation', 'sudan', 'hurricane', 'tulip', 'waterskiing', 'skylight', 'fawn', 'surgeon', 'venezuela', 'thongs', 'accelerator', 'venezuelan', 'eponym', 'geology', 'ridge', 'increase', 'cuff-links', 'wampum', 'vinyl', 'swan', 'spinach', 'interior', 'government', 'shop', 'sourwood', 'marriage', 'story-telling', 'tam', 'shoes', 'greece', 'pith', 'lapdog', 'creche', 'garter', 'revenue', 'sweats', 'need', 'juggernaut', 'midi', 'platinum', 'environment', 'assumption', 'authority', 'delete', 'actress', 'zebrafish', 'elk', 'south', 'vision', 'clover', 'wish', 'weekend', 'expression', 'rubber', 'maraca', 'plain', 'bengal', 'linen', 'video', 'finger', 'orchid', 'underclothes', 'pouch', 'energy', 'eyestrain', 'worklife', 'ordination', 'bunch', 'wednesday', 'watch', 'loggia', 'anger', 'chairperson', 'recorder', 'dealer', 'catacomb', 'alpha', 'pressroom', 'lumber', 'barstool', 'fiber', 'server', 'occupation', 'crocodile', 'cascade', 'flugelhorn', 'motel', 'chest', 'junk', 'wrong', 'mice', 'pansy', 'music-box', 'authorisation', 'thaw', 'clavicle', 'teaching', 'hip', 'gate', 'order', 'curl', 'hedgehog', 'sack', 'roadway', 'job', 'campanile', 'baby', 'refectory', 'candy', 'laura', 'sprinter', 'tremor', 'owl', 'ladder', 'galley', 'gladiolus', 'line', 'schooner', 'frown', 'fencing', 'wealth', 'client', 'ad', 'employ', 'marksman', 'toast', 'cornet', 'hall', 'chop', 'latency', 'councilman', 'opportunity', 'pneumonia', 'english', 'british', 'deployment', 'son', 'oeuvre', 'bootee', 'oyster', 'bowl', 'index', 'box', 'dirt', 'insulation', 'cloakroom', 'oncology', 'shoe-horn', 'clarinet', 'radish', 'fedelini', 'pusher', 'law', 'balcony', 'clogs', 'sled', 'corn', 'hand-holding', 
'motion', 'korea', 'elixir', 'sturgeon', 'coinsurance', 'xylophone', 'handmaiden', 'big-rig', 'weird', 'settler', 'bite', 'russia', 'math', 'hostel', 'culvert', 'project', 'hope', 'banjo', 'frock', 'hygienic', 'miscarriage', 'mary', 'verve', 'debt', 'lounge', 'soybean', 'table', 'steak', 'building', 'titanium', 'caution', 'sock', 'route', 'sepal', 'solution', 'shadow', 'kind', 'e-book', 'step-brother', 'leprosy', 'squatter', 'interest', 'yak', 'larch', 'skulduggery', 'tom-tom', 'sheath', 'harpooner', 'linseed', 'astrology', 'nurse', 'tale', 'purse', 'router', 'kazoo', 'brain', 'wallet', 'lunch', 'speaker', 'geyser', 'tambour', 'skate', 'young', 'windshield', 'yarn', 'uzbekistan', 'snowmobiling', 'caddy', 'macrame', 'theater', 'turban', 'babies', 'anything', 'guilty', 'best-seller', 'america', 'bandanna', 'novel', 'crab', 'level', 'spray', 'knife-edge', 'kettledrum', 'billboard', 'thing', 'meet', 'poof', 'pimp', 'monster', 'redesign', 'scissors', 'homogenate', 'morning', 'pain', 'leo', 'feet', 'light', 'quantity', 'big', 'railway', 'mantel', 'starter', 'cyst', 'vibe', 'hood', 'demur', 'pharmacopoeia', 'tub', 'obi', 'sewer', 'rhinoceros', 'toothpick', 'ability', 'sledge', 'technician', 'gun', 'couch', 'complaint', 'ironclad', 'income', 'stamp', 'factory', 'hobby', 'anatomy', 'booty', 'event', 'margaret', 'colon', 'red', 'volume', 'psychoanalyst', 'asterisk', 'footnote', 'dilapidation', 'eyelids', 'massage', 'salesman', 'opera', 'pew', 'brandy', 'loincloth', 'fringe', 'gear', 'algebra', 'aries', 'cupcake', 'good-bye', 'wastebasket', 'mixer', 'kick', 'twine', 'spike', 'grease', 'bower', 'particular', 'tuesday', 'whale', 'achiever', 'buy', 'ring', 'noodle', 'sprout', 'wet-bar', 'tiara', 'piss', 'attachment', 'oil', 'council', 'minor', 'fertilizer', 'style', 'guide', 'candidate', 'danger', 'aquifer', 'phone', 'sunglasses', 'foray', 'towel', 'cheque', 'hamburger', 'hotel', 'men', 'blizzard', 'notebook', 'reflection', 'text', 'dromedary', 'jacket', 'bandolier', 
'trapezium', 'cathedral', 'figurine', 'pencil', 'thought', 'thursday', 'thunderbolt', 'buyer', 'web', 'cost', 'confusion', 'diving', 'azimuth', 'primate', 'island', 'coil', 'turn', 'cicada', 'locomotive', 'nicety', 'flight', 'hill', 'exposition', 'keyboard', 'pedestrian', 'innervation', 'blueberry', 'plastic', 'range', 'reality', 'achieve', 'hearthside', 'representative', 'trim', 'digestion', 'feedback', 'pier', 'breastplate', 'structure', 'atrium', 'doubt', 'fusarium', 'hour', 'fortune', 'netsuke', 'clank', 'lier', 'force', 'belfry', 'hardware', 'suck', 'channel', 'distance', 'weeder', 'claus', 'broker', 'fortnight', 'eel', 'icon', 'shears', 'latex', 'chick', 'method', 'soccer', 'expansion', 'church']
| bsd-2-clause |
hgrecco/pyvisa-sim | pyvisa-sim/gpib.py | 1 | 1937 | # -*- coding: utf-8 -*-
"""
pyvisa-sim.gpib
~~~~~~~~~~~~~~~
GPIB simulated session.
:copyright: 2014 by PyVISA-sim Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
try:
import Queue as queue
except ImportError:
import queue
import time
from pyvisa import constants
from . import sessions
@sessions.Session.register(constants.InterfaceType.gpib, 'INSTR')
class GPIBInstrumentSession(sessions.Session):
    """Simulated session for GPIB INSTR resources."""

    def after_parsing(self):
        # Expose the parsed GPIB address components as VISA attributes
        self.attrs[constants.VI_ATTR_INTF_NUM] = int(self.parsed.board)
        self.attrs[constants.VI_ATTR_GPIB_PRIMARY_ADDR] = int(self.parsed.primary_address)
        self.attrs[constants.VI_ATTR_GPIB_SECONDARY_ADDR] = int(self.parsed.secondary_address)

    def read(self, count):
        """Poll the simulated device for up to `count` bytes.

        Returns a (bytes, StatusCode) tuple. Reading stops on the
        termination character (if enabled), on reaching `count` bytes,
        or on timeout.
        """
        end_char, _ = self.get_attribute(constants.VI_ATTR_TERMCHAR)
        enabled, _ = self.get_attribute(constants.VI_ATTR_TERMCHAR_EN)
        timeout, _ = self.get_attribute(constants.VI_ATTR_TMO_VALUE)
        timeout /= 1000  # VISA timeout is in ms; time.time() works in s

        start = time.time()

        out = b''

        while time.time() - start <= timeout:
            last = self.device.read()

            if not last:
                # Nothing available yet; back off briefly before polling again
                time.sleep(.01)
                continue

            out += last

            if enabled:
                if len(out) > 0 and out[-1] == end_char:
                    return out, constants.StatusCode.success_termination_character_read

            if len(out) == count:
                return out, constants.StatusCode.success_max_count_read

        return out, constants.StatusCode.error_timeout

    def write(self, data):
        """Write `data` to the simulated device one byte at a time."""
        # BUG FIX: get_attribute returns a (value, status_code) tuple (see
        # read() above). The original code bound the whole tuple, which is
        # always truthy, so the send_end check could never be False.
        send_end, _ = self.get_attribute(constants.VI_ATTR_SEND_END_EN)

        for i in range(len(data)):
            self.device.write(data[i:i+1])

        if send_end:
            # EOM4882
            pass
| mit |
EvanK/ansible-modules-core | cloud/openstack/_glance_image.py | 23 | 9278 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['deprecated'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: glance_image
version_added: "1.2"
deprecated: Deprecated in 1.10. Use os_image instead
short_description: Add/Delete images from glance
description:
- Add or Remove images from the glance repository.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name that has to be given to the image
required: true
default: None
disk_format:
description:
- The format of the disk that is getting uploaded
required: false
default: qcow2
container_format:
description:
- The format of the container
required: false
default: bare
owner:
description:
- The owner of the image
required: false
default: None
min_disk:
description:
- The minimum disk space required to deploy this image
required: false
default: None
min_ram:
description:
- The minimum ram required to deploy this image
required: false
default: None
is_public:
description:
- Whether the image can be accessed publicly
required: false
default: 'yes'
copy_from:
description:
- A url from where the image can be downloaded, mutually exclusive with file parameter
required: false
default: None
timeout:
description:
- The time to wait for the image process to complete in seconds
required: false
default: 180
file:
description:
- The path to the file which has to be uploaded, mutually exclusive with copy_from
required: false
default: None
endpoint_type:
description:
- The name of the glance service's endpoint URL type
choices: [publicURL, internalURL]
required: false
default: publicURL
version_added: "1.7"
requirements:
- "python >= 2.6"
- "python-glanceclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
- name: Upload an image from an HTTP URL
glance_image:
login_username: admin
login_password: passme
login_tenant_name: admin
name: cirros
container_format: bare
disk_format: qcow2
state: present
copy_from: http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
'''
import time
try:
import glanceclient
HAS_GLANCECLIENT = True
except ImportError:
HAS_GLANCECLIENT = False
try:
from keystoneclient.v2_0 import client as ksclient
HAS_KEYSTONECLIENT = True
except ImportError:
HAS_KEYSTONECLIENT= False
def _get_ksclient(module, kwargs):
    """Authenticate against keystone and return a keystone client.

    On failure the Ansible module is aborted via ``module.fail_json``.
    """
    auth_args = dict(
        username=kwargs.get('login_username'),
        password=kwargs.get('login_password'),
        tenant_name=kwargs.get('login_tenant_name'),
        auth_url=kwargs.get('auth_url'),
    )
    try:
        client = ksclient.Client(**auth_args)
    except Exception as e:
        module.fail_json(msg="Error authenticating to the keystone: %s " % e.message)
    return client
def _get_endpoint(module, client, endpoint_type):
try:
endpoint = client.service_catalog.url_for(service_type='image', endpoint_type=endpoint_type)
except Exception as e:
module.fail_json(msg="Error getting endpoint for glance: %s" % e.message)
return endpoint
def _get_glance_client(module, kwargs):
    """Build a v1 glance client, authenticating through keystone first.

    Aborts the module via ``fail_json`` when glance cannot be reached.
    """
    keystone = _get_ksclient(module, kwargs)
    endpoint = _get_endpoint(module, keystone, kwargs.get('endpoint_type'))
    client_kwargs = {
        'token': keystone.auth_token,
    }
    try:
        client = glanceclient.Client('1', endpoint, **client_kwargs)
    except Exception as e:
        module.fail_json(msg="Error in connecting to glance: %s" % e.message)
    return client
def _glance_image_present(module, params, client):
try:
for image in client.images.list():
if image.name == params['name']:
return image.id
return None
except Exception as e:
module.fail_json(msg="Error in fetching image list: %s" % e.message)
def _glance_image_create(module, params, client):
    """Create a glance image and wait for it to become 'active'.

    Polls glance every 5 seconds for up to ``params['timeout']`` seconds.
    Exits the Ansible module via exit_json (success) or fail_json (error
    or timeout); it never returns normally in practice.
    """
    kwargs = {
        'name': params.get('name'),
        'disk_format': params.get('disk_format'),
        'container_format': params.get('container_format'),
        'owner': params.get('owner'),
        'is_public': params.get('is_public'),
        'copy_from': params.get('copy_from'),
    }
    try:
        timeout = float(params.get('timeout'))
        expire = time.time() + timeout
        image = client.images.create(**kwargs)
        if not params['copy_from']:
            # No URL to pull from: upload the local file's contents instead.
            image.update(data=open(params['file'], 'rb'))
        while time.time() < expire:
            # Re-fetch until glance reports the image as usable.
            image = client.images.get(image.id)
            if image.status == 'active':
                break
            time.sleep(5)
    except Exception as e:
        module.fail_json(msg="Error in creating image: %s" % e.message)
    if image.status == 'active':
        module.exit_json(changed=True, result=image.status, id=image.id)
    else:
        module.fail_json(msg=" The module timed out, please check manually " + image.status)
def _glance_delete_image(module, params, client):
try:
for image in client.images.list():
if image.name == params['name']:
client.images.delete(image)
except Exception as e:
module.fail_json(msg="Error in deleting image: %s" % e.message)
module.exit_json(changed=True, result="Deleted")
def main():
    """Entry point: create or delete a glance image per the module params.

    Relies on AnsibleModule.exit_json/fail_json terminating the process,
    which is why the 'present' branch can fall through to exit_json after
    _glance_image_create (that helper always exits itself).
    """
    argument_spec = openstack_argument_spec()
    argument_spec.update(dict(
        name = dict(required=True),
        disk_format = dict(default='qcow2', choices=['aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']),
        container_format = dict(default='bare', choices=['aki', 'ari', 'bare', 'ovf']),
        owner = dict(default=None),
        min_disk = dict(default=None),
        min_ram = dict(default=None),
        is_public = dict(default=True),
        copy_from = dict(default= None),
        timeout = dict(default=180),
        file = dict(default=None),
        endpoint_type = dict(default='publicURL', choices=['publicURL', 'internalURL']),
        state = dict(default='present', choices=['absent', 'present'])
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive = [['file','copy_from']],
    )
    if not HAS_GLANCECLIENT:
        module.fail_json(msg='python-glanceclient is required for this module')
    if not HAS_KEYSTONECLIENT:
        module.fail_json(msg='python-keystoneclient is required for this module')
    if module.params['state'] == 'present':
        if not module.params['file'] and not module.params['copy_from']:
            module.fail_json(msg="Either file or copy_from variable should be set to create the image")
        client = _get_glance_client(module, module.params)
        id = _glance_image_present(module, module.params, client)
        if not id:
            _glance_image_create(module, module.params, client)
        # Image already existed: report success without changing anything.
        module.exit_json(changed=False, id=id, result="success")
    if module.params['state'] == 'absent':
        client = _get_glance_client(module, module.params)
        id = _glance_image_present(module, module.params, client)
        if not id:
            module.exit_json(changed=False, result="Success")
        else:
            _glance_delete_image(module, module.params, client)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
amitjamadagni/sympy | sympy/printing/ccode.py | 2 | 9629 | """
C code printer
The CCodePrinter converts single sympy expressions into single C expressions,
using the functions defined in math.h where possible.
A complete code generator, which uses ccode extensively, can be found in
sympy.utilities.codegen. The codegen module can be used to generate complete
source code files that are compilable without further modifications.
"""
from sympy.core import S, C
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence
from sympy.core.compatibility import default_sort_key
# dictionary mapping sympy function to (argument_conditions, C_function).
# Used in CCodePrinter._print_Function(self)
known_functions = {
    # Each entry maps a sympy function name to a list of
    # (argument_condition, C function name) pairs; the first pair whose
    # condition holds for the call's arguments is used.
    "ceiling": [(lambda x: True, "ceil")],
    "Abs": [(lambda x: not x.is_integer, "fabs")],
}
class CCodePrinter(CodePrinter):
    """A printer to convert python expressions to strings of c code"""
    printmethod = "_ccode"
    # Defaults merged with user-supplied settings by CodePrinter.__init__.
    _default_settings = {
        'order': None,
        'full_prec': 'auto',
        'precision': 15,
        'user_functions': {},
        'human': True,
    }
    def __init__(self, settings={}):
        """Register function mappings supplied by user"""
        CodePrinter.__init__(self, settings)
        self.known_functions = dict(known_functions)
        userfuncs = settings.get('user_functions', {})
        for k, v in userfuncs.items():
            if not isinstance(v, tuple):
                # A bare name is normalized to the (condition, name) form
                # with an always-true condition.
                userfuncs[k] = (lambda *x: True, v)
        self.known_functions.update(userfuncs)
    def _rate_index_position(self, p):
        """function to calculate score based on position among indices
        This method is used to sort loops in an optimized order, see
        CodePrinter._sort_optimized()
        """
        return p*5
    def _get_statement(self, codestring):
        # Terminate a C statement.
        return "%s;" % codestring
    def doprint(self, expr, assign_to=None):
        """
        Actually format the expression as C code.
        """
        if isinstance(assign_to, basestring):
            assign_to = C.Symbol(assign_to)
        elif not isinstance(assign_to, (C.Basic, type(None))):
            raise TypeError("CCodePrinter cannot assign to object of type %s" %
                    type(assign_to))
        # keep a set of expressions that are not strictly translatable to C
        # and number constants that must be declared and initialized
        not_c = self._not_supported = set()
        self._number_symbols = set()
        # We treat top level Piecewise here to get if tests outside loops
        lines = []
        if isinstance(expr, C.Piecewise):
            for i, (e, c) in enumerate(expr.args):
                if i == 0:
                    lines.append("if (%s) {" % self._print(c))
                elif i == len(expr.args) - 1 and c is True:
                    lines.append("else {")
                else:
                    lines.append("else if (%s) {" % self._print(c))
                code0 = self._doprint_a_piece(e, assign_to)
                lines.extend(code0)
                lines.append("}")
        else:
            code0 = self._doprint_a_piece(expr, assign_to)
            lines.extend(code0)
        # format the output
        if self._settings["human"]:
            frontlines = []
            if len(not_c) > 0:
                frontlines.append("// Not C:")
                for expr in sorted(not_c, key=str):
                    frontlines.append("// %s" % repr(expr))
            for name, value in sorted(self._number_symbols, key=str):
                frontlines.append("double const %s = %s;" % (name, value))
            lines = frontlines + lines
            lines = "\n".join(lines)
            result = self.indent_code(lines)
        else:
            # Machine-readable variant: hand back the bookkeeping sets too.
            lines = self.indent_code("\n".join(lines))
            result = self._number_symbols, not_c, lines
        del self._not_supported
        del self._number_symbols
        return result
    def _get_loop_opening_ending(self, indices):
        """Returns a tuple (open_lines, close_lines) containing lists of codelines
        """
        open_lines = []
        close_lines = []
        loopstart = "for (int %(var)s=%(start)s; %(var)s<%(end)s; %(var)s++){"
        for i in indices:
            # C arrays start at 0 and end at dimension-1
            open_lines.append(loopstart % {
                'var': self._print(i.label),
                'start': self._print(i.lower),
                'end': self._print(i.upper + 1)})
            close_lines.append("}")
        return open_lines, close_lines
    def _print_Pow(self, expr):
        PREC = precedence(expr)
        # Special-case the two powers C spells without pow().
        if expr.exp == -1:
            return '1.0/%s' % (self.parenthesize(expr.base, PREC))
        elif expr.exp == 0.5:
            return 'sqrt(%s)' % self._print(expr.base)
        else:
            return 'pow(%s, %s)' % (self._print(expr.base),
                                    self._print(expr.exp))
    def _print_Rational(self, expr):
        p, q = int(expr.p), int(expr.q)
        # Long-double literals keep the quotient in floating point.
        return '%d.0L/%d.0L' % (p, q)
    def _print_Indexed(self, expr):
        # calculate index for 1d array
        dims = expr.shape
        inds = [ i.label for i in expr.indices ]
        elem = S.Zero
        offset = S.One
        for i in reversed(range(expr.rank)):
            elem += offset*inds[i]
            offset *= dims[i]
        return "%s[%s]" % (self._print(expr.base.label), self._print(elem))
    def _print_Exp1(self, expr):
        return "M_E"
    def _print_Pi(self, expr):
        return 'M_PI'
    def _print_Infinity(self, expr):
        return 'HUGE_VAL'
    def _print_NegativeInfinity(self, expr):
        return '-HUGE_VAL'
    def _print_Piecewise(self, expr):
        # This method is called only for inline if constructs
        # Top level piecewise is handled in doprint()
        ecpairs = ["((%s) ? (\n%s\n)\n" % (self._print(c), self._print(e))
                   for e, c in expr.args[:-1]]
        last_line = ""
        if expr.args[-1].cond is True:
            last_line = ": (\n%s\n)" % self._print(expr.args[-1].expr)
        else:
            ecpairs.append("(%s) ? (\n%s\n" %
                           (self._print(expr.args[-1].cond),
                            self._print(expr.args[-1].expr)))
        code = "%s" + last_line
        return code % ": ".join(ecpairs) + " )"
    def _print_And(self, expr):
        PREC = precedence(expr)
        # Sort args for a deterministic, canonical output order.
        return ' && '.join(self.parenthesize(a, PREC)
                for a in sorted(expr.args, key=default_sort_key))
    def _print_Or(self, expr):
        PREC = precedence(expr)
        return ' || '.join(self.parenthesize(a, PREC)
                for a in sorted(expr.args, key=default_sort_key))
    def _print_Not(self, expr):
        PREC = precedence(expr)
        return '!' + self.parenthesize(expr.args[0], PREC)
    def _print_Function(self, expr):
        if expr.func.__name__ in self.known_functions:
            cond_cfunc = self.known_functions[expr.func.__name__]
            for cond, cfunc in cond_cfunc:
                if cond(*expr.args):
                    return "%s(%s)" % (cfunc, self.stringify(expr.args, ", "))
        if hasattr(expr, '_imp_') and isinstance(expr._imp_, C.Lambda):
            # inlined function
            return self._print(expr._imp_(*expr.args))
        return CodePrinter._print_Function(self, expr)
    def indent_code(self, code):
        """Accepts a string of code or a list of code lines"""
        if isinstance(code, basestring):
            code_lines = self.indent_code(code.splitlines(True))
            return ''.join(code_lines)
        tab = " "
        inc_token = ('{', '(', '{\n', '(\n')
        dec_token = ('}', ')')
        code = [ line.lstrip(' \t') for line in code ]
        # Per-line flags: does this line open / close a brace level?
        increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
        decrease = [ int(any(map(line.startswith, dec_token)))
                     for line in code ]
        pretty = []
        level = 0
        for n, line in enumerate(code):
            if line == '' or line == '\n':
                pretty.append(line)
                continue
            level -= decrease[n]
            pretty.append("%s%s" % (tab*level, line))
            level += increase[n]
        return pretty
def ccode(expr, assign_to=None, **settings):
    r"""Converts an expr to a string of c code
    Parameters
    ==========
    expr : sympy.core.Expr
        a sympy expression to be converted
    precision : optional
        the precision for numbers such as pi [default=15]
    user_functions : optional
        A dictionary where keys are FunctionClass instances and values
        are there string representations.  Alternatively, the
        dictionary value can be a list of tuples i.e. [(argument_test,
        cfunction_string)].  See below for examples.
    human : optional
        If True, the result is a single string that may contain some
        constant declarations for the number symbols.  If False, the
        same information is returned in a more programmer-friendly
        data structure.
    Examples
    ========
    >>> from sympy import ccode, symbols, Rational, sin
    >>> x, tau = symbols(["x", "tau"])
    >>> ccode((2*tau)**Rational(7,2))
    '8*sqrt(2)*pow(tau, 7.0L/2.0L)'
    >>> ccode(sin(x), assign_to="s")
    's = sin(x);'
    """
    # Thin wrapper: all settings are forwarded verbatim to CCodePrinter.
    return CCodePrinter(settings).doprint(expr, assign_to)
def print_ccode(expr, **settings):
    """Prints C representation of the given expression."""
    # NOTE: Python 2 print statement; this module predates Python 3 support.
    print ccode(expr, **settings)
| bsd-3-clause |
KarolKraskiewicz/autoscaler | addon-resizer/vendor/github.com/ugorji/go/codec/test.py | 1516 | 4019 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
    """Build the canonical test-value list.

    Layout: all primitive values, then the primitive list itself as one
    nested element, then the int 1, then the composite (dict/list) values.
    """
    primitives = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464.0,
        6464646464.0,
        False,
        True,
        u"null",
        None,
        u"someday",
        1328176922000002000,
        u"",
        -2206187877999998000,
        u"bytestring",
        270,
        u"none",
        -2013855847999995777,
        #-6795364578871345152,
    ]
    composites = [
        {"true": True,
         "false": False},
        {"true": u"True",
         "false": False,
         "uint16(1616)": 1616},
        {"list": [1616, 32323232, True, -3232.0, {"TRUE": True, "FALSE": False}, [True, False]],
         "int32": 32323232, "bool": True,
         "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
         "SHORT STRING": u"1234567890"},
        {True: "true", 138: False, "false": 200},
    ]
    data = list(primitives)
    data.append(primitives)
    data.append(1)
    data.extend(composites)
    return data
def build_test_data(destdir):
    """Serialize each test value with msgpack and cbor into golden files.

    Writes <index>.msgpack.golden and <index>.cbor.golden into *destdir*
    for every entry of get_test_data_list().
    """
    l = get_test_data_list()
    for i in range(len(l)):
        # packer = msgpack.Packer()
        serialized = msgpack.dumps(l[i])
        f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
        f.write(serialized)
        f.close()
        serialized = cbor.dumps(l[i])
        f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
        f.write(serialized)
        f.close()
def doRpcServer(port, stopTimeSec):
    """Run a msgpack-rpc echo server on localhost:*port*.

    If *stopTimeSec* > 0, a timer thread stops the server after that many
    seconds; otherwise it serves until interrupted.
    """
    class EchoHandler(object):
        def Echo123(self, msg1, msg2, msg3):
            return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
        def EchoStruct(self, msg):
            return ("%s" % msg)
    addr = msgpackrpc.Address('localhost', port)
    server = msgpackrpc.Server(EchoHandler())
    server.listen(addr)
    # run thread to stop it after stopTimeSec seconds if > 0
    if stopTimeSec > 0:
        def myStopRpcServer():
            server.stop()
        t = threading.Timer(stopTimeSec, myStopRpcServer)
        t.start()
    server.start()
def doRpcClientToPythonSvc(port):
    """Exercise the Python echo server on localhost:*port* and print replies."""
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("Echo123", "A1", "B2", "C3")
    print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
    """Exercise the Go echo server on localhost:*port* and print replies.

    Note the Go service uses namespaced method names and a single list arg.
    """
    # print ">>>> port: ", port, " <<<<<"
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
    print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
    """Dispatch on the first CLI argument; print usage when nothing matches."""
    if len(args) == 2 and args[0] == "testdata":
        build_test_data(args[1])
    elif len(args) == 3 and args[0] == "rpc-server":
        doRpcServer(int(args[1]), int(args[2]))
    elif len(args) == 2 and args[0] == "rpc-client-python-service":
        doRpcClientToPythonSvc(int(args[1]))
    elif len(args) == 2 and args[0] == "rpc-client-go-service":
        doRpcClientToGoSvc(int(args[1]))
    else:
        print("Usage: test.py " +
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| apache-2.0 |
furf/pledge_service | testlib/waitress/tests/test_adjustments.py | 32 | 5260 | import sys
if sys.version_info[:2] == (2, 6): # pragma: no cover
import unittest2 as unittest
else: # pragma: no cover
import unittest
class Test_asbool(unittest.TestCase):
    """Tests for waitress.adjustments.asbool boolean coercion."""
    def _callFUT(self, s):
        # Import inside the helper so collection works without waitress.
        from waitress.adjustments import asbool
        return asbool(s)
    def test_s_is_None(self):
        result = self._callFUT(None)
        self.assertEqual(result, False)
    def test_s_is_True(self):
        result = self._callFUT(True)
        self.assertEqual(result, True)
    def test_s_is_False(self):
        result = self._callFUT(False)
        self.assertEqual(result, False)
    def test_s_is_true(self):
        result = self._callFUT('True')
        self.assertEqual(result, True)
    def test_s_is_false(self):
        result = self._callFUT('False')
        self.assertEqual(result, False)
    def test_s_is_yes(self):
        result = self._callFUT('yes')
        self.assertEqual(result, True)
    def test_s_is_on(self):
        result = self._callFUT('on')
        self.assertEqual(result, True)
    def test_s_is_1(self):
        result = self._callFUT(1)
        self.assertEqual(result, True)
class TestAdjustments(unittest.TestCase):
    """Tests for the Adjustments constructor's keyword coercion."""
    def _makeOne(self, **kw):
        from waitress.adjustments import Adjustments
        return Adjustments(**kw)
    def test_goodvars(self):
        # Every supported keyword given as a string should be coerced to its
        # typed value (ints, bools, octal perms, normalized url_prefix).
        inst = self._makeOne(
            host='host',
            port='8080',
            threads='5',
            trusted_proxy='192.168.1.1',
            url_scheme='https',
            backlog='20',
            recv_bytes='200',
            send_bytes='300',
            outbuf_overflow='400',
            inbuf_overflow='500',
            connection_limit='1000',
            cleanup_interval='1100',
            channel_timeout='1200',
            log_socket_errors='true',
            max_request_header_size='1300',
            max_request_body_size='1400',
            expose_tracebacks='true',
            ident='abc',
            asyncore_loop_timeout='5',
            asyncore_use_poll=True,
            unix_socket='/tmp/waitress.sock',
            unix_socket_perms='777',
            url_prefix='///foo/',
        )
        self.assertEqual(inst.host, 'host')
        self.assertEqual(inst.port, 8080)
        self.assertEqual(inst.threads, 5)
        self.assertEqual(inst.trusted_proxy, '192.168.1.1')
        self.assertEqual(inst.url_scheme, 'https')
        self.assertEqual(inst.backlog, 20)
        self.assertEqual(inst.recv_bytes, 200)
        self.assertEqual(inst.send_bytes, 300)
        self.assertEqual(inst.outbuf_overflow, 400)
        self.assertEqual(inst.inbuf_overflow, 500)
        self.assertEqual(inst.connection_limit, 1000)
        self.assertEqual(inst.cleanup_interval, 1100)
        self.assertEqual(inst.channel_timeout, 1200)
        self.assertEqual(inst.log_socket_errors, True)
        self.assertEqual(inst.max_request_header_size, 1300)
        self.assertEqual(inst.max_request_body_size, 1400)
        self.assertEqual(inst.expose_tracebacks, True)
        self.assertEqual(inst.asyncore_loop_timeout, 5)
        self.assertEqual(inst.asyncore_use_poll, True)
        self.assertEqual(inst.ident, 'abc')
        self.assertEqual(inst.unix_socket, '/tmp/waitress.sock')
        # '777' is parsed as an octal permission string.
        self.assertEqual(inst.unix_socket_perms, 0o777)
        # Redundant slashes are stripped; the trailing slash is removed.
        self.assertEqual(inst.url_prefix, '/foo')
    def test_badvar(self):
        self.assertRaises(ValueError, self._makeOne, nope=True)
class TestCLI(unittest.TestCase):
    """Tests for Adjustments.parse_args command-line option handling."""
    def parse(self, argv):
        from waitress.adjustments import Adjustments
        return Adjustments.parse_args(argv)
    def test_noargs(self):
        opts, args = self.parse([])
        self.assertDictEqual(opts, {'call': False, 'help': False})
        self.assertSequenceEqual(args, [])
    def test_help(self):
        opts, args = self.parse(['--help'])
        self.assertDictEqual(opts, {'call': False, 'help': True})
        self.assertSequenceEqual(args, [])
    def test_call(self):
        opts, args = self.parse(['--call'])
        self.assertDictEqual(opts, {'call': True, 'help': False})
        self.assertSequenceEqual(args, [])
    def test_both(self):
        opts, args = self.parse(['--call', '--help'])
        self.assertDictEqual(opts, {'call': True, 'help': True})
        self.assertSequenceEqual(args, [])
    def test_positive_boolean(self):
        opts, args = self.parse(['--expose-tracebacks'])
        self.assertDictContainsSubset({'expose_tracebacks': 'true'}, opts)
        self.assertSequenceEqual(args, [])
    def test_negative_boolean(self):
        # '--no-' prefixed flags turn the option off.
        opts, args = self.parse(['--no-expose-tracebacks'])
        self.assertDictContainsSubset({'expose_tracebacks': 'false'}, opts)
        self.assertSequenceEqual(args, [])
    def test_cast_params(self):
        opts, args = self.parse([
            '--host=localhost',
            '--port=80',
            '--unix-socket-perms=777'
        ])
        # parse_args keeps values as strings; coercion happens in Adjustments.
        self.assertDictContainsSubset({
            'host': 'localhost',
            'port': '80',
            'unix_socket_perms':'777',
        }, opts)
        self.assertSequenceEqual(args, [])
    def test_bad_param(self):
        import getopt
        self.assertRaises(getopt.GetoptError, self.parse, ['--no-host'])
| apache-2.0 |
mesocentrefc/easybuild-framework | test/framework/sandbox/easybuild/easyblocks/foo.py | 3 | 1875 | ##
# Copyright 2009-2014 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing foo, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
class EB_foo(EasyBlock):
    """Support for building/installing foo."""

    @staticmethod
    def extra_options(more_extra_vars=None):
        """Custom easyconfig parameters for foo."""
        # Foo-specific parameters, optionally extended by the caller.
        foo_vars = {
            'foo_extra1': [None, "first foo-specific easyconfig parameter (mandatory)", MANDATORY],
            'foo_extra2': ['FOO', "second foo-specific easyconfig parameter", CUSTOM],
        }
        foo_vars.update(more_extra_vars or {})
        return EasyBlock.extra_options(foo_vars)
| gpl-2.0 |
dsolimando/Hot | hot-jython-modules/src/main/resources/distutils/command/install_headers.py | 85 | 1449 | """distutils.command.install_headers
Implements the Distutils 'install_headers' command, to install C/C++ header
files to the Python include directory."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install_headers.py 37828 2004-11-10 22:23:15Z loewis $"
import os
from distutils.core import Command
class install_headers (Command):
    """Install C/C++ header files into the Python include directory."""

    description = "install C/C++ header files"

    user_options = [
        ('install-dir=', 'd', "directory to install header files to"),
        ('force', 'f', "force installation (overwrite existing files)"),
    ]

    boolean_options = ['force']

    def initialize_options(self):
        # Everything defaults to None/0; real values come from 'install'.
        self.install_dir = None
        self.force = 0
        self.outfiles = []

    def finalize_options(self):
        self.set_undefined_options('install',
                                   ('install_headers', 'install_dir'),
                                   ('force', 'force'))

    def run(self):
        headers = self.distribution.headers
        if not headers:
            return
        self.mkpath(self.install_dir)
        for header in headers:
            out, _ = self.copy_file(header, self.install_dir)
            self.outfiles.append(out)

    def get_inputs(self):
        return self.distribution.headers or []

    def get_outputs(self):
        return self.outfiles
# class install_headers
| gpl-3.0 |
gatita/django-imager | imagersite/imager_profile/views.py | 1 | 1811 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views.generic import TemplateView
from .forms import UserEditForm, ProfileEditForm
class ProfileView(TemplateView):
    """Render the profile page with the user's photo and album counts."""
    template_name = 'profile.html'

    def get_context_data(self, **kwargs):
        # NOTE(review): super(TemplateView, self) deliberately(?) skips
        # TemplateView in the MRO — preserved as-is; confirm before changing.
        context = super(TemplateView, self).get_context_data(**kwargs)
        user = self.request.user
        context['photo_count'] = user.photos.count()
        context['album_count'] = user.albums.count()
        return context
def profile_edit_view(request):
    """Display and process the profile edit form.

    GET renders unbound user/profile forms pre-populated from the current
    user.  POST validates the submitted data: when both forms are valid,
    saves them and redirects to the profile page; otherwise re-renders the
    page with the bound forms so validation errors are shown.

    (Refactor: the original duplicated the context/render block across the
    POST-invalid and GET branches; behavior is unchanged.)
    """
    if request.method == 'POST':
        user_form = UserEditForm(
            request.POST,
            instance=request.user
        )
        profile_form = ProfileEditForm(
            request.POST,
            instance=request.user.profile
        )
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            return HttpResponseRedirect(reverse('profile:profile'))
    else:
        # Unbound forms showing the current values.
        user_form = UserEditForm(instance=request.user)
        profile_form = ProfileEditForm(instance=request.user.profile)
    context = {
        'user_form': user_form.as_p,
        'profile_form': profile_form.as_p
    }
    return render(
        request,
        'profile_edit.html',
        context
    )
| mit |
aliasav/Bugman | bugman/bug/models.py | 1 | 2317 | from django.db import models
from django_extensions.db.fields import UUIDField
from developer.models import Developer
from reporter.models import Reporter
import datetime
# Create your models here.
class Bug(models.Model):
    """A reported bug, filed by a Reporter and assigned to a Developer."""
    guid = UUIDField(db_index=True)
    title = models.CharField(max_length=100, null=False, blank=False, db_index=True)
    description = models.TextField(max_length=600, blank=True, null=True)
    link = models.URLField(max_length=300, blank=True, null=True)
    screenshot = models.ImageField(upload_to='bugs_screenshots', null=True, blank=True)
    guidelines = models.TextField(blank=True, null=True)
    # datetime fields
    # BUG FIX: the previous ``default=datetime.datetime.now()`` was evaluated
    # once at import time (stamping every row with server start time) and was
    # redundant anyway: auto_now_add=True already sets the creation timestamp
    # (Django warns about combining the two, check fields.W161).
    created_at = models.DateTimeField(auto_now_add=True)
    fixed_at = models.DateTimeField(null=True)
    # foreign keys
    assigned_developer = models.ForeignKey(Developer, related_name='assigned_developer', db_index=True)
    reporter = models.ForeignKey(Reporter, related_name='reporter', db_index=True)
    # Bugs categories
    type1 = 'functional'
    type2 = 'logical'
    type3 = 'UI'
    type4 = 'design'
    type5 = 'typographical'
    type6 = 'system'
    type7 = 'standards'
    type8 = 'requirements'
    CATEGORY_LIST = (
        (type1, 'Functional'),
        (type2, 'Logical'),
        (type3, 'UI'),
        (type4, 'Design'),
        (type5, 'Typographical'),
        (type6, 'System'),
        (type7, 'Standards'),
        (type8, 'Requirements'),
    )
    # Bug Statuses : (Open -> Assigned -> Closed) or (Open -> Assigned -> Cancelled or Deferred) or (Open -> Cancelled or Deferred)
    status1 = 'open'
    status2 = 'closed'
    status3 = 'cancelled'
    status4 = 'deferred'
    status5 = 'assigned'
    STATUS_LIST = (
        (status1, 'Open'),
        (status2, 'Closed'),
        (status3, 'Cancelled'),
        (status4, 'Deferred'),
        (status5, 'Assigned')
    )
    # Priority (1 = highest urgency by convention — TODO confirm ordering)
    p1 = '1'
    p2 = '2'
    p3 = '3'
    p4 = '4'
    p5 = '5'
    PRIORITY_LIST = (
        (p1, '1'),
        (p2, '2'),
        (p3, '3'),
        (p4, '4'),
        (p5, '5')
    )
    # choice fields
    category = models.CharField(max_length=30,choices=CATEGORY_LIST, null=False, blank=False, db_index=True)
    status = models.CharField(max_length=30, choices=STATUS_LIST, null=False, db_index=True)
    priority = models.CharField(max_length=2, choices=PRIORITY_LIST, null=False, db_index=True)
    def __unicode__(self):
        return '%s <--> %s : %s, %s, %s ' % (self.title, self.guid, self.reporter, self.status, self.category,)
| gpl-2.0 |
Mj258/weiboapi | srapyDemo/envs/Lib/site-packages/win32/lib/win32cryptcon.py | 8 | 73834 | # Generated by h2py from WinCrypt.h
# Extract the algorithm-class bits (bits 13-15) of a CryptoAPI ALG_ID.
def GET_ALG_CLASS(x): return (x & (7 << 13))
# Extract the algorithm-type bits (bits 9-12) of an ALG_ID.
def GET_ALG_TYPE(x): return (x & (15 << 9))
# Extract the sub-identifier bits (bits 0-8) of an ALG_ID.
def GET_ALG_SID(x): return (x & (511))
ALG_CLASS_ANY = (0)
ALG_CLASS_SIGNATURE = (1 << 13)
ALG_CLASS_MSG_ENCRYPT = (2 << 13)
ALG_CLASS_DATA_ENCRYPT = (3 << 13)
ALG_CLASS_HASH = (4 << 13)
ALG_CLASS_KEY_EXCHANGE = (5 << 13)
ALG_CLASS_ALL = (7 << 13)
ALG_TYPE_ANY = (0)
ALG_TYPE_DSS = (1 << 9)
ALG_TYPE_RSA = (2 << 9)
ALG_TYPE_BLOCK = (3 << 9)
ALG_TYPE_STREAM = (4 << 9)
ALG_TYPE_DH = (5 << 9)
ALG_TYPE_SECURECHANNEL = (6 << 9)
ALG_SID_ANY = (0)
ALG_SID_RSA_ANY = 0
ALG_SID_RSA_PKCS = 1
ALG_SID_RSA_MSATWORK = 2
ALG_SID_RSA_ENTRUST = 3
ALG_SID_RSA_PGP = 4
ALG_SID_DSS_ANY = 0
ALG_SID_DSS_PKCS = 1
ALG_SID_DSS_DMS = 2
ALG_SID_DES = 1
ALG_SID_3DES = 3
ALG_SID_DESX = 4
ALG_SID_IDEA = 5
ALG_SID_CAST = 6
ALG_SID_SAFERSK64 = 7
ALG_SID_SAFERSK128 = 8
ALG_SID_3DES_112 = 9
ALG_SID_CYLINK_MEK = 12
ALG_SID_RC5 = 13
ALG_SID_AES_128 = 14
ALG_SID_AES_192 = 15
ALG_SID_AES_256 = 16
ALG_SID_AES = 17
ALG_SID_SKIPJACK = 10
ALG_SID_TEK = 11
CRYPT_MODE_CBCI = 6
CRYPT_MODE_CFBP = 7
CRYPT_MODE_OFBP = 8
CRYPT_MODE_CBCOFM = 9
CRYPT_MODE_CBCOFMI = 10
ALG_SID_RC2 = 2
ALG_SID_RC4 = 1
ALG_SID_SEAL = 2
ALG_SID_DH_SANDF = 1
ALG_SID_DH_EPHEM = 2
ALG_SID_AGREED_KEY_ANY = 3
ALG_SID_KEA = 4
ALG_SID_MD2 = 1
ALG_SID_MD4 = 2
ALG_SID_MD5 = 3
ALG_SID_SHA = 4
ALG_SID_SHA1 = 4
ALG_SID_MAC = 5
ALG_SID_RIPEMD = 6
ALG_SID_RIPEMD160 = 7
ALG_SID_SSL3SHAMD5 = 8
ALG_SID_HMAC = 9
ALG_SID_TLS1PRF = 10
ALG_SID_HASH_REPLACE_OWF = 11
ALG_SID_SHA_256 = 12
ALG_SID_SHA_384 = 13
ALG_SID_SHA_512 = 14
ALG_SID_SSL3_MASTER = 1
ALG_SID_SCHANNEL_MASTER_HASH = 2
ALG_SID_SCHANNEL_MAC_KEY = 3
ALG_SID_PCT1_MASTER = 4
ALG_SID_SSL2_MASTER = 5
ALG_SID_TLS1_MASTER = 6
ALG_SID_SCHANNEL_ENC_KEY = 7
ALG_SID_EXAMPLE = 80
CALG_MD2 = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_MD2)
CALG_MD4 = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_MD4)
CALG_MD5 = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_MD5)
CALG_SHA = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_SHA)
CALG_SHA1 = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_SHA1)
CALG_MAC = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_MAC)
CALG_RSA_SIGN = (ALG_CLASS_SIGNATURE | ALG_TYPE_RSA | ALG_SID_RSA_ANY)
CALG_DSS_SIGN = (ALG_CLASS_SIGNATURE | ALG_TYPE_DSS | ALG_SID_DSS_ANY)
CALG_NO_SIGN = (ALG_CLASS_SIGNATURE | ALG_TYPE_ANY | ALG_SID_ANY)
CALG_RSA_KEYX = (ALG_CLASS_KEY_EXCHANGE|ALG_TYPE_RSA|ALG_SID_RSA_ANY)
CALG_DES = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_DES)
CALG_3DES_112 = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_3DES_112)
CALG_3DES = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_3DES)
CALG_DESX = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_DESX)
CALG_RC2 = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_RC2)
CALG_RC4 = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_STREAM|ALG_SID_RC4)
CALG_SEAL = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_STREAM|ALG_SID_SEAL)
CALG_DH_SF = (ALG_CLASS_KEY_EXCHANGE|ALG_TYPE_DH|ALG_SID_DH_SANDF)
CALG_DH_EPHEM = (ALG_CLASS_KEY_EXCHANGE|ALG_TYPE_DH|ALG_SID_DH_EPHEM)
CALG_AGREEDKEY_ANY = (ALG_CLASS_KEY_EXCHANGE|ALG_TYPE_DH|ALG_SID_AGREED_KEY_ANY)
CALG_KEA_KEYX = (ALG_CLASS_KEY_EXCHANGE|ALG_TYPE_DH|ALG_SID_KEA)
CALG_HUGHES_MD5 = (ALG_CLASS_KEY_EXCHANGE|ALG_TYPE_ANY|ALG_SID_MD5)
CALG_SKIPJACK = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_SKIPJACK)
CALG_TEK = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_TEK)
CALG_CYLINK_MEK = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_CYLINK_MEK)
CALG_SSL3_SHAMD5 = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_SSL3SHAMD5)
CALG_SSL3_MASTER = (ALG_CLASS_MSG_ENCRYPT|ALG_TYPE_SECURECHANNEL|ALG_SID_SSL3_MASTER)
CALG_SCHANNEL_MASTER_HASH = (ALG_CLASS_MSG_ENCRYPT|ALG_TYPE_SECURECHANNEL|ALG_SID_SCHANNEL_MASTER_HASH)
CALG_SCHANNEL_MAC_KEY = (ALG_CLASS_MSG_ENCRYPT|ALG_TYPE_SECURECHANNEL|ALG_SID_SCHANNEL_MAC_KEY)
CALG_SCHANNEL_ENC_KEY = (ALG_CLASS_MSG_ENCRYPT|ALG_TYPE_SECURECHANNEL|ALG_SID_SCHANNEL_ENC_KEY)
CALG_PCT1_MASTER = (ALG_CLASS_MSG_ENCRYPT|ALG_TYPE_SECURECHANNEL|ALG_SID_PCT1_MASTER)
CALG_SSL2_MASTER = (ALG_CLASS_MSG_ENCRYPT|ALG_TYPE_SECURECHANNEL|ALG_SID_SSL2_MASTER)
CALG_TLS1_MASTER = (ALG_CLASS_MSG_ENCRYPT|ALG_TYPE_SECURECHANNEL|ALG_SID_TLS1_MASTER)
CALG_RC5 = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_RC5)
CALG_HMAC = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_HMAC)
CALG_TLS1PRF = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_TLS1PRF)
CALG_HASH_REPLACE_OWF = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_HASH_REPLACE_OWF)
CALG_AES_128 = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_AES_128)
CALG_AES_192 = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_AES_192)
CALG_AES_256 = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_AES_256)
CALG_AES = (ALG_CLASS_DATA_ENCRYPT|ALG_TYPE_BLOCK|ALG_SID_AES)
CALG_SHA_256 = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_SHA_256)
CALG_SHA_384 = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_SHA_384)
CALG_SHA_512 = (ALG_CLASS_HASH | ALG_TYPE_ANY | ALG_SID_SHA_512)
CRYPT_VERIFYCONTEXT = (-268435456)
CRYPT_NEWKEYSET = 0x00000008
CRYPT_DELETEKEYSET = 0x00000010
CRYPT_MACHINE_KEYSET = 0x00000020
CRYPT_SILENT = 0x00000040
CRYPT_EXPORTABLE = 0x00000001
CRYPT_USER_PROTECTED = 0x00000002
CRYPT_CREATE_SALT = 0x00000004
CRYPT_UPDATE_KEY = 0x00000008
CRYPT_NO_SALT = 0x00000010
CRYPT_PREGEN = 0x00000040
CRYPT_RECIPIENT = 0x00000010
CRYPT_INITIATOR = 0x00000040
CRYPT_ONLINE = 0x00000080
CRYPT_SF = 0x00000100
CRYPT_CREATE_IV = 0x00000200
CRYPT_KEK = 0x00000400
CRYPT_DATA_KEY = 0x00000800
CRYPT_VOLATILE = 0x00001000
CRYPT_SGCKEY = 0x00002000
CRYPT_ARCHIVABLE = 0x00004000
RSA1024BIT_KEY = 0x04000000
CRYPT_SERVER = 0x00000400
KEY_LENGTH_MASK = (-65536)
CRYPT_Y_ONLY = 0x00000001
CRYPT_SSL2_FALLBACK = 0x00000002
CRYPT_DESTROYKEY = 0x00000004
CRYPT_OAEP = 0x00000040
CRYPT_BLOB_VER3 = 0x00000080
CRYPT_IPSEC_HMAC_KEY = 0x00000100
CRYPT_DECRYPT_RSA_NO_PADDING_CHECK = 0x00000020
CRYPT_SECRETDIGEST = 0x00000001
CRYPT_OWF_REPL_LM_HASH = 0x00000001
CRYPT_LITTLE_ENDIAN = 0x00000001
CRYPT_NOHASHOID = 0x00000001
CRYPT_TYPE2_FORMAT = 0x00000002
CRYPT_X931_FORMAT = 0x00000004
CRYPT_MACHINE_DEFAULT = 0x00000001
CRYPT_USER_DEFAULT = 0x00000002
CRYPT_DELETE_DEFAULT = 0x00000004
SIMPLEBLOB = 0x1
PUBLICKEYBLOB = 0x6
PRIVATEKEYBLOB = 0x7
PLAINTEXTKEYBLOB = 0x8
OPAQUEKEYBLOB = 0x9
PUBLICKEYBLOBEX = 0xA
SYMMETRICWRAPKEYBLOB = 0xB
AT_KEYEXCHANGE = 1
AT_SIGNATURE = 2
CRYPT_USERDATA = 1
KP_IV = 1
KP_SALT = 2
KP_PADDING = 3
KP_MODE = 4
KP_MODE_BITS = 5
KP_PERMISSIONS = 6
KP_ALGID = 7
KP_BLOCKLEN = 8
KP_KEYLEN = 9
KP_SALT_EX = 10
KP_P = 11
KP_G = 12
KP_Q = 13
KP_X = 14
KP_Y = 15
KP_RA = 16
KP_RB = 17
KP_INFO = 18
KP_EFFECTIVE_KEYLEN = 19
KP_SCHANNEL_ALG = 20
KP_CLIENT_RANDOM = 21
KP_SERVER_RANDOM = 22
KP_RP = 23
KP_PRECOMP_MD5 = 24
KP_PRECOMP_SHA = 25
KP_CERTIFICATE = 26
KP_CLEAR_KEY = 27
KP_PUB_EX_LEN = 28
KP_PUB_EX_VAL = 29
KP_KEYVAL = 30
KP_ADMIN_PIN = 31
KP_KEYEXCHANGE_PIN = 32
KP_SIGNATURE_PIN = 33
KP_PREHASH = 34
KP_ROUNDS = 35
KP_OAEP_PARAMS = 36
KP_CMS_KEY_INFO = 37
KP_CMS_DH_KEY_INFO = 38
KP_PUB_PARAMS = 39
KP_VERIFY_PARAMS = 40
KP_HIGHEST_VERSION = 41
KP_GET_USE_COUNT = 42
PKCS5_PADDING = 1
RANDOM_PADDING = 2
ZERO_PADDING = 3
CRYPT_MODE_CBC = 1
CRYPT_MODE_ECB = 2
CRYPT_MODE_OFB = 3
CRYPT_MODE_CFB = 4
CRYPT_MODE_CTS = 5
CRYPT_ENCRYPT = 0x0001
CRYPT_DECRYPT = 0x0002
CRYPT_EXPORT = 0x0004
CRYPT_READ = 0x0008
CRYPT_WRITE = 0x0010
CRYPT_MAC = 0x0020
CRYPT_EXPORT_KEY = 0x0040
CRYPT_IMPORT_KEY = 0x0080
CRYPT_ARCHIVE = 0x0100
HP_ALGID = 0x0001
HP_HASHVAL = 0x0002
HP_HASHSIZE = 0x0004
HP_HMAC_INFO = 0x0005
HP_TLS1PRF_LABEL = 0x0006
HP_TLS1PRF_SEED = 0x0007
# Return codes for CryptoAPI BOOL-style functions.
CRYPT_FAILED = 0
CRYPT_SUCCEED = 1


def RCRYPT_SUCCEEDED(rt):
    """Return True when *rt* is the CryptoAPI success code (CRYPT_SUCCEED)."""
    return rt == CRYPT_SUCCEED


def RCRYPT_FAILED(rt):
    """Return True when *rt* is the CryptoAPI failure code (CRYPT_FAILED)."""
    return rt == CRYPT_FAILED
PP_ENUMALGS = 1
PP_ENUMCONTAINERS = 2
PP_IMPTYPE = 3
PP_NAME = 4
PP_VERSION = 5
PP_CONTAINER = 6
PP_CHANGE_PASSWORD = 7
PP_KEYSET_SEC_DESCR = 8
PP_CERTCHAIN = 9
PP_KEY_TYPE_SUBTYPE = 10
PP_PROVTYPE = 16
PP_KEYSTORAGE = 17
PP_APPLI_CERT = 18
PP_SYM_KEYSIZE = 19
PP_SESSION_KEYSIZE = 20
PP_UI_PROMPT = 21
PP_ENUMALGS_EX = 22
PP_ENUMMANDROOTS = 25
PP_ENUMELECTROOTS = 26
PP_KEYSET_TYPE = 27
PP_ADMIN_PIN = 31
PP_KEYEXCHANGE_PIN = 32
PP_SIGNATURE_PIN = 33
PP_SIG_KEYSIZE_INC = 34
PP_KEYX_KEYSIZE_INC = 35
PP_UNIQUE_CONTAINER = 36
PP_SGC_INFO = 37
PP_USE_HARDWARE_RNG = 38
PP_KEYSPEC = 39
PP_ENUMEX_SIGNING_PROT = 40
PP_CRYPT_COUNT_KEY_USE = 41
CRYPT_FIRST = 1
CRYPT_NEXT = 2
CRYPT_SGC_ENUM = 4
CRYPT_IMPL_HARDWARE = 1
CRYPT_IMPL_SOFTWARE = 2
CRYPT_IMPL_MIXED = 3
CRYPT_IMPL_UNKNOWN = 4
CRYPT_IMPL_REMOVABLE = 8
CRYPT_SEC_DESCR = 0x00000001
CRYPT_PSTORE = 0x00000002
CRYPT_UI_PROMPT = 0x00000004
CRYPT_FLAG_PCT1 = 0x0001
CRYPT_FLAG_SSL2 = 0x0002
CRYPT_FLAG_SSL3 = 0x0004
CRYPT_FLAG_TLS1 = 0x0008
CRYPT_FLAG_IPSEC = 0x0010
CRYPT_FLAG_SIGNING = 0x0020
CRYPT_SGC = 0x0001
CRYPT_FASTSGC = 0x0002
PP_CLIENT_HWND = 1
PP_CONTEXT_INFO = 11
PP_KEYEXCHANGE_KEYSIZE = 12
PP_SIGNATURE_KEYSIZE = 13
PP_KEYEXCHANGE_ALG = 14
PP_SIGNATURE_ALG = 15
PP_DELETEKEY = 24
PROV_RSA_FULL = 1
PROV_RSA_SIG = 2
PROV_DSS = 3
PROV_FORTEZZA = 4
PROV_MS_EXCHANGE = 5
PROV_SSL = 6
PROV_RSA_SCHANNEL = 12
PROV_DSS_DH = 13
PROV_EC_ECDSA_SIG = 14
PROV_EC_ECNRA_SIG = 15
PROV_EC_ECDSA_FULL = 16
PROV_EC_ECNRA_FULL = 17
PROV_DH_SCHANNEL = 18
PROV_SPYRUS_LYNKS = 20
PROV_RNG = 21
PROV_INTEL_SEC = 22
PROV_REPLACE_OWF = 23
PROV_RSA_AES = 24
MS_DEF_PROV_A = "Microsoft Base Cryptographic Provider v1.0"
MS_DEF_PROV = MS_DEF_PROV_A
MS_ENHANCED_PROV_A = "Microsoft Enhanced Cryptographic Provider v1.0"
MS_ENHANCED_PROV = MS_ENHANCED_PROV_A
MS_STRONG_PROV_A = "Microsoft Strong Cryptographic Provider"
MS_STRONG_PROV = MS_STRONG_PROV_A
MS_DEF_RSA_SIG_PROV_A = "Microsoft RSA Signature Cryptographic Provider"
MS_DEF_RSA_SIG_PROV = MS_DEF_RSA_SIG_PROV_A
MS_DEF_RSA_SCHANNEL_PROV_A = "Microsoft RSA SChannel Cryptographic Provider"
MS_DEF_RSA_SCHANNEL_PROV = MS_DEF_RSA_SCHANNEL_PROV_A
MS_DEF_DSS_PROV_A = "Microsoft Base DSS Cryptographic Provider"
MS_DEF_DSS_PROV = MS_DEF_DSS_PROV_A
MS_DEF_DSS_DH_PROV_A = "Microsoft Base DSS and Diffie-Hellman Cryptographic Provider"
MS_DEF_DSS_DH_PROV = MS_DEF_DSS_DH_PROV_A
MS_ENH_DSS_DH_PROV_A = "Microsoft Enhanced DSS and Diffie-Hellman Cryptographic Provider"
MS_ENH_DSS_DH_PROV = MS_ENH_DSS_DH_PROV_A
MS_DEF_DH_SCHANNEL_PROV_A = "Microsoft DH SChannel Cryptographic Provider"
MS_DEF_DH_SCHANNEL_PROV = MS_DEF_DH_SCHANNEL_PROV_A
MS_SCARD_PROV_A = "Microsoft Base Smart Card Crypto Provider"
MS_SCARD_PROV = MS_SCARD_PROV_A
MS_ENH_RSA_AES_PROV_A = "Microsoft Enhanced RSA and AES Cryptographic Provider"
MS_ENH_RSA_AES_PROV = MS_ENH_RSA_AES_PROV_A
MAXUIDLEN = 64
EXPO_OFFLOAD_REG_VALUE = "ExpoOffload"
EXPO_OFFLOAD_FUNC_NAME = "OffloadModExpo"
szKEY_CRYPTOAPI_PRIVATE_KEY_OPTIONS = \
"Software\\Policies\\Microsoft\\Cryptography"
szFORCE_KEY_PROTECTION = "ForceKeyProtection"
dwFORCE_KEY_PROTECTION_DISABLED = 0x0
dwFORCE_KEY_PROTECTION_USER_SELECT = 0x1
dwFORCE_KEY_PROTECTION_HIGH = 0x2
szKEY_CACHE_ENABLED = "CachePrivateKeys"
szKEY_CACHE_SECONDS = "PrivateKeyLifetimeSeconds"
CUR_BLOB_VERSION = 2
SCHANNEL_MAC_KEY = 0x00000000
SCHANNEL_ENC_KEY = 0x00000001
INTERNATIONAL_USAGE = 0x00000001
szOID_RSA = "1.2.840.113549"
szOID_PKCS = "1.2.840.113549.1"
szOID_RSA_HASH = "1.2.840.113549.2"
szOID_RSA_ENCRYPT = "1.2.840.113549.3"
szOID_PKCS_1 = "1.2.840.113549.1.1"
szOID_PKCS_2 = "1.2.840.113549.1.2"
szOID_PKCS_3 = "1.2.840.113549.1.3"
szOID_PKCS_4 = "1.2.840.113549.1.4"
szOID_PKCS_5 = "1.2.840.113549.1.5"
szOID_PKCS_6 = "1.2.840.113549.1.6"
szOID_PKCS_7 = "1.2.840.113549.1.7"
szOID_PKCS_8 = "1.2.840.113549.1.8"
szOID_PKCS_9 = "1.2.840.113549.1.9"
szOID_PKCS_10 = "1.2.840.113549.1.10"
szOID_PKCS_12 = "1.2.840.113549.1.12"
szOID_RSA_RSA = "1.2.840.113549.1.1.1"
szOID_RSA_MD2RSA = "1.2.840.113549.1.1.2"
szOID_RSA_MD4RSA = "1.2.840.113549.1.1.3"
szOID_RSA_MD5RSA = "1.2.840.113549.1.1.4"
szOID_RSA_SHA1RSA = "1.2.840.113549.1.1.5"
szOID_RSA_SETOAEP_RSA = "1.2.840.113549.1.1.6"
szOID_RSA_DH = "1.2.840.113549.1.3.1"
szOID_RSA_data = "1.2.840.113549.1.7.1"
szOID_RSA_signedData = "1.2.840.113549.1.7.2"
szOID_RSA_envelopedData = "1.2.840.113549.1.7.3"
szOID_RSA_signEnvData = "1.2.840.113549.1.7.4"
szOID_RSA_digestedData = "1.2.840.113549.1.7.5"
szOID_RSA_hashedData = "1.2.840.113549.1.7.5"
szOID_RSA_encryptedData = "1.2.840.113549.1.7.6"
szOID_RSA_emailAddr = "1.2.840.113549.1.9.1"
szOID_RSA_unstructName = "1.2.840.113549.1.9.2"
szOID_RSA_contentType = "1.2.840.113549.1.9.3"
szOID_RSA_messageDigest = "1.2.840.113549.1.9.4"
szOID_RSA_signingTime = "1.2.840.113549.1.9.5"
szOID_RSA_counterSign = "1.2.840.113549.1.9.6"
szOID_RSA_challengePwd = "1.2.840.113549.1.9.7"
szOID_RSA_unstructAddr = "1.2.840.113549.1.9.8"
szOID_RSA_extCertAttrs = "1.2.840.113549.1.9.9"
szOID_RSA_certExtensions = "1.2.840.113549.1.9.14"
szOID_RSA_SMIMECapabilities = "1.2.840.113549.1.9.15"
szOID_RSA_preferSignedData = "1.2.840.113549.1.9.15.1"
szOID_RSA_SMIMEalg = "1.2.840.113549.1.9.16.3"
szOID_RSA_SMIMEalgESDH = "1.2.840.113549.1.9.16.3.5"
szOID_RSA_SMIMEalgCMS3DESwrap = "1.2.840.113549.1.9.16.3.6"
szOID_RSA_SMIMEalgCMSRC2wrap = "1.2.840.113549.1.9.16.3.7"
szOID_RSA_MD2 = "1.2.840.113549.2.2"
szOID_RSA_MD4 = "1.2.840.113549.2.4"
szOID_RSA_MD5 = "1.2.840.113549.2.5"
szOID_RSA_RC2CBC = "1.2.840.113549.3.2"
szOID_RSA_RC4 = "1.2.840.113549.3.4"
szOID_RSA_DES_EDE3_CBC = "1.2.840.113549.3.7"
szOID_RSA_RC5_CBCPad = "1.2.840.113549.3.9"
szOID_ANSI_X942 = "1.2.840.10046"
szOID_ANSI_X942_DH = "1.2.840.10046.2.1"
szOID_X957 = "1.2.840.10040"
szOID_X957_DSA = "1.2.840.10040.4.1"
szOID_X957_SHA1DSA = "1.2.840.10040.4.3"
szOID_DS = "2.5"
szOID_DSALG = "2.5.8"
szOID_DSALG_CRPT = "2.5.8.1"
szOID_DSALG_HASH = "2.5.8.2"
szOID_DSALG_SIGN = "2.5.8.3"
szOID_DSALG_RSA = "2.5.8.1.1"
szOID_OIW = "1.3.14"
szOID_OIWSEC = "1.3.14.3.2"
szOID_OIWSEC_md4RSA = "1.3.14.3.2.2"
szOID_OIWSEC_md5RSA = "1.3.14.3.2.3"
szOID_OIWSEC_md4RSA2 = "1.3.14.3.2.4"
szOID_OIWSEC_desECB = "1.3.14.3.2.6"
szOID_OIWSEC_desCBC = "1.3.14.3.2.7"
szOID_OIWSEC_desOFB = "1.3.14.3.2.8"
szOID_OIWSEC_desCFB = "1.3.14.3.2.9"
szOID_OIWSEC_desMAC = "1.3.14.3.2.10"
szOID_OIWSEC_rsaSign = "1.3.14.3.2.11"
szOID_OIWSEC_dsa = "1.3.14.3.2.12"
szOID_OIWSEC_shaDSA = "1.3.14.3.2.13"
szOID_OIWSEC_mdc2RSA = "1.3.14.3.2.14"
szOID_OIWSEC_shaRSA = "1.3.14.3.2.15"
szOID_OIWSEC_dhCommMod = "1.3.14.3.2.16"
szOID_OIWSEC_desEDE = "1.3.14.3.2.17"
szOID_OIWSEC_sha = "1.3.14.3.2.18"
szOID_OIWSEC_mdc2 = "1.3.14.3.2.19"
szOID_OIWSEC_dsaComm = "1.3.14.3.2.20"
szOID_OIWSEC_dsaCommSHA = "1.3.14.3.2.21"
szOID_OIWSEC_rsaXchg = "1.3.14.3.2.22"
szOID_OIWSEC_keyHashSeal = "1.3.14.3.2.23"
szOID_OIWSEC_md2RSASign = "1.3.14.3.2.24"
szOID_OIWSEC_md5RSASign = "1.3.14.3.2.25"
szOID_OIWSEC_sha1 = "1.3.14.3.2.26"
szOID_OIWSEC_dsaSHA1 = "1.3.14.3.2.27"
szOID_OIWSEC_dsaCommSHA1 = "1.3.14.3.2.28"
szOID_OIWSEC_sha1RSASign = "1.3.14.3.2.29"
szOID_OIWDIR = "1.3.14.7.2"
szOID_OIWDIR_CRPT = "1.3.14.7.2.1"
szOID_OIWDIR_HASH = "1.3.14.7.2.2"
szOID_OIWDIR_SIGN = "1.3.14.7.2.3"
szOID_OIWDIR_md2 = "1.3.14.7.2.2.1"
szOID_OIWDIR_md2RSA = "1.3.14.7.2.3.1"
szOID_INFOSEC = "2.16.840.1.101.2.1"
szOID_INFOSEC_sdnsSignature = "2.16.840.1.101.2.1.1.1"
szOID_INFOSEC_mosaicSignature = "2.16.840.1.101.2.1.1.2"
szOID_INFOSEC_sdnsConfidentiality = "2.16.840.1.101.2.1.1.3"
szOID_INFOSEC_mosaicConfidentiality = "2.16.840.1.101.2.1.1.4"
szOID_INFOSEC_sdnsIntegrity = "2.16.840.1.101.2.1.1.5"
szOID_INFOSEC_mosaicIntegrity = "2.16.840.1.101.2.1.1.6"
szOID_INFOSEC_sdnsTokenProtection = "2.16.840.1.101.2.1.1.7"
szOID_INFOSEC_mosaicTokenProtection = "2.16.840.1.101.2.1.1.8"
szOID_INFOSEC_sdnsKeyManagement = "2.16.840.1.101.2.1.1.9"
szOID_INFOSEC_mosaicKeyManagement = "2.16.840.1.101.2.1.1.10"
szOID_INFOSEC_sdnsKMandSig = "2.16.840.1.101.2.1.1.11"
szOID_INFOSEC_mosaicKMandSig = "2.16.840.1.101.2.1.1.12"
szOID_INFOSEC_SuiteASignature = "2.16.840.1.101.2.1.1.13"
szOID_INFOSEC_SuiteAConfidentiality = "2.16.840.1.101.2.1.1.14"
szOID_INFOSEC_SuiteAIntegrity = "2.16.840.1.101.2.1.1.15"
szOID_INFOSEC_SuiteATokenProtection = "2.16.840.1.101.2.1.1.16"
szOID_INFOSEC_SuiteAKeyManagement = "2.16.840.1.101.2.1.1.17"
szOID_INFOSEC_SuiteAKMandSig = "2.16.840.1.101.2.1.1.18"
szOID_INFOSEC_mosaicUpdatedSig = "2.16.840.1.101.2.1.1.19"
szOID_INFOSEC_mosaicKMandUpdSig = "2.16.840.1.101.2.1.1.20"
szOID_INFOSEC_mosaicUpdatedInteg = "2.16.840.1.101.2.1.1.21"
szOID_COMMON_NAME = "2.5.4.3"
szOID_SUR_NAME = "2.5.4.4"
szOID_DEVICE_SERIAL_NUMBER = "2.5.4.5"
szOID_COUNTRY_NAME = "2.5.4.6"
szOID_LOCALITY_NAME = "2.5.4.7"
szOID_STATE_OR_PROVINCE_NAME = "2.5.4.8"
szOID_STREET_ADDRESS = "2.5.4.9"
szOID_ORGANIZATION_NAME = "2.5.4.10"
szOID_ORGANIZATIONAL_UNIT_NAME = "2.5.4.11"
szOID_TITLE = "2.5.4.12"
szOID_DESCRIPTION = "2.5.4.13"
szOID_SEARCH_GUIDE = "2.5.4.14"
szOID_BUSINESS_CATEGORY = "2.5.4.15"
szOID_POSTAL_ADDRESS = "2.5.4.16"
szOID_POSTAL_CODE = "2.5.4.17"
szOID_POST_OFFICE_BOX = "2.5.4.18"
szOID_PHYSICAL_DELIVERY_OFFICE_NAME = "2.5.4.19"
szOID_TELEPHONE_NUMBER = "2.5.4.20"
szOID_TELEX_NUMBER = "2.5.4.21"
szOID_TELETEXT_TERMINAL_IDENTIFIER = "2.5.4.22"
szOID_FACSIMILE_TELEPHONE_NUMBER = "2.5.4.23"
szOID_X21_ADDRESS = "2.5.4.24"
szOID_INTERNATIONAL_ISDN_NUMBER = "2.5.4.25"
szOID_REGISTERED_ADDRESS = "2.5.4.26"
szOID_DESTINATION_INDICATOR = "2.5.4.27"
szOID_PREFERRED_DELIVERY_METHOD = "2.5.4.28"
szOID_PRESENTATION_ADDRESS = "2.5.4.29"
szOID_SUPPORTED_APPLICATION_CONTEXT = "2.5.4.30"
szOID_MEMBER = "2.5.4.31"
szOID_OWNER = "2.5.4.32"
szOID_ROLE_OCCUPANT = "2.5.4.33"
szOID_SEE_ALSO = "2.5.4.34"
szOID_USER_PASSWORD = "2.5.4.35"
szOID_USER_CERTIFICATE = "2.5.4.36"
szOID_CA_CERTIFICATE = "2.5.4.37"
szOID_AUTHORITY_REVOCATION_LIST = "2.5.4.38"
szOID_CERTIFICATE_REVOCATION_LIST = "2.5.4.39"
szOID_CROSS_CERTIFICATE_PAIR = "2.5.4.40"
szOID_GIVEN_NAME = "2.5.4.42"
szOID_INITIALS = "2.5.4.43"
szOID_DN_QUALIFIER = "2.5.4.46"
szOID_DOMAIN_COMPONENT = "0.9.2342.19200300.100.1.25"
szOID_PKCS_12_FRIENDLY_NAME_ATTR = "1.2.840.113549.1.9.20"
szOID_PKCS_12_LOCAL_KEY_ID = "1.2.840.113549.1.9.21"
szOID_PKCS_12_KEY_PROVIDER_NAME_ATTR = "1.3.6.1.4.1.311.17.1"
szOID_LOCAL_MACHINE_KEYSET = "1.3.6.1.4.1.311.17.2"
szOID_KEYID_RDN = "1.3.6.1.4.1.311.10.7.1"
CERT_RDN_ANY_TYPE = 0
CERT_RDN_ENCODED_BLOB = 1
CERT_RDN_OCTET_STRING = 2
CERT_RDN_NUMERIC_STRING = 3
CERT_RDN_PRINTABLE_STRING = 4
CERT_RDN_TELETEX_STRING = 5
CERT_RDN_T61_STRING = 5
CERT_RDN_VIDEOTEX_STRING = 6
CERT_RDN_IA5_STRING = 7
CERT_RDN_GRAPHIC_STRING = 8
CERT_RDN_VISIBLE_STRING = 9
CERT_RDN_ISO646_STRING = 9
CERT_RDN_GENERAL_STRING = 10
CERT_RDN_UNIVERSAL_STRING = 11
CERT_RDN_INT4_STRING = 11
CERT_RDN_BMP_STRING = 12
CERT_RDN_UNICODE_STRING = 12
CERT_RDN_UTF8_STRING = 13
CERT_RDN_TYPE_MASK = 0x000000FF
CERT_RDN_FLAGS_MASK = (-16777216)
CERT_RDN_ENABLE_T61_UNICODE_FLAG = (-2147483648)
CERT_RDN_ENABLE_UTF8_UNICODE_FLAG = 0x20000000
CERT_RDN_DISABLE_CHECK_TYPE_FLAG = 0x40000000
CERT_RDN_DISABLE_IE4_UTF8_FLAG = 0x01000000
CERT_RSA_PUBLIC_KEY_OBJID = szOID_RSA_RSA
CERT_DEFAULT_OID_PUBLIC_KEY_SIGN = szOID_RSA_RSA
CERT_DEFAULT_OID_PUBLIC_KEY_XCHG = szOID_RSA_RSA
CERT_V1 = 0
CERT_V2 = 1
CERT_V3 = 2
CERT_INFO_VERSION_FLAG = 1
CERT_INFO_SERIAL_NUMBER_FLAG = 2
CERT_INFO_SIGNATURE_ALGORITHM_FLAG = 3
CERT_INFO_ISSUER_FLAG = 4
CERT_INFO_NOT_BEFORE_FLAG = 5
CERT_INFO_NOT_AFTER_FLAG = 6
CERT_INFO_SUBJECT_FLAG = 7
CERT_INFO_SUBJECT_PUBLIC_KEY_INFO_FLAG = 8
CERT_INFO_ISSUER_UNIQUE_ID_FLAG = 9
CERT_INFO_SUBJECT_UNIQUE_ID_FLAG = 10
CERT_INFO_EXTENSION_FLAG = 11
CRL_V1 = 0
CRL_V2 = 1
CERT_REQUEST_V1 = 0
CERT_KEYGEN_REQUEST_V1 = 0
CTL_V1 = 0
# A dwEncodingType packs the certificate encoding in the low 16 bits and the
# message encoding in the high 16 bits; these masks/helpers split the two.
CERT_ENCODING_TYPE_MASK = 0x0000FFFF
CMSG_ENCODING_TYPE_MASK = -65536


def GET_CERT_ENCODING_TYPE(X):
    """Return the certificate-encoding half (low 16 bits) of *X*."""
    return X & CERT_ENCODING_TYPE_MASK


def GET_CMSG_ENCODING_TYPE(X):
    """Return the message-encoding half (high 16 bits) of *X*."""
    return X & CMSG_ENCODING_TYPE_MASK
CRYPT_ASN_ENCODING = 0x00000001
CRYPT_NDR_ENCODING = 0x00000002
X509_ASN_ENCODING = 0x00000001
X509_NDR_ENCODING = 0x00000002
PKCS_7_ASN_ENCODING = 0x00010000
PKCS_7_NDR_ENCODING = 0x00020000
CRYPT_FORMAT_STR_MULTI_LINE = 0x0001
CRYPT_FORMAT_STR_NO_HEX = 0x0010
CRYPT_FORMAT_SIMPLE = 0x0001
CRYPT_FORMAT_X509 = 0x0002
CRYPT_FORMAT_OID = 0x0004
CRYPT_FORMAT_RDN_SEMICOLON = 0x0100
CRYPT_FORMAT_RDN_CRLF = 0x0200
CRYPT_FORMAT_RDN_UNQUOTE = 0x0400
CRYPT_FORMAT_RDN_REVERSE = 0x0800
CRYPT_FORMAT_COMMA = 0x1000
CRYPT_FORMAT_SEMICOLON = CRYPT_FORMAT_RDN_SEMICOLON
CRYPT_FORMAT_CRLF = CRYPT_FORMAT_RDN_CRLF
CRYPT_ENCODE_NO_SIGNATURE_BYTE_REVERSAL_FLAG = 0x8
CRYPT_ENCODE_ALLOC_FLAG = 0x8000
CRYPT_UNICODE_NAME_ENCODE_ENABLE_T61_UNICODE_FLAG = \
CERT_RDN_ENABLE_T61_UNICODE_FLAG
CRYPT_UNICODE_NAME_ENCODE_ENABLE_UTF8_UNICODE_FLAG = \
CERT_RDN_ENABLE_UTF8_UNICODE_FLAG
CRYPT_UNICODE_NAME_ENCODE_DISABLE_CHECK_TYPE_FLAG = \
CERT_RDN_DISABLE_CHECK_TYPE_FLAG
CRYPT_SORTED_CTL_ENCODE_HASHED_SUBJECT_IDENTIFIER_FLAG = 0x10000
CRYPT_DECODE_NOCOPY_FLAG = 0x1
CRYPT_DECODE_TO_BE_SIGNED_FLAG = 0x2
CRYPT_DECODE_SHARE_OID_STRING_FLAG = 0x4
CRYPT_DECODE_NO_SIGNATURE_BYTE_REVERSAL_FLAG = 0x8
CRYPT_DECODE_ALLOC_FLAG = 0x8000
CRYPT_UNICODE_NAME_DECODE_DISABLE_IE4_UTF8_FLAG = \
CERT_RDN_DISABLE_IE4_UTF8_FLAG
CRYPT_ENCODE_DECODE_NONE = 0
X509_CERT = 1
X509_CERT_TO_BE_SIGNED = 2
X509_CERT_CRL_TO_BE_SIGNED = 3
X509_CERT_REQUEST_TO_BE_SIGNED = 4
X509_EXTENSIONS = 5
X509_NAME_VALUE = 6
X509_NAME = 7
X509_PUBLIC_KEY_INFO = 8
X509_AUTHORITY_KEY_ID = 9
X509_KEY_ATTRIBUTES = 10
X509_KEY_USAGE_RESTRICTION = 11
X509_ALTERNATE_NAME = 12
X509_BASIC_CONSTRAINTS = 13
X509_KEY_USAGE = 14
X509_BASIC_CONSTRAINTS2 = 15
X509_CERT_POLICIES = 16
PKCS_UTC_TIME = 17
PKCS_TIME_REQUEST = 18
RSA_CSP_PUBLICKEYBLOB = 19
X509_UNICODE_NAME = 20
X509_KEYGEN_REQUEST_TO_BE_SIGNED = 21
PKCS_ATTRIBUTE = 22
PKCS_CONTENT_INFO_SEQUENCE_OF_ANY = 23
X509_UNICODE_NAME_VALUE = 24
X509_ANY_STRING = X509_NAME_VALUE
X509_UNICODE_ANY_STRING = X509_UNICODE_NAME_VALUE
X509_OCTET_STRING = 25
X509_BITS = 26
X509_INTEGER = 27
X509_MULTI_BYTE_INTEGER = 28
X509_ENUMERATED = 29
X509_CHOICE_OF_TIME = 30
X509_AUTHORITY_KEY_ID2 = 31
X509_AUTHORITY_INFO_ACCESS = 32
X509_SUBJECT_INFO_ACCESS = X509_AUTHORITY_INFO_ACCESS
X509_CRL_REASON_CODE = X509_ENUMERATED
PKCS_CONTENT_INFO = 33
X509_SEQUENCE_OF_ANY = 34
X509_CRL_DIST_POINTS = 35
X509_ENHANCED_KEY_USAGE = 36
PKCS_CTL = 37
X509_MULTI_BYTE_UINT = 38
X509_DSS_PUBLICKEY = X509_MULTI_BYTE_UINT
X509_DSS_PARAMETERS = 39
X509_DSS_SIGNATURE = 40
PKCS_RC2_CBC_PARAMETERS = 41
PKCS_SMIME_CAPABILITIES = 42
X509_QC_STATEMENTS_EXT = 42
PKCS_RSA_PRIVATE_KEY = 43
PKCS_PRIVATE_KEY_INFO = 44
PKCS_ENCRYPTED_PRIVATE_KEY_INFO = 45
X509_PKIX_POLICY_QUALIFIER_USERNOTICE = 46
X509_DH_PUBLICKEY = X509_MULTI_BYTE_UINT
X509_DH_PARAMETERS = 47
PKCS_ATTRIBUTES = 48
PKCS_SORTED_CTL = 49
X509_ECC_SIGNATURE = 47
X942_DH_PARAMETERS = 50
X509_BITS_WITHOUT_TRAILING_ZEROES = 51
X942_OTHER_INFO = 52
X509_CERT_PAIR = 53
X509_ISSUING_DIST_POINT = 54
X509_NAME_CONSTRAINTS = 55
X509_POLICY_MAPPINGS = 56
X509_POLICY_CONSTRAINTS = 57
X509_CROSS_CERT_DIST_POINTS = 58
CMC_DATA = 59
CMC_RESPONSE = 60
CMC_STATUS = 61
CMC_ADD_EXTENSIONS = 62
CMC_ADD_ATTRIBUTES = 63
X509_CERTIFICATE_TEMPLATE = 64
OCSP_SIGNED_REQUEST = 65
OCSP_REQUEST = 66
OCSP_RESPONSE = 67
OCSP_BASIC_SIGNED_RESPONSE = 68
OCSP_BASIC_RESPONSE = 69
X509_LOGOTYPE_EXT = 70
X509_BIOMETRIC_EXT = 71
CNG_RSA_PUBLIC_KEY_BLOB = 72
X509_OBJECT_IDENTIFIER = 73
X509_ALGORITHM_IDENTIFIER = 74
PKCS_RSA_SSA_PSS_PARAMETERS = 75
PKCS_RSAES_OAEP_PARAMETERS = 76
ECC_CMS_SHARED_INFO = 77
TIMESTAMP_REQUEST = 78
TIMESTAMP_RESPONSE = 79
TIMESTAMP_INFO = 80
X509_CERT_BUNDLE = 81
PKCS7_SIGNER_INFO = 500
CMS_SIGNER_INFO = 501
szOID_AUTHORITY_KEY_IDENTIFIER = "2.5.29.1"
szOID_KEY_ATTRIBUTES = "2.5.29.2"
szOID_CERT_POLICIES_95 = "2.5.29.3"
szOID_KEY_USAGE_RESTRICTION = "2.5.29.4"
szOID_SUBJECT_ALT_NAME = "2.5.29.7"
szOID_ISSUER_ALT_NAME = "2.5.29.8"
szOID_BASIC_CONSTRAINTS = "2.5.29.10"
szOID_KEY_USAGE = "2.5.29.15"
szOID_PRIVATEKEY_USAGE_PERIOD = "2.5.29.16"
szOID_BASIC_CONSTRAINTS2 = "2.5.29.19"
szOID_CERT_POLICIES = "2.5.29.32"
szOID_ANY_CERT_POLICY = "2.5.29.32.0"
szOID_AUTHORITY_KEY_IDENTIFIER2 = "2.5.29.35"
szOID_SUBJECT_KEY_IDENTIFIER = "2.5.29.14"
szOID_SUBJECT_ALT_NAME2 = "2.5.29.17"
szOID_ISSUER_ALT_NAME2 = "2.5.29.18"
szOID_CRL_REASON_CODE = "2.5.29.21"
szOID_REASON_CODE_HOLD = "2.5.29.23"
szOID_CRL_DIST_POINTS = "2.5.29.31"
szOID_ENHANCED_KEY_USAGE = "2.5.29.37"
szOID_CRL_NUMBER = "2.5.29.20"
szOID_DELTA_CRL_INDICATOR = "2.5.29.27"
szOID_ISSUING_DIST_POINT = "2.5.29.28"
szOID_FRESHEST_CRL = "2.5.29.46"
szOID_NAME_CONSTRAINTS = "2.5.29.30"
szOID_POLICY_MAPPINGS = "2.5.29.33"
szOID_LEGACY_POLICY_MAPPINGS = "2.5.29.5"
szOID_POLICY_CONSTRAINTS = "2.5.29.36"
szOID_RENEWAL_CERTIFICATE = "1.3.6.1.4.1.311.13.1"
szOID_ENROLLMENT_NAME_VALUE_PAIR = "1.3.6.1.4.1.311.13.2.1"
szOID_ENROLLMENT_CSP_PROVIDER = "1.3.6.1.4.1.311.13.2.2"
szOID_OS_VERSION = "1.3.6.1.4.1.311.13.2.3"
szOID_ENROLLMENT_AGENT = "1.3.6.1.4.1.311.20.2.1"
szOID_PKIX = "1.3.6.1.5.5.7"
szOID_PKIX_PE = "1.3.6.1.5.5.7.1"
szOID_AUTHORITY_INFO_ACCESS = "1.3.6.1.5.5.7.1.1"
szOID_CERT_EXTENSIONS = "1.3.6.1.4.1.311.2.1.14"
szOID_NEXT_UPDATE_LOCATION = "1.3.6.1.4.1.311.10.2"
szOID_REMOVE_CERTIFICATE = "1.3.6.1.4.1.311.10.8.1"
szOID_CROSS_CERT_DIST_POINTS = "1.3.6.1.4.1.311.10.9.1"
szOID_CTL = "1.3.6.1.4.1.311.10.1"
szOID_SORTED_CTL = "1.3.6.1.4.1.311.10.1.1"
szOID_SERIALIZED = "1.3.6.1.4.1.311.10.3.3.1"
szOID_NT_PRINCIPAL_NAME = "1.3.6.1.4.1.311.20.2.3"
szOID_PRODUCT_UPDATE = "1.3.6.1.4.1.311.31.1"
szOID_ANY_APPLICATION_POLICY = "1.3.6.1.4.1.311.10.12.1"
szOID_AUTO_ENROLL_CTL_USAGE = "1.3.6.1.4.1.311.20.1"
szOID_ENROLL_CERTTYPE_EXTENSION = "1.3.6.1.4.1.311.20.2"
szOID_CERT_MANIFOLD = "1.3.6.1.4.1.311.20.3"
szOID_CERTSRV_CA_VERSION = "1.3.6.1.4.1.311.21.1"
szOID_CERTSRV_PREVIOUS_CERT_HASH = "1.3.6.1.4.1.311.21.2"
szOID_CRL_VIRTUAL_BASE = "1.3.6.1.4.1.311.21.3"
szOID_CRL_NEXT_PUBLISH = "1.3.6.1.4.1.311.21.4"
szOID_KP_CA_EXCHANGE = "1.3.6.1.4.1.311.21.5"
szOID_KP_KEY_RECOVERY_AGENT = "1.3.6.1.4.1.311.21.6"
szOID_CERTIFICATE_TEMPLATE = "1.3.6.1.4.1.311.21.7"
szOID_ENTERPRISE_OID_ROOT = "1.3.6.1.4.1.311.21.8"
szOID_RDN_DUMMY_SIGNER = "1.3.6.1.4.1.311.21.9"
szOID_APPLICATION_CERT_POLICIES = "1.3.6.1.4.1.311.21.10"
szOID_APPLICATION_POLICY_MAPPINGS = "1.3.6.1.4.1.311.21.11"
szOID_APPLICATION_POLICY_CONSTRAINTS = "1.3.6.1.4.1.311.21.12"
szOID_ARCHIVED_KEY_ATTR = "1.3.6.1.4.1.311.21.13"
szOID_CRL_SELF_CDP = "1.3.6.1.4.1.311.21.14"
szOID_REQUIRE_CERT_CHAIN_POLICY = "1.3.6.1.4.1.311.21.15"
szOID_ARCHIVED_KEY_CERT_HASH = "1.3.6.1.4.1.311.21.16"
szOID_ISSUED_CERT_HASH = "1.3.6.1.4.1.311.21.17"
szOID_DS_EMAIL_REPLICATION = "1.3.6.1.4.1.311.21.19"
szOID_REQUEST_CLIENT_INFO = "1.3.6.1.4.1.311.21.20"
szOID_ENCRYPTED_KEY_HASH = "1.3.6.1.4.1.311.21.21"
szOID_CERTSRV_CROSSCA_VERSION = "1.3.6.1.4.1.311.21.22"
szOID_NTDS_REPLICATION = "1.3.6.1.4.1.311.25.1"
szOID_SUBJECT_DIR_ATTRS = "2.5.29.9"
szOID_PKIX_KP = "1.3.6.1.5.5.7.3"
szOID_PKIX_KP_SERVER_AUTH = "1.3.6.1.5.5.7.3.1"
szOID_PKIX_KP_CLIENT_AUTH = "1.3.6.1.5.5.7.3.2"
szOID_PKIX_KP_CODE_SIGNING = "1.3.6.1.5.5.7.3.3"
szOID_PKIX_KP_EMAIL_PROTECTION = "1.3.6.1.5.5.7.3.4"
szOID_PKIX_KP_IPSEC_END_SYSTEM = "1.3.6.1.5.5.7.3.5"
szOID_PKIX_KP_IPSEC_TUNNEL = "1.3.6.1.5.5.7.3.6"
szOID_PKIX_KP_IPSEC_USER = "1.3.6.1.5.5.7.3.7"
szOID_PKIX_KP_TIMESTAMP_SIGNING = "1.3.6.1.5.5.7.3.8"
szOID_IPSEC_KP_IKE_INTERMEDIATE = "1.3.6.1.5.5.8.2.2"
szOID_KP_CTL_USAGE_SIGNING = "1.3.6.1.4.1.311.10.3.1"
szOID_KP_TIME_STAMP_SIGNING = "1.3.6.1.4.1.311.10.3.2"
szOID_SERVER_GATED_CRYPTO = "1.3.6.1.4.1.311.10.3.3"
szOID_SGC_NETSCAPE = "2.16.840.1.113730.4.1"
szOID_KP_EFS = "1.3.6.1.4.1.311.10.3.4"
szOID_EFS_RECOVERY = "1.3.6.1.4.1.311.10.3.4.1"
szOID_WHQL_CRYPTO = "1.3.6.1.4.1.311.10.3.5"
szOID_NT5_CRYPTO = "1.3.6.1.4.1.311.10.3.6"
szOID_OEM_WHQL_CRYPTO = "1.3.6.1.4.1.311.10.3.7"
szOID_EMBEDDED_NT_CRYPTO = "1.3.6.1.4.1.311.10.3.8"
szOID_ROOT_LIST_SIGNER = "1.3.6.1.4.1.311.10.3.9"
szOID_KP_QUALIFIED_SUBORDINATION = "1.3.6.1.4.1.311.10.3.10"
szOID_KP_KEY_RECOVERY = "1.3.6.1.4.1.311.10.3.11"
szOID_KP_DOCUMENT_SIGNING = "1.3.6.1.4.1.311.10.3.12"
szOID_KP_LIFETIME_SIGNING = "1.3.6.1.4.1.311.10.3.13"
szOID_KP_MOBILE_DEVICE_SOFTWARE = "1.3.6.1.4.1.311.10.3.14"
szOID_DRM = "1.3.6.1.4.1.311.10.5.1"
szOID_DRM_INDIVIDUALIZATION = "1.3.6.1.4.1.311.10.5.2"
szOID_LICENSES = "1.3.6.1.4.1.311.10.6.1"
szOID_LICENSE_SERVER = "1.3.6.1.4.1.311.10.6.2"
szOID_KP_SMARTCARD_LOGON = "1.3.6.1.4.1.311.20.2.2"
szOID_YESNO_TRUST_ATTR = "1.3.6.1.4.1.311.10.4.1"
szOID_PKIX_POLICY_QUALIFIER_CPS = "1.3.6.1.5.5.7.2.1"
szOID_PKIX_POLICY_QUALIFIER_USERNOTICE = "1.3.6.1.5.5.7.2.2"
szOID_CERT_POLICIES_95_QUALIFIER1 = "2.16.840.1.113733.1.7.1.1"
CERT_UNICODE_RDN_ERR_INDEX_MASK = 0x3FF
CERT_UNICODE_RDN_ERR_INDEX_SHIFT = 22
CERT_UNICODE_ATTR_ERR_INDEX_MASK = 0x003F
CERT_UNICODE_ATTR_ERR_INDEX_SHIFT = 16
CERT_UNICODE_VALUE_ERR_INDEX_MASK = 0x0000FFFF
CERT_UNICODE_VALUE_ERR_INDEX_SHIFT = 0
CERT_DIGITAL_SIGNATURE_KEY_USAGE = 0x80
CERT_NON_REPUDIATION_KEY_USAGE = 0x40
CERT_KEY_ENCIPHERMENT_KEY_USAGE = 0x20
CERT_DATA_ENCIPHERMENT_KEY_USAGE = 0x10
CERT_KEY_AGREEMENT_KEY_USAGE = 0x08
CERT_KEY_CERT_SIGN_KEY_USAGE = 0x04
CERT_OFFLINE_CRL_SIGN_KEY_USAGE = 0x02
CERT_CRL_SIGN_KEY_USAGE = 0x02
CERT_ENCIPHER_ONLY_KEY_USAGE = 0x01
CERT_DECIPHER_ONLY_KEY_USAGE = 0x80
CERT_ALT_NAME_OTHER_NAME = 1
CERT_ALT_NAME_RFC822_NAME = 2
CERT_ALT_NAME_DNS_NAME = 3
CERT_ALT_NAME_X400_ADDRESS = 4
CERT_ALT_NAME_DIRECTORY_NAME = 5
CERT_ALT_NAME_EDI_PARTY_NAME = 6
CERT_ALT_NAME_URL = 7
CERT_ALT_NAME_IP_ADDRESS = 8
CERT_ALT_NAME_REGISTERED_ID = 9
CERT_ALT_NAME_ENTRY_ERR_INDEX_MASK = 0xFF
CERT_ALT_NAME_ENTRY_ERR_INDEX_SHIFT = 16
CERT_ALT_NAME_VALUE_ERR_INDEX_MASK = 0x0000FFFF
CERT_ALT_NAME_VALUE_ERR_INDEX_SHIFT = 0
CERT_CA_SUBJECT_FLAG = 0x80
CERT_END_ENTITY_SUBJECT_FLAG = 0x40
szOID_PKIX_ACC_DESCR = "1.3.6.1.5.5.7.48"
szOID_PKIX_OCSP = "1.3.6.1.5.5.7.48.1"
szOID_PKIX_CA_ISSUERS = "1.3.6.1.5.5.7.48.2"
CRL_REASON_UNSPECIFIED = 0
CRL_REASON_KEY_COMPROMISE = 1
CRL_REASON_CA_COMPROMISE = 2
CRL_REASON_AFFILIATION_CHANGED = 3
CRL_REASON_SUPERSEDED = 4
CRL_REASON_CESSATION_OF_OPERATION = 5
CRL_REASON_CERTIFICATE_HOLD = 6
CRL_REASON_REMOVE_FROM_CRL = 8
CRL_DIST_POINT_NO_NAME = 0
CRL_DIST_POINT_FULL_NAME = 1
CRL_DIST_POINT_ISSUER_RDN_NAME = 2
CRL_REASON_UNUSED_FLAG = 0x80
CRL_REASON_KEY_COMPROMISE_FLAG = 0x40
CRL_REASON_CA_COMPROMISE_FLAG = 0x20
CRL_REASON_AFFILIATION_CHANGED_FLAG = 0x10
CRL_REASON_SUPERSEDED_FLAG = 0x08
CRL_REASON_CESSATION_OF_OPERATION_FLAG = 0x04
CRL_REASON_CERTIFICATE_HOLD_FLAG = 0x02
CRL_DIST_POINT_ERR_INDEX_MASK = 0x7F
CRL_DIST_POINT_ERR_INDEX_SHIFT = 24
CRL_DIST_POINT_ERR_CRL_ISSUER_BIT = (-2147483648)
CROSS_CERT_DIST_POINT_ERR_INDEX_MASK = 0xFF
CROSS_CERT_DIST_POINT_ERR_INDEX_SHIFT = 24
CERT_EXCLUDED_SUBTREE_BIT = (-2147483648)
SORTED_CTL_EXT_FLAGS_OFFSET = (0*4)
SORTED_CTL_EXT_COUNT_OFFSET = (1*4)
SORTED_CTL_EXT_MAX_COLLISION_OFFSET = (2*4)
SORTED_CTL_EXT_HASH_BUCKET_OFFSET = (3*4)
SORTED_CTL_EXT_HASHED_SUBJECT_IDENTIFIER_FLAG = 0x1
CERT_DSS_R_LEN = 20
CERT_DSS_S_LEN = 20
CERT_DSS_SIGNATURE_LEN = (CERT_DSS_R_LEN + CERT_DSS_S_LEN)
CERT_MAX_ASN_ENCODED_DSS_SIGNATURE_LEN = (2 + 2*(2 + 20 +1))
CRYPT_X942_COUNTER_BYTE_LENGTH = 4
CRYPT_X942_KEY_LENGTH_BYTE_LENGTH = 4
CRYPT_X942_PUB_INFO_BYTE_LENGTH = (512/8)
CRYPT_RC2_40BIT_VERSION = 160
CRYPT_RC2_56BIT_VERSION = 52
CRYPT_RC2_64BIT_VERSION = 120
CRYPT_RC2_128BIT_VERSION = 58
# VeriSign- and Netscape-defined certificate OIDs and the Netscape
# certificate-type extension flag bits.  (Verbatim from wincrypt.h.)
szOID_VERISIGN_PRIVATE_6_9 = "2.16.840.1.113733.1.6.9"
szOID_VERISIGN_ONSITE_JURISDICTION_HASH = "2.16.840.1.113733.1.6.11"
szOID_VERISIGN_BITSTRING_6_13 = "2.16.840.1.113733.1.6.13"
szOID_VERISIGN_ISS_STRONG_CRYPTO = "2.16.840.1.113733.1.8.1"
szOID_NETSCAPE = "2.16.840.1.113730"
szOID_NETSCAPE_CERT_EXTENSION = "2.16.840.1.113730.1"
szOID_NETSCAPE_CERT_TYPE = "2.16.840.1.113730.1.1"
szOID_NETSCAPE_BASE_URL = "2.16.840.1.113730.1.2"
szOID_NETSCAPE_REVOCATION_URL = "2.16.840.1.113730.1.3"
szOID_NETSCAPE_CA_REVOCATION_URL = "2.16.840.1.113730.1.4"
szOID_NETSCAPE_CERT_RENEWAL_URL = "2.16.840.1.113730.1.7"
szOID_NETSCAPE_CA_POLICY_URL = "2.16.840.1.113730.1.8"
szOID_NETSCAPE_SSL_SERVER_NAME = "2.16.840.1.113730.1.12"
szOID_NETSCAPE_COMMENT = "2.16.840.1.113730.1.13"
szOID_NETSCAPE_DATA_TYPE = "2.16.840.1.113730.2"
szOID_NETSCAPE_CERT_SEQUENCE = "2.16.840.1.113730.2.5"
NETSCAPE_SSL_CLIENT_AUTH_CERT_TYPE = 0x80
NETSCAPE_SSL_SERVER_AUTH_CERT_TYPE = 0x40
NETSCAPE_SMIME_CERT_TYPE = 0x20
NETSCAPE_SIGN_CERT_TYPE = 0x10
NETSCAPE_SSL_CA_CERT_TYPE = 0x04
NETSCAPE_SMIME_CA_CERT_TYPE = 0x02
NETSCAPE_SIGN_CA_CERT_TYPE = 0x01
# CMC (Certificate Management over CMS, RFC 2797) OIDs plus
# status / failure / choice codes.
szOID_CT_PKI_DATA = "1.3.6.1.5.5.7.12.2"
szOID_CT_PKI_RESPONSE = "1.3.6.1.5.5.7.12.3"
szOID_PKIX_NO_SIGNATURE = "1.3.6.1.5.5.7.6.2"
szOID_CMC = "1.3.6.1.5.5.7.7"
szOID_CMC_STATUS_INFO = "1.3.6.1.5.5.7.7.1"
szOID_CMC_IDENTIFICATION = "1.3.6.1.5.5.7.7.2"
szOID_CMC_IDENTITY_PROOF = "1.3.6.1.5.5.7.7.3"
szOID_CMC_DATA_RETURN = "1.3.6.1.5.5.7.7.4"
szOID_CMC_TRANSACTION_ID = "1.3.6.1.5.5.7.7.5"
szOID_CMC_SENDER_NONCE = "1.3.6.1.5.5.7.7.6"
szOID_CMC_RECIPIENT_NONCE = "1.3.6.1.5.5.7.7.7"
szOID_CMC_ADD_EXTENSIONS = "1.3.6.1.5.5.7.7.8"
szOID_CMC_ENCRYPTED_POP = "1.3.6.1.5.5.7.7.9"
szOID_CMC_DECRYPTED_POP = "1.3.6.1.5.5.7.7.10"
szOID_CMC_LRA_POP_WITNESS = "1.3.6.1.5.5.7.7.11"
szOID_CMC_GET_CERT = "1.3.6.1.5.5.7.7.15"
szOID_CMC_GET_CRL = "1.3.6.1.5.5.7.7.16"
szOID_CMC_REVOKE_REQUEST = "1.3.6.1.5.5.7.7.17"
szOID_CMC_REG_INFO = "1.3.6.1.5.5.7.7.18"
szOID_CMC_RESPONSE_INFO = "1.3.6.1.5.5.7.7.19"
szOID_CMC_QUERY_PENDING = "1.3.6.1.5.5.7.7.21"
szOID_CMC_ID_POP_LINK_RANDOM = "1.3.6.1.5.5.7.7.22"
szOID_CMC_ID_POP_LINK_WITNESS = "1.3.6.1.5.5.7.7.23"
szOID_CMC_ID_CONFIRM_CERT_ACCEPTANCE = "1.3.6.1.5.5.7.7.24"
szOID_CMC_ADD_ATTRIBUTES = "1.3.6.1.4.1.311.10.10.1"
CMC_TAGGED_CERT_REQUEST_CHOICE = 1
CMC_OTHER_INFO_NO_CHOICE = 0
CMC_OTHER_INFO_FAIL_CHOICE = 1
CMC_OTHER_INFO_PEND_CHOICE = 2
CMC_STATUS_SUCCESS = 0
CMC_STATUS_FAILED = 2
CMC_STATUS_PENDING = 3
CMC_STATUS_NO_SUPPORT = 4
CMC_STATUS_CONFIRM_REQUIRED = 5
CMC_FAIL_BAD_ALG = 0
CMC_FAIL_BAD_MESSAGE_CHECK = 1
CMC_FAIL_BAD_REQUEST = 2
CMC_FAIL_BAD_TIME = 3
CMC_FAIL_BAD_CERT_ID = 4
# Spelling "UNSUPORTED" matches the SDK header; do not "fix" it.
CMC_FAIL_UNSUPORTED_EXT = 5
CMC_FAIL_MUST_ARCHIVE_KEYS = 6
CMC_FAIL_BAD_IDENTITY = 7
CMC_FAIL_POP_REQUIRED = 8
CMC_FAIL_POP_FAILED = 9
CMC_FAIL_NO_KEY_REUSE = 10
CMC_FAIL_INTERNAL_CA_ERROR = 11
CMC_FAIL_TRY_LATER = 12
# Exported-function names and registry locations used by CryptoAPI's
# installable OID function mechanism.
CRYPT_OID_ENCODE_OBJECT_FUNC = "CryptDllEncodeObject"
CRYPT_OID_DECODE_OBJECT_FUNC = "CryptDllDecodeObject"
CRYPT_OID_ENCODE_OBJECT_EX_FUNC = "CryptDllEncodeObjectEx"
CRYPT_OID_DECODE_OBJECT_EX_FUNC = "CryptDllDecodeObjectEx"
CRYPT_OID_CREATE_COM_OBJECT_FUNC = "CryptDllCreateCOMObject"
CRYPT_OID_VERIFY_REVOCATION_FUNC = "CertDllVerifyRevocation"
CRYPT_OID_VERIFY_CTL_USAGE_FUNC = "CertDllVerifyCTLUsage"
CRYPT_OID_FORMAT_OBJECT_FUNC = "CryptDllFormatObject"
CRYPT_OID_FIND_OID_INFO_FUNC = "CryptDllFindOIDInfo"
CRYPT_OID_FIND_LOCALIZED_NAME_FUNC = "CryptDllFindLocalizedName"
CRYPT_OID_REGPATH = "Software\\Microsoft\\Cryptography\\OID"
CRYPT_OID_REG_ENCODING_TYPE_PREFIX = "EncodingType "
CRYPT_OID_REG_DLL_VALUE_NAME = u"Dll"
CRYPT_OID_REG_FUNC_NAME_VALUE_NAME = u"FuncName"
CRYPT_OID_REG_FUNC_NAME_VALUE_NAME_A = "FuncName"
CRYPT_OID_REG_FLAGS_VALUE_NAME = u"CryptFlags"
CRYPT_DEFAULT_OID = "DEFAULT"
CRYPT_INSTALL_OID_FUNC_BEFORE_FLAG = 1
CRYPT_GET_INSTALLED_OID_FUNC_FLAG = 0x1
CRYPT_REGISTER_FIRST_INDEX = 0
CRYPT_REGISTER_LAST_INDEX = (-1)
CRYPT_MATCH_ANY_ENCODING_TYPE = (-1)
# OID group ids for CryptFindOIDInfo.
CRYPT_HASH_ALG_OID_GROUP_ID = 1
CRYPT_ENCRYPT_ALG_OID_GROUP_ID = 2
CRYPT_PUBKEY_ALG_OID_GROUP_ID = 3
CRYPT_SIGN_ALG_OID_GROUP_ID = 4
CRYPT_RDN_ATTR_OID_GROUP_ID = 5
CRYPT_EXT_OR_ATTR_OID_GROUP_ID = 6
CRYPT_ENHKEY_USAGE_OID_GROUP_ID = 7
CRYPT_POLICY_OID_GROUP_ID = 8
CRYPT_TEMPLATE_OID_GROUP_ID = 9
CRYPT_LAST_OID_GROUP_ID = 9
CRYPT_FIRST_ALG_OID_GROUP_ID = CRYPT_HASH_ALG_OID_GROUP_ID
CRYPT_LAST_ALG_OID_GROUP_ID = CRYPT_SIGN_ALG_OID_GROUP_ID
CRYPT_OID_INHIBIT_SIGNATURE_FORMAT_FLAG = 0x1
CRYPT_OID_USE_PUBKEY_PARA_FOR_PKCS7_FLAG = 0x2
CRYPT_OID_NO_NULL_ALGORITHM_PARA_FLAG = 0x4
CRYPT_OID_INFO_OID_KEY = 1
CRYPT_OID_INFO_NAME_KEY = 2
CRYPT_OID_INFO_ALGID_KEY = 3
CRYPT_OID_INFO_SIGN_KEY = 4
CRYPT_INSTALL_OID_INFO_BEFORE_FLAG = 1
CRYPT_LOCALIZED_NAME_ENCODING_TYPE = 0
CRYPT_LOCALIZED_NAME_OID = "LocalizedNames"
# PKCS #7 content types and PKCS #9 attribute OIDs, followed by the
# CryptMsg (CMSG) message types, flags, parameters and versions.
szOID_PKCS_7_DATA = "1.2.840.113549.1.7.1"
szOID_PKCS_7_SIGNED = "1.2.840.113549.1.7.2"
szOID_PKCS_7_ENVELOPED = "1.2.840.113549.1.7.3"
szOID_PKCS_7_SIGNEDANDENVELOPED = "1.2.840.113549.1.7.4"
szOID_PKCS_7_DIGESTED = "1.2.840.113549.1.7.5"
szOID_PKCS_7_ENCRYPTED = "1.2.840.113549.1.7.6"
szOID_PKCS_9_CONTENT_TYPE = "1.2.840.113549.1.9.3"
szOID_PKCS_9_MESSAGE_DIGEST = "1.2.840.113549.1.9.4"
CMSG_DATA = 1
CMSG_SIGNED = 2
CMSG_ENVELOPED = 3
CMSG_SIGNED_AND_ENVELOPED = 4
CMSG_HASHED = 5
CMSG_ENCRYPTED = 6
CMSG_ALL_FLAGS = -1
# Each message-type flag is 1 shifted by its message-type number.
CMSG_DATA_FLAG = (1 << CMSG_DATA)
CMSG_SIGNED_FLAG = (1 << CMSG_SIGNED)
CMSG_ENVELOPED_FLAG = (1 << CMSG_ENVELOPED)
CMSG_SIGNED_AND_ENVELOPED_FLAG = (1 << CMSG_SIGNED_AND_ENVELOPED)
CMSG_HASHED_FLAG = (1 << CMSG_HASHED)
CMSG_ENCRYPTED_FLAG = (1 << CMSG_ENCRYPTED)
CERT_ID_ISSUER_SERIAL_NUMBER = 1
CERT_ID_KEY_IDENTIFIER = 2
CERT_ID_SHA1_HASH = 3
CMSG_KEY_AGREE_EPHEMERAL_KEY_CHOICE = 1
CMSG_KEY_AGREE_STATIC_KEY_CHOICE = 2
CMSG_MAIL_LIST_HANDLE_KEY_CHOICE = 1
CMSG_KEY_TRANS_RECIPIENT = 1
CMSG_KEY_AGREE_RECIPIENT = 2
CMSG_MAIL_LIST_RECIPIENT = 3
CMSG_SP3_COMPATIBLE_ENCRYPT_FLAG = (-2147483648)
CMSG_RC4_NO_SALT_FLAG = 0x40000000
CMSG_INDEFINITE_LENGTH = ((-1))
CMSG_BARE_CONTENT_FLAG = 0x00000001
CMSG_LENGTH_ONLY_FLAG = 0x00000002
CMSG_DETACHED_FLAG = 0x00000004
CMSG_AUTHENTICATED_ATTRIBUTES_FLAG = 0x00000008
CMSG_CONTENTS_OCTETS_FLAG = 0x00000010
CMSG_MAX_LENGTH_FLAG = 0x00000020
CMSG_CMS_ENCAPSULATED_CONTENT_FLAG = 0x00000040
CMSG_CRYPT_RELEASE_CONTEXT_FLAG = 0x00008000
# Parameter ids for CryptMsgGetParam.
CMSG_TYPE_PARAM = 1
CMSG_CONTENT_PARAM = 2
CMSG_BARE_CONTENT_PARAM = 3
CMSG_INNER_CONTENT_TYPE_PARAM = 4
CMSG_SIGNER_COUNT_PARAM = 5
CMSG_SIGNER_INFO_PARAM = 6
CMSG_SIGNER_CERT_INFO_PARAM = 7
CMSG_SIGNER_HASH_ALGORITHM_PARAM = 8
CMSG_SIGNER_AUTH_ATTR_PARAM = 9
CMSG_SIGNER_UNAUTH_ATTR_PARAM = 10
CMSG_CERT_COUNT_PARAM = 11
CMSG_CERT_PARAM = 12
CMSG_CRL_COUNT_PARAM = 13
CMSG_CRL_PARAM = 14
CMSG_ENVELOPE_ALGORITHM_PARAM = 15
CMSG_RECIPIENT_COUNT_PARAM = 17
CMSG_RECIPIENT_INDEX_PARAM = 18
CMSG_RECIPIENT_INFO_PARAM = 19
CMSG_HASH_ALGORITHM_PARAM = 20
CMSG_HASH_DATA_PARAM = 21
CMSG_COMPUTED_HASH_PARAM = 22
CMSG_ENCRYPT_PARAM = 26
CMSG_ENCRYPTED_DIGEST = 27
CMSG_ENCODED_SIGNER = 28
CMSG_ENCODED_MESSAGE = 29
CMSG_VERSION_PARAM = 30
CMSG_ATTR_CERT_COUNT_PARAM = 31
CMSG_ATTR_CERT_PARAM = 32
CMSG_CMS_RECIPIENT_COUNT_PARAM = 33
CMSG_CMS_RECIPIENT_INDEX_PARAM = 34
CMSG_CMS_RECIPIENT_ENCRYPTED_KEY_INDEX_PARAM = 35
CMSG_CMS_RECIPIENT_INFO_PARAM = 36
CMSG_UNPROTECTED_ATTR_PARAM = 37
CMSG_SIGNER_CERT_ID_PARAM = 38
CMSG_CMS_SIGNER_INFO_PARAM = 39
# Message / recipient version numbers (PKCS 1.5 vs CMS encodings).
CMSG_SIGNED_DATA_V1 = 1
CMSG_SIGNED_DATA_V3 = 3
CMSG_SIGNED_DATA_PKCS_1_5_VERSION = CMSG_SIGNED_DATA_V1
CMSG_SIGNED_DATA_CMS_VERSION = CMSG_SIGNED_DATA_V3
CMSG_SIGNER_INFO_V1 = 1
CMSG_SIGNER_INFO_V3 = 3
CMSG_SIGNER_INFO_PKCS_1_5_VERSION = CMSG_SIGNER_INFO_V1
CMSG_SIGNER_INFO_CMS_VERSION = CMSG_SIGNER_INFO_V3
CMSG_HASHED_DATA_V0 = 0
CMSG_HASHED_DATA_V2 = 2
CMSG_HASHED_DATA_PKCS_1_5_VERSION = CMSG_HASHED_DATA_V0
CMSG_HASHED_DATA_CMS_VERSION = CMSG_HASHED_DATA_V2
CMSG_ENVELOPED_DATA_V0 = 0
CMSG_ENVELOPED_DATA_V2 = 2
CMSG_ENVELOPED_DATA_PKCS_1_5_VERSION = CMSG_ENVELOPED_DATA_V0
CMSG_ENVELOPED_DATA_CMS_VERSION = CMSG_ENVELOPED_DATA_V2
CMSG_KEY_AGREE_ORIGINATOR_CERT = 1
CMSG_KEY_AGREE_ORIGINATOR_PUBLIC_KEY = 2
CMSG_ENVELOPED_RECIPIENT_V0 = 0
CMSG_ENVELOPED_RECIPIENT_V2 = 2
CMSG_ENVELOPED_RECIPIENT_V3 = 3
CMSG_ENVELOPED_RECIPIENT_V4 = 4
CMSG_KEY_TRANS_PKCS_1_5_VERSION = CMSG_ENVELOPED_RECIPIENT_V0
CMSG_KEY_TRANS_CMS_VERSION = CMSG_ENVELOPED_RECIPIENT_V2
CMSG_KEY_AGREE_VERSION = CMSG_ENVELOPED_RECIPIENT_V3
CMSG_MAIL_LIST_VERSION = CMSG_ENVELOPED_RECIPIENT_V4
# Control actions for CryptMsgControl.
CMSG_CTRL_VERIFY_SIGNATURE = 1
CMSG_CTRL_DECRYPT = 2
CMSG_CTRL_VERIFY_HASH = 5
CMSG_CTRL_ADD_SIGNER = 6
CMSG_CTRL_DEL_SIGNER = 7
CMSG_CTRL_ADD_SIGNER_UNAUTH_ATTR = 8
CMSG_CTRL_DEL_SIGNER_UNAUTH_ATTR = 9
CMSG_CTRL_ADD_CERT = 10
CMSG_CTRL_DEL_CERT = 11
CMSG_CTRL_ADD_CRL = 12
CMSG_CTRL_DEL_CRL = 13
CMSG_CTRL_ADD_ATTR_CERT = 14
CMSG_CTRL_DEL_ATTR_CERT = 15
CMSG_CTRL_KEY_TRANS_DECRYPT = 16
CMSG_CTRL_KEY_AGREE_DECRYPT = 17
CMSG_CTRL_MAIL_LIST_DECRYPT = 18
CMSG_CTRL_VERIFY_SIGNATURE_EX = 19
CMSG_CTRL_ADD_CMS_SIGNER_INFO = 20
CMSG_VERIFY_SIGNER_PUBKEY = 1
CMSG_VERIFY_SIGNER_CERT = 2
CMSG_VERIFY_SIGNER_CHAIN = 3
CMSG_VERIFY_SIGNER_NULL = 4
# Installable CryptMsg OID function names and related flags.
CMSG_OID_GEN_ENCRYPT_KEY_FUNC = "CryptMsgDllGenEncryptKey"
CMSG_OID_EXPORT_ENCRYPT_KEY_FUNC = "CryptMsgDllExportEncryptKey"
CMSG_OID_IMPORT_ENCRYPT_KEY_FUNC = "CryptMsgDllImportEncryptKey"
CMSG_CONTENT_ENCRYPT_PAD_ENCODED_LEN_FLAG = 0x00000001
CMSG_DEFAULT_INSTALLABLE_FUNC_OID = 1
CMSG_CONTENT_ENCRYPT_FREE_PARA_FLAG = 0x00000001
CMSG_CONTENT_ENCRYPT_RELEASE_CONTEXT_FLAG = 0x00008000
CMSG_OID_GEN_CONTENT_ENCRYPT_KEY_FUNC = "CryptMsgDllGenContentEncryptKey"
CMSG_KEY_TRANS_ENCRYPT_FREE_PARA_FLAG = 0x00000001
CMSG_OID_EXPORT_KEY_TRANS_FUNC = "CryptMsgDllExportKeyTrans"
CMSG_KEY_AGREE_ENCRYPT_FREE_PARA_FLAG = 0x00000001
CMSG_KEY_AGREE_ENCRYPT_FREE_MATERIAL_FLAG = 0x00000002
CMSG_KEY_AGREE_ENCRYPT_FREE_PUBKEY_ALG_FLAG = 0x00000004
CMSG_KEY_AGREE_ENCRYPT_FREE_PUBKEY_PARA_FLAG = 0x00000008
CMSG_KEY_AGREE_ENCRYPT_FREE_PUBKEY_BITS_FLAG = 0x00000010
CMSG_OID_EXPORT_KEY_AGREE_FUNC = "CryptMsgDllExportKeyAgree"
CMSG_MAIL_LIST_ENCRYPT_FREE_PARA_FLAG = 0x00000001
CMSG_OID_EXPORT_MAIL_LIST_FUNC = "CryptMsgDllExportMailList"
CMSG_OID_IMPORT_KEY_TRANS_FUNC = "CryptMsgDllImportKeyTrans"
CMSG_OID_IMPORT_KEY_AGREE_FUNC = "CryptMsgDllImportKeyAgree"
CMSG_OID_IMPORT_MAIL_LIST_FUNC = "CryptMsgDllImportMailList"
# Certificate property id's used with CertGetCertificateContextProperty
CERT_KEY_PROV_HANDLE_PROP_ID = 1
CERT_KEY_PROV_INFO_PROP_ID = 2
CERT_SHA1_HASH_PROP_ID = 3
CERT_MD5_HASH_PROP_ID = 4
CERT_HASH_PROP_ID = CERT_SHA1_HASH_PROP_ID
CERT_KEY_CONTEXT_PROP_ID = 5
CERT_KEY_SPEC_PROP_ID = 6
CERT_IE30_RESERVED_PROP_ID = 7
CERT_PUBKEY_HASH_RESERVED_PROP_ID = 8
CERT_ENHKEY_USAGE_PROP_ID = 9
CERT_CTL_USAGE_PROP_ID = CERT_ENHKEY_USAGE_PROP_ID
CERT_NEXT_UPDATE_LOCATION_PROP_ID = 10
CERT_FRIENDLY_NAME_PROP_ID = 11
CERT_PVK_FILE_PROP_ID = 12
CERT_DESCRIPTION_PROP_ID = 13
CERT_ACCESS_STATE_PROP_ID = 14
CERT_SIGNATURE_HASH_PROP_ID = 15
CERT_SMART_CARD_DATA_PROP_ID = 16
CERT_EFS_PROP_ID = 17
CERT_FORTEZZA_DATA_PROP_ID = 18
CERT_ARCHIVED_PROP_ID = 19
CERT_KEY_IDENTIFIER_PROP_ID = 20
CERT_AUTO_ENROLL_PROP_ID = 21
CERT_PUBKEY_ALG_PARA_PROP_ID = 22
CERT_CROSS_CERT_DIST_POINTS_PROP_ID = 23
CERT_ISSUER_PUBLIC_KEY_MD5_HASH_PROP_ID = 24
CERT_SUBJECT_PUBLIC_KEY_MD5_HASH_PROP_ID = 25
CERT_ENROLLMENT_PROP_ID = 26
CERT_DATE_STAMP_PROP_ID = 27
CERT_ISSUER_SERIAL_NUMBER_MD5_HASH_PROP_ID = 28
CERT_SUBJECT_NAME_MD5_HASH_PROP_ID = 29
CERT_EXTENDED_ERROR_INFO_PROP_ID = 30
# Ids 31-63 are reserved by the SDK; the list resumes at 64.
CERT_RENEWAL_PROP_ID = 64
CERT_ARCHIVED_KEY_HASH_PROP_ID = 65
CERT_AUTO_ENROLL_RETRY_PROP_ID = 66
CERT_AIA_URL_RETRIEVED_PROP_ID = 67
CERT_AUTHORITY_INFO_ACCESS_PROP_ID = 68
CERT_BACKED_UP_PROP_ID = 69
CERT_OCSP_RESPONSE_PROP_ID = 70
CERT_REQUEST_ORIGINATOR_PROP_ID = 71
CERT_SOURCE_LOCATION_PROP_ID = 72
CERT_SOURCE_URL_PROP_ID = 73
CERT_NEW_KEY_PROP_ID = 74
CERT_OCSP_CACHE_PREFIX_PROP_ID = 75
CERT_SMART_CARD_ROOT_INFO_PROP_ID = 76
CERT_NO_AUTO_EXPIRE_CHECK_PROP_ID = 77
CERT_NCRYPT_KEY_HANDLE_PROP_ID = 78
CERT_HCRYPTPROV_OR_NCRYPT_KEY_HANDLE_PROP_ID = 79
CERT_SUBJECT_INFO_ACCESS_PROP_ID = 80
CERT_CA_OCSP_AUTHORITY_INFO_ACCESS_PROP_ID = 81
CERT_CA_DISABLE_CRL_PROP_ID = 82
CERT_ROOT_PROGRAM_CERT_POLICIES_PROP_ID = 83
CERT_ROOT_PROGRAM_NAME_CONSTRAINTS_PROP_ID = 84
CERT_SUBJECT_OCSP_AUTHORITY_INFO_ACCESS_PROP_ID = 85
CERT_SUBJECT_DISABLE_CRL_PROP_ID = 86
CERT_CEP_PROP_ID = 87
CERT_SIGN_HASH_CNG_ALG_PROP_ID = 89
CERT_SCARD_PIN_ID_PROP_ID = 90
CERT_SCARD_PIN_INFO_PROP_ID = 91
CERT_FIRST_RESERVED_PROP_ID = 92
CERT_LAST_RESERVED_PROP_ID = 0x00007FFF
CERT_FIRST_USER_PROP_ID = 0x00008000
CERT_LAST_USER_PROP_ID = 0x0000FFFF
szOID_CERT_PROP_ID_PREFIX = "1.3.6.1.4.1.311.10.11."
szOID_CERT_KEY_IDENTIFIER_PROP_ID = "1.3.6.1.4.1.311.10.11.20"
szOID_CERT_ISSUER_SERIAL_NUMBER_MD5_HASH_PROP_ID = \
        "1.3.6.1.4.1.311.10.11.28"
szOID_CERT_SUBJECT_NAME_MD5_HASH_PROP_ID = \
        "1.3.6.1.4.1.311.10.11.29"
CERT_ACCESS_STATE_WRITE_PERSIST_FLAG = 0x1
CERT_ACCESS_STATE_SYSTEM_STORE_FLAG = 0x2
CERT_ACCESS_STATE_LM_SYSTEM_STORE_FLAG = 0x4
CERT_SET_KEY_PROV_HANDLE_PROP_ID = 0x00000001
CERT_SET_KEY_CONTEXT_PROP_ID = 0x00000001
# Certificate store provider names (CertOpenStore lpszStoreProvider).
sz_CERT_STORE_PROV_MEMORY = "Memory"
sz_CERT_STORE_PROV_FILENAME_W = "File"
sz_CERT_STORE_PROV_FILENAME = sz_CERT_STORE_PROV_FILENAME_W
sz_CERT_STORE_PROV_SYSTEM_W = "System"
sz_CERT_STORE_PROV_SYSTEM = sz_CERT_STORE_PROV_SYSTEM_W
sz_CERT_STORE_PROV_PKCS7 = "PKCS7"
sz_CERT_STORE_PROV_SERIALIZED = "Serialized"
sz_CERT_STORE_PROV_COLLECTION = "Collection"
sz_CERT_STORE_PROV_SYSTEM_REGISTRY_W = "SystemRegistry"
sz_CERT_STORE_PROV_SYSTEM_REGISTRY = sz_CERT_STORE_PROV_SYSTEM_REGISTRY_W
sz_CERT_STORE_PROV_PHYSICAL_W = "Physical"
sz_CERT_STORE_PROV_PHYSICAL = sz_CERT_STORE_PROV_PHYSICAL_W
sz_CERT_STORE_PROV_SMART_CARD_W = "SmartCard"
sz_CERT_STORE_PROV_SMART_CARD = sz_CERT_STORE_PROV_SMART_CARD_W
sz_CERT_STORE_PROV_LDAP_W = "Ldap"
sz_CERT_STORE_PROV_LDAP = sz_CERT_STORE_PROV_LDAP_W
# Certificate/CRL verification and CertOpenStore dwFlags values.
CERT_STORE_SIGNATURE_FLAG = 0x00000001
CERT_STORE_TIME_VALIDITY_FLAG = 0x00000002
CERT_STORE_REVOCATION_FLAG = 0x00000004
CERT_STORE_NO_CRL_FLAG = 0x00010000
CERT_STORE_NO_ISSUER_FLAG = 0x00020000
CERT_STORE_BASE_CRL_FLAG = 0x00000100
CERT_STORE_DELTA_CRL_FLAG = 0x00000200
CERT_STORE_NO_CRYPT_RELEASE_FLAG = 0x00000001
CERT_STORE_SET_LOCALIZED_NAME_FLAG = 0x00000002
CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004
CERT_STORE_DELETE_FLAG = 0x00000010
CERT_STORE_UNSAFE_PHYSICAL_FLAG = 0x00000020
CERT_STORE_SHARE_STORE_FLAG = 0x00000040
CERT_STORE_SHARE_CONTEXT_FLAG = 0x00000080
CERT_STORE_MANIFOLD_FLAG = 0x00000100
CERT_STORE_ENUM_ARCHIVED_FLAG = 0x00000200
CERT_STORE_UPDATE_KEYID_FLAG = 0x00000400
CERT_STORE_BACKUP_RESTORE_FLAG = 0x00000800
CERT_STORE_READONLY_FLAG = 0x00008000
CERT_STORE_OPEN_EXISTING_FLAG = 0x00004000
CERT_STORE_CREATE_NEW_FLAG = 0x00002000
CERT_STORE_MAXIMUM_ALLOWED_FLAG = 0x00001000
# System store location ids and the shifted location values they produce.
# Negative literals again stand in for values with the sign bit set.
CERT_SYSTEM_STORE_MASK = (-65536)
CERT_SYSTEM_STORE_RELOCATE_FLAG = (-2147483648)
CERT_SYSTEM_STORE_UNPROTECTED_FLAG = 0x40000000
CERT_SYSTEM_STORE_LOCATION_MASK = 0x00FF0000
CERT_SYSTEM_STORE_LOCATION_SHIFT = 16
CERT_SYSTEM_STORE_CURRENT_USER_ID = 1
CERT_SYSTEM_STORE_LOCAL_MACHINE_ID = 2
CERT_SYSTEM_STORE_CURRENT_SERVICE_ID = 4
CERT_SYSTEM_STORE_SERVICES_ID = 5
CERT_SYSTEM_STORE_USERS_ID = 6
CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY_ID = 7
CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY_ID = 8
CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE_ID = 9
CERT_SYSTEM_STORE_CURRENT_USER = \
        (CERT_SYSTEM_STORE_CURRENT_USER_ID << CERT_SYSTEM_STORE_LOCATION_SHIFT)
CERT_SYSTEM_STORE_LOCAL_MACHINE = \
        (CERT_SYSTEM_STORE_LOCAL_MACHINE_ID << CERT_SYSTEM_STORE_LOCATION_SHIFT)
CERT_SYSTEM_STORE_CURRENT_SERVICE = \
        (CERT_SYSTEM_STORE_CURRENT_SERVICE_ID << CERT_SYSTEM_STORE_LOCATION_SHIFT)
CERT_SYSTEM_STORE_SERVICES = \
        (CERT_SYSTEM_STORE_SERVICES_ID << CERT_SYSTEM_STORE_LOCATION_SHIFT)
CERT_SYSTEM_STORE_USERS = \
        (CERT_SYSTEM_STORE_USERS_ID << CERT_SYSTEM_STORE_LOCATION_SHIFT)
CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY = \
        (CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY_ID << \
        CERT_SYSTEM_STORE_LOCATION_SHIFT)
CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY = \
        (CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY_ID << \
        CERT_SYSTEM_STORE_LOCATION_SHIFT)
CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE = \
        (CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE_ID << \
        CERT_SYSTEM_STORE_LOCATION_SHIFT)
# Protected-root and trusted-publisher policy flags.
CERT_PROT_ROOT_DISABLE_CURRENT_USER_FLAG = 0x1
CERT_PROT_ROOT_INHIBIT_ADD_AT_INIT_FLAG = 0x2
CERT_PROT_ROOT_INHIBIT_PURGE_LM_FLAG = 0x4
CERT_PROT_ROOT_DISABLE_LM_AUTH_FLAG = 0x8
# Same value as the previous flag; both are 0x8 in the SDK header.
CERT_PROT_ROOT_ONLY_LM_GPT_FLAG = 0x8
CERT_PROT_ROOT_DISABLE_NT_AUTH_REQUIRED_FLAG = 0x10
CERT_PROT_ROOT_DISABLE_NOT_DEFINED_NAME_CONSTRAINT_FLAG = 0x20
CERT_TRUST_PUB_ALLOW_TRUST_MASK = 0x00000003
CERT_TRUST_PUB_ALLOW_END_USER_TRUST = 0x00000000
CERT_TRUST_PUB_ALLOW_MACHINE_ADMIN_TRUST = 0x00000001
CERT_TRUST_PUB_ALLOW_ENTERPRISE_ADMIN_TRUST = 0x00000002
CERT_TRUST_PUB_CHECK_PUBLISHER_REV_FLAG = 0x00000100
CERT_TRUST_PUB_CHECK_TIMESTAMP_REV_FLAG = 0x00000200
# Registry paths and value names for root-store auto-update and
# certificate group policy.  The original `ur"..."` raw-unicode literals
# are invalid syntax on Python 3; they are written here as u"..." with
# escaped backslashes, which yields byte-for-byte identical values on
# both Python 2 and Python 3.
CERT_AUTH_ROOT_AUTO_UPDATE_LOCAL_MACHINE_REGPATH = u"Software\\Microsoft\\SystemCertificates\\AuthRoot\\AutoUpdate"
CERT_AUTH_ROOT_AUTO_UPDATE_DISABLE_UNTRUSTED_ROOT_LOGGING_FLAG = 0x1
CERT_AUTH_ROOT_AUTO_UPDATE_DISABLE_PARTIAL_CHAIN_LOGGING_FLAG = 0x2
CERT_AUTH_ROOT_AUTO_UPDATE_ROOT_DIR_URL_VALUE_NAME = u"RootDirUrl"
CERT_AUTH_ROOT_AUTO_UPDATE_SYNC_DELTA_TIME_VALUE_NAME = u"SyncDeltaTime"
CERT_AUTH_ROOT_AUTO_UPDATE_FLAGS_VALUE_NAME = u"Flags"
CERT_AUTH_ROOT_CTL_FILENAME = u"authroot.stl"
CERT_AUTH_ROOT_CTL_FILENAME_A = "authroot.stl"
CERT_AUTH_ROOT_CAB_FILENAME = u"authrootstl.cab"
CERT_AUTH_ROOT_SEQ_FILENAME = "authrootseq.txt"
CERT_AUTH_ROOT_CERT_EXT = ".crt"
CERT_GROUP_POLICY_SYSTEM_STORE_REGPATH = u"Software\\Policies\\Microsoft\\SystemCertificates"
CERT_EFSBLOB_REGPATH = CERT_GROUP_POLICY_SYSTEM_STORE_REGPATH + u"\\EFS"
CERT_EFSBLOB_VALUE_NAME = u"EFSBlob"
CERT_PROT_ROOT_FLAGS_REGPATH = CERT_GROUP_POLICY_SYSTEM_STORE_REGPATH + u"\\Root\\ProtectedRoots"
CERT_PROT_ROOT_FLAGS_VALUE_NAME = u"Flags"
CERT_TRUST_PUB_SAFER_GROUP_POLICY_REGPATH = CERT_GROUP_POLICY_SYSTEM_STORE_REGPATH + u"\\TrustedPublisher\\Safer"
CERT_LOCAL_MACHINE_SYSTEM_STORE_REGPATH = u"Software\\Microsoft\\SystemCertificates"
CERT_TRUST_PUB_SAFER_LOCAL_MACHINE_REGPATH = CERT_LOCAL_MACHINE_SYSTEM_STORE_REGPATH + u"\\TrustedPublisher\\Safer"
CERT_TRUST_PUB_AUTHENTICODE_FLAGS_VALUE_NAME = u"AuthenticodeFlags"
CERT_OCM_SUBCOMPONENTS_LOCAL_MACHINE_REGPATH = u"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\OC Manager\\Subcomponents"
CERT_OCM_SUBCOMPONENTS_ROOT_AUTO_UPDATE_VALUE_NAME = u"RootAutoUpdate"
CERT_DISABLE_ROOT_AUTO_UPDATE_REGPATH = CERT_GROUP_POLICY_SYSTEM_STORE_REGPATH + u"\\AuthRoot"
CERT_DISABLE_ROOT_AUTO_UPDATE_VALUE_NAME = u"DisableRootAutoUpdate"
# Rebinds the name assigned at the top of this group to an equal value
# (concatenation of the local-machine path and "\AuthRoot\AutoUpdate"),
# matching the SDK header's own duplicate #define.
CERT_AUTH_ROOT_AUTO_UPDATE_LOCAL_MACHINE_REGPATH = CERT_LOCAL_MACHINE_SYSTEM_STORE_REGPATH + u"\\AuthRoot\\AutoUpdate"
CERT_REGISTRY_STORE_REMOTE_FLAG = 0x10000
CERT_REGISTRY_STORE_SERIALIZED_FLAG = 0x20000
CERT_REGISTRY_STORE_CLIENT_GPT_FLAG = (-2147483648)
CERT_REGISTRY_STORE_LM_GPT_FLAG = 0x01000000
CERT_REGISTRY_STORE_ROAMING_FLAG = 0x40000
CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG = 0x80000
CERT_IE_DIRTY_FLAGS_REGPATH = u"Software\\Microsoft\\Cryptography\\IEDirtyFlags"
# File/LDAP store flags and the installable store-provider callback
# indices used by CertOpenStore providers.
CERT_FILE_STORE_COMMIT_ENABLE_FLAG = 0x10000
CERT_LDAP_STORE_SIGN_FLAG = 0x10000
CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG = 0x20000
CERT_LDAP_STORE_OPENED_FLAG = 0x40000
CERT_LDAP_STORE_UNBIND_FLAG = 0x80000
CRYPT_OID_OPEN_STORE_PROV_FUNC = "CertDllOpenStoreProv"
CERT_STORE_PROV_EXTERNAL_FLAG = 0x1
CERT_STORE_PROV_DELETED_FLAG = 0x2
CERT_STORE_PROV_NO_PERSIST_FLAG = 0x4
CERT_STORE_PROV_SYSTEM_STORE_FLAG = 0x8
CERT_STORE_PROV_LM_SYSTEM_STORE_FLAG = 0x10
CERT_STORE_PROV_CLOSE_FUNC = 0
CERT_STORE_PROV_READ_CERT_FUNC = 1
CERT_STORE_PROV_WRITE_CERT_FUNC = 2
CERT_STORE_PROV_DELETE_CERT_FUNC = 3
CERT_STORE_PROV_SET_CERT_PROPERTY_FUNC = 4
CERT_STORE_PROV_READ_CRL_FUNC = 5
CERT_STORE_PROV_WRITE_CRL_FUNC = 6
CERT_STORE_PROV_DELETE_CRL_FUNC = 7
CERT_STORE_PROV_SET_CRL_PROPERTY_FUNC = 8
CERT_STORE_PROV_READ_CTL_FUNC = 9
CERT_STORE_PROV_WRITE_CTL_FUNC = 10
CERT_STORE_PROV_DELETE_CTL_FUNC = 11
CERT_STORE_PROV_SET_CTL_PROPERTY_FUNC = 12
CERT_STORE_PROV_CONTROL_FUNC = 13
CERT_STORE_PROV_FIND_CERT_FUNC = 14
CERT_STORE_PROV_FREE_FIND_CERT_FUNC = 15
CERT_STORE_PROV_GET_CERT_PROPERTY_FUNC = 16
CERT_STORE_PROV_FIND_CRL_FUNC = 17
CERT_STORE_PROV_FREE_FIND_CRL_FUNC = 18
CERT_STORE_PROV_GET_CRL_PROPERTY_FUNC = 19
CERT_STORE_PROV_FIND_CTL_FUNC = 20
CERT_STORE_PROV_FREE_FIND_CTL_FUNC = 21
CERT_STORE_PROV_GET_CTL_PROPERTY_FUNC = 22
CERT_STORE_PROV_WRITE_ADD_FLAG = 0x1
# CertSaveStore formats/destinations and CertCloseStore flags.
CERT_STORE_SAVE_AS_STORE = 1
CERT_STORE_SAVE_AS_PKCS7 = 2
CERT_STORE_SAVE_TO_FILE = 1
CERT_STORE_SAVE_TO_MEMORY = 2
CERT_STORE_SAVE_TO_FILENAME_A = 3
CERT_STORE_SAVE_TO_FILENAME_W = 4
CERT_STORE_SAVE_TO_FILENAME = CERT_STORE_SAVE_TO_FILENAME_W
CERT_CLOSE_STORE_FORCE_FLAG = 0x00000001
CERT_CLOSE_STORE_CHECK_FLAG = 0x00000002
# CertFindCertificateInStore: compare types live in the high word of the
# find type, so each CERT_FIND_* is a compare type shifted left 16 bits,
# optionally OR'd with an info flag.
CERT_COMPARE_MASK = 0xFFFF
CERT_COMPARE_SHIFT = 16
CERT_COMPARE_ANY = 0
CERT_COMPARE_SHA1_HASH = 1
CERT_COMPARE_NAME = 2
CERT_COMPARE_ATTR = 3
CERT_COMPARE_MD5_HASH = 4
CERT_COMPARE_PROPERTY = 5
CERT_COMPARE_PUBLIC_KEY = 6
CERT_COMPARE_HASH = CERT_COMPARE_SHA1_HASH
CERT_COMPARE_NAME_STR_A = 7
CERT_COMPARE_NAME_STR_W = 8
CERT_COMPARE_KEY_SPEC = 9
CERT_COMPARE_ENHKEY_USAGE = 10
CERT_COMPARE_CTL_USAGE = CERT_COMPARE_ENHKEY_USAGE
CERT_COMPARE_SUBJECT_CERT = 11
CERT_COMPARE_ISSUER_OF = 12
CERT_COMPARE_EXISTING = 13
CERT_COMPARE_SIGNATURE_HASH = 14
CERT_COMPARE_KEY_IDENTIFIER = 15
CERT_COMPARE_CERT_ID = 16
CERT_COMPARE_CROSS_CERT_DIST_POINTS = 17
CERT_COMPARE_PUBKEY_MD5_HASH = 18
CERT_FIND_ANY = (CERT_COMPARE_ANY << CERT_COMPARE_SHIFT)
CERT_FIND_SHA1_HASH = (CERT_COMPARE_SHA1_HASH << CERT_COMPARE_SHIFT)
CERT_FIND_MD5_HASH = (CERT_COMPARE_MD5_HASH << CERT_COMPARE_SHIFT)
CERT_FIND_SIGNATURE_HASH = (CERT_COMPARE_SIGNATURE_HASH << CERT_COMPARE_SHIFT)
CERT_FIND_KEY_IDENTIFIER = (CERT_COMPARE_KEY_IDENTIFIER << CERT_COMPARE_SHIFT)
CERT_FIND_HASH = CERT_FIND_SHA1_HASH
CERT_FIND_PROPERTY = (CERT_COMPARE_PROPERTY << CERT_COMPARE_SHIFT)
CERT_FIND_PUBLIC_KEY = (CERT_COMPARE_PUBLIC_KEY << CERT_COMPARE_SHIFT)
# NOTE(review): CERT_INFO_SUBJECT_FLAG / CERT_INFO_ISSUER_FLAG are
# presumably defined earlier in this module (not visible in this chunk).
CERT_FIND_SUBJECT_NAME = (CERT_COMPARE_NAME << CERT_COMPARE_SHIFT | \
CERT_INFO_SUBJECT_FLAG)
CERT_FIND_SUBJECT_ATTR = (CERT_COMPARE_ATTR << CERT_COMPARE_SHIFT | \
CERT_INFO_SUBJECT_FLAG)
CERT_FIND_ISSUER_NAME = (CERT_COMPARE_NAME << CERT_COMPARE_SHIFT | \
CERT_INFO_ISSUER_FLAG)
CERT_FIND_ISSUER_ATTR = (CERT_COMPARE_ATTR << CERT_COMPARE_SHIFT | \
CERT_INFO_ISSUER_FLAG)
CERT_FIND_SUBJECT_STR_A = (CERT_COMPARE_NAME_STR_A << CERT_COMPARE_SHIFT | \
CERT_INFO_SUBJECT_FLAG)
CERT_FIND_SUBJECT_STR_W = (CERT_COMPARE_NAME_STR_W << CERT_COMPARE_SHIFT | \
CERT_INFO_SUBJECT_FLAG)
CERT_FIND_SUBJECT_STR = CERT_FIND_SUBJECT_STR_W
CERT_FIND_ISSUER_STR_A = (CERT_COMPARE_NAME_STR_A << CERT_COMPARE_SHIFT | \
CERT_INFO_ISSUER_FLAG)
CERT_FIND_ISSUER_STR_W = (CERT_COMPARE_NAME_STR_W << CERT_COMPARE_SHIFT | \
CERT_INFO_ISSUER_FLAG)
CERT_FIND_ISSUER_STR = CERT_FIND_ISSUER_STR_W
CERT_FIND_KEY_SPEC = (CERT_COMPARE_KEY_SPEC << CERT_COMPARE_SHIFT)
CERT_FIND_ENHKEY_USAGE = (CERT_COMPARE_ENHKEY_USAGE << CERT_COMPARE_SHIFT)
CERT_FIND_CTL_USAGE = CERT_FIND_ENHKEY_USAGE
CERT_FIND_SUBJECT_CERT = (CERT_COMPARE_SUBJECT_CERT << CERT_COMPARE_SHIFT)
CERT_FIND_ISSUER_OF = (CERT_COMPARE_ISSUER_OF << CERT_COMPARE_SHIFT)
CERT_FIND_EXISTING = (CERT_COMPARE_EXISTING << CERT_COMPARE_SHIFT)
CERT_FIND_CERT_ID = (CERT_COMPARE_CERT_ID << CERT_COMPARE_SHIFT)
CERT_FIND_CROSS_CERT_DIST_POINTS = \
(CERT_COMPARE_CROSS_CERT_DIST_POINTS << CERT_COMPARE_SHIFT)
CERT_FIND_PUBKEY_MD5_HASH = \
(CERT_COMPARE_PUBKEY_MD5_HASH << CERT_COMPARE_SHIFT)
CERT_FIND_OPTIONAL_ENHKEY_USAGE_FLAG = 0x1
CERT_FIND_EXT_ONLY_ENHKEY_USAGE_FLAG = 0x2
CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG = 0x4
CERT_FIND_NO_ENHKEY_USAGE_FLAG = 0x8
CERT_FIND_OR_ENHKEY_USAGE_FLAG = 0x10
CERT_FIND_VALID_ENHKEY_USAGE_FLAG = 0x20
CERT_FIND_OPTIONAL_CTL_USAGE_FLAG = CERT_FIND_OPTIONAL_ENHKEY_USAGE_FLAG
CERT_FIND_EXT_ONLY_CTL_USAGE_FLAG = \
CERT_FIND_EXT_ONLY_ENHKEY_USAGE_FLAG
CERT_FIND_PROP_ONLY_CTL_USAGE_FLAG = \
CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG
CERT_FIND_NO_CTL_USAGE_FLAG = CERT_FIND_NO_ENHKEY_USAGE_FLAG
CERT_FIND_OR_CTL_USAGE_FLAG = CERT_FIND_OR_ENHKEY_USAGE_FLAG
CERT_FIND_VALID_CTL_USAGE_FLAG = CERT_FIND_VALID_ENHKEY_USAGE_FLAG
CERT_SET_PROPERTY_IGNORE_PERSIST_ERROR_FLAG = (-2147483648)
CERT_SET_PROPERTY_INHIBIT_PERSIST_FLAG = 0x40000000
CTL_ENTRY_FROM_PROP_CHAIN_FLAG = 0x1
# CRL/CTL find types, store add dispositions and store control actions.
CRL_FIND_ANY = 0
CRL_FIND_ISSUED_BY = 1
CRL_FIND_EXISTING = 2
CRL_FIND_ISSUED_FOR = 3
CRL_FIND_ISSUED_BY_AKI_FLAG = 0x1
CRL_FIND_ISSUED_BY_SIGNATURE_FLAG = 0x2
CRL_FIND_ISSUED_BY_DELTA_FLAG = 0x4
CRL_FIND_ISSUED_BY_BASE_FLAG = 0x8
CERT_STORE_ADD_NEW = 1
CERT_STORE_ADD_USE_EXISTING = 2
CERT_STORE_ADD_REPLACE_EXISTING = 3
CERT_STORE_ADD_ALWAYS = 4
CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES = 5
CERT_STORE_ADD_NEWER = 6
CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES = 7
CERT_STORE_CERTIFICATE_CONTEXT = 1
CERT_STORE_CRL_CONTEXT = 2
CERT_STORE_CTL_CONTEXT = 3
CERT_STORE_ALL_CONTEXT_FLAG = -1
CERT_STORE_CERTIFICATE_CONTEXT_FLAG = \
(1 << CERT_STORE_CERTIFICATE_CONTEXT)
CERT_STORE_CRL_CONTEXT_FLAG = \
(1 << CERT_STORE_CRL_CONTEXT)
CERT_STORE_CTL_CONTEXT_FLAG = \
(1 << CERT_STORE_CTL_CONTEXT)
CTL_ANY_SUBJECT_TYPE = 1
CTL_CERT_SUBJECT_TYPE = 2
CTL_FIND_ANY = 0
CTL_FIND_SHA1_HASH = 1
CTL_FIND_MD5_HASH = 2
CTL_FIND_USAGE = 3
CTL_FIND_SUBJECT = 4
CTL_FIND_EXISTING = 5
CTL_FIND_NO_LIST_ID_CBDATA = (-1)
CTL_FIND_SAME_USAGE_FLAG = 0x1
CERT_STORE_CTRL_RESYNC = 1
CERT_STORE_CTRL_NOTIFY_CHANGE = 2
CERT_STORE_CTRL_COMMIT = 3
CERT_STORE_CTRL_AUTO_RESYNC = 4
CERT_STORE_CTRL_CANCEL_NOTIFY = 5
CERT_STORE_CTRL_INHIBIT_DUPLICATE_HANDLE_FLAG = 0x1
CERT_STORE_CTRL_COMMIT_FORCE_FLAG = 0x1
CERT_STORE_CTRL_COMMIT_CLEAR_FLAG = 0x2
CERT_STORE_LOCALIZED_NAME_PROP_ID = 0x1000
CERT_CREATE_CONTEXT_NOCOPY_FLAG = 0x1
CERT_CREATE_CONTEXT_SORTED_FLAG = 0x2
CERT_CREATE_CONTEXT_NO_HCRYPTMSG_FLAG = 0x4
CERT_CREATE_CONTEXT_NO_ENTRY_FLAG = 0x8
CERT_PHYSICAL_STORE_ADD_ENABLE_FLAG = 0x1
CERT_PHYSICAL_STORE_OPEN_DISABLE_FLAG = 0x2
CERT_PHYSICAL_STORE_REMOTE_OPEN_DISABLE_FLAG = 0x4
CERT_PHYSICAL_STORE_INSERT_COMPUTER_NAME_ENABLE_FLAG = 0x8
CERT_PHYSICAL_STORE_PREDEFINED_ENUM_FLAG = 0x1
# Names of physical cert stores
CERT_PHYSICAL_STORE_DEFAULT_NAME = u".Default"
CERT_PHYSICAL_STORE_GROUP_POLICY_NAME = u".GroupPolicy"
CERT_PHYSICAL_STORE_LOCAL_MACHINE_NAME = u".LocalMachine"
CERT_PHYSICAL_STORE_DS_USER_CERTIFICATE_NAME = u".UserCertificate"
CERT_PHYSICAL_STORE_LOCAL_MACHINE_GROUP_POLICY_NAME = u".LocalMachineGroupPolicy"
CERT_PHYSICAL_STORE_ENTERPRISE_NAME = u".Enterprise"
CERT_PHYSICAL_STORE_AUTH_ROOT_NAME = u".AuthRoot"
CERT_PHYSICAL_STORE_SMART_CARD_NAME = u".SmartCard"
# Installable system/physical store provider function names.
CRYPT_OID_OPEN_SYSTEM_STORE_PROV_FUNC = "CertDllOpenSystemStoreProv"
CRYPT_OID_REGISTER_SYSTEM_STORE_FUNC = "CertDllRegisterSystemStore"
CRYPT_OID_UNREGISTER_SYSTEM_STORE_FUNC = "CertDllUnregisterSystemStore"
CRYPT_OID_ENUM_SYSTEM_STORE_FUNC = "CertDllEnumSystemStore"
CRYPT_OID_REGISTER_PHYSICAL_STORE_FUNC = "CertDllRegisterPhysicalStore"
CRYPT_OID_UNREGISTER_PHYSICAL_STORE_FUNC = "CertDllUnregisterPhysicalStore"
CRYPT_OID_ENUM_PHYSICAL_STORE_FUNC = "CertDllEnumPhysicalStore"
CRYPT_OID_SYSTEM_STORE_LOCATION_VALUE_NAME = u"SystemStoreLocation"
CMSG_TRUSTED_SIGNER_FLAG = 0x1
CMSG_SIGNER_ONLY_FLAG = 0x2
CMSG_USE_SIGNER_INDEX_FLAG = 0x4
CMSG_CMS_ENCAPSULATED_CTL_FLAG = 0x00008000
CMSG_ENCODE_SORTED_CTL_FLAG = 0x1
CMSG_ENCODE_HASHED_SUBJECT_IDENTIFIER_FLAG = 0x2
CERT_VERIFY_INHIBIT_CTL_UPDATE_FLAG = 0x1
CERT_VERIFY_TRUSTED_SIGNERS_FLAG = 0x2
CERT_VERIFY_NO_TIME_CHECK_FLAG = 0x4
CERT_VERIFY_ALLOW_MORE_USAGE_FLAG = 0x8
CERT_VERIFY_UPDATED_CTL_FLAG = 0x1
CERT_CONTEXT_REVOCATION_TYPE = 1
CERT_VERIFY_REV_CHAIN_FLAG = 0x00000001
CERT_VERIFY_CACHE_ONLY_BASED_REVOCATION = 0x00000002
CERT_VERIFY_REV_ACCUMULATIVE_TIMEOUT_FLAG = 0x00000004
CERT_UNICODE_IS_RDN_ATTRS_FLAG = 0x1
CERT_CASE_INSENSITIVE_IS_RDN_ATTRS_FLAG = 0x2
CRYPT_VERIFY_CERT_SIGN_SUBJECT_BLOB = 1
CRYPT_VERIFY_CERT_SIGN_SUBJECT_CERT = 2
CRYPT_VERIFY_CERT_SIGN_SUBJECT_CRL = 3
CRYPT_VERIFY_CERT_SIGN_ISSUER_PUBKEY = 1
CRYPT_VERIFY_CERT_SIGN_ISSUER_CERT = 2
CRYPT_VERIFY_CERT_SIGN_ISSUER_CHAIN = 3
CRYPT_VERIFY_CERT_SIGN_ISSUER_NULL = 4
CRYPT_DEFAULT_CONTEXT_AUTO_RELEASE_FLAG = 0x00000001
CRYPT_DEFAULT_CONTEXT_PROCESS_FLAG = 0x00000002
CRYPT_DEFAULT_CONTEXT_CERT_SIGN_OID = 1
CRYPT_DEFAULT_CONTEXT_MULTI_CERT_SIGN_OID = 2
CRYPT_OID_EXPORT_PUBLIC_KEY_INFO_FUNC = "CryptDllExportPublicKeyInfoEx"
CRYPT_OID_IMPORT_PUBLIC_KEY_INFO_FUNC = "CryptDllImportPublicKeyInfoEx"
CRYPT_ACQUIRE_CACHE_FLAG = 0x00000001
CRYPT_ACQUIRE_USE_PROV_INFO_FLAG = 0x00000002
CRYPT_ACQUIRE_COMPARE_KEY_FLAG = 0x00000004
CRYPT_ACQUIRE_SILENT_FLAG = 0x00000040
CRYPT_FIND_USER_KEYSET_FLAG = 0x00000001
CRYPT_FIND_MACHINE_KEYSET_FLAG = 0x00000002
CRYPT_FIND_SILENT_KEYSET_FLAG = 0x00000040
CRYPT_OID_IMPORT_PRIVATE_KEY_INFO_FUNC = "CryptDllImportPrivateKeyInfoEx"
CRYPT_OID_EXPORT_PRIVATE_KEY_INFO_FUNC = "CryptDllExportPrivateKeyInfoEx"
# NOTE(review): CRYPT_DELETEKEYSET is presumably defined earlier in this
# module (not visible in this chunk); this is an alias for it.
CRYPT_DELETE_KEYSET = CRYPT_DELETEKEYSET
# CertNameToStr / CertGetNameString string types and flags.
CERT_SIMPLE_NAME_STR = 1
CERT_OID_NAME_STR = 2
CERT_X500_NAME_STR = 3
CERT_NAME_STR_SEMICOLON_FLAG = 0x40000000
CERT_NAME_STR_NO_PLUS_FLAG = 0x20000000
CERT_NAME_STR_NO_QUOTING_FLAG = 0x10000000
CERT_NAME_STR_CRLF_FLAG = 0x08000000
CERT_NAME_STR_COMMA_FLAG = 0x04000000
CERT_NAME_STR_REVERSE_FLAG = 0x02000000
CERT_NAME_STR_DISABLE_IE4_UTF8_FLAG = 0x00010000
CERT_NAME_STR_ENABLE_T61_UNICODE_FLAG = 0x00020000
CERT_NAME_STR_ENABLE_UTF8_UNICODE_FLAG = 0x00040000
CERT_NAME_EMAIL_TYPE = 1
CERT_NAME_RDN_TYPE = 2
CERT_NAME_ATTR_TYPE = 3
CERT_NAME_SIMPLE_DISPLAY_TYPE = 4
CERT_NAME_FRIENDLY_DISPLAY_TYPE = 5
CERT_NAME_DNS_TYPE = 6
CERT_NAME_URL_TYPE = 7
CERT_NAME_UPN_TYPE = 8
CERT_NAME_ISSUER_FLAG = 0x1
CERT_NAME_DISABLE_IE4_UTF8_FLAG = 0x00010000
CRYPT_MESSAGE_BARE_CONTENT_OUT_FLAG = 0x00000001
CRYPT_MESSAGE_ENCAPSULATED_CONTENT_OUT_FLAG = 0x00000002
CRYPT_MESSAGE_KEYID_SIGNER_FLAG = 0x00000004
CRYPT_MESSAGE_SILENT_KEYSET_FLAG = 0x00000040
CRYPT_MESSAGE_KEYID_RECIPIENT_FLAG = 0x4
# CryptQueryObject content/format types; each flag is 1 shifted by the
# corresponding content/format type number.
CERT_QUERY_OBJECT_FILE = 0x00000001
CERT_QUERY_OBJECT_BLOB = 0x00000002
CERT_QUERY_CONTENT_CERT = 1
CERT_QUERY_CONTENT_CTL = 2
CERT_QUERY_CONTENT_CRL = 3
CERT_QUERY_CONTENT_SERIALIZED_STORE = 4
CERT_QUERY_CONTENT_SERIALIZED_CERT = 5
CERT_QUERY_CONTENT_SERIALIZED_CTL = 6
CERT_QUERY_CONTENT_SERIALIZED_CRL = 7
CERT_QUERY_CONTENT_PKCS7_SIGNED = 8
CERT_QUERY_CONTENT_PKCS7_UNSIGNED = 9
CERT_QUERY_CONTENT_PKCS7_SIGNED_EMBED = 10
CERT_QUERY_CONTENT_PKCS10 = 11
CERT_QUERY_CONTENT_PFX = 12
CERT_QUERY_CONTENT_CERT_PAIR = 13
CERT_QUERY_CONTENT_FLAG_CERT = \
( 1 << CERT_QUERY_CONTENT_CERT)
CERT_QUERY_CONTENT_FLAG_CTL = \
( 1 << CERT_QUERY_CONTENT_CTL)
CERT_QUERY_CONTENT_FLAG_CRL = \
( 1 << CERT_QUERY_CONTENT_CRL)
CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE = \
( 1 << CERT_QUERY_CONTENT_SERIALIZED_STORE)
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT = \
( 1 << CERT_QUERY_CONTENT_SERIALIZED_CERT)
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CTL = \
( 1 << CERT_QUERY_CONTENT_SERIALIZED_CTL)
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CRL = \
( 1 << CERT_QUERY_CONTENT_SERIALIZED_CRL)
CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED = \
( 1 << CERT_QUERY_CONTENT_PKCS7_SIGNED)
CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED = \
( 1 << CERT_QUERY_CONTENT_PKCS7_UNSIGNED)
CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED = \
( 1 << CERT_QUERY_CONTENT_PKCS7_SIGNED_EMBED)
CERT_QUERY_CONTENT_FLAG_PKCS10 = \
( 1 << CERT_QUERY_CONTENT_PKCS10)
CERT_QUERY_CONTENT_FLAG_PFX = \
( 1 << CERT_QUERY_CONTENT_PFX)
CERT_QUERY_CONTENT_FLAG_CERT_PAIR = \
( 1 << CERT_QUERY_CONTENT_CERT_PAIR)
CERT_QUERY_CONTENT_FLAG_ALL = \
CERT_QUERY_CONTENT_FLAG_CERT | \
CERT_QUERY_CONTENT_FLAG_CTL | \
CERT_QUERY_CONTENT_FLAG_CRL | \
CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE | \
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT | \
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CTL | \
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CRL | \
CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED | \
CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED | \
CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED | \
CERT_QUERY_CONTENT_FLAG_PKCS10 | \
CERT_QUERY_CONTENT_FLAG_PFX | \
CERT_QUERY_CONTENT_FLAG_CERT_PAIR
CERT_QUERY_FORMAT_BINARY = 1
CERT_QUERY_FORMAT_BASE64_ENCODED = 2
CERT_QUERY_FORMAT_ASN_ASCII_HEX_ENCODED = 3
CERT_QUERY_FORMAT_FLAG_BINARY = \
( 1 << CERT_QUERY_FORMAT_BINARY)
CERT_QUERY_FORMAT_FLAG_BASE64_ENCODED = \
( 1 << CERT_QUERY_FORMAT_BASE64_ENCODED)
CERT_QUERY_FORMAT_FLAG_ASN_ASCII_HEX_ENCODED = \
( 1 << CERT_QUERY_FORMAT_ASN_ASCII_HEX_ENCODED)
CERT_QUERY_FORMAT_FLAG_ALL = \
CERT_QUERY_FORMAT_FLAG_BINARY | \
CERT_QUERY_FORMAT_FLAG_BASE64_ENCODED | \
CERT_QUERY_FORMAT_FLAG_ASN_ASCII_HEX_ENCODED
# Remote object retrieval (CryptRetrieveObjectByUrl and friends).
CREDENTIAL_OID_PASSWORD_CREDENTIALS_A = 1
CREDENTIAL_OID_PASSWORD_CREDENTIALS_W = 2
CREDENTIAL_OID_PASSWORD_CREDENTIALS = CREDENTIAL_OID_PASSWORD_CREDENTIALS_W
SCHEME_OID_RETRIEVE_ENCODED_OBJECT_FUNC = "SchemeDllRetrieveEncodedObject"
SCHEME_OID_RETRIEVE_ENCODED_OBJECTW_FUNC = "SchemeDllRetrieveEncodedObjectW"
CONTEXT_OID_CREATE_OBJECT_CONTEXT_FUNC = "ContextDllCreateObjectContext"
CONTEXT_OID_CERTIFICATE = 1
CONTEXT_OID_CRL = 2
CONTEXT_OID_CTL = 3
CONTEXT_OID_PKCS7 = 4
CONTEXT_OID_CAPI2_ANY = 5
CONTEXT_OID_OCSP_RESP = 6
CRYPT_RETRIEVE_MULTIPLE_OBJECTS = 0x00000001
CRYPT_CACHE_ONLY_RETRIEVAL = 0x00000002
CRYPT_WIRE_ONLY_RETRIEVAL = 0x00000004
CRYPT_DONT_CACHE_RESULT = 0x00000008
CRYPT_ASYNC_RETRIEVAL = 0x00000010
CRYPT_STICKY_CACHE_RETRIEVAL = 0x00001000
CRYPT_LDAP_SCOPE_BASE_ONLY_RETRIEVAL = 0x00002000
CRYPT_OFFLINE_CHECK_RETRIEVAL = 0x00004000
CRYPT_LDAP_INSERT_ENTRY_ATTRIBUTE = 0x00008000
CRYPT_LDAP_SIGN_RETRIEVAL = 0x00010000
CRYPT_NO_AUTH_RETRIEVAL = 0x00020000
CRYPT_LDAP_AREC_EXCLUSIVE_RETRIEVAL = 0x00040000
CRYPT_AIA_RETRIEVAL = 0x00080000
CRYPT_VERIFY_CONTEXT_SIGNATURE = 0x00000020
CRYPT_VERIFY_DATA_HASH = 0x00000040
CRYPT_KEEP_TIME_VALID = 0x00000080
CRYPT_DONT_VERIFY_SIGNATURE = 0x00000100
CRYPT_DONT_CHECK_TIME_VALIDITY = 0x00000200
CRYPT_CHECK_FRESHNESS_TIME_VALIDITY = 0x00000400
CRYPT_ACCUMULATIVE_TIMEOUT = 0x00000800
CRYPT_PARAM_ASYNC_RETRIEVAL_COMPLETION = 1
CRYPT_PARAM_CANCEL_ASYNC_RETRIEVAL = 2
CRYPT_GET_URL_FROM_PROPERTY = 0x00000001
CRYPT_GET_URL_FROM_EXTENSION = 0x00000002
CRYPT_GET_URL_FROM_UNAUTH_ATTRIBUTE = 0x00000004
CRYPT_GET_URL_FROM_AUTH_ATTRIBUTE = 0x00000008
URL_OID_GET_OBJECT_URL_FUNC = "UrlDllGetObjectUrl"
TIME_VALID_OID_GET_OBJECT_FUNC = "TimeValidDllGetObject"
TIME_VALID_OID_FLUSH_OBJECT_FUNC = "TimeValidDllFlushObject"
TIME_VALID_OID_GET_CTL = 1
TIME_VALID_OID_GET_CRL = 2
TIME_VALID_OID_GET_CRL_FROM_CERT = 3
TIME_VALID_OID_GET_FRESHEST_CRL_FROM_CERT = 4
TIME_VALID_OID_GET_FRESHEST_CRL_FROM_CRL = 5
TIME_VALID_OID_FLUSH_CTL = 1
TIME_VALID_OID_FLUSH_CRL = 2
TIME_VALID_OID_FLUSH_CRL_FROM_CERT = 3
TIME_VALID_OID_FLUSH_FRESHEST_CRL_FROM_CERT = 4
TIME_VALID_OID_FLUSH_FRESHEST_CRL_FROM_CRL = 5
# CryptProtectData / CryptProtectMemory (DPAPI) flags.
CRYPTPROTECT_PROMPT_ON_UNPROTECT = 0x1
CRYPTPROTECT_PROMPT_ON_PROTECT = 0x2
CRYPTPROTECT_PROMPT_RESERVED = 0x04
CRYPTPROTECT_PROMPT_STRONG = 0x08
CRYPTPROTECT_PROMPT_REQUIRE_STRONG = 0x10
CRYPTPROTECT_UI_FORBIDDEN = 0x1
CRYPTPROTECT_LOCAL_MACHINE = 0x4
CRYPTPROTECT_CRED_SYNC = 0x8
CRYPTPROTECT_AUDIT = 0x10
CRYPTPROTECT_NO_RECOVERY = 0x20
CRYPTPROTECT_VERIFY_PROTECTION = 0x40
CRYPTPROTECT_CRED_REGENERATE = 0x80
CRYPTPROTECT_FIRST_RESERVED_FLAGVAL = 0x0FFFFFFF
CRYPTPROTECT_LAST_RESERVED_FLAGVAL = (-1)
CRYPTPROTECTMEMORY_BLOCK_SIZE = 16
CRYPTPROTECTMEMORY_SAME_PROCESS = 0x00
CRYPTPROTECTMEMORY_CROSS_PROCESS = 0x01
CRYPTPROTECTMEMORY_SAME_LOGON = 0x02
CERT_CREATE_SELFSIGN_NO_SIGN = 1
CERT_CREATE_SELFSIGN_NO_KEY_INFO = 2
CRYPT_KEYID_MACHINE_FLAG = 0x00000020
CRYPT_KEYID_ALLOC_FLAG = 0x00008000
CRYPT_KEYID_DELETE_FLAG = 0x00000010
CRYPT_KEYID_SET_NEW_FLAG = 0x00002000
CERT_CHAIN_MAX_AIA_URL_COUNT_IN_CERT_DEFAULT = 5
CERT_CHAIN_MAX_AIA_URL_RETRIEVAL_COUNT_PER_CHAIN_DEFAULT = 10
CERT_CHAIN_MAX_AIA_URL_RETRIEVAL_BYTE_COUNT_DEFAULT = 100000
CERT_CHAIN_MAX_AIA_URL_RETRIEVAL_CERT_COUNT_DEFAULT = 10
CERT_CHAIN_CACHE_END_CERT = 0x00000001
CERT_CHAIN_THREAD_STORE_SYNC = 0x00000002
CERT_CHAIN_CACHE_ONLY_URL_RETRIEVAL = 0x00000004
CERT_CHAIN_USE_LOCAL_MACHINE_STORE = 0x00000008
CERT_CHAIN_ENABLE_CACHE_AUTO_UPDATE = 0x00000010
CERT_CHAIN_ENABLE_SHARE_STORE = 0x00000020
CERT_TRUST_NO_ERROR = 0x00000000
CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001
CERT_TRUST_IS_NOT_TIME_NESTED = 0x00000002
CERT_TRUST_IS_REVOKED = 0x00000004
CERT_TRUST_IS_NOT_SIGNATURE_VALID = 0x00000008
CERT_TRUST_IS_NOT_VALID_FOR_USAGE = 0x00000010
CERT_TRUST_IS_UNTRUSTED_ROOT = 0x00000020
CERT_TRUST_REVOCATION_STATUS_UNKNOWN = 0x00000040
CERT_TRUST_IS_CYCLIC = 0x00000080
CERT_TRUST_INVALID_EXTENSION = 0x00000100
CERT_TRUST_INVALID_POLICY_CONSTRAINTS = 0x00000200
CERT_TRUST_INVALID_BASIC_CONSTRAINTS = 0x00000400
CERT_TRUST_INVALID_NAME_CONSTRAINTS = 0x00000800
CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000
CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000
CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000
CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000
CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000
CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000
CERT_TRUST_IS_PARTIAL_CHAIN = 0x00010000
CERT_TRUST_CTL_IS_NOT_TIME_VALID = 0x00020000
CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID = 0x00040000
CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE = 0x00080000
CERT_TRUST_HAS_EXACT_MATCH_ISSUER = 0x00000001
CERT_TRUST_HAS_KEY_MATCH_ISSUER = 0x00000002
CERT_TRUST_HAS_NAME_MATCH_ISSUER = 0x00000004
CERT_TRUST_IS_SELF_SIGNED = 0x00000008
CERT_TRUST_HAS_PREFERRED_ISSUER = 0x00000100
CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY = 0x00000200
CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS = 0x00000400
CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000
USAGE_MATCH_TYPE_AND = 0x00000000
USAGE_MATCH_TYPE_OR = 0x00000001
CERT_CHAIN_REVOCATION_CHECK_END_CERT = 0x10000000
CERT_CHAIN_REVOCATION_CHECK_CHAIN = 0x20000000
CERT_CHAIN_REVOCATION_CHECK_CHAIN_EXCLUDE_ROOT = 0x40000000
CERT_CHAIN_REVOCATION_CHECK_CACHE_ONLY = (-2147483648)
CERT_CHAIN_REVOCATION_ACCUMULATIVE_TIMEOUT = 0x08000000
CERT_CHAIN_DISABLE_PASS1_QUALITY_FILTERING = 0x00000040
CERT_CHAIN_RETURN_LOWER_QUALITY_CONTEXTS = 0x00000080
CERT_CHAIN_DISABLE_AUTH_ROOT_AUTO_UPDATE = 0x00000100
CERT_CHAIN_TIMESTAMP_TIME = 0x00000200
REVOCATION_OID_CRL_REVOCATION = 1
CERT_CHAIN_FIND_BY_ISSUER = 1
CERT_CHAIN_FIND_BY_ISSUER_COMPARE_KEY_FLAG = 0x0001
CERT_CHAIN_FIND_BY_ISSUER_COMPLEX_CHAIN_FLAG = 0x0002
CERT_CHAIN_FIND_BY_ISSUER_CACHE_ONLY_URL_FLAG = 0x0004
CERT_CHAIN_FIND_BY_ISSUER_LOCAL_MACHINE_FLAG = 0x0008
CERT_CHAIN_FIND_BY_ISSUER_NO_KEY_FLAG = 0x4000
CERT_CHAIN_FIND_BY_ISSUER_CACHE_ONLY_FLAG = 0x8000
CERT_CHAIN_POLICY_IGNORE_NOT_TIME_VALID_FLAG = 0x00000001
CERT_CHAIN_POLICY_IGNORE_CTL_NOT_TIME_VALID_FLAG = 0x00000002
CERT_CHAIN_POLICY_IGNORE_NOT_TIME_NESTED_FLAG = 0x00000004
CERT_CHAIN_POLICY_IGNORE_INVALID_BASIC_CONSTRAINTS_FLAG = 0x00000008
CERT_CHAIN_POLICY_IGNORE_ALL_NOT_TIME_VALID_FLAGS = ( \
CERT_CHAIN_POLICY_IGNORE_NOT_TIME_VALID_FLAG | \
CERT_CHAIN_POLICY_IGNORE_CTL_NOT_TIME_VALID_FLAG | \
CERT_CHAIN_POLICY_IGNORE_NOT_TIME_NESTED_FLAG \
)
CERT_CHAIN_POLICY_ALLOW_UNKNOWN_CA_FLAG = 0x00000010
CERT_CHAIN_POLICY_IGNORE_WRONG_USAGE_FLAG = 0x00000020
CERT_CHAIN_POLICY_IGNORE_INVALID_NAME_FLAG = 0x00000040
CERT_CHAIN_POLICY_IGNORE_INVALID_POLICY_FLAG = 0x00000080
CERT_CHAIN_POLICY_IGNORE_END_REV_UNKNOWN_FLAG = 0x00000100
CERT_CHAIN_POLICY_IGNORE_CTL_SIGNER_REV_UNKNOWN_FLAG = 0x00000200
CERT_CHAIN_POLICY_IGNORE_CA_REV_UNKNOWN_FLAG = 0x00000400
CERT_CHAIN_POLICY_IGNORE_ROOT_REV_UNKNOWN_FLAG = 0x00000800
CERT_CHAIN_POLICY_IGNORE_ALL_REV_UNKNOWN_FLAGS = ( \
CERT_CHAIN_POLICY_IGNORE_END_REV_UNKNOWN_FLAG | \
CERT_CHAIN_POLICY_IGNORE_CTL_SIGNER_REV_UNKNOWN_FLAG | \
CERT_CHAIN_POLICY_IGNORE_CA_REV_UNKNOWN_FLAG | \
CERT_CHAIN_POLICY_IGNORE_ROOT_REV_UNKNOWN_FLAG \
)
CERT_CHAIN_POLICY_ALLOW_TESTROOT_FLAG = 0x00008000
CERT_CHAIN_POLICY_TRUST_TESTROOT_FLAG = 0x00004000
CRYPT_OID_VERIFY_CERTIFICATE_CHAIN_POLICY_FUNC = \
"CertDllVerifyCertificateChainPolicy"
AUTHTYPE_CLIENT = 1
AUTHTYPE_SERVER = 2
BASIC_CONSTRAINTS_CERT_CHAIN_POLICY_CA_FLAG = (-2147483648)
BASIC_CONSTRAINTS_CERT_CHAIN_POLICY_END_ENTITY_FLAG = 0x40000000
MICROSOFT_ROOT_CERT_CHAIN_POLICY_ENABLE_TEST_ROOT_FLAG = 0x00010000
CRYPT_STRING_BASE64HEADER = 0x00000000
CRYPT_STRING_BASE64 = 0x00000001
CRYPT_STRING_BINARY = 0x00000002
CRYPT_STRING_BASE64REQUESTHEADER = 0x00000003
CRYPT_STRING_HEX = 0x00000004
CRYPT_STRING_HEXASCII = 0x00000005
CRYPT_STRING_BASE64_ANY = 0x00000006
CRYPT_STRING_ANY = 0x00000007
CRYPT_STRING_HEX_ANY = 0x00000008
CRYPT_STRING_BASE64X509CRLHEADER = 0x00000009
CRYPT_STRING_HEXADDR = 0x0000000a
CRYPT_STRING_HEXASCIIADDR = 0x0000000b
CRYPT_STRING_NOCR = (-2147483648)
CRYPT_USER_KEYSET = 0x00001000
PKCS12_IMPORT_RESERVED_MASK = (-65536)
REPORT_NO_PRIVATE_KEY = 0x0001
REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY = 0x0002
EXPORT_PRIVATE_KEYS = 0x0004
PKCS12_EXPORT_RESERVED_MASK = (-65536)
# Certificate store provider types used with CertOpenStore
CERT_STORE_PROV_MSG = 1
CERT_STORE_PROV_MEMORY = 2
CERT_STORE_PROV_FILE = 3
CERT_STORE_PROV_REG = 4
CERT_STORE_PROV_PKCS7 = 5
CERT_STORE_PROV_SERIALIZED = 6
CERT_STORE_PROV_FILENAME = 8
CERT_STORE_PROV_SYSTEM = 10
CERT_STORE_PROV_COLLECTION = 11
CERT_STORE_PROV_SYSTEM_REGISTRY = 13
CERT_STORE_PROV_PHYSICAL = 14
CERT_STORE_PROV_SMART_CARD = 15
CERT_STORE_PROV_LDAP = 16
URL_OID_CERTIFICATE_ISSUER = 1
URL_OID_CERTIFICATE_CRL_DIST_POINT = 2
URL_OID_CTL_ISSUER = 3
URL_OID_CTL_NEXT_UPDATE = 4
URL_OID_CRL_ISSUER = 5
URL_OID_CERTIFICATE_FRESHEST_CRL = 6
URL_OID_CRL_FRESHEST_CRL = 7
URL_OID_CROSS_CERT_DIST_POINT = 8
URL_OID_CERTIFICATE_OCSP = 9
URL_OID_CERTIFICATE_OCSP_AND_CRL_DIST_POINT = 10
URL_OID_CERTIFICATE_CRL_DIST_POINT_AND_OCSP = 11
URL_OID_CROSS_CERT_SUBJECT_INFO_ACCESS = 12
URL_OID_CERTIFICATE_ONLY_OCSP = 13
| mit |
dot-bob/Marlin-Duplicator-6 | buildroot/share/PlatformIO/scripts/jgaurora_a5s_a1_with_bootloader.py | 10 | 1915 | import os
# PlatformIO extra script: shift the vector table and linker layout so the
# firmware is built to run above the JGAurora A5S/A1 bootloader at 0x0800A000.
Import("env")
# Relocate firmware from 0x08000000 to 0x0800A000
env['CPPDEFINES'].remove(("VECT_TAB_ADDR", "0x8000000"))
#alternatively, for STSTM <=5.1.0 use line below
#env['CPPDEFINES'].remove(("VECT_TAB_ADDR", 134217728))
env['CPPDEFINES'].append(("VECT_TAB_ADDR", "0x0800A000"))
# Board-specific linker script that places the code after the bootloader region.
custom_ld_script = os.path.abspath("buildroot/share/PlatformIO/ldscripts/jgaurora_a5s_a1.ld")
# Patch the linker command line: the script may be given either fused as
# "-Wl,-T<script>" or as a separate "-T" flag followed by the script path.
for i, flag in enumerate(env["LINKFLAGS"]):
    if "-Wl,-T" in flag:
        env["LINKFLAGS"][i] = "-Wl,-T" + custom_ld_script
    elif flag == "-T":
        env["LINKFLAGS"][i + 1] = custom_ld_script
#append ${PROGNAME}.bin firmware after bootloader and save it as 'jgaurora_firmware.bin'
def addboot(source, target, env):
    """SCons post-action: prepend the JGAurora bootloader to the built firmware.

    Produces two files next to the build output:

    * ``firmware_with_bootloader.bin`` -- bootloader followed by the firmware,
      for flashing the whole image at 0x08000000.
    * ``firmware_for_sd_upload.bin`` -- the bare firmware, renamed for
      SD-card upload (the original ``${PROGNAME}.bin`` is consumed by the
      rename).

    :param source: SCons source nodes (unused, required by the action API).
    :param target: SCons target nodes; ``target[0]`` is the built .bin file.
    :param env: SCons construction environment (unused).
    """
    firmware_path = target[0].path
    bootloader_path = "buildroot/share/PlatformIO/scripts/jgaurora_bootloader.bin"
    combined_path = os.path.join(target[0].dir.path, 'firmware_with_bootloader.bin')
    sd_upload_path = os.path.join(target[0].dir.path, 'firmware_for_sd_upload.bin')
    # Concatenate bootloader + firmware with whole-file reads instead of the
    # previous one-byte-at-a-time loop; 'with' guarantees the handles close
    # even if a read fails part way through.
    with open(bootloader_path, "rb") as bootloader, \
         open(firmware_path, "rb") as firmware:
        image = bootloader.read() + firmware.read()
    if os.path.exists(combined_path):
        os.remove(combined_path)
    with open(combined_path, "wb") as combined:
        combined.write(image)
    # Keep the bare firmware under the name the SD-card bootloader expects.
    if os.path.exists(sd_upload_path):
        os.remove(sd_upload_path)
    os.rename(firmware_path, sd_upload_path)
# Run after the .bin is generated so the bootloader can be prepended.
# (Removed the stray C-style trailing semicolon.)
env.AddPostAction("$BUILD_DIR/${PROGNAME}.bin", addboot)
| gpl-3.0 |
tttthemanCorp/CardmeleonAppEngine | django/contrib/flatpages/tests/middleware.py | 152 | 3308 | import os
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
class FlatpageMiddlewareTests(TestCase):
    """Exercise django.contrib.flatpages both through its explicit view and
    through the fallback middleware, including login-protected pages."""
    fixtures = ['sample_flatpages']
    urls = 'django.contrib.flatpages.tests.urls'
    def setUp(self):
        # Ensure the fallback middleware is installed for these tests, and
        # remember the original settings so tearDown can restore them.
        self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES
        flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
        if flatpage_middleware_class not in settings.MIDDLEWARE_CLASSES:
            settings.MIDDLEWARE_CLASSES += (flatpage_middleware_class,)
        self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
        settings.TEMPLATE_DIRS = (
            os.path.join(
                os.path.dirname(__file__),
                'templates'
            ),
        )
        self.old_LOGIN_URL = settings.LOGIN_URL
        settings.LOGIN_URL = '/accounts/login/'
    def tearDown(self):
        # Restore the settings modified in setUp.
        settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES
        settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
        settings.LOGIN_URL = self.old_LOGIN_URL
    def test_view_flatpage(self):
        "A flatpage can be served through a view, even when the middleware is in use"
        response = self.client.get('/flatpage_root/flatpage/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it flat!</p>")
    def test_view_non_existent_flatpage(self):
        "A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
        response = self.client.get('/flatpage_root/no_such_flatpage/')
        self.assertEqual(response.status_code, 404)
    def test_view_authenticated_flatpage(self):
        "A flatpage served through a view can require authentication"
        response = self.client.get('/flatpage_root/sekrit/')
        self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
        User.objects.create_user('testuser', 'test@example.com', 's3krit')
        self.client.login(username='testuser',password='s3krit')
        response = self.client.get('/flatpage_root/sekrit/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it sekrit!</p>")
    def test_fallback_flatpage(self):
        "A flatpage can be served by the fallback middleware"
        response = self.client.get('/flatpage/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it flat!</p>")
    def test_fallback_non_existent_flatpage(self):
        "A non-existent flatpage raises a 404 when served by the fallback middleware"
        response = self.client.get('/no_such_flatpage/')
        self.assertEqual(response.status_code, 404)
    def test_fallback_authenticated_flatpage(self):
        "A flatpage served by the middleware can require authentication"
        response = self.client.get('/sekrit/')
        self.assertRedirects(response, '/accounts/login/?next=/sekrit/')
        User.objects.create_user('testuser', 'test@example.com', 's3krit')
        self.client.login(username='testuser',password='s3krit')
        response = self.client.get('/sekrit/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it sekrit!</p>")
| bsd-3-clause |
jim-easterbrook/pywws | src/pywws/service/__init__.py | 1 | 19094 | # pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2018-20 pywws contributors
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Base classes for "service" uploaders.
.. inheritance-diagram:: CatchupDataService FileService LiveDataService
:top-classes: pywws.service.ServiceBase
"""
from __future__ import absolute_import, print_function, unicode_literals
from ast import literal_eval
from collections import deque
from datetime import datetime, timedelta
import os
import sys
import threading
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
import pywws
from pywws.constants import SECOND
import pywws.logger
import pywws.storage
import pywws.template
class Queue(deque):
    """A deque that invokes a callback the first time real work is queued.

    ``start`` (in practice :py:meth:`threading.Thread.start`) is called
    exactly once, when the first non-``None`` item is appended. ``None``
    items -- the shutdown sentinel -- never trigger the callback.
    """
    def __init__(self, start, *args, **kwargs):
        deque.__init__(self, *args, **kwargs)
        self._start = start
        self._started = False
    def append(self, item):
        deque.append(self, item)
        if item is not None and not self._started:
            # Fire the start callback on the first genuine upload only.
            self._started = True
            self._start()
    def full(self):
        """Return True when 50 or more uploads are already queued."""
        return len(self) >= 50
class ServiceBase(threading.Thread):
    """Base class for all service uploaders.
    Uploaders use a separate thread to allow the main program thread to
    continue even if a service is slow to respond. Items to upload are
    passed to the thread via a thread safe queue. The thread is started
    when the first item is put on the queue. To shut down the thread put
    :py:obj:`None` on the queue, e.g. by calling :py:meth:`stop`.
    There are two types of uploader derived from this class.
    :py:class:`DataServiceBase` is used by uploaders that send defined
    sets of data, typically as an HTML "post" or "get" operation.
    :py:class:`FileService` is used to upload files, including free form
    text such as a Twitter message.
    All service classes must provide a :py:attr:`logger` object so that
    logging messages carry the right module name, and define a
    :py:attr:`service_name` string. They must also define a
    :py:meth:`session` method.
    """
    config = {}
    """Defines the user configuration of the uploader. Each item must be
    of the form ``name: (default (str), required (bool), fixed_key (str
    or None))``. ``name`` is the ``weather.ini`` value name, ``default``
    is a default value, ``required`` defines whether a value must be
    supplied at run time, and ``fixed_key`` defines if and to where in
    :py:attr:`~DataServiceBase.fixed_data` the value should be copied.
    """
    interval = timedelta(seconds=40)
    """Sets the minimum period between the timestamps of uploaded data.
    For some services this can be less than the weather station's "live"
    data period (48 seconds) whereas others may require 5 or 15 minutes
    between readings.
    """
    logger = None
    """A :py:class:`logging.Logger` object created with the module name.
    This is typically done as follows::
        logger = logging.getLogger(__name__)
    """
    service_name = ''
    """A short name used to refer to the service in weather.ini. It
    should be all lower case. The best name to use is the last part of
    the module's file name, as follows::
        service_name = os.path.splitext(os.path.basename(__file__))[0]
    """
    def __init__(self, context, check_params=True):
        super(ServiceBase, self).__init__()
        self.context = context
        # the queue starts this thread when the first item is appended to it
        self.queue = Queue(self.start)
        # get user configuration
        self.params = {}
        check = []
        for key, (default, required, fixed_key) in self.config.items():
            self.params[key] = context.params.get(
                self.service_name, key, default)
            if required:
                check.append(key)
            if fixed_key and self.params[key]:
                # copy fixed_data to avoid changing class definition
                self.fixed_data = dict(self.fixed_data)
                self.fixed_data[fixed_key] = self.params[key]
        # check values
        if check_params:
            self.check_params(*check)
    def check_params(self, *keys):
        """Ensure user has set required values in weather.ini.
        Normally the :py:data:`~ServiceBase.config` names with
        ``required`` set are checked, but if your uploader has a
        ``register`` method you may need to check for other data.
        :param str keys: the :py:data:`~ServiceBase.config` names to
            verify.
        """
        for key in keys:
            if not self.params[key]:
                raise RuntimeError('"{}" not set in weather.ini'.format(key))
    def session(self):
        """Context manager factory function for a batch of one or more
        uploads.
        This makes it easy to ensure any resources such as an internet
        connection are properly closed after a batch of uploads. Use the
        :py:func:`contextlib.contextmanager` decorator when you
        implement this method.
        For a typical example, see the source code of the
        :py:mod:`pywws.service.openweathermap` module. If your upload
        can't benefit from a session object yield :py:obj:`None`, as in
        :py:mod:`pywws.service.copy`.
        """
        raise NotImplementedError()
    def run(self):
        """Thread body: repeatedly call :py:meth:`upload_batch` while
        there is queued work, waiting between polls of the queue.
        The loop ends when the context's shutdown event is set or, when
        not live logging, when :py:meth:`upload_batch` returns False
        (an upload failed or the shutdown sentinel was reached).
        """
        self.logger.debug('thread started ' + self.name)
        self.old_message = ''
        if self.context.live_logging:
            # poll at about 1/20 of the upload interval, clamped to 4-40s
            polling_interval = self.interval.total_seconds() / 20
            polling_interval = min(max(polling_interval, 4.0), 40.0)
        else:
            polling_interval = 4.0
        while not self.context.shutdown.is_set():
            OK = True
            if self.queue:
                try:
                    OK = self.upload_batch()
                except Exception as ex:
                    self.logger.exception(ex)
                    OK = False
            if OK:
                pause = polling_interval
            elif self.context.live_logging:
                # upload failed, wait before trying again
                pause = 40.0
            else:
                # upload failed or nothing more to do
                break
            self.context.shutdown.wait(pause)
    def stop(self):
        """Ask the upload thread to terminate by queueing the None sentinel."""
        if self.is_alive():
            self.logger.debug('stopping thread ' + self.name)
            self.queue.append(None)
    def log(self, message):
        """Log a message at ERROR level the first time it occurs, and at
        DEBUG level on consecutive repeats of the same message."""
        if message == self.old_message:
            self.logger.debug(message)
        else:
            self.logger.error(message)
        self.old_message = message
class DataServiceBase(ServiceBase):
    """Base class for "data" services.
    A "data" service uploader sends defined sets of data, typically as
    an HTML "post" or "get" operation. Service classes should be based
    on :py:class:`CatchupDataService` or :py:class:`LiveDataService`,
    depending on whether the service allows uploading of past data, for
    example to fill in gaps if the server (or pywws client) goes down
    for a few hours or days.
    Data service classes must provide a :py:attr:`template` string to
    define how to convert pywws data before uploading. Required methods
    are :py:meth:`~ServiceBase.session` and :py:meth:`upload_data`. If
    the service has a separate authorisation or registration process
    this can be done in a :py:meth:`~pywws.service.mastodon.register`
    method. See :py:mod:`pywws.service.mastodon` for an example.
    """
    template = ''
    """Defines the conversion of pywws data to key, value pairs required
    by the service. The template string is passed to
    :py:mod:`pywws.template`, then the result is passed to
    :py:func:`~ast.literal_eval` to create a :py:obj:`dict`. This rather
    complex process allows great flexibility, but you do have to be
    careful with use of quotation marks. """
    fixed_data = {}
    """Defines a set of ``key: value`` pairs that are the same for every
    data upload. This might include the station's location or the
    software name & version. Values set by the user should be included
    in the weather.ini config defined in :py:data:`~ServiceBase.config`.
    """
    def __init__(self, context, check_params=True):
        super(DataServiceBase, self).__init__(context, check_params)
        # check config
        template = context.params.get(self.service_name, 'template')
        if template == 'default':
            context.params.unset(self.service_name, 'template')
        elif template:
            self.logger.critical(
                'obsolete item "template" found in weather.ini '
                'section [{}]'.format(self.service_name))
        # create templater
        if self.template:
            self.templater = pywws.template.Template(context, use_locale=False)
            self.template_file = None
        # get time stamp of last uploaded data
        self.last_update = context.status.get_datetime(
            'last update', self.service_name)
        if not self.last_update:
            # no record of a previous upload: go back "catchup" days, or
            # accept any timestamp if the service has no catchup period
            if self.catchup:
                self.last_update = datetime.utcnow() - timedelta(
                    days=self.catchup)
            else:
                self.last_update = datetime.min
    # NOTE(review): prepared_data={} is a shared mutable default;
    # implementations should treat it as read-only.
    def upload_data(self, session, prepared_data={}):
        """Upload one data set to the service.
        Every data service class must implement this method.
        :param object session: the object created by
            :py:meth:`~ServiceBase.session`. This is typically used to
            communicate with the server and is automatically closed when
            a batch of uploads has finished.
        :param dict prepared_data: a set of key: value pairs to upload.
            The keys and values must all be text strings.
        """
        raise NotImplementedError()
    def queue_data(self, timestamp, data):
        """Convert one data record via the template, merge in
        :py:attr:`fixed_data`, and put it on the upload queue.
        Returns True if the record was queued, False if it was rejected
        by :py:meth:`valid_data`."""
        if not self.valid_data(data):
            return False
        prepared_data = self.prepare_data(data)
        prepared_data.update(self.fixed_data)
        self.logger.debug('data: %s', str(prepared_data))
        self.queue.append((timestamp, prepared_data))
        return True
    def prepare_data(self, data):
        """Expand :py:attr:`template` with one data record and parse the
        result into a dict of key: value pairs."""
        if not self.template_file:
            self.template_file = StringIO(self.template)
        data_str = self.templater.make_text(self.template_file, data)
        # rewind so the template can be reused for the next record
        self.template_file.seek(0)
        return literal_eval('{' + data_str + '}')
    def valid_data(self, data):
        """Hook for subclasses to reject unsuitable records; the base
        class accepts everything."""
        return True
class CatchupDataService(DataServiceBase):
    """Data service that can also upload stored data from the past, e.g.
    to fill gaps after the server or client has been down."""
    catchup = 7
    """Sets the number of days of past data that can be uploaded when a
    service is first used.
    """
    def queue_data(self, timestamp, data):
        """Queue one record, unless its timestamp is too soon after the
        last queued record (less than :py:attr:`~ServiceBase.interval`)."""
        if timestamp and timestamp < self.last_update + self.interval:
            return False
        OK = super(CatchupDataService, self).queue_data(timestamp, data)
        if OK and timestamp:
            self.last_update = timestamp
        return OK
    def do_catchup(self, do_all=False):
        """Queue stored data from after the last upload.
        With ``do_all`` set, every record is queued, waiting for queue
        space as needed; otherwise at most one record is queued per call
        and False is returned to indicate there is more to do."""
        start = self.last_update + self.interval
        if do_all:
            for data in self.context.calib_data[start:]:
                while self.queue.full():
                    self.context.shutdown.wait(4.0)
                    if self.context.shutdown.is_set():
                        return True
                self.queue_data(data['idx'], data)
            return True
        for data in self.context.calib_data[start:]:
            if self.queue.full():
                return True
            if self.queue_data(data['idx'], data):
                return False
        return True
    def upload(self, live_data=None, test_mode=False, options=()):
        """Queue the next stored record due for upload, or ``live_data``
        if given. In test mode the most recent record is queued with no
        timestamp, so "last update" is not advanced."""
        if self.queue.full():
            return
        if test_mode:
            start = self.context.calib_data.before(datetime.max)
        else:
            start = self.last_update + self.interval
        for data in self.context.calib_data[start:]:
            timestamp = data['idx']
            if test_mode:
                timestamp = None
            if self.queue_data(timestamp, data):
                return
        if live_data:
            self.queue_data(live_data['idx'], live_data)
    def upload_batch(self):
        """Send every queued record, in order, within one session.
        Each record is only removed from the queue after a successful
        upload, so a failure leaves it in place for a later retry."""
        OK = True
        count = 0
        with self.session() as session:
            while self.queue and not self.context.shutdown.is_set():
                # send upload without taking it off queue
                upload = self.queue[0]
                if upload is None:
                    # shutdown sentinel
                    OK = False
                    break
                timestamp, prepared_data = upload
                OK, message = self.upload_data(
                    session, prepared_data=prepared_data)
                self.log(message)
                if not OK:
                    break
                count += 1
                if timestamp:
                    self.context.status.set(
                        'last update', self.service_name, str(timestamp))
                # finally remove upload from queue
                self.queue.popleft()
        if count > 1:
            self.logger.warning('{:d} records sent'.format(count))
        elif count:
            self.logger.info('1 record sent')
        return OK
class LiveDataService(DataServiceBase):
    """Data service that only uploads the most recent data; queued older
    records are discarded rather than sent."""
    catchup = None
    def do_catchup(self, do_all=False):
        """Live services cannot send old data, so there is nothing to do."""
        return True
    def upload(self, live_data=None, test_mode=False, options=()):
        """Queue ``live_data``, or the most recent stored record if no
        live data is given. In test mode the record is queued with no
        timestamp, so "last update" is not advanced."""
        if self.queue.full():
            return
        if live_data:
            data = live_data
        else:
            idx = self.context.calib_data.before(datetime.max)
            if not idx:
                # no stored data at all
                return
            data = self.context.calib_data[idx]
        timestamp = data['idx']
        if test_mode:
            timestamp = None
        self.queue_data(timestamp, data)
    def upload_batch(self):
        """Send only the newest queued record, discarding older ones,
        unless it is too soon after the last successful upload."""
        # get most recent upload on queue
        upload = self.queue.popleft()
        while self.queue and self.queue[0] is not None:
            upload = self.queue.popleft()
        if upload is None:
            # shutdown sentinel
            return False
        timestamp, prepared_data = upload
        # check time since last upload
        if timestamp and timestamp < self.last_update + self.interval:
            return True
        with self.session() as session:
            OK, message = self.upload_data(session, prepared_data=prepared_data)
        self.log(message)
        if OK:
            self.logger.info('1 record sent')
            if timestamp:
                self.last_update = timestamp
                self.context.status.set(
                    'last update', self.service_name, str(timestamp))
        return OK
class FileService(ServiceBase):
    """Base class for "file" services.
    A file service uploads files by name instead of data sets. Failed
    uploads are remembered in the service's "pending" status list so
    :py:meth:`do_catchup` can retry them later. Subclasses must provide
    an ``upload_file(session, path)`` method.
    """
    def do_catchup(self, do_all=False):
        """Re-queue any uploads recorded as pending from earlier failures."""
        self.upload(options=literal_eval(
            self.context.status.get('pending', self.service_name, '[]')))
        return True
    def upload(self, live_data=None, options=()):
        """Queue the file names in ``options``, skipping duplicates and
        stopping additions once the queue is full."""
        for item in options:
            if self.queue.full() or (item in self.queue):
                continue
            self.queue.append(item)
    def upload_batch(self):
        """Upload queued files one at a time within a single session.
        Files that no longer exist are dropped; a failed upload is added
        to the "pending" status list and stops the batch."""
        pending = literal_eval(
            self.context.status.get('pending', self.service_name, '[]'))
        OK = True
        count = 0
        with self.session() as session:
            while self.queue and not self.context.shutdown.is_set():
                # peek at the head of the queue; only remove it on success
                upload = self.queue[0]
                if upload is None:
                    # shutdown sentinel
                    OK = False
                    break
                # relative names are resolved against the output directory
                if os.path.isabs(upload):
                    path = upload
                else:
                    path = os.path.join(self.context.output_dir, upload)
                if not os.path.isfile(path):
                    # file has gone away, forget it
                    if upload in pending:
                        pending.remove(upload)
                    self.queue.popleft()
                    continue
                self.logger.debug('file: %s', path)
                OK, message = self.upload_file(session, path)
                self.log(message)
                if OK:
                    if upload in pending:
                        pending.remove(upload)
                    count += 1
                else:
                    # remember the failure so do_catchup can retry it
                    if upload not in pending:
                        pending.append(upload)
                    break
                self.queue.popleft()
        self.context.status.set('pending', self.service_name, repr(pending))
        if count > 1:
            self.logger.info('{:d} uploads'.format(count))
        elif count:
            self.logger.info('1 upload')
        return OK
def main(class_, argv=None):
    """Shared command line entry point for pywws service modules.
    :param class class_: the uploader class defined by the calling module.
    :param list argv: command line arguments, defaults to :py:data:`sys.argv`.
    :return int: zero on success.
    """
    import argparse
    import inspect
    if argv is None:
        argv = sys.argv
    # use the calling module's docstring for the command's help text
    docstring = inspect.getdoc(sys.modules[class_.__module__])
    if sys.version_info[0] < 3:
        docstring = docstring.decode('utf-8')
    docstring = docstring.split('\n\n')
    parser = argparse.ArgumentParser(
        description=docstring[0], epilog=docstring[1])
    # options offered depend on the capabilities of the uploader class
    if hasattr(class_, 'register'):
        parser.add_argument('-r', '--register', action='store_true',
                            help='register (or update) with service')
    if issubclass(class_, CatchupDataService):
        parser.add_argument('-c', '--catchup', action='store_true',
                            help='upload all data since last upload')
    parser.add_argument('-v', '--verbose', action='count',
                        help='increase amount of reassuring messages')
    parser.add_argument('data_dir', help='root directory of the weather data')
    if issubclass(class_, FileService):
        parser.add_argument('file', nargs='*', help='file to be uploaded')
    args = parser.parse_args(argv[1:])
    pywws.logger.setup_handler(args.verbose or 0)
    with pywws.storage.pywws_context(args.data_dir) as context:
        if 'register' in args and args.register:
            # registration does not need the usual required parameters
            uploader = class_(context, check_params=False)
            uploader.register()
            context.flush()
            return 0
        uploader = class_(context)
        if issubclass(class_, FileService):
            uploader.upload(options=map(os.path.abspath, args.file))
        elif issubclass(class_, CatchupDataService) and args.catchup:
            uploader.do_catchup(do_all=True)
        else:
            # one "test mode" upload of the most recent data
            uploader.upload(test_mode=True)
        uploader.stop()
        return 0
| gpl-2.0 |
tedelhourani/ansible | lib/ansible/modules/network/avi/avi_certificatemanagementprofile.py | 27 | 3642 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_certificatemanagementprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of CertificateManagementProfile Avi RESTful Object
description:
- This module is used to configure CertificateManagementProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
name:
description:
- Name of the pki profile.
required: true
script_params:
description:
- List of customparams.
script_path:
description:
- Script_path of certificatemanagementprofile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create CertificateManagementProfile object
avi_certificatemanagementprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_certificatemanagementprofile
"""
RETURN = '''
obj:
description: CertificateManagementProfile (api/certificatemanagementprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible module entry point for the Avi CertificateManagementProfile
    object: build the argument spec and delegate to the generic Avi API
    helper."""
    # Module-specific arguments; the common Avi connection arguments
    # (controller, username, password, ...) are merged in below.
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        name=dict(type='str', required=True),
        script_params=dict(type='list',),
        script_path=dict(type='str', required=True),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        # fail_json() exits the module; the return is just for control flow
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # delegate create/update/delete handling to the generic Avi API helper
    return avi_ansible_api(module, 'certificatemanagementprofile',
                           set([]))
| gpl-3.0 |
noahbenson/pimms | pimms/table.py | 1 | 19575 | ####################################################################################################
# pimms/table.py
# Classes for storing immutable data tables.
# By Noah C. Benson
import copy, types, sys, pint, six
import numpy as np
import pyrsistent as ps
from functools import reduce
from .util import (merge, is_pmap, is_map, LazyPMap, lazy_map, is_lazy_map,
is_quantity, is_unit, is_str, is_int, is_vector,
quant, iquant, mag, unit, qhash, units, imm_array,
getargspec_py27like)
from .immutable import (immutable, value, param, require, option)
if sys.version_info[0] == 3: from collections import abc as colls
else: import collections as colls
def _ndarray_assoc(arr, k, v):
'_ndarray_assoc(arr, k, v) duplicates arr to a writeable array, sets arr2[k]=v, returns arr2'
arr = np.array(arr)
arr[k] = v
arr.setflags(write=False)
return arr
class ITableRow(colls.Mapping):
    '''
    ITableRow is a class that works with the ITable class to quickly and lazily allow access to
    individual rows as if they were individual persistent maps. For all intents and purposes, an
    ITableRow object should be treated as a dict object that cannot be changed.

    Note that ITableRow is not an immutable class, but its members cannot be changed. The class
    is intended as a hidden subclass that is very efficient.

    Parameters: data is the column-name-to-column-vector mapping shared with the
    owning ITable; colnames is the tuple of column names; rownum is the index of
    this row within each column vector.
    '''
    def __init__(self, data, colnames, rownum):
        # __setattr__ is overridden to raise, so we must bypass it here
        object.__setattr__(self, 'data', data)
        object.__setattr__(self, 'column_names', colnames)
        object.__setattr__(self, 'row_number', rownum)
    def keys(self):
        '''Yields the column names of the row (its mapping keys).'''
        return self.column_names
    def __setattr__(self, k, v):
        raise RuntimeError('ITableRow object is immutable')
    def __getitem__(self, key):
        # a row lookup is just an indexed read of the named column vector
        return self.data[key][self.row_number]
    def __setitem__(self, key, value):
        raise RuntimeError('Cannot set row of immutable table')
    def __delitem__(self, key):
        raise RuntimeError('Cannot set row of immutable table')
    def __iter__(self):
        # (fixed) the original bound unused locals dat/n here
        for col in self.column_names:
            yield col
    def __len__(self):
        return len(self.column_names)
    def asdict(self):
        '''Yields this row as a plain dict of column-name -> cell value.'''
        return {k:self.data[k][self.row_number] for k in self.__iter__()}
    def aspmap(self):
        '''Yields this row as a pyrsistent persistent map.'''
        return ps.pmap(self.asdict())
    def __repr__(self):
        return repr(self.asdict())
    def __hash__(self):
        # hash via the persistent-map form so equal rows hash equally
        return hash(self.aspmap())
@immutable
class ITable(colls.Mapping):
    '''
    The ITable class is a simple immutable datatable. Columns are stored as
    read-only numpy arrays (or unit-carrying quantities); laziness of the
    underlying map is respected, so column transformations are deferred until
    a column is actually requested.
    '''
    def __init__(self, data, n=None):
        self.data = data
        self._row_count = n
    def __hash__(self):
        # qhash knows how to hash maps containing numpy arrays / quantities
        return qhash(self.data)
    def __getstate__(self):
        # pickle quantities as (magnitude, unit) pairs and plain columns as
        # (array, None) so units survive serialization
        d = self.__dict__.copy()
        d['data'] = {k:(mag(v), unit(v)) if is_quantity(v) else (v, None)
                     for (k,v) in six.iteritems(self.data)}
        return d
    def __setstate__(self, d):
        dat = d['data']
        # object.__setattr__ is required because @immutable blocks assignment
        object.__setattr__(self, 'data',
                           ps.pmap({k:(imm_array(u) if v is None else iquant(u, v))
                                    for (k,(u,v)) in six.iteritems(dat)}))
        object.__setattr__(self, '_row_count', None)
    @staticmethod
    def _filter_col(vec):
        '_filter_col(vec) yields a read-only numpy array version of the given column vector'
        if isinstance(vec, types.FunctionType) and getargspec_py27like(vec)[0] == []:
            # a zero-argument function is a lazy column: defer the filtering
            return lambda:ITable._filter_col(vec())
        elif is_quantity(vec):
            m = mag(vec)
            mm = ITable._filter_col(m)
            return vec if m is mm else quant(mm, unit(vec))
        else:
            return imm_array(vec)
    @param
    def data(d):
        '''
        itbl.data is an immutable map of the given itable in which property names are associated
        with their data vectors.
        '''
        # we want to check these values and clean them up as we go, but if this is a lazy map, we
        # want to do that lazily...
        if is_map(d):
            if not is_lazy_map(d): d = lazy_map(d)
            def _make_lambda(k): return (lambda:ITable._filter_col(d[k]))
            return lazy_map(
                {k:_make_lambda(k) if d.is_lazy(k) else ITable._filter_col(d[k])
                 for k in six.iterkeys(d)})
        else:
            raise ValueError('Unable to interpret data argument; must be a mapping')
    @param
    def _row_count(n):
        '''
        itbl._row_count is the row count, as provided by internal methods when the row count can be
        known ahead of time. It should not generally be used; use itbl.row_count instead.
        '''
        return n
    @require
    def validate_data(data):
        '''
        ITable data is required to be a PMap with keys that are strings.
        '''
        if not isinstance(data, ps.PMap):
            raise ValueError('data is required to be a persistent map')
        if not all(isinstance(k, six.string_types) for k in six.iterkeys(data)):
            raise ValueError('data keys must be strings')
        return True
    @require
    def validate_row_count(_row_count):
        '''
        ITable _row_count must be a non-negative integer or None.
        '''
        if _row_count is None: return True
        else: return is_int(_row_count) and _row_count >= 0
    @value
    def column_names(data):
        '''
        itbl.column_names is a tuple of the names of the columns of the data table.
        '''
        return tuple(six.iterkeys(data))
    @value
    def row_count(data, _row_count):
        '''
        itbl.row_count is the number of rows in the given datatable itbl.
        '''
        if len(data) == 0:
            return 0
        elif _row_count:
            return _row_count
        elif is_lazy_map(data):
            # prefer an already-realized column so we don't force a lazy one
            k = next(data.iternormal(), None)
            k = k if k else next(data.itermemoized(), None)
            k = k if k else next(data.iterkeys())
            return len(data[k])
        else:
            return len(next(six.itervalues(data), []))
    @value
    def columns(data, row_count):
        '''
        itbl.columns is a tuple of the columns in the given datatable itbl. Anything that depends on
        columns includes a de-facto check that all columns are the same length.
        '''
        cols = tuple(v for v in six.itervalues(data))
        if not all(len(c) == row_count for c in cols):
            raise ValueError('itable columns do not all have identical lengths!')
        return cols
    @value
    def rows(data, row_count, column_names):
        '''
        itbl.rows is a tuple of all the persistent maps that make up the rows of the data table.
        '''
        return tuple([ITableRow(data, column_names, i) for i in range(row_count)])
    @value
    def dataframe(data):
        '''
        itbl.dataframe is a pandas dataframe object that is equivalent to the given itable. Note
        you must have pandas installed for this to work; an exception will be raised when this
        value is requested if you do not.
        '''
        import pandas
        return pandas.DataFrame.from_dict(dict(data))
    # Methods
    def set(self, k, v):
        '''
        itbl.set(name, val) yields a new itable object identical to the given itbl except that it
          includes the vector val under the given column name.
        itbl.set(row, map) updates just the given row to have the properties in the given map; if
          this results in a new column being added, it will have the value None for all other rows.
        itbl.set(rows, m) allows a sequence of rows to be set by passing rows as either a list or
          slice; m may also be a single map or a sequence of maps whose size matches that of rows.
          Alternately, m may be an itable whose row-size matches that of rows; in this case new
          column names may again be added.
        '''
        dat = self.data
        if isinstance(k, six.string_types):
            # setting a single column by name
            if isinstance(v, (ITable, colls.Mapping)): v = v[k]
            v = self._filter_col(v)
            # NOTE(review): if v was given as a lazy (zero-arg) function, len(v)
            # here raises TypeError -- confirm lazy columns are not expected
            new_data = self.data.set(k, v)
            return ITable(new_data, n=len(v))
        elif is_int(k):
            # updating a single row: k is the row index, v maps columns->values
            row = k
            updates = v
            # bind the column via the lambda parameter; the original closed over
            # loop/parameter names that were later rebound (late-binding bug) and
            # passed the column name -- not the row index -- to _ndarray_assoc
            def _make_lambda(col):
                return lambda: (_ndarray_assoc(dat[col], row, updates[col])
                                if col in updates else dat[col])
            new_map = {col:_make_lambda(col) for col in six.iterkeys(dat)}
            nones = np.full((self.row_count,), None)
            for (vk,vv) in six.iteritems(updates):
                if vk not in new_map:
                    # brand new column: None everywhere except the updated row
                    new_map[vk] = _ndarray_assoc(nones, row, vv)
            return ITable(lazy_map(new_map), n=self.row_count)
        elif not k:
            return self
        elif isinstance(k[0], six.string_types):
            # setting several columns at once; k is a list of column names
            newdat = self.data
            if isinstance(v, ITable):
                # capture the source table under a fresh name: the original
                # rebound v to the lazy map the lambdas read from, making the
                # lazy entries self-referential
                src = v
                def _make_lambda(kk): return (lambda:self._filter_col(src[kk]))
                v = lazy_map({kk:_make_lambda(kk) for kk in k})
            elif not isinstance(v, colls.Mapping):
                v = np.asarray(v)
                # accept a (rows x cols) matrix by transposing it to columns
                if len(v) == self.row_count and v.shape[1] == len(k): v = v.T
                v = {kk:self._filter_col(vv) for (kk,vv) in zip(k,v)}
            for kk in six.iterkeys(v):
                # bind kk via the parameter (the original lambda closed over the
                # loop variable, so every column saw the last key)
                def _make_lambda(kk): return (lambda:self._filter_col(v[kk]))
                newdat = newdat.set(kk, _make_lambda(kk))
            return ITable(newdat, n=self.row_count)
        else:
            # setting several rows at once; keys is a list/slice of row indices
            (keys, vals) = (k,v)
            dat0 = self.data
            nones  = np.full((self.row_count,), None)
            knones = np.full((len(keys),), None)
            if isinstance(vals, (ITable, colls.Mapping)):
                # one map of column -> per-row values; close over dat0, which is
                # never rebound (the original closed over a name that was
                # reassigned to the result map, corrupting lazy evaluation)
                def _make_lambda(col):
                    return lambda:_ndarray_assoc(
                        dat0[col] if col in dat0 else nones,
                        keys,
                        vals[col] if col in vals else knones)
                dat = reduce(
                    lambda m,col: m.set(col, _make_lambda(col)),
                    # iterate keys (the original iterated (key, value) pairs and
                    # stored tuples as column names)
                    six.iterkeys(vals.data if isinstance(vals, ITable) else vals),
                    dat0)
            else:
                # a sequence of row-maps; build a lazy map of the new columns
                def _make_col_lambda(col):
                    return lambda:np.asarray([row[col] for row in vals])
                cols = lazy_map({col:_make_col_lambda(col)
                                 for col in six.iterkeys(vals[0])})
                def _make_lambda(col):
                    return lambda:_ndarray_assoc(
                        dat0[col] if col in dat0 else nones,
                        keys,
                        cols[col])
                dat = reduce(
                    lambda m,col: m.set(col, _make_lambda(col)),
                    six.iterkeys(vals[0]),
                    dat0)
            return ITable(dat, n=self.row_count)
    def discard(self, cols):
        '''
        itbl.discard(arg) discards either the list of rows, given as ingtegers, or the list of
        columns, given as strings.
        '''
        if not cols: return self
        dat = self.data
        vecq = is_vector(cols)
        if is_str(cols) or (vecq and len(cols) > 0 and is_str(cols[0])):
            # dropping columns by name
            cols = set(cols if vecq else [cols])
            def _make_lambda(k): return lambda:dat[k]
            return ITable(lazy_map({k:_make_lambda(k) for k in six.iterkeys(dat) if k not in cols}),
                          n=self.row_count)
        elif isinstance(cols, slice) or is_int(cols) or \
             (vecq and len(cols) > 0 and is_int(cols[0])):
            # dropping rows by index/slice; np.delete handles all three forms
            def _make_lambda(k): return lambda:np.delete(dat[k], cols, 0)
            newdat = lazy_map({k:_make_lambda(k) for k in six.iterkeys(dat)})
            return ITable(newdat, n=len(np.delete(np.ones((self.row_count,)), cols, 0)))
        elif vecq and len(cols) == 0: return self
        else: raise ValueError('ITable.discard requires integers or strings')
    def is_lazy(self, k):
        '''
        itable.is_lazy(k) yields True if k is a lazy value in the given itable, as in a lazy map.
        '''
        return self.data.is_lazy(k)
    def is_memoized(self, k):
        '''
        itable.is_memoized(k) yields True if k is a memoized value in the given itable, as in a lazy
        map.
        '''
        return self.data.is_memoized(k)
    def is_normal(self, k):
        '''
        itable.is_normal(k) yields True if k is a normal value in the given itable, as in a lazy
        map.
        '''
        return self.data.is_normal(k)
    def iterkeys(self):
        return self.data.iterkeys()
    def iteritems(self):
        return self.data.iteritems()
    def iterlazy(self):
        '''
        itable.iterlazy() yields an iterator over the lazy keys only (memoized lazy keys are not
        considered lazy).
        '''
        return self.data.iterlazy()
    def itermemoized(self):
        '''
        itable.itermemoized() yields an iterator over the memoized keys only (neihter unmemoized
        lazy keys nor normal keys are considered memoized).
        '''
        return self.data.itermemoized()
    def iternormal(self):
        '''
        itable.iternormal() yields an iterator over the normal unlazy keys only (memoized lazy keys
        are not considered normal).
        '''
        return self.data.iternormal()
    def map(self, f):
        '''
        itbl.map(f) yields the result of mapping the rows of the given datatable itbl over the
        given function f. If f is the name of a column, that column is returned.
        '''
        if isinstance(f, six.string_types) and f in self.data: return self.data[f]
        # (the original computed f's argspec here and discarded it; removed)
        return map(f, self.rows)
    def where(self, f):
        '''
        itbl.where(f) yields the indices for which itbl.map(f) yields True.
        '''
        return [i for (i,v) in enumerate(self.map(f)) if v]
    def select(self, arg):
        '''
        itbl.select(idcs) yields a sub-table in which only the rows indicated by the given list of
          indices are kept.
        itbl.select(f) keeps all rows for which the function f yields True.
        '''
        if isinstance(arg, types.FunctionType):
            arg = self.where(arg)
        elif len(arg) == self.row_count and set(arg) == set([0,1]):
            # NOTE(review): treats a full-length 0/1 vector as a boolean mask;
            # an all-zeros or all-ones mask will not match this test -- confirm
            arg = [i for (i,b) in enumerate(arg) if b]
        # compute n unconditionally (the original left n undefined when arg was
        # a function, raising NameError below)
        n = len(arg)
        dat = self.data
        def _make_lambda(k): return lambda:dat[k][arg]
        return ITable(
            lazy_map({k:_make_lambda(k) for k in six.iterkeys(dat)}),
            n=n)
    def merge(self, *args, **kwargs):
        '''
        itbl.merge(...) yields a copy of the ITable object itbl that has been merged left-to-right
        with the given arguments.
        '''
        return itable(self.data, *args, **kwargs).persist()
    def __getitem__(self, rows, cols=Ellipsis):
        '''
        itbl[row_number] yields the map associated with the given row in the ITable object itbl; the
          row_number may alternately be a slice.
        itbl[[r1, r2...]] yields a duplicate itable containing only the given rows of itbl.
        itbl[column_name] yields the numpy array associated with the given column name.
        itbl[[c1, c2...]] yields a duplicate itable containing only the given columns of itbl.
        itbl[rows, cols] is equivalent to itbl[rows][cols] (in fact, rows and cols may be given in
          any order).

        NOTE(review): itbl[a, b] subscript syntax passes the tuple (a, b) as the
        rows argument rather than filling cols; the two-argument form only works
        when __getitem__ is called directly -- confirm intended usage.
        '''
        if cols is not Ellipsis: return self[rows][cols]
        if is_int(rows):
            return self.rows[rows]
        elif isinstance(rows, six.string_types):
            return self.data[rows]
        elif rows is None or len(rows) == 0:
            return ITable(ps.m(), n=0)
        elif isinstance(rows, slice) or is_int(rows[0]):
            # row selection by slice or index list
            n = len(range(rows.start, rows.stop, rows.step)) if isinstance(rows, slice) else \
                len(rows)
            dat = self.data
            def _make_lambda(dat,k): return lambda:dat[k][rows]
            return ITable(
                lazy_map({k:_make_lambda(dat,k) for k in six.iterkeys(dat)}),
                n=n)
        else:
            # column selection by name list: drop all other columns
            rows = set(rows)
            dat = self.data
            return ITable(
                reduce(lambda m,k: m if k in rows else m.remove(k), six.iterkeys(dat), dat),
                n=self.row_count)
    def __repr__(self):
        return 'itable(%s, <%d rows>)' % (self.column_names, self.row_count)
    def __iter__(self):
        return six.iterkeys(self.data)
    def __len__(self):
        return len(self.data)
    def __contains__(self, k):
        # ints are tested as row indices, strings as column names
        return ((0 <= k < self.row_count) if is_int(k)                   else
                (k in self.data)          if isinstance(k, six.string_types) else
                False)
    def iterrows(self):
        '''
        itbl.iterrows() iterates over the rows of the givan itable itbl.
        '''
        return iter(self.rows)
def itable(*args, **kwargs):
    '''
    itable(...) yields a new immutable table object from the given set of arguments. The arguments
    may be any number of maps or itables followed by any number of keyword arguments. All the
    entries from the arguments and keywords are collapsed left-to-right (respecting laziness),
    and the resulting column set is returned as the itable. Arguments and maps may contain
    values that are functions of zero arguments; these are considered lazy values and are not
    evaluated by the itable function.
    '''
    # a couple things we want to check first... does our argument list reduce to just an empty
    # itable or just a single itable?
    if len(args) == 0 and len(kwargs) == 0:
        return ITable({}, n=0)
    elif len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], ITable):
        return args[0]
    # we want to try to convert any arguments we can from datatables into maps
    # (pandas is optional: if it is missing or any conversion fails, the
    # arguments are used unchanged)
    try:
        import pandas
        args = [{k:a[k].values for k in a.keys()} if isinstance(a, pandas.DataFrame) else a
                for a in args]
    except Exception: pass
    # now we want to merge these together and make them one lazy map
    m0 = lazy_map(merge(args, kwargs))
    # see if we can deduce the row size from a non-lazy argument:
    # v doubles as a sentinel here -- while (v is m0) no column has been found
    v = m0
    for mm in (list(args) + [kwargs]):
        if len(mm) == 0: continue
        if not is_lazy_map(mm):
            for k in six.iterkeys(mm):
                try: v = mm[k]
                except Exception: continue
                break
        else:
            # for lazy maps, check normal (unlazy) entries first, then memoized
            # ones, so that no lazy column is forced just to learn its length
            for k in mm.iternormal():
                try: v = mm[k]
                except Exception: continue
                break
            for k in (mm.itermemoized() if v is m0 else []):
                try: v = mm[k]
                except Exception: continue
                break
        if v is not m0: break
    # if no concrete column was found, pass n=None and let ITable deduce the
    # row count lazily on first access
    return ITable(m0, n=(None if v is m0 else len(v)))
def is_itable(arg):
    '''
    is_itable(x) yields True if x is an ITable object and False otherwise.
    '''
    # a plain isinstance test; subclasses of ITable also count
    return isinstance(arg, ITable)
| gpl-2.0 |
mmnelemane/nova | nova/tests/unit/scheduler/filters/test_affinity_filters.py | 56 | 8801 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova import objects
from nova.scheduler.filters import affinity_filter
from nova import test
from nova.tests.unit.scheduler import fakes
CONF = cfg.CONF
CONF.import_opt('my_ip', 'nova.netconf')
class TestDifferentHostFilter(test.NoDBTestCase):
    """Tests for the DifferentHostFilter scheduler filter."""

    def setUp(self):
        super(TestDifferentHostFilter, self).setUp()
        self.filt_cls = affinity_filter.DifferentHostFilter()

    @staticmethod
    def _props(hint):
        # build the filter_properties dict the filter inspects
        return {'context': mock.sentinel.ctx, 'scheduler_hints': hint}

    def test_affinity_different_filter_passes(self):
        host_state = fakes.FakeHostState('host1', 'node1', {})
        instance = objects.Instance(uuid='different')
        host_state.instances = {instance.uuid: instance}
        props = self._props({'different_host': ['same']})
        self.assertTrue(self.filt_cls.host_passes(host_state, props))

    def test_affinity_different_filter_no_list_passes(self):
        # the hint may be a bare uuid instead of a list
        host_state = fakes.FakeHostState('host1', 'node1', {})
        host_state.instances = {}
        props = self._props({'different_host': 'same'})
        self.assertTrue(self.filt_cls.host_passes(host_state, props))

    def test_affinity_different_filter_fails(self):
        # the hinted instance already lives on this host
        host_state = fakes.FakeHostState('host1', 'node1', {})
        instance = objects.Instance(uuid='same')
        host_state.instances = {instance.uuid: instance}
        props = self._props({'different_host': ['same']})
        self.assertFalse(self.filt_cls.host_passes(host_state, props))

    def test_affinity_different_filter_handles_none(self):
        # no scheduler hints at all: the filter must pass
        host_state = fakes.FakeHostState('host1', 'node1', {})
        instance = objects.Instance(uuid='same')
        host_state.instances = {instance.uuid: instance}
        self.assertTrue(self.filt_cls.host_passes(host_state, self._props(None)))
class TestSameHostFilter(test.NoDBTestCase):
    """Tests for the SameHostFilter scheduler filter."""

    def setUp(self):
        super(TestSameHostFilter, self).setUp()
        self.filt_cls = affinity_filter.SameHostFilter()

    @staticmethod
    def _props(hint):
        # build the filter_properties dict the filter inspects
        return {'context': mock.sentinel.ctx, 'scheduler_hints': hint}

    def test_affinity_same_filter_passes(self):
        # the hinted instance is on this host, so same_host passes
        host_state = fakes.FakeHostState('host1', 'node1', {})
        instance = objects.Instance(uuid='same')
        host_state.instances = {instance.uuid: instance}
        props = self._props({'same_host': ['same']})
        self.assertTrue(self.filt_cls.host_passes(host_state, props))

    def test_affinity_same_filter_no_list_passes(self):
        # the hint may be a bare uuid instead of a list
        host_state = fakes.FakeHostState('host1', 'node1', {})
        host_state.instances = {}
        props = self._props({'same_host': 'same'})
        self.assertTrue(self.filt_cls.host_passes(host_state, props))

    def test_affinity_same_filter_fails(self):
        # the hinted instance lives elsewhere, so same_host fails
        host_state = fakes.FakeHostState('host1', 'node1', {})
        instance = objects.Instance(uuid='different')
        host_state.instances = {instance.uuid: instance}
        props = self._props({'same_host': ['same']})
        self.assertFalse(self.filt_cls.host_passes(host_state, props))

    def test_affinity_same_filter_handles_none(self):
        # no scheduler hints at all: the filter must pass
        host_state = fakes.FakeHostState('host1', 'node1', {})
        instance = objects.Instance(uuid='different')
        host_state.instances = {instance.uuid: instance}
        self.assertTrue(self.filt_cls.host_passes(host_state, self._props(None)))
class TestSimpleCIDRAffinityFilter(test.NoDBTestCase):
    """Tests for the SimpleCIDRAffinityFilter scheduler filter."""

    def setUp(self):
        super(TestSimpleCIDRAffinityFilter, self).setUp()
        self.filt_cls = affinity_filter.SimpleCIDRAffinityFilter()

    def test_affinity_simple_cidr_filter_passes(self):
        host = fakes.FakeHostState('host1', 'node1', {})
        host.host_ip = '10.8.1.1'
        affinity_ip = "10.8.1.100"
        filter_properties = {'context': mock.sentinel.ctx,
                             'scheduler_hints': {
                                 'cidr': '/24',
                                 'build_near_host_ip': affinity_ip}}
        # host_ip is inside affinity_ip/24, so the filter passes
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_affinity_simple_cidr_filter_fails(self):
        host = fakes.FakeHostState('host1', 'node1', {})
        host.host_ip = '10.8.1.1'
        affinity_ip = "10.8.1.100"
        filter_properties = {'context': mock.sentinel.ctx,
                             'scheduler_hints': {
                                 'cidr': '/32',
                                 'build_near_host_ip': affinity_ip}}
        # a /32 only matches the exact affinity IP, so the filter fails
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))

    def test_affinity_simple_cidr_filter_handles_none(self):
        # no scheduler hints at all: the filter must pass without consulting
        # any IP (the original computed an unused affinity_ip here via the
        # unbound-method form str.join('.', ...); removed)
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {'context': mock.sentinel.ctx,
                             'scheduler_hints': None}
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
class TestGroupAffinityFilter(test.NoDBTestCase):
    """Tests for the server-group affinity/anti-affinity scheduler filters."""

    def _test_group_anti_affinity_filter_passes(self, filt_cls, policy):
        # passes with no group, with the opposite policy, and when no other
        # group member occupies this host
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        filter_properties = {'group_policies': ['affinity']}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        filter_properties = {'group_policies': [policy]}
        filter_properties['group_hosts'] = []
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        filter_properties['group_hosts'] = ['host2']
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_group_anti_affinity_filter_passes(self):
        self._test_group_anti_affinity_filter_passes(
            affinity_filter.ServerGroupAntiAffinityFilter(),
            'anti-affinity')

    def _test_group_anti_affinity_filter_fails(self, filt_cls, policy):
        # another group member already lives on this host: anti-affinity fails
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {'group_policies': [policy],
                             'group_hosts': ['host1']}
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_group_anti_affinity_filter_fails(self):
        self._test_group_anti_affinity_filter_fails(
            affinity_filter.ServerGroupAntiAffinityFilter(),
            'anti-affinity')

    def _test_group_affinity_filter_passes(self, filt_cls, policy):
        # passes with no group, with the opposite policy, and when the group is
        # already on this host; uses the 'policy' parameter instead of a
        # hard-coded 'affinity' literal (the parameter was previously unused)
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        filter_properties = {'group_policies': ['anti-affinity']}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        filter_properties = {'group_policies': [policy],
                             'group_hosts': ['host1']}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_group_affinity_filter_passes(self):
        self._test_group_affinity_filter_passes(
            affinity_filter.ServerGroupAffinityFilter(), 'affinity')

    def _test_group_affinity_filter_fails(self, filt_cls, policy):
        # the group's members live elsewhere: affinity fails for this host
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {'group_policies': [policy],
                             'group_hosts': ['host2']}
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_group_affinity_filter_fails(self):
        self._test_group_affinity_filter_fails(
            affinity_filter.ServerGroupAffinityFilter(), 'affinity')
| apache-2.0 |
scrollback/kuma | vendor/packages/sqlalchemy/test/perf/objselectspeed.py | 8 | 4213 | import time, resource
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.test.util import gc_collect
from sqlalchemy.test import profiling
# in-memory sqlite engine and schema shared by all of the benchmarks below
db = create_engine('sqlite://')
metadata = MetaData(db)
Person_table = Table('Person', metadata,
    Column('id', Integer, primary_key=True),
    Column('type', String(10)),
    Column('name', String(40)),
    Column('sex', Integer),
    Column('age', Integer))
Employee_table = Table('Employee', metadata,
    Column('id', Integer, ForeignKey('Person.id'), primary_key=True),
    Column('foo', String(40)),
    Column('bar', Integer),
    Column('bat', Integer))
# RawPerson is deliberately unmapped: it measures plain attribute assignment
class RawPerson(object): pass
class Person(object): pass
mapper(Person, Person_table)
# joined-table inheritance pair exercised by the "joined" ORM benchmark
class JoinedPerson(object):pass
class Employee(JoinedPerson):pass
mapper(JoinedPerson, Person_table, \
    polymorphic_on=Person_table.c.type, polymorphic_identity='person')
mapper(Employee, Employee_table, \
    inherits=JoinedPerson, polymorphic_identity='employee')
compile_mappers()
def setup():
metadata.create_all()
i = Person_table.insert()
data = [{'name':'John Doe','sex':1,'age':35, 'type':'employee'}] * 100
for j in xrange(500):
i.execute(data)
# note we arent fetching from employee_table,
# so we can leave it empty even though its "incorrect"
#i = Employee_table.insert()
#data = [{'foo':'foo', 'bar':'bar':'bat':'bat'}] * 100
#for j in xrange(500):
# i.execute(data)
print "Inserted 50,000 rows"
def sqlite_select(entity_cls):
    """Fetch every Person row over the raw DBAPI and build entity_cls objects."""
    conn = db.connect().connection
    cursor = conn.cursor()
    cursor.execute("SELECT id, name, sex, age FROM Person")
    people = []
    for (pid, name, sex, age) in cursor.fetchall():
        p = entity_cls()
        p.id = pid
        p.name = name
        p.sex = sex
        p.age = age
        people.append(p)
    cursor.close()
    conn.close()
def sql_select(entity_cls):
    """Fetch every Person row via the sqlalchemy.sql layer and build entity_cls objects."""
    people = []
    rows = Person_table.select().execute().fetchall()
    for row in rows:
        p = entity_cls()
        p.id = row['id']
        p.name = row['name']
        p.sex = row['sex']
        p.age = row['age']
        people.append(p)
#@profiling.profiled(report=True, always=True)
def orm_select():
    """Fetch every Person row through the ORM."""
    people = create_session().query(Person).all()
#@profiling.profiled(report=True, always=True)
def joined_orm_select():
    """Fetch every JoinedPerson row through the ORM (joined-table inheritance)."""
    people = create_session().query(JoinedPerson).all()
def all():
    # Run each fetch strategy once, printing wall-clock time plus user/sys CPU
    # deltas; the schema is created up front and always dropped in the finally.
    setup()
    try:
        t, t2 = 0, 0
        # usage(label) prints the elapsed time since the last snapshot; it
        # reads t/t2 from the enclosing scope and keeps its own state as
        # attributes on the function object (usage.last, set by usage.snap).
        def usage(label):
            now = resource.getrusage(resource.RUSAGE_SELF)
            print "%s: %0.3fs real, %0.3fs user, %0.3fs sys" % (
                label, t2 - t,
                now.ru_utime - usage.last.ru_utime,
                now.ru_stime - usage.last.ru_stime)
            usage.snap(now)
        # usage.snap([stats]) records the current (or given) rusage snapshot
        usage.snap = lambda stats=None: setattr(
            usage, 'last', stats or resource.getrusage(resource.RUSAGE_SELF))
        # each benchmark: collect garbage, snapshot, time the call, report
        gc_collect()
        usage.snap()
        t = time.clock()
        sqlite_select(RawPerson)
        t2 = time.clock()
        usage('sqlite select/native')
        gc_collect()
        usage.snap()
        t = time.clock()
        sqlite_select(Person)
        t2 = time.clock()
        usage('sqlite select/instrumented')
        gc_collect()
        usage.snap()
        t = time.clock()
        sql_select(RawPerson)
        t2 = time.clock()
        usage('sqlalchemy.sql select/native')
        gc_collect()
        usage.snap()
        t = time.clock()
        sql_select(Person)
        t2 = time.clock()
        usage('sqlalchemy.sql select/instrumented')
        gc_collect()
        usage.snap()
        t = time.clock()
        orm_select()
        t2 = time.clock()
        usage('sqlalchemy.orm fetch')
        gc_collect()
        usage.snap()
        t = time.clock()
        joined_orm_select()
        t2 = time.clock()
        usage('sqlalchemy.orm "joined" fetch')
    finally:
        metadata.drop_all()
if __name__ == '__main__':
    all()
| mpl-2.0 |
UFOCoins/ufo | test/functional/disconnect_ban.py | 13 | 5318 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node disconnect and ban behavior"""
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
wait_until,
)
class DisconnectBanTest(BitcoinTestFramework):
    # Exercises the setban/listbanned/clearbanned and disconnectnode RPCs on a
    # two-node network. NOTE(review): the steps are strictly order-dependent --
    # each assertion relies on the ban/connection state left by the previous.
    def set_test_params(self):
        # two nodes, connected bidirectionally by the framework's default setup
        self.num_nodes = 2
    def run_test(self):
        self.log.info("Test setban and listbanned RPCs")
        self.log.info("setban: successfully ban single IP address")
        assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point
        self.nodes[1].setban("127.0.0.1", "add")
        wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
        assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].listbanned()), 1)
        self.log.info("clearbanned: successfully clear ban list")
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")
        self.log.info("setban: fail to ban an already banned subnet")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
        self.log.info("setban: fail to ban an invalid subnet")
        assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
        assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
        self.log.info("setban remove: fail to unban a non-banned subnet")
        assert_raises_rpc_error(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        self.log.info("setban remove: successfully unban subnet")
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.log.info("setban: test persistence across node restart")
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        # Set the mocktime so we can control when bans expire
        old_time = int(time.time())
        self.nodes[1].setmocktime(old_time)
        self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds
        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        # Move time forward by 3 seconds so the third ban has expired
        self.nodes[1].setmocktime(old_time + 3)
        assert_equal(len(self.nodes[1].listbanned()), 3)
        # restart and check that only the expired ban was dropped from disk
        self.stop_node(1)
        self.start_node(1)
        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)
        # Clear ban lists
        self.nodes[1].clearbanned()
        connect_nodes_bi(self.nodes, 0, 1)
        self.log.info("Test disconnectnode RPCs")
        self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        # NOTE(review): 'node1' is assigned the peer's address, not its id; the
        # RPC is expected to fail anyway since both kwargs are given -- confirm
        node1 = self.nodes[0].getpeerinfo()[0]['addr']
        assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)
        self.log.info("disconnectnode: fail to disconnect when calling with junk address")
        assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")
        self.log.info("disconnectnode: successfully disconnect node by address")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address=address1)
        wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
        self.log.info("disconnectnode: successfully reconnect node")
        connect_nodes_bi(self.nodes, 0, 1) # reconnect the node
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
        self.log.info("disconnectnode: successfully disconnect node by node id")
        id1 = self.nodes[0].getpeerinfo()[0]['id']
        self.nodes[0].disconnectnode(nodeid=id1)
        wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
DisconnectBanTest().main()
| mit |
Dfelker/ansible | lib/ansible/cli/pull.py | 30 | 9393 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
########################################################
import datetime
import os
import random
import shutil
import socket
import sys
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.cli import CLI
from ansible.plugins import module_loader
from ansible.utils.display import Display
from ansible.utils.cmd_functions import run_cmd
########################################################
class PullCLI(CLI):
''' code behind ansible ad-hoc cli'''
DEFAULT_REPO_TYPE = 'git'
DEFAULT_PLAYBOOK = 'local.yml'
PLAYBOOK_ERRORS = {
1: 'File does not exist',
2: 'File is not readable'
}
SUPPORTED_REPO_MODULES = ['git']
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage='%prog <host-pattern> [options]',
connect_opts=True,
vault_opts=True,
runtask_opts=True,
)
# options unique to pull
self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run')
self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
help='only run the playbook if the repository has been updated')
self.parser.add_option('-s', '--sleep', dest='sleep', default=None,
help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests')
self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
help='run the playbook even if the repository could not be updated')
self.parser.add_option('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to')
self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
self.parser.add_option('-C', '--checkout', dest='checkout',
help='branch/tag/commit to checkout. ' 'Defaults to behavior of repository module.')
self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
help='adds the hostkey for the repo url if not already added')
self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
help='Repository module name, which ansible will use to check out the repo. Default is %s.' % self.DEFAULT_REPO_TYPE)
self.parser.add_option('--verify-commit', dest='verify', default=False, action='store_true',
help='verify GPG signature of checked out commit, if it fails abort running the playbook.'
' This needs the corresponding VCS module to support such an operation')
self.options, self.args = self.parser.parse_args()
if self.options.sleep:
try:
secs = random.randint(0,int(self.options.sleep))
self.options.sleep = secs
except ValueError:
raise AnsibleOptionsError("%s is not a number." % self.options.sleep)
if not self.options.url:
raise AnsibleOptionsError("URL for repository not specified, use -h for help")
if len(self.args) != 1:
raise AnsibleOptionsError("Missing target hosts")
if self.options.module_name not in self.SUPPORTED_REPO_MODULES:
raise AnsibleOptionsError("Unsuported repo module %s, choices are %s" % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES)))
self.display.verbosity = self.options.verbosity
self.validate_conflicts(vault_opts=True)
def run(self):
''' use Runner lib to do SSH things '''
super(PullCLI, self).run()
# log command line
now = datetime.datetime.now()
self.display.display(now.strftime("Starting Ansible Pull at %F %T"))
self.display.display(' '.join(sys.argv))
# Build Checkout command
# Now construct the ansible command
limit_opts = 'localhost:%s:127.0.0.1' % socket.getfqdn()
base_opts = '-c local "%s"' % limit_opts
if self.options.verbosity > 0:
base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ])
# Attempt to use the inventory passed in as an argument
# It might not yet have been downloaded so use localhost if note
if not self.options.inventory or not os.path.exists(self.options.inventory):
inv_opts = 'localhost,'
else:
inv_opts = self.options.inventory
#TODO: enable more repo modules hg/svn?
if self.options.module_name == 'git':
repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest)
if self.options.checkout:
repo_opts += ' version=%s' % self.options.checkout
if self.options.accept_host_key:
repo_opts += ' accept_hostkey=yes'
if self.options.private_key_file:
repo_opts += ' key_file=%s' % self.options.private_key_file
if self.options.verify:
repo_opts += ' verify_commit=yes'
path = module_loader.find_plugin(self.options.module_name)
if path is None:
raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name))
bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
cmd = '%s/ansible -i "%s" %s -m %s -a "%s"' % (
bin_path, inv_opts, base_opts, self.options.module_name, repo_opts
)
for ev in self.options.extra_vars:
cmd += ' -e "%s"' % ev
# Nap?
if self.options.sleep:
self.display.display("Sleeping for %d seconds..." % self.options.sleep)
time.sleep(self.options.sleep);
# RUN the Checkout command
rc, out, err = run_cmd(cmd, live=True)
if rc != 0:
if self.options.force:
self.display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
else:
return rc
elif self.options.ifchanged and '"changed": true' not in out:
self.display.display("Repository has not changed, quitting.")
return 0
playbook = self.select_playbook(path)
if playbook is None:
raise AnsibleOptionsError("Could not find a playbook to run.")
# Build playbook command
cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
if self.options.vault_password_file:
cmd += " --vault-password-file=%s" % self.options.vault_password_file
if self.options.inventory:
cmd += ' -i "%s"' % self.options.inventory
for ev in self.options.extra_vars:
cmd += ' -e "%s"' % ev
if self.options.ask_sudo_pass:
cmd += ' -K'
if self.options.tags:
cmd += ' -t "%s"' % self.options.tags
os.chdir(self.options.dest)
# RUN THE PLAYBOOK COMMAND
rc, out, err = run_cmd(cmd, live=True)
if self.options.purge:
os.chdir('/')
try:
shutil.rmtree(self.options.dest)
except Exception, e:
self.display.error("Failed to remove %s: %s" % (self.options.dest, str(e)))
return rc
def try_playbook(self, path):
if not os.path.exists(path):
return 1
if not os.access(path, os.R_OK):
return 2
return 0
def select_playbook(self, path):
playbook = None
if len(self.args) > 0 and self.args[0] is not None:
playbook = os.path.join(path, self.args[0])
rc = self.try_playbook(playbook)
if rc != 0:
self.display.warning("%s: %s" % (playbook, self.PLAYBOOK_ERRORS[rc]))
return None
return playbook
else:
fqdn = socket.getfqdn()
hostpb = os.path.join(path, fqdn + '.yml')
shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml')
localpb = os.path.join(path, DEFAULT_PLAYBOOK)
errors = []
for pb in [hostpb, shorthostpb, localpb]:
rc = self.try_playbook(pb)
if rc == 0:
playbook = pb
break
else:
errors.append("%s: %s" % (pb, self.PLAYBOOK_ERRORS[rc]))
if playbook is None:
self.display.warning("\n".join(errors))
return playbook
| gpl-3.0 |
RuseGame/RuseBackend | game.py | 1 | 7680 | from random import shuffle
# A Ruse session always has exactly five players.
NUM_PLAYERS = 5
# Code names handed out (shuffled) to players when the game starts.
ALIASES = ("Pink", "Green", "Blue", "White", "Orange")
# The four legal move types a player may submit in a turn.
MOVE_TYPES = ("send", "spoof", "wiretap", "ambush")


class Game:
    """
    An object to contain an entire game of Ruse.

    The emitter must expose update(cookie, state_dict); it is used to push
    each player's private view of the game out to the clients.
    """

    def __init__(self, game_id, emitter):
        self.game_id = game_id
        self.emitter = emitter
        self.players = {}   # player cookie -> Player
        self.turns = []     # one Turn per round, newest last

    def add_player(self, player_cookie, nickname):
        """
        Adds the player to the game.
        Fails if:
            5 players already joined or
            player already joined
        """
        if len(self.players.keys()) > 4:
            return False
        if player_cookie in self.players:
            return False
        player = Player(player_cookie, nickname)
        self.players[player_cookie] = player
        # The fifth join fills the table and kicks the game off.
        if len(self.players.keys()) == 5:
            self.start_game()
        return True

    def start_game(self):
        """
        Give everyone an alias and target, then start the turn
        """
        aliases = list(ALIASES)
        shuffle(aliases)
        player_list = list(self.players.values())
        shuffle(player_list)
        for player in player_list:
            player.alias = aliases.pop()
        # Targets form a single cycle: each player hunts the next one.
        for i in range(len(player_list)-1):
            player_list[i].target = player_list[i+1].alias
        player_list[-1].target = player_list[0].alias
        for player in player_list:
            player.inbox[0].append("Hello " + str(player) + ",\n" +
                "Your target is Mr. " + player.target + ".")
            self.emitter.update(player.cookie, player._to_dict())
        self.start_turn()

    def start_turn(self):
        # Open a new turn and a fresh (empty) inbox page for every player.
        self.turns.append(Turn(len(self.turns)+1))
        for player in self.players.values():
            player.inbox.append([])

    def _validate_send(self, mover_alias, move):
        # A send must come from the mover and go to some other player.
        other_players = [a for a in ALIASES if a != mover_alias]
        send_from = move.get("from")
        send_to = move.get("to")
        if send_from != mover_alias or \
                send_to not in other_players:
            return False
        return True

    def _validate_spoof(self, mover_alias, move):
        # A spoof is declared by the mover but forges any distinct
        # from/to pair of aliases (including the mover's own).
        spoofed_from = move.get("from")
        spoofed_to = move.get("to")
        spoofer = move.get("spoofer")
        if spoofer != mover_alias or spoofed_to == spoofed_from:
            return False
        if spoofed_to not in ALIASES or \
                spoofed_from not in ALIASES:
            return False
        return True

    def _validate_wiretap(self, mover_alias, move):
        # A wiretap watches either the incoming or outgoing mail of any
        # alias (tapping yourself is allowed by the original rules).
        target = move.get("target")
        tapper = move.get("tapper")
        direction = move.get("direction")
        if tapper != mover_alias or target not in ALIASES:
            return False
        if direction != "incoming" and direction != "outgoing":
            return False
        return True

    def _validate_ambush(self, mover_alias, move):
        # BUG FIX: this validator previously took an extra 'other_players'
        # parameter, but the dispatcher in _validate_moves calls every
        # validator as f(mover_alias, move) -- so any ambush submission
        # raised TypeError. The parameter was dead anyway (it was
        # immediately recomputed here).
        other_players = [a for a in ALIASES if a != mover_alias]
        target = move.get("target")
        attacker = move.get("attacker")
        if attacker != mover_alias or target not in other_players:
            return False
        return True

    def _validate_moves(self, mover_alias, move_list):
        # Each turn grants 4 action points; every move type has a fixed
        # cost and must individually pass its validator.
        action_points = 4
        validate_map = {"send": self._validate_send,
                        "spoof": self._validate_spoof,
                        "wiretap": self._validate_wiretap,
                        "ambush": self._validate_ambush}
        cost_map = {"send": 1,
                    "spoof": 2,
                    "wiretap": 3,
                    "ambush": 4}
        for move in move_list:
            move_type = move.get("move_type")
            if move_type not in validate_map:
                return False
            if not validate_map[move_type](mover_alias, move):
                return False
            action_points -= cost_map[move_type]
        return action_points >= 0

    def process_moves(self, player_cookie, move_list):
        """Accept a player's move submission for the current turn.

        Returns True when the moves were valid and recorded; resolves the
        turn as a side effect once all five players have submitted.
        """
        if self.turns == []:
            return False
        current_turn = self.turns[-1]
        submitting_player = self.players[player_cookie]
        if self._validate_moves(submitting_player.alias, move_list):
            if current_turn.submit(submitting_player.alias, move_list):
                if current_turn.missing_players() == set():
                    self.resolve_turn()
                return True
        return False

    def resolve_turn(self):
        """Deliver mail, wiretap reports and attack results, then either
        end the game (on a kill) or start the next turn."""
        ending_turn = self.turns[-1]
        alias_map = {player.alias: player
                     for player
                     in self.players.values()}
        # Deliver every (possibly spoofed) message to its recipient.
        for message in ending_turn.messages:
            report = '"{}"\n- Mr. {}'.format(
                message.get("message"),
                message.get("from"))
            alias_map[message.get("to")].inbox[-1].append(report)
        # Each wiretap reports all traffic to (incoming) or from (outgoing)
        # its target this turn.
        for wiretap in ending_turn.wiretaps:
            report = "From your wiretap:\n"
            in_out = "to" if wiretap.get("direction") == "incoming" else "from"
            for message in ending_turn.messages:
                if message.get(in_out) == wiretap.get("target"):
                    new_message = 'Mr. {}:\n"{}"\n- Mr. {}'.format(
                        message.get("to"),
                        message.get("message"),
                        message.get("from"))
                    report += new_message
            alias_map[wiretap.get("tapper")].inbox[-1].append(report)
        # An ambush only kills when two or more players hit the same target.
        hits = {alias: []
                for alias
                in alias_map.keys()}
        for attack in ending_turn.attacks:
            hits[attack.get("target")].append(attack.get("attacker"))
        for target, attackers in hits.items():
            if len(attackers) > 1:
                alias_map[target].inbox[-1].append("You have been killed.")
                self.end_game()
                return
            elif len(attackers) > 0:
                alias_map[target].inbox[-1].append(
                    "You were unsucessfully attacked by Mr. " + attackers[0])
        for cookie, player in self.players.items():
            self.emitter.update(cookie, player._to_dict())
        self.start_turn()

    def end_game(self):
        # Tell everyone the game is over and push the final state out.
        for cookie, player in self.players.items():
            player.inbox.append(["Game Over."])
            self.emitter.update(cookie, player._to_dict())
class Player:
    """One seat at the table: session cookie, chosen nickname, and the
    secret alias/target assigned when the game starts.

    The inbox is a list of pages, one page (list of strings) per turn.
    """

    def __init__(self, cookie, nickname):
        self.cookie = cookie
        self.name = nickname
        self.inbox = [[]]      # page 0 holds the pre-game welcome mail
        self.alias = None      # assigned by Game.start_game()
        self.target = None     # alias this player must eliminate

    def __str__(self):
        # Players are always addressed by alias, never by nickname.
        return "Mr. " + self.alias

    def _to_dict(self):
        """Serialize the client-visible state for the emitter."""
        snapshot = dict(
            name=self.name,
            inbox=self.inbox,
            alias=self.alias,
            target=self.target,
        )
        return snapshot
class Turn:
    """Collects every player's moves for one round.

    A turn is complete when all five aliases have submitted; moves are
    bucketed by kind so Game.resolve_turn() can process them in order.
    """

    def __init__(self, turn_number):
        self.turn_number = turn_number
        self.players = set(ALIASES)   # everyone who must submit
        self.submitted = set()        # aliases that already have
        self.messages = []            # 'send' and 'spoof' moves
        self.wiretaps = []            # 'wiretap' moves
        self.attacks = []             # 'ambush' moves

    def submit(self, alias, moves):
        """Record *moves* for *alias*; a second submission is rejected."""
        if alias in self.submitted:
            return False
        buckets = {
            "send": self.messages,
            "spoof": self.messages,
            "wiretap": self.wiretaps,
            "ambush": self.attacks,
        }
        for move in moves:
            bucket = buckets.get(move.get("move_type"))
            if bucket is not None:
                bucket.append(move)
        self.submitted.add(alias)
        return True

    def missing_players(self):
        """Return the set of aliases that have not submitted yet."""
        return self.players.difference(self.submitted)
| gpl-2.0 |
SlimRoms/android_external_chromium | chrome/common/extensions/docs/examples/apps/hello-python/main.py | 70 | 5222 | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import users
from google.appengine.api import urlfetch
from google.appengine.ext.webapp import template
from google.appengine.api.urlfetch import DownloadError
import oauth2
import urllib
import logging
import os
import time
from django.utils import simplejson
# Configuration for talking to the Chrome Web Store licensing API.
# 'anonymous' consumer credentials are the documented defaults; the token,
# token secret and app id placeholders must be filled in before deploying.
CONFIG = {
  'oauth_consumer_key': 'anonymous',
  'oauth_consumer_secret': 'anonymous',
  'license_server': 'https://www.googleapis.com',
  'license_path': '%(server)s/chromewebstore/v1/licenses/%(appid)s/%(userid)s',
  'oauth_token': 'INSERT OAUTH TOKEN HERE',
  'oauth_token_secret': 'INSERT OAUTH TOKEN SECRET HERE',
  'app_id': 'INSERT APPLICATION ID HERE',
}

# Check to see if the server has been deployed. In the dev server, this
# env variable will start with 'Development', in production, it will start with
# 'Google App Engine'
IS_PRODUCTION = os.environ['SERVER_SOFTWARE'].startswith('Google App Engine')

# Valid access levels that may be returned by the license server.
VALID_ACCESS_LEVELS = ['FREE_TRIAL', 'FULL']
def fetch_license_data(userid):
  """Fetch the raw license document for one user.

  Builds the per-user license URL from CONFIG and issues an OAuth-signed
  GET request to the license server.

  Args:
    userid: OpenID of the user you are checking access for.

  Returns:
    The server's response body as text.
  """
  request_url = CONFIG['license_path'] % {
      'server': CONFIG['license_server'],
      'appid': CONFIG['app_id'],
      'userid': urllib.quote_plus(userid),
  }
  token = oauth2.Token(key=CONFIG['oauth_token'],
                       secret=CONFIG['oauth_token_secret'])
  consumer = oauth2.Consumer(key=CONFIG['oauth_consumer_key'],
                             secret=CONFIG['oauth_consumer_secret'])
  logging.debug('Requesting %s' % request_url)
  client = oauth2.Client(consumer, token)
  resp, content = client.request(request_url, 'GET')
  logging.debug('Got response code %s, content %s' % (resp, content))
  return content
def parse_license_data(userid):
  """Returns the license for a given user as a structured object.

  Args:
    userid: The OpenID of the user to check.

  Returns:
    An object with the following parameters:
      error:   True if something went wrong, False otherwise.
      message: A descriptive message if error is True.
      access:  One of 'NO', 'FREE_TRIAL', or 'FULL' depending on the access.
  """
  license = {'error': False, 'message': '', 'access': 'NO'}
  try:
    response_text = fetch_license_data(userid)
  except DownloadError:
    # BUG FIX: the old code set the error fields but then fell through to
    # the 'json' checks below with the local name 'json' unbound, raising
    # NameError on any fetch failure. Return the error object directly.
    logging.exception('Could not fetch license data')
    license['error'] = True
    license['message'] = 'Could not fetch license data'
    return license
  try:
    logging.debug('Attempting to JSON parse: %s' % response_text)
    json = simplejson.loads(response_text)
    logging.debug('Got license server response: %s' % json)
  except ValueError:
    # Same NameError hazard as above: bail out once parsing fails.
    logging.exception('Could not parse response as JSON: %s' % response_text)
    license['error'] = True
    license['message'] = 'Could not parse the license server response'
    return license
  if json.has_key('error'):
    license['error'] = True
    license['message'] = json['error']['message']
  elif json['result'] == 'YES' and json['accessLevel'] in VALID_ACCESS_LEVELS:
    license['access'] = json['accessLevel']
  return license
class MainHandler(webapp.RequestHandler):
  """Request handler class."""

  def get(self):
    """Handler for GET requests."""
    user = users.get_current_user()
    if not user:
      # Not signed in: render only a login link. Force the OpenID login
      # endpoint to be for Google accounts only, since the license server
      # doesn't support any other type of OpenID provider.
      login_url = users.create_login_url(dest_url='/',
          federated_identity='google.com/accounts/o8/id')
      context = {
        'user_login': login_url,
      }
    else:
      if IS_PRODUCTION:
        # We should use federated_identity in production, since the license
        # server requires an OpenID
        userid = user.federated_identity()
      else:
        # On the dev server, we won't have access to federated_identity, so
        # just use a default OpenID which will never return YES.
        # If you want to test different response values on the development
        # server, just change this default value (e.g. append '-yes' or
        # '-trial').
        userid = ('https://www.google.com/accounts/o8/id?'
                  'id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
      context = {
        'license': parse_license_data(userid),
        'user_name': user.nickname(),
        'user_id': userid,
        'user_logout': users.create_logout_url(self.request.uri),
      }
    # Render a simple template
    template_path = os.path.join(
        os.path.dirname(__file__), 'templates', 'index.html')
    self.response.out.write(template.render(template_path, context))
# Script entry point: route "/" to MainHandler and hand the WSGI app to the
# App Engine runtime.
if __name__ == '__main__':
  application = webapp.WSGIApplication([
    ('/', MainHandler),
  ], debug=False)
  util.run_wsgi_app(application)
| bsd-3-clause |
tomquas/mongrel2 | examples/chat/chat.py | 94 | 1233 | import simplejson as json
from mongrel2 import handler
# UUID identifying this handler instance to the mongrel2 server.
sender_id = "82209006-86FF-4982-B5EA-D1E29E55D481"
# Pull requests from mongrel2 on :9999, publish responses back on :9998.
conn = handler.Connection(sender_id, "tcp://127.0.0.1:9999",
                          "tcp://127.0.0.1:9998")
# users maps mongrel2 connection id -> nickname; user_list caches the
# nickname list sent in 'userList' replies.
users = {}
user_list = []
while True:
    # Best-effort server: on any receive/decode failure, log and keep going.
    try:
        req = conn.recv_json()
    except:
        print "FAILED RECV JSON"
        continue
    data = req.data
    print "DATA", data, req.conn_id
    if data["type"] == "join":
        # Announce the join to everyone already present, then send the
        # newcomer the current user list.
        conn.deliver_json(req.sender, users.keys(), data)
        users[req.conn_id] = data['user']
        user_list = [u[1] for u in users.items()]
        conn.reply_json(req, {'type': 'userList', 'users': user_list})
    elif data["type"] == "disconnect":
        print "DISCONNECTED", req.conn_id
        if req.conn_id in users:
            # Tag the event with the leaver's nickname before broadcasting.
            data['user'] = users[req.conn_id]
            del users[req.conn_id]
            if len(users.keys()) > 0:
                conn.deliver_json(req.sender, users.keys(), data)
            user_list = [u[1] for u in users.items()]
    elif req.conn_id not in users:
        # Message from an unregistered connection: register it rather than
        # relaying the message.
        users[req.conn_id] = data['user']
    elif data['type'] == "msg":
        # Ordinary chat message: fan out to every registered connection.
        conn.deliver_json(req.sender, users.keys(), data)
    print "REGISTERED USERS:", len(users)
| bsd-3-clause |
youdonghai/intellij-community | python/lib/Lib/distutils/command/sdist.py | 98 | 18046 | """distutils.command.sdist
Implements the Distutils 'sdist' command (create a source distribution)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: sdist.py 61268 2008-03-06 07:14:26Z martin.v.loewis $"
import sys, os, string
from types import *
from glob import glob
from distutils.core import Command
from distutils import dir_util, dep_util, file_util, archive_util
from distutils.text_file import TextFile
from distutils.errors import *
from distutils.filelist import FileList
from distutils import log
def show_formats ():
    """Print all possible values for the 'formats' option (used by
    the "--help-formats" command-line option).
    """
    from distutils.fancy_getopt import FancyGetopt
    from distutils.archive_util import ARCHIVE_FORMATS
    # Present each archive format as a pseudo-option so FancyGetopt can
    # render the same aligned help layout used for real options.
    entries = [("formats=" + name, None, ARCHIVE_FORMATS[name][2])
               for name in ARCHIVE_FORMATS.keys()]
    entries.sort()
    pretty_printer = FancyGetopt(entries)
    pretty_printer.print_help(
        "List of available source distribution formats:")
class sdist (Command):
description = "create a source distribution (tarball, zip file, etc.)"
user_options = [
('template=', 't',
"name of manifest template file [default: MANIFEST.in]"),
('manifest=', 'm',
"name of manifest file [default: MANIFEST]"),
('use-defaults', None,
"include the default file set in the manifest "
"[default; disable with --no-defaults]"),
('no-defaults', None,
"don't include the default file set"),
('prune', None,
"specifically exclude files/directories that should not be "
"distributed (build tree, RCS/CVS dirs, etc.) "
"[default; disable with --no-prune]"),
('no-prune', None,
"don't automatically exclude anything"),
('manifest-only', 'o',
"just regenerate the manifest and then stop "
"(implies --force-manifest)"),
('force-manifest', 'f',
"forcibly regenerate the manifest and carry on as usual"),
('formats=', None,
"formats for source distribution (comma-separated list)"),
('keep-temp', 'k',
"keep the distribution tree around after creating " +
"archive file(s)"),
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
]
boolean_options = ['use-defaults', 'prune',
'manifest-only', 'force-manifest',
'keep-temp']
help_options = [
('help-formats', None,
"list available distribution formats", show_formats),
]
negative_opt = {'no-defaults': 'use-defaults',
'no-prune': 'prune' }
default_format = { 'posix': 'gztar',
'java': 'gztar',
'nt': 'zip' }
def initialize_options (self):
# 'template' and 'manifest' are, respectively, the names of
# the manifest template and manifest file.
self.template = None
self.manifest = None
# 'use_defaults': if true, we will include the default file set
# in the manifest
self.use_defaults = 1
self.prune = 1
self.manifest_only = 0
self.force_manifest = 0
self.formats = None
self.keep_temp = 0
self.dist_dir = None
self.archive_files = None
def finalize_options (self):
if self.manifest is None:
self.manifest = "MANIFEST"
if self.template is None:
self.template = "MANIFEST.in"
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError, \
"don't know how to create source distributions " + \
"on platform %s" % os.name
bad_format = archive_util.check_archive_formats(self.formats)
if bad_format:
raise DistutilsOptionError, \
"unknown archive format '%s'" % bad_format
if self.dist_dir is None:
self.dist_dir = "dist"
def run (self):
# 'filelist' contains the list of files that will make up the
# manifest
self.filelist = FileList()
# Ensure that all required meta-data is given; warn if not (but
# don't die, it's not *that* serious!)
self.check_metadata()
# Do whatever it takes to get the list of files to process
# (process the manifest template, read an existing manifest,
# whatever). File list is accumulated in 'self.filelist'.
self.get_file_list()
# If user just wanted us to regenerate the manifest, stop now.
if self.manifest_only:
return
# Otherwise, go ahead and create the source distribution tarball,
# or zipfile, or whatever.
self.make_distribution()
def check_metadata (self):
"""Ensure that all required elements of meta-data (name, version,
URL, (author and author_email) or (maintainer and
maintainer_email)) are supplied by the Distribution object; warn if
any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in ('name', 'version', 'url'):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
if missing:
self.warn("missing required meta-data: " +
string.join(missing, ", "))
if metadata.author:
if not metadata.author_email:
self.warn("missing meta-data: if 'author' supplied, " +
"'author_email' must be supplied too")
elif metadata.maintainer:
if not metadata.maintainer_email:
self.warn("missing meta-data: if 'maintainer' supplied, " +
"'maintainer_email' must be supplied too")
else:
self.warn("missing meta-data: either (author and author_email) " +
"or (maintainer and maintainer_email) " +
"must be supplied")
# check_metadata ()
def get_file_list (self):
"""Figure out the list of files to include in the source
distribution, and put it in 'self.filelist'. This might involve
reading the manifest template (and writing the manifest), or just
reading the manifest, or just using the default file set -- it all
depends on the user's options and the state of the filesystem.
"""
# If we have a manifest template, see if it's newer than the
# manifest; if so, we'll regenerate the manifest.
template_exists = os.path.isfile(self.template)
if template_exists:
template_newer = dep_util.newer(self.template, self.manifest)
# The contents of the manifest file almost certainly depend on the
# setup script as well as the manifest template -- so if the setup
# script is newer than the manifest, we'll regenerate the manifest
# from the template. (Well, not quite: if we already have a
# manifest, but there's no template -- which will happen if the
# developer elects to generate a manifest some other way -- then we
# can't regenerate the manifest, so we don't.)
self.debug_print("checking if %s newer than %s" %
(self.distribution.script_name, self.manifest))
setup_newer = dep_util.newer(self.distribution.script_name,
self.manifest)
# cases:
# 1) no manifest, template exists: generate manifest
# (covered by 2a: no manifest == template newer)
# 2) manifest & template exist:
# 2a) template or setup script newer than manifest:
# regenerate manifest
# 2b) manifest newer than both:
# do nothing (unless --force or --manifest-only)
# 3) manifest exists, no template:
# do nothing (unless --force or --manifest-only)
# 4) no manifest, no template: generate w/ warning ("defaults only")
manifest_outofdate = (template_exists and
(template_newer or setup_newer))
force_regen = self.force_manifest or self.manifest_only
manifest_exists = os.path.isfile(self.manifest)
neither_exists = (not template_exists and not manifest_exists)
# Regenerate the manifest if necessary (or if explicitly told to)
if manifest_outofdate or neither_exists or force_regen:
if not template_exists:
self.warn(("manifest template '%s' does not exist " +
"(using default file list)") %
self.template)
self.filelist.findall()
if self.use_defaults:
self.add_defaults()
if template_exists:
self.read_template()
if self.prune:
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
# Don't regenerate the manifest, just read it in.
else:
self.read_manifest()
# get_file_list ()
def add_defaults (self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
standards = [('README', 'README.txt'), self.distribution.script_name]
for fn in standards:
if type(fn) is TupleType:
alts = fn
got_it = 0
for fn in alts:
if os.path.exists(fn):
got_it = 1
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
string.join(alts, ', '))
else:
if os.path.exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
if files:
self.filelist.extend(files)
if self.distribution.has_pure_modules():
build_py = self.get_finalized_command('build_py')
self.filelist.extend(build_py.get_source_files())
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
# add_defaults ()
def read_template (self):
"""Read and parse manifest template file named by self.template.
(usually "MANIFEST.in") The parsing and processing is done by
'self.filelist', which updates itself accordingly.
"""
log.info("reading manifest template '%s'", self.template)
template = TextFile(self.template,
strip_comments=1,
skip_blanks=1,
join_lines=1,
lstrip_ws=1,
rstrip_ws=1,
collapse_join=1)
while 1:
line = template.readline()
if line is None: # end of file
break
try:
self.filelist.process_template_line(line)
except DistutilsTemplateError, msg:
self.warn("%s, line %d: %s" % (template.filename,
template.current_line,
msg))
# read_template ()
def prune_file_list (self):
"""Prune off branches that might slip into the file list as created
by 'read_template()', but really don't belong there:
* the build tree (typically "build")
* the release tree itself (only an issue if we ran "sdist"
previously with --keep-temp, or it aborted)
* any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
"""
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
self.filelist.exclude_pattern(r'(^|/)(RCS|CVS|\.svn|\.hg|\.git|\.bzr|_darcs)/.*', is_regex=1)
def write_manifest (self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
self.execute(file_util.write_file,
(self.manifest, self.filelist.files),
"writing manifest file '%s'" % self.manifest)
# write_manifest ()
def read_manifest (self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest)
try:
while 1:
line = manifest.readline()
if line == '': # end of file
break
if line[-1] == '\n':
line = line[0:-1]
self.filelist.append(line)
finally:
manifest.close()
# read_manifest ()
def make_release_tree (self, base_dir, files):
"""Create the directory tree that will become the source
distribution archive. All directories implied by the filenames in
'files' are created under 'base_dir', and then we hard link or copy
(if hard linking is unavailable) those files into place.
Essentially, this duplicates the developer's source tree, but in a
directory named after the distribution, containing only the files
to be distributed.
"""
# Create all the directories under 'base_dir' necessary to
# put 'files' there; the 'mkpath()' is just so we don't die
# if the manifest happens to be empty.
self.mkpath(base_dir)
dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
# And walk over the list of files, either making a hard link (if
# os.link exists) to each one that doesn't already exist in its
# corresponding location under 'base_dir', or copying each file
# that's out-of-date in 'base_dir'. (Usually, all files will be
# out-of-date, because by default we blow away 'base_dir' when
# we're done making the distribution archives.)
if hasattr(os, 'link'): # can make hard links on this system
link = 'hard'
msg = "making hard links in %s..." % base_dir
else: # nope, have to copy
link = None
msg = "copying files to %s..." % base_dir
if not files:
log.warn("no files to distribute -- empty manifest?")
else:
log.info(msg)
for file in files:
if not os.path.isfile(file):
log.warn("'%s' not a regular file -- skipping" % file)
else:
dest = os.path.join(base_dir, file)
self.copy_file(file, dest, link=link)
self.distribution.metadata.write_pkg_info(base_dir)
# make_release_tree ()
def make_distribution (self):
"""Create the source distribution(s). First, we create the release
tree with 'make_release_tree()'; then, we create all required
archive files (according to 'self.formats') from the release tree.
Finally, we clean up by blowing away the release tree (unless
'self.keep_temp' is true). The list of archive files created is
stored so it can be retrieved later by 'get_archive_files()'.
"""
# Don't warn about missing meta-data here -- should be (and is!)
# done elsewhere.
base_dir = self.distribution.get_fullname()
base_name = os.path.join(self.dist_dir, base_dir)
self.make_release_tree(base_dir, self.filelist.files)
archive_files = [] # remember names of files we create
for fmt in self.formats:
file = self.make_archive(base_name, fmt, base_dir=base_dir)
archive_files.append(file)
self.distribution.dist_files.append(('sdist', '', file))
self.archive_files = archive_files
if not self.keep_temp:
dir_util.remove_tree(base_dir, dry_run=self.dry_run)
    def get_archive_files (self):
        """Return the list of archive files created when the command
        was run, or None if the command hasn't run yet.
        """
        # Populated by make_distribution(); until then it holds the
        # command's initial value.
        return self.archive_files
# class sdist
| apache-2.0 |
ojengwa/odoo | openerp/tools/cache.py | 226 | 6865 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# decorator makes wrappers that have the same API as their wrapped function;
# this is important for the openerp.api.guess() that relies on signatures
from collections import defaultdict
from decorator import decorator
from inspect import getargspec
import logging
_logger = logging.getLogger(__name__)
class ormcache_counter(object):
    """Hit/miss/error statistics for one cached orm method."""
    __slots__ = ['hit', 'miss', 'err']

    def __init__(self):
        # Every counter starts from zero.
        self.hit = self.miss = self.err = 0

    @property
    def ratio(self):
        """Hit ratio as a percentage; 0.0 before any lookup happened."""
        total = self.hit + self.miss
        return 100.0 * self.hit / (total or 1)

# Statistics registry: maps (dbname, model name, method) to its counter.
STAT = defaultdict(ormcache_counter)
class ormcache(object):
    """ LRU cache decorator for orm methods. """

    def __init__(self, skiparg=2, size=8192, multi=None, timeout=None):
        # NOTE(review): only 'skiparg' is actually used; 'size', 'multi' and
        # 'timeout' appear to be accepted for API compatibility only -- confirm.
        self.skiparg = skiparg

    def __call__(self, method):
        # Wrap `method`; the wrapper keeps the original signature (via the
        # decorator library) and exposes `clear_cache` for invalidation.
        self.method = method
        lookup = decorator(self.lookup, method)
        lookup.clear_cache = self.clear
        return lookup

    def lru(self, model):
        """Return (cache mapping, key prefix, stats counter) for `model`."""
        counter = STAT[(model.pool.db_name, model._name, self.method)]
        return model.pool.cache, (model._name, self.method), counter

    def lookup(self, method, *args, **kwargs):
        d, key0, counter = self.lru(args[0])
        # Cache key: the key prefix plus all positional args past `skiparg`.
        key = key0 + args[self.skiparg:]
        try:
            r = d[key]
            counter.hit += 1
            return r
        except KeyError:
            # Cache miss: compute the value and store it under `key`.
            counter.miss += 1
            value = d[key] = self.method(*args, **kwargs)
            return value
        except TypeError:
            # Unhashable key: bypass the cache and call through directly.
            counter.err += 1
            return self.method(*args, **kwargs)

    def clear(self, model, *args):
        """ Remove *args entry from the cache or all keys if *args is undefined """
        d, key0, _ = self.lru(model)
        if args:
            _logger.warn("ormcache.clear arguments are deprecated and ignored "
                         "(while clearing caches on (%s).%s)",
                         model._name, self.method.__name__)
        # All entries for this (model, method) share the key0 prefix.
        d.clear_prefix(key0)
        model.pool._any_cache_cleared = True
class ormcache_context(ormcache):
    """ormcache variant that also keys on a whitelisted subset of the
    `context` dict argument."""

    def __init__(self, skiparg=2, size=8192, accepted_keys=()):
        super(ormcache_context,self).__init__(skiparg,size)
        # Only these context keys take part in the cache key.
        self.accepted_keys = accepted_keys

    def __call__(self, method):
        # remember which argument is context
        args = getargspec(method)[0]
        self.context_pos = args.index('context')
        return super(ormcache_context, self).__call__(method)

    def lookup(self, method, *args, **kwargs):
        d, key0, counter = self.lru(args[0])

        # Note. The decorator() wrapper (used in __call__ above) will resolve
        # arguments, and pass them positionally to lookup(). This is why context
        # is not passed through kwargs!
        if self.context_pos < len(args):
            context = args[self.context_pos] or {}
        else:
            context = kwargs.get('context') or {}
        # Key fragment built only from the accepted context keys present.
        ckey = [(k, context[k]) for k in self.accepted_keys if k in context]

        # Beware: do not take the context from args!
        key = key0 + args[self.skiparg:self.context_pos] + tuple(ckey)
        try:
            r = d[key]
            counter.hit += 1
            return r
        except KeyError:
            counter.miss += 1
            value = d[key] = self.method(*args, **kwargs)
            return value
        except TypeError:
            # Unhashable key: call through without caching.
            counter.err += 1
            return self.method(*args, **kwargs)
class ormcache_multi(ormcache):
    """ormcache variant where the argument at position `multi` is a list of
    ids; each id is cached under its own key and misses are recomputed in
    one batched method call."""

    def __init__(self, skiparg=2, size=8192, multi=3):
        assert skiparg <= multi
        super(ormcache_multi, self).__init__(skiparg, size)
        # Index of the positional argument holding the list of ids.
        self.multi = multi

    def lookup(self, method, *args, **kwargs):
        d, key0, counter = self.lru(args[0])
        # Key shared by all ids: every cached argument except the ids list.
        base_key = key0 + args[self.skiparg:self.multi] + args[self.multi+1:]
        ids = args[self.multi]
        result = {}
        missed = []

        # first take what is available in the cache
        for i in ids:
            key = base_key + (i,)
            try:
                result[i] = d[key]
                counter.hit += 1
            except Exception:
                counter.miss += 1
                missed.append(i)

        if missed:
            # call the method for the ids that were not in the cache
            args = list(args)
            args[self.multi] = missed
            result.update(method(*args, **kwargs))

            # store those new results back in the cache
            for i in missed:
                key = base_key + (i,)
                d[key] = result[i]

        return result
class dummy_cache(object):
    """Drop-in replacement for the cache decorators that caches nothing."""

    def __init__(self, *args, **kwargs):
        # Accept any decorator configuration and ignore it.
        pass

    def __call__(self, fn):
        # Decorating is a no-op; only the `clear_cache` API is preserved.
        fn.clear_cache = self.clear
        return fn

    def clear(self, *args, **kwargs):
        # Nothing is ever cached, so clearing is a no-op as well.
        pass
def log_ormcache_stats(sig=None, frame=None):
    """ Log statistics of ormcache usage by database, model, and method. """
    # (sig, frame) matches the signature of a signal handler -- presumably so
    # this can be registered for e.g. SIGUSR1; both arguments are unused.
    from openerp.modules.registry import RegistryManager
    import threading
    me = threading.currentThread()
    me_dbname = me.dbname
    entries = defaultdict(int)
    # Count live cache entries per (dbname, model, method).
    for dbname, reg in RegistryManager.registries.iteritems():
        for key in reg.cache.iterkeys():
            entries[(dbname,) + key[:2]] += 1
    for key, count in sorted(entries.items()):
        dbname, model_name, method = key
        # Tag the current thread with the database being reported on;
        # NOTE(review): looks like the logging setup reads me.dbname -- confirm.
        me.dbname = dbname
        stat = STAT[key]
        _logger.info("%6d entries, %6d hit, %6d miss, %6d err, %4.1f%% ratio, for %s.%s",
                     count, stat.hit, stat.miss, stat.err, stat.ratio, model_name, method.__name__)
    # Restore the thread's original database name.
    me.dbname = me_dbname
# For backward compatibility
cache = ormcache
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
samuaz/kernel_msm_gee | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on access."""
    return defaultdict(autodict)

# Registries filled in by the define_* callbacks below:
#   event name -> field name -> {'delim': ..., 'values': {raw value: label}}
flag_fields = autodict()
symbolic_fields = autodict()

def define_flag_field(event_name, field_name, delim):
    """Register the delimiter used when rendering a flag field."""
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    """Map one flag bit of a field to its symbolic label."""
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    """Nothing to register for the field itself; the values carry the data."""
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    """Map one raw value of a symbolic field to its label."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render `value` as the delimiter-joined flag labels registered for
    (event_name, field_name); bits with no registered label are dropped."""
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() instead of list.sort(): dict views have no .sort() on
        # Python 3, and this keeps the iteration order deterministic.
        keys = sorted(flag_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                # A zero value maps directly to the label registered for 0.
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # Clear the bit so leftover bits don't re-match.
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Return the label registered for `value` under (event_name, field_name),
    or "" when the field or value is unknown."""
    string = ""
    if symbolic_fields[event_name][field_name]:
        # sorted() instead of list.sort(): dict views have no .sort() on
        # Python 3, and this keeps the iteration order deterministic.
        keys = sorted(symbolic_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Raw trace flag bits and their symbolic names.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Decode a trace-flags bitmask into a ' | '-separated list of names."""
    parts = []
    for bit in trace_flags.keys():
        if not value and not bit:
            # A zero mask gets the dedicated "NONE" name.
            parts.append("NONE")
            break
        if bit and (value & bit) == bit:
            parts.append(trace_flags[bit])
            value &= ~bit
    return " | ".join(parts)
def taskState(state):
    """Translate a scheduler task state number into its letter code,
    or "Unknown" for any unmapped value."""
    names = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }
    return names.get(state, "Unknown")
class EventHeaders:
    """Common header fields carried by every trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full timestamp in nanoseconds."""
        return self.secs * (10 ** 9) + self.nsecs

    def ts_format(self):
        """Timestamp rendered as 'seconds.microseconds'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
rahuldan/sympy | sympy/logic/inference.py | 76 | 7641 | """Inference in propositional logic"""
from __future__ import print_function, division
from sympy.logic.boolalg import And, Not, conjuncts, to_cnf
from sympy.core.compatibility import ordered
from sympy.core.sympify import sympify
def literal_symbol(literal):
    """
    The symbol in this literal (without the negation).

    Examples
    ========

    >>> from sympy.abc import A
    >>> from sympy.logic.inference import literal_symbol
    >>> literal_symbol(A)
    A
    >>> literal_symbol(~A)
    A

    """
    # The two booleans count as (degenerate) literals of themselves.
    if literal is True or literal is False:
        return literal

    try:
        if literal.is_Symbol:
            return literal
        if literal.is_Not:
            # Peel one negation and recurse on the wrapped expression.
            return literal_symbol(literal.args[0])
        raise ValueError
    except (AttributeError, ValueError):
        raise ValueError("Argument must be a boolean literal.")
raise ValueError("Argument must be a boolean literal.")
def satisfiable(expr, algorithm="dpll2", all_models=False):
    """
    Check satisfiability of a propositional sentence.
    Returns a model when it succeeds.
    Returns {true: true} for trivially true expressions.

    On setting all_models to True, if given expr is satisfiable then
    returns a generator of models. However, if expr is unsatisfiable
    then returns a generator containing the single element False.

    Examples
    ========

    >>> from sympy.abc import A, B
    >>> from sympy.logic.inference import satisfiable
    >>> satisfiable(A & ~B)
    {A: True, B: False}
    >>> satisfiable(A & ~A)
    False
    >>> satisfiable(True)
    {True: True}
    >>> next(satisfiable(A & ~A, all_models=True))
    False
    >>> models = satisfiable((A >> B) & B, all_models=True)
    >>> next(models)
    {A: False, B: True}
    >>> next(models)
    {A: True, B: True}
    >>> def use_models(models):
    ...     for model in models:
    ...         if model:
    ...             # Do something with the model.
    ...             print(model)
    ...         else:
    ...             # Given expr is unsatisfiable.
    ...             print("UNSAT")
    >>> use_models(satisfiable(A >> ~A, all_models=True))
    {A: False}
    >>> use_models(satisfiable(A ^ A, all_models=True))
    UNSAT

    """
    # Both DPLL back ends operate on conjunctive normal form.
    expr = to_cnf(expr)
    if algorithm == "dpll":
        from sympy.logic.algorithms.dpll import dpll_satisfiable
        return dpll_satisfiable(expr)
    elif algorithm == "dpll2":
        # Only the dpll2 back end supports all_models.
        from sympy.logic.algorithms.dpll2 import dpll_satisfiable
        return dpll_satisfiable(expr, all_models)
    # Any other algorithm name is unsupported.
    raise NotImplementedError
def valid(expr):
    """
    Check validity of a propositional sentence.
    A valid propositional sentence is True under every assignment.

    Examples
    ========

    >>> from sympy.abc import A, B
    >>> from sympy.logic.inference import valid
    >>> valid(A | ~A)
    True
    >>> valid(A | B)
    False

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Validity

    """
    # expr is valid exactly when its negation has no satisfying assignment.
    negation_model = satisfiable(Not(expr))
    return not negation_model
def pl_true(expr, model={}, deep=False):
    """
    Returns whether the given assignment is a model or not.

    If the assignment does not specify the value for every proposition,
    this may return None to indicate 'not obvious'.

    Parameters
    ==========

    model : dict, optional, default: {}
        Mapping of symbols to boolean values to indicate assignment.
    deep: boolean, optional, default: False
        Gives the value of the expression under partial assignments
        correctly. May still return None to indicate 'not obvious'.

    Examples
    ========

    >>> from sympy.abc import A, B, C
    >>> from sympy.logic.inference import pl_true
    >>> pl_true( A & B, {A: True, B: True})
    True
    >>> pl_true(A & B, {A: False})
    False
    >>> pl_true(A & B, {A: True})
    >>> pl_true(A & B, {A: True}, deep=True)
    >>> pl_true(A >> (B >> A))
    >>> pl_true(A >> (B >> A), deep=True)
    True
    >>> pl_true(A & ~A)
    >>> pl_true(A & ~A, deep=True)
    False
    >>> pl_true(A & B & (~A | ~B), {A: True})
    >>> pl_true(A & B & (~A | ~B), {A: True}, deep=True)
    False

    """
    from sympy.core.symbol import Symbol
    from sympy.logic.boolalg import BooleanFunction
    boolean = (True, False)

    def _validate(expr):
        # Accept only symbols, the two booleans, and boolean functions
        # composed recursively of valid sub-expressions.
        if isinstance(expr, Symbol) or expr in boolean:
            return True
        if not isinstance(expr, BooleanFunction):
            return False
        return all(_validate(arg) for arg in expr.args)

    if expr in boolean:
        return expr
    expr = sympify(expr)
    if not _validate(expr):
        raise ValueError("%s is not a valid boolean expression" % expr)
    # Drop non-boolean assignments from the model before substituting.
    model = dict((k, v) for k, v in model.items() if v in boolean)
    result = expr.subs(model)
    if result in boolean:
        return bool(result)
    if deep:
        # Probe the residual expression under a full all-True assignment,
        # then settle the answer via validity / unsatisfiability checks.
        model = dict((k, True) for k in result.atoms())
        if pl_true(result, model):
            if valid(result):
                return True
        else:
            if not satisfiable(result):
                return False
    # Still undetermined under this (partial) assignment.
    return None
def entails(expr, formula_set=()):
    """
    Check whether the given formula_set entails an expr.

    If formula_set is empty then it returns the validity of expr.

    Examples
    ========

    >>> from sympy.abc import A, B, C
    >>> from sympy.logic.inference import entails
    >>> entails(A, [A >> B, B >> C])
    False
    >>> entails(C, [A >> B, B >> C, A])
    True
    >>> entails(A >> B)
    False
    >>> entails(A >> (B >> A))
    True

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Logical_consequence

    """
    # Immutable default instead of the shared mutable {} the original used;
    # the argument is only ever copied via list(), so behaviour is unchanged.
    # expr is entailed by formula_set iff (formula_set AND ~expr) is UNSAT.
    clauses = list(formula_set)
    clauses.append(Not(expr))
    return not satisfiable(And(*clauses))
class KB(object):
    """Base class for all knowledge bases"""

    def __init__(self, sentence=None):
        # Clauses live in a set so duplicates collapse automatically.
        self.clauses_ = set()
        if sentence:
            self.tell(sentence)

    def tell(self, sentence):
        """Add a sentence to the KB; subclasses must implement this."""
        raise NotImplementedError

    def ask(self, query):
        """Query the KB; subclasses must implement this."""
        raise NotImplementedError

    def retract(self, sentence):
        """Remove a sentence from the KB; subclasses must implement this."""
        raise NotImplementedError

    @property
    def clauses(self):
        """The stored clauses in canonical (ordered) form."""
        return list(ordered(self.clauses_))
class PropKB(KB):
    """A KB for Propositional Logic. Inefficient, with no indexing."""

    def tell(self, sentence):
        """Add the sentence's clauses to the KB

        Examples
        ========

        >>> from sympy.logic.inference import PropKB
        >>> from sympy.abc import x, y
        >>> l = PropKB()
        >>> l.clauses
        []

        >>> l.tell(x | y)
        >>> l.clauses
        [Or(x, y)]

        >>> l.tell(y)
        >>> l.clauses
        [y, Or(x, y)]

        """
        # Convert to CNF and store each conjunct as a separate clause.
        for c in conjuncts(to_cnf(sentence)):
            self.clauses_.add(c)

    def ask(self, query):
        """Checks if the query is true given the set of clauses.

        Examples
        ========

        >>> from sympy.logic.inference import PropKB
        >>> from sympy.abc import x, y
        >>> l = PropKB()
        >>> l.tell(x & ~y)
        >>> l.ask(x)
        True
        >>> l.ask(y)
        False

        """
        return entails(query, self.clauses_)

    def retract(self, sentence):
        """Remove the sentence's clauses from the KB

        Examples
        ========

        >>> from sympy.logic.inference import PropKB
        >>> from sympy.abc import x, y
        >>> l = PropKB()
        >>> l.clauses
        []

        >>> l.tell(x | y)
        >>> l.clauses
        [Or(x, y)]

        >>> l.retract(x | y)
        >>> l.clauses
        []

        """
        # discard() (not remove()): retracting an absent clause is a no-op.
        for c in conjuncts(to_cnf(sentence)):
            self.clauses_.discard(c)
| bsd-3-clause |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_2/django/utils/regex_helper.py | 361 | 12079 | """
Functions for reversing a regular expression (used in reverse URL resolving).
Used internally by Django and not intended for external use.
This is not, and is not intended to be, a complete reg-exp decompiler. It
should be good enough for a large class of URLS, however.
"""
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
ESCAPE_MAPPINGS = {
    "A": None,    # \A: zero-width anchor -- contributes no characters
    "b": None,    # \b: zero-width word boundary
    "B": None,    # \B: zero-width non-boundary
    "d": u"0",    # any digit -> representative "0"
    "D": u"x",    # any non-digit -> representative "x"
    "s": u" ",    # whitespace -> single space
    "S": u"x",    # non-whitespace -> "x"
    "w": u"x",    # word character -> "x"
    "W": u"!",    # non-word character -> "!"
    "Z": None,    # \Z: zero-width end-of-string anchor
}
class Choice(list):
    """
    Used to represent multiple possibilities at this point in a pattern string.
    We use a distinguished type, rather than a list, so that the usage in the
    code is clear.
    """

class Group(list):
    """
    Used to represent a capturing group in the pattern string.

    Holds a (format placeholder, parameter name) pair such as
    ("%(name)s", "name"); the name may be None for ignorable groups.
    """

class NonCapture(list):
    """
    Used to represent a non-capturing group in the pattern string.
    """
def normalize(pattern):
    """
    Given a reg-exp pattern, normalizes it to a list of forms that suffice for
    reverse matching. This does the following:

    (1) For any repeating sections, keeps the minimum number of occurrences
        permitted (this means zero for optional groups).
    (2) If an optional group includes parameters, include one occurrence of
        that group (along with the zero occurrence case from step (1)).
    (3) Select the first (essentially an arbitrary) element from any character
        class. Select an arbitrary character for any unordered class (e.g. '.'
        or '\w') in the pattern.
    (4) Ignore comments and any of the reg-exp flags that won't change
        what we construct ("iLmsu"). "(?x)" is an error, however.
    (5) Raise an error on all other non-capturing (?...) forms (e.g.
        look-ahead and look-behind matches) and any disjunctive ('|')
        constructs.

    Django's URLs for forward resolving are either all positional arguments or
    all keyword arguments. That is assumed here, as well. Although reverse
    resolving can be done using positional args when keyword args are
    specified, the two cannot be mixed in the same reverse() call.
    """
    # Do a linear scan to work out the special features of this pattern. The
    # idea is that we scan once here and collect all the information we need to
    # make future decisions.
    result = []
    non_capturing_groups = []
    # When False, the bottom of the loop reuses the already-fetched character
    # instead of pulling a new one (look-ahead from quantifier parsing).
    consume_next = True
    pattern_iter = next_char(iter(pattern))
    num_args = 0

    # A "while" loop is used here because later on we need to be able to peek
    # at the next character and possibly go around without consuming another
    # one at the top of the loop.
    try:
        ch, escaped = pattern_iter.next()
    except StopIteration:
        return zip([u''], [[]])

    try:
        while True:
            if escaped:
                result.append(ch)
            elif ch == '.':
                # Replace "any character" with an arbitrary representative.
                result.append(u".")
            elif ch == '|':
                # FIXME: One day we should do this, but not in 1.0.
                raise NotImplementedError
            elif ch == "^":
                pass
            elif ch == '$':
                break
            elif ch == ')':
                # This can only be the end of a non-capturing group, since all
                # other unescaped parentheses are handled by the grouping
                # section later (and the full group is handled there).
                #
                # We regroup everything inside the capturing group so that it
                # can be quantified, if necessary.
                start = non_capturing_groups.pop()
                inner = NonCapture(result[start:])
                result = result[:start] + [inner]
            elif ch == '[':
                # Replace ranges with the first character in the range.
                ch, escaped = pattern_iter.next()
                result.append(ch)
                ch, escaped = pattern_iter.next()
                while escaped or ch != ']':
                    ch, escaped = pattern_iter.next()
            elif ch == '(':
                # Some kind of group.
                ch, escaped = pattern_iter.next()
                if ch != '?' or escaped:
                    # A positional group
                    name = "_%d" % num_args
                    num_args += 1
                    result.append(Group(((u"%%(%s)s" % name), name)))
                    walk_to_end(ch, pattern_iter)
                else:
                    ch, escaped = pattern_iter.next()
                    if ch in "iLmsu#":
                        # All of these are ignorable. Walk to the end of the
                        # group.
                        walk_to_end(ch, pattern_iter)
                    elif ch == ':':
                        # Non-capturing group
                        non_capturing_groups.append(len(result))
                    elif ch != 'P':
                        # Anything else, other than a named group, is something
                        # we cannot reverse.
                        raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
                    else:
                        ch, escaped = pattern_iter.next()
                        if ch != '<':
                            raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
                        # We are in a named capturing group. Extract the name
                        # and then skip to the end.
                        name = []
                        ch, escaped = pattern_iter.next()
                        while ch != '>':
                            name.append(ch)
                            ch, escaped = pattern_iter.next()
                        param = ''.join(name)
                        result.append(Group(((u"%%(%s)s" % param), param)))
                        walk_to_end(ch, pattern_iter)
            elif ch in "*?+{":
                # Quantifiers affect the previous item in the result list.
                count, ch = get_quantifier(ch, pattern_iter)
                if ch:
                    # We had to look ahead, but it wasn't needed to compute the
                    # quantifier, so use this character next time around the
                    # main loop.
                    consume_next = False
                if count == 0:
                    if contains(result[-1], Group):
                        # If we are quantifying a capturing group (or
                        # something containing such a group) and the minimum is
                        # zero, we must also handle the case of one occurrence
                        # being present. All the quantifiers (except {0,0},
                        # which we conveniently ignore) that have a 0 minimum
                        # also allow a single occurrence.
                        result[-1] = Choice([None, result[-1]])
                    else:
                        result.pop()
                elif count > 1:
                    result.extend([result[-1]] * (count - 1))
            else:
                # Anything else is a literal.
                result.append(ch)

            if consume_next:
                ch, escaped = pattern_iter.next()
            else:
                consume_next = True
    except StopIteration:
        pass
    except NotImplementedError:
        # A case of using the disjunctive form. No results for you!
        return zip([u''], [[]])

    return zip(*flatten_result(result))
def next_char(input_iter):
    """
    An iterator that yields the next character from "pattern_iter", respecting
    escape sequences. An escaped character is replaced by a representative of
    its class (e.g. \w -> "x"). If the escaped character is one that is
    skipped, it is not returned (the next character is returned instead).

    Yields the next character, along with a boolean indicating whether it is a
    raw (unescaped) character or not.
    """
    for ch in input_iter:
        if ch != '\\':
            yield ch, False
            continue
        # Character following the backslash: map it to a class representative
        # via ESCAPE_MAPPINGS, defaulting to the character itself.
        ch = input_iter.next()
        representative = ESCAPE_MAPPINGS.get(ch, ch)
        if representative is None:
            # Ignorable (zero-width) escape: emit nothing for it.
            continue
        yield representative, True
def walk_to_end(ch, input_iter):
    """
    The iterator is currently inside a capturing group. We want to walk to the
    close of this group, skipping over any nested groups and handling escaped
    parentheses correctly.
    """
    # Depth of groups opened beyond the one we are trying to close; seeing
    # the opening '(' itself means one extra level is already open.
    depth = 1 if ch == '(' else 0
    for ch, escaped in input_iter:
        if escaped:
            continue
        if ch == '(':
            depth += 1
        elif ch == ')':
            if not depth:
                # Matching close of the original group: done.
                return
            depth -= 1
def get_quantifier(ch, input_iter):
    """
    Parse a quantifier from the input, where "ch" is the first character in the
    quantifier.

    Returns the minimum number of occurences permitted by the quantifier and
    either None or the next character from the input_iter if the next character
    is not part of the quantifier.
    """
    if ch in '*?+':
        try:
            ch2, escaped = input_iter.next()
        except StopIteration:
            ch2 = None
        if ch2 == '?':
            # Non-greedy modifier: irrelevant for reversing, swallow it.
            ch2 = None
        if ch == '+':
            return 1, ch2
        # '*' and '?' both permit zero occurrences.
        return 0, ch2

    # '{m[,n]}' form: collect everything up to (and excluding) the brace.
    quant = []
    while ch != '}':
        ch, escaped = input_iter.next()
        quant.append(ch)
    quant = quant[:-1]
    values = ''.join(quant).split(',')

    # Consume the trailing '?', if necessary.
    try:
        ch, escaped = input_iter.next()
    except StopIteration:
        ch = None
    if ch == '?':
        ch = None
    # Only the minimum count matters for reverse construction.
    return int(values[0]), ch
def contains(source, inst):
    """
    Returns True if the "source" contains an instance of "inst". False,
    otherwise.
    """
    if isinstance(source, inst):
        return True
    # Only non-capturing wrappers are searched recursively.
    if isinstance(source, NonCapture):
        return any(contains(elt, inst) for elt in source)
    return False
def flatten_result(source):
    """
    Turns the given source sequence into a list of reg-exp possibilities and
    their arguments. Returns a list of strings and a list of argument lists.
    Each of the two lists will be of the same length.
    """
    if source is None:
        # An omitted optional branch contributes one empty possibility.
        return [u''], [[]]
    if isinstance(source, Group):
        if source[1] is None:
            params = []
        else:
            params = [source[1]]
        return [source[0]], [params]
    result = [u'']
    result_args = [[]]
    pos = last = 0
    for pos, elt in enumerate(source):
        if isinstance(elt, basestring):
            continue
        # Flush the run of literal characters since the previous element.
        piece = u''.join(source[last:pos])
        if isinstance(elt, Group):
            piece += elt[0]
            param = elt[1]
        else:
            param = None
        last = pos + 1
        for i in range(len(result)):
            result[i] += piece
            if param:
                result_args[i].append(param)
        if isinstance(elt, (Choice, NonCapture)):
            if isinstance(elt, NonCapture):
                elt = [elt]
            # Flatten each alternative, then cross-combine with every
            # possibility accumulated so far.
            inner_result, inner_args = [], []
            for item in elt:
                res, args = flatten_result(item)
                inner_result.extend(res)
                inner_args.extend(args)
            new_result = []
            new_args = []
            for item, args in zip(result, result_args):
                for i_item, i_args in zip(inner_result, inner_args):
                    new_result.append(item + i_item)
                    new_args.append(args[:] + i_args)
            result = new_result
            result_args = new_args
    if pos >= last:
        # Trailing literal run after the final non-string element.
        piece = u''.join(source[last:])
        for i in range(len(result)):
            result[i] += piece
    return result, result_args
| mit |
Kiiv/CouchPotatoServer | libs/xmpp/commands.py | 200 | 16116 | ## $Id: commands.py,v 1.17 2007/08/28 09:54:15 normanr Exp $
## Ad-Hoc Command manager
## Mike Albon (c) 5th January 2005
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
"""This module is a ad-hoc command processor for xmpppy. It uses the plug-in mechanism like most of the core library. It depends on a DISCO browser manager.
There are 3 classes here, a command processor Commands like the Browser, and a command template plugin Command, and an example command.
To use this module:
Instansiate the module with the parent transport and disco browser manager as parameters.
'Plug in' commands using the command template.
The command feature must be added to existing disco replies where neccessary.
What it supplies:
Automatic command registration with the disco browser manager.
Automatic listing of commands in the public command list.
A means of handling requests, by redirection though the command manager.
"""
from protocol import *
from client import PlugIn
class Commands(PlugIn):
    """Commands is an ancestor of PlugIn and can be attached to any session.

    The commands class provides a lookup and browse mechanism. It follows the same principle of the Browser class, for Service Discovery to provide the list of commands, it adds the 'list' disco type to your existing disco handler function.

    How it works:
        The commands are added into the existing Browser on the correct nodes. When the command list is built the supplied discovery handler function needs to have a 'list' option in type. This then gets enumerated, all results returned as None are ignored.
        The command executed is then called using it's Execute method. All session management is handled by the command itself.
    """
    def __init__(self, browser):
        """Initialises class and sets up local variables"""
        PlugIn.__init__(self)
        DBG_LINE='commands'
        self._exported_methods=[]
        # Maps jid -> {node name -> {'disco': handler, 'execute': handler}};
        # the '' jid holds commands offered to every jid.
        self._handlers={'':{}}
        self._browser = browser

    def plugin(self, owner):
        """Makes handlers within the session"""
        # Plug into the session and the disco manager
        # We only need get and set, results are not needed by a service provider, only a service user.
        owner.RegisterHandler('iq',self._CommandHandler,typ='set',ns=NS_COMMANDS)
        owner.RegisterHandler('iq',self._CommandHandler,typ='get',ns=NS_COMMANDS)
        self._browser.setDiscoHandler(self._DiscoHandler,node=NS_COMMANDS,jid='')

    def plugout(self):
        """Removes handlers from the session"""
        # unPlug from the session and the disco manager
        self._owner.UnregisterHandler('iq',self._CommandHandler,ns=NS_COMMANDS)
        for jid in self._handlers:
            self._browser.delDiscoHandler(self._DiscoHandler,node=NS_COMMANDS)

    def _CommandHandler(self,conn,request):
        """The internal method to process the routing of command execution requests"""
        # This is the command handler itself.
        # We must:
        #   Pass on command execution to command handler
        #   (Do we need to keep session details here, or can that be done in the command?)
        jid = str(request.getTo())
        try:
            node = request.getTagAttr('command','node')
        except:
            # NOTE(review): bare except -- any failure to read the node
            # attribute is reported as a bad request.
            conn.send(Error(request,ERR_BAD_REQUEST))
            raise NodeProcessed
        # Prefer a jid-specific command; fall back to the generic ('') set.
        if self._handlers.has_key(jid):
            if self._handlers[jid].has_key(node):
                self._handlers[jid][node]['execute'](conn,request)
            else:
                conn.send(Error(request,ERR_ITEM_NOT_FOUND))
                raise NodeProcessed
        elif self._handlers[''].has_key(node):
            self._handlers[''][node]['execute'](conn,request)
        else:
            conn.send(Error(request,ERR_ITEM_NOT_FOUND))
            raise NodeProcessed

    def _DiscoHandler(self,conn,request,typ):
        """The internal method to process service discovery requests"""
        # This is the disco manager handler.
        if typ == 'items':
            # We must:
            #   Generate a list of commands and return the list
            #   * This handler does not handle individual commands disco requests.
            # Pseudo:
            #   Enumerate the 'item' disco of each command for the specified jid
            #   Build responce and send
            #   To make this code easy to write we add an 'list' disco type, it returns a tuple or 'none' if not advertised
            list = []
            items = []
            jid = str(request.getTo())
            # Get specific jid based results
            if self._handlers.has_key(jid):
                for each in self._handlers[jid].keys():
                    items.append((jid,each))
            else:
                # Get generic results
                for each in self._handlers[''].keys():
                    items.append(('',each))
            if items != []:
                for each in items:
                    # Ask each command to describe itself; None means the
                    # command chose not to be advertised.
                    i = self._handlers[each[0]][each[1]]['disco'](conn,request,'list')
                    if i != None:
                        list.append(Node(tag='item',attrs={'jid':i[0],'node':i[1],'name':i[2]}))
                iq = request.buildReply('result')
                if request.getQuerynode(): iq.setQuerynode(request.getQuerynode())
                iq.setQueryPayload(list)
                conn.send(iq)
            else:
                conn.send(Error(request,ERR_ITEM_NOT_FOUND))
            raise NodeProcessed
        elif typ == 'info':
            return {'ids':[{'category':'automation','type':'command-list'}],'features':[]}

    def addCommand(self,name,cmddisco,cmdexecute,jid=''):
        """The method to call if adding a new command to the session, the requred parameters of cmddisco and cmdexecute are the methods to enable that command to be executed"""
        # This command takes a command object and the name of the command for registration
        # We must:
        #   Add item into disco
        #   Add item into command list
        if not self._handlers.has_key(jid):
            self._handlers[jid]={}
            self._browser.setDiscoHandler(self._DiscoHandler,node=NS_COMMANDS,jid=jid)
        if self._handlers[jid].has_key(name):
            raise NameError,'Command Exists'
        else:
            self._handlers[jid][name]={'disco':cmddisco,'execute':cmdexecute}
        # Need to add disco stuff here
        self._browser.setDiscoHandler(cmddisco,node=name,jid=jid)

    def delCommand(self,name,jid=''):
        """Removed command from the session"""
        # This command takes a command object and the name used for registration
        # We must:
        #   Remove item from disco
        #   Remove item from command list
        if not self._handlers.has_key(jid):
            raise NameError,'Jid not found'
        if not self._handlers[jid].has_key(name):
            raise NameError, 'Command not found'
        else:
            #Do disco removal here
            command = self.getCommand(name,jid)['disco']
            del self._handlers[jid][name]
            self._browser.delDiscoHandler(command,node=name,jid=jid)

    def getCommand(self,name,jid=''):
        """Returns the command tuple"""
        # This gets the command object with name
        # We must:
        #   Return item that matches this name
        if not self._handlers.has_key(jid):
            raise NameError,'Jid not found'
        elif not self._handlers[jid].has_key(name):
            raise NameError,'Command not found'
        else:
            return self._handlers[jid][name]
class Command_Handler_Prototype(PlugIn):
    """This is a prototype command handler, as each command uses a disco method
    and execute method you can implement it any way you like, however this is
    my first attempt at making a generic handler that you can hang process
    stages on too. There is an example command below.

    The parameters are as follows:
    name : the name of the command within the jabber environment
    description : the natural language description
    discofeatures : the features supported by the command
    initial : the initial command in the form of {'execute':commandname}

    All stages set the 'actions' dictionary for each session to represent the possible options available.
    """
    name = 'examplecommand'
    count = 0
    description = 'an example command'
    discofeatures = [NS_COMMANDS,NS_DATA]
    # This is the command template
    def __init__(self,jid=''):
        """Set up the class"""
        PlugIn.__init__(self)
        DBG_LINE='command'
        self.sessioncount = 0
        # Maps session id -> per-session state; presumably each entry holds
        # at least 'jid' and 'actions' (see Execute below) -- confirm.
        self.sessions = {}
        # Disco information for command list pre-formatted as a tuple
        self.discoinfo = {'ids':[{'category':'automation','type':'command-node','name':self.description}],'features': self.discofeatures}
        self._jid = jid

    def plugin(self,owner):
        """Plug command into the commands class"""
        # The owner in this instance is the Command Processor
        self._commands = owner
        self._owner = owner._owner
        self._commands.addCommand(self.name,self._DiscoHandler,self.Execute,jid=self._jid)

    def plugout(self):
        """Remove command from the commands class"""
        self._commands.delCommand(self.name,self._jid)

    def getSessionID(self):
        """Returns an id for the command session"""
        self.count = self.count+1
        return 'cmd-%s-%d'%(self.name,self.count)

    def Execute(self,conn,request):
        """The method that handles all the commands, and routes them to the correct method for that stage."""
        # New request or old?
        try:
            session = request.getTagAttr('command','sessionid')
        except:
            session = None
        try:
            action = request.getTagAttr('command','action')
        except:
            action = None
        if action == None: action = 'execute'
        # Check session is in session list
        if self.sessions.has_key(session):
            if self.sessions[session]['jid']==request.getFrom():
                # Check action is vaild
                if self.sessions[session]['actions'].has_key(action):
                    # Execute next action
                    self.sessions[session]['actions'][action](conn,request)
                else:
                    # Stage not presented as an option
                    self._owner.send(Error(request,ERR_BAD_REQUEST))
                    raise NodeProcessed
            else:
                # Jid and session don't match. Go away imposter
                self._owner.send(Error(request,ERR_BAD_REQUEST))
                raise NodeProcessed
        elif session != None:
            # Not on this sessionid you won't.
            self._owner.send(Error(request,ERR_BAD_REQUEST))
            raise NodeProcessed
        else:
            # New session
            self.initial[action](conn,request)

    def _DiscoHandler(self,conn,request,type):
        """The handler for discovery events"""
        if type == 'list':
            return (request.getTo(),self.name,self.description)
        elif type == 'items':
            return []
        elif type == 'info':
            return self.discoinfo
class TestCommand(Command_Handler_Prototype):
    """Example command. Read the source to understand how the prototype works.

    It presents a small wizard that guides the user through calculating the
    diameter or the area of a circle from its radius.
    """
    name = 'testcommand'
    description = 'a noddy example command'
    def __init__(self,jid=''):
        """Init internal constants (initial stage table)."""
        Command_Handler_Prototype.__init__(self,jid)
        # A brand-new session always starts with 'execute' -> first stage.
        self.initial = {'execute':self.cmdFirstStage}
    def cmdFirstStage(self,conn,request):
        """Stage 1: create the session (if new) and send the operation-type form."""
        # This is the only place this lookup should be repeated, as all other
        # stages are reached with a SessionID already assigned.
        try:
            session = request.getTagAttr('command','sessionid')
        except:
            session = None
        if session == None:
            session = self.getSessionID()
            self.sessions[session]={'jid':request.getFrom(),'actions':{'cancel':self.cmdCancel,'next':self.cmdSecondStage,'execute':self.cmdSecondStage},'data':{'type':None}}
        # As this is the first stage we only send a form
        reply = request.buildReply('result')
        form = DataForm(title='Select type of operation',data=['Use the combobox to select the type of calculation you would like to do, then click Next',DataField(name='calctype',desc='Calculation Type',value=self.sessions[session]['data']['type'],options=[['circlediameter','Calculate the Diameter of a circle'],['circlearea','Calculate the area of a circle']],typ='list-single',required=1)])
        replypayload = [Node('actions',attrs={'execute':'next'},payload=[Node('next')]),form]
        reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':session,'status':'executing'},payload=replypayload)
        self._owner.send(reply)
        raise NodeProcessed
    def cmdSecondStage(self,conn,request):
        """Stage 2: record the chosen calculation type, then ask for the radius."""
        form = DataForm(node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA))
        self.sessions[request.getTagAttr('command','sessionid')]['data']['type']=form.getField('calctype').getValue()
        self.sessions[request.getTagAttr('command','sessionid')]['actions']={'cancel':self.cmdCancel,None:self.cmdThirdStage,'previous':self.cmdFirstStage,'execute':self.cmdThirdStage,'next':self.cmdThirdStage}
        # The form generation is split out to another method as it may be called by cmdThirdStage
        self.cmdSecondStageReply(conn,request)
    def cmdSecondStageReply(self,conn,request):
        """Send the radius-entry form (also used to re-prompt on bad input)."""
        reply = request.buildReply('result')
        form = DataForm(title = 'Enter the radius', data=['Enter the radius of the circle (numbers only)',DataField(desc='Radius',name='radius',typ='text-single')])
        replypayload = [Node('actions',attrs={'execute':'complete'},payload=[Node('complete'),Node('prev')]),form]
        reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'executing'},payload=replypayload)
        self._owner.send(reply)
        raise NodeProcessed
    def cmdThirdStage(self,conn,request):
        """Stage 3: compute the result and complete the command."""
        form = DataForm(node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA))
        try:
            num = float(form.getField('radius').getValue())
        except:
            # Bad input: re-prompt with the radius form (raises NodeProcessed,
            # so execution never reaches the calculation below).
            self.cmdSecondStageReply(conn,request)
        from math import pi
        if self.sessions[request.getTagAttr('command','sessionid')]['data']['type'] == 'circlearea':
            result = (num**2)*pi
        else:
            result = num*2*pi
        reply = request.buildReply('result')
        form = DataForm(typ='result',data=[DataField(desc='result',name='result',value=result)])
        reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'completed'},payload=[form])
        self._owner.send(reply)
        raise NodeProcessed
    def cmdCancel(self,conn,request):
        """Abort the command and discard its session state."""
        reply = request.buildReply('result')
        reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'cancelled'})
        self._owner.send(reply)
        del self.sessions[request.getTagAttr('command','sessionid')]
| gpl-3.0 |
CentroGeo/geonode | geonode/geoserver/tasks.py | 2 | 24675 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
import re
import shutil
from django.conf import settings
from django.db import transaction
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.utils.translation import ugettext_lazy as _
from django.contrib.staticfiles.templatetags import staticfiles
from celery.utils.log import get_task_logger
from geonode.celery_app import app
from geonode.tasks.tasks import (
AcquireLock,
FaultTolerantTask)
from geonode import GeoNodeException
from geonode.upload import signals
from geonode.layers.models import (
Layer, UploadSession)
from geonode.base.models import (
ResourceBase)
from geonode.utils import (
is_monochromatic_image,
set_resource_default_links)
from geonode.geoserver.upload import geoserver_upload
from geonode.security.utils import spec_perms_is_empty
from geonode.catalogue.models import catalogue_post_save
from .helpers import (
gs_catalog,
ogc_server_settings,
gs_slurp,
set_styles,
get_sld_for,
set_layer_style,
cascading_delete,
fetch_gs_resource,
create_gs_thumbnail,
set_attributes_from_geoserver,
_invalidate_geowebcache_layer,
_stylefilterparams_geowebcache_layer)
logger = get_task_logger(__name__)
@app.task(
    bind=True,
    base=FaultTolerantTask,
    name='geonode.geoserver.tasks.geoserver_update_layers',
    queue='geoserver.catalog',
    expires=600,
    acks_late=False,
    autoretry_for=(Exception, ),
    retry_kwargs={'max_retries': 3, 'countdown': 10},
    retry_backoff=True,
    retry_backoff_max=700,
    retry_jitter=True)
def geoserver_update_layers(self, *args, **kwargs):
    """Synchronize GeoServer layers into GeoNode (thin wrapper over ``gs_slurp``).

    The body only runs if the per-task lock can be acquired, which keeps two
    identical slurp tasks from running concurrently.
    """
    task_lock_id = f'{self.request.id}'
    with AcquireLock(task_lock_id) as task_lock:
        if task_lock.acquire() is True:
            return gs_slurp(*args, **kwargs)
@app.task(
    bind=True,
    base=FaultTolerantTask,
    name='geonode.geoserver.tasks.geoserver_set_style',
    queue='geoserver.catalog',
    expires=30,
    acks_late=False,
    autoretry_for=(Exception, ),
    retry_kwargs={'max_retries': 3, 'countdown': 10},
    retry_backoff=True,
    retry_backoff_max=700,
    retry_jitter=True)
def geoserver_set_style(
        self,
        instance_id,
        base_file):
    """
    Apply the SLD style stored at ``base_file`` to the Layer ``instance_id``.

    Raises Layer.DoesNotExist when the layer row is not present yet, so
    Celery's autoretry can pick the task up again later.  Any failure while
    reading or applying the style is logged and swallowed.
    """
    instance = None
    try:
        instance = Layer.objects.get(id=instance_id)
    except Layer.DoesNotExist:
        logger.debug(f"Layer id {instance_id} does not exist yet!")
        raise
    lock_id = f'{self.request.id}'
    with AcquireLock(lock_id) as lock:
        if lock.acquire() is True:
            try:
                # BUG FIX: use a context manager so the file handle is always
                # closed (the original `open(...).read()` leaked the handle
                # until garbage collection).
                with open(base_file, "rb") as sld_fp:
                    sld = sld_fp.read()
                set_layer_style(
                    instance,
                    instance.alternate,
                    sld,
                    base_file=base_file)
            except Exception as e:
                logger.exception(e)
def _load_sld_content(sld_file, tempdir):
    """Return the SLD document text for ``sld_file``, or None if unreadable.

    Mirrors the original lookup order: try ``sld_file`` as given first and,
    only when that is not a regular file, fall back to the same name inside
    ``tempdir``.  Any read error yields None.
    """
    path = None
    if os.path.isfile(sld_file):
        path = sld_file
    elif tempdir and os.path.exists(tempdir):
        candidate = os.path.join(tempdir, sld_file)
        if os.path.isfile(candidate):
            path = candidate
    if path:
        try:
            # Context manager guarantees the handle is closed even when
            # read() raises (the original leaked it in that case).
            with open(path, 'r') as sld_fp:
                return sld_fp.read()
        except Exception:
            pass
    return None


@app.task(
    bind=True,
    base=FaultTolerantTask,
    name='geonode.geoserver.tasks.geoserver_create_style',
    queue='geoserver.catalog',
    expires=30,
    acks_late=False,
    autoretry_for=(Exception, ),
    retry_kwargs={'max_retries': 3, 'countdown': 10},
    retry_backoff=True,
    retry_backoff_max=700,
    retry_jitter=True)
def geoserver_create_style(
        self,
        instance_id,
        name,
        sld_file,
        tempdir):
    """
    Set or create styles from an Upload Session.

    Parameters:
        instance_id: pk of the Layer the style belongs to.
        name: style name to create in GeoServer.
        sld_file: path of the uploaded SLD file (absolute, or relative to tempdir).
        tempdir: upload-session scratch directory used as a fallback root.

    Raises Layer.DoesNotExist when the layer row is not present yet, so
    Celery's autoretry can pick the task up again later.
    """
    instance = None
    try:
        instance = Layer.objects.get(id=instance_id)
    except Layer.DoesNotExist:
        logger.debug(f"Layer id {instance_id} does not exist yet!")
        raise
    lock_id = f'{self.request.id}'
    with AcquireLock(lock_id) as lock:
        if lock.acquire() is True and instance:
            sld = None
            if sld_file and os.path.exists(sld_file) and os.access(sld_file, os.R_OK):
                sld = _load_sld_content(sld_file, tempdir)
            # `is not None` keeps the original behavior for an empty SLD file
            # (an empty string still counts as "a style was provided").
            if sld is not None:
                if not gs_catalog.get_style(name=name, workspace=settings.DEFAULT_WORKSPACE):
                    style = gs_catalog.create_style(
                        name,
                        sld,
                        raw=True,
                        workspace=settings.DEFAULT_WORKSPACE)
                    gs_layer = gs_catalog.get_layer(name)
                    _default_style = gs_layer.default_style
                    gs_layer.default_style = style
                    gs_catalog.save(gs_layer)
                    set_styles(instance, gs_catalog)
                    try:
                        # Remove the previous default style that was replaced above.
                        gs_catalog.delete(_default_style)
                    except Exception as e:
                        logger.exception(e)
                else:
                    # Style already exists in GeoServer: just sync it back.
                    get_sld_for(gs_catalog, instance)
            else:
                # No readable SLD was provided: pull the style from GeoServer.
                get_sld_for(gs_catalog, instance)
@app.task(
    bind=True,
    base=FaultTolerantTask,
    name='geonode.geoserver.tasks.geoserver_finalize_upload',
    queue='geoserver.events',
    expires=600,
    acks_late=False,
    autoretry_for=(Exception, ),
    retry_kwargs={'max_retries': 3, 'countdown': 10},
    retry_backoff=True,
    retry_backoff_max=700,
    retry_jitter=True)
def geoserver_finalize_upload(
        self,
        import_id,
        instance_id,
        permissions,
        created,
        xml_file,
        sld_file,
        sld_uploaded,
        tempdir):
    """
    Finalize Layer and GeoServer configuration:
     - Sets Layer Metadata from XML and updates the GeoServer Layer accordingly.
     - Applies the uploaded SLD (or creates a style) for the layer.
     - Sets permissions, cleans up the temp dir and marks the Upload complete.

    Raises Layer.DoesNotExist when the layer row is not present yet so the
    Celery autoretry settings can re-schedule the task.
    """
    instance = None
    try:
        instance = Layer.objects.get(id=instance_id)
    except Layer.DoesNotExist:
        logger.debug(f"Layer id {instance_id} does not exist yet!")
        raise
    lock_id = f'{self.request.id}'
    with AcquireLock(lock_id) as lock:
        if lock.acquire() is True:
            from geonode.upload.models import Upload
            upload = Upload.objects.get(import_id=import_id)
            upload.layer = instance
            upload.save()
            try:
                # Update the upload sessions: not processed yet at this point.
                geonode_upload_sessions = UploadSession.objects.filter(resource=instance)
                geonode_upload_sessions.update(processed=False)
                instance.upload_session = geonode_upload_sessions.first()
            except Exception as e:
                logger.exception(e)
            # Sanity checks: normalize xml_file to a single path string or None.
            if isinstance(xml_file, list):
                if len(xml_file) > 0:
                    xml_file = xml_file[0]
                else:
                    xml_file = None
            elif not isinstance(xml_file, str):
                xml_file = None
            if xml_file and os.path.exists(xml_file) and os.access(xml_file, os.R_OK):
                instance.metadata_uploaded = True
            # Try progressively looser lookups for the GeoServer resource.
            try:
                gs_resource = gs_catalog.get_resource(
                    name=instance.name,
                    store=instance.store,
                    workspace=instance.workspace)
            except Exception:
                try:
                    gs_resource = gs_catalog.get_resource(
                        name=instance.alternate,
                        store=instance.store,
                        workspace=instance.workspace)
                except Exception:
                    try:
                        gs_resource = gs_catalog.get_resource(
                            name=instance.alternate or instance.typename)
                    except Exception:
                        gs_resource = None
            if gs_resource:
                # Updating GeoServer resource with the GeoNode title/abstract.
                gs_resource.title = instance.title
                gs_resource.abstract = instance.abstract
                gs_catalog.save(gs_resource)
                if gs_resource.store:
                    instance.storeType = gs_resource.store.resource_type
                    if not instance.alternate:
                        instance.alternate = f"{gs_resource.store.workspace.name}:{gs_resource.name}"
            # NOTE(review): these bound Celery tasks are invoked synchronously
            # here (Task.__call__ supplies `self`) -- confirm intended.
            if sld_uploaded:
                geoserver_set_style(instance.id, sld_file)
            else:
                geoserver_create_style(instance.id, instance.name, sld_file, tempdir)
            logger.debug(f'Finalizing (permissions and notifications) Layer {instance}')
            instance.handle_moderated_uploads()
            if permissions is not None and not spec_perms_is_empty(permissions):
                logger.debug(f'Setting permissions {permissions} for {instance.name}')
                instance.set_permissions(permissions, created=created)
            instance.save(notify=not created)
            try:
                logger.debug(f"... Cleaning up the temporary folders {tempdir}")
                if tempdir and os.path.exists(tempdir):
                    shutil.rmtree(tempdir)
            except Exception as e:
                logger.warning(e)
            finally:
                # The Upload row is marked complete even when cleanup failed.
                upload.complete = True
                upload.save()
            signals.upload_complete.send(sender=geoserver_finalize_upload, layer=instance)
@app.task(
    bind=True,
    base=FaultTolerantTask,
    name='geonode.geoserver.tasks.geoserver_post_save_layers',
    queue='geoserver.catalog',
    expires=3600,
    acks_late=False,
    autoretry_for=(Exception, ),
    retry_kwargs={'max_retries': 3, 'countdown': 10},
    retry_backoff=True,
    retry_backoff_max=700,
    retry_jitter=True)
def geoserver_post_save_layers(
        self,
        instance_id,
        *args, **kwargs):
    """
    Post-save synchronization between a GeoNode Layer and GeoServer:
    uploads the data if needed, pulls bbox/SRID/attribution back, refreshes
    catalogue records, links, attributes, styles and the thumbnail.

    Raises Layer.DoesNotExist when the layer row is not present yet so the
    Celery autoretry settings can re-schedule the task.
    """
    from geonode.geoserver.signals import geoserver_post_save_complete
    instance = None
    try:
        instance = Layer.objects.get(id=instance_id)
    except Layer.DoesNotExist:
        logger.debug(f"Layer id {instance_id} does not exist yet!")
        raise
    lock_id = f'{self.request.id}'
    with AcquireLock(lock_id) as lock:
        if lock.acquire() is True:
            # Don't run this signal if is a Layer from a remote service
            if getattr(instance, "remote_service", None) is not None or instance.storeType == "remoteStore":
                # Creating Layer Thumbnail by sending a signal
                geoserver_post_save_complete.send(
                    sender=instance.__class__, instance=instance, update_fields=['thumbnail_url'])
                return
            # Don't run this signal handler if it is a tile layer or a remote store (Service)
            # Currently only gpkg files containing tiles will have this type & will be served via MapProxy.
            if hasattr(instance, 'storeType') and getattr(instance, 'storeType') in ['tileStore', 'remoteStore']:
                # Creating Layer Thumbnail by sending a signal
                geoserver_post_save_complete.send(
                    sender=instance.__class__, instance=instance, update_fields=['thumbnail_url'])
                return instance
            if isinstance(instance, ResourceBase):
                if hasattr(instance, 'layer'):
                    instance = instance.layer
                else:
                    return
            # Mark sessions unprocessed and flag the resource dirty for the
            # duration of the sync (cleared in the finally below).
            geonode_upload_sessions = UploadSession.objects.filter(resource=instance)
            geonode_upload_sessions.update(processed=False)
            instance.set_dirty_state()
            gs_resource = None
            values = None
            _tries = 0
            _max_tries = getattr(ogc_server_settings, "MAX_RETRIES", 2)
            # If the store in None then it's a new instance from an upload,
            # only in this case run the geoserver_upload method
            if not instance.store or getattr(instance, 'overwrite', False):
                base_file, info = instance.get_base_file()
                # There is no need to process it if there is no file.
                if base_file is None:
                    return
                gs_name, workspace, values, gs_resource = geoserver_upload(
                    instance,
                    base_file.file.path,
                    instance.owner,
                    instance.name,
                    overwrite=True,
                    title=instance.title,
                    abstract=instance.abstract,
                    charset=instance.charset
                )
            # Retry the catalog lookup a bounded number of times.
            values, gs_resource = fetch_gs_resource(instance, values, _tries)
            while not gs_resource and _tries < _max_tries:
                values, gs_resource = fetch_gs_resource(instance, values, _tries)
                _tries += 1
            # Get metadata links
            metadata_links = []
            for link in instance.link_set.metadata():
                metadata_links.append((link.mime, link.name, link.url))
            if gs_resource:
                logger.debug(f"Found geoserver resource for this layer: {instance.name}")
                gs_resource.metadata_links = metadata_links
                instance.gs_resource = gs_resource
                # Update Attribution link
                if instance.poc:
                    # gsconfig now utilizes an attribution dictionary
                    gs_resource.attribution = {
                        'title': str(instance.poc),
                        'width': None,
                        'height': None,
                        'href': None,
                        'url': None,
                        'type': None}
                    profile = get_user_model().objects.get(username=instance.poc.username)
                    site_url = settings.SITEURL.rstrip('/') if settings.SITEURL.startswith('http') else settings.SITEURL
                    gs_resource.attribution_link = site_url + profile.get_absolute_url()
                # Iterate over values from geoserver.
                for key in ['alternate', 'store', 'storeType']:
                    # attr_name = key if 'typename' not in key else 'alternate'
                    # print attr_name
                    setattr(instance, key, values[key])
                try:
                    if settings.RESOURCE_PUBLISHING:
                        if instance.is_published != gs_resource.advertised:
                            gs_resource.advertised = 'true'
                    if not settings.FREETEXT_KEYWORDS_READONLY:
                        # AF: Warning - this won't allow people to have empty keywords on GeoNode
                        if len(instance.keyword_list()) == 0 and gs_resource.keywords:
                            for keyword in gs_resource.keywords:
                                if keyword not in instance.keyword_list():
                                    instance.keywords.add(keyword)
                    if any(instance.keyword_list()):
                        keywords = instance.keyword_list()
                        gs_resource.keywords = [kw for kw in list(set(keywords))]
                    # gs_resource should only be called if
                    # ogc_server_settings.BACKEND_WRITE_ENABLED == True
                    if getattr(ogc_server_settings, "BACKEND_WRITE_ENABLED", True):
                        gs_catalog.save(gs_resource)
                except Exception as e:
                    msg = (f'Error while trying to save resource named {gs_resource} in GeoServer, '
                           f'try to use: "{e}"')
                    e.args = (msg,)
                    logger.exception(e)
            # store the resource to avoid another geoserver call in the post_save
            """Get information from geoserver.
            The attributes retrieved include:
            * Bounding Box
            * SRID
            """
            try:
                # This is usually done in Layer.pre_save, however if the hooks
                # are bypassed by custom create/updates we need to ensure the
                # bbox is calculated properly.
                srid = gs_resource.projection
                bbox = gs_resource.native_bbox
                instance.set_bbox_polygon([bbox[0], bbox[2], bbox[1], bbox[3]], srid)
            except Exception as e:
                # Also covers gs_resource being None: fall back to stored values.
                logger.exception(e)
                srid = instance.srid
                bbox = instance.bbox
            if instance.srid:
                instance.srid_url = f"http://www.spatialreference.org/ref/{instance.srid.replace(':', '/').lower()}/"
            elif instance.bbox_polygon is not None:
                # Guessing 'EPSG:4326' by default
                instance.srid = 'EPSG:4326'
            else:
                raise GeoNodeException(_("Invalid Projection. Layer is missing CRS!"))
            to_update = {
                'title': instance.title or instance.name,
                'abstract': instance.abstract or "",
                'alternate': instance.alternate
            }
            if is_monochromatic_image(instance.thumbnail_url):
                to_update['thumbnail_url'] = staticfiles.static(settings.MISSING_THUMBNAIL)
            # Save all the modified information in the instance without triggering signals.
            try:
                with transaction.atomic():
                    ResourceBase.objects.filter(
                        id=instance.resourcebase_ptr.id).update(
                        **to_update)
                    # to_update['name'] = instance.name,
                    to_update['workspace'] = gs_resource.store.workspace.name
                    to_update['store'] = gs_resource.store.name
                    to_update['storeType'] = instance.storeType
                    to_update['typename'] = instance.alternate
                    Layer.objects.filter(id=instance.id).update(**to_update)
                    # Dealing with the BBOX: this is a trick to let GeoDjango storing original coordinates
                    instance.set_bbox_polygon([bbox[0], bbox[2], bbox[1], bbox[3]], 'EPSG:4326')
                    Layer.objects.filter(id=instance.id).update(
                        bbox_polygon=instance.bbox_polygon, srid=srid)
                    # Refresh from DB
                    instance.refresh_from_db()
            except Exception as e:
                logger.exception(e)
            try:
                with transaction.atomic():
                    # Parse a numeric SRID out of strings like 'EPSG:4326'.
                    match = re.match(r'^(EPSG:)?(?P<srid>\d{4,6})$', str(srid))
                    instance.bbox_polygon.srid = int(match.group('srid')) if match else 4326
                    Layer.objects.filter(id=instance.id).update(
                        ll_bbox_polygon=instance.bbox_polygon, srid=srid)
                    # Refresh from DB
                    instance.refresh_from_db()
            except Exception as e:
                logger.warning(e)
                try:
                    with transaction.atomic():
                        # Last resort: assume WGS84.
                        instance.bbox_polygon.srid = 4326
                        Layer.objects.filter(id=instance.id).update(
                            ll_bbox_polygon=instance.bbox_polygon, srid=srid)
                        # Refresh from DB
                        instance.refresh_from_db()
                except Exception as e:
                    logger.warning(e)
            # Refreshing CSW records
            logger.debug(f"... Updating the Catalogue entries for Layer {instance.title}")
            try:
                catalogue_post_save(instance=instance, sender=instance.__class__)
            except Exception as e:
                logger.exception(e)
            # Refreshing layer links
            logger.debug(f"... Creating Default Resource Links for Layer {instance.title}")
            try:
                set_resource_default_links(instance, instance, prune=True)
            except Exception as e:
                logger.exception(e)
            # Save layer attributes
            logger.debug(f"... Refresh GeoServer attributes list for Layer {instance.title}")
            try:
                set_attributes_from_geoserver(instance)
            except Exception as e:
                logger.exception(e)
            # Save layer styles
            logger.debug(f"... Refresh Legend links for Layer {instance.title}")
            try:
                set_styles(instance, gs_catalog)
            except Exception as e:
                logger.exception(e)
            # Invalidate GeoWebCache for the updated resource
            try:
                _stylefilterparams_geowebcache_layer(instance.alternate)
                _invalidate_geowebcache_layer(instance.alternate)
            except Exception:
                pass
            # Creating Layer Thumbnail by sending a signal
            geoserver_post_save_complete.send(
                sender=instance.__class__, instance=instance, update_fields=['thumbnail_url'])
            try:
                geonode_upload_sessions = UploadSession.objects.filter(resource=instance)
                geonode_upload_sessions.update(processed=True)
            except Exception as e:
                logger.exception(e)
            finally:
                # Always release the dirty flag set at the beginning.
                instance.clear_dirty_state()
            # Updating HAYSTACK Indexes if needed
            if settings.HAYSTACK_SEARCH:
                call_command('update_index')
@app.task(
    bind=True,
    base=FaultTolerantTask,
    name='geonode.geoserver.tasks.geoserver_create_thumbnail',
    queue='geoserver.events',
    expires=30,
    acks_late=False,
    autoretry_for=(Exception, ),
    retry_kwargs={'max_retries': 3, 'countdown': 10},
    retry_backoff=True,
    retry_backoff_max=700,
    retry_jitter=True)
def geoserver_create_thumbnail(self, instance_id, overwrite=True, check_bbox=True):
    """Render (or re-render) the thumbnail for a resource via ``create_gs_thumbnail``.

    Re-raises the lookup failure when the resource row does not exist yet;
    rendering failures trigger an explicit Celery retry.
    """
    try:
        resource = ResourceBase.objects.get(id=instance_id).get_real_instance()
    except Exception:
        logger.error(f"Resource id {instance_id} does not exist yet!")
        raise
    with AcquireLock(f'{self.request.id}') as lock:
        if lock.acquire() is True:
            try:
                create_gs_thumbnail(resource, overwrite=overwrite, check_bbox=check_bbox)
                logger.debug(f"... Created Thumbnail for Layer {resource.title}")
            except Exception as e:
                geoserver_create_thumbnail.retry(exc=e)
@app.task(
    bind=True,
    base=FaultTolerantTask,
    name='geonode.geoserver.tasks.geoserver_cascading_delete',
    queue='cleanup',
    expires=600,
    acks_late=False,
    autoretry_for=(Exception, ),
    retry_kwargs={'max_retries': 3, 'countdown': 10},
    retry_backoff=True,
    retry_backoff_max=700,
    retry_jitter=True)
def geoserver_cascading_delete(self, *args, **kwargs):
    """Delete a GeoServer resource and its dependents (wrapper over ``cascading_delete``).

    Guarded by a per-task lock so the same cleanup is never run twice at once.
    """
    with AcquireLock(f'{self.request.id}') as task_lock:
        if task_lock.acquire() is True:
            return cascading_delete(*args, **kwargs)
| gpl-3.0 |
NervanaSystems/neon | neon/data/dataloader_transformers.py | 1 | 7276 | from __future__ import division
import numpy as np
from neon import NervanaObject
class DataLoaderTransformer(NervanaObject):
    """
    DataLoaderTransformers are used to transform the output of a DataLoader.
    DataLoader doesn't have easy access to the device or graph, so any
    computation that should happen there should use a DataLoaderTransformer.

    Subclasses implement ``transform``; when ``index`` is given only that
    element of each minibatch tuple is transformed, otherwise the whole
    tuple is passed through ``transform``.
    """
    def __init__(self, dataloader, index=None):
        super(DataLoaderTransformer, self).__init__()
        self.dataloader = dataloader
        self.index = index
        if self.index is not None:
            # input shape is contiguous: flatten everything but the batch dim
            data_size = np.prod(self.dataloader.shapes()[index])
            self._shape = (data_size, self.be.bsz)

    def __getattr__(self, key):
        # delegate unknown attribute lookups to the wrapped dataloader
        return getattr(self.dataloader, key)

    def __iter__(self):
        for tup in self.dataloader:
            if self.index is None:
                yield self.transform(tup)
            else:
                ret = self.transform(tup[self.index])
                if ret is None:
                    raise ValueError(
                        '{} returned None from a transformer'.format(
                            self.__class__.__name__
                        )
                    )
                out = list(tup)
                out[self.index] = ret
                yield out

    def transform(self, t):
        """Transform one minibatch element; must be overridden by subclasses."""
        # BUG FIX: the original raised ``NotImplemented()``. NotImplemented is
        # a sentinel value, not an exception, and calling it raises TypeError
        # at runtime -- NotImplementedError is the correct exception here.
        raise NotImplementedError()
class OneHot(DataLoaderTransformer):
    """
    Expand the integer class labels at ``index`` into one-hot columns of
    length ``nclasses``.
    """
    def __init__(self, dataloader, index, nclasses, *args, **kwargs):
        super(OneHot, self).__init__(dataloader, index, *args, **kwargs)
        # persistent (nclasses, batch) device buffer, reused every minibatch
        self.output = self.be.iobuf(nclasses, parallelism='Data')

    def transform(self, t):
        # backend onehot scatters a 1 into each label's row
        self.output[:] = self.be.onehot(t, axis=0)
        return self.output
class PixelWiseOneHot(DataLoaderTransformer):
    """
    One-hot encode every pixel label at ``index`` into ``nclasses`` planes.
    The output buffer is allocated lazily on the first minibatch, once the
    incoming tensor's size is known.
    """
    def __init__(self, dataloader, index, nclasses, *args, **kwargs):
        super(PixelWiseOneHot, self).__init__(dataloader, index, *args, **kwargs)
        self.output = None
        self.nclasses = nclasses

    def transform(self, t):
        if self.output is None:
            # deferred allocation: size depends on t.shape[0]
            self.output = self.be.iobuf(self.nclasses * t.shape[0], dtype=np.int32)
            self.outview = self.output.reshape((self.nclasses, -1))
        # flatten all pixels into one row, then one-hot along the class axis
        self.outview[:] = self.be.onehot(t.reshape((1, -1)), axis=0)
        return self.output
class TypeCast(DataLoaderTransformer):
    """
    Cast the data at ``index`` to ``dtype``, moving it into device memory
    if it is not there already.
    """
    def __init__(self, dataloader, index, dtype, *args, **kwargs):
        super(TypeCast, self).__init__(
            dataloader, index=index, *args, **kwargs
        )
        # persistent device buffer in the target dtype, reused every minibatch
        self.output = self.be.iobuf(self._shape[0], dtype=dtype, parallelism='Data')

    def transform(self, t):
        # the assignment performs both the cast and the host->device copy
        self.output[:] = t
        return self.output
class Retuple(DataLoaderTransformer):
    """
    Regroup a dataloader tuple into ``(data, target)`` where each side is a
    single element (one index given) or a tuple of elements (several indices).
    """
    def __init__(self, dataloader, data=(0,), target=(1,), *args, **kwargs):
        super(Retuple, self).__init__(
            dataloader, index=None, *args, **kwargs
        )
        self._data = data
        self._target = target
        self.output = None

    def transform(self, t):
        def pick(indices):
            # one index -> the bare element, several -> a tuple of them
            if len(indices) == 1:
                return t[indices[0]]
            return tuple(t[i] for i in indices)

        return (pick(self._data), pick(self._target))
class BGRMeanSubtract(DataLoaderTransformer):
    """
    Subtract a per-channel pixel mean from the data at ``index``.
    Assumes the data layout is CxHxWxN with 3 channels.
    """
    def __init__(self, dataloader, index, pixel_mean=[127, 119, 104], *args, **kwargs):
        super(BGRMeanSubtract, self).__init__(
            dataloader, index=index, *args, **kwargs
        )
        means = np.asarray(pixel_mean)
        # stored as a column vector so it broadcasts across all pixels per channel
        self.pixel_mean = self.be.array(means[:, np.newaxis])

    def transform(self, t):
        # operate through a (3, H*W*N) view of t; assigning through the view
        # updates t in place (modifying t directly doesn't work here)
        channel_view = t.reshape((3, -1))
        channel_view[:] = channel_view - self.pixel_mean
        return t
class ValueNormalize(DataLoaderTransformer):
    """
    Linearly remap the values at ``index`` from ``source_range`` into
    ``target_range``.
    """
    def __init__(self, dataloader, index, source_range=[0., 255.],
                 target_range=[0., 1.], *args, **kwargs):
        super(ValueNormalize, self).__init__(
            dataloader, index=index, *args, **kwargs
        )
        src = np.asarray(source_range)
        dst = np.asarray(target_range)
        # endpoints and spans cached on device once at construction time
        self.xmin = self.be.array(src[0])
        self.xspan = self.be.array(src[1] - src[0])
        self.ymin = self.be.array(dst[0])
        self.yspan = self.be.array(dst[1] - dst[0])

    def transform(self, t):
        # work through a flattened channel view; writes propagate into t
        view = t.reshape((3, -1))
        view[:] = (view - self.xmin) / self.xspan * self.yspan + self.ymin
        return t
class DumpImage(DataLoaderTransformer):
    """
    Debugging transformer: dump image number ``image_index`` of the data at
    ``index`` to a randomly named PNG inside ``output_directory``, then pass
    the tensor through unmodified.
    """
    def __init__(self, dataloader, index, image_index, outshape,
                 output_directory=None, *args, **kwargs):
        """
        dump image number `image_index` in data `index` to a random
        file in `output_directory` (defaults to /tmp).
        """
        super(DumpImage, self).__init__(
            dataloader, index=index, *args, **kwargs
        )
        self.outshape = outshape
        self.image_index = image_index
        self.output_directory = output_directory or '/tmp'
        if self.output_directory[-1] != '/':
            self.output_directory += '/'

    def transform(self, t):
        # grab one image from t, and bring it into host mem
        if isinstance(t, np.ndarray):
            a = t
        else:
            a = t.get()
        a = a[:, self.image_index]
        # BUG FIX: compare with `==`, not `is` -- identity comparison against
        # an int literal relies on CPython small-int caching and emits a
        # SyntaxWarning on modern interpreters.
        if self.outshape[0] == 1:
            # replicate a single-channel image to 3 channels for later processing
            nshape = (3, self.outshape[1], self.outshape[2])
            img2 = np.ndarray(nshape, dtype='uint8')
            a = a.reshape((self.outshape[1], self.outshape[2]))
            img2[0, :, :] = a
            img2[1, :, :] = a
            img2[2, :, :] = a
            a = img2
        else:
            # coming from DataLoader the first dimension has been flattened
            a = a.reshape(self.outshape)
        # transpose from CHW to HWC
        a = a.transpose(1, 2, 0)
        # reorder color channel (BGR -> RGB)
        a = a[:, :, ::-1]
        # TODO: see if this cast can be removed.
        a = a.astype('uint8')
        from PIL import Image as PILImage
        img = PILImage.fromarray(a)
        img.save(self.filename())
        # return unmodified tensor
        return t

    def filename(self):
        """Generate a random .png path inside the output directory."""
        import random
        return self.output_directory + str(random.random()) + '.png'
| apache-2.0 |
mapr/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf3/table.py | 56 | 9408 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import TABLENS
from .element import Element
# Autogenerated
# The original generator emitted one identical boilerplate factory per
# table-namespace element.  The loop below builds the same public factories
# data-driven from the element's local (kebab-case) name; the public name is
# the CamelCase form of each entry (e.g. 'data-pilot-field' -> DataPilotField).

def _make_factory(localname):
    # Return a factory producing an Element with qname (TABLENS, localname).
    def factory(**args):
        return Element(qname=(TABLENS, localname), **args)
    return factory

# Local names of every table-namespace element, in the original order.
_TABLE_LOCAL_NAMES = (
    'body', 'calculation-settings', 'cell-address', 'cell-content-change',
    'cell-content-deletion', 'cell-range-source', 'change-deletion',
    'change-track-table-cell', 'consolidation', 'content-validation',
    'content-validations', 'covered-table-cell', 'cut-offs',
    'data-pilot-display-info', 'data-pilot-field',
    'data-pilot-field-reference', 'data-pilot-group',
    'data-pilot-group-member', 'data-pilot-groups', 'data-pilot-layout-info',
    'data-pilot-level', 'data-pilot-member', 'data-pilot-members',
    'data-pilot-sort-info', 'data-pilot-subtotal', 'data-pilot-subtotals',
    'data-pilot-table', 'data-pilot-tables', 'database-range',
    'database-ranges', 'database-source-query', 'database-source-sql',
    'database-source-table', 'dde-link', 'dde-links', 'deletion', 'deletions',
    'dependencies', 'dependency', 'detective', 'error-macro', 'error-message',
    'even-columns', 'even-rows', 'filter', 'filter-and', 'filter-condition',
    'filter-or', 'first-column', 'first-row', 'help-message',
    'highlighted-range', 'insertion', 'insertion-cut-off', 'iteration',
    'label-range', 'label-ranges', 'last-column', 'last-row', 'movement',
    'movement-cut-off', 'named-expression', 'named-expressions',
    'named-range', 'null-date', 'odd-columns', 'odd-rows', 'operation',
    'previous', 'scenario', 'shapes', 'sort', 'sort-by', 'sort-groups',
    'source-cell-range', 'source-range-address', 'source-service',
    'subtotal-field', 'subtotal-rule', 'subtotal-rules', 'table',
    'table-cell', 'table-column', 'table-column-group', 'table-columns',
    'table-header-columns', 'table-header-rows', 'table-row',
    'table-row-group', 'table-rows', 'table-source', 'table-template',
    'target-range-address', 'tracked-changes',
)

for _local in _TABLE_LOCAL_NAMES:
    _public = ''.join(part.capitalize() for part in _local.split('-'))
    globals()[_public] = _make_factory(_local)
del _local, _public
| apache-2.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.7/Lib/encodings/cp874.py | 593 | 12851 | """ Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec mapping cp874 (Thai, Windows) <-> Unicode via the
    # module-level charmap tables defined at the bottom of this file.

    def encode(self,input,errors='strict'):
        # Encode a str to cp874 bytes; returns (bytes, length consumed).
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        # Decode cp874 bytes to str; returns (str, length consumed).
        return codecs.charmap_decode(input,errors,decoding_table)

class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so incremental == one-shot; only the
    # encoded bytes (index [0] of the tuple) are returned.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte charset: every byte decodes independently, no state needed.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]

class StreamWriter(Codec,codecs.StreamWriter):
    # Stream variants inherit the charmap behaviour from Codec unchanged.
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo registration entry for the cp874 codec."""
    # One shared stateless Codec instance serves both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp874',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\ufffe' # 0x82 -> UNDEFINED
u'\ufffe' # 0x83 -> UNDEFINED
u'\ufffe' # 0x84 -> UNDEFINED
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\ufffe' # 0x86 -> UNDEFINED
u'\ufffe' # 0x87 -> UNDEFINED
u'\ufffe' # 0x88 -> UNDEFINED
u'\ufffe' # 0x89 -> UNDEFINED
u'\ufffe' # 0x8A -> UNDEFINED
u'\ufffe' # 0x8B -> UNDEFINED
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\ufffe' # 0x99 -> UNDEFINED
u'\ufffe' # 0x9A -> UNDEFINED
u'\ufffe' # 0x9B -> UNDEFINED
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
u'\ufffe' # 0xFC -> UNDEFINED
u'\ufffe' # 0xFD -> UNDEFINED
u'\ufffe' # 0xFE -> UNDEFINED
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
michael-ball/sublime-text | sublime-text-3/Packages/Python PEP8 Autoformat/libs/py33/lib2to3/main.py | 22 | 11624 | """
Main program for 2to3.
"""
from __future__ import with_statement
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
    """Return a unified diff of two strings."""
    old_lines = a.splitlines()
    new_lines = b.splitlines()
    # lineterm="" because splitlines() already stripped the newlines.
    return difflib.unified_diff(
        old_lines, new_lines,
        fromfile=filename, tofile=filename,
        fromfiledate="(original)", tofiledate="(refactored)",
        lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
    """
    A refactoring tool that can avoid overwriting its input files.
    Prints output to stdout.

    Output files can optionally be written to a different directory and or
    have an extra file suffix appended to their name for use in situations
    where you do not want to replace the input files.
    """

    def __init__(self, fixers, options, explicit, nobackups, show_diffs,
                 input_base_dir='', output_dir='', append_suffix=''):
        """
        Args:
            fixers: A list of fixers to import.
            options: A dict with RefactoringTool configuration.
            explicit: A list of fixers to run even if they are explicit.
            nobackups: If true no backup '.bak' files will be created for those
                files that are being refactored.
            show_diffs: Should diffs of the refactoring be printed to stdout?
            input_base_dir: The base directory for all input files. This class
                will strip this path prefix off of filenames before substituting
                it with output_dir. Only meaningful if output_dir is supplied.
                All files processed by refactor() must start with this path.
            output_dir: If supplied, all converted files will be written into
                this directory tree instead of input_base_dir.
            append_suffix: If supplied, all files output by this tool will have
                this appended to their filename. Useful for changing .py to
                .py3 for example by passing append_suffix='3'.
        """
        self.nobackups = nobackups
        self.show_diffs = show_diffs
        # Normalize to a trailing separator so prefix-stripping in
        # write_file() removes the separator as well.
        if input_base_dir and not input_base_dir.endswith(os.sep):
            input_base_dir += os.sep
        self._input_base_dir = input_base_dir
        self._output_dir = output_dir
        self._append_suffix = append_suffix
        super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)

    def log_error(self, msg, *args, **kwargs):
        # Collect errors for summarize()/exit status in addition to logging.
        self.errors.append((msg, args, kwargs))
        self.logger.error(msg, *args, **kwargs)

    def write_file(self, new_text, filename, old_text, encoding):
        """Write the refactored text, remapping the destination path and
        creating a '.bak' backup unless configured otherwise."""
        orig_filename = filename
        if self._output_dir:
            if filename.startswith(self._input_base_dir):
                # Mirror the input layout under the output directory.
                filename = os.path.join(self._output_dir,
                                        filename[len(self._input_base_dir):])
            else:
                raise ValueError('filename %s does not start with the '
                                 'input_base_dir %s' % (
                                         filename, self._input_base_dir))
        if self._append_suffix:
            filename += self._append_suffix
        if orig_filename != filename:
            output_dir = os.path.dirname(filename)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            self.log_message('Writing converted %s to %s.', orig_filename,
                             filename)
        if not self.nobackups:
            # Make backup
            backup = filename + ".bak"
            if os.path.lexists(backup):
                try:
                    os.remove(backup)
                except os.error as err:
                    # NOTE(review): 'err' is unused; the message omits the
                    # underlying OS error.
                    self.log_message("Can't remove backup %s", backup)
            try:
                os.rename(filename, backup)
            except os.error as err:
                # Rename fails e.g. when writing to a fresh output_dir where
                # 'filename' does not exist yet; best-effort by design.
                self.log_message("Can't rename %s to %s", filename, backup)
        # Actually write the new file
        write = super(StdoutRefactoringTool, self).write_file
        write(new_text, filename, old_text, encoding)
        if not self.nobackups:
            # Restore the original permissions (rename moved them to backup).
            shutil.copymode(backup, filename)
        if orig_filename != filename:
            # Preserve the file mode in the new output directory.
            shutil.copymode(orig_filename, filename)

    def print_output(self, old, new, filename, equal):
        """Report one file's result; optionally print its unified diff."""
        if equal:
            self.log_message("No changes to %s", filename)
        else:
            self.log_message("Refactored %s", filename)
            if self.show_diffs:
                diff_lines = diff_texts(old, new, filename)
                try:
                    # Serialize diff output when refactoring concurrently.
                    if self.output_lock is not None:
                        with self.output_lock:
                            for line in diff_lines:
                                print(line)
                            sys.stdout.flush()
                    else:
                        for line in diff_lines:
                            print(line)
                except UnicodeEncodeError:
                    # Terminal encoding cannot represent the diff; warn
                    # instead of crashing the whole run.
                    warn("couldn't encode %s's diff for your terminal" %
                         (filename,))
                    return
def warn(msg):
    """Print a WARNING-prefixed message to stderr."""
    print(f"WARNING: {msg}", file=sys.stderr)
def main(fixer_pkg, args=None):
    """Main program.

    Args:
        fixer_pkg: the name of a package where the fixers are located.
        args: optional; a list of command line arguments. If omitted,
              sys.argv[1:] is used.

    Returns a suggested exit status (0, 1, 2).
    """
    # Set up option parser
    parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
    parser.add_option("-d", "--doctests_only", action="store_true",
                      help="Fix up doctests only")
    parser.add_option("-f", "--fix", action="append", default=[],
                      help="Each FIX specifies a transformation; default: all")
    parser.add_option("-j", "--processes", action="store", default=1,
                      type="int", help="Run 2to3 concurrently")
    parser.add_option("-x", "--nofix", action="append", default=[],
                      help="Prevent a transformation from being run")
    parser.add_option("-l", "--list-fixes", action="store_true",
                      help="List available transformations")
    parser.add_option("-p", "--print-function", action="store_true",
                      help="Modify the grammar so that print() is a function")
    parser.add_option("-v", "--verbose", action="store_true",
                      help="More verbose logging")
    parser.add_option("--no-diffs", action="store_true",
                      help="Don't show diffs of the refactoring")
    parser.add_option("-w", "--write", action="store_true",
                      help="Write back modified files")
    parser.add_option("-n", "--nobackups", action="store_true", default=False,
                      help="Don't write backups for modified files")
    parser.add_option("-o", "--output-dir", action="store", type="str",
                      default="", help="Put output files in this directory "
                      "instead of overwriting the input files. Requires -n.")
    parser.add_option("-W", "--write-unchanged-files", action="store_true",
                      help="Also write files even if no changes were required"
                      " (useful with --output-dir); implies -w.")
    parser.add_option("--add-suffix", action="store", type="str", default="",
                      help="Append this string to all output filenames."
                      " Requires -n if non-empty. "
                      "ex: --add-suffix='3' will generate .py3 files.")

    # Parse command line arguments
    refactor_stdin = False
    flags = {}  # RefactoringTool configuration options
    options, args = parser.parse_args(args)
    if options.write_unchanged_files:
        flags["write_unchanged_files"] = True
        if not options.write:
            warn("--write-unchanged-files/-W implies -w.")
        options.write = True
    # If we allowed these, the original files would be renamed to backup names
    # but not replaced.
    if options.output_dir and not options.nobackups:
        parser.error("Can't use --output-dir/-o without -n.")
    if options.add_suffix and not options.nobackups:
        parser.error("Can't use --add-suffix without -n.")

    if not options.write and options.no_diffs:
        warn("not writing files and not printing diffs; that's not very useful")
    if not options.write and options.nobackups:
        parser.error("Can't use -n without -w")
    if options.list_fixes:
        print("Available transformations for the -f/--fix option:")
        for fixname in refactor.get_all_fix_names(fixer_pkg):
            print(fixname)
        # -l with no file arguments is a complete, successful invocation.
        if not args:
            return 0
    if not args:
        print("At least one file or directory argument required.", file=sys.stderr)
        print("Use --help to show usage.", file=sys.stderr)
        return 2
    # A lone "-" argument means "refactor stdin to stdout".
    if "-" in args:
        refactor_stdin = True
        if options.write:
            print("Can't write to stdin.", file=sys.stderr)
            return 2
    if options.print_function:
        flags["print_function"] = True

    # Set up logging handler
    level = logging.DEBUG if options.verbose else logging.INFO
    logging.basicConfig(format='%(name)s: %(message)s', level=level)
    logger = logging.getLogger('lib2to3.main')

    # Initialize the refactoring tool
    avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
    unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
    explicit = set()
    if options.fix:
        all_present = False
        for fix in options.fix:
            if fix == "all":
                all_present = True
            else:
                explicit.add(fixer_pkg + ".fix_" + fix)
        requested = avail_fixes.union(explicit) if all_present else explicit
    else:
        requested = avail_fixes.union(explicit)
    # -x exclusions win over -f requests.
    fixer_names = requested.difference(unwanted_fixes)
    input_base_dir = os.path.commonprefix(args)
    if (input_base_dir and not input_base_dir.endswith(os.sep)
        and not os.path.isdir(input_base_dir)):
        # One or more similar names were passed, their directory is the base.
        # os.path.commonprefix() is ignorant of path elements, this corrects
        # for that weird API.
        input_base_dir = os.path.dirname(input_base_dir)
    if options.output_dir:
        input_base_dir = input_base_dir.rstrip(os.sep)
        logger.info('Output in %r will mirror the input directory %r layout.',
                    options.output_dir, input_base_dir)
    rt = StdoutRefactoringTool(
            sorted(fixer_names), flags, sorted(explicit),
            options.nobackups, not options.no_diffs,
            input_base_dir=input_base_dir,
            output_dir=options.output_dir,
            append_suffix=options.add_suffix)

    # Refactor all files and directories passed as arguments
    if not rt.errors:
        if refactor_stdin:
            rt.refactor_stdin()
        else:
            try:
                rt.refactor(args, options.write, options.doctests_only,
                            options.processes)
            except refactor.MultiprocessingUnsupported:
                assert options.processes > 1
                print("Sorry, -j isn't supported on this platform.",
                      file=sys.stderr)
                return 1
        rt.summarize()

    # Return error status (0 if rt.errors is zero)
    return int(bool(rt.errors))
| unlicense |
bees4ever/spotpy | spotpy/database/__init__.py | 2 | 1050 | from importlib import import_module
def __dir__():
    """
    Using the __dir__ and __getattr__ functions allows
    to inspect the availability of modules without loading them
    :return:
    """
    import pkgutil

    # Every non-package module in this package is a backend, except the
    # shared 'base' implementation module.
    module_names = [
        modname
        for _importer, modname, is_pkg in pkgutil.iter_modules(__path__)
        if not is_pkg and modname != 'base'
    ]
    # 'custom' and 'noData' are virtual backends served by the base module.
    return module_names + ['custom', 'noData']
def __getattr__(name):
    """Lazily import and return the database backend class *name*.

    Falls back to the ``base`` module when the backend module cannot be
    imported (e.g. its optional third-party dependency is missing).

    Raises AttributeError when *name* is not an available backend.
    """
    names = __dir__()
    if name in names:
        try:
            db_module = import_module('.' + name, __name__)
        except ImportError:
            # The backend's optional dependency is unavailable; the base
            # module provides an attribute of the same name.
            db_module = import_module('.base', __name__)
        return getattr(db_module, name)
    else:
        # Bug fix: the message template was never formatted, so it always
        # printed the literal '{}'; also dropped a leftover debug print().
        raise AttributeError(
            '{} is not a member of spotpy.database'.format(name))
def get_datawriter(dbformat, *args, **kwargs):
    """Given a dbformat (ram, csv, sql, noData, etc), return the constructor
    of the appropriate class from this file.
    """
    # Reuse the module-level lazy lookup, then instantiate the writer.
    writer_class = __getattr__(dbformat)
    return writer_class(*args, **kwargs)
| mit |
mmadsen/HerokuCondaFlaskAlembic | web.py | 1 | 2277 | from flask import Flask, render_template, request
from flask.ext.sqlalchemy import SQLAlchemy
import os
import sys
import requests
import operator
import re
import nltk
from collections import Counter
from bs4 import BeautifulSoup
import stop_words
app = Flask(__name__)
# APP_SETTINGS names the configuration object to load — presumably a dotted
# path like 'config.ProductionConfig'; confirm against the deployment env.
app.config.from_object(os.environ['APP_SETTINGS'])
# Disable the SQLAlchemy event system: it adds overhead and emits a warning
# when left at its implicit default.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)

# Imported here, after `db` exists, because models.py binds to this db object.
from models import Result
@app.route('/', methods=['GET','POST'])
def index():
    """Landing page.

    GET renders the form; POST fetches the submitted URL, counts its words,
    stores the counts in the database, and shows the ten most frequent
    words (stop words removed).
    """
    errors = []
    results = {}
    r = None  # prevents uninitialization error, which happens on Heroku but not my laptop
    if request.method == 'POST':
        # get the URL entered
        try:
            url = request.form['url']
            r = requests.get(url)
        # NOTE(review): bare except also hides KeyError/KeyboardInterrupt;
        # consider narrowing to (KeyError, requests.RequestException).
        except:
            errors.append("Unable to get URL - try again")
        if r is not None:
            (raw_counts, stop_removed_count) = count_words_from_html(r)
            # package results for web display
            results = sorted(stop_removed_count.items(), key=operator.itemgetter(1), reverse=True)[:10]
            # store results in the database
            try:
                db_result = Result(
                    url=url,
                    result_all=raw_counts,
                    result_no_stop_words=stop_removed_count
                )
                db.session.add(db_result)
                db.session.commit()
            except Exception as e:
                err = "Unable to add results to the database: %s" % e
                errors.append(err)
    return render_template('index.html', errors=errors, results=results)
def count_words_from_html(page):
    """
    Given a returned page from the requests library, extract the raw text
    with BeautifulSoup, tokenize it, discard pure-punctuation tokens, and
    return a tuple of (raw word counts, counts with English stop words
    removed).
    """
    text_content = BeautifulSoup(page.text, 'html.parser').get_text()
    nltk.data.path.append('./nltk_data')  # precompiled tokenizer data
    word_tokens = nltk.Text(nltk.word_tokenize(text_content))
    # Keep only tokens containing at least one ASCII letter.
    has_letter = re.compile('.*[A-Za-z].*')
    words = [token for token in word_tokens if has_letter.match(token)]
    english_stops = stop_words.get_stop_words('english')
    filtered = [word for word in words if word.lower() not in english_stops]
    return Counter(words), Counter(filtered)
if __name__ == '__main__':
app.run() | apache-2.0 |
jemandez/creaturas-magicas | Configuraciones básicas/scripts/addons/blendertools-1.0.0/makewalk/io_json.py | 1 | 3120 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Project Name: MakeHuman
# Product Home Page: http://www.makehuman.org/
# Code Home Page: http://code.google.com/p/makehuman/
# Authors: Thomas Larsson
# Script copyright (C) MakeHuman Team 2001-2014
# Coding Standards: See http://www.makehuman.org/node/165
import json
import gzip
def loadJson(filepath):
    """Load a JSON structure from *filepath*.

    The file may be either gzip-compressed JSON (as written by
    saveJson(..., binary=True)) or plain UTF-8 JSON text.

    Raises OSError if the file does not exist and ValueError for
    malformed JSON.
    """
    try:
        with gzip.open(filepath, 'rb') as fp:
            raw = fp.read()  # raises OSError if not actually gzip data
    except OSError:
        raw = None
    if raw:
        return json.loads(raw.decode("utf-8"))
    # Bug fix: mode "rU" was deprecated and removed in Python 3.11; plain
    # text mode already gives universal newlines.  Encoding made explicit.
    with open(filepath, "r", encoding="utf-8") as fp:
        return json.load(fp)
def saveJson(struct, filepath, binary=False):
    """Write *struct* to *filepath* as JSON.

    When *binary* is true, standard json output is gzip-compressed;
    otherwise the custom pretty-printer encodeJsonData() is used and the
    file is written as UTF-8 text.
    """
    if binary:
        # Bug fix: the original referenced an undefined name ('realpath')
        # and passed a str to a binary-mode gzip stream; both made this
        # branch unusable.  Encode explicitly and write to *filepath*.
        data = json.dumps(struct).encode("utf-8")
        with gzip.open(filepath, 'wb') as fp:
            fp.write(data)
    else:
        string = encodeJsonData(struct, "")
        with open(filepath, "w", encoding="utf-8") as fp:
            fp.write(string)
            fp.write("\n")
def encodeJsonData(data, pad=""):
if data == None:
return "none"
elif isinstance(data, bool):
if data == True:
return "true"
else:
return "false"
elif isinstance(data, float):
if abs(data) < 1e-6:
return "0"
else:
return "%.5g" % data
elif isinstance(data, int):
return str(data)
elif isinstance(data, str):
return "\"%s\"" % data
elif isinstance(data, (list, tuple)):
if data == []:
return "[]"
elif leafList(data):
string = "["
for elt in data:
string += encodeJsonData(elt) + ", "
return string[:-2] + "]"
else:
string = "["
for elt in data:
string += "\n " + pad + encodeJsonData(elt, pad+" ") + ","
return string[:-1] + "\n%s]" % pad
elif isinstance(data, dict):
if data == {}:
return "{}"
string = "{"
for key,value in data.items():
string += "\n %s\"%s\" : " % (pad, key) + encodeJsonData(value, pad+" ") + ","
return string[:-1] + "\n%s}" % pad
def leafList(data):
for elt in data:
if isinstance(elt, (list,tuple,dict)):
return False
return True
| gpl-3.0 |
cantino/newspaper | tests/useless_junk.py | 1 | 7171 | # -*- coding: utf-8 -*-
"""
import os
import glob
from copy import deepcopy
from goose.article import Article
from goose.utils import URLHelper, RawHelper
from goose.extractors import StandardContentExtractor
from goose.cleaners import StandardDocumentCleaner
from goose.outputformatters import StandardOutputFormatter
from goose.images.extractors import UpgradedImageIExtractor
from goose.videos.extractors import VideoExtractor
from goose.network import HtmlFetcher
class CrawlCandidate(object):
def __init__(self, config, url, raw_html):
self.config = config
# parser
self.parser = self.config.get_parser()
self.url = url
self.raw_html = raw_html
class Crawler(object):
def __init__(self, config):
self.config = config
# parser
self.parser = self.config.get_parser()
self.logPrefix = "crawler:"
def crawl(self, crawl_candidate):
article = Article()
parse_candidate = self.get_parse_candidate(crawl_candidate)
raw_html = self.get_html(crawl_candidate, parse_candidate)
if raw_html is None:
return article
doc = self.get_document(raw_html)
extractor = self.get_extractor()
document_cleaner = self.get_document_cleaner()
output_formatter = self.get_output_formatter()
# article
article.final_url = parse_candidate.url
article.link_hash = parse_candidate.link_hash
article.raw_html = raw_html
article.doc = doc
article.raw_doc = deepcopy(doc)
article.title = extractor.get_title(article)
# TODO
# article.publish_date = config.publishDateExtractor.extract(doc)
# article.additional_data = config.get_additionaldata_extractor.extract(doc)
article.meta_lang = extractor.get_meta_lang(article)
article.meta_favicon = extractor.get_favicon(article)
article.meta_description = extractor.get_meta_description(article)
article.meta_keywords = extractor.get_meta_keywords(article)
article.canonical_link = extractor.get_canonical_link(article)
article.domain = extractor.get_domain(article.final_url)
article.tags = extractor.extract_tags(article)
# # before we do any calcs on the body itself let's clean up the document
article.doc = document_cleaner.clean(article)
# big stuff
article.top_node = extractor.calculate_best_node(article)
if article.top_node is not None:
# video handeling
video_extractor = self.get_video_extractor(article)
video_extractor.get_videos()
# image handeling
if self.config.enable_image_fetching:
image_extractor = self.get_image_extractor(article)
article.top_image = image_extractor.get_best_image(article.raw_doc, article.top_node)
# post cleanup
article.top_node = extractor.post_cleanup(article.top_node)
# clean_text
article.cleaned_text = output_formatter.get_formatted_text(article)
# cleanup tmp file
self.relase_resources(article)
return article
def get_parse_candidate(self, crawl_candidate):
if crawl_candidate.raw_html:
return RawHelper.get_parsing_candidate(crawl_candidate.url, crawl_candidate.raw_html)
return URLHelper.get_parsing_candidate(crawl_candidate.url)
def get_html(self, crawl_candidate, parsing_candidate):
if crawl_candidate.raw_html:
return crawl_candidate.raw_html
# fetch HTML
html = HtmlFetcher().get_html(self.config, parsing_candidate.url)
return html
def get_image_extractor(self, article):
http_client = None
return UpgradedImageIExtractor(http_client, article, self.config)
def get_video_extractor(self, article):
return VideoExtractor(article, self.config)
def get_output_formatter(self):
return StandardOutputFormatter(self.config)
def get_document_cleaner(self):
return StandardDocumentCleaner(self.config)
def get_document(self, raw_html):
doc = self.parser.fromstring(raw_html)
return doc
def get_extractor(self):
return StandardContentExtractor(self.config)
def relase_resources(self, article):
path = os.path.join(self.config.local_storage_path, '%s_*' % article.link_hash)
for fname in glob.glob(path):
try:
os.remove(fname)
except OSError:
# TODO better log handeling
pass
if method == 'soup':
safe_img = (doc.find('meta', attrs={'property':'og:image'})
or doc.find('meta', attrs={'name':'og:image'}))
if safe_img:
safe_img = safe_img.get('content')
if not safe_img:
safe_img = (doc.find('link', attrs={'rel':'img_src'})
or doc.find('link', attrs={'rel':'icon'}))
if safe_img:
safe_img = safe_img.get('content')
if not safe_img:
safe_img = ''
return safe_img
def fix_unicode(inputstr):
if inputstr is None:
return u''
if not isinstance(inputstr, unicode):
try:
inputstr = inputstr.decode('utf8', errors='ignore')
except ValueError, e:
log.debug(e)
inputstr = u''
inputstr = inputstr.strip()
return inputstr
if method == 'soup':
img_tags = doc.findAll('img')
img_links = [i.get('src') for i in img_tags if i.get('src')]
all_tuples = []
for feed in self.feeds:
dom = feed.dom
if dom.get('entries'):
ll = dom['entries']
tuples = [(l['link'], l['title']) for l in ll
if l.get('link') and l.get('title')]
all_tuples.extend(tuples)
"""
"""
class GrequestsTestCase(unittest.TestCase):
def runTest(self):
print 'testing grequests unit'
#self.test_ordering()
self.test_capacity()
@print_test
def test_ordering(self):
TEST_SIZE = 25
dd = {}
urls = read_urls(amount=TEST_SIZE)
# don't count feeds, they always redirect to some other url
urls = [u for u in urls if 'feeds' not in urlparse.urlparse(u).netloc.split('.')]
for index, url in enumerate(urls):
_ul = urlparse.urlparse(url)
normalized = _ul.netloc + _ul.path
dd[index] = normalized
responses = async_request(urls, timeout=3)
for index, resp in enumerate(responses):
_ul = urlparse.urlparse(resp.url)
normalized = _ul.netloc + _ul.path
# print dd[index], '==', normalized
assert dd[index] == normalized
@print_test
def test_capacity(self):
TEST_SIZE = 450
urls = read_urls(amount=TEST_SIZE)
responses = async_request(urls, timeout=3)
failed = 0
for index, r in enumerate(responses):
if r is not None:
pass
else:
#print '[FAIL]', urls[index]
failed += 1
print '\t\ttotal:', len(urls), 'failed', failed
"""
| mit |
BMJHayward/django | tests/utils_tests/test_dateparse.py | 293 | 5308 | from __future__ import unicode_literals
import unittest
from datetime import date, datetime, time, timedelta
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.timezone import get_fixed_timezone
class DateParseTests(unittest.TestCase):
    """Tests for parse_date(), parse_time() and parse_datetime().

    Shared convention under test: a string that does not match the
    expected format at all returns None, while a well-formed string
    containing out-of-range values raises ValueError.
    """

    def test_parse_date(self):
        # Valid inputs
        self.assertEqual(parse_date('2012-04-23'), date(2012, 4, 23))
        self.assertEqual(parse_date('2012-4-9'), date(2012, 4, 9))
        # Invalid inputs
        self.assertEqual(parse_date('20120423'), None)
        self.assertRaises(ValueError, parse_date, '2012-04-56')

    def test_parse_time(self):
        # Valid inputs
        self.assertEqual(parse_time('09:15:00'), time(9, 15))
        self.assertEqual(parse_time('10:10'), time(10, 10))
        self.assertEqual(parse_time('10:20:30.400'), time(10, 20, 30, 400000))
        self.assertEqual(parse_time('4:8:16'), time(4, 8, 16))
        # Invalid inputs
        self.assertEqual(parse_time('091500'), None)
        self.assertRaises(ValueError, parse_time, '09:15:90')

    def test_parse_datetime(self):
        # Valid inputs
        self.assertEqual(parse_datetime('2012-04-23T09:15:00'),
            datetime(2012, 4, 23, 9, 15))
        self.assertEqual(parse_datetime('2012-4-9 4:8:16'),
            datetime(2012, 4, 9, 4, 8, 16))
        # UTC designator and numeric offsets map to fixed-offset
        # timezones expressed in minutes (e.g. -0320 -> -200 minutes).
        self.assertEqual(parse_datetime('2012-04-23T09:15:00Z'),
            datetime(2012, 4, 23, 9, 15, 0, 0, get_fixed_timezone(0)))
        self.assertEqual(parse_datetime('2012-4-9 4:8:16-0320'),
            datetime(2012, 4, 9, 4, 8, 16, 0, get_fixed_timezone(-200)))
        self.assertEqual(parse_datetime('2012-04-23T10:20:30.400+02:30'),
            datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(150)))
        self.assertEqual(parse_datetime('2012-04-23T10:20:30.400+02'),
            datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(120)))
        self.assertEqual(parse_datetime('2012-04-23T10:20:30.400-02'),
            datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(-120)))
        # Invalid inputs
        self.assertEqual(parse_datetime('20120423091500'), None)
        self.assertRaises(ValueError, parse_datetime, '2012-04-56T09:15:90')
class DurationParseTests(unittest.TestCase):
    """Tests for parse_duration().

    Covers Django's duration string format ("[DD] [HH:[MM:]]ss[.uuuuuu]",
    i.e. str(timedelta) output) plus its partial ISO 8601 support: the
    day and time designators are accepted, while years, months and weeks
    are rejected (return None).
    """

    def test_parse_python_format(self):
        # Round-trip: parse_duration(format(delta)) must reproduce delta.
        timedeltas = [
            timedelta(days=4, minutes=15, seconds=30, milliseconds=100),  # fractions of seconds
            timedelta(hours=10, minutes=15, seconds=30),  # hours, minutes, seconds
            timedelta(days=4, minutes=15, seconds=30),  # multiple days
            timedelta(days=1, minutes=00, seconds=00),  # single day
            timedelta(days=-4, minutes=15, seconds=30),  # negative durations
            timedelta(minutes=15, seconds=30),  # minute & seconds
            timedelta(seconds=30),  # seconds
        ]
        for delta in timedeltas:
            self.assertEqual(parse_duration(format(delta)), delta)

    def test_seconds(self):
        self.assertEqual(parse_duration('30'), timedelta(seconds=30))

    def test_minutes_seconds(self):
        self.assertEqual(parse_duration('15:30'), timedelta(minutes=15, seconds=30))
        self.assertEqual(parse_duration('5:30'), timedelta(minutes=5, seconds=30))

    def test_hours_minutes_seconds(self):
        # Components are not required to be in canonical ranges.
        self.assertEqual(parse_duration('10:15:30'), timedelta(hours=10, minutes=15, seconds=30))
        self.assertEqual(parse_duration('1:15:30'), timedelta(hours=1, minutes=15, seconds=30))
        self.assertEqual(parse_duration('100:200:300'), timedelta(hours=100, minutes=200, seconds=300))

    def test_days(self):
        self.assertEqual(parse_duration('4 15:30'), timedelta(days=4, minutes=15, seconds=30))
        self.assertEqual(parse_duration('4 10:15:30'), timedelta(days=4, hours=10, minutes=15, seconds=30))

    def test_fractions_of_seconds(self):
        self.assertEqual(parse_duration('15:30.1'), timedelta(minutes=15, seconds=30, milliseconds=100))
        self.assertEqual(parse_duration('15:30.01'), timedelta(minutes=15, seconds=30, milliseconds=10))
        self.assertEqual(parse_duration('15:30.001'), timedelta(minutes=15, seconds=30, milliseconds=1))
        self.assertEqual(parse_duration('15:30.0001'), timedelta(minutes=15, seconds=30, microseconds=100))
        self.assertEqual(parse_duration('15:30.00001'), timedelta(minutes=15, seconds=30, microseconds=10))
        self.assertEqual(parse_duration('15:30.000001'), timedelta(minutes=15, seconds=30, microseconds=1))

    def test_negative(self):
        self.assertEqual(parse_duration('-4 15:30'), timedelta(days=-4, minutes=15, seconds=30))

    def test_iso_8601(self):
        # Unsupported ISO designators return None rather than raising.
        self.assertEqual(parse_duration('P4Y'), None)
        self.assertEqual(parse_duration('P4M'), None)
        self.assertEqual(parse_duration('P4W'), None)
        self.assertEqual(parse_duration('P4D'), timedelta(days=4))
        self.assertEqual(parse_duration('P0.5D'), timedelta(hours=12))
        self.assertEqual(parse_duration('PT5H'), timedelta(hours=5))
        self.assertEqual(parse_duration('PT5M'), timedelta(minutes=5))
        self.assertEqual(parse_duration('PT5S'), timedelta(seconds=5))
        self.assertEqual(parse_duration('PT0.000005S'), timedelta(microseconds=5))
johne53/MB3Glib | win32/replace.py | 6 | 4090 | #!/usr/bin/python
#
# Simple utility script to manipulate
# certain types of strings in a file
# This can be used in various projects where
# there is the need to replace strings in files,
# and is copied from GLib's $(srcroot)/win32
# Author: Fan, Chun-wei
# Date: September 03, 2014
import os
import sys
import re
import string
import argparse
valid_actions = ['remove-prefix',
'replace-var',
'replace-str',
'remove-str']
def open_file(filename, mode):
    """Open *filename* in *mode*, forcing UTF-8 text on Python 3.

    Python 2's open() has no ``encoding`` parameter, so it is only
    passed when running under Python 3.
    """
    if sys.version_info[0] >= 3:
        return open(filename, mode=mode, encoding='utf-8')
    return open(filename, mode=mode)
def replace_multi(src, dest, replace_items):
    """Copy *src* to *dest*, applying all replacements in *replace_items*.

    Each key of *replace_items* is matched literally (keys are
    regex-escaped) and every occurrence on every line is replaced by the
    corresponding value.
    """
    # Fix: the escaped-key dict and the combined regex used to be rebuilt
    # and recompiled once per input line; build them once up front.
    replace_dict = dict((re.escape(key), value) \
                        for key, value in replace_items.items())
    replace_pattern = re.compile("|".join(replace_dict.keys()))
    with open_file(src, 'r') as s:
        with open_file(dest, 'w') as d:
            for line in s:
                d.write(replace_pattern.sub(lambda m: \
                        replace_dict[re.escape(m.group(0))], line))
def replace(src, dest, instring, outstring):
    """Convenience wrapper: replace a single string while copying
    *src* to *dest*."""
    replace_multi(src, dest, {instring: outstring})
def check_required_args(args, params):
    """Abort (SystemExit) if any option named in *params* was not
    supplied on the command line, i.e. is None or absent on *args*.

    The error message names the first missing option, matching
    argparse's own "argument is required" wording.
    """
    missing = [param for param in params
               if getattr(args, param, None) is None]
    if missing:
        raise SystemExit('%s: error: --%s argument is required' % (__file__, missing[0]))
def warn_ignored_args(args, params):
    """Print a warning for every option in *params* that was supplied
    on the command line but is irrelevant to the selected action."""
    supplied = (param for param in params
                if getattr(args, param, None) is not None)
    for param in supplied:
        print('%s: warning: --%s argument is ignored' % (__file__, param))
def main(argv):
    """Parse the command line and perform the requested string edit.

    Validates the option combination for the chosen action, builds the
    (input_string, output_string) pair and delegates the actual file
    rewrite to replace().
    """
    parser = argparse.ArgumentParser(description='Process strings in a file.')
    parser.add_argument('-a',
                        '--action',
                        help='Action to carry out.  Can be one of:\n'
                             'remove-prefix\n'
                             'replace-var\n'
                             'replace-str\n'
                             'remove-str',
                        choices=valid_actions)
    parser.add_argument('-i', '--input', help='Input file')
    parser.add_argument('-o', '--output', help='Output file')
    parser.add_argument('--instring', help='String to replace or remove')
    parser.add_argument('--var', help='Autotools variable name to replace')
    parser.add_argument('--outstring',
                        help='New String to replace specified string or variable')
    parser.add_argument('--removeprefix', help='Prefix of string to remove')

    args = parser.parse_args()

    input_string = ''
    output_string = ''

    # We must have action, input, output for all operations
    check_required_args(args, ['action','input','output'])

    # Build the arguments by the operation that is to be done,
    # to be fed into replace()
    # NOTE(review): --action is validated by `choices` but not marked
    # required by argparse; check_required_args() above rejects None.

    # Get rid of prefixes from a string
    if args.action == 'remove-prefix':
        check_required_args(args, ['instring','removeprefix'])
        warn_ignored_args(args, ['outstring','var'])
        input_string = args.removeprefix + args.instring
        output_string = args.instring

    # Replace an m4-style variable (those surrounded by @...@)
    if args.action == 'replace-var':
        check_required_args(args, ['var','outstring'])
        warn_ignored_args(args, ['instring','removeprefix'])
        input_string = '@' + args.var + '@'
        output_string = args.outstring

    # Replace a string
    if args.action == 'replace-str':
        check_required_args(args, ['instring','outstring'])
        warn_ignored_args(args, ['var','removeprefix'])
        input_string = args.instring
        output_string = args.outstring

    # Remove a string
    if args.action == 'remove-str':
        check_required_args(args, ['instring'])
        warn_ignored_args(args, ['var','outstring','removeprefix'])
        input_string = args.instring
        output_string = ''

    replace(args.input, args.output, input_string, output_string)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| lgpl-2.1 |
nirzari18/Query-Analysis-Application-on-Google-App-Engine | lib/oauth2client/crypt.py | 36 | 13165 | # -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Crypto-related routines for oauth2client."""
import base64
import imp
import json
import logging
import os
import sys
import time
import six
CLOCK_SKEW_SECS = 300 # 5 minutes in seconds
AUTH_TOKEN_LIFETIME_SECS = 300 # 5 minutes in seconds
MAX_TOKEN_LIFETIME_SECS = 86400 # 1 day in seconds
logger = logging.getLogger(__name__)
class AppIdentityError(Exception):
  """Raised when a JWT fails its signature, timestamp or audience checks."""
def _TryOpenSslImport():
  """Import OpenSSL, avoiding the explicit import where possible.

  Importing OpenSSL 0.14 can take up to 0.5s, which is a large price
  to pay at module import time. However, it's also possible for
  ``imp.find_module`` to fail to find the module, even when it's
  installed. (This is the case in various exotic environments,
  including some relevant for Google.) So we first try a fast-path,
  and fall back to the slow import as needed.

  Args:
    None

  Returns:
    None

  Raises:
    ImportError if OpenSSL is unavailable.

  """
  try:
    # Fast path: merely locating the module proves it is importable
    # without paying the cost of executing it here.
    _ = imp.find_module('OpenSSL')
    return
  except ImportError:
    # Slow path: find_module can miss modules that are nonetheless
    # importable (zip imports, import hooks), so try the real import.
    import OpenSSL
try:
_TryOpenSslImport()
  class OpenSSLVerifier(object):
    """Verifies the signature on a message."""

    def __init__(self, pubkey):
      """Constructor.

      Args:
        pubkey, OpenSSL.crypto.PKey, The public key to verify with.
      """
      self._pubkey = pubkey

    def verify(self, message, signature):
      """Verifies a message against a signature.

      Args:
        message: string, The message to verify.
        signature: string, The signature on the message.

      Returns:
        True if message was signed by the private key associated with the public
        key that this object was constructed with.
      """
      # Imported lazily so merely loading this module stays cheap.
      from OpenSSL import crypto
      try:
        if isinstance(message, six.text_type):
          message = message.encode('utf-8')
        crypto.verify(self._pubkey, signature, message, 'sha256')
        return True
      except:
        # NOTE(review): the bare except deliberately maps any
        # verification failure to False, but it also swallows
        # KeyboardInterrupt/SystemExit -- consider `except crypto.Error`.
        return False

    @staticmethod
    def from_string(key_pem, is_x509_cert):
      """Construct a Verifier instance from a string.

      Args:
        key_pem: string, public key in PEM format.
        is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
          expected to be an RSA key in PEM format.

      Returns:
        Verifier instance.

      Raises:
        OpenSSL.crypto.Error if the key_pem can't be parsed.
      """
      from OpenSSL import crypto
      if is_x509_cert:
        pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
      else:
        pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
      return OpenSSLVerifier(pubkey)
  class OpenSSLSigner(object):
    """Signs messages with a private key."""

    def __init__(self, pkey):
      """Constructor.

      Args:
        pkey, OpenSSL.crypto.PKey (or equiv), The private key to sign with.
      """
      self._key = pkey

    def sign(self, message):
      """Signs a message.

      Args:
        message: bytes, Message to be signed.

      Returns:
        string, The signature of the message for the given key.
      """
      from OpenSSL import crypto
      if isinstance(message, six.text_type):
        message = message.encode('utf-8')
      return crypto.sign(self._key, message, 'sha256')

    @staticmethod
    def from_string(key, password=b'notasecret'):
      """Construct a Signer instance from a string.

      Args:
        key: string, private key in PKCS12 or PEM format.
        password: string, password for the private key file.

      Returns:
        Signer instance.

      Raises:
        OpenSSL.crypto.Error if the key can't be parsed.
      """
      from OpenSSL import crypto
      parsed_pem_key = _parse_pem_key(key)
      if parsed_pem_key:
        pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, parsed_pem_key)
      else:
        # No PEM marker found: treat the key as binary PKCS12; the
        # password must be bytes for load_pkcs12.
        if isinstance(password, six.text_type):
          password = password.encode('utf-8')
        pkey = crypto.load_pkcs12(key, password).get_privatekey()
      return OpenSSLSigner(pkey)
  def pkcs12_key_as_pem(private_key_text, private_key_password):
    """Convert the contents of a PKCS12 key to PEM using OpenSSL.

    Args:
      private_key_text: String. Private key.
      private_key_password: String. Password for PKCS12.

    Returns:
      String. PEM contents of ``private_key_text``.
    """
    from OpenSSL import crypto
    # The key text arrives base64-encoded; decode to raw PKCS12 bytes.
    decoded_body = base64.b64decode(private_key_text)
    if isinstance(private_key_password, six.string_types):
      private_key_password = private_key_password.encode('ascii')

    pkcs12 = crypto.load_pkcs12(decoded_body, private_key_password)
    return crypto.dump_privatekey(crypto.FILETYPE_PEM,
                                  pkcs12.get_privatekey())
except ImportError:
  # PyOpenSSL is not installed: flag both helpers as unavailable so the
  # Signer/Verifier selection further down falls back to PyCrypto.
  OpenSSLVerifier = None
  OpenSSLSigner = None

  def pkcs12_key_as_pem(*args, **kwargs):
    raise NotImplementedError('pkcs12_key_as_pem requires OpenSSL.')
try:
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from Crypto.Signature import PKCS1_v1_5
from Crypto.Util.asn1 import DerSequence
  class PyCryptoVerifier(object):
    """Verifies the signature on a message."""

    def __init__(self, pubkey):
      """Constructor.

      Args:
        pubkey, OpenSSL.crypto.PKey (or equiv), The public key to verify with.
      """
      self._pubkey = pubkey

    def verify(self, message, signature):
      """Verifies a message against a signature.

      Args:
        message: string, The message to verify.
        signature: string, The signature on the message.

      Returns:
        True if message was signed by the private key associated with the public
        key that this object was constructed with.
      """
      try:
        return PKCS1_v1_5.new(self._pubkey).verify(
            SHA256.new(message), signature)
      except:
        # NOTE(review): bare except maps any failure to False but also
        # swallows KeyboardInterrupt/SystemExit -- consider narrowing.
        return False

    @staticmethod
    def from_string(key_pem, is_x509_cert):
      """Construct a Verifier instance from a string.

      Args:
        key_pem: string, public key in PEM format.
        is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
          expected to be an RSA key in PEM format.

      Returns:
        Verifier instance.
      """
      if is_x509_cert:
        if isinstance(key_pem, six.text_type):
          key_pem = key_pem.encode('ascii')
        # Strip the PEM armor lines and parse the DER certificate by
        # hand with PyCrypto's DerSequence.
        pemLines = key_pem.replace(b' ', b'').split()
        certDer = _urlsafe_b64decode(b''.join(pemLines[1:-1]))
        certSeq = DerSequence()
        certSeq.decode(certDer)
        tbsSeq = DerSequence()
        tbsSeq.decode(certSeq[0])
        # tbsSeq[6] is presumably the subjectPublicKeyInfo field of the
        # TBSCertificate -- TODO confirm against RFC 5280.
        pubkey = RSA.importKey(tbsSeq[6])
      else:
        pubkey = RSA.importKey(key_pem)
      return PyCryptoVerifier(pubkey)
  class PyCryptoSigner(object):
    """Signs messages with a private key."""

    def __init__(self, pkey):
      """Constructor.

      Args:
        pkey, OpenSSL.crypto.PKey (or equiv), The private key to sign with.
      """
      self._key = pkey

    def sign(self, message):
      """Signs a message.

      Args:
        message: string, Message to be signed.

      Returns:
        string, The signature of the message for the given key.
      """
      if isinstance(message, six.text_type):
        message = message.encode('utf-8')
      return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))

    @staticmethod
    def from_string(key, password='notasecret'):
      """Construct a Signer instance from a string.

      Args:
        key: string, private key in PEM format.
        password: string, password for private key file. Unused for PEM files.

      Returns:
        Signer instance.

      Raises:
        NotImplementedError if they key isn't in PEM format.
      """
      parsed_pem_key = _parse_pem_key(key)
      if parsed_pem_key:
        pkey = RSA.importKey(parsed_pem_key)
      else:
        # PyCrypto has no PKCS12 support, unlike the OpenSSL backend.
        raise NotImplementedError(
            'PKCS12 format is not supported by the PyCrypto library. '
            'Try converting to a "PEM" '
            '(openssl pkcs12 -in xxxxx.p12 -nodes -nocerts > privatekey.pem) '
            'or using PyOpenSSL if native code is an option.')
      return PyCryptoSigner(pkey)
except ImportError:
  # PyCrypto is unavailable too; the backend selection below will then
  # raise ImportError at module import time.
  PyCryptoVerifier = None
  PyCryptoSigner = None
# Pick the crypto backend: prefer PyOpenSSL, fall back to PyCrypto.
if OpenSSLSigner:
  Signer = OpenSSLSigner
  Verifier = OpenSSLVerifier
elif PyCryptoSigner:
  Signer = PyCryptoSigner
  Verifier = PyCryptoVerifier
else:
  raise ImportError('No encryption library found. Please install either '
                    'PyOpenSSL, or PyCrypto 2.6 or later')
def _parse_pem_key(raw_key_input):
"""Identify and extract PEM keys.
Determines whether the given key is in the format of PEM key, and extracts
the relevant part of the key if it is.
Args:
raw_key_input: The contents of a private key file (either PEM or PKCS12).
Returns:
string, The actual key if the contents are from a PEM file, or else None.
"""
offset = raw_key_input.find(b'-----BEGIN ')
if offset != -1:
return raw_key_input[offset:]
def _urlsafe_b64encode(raw_bytes):
  """URL-safe base64-encode *raw_bytes* and strip the '=' padding."""
  if isinstance(raw_bytes, six.text_type):
    raw_bytes = raw_bytes.encode('utf-8')
  encoded = base64.urlsafe_b64encode(raw_bytes).decode('ascii')
  return encoded.rstrip('=')
def _urlsafe_b64decode(b64string):
  """Decode URL-safe base64 that may be missing its '=' padding."""
  # Guard against unicode strings, which base64 can't handle.
  if isinstance(b64string, six.text_type):
    b64string = b64string.encode('ascii')
  pad_len = 4 - len(b64string) % 4
  return base64.urlsafe_b64decode(b64string + b'=' * pad_len)
def _json_encode(data):
return json.dumps(data, separators=(',', ':'))
def make_signed_jwt(signer, payload):
  """Make a signed JWT.

  See http://self-issued.info/docs/draft-jones-json-web-token.html.

  Args:
    signer: crypt.Signer, Cryptographic signer.
    payload: dict, Dictionary of data to convert to JSON and then sign.

  Returns:
    string, The JWT for the payload.
  """
  header = {'typ': 'JWT', 'alg': 'RS256'}

  encoded_header = _urlsafe_b64encode(_json_encode(header))
  encoded_payload = _urlsafe_b64encode(_json_encode(payload))
  # The signature covers "<header>.<payload>" exactly as transmitted.
  signing_input = encoded_header + '.' + encoded_payload
  signature = signer.sign(signing_input)

  segments = [encoded_header, encoded_payload, _urlsafe_b64encode(signature)]
  logger.debug(str(segments))
  return '.'.join(segments)
def verify_signed_jwt_with_certs(jwt, certs, audience):
  """Verify a JWT against public certs.

  See http://self-issued.info/docs/draft-jones-json-web-token.html.

  Args:
    jwt: string, A JWT.
    certs: dict, Dictionary where values of public keys in PEM format.
    audience: string, The audience, 'aud', that this JWT should contain. If
      None then the JWT's 'aud' parameter is not verified.

  Returns:
    dict, The deserialized JSON payload in the JWT.

  Raises:
    AppIdentityError if any checks are failed.
  """
  segments = jwt.split('.')

  if len(segments) != 3:
    raise AppIdentityError('Wrong number of segments in token: %s' % jwt)
  # The signature covers the first two segments verbatim.
  signed = '%s.%s' % (segments[0], segments[1])

  signature = _urlsafe_b64decode(segments[2])

  # Parse token.
  json_body = _urlsafe_b64decode(segments[1])
  try:
    parsed = json.loads(json_body.decode('utf-8'))
  except:
    # NOTE(review): bare except also hides KeyboardInterrupt;
    # ValueError would cover json/unicode failures.
    raise AppIdentityError('Can\'t parse token: %s' % json_body)

  # Check signature: accept the token if any of the supplied certs
  # verifies it (certs are typically keyed by key id).
  verified = False
  for pem in certs.values():
    verifier = Verifier.from_string(pem, True)
    if verifier.verify(signed, signature):
      verified = True
      break
  if not verified:
    raise AppIdentityError('Invalid token signature: %s' % jwt)

  # Check creation timestamp, allowing CLOCK_SKEW_SECS (5 min) of skew.
  iat = parsed.get('iat')
  if iat is None:
    raise AppIdentityError('No iat field in token: %s' % json_body)
  earliest = iat - CLOCK_SKEW_SECS

  # Check expiration timestamp.
  now = int(time.time())
  exp = parsed.get('exp')
  if exp is None:
    raise AppIdentityError('No exp field in token: %s' % json_body)
  if exp >= now + MAX_TOKEN_LIFETIME_SECS:
    raise AppIdentityError('exp field too far in future: %s' % json_body)
  latest = exp + CLOCK_SKEW_SECS

  if now < earliest:
    raise AppIdentityError('Token used too early, %d < %d: %s' %
                           (now, earliest, json_body))
  if now > latest:
    raise AppIdentityError('Token used too late, %d > %d: %s' %
                           (now, latest, json_body))

  # Check audience.
  if audience is not None:
    aud = parsed.get('aud')
    if aud is None:
      raise AppIdentityError('No aud field in token: %s' % json_body)
    if aud != audience:
      raise AppIdentityError('Wrong recipient, %s != %s: %s' %
                             (aud, audience, json_body))

  return parsed
| apache-2.0 |
dev-elixir/hx_wt88047 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# ---- Module-level state shared by the trace_*() hooks and the
# handle_*() routines defined later in this file. ----
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed

# options (set by trace_begin() from sys.argv)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
def diff_msec(src, dst):
	"""Return the interval from src (nsec) to dst (nsec) in milliseconds."""
	delta_ns = dst - src
	return delta_ns / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
	"""Print one tx row: device, length, queue time, then the
	Qdisc latency (queue->xmit) and device latency (xmit->free)."""
	# Honor the dev= filter option.
	if dev != 0 and hunk['dev'].find(dev) < 0:
		return
	print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
		(hunk['dev'], hunk['len'],
		nsecs_secs(hunk['queue_t']),
		nsecs_nsecs(hunk['queue_t'])/1000,
		diff_msec(hunk['queue_t'], hunk['xmit_t']),
		diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
	"""Pretty-print one receive hunk: its IRQ entries, the NET_RX
	softirq entry, and every skb event processed under it, with all
	timestamps relative to the first irq entry."""
	show_hunk = 0
	irq_list = hunk['irq_list']
	cpu = irq_list[0]['cpu']
	base_t = irq_list[0]['irq_ent_t']
	# check if this hunk should be showed (honor the dev= filter)
	if dev != 0:
		for i in range(len(irq_list)):
			if irq_list[i]['name'].find(dev) >= 0:
				show_hunk = 1
				break
	else:
		show_hunk = 1
	if show_hunk == 0:
		return
	print "%d.%06dsec cpu=%d" % \
		(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
	for i in range(len(irq_list)):
		print PF_IRQ_ENTRY % \
			(diff_msec(base_t, irq_list[i]['irq_ent_t']),
			irq_list[i]['irq'], irq_list[i]['name'])
		print PF_JOINT
		irq_event_list = irq_list[i]['event_list']
		for j in range(len(irq_event_list)):
			irq_event = irq_event_list[j]
			if irq_event['event'] == 'netif_rx':
				print PF_NET_RX % \
					(diff_msec(base_t, irq_event['time']),
					irq_event['skbaddr'])
				print PF_JOINT
	print PF_SOFT_ENTRY % \
		diff_msec(base_t, hunk['sirq_ent_t'])
	print PF_JOINT
	event_list = hunk['event_list']
	for i in range(len(event_list)):
		event = event_list[i]
		if event['event_name'] == 'napi_poll':
			print PF_NAPI_POLL % \
				(diff_msec(base_t, event['event_t']), event['dev'])
			if i == len(event_list) - 1:
				print ""
			else:
				print PF_JOINT
		else:
			print PF_NET_RECV % \
				(diff_msec(base_t, event['event_t']), event['skbaddr'],
				event['len'])
			if 'comm' in event.keys():
				# skb was copied to a user process
				print PF_WJOINT
				print PF_CPY_DGRAM % \
					(diff_msec(base_t, event['comm_t']),
					event['pid'], event['comm'])
			elif 'handle' in event.keys():
				# skb was freed inside the kernel
				print PF_WJOINT
				if event['handle'] == "kfree_skb":
					print PF_KFREE_SKB % \
						(diff_msec(base_t,
						event['comm_t']),
						event['location'])
				elif event['handle'] == "consume_skb":
					print PF_CONS_SKB % \
						diff_msec(base_t,
						event['comm_t'])
				print PF_JOINT
def trace_begin():
	"""Parse the script options from sys.argv into module globals.

	Recognized options: 'tx', 'rx', 'dev=<name>', 'debug'.  When
	neither 'tx' nor 'rx' is given, both charts are enabled.
	"""
	global show_tx
	global show_rx
	global dev
	global debug

	for i in range(len(sys.argv)):
		if i == 0:
			continue
		arg = sys.argv[i]
		if arg == 'tx':
			show_tx = 1
		elif arg =='rx':
			show_rx = 1
		elif arg.find('dev=',0, 4) >= 0:
			dev = arg[4:]
		elif arg == 'debug':
			debug = 1
	# default: show both directions
	if show_tx == 0 and show_rx == 0:
		show_tx = 1
		show_rx = 1
def trace_end():
	"""Sort all buffered events by time, dispatch each one to its
	handle_*() routine, then print the requested rx/tx charts (and
	buffer statistics when the 'debug' option is set)."""
	# order all events in time
	all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
					    b[EINFO_IDX_TIME]))
	# process all events
	for i in range(len(all_event_list)):
		event_info = all_event_list[i]
		name = event_info[EINFO_IDX_NAME]
		if name == 'irq__softirq_exit':
			handle_irq_softirq_exit(event_info)
		elif name == 'irq__softirq_entry':
			handle_irq_softirq_entry(event_info)
		elif name == 'irq__softirq_raise':
			handle_irq_softirq_raise(event_info)
		elif name == 'irq__irq_handler_entry':
			handle_irq_handler_entry(event_info)
		elif name == 'irq__irq_handler_exit':
			handle_irq_handler_exit(event_info)
		elif name == 'napi__napi_poll':
			handle_napi_poll(event_info)
		elif name == 'net__netif_receive_skb':
			handle_netif_receive_skb(event_info)
		elif name == 'net__netif_rx':
			handle_netif_rx(event_info)
		elif name == 'skb__skb_copy_datagram_iovec':
			handle_skb_copy_datagram_iovec(event_info)
		elif name == 'net__net_dev_queue':
			handle_net_dev_queue(event_info)
		elif name == 'net__net_dev_xmit':
			handle_net_dev_xmit(event_info)
		elif name == 'skb__kfree_skb':
			handle_kfree_skb(event_info)
		elif name == 'skb__consume_skb':
			handle_consume_skb(event_info)

	# display receive hunks
	if show_rx:
		for i in range(len(receive_hunk_list)):
			print_receive(receive_hunk_list[i])

	# display transmit hunks
	if show_tx:
		print "   dev    len      Qdisc        " \
			"       netdevice             free"
		for i in range(len(tx_free_list)):
			print_transmit(tx_free_list[i])

	if debug:
		# show how full the matching buffers got (see buffer_budget)
		print "debug buffer status"
		print "----------------------------"
		print "xmit Qdisc:remain:%d overflow:%d" % \
			(len(tx_queue_list), of_count_tx_queue_list)
		print "xmit netdevice:remain:%d overflow:%d" % \
			(len(tx_xmit_list), of_count_tx_xmit_list)
		print "receive:remain:%d overflow:%d" % \
			(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, irq, irq_name):
    # Queue the raw hard-IRQ entry event for ordered processing later.
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name))

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret))

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name))

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm,
                           skbaddr, skblen, dev_name):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm,
                  skbaddr, skblen, dev_name):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, rc, dev_name))

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, protocol, location))

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr))

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen))
def handle_irq_handler_entry(event_info):
    """Open a new hard-IRQ record on this CPU's stack in irq_dic."""
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)

def handle_irq_handler_exit(event_info):
    """Close the most recent hard-IRQ record on this CPU.

    The record is kept only if a NET_RX softirq event was attached to it
    while the handler ran; otherwise it is dropped as irrelevant.
    """
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    # Mismatched entry/exit pair (e.g. trace started mid-handler): discard.
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    """Attach a NET_RX softirq-raise event to the currently open IRQ record."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_irq_softirq_entry(event_info):
    """Start a fresh NET_RX softirq context for this CPU in net_rx_dic."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}

def handle_irq_softirq_exit(event_info):
    """Finish the NET_RX softirq on this CPU and emit one receive hunk.

    Combines the hard-IRQ records accumulated in irq_dic with the softirq
    events in net_rx_dic; both per-CPU entries are consumed (deleted).
    """
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # Nothing meaningful happened during this softirq: skip it.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    """Record a napi_poll event inside the open NET_RX softirq context."""
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                    'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)

def handle_netif_rx(event_info):
    """Attach a netif_rx event to the hard-IRQ record currently open on this CPU."""
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_netif_receive_skb(event_info):
    """Record skb delivery to the stack; track it in the bounded rx_skb_list."""
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        # Bounded buffer: drop the oldest entry and count the overflow.
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1

def handle_net_dev_queue(event_info):
    """Record an skb entering the qdisc; track it in the bounded tx_queue_list."""
    global of_count_tx_queue_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    # Bounded buffer: drop the oldest entry and count the overflow.
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """On a successful device transmit, move the skb from queue to xmit list."""
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                # Bounded buffer: drop the oldest entry, count the overflow.
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return

def handle_kfree_skb(event_info):
    """Resolve an skb free: complete a TX lifetime or mark an RX drop.

    Checked in order: still queued in the qdisc (free before xmit -> just
    forget it), transmitted (record free time, lifetime complete), or
    received (record that the stack dropped it via kfree_skb).
    """
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return

def handle_consume_skb(event_info):
    """Normal TX completion: record the free time of a transmitted skb."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return

def handle_skb_copy_datagram_iovec(event_info):
    """RX completion: the skb's payload was copied to a user-space iovec."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
| gpl-2.0 |
flyher/pymo | symbian/PythonForS60/module-repo/standard-modules/encodings/big5hkscs.py | 816 | 1039 | #
# big5hkscs.py: Python Unicode Codec for BIG5HKSCS
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_hk, codecs
import _multibytecodec as mbc
codec = _codecs_hk.getcodec('big5hkscs')
# Stateless facade: encode/decode are the one-shot entry points of the
# underlying C big5hkscs codec object.
class Codec(codecs.Codec):
    encode = codec.encode
    decode = codec.decode

# The incremental/stream variants reuse the shared multibyte-codec machinery
# from _multibytecodec; the 'codec' class attribute tells each mixin which
# C codec object to drive.
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec

class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec

class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec

class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    """Build the CodecInfo record the codecs registry expects for big5hkscs."""
    entry = Codec()
    return codecs.CodecInfo(
        name='big5hkscs',
        encode=entry.encode,
        decode=entry.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| mit |
whitepyro/debian_server_setup | lib/shove/tests/test_redis_store.py | 3 | 3517 | # -*- coding: utf-8 -*-
import unittest
class TestRedisStore(unittest.TestCase):
    """Integration tests for shove's Redis backend.

    Requires a Redis server listening on localhost:6379 (db 0); each test
    starts from an empty store and the database is cleared afterwards.
    """

    def setUp(self):
        from shove import Shove
        self.store = Shove('redis://localhost:6379/0')

    def tearDown(self):
        # Wipe the test database so runs don't leak state into each other.
        self.store.clear()
        self.store.close()

    def test__getitem__(self):
        self.store['max'] = 3
        self.assertEqual(self.store['max'], 3)

    def test__setitem__(self):
        self.store['max'] = 3
        self.assertEqual(self.store['max'], 3)

    def test__delitem__(self):
        self.store['max'] = 3
        del self.store['max']
        self.assertEqual('max' in self.store, False)

    def test_get(self):
        # get() on a missing key returns None rather than raising.
        self.store['max'] = 3
        self.assertEqual(self.store.get('min'), None)

    def test__cmp__(self):
        from shove import Shove
        tstore = Shove()
        self.store['max'] = 3
        tstore['max'] = 3
        self.assertEqual(self.store, tstore)

    def test__len__(self):
        self.store['max'] = 3
        self.store['min'] = 6
        self.assertEqual(len(self.store), 2)

    def test_clear(self):
        self.store['max'] = 3
        self.store['min'] = 6
        self.store['pow'] = 7
        self.store.clear()
        self.assertEqual(len(self.store), 0)

    def test_items(self):
        self.store['max'] = 3
        self.store['min'] = 6
        self.store['pow'] = 7
        slist = list(self.store.items())
        self.assertEqual(('min', 6) in slist, True)

    def test_iteritems(self):
        self.store['max'] = 3
        self.store['min'] = 6
        self.store['pow'] = 7
        slist = list(self.store.iteritems())
        self.assertEqual(('min', 6) in slist, True)

    def test_iterkeys(self):
        self.store['max'] = 3
        self.store['min'] = 6
        self.store['pow'] = 7
        slist = list(self.store.iterkeys())
        self.assertEqual('min' in slist, True)

    def test_itervalues(self):
        self.store['max'] = 3
        self.store['min'] = 6
        self.store['pow'] = 7
        slist = list(self.store.itervalues())
        self.assertEqual(6 in slist, True)

    def test_pop(self):
        self.store['max'] = 3
        self.store['min'] = 6
        item = self.store.pop('min')
        self.assertEqual(item, 6)

    def test_popitem(self):
        self.store['max'] = 3
        self.store['min'] = 6
        self.store['pow'] = 7
        item = self.store.popitem()
        # popitem() removed exactly one (key, value) pair; which one is
        # unspecified, so check the combined size instead of the contents.
        self.assertEqual(len(item) + len(self.store), 4)

    def test_setdefault(self):
        self.store['max'] = 3
        self.store['min'] = 6
        self.store['powl'] = 7
        self.store.setdefault('pow', 8)
        self.assertEqual(self.store.setdefault('pow', 8), 8)
        self.assertEqual(self.store['pow'], 8)

    def test_update(self):
        # update() from another store must overwrite existing keys.
        from shove import Shove
        tstore = Shove()
        tstore['max'] = 3
        tstore['min'] = 6
        tstore['pow'] = 7
        self.store['max'] = 2
        self.store['min'] = 3
        self.store['pow'] = 7
        self.store.update(tstore)
        self.assertEqual(self.store['min'], 6)

    def test_values(self):
        self.store['max'] = 3
        self.store['min'] = 6
        self.store['pow'] = 7
        slist = self.store.values()
        self.assertEqual(6 in slist, True)

    def test_keys(self):
        self.store['max'] = 3
        self.store['min'] = 6
        self.store['pow'] = 7
        slist = self.store.keys()
        self.assertEqual('min' in slist, True)
| gpl-3.0 |
FrankBian/kuma | vendor/packages/sqlalchemy/test/orm/test_generative.py | 7 | 10297 | from sqlalchemy.test.testing import eq_
import sqlalchemy as sa
from sqlalchemy.test import testing
from sqlalchemy import Integer, String, ForeignKey, MetaData, func
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
from sqlalchemy.orm import mapper, relationship, create_session
from sqlalchemy.test.testing import eq_
from test.orm import _base, _fixtures
class GenerativeQueryTest(_base.MappedTest):
    """Exercise Query's generative (chaining) interface against a 100-row table.

    Fixture rows satisfy bar == i and range == i % 10 for i in 0..99; rows are
    inserted once for the whole class, so tests must not mutate data.
    """
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table('foo', metadata,
              Column('id', Integer, sa.Sequence('foo_id_seq'), primary_key=True),
              Column('bar', Integer),
              Column('range', Integer))

    @classmethod
    def fixtures(cls):
        rows = tuple([(i, i % 10) for i in range(100)])
        foo_data = (('bar', 'range'),) + rows
        return dict(foo=foo_data)

    @classmethod
    @testing.resolve_artifact_names
    def setup_mappers(cls):
        class Foo(_base.BasicEntity):
            pass
        mapper(Foo, foo)

    @testing.resolve_artifact_names
    def test_selectby(self):
        res = create_session().query(Foo).filter_by(range=5)
        assert res.order_by(Foo.bar)[0].bar == 5
        assert res.order_by(sa.desc(Foo.bar))[0].bar == 95

    @testing.fails_on('maxdb', 'FIXME: unknown')
    @testing.resolve_artifact_names
    def test_slice(self):
        # Query slicing must behave exactly like list slicing, including
        # negative indices, steps, and empty ranges.
        sess = create_session()
        query = sess.query(Foo).order_by(Foo.id)
        orig = query.all()

        assert query[1] == orig[1]
        assert list(query[10:20]) == orig[10:20]
        assert list(query[10:]) == orig[10:]
        assert list(query[:10]) == orig[:10]
        assert list(query[:10]) == orig[:10]
        assert list(query[5:5]) == orig[5:5]
        assert list(query[10:40:3]) == orig[10:40:3]
        assert list(query[-5:]) == orig[-5:]
        assert list(query[-2:-5]) == orig[-2:-5]
        assert list(query[-5:-2]) == orig[-5:-2]
        assert list(query[:-2]) == orig[:-2]

        assert query[10:20][5] == orig[10:20][5]

    @testing.uses_deprecated('Call to deprecated function apply_max')
    @testing.resolve_artifact_names
    def test_aggregate(self):
        sess = create_session()
        query = sess.query(Foo)
        assert query.count() == 100
        assert sess.query(func.min(foo.c.bar)).filter(foo.c.bar<30).one() == (0,)

        assert sess.query(func.max(foo.c.bar)).filter(foo.c.bar<30).one() == (29,)
        # Py3K
        #assert query.filter(foo.c.bar<30).values(sa.func.max(foo.c.bar)).__next__()[0] == 29
        #assert query.filter(foo.c.bar<30).values(sa.func.max(foo.c.bar)).__next__()[0] == 29
        # Py2K
        assert query.filter(foo.c.bar<30).values(sa.func.max(foo.c.bar)).next()[0] == 29
        assert query.filter(foo.c.bar<30).values(sa.func.max(foo.c.bar)).next()[0] == 29
        # end Py2K

    @testing.resolve_artifact_names
    def test_aggregate_1(self):
        # Skipped on a known-broken MySQLdb release.
        if (testing.against('mysql+mysqldb') and
            testing.db.dialect.dbapi.version_info[:4] == (1, 2, 1, 'gamma')):
            return

        query = create_session().query(func.sum(foo.c.bar))
        assert query.filter(foo.c.bar<30).one() == (435,)

    @testing.fails_on('firebird', 'FIXME: unknown')
    @testing.fails_on('mssql', 'AVG produces an average as the original column type on mssql.')
    @testing.resolve_artifact_names
    def test_aggregate_2(self):
        query = create_session().query(func.avg(foo.c.bar))
        avg = query.filter(foo.c.bar < 30).one()[0]
        eq_(float(round(avg, 1)), 14.5)

    @testing.fails_on('mssql', 'AVG produces an average as the original column type on mssql.')
    @testing.resolve_artifact_names
    def test_aggregate_3(self):
        query = create_session().query(Foo)

        # Py3K
        #avg_f = query.filter(foo.c.bar<30).values(sa.func.avg(foo.c.bar)).__next__()[0]
        # Py2K
        avg_f = query.filter(foo.c.bar<30).values(sa.func.avg(foo.c.bar)).next()[0]
        # end Py2K
        assert float(round(avg_f, 1)) == 14.5

        # Py3K
        #avg_o = query.filter(foo.c.bar<30).values(sa.func.avg(foo.c.bar)).__next__()[0]
        # Py2K
        avg_o = query.filter(foo.c.bar<30).values(sa.func.avg(foo.c.bar)).next()[0]
        # end Py2K
        assert float(round(avg_o, 1)) == 14.5

    @testing.resolve_artifact_names
    def test_filter(self):
        query = create_session().query(Foo)
        assert query.count() == 100
        assert query.filter(Foo.bar < 30).count() == 30
        res2 = query.filter(Foo.bar < 30).filter(Foo.bar > 10)
        assert res2.count() == 19

    @testing.resolve_artifact_names
    def test_options(self):
        query = create_session().query(Foo)
        class ext1(sa.orm.MapperExtension):
            def populate_instance(self, mapper, selectcontext, row, instance, **flags):
                instance.TEST = "hello world"
                return sa.orm.EXT_CONTINUE
        assert query.options(sa.orm.extension(ext1()))[0].TEST == "hello world"

    @testing.resolve_artifact_names
    def test_order_by(self):
        query = create_session().query(Foo)
        assert query.order_by(Foo.bar)[0].bar == 0
        assert query.order_by(sa.desc(Foo.bar))[0].bar == 99

    @testing.resolve_artifact_names
    def test_offset(self):
        query = create_session().query(Foo)
        assert list(query.order_by(Foo.bar).offset(10))[0].bar == 10

    @testing.resolve_artifact_names
    def test_limit(self):
        # BUG FIX: this was a second "def test_offset", which shadowed the
        # real offset test above so it never ran; renamed to match what it
        # actually checks (Query.limit).
        query = create_session().query(Foo)
        assert len(list(query.limit(10))) == 10
class GenerativeTest2(_base.MappedTest):
    """Count/distinct behavior across an implicitly joined two-table fixture."""

    @classmethod
    def define_tables(cls, metadata):
        Table('Table1', metadata,
              Column('id', Integer, primary_key=True))
        Table('Table2', metadata,
              Column('t1id', Integer, ForeignKey("Table1.id"),
                     primary_key=True),
              Column('num', Integer, primary_key=True))

    @classmethod
    @testing.resolve_artifact_names
    def setup_mappers(cls):
        class Obj1(_base.BasicEntity):
            pass
        class Obj2(_base.BasicEntity):
            pass

        mapper(Obj1, Table1)
        mapper(Obj2, Table2)

    @classmethod
    def fixtures(cls):
        # Table1 row 1 has three Table2 children, row 2 has two, row 3 one.
        return dict(
            Table1=(('id',),
                    (1,),
                    (2,),
                    (3,),
                    (4,)),
            Table2=(('num', 't1id'),
                    (1, 1),
                    (2, 1),
                    (3, 1),
                    (4, 2),
                    (5, 2),
                    (6, 3)))

    @testing.resolve_artifact_names
    def test_distinct_count(self):
        query = create_session().query(Obj1)
        eq_(query.count(), 4)

        res = query.filter(sa.and_(Table1.c.id == Table2.c.t1id,
                                   Table2.c.t1id == 1))
        # Cartesian row duplication without distinct(): 3 joined rows...
        eq_(res.count(), 3)

        res = query.filter(sa.and_(Table1.c.id == Table2.c.t1id,
                                   Table2.c.t1id == 1)).distinct()
        # ...collapsing to the single distinct Obj1 entity.
        eq_(res.count(), 1)
class RelationshipsTest(_fixtures.FixtureTest):
    """Query.join / Query.outerjoin / select_from over User->Order->Address."""
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    @testing.resolve_artifact_names
    def setup_mappers(cls):
        mapper(User, users, properties={
            'orders':relationship(mapper(Order, orders, properties={
                'addresses':relationship(mapper(Address, addresses))}))})

    @testing.resolve_artifact_names
    def test_join(self):
        """Query.join"""

        session = create_session()
        q = (session.query(User).join('orders', 'addresses').
             filter(Address.id == 1))
        eq_([User(id=7)], q.all())

    @testing.resolve_artifact_names
    def test_outer_join(self):
        """Query.outerjoin"""

        session = create_session()
        q = (session.query(User).outerjoin('orders', 'addresses').
             filter(sa.or_(Order.id == None, Address.id == 1)))
        eq_(set([User(id=7), User(id=8), User(id=10)]),
            set(q.all()))

    @testing.resolve_artifact_names
    def test_outer_join_count(self):
        """test the join and outerjoin functions on Query"""

        session = create_session()
        q = (session.query(User).outerjoin('orders', 'addresses').
             filter(sa.or_(Order.id == None, Address.id == 1)))
        eq_(q.count(), 4)

    @testing.resolve_artifact_names
    def test_from(self):
        # Explicit select_from() with a hand-built join must yield the same
        # entities as the generative outerjoin above.
        session = create_session()

        sel = users.outerjoin(orders).outerjoin(
            addresses, orders.c.address_id == addresses.c.id)
        q = (session.query(User).select_from(sel).
             filter(sa.or_(Order.id == None, Address.id == 1)))
        eq_(set([User(id=7), User(id=8), User(id=10)]),
            set(q.all()))
class CaseSensitiveTest(_base.MappedTest):
    """Same distinct/count scenario as GenerativeTest2, but with upper-case
    (quoting-sensitive) table and column names."""

    @classmethod
    def define_tables(cls, metadata):
        Table('Table1', metadata,
              Column('ID', Integer, primary_key=True))
        Table('Table2', metadata,
              Column('T1ID', Integer, ForeignKey("Table1.ID"),
                     primary_key=True),
              Column('NUM', Integer, primary_key=True))

    @classmethod
    @testing.resolve_artifact_names
    def setup_mappers(cls):
        class Obj1(_base.BasicEntity):
            pass
        class Obj2(_base.BasicEntity):
            pass

        mapper(Obj1, Table1)
        mapper(Obj2, Table2)

    @classmethod
    def fixtures(cls):
        return dict(
            Table1=(('ID',),
                    (1,),
                    (2,),
                    (3,),
                    (4,)),
            Table2=(('NUM', 'T1ID'),
                    (1, 1),
                    (2, 1),
                    (3, 1),
                    (4, 2),
                    (5, 2),
                    (6, 3)))

    @testing.resolve_artifact_names
    def test_distinct_count(self):
        q = create_session(bind=testing.db).query(Obj1)
        assert q.count() == 4
        res = q.filter(sa.and_(Table1.c.ID==Table2.c.T1ID,Table2.c.T1ID==1))
        assert res.count() == 3
        res = q.filter(sa.and_(Table1.c.ID==Table2.c.T1ID,Table2.c.T1ID==1)).distinct()
        eq_(res.count(), 1)
| mpl-2.0 |
guijomatos/SickRage | lib/feedcache/example.py | 28 | 1661 | #!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Example use of feedcache.Cache.
"""
__module_id__ = "$Id$"
#
# Import system modules
#
import sys
import shelve
#
# Import local modules
#
import cache
#
# Module
#
def main(urls=[]):
    """Fetch each feed URL through a shelve-backed feedcache.Cache and print
    the feed title plus every entry title."""
    # NOTE(review): mutable default argument; harmless here because the list
    # is never mutated, but callers should still pass their own list.
    print 'Saving feed data to ./.feedcache'
    storage = shelve.open('.feedcache')
    try:
        fc = cache.Cache(storage)
        for url in urls:
            parsed_data = fc.fetch(url)
            print parsed_data.feed.title
            for entry in parsed_data.entries:
                print '\t', entry.title
    finally:
        # Always flush/close the shelve file, even if a fetch raised.
        storage.close()
    return

if __name__ == '__main__':
    main(sys.argv[1:])
| gpl-3.0 |
pixicoin/pixicoin | contrib/pyminer/pyminer.py | 766 | 6434 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for bitcoind over HTTP basic auth."""
    OBJID = 1  # class-wide request-id counter, shared by all instances

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # 30-second timeout; connection is reused across requests.
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return the result, the error object on an
        RPC-level error, or None on a transport/decode failure."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']
    def getblockcount(self):
        return self.rpc('getblockcount')
    def getwork(self, data=None):
        # With data: submit a solved share; without: request new work.
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate x to an unsigned 32-bit value."""
    return x & 0xffffffffL

def bytereverse(x):
    """Byte-swap a 32-bit word (flip endianness)."""
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
            (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))

def bufreverse(in_buf):
    """Byte-swap every 32-bit word of a buffer (length assumed multiple of 4)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)

def wordreverse(in_buf):
    """Reverse the order of the 32-bit words of a buffer (bytes within each
    word are untouched)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    """Single-threaded getwork miner: scans nonces for one work unit at a time."""
    def __init__(self, id):
        self.id = id
        self.max_nonce = MAX_NONCE  # adapted each pass to hit ~'scantime' seconds

    def work(self, datastr, targetstr):
        """Scan nonces 0..max_nonce for a hash below target.

        Returns (hashes_done, nonce_bin): nonce_bin is the 4-byte winning
        nonce, or None if the range was exhausted without a solution.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the nonce into the original getwork data and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # hex offsets 152:160 are the nonce field of the 80-byte header
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One getwork round: fetch work, scan, retune max_nonce, submit."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Resize the scan range so the next pass takes ~'scantime' seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured upstream RPC server."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Worker-process entry point: mine forever under the given worker id."""
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse the key=value config file into the global 'settings' dict.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults, then coerce numeric settings (everything was read
    # as a string above).
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Spawn one mining process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
Lightmatter/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/django/contrib/gis/tests/maps/tests.py | 104 | 1330 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipUnless
from django.contrib.gis.geos import HAS_GEOS
from django.test import TestCase
from django.test.utils import override_settings
GOOGLE_MAPS_API_KEY = 'XXXX'
@skipUnless(HAS_GEOS, 'Geos is required.')
class GoogleMapsTest(TestCase):
    """Regression tests for django.contrib.gis.maps.google script output."""

    @override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY)
    def test_google_map_scripts(self):
        """
        Testing GoogleMap.scripts() output. See #20773.
        """
        from django.contrib.gis.maps.google.gmap import GoogleMap

        google_map = GoogleMap()
        scripts = google_map.scripts
        # The configured API key and the GMap2 constructor must both appear
        # in the generated script block.
        self.assertIn(GOOGLE_MAPS_API_KEY, scripts)
        self.assertIn("new GMap2", scripts)

    @override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY)
    def test_unicode_in_google_maps(self):
        """
        Test that GoogleMap doesn't crash with non-ASCII content.
        """
        from django.contrib.gis.geos import Point
        from django.contrib.gis.maps.google.gmap import GoogleMap, GMarker

        center = Point(6.146805, 46.227574)
        marker = GMarker(center,
                         title='En français !')
        google_map = GoogleMap(center=center, zoom=18, markers=[marker])
        self.assertIn("En français", google_map.scripts)
| mit |
EarthmanT/moto | tests/test_sts/test_sts.py | 19 | 3007 | from __future__ import unicode_literals
import json
import boto
from freezegun import freeze_time
import sure # noqa
from moto import mock_sts
@freeze_time("2012-01-01 12:00:00")
@mock_sts
def test_get_session_token():
    """GetSessionToken returns moto's canned credentials; expiration is the
    frozen clock plus the requested 123-second duration."""
    conn = boto.connect_sts()
    token = conn.get_session_token(duration=123)
    token.expiration.should.equal('2012-01-01T12:02:03.000Z')
    token.session_token.should.equal("AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE")
    token.access_key.should.equal("AKIAIOSFODNN7EXAMPLE")
    token.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY")


@freeze_time("2012-01-01 12:00:00")
@mock_sts
def test_get_federation_token():
    """GetFederationToken builds the federated-user ARN/id from the name."""
    conn = boto.connect_sts()
    token = conn.get_federation_token(duration=123, name="Bob")
    token.credentials.expiration.should.equal('2012-01-01T12:02:03.000Z')
    token.credentials.session_token.should.equal("AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==")
    token.credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE")
    token.credentials.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY")
    token.federated_user_arn.should.equal("arn:aws:sts::123456789012:federated-user/Bob")
    token.federated_user_id.should.equal("123456789012:Bob")


@freeze_time("2012-01-01 12:00:00")
@mock_sts
def test_assume_role():
    """AssumeRole echoes the role ARN and embeds the session name in the
    assumed-role id; credentials are moto's canned values."""
    conn = boto.connect_sts()

    policy = json.dumps({
        "Statement": [
            {
                "Sid": "Stmt13690092345534",
                "Action": [
                    "S3:ListBucket"
                ],
                "Effect": "Allow",
                "Resource": [
                    "arn:aws:s3:::foobar-tester"
                ]
            },
        ]
    })
    s3_role = "arn:aws:iam::123456789012:role/test-role"
    role = conn.assume_role(s3_role, "session-name", policy, duration_seconds=123)

    credentials = role.credentials
    credentials.expiration.should.equal('2012-01-01T12:02:03.000Z')
    credentials.session_token.should.equal("BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE")
    credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE")
    # moto's canned assume-role secret intentionally starts with a lowercase
    # 'a' (differs from the session-token secret above).
    credentials.secret_key.should.equal("aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY")
    role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role")
    role.user.assume_role_id.should.contain("session-name")
| apache-2.0 |
CyanogenMod/tools_repo | subcmds/rebase.py | 56 | 4362 | #
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from command import Command
from git_command import GitCommand
class Rebase(Command):
  """Rebase each selected project's current topic branch onto its upstream.

  For every project with a checked-out topic branch that tracks a remote
  branch, runs `git rebase <upstream>` (optionally stashing local changes
  first).  Projects with a detached HEAD or no tracking branch are skipped,
  unless exactly one project was requested, in which case that is an error.
  """
  common = True
  helpSummary = "Rebase local branches on upstream branch"
  helpUsage = """
%prog {[<project>...] | -i <project>...}
"""
  helpDescription = """
'%prog' uses git rebase to move local changes in the current topic branch to
the HEAD of the upstream history, useful when you have made commits in a topic
branch but need to incorporate new upstream changes "underneath" them.
"""

  def _Options(self, p):
    # Most flags are passed straight through to `git rebase`.
    p.add_option('-i', '--interactive',
                 dest="interactive", action="store_true",
                 help="interactive rebase (single project only)")
    p.add_option('-f', '--force-rebase',
                 dest='force_rebase', action='store_true',
                 help='Pass --force-rebase to git rebase')
    p.add_option('--no-ff',
                 dest='no_ff', action='store_true',
                 help='Pass --no-ff to git rebase')
    p.add_option('-q', '--quiet',
                 dest='quiet', action='store_true',
                 help='Pass --quiet to git rebase')
    p.add_option('--autosquash',
                 dest='autosquash', action='store_true',
                 help='Pass --autosquash to git rebase')
    p.add_option('--whitespace',
                 dest='whitespace', action='store', metavar='WS',
                 help='Pass --whitespace to git rebase')
    p.add_option('--auto-stash',
                 dest='auto_stash', action='store_true',
                 help='Stash local modifications before starting')

  def Execute(self, opt, args):
    """Run the rebase for each matching project; returns -1 on failure."""
    all_projects = self.GetProjects(args)
    one_project = len(all_projects) == 1

    # Interactive rebase only makes sense for a single project.
    if opt.interactive and not one_project:
      print('error: interactive rebase not supported with multiple projects',
            file=sys.stderr)
      if len(args) == 1:
        print('note: project %s is mapped to more than one path' % (args[0],),
              file=sys.stderr)
      return -1

    for project in all_projects:
      cb = project.CurrentBranch
      if not cb:
        if one_project:
          print("error: project %s has a detached HEAD" % project.relpath,
                file=sys.stderr)
          return -1
        # ignore branches with detached HEADs
        continue

      upbranch = project.GetBranch(cb)
      if not upbranch.LocalMerge:
        if one_project:
          print("error: project %s does not track any remote branches"
                % project.relpath, file=sys.stderr)
          return -1
        # ignore branches without remotes
        continue

      # NOTE: this rebinds the `args` parameter to the git argument list;
      # the original command-line arguments are not used past this point.
      args = ["rebase"]

      if opt.whitespace:
        args.append('--whitespace=%s' % opt.whitespace)

      if opt.quiet:
        args.append('--quiet')

      if opt.force_rebase:
        args.append('--force-rebase')

      if opt.no_ff:
        args.append('--no-ff')

      if opt.autosquash:
        args.append('--autosquash')

      if opt.interactive:
        args.append("-i")

      args.append(upbranch.LocalMerge)

      print('# %s: rebasing %s -> %s'
            % (project.relpath, cb, upbranch.LocalMerge), file=sys.stderr)

      needs_stash = False
      if opt.auto_stash:
        # Refresh the index first; a non-zero exit means local modifications
        # exist and must be stashed before the rebase can proceed.
        stash_args = ["update-index", "--refresh", "-q"]

        if GitCommand(project, stash_args).Wait() != 0:
          needs_stash = True
          # Dirty index, requires stash...
          stash_args = ["stash"]

          if GitCommand(project, stash_args).Wait() != 0:
            return -1

      if GitCommand(project, args).Wait() != 0:
        return -1

      if needs_stash:
        # stash_args is ["stash"] here; turn it into `git stash pop --quiet`
        # to restore the modifications stashed above.
        stash_args.append('pop')
        stash_args.append('--quiet')
        if GitCommand(project, stash_args).Wait() != 0:
          return -1
| apache-2.0 |
vilorious/pyload | module/lib/beaker/cache.py | 45 | 15814 | """Cache object
The Cache object is used to manage a set of cache files and their
associated backend. The backends can be rotated on the fly by
specifying an alternate type when used.
Advanced users can add new backends in beaker.backends
"""
import warnings
import beaker.container as container
import beaker.util as util
from beaker.exceptions import BeakerException, InvalidCacheBackendError
import beaker.ext.memcached as memcached
import beaker.ext.database as database
import beaker.ext.sqla as sqla
import beaker.ext.google as google
# Initialize the basic available backends
clsmap = {
'memory':container.MemoryNamespaceManager,
'dbm':container.DBMNamespaceManager,
'file':container.FileNamespaceManager,
'ext:memcached':memcached.MemcachedNamespaceManager,
'ext:database':database.DatabaseNamespaceManager,
'ext:sqla': sqla.SqlaNamespaceManager,
'ext:google': google.GoogleNamespaceManager,
}
# Initialize the cache region dict
cache_regions = {}
cache_managers = {}
try:
import pkg_resources
# Load up the additional entry point defined backends
for entry_point in pkg_resources.iter_entry_points('beaker.backends'):
try:
NamespaceManager = entry_point.load()
name = entry_point.name
if name in clsmap:
raise BeakerException("NamespaceManager name conflict,'%s' "
"already loaded" % name)
clsmap[name] = NamespaceManager
except (InvalidCacheBackendError, SyntaxError):
# Ignore invalid backends
pass
except:
import sys
from pkg_resources import DistributionNotFound
# Warn when there's a problem loading a NamespaceManager
if not isinstance(sys.exc_info()[1], DistributionNotFound):
import traceback
from StringIO import StringIO
tb = StringIO()
traceback.print_exc(file=tb)
warnings.warn("Unable to load NamespaceManager entry point: '%s': "
"%s" % (entry_point, tb.getvalue()), RuntimeWarning,
2)
except ImportError:
pass
def cache_region(region, *deco_args):
"""Decorate a function to cache itself using a cache region
The region decorator requires arguments if there are more than
2 of the same named function, in the same module. This is
because the namespace used for the functions cache is based on
the functions name and the module.
Example::
# Add cache region settings to beaker:
beaker.cache.cache_regions.update(dict_of_config_region_options))
@cache_region('short_term', 'some_data')
def populate_things(search_term, limit, offset):
return load_the_data(search_term, limit, offset)
return load('rabbits', 20, 0)
.. note::
The function being decorated must only be called with
positional arguments.
"""
cache = [None]
def decorate(func):
namespace = util.func_namespace(func)
def cached(*args):
reg = cache_regions[region]
if not reg.get('enabled', True):
return func(*args)
if not cache[0]:
if region not in cache_regions:
raise BeakerException('Cache region not configured: %s' % region)
cache[0] = Cache._get_cache(namespace, reg)
cache_key = " ".join(map(str, deco_args + args))
def go():
return func(*args)
return cache[0].get_value(cache_key, createfunc=go)
cached._arg_namespace = namespace
cached._arg_region = region
return cached
return decorate
def region_invalidate(namespace, region, *args):
"""Invalidate a cache region namespace or decorated function
This function only invalidates cache spaces created with the
cache_region decorator.
:param namespace: Either the namespace of the result to invalidate, or the
cached function reference
:param region: The region the function was cached to. If the function was
cached to a single region then this argument can be None
:param args: Arguments that were used to differentiate the cached
function as well as the arguments passed to the decorated
function
Example::
# Add cache region settings to beaker:
beaker.cache.cache_regions.update(dict_of_config_region_options))
def populate_things(invalidate=False):
@cache_region('short_term', 'some_data')
def load(search_term, limit, offset):
return load_the_data(search_term, limit, offset)
# If the results should be invalidated first
if invalidate:
region_invalidate(load, None, 'some_data',
'rabbits', 20, 0)
return load('rabbits', 20, 0)
"""
if callable(namespace):
if not region:
region = namespace._arg_region
namespace = namespace._arg_namespace
if not region:
raise BeakerException("Region or callable function "
"namespace is required")
else:
region = cache_regions[region]
cache = Cache._get_cache(namespace, region)
cache_key = " ".join(str(x) for x in args)
cache.remove_value(cache_key)
class Cache(object):
"""Front-end to the containment API implementing a data cache.
:param namespace: the namespace of this Cache
:param type: type of cache to use
:param expire: seconds to keep cached data
:param expiretime: seconds to keep cached data (legacy support)
:param starttime: time when cache was cache was
"""
def __init__(self, namespace, type='memory', expiretime=None,
starttime=None, expire=None, **nsargs):
try:
cls = clsmap[type]
if isinstance(cls, InvalidCacheBackendError):
raise cls
except KeyError:
raise TypeError("Unknown cache implementation %r" % type)
self.namespace = cls(namespace, **nsargs)
self.expiretime = expiretime or expire
self.starttime = starttime
self.nsargs = nsargs
@classmethod
def _get_cache(cls, namespace, kw):
key = namespace + str(kw)
try:
return cache_managers[key]
except KeyError:
cache_managers[key] = cache = cls(namespace, **kw)
return cache
def put(self, key, value, **kw):
self._get_value(key, **kw).set_value(value)
set_value = put
def get(self, key, **kw):
"""Retrieve a cached value from the container"""
return self._get_value(key, **kw).get_value()
get_value = get
def remove_value(self, key, **kw):
mycontainer = self._get_value(key, **kw)
if mycontainer.has_current_value():
mycontainer.clear_value()
remove = remove_value
def _get_value(self, key, **kw):
if isinstance(key, unicode):
key = key.encode('ascii', 'backslashreplace')
if 'type' in kw:
return self._legacy_get_value(key, **kw)
kw.setdefault('expiretime', self.expiretime)
kw.setdefault('starttime', self.starttime)
return container.Value(key, self.namespace, **kw)
@util.deprecated("Specifying a "
"'type' and other namespace configuration with cache.get()/put()/etc. "
"is deprecated. Specify 'type' and other namespace configuration to "
"cache_manager.get_cache() and/or the Cache constructor instead.")
def _legacy_get_value(self, key, type, **kw):
expiretime = kw.pop('expiretime', self.expiretime)
starttime = kw.pop('starttime', None)
createfunc = kw.pop('createfunc', None)
kwargs = self.nsargs.copy()
kwargs.update(kw)
c = Cache(self.namespace.namespace, type=type, **kwargs)
return c._get_value(key, expiretime=expiretime, createfunc=createfunc,
starttime=starttime)
def clear(self):
"""Clear all the values from the namespace"""
self.namespace.remove()
# dict interface
def __getitem__(self, key):
return self.get(key)
def __contains__(self, key):
return self._get_value(key).has_current_value()
def has_key(self, key):
return key in self
def __delitem__(self, key):
self.remove_value(key)
def __setitem__(self, key, value):
self.put(key, value)
class CacheManager(object):
def __init__(self, **kwargs):
"""Initialize a CacheManager object with a set of options
Options should be parsed with the
:func:`~beaker.util.parse_cache_config_options` function to
ensure only valid options are used.
"""
self.kwargs = kwargs
self.regions = kwargs.pop('cache_regions', {})
# Add these regions to the module global
cache_regions.update(self.regions)
def get_cache(self, name, **kwargs):
kw = self.kwargs.copy()
kw.update(kwargs)
return Cache._get_cache(name, kw)
def get_cache_region(self, name, region):
if region not in self.regions:
raise BeakerException('Cache region not configured: %s' % region)
kw = self.regions[region]
return Cache._get_cache(name, kw)
def region(self, region, *args):
"""Decorate a function to cache itself using a cache region
The region decorator requires arguments if there are more than
2 of the same named function, in the same module. This is
because the namespace used for the functions cache is based on
the functions name and the module.
Example::
# Assuming a cache object is available like:
cache = CacheManager(dict_of_config_options)
def populate_things():
@cache.region('short_term', 'some_data')
def load(search_term, limit, offset):
return load_the_data(search_term, limit, offset)
return load('rabbits', 20, 0)
.. note::
The function being decorated must only be called with
positional arguments.
"""
return cache_region(region, *args)
def region_invalidate(self, namespace, region, *args):
"""Invalidate a cache region namespace or decorated function
This function only invalidates cache spaces created with the
cache_region decorator.
:param namespace: Either the namespace of the result to invalidate, or the
name of the cached function
:param region: The region the function was cached to. If the function was
cached to a single region then this argument can be None
:param args: Arguments that were used to differentiate the cached
function as well as the arguments passed to the decorated
function
Example::
# Assuming a cache object is available like:
cache = CacheManager(dict_of_config_options)
def populate_things(invalidate=False):
@cache.region('short_term', 'some_data')
def load(search_term, limit, offset):
return load_the_data(search_term, limit, offset)
# If the results should be invalidated first
if invalidate:
cache.region_invalidate(load, None, 'some_data',
'rabbits', 20, 0)
return load('rabbits', 20, 0)
"""
return region_invalidate(namespace, region, *args)
if callable(namespace):
if not region:
region = namespace._arg_region
namespace = namespace._arg_namespace
if not region:
raise BeakerException("Region or callable function "
"namespace is required")
else:
region = self.regions[region]
cache = self.get_cache(namespace, **region)
cache_key = " ".join(str(x) for x in args)
cache.remove_value(cache_key)
def cache(self, *args, **kwargs):
"""Decorate a function to cache itself with supplied parameters
:param args: Used to make the key unique for this function, as in region()
above.
:param kwargs: Parameters to be passed to get_cache(), will override defaults
Example::
# Assuming a cache object is available like:
cache = CacheManager(dict_of_config_options)
def populate_things():
@cache.cache('mycache', expire=15)
def load(search_term, limit, offset):
return load_the_data(search_term, limit, offset)
return load('rabbits', 20, 0)
.. note::
The function being decorated must only be called with
positional arguments.
"""
cache = [None]
key = " ".join(str(x) for x in args)
def decorate(func):
namespace = util.func_namespace(func)
def cached(*args):
if not cache[0]:
cache[0] = self.get_cache(namespace, **kwargs)
cache_key = key + " " + " ".join(str(x) for x in args)
def go():
return func(*args)
return cache[0].get_value(cache_key, createfunc=go)
cached._arg_namespace = namespace
return cached
return decorate
def invalidate(self, func, *args, **kwargs):
"""Invalidate a cache decorated function
This function only invalidates cache spaces created with the
cache decorator.
:param func: Decorated function to invalidate
:param args: Used to make the key unique for this function, as in region()
above.
:param kwargs: Parameters that were passed for use by get_cache(), note that
this is only required if a ``type`` was specified for the
function
Example::
# Assuming a cache object is available like:
cache = CacheManager(dict_of_config_options)
def populate_things(invalidate=False):
@cache.cache('mycache', type="file", expire=15)
def load(search_term, limit, offset):
return load_the_data(search_term, limit, offset)
# If the results should be invalidated first
if invalidate:
cache.invalidate(load, 'mycache', 'rabbits', 20, 0, type="file")
return load('rabbits', 20, 0)
"""
namespace = func._arg_namespace
cache = self.get_cache(namespace, **kwargs)
cache_key = " ".join(str(x) for x in args)
cache.remove_value(cache_key)
| gpl-3.0 |
ocaisa/easybuild-easyblocks | easybuild/easyblocks/n/nemo.py | 2 | 4557 | ##
# Copyright 2015-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing NEMO, implemented as an easyblock
@author: Oriol Mula-Valls (IC3)
"""
import os
import shutil
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import write_file
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_NEMO(EasyBlock):
"""Support for building/installing NEMO."""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for NEMO."""
super(EB_NEMO, self).__init__(*args, **kwargs)
self.conf_name = 'EB_NEMO_CONFIG'
self.conf_arch_file = 'NEMOGCM/ARCH/arch-eb.fcm'
@staticmethod
def extra_options():
"""Custom easyconfig parameters for NEMO."""
extra_vars = {
'with_components': [None, "List of components to include (e.g. TOP_SRC)", MANDATORY],
'add_keys': [None, "Add compilation keys", CUSTOM],
'del_keys': [None, "Delete compilation keys", CUSTOM]
}
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Custom configuration procedure for NEMO."""
netcdf_fortran_root = get_software_root('netCDF-Fortran')
if not netcdf_fortran_root:
raise EasyBuildError("netCDF-Fortran is not available, but is a required dependency")
cfg = '\n'.join([
"%%NCDF_INC -I%s/include" % netcdf_fortran_root,
"%%NCDF_LIB -L%s/lib -lnetcdff" % netcdf_fortran_root,
"%%FC %s" % os.getenv('F90'),
"%FCFLAGS -r8 -O3 -traceback",
"%FFLAGS %FCFLAGS",
"%LD %FC",
"%LDFLAGS ",
"%FPPFLAGS -P -C",
"%AR ar",
"%ARFLAGS rs",
"%MK make",
"%USER_INC %NCDF_INC",
"%USER_LIB %NCDF_LIB"
])
write_file(self.conf_arch_file, cfg)
cmd = "./makenemo -n %s -d '%s' -j0 -m eb" % (self.conf_name, ' '.join(self.cfg['with_components']))
if self.cfg['add_keys'] is not None:
cmd += " add_key '%s'" % ' '.join(self.cfg['add_keys'])
if self.cfg['del_keys'] is not None:
cmd += " del_key '%s'" % ' '.join(self.cfg['del_keys'])
try:
dst = 'NEMOGCM/CONFIG'
os.chdir(dst)
self.log.debug("Changed to directory %s", dst)
except OSError, err:
raise EasyBuildError("Failed to change to directory %s: %s", dst, err)
run_cmd(cmd, log_all=True, simple=True, log_ok=True)
def build_step(self):
"""Custom build procedure for NEMO."""
cmd = "./makenemo -n %s -m eb" % self.conf_name
run_cmd(cmd, log_all=True, simple=True, log_ok=True)
def install_step(self):
"""Custom install procedure for NEMO."""
binpath = os.path.join(self.cfg['start_dir'], 'NEMOGCM', 'CONFIG', self.conf_name, 'BLD/bin')
try:
shutil.copytree(binpath, os.path.join(self.installdir, 'bin'))
except OSError, err:
raise EasyBuildError("Copying %s to installation dir failed: %s", binpath, err)
def sanity_check_step(self):
"""Custom sanity check for NEMO."""
custom_paths = {
'files': ['bin/nemo.exe'],
'dirs': [],
}
super(EB_NEMO, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 |
FlorentChamault/My_sickbeard | lib/subliminal/services/tvsubtitles.py | 30 | 7310 | # -*- coding: utf-8 -*-
# Copyright 2012 Nicolas Wack <wackou@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..cache import cachedmethod
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import get_keywords
from ..videos import Episode
from bs4 import BeautifulSoup
import logging
import re
from sickbeard import db
from sickbeard import logger as glog
logger = logging.getLogger("subliminal")
def match(pattern, string):
    """Return the first capture group of *pattern* found in *string*.

    Logs a debug message and returns None when the pattern does not match.
    """
    found = re.search(pattern, string)
    if found is None:
        logger.debug(u'Could not match %r on %r' % (pattern, string))
        return None
    return found.group(1)
class TvSubtitles(ServiceBase):
server_url = 'http://www.tvsubtitles.net'
site_url = 'http://www.tvsubtitles.net'
api_based = False
languages = language_set(['ar', 'bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'hu',
'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'uk',
'zh', 'pb'])
#TODO: Find more exceptions
language_map = {'gr': Language('gre'), 'cz': Language('cze'), 'ua': Language('ukr'),
'cn': Language('chi'), 'br': Language('pob')}
videos = [Episode]
require_video = False
#required_features = ['permissive']
@cachedmethod
def get_likely_series_id(self, name):
r = self.session.post('%s/search.php' % self.server_url, data={'q': name})
soup = BeautifulSoup(r.content, self.required_features)
maindiv = soup.find('div', 'left')
results = []
for elem in maindiv.find_all('li'):
sid = int(match('tvshow-([0-9]+)\.html', elem.a['href']))
show_name = match('(.*) \(', elem.a.text)
results.append((show_name, sid))
#TODO: pick up the best one in a smart way
result = results[0]
return result[1]
@cachedmethod
def get_episode_id(self, series_id, season, number):
"""Get the TvSubtitles id for the given episode. Raises KeyError if none
could be found."""
# download the page of the season, contains ids for all episodes
episode_id = None
r = self.session.get('%s/tvshow-%d-%d.html' % (self.server_url, series_id, season))
soup = BeautifulSoup(r.content, self.required_features)
table = soup.find('table', id='table5')
for row in table.find_all('tr'):
cells = row.find_all('td')
if not cells:
continue
episode_number = match('x([0-9]+)', cells[0].text)
if not episode_number:
continue
episode_number = int(episode_number)
episode_id = int(match('episode-([0-9]+)', cells[1].a['href']))
# we could just return the id of the queried episode, but as we
# already downloaded the whole page we might as well fill in the
# information for all the episodes of the season
self.cache_for(self.get_episode_id, args=(series_id, season, episode_number), result=episode_id)
# raises KeyError if not found
return self.cached_value(self.get_episode_id, args=(series_id, season, number))
# Do not cache this method in order to always check for the most recent
# subtitles
def get_sub_ids(self, episode_id):
subids = []
r = self.session.get('%s/episode-%d.html' % (self.server_url, episode_id))
epsoup = BeautifulSoup(r.content, self.required_features)
for subdiv in epsoup.find_all('a'):
if 'href' not in subdiv.attrs or not subdiv['href'].startswith('/subtitle'):
continue
subid = int(match('([0-9]+)', subdiv['href']))
lang = self.get_language(match('flags/(.*).gif', subdiv.img['src']))
result = {'subid': subid, 'language': lang}
for p in subdiv.find_all('p'):
if 'alt' in p.attrs and p['alt'] == 'rip':
result['rip'] = p.text.strip()
if 'alt' in p.attrs and p['alt'] == 'release':
result['release'] = p.text.strip()
subids.append(result)
return subids
def list_checked(self, video, languages):
return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
def query(self, filepath, languages, keywords, series, season, episode):
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
self.init_cache()
myDB = db.DBConnection()
myDBcache = db.DBConnection("cache.db")
sql_show_id = myDB.select("SELECT tvdb_id, show_name FROM tv_shows WHERE show_name LIKE ?", ['%'+series+'%'])
if sql_show_id[0][0]:
sql_scene = myDB.select("SELECT scene_season, scene_episode FROM tv_episodes WHERE showid = ? and season = ? and episode = ?", [sql_show_id[0][0],season,episode])
real_name=sql_show_id[0][1]
if sql_scene[0][0]:
season=sql_scene[0][0]
episode= sql_scene[0][1]
sql_custom_names = myDBcache.select("SELECT show_name FROM scene_exceptions WHERE tvdb_id = ? and show_name<> ? ORDER BY exception_id asc", [sql_show_id[0][0],real_name])
if sql_custom_names:
series=sql_custom_names[0][0]
glog.log(u'Searching Subtitles on Tvsubtitles with title : %s season : %s episode : %s' % (series,season,episode))
sid = self.get_likely_series_id(series.lower())
try:
ep_id = self.get_episode_id(sid, season, episode)
except KeyError:
logger.debug(u'Could not find episode id for %s season %d episode %d' % (series, season, episode))
return []
subids = self.get_sub_ids(ep_id)
# filter the subtitles with our queried languages
subtitles = []
for subid in subids:
language = subid['language']
if language not in languages:
continue
path = get_subtitle_path(filepath, language, self.config.multi)
subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s/download-%d.html' % (self.server_url, subid['subid']),
keywords=[subid['rip'], subid['release']])
subtitles.append(subtitle)
return subtitles
def download(self, subtitle):
self.download_zip_file(subtitle.link, subtitle.path)
return subtitle
Service = TvSubtitles
| gpl-3.0 |
Tokyo-Buffalo/tokyosouth | env/lib/python3.6/site-packages/twisted/test/test_text.py | 13 | 6456 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.text}.
"""
from twisted.python.compat import NativeStringIO as StringIO
from twisted.trial import unittest
from twisted.python import text
sampleText = \
"""Every attempt to employ mathematical methods in the study of chemical
questions must be considered profoundly irrational and contrary to the
spirit of chemistry ... If mathematical analysis should ever hold a
prominent place in chemistry - an aberration which is happily almost
impossible - it would occasion a rapid and widespread degeneration of that
science.
-- Auguste Comte, Philosophie Positive, Paris, 1838
"""
class WrapTests(unittest.TestCase):
"""
Tests for L{text.greedyWrap}.
"""
def setUp(self):
self.lineWidth = 72
self.sampleSplitText = sampleText.split()
self.output = text.wordWrap(sampleText, self.lineWidth)
def test_wordCount(self):
"""
Compare the number of words.
"""
words = []
for line in self.output:
words.extend(line.split())
wordCount = len(words)
sampleTextWordCount = len(self.sampleSplitText)
self.assertEqual(wordCount, sampleTextWordCount)
def test_wordMatch(self):
"""
Compare the lists of words.
"""
words = []
for line in self.output:
words.extend(line.split())
# Using assertEqual here prints out some
# rather too long lists.
self.assertTrue(self.sampleSplitText == words)
def test_lineLength(self):
"""
Check the length of the lines.
"""
failures = []
for line in self.output:
if not len(line) <= self.lineWidth:
failures.append(len(line))
if failures:
self.fail("%d of %d lines were too long.\n"
"%d < %s" % (len(failures), len(self.output),
self.lineWidth, failures))
def test_doubleNewline(self):
"""
Allow paragraphs delimited by two \ns.
"""
sampleText = "et\n\nphone\nhome."
result = text.wordWrap(sampleText, self.lineWidth)
self.assertEqual(result, ["et", "", "phone home.", ""])
class LineTests(unittest.TestCase):
    """
    Tests for L{isMultiline} and L{endsInNewline}.
    """

    def test_isMultiline(self):
        """
        L{text.isMultiline} returns C{True} if the string has a newline in it.
        """
        # Contains an embedded newline -> multiline.
        s = 'This code\n "breaks."'
        m = text.isMultiline(s)
        self.assertTrue(m)

        # No newline anywhere -> not multiline.
        s = 'This code does not "break."'
        m = text.isMultiline(s)
        self.assertFalse(m)

    def test_endsInNewline(self):
        """
        L{text.endsInNewline} returns C{True} if the string ends in a newline.
        """
        # Trailing newline -> True.
        s = 'newline\n'
        m = text.endsInNewline(s)
        self.assertTrue(m)

        # No trailing newline -> False.
        s = 'oldline'
        m = text.endsInNewline(s)
        self.assertFalse(m)
class StringyStringTests(unittest.TestCase):
"""
Tests for L{text.stringyString}.
"""
def test_tuple(self):
"""
Tuple elements are displayed on separate lines.
"""
s = ('a', 'b')
m = text.stringyString(s)
self.assertEqual(m, '(a,\n b,)\n')
def test_dict(self):
"""
Dicts elements are displayed using C{str()}.
"""
s = {'a': 0}
m = text.stringyString(s)
self.assertEqual(m, '{a: 0}')
def test_list(self):
"""
List elements are displayed on separate lines using C{str()}.
"""
s = ['a', 'b']
m = text.stringyString(s)
self.assertEqual(m, '[a,\n b,]\n')
class SplitTests(unittest.TestCase):
"""
Tests for L{text.splitQuoted}.
"""
def test_oneWord(self):
"""
Splitting strings with one-word phrases.
"""
s = 'This code "works."'
r = text.splitQuoted(s)
self.assertEqual(['This', 'code', 'works.'], r)
def test_multiWord(self):
s = 'The "hairy monkey" likes pie.'
r = text.splitQuoted(s)
self.assertEqual(['The', 'hairy monkey', 'likes', 'pie.'], r)
# Some of the many tests that would fail:
#def test_preserveWhitespace(self):
# phrase = '"MANY SPACES"'
# s = 'With %s between.' % (phrase,)
# r = text.splitQuoted(s)
# self.assertEqual(['With', phrase, 'between.'], r)
#def test_escapedSpace(self):
# s = r"One\ Phrase"
# r = text.splitQuoted(s)
# self.assertEqual(["One Phrase"], r)
class StrFileTests(unittest.TestCase):
def setUp(self):
self.io = StringIO("this is a test string")
def tearDown(self):
pass
def test_1_f(self):
self.assertFalse(text.strFile("x", self.io))
def test_1_1(self):
self.assertTrue(text.strFile("t", self.io))
def test_1_2(self):
self.assertTrue(text.strFile("h", self.io))
def test_1_3(self):
self.assertTrue(text.strFile("i", self.io))
def test_1_4(self):
self.assertTrue(text.strFile("s", self.io))
def test_1_5(self):
self.assertTrue(text.strFile("n", self.io))
def test_1_6(self):
self.assertTrue(text.strFile("g", self.io))
def test_3_1(self):
self.assertTrue(text.strFile("thi", self.io))
def test_3_2(self):
self.assertTrue(text.strFile("his", self.io))
def test_3_3(self):
self.assertTrue(text.strFile("is ", self.io))
def test_3_4(self):
self.assertTrue(text.strFile("ing", self.io))
def test_3_f(self):
self.assertFalse(text.strFile("bla", self.io))
def test_large_1(self):
self.assertTrue(text.strFile("this is a test", self.io))
def test_large_2(self):
self.assertTrue(text.strFile("is a test string", self.io))
def test_large_f(self):
self.assertFalse(text.strFile("ds jhfsa k fdas", self.io))
def test_overlarge_f(self):
self.assertFalse(text.strFile(
"djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew",
self.io))
def test_self(self):
self.assertTrue(text.strFile("this is a test string", self.io))
def test_insensitive(self):
self.assertTrue(text.strFile("ThIs is A test STRING", self.io, False))
| mit |
ofanoyi/scrapy | scrapy/spider.py | 15 | 2307 | """
Base class for Scrapy spiders
See documentation in docs/topics/spiders.rst
"""
from scrapy import log
from scrapy.http import Request
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import url_is_from_spider
from scrapy.utils.deprecate import create_deprecated_class
class Spider(object_ref):
    """Base class for scrapy spiders. All spiders must inherit from this
    class.
    """

    # Unique identifier for the spider; subclasses normally set this as a
    # class attribute, or it can be passed to the constructor.
    name = None

    def __init__(self, name=None, **kwargs):
        # A name is mandatory: take the constructor argument, else require a
        # class-level ``name`` on the subclass.
        if name is not None:
            self.name = name
        elif not getattr(self, 'name', None):
            raise ValueError("%s must have a name" % type(self).__name__)
        # Arbitrary keyword arguments become instance attributes.
        self.__dict__.update(kwargs)
        if not hasattr(self, 'start_urls'):
            self.start_urls = []

    def log(self, message, level=log.DEBUG, **kw):
        """Log the given messages at the given log level. Always use this
        method to send log messages from your spider
        """
        log.msg(message, spider=self, level=level, **kw)

    def set_crawler(self, crawler):
        # A spider may be bound to at most one crawler for its lifetime.
        assert not hasattr(self, '_crawler'), "Spider already bounded to %s" % crawler
        self._crawler = crawler

    @property
    def crawler(self):
        # Accessing the crawler before set_crawler() was called is a bug.
        assert hasattr(self, '_crawler'), "Spider not bounded to any crawler"
        return self._crawler

    @property
    def settings(self):
        # Convenience shortcut to the bound crawler's settings.
        return self.crawler.settings

    def start_requests(self):
        # Default entry point: one request per configured start URL.
        for url in self.start_urls:
            yield self.make_requests_from_url(url)

    def make_requests_from_url(self, url):
        # dont_filter=True so start URLs bypass the duplicate filter.
        return Request(url, dont_filter=True)

    def parse(self, response):
        # Default callback; subclasses must override to extract data/requests.
        raise NotImplementedError

    @classmethod
    def handles_request(cls, request):
        # True when the request URL belongs to one of this spider's domains.
        return url_is_from_spider(request.url, cls)

    def __str__(self):
        return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self))

    __repr__ = __str__
# Deprecated alias for Spider, kept for backwards compatibility (see
# scrapy.utils.deprecate.create_deprecated_class).
BaseSpider = create_deprecated_class('BaseSpider', Spider)
class ObsoleteClass(object):
    """Placeholder for a removed API: looking up any undefined attribute
    raises ``AttributeError`` carrying a fixed migration message.
    """

    def __init__(self, message):
        self.message = message

    def __getattr__(self, name):
        # Only invoked for attributes missing from the instance dict, so
        # ``self.message`` itself remains readable.
        raise AttributeError(self.message)
spiders = ObsoleteClass("""
"from scrapy.spider import spiders" no longer works - use "from scrapy.project import crawler" and then access crawler.spiders attribute"
""")
| bsd-3-clause |
jazztpt/edx-platform | common/djangoapps/course_modes/migrations/0002_auto__add_field_coursemode_currency.py | 114 | 1475 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``CourseMode.currency``
    (CharField, max_length=8, default ``'usd'``).
    """

    def forwards(self, orm):
        # Adding field 'CourseMode.currency'
        db.add_column('course_modes_coursemode', 'currency',
                      self.gf('django.db.models.fields.CharField')(default='usd', max_length=8),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'CourseMode.currency'
        db.delete_column('course_modes_coursemode', 'currency')

    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        'course_modes.coursemode': {
            'Meta': {'object_name': 'CourseMode'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
        }
    }

    complete_apps = ['course_modes']
| agpl-3.0 |
ManageIQ/integration_tests | cfme/networks/cloud_network.py | 2 | 7763 | import attr
from navmazing import NavigateToAttribute
from navmazing import NavigateToSibling
from widgetastic.utils import Version
from widgetastic.utils import VersionPick
from cfme.common import CustomButtonEventsMixin
from cfme.common import Taggable
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.modeling.base import parent_of_type
from cfme.networks.views import CloudNetworkAddView
from cfme.networks.views import CloudNetworkDetailsView
from cfme.networks.views import CloudNetworkEditView
from cfme.networks.views import CloudNetworkView
from cfme.utils import providers
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.wait import wait_for
@attr.s
class CloudNetwork(Taggable, BaseEntity, CustomButtonEventsMixin):
    """Class representing cloud networks in cfme database"""
    category = 'networks'
    string_name = 'CloudNetwork'
    quad_name = None
    db_types = ['CloudNetwork']

    name = attr.ib()
    provider_obj = attr.ib(default=None)

    @property
    def provider(self):
        """Network provider this entity is parented under (via collection tree)."""
        # Imported locally to avoid a circular import with the provider module.
        from cfme.networks.provider import NetworkProvider
        return parent_of_type(self, NetworkProvider)

    @property
    def parent_provider(self):
        """ Return object of parent cloud provider """
        view = navigate_to(self, 'Details')
        # The relationships-row label changed in CFME 5.10.
        parent_cloud_text = VersionPick({Version.lowest(): 'Parent ems cloud',
                                         '5.10': 'Parent Cloud Provider'}
                                        ).pick(self.appliance.version)
        provider_name = view.entities.relationships.get_text_of(parent_cloud_text)
        return providers.get_crud_by_name(provider_name)

    @property
    def network_type(self):
        """ Return type of network as shown on the Details page. """
        view = navigate_to(self, 'Details')
        return view.entities.properties.get_text_of('Type')

    @property
    def cloud_tenant(self):
        """Return name of tenant that network belongs to"""
        view = navigate_to(self, 'Details')
        return view.entities.relationships.get_text_of('Cloud tenant')

    def edit(self, name, change_external=None, change_admin_state=None, change_shared=None):
        """Edit cloud network

        Args:
            name: (str) new network name
            change_external: (bool) is network external
            change_admin_state: (bool) network's administrative state, 'Up' or 'Down'
            change_shared: (bool) is network shared, 'Yes' or 'No'
        """
        view = navigate_to(self, 'Edit')
        # None values are skipped by view.fill, so only passed fields change.
        view.fill({'network_name': name,
                   'ext_router': change_external,
                   'administrative_state': change_admin_state,
                   'shared': change_shared})
        view.save.click()
        view.flash.assert_success_message(f'Cloud Network "{name}" updated')
        # Keep the local entity in sync with the rename.
        self.name = name

    def delete(self):
        """Delete this cloud network"""
        view = navigate_to(self, 'Details')
        view.toolbar.configuration.item_select('Delete this Cloud Network', handle_alert=True)
        view.flash.assert_success_message('Delete initiated for 1 Cloud Network.')

    @property
    def network_provider(self):
        """ Returns network provider """
        # security group collection contains reference to provider
        if self.provider:
            return self.provider
        # otherwise get provider name from ui
        view = navigate_to(self, 'Details')
        try:
            prov_name = view.entities.relationships.get_text_of("Network Manager")
            collection = self.appliance.collections.network_provider
            return collection.instantiate(name=prov_name)
        except ItemNotFound:  # BZ 1480577
            return None
@attr.s
class CloudNetworkCollection(BaseCollection):
    """Collection object for Cloud Network object"""
    ENTITY = CloudNetwork

    def create(self, name, tenant, provider, network_manager, network_type, is_external=False,
               admin_state=True, is_shared=False):
        """Create cloud network

        Args:
            name: (str) name of the network
            tenant: (str) name of cloud tenant to place network to
            provider: crud object of Openstack Cloud provider
            network_manager: (str) name of network manager
            network_type: (str) type of network, such as 'VXLAN', 'VLAN', 'GRE' etc.
            is_external: (bool) is network external
            admin_state: (bool) network's initial administrative state, True stands for 'Up',
                         False - 'Down'
            is_shared: (bool) is network shared

        Returns:
            instance of cfme.networks.cloud_network.CloudNetwork
        """
        view = navigate_to(self, 'Add')
        view.fill({'network_manager': network_manager,
                   'cloud_tenant': tenant,
                   'network_type': network_type,
                   'network_name': name,
                   'ext_router': is_external,
                   'administrative_state': admin_state,
                   'shared': is_shared})
        view.add.click()
        view.flash.assert_success_message(f'Cloud Network "{name}" created')
        network = self.instantiate(name, provider)
        # Refresh provider's relationships to have new network displayed
        wait_for(provider.is_refreshed, func_kwargs=dict(refresh_delta=10), timeout=600)
        wait_for(lambda: network.exists, timeout=100, fail_func=network.browser.refresh)
        return network

    def all(self):
        """returning all Cloud Network objects and support filtering as per provider"""
        provider_id = self.filters.get("provider").id if self.filters.get("provider") else None
        networks_all = self.appliance.rest_api.collections.cloud_networks.all
        prov_db = {prov.id: prov for prov in self.appliance.rest_api.collections.providers}
        nw_objs = []
        for nw in networks_all:
            # nw.ems_id points at the network manager; that manager's
            # parent_ems_id is the cloud provider the CRUD object is keyed by.
            prov_name = prov_db[prov_db[nw.ems_id].parent_ems_id]["name"]
            prov = providers.get_crud_by_name(prov_name)
            nw_objs.append(self.instantiate(name=nw.name, provider_obj=prov))
        if provider_id:
            return [nw for nw in nw_objs if nw.provider_obj.id == provider_id]
        else:
            return nw_objs
@navigator.register(CloudNetworkCollection, 'All')
class All(CFMENavigateStep):
    # Navigation step: main Cloud Networks list page.
    VIEW = CloudNetworkView
    prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')

    def step(self, *args, **kwargs):
        self.prerequisite_view.navigation.select('Networks', 'Networks')

    def resetter(self, *args, **kwargs):
        """Reset the view"""
        self.view.browser.refresh()
@navigator.register(CloudNetwork, 'Details')
class Details(CFMENavigateStep):
    # Navigation step: details page of one cloud network (found by name).
    prerequisite = NavigateToAttribute('parent', 'All')
    VIEW = CloudNetworkDetailsView

    def step(self, *args, **kwargs):
        self.prerequisite_view.entities.get_entity(name=self.obj.name, surf_pages=True).click()
@navigator.register(CloudNetworkCollection, 'Add')
class Add(CFMENavigateStep):
    # Navigation step: "Add a new Cloud Network" form.
    prerequisite = NavigateToSibling('All')
    VIEW = CloudNetworkAddView

    def step(self, *args, **kwargs):
        self.prerequisite_view.toolbar.configuration.item_select('Add a new Cloud Network')
@navigator.register(CloudNetwork, 'Edit')
class Edit(CFMENavigateStep):
    # Navigation step: edit form of one cloud network.
    prerequisite = NavigateToSibling('Details')
    VIEW = CloudNetworkEditView

    def step(self, *args, **kwargs):
        self.prerequisite_view.toolbar.configuration.item_select('Edit this Cloud Network')
| gpl-2.0 |
chirilo/mozillians | vendor-local/lib/python/djcelery/tests/test_backends/test_database.py | 13 | 3037 | from __future__ import absolute_import
from datetime import timedelta
from celery import current_app
from celery import states
from celery.result import AsyncResult
from celery.task import PeriodicTask
from celery.utils import gen_unique_id
from djcelery.backends.database import DatabaseBackend
from djcelery.utils import now
from djcelery.tests.utils import unittest
class SomeClass(object):
    """Trivial payload object: the tests store it in a backend result and
    compare ``data`` after the (de)serialisation round-trip.
    """

    def __init__(self, data):
        # Stored verbatim, never transformed.
        self.data = data
class MyPeriodicTask(PeriodicTask):
    """Minimal periodic-task fixture: fires every second and returns a
    constant, so tests can assert on the stored result.
    """
    name = "c.u.my-periodic-task-244"
    run_every = timedelta(seconds=1)

    def run(self, **kwargs):
        return 42
class TestDatabaseBackend(unittest.TestCase):
    """Exercise DatabaseBackend storage of task results, tasksets and
    expiry cleanup.  NOTE: this file is Python 2 syntax (``except X, e``,
    ``xrange``).
    """

    def test_backend(self):
        b = DatabaseBackend()
        tid = gen_unique_id()

        # Unknown task ids report PENDING with no result.
        self.assertEqual(b.get_status(tid), states.PENDING)
        self.assertIsNone(b.get_result(tid))

        b.mark_as_done(tid, 42)
        self.assertEqual(b.get_status(tid), states.SUCCESS)
        self.assertEqual(b.get_result(tid), 42)

        tid2 = gen_unique_id()
        result = {"foo": "baz", "bar": SomeClass(12345)}
        b.mark_as_done(tid2, result)
        # The stored mapping (including the custom object) must round-trip
        # through serialization intact.
        rindb = b.get_result(tid2)
        self.assertEqual(rindb.get("foo"), "baz")
        self.assertEqual(rindb.get("bar").data, 12345)

        tid3 = gen_unique_id()
        # Raise for real so the exception carries a traceback context.
        try:
            raise KeyError("foo")
        except KeyError, exception:
            pass

        b.mark_as_failure(tid3, exception)
        self.assertEqual(b.get_status(tid3), states.FAILURE)
        self.assertIsInstance(b.get_result(tid3), KeyError)

    def test_forget(self):
        # forget() must drop a previously stored result.
        b = DatabaseBackend()
        tid = gen_unique_id()
        b.mark_as_done(tid, {"foo": "bar"})
        x = AsyncResult(tid)
        self.assertEqual(x.result.get("foo"), "bar")
        x.forget()
        self.assertIsNone(x.result)

    def test_taskset_store(self):
        # Save / restore / delete round-trip for taskset results.
        b = DatabaseBackend()
        tid = gen_unique_id()

        self.assertIsNone(b.restore_taskset(tid))

        result = {"foo": "baz", "bar": SomeClass(12345)}
        b.save_taskset(tid, result)
        rindb = b.restore_taskset(tid)
        self.assertIsNotNone(rindb)
        self.assertEqual(rindb.get("foo"), "baz")
        self.assertEqual(rindb.get("bar").data, 12345)

        b.delete_taskset(tid)
        self.assertIsNone(b.restore_taskset(tid))

    def test_cleanup(self):
        # cleanup() removes only results older than the expiry window.
        b = DatabaseBackend()
        b.TaskModel._default_manager.all().delete()
        ids = [gen_unique_id() for _ in xrange(3)]
        for i, res in enumerate((16, 32, 64)):
            b.mark_as_done(ids[i], res)

        self.assertEqual(b.TaskModel._default_manager.count(), 3)

        then = now() - current_app.conf.CELERY_TASK_RESULT_EXPIRES * 2
        # Have to avoid save() because it applies the auto_now=True.
        b.TaskModel._default_manager.filter(task_id__in=ids[:-1]) \
                                    .update(date_done=then)

        b.cleanup()
        self.assertEqual(b.TaskModel._default_manager.count(), 1)
| bsd-3-clause |
pinkavaj/gnuradio | gr-utils/python/modtool/gr-newmod/python/build_utils_codes.py | 263 | 1391 | #
# Copyright 2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
def i_code (code3):
    """Input-stream type character: the first letter of *code3*."""
    return code3[0]
def o_code (code3):
    """Output-stream type character: the second letter of *code3* when
    present, otherwise the first.
    """
    return code3[1] if len(code3) >= 2 else code3[0]
def tap_code (code3):
    """Tap type character: the third letter of *code3* when present,
    otherwise the first (not the second).
    """
    return code3[2] if len(code3) >= 3 else code3[0]
def i_type (code3):
    """C type name for the input stream of *code3* (via char_to_type)."""
    return char_to_type[i_code (code3)]
def o_type (code3):
    """C type name for the output stream of *code3* (via char_to_type)."""
    return char_to_type[o_code (code3)]
def tap_type (code3):
    """C type name for the taps of *code3* (via char_to_type)."""
    return char_to_type[tap_code (code3)]
# Map from single-character type code to the corresponding C/C++ type name.
# (A dict literal replaces the previous item-by-item construction.)
char_to_type = {
    's': 'short',
    'i': 'int',
    'f': 'float',
    'c': 'gr_complex',
    'b': 'unsigned char',
}
| gpl-3.0 |
inspirehep/raven-python | raven/handlers/logbook.py | 21 | 3313 | """
raven.handlers.logbook
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import print_function
import logbook
import sys
import traceback
from raven.base import Client
from raven.utils.encoding import to_string
from raven.utils import six
class SentryHandler(logbook.Handler):
    """Logbook handler that forwards log records to a Sentry server.

    The handler accepts either a ``Client`` instance or a DSN string as the
    sole positional argument (``client_cls`` may override the client class
    used for a DSN), or a ``client`` keyword argument.
    """

    def __init__(self, *args, **kwargs):
        if len(args) == 1:
            arg = args[0]
            if isinstance(arg, six.string_types):
                # A DSN string: construct a client from it.
                self.client = kwargs.pop('client_cls', Client)(dsn=arg, **kwargs)
            elif isinstance(arg, Client):
                self.client = arg
            else:
                raise ValueError('The first argument to %s must be either a Client instance or a DSN, got %r instead.' % (
                    self.__class__.__name__,
                    arg,
                ))
            args = []
        else:
            try:
                self.client = kwargs.pop('client')
            except KeyError:
                raise TypeError('Expected keyword argument for SentryHandler: client')
        super(SentryHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        """Capture *record* with the Sentry client.

        Errors raised while capturing are printed to stderr (and reported
        once via ``captureException``) instead of propagating, unless the
        client was configured with ``raise_send_errors``.
        """
        try:
            # Avoid typical config issues by overriding loggers behavior:
            # records from raven's own channels are printed, not captured,
            # which also prevents feedback loops.
            if record.channel.startswith(('sentry.errors', 'raven')):
                print(to_string(self.format(record)), file=sys.stderr)
                return

            return self._emit(record)
        except Exception:
            if self.client.raise_send_errors:
                raise
            print("Top level Sentry exception caught - failed creating log record", file=sys.stderr)
            print(to_string(record.msg), file=sys.stderr)
            # Bug fix: the traceback previously went to stdout while the
            # other diagnostics above went to stderr; keep them together.
            print(to_string(traceback.format_exc()), file=sys.stderr)

            try:
                self.client.captureException()
            except Exception:
                pass

    def _emit(self, record):
        """Build the Sentry payload from *record* and send it."""
        data = {
            'level': logbook.get_level_name(record.level).lower(),
            'logger': record.channel,
        }

        event_type = 'raven.events.Message'
        handler_kwargs = {
            'message': record.msg,
            'params': record.args,
            'formatted': self.format(record),
        }
        if 'tags' in record.kwargs:
            handler_kwargs['tags'] = record.kwargs['tags']

        # If there's no exception being processed, exc_info may be a 3-tuple
        # of None
        # http://docs.python.org/library/sys.html#sys.exc_info
        if record.exc_info is True or (record.exc_info and all(record.exc_info)):
            # Capture the message payload first, then upgrade the event to an
            # exception event carrying the exc_info.
            handler = self.client.get_handler(event_type)
            data.update(handler.capture(**handler_kwargs))
            event_type = 'raven.events.Exception'
            handler_kwargs['exc_info'] = record.exc_info

        extra = {
            'lineno': record.lineno,
            'filename': record.filename,
            'function': record.func_name,
            'process': record.process,
            'process_name': record.process_name,
        }
        extra.update(record.extra)

        return self.client.capture(event_type,
                                   data=data,
                                   extra=extra,
                                   **handler_kwargs
                                   )
| bsd-3-clause |
Peratham/tweater | py/nltk/tag/stanford.py | 4 | 3422 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Interface to the Stanford POS-tagger
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Nitin Madnani <nmadnani@ets.org>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
# $Id: stanford.py $
"""
A module for interfacing with the Stanford POS-tagger.
"""
import os
from subprocess import PIPE
import tempfile
import nltk
from api import *
_stanford_url = 'http://nlp.stanford.edu/software/tagger.shtml'
class StanfordTagger(TaggerI):
    """
    A class for pos tagging with Stanford Tagger. The input is the paths to:
     - a model trained on training data
     - (optionally) the path to the stanford tagger jar file. If not specified here,
       then this jar file must be specified in the CLASSPATH envinroment variable.
     - (optionally) the encoding of the training data (default: ASCII)

    Example:

        >>> st = StanfordTagger('bidirectional-distsim-wsj-0-18.tagger')
        >>> st.tag('What is the airspeed of an unladen swallow ?'.split())
        [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'),
        ('of', 'IN'), ('an', 'DT'), ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')]
    """

    def __init__(self, path_to_model, path_to_jar=None, encoding=None, verbose=False):
        self._stanford_jar = nltk.internals.find_jar(
                'stanford-postagger.jar', path_to_jar,
                searchpath=(), url=_stanford_url,
                verbose=verbose)

        if not os.path.isfile(path_to_model):
            raise IOError("Stanford tagger model file not found: %s" % path_to_model)
        self._stanford_model = path_to_model
        self._encoding = encoding

    def tag(self, tokens):
        """Tag a single pre-tokenized sentence."""
        return self.batch_tag([tokens])[0]

    def batch_tag(self, sentences):
        """Tag a list of pre-tokenized sentences by shelling out to the
        Stanford tagger, returning a list of (word, tag) tuple lists.
        """
        encoding = self._encoding
        nltk.internals.config_java(options='-mx1000m', verbose=False)

        # Create a temporary input file
        _input_fh, _input_file_path = tempfile.mkstemp(text=True)

        try:
            # Build the java command to run the tagger
            _stanpos_cmd = ['edu.stanford.nlp.tagger.maxent.MaxentTagger', \
                            '-model', self._stanford_model, '-textFile', \
                            _input_file_path, '-tokenize', 'false']
            if encoding:
                _stanpos_cmd.extend(['-encoding', encoding])

            # Write the actual sentences to the temporary input file
            _input_fh = os.fdopen(_input_fh, 'w')
            _input = '\n'.join((' '.join(x) for x in sentences))
            if isinstance(_input, unicode) and encoding:
                _input = _input.encode(encoding)
            _input_fh.write(_input)
            _input_fh.close()

            # Run the tagger and get the output
            stanpos_output, _stderr = nltk.internals.java(_stanpos_cmd, classpath=self._stanford_jar, \
                                                          stdout=PIPE, stderr=PIPE)
            if encoding:
                stanpos_output = stanpos_output.decode(encoding)
        finally:
            # Delete the temporary file even when the write or the java
            # invocation fails (bug fix: the file used to leak on error).
            os.unlink(_input_file_path)

        # Output the tagged sentences
        tagged_sentences = []
        for tagged_sentence in stanpos_output.strip().split("\n"):
            sentence = [tuple(tagged_word.strip().split("_"))
                        for tagged_word in tagged_sentence.strip().split()]
            tagged_sentences.append(sentence)
        return tagged_sentences
| gpl-3.0 |
eonpatapon/neutron | neutron/tests/api/test_flavors_extensions.py | 23 | 6787 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.tests.api import base
from neutron.tests.tempest import test
LOG = logging.getLogger(__name__)
class TestFlavorsJson(base.BaseAdminNetworkTest):

    """
    Tests the following operations in the Neutron API using the REST client for
    Neutron:

        List, Show, Create, Update, Delete Flavors
        List, Show, Create, Update, Delete service profiles
    """

    @classmethod
    def resource_setup(cls):
        super(TestFlavorsJson, cls).resource_setup()
        if not test.is_extension_enabled('flavors', 'network'):
            msg = "flavors extension not enabled."
            raise cls.skipException(msg)

        # Shared flavor and service profile exercised by the read-only tests.
        service_type = "LOADBALANCER"
        description_flavor = "flavor is created by tempest"
        name_flavor = "Best flavor created by tempest"
        cls.flavor = cls.create_flavor(name_flavor, description_flavor,
                                       service_type)
        description_sp = "service profile created by tempest"
        # Future TODO(madhu_ak): Right now the dummy driver is loaded. Will
        # make changes as soon I get to know the flavor supported drivers
        driver = ""
        metainfo = '{"data": "value"}'
        cls.service_profile = cls.create_service_profile(
            description=description_sp, metainfo=metainfo, driver=driver)

    def _delete_service_profile(self, service_profile_id):
        # Deletes a service profile and verifies if it is deleted or not
        self.admin_client.delete_service_profile(service_profile_id)
        # Asserting that service profile is not found in list after deletion
        labels = self.admin_client.list_service_profiles(id=service_profile_id)
        self.assertEqual(len(labels['service_profiles']), 0)

    @test.attr(type='smoke')
    @test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
    def test_create_update_delete_service_profile(self):
        # Creates a service profile
        description = "service_profile created by tempest"
        driver = ""
        metainfo = '{"data": "value"}'
        body = self.admin_client.create_service_profile(
            description=description, driver=driver, metainfo=metainfo)
        service_profile = body['service_profile']
        # Updates a service profile
        self.admin_client.update_service_profile(service_profile['id'],
                                                 enabled=False)
        # NOTE(review): this checks the *creation* response (default
        # enabled=True); the updated resource is not re-fetched here.
        self.assertTrue(service_profile['enabled'])
        # Deletes a service profile
        self.addCleanup(self._delete_service_profile,
                        service_profile['id'])
        # Assert whether created service profiles are found in service profile
        # lists or fail if created service profiles are not found in service
        # profiles list
        labels = (self.admin_client.list_service_profiles(
            id=service_profile['id']))
        self.assertEqual(len(labels['service_profiles']), 1)

    @test.attr(type='smoke')
    # Bug fix: this idempotent_id duplicated the one on
    # test_create_update_delete_service_profile; ids must be unique.
    @test.idempotent_id('07bb239e-5e34-4d41-b7bd-9f1a1c5e2a14')
    def test_create_update_delete_flavor(self):
        # Creates a flavor
        description = "flavor created by tempest"
        service = "LOADBALANCERS"
        name = "Best flavor created by tempest"
        body = self.admin_client.create_flavor(name=name, service_type=service,
                                               description=description)
        flavor = body['flavor']
        # Updates a flavor
        self.admin_client.update_flavor(flavor['id'], enabled=False)
        # NOTE(review): as above, this asserts on the creation response.
        self.assertTrue(flavor['enabled'])
        # Deletes a flavor
        self.addCleanup(self._delete_flavor, flavor['id'])
        # Assert whether created flavors are found in flavor lists or fail
        # if created flavors are not found in flavors list
        labels = (self.admin_client.list_flavors(id=flavor['id']))
        self.assertEqual(len(labels['flavors']), 1)

    @test.attr(type='smoke')
    @test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
    def test_show_service_profile(self):
        # Verifies the details of a service profile
        body = self.admin_client.show_service_profile(
            self.service_profile['id'])
        service_profile = body['service_profile']
        self.assertEqual(self.service_profile['id'], service_profile['id'])
        self.assertEqual(self.service_profile['description'],
                         service_profile['description'])
        self.assertEqual(self.service_profile['metainfo'],
                         service_profile['metainfo'])
        self.assertEqual(True, service_profile['enabled'])

    @test.attr(type='smoke')
    # Bug fix: this idempotent_id duplicated the one on
    # test_show_service_profile; ids must be unique.
    @test.idempotent_id('7b0b2e2f-3fdd-4cb6-bbdb-4b556fb4ff3c')
    def test_show_flavor(self):
        # Verifies the details of a flavor
        body = self.admin_client.show_flavor(self.flavor['id'])
        flavor = body['flavor']
        self.assertEqual(self.flavor['id'], flavor['id'])
        self.assertEqual(self.flavor['description'], flavor['description'])
        self.assertEqual(self.flavor['name'], flavor['name'])
        self.assertEqual(True, flavor['enabled'])

    @test.attr(type='smoke')
    @test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
    def test_list_flavors(self):
        # Verify flavor lists
        body = self.admin_client.list_flavors(id=33)
        flavors = body['flavors']
        self.assertEqual(0, len(flavors))

    @test.attr(type='smoke')
    # Bug fix: this idempotent_id duplicated the one on test_list_flavors;
    # ids must be unique.
    @test.idempotent_id('d75343bb-8b2d-4e6f-a2c1-8a57304b4bf4')
    def test_list_service_profiles(self):
        # Verify service profiles lists
        body = self.admin_client.list_service_profiles(id=33)
        service_profiles = body['service_profiles']
        self.assertEqual(0, len(service_profiles))

    def _delete_flavor(self, flavor_id):
        # Deletes a flavor and verifies if it is deleted or not
        self.admin_client.delete_flavor(flavor_id)
        # Asserting that the flavor is not found in list after deletion
        labels = self.admin_client.list_flavors(id=flavor_id)
        self.assertEqual(len(labels['flavors']), 0)
class TestFlavorsIpV6TestJSON(TestFlavorsJson):
    """Re-run the whole flavor test suite over IPv6."""
    _ip_version = 6
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.