repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
eagleamon/home-assistant | homeassistant/components/sensor/temper.py | 18 | 3506 | """
Support for getting temperature from TEMPer devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.temper/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME, TEMP_FAHRENHEIT
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)

# Python package Home Assistant installs for this platform.
REQUIREMENTS = ['temperusb==1.5.1']

# Optional linear calibration applied to raw readings: value * scale + offset.
CONF_SCALE = 'scale'
CONF_OFFSET = 'offset'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): vol.Coerce(str),
    vol.Optional(CONF_SCALE, default=1): vol.Coerce(float),
    vol.Optional(CONF_OFFSET, default=0): vol.Coerce(float)
})

# Module-level registry of created sensor entities; reset_devices() re-binds
# fresh hardware handles to these instances after a USB re-enumeration.
TEMPER_SENSORS = []
def get_temper_devices():
    """Return the list of TEMPer USB devices found by temperusb."""
    from temperusb.temper import TemperHandler

    handler = TemperHandler()
    return handler.get_devices()
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Temper sensors.

    Every discovered device after the first gets a numeric suffix so entity
    names stay unique: base, base_1, base_2, ...
    """
    temp_unit = hass.config.units.temperature_unit
    base_name = config.get(CONF_NAME)
    scaling = {
        'scale': config.get(CONF_SCALE),
        'offset': config.get(CONF_OFFSET)
    }
    temper_devices = get_temper_devices()

    for idx, dev in enumerate(temper_devices):
        # Bug fix: derive each suffix from the base name. The previous code
        # re-assigned `name`, so suffixes accumulated across iterations
        # (base_1, base_1_2, base_1_2_3, ...).
        if idx == 0:
            name = base_name
        else:
            name = base_name + '_' + str(idx)
        TEMPER_SENSORS.append(TemperSensor(dev, temp_unit, name, scaling))
    add_devices(TEMPER_SENSORS)
def reset_devices():
    """Re-scan for TEMPer hardware and re-bind handles to existing sensors.

    This assumes the same physical devices re-enumerate in the same order.
    """
    for sensor, device in zip(TEMPER_SENSORS, get_temper_devices()):
        sensor.set_temper_device(device)
class TemperSensor(Entity):
    """Representation of a Temper temperature sensor."""

    def __init__(self, temper_device, temp_unit, name, scaling):
        """Initialize the sensor.

        temper_device: underlying temperusb device handle.
        temp_unit: display unit (e.g. TEMP_FAHRENHEIT).
        name: entity name shown in the UI.
        scaling: dict with 'scale' and 'offset' calibration factors.
        """
        self.temp_unit = temp_unit
        self.scale = scaling['scale']
        self.offset = scaling['offset']
        self.current_value = None
        self._name = name
        self.set_temper_device(temper_device)

    @property
    def name(self):
        """Return the name of the temperature sensor."""
        return self._name

    @property
    def state(self):
        """Return the last temperature reading (rounded), or None."""
        return self.current_value

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self.temp_unit

    def set_temper_device(self, temper_device):
        """Assign the underlying device and push calibration data to it."""
        self.temper_device = temper_device
        # set calibration data
        self.temper_device.set_calibration_data(
            scale=self.scale,
            offset=self.offset
        )

    def update(self):
        """Retrieve latest state from the device."""
        try:
            format_str = ('fahrenheit' if self.temp_unit == TEMP_FAHRENHEIT
                          else 'celsius')
            sensor_value = self.temper_device.get_temperature(format_str)
            self.current_value = round(sensor_value, 1)
        except IOError:
            # Bug fix: the original message concatenated without a space,
            # logging "...address mayhave changed...".
            _LOGGER.error('Failed to get temperature. The device address may '
                          'have changed - attempting to reset device')
            reset_devices()
| apache-2.0 |
Inspq/ansible | lib/ansible/modules/cloud/ovirt/ovirt_networks.py | 1 | 9528 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module metadata and documentation blocks. These strings are parsed
# by Ansible tooling (ansible-doc); keep them valid YAML.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: ovirt_networks
short_description: Module to manage logical networks in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
    - "Module to manage logical networks in oVirt"
options:
    name:
        description:
            - "Name of the network to manage."
        required: true
    state:
        description:
            - "Should the network be present or absent"
        choices: ['present', 'absent']
        default: present
    data_center:
        description:
            - "Datacenter name where network reside."
    description:
        description:
            - "Description of the network."
    comment:
        description:
            - "Comment of the network."
    vlan_tag:
        description:
            - "Specify VLAN tag."
    vm_network:
        description:
            - "If I(True) network will be marked as network for VM."
            - "VM network carries traffic relevant to the virtual machine."
    mtu:
        description:
            - "Maximum transmission unit (MTU) of the network."
    clusters:
        description:
            - "List of dictionaries describing how the network is managed in specific cluster."
            - "C(name) - Cluster name."
            - "C(assigned) - I(true) if the network should be assigned to cluster. Default is I(true)."
            - "C(required) - I(true) if the network must remain operational for all hosts associated with this network."
            - "C(display) - I(true) if the network should marked as display network."
            - "C(migration) - I(true) if the network should marked as migration network."
            - "C(gluster) - I(true) if the network should marked as gluster network."
extends_documentation_fragment: ovirt
'''

EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:

# Create network
- ovirt_networks:
    data_center: mydatacenter
    name: mynetwork
    vlan_tag: 1
    vm_network: true

# Remove network
- ovirt_networks:
    state: absent
    name: mynetwork
'''

RETURN = '''
id:
    description: "ID of the managed network"
    returned: "On success if network is found."
    type: str
    sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
network:
    description: "Dictionary of all the network attributes. Network attributes can be found on your oVirt instance
                  at following url: https://ovirt.example.com/ovirt-engine/api/model#types/network."
    returned: "On success if network is found."
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class NetworksModule(BaseModule):
    """Builds oVirt logical network entities and detects pending updates."""

    def build_entity(self):
        """Construct an otypes.Network from the module parameters."""
        params = self._module.params

        data_center = None
        if params['data_center']:
            data_center = otypes.DataCenter(name=params['data_center'])

        vlan = None
        if params['vlan_tag']:
            vlan = otypes.Vlan(params['vlan_tag'])

        usages = None
        if params['vm_network'] is not None:
            usages = [otypes.NetworkUsage.VM if params['vm_network'] else None]

        return otypes.Network(
            name=params['name'],
            comment=params['comment'],
            description=params['description'],
            data_center=data_center,
            vlan=vlan,
            usages=usages,
            mtu=params['mtu'],
        )

    def update_check(self, entity):
        """Return True when the existing entity already matches the params."""
        params = self._module.params
        return (
            equal(params.get('comment'), entity.comment) and
            equal(params.get('description'), entity.description) and
            equal(params.get('vlan_tag'), getattr(entity.vlan, 'id', None)) and
            equal(params.get('vm_network'), True if entity.usages else False) and
            equal(params.get('mtu'), entity.mtu)
        )
class ClusterNetworksModule(BaseModule):
    """Manages the attachment of a logical network to a single cluster."""

    def __init__(self, network_id, cluster_network, *args, **kwargs):
        """Remember the network id and the per-cluster settings dict."""
        super(ClusterNetworksModule, self).__init__(*args, **kwargs)
        self._network_id = network_id
        self._cluster_network = cluster_network

    def build_entity(self):
        """Construct the otypes.Network payload for the cluster service."""
        settings = self._cluster_network
        usage_keys = ['display', 'gluster', 'migration']

        # Only send usages when the user explicitly set at least one flag.
        usages = None
        if any(settings.get(key) is not None for key in usage_keys):
            usages = [
                otypes.NetworkUsage(key)
                for key in usage_keys
                if settings.get(key, False)
            ]

        return otypes.Network(
            id=self._network_id,
            name=self._module.params['name'],
            required=settings.get('required'),
            display=settings.get('display'),
            usages=usages,
        )

    def update_check(self, entity):
        """Return True when the cluster-level attachment already matches."""
        settings = self._cluster_network
        usage_keys = ['display', 'gluster', 'migration']

        wanted = sorted(
            key for key in usage_keys if settings.get(key, False))
        # VM + MANAGEMENT usages belong to the root network, so they are
        # excluded when comparing cluster-level usages.
        actual = sorted(
            str(usage)
            for usage in getattr(entity, 'usages', [])
            if usage != otypes.NetworkUsage.VM and
            usage != otypes.NetworkUsage.MANAGEMENT
        )

        return (
            equal(settings.get('required'), entity.required) and
            equal(settings.get('display'), entity.display) and
            equal(wanted, actual)
        )
def main():
    """Module entry point.

    Ensures the logical network is present/absent and, when present,
    synchronizes its per-cluster assignments.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        data_center=dict(default=None, required=True),
        name=dict(default=None, required=True),
        description=dict(default=None),
        comment=dict(default=None),
        vlan_tag=dict(default=None, type='int'),
        vm_network=dict(default=None, type='bool'),
        mtu=dict(default=None, type='int'),
        clusters=dict(default=None, type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    check_params(module)

    # Bug fix: pre-initialize so the finally clause cannot raise NameError
    # when popping 'auth' or create_connection() fails before assignment.
    auth = dict()
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        clusters_service = connection.system_service().clusters_service()
        networks_service = connection.system_service().networks_service()
        networks_module = NetworksModule(
            connection=connection,
            module=module,
            service=networks_service,
        )
        state = module.params['state']
        search_params = {
            'name': module.params['name'],
            'datacenter': module.params['data_center'],
        }
        if state == 'present':
            ret = networks_module.create(search_params=search_params)

            # Update clusters networks:
            if module.params.get('clusters') is not None:
                for param_cluster in module.params.get('clusters'):
                    cluster = search_by_name(clusters_service, param_cluster.get('name'))
                    if cluster is None:
                        raise Exception("Cluster '%s' was not found." % param_cluster.get('name'))
                    cluster_networks_service = clusters_service.service(cluster.id).networks_service()
                    cluster_networks_module = ClusterNetworksModule(
                        network_id=ret['id'],
                        cluster_network=param_cluster,
                        connection=connection,
                        module=module,
                        service=cluster_networks_service,
                    )
                    # 'assigned' defaults to True: attach unless told otherwise.
                    if param_cluster.get('assigned', True):
                        ret = cluster_networks_module.create()
                    else:
                        ret = cluster_networks_module.remove()
        elif state == 'absent':
            ret = networks_module.remove(search_params=search_params)

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close a connection that was actually established.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == "__main__":
    main()
| gpl-3.0 |
christophlsa/odoo | addons/l10n_be_invoice_bba/invoice.py | 36 | 12656 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re, time, random
from openerp import api
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
"""
account.invoice object:
- Add support for Belgian structured communication
- Rename 'reference' field labels to 'Communication'
"""
class account_invoice(osv.osv):
    """Extend account.invoice with Belgian BBA structured communication.

    A BBA structured communication is a 12-digit payment reference formatted
    as +++XXX/XXXX/XXXCC+++ where CC is a mod-97 check on the first 10 digits
    (97 itself is used when the remainder is 0).
    """
    _inherit = 'account.invoice'

    @api.cr_uid_context
    def _get_reference_type(self, cursor, user, context=None):
        """Add BBA Structured Communication Type and change labels from 'reference' into 'communication' """
        res = super(account_invoice, self)._get_reference_type(cursor, user,
                context=context)
        # Relabel the existing 'none' entry, then add the BBA option.
        res[[i for i,x in enumerate(res) if x[0] == 'none'][0]] = ('none', 'Free Communication')
        res.append(('bba', 'BBA Structured Communication'))
        #l_logger.warning('reference_type =  %s' %res )
        return res

    def check_bbacomm(self, val):
        """Return True when *val* is a valid BBA structured communication.

        Accepts only digits and the separators '+', '*', '/', ' '; the 12
        digits must satisfy the mod-97 check (remainder 0 maps to 97).
        """
        supported_chars = '0-9+*/ '
        pattern = re.compile('[^' + supported_chars + ']')
        if pattern.findall(val or ''):
            return False
        bbacomm = re.sub('\D', '', val or '')
        if len(bbacomm) == 12:
            base = int(bbacomm[:10])
            mod = base % 97 or 97
            if mod == int(bbacomm[-2:]):
                return True
        return False

    def _check_communication(self, cr, uid, ids):
        # Constraint: invoices with reference_type 'bba' must carry a valid
        # BBA communication; other reference types are always accepted.
        for inv in self.browse(cr, uid, ids):
            if inv.reference_type == 'bba':
                return self.check_bbacomm(inv.reference)
        return True

    def onchange_partner_id(self, cr, uid, ids, type, partner_id,
            date_invoice=False, payment_term=False,
            partner_bank_id=False, company_id=False,
            context=None):
        """On partner change, preset the communication type/value from the
        partner's configured outgoing-invoice communication type."""
        result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
            date_invoice, payment_term, partner_bank_id, company_id, context)
#        reference_type = self.default_get(cr, uid, ['reference_type'])['reference_type']
#        _logger.warning('partner_id %s' % partner_id)
        reference = False
        reference_type = 'none'
        if partner_id:
            if (type == 'out_invoice'):
                reference_type = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).out_inv_comm_type
                if reference_type:
                    # Pre-generate a BBA communication for the chosen partner.
                    reference = self.generate_bbacomm(cr, uid, ids, type, reference_type, partner_id, '', context=context)['value']['reference']
        res_update = {
            'reference_type': reference_type or 'none',
            'reference': reference,
        }
        result['value'].update(res_update)
        return result

    def generate_bbacomm(self, cr, uid, ids, type, reference_type, partner_id, reference, context=None):
        """Generate a BBA structured communication for an outgoing invoice.

        The partner's out_inv_comm_algorithm selects the strategy:
        - 'date': day-of-year + year + per-day 3-digit sequence;
        - 'partner_ref': partner reference digits + per-partner sequence;
        - 'random': random 10-digit base (default).
        An already-valid *reference* is kept unchanged. Returns an onchange
        style dict: {'value': {'reference': <communication>}}.
        """
        partner_obj = self.pool.get('res.partner')
        reference = reference or ''
        algorithm = False
        if partner_id:
            algorithm = partner_obj.browse(cr, uid, partner_id, context=context).out_inv_comm_algorithm
        algorithm = algorithm or 'random'
        if (type == 'out_invoice'):
            if reference_type == 'bba':
                if algorithm == 'date':
                    if not self.check_bbacomm(reference):
                        doy = time.strftime('%j')
                        year = time.strftime('%Y')
                        seq = '001'
                        # Find today's last generated sequence number (chars
                        # 12:15 of '+++DDD/YYYY/SSSCC+++').
                        seq_ids = self.search(cr, uid,
                            [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
                             ('reference', 'like', '+++%s/%s/%%' % (doy, year))], order='reference')
                        if seq_ids:
                            prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
                            if prev_seq < 999:
                                seq = '%03d' % (prev_seq + 1)
                            else:
                                raise osv.except_osv(_('Warning!'),
                                    _('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
                                      '\nPlease create manually a unique BBA Structured Communication.'))
                        bbacomm = doy + year + seq
                        base = int(bbacomm)
                        mod = base % 97 or 97
                        reference = '+++%s/%s/%s%02d+++' % (doy, year, seq, mod)
                elif algorithm == 'partner_ref':
                    if not self.check_bbacomm(reference):
                        partner_ref = self.pool.get('res.partner').browse(cr, uid, partner_id).ref
                        partner_ref_nr = re.sub('\D', '', partner_ref or '')
                        if (len(partner_ref_nr) < 3) or (len(partner_ref_nr) > 7):
                            raise osv.except_osv(_('Warning!'),
                                _('The Partner should have a 3-7 digit Reference Number for the generation of BBA Structured Communications!' \
                                  '\nPlease correct the Partner record.'))
                        else:
                            # Pad the partner digits to 7 and find the next
                            # free per-partner sequence number.
                            partner_ref_nr = partner_ref_nr.ljust(7, '0')
                            seq = '001'
                            seq_ids = self.search(cr, uid,
                                [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
                                 ('reference', 'like', '+++%s/%s/%%' % (partner_ref_nr[:3], partner_ref_nr[3:]))], order='reference')
                            if seq_ids:
                                prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
                                if prev_seq < 999:
                                    seq = '%03d' % (prev_seq + 1)
                                else:
                                    raise osv.except_osv(_('Warning!'),
                                        _('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
                                          '\nPlease create manually a unique BBA Structured Communication.'))
                            bbacomm = partner_ref_nr + seq
                            base = int(bbacomm)
                            mod = base % 97 or 97
                            reference = '+++%s/%s/%s%02d+++' % (partner_ref_nr[:3], partner_ref_nr[3:], seq, mod)
                elif algorithm == 'random':
                    if not self.check_bbacomm(reference):
                        # 10-digit random base, zero-padded, plus mod-97 check.
                        base = random.randint(1, 9999999999)
                        bbacomm = str(base).rjust(10, '0')
                        base = int(bbacomm)
                        mod = base % 97 or 97
                        mod = str(mod).rjust(2, '0')
                        reference = '+++%s/%s/%s%s+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)
                else:
                    raise osv.except_osv(_('Error!'),
                        _("Unsupported Structured Communication Type Algorithm '%s' !" \
                          "\nPlease contact your Odoo support channel.") % algorithm)
        return {'value': {'reference': reference}}

    def create(self, cr, uid, vals, context=None):
        """Normalize/generate the BBA communication and enforce uniqueness
        before creating the invoice."""
        reference = vals.get('reference', False)
        reference_type = vals.get('reference_type', False)
        if vals.get('type') == 'out_invoice' and not reference_type:
            # fallback on default communication type for partner
            reference_type = self.pool.get('res.partner').browse(cr, uid, vals['partner_id']).out_inv_comm_type
            if reference_type == 'bba':
                reference = self.generate_bbacomm(cr, uid, [], vals['type'], reference_type, vals['partner_id'], '', context={})['value']['reference']
            vals.update({
                'reference_type': reference_type or 'none',
                'reference': reference,
            })
        if reference_type == 'bba':
            if not reference:
                raise osv.except_osv(_('Warning!'),
                    _('Empty BBA Structured Communication!' \
                      '\nPlease fill in a unique BBA Structured Communication.'))
            if self.check_bbacomm(reference):
                # Reformat to the canonical +++XXX/XXXX/XXXXX+++ layout and
                # reject duplicates among outgoing invoices.
                reference = re.sub('\D', '', reference)
                vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
                same_ids = self.search(cr, uid,
                    [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
                     ('reference', '=', vals['reference'])])
                if same_ids:
                    raise osv.except_osv(_('Warning!'),
                        _('The BBA Structured Communication has already been used!' \
                          '\nPlease create manually a unique BBA Structured Communication.'))
        return super(account_invoice, self).create(cr, uid, vals, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        """Re-validate and canonicalize a changed BBA communication,
        enforcing uniqueness against other outgoing invoices."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        for inv in self.browse(cr, uid, ids, context):
            if vals.has_key('reference_type'):
                reference_type = vals['reference_type']
            else:
                reference_type = inv.reference_type or ''
            if reference_type == 'bba' and 'reference' in vals:
                if self.check_bbacomm(vals['reference']):
                    reference = re.sub('\D', '', vals['reference'])
                    vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
                    # Exclude the invoice being written when searching for
                    # duplicates.
                    same_ids = self.search(cr, uid,
                        [('id', '!=', inv.id), ('type', '=', 'out_invoice'),
                         ('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])])
                    if same_ids:
                        raise osv.except_osv(_('Warning!'),
                            _('The BBA Structured Communication has already been used!' \
                              '\nPlease create manually a unique BBA Structured Communication.'))
        return super(account_invoice, self).write(cr, uid, ids, vals, context)

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate the invoice, generating a fresh BBA communication so the
        copy never reuses the original's unique reference."""
        default = default or {}
        invoice = self.browse(cr, uid, id, context=context)
        if invoice.type in ['out_invoice']:
            reference_type = invoice.reference_type or 'none'
            default['reference_type'] = reference_type
            if reference_type == 'bba':
                partner = invoice.partner_id
                default['reference'] = self.generate_bbacomm(cr, uid, id,
                    invoice.type, reference_type,
                    partner.id, '', context=context)['value']['reference']
        return super(account_invoice, self).copy(cr, uid, id, default, context=context)

    # Relabel 'reference' as 'Communication' per Belgian banking terminology.
    _columns = {
        'reference': fields.char('Communication', help="The partner reference of this invoice."),
        'reference_type': fields.selection(_get_reference_type, 'Communication Type',
            required=True),
    }

    _constraints = [
        (_check_communication, 'Invalid BBA Structured Communication !', ['Communication']),
    ]

account_invoice()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mancoast/CPythonPyc_test | cpython/278_test_linuxaudiodev.py | 133 | 3179 | from test import test_support
test_support.requires('audio')
from test.test_support import findfile, run_unittest
import errno
import sys
import audioop
import unittest
# Both modules are deprecated; import via the helper so the expected
# DeprecationWarning is handled and the test skips if they are unavailable.
linuxaudiodev = test_support.import_module('linuxaudiodev', deprecated=True)
sunaudio = test_support.import_module('sunaudio', deprecated=True)

# Encoding code from the Sun .au header for 8-bit ISDN u-law samples.
SND_FORMAT_MULAW_8 = 1
class LinuxAudioDevTests(unittest.TestCase):
    """Smoke tests for the deprecated linuxaudiodev module (Python 2),
    exercising a real OSS audio device opened for writing."""

    def setUp(self):
        # Opens the system audio device; test_main() has already verified
        # one is available before this runs.
        self.dev = linuxaudiodev.open('w')

    def tearDown(self):
        self.dev.close()

    def test_methods(self):
        # at least check that these methods can be invoked
        self.dev.bufsize()
        self.dev.obufcount()
        self.dev.obuffree()
        self.dev.getptr()
        self.dev.fileno()

    def test_play_sound_file(self):
        """End-to-end playback: parse a .au header, decode mu-law samples,
        configure the device and write the audio data."""
        path = findfile("audiotest.au")
        fp = open(path, 'r')
        size, enc, rate, nchannels, extra = sunaudio.gethdr(fp)
        data = fp.read()
        fp.close()

        if enc != SND_FORMAT_MULAW_8:
            self.fail("Expect .au file with 8-bit mu-law samples")

        # convert the data to 16-bit signed
        data = audioop.ulaw2lin(data, 2)

        # set the data format: device expects native-endian 16-bit samples
        if sys.byteorder == 'little':
            fmt = linuxaudiodev.AFMT_S16_LE
        else:
            fmt = linuxaudiodev.AFMT_S16_BE

        # set parameters based on .au file headers
        self.dev.setparameters(rate, 16, nchannels, fmt)
        self.dev.write(data)
        self.dev.flush()

    def test_errors(self):
        """Each invalid setparameters() call should raise ValueError with a
        specific, user-facing message."""
        size = 8
        fmt = linuxaudiodev.AFMT_U8
        rate = 8000
        nchannels = 1
        try:
            self.dev.setparameters(-1, size, nchannels, fmt)
        except ValueError, err:
            self.assertEqual(err.args[0], "expected rate >= 0, not -1")
        try:
            self.dev.setparameters(rate, -2, nchannels, fmt)
        except ValueError, err:
            self.assertEqual(err.args[0], "expected sample size >= 0, not -2")
        try:
            self.dev.setparameters(rate, size, 3, fmt)
        except ValueError, err:
            self.assertEqual(err.args[0], "nchannels must be 1 or 2, not 3")
        try:
            self.dev.setparameters(rate, size, nchannels, 177)
        except ValueError, err:
            self.assertEqual(err.args[0], "unknown audio encoding: 177")
        try:
            self.dev.setparameters(rate, size, nchannels, linuxaudiodev.AFMT_U16_LE)
        except ValueError, err:
            self.assertEqual(err.args[0], "for linear unsigned 16-bit little-endian "
                             "audio, expected sample size 16, not 8")
        try:
            self.dev.setparameters(rate, 16, nchannels, fmt)
        except ValueError, err:
            self.assertEqual(err.args[0], "for linear unsigned 8-bit audio, expected "
                             "sample size 8, not 16")
def test_main():
    """Entry point for regrtest: skip cleanly when the audio device is
    missing, busy or inaccessible; re-raise any other open error."""
    try:
        dsp = linuxaudiodev.open('w')
    except linuxaudiodev.error, msg:
        # Known "no usable device" conditions -> skip instead of fail.
        if msg.args[0] in (errno.EACCES, errno.ENOENT, errno.ENODEV, errno.EBUSY):
            raise unittest.SkipTest(msg)
        raise
    dsp.close()
    run_unittest(LinuxAudioDevTests)

if __name__ == '__main__':
    test_main()
| gpl-3.0 |
tanzaho/python-goose | tests/extractors.py | 1 | 18282 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import json
import re
from base import BaseMockTests, MockResponse
from goose import Goose
from goose.utils import FileHelper
from goose.configuration import Configuration
from goose.text import StopWordsChinese
from goose.text import StopWordsArabic
from goose.text import StopWordsKorean
# Absolute directory of this test module; fixture paths are resolved from it.
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
class MockResponseExtractors(MockResponse):
    """Mock HTTP response serving the current test's local HTML fixture."""

    def content(self, req):
        """Return the contents of data/extractors/<testname>.html."""
        fixture = "%s.html" % self.cls._get_current_testname()
        fixture_path = os.path.abspath(
            os.path.join(CURRENT_PATH, "data", "extractors", fixture))
        return FileHelper.loadResourceFile(fixture_path)
class TestExtractionBase(BaseMockTests):
    """Base extraction test case.

    Subclasses name each test after an HTML fixture; this base loads the
    fixture plus its expected-values JSON and runs field-level assertions
    against the extracted article.
    """
    callback = MockResponseExtractors

    def getRawHtml(self):
        # Raw HTML fixture for the current test.
        return self.load_test_file('.html')

    def loadData(self):
        # Expected values ('url', 'expected', optional 'target_language')
        # come from the sibling .json fixture.
        content = self.load_test_file('.json')
        self.data = json.loads(content)

    def load_content_html(self):
        # Expected extracted-HTML fixture (may be None if absent).
        self.expected_content_html = self.load_test_file('.content.html')

    def load_test_file(self, suffix):
        """Load the fixture for the current test: data/<module>/<func><suffix>.

        Returns None when the fixture file does not exist.
        """
        suite, module, cls, func = self.id().split('.')
        path = os.path.join(CURRENT_PATH, "data", module, "%s%s" % (func, suffix))
        path = os.path.abspath(path)
        try:
            return FileHelper.loadResourceFile(path)
        except IOError:
            pass

    def assert_cleaned_text(self, field, expected_value, result_value):
        """Assert the extracted text is non-null and begins with the
        expected text (only the expected-length prefix is compared)."""
        # # TODO : handle verbose level in tests
        # print "\n=======================::. ARTICLE REPORT %s .::======================\n" % self.id()
        # print 'expected_value (%s) \n' % len(expected_value)
        # print expected_value
        # print "-------"
        # print 'result_value (%s) \n' % len(result_value)
        # print result_value

        # cleaned_text is Null
        msg = u"Resulting article text was NULL!"
        self.assertNotEqual(result_value, None, msg=msg)

        # cleaned_text length
        msg = u"Article text was not as long as expected beginning!"
        self.assertTrue(len(expected_value) <= len(result_value), msg=msg)

        # clean_text value: compare only the expected-length prefix
        result_value = result_value[0:len(expected_value)]
        msg = u"The beginning of the article text was not as expected!"
        self.assertEqual(expected_value, result_value, msg=msg)

    def assert_tags(self, field, expected_value, result_value):
        """Assert the result tags match the expected set, order-insensitive."""
        # as we have a set in expected_value and a list in result_value
        # make result_value a set
        expected_value = set(expected_value)

        # check if both have the same number of items
        msg = (u"expected tags set and result tags set"
               u"don't have the same number of items")
        self.assertEqual(len(result_value), len(expected_value), msg=msg)

        # check if each tag in result_value is in expected_value
        for tag in result_value:
            self.assertTrue(tag in expected_value)

    def runArticleAssertions(self, article, fields):
        """For each field, dispatch to assert_<field> when defined,
        otherwise compare article.<field> for strict equality."""
        for field in fields:
            expected_value = self.data['expected'][field]
            result_value = getattr(article, field, None)

            # custom assertion for a given field
            assertion = 'assert_%s' % field
            if hasattr(self, assertion):
                getattr(self, assertion)(field, expected_value, result_value)
                continue

            # default assertion
            msg = u"Error %s" % field
            self.assertEqual(expected_value, result_value, msg=msg)

    def assert_content_html(self, article):
        # Whitespace-insensitive comparison of the extracted HTML against
        # the .content.html fixture.
        expected_content_html = re.sub('\s', '', self.expected_content_html)
        actual_content_html = re.sub('\s', '', article.content_html).decode("utf8")
        msg = u"HTML content is incorrect\n\n"
        msg += "Expected: %s\n\n" % self.expected_content_html
        msg += "Actual: %s" % article.content_html.decode("utf8")
        self.assertEqual(expected_content_html, actual_content_html, msg=msg)

    def extract(self, instance):
        # Hook point: subclasses may override how the Goose instance is used.
        article = instance.extract(url=self.data['url'])
        return article

    def getConfig(self):
        # Base configuration for all extraction tests.
        config = Configuration()
        config.enable_image_fetching = False
        return config

    def getArticle(self):
        """Load the fixtures, build a Goose instance and return the
        extracted article for the current test."""
        # load test case data
        self.loadData()
        self.load_content_html()

        # basic configuration
        # no image fetching
        config = self.getConfig()
        self.parser = config.get_parser()

        # target language
        # needed for non english language most of the time
        target_language = self.data.get('target_language')
        if target_language:
            config.target_language = target_language
            config.use_meta_language = False

        # run goose
        g = Goose(config=config)
        return self.extract(g)
class TestExtractions(TestExtractionBase):
def test_allnewlyrics1(self):
return 'pending'
article = self.getArticle()
fields = ['title', 'cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_cnn1(self):
return 'pending'
article = self.getArticle()
fields = ['title', 'cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_businessWeek1(self):
return 'pending'
article = self.getArticle()
fields = ['title', 'cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_businessWeek2(self):
return 'pending'
article = self.getArticle()
fields = ['title', 'cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_businessWeek3(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_cbslocal(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_elmondo1(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_elpais(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_liberation(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_lefigaro(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_techcrunch1(self):
return 'pending'
article = self.getArticle()
fields = ['title', 'cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_foxNews(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_aolNews(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_huffingtonPost2(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_testHuffingtonPost(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text', 'meta_description', 'title', ]
self.runArticleAssertions(article=article, fields=fields)
def test_espn(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_engadget(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_msn1(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
# #########################################
# # FAIL CHECK
# # UNICODE
# def test_guardian1(self):
# article = self.getArticle()
# fields = ['cleaned_text']
# self.runArticleAssertions(article=article, fields=fields)
def test_time(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text', 'title']
self.runArticleAssertions(article=article, fields=fields)
def test_time2(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_cnet(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_yahoo(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_politico(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_businessinsider1(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_businessinsider2(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_businessinsider3(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_cnbc1(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_marketplace(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_issue24(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_issue25(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_issue28(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_issue32(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_issue4(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_gizmodo1(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text', 'meta_description', 'meta_keywords']
    self.runArticleAssertions(article=article, fields=fields)
def test_mashable_issue_74(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_usatoday_issue_74(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_okaymarketing(self):
    # Disabled: the early return makes the assertions below unreachable.
    return 'pending'
    article = self.getArticle()
    fields = ['cleaned_text']
    self.runArticleAssertions(article=article, fields=fields)
def test_bbc(self):
    # Fixture-driven check of the extracted content HTML.
    article = self.getArticle()
    self.assert_content_html(article)
def test_huffingtonpost(self):
    # Fixture-driven check of the extracted content HTML.
    article = self.getArticle()
    self.assert_content_html(article)
def test_theguardian(self):
    # Fixture-driven check of the extracted content HTML.
    article = self.getArticle()
    self.assert_content_html(article)
def test_blockquotes(self):
    # Fixture-driven check of the extracted content HTML.
    article = self.getArticle()
    self.assert_content_html(article)
def test_open_graph_content(self):
    # Fixture-driven check of the extracted content HTML.
    article = self.getArticle()
    self.assert_content_html(article)
def test_clean_bad_tags(self):
    # Fixture-driven check of the extracted content HTML.
    article = self.getArticle()
    self.assert_content_html(article)
def test_embedded_media_items(self):
    # Fixture-driven check of the extracted content HTML.
    article = self.getArticle()
    self.assert_content_html(article)
class TestKnownHosts(TestExtractionBase):
    """Extraction tests for site-specific ("known host") selector configs."""

    def test_known_host_selectors(self):
        article = self.getArticle()
        self.assert_content_html(article)

    def test_known_host_selectors_with_regexs_references(self):
        article = self.getArticle()
        self.assert_content_html(article)
class TestRelativePaths(TestExtractionBase):
    """Extraction tests covering relative link/path handling in content."""

    def test_relative_paths(self):
        article = self.getArticle()
        self.assert_content_html(article)

    def test_tags_with_no_path(self):
        article = self.getArticle()
        self.assert_content_html(article)
class TestReplacingAttributes(TestExtractionBase):
    """Extraction tests covering attribute rewriting in the content HTML."""

    def test_replacing_attributes(self):
        article = self.getArticle()
        self.assert_content_html(article)
class TestPublishDate(TestExtractionBase):
    """Extraction tests for the 'publish_date' field."""

    def test_publish_date(self):
        article = self.getArticle()
        self.runArticleAssertions(article=article, fields=['publish_date'])

    def test_publish_date_rnews(self):
        article = self.getArticle()
        self.runArticleAssertions(article=article, fields=['publish_date'])

    def test_publish_date_article(self):
        article = self.getArticle()
        self.runArticleAssertions(article=article, fields=['publish_date'])
class TestMetaDescription(TestExtractionBase):
    """Extraction tests for the 'meta_description' field."""

    def test_meta_description(self):
        article = self.getArticle()
        self.runArticleAssertions(article=article, fields=['meta_description'])
class TestExtractWithUrl(TestExtractionBase):
    """Extraction tests that also verify the canonical URL."""

    def test_get_canonical_url(self):
        # Disabled: the early return makes the assertions below unreachable.
        return 'pending'
        article = self.getArticle()
        fields = ['cleaned_text', 'canonical_link']
        self.runArticleAssertions(article=article, fields=fields)
class TestExtractChinese(TestExtractionBase):
    """Extraction tests using the Chinese stop-words configuration."""

    def getConfig(self):
        # Swap in the Chinese stop-words class on the inherited config.
        config = super(TestExtractChinese, self).getConfig()
        config.stopwords_class = StopWordsChinese
        return config

    def test_bbc_chinese(self):
        # Disabled: the early return makes the assertions below unreachable.
        return 'pending'
        article = self.getArticle()
        fields = ['cleaned_text']
        self.runArticleAssertions(article=article, fields=fields)
class TestExtractArabic(TestExtractionBase):
    """Extraction tests using the Arabic stop-words configuration."""

    def getConfig(self):
        # Swap in the Arabic stop-words class on the inherited config.
        config = super(TestExtractArabic, self).getConfig()
        config.stopwords_class = StopWordsArabic
        return config

    def test_cnn_arabic(self):
        # Disabled: the early return makes the assertions below unreachable.
        return 'pending'
        article = self.getArticle()
        fields = ['cleaned_text']
        self.runArticleAssertions(article=article, fields=fields)
class TestExtractKorean(TestExtractionBase):
    """Extraction tests using the Korean stop-words configuration."""

    def getConfig(self):
        # Swap in the Korean stop-words class on the inherited config.
        config = super(TestExtractKorean, self).getConfig()
        config.stopwords_class = StopWordsKorean
        return config

    def test_donga_korean(self):
        # Disabled: the early return makes the assertions below unreachable.
        return 'pending'
        article = self.getArticle()
        fields = ['cleaned_text', 'meta_description', 'meta_keywords']
        self.runArticleAssertions(article=article, fields=fields)
class TestExtractionsRaw(TestExtractions):
    """Re-run the extraction suite feeding raw HTML instead of fetching."""

    def extract(self, instance):
        # Bypass URL fetching: extract directly from the stored raw HTML.
        article = instance.extract(raw_html=self.getRawHtml())
        return article

    def test_bbc(self):
        # Disabled: overrides the inherited test with an early return.
        return 'pending'
class TestArticleTags(TestExtractionBase):
    """Extraction tests for the article 'tags' field across several sites."""

    def test_tags_kexp(self):
        article = self.getArticle()
        fields = ['tags']
        self.runArticleAssertions(article=article, fields=fields)

    def test_tags_deadline(self):
        article = self.getArticle()
        fields = ['tags']
        self.runArticleAssertions(article=article, fields=fields)

    def test_tags_wnyc(self):
        article = self.getArticle()
        fields = ['tags']
        self.runArticleAssertions(article=article, fields=fields)

    def test_tags_cnet(self):
        article = self.getArticle()
        fields = ['tags']
        self.runArticleAssertions(article=article, fields=fields)

    def test_tags_abcau(self):
        """
        Test ABC Australia page with "topics" tags
        """
        article = self.getArticle()
        fields = ['tags']
        self.runArticleAssertions(article=article, fields=fields)
| apache-2.0 |
jessicalucci/NovaOrc | nova/api/openstack/wsgi.py | 4 | 43822 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import math
import time
from xml.dom import minidom
from lxml import etree
import webob
from nova.api.openstack import xmlutil
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import wsgi
# XML namespace URIs attached to serialized API responses.
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'

LOG = logging.getLogger(__name__)

# The vendor content types should serialize identically to the non-vendor
# content types. So to avoid littering the code with both options, we
# map the vendor to the other when looking up the type
_CONTENT_TYPE_MAP = {
    'application/vnd.openstack.compute+json': 'application/json',
    'application/vnd.openstack.compute+xml': 'application/xml',
}

# The full set of content types this API will accept or produce.
SUPPORTED_CONTENT_TYPES = (
    'application/json',
    'application/vnd.openstack.compute+json',
    'application/xml',
    'application/vnd.openstack.compute+xml',
)

# Maps a MIME type to the short (de)serializer key used internally.
_MEDIA_TYPE_MAP = {
    'application/vnd.openstack.compute+json': 'json',
    'application/json': 'json',
    'application/vnd.openstack.compute+xml': 'xml',
    'application/xml': 'xml',
    'application/atom+xml': 'atom',
}
class Request(webob.Request):
    """Add some OpenStack API-specific logic to the base webob.Request."""

    def __init__(self, *args, **kwargs):
        super(Request, self).__init__(*args, **kwargs)
        # Per-request scratch space shared with API extensions; only the
        # 'db_items' cache is used here.
        self._extension_data = {'db_items': {}}

    def cache_db_items(self, key, items, item_key='id'):
        """
        Allow API methods to store objects from a DB query to be
        used by API extensions within the same API request.

        An instance of this class only lives for the lifetime of a
        single API request, so there's no need to implement full
        cache management.
        """
        db_items = self._extension_data['db_items'].setdefault(key, {})
        for item in items:
            db_items[item[item_key]] = item

    def get_db_items(self, key):
        """
        Allow an API extension to get previously stored objects within
        the same API request.

        Note that the object data will be slightly stale.
        """
        # Raises KeyError if nothing was cached under this key.
        return self._extension_data['db_items'][key]

    def get_db_item(self, key, item_key):
        """
        Allow an API extension to get a previously stored object
        within the same API request.

        Note that the object data will be slightly stale.
        """
        # Returns None when the item is absent (dict.get semantics).
        return self.get_db_items(key).get(item_key)

    # Convenience wrappers over the generic cache for instances/flavors.
    def cache_db_instances(self, instances):
        self.cache_db_items('instances', instances, 'uuid')

    def cache_db_instance(self, instance):
        self.cache_db_items('instances', [instance], 'uuid')

    def get_db_instances(self):
        return self.get_db_items('instances')

    def get_db_instance(self, instance_uuid):
        return self.get_db_item('instances', instance_uuid)

    def cache_db_flavors(self, flavors):
        self.cache_db_items('flavors', flavors, 'flavorid')

    def cache_db_flavor(self, flavor):
        self.cache_db_items('flavors', [flavor], 'flavorid')

    def get_db_flavors(self):
        return self.get_db_items('flavors')

    def get_db_flavor(self, flavorid):
        return self.get_db_item('flavors', flavorid)

    def best_match_content_type(self):
        """Determine the requested response content-type."""
        # The result is memoized in the WSGI environ so repeated calls
        # during one request do no extra work.
        if 'nova.best_content_type' not in self.environ:
            # Calculate the best MIME type
            content_type = None

            # Check URL path suffix
            parts = self.path.rsplit('.', 1)
            if len(parts) > 1:
                possible_type = 'application/' + parts[1]
                if possible_type in SUPPORTED_CONTENT_TYPES:
                    content_type = possible_type

            if not content_type:
                content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)

            self.environ['nova.best_content_type'] = (content_type or
                                                      'application/json')

        return self.environ['nova.best_content_type']

    def get_content_type(self):
        """Determine content type of the request body.

        Does not do any body introspection, only checks header
        """
        if "Content-Type" not in self.headers:
            return None

        content_type = self.content_type

        # NOTE(markmc): text/plain is the default for eventlet and
        # other webservers which use mimetools.Message.gettype()
        # whereas twisted defaults to ''.
        if not content_type or content_type == 'text/plain':
            return None

        if content_type not in SUPPORTED_CONTENT_TYPES:
            raise exception.InvalidContentType(content_type=content_type)

        return content_type
class ActionDispatcher(object):
    """Maps method name to local methods through action name."""

    def dispatch(self, *args, **kwargs):
        """Look up the handler named by ``action`` and invoke it."""
        name = str(kwargs.pop('action', 'default'))
        # Fall back to self.default when no same-named method exists.
        handler = getattr(self, name, self.default)
        return handler(*args, **kwargs)

    def default(self, data):
        """Subclasses must provide a fallback handler."""
        raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
    """Default request body deserialization."""

    def deserialize(self, datastring, action='default'):
        """Route *datastring* to the handler registered for *action*."""
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        """Unknown body formats deserialize to an empty dict."""
        return dict()
class JSONDeserializer(TextDeserializer):
    """Deserialize JSON request bodies."""

    def _from_json(self, datastring):
        """Parse JSON text, translating parse errors to API errors."""
        try:
            parsed = jsonutils.loads(datastring)
        except ValueError:
            msg = _("cannot understand JSON")
            raise exception.MalformedRequestBody(reason=msg)
        return parsed

    def default(self, datastring):
        """Wrap the parsed document under the conventional 'body' key."""
        return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
    """Deserialize XML request bodies into plain Python structures."""

    def __init__(self, metadata=None):
        """
        :param metadata: information needed to deserialize xml into
                         a dictionary.
        """
        super(XMLDeserializer, self).__init__()
        self.metadata = metadata or {}

    def _from_xml(self, datastring):
        # 'plurals' lists node names whose children form a list.
        plurals = set(self.metadata.get('plurals', {}))
        node = xmlutil.safe_minidom_parse_string(datastring).childNodes[0]
        return {node.nodeName: self._from_xml_node(node, plurals)}

    def _from_xml_node(self, node, listnames):
        """Convert a minidom node to a simple Python type.

        :param listnames: list of XML node names whose subnodes should
                          be considered list items.
        """
        # A single child of nodeType 3 (TEXT_NODE) means a leaf value.
        if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
            return node.childNodes[0].nodeValue
        elif node.nodeName in listnames:
            return [self._from_xml_node(n, listnames) for n in node.childNodes]
        else:
            result = dict()
            # Attributes become dict entries, except namespace declarations.
            for attr in node.attributes.keys():
                if not attr.startswith("xmlns"):
                    result[attr] = node.attributes[attr].nodeValue
            for child in node.childNodes:
                if child.nodeType != node.TEXT_NODE:
                    result[child.nodeName] = self._from_xml_node(child,
                                                                 listnames)
            return result

    def find_first_child_named_in_namespace(self, parent, namespace, name):
        """Search a nodes children for the first child with a given name."""
        for node in parent.childNodes:
            if (node.localName == name and
                node.namespaceURI and
                node.namespaceURI == namespace):
                return node
        return None

    def find_first_child_named(self, parent, name):
        """Search a nodes children for the first child with a given name."""
        for node in parent.childNodes:
            if node.localName == name:
                return node
        return None

    def find_children_named(self, parent, name):
        """Return all of a nodes children who have the given name."""
        for node in parent.childNodes:
            if node.localName == name:
                yield node

    def extract_text(self, node):
        """Get the text field contained by the given node."""
        # Concatenates all immediate text children.
        ret_val = ""
        for child in node.childNodes:
            if child.nodeType == child.TEXT_NODE:
                ret_val += child.nodeValue
        return ret_val

    def extract_elements(self, node):
        """Get only Element type childs from node."""
        elements = []
        for child in node.childNodes:
            if child.nodeType == child.ELEMENT_NODE:
                elements.append(child)
        return elements

    def find_attribute_or_element(self, parent, name):
        """Get an attribute value; fallback to an element if not found."""
        if parent.hasAttribute(name):
            return parent.getAttribute(name)

        node = self.find_first_child_named(parent, name)
        if node:
            return self.extract_text(node)

        return None

    def default(self, datastring):
        return {'body': self._from_xml(datastring)}
class MetadataXMLDeserializer(XMLDeserializer):
    """XML deserializer that understands <metadata>/<meta> elements."""

    def extract_metadata(self, metadata_node):
        """Marshal the metadata attribute of a parsed request."""
        if metadata_node is None:
            return {}
        # Each <meta key="..."> child contributes one key/value pair.
        return dict(
            (meta.getAttribute("key"), self.extract_text(meta))
            for meta in self.find_children_named(metadata_node, "meta"))
class DictSerializer(ActionDispatcher):
    """Default request body serialization."""

    def serialize(self, data, action='default'):
        """Route *data* to the serializer registered for *action*."""
        return self.dispatch(data, action=action)

    def default(self, data):
        """Unknown formats serialize to the empty string."""
        return str()
class JSONDictSerializer(DictSerializer):
    """Default JSON request body serialization."""

    def default(self, data):
        # Delegate to the shared JSON helper for consistent encoding.
        serialized = jsonutils.dumps(data)
        return serialized
class XMLDictSerializer(DictSerializer):
    """Serialize dict responses to XML via recursive node construction."""

    def __init__(self, metadata=None, xmlns=None):
        """
        :param metadata: information needed to deserialize xml into
                         a dictionary.
        :param xmlns: XML namespace to include with serialized xml
        """
        super(XMLDictSerializer, self).__init__()
        self.metadata = metadata or {}
        self.xmlns = xmlns

    def default(self, data):
        # We expect data to contain a single key which is the XML root.
        # NOTE(review): keys()[0] is Python-2-only indexing of the keys
        # view — would need list()/next(iter()) on Python 3.
        root_key = data.keys()[0]
        doc = minidom.Document()
        node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])

        return self.to_xml_string(node)

    def to_xml_string(self, node, has_atom=False):
        self._add_xmlns(node, has_atom)
        return node.toxml('UTF-8')

    #NOTE (ameade): the has_atom should be removed after all of the
    # xml serializers and view builders have been updated to the current
    # spec that required all responses include the xmlns:atom, the has_atom
    # flag is to prevent current tests from breaking
    def _add_xmlns(self, node, has_atom=False):
        if self.xmlns is not None:
            node.setAttribute('xmlns', self.xmlns)
        if has_atom:
            node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")

    def _to_xml_node(self, doc, metadata, nodename, data):
        """Recursive method to convert data members to XML nodes."""
        result = doc.createElement(nodename)

        # Set the xml namespace if one is specified
        # TODO(justinsb): We could also use prefixes on the keys
        xmlns = metadata.get('xmlns', None)
        if xmlns:
            result.setAttribute('xmlns', xmlns)

        #TODO(bcwaldon): accomplish this without a type-check
        if isinstance(data, list):
            # 'list_collections' maps a node name to item-element metadata
            # for flat attribute-style lists.
            collections = metadata.get('list_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for item in data:
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(item))
                    result.appendChild(node)
                return result
            # Otherwise derive a singular child-node name for each item.
            singular = metadata.get('plurals', {}).get(nodename, None)
            if singular is None:
                if nodename.endswith('s'):
                    singular = nodename[:-1]
                else:
                    singular = 'item'
            for item in data:
                node = self._to_xml_node(doc, metadata, singular, item)
                result.appendChild(node)
        #TODO(bcwaldon): accomplish this without a type-check
        elif isinstance(data, dict):
            collections = metadata.get('dict_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for k, v in data.items():
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(k))
                    text = doc.createTextNode(str(v))
                    node.appendChild(text)
                    result.appendChild(node)
                return result
            # Keys listed in 'attributes' become XML attributes; the
            # rest become child elements.
            attrs = metadata.get('attributes', {}).get(nodename, {})
            for k, v in data.items():
                if k in attrs:
                    result.setAttribute(k, str(v))
                else:
                    if k == "deleted":
                        v = str(bool(v))
                    node = self._to_xml_node(doc, metadata, k, v)
                    result.appendChild(node)
        else:
            # Type is atom
            node = doc.createTextNode(str(data))
            result.appendChild(node)
        return result

    def _create_link_nodes(self, xml_doc, links):
        link_nodes = []
        for link in links:
            link_node = xml_doc.createElement('atom:link')
            link_node.setAttribute('rel', link['rel'])
            link_node.setAttribute('href', link['href'])
            if 'type' in link:
                link_node.setAttribute('type', link['type'])
            link_nodes.append(link_node)
        return link_nodes

    def _to_xml(self, root):
        """Convert the xml object to an xml string."""
        return etree.tostring(root, encoding='UTF-8', xml_declaration=True)
def serializers(**serializers):
    """Attaches serializers to a method.

    This decorator associates a dictionary of serializers with a
    method.  Note that the function attributes are directly
    manipulated; the method is not wrapped.
    """
    def decorator(func):
        registry = getattr(func, 'wsgi_serializers', None)
        if registry is None:
            registry = func.wsgi_serializers = {}
        registry.update(serializers)
        return func
    return decorator
def deserializers(**deserializers):
    """Attaches deserializers to a method.

    This decorator associates a dictionary of deserializers with a
    method.  Note that the function attributes are directly
    manipulated; the method is not wrapped.
    """
    def decorator(func):
        registry = getattr(func, 'wsgi_deserializers', None)
        if registry is None:
            registry = func.wsgi_deserializers = {}
        registry.update(deserializers)
        return func
    return decorator
def response(code):
    """Attaches response code to a method.

    This decorator associates a response code with a method.  Note
    that the function attributes are directly manipulated; the method
    is not wrapped.
    """
    def decorator(func):
        setattr(func, 'wsgi_code', code)
        return func
    return decorator
class ResponseObject(object):
    """Bundles a response object with appropriate serializers.

    Object that app methods may return in order to bind alternate
    serializers with a response object to be serialized.  Its use is
    optional.
    """

    def __init__(self, obj, code=None, headers=None, **serializers):
        """Binds serializers with an object.

        Takes keyword arguments akin to the @serializer() decorator
        for specifying serializers.  Serializers specified will be
        given preference over default serializers or method-specific
        serializers on return.
        """
        self.obj = obj
        self.serializers = serializers
        self._default_code = 200
        self._code = code
        # Header names are stored lower-cased for case-insensitive access.
        self._headers = headers or {}
        self.serializer = None
        self.media_type = None

    def __getitem__(self, key):
        """Retrieves a header with the given name."""
        return self._headers[key.lower()]

    def __setitem__(self, key, value):
        """Sets a header with the given name to the given value."""
        self._headers[key.lower()] = value

    def __delitem__(self, key):
        """Deletes the header with the given name."""
        del self._headers[key.lower()]

    def _bind_method_serializers(self, meth_serializers):
        """Binds method serializers with the response object.

        Binds the method serializers with the response object.
        Serializers specified to the constructor will take precedence
        over serializers specified to this method.

        :param meth_serializers: A dictionary with keys mapping to
                                 response types and values containing
                                 serializer objects.
        """
        # We can't use update because that would be the wrong
        # precedence
        for mtype, serializer in meth_serializers.items():
            self.serializers.setdefault(mtype, serializer)

    def get_serializer(self, content_type, default_serializers=None):
        """Returns the serializer for the wrapped object.

        Returns the serializer for the wrapped object subject to the
        indicated content type.  If no serializer matching the content
        type is attached, an appropriate serializer drawn from the
        default serializers will be used.  If no appropriate
        serializer is available, raises InvalidContentType.
        """
        default_serializers = default_serializers or {}

        try:
            mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
            if mtype in self.serializers:
                return mtype, self.serializers[mtype]
            else:
                return mtype, default_serializers[mtype]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)

    def preserialize(self, content_type, default_serializers=None):
        """Prepares the serializer that will be used to serialize.

        Determines the serializer that will be used and prepares an
        instance of it for later call.  This allows the serializer to
        be accessed by extensions for, e.g., template extension.
        """
        mtype, serializer = self.get_serializer(content_type,
                                                default_serializers)
        self.media_type = mtype
        self.serializer = serializer()

    def attach(self, **kwargs):
        """Attach slave templates to serializers."""
        if self.media_type in kwargs:
            self.serializer.attach(kwargs[self.media_type])

    def serialize(self, request, content_type, default_serializers=None):
        """Serializes the wrapped object.

        Utility method for serializing the wrapped object.  Returns a
        webob.Response object.
        """
        # Reuse the serializer prepared by preserialize(), if any.
        if self.serializer:
            serializer = self.serializer
        else:
            _mtype, _serializer = self.get_serializer(content_type,
                                                      default_serializers)
            serializer = _serializer()

        response = webob.Response()
        response.status_int = self.code
        for hdr, value in self._headers.items():
            response.headers[hdr] = str(value)
        response.headers['Content-Type'] = content_type
        # A None body (e.g. 204 responses) is left unserialized.
        if self.obj is not None:
            response.body = serializer.serialize(self.obj)

        return response

    @property
    def code(self):
        """Retrieve the response status."""
        return self._code or self._default_code

    @property
    def headers(self):
        """Retrieve the headers."""
        return self._headers.copy()
def action_peek_json(body):
    """Determine action to invoke.

    :param body: raw JSON request body
    :returns: the single top-level key, which names the action
    :raises exception.MalformedRequestBody: if the body is not valid JSON
        or does not contain exactly one top-level key
    """
    try:
        decoded = jsonutils.loads(body)
    except ValueError:
        msg = _("cannot understand JSON")
        raise exception.MalformedRequestBody(reason=msg)

    # Make sure there's exactly one key...
    if len(decoded) != 1:
        msg = _("too many body keys")
        raise exception.MalformedRequestBody(reason=msg)

    # Return the action name.  next(iter(...)) works on both Python 2
    # and 3; the previous decoded.keys()[0] indexed the keys view and
    # is Python-2-only.
    return next(iter(decoded))
def action_peek_xml(body):
    """Determine action to invoke."""
    # The root element's tag names the action.
    document = xmlutil.safe_minidom_parse_string(body)
    return document.childNodes[0].tagName
class ResourceExceptionHandler(object):
    """Context manager to handle Resource exceptions.

    Used when processing exceptions generated by API implementation
    methods (or their extensions).  Converts most exceptions to Fault
    exceptions, with the appropriate logging.
    """

    def __enter__(self):
        # Nothing to set up; the value bound by 'as' is always None.
        return None

    def __exit__(self, ex_type, ex_value, ex_traceback):
        # Returning True suppresses the exception; each branch below
        # instead re-raises a translated Fault.
        if not ex_value:
            return True

        if isinstance(ex_value, exception.NotAuthorized):
            msg = unicode(ex_value.message % ex_value.kwargs)
            raise Fault(webob.exc.HTTPForbidden(explanation=msg))
        elif isinstance(ex_value, exception.Invalid):
            msg = unicode(ex_value.message % ex_value.kwargs)
            raise Fault(exception.ConvertedException(
                code=ex_value.code, explanation=msg))

        # Under python 2.6, TypeError's exception value is actually a string,
        # so test # here via ex_type instead:
        # http://bugs.python.org/issue7853
        elif issubclass(ex_type, TypeError):
            exc_info = (ex_type, ex_value, ex_traceback)
            LOG.error(_('Exception handling resource: %s') % ex_value,
                      exc_info=exc_info)
            raise Fault(webob.exc.HTTPBadRequest())
        elif isinstance(ex_value, Fault):
            LOG.info(_("Fault thrown: %s"), unicode(ex_value))
            raise ex_value
        elif isinstance(ex_value, webob.exc.HTTPException):
            LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value))
            raise Fault(ex_value)

        # We didn't handle the exception
        return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
def __init__(self, controller, action_peek=None, inherits=None,
             **deserializers):
    """
    :param controller: object that implement methods created by routes lib
    :param action_peek: dictionary of routines for peeking into an action
                        request body to determine the desired action
    :param inherits: another resource object that this resource should
                     inherit extensions from. Any action extensions that
                     are applied to the parent resource will also apply
                     to this resource.
    """
    self.controller = controller

    # Per-media-type deserializers; caller-supplied entries override
    # the built-in defaults.
    deserializer_defaults = {'xml': XMLDeserializer,
                             'json': JSONDeserializer}
    deserializer_defaults.update(deserializers)
    self.default_deserializers = deserializer_defaults

    self.default_serializers = {'xml': XMLDictSerializer,
                                'json': JSONDictSerializer}

    # Routines that peek into a request body to find the action name.
    self.action_peek = {'xml': action_peek_xml,
                        'json': action_peek_json}
    if action_peek:
        self.action_peek.update(action_peek)

    # Copy over the actions dictionary
    self.wsgi_actions = {}
    if controller:
        self.register_actions(controller)

    # Save a mapping of extensions
    self.wsgi_extensions = {}
    self.wsgi_action_extensions = {}
    self.inherits = inherits
def register_actions(self, controller):
    """Registers controller actions with this resource."""
    # Map each declared action name to its bound controller method.
    self.wsgi_actions.update(
        (name, getattr(controller, method_name))
        for name, method_name in
        getattr(controller, 'wsgi_actions', {}).items())
def register_extensions(self, controller):
    """Registers controller extensions with this resource."""
    for method_name, action_name in getattr(controller,
                                            'wsgi_extensions', []):
        # Look up the extending method on the controller.
        extension = getattr(controller, method_name)

        if action_name:
            # Extends a named action.
            bucket = self.wsgi_action_extensions.setdefault(action_name, [])
        else:
            # Extends a regular method.
            bucket = self.wsgi_extensions.setdefault(method_name, [])
        bucket.append(extension)
def get_action_args(self, request_environment):
    """Parse dictionary created by routes library."""
    # NOTE(Vek): Check for get_action_args() override in the
    #            controller
    if hasattr(self.controller, 'get_action_args'):
        return self.controller.get_action_args(request_environment)

    try:
        args = request_environment['wsgiorg.routing_args'][1].copy()
    except (KeyError, IndexError, AttributeError):
        return {}

    # Routing bookkeeping keys are not real action arguments.
    args.pop('controller', None)
    args.pop('format', None)

    return args
def get_body(self, request):
    """Return a (content_type, body) pair for *request*.

    Returns (None, '') — logging the reason at debug level — when the
    Content-Type is unrecognized or absent, or when the body is empty.
    """
    try:
        content_type = request.get_content_type()
    except exception.InvalidContentType:
        LOG.debug(_("Unrecognized Content-Type provided in request"))
        return None, ''

    if not content_type:
        LOG.debug(_("No Content-Type provided in request"))
        return None, ''

    if len(request.body) <= 0:
        LOG.debug(_("Empty body provided in request"))
        return None, ''

    return content_type, request.body
def deserialize(self, meth, content_type, body):
    """Deserialize *body*, preferring the method's own deserializer."""
    per_method = getattr(meth, 'wsgi_deserializers', {})
    try:
        # Normalize vendor content types to their short media key.
        mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
        if mtype in per_method:
            deserializer_cls = per_method[mtype]
        else:
            deserializer_cls = self.default_deserializers[mtype]
    except (KeyError, TypeError):
        raise exception.InvalidContentType(content_type=content_type)

    return deserializer_cls().deserialize(body)
def pre_process_extensions(self, extensions, request, action_args):
    """Run the pre-processing stage of each extension.

    Generator-function extensions are advanced to their first yield;
    plain callables are deferred entirely to post-processing.  Returns
    a (response, post) pair: a non-None response short-circuits the
    request, otherwise 'post' holds the callables/generators to run
    during post-processing, in reverse registration order.
    """
    # List of callables for post-processing extensions
    post = []

    for ext in extensions:
        if inspect.isgeneratorfunction(ext):
            response = None

            # If it's a generator function, the part before the
            # yield is the preprocessing stage
            try:
                with ResourceExceptionHandler():
                    gen = ext(req=request, **action_args)
                    response = gen.next()
            except Fault as ex:
                response = ex

            # We had a response...
            if response:
                return response, []

            # No response, queue up generator for post-processing
            post.append(gen)
        else:
            # Regular functions only perform post-processing
            post.append(ext)

    # Run post-processing in the reverse order
    return None, reversed(post)
def post_process_extensions(self, extensions, resp_obj, request,
                            action_args):
    """Run the post-processing stage of each extension.

    Generators (from pre-processing) receive the response object via
    send(); plain callables are invoked directly.  The first non-None
    response short-circuits and is returned; otherwise None.
    """
    for ext in extensions:
        response = None
        if inspect.isgenerator(ext):
            # If it's a generator, run the second half of
            # processing
            try:
                with ResourceExceptionHandler():
                    response = ext.send(resp_obj)
            except StopIteration:
                # Normal exit of generator
                continue
            except Fault as ex:
                response = ex
        else:
            # Regular functions get post-processing...
            try:
                with ResourceExceptionHandler():
                    response = ext(req=request, resp_obj=resp_obj,
                                   **action_args)
            except Fault as ex:
                response = ex

        # We had a response...
        if response:
            return response

    return None
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
    """WSGI method that controls (de)serialization and method dispatch."""

    # Identify the action, its arguments, and the requested
    # content type
    action_args = self.get_action_args(request.environ)
    action = action_args.pop('action', None)
    content_type, body = self.get_body(request)
    accept = request.best_match_content_type()

    # NOTE(Vek): Splitting the function up this way allows for
    #            auditing by external tools that wrap the existing
    #            function.  If we try to audit __call__(), we can
    #            run into troubles due to the @webob.dec.wsgify()
    #            decorator.
    return self._process_stack(request, action, action_args,
                               content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
if body:
LOG.debug(_("Action: '%(action)s', body: %(body)s") % locals())
LOG.debug(_("Calling method %s") % meth)
# Now, deserialize the request body...
try:
if content_type:
contents = self.deserialize(meth, content_type, body)
else:
contents = {}
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('nova.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request url")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
_set_request_id_header(request, resp_obj)
# Do a preserialize to set up the response object
serializers = getattr(meth, 'wsgi_serializers', {})
resp_obj._bind_method_serializers(serializers)
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
resp_obj.preserialize(accept, self.default_serializers)
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept,
self.default_serializers)
return response
def get_method(self, request, action, content_type, body):
meth, extensions = self._get_method(request,
action,
content_type,
body)
if self.inherits:
_meth, parent_ext = self.inherits.get_method(request,
action,
content_type,
body)
extensions.extend(parent_ext)
return meth, extensions
    def _get_method(self, request, action, content_type, body):
        """Look up the action-specific method and its extensions."""

        # Look up the method
        try:
            if not self.controller:
                meth = getattr(self, action)
            else:
                meth = getattr(self.controller, action)
        except AttributeError:
            # No direct attribute of that name.  Fall through to the
            # wsgi_actions table below, but only for the generic
            # 'action' dispatcher and the overridable CRUD verbs.
            if (not self.wsgi_actions or
                    action not in ['action', 'create', 'delete', 'update',
                                   'show']):
                # Propagate the error
                raise
        else:
            # Direct hit: return it along with any registered
            # extensions for that action.
            return meth, self.wsgi_extensions.get(action, [])

        if action == 'action':
            # OK, it's an action; figure out which action...
            mtype = _MEDIA_TYPE_MAP.get(content_type)
            # action_peek maps a media type to a function that extracts
            # the action name from the (still serialized) request body.
            action_name = self.action_peek[mtype](body)
        else:
            action_name = action

        # Look up the action method
        return (self.wsgi_actions[action_name],
                self.wsgi_action_extensions.get(action_name, []))
    def dispatch(self, method, request, action_args):
        """Dispatch a call to the action-specific method.

        The request is always passed as the ``req`` keyword; all other
        keyword arguments come from routing and body deserialization.
        """
        return method(req=request, **action_args)
def action(name):
    """Mark a function as an action.

    The given name will be taken as the action key in the body.

    This is also overloaded to allow extensions to provide
    non-extending definitions of create and delete operations.
    """
    def _mark(func):
        # Tag the function; ControllerMetaclass collects wsgi_action
        # attributes when assembling a controller's action table.
        func.wsgi_action = name
        return func
    return _mark
def extends(*args, **kwargs):
    """Indicate a function extends an operation.

    Can be used as either::

        @extends
        def index(...):
            pass

    or as::

        @extends(action='resize')
        def _action_resize(...):
            pass
    """
    def _tag(func):
        # Record the name of the method being extended and, optionally,
        # the specific action within it.
        func.wsgi_extends = (func.__name__, kwargs.get('action'))
        return func

    # Bare use (@extends): the function arrives positionally.
    if args:
        return _tag(*args)

    # Parameterized use (@extends(action=...)): hand back the decorator.
    return _tag
class ControllerMetaclass(type):
    """Controller metaclass.

    This metaclass automates the task of assembling a dictionary
    mapping action keys to method names.
    """

    def __new__(mcs, name, bases, cls_dict):
        """Adds the wsgi_actions dictionary to the class."""

        # Collect actions, starting with those inherited from bases so
        # subclasses can override or extend them.
        actions = {}
        extensions = []
        for base in bases:
            actions.update(getattr(base, 'wsgi_actions', {}))

        # Scan the class body for methods tagged by @action / @extends.
        for attr_name, attr in cls_dict.items():
            if not callable(attr):
                continue
            if getattr(attr, 'wsgi_action', None):
                actions[attr.wsgi_action] = attr_name
            elif getattr(attr, 'wsgi_extends', None):
                extensions.append(attr.wsgi_extends)

        # Add the actions and extensions to the class dict
        cls_dict['wsgi_actions'] = actions
        cls_dict['wsgi_extensions'] = extensions

        return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
                                                       cls_dict)
class Controller(object):
    """Default controller."""

    # Python 2 metaclass hook; ControllerMetaclass assembles the
    # wsgi_actions / wsgi_extensions tables from decorated methods.
    __metaclass__ = ControllerMetaclass

    # Subclasses set this to have a view builder constructed by default.
    _view_builder_class = None

    def __init__(self, view_builder=None):
        """Initialize controller with a view builder instance."""
        if view_builder:
            self._view_builder = view_builder
        elif self._view_builder_class:
            self._view_builder = self._view_builder_class()
        else:
            self._view_builder = None

    @staticmethod
    def is_valid_body(body, entity_name):
        """Return True when *body* is a wrapper keyed by *entity_name*
        whose value is itself dict-like (supports .get())."""
        if not body or entity_name not in body:
            return False

        candidate = body[entity_name]
        try:
            # Duck-typed dict check: anything supporting .get() passes.
            candidate.get(None)
        except AttributeError:
            return False
        return True
class Fault(webob.exc.HTTPException):
    """Wrap webob.exc.HTTPException to provide API friendly response."""

    # Mapping from HTTP status code to the element name used in the
    # serialized fault body; anything unlisted becomes "computeFault".
    _fault_names = {
            400: "badRequest",
            401: "unauthorized",
            403: "forbidden",
            404: "itemNotFound",
            405: "badMethod",
            409: "conflictingRequest",
            413: "overLimit",
            415: "badMediaType",
            501: "notImplemented",
            503: "serviceUnavailable"}

    def __init__(self, exception):
        """Create a Fault for the given webob.exc.exception."""
        self.wrapped_exc = exception
        # Header values must be plain strings; coerce anything an
        # earlier layer may have stored (e.g. ints).
        for key, value in self.wrapped_exc.headers.items():
            self.wrapped_exc.headers[key] = str(value)
        self.status_int = exception.status_int

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Generate a WSGI response based on the exception passed to ctor."""
        # Replace the body with fault details.
        code = self.wrapped_exc.status_int
        fault_name = self._fault_names.get(code, "computeFault")
        explanation = self.wrapped_exc.explanation
        LOG.debug(_("Returning %(code)s to user: %(explanation)s"),
                  {'code': code, 'explanation': explanation})
        fault_data = {
            fault_name: {
                'code': code,
                'message': explanation}}
        if code == 413:
            # Rate-limit faults carry the Retry-After header value into
            # the body so clients can back off.
            retry = self.wrapped_exc.headers.get('Retry-After', None)
            if retry:
                fault_data[fault_name]['retryAfter'] = retry

        # 'code' is an attribute on the fault tag itself
        metadata = {'attributes': {fault_name: 'code'}}

        xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)

        # Serialize according to the client's Accept header.
        content_type = req.best_match_content_type()
        serializer = {
            'application/xml': xml_serializer,
            'application/json': JSONDictSerializer(),
        }[content_type]

        self.wrapped_exc.body = serializer.serialize(fault_data)
        self.wrapped_exc.content_type = content_type
        _set_request_id_header(req, self.wrapped_exc.headers)

        return self.wrapped_exc

    def __str__(self):
        return self.wrapped_exc.__str__()
class OverLimitFault(webob.exc.HTTPException):
    """
    Rate-limited request response.
    """

    def __init__(self, message, details, retry_time):
        """
        Initialize new `OverLimitFault` with relevant information.
        """
        hdrs = OverLimitFault._retry_after(retry_time)
        self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs)
        self.content = {
            "overLimit": {
                "code": self.wrapped_exc.status_int,
                "message": message,
                "details": details,
                "retryAfter": hdrs['Retry-After'],
            },
        }

    @staticmethod
    def _retry_after(retry_time):
        # Seconds until the client may retry, clamped at zero so we
        # never emit a negative Retry-After value.
        remaining = int(math.ceil(retry_time - time.time()))
        return {'Retry-After': '%d' % max(remaining, 0)}

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """
        Return the wrapped exception with a serialized body conforming to our
        error format.
        """
        content_type = request.best_match_content_type()
        metadata = {"attributes": {"overLimit": ["code", "retryAfter"]}}

        serializers = {
            'application/xml': XMLDictSerializer(metadata, XMLNS_V11),
            'application/json': JSONDictSerializer(),
        }
        body = serializers[content_type].serialize(self.content)

        self.wrapped_exc.body = body
        self.wrapped_exc.content_type = content_type
        return self.wrapped_exc
def _set_request_id_header(req, headers):
    """Copy the nova request id, if any, onto *headers*.

    A no-op when the WSGI environ carries no 'nova.context'.
    """
    ctx = req.environ.get('nova.context')
    if not ctx:
        return
    headers['x-compute-request-id'] = ctx.request_id
| apache-2.0 |
lukeiwanski/tensorflow | tensorflow/python/keras/initializers.py | 9 | 6730 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializer classes (soon to be replaced with core TF initializers).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops.init_ops import Constant
from tensorflow.python.ops.init_ops import Identity
from tensorflow.python.ops.init_ops import Initializer # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Ones
from tensorflow.python.ops.init_ops import Orthogonal
from tensorflow.python.ops.init_ops import RandomNormal
from tensorflow.python.ops.init_ops import RandomUniform
from tensorflow.python.ops.init_ops import TruncatedNormal
from tensorflow.python.ops.init_ops import VarianceScaling
from tensorflow.python.ops.init_ops import Zeros
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.initializers.lecun_normal')
def lecun_normal(seed=None):
  """LeCun normal initializer.

  Draws samples from a truncated normal distribution centered on 0 with
  `stddev = sqrt(1 / fan_in)`, where `fan_in` is the number of input
  units in the weight tensor.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
      - [Efficient
        Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """
  initializer = VarianceScaling(scale=1.,
                                mode='fan_in',
                                distribution='normal',
                                seed=seed)
  return initializer
@tf_export('keras.initializers.lecun_uniform')
def lecun_uniform(seed=None):
  """LeCun uniform initializer.

  Draws samples from a uniform distribution within [-limit, limit],
  where `limit` is `sqrt(3 / fan_in)` and `fan_in` is the number of
  input units in the weight tensor.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      LeCun 98, Efficient Backprop,
      http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
  """
  initializer = VarianceScaling(scale=1.,
                                mode='fan_in',
                                distribution='uniform',
                                seed=seed)
  return initializer
@tf_export('keras.initializers.glorot_normal')
def glorot_normal(seed=None):
  """Glorot normal initializer, also called Xavier normal initializer.

  Draws samples from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / (fan_in + fan_out))`, where `fan_in` is the number
  of input units in the weight tensor and `fan_out` is the number of
  output units.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      Glorot & Bengio, AISTATS 2010
      http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
  """
  initializer = VarianceScaling(scale=1.,
                                mode='fan_avg',
                                distribution='normal',
                                seed=seed)
  return initializer
@tf_export('keras.initializers.glorot_uniform')
def glorot_uniform(seed=None):
  """Glorot uniform initializer, also called Xavier uniform initializer.

  Draws samples from a uniform distribution within [-limit, limit],
  where `limit` is `sqrt(6 / (fan_in + fan_out))`, `fan_in` is the
  number of input units in the weight tensor and `fan_out` is the
  number of output units.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      Glorot & Bengio, AISTATS 2010
      http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
  """
  initializer = VarianceScaling(scale=1.,
                                mode='fan_avg',
                                distribution='uniform',
                                seed=seed)
  return initializer
@tf_export('keras.initializers.he_normal')
def he_normal(seed=None):
  """He normal initializer.

  Draws samples from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / fan_in)`, where `fan_in` is the number of input
  units in the weight tensor.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      He et al., http://arxiv.org/abs/1502.01852
  """
  initializer = VarianceScaling(scale=2.,
                                mode='fan_in',
                                distribution='normal',
                                seed=seed)
  return initializer
@tf_export('keras.initializers.he_uniform')
def he_uniform(seed=None):
  """He uniform variance scaling initializer.

  Draws samples from a uniform distribution within [-limit, limit],
  where `limit` is `sqrt(6 / fan_in)` and `fan_in` is the number of
  input units in the weight tensor.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      He et al., http://arxiv.org/abs/1502.01852
  """
  initializer = VarianceScaling(scale=2.,
                                mode='fan_in',
                                distribution='uniform',
                                seed=seed)
  return initializer
# Compatibility aliases
# pylint: disable=invalid-name
# Lower-case aliases let users refer to initializers by their legacy
# Keras names (e.g. 'uniform'); string identifiers resolve through
# get() -> deserialize(), which looks names up in this module's
# globals().
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
# pylint: enable=invalid-name
# Utility functions
@tf_export('keras.initializers.serialize')
def serialize(initializer):
  """Return the serializable config for *initializer*.

  Delegates to the generic Keras object serializer.
  """
  return serialize_keras_object(initializer)
@tf_export('keras.initializers.deserialize')
def deserialize(config, custom_objects=None):
  """Reconstruct an initializer from its config dict.

  Candidate classes are looked up in this module's globals(), so both
  the canonical class names and the lower-case aliases resolve.
  """
  return deserialize_keras_object(
      config,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='initializer')
@tf_export('keras.initializers.get')
def get(identifier):
  """Resolve *identifier* to an initializer.

  Accepts None, a config dict, a string name, or a callable, mirroring
  the other Keras ``get`` helpers.
  """
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if isinstance(identifier, six.string_types):
    # A bare name deserializes with an empty config.
    return deserialize({'class_name': str(identifier), 'config': {}})
  if callable(identifier):
    return identifier
  raise ValueError('Could not interpret initializer identifier: ' +
                   str(identifier))
| apache-2.0 |
kobolabs/qt-everywhere-opensource-src-4.6.2 | src/3rdparty/freetype/src/tools/docmaker/sources.py | 367 | 10766 | # Sources (c) 2002, 2003, 2004, 2006, 2007, 2008, 2009
# David Turner <david@freetype.org>
#
#
# this file contains definitions of classes needed to decompose
# C sources files into a series of multi-line "blocks". There are
# two kinds of blocks:
#
# - normal blocks, which contain source code or ordinary comments
#
# - documentation blocks, which have restricted formatting, and
# whose text always start with a documentation markup tag like
# "<Function>", "<Type>", etc..
#
# the routines used to process the content of documentation blocks
# are not contained here, but in "content.py"
#
# the classes and methods found here only deal with text parsing
# and basic documentation block extraction
#
import fileinput, re, sys, os, string
################################################################
##
## BLOCK FORMAT PATTERN
##
## A simple class containing compiled regular expressions used
## to detect potential documentation format block comments within
## C source code
##
## note that the 'column' pattern must contain a group that will
## be used to "unbox" the content of documentation comment blocks
##
class SourceBlockFormat:

    def __init__(self, id, start, column, end):
        """create a block pattern, used to recognize special documentation blocks"""
        self.id = id
        # Compile all three delimiter patterns in VERBOSE mode, so the
        # module-level pattern strings may carry whitespace and inline
        # regex comments.
        for attr, pattern in (('start', start),
                              ('column', column),
                              ('end', end)):
            setattr(self, attr, re.compile(pattern, re.VERBOSE))
#
# format 1 documentation comment blocks look like the following:
#
# /************************************/
# /* */
# /* */
# /* */
# /************************************/
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
/\*{1} # followed by '/' and precisely one asterisk
([^*].*) # followed by anything (group 1)
\*{1}/ # followed by one asterisk and a '/'
\s*$ # probably followed by whitespace
'''
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
#
# format 2 documentation comment blocks look like the following:
#
# /************************************ (at least 2 asterisks)
# *
# *
# *
# *
# **/ (1 or more asterisks at the end)
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,} # followed by '/' and at least two asterisks
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
\*{1}(?!/) # followed by precisely one asterisk not followed by `/'
(.*) # then anything (group1)
'''
end = r'''
\s* # any number of whitespace
\*+/ # followed by at least one asterisk, then '/'
'''
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
#
# the list of supported documentation block formats, we could add new ones
# relatively easily
#
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
#
# the following regular expressions corresponds to markup tags
# within the documentation comment blocks. they're equivalent
# despite their different syntax
#
# notice how each markup tag _must_ begin a new line
#
re_markup_tag1 = re.compile( r'''\s*<(\w*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@(\w*):''' ) # @xxxx: format

#
# the list of supported markup tags, we could add new ones relatively
# easily
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]

#
# used to detect a cross-reference, after markup tags have been stripped
#
re_crossref = re.compile( r'@(\w*)(.*)' )

#
# used to detect italic and bold styles in paragraph text; group 1 is
# the styled word, group 3 the rest of the line after it
#
re_italic = re.compile( r"_(\w(\w|')*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*(\w(\w|')*)\*(.*)" ) # *bold*

#
# used to detect the end of commented source lines
#
re_source_sep = re.compile( r'\s*/\*\s*\*/' )

#
# used to perform cross-reference within source output
#
re_source_crossref = re.compile( r'(\W*)(\w*)' )
#
# a list of reserved source keywords
#
# C keywords and preprocessor directives highlighted in source output.
# Use a raw string: the previous non-raw string relied on '\#' being an
# unrecognized (and deprecated) escape sequence passing through
# unchanged; the raw form compiles to the exact same regex without
# invalid-escape warnings.  '\#' escapes '#' so re.VERBOSE does not
# treat it as a comment start.
# NOTE(review): the leading \b cannot match immediately before '#'
# (both sides non-word), so the preprocessor alternatives below appear
# unreachable as written -- confirm before relying on them.
re_source_keywords = re.compile( r'''\b ( typedef |
                                          struct |
                                          enum |
                                          union |
                                          const |
                                          char |
                                          int |
                                          short |
                                          long |
                                          void |
                                          signed |
                                          unsigned |
                                          \#include |
                                          \#define |
                                          \#undef |
                                          \#if |
                                          \#ifdef |
                                          \#ifndef |
                                          \#else |
                                          \#endif ) \b''', re.VERBOSE )
################################################################
##
## SOURCE BLOCK CLASS
##
## A SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlocks".
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal sources lines, including comments
##
## the important fields in a text block are the following ones:
##
## self.lines : a list of text lines for the corresponding block
##
## self.content : for documentation comment blocks only, this is the
## block content that has been "unboxed" from its
## decoration. This is None for all other blocks
## (i.e. sources or ordinary comments with no starting
## markup tag)
##
class SourceBlock:
    """One chunk of a parsed C source file.

    ``self.lines`` always holds the raw text lines; for documentation
    comment blocks that contain a markup tag, ``self.content`` holds
    the same lines "unboxed" from their comment decoration, otherwise
    it stays empty.
    """

    def __init__( self, processor, filename, lineno, lines ):
        self.processor = processor
        self.filename = filename
        self.lineno = lineno
        # Copy so later mutations of the caller's list don't leak in.
        self.lines = lines[:]
        self.format = processor.format
        self.content = []

        # Blocks without a recognized format are plain source; nothing
        # to unbox.
        if self.format == None:
            return

        # NOTE(review): 'words' is never used below -- dead assignment.
        words = []

        # extract comment lines
        lines = []

        for line0 in self.lines:
            m = self.format.column.match( line0 )
            if m:
                lines.append( m.group( 1 ) )

        # now, look for a markup tag; only blocks that contain at least
        # one tag (e.g. <Function>, @type:) expose their content
        for l in lines:
            l = string.strip( l )
            if len( l ) > 0:
                for tag in re_markup_tags:
                    if tag.match( l ):
                        self.content = lines
                        return

    def location( self ):
        # Human-readable "(file:line)" marker for diagnostics.
        return "(" + self.filename + ":" + repr( self.lineno ) + ")"

    # debugging only - not used in normal operations
    def dump( self ):
        # Python 2 print statements; this module predates Python 3.
        if self.content:
            print "{{{content start---"
            for l in self.content:
                print l
            print "---content end}}}"
            return

        # NOTE(review): 'fmt' is computed but never printed below.
        fmt = ""
        if self.format:
            fmt = repr( self.format.id ) + " "

        for line in self.lines:
            print line
################################################################
##
## SOURCE PROCESSOR CLASS
##
## The SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlock"
## objects.
##
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal sources lines, include comments
##
##
class SourceProcessor:
    """Read a C source file and decompose it into SourceBlock objects,
    separating documentation comment blocks from ordinary source."""

    def __init__( self ):
        """initialize a source processor"""
        self.blocks = []
        self.filename = None
        # Currently active SourceBlockFormat, or None when outside a
        # documentation comment block.
        self.format = None
        self.lines = []

    def reset( self ):
        """reset a block processor, clean all its blocks"""
        self.blocks = []
        self.format = None

    def parse_file( self, filename ):
        """parse a C source file, and add its blocks to the processor's list"""
        self.reset()

        self.filename = filename

        fileinput.close()
        self.format = None
        self.lineno = 0
        self.lines = []

        for line in fileinput.input( filename ):
            # strip trailing newlines, important on Windows machines!
            # ('\012' is the octal escape for '\n')
            if line[-1] == '\012':
                line = line[0:-1]

            if self.format == None:
                self.process_normal_line( line )
            else:
                if self.format.end.match( line ):
                    # that's a normal block end, add it to 'lines' and
                    # create a new block
                    self.lines.append( line )
                    self.add_block_lines()
                elif self.format.column.match( line ):
                    # that's a normal column line, add it to 'lines'
                    self.lines.append( line )
                else:
                    # humm.. this is an unexpected block end,
                    # create a new block, but don't process the line
                    self.add_block_lines()

                    # we need to process the line again
                    self.process_normal_line( line )

        # record the last lines
        self.add_block_lines()

    def process_normal_line( self, line ):
        """process a normal line and check whether it is the start of a new block"""
        for f in re_source_block_formats:
            if f.start.match( line ):
                # A documentation block begins here: flush whatever was
                # accumulated and remember the matching format.
                self.add_block_lines()
                self.format = f
                self.lineno = fileinput.filelineno()

        self.lines.append( line )

    def add_block_lines( self ):
        """add the current accumulated lines and create a new block"""
        if self.lines != []:
            block = SourceBlock( self, self.filename, self.lineno, self.lines )

            self.blocks.append( block )
            self.format = None
            self.lines = []

    # debugging only, not used in normal operations
    def dump( self ):
        """print all blocks in a processor"""
        for b in self.blocks:
            b.dump()
# eof
| lgpl-2.1 |
ebd2/presto | presto-docs/src/main/sphinx/conf.py | 57 | 2278 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Presto documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
import os
import sys
import xml.dom.minidom
# Avoid littering the source tree with .pyc files when this config
# imports the local extension modules below.  Guarded for very old
# interpreters; narrowed from a bare 'except:' so SystemExit and
# KeyboardInterrupt are no longer swallowed.
try:
    sys.dont_write_bytecode = True
except Exception:
    pass

# Make the local Sphinx extensions (the 'download' module) importable.
sys.path.insert(0, os.path.abspath('ext'))
def child_node(node, name):
    """Return the first child element of *node* named *name*, or None."""
    matches = (child for child in node.childNodes
               if child.nodeType == child.ELEMENT_NODE
               and child.tagName == name)
    return next(matches, None)
def node_text(node):
    """Return the character data of *node*'s first child (a text node)."""
    first_child = node.childNodes[0]
    return first_child.data
def maven_version(pom):
    """Extract the project version from the POM file at *pom*.

    Falls back to the parent POM's version when the project does not
    declare its own <version> element.
    """
    project = xml.dom.minidom.parse(pom).childNodes[0]

    own_version = child_node(project, 'version')
    if own_version is not None:
        return node_text(own_version)

    parent = child_node(project, 'parent')
    return node_text(child_node(parent, 'version'))
def get_version():
    """Return the docs version: $PRESTO_VERSION when set, else the POM's."""
    env_version = os.environ.get('PRESTO_VERSION', '').strip()
    if env_version:
        return env_version
    return maven_version('../../../pom.xml')
# -- General configuration -----------------------------------------------------

needs_sphinx = '1.1'

# Local extension living under ext/ (added to sys.path above).
extensions = ['download']

templates_path = ['_templates']

source_suffix = '.rst'

master_doc = 'index'

project = u'Presto'

version = get_version()
release = version

exclude_patterns = ['_build', 'rest*', 'overview/concepts*']

pygments_style = 'sphinx'

highlight_language = 'sql'

# Substitution appended to every page; {release} is expanded here, not
# by Sphinx.
rst_epilog = """
.. |presto_server_release| replace:: ``presto-server-{release}``
""".replace('{release}', release)

# -- Options for HTML output ---------------------------------------------------

html_theme_path = ['./themes']
html_theme = 'presto'

html_title = '%s %s Documentation' % (project, release)

html_add_permalinks = ''
html_show_copyright = False
html_show_sphinx = False
neteler/QGIS | python/plugins/processing/algs/qgis/scripts/Create_points_along_lines.py | 4 | 1165 | ##Vector geometry tools=group
##lines=vector
##distance=number 1
##startpoint=number 0
##endpoint=number 0
##output=output vector
from qgis.core import QgsFeature, QgsField
from PyQt4.QtCore import QVariant
from processing.tools.vector import VectorWriter
def create_points(feat):
    # Emit interpolated points along the feature's line geometry every
    # `distance` map units, beginning at offset `startpoint`.  A
    # positive `endpoint` truncates the traversal instead of using the
    # full line length.  `distance`, `startpoint`, `endpoint` and
    # `writer` are module-level names bound by the Processing runner
    # from the ## parameter declarations at the top of this script.
    geom = feat.geometry()
    length = geom.length()
    currentdistance = 0

    if endpoint > 0:
        length = endpoint

    out = QgsFeature()

    while startpoint + currentdistance <= length:
        point = geom.interpolate(startpoint + currentdistance)
        currentdistance = currentdistance + distance
        out.setGeometry(point)
        attrs = feat.attributes()
        # The stored distance is taken *after* the increment, so each
        # point records the offset of the next point, not its own.
        # NOTE(review): looks off by one `distance` step -- confirm.
        attrs.append(currentdistance)
        out.setAttributes(attrs)
        writer.addFeature(out)
# Script body.  `processing` and `progress` are injected into the
# namespace by the QGIS Processing script runner -- they are not
# imported here.
layer = processing.getObject(lines)
fields = layer.dataProvider().fields()
# Extra attribute recording each point's distance along the line.
fields.append(QgsField('Distance', QVariant.Double))
writer = VectorWriter(output, None, fields, QGis.WKBPoint,
                      layer.crs())
feats = processing.features(layer)
nFeat = len(feats)
for i, feat in enumerate(feats):
    progress.setPercentage(int(100 * i / nFeat))
    create_points(feat)
del writer
| gpl-2.0 |
sosreport/sos | tests/unittests/option_tests.py | 4 | 1415 | # This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
import unittest
from sos.report.plugins import Plugin
from sos.policies.distros import LinuxPolicy
from sos.policies.init_systems import InitSystem
class MockOptions(object):
    """Minimal stand-in for sos's parsed command-line options.

    Carries just the attributes Plugin reads during these tests.
    """
    all_logs = False
    dry_run = False
    log_size = 25
    allow_system_changes = False
    # Class-level lists are shared across instances; acceptable here
    # because the tests never mutate them.
    skip_commands = []
    skip_files = []
class GlobalOptionTest(unittest.TestCase):
    """Exercise Plugin.get_option() lookups against mocked commons."""

    def setUp(self):
        self.commons = {
            'sysroot': '/',
            'policy': LinuxPolicy(init=InitSystem()),
            'cmdlineopts': MockOptions(),
            'devices': {}
        }
        self.plugin = Plugin(self.commons)
        # Parallel lists: option names and their parameter dicts.
        self.plugin.opt_names = ['baz', 'empty', 'test_option']
        self.plugin.opt_parms = [
            {'enabled': False}, {'enabled': None}, {'enabled': 'foobar'}
        ]

    def test_simple_lookup(self):
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual(self.plugin.get_option('test_option'), 'foobar')

    def test_cascade(self):
        self.assertEqual(self.plugin.get_option('baz'), False)
# Allow running this test module directly, outside the sos test runner.
if __name__ == "__main__":
    unittest.main()

# vim: set et ts=4 sw=4 :
| gpl-2.0 |
SotongDJ/SotongDJ-PythonLab | Exp6-wget-batch-tools/progress/main.py | 1 | 1524 | import os
def status():
    """Print a completion percentage for every wget-log* file in the
    current directory (Python 2; uses print statements)."""
    print "----RESULT-----------"
    # Shell out to list the directory into a scratch file.
    # NOTE(review): assumes a 'progress/' directory already exists.
    os.system("ls -1>progress/file.tmp")
    for wgetlog in open("progress/file.tmp").read().splitlines():
        if "wget-log" in wgetlog:
            percentage="0%"
            # Scan the log; progress lines contain "K ." / "K =" dot
            # bars, completed downloads contain "100%".
            for line in open(wgetlog).read().splitlines():
                if "K ." in line or "100%" in line or "K =" in line:
                    if "100%" not in line:
                        #print "mark0:"+percentage
                        tpo=int(percentage.replace("%",""))
                        # Keep the largest percentage token seen so far.
                        for sect in line.split(" "):
                            # print "rub:"+sect
                            if "%" in sect:
                                a=sect
                                #print "a:"+a
                                #print "mark1:"+percentage
                                tpn=int(a.replace("%",''))
                                if tpn > tpo:
                                    percentage=a
                                # print "mark2:"+percentage
                        #print "mark3:"+percentage
                    elif "100%" in line:
                        percentage="Finished"
            print wgetlog+":"+percentage
    print "---------------------"
# Interactive driver: loop until the user quits with "n".  The initial
# "i" is just a sentinel that matches no menu entry.
command = "i"
while command != "n":
    # Fixed typo in the user-facing prompt: "sctipt" -> "script".
    command = raw_input("Which you want?\n\"w\" for start a new wget process\n\"c\" for check the status and repeat this script\n\"n\" for the end\nYour selection:\n")
    if command == "w":
        # NOTE(review): the URL is interpolated into a shell command
        # with only surrounding double quotes -- a crafted string can
        # inject shell commands.  Prefer subprocess with an arg list.
        os.system("wget -bc \"" + raw_input("Copy and paste your target url:\n") + "\"")
    elif command == "c":
        status()
| gpl-3.0 |
odpi/hadoop | hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py | 323 | 3444 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
pat = re.compile('(?P<name>[^=]+)="(?P<value>[^"]*)" *')
counterPat = re.compile('(?P<name>[^:]+):(?P<value>[^,]*),?')
def parse(tail):
    """Parse the key="value" attribute pairs in *tail* into a dict.

    When a key appears more than once the last occurrence wins, which is
    exactly what the original accumulation loop did.
    """
    return dict(re.findall(pat, tail))
# Per-task timing tables, keyed by TASKID, all times in whole seconds
# (job history reports milliseconds; divided by 1000 below).
mapStartTime = {}
mapEndTime = {}
reduceStartTime = {}
reduceShuffleTime = {}
reduceSortTime = {}
reduceEndTime = {}
reduceBytes = {}
# Each job-history line is "<EventType> key="value" key="value" ...".
for line in sys.stdin:
    words = line.split(" ",1)
    event = words[0]
    attrs = parse(words[1])
    if event == 'MapAttempt':
        if attrs.has_key("START_TIME"):
            mapStartTime[attrs["TASKID"]] = int(attrs["START_TIME"])/1000
        elif attrs.has_key("FINISH_TIME"):
            mapEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000
    elif event == 'ReduceAttempt':
        if attrs.has_key("START_TIME"):
            reduceStartTime[attrs["TASKID"]] = int(attrs["START_TIME"]) / 1000
        elif attrs.has_key("FINISH_TIME"):
            # A finished reduce attempt reports all three phase end times.
            reduceShuffleTime[attrs["TASKID"]] = int(attrs["SHUFFLE_FINISHED"])/1000
            reduceSortTime[attrs["TASKID"]] = int(attrs["SORT_FINISHED"])/1000
            reduceEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000
    elif event == 'Task':
        if attrs["TASK_TYPE"] == "REDUCE" and attrs.has_key("COUNTERS"):
            for n,v in re.findall(counterPat, attrs["COUNTERS"]):
                if n == "File Systems.HDFS bytes written":
                    reduceBytes[attrs["TASKID"]] = int(v)
# Per-second occupancy counters over the job's whole time range:
# how many maps / shuffling / sorting / reducing tasks ran at second t.
runningMaps = {}
shufflingReduces = {}
sortingReduces = {}
runningReduces = {}
startTime = min(reduce(min, mapStartTime.values()),
                reduce(min, reduceStartTime.values()))
endTime = max(reduce(max, mapEndTime.values()),
              reduce(max, reduceEndTime.values()))
reduces = reduceBytes.keys()
reduces.sort()
print "Name reduce-output-bytes shuffle-finish reduce-finish"
for r in reduces:
    print r, reduceBytes[r], reduceShuffleTime[r] - startTime,
    print reduceEndTime[r] - startTime
print
for t in range(startTime, endTime):
    runningMaps[t] = 0
    shufflingReduces[t] = 0
    sortingReduces[t] = 0
    runningReduces[t] = 0
for map in mapStartTime.keys():
    for t in range(mapStartTime[map], mapEndTime[map]):
        runningMaps[t] += 1
# NOTE(review): this loop variable shadows the builtin reduce() used above;
# harmless only because reduce() is never called again afterwards.
for reduce in reduceStartTime.keys():
    for t in range(reduceStartTime[reduce], reduceShuffleTime[reduce]):
        shufflingReduces[t] += 1
    for t in range(reduceShuffleTime[reduce], reduceSortTime[reduce]):
        sortingReduces[t] += 1
    for t in range(reduceSortTime[reduce], reduceEndTime[reduce]):
        runningReduces[t] += 1
print "time maps shuffle merge reduce"
for t in range(startTime, endTime):
    print t - startTime, runningMaps[t], shufflingReduces[t], sortingReduces[t],
    print runningReduces[t]
| apache-2.0 |
ylow/SFrame | oss_src/unity/python/sframe/util/cloudpickle.py | 6 | 28239 | """
This class is defined to override standard pickle functionality
The goals of it follow:
-Serialize lambdas and nested functions to compiled byte code
-Deal with main module correctly
-Deal with other non-serializable objects
It does not include an unpickler, as standard python unpickling suffices.
This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
<http://www.picloud.com>`_.
Copyright (c) 2012, Regents of the University of California.
Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of California, Berkeley nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
import operator
import os
import io
import pickle
import struct
import sys
import types
from functools import partial
import itertools
import dis
import traceback
# Python 2/3 compatibility shim: expose a common Pickler base class and an
# in-memory buffer type under the same names on both versions.
if sys.version < '3':
    from pickle import Pickler
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    PY3 = False
else:
    # Old-style classes do not exist on Python 3; alias to `type` so code
    # referencing types.ClassType keeps working.
    types.ClassType = type
    from pickle import _Pickler as Pickler
    from io import BytesIO as StringIO
    PY3 = True
#relevant opcodes
# Opcodes that read or write module-level (global) names; used by
# CloudPickler.extract_code_globals when scanning bytecode.
STORE_GLOBAL = dis.opname.index('STORE_GLOBAL')
DELETE_GLOBAL = dis.opname.index('DELETE_GLOBAL')
LOAD_GLOBAL = dis.opname.index('LOAD_GLOBAL')
GLOBAL_OPS = [STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL]
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
EXTENDED_ARG = dis.EXTENDED_ARG
def islambda(func):
    """Return True when *func* is a lambda, i.e. its compiler-assigned
    name is '<lambda>'.  Lambdas cannot be pickled by reference, so the
    pickler detects them by name.
    """
    return func.__name__ == '<lambda>'
# Reverse lookup table: builtin type object -> its name in the `types`
# module.  Used by save_global/_builtin_type to pickle builtin types by name.
_BUILTIN_TYPE_NAMES = {}
for k, v in types.__dict__.items():
    if type(v) is type:
        _BUILTIN_TYPE_NAMES[v] = k
def _builtin_type(name):
return getattr(types, name)
class CloudPickler(Pickler):
    """Pickler that serializes objects the standard pickler refuses:
    lambdas, functions defined interactively or nested, dynamically created
    classes, instance methods, partials, (readable) files, etc.

    Unpickling needs only the standard ``pickle`` module plus the
    module-level helper functions defined alongside this class
    (``subimport``, ``_fill_function``, ``_make_skel_func``, ...).
    """

    dispatch = Pickler.dispatch.copy()

    def __init__(self, file, protocol=None):
        Pickler.__init__(self, file, protocol)
        # set of modules to unpickle
        self.modules = set()
        # map ids to dictionary. used to ensure that functions can share global env
        self.globals_ref = {}

    def dump(self, obj):
        """Pickle *obj* to the output file, registering add-on handlers first."""
        self.inject_addons()
        try:
            return Pickler.dump(self, obj)
        except RuntimeError as e:
            if 'recursion' in e.args[0]:
                msg = """Could not pickle object as excessively deep recursion required."""
                raise pickle.PicklingError(msg)
            # BUGFIX: any other RuntimeError used to be swallowed silently,
            # making dump() return None on failure; propagate it instead.
            raise

    def save_memoryview(self, obj):
        """Fallback to save_string"""
        Pickler.save_string(self, str(obj))

    def save_buffer(self, obj):
        """Fallback to save_string"""
        Pickler.save_string(self,str(obj))
    if PY3:
        dispatch[memoryview] = save_memoryview
    else:
        dispatch[buffer] = save_buffer

    def save_unsupported(self, obj):
        """Refuse to pickle *obj* with an explicit error (generators, etc.)."""
        raise pickle.PicklingError("Cannot pickle objects of type %s" % type(obj))
    dispatch[types.GeneratorType] = save_unsupported

    # itertools objects do not pickle!
    for v in itertools.__dict__.values():
        if type(v) is type:
            dispatch[v] = save_unsupported

    def save_module(self, obj):
        """
        Save a module as an import
        """
        self.modules.add(obj)
        self.save_reduce(subimport, (obj.__name__,), obj=obj)
    dispatch[types.ModuleType] = save_module

    def save_codeobject(self, obj):
        """
        Save a code object
        """
        # The co_* argument order must match the types.CodeType constructor
        # for the running interpreter (Python 3 adds co_kwonlyargcount).
        if PY3:
            args = (
                obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
                obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames,
                obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
                obj.co_cellvars
            )
        else:
            args = (
                obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code,
                obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name,
                obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars
            )
        self.save_reduce(types.CodeType, args, obj=obj)
    dispatch[types.CodeType] = save_codeobject

    def save_function(self, obj, name=None):
        """ Registered with the dispatch to handle all function types.
        Determines what kind of function obj is (e.g. lambda, defined at
        interactive prompt, etc) and handles the pickling appropriately.
        """
        write = self.write
        if name is None:
            name = obj.__name__
        modname = pickle.whichmodule(obj, name)
        # print('which gives %s %s %s' % (modname, obj, name))
        try:
            themodule = sys.modules[modname]
        except KeyError:
            # eval'd items such as namedtuple give invalid items for their function __module__
            modname = '__main__'
        if modname == '__main__':
            themodule = None
        if themodule:
            self.modules.add(themodule)
            if getattr(themodule, name, None) is obj:
                return self.save_global(obj, name)
        # if func is lambda, def'ed at prompt, is in main, or is nested, then
        # we'll pickle the actual function object rather than simply saving a
        # reference (as is done in default pickler), via save_function_tuple.
        if islambda(obj) or obj.__code__.co_filename == '<stdin>' or themodule is None:
            #print("save global", islambda(obj), obj.__code__.co_filename, modname, themodule)
            self.save_function_tuple(obj)
            return
        else:
            # func is nested
            klass = getattr(themodule, name, None)
            if klass is None or klass is not obj:
                self.save_function_tuple(obj)
                return
        if obj.__dict__:
            # essentially save_reduce, but workaround needed to avoid recursion
            self.save(_restore_attr)
            write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n')
            self.memoize(obj)
            self.save(obj.__dict__)
            write(pickle.TUPLE + pickle.REDUCE)
        else:
            write(pickle.GLOBAL + modname + '\n' + name + '\n')
            self.memoize(obj)
    dispatch[types.FunctionType] = save_function

    def save_function_tuple(self, func):
        """ Pickles an actual func object.
        A func comprises: code, globals, defaults, closure, and dict. We
        extract and save these, injecting reducing functions at certain points
        to recreate the func object. Keep in mind that some of these pieces
        can contain a ref to the func itself. Thus, a naive save on these
        pieces could trigger an infinite loop of save's. To get around that,
        we first create a skeleton func object using just the code (this is
        safe, since this won't contain a ref to the func), and memoize it as
        soon as it's created. The other stuff can then be filled in later.
        """
        save = self.save
        write = self.write
        code, f_globals, defaults, closure, dct, base_globals = self.extract_func_data(func)
        save(_fill_function)  # skeleton function updater
        write(pickle.MARK)    # beginning of tuple that _fill_function expects
        # create a skeleton function object and memoize it
        save(_make_skel_func)
        save((code, len(closure), base_globals))
        write(pickle.REDUCE)
        self.memoize(func)
        # save the rest of the func data needed by _fill_function
        save(f_globals)
        save(defaults)
        save(closure)  # maintains backcompat
        save(dct)
        write(pickle.TUPLE)
        write(pickle.REDUCE)  # applies _fill_function on the tuple

    @staticmethod
    def extract_code_globals(co):
        """
        Find all globals names read or written to by codeblock co
        """
        code = co.co_code
        if not PY3:
            code = [ord(c) for c in code]
        names = co.co_names
        out_names = set()
        n = len(code)
        i = 0
        extended_arg = 0
        # Walk the bytecode by hand; opcodes >= HAVE_ARGUMENT carry a
        # two-byte argument (pre-3.6 bytecode layout).
        while i < n:
            op = code[i]
            i += 1
            if op >= HAVE_ARGUMENT:
                oparg = code[i] + code[i+1] * 256 + extended_arg
                extended_arg = 0
                i += 2
                if op == EXTENDED_ARG:
                    extended_arg = oparg*65536
                if op in GLOBAL_OPS:
                    out_names.add(names[oparg])
        # see if nested function have any global refs
        if co.co_consts:
            for const in co.co_consts:
                if type(const) is types.CodeType:
                    out_names |= CloudPickler.extract_code_globals(const)
        return out_names

    def extract_func_data(self, func):
        """
        Turn the function into a tuple of data necessary to recreate it:
        code, globals, defaults, closure, dict
        """
        code = func.__code__
        # extract all global ref's
        func_global_refs = self.extract_code_globals(code)
        # process all variables referenced by global environment
        f_globals = {}
        for var in func_global_refs:
            if var in func.__globals__:
                f_globals[var] = func.__globals__[var]
        # defaults requires no processing
        defaults = func.__defaults__
        # process closure
        closure = [c.cell_contents for c in func.__closure__] if func.__closure__ else []
        # save the dict
        dct = func.__dict__
        # base_globals is shared per-globals-dict so that functions pickled
        # together keep sharing one global namespace when unpickled.
        base_globals = self.globals_ref.get(id(func.__globals__), {})
        self.globals_ref[id(func.__globals__)] = base_globals
        return (code, f_globals, defaults, closure, dct, base_globals)

    def save_builtin_function(self, obj):
        # BUGFIX: was `obj.__module__ is "__builtin__"` -- an identity
        # comparison against a string literal, which only worked through
        # CPython's string interning; use equality instead.
        if obj.__module__ == "__builtin__":
            return self.save_global(obj)
        return self.save_function(obj)
    dispatch[types.BuiltinFunctionType] = save_builtin_function

    def save_global(self, obj, name=None, pack=struct.pack):
        if obj.__module__ == "__builtin__" or obj.__module__ == "builtins":
            if obj in _BUILTIN_TYPE_NAMES:
                return self.save_reduce(_builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)
        if name is None:
            name = obj.__name__
        modname = getattr(obj, "__module__", None)
        if modname is None:
            modname = pickle.whichmodule(obj, name)
        if modname == '__main__':
            themodule = None
        else:
            __import__(modname)
            themodule = sys.modules[modname]
            self.modules.add(themodule)
        if hasattr(themodule, name) and getattr(themodule, name) is obj:
            return Pickler.save_global(self, obj, name)
        typ = type(obj)
        if typ is not obj and isinstance(obj, (type, types.ClassType)):
            # The class is not importable by name: serialize it structurally
            # (bases, doc, and a filtered attribute dict) via _load_class.
            d = dict(obj.__dict__)  # copy dict proxy to a dict
            if not isinstance(d.get('__dict__', None), property):
                # don't extract dict that are properties
                d.pop('__dict__', None)
            d.pop('__weakref__', None)
            # hack as __new__ is stored differently in the __dict__
            new_override = d.get('__new__', None)
            if new_override:
                d['__new__'] = obj.__new__
            # workaround for namedtuple (hijacked by PySpark)
            if getattr(obj, '_is_namedtuple_', False):
                self.save_reduce(_load_namedtuple, (obj.__name__, obj._fields))
                return
            self.save(_load_class)
            self.save_reduce(typ, (obj.__name__, obj.__bases__, {"__doc__": obj.__doc__}), obj=obj)
            d.pop('__doc__', None)
            # handle property and staticmethod
            dd = {}
            for k, v in d.items():
                if isinstance(v, property):
                    k = ('property', k)
                    v = (v.fget, v.fset, v.fdel, v.__doc__)
                elif isinstance(v, staticmethod) and hasattr(v, '__func__'):
                    k = ('staticmethod', k)
                    v = v.__func__
                elif isinstance(v, classmethod) and hasattr(v, '__func__'):
                    k = ('classmethod', k)
                    v = v.__func__
                dd[k] = v
            self.save(dd)
            self.write(pickle.TUPLE2)
            self.write(pickle.REDUCE)
        else:
            raise pickle.PicklingError("Can't pickle %r" % obj)
    dispatch[type] = save_global
    dispatch[types.ClassType] = save_global

    def save_instancemethod(self, obj):
        # Memoization rarely is ever useful due to python bounding
        if PY3:
            self.save_reduce(types.MethodType, (obj.__func__, obj.__self__), obj=obj)
        else:
            self.save_reduce(types.MethodType, (obj.__func__, obj.__self__, obj.__self__.__class__),
                             obj=obj)
    dispatch[types.MethodType] = save_instancemethod

    def save_inst(self, obj):
        """Inner logic to save instance. Based off pickle.save_inst
        Supports __transient__"""
        cls = obj.__class__
        memo = self.memo
        write = self.write
        save = self.save
        if hasattr(obj, '__getinitargs__'):
            args = obj.__getinitargs__()
            len(args)  # XXX Assert it's a sequence
            pickle._keep_alive(args, memo)
        else:
            args = ()
        write(pickle.MARK)
        if self.bin:
            save(cls)
            for arg in args:
                save(arg)
            write(pickle.OBJ)
        else:
            for arg in args:
                save(arg)
            write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n')
        self.memoize(obj)
        try:
            getstate = obj.__getstate__
        except AttributeError:
            stuff = obj.__dict__
            #remove items if transient
            if hasattr(obj, '__transient__'):
                transient = obj.__transient__
                stuff = stuff.copy()
                for k in list(stuff.keys()):
                    if k in transient:
                        del stuff[k]
        else:
            stuff = getstate()
            pickle._keep_alive(stuff, memo)
        save(stuff)
        write(pickle.BUILD)
    if not PY3:
        dispatch[types.InstanceType] = save_inst

    def save_property(self, obj):
        # properties not correctly saved in python
        self.save_reduce(property, (obj.fget, obj.fset, obj.fdel, obj.__doc__), obj=obj)
    dispatch[property] = save_property

    def save_itemgetter(self, obj):
        """itemgetter serializer (needed for namedtuple support)"""
        # Call the itemgetter on a probe object that echoes back the item
        # key(s); this recovers the arguments it was constructed with.
        class Dummy:
            def __getitem__(self, item):
                return item
        items = obj(Dummy())
        if not isinstance(items, tuple):
            items = (items, )
        return self.save_reduce(operator.itemgetter, items)
    if type(operator.itemgetter) is type:
        dispatch[operator.itemgetter] = save_itemgetter

    def save_attrgetter(self, obj):
        """attrgetter serializer"""
        # Probe object that records the (possibly dotted) attribute paths
        # the attrgetter accesses, so they can be replayed on unpickle.
        class Dummy(object):
            def __init__(self, attrs, index=None):
                self.attrs = attrs
                self.index = index
            def __getattribute__(self, item):
                attrs = object.__getattribute__(self, "attrs")
                index = object.__getattribute__(self, "index")
                if index is None:
                    index = len(attrs)
                    attrs.append(item)
                else:
                    attrs[index] = ".".join([attrs[index], item])
                return type(self)(attrs, index)
        attrs = []
        obj(Dummy(attrs))
        return self.save_reduce(operator.attrgetter, tuple(attrs))
    if type(operator.attrgetter) is type:
        dispatch[operator.attrgetter] = save_attrgetter

    def save_reduce(self, func, args, state=None,
                    listitems=None, dictitems=None, obj=None):
        """Modified to support __transient__ on new objects
        Change only affects protocol level 2 (which is always used by PiCloud"""
        # Assert that args is a tuple or None
        if not isinstance(args, tuple):
            raise pickle.PicklingError("args from reduce() should be a tuple")
        # Assert that func is callable
        if not hasattr(func, '__call__'):
            raise pickle.PicklingError("func from reduce should be callable")
        save = self.save
        write = self.write
        # Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
        if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
            #Added fix to allow transient
            cls = args[0]
            if not hasattr(cls, "__new__"):
                raise pickle.PicklingError(
                    "args[0] from __newobj__ args has no __new__")
            if obj is not None and cls is not obj.__class__:
                raise pickle.PicklingError(
                    "args[0] from __newobj__ args has the wrong class")
            args = args[1:]
            save(cls)
            #Don't pickle transient entries
            if hasattr(obj, '__transient__'):
                transient = obj.__transient__
                state = state.copy()
                for k in list(state.keys()):
                    if k in transient:
                        del state[k]
            save(args)
            write(pickle.NEWOBJ)
        else:
            save(func)
            save(args)
            write(pickle.REDUCE)
        if obj is not None:
            self.memoize(obj)
        # More new special cases (that work with older protocols as
        # well): when __reduce__ returns a tuple with 4 or 5 items,
        # the 4th and 5th item should be iterators that provide list
        # items and dict items (as (key, value) tuples), or None.
        if listitems is not None:
            self._batch_appends(listitems)
        if dictitems is not None:
            self._batch_setitems(dictitems)
        if state is not None:
            save(state)
            write(pickle.BUILD)

    def save_partial(self, obj):
        """Partial objects do not serialize correctly in python2.x -- this fixes the bugs"""
        self.save_reduce(_genpartial, (obj.func, obj.args, obj.keywords))
    if sys.version_info < (2,7):  # 2.7 supports partial pickling
        dispatch[partial] = save_partial

    def save_file(self, obj):
        """Save a file by embedding its readable contents in the pickle."""
        try:
            import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute
        except ImportError:
            import io as pystringIO
        if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
            raise pickle.PicklingError("Cannot pickle files that do not map to an actual file")
        if obj is sys.stdout:
            return self.save_reduce(getattr, (sys,'stdout'), obj=obj)
        if obj is sys.stderr:
            return self.save_reduce(getattr, (sys,'stderr'), obj=obj)
        if obj is sys.stdin:
            raise pickle.PicklingError("Cannot pickle standard input")
        if hasattr(obj, 'isatty') and obj.isatty():
            raise pickle.PicklingError("Cannot pickle files that map to tty objects")
        if 'r' not in obj.mode:
            raise pickle.PicklingError("Cannot pickle files that are not opened for reading")
        name = obj.name
        try:
            fsize = os.stat(name).st_size
        except OSError:
            raise pickle.PicklingError("Cannot pickle file %s as it cannot be stat" % name)
        if obj.closed:
            #create an empty closed string io
            retval = pystringIO.StringIO("")
            retval.close()
        elif not fsize: #empty file
            retval = pystringIO.StringIO("")
            try:
                tmpfile = file(name)
                tst = tmpfile.read(1)
            except IOError:
                raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
            tmpfile.close()
            if tst != '':
                raise pickle.PicklingError("Cannot pickle file %s as it does not appear to map to a physical, real file" % name)
        else:
            try:
                tmpfile = file(name)
                contents = tmpfile.read()
                tmpfile.close()
            except IOError:
                raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
            retval = pystringIO.StringIO(contents)
            curloc = obj.tell()
            retval.seek(curloc)
        retval.name = name
        self.save(retval)
        self.memoize(obj)
    if PY3:
        dispatch[io.TextIOWrapper] = save_file
    else:
        dispatch[file] = save_file

    """Special functions for Add-on libraries"""

    def inject_numpy(self):
        # Register a ufunc handler only when numpy is already imported.
        numpy = sys.modules.get('numpy')
        if not numpy or not hasattr(numpy, 'ufunc'):
            return
        self.dispatch[numpy.ufunc] = self.__class__.save_ufunc

    def save_ufunc(self, obj):
        """Hack function for saving numpy ufunc objects"""
        name = obj.__name__
        numpy_tst_mods = ['numpy', 'scipy.special']
        for tst_mod_name in numpy_tst_mods:
            tst_mod = sys.modules.get(tst_mod_name, None)
            if tst_mod and name in tst_mod.__dict__:
                return self.save_reduce(_getobject, (tst_mod_name, name))
        raise pickle.PicklingError('cannot save %s. Cannot resolve what module it is defined in'
                                   % str(obj))

    def inject_unity_proxy(self):
        # get the top level module
        gl = __import__(__name__.split('.')[0])
        if not gl:
            return
        ## Make sure the unity objects are not picklable ##
        self.dispatch[gl.SArray] = self.__class__.save_unsupported
        self.dispatch[gl.SFrame] = self.__class__.save_unsupported
        self.dispatch[gl.SGraph] = self.__class__.save_unsupported
        self.dispatch[gl.Graph] = self.__class__.save_unsupported
        self.dispatch[gl.Sketch] = self.__class__.save_unsupported
        self.dispatch[gl.Model] = self.__class__.save_unsupported
        ## Make sure the underlying cython objects are not picklable ##
        self.dispatch[gl.cython.cy_sarray.UnitySArrayProxy] = self.__class__.save_unsupported
        self.dispatch[gl.cython.cy_sframe.UnitySFrameProxy] = self.__class__.save_unsupported
        self.dispatch[gl.cython.cy_sketch.UnitySketchProxy] = self.__class__.save_unsupported
        self.dispatch[gl.cython.cy_graph.UnityGraphProxy] = self.__class__.save_unsupported
        self.dispatch[gl.cython.cy_model.UnityModel] = self.__class__.save_unsupported
        self.dispatch[gl.cython.cy_ipc.PyCommClient] = self.__class__.save_unsupported

    def inject_addons(self):
        """Plug in system. Register additional pickling functions if modules already loaded"""
        self.inject_numpy()
        self.inject_unity_proxy()
# Shorthands for legacy support
def dump(obj, file, protocol=2):
    """Pickle *obj* into the open *file* using CloudPickler (legacy shorthand)."""
    pickler = CloudPickler(file, protocol)
    pickler.dump(obj)
def dumps(obj, protocol=2):
    """Pickle *obj* with CloudPickler and return the serialized payload."""
    buf = StringIO()
    CloudPickler(buf, protocol).dump(obj)
    return buf.getvalue()
#hack for __import__ not working as desired
def subimport(name):
    """Import *name* (possibly dotted) and return the module object.

    Unpickling counterpart of CloudPickler.save_module.
    """
    __import__(name)
    # __import__ returns the top-level package, so fetch the actual
    # (sub)module from sys.modules by its full dotted name.
    module = sys.modules[name]
    return module
# restores function attributes
def _restore_attr(obj, attr):
for key, val in attr.items():
setattr(obj, key, val)
return obj
def _get_module_builtins():
return pickle.__builtins__
def print_exec(stream):
    """Write the traceback of the exception currently being handled to *stream*."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    traceback.print_exception(exc_type, exc_value, exc_tb, None, stream)
def _modules_to_main(modList):
"""Force every module in modList to be placed into main"""
if not modList:
return
main = sys.modules['__main__']
for modname in modList:
if type(modname) is str:
try:
mod = __import__(modname)
except Exception as e:
sys.stderr.write('warning: could not import %s\n. '
'Your function may unexpectedly error due to this import failing;'
'A version mismatch is likely. Specific error was:\n' % modname)
print_exec(sys.stderr)
else:
setattr(main, mod.__name__, mod)
#object generators:
def _genpartial(func, args, kwds):
if not args:
args = ()
if not kwds:
kwds = {}
return partial(func, *args, **kwds)
def _fill_function(func, globals, defaults, closures, dict):
    """ Fills in the rest of function data into the skeleton function object
    that were created via _make_skel_func().
    """
    # Rebuild real closure cells from the pickled cell contents.
    closure = _reconstruct_closure(closures) if closures else None
    # A fresh function object must be created because __closure__ is
    # read-only; the Python 2 branch uses the legacy func_* attribute names.
    if sys.version_info.major == 2:
        func = types.FunctionType(func.__code__, func.func_globals,
                                  None, None, closure)
        func.func_globals.update(globals)
        func.func_defaults = defaults
        func.func_dict = dict
    else:
        func = types.FunctionType(func.__code__, func.__globals__,
                                  None, None, closure)
        func.__globals__.update(globals)
        func.__defaults__ = defaults
        func.__dict__ = dict
    return func
def _make_cell(value):
return (lambda: value).__closure__[0]
def _reconstruct_closure(values):
    """Rebuild a closure tuple of cells from a sequence of cell contents."""
    return tuple(_make_cell(v) for v in values)
def _make_skel_func(code, num_closures, base_globals = None):
    """Create a skeleton function holding only *code* and the right number of
    placeholder closure cells; globals, defaults and __dict__ are filled in
    afterwards by _fill_function().
    """
    placeholder_cells = tuple(_make_cell(None) for _ in range(num_closures))
    if base_globals is None:
        base_globals = {}
    base_globals['__builtins__'] = __builtins__
    return types.FunctionType(code, base_globals,
                              None, None, placeholder_cells)
def _load_class(cls, d):
"""
Loads additional properties into class `cls`.
"""
for k, v in d.items():
if isinstance(k, tuple):
typ, k = k
if typ == 'property':
v = property(*v)
elif typ == 'staticmethod':
v = staticmethod(v)
elif typ == 'classmethod':
v = classmethod(v)
setattr(cls, k, v)
return cls
def _load_namedtuple(name, fields):
"""
Loads a class generated by namedtuple
"""
from collections import namedtuple
return namedtuple(name, fields)
"""Constructors for 3rd party libraries
Note: These can never be renamed due to client compatibility issues"""
def _getobject(modname, attribute):
mod = __import__(modname, fromlist=[attribute])
return mod.__dict__[attribute]
| bsd-3-clause |
onepercentclub/django-token-auth | setup.py | 1 | 1301 | #!/usr/bin/env python
import os
import setuptools
import token_auth
# Package metadata script for django-token-auth; the long description is
# taken from the README next to this file.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()
# Run from the project root so relative paths in setup() resolve correctly.
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setuptools.setup(
    name="django-token-auth",
    version=token_auth.__version__,
    packages=setuptools.find_packages(),
    include_package_data=True,
    license='BSD',
    description='Token Authentication for Bluebottle',
    long_description=README,
    url="http://onepercentclub.com",
    author="1%Club Developers",
    author_email="devteam@onepercentclub.com",
    install_requires=[
        'Django>=1.6.8',
        'pycrypto>=2.6.1',
        'python-saml==2.1.7'
    ],
    tests_require=[
        'django_nose>=1.4',
        'factory-boy==2.3.1',
        'django-setuptest==0.1.4',
        'mock==1.0.1'
    ],
    test_suite="token_auth.runtests.runtests",
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: None',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content'
    ]
)
| gpl-2.0 |
alfmatos/NetworkManager | examples/python/nm-state.py | 5 | 2748 | #!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright (C) 2010 Red Hat, Inc.
#
# Python 2 example script: queries NetworkManager over D-Bus and prints the
# state of each network device and each active connection.
import dbus
bus = dbus.SystemBus()
proxy = bus.get_object("org.freedesktop.NetworkManager", "/org/freedesktop/NetworkManager")
manager = dbus.Interface(proxy, "org.freedesktop.NetworkManager")
# Get device-specific state
devices = manager.GetDevices()
for d in devices:
    dev_proxy = bus.get_object("org.freedesktop.NetworkManager", d)
    prop_iface = dbus.Interface(dev_proxy, "org.freedesktop.DBus.Properties")
    # Get the device's current state and interface name
    state = prop_iface.Get("org.freedesktop.NetworkManager.Device", "State")
    name = prop_iface.Get("org.freedesktop.NetworkManager.Device", "Interface")
    # and print them out
    if state == 8:   # activated (per the original inline comment)
        print "Device %s is activated" % name
    else:
        print "Device %s is not activated" % name
# Get active connection state
manager_prop_iface = dbus.Interface(proxy, "org.freedesktop.DBus.Properties")
active = manager_prop_iface.Get("org.freedesktop.NetworkManager", "ActiveConnections")
for a in active:
    ac_proxy = bus.get_object("org.freedesktop.NetworkManager", a)
    prop_iface = dbus.Interface(ac_proxy, "org.freedesktop.DBus.Properties")
    state = prop_iface.Get("org.freedesktop.NetworkManager.Connection.Active", "State")
    # Connections in NM are a collection of settings that describe everything
    # needed to connect to a specific network. Lets get those details so we
    # can find the user-readable name of the connection.
    con_path = prop_iface.Get("org.freedesktop.NetworkManager.Connection.Active", "Connection")
    service_proxy = bus.get_object("org.freedesktop.NetworkManager", con_path)
    con_iface = dbus.Interface(service_proxy, "org.freedesktop.NetworkManager.Settings.Connection")
    con_details = con_iface.GetSettings()
    con_name = con_details['connection']['id']
    if state == 2:   # activated (per the original inline comment)
        print "Connection '%s' is activated" % con_name
    else:
        print "Connection '%s' is activating" % con_name
| gpl-2.0 |
Mte90/remo | remo/events/api/serializers.py | 5 | 1279 | from rest_framework import serializers
from remo.base.templatetags.helpers import absolutify
from remo.events.models import Event
from remo.profiles.api.serializers import (FunctionalAreaSerializer,
UserSerializer)
class EventSerializer(serializers.HyperlinkedModelSerializer):
    """Compact serializer for the Event model (name plus hyperlink only)."""
    class Meta:
        model = Event
        # NOTE(review): '_url' is presumably the hyperlinked identity field
        # (project-specific URL_FIELD_NAME) -- confirm against settings.
        fields = ['name', '_url']
class EventDetailedSerializer(serializers.HyperlinkedModelSerializer):
    """Detailed serializer for the Event model."""
    # Nested serializers for related objects.
    categories = FunctionalAreaSerializer(many=True)
    owner = UserSerializer()
    # Computed by get_remo_url() below.
    remo_url = serializers.SerializerMethodField()
    # Flatten the related campaign down to its name.
    initiative = serializers.ReadOnlyField(source='campaign.name')

    class Meta:
        model = Event
        fields = ['name', 'description', 'start', 'end', 'timezone', 'city',
                  'region', 'country', 'lat', 'lon', 'owner', 'external_link',
                  'initiative', 'categories', 'estimated_attendance',
                  'planning_pad_url', 'hashtag', 'remo_url']

    def get_remo_url(self, obj):
        """
        Default method for fetching the url for the event
        in ReMo portal.
        """
        return absolutify(obj.get_absolute_url())
| bsd-3-clause |
pizzapanther/GAE-Bulk-Mailer | django/db/models/options.py | 99 | 23148 | from __future__ import unicode_literals
import re
from bisect import bisect
from django.conf import settings
from django.db.models.related import RelatedObject
from django.db.models.fields.related import ManyToManyRel
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.proxy import OrderWrt
from django.db.models.loading import get_models, app_cache_ready
from django.utils import six
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible
from django.utils.translation import activate, deactivate_all, get_language, string_concat
# Calculate the verbose_name by converting from InitialCaps to "lowercase with spaces".
get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
# Option names a model's inner ``class Meta`` may define; these are copied
# onto Options in contribute_to_class().  Any other attribute found in Meta
# raises TypeError there.
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
                 'unique_together', 'permissions', 'get_latest_by',
                 'order_with_respect_to', 'app_label', 'db_tablespace',
                 'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
                 'index_together')
@python_2_unicode_compatible
class Options(object):
    def __init__(self, meta, app_label=None):
        """Store the raw ``class Meta`` (or None) in ``self.meta`` and set
        every option to its default; real values are applied later by
        contribute_to_class().
        """
        self.local_fields, self.local_many_to_many = [], []
        self.virtual_fields = []
        self.module_name, self.verbose_name = None, None
        self.verbose_name_plural = None
        self.db_table = ''
        self.ordering = []
        self.unique_together = []
        self.index_together = []
        self.permissions = []
        self.object_name, self.app_label = None, app_label
        self.get_latest_by = None
        self.order_with_respect_to = None
        self.db_tablespace = settings.DEFAULT_TABLESPACE
        self.admin = None
        self.meta = meta
        self.pk = None
        self.has_auto_field, self.auto_field = False, None
        self.abstract = False
        self.managed = True
        self.proxy = False
        # For any class that is a proxy (including automatically created
        # classes for deferred object loading), proxy_for_model tells us
        # which class this model is proxying. Note that proxy_for_model
        # can create a chain of proxy models. For non-proxy models, the
        # variable is always None.
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # in the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = SortedDict()
        self.duplicate_targets = {}
        self.auto_created = False
        # To handle various inheritance situations, we need to track where
        # managers came from (concrete or abstract base classes).
        self.abstract_managers = []
        self.concrete_managers = []
        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.util import truncate_name
cls._meta = self
self.installed = re.sub('\.models$', '', cls.__module__) in settings.INSTALLED_APPS
# First, construct the default values for these options.
self.object_name = cls.__name__
self.module_name = self.object_name.lower()
self.verbose_name = get_verbose_name(self.object_name)
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
# unique_together can be either a tuple of tuples, or a single
# tuple of two strings. Normalize it to a tuple of tuples, so that
# calling code can uniformly expect that.
ut = meta_attrs.pop('unique_together', self.unique_together)
if ut and not isinstance(ut[0], (tuple, list)):
ut = (ut,)
self.unique_together = ut
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
# If the db_table wasn't provided, use the app_label + module_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.module_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
if self.order_with_respect_to:
self.order_with_respect_to = self.get_field(self.order_with_respect_to)
self.ordering = ('_order',)
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(six.itervalues(self.parents))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True,
auto_created=True)
model.add_to_class('id', auto)
# Determine any sets of fields that are pointing to the same targets
# (e.g. two ForeignKeys to the same remote model). The query
# construction code needs to know this. At the end of this,
# self.duplicate_targets will map each duplicate field column to the
# columns it duplicates.
collections = {}
for column, target in six.iteritems(self.duplicate_targets):
try:
collections[target].add(column)
except KeyError:
collections[target] = set([column])
self.duplicate_targets = {}
for elt in six.itervalues(collections):
if len(elt) == 1:
continue
for column in elt:
self.duplicate_targets[column] = elt.difference(set([column]))
def add_field(self, field):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if field.rel and isinstance(field.rel, ManyToManyRel):
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
if hasattr(self, '_m2m_cache'):
del self._m2m_cache
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
if hasattr(self, '_field_cache'):
del self._field_cache
del self._field_name_cache
if hasattr(self, '_name_map'):
del self._name_map
def add_virtual_field(self, field):
self.virtual_fields.append(field)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def pk_index(self):
"""
Returns the index of the primary key field in the self.fields list.
"""
return self.fields.index(self.pk)
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_text(self.app_label), smart_text(self.module_name))
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
lang = get_language()
deactivate_all()
raw = force_text(self.verbose_name)
activate(lang)
return raw
verbose_name_raw = property(verbose_name_raw)
def _swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
model_label = '%s.%s' % (self.app_label, self.object_name.lower())
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):
return swapped_for
return None
swapped = property(_swapped)
def _fields(self):
"""
The getter for self.fields. This returns the list of field objects
available to this model (including through parent models).
Callers are not permitted to modify this list, since it's a reference
to this instance (not a copy).
"""
try:
self._field_name_cache
except AttributeError:
self._fill_fields_cache()
return self._field_name_cache
fields = property(_fields)
def get_fields_with_model(self):
"""
Returns a sequence of (field, model) pairs for all fields. The "model"
element is None for fields on the current model. Mostly of use when
constructing queries so that we know which model a field belongs to.
"""
try:
self._field_cache
except AttributeError:
self._fill_fields_cache()
return self._field_cache
def _fill_fields_cache(self):
cache = []
for parent in self.parents:
for field, model in parent._meta.get_fields_with_model():
if model:
cache.append((field, model))
else:
cache.append((field, parent))
cache.extend([(f, None) for f in self.local_fields])
self._field_cache = tuple(cache)
self._field_name_cache = [x for x, _ in cache]
def _many_to_many(self):
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return list(self._m2m_cache)
many_to_many = property(_many_to_many)
def get_m2m_with_model(self):
"""
The many-to-many version of get_fields_with_model().
"""
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return list(six.iteritems(self._m2m_cache))
def _fill_m2m_cache(self):
cache = SortedDict()
for parent in self.parents:
for field, model in parent._meta.get_m2m_with_model():
if model:
cache[field] = model
else:
cache[field] = parent
for field in self.local_many_to_many:
cache[field] = None
self._m2m_cache = cache
def get_field(self, name, many_to_many=True):
"""
Returns the requested field by name. Raises FieldDoesNotExist on error.
"""
to_search = many_to_many and (self.fields + self.many_to_many) or self.fields
for f in to_search:
if f.name == name:
return f
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name))
def get_field_by_name(self, name):
"""
Returns the (field_object, model, direct, m2m), where field_object is
the Field instance for the given name, model is the model containing
this field (None for local fields), direct is True if the field exists
on this model, and m2m is True for many-to-many relations. When
'direct' is False, 'field_object' is the corresponding RelatedObject
for this field (since the field doesn't have an instance associated
with it).
Uses a cache internally, so after the first access, this is very fast.
"""
try:
try:
return self._name_map[name]
except AttributeError:
cache = self.init_name_map()
return cache[name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r'
% (self.object_name, name))
def get_all_field_names(self):
"""
Returns a list of all field names that are possible for this model
(including reverse relation names). This is used for pretty printing
debugging output (a list of choices), so any internal-only field names
are not included.
"""
try:
cache = self._name_map
except AttributeError:
cache = self.init_name_map()
names = sorted(cache.keys())
# Internal-only names end with "+" (symmetrical m2m related names being
# the main example). Trim them.
return [val for val in names if not val.endswith('+')]
def init_name_map(self):
"""
Initialises the field name -> field object mapping.
"""
cache = {}
# We intentionally handle related m2m objects first so that symmetrical
# m2m accessor names can be overridden, if necessary.
for f, model in self.get_all_related_m2m_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, True)
for f, model in self.get_all_related_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, False)
for f, model in self.get_m2m_with_model():
cache[f.name] = (f, model, True, True)
for f, model in self.get_fields_with_model():
cache[f.name] = (f, model, True, False)
if app_cache_ready():
self._name_map = cache
return cache
def get_add_permission(self):
return 'add_%s' % self.object_name.lower()
def get_change_permission(self):
return 'change_%s' % self.object_name.lower()
def get_delete_permission(self):
return 'delete_%s' % self.object_name.lower()
def get_all_related_objects(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
return [k for k, v in self.get_all_related_objects_with_model(
local_only=local_only, include_hidden=include_hidden,
include_proxy_eq=include_proxy_eq)]
def get_all_related_objects_with_model(self, local_only=False,
include_hidden=False,
include_proxy_eq=False):
"""
Returns a list of (related-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
predicates = []
if local_only:
predicates.append(lambda k, v: not v)
if not include_hidden:
predicates.append(lambda k, v: not k.field.rel.is_hidden())
cache = (self._related_objects_proxy_cache if include_proxy_eq
else self._related_objects_cache)
return [t for t in cache.items() if all(p(*t) for p in predicates)]
def _fill_related_objects_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True):
if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
# Collect also objects which are in relation to some proxy child/parent of self.
proxy_cache = cache.copy()
for klass in get_models(include_auto_created=True, only_installed=False):
if not klass._meta.swapped:
for f in klass._meta.local_fields:
if f.rel and not isinstance(f.rel.to, six.string_types):
if self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
elif self.concrete_model == f.rel.to._meta.concrete_model:
proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
self._related_objects_cache = cache
self._related_objects_proxy_cache = proxy_cache
def get_all_related_many_to_many_objects(self, local_only=False):
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
if local_only:
return [k for k, v in cache.items() if not v]
return list(cache)
def get_all_related_m2m_objects_with_model(self):
"""
Returns a list of (related-m2m-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
return list(six.iteritems(cache))
def _fill_related_many_to_many_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_m2m_objects_with_model():
if obj.field.creation_counter < 0 and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models(only_installed=False):
if not klass._meta.swapped:
for f in klass._meta.local_many_to_many:
if (f.rel
and not isinstance(f.rel.to, six.string_types)
and self == f.rel.to._meta):
cache[RelatedObject(f.rel.to, klass, f)] = None
if app_cache_ready():
self._related_many_to_many_cache = cache
return cache
def get_base_chain(self, model):
"""
Returns a list of parent classes leading to 'model' (order from closet
to most distant ancestor). This has to handle the case were 'model' is
a granparent or even more distant relation.
"""
if not self.parents:
return
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
raise TypeError('%r is not an ancestor of this model'
% model._meta.module_name)
def get_parent_list(self):
"""
Returns a list of all the ancestor of this model as a list. Useful for
determining if something is an ancestor, regardless of lineage.
"""
result = set()
for parent in self.parents:
result.add(parent)
result.update(parent._meta.get_parent_list())
return result
def get_ancestor_link(self, ancestor):
"""
Returns the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
# In case of a proxied model, the first link
# of the chain to the ancestor is that parent
# links
return self.parents[parent] or parent_link
def get_ordered_objects(self):
"Returns a list of Options objects that are ordered with respect to this object."
if not hasattr(self, '_ordered_objects'):
objects = []
# TODO
#for klass in get_models(get_app(self.app_label)):
# opts = klass._meta
# if opts.order_with_respect_to and opts.order_with_respect_to.rel \
# and self == opts.order_with_respect_to.rel.to._meta:
# objects.append(opts)
self._ordered_objects = objects
return self._ordered_objects
| bsd-2-clause |
Superjom/NeuralNetworks | models/mlp.py | 1 | 5692 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Feb 24, 2014
@author: Chunwei Yan @ PKU
@mail: yanchunwei@outlook.com
'''
import sys
sys.path.append('..')
import numpy
import theano
from theano import tensor as T
from softmax_regression import SoftmaxRegression
class HiddenLayer(object):
    '''A single fully-connected layer: output = activation(X . W + b).

    W defaults to the Glorot/Bengio uniform initialisation, with the range
    widened 4x for tanh and 16x for sigmoid (as in the deep-learning
    tutorial's MLP recipe).
    '''
    def __init__(self, input, n_visible, n_output, rng,
            activation=T.tanh, W=None, b=None, learning_rate=0.01):
        '''
        :parameters:
            input: theano symbolic variable feeding this layer
            n_visible: int, number of input units
            n_output: int, number of neurons in this layer
            rng: numpy.random.RandomState or None (seeded default used then)
            activation: elementwise activation (default T.tanh)
            W, b: optional pre-built theano shared parameters
            learning_rate: stored for callers; not used inside this class
        '''
        # `rng`, `W` and `b` may be numpy/theano objects whose truth value
        # is ambiguous or forbidden, so compare against None explicitly
        # (the original `if not W:` raised for a passed-in theano variable).
        if rng is None:
            rng = numpy.random.RandomState(1234)
        self.rng = rng
        if W is None:
            # bound = 4 * sqrt(6 / (fan_in + fan_out)) for tanh; sigmoid
            # uses a range 4x wider still (16 * sqrt(...)).
            bound = 4 * numpy.sqrt(6. / (n_output + n_visible))
            if activation == theano.tensor.nnet.sigmoid:
                bound *= 4
            initial_W = numpy.asarray(
                rng.uniform(
                    low=-bound,
                    high=bound,
                    size=(n_visible, n_output)), dtype=theano.config.floatX)
            W = theano.shared(
                value=initial_W,
                name='W',
                borrow=True,
            )
            # NOTE(review): the original called T.unbroadcast(W) and discarded
            # the result, which is a no-op; it has been removed.
        if b is None:
            b_values = numpy.zeros((n_output,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)
        self.X = input
        self.W = W
        self.b = b
        self.learning_rate = learning_rate
        self.n_visible, self.n_output = n_visible, n_output
        self.activation = activation
        self.params = [self.W, self.b]
        # symbolic output expression; downstream layers hook onto this
        self.output = self.activation(
            T.dot(self.X, self.W) + self.b)
class MultiLayerPerceptron(object):
    '''A two-layer network: a tanh hidden layer feeding a softmax output layer.

    :parameters:
        rng: numpy.random.RandomState or None
        input: theano symbolic input variable, or None to create a fvector
        n_visible: int
            number of visible(input) nodes
        n_hidden: int
            number of hidden nodes
        n_output: int
            number of output classes
        L1_reg, L2_reg: regularisation weights; only the L2 term enters the
            cost (the L1 term is computed but deliberately left out)
        learning_rate: SGD step size
    '''
    def __init__(self, rng=None, input=None, n_visible=100, n_hidden=50, n_output=10,
            L1_reg=0.0, L2_reg=0.01, learning_rate=0.001):
        self.learning_rate = learning_rate
        self.L1_reg, self.L2_reg = L1_reg, L2_reg
        # `input` can be a theano variable whose truth value must not be
        # evaluated (`not input` raises), so compare against None.
        if input is None:
            input = T.fvector('x')
        self.x = input
        # create two layers; the hidden layer must read from self.x (the
        # original passed the raw `input`, which was None when defaulted)
        self.hidden_layer = HiddenLayer(
            rng = rng,
            input = self.x,
            n_visible = n_visible,
            n_output = n_hidden,
            activation = T.tanh
        )
        self.output_layer = SoftmaxRegression(
            input = self.hidden_layer.output,
            n_features = n_hidden,
            n_states = n_output,
        )
        # methods mapper: re-export the output layer's cost/error helpers
        self.negative_log_likelihood = self.output_layer.negative_log_likelihood
        self.errors = self.output_layer.errors
    def get_cost(self):
        '''Build and return the symbolic training cost (NLL + L2 penalty).

        Side effects: creates self.y (target label), self.L1, self.L2_sqr
        and self.params.
        '''
        self.y = T.bscalar('y')
        self.L1 = abs(self.hidden_layer.W).sum() \
                + abs(self.output_layer.W).sum()
        self.L2_sqr = (self.hidden_layer.W ** 2).sum() \
                + (self.output_layer.W ** 2).sum()
        self.params = self.hidden_layer.params + self.output_layer.params
        # the L1 term is intentionally not added to the cost (kept around
        # for experimentation; see the commented-out line in the original)
        self.cost = self.negative_log_likelihood(self.y) \
                + self.L2_reg * self.L2_sqr
        return self.cost
    def compile(self):
        '''Compile self.predict and self.trainer theano functions (plain SGD).'''
        cost = self.get_cost()
        # predict model
        self.predict = theano.function(
            inputs = [self.x],
            outputs = self.output_layer.y_pred
        )
        # one gradient per parameter, in the same order as self.params
        gparams = [T.grad(cost, param) for param in self.params]
        updates = []
        for param, gparam in zip(self.params, gparams):
            # cast keeps the update in floatX so the shared variable's
            # dtype is preserved
            up = T.cast(param - self.learning_rate * gparam,
                    theano.config.floatX)
            updates.append(
                (param, up))
        # train model: returns the error on the given example and applies
        # one SGD step via `updates`
        self.trainer = theano.function(
            inputs = [self.x, self.y],
            outputs = self.errors(self.y),
            updates = updates)
# Smoke-test / demo: train the MLP on random data and print the mean error
# per epoch. NOTE(review): Python 2 syntax (print statements, xrange).
if __name__ == '__main__':
    x = T.fvector('x')
    mlp = MultiLayerPerceptron(
        input = x,
        n_visible = 50,
        n_hidden = 20,
        n_output = 5,
        learning_rate = 0.03,
    )
    print 'type of W', type(mlp.hidden_layer.W)
    mlp.compile()
    rng = numpy.random
    # 400 random examples, 50 features each; labels in [0, 5).
    # NOTE(review): labels are cast to floatX although trainer declares y as
    # a bscalar -- presumably theano coerces; confirm.
    x_set = rng.randn(400, 50).astype(theano.config.floatX)
    y_set = rng.randint(size=400, low=0, high=5).astype(theano.config.floatX)
    n_rcds = x_set.shape[0]
    #print 'hid.b:\t', mlp.hidden_layer.b.eval()
    #print 'output.b:\t', mlp.output_layer.b.eval()
    for no in xrange(100):
        errors = []
        y_preds = []
        # one pass over the data set, one SGD step per example
        for i in xrange(n_rcds):
            x = numpy.array(x_set[i]).astype(
                theano.config.floatX)
            y = y_set[i]
            y_pred = mlp.predict(x)[0]
            error = mlp.trainer(x, y)
            #print 'error', error
            errors.append(error)
            y_preds.append(y_pred)
        e = numpy.array(errors).mean()
        print "%dth\t%f" % (no, e)
        print "original:\t", y_set[:30]
        print "predict:\t", y_preds[:30]
        #print 'hid.b:\t', mlp.hidden_layer.b.eval()
        #print 'output.b:\t', mlp.output_layer.b.eval()
# NOTE(review): redundant second __main__ guard kept from the original.
if __name__ == "__main__":
    pass
| apache-2.0 |
proxysh/Safejumper-for-Mac | buildlinux/env32/lib/python2.7/site-packages/twisted/test/test_hook.py | 20 | 4244 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.hook module.
"""
from twisted.python import hook
from twisted.trial import unittest
class BaseClass:
    """Dummy hook target used by the tests; counts calls to ``func``."""
    def __init__(self):
        """Start every call counter at zero."""
        self.calledBasePre = 0
        self.calledBasePost = 0
        self.calledBase = 0
    def func(self, a, b):
        """Dummy method: checks its arguments and records the invocation."""
        assert a == 1
        assert b == 2
        self.calledBase += 1
class SubClass(BaseClass):
    """Dummy subclass: chains up to BaseClass and keeps its own counters."""
    def __init__(self):
        """Initialise the subclass counters on top of the base ones."""
        BaseClass.__init__(self)
        self.calledSubPre = 0
        self.calledSubPost = 0
        self.calledSub = 0
    def func(self, a, b):
        """Dummy method: delegates to BaseClass.func, then records the call."""
        assert a == 1
        assert b == 2
        BaseClass.func(self, a, b)
        self.calledSub += 1
# Snapshot the pristine class dictionaries so HookTests.setUp() can restore
# the classes after hooks were added/removed by a previous test.
_clean_BaseClass = BaseClass.__dict__.copy()
_clean_SubClass = SubClass.__dict__.copy()
def basePre(base, a, b):
    """Pre-hook for BaseClass.func: bump the base pre-call counter."""
    base.calledBasePre += 1
def basePost(base, a, b):
    """Post-hook for BaseClass.func: bump the base post-call counter."""
    base.calledBasePost += 1
def subPre(sub, a, b):
    """Pre-hook for SubClass.func: bump the subclass pre-call counter."""
    sub.calledSubPre += 1
def subPost(sub, a, b):
    """Post-hook for SubClass.func: bump the subclass post-call counter."""
    sub.calledSubPost += 1
class HookTests(unittest.TestCase):
    """
    test case to make sure hooks are called
    """
    def setUp(self):
        """Make sure we have clean versions of our classes."""
        # restore the class dicts captured at import time, removing any
        # hook wrappers left over from a previous test
        BaseClass.__dict__.clear()
        BaseClass.__dict__.update(_clean_BaseClass)
        SubClass.__dict__.clear()
        SubClass.__dict__.update(_clean_SubClass)
    def testBaseHook(self):
        """make sure that the base class's hook is called reliably
        """
        base = BaseClass()
        self.assertEqual(base.calledBase, 0)
        self.assertEqual(base.calledBasePre, 0)
        # no hooks installed yet: only the method body runs
        base.func(1,2)
        self.assertEqual(base.calledBase, 1)
        self.assertEqual(base.calledBasePre, 0)
        hook.addPre(BaseClass, "func", basePre)
        base.func(1, b=2)
        self.assertEqual(base.calledBase, 2)
        self.assertEqual(base.calledBasePre, 1)
        hook.addPost(BaseClass, "func", basePost)
        base.func(1, b=2)
        self.assertEqual(base.calledBasePost, 1)
        self.assertEqual(base.calledBase, 3)
        self.assertEqual(base.calledBasePre, 2)
        # after removal, the method must run without firing any hook
        hook.removePre(BaseClass, "func", basePre)
        hook.removePost(BaseClass, "func", basePost)
        base.func(1, b=2)
        self.assertEqual(base.calledBasePost, 1)
        self.assertEqual(base.calledBase, 4)
        self.assertEqual(base.calledBasePre, 2)
    def testSubHook(self):
        """test interactions between base-class hooks and subclass hooks
        """
        sub = SubClass()
        self.assertEqual(sub.calledSub, 0)
        self.assertEqual(sub.calledBase, 0)
        sub.func(1, b=2)
        self.assertEqual(sub.calledSub, 1)
        self.assertEqual(sub.calledBase, 1)
        # hook on the subclass only: the base counters must not move
        hook.addPre(SubClass, 'func', subPre)
        self.assertEqual(sub.calledSub, 1)
        self.assertEqual(sub.calledBase, 1)
        self.assertEqual(sub.calledSubPre, 0)
        self.assertEqual(sub.calledBasePre, 0)
        sub.func(1, b=2)
        self.assertEqual(sub.calledSub, 2)
        self.assertEqual(sub.calledBase, 2)
        self.assertEqual(sub.calledSubPre, 1)
        self.assertEqual(sub.calledBasePre, 0)
        # let the pain begin
        hook.addPre(BaseClass, 'func', basePre)
        # calling BaseClass.func directly bypasses SubClass.func, so only
        # the base hook fires here
        BaseClass.func(sub, 1, b=2)
        # sub.func(1, b=2)
        self.assertEqual(sub.calledBase, 3)
        self.assertEqual(sub.calledBasePre, 1, str(sub.calledBasePre))
        # calling through the subclass fires both hooks
        sub.func(1, b=2)
        self.assertEqual(sub.calledBasePre, 2)
        self.assertEqual(sub.calledBase, 4)
        self.assertEqual(sub.calledSubPre, 2)
        self.assertEqual(sub.calledSub, 3)
# suite definition kept for the trial runner
testCases = [HookTests]
| gpl-2.0 |
cloud-fan/spark | python/pyspark/pandas/tests/data_type_ops/test_binary_ops.py | 1 | 6682 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class BinaryOpsTest(PandasOnSparkTestCase, TestCasesUtils):
    """Operator tests for a pandas-on-Spark Series of bytes.

    Only ``+`` between bytes operands is supported; every other arithmetic,
    bitwise or mixed-type operation must raise TypeError, matching pandas.
    """
    @property
    def pser(self):
        # reference (plain pandas) bytes Series
        return pd.Series([b"1", b"2", b"3"])
    @property
    def psser(self):
        # same data as a pandas-on-Spark Series
        return ps.from_pandas(self.pser)
    def test_add(self):
        psser = self.psser
        pser = self.pser
        # bytes + bytes works and must match pandas
        self.assert_eq(psser + b"1", pser + b"1")
        self.assert_eq(psser + psser, pser + pser)
        self.assert_eq(psser + psser.astype("bytes"), pser + pser.astype("bytes"))
        # any non-bytes operand is rejected
        self.assertRaises(TypeError, lambda: psser + "x")
        self.assertRaises(TypeError, lambda: psser + 1)
        with option_context("compute.ops_on_diff_frames", True):
            # self.pssers holds non-binary Series of various dtypes
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser + psser)
            self.assert_eq(self.psser + self.psser, self.pser + self.pser)
    def test_sub(self):
        self.assertRaises(TypeError, lambda: self.psser - "x")
        self.assertRaises(TypeError, lambda: self.psser - 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser - psser)
    def test_mul(self):
        self.assertRaises(TypeError, lambda: self.psser * "x")
        self.assertRaises(TypeError, lambda: self.psser * 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser * psser)
    def test_truediv(self):
        self.assertRaises(TypeError, lambda: self.psser / "x")
        self.assertRaises(TypeError, lambda: self.psser / 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser / psser)
    def test_floordiv(self):
        self.assertRaises(TypeError, lambda: self.psser // "x")
        self.assertRaises(TypeError, lambda: self.psser // 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser // psser)
    def test_mod(self):
        self.assertRaises(TypeError, lambda: self.psser % "x")
        self.assertRaises(TypeError, lambda: self.psser % 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser % psser)
    def test_pow(self):
        self.assertRaises(TypeError, lambda: self.psser ** "x")
        self.assertRaises(TypeError, lambda: self.psser ** 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser ** psser)
    def test_radd(self):
        # reflected add with a bytes literal works; anything else raises
        self.assert_eq(b"1" + self.psser, b"1" + self.pser)
        self.assertRaises(TypeError, lambda: "x" + self.psser)
        self.assertRaises(TypeError, lambda: 1 + self.psser)
    def test_rsub(self):
        self.assertRaises(TypeError, lambda: "x" - self.psser)
        self.assertRaises(TypeError, lambda: 1 - self.psser)
    def test_rmul(self):
        self.assertRaises(TypeError, lambda: "x" * self.psser)
        self.assertRaises(TypeError, lambda: 2 * self.psser)
    def test_rtruediv(self):
        self.assertRaises(TypeError, lambda: "x" / self.psser)
        self.assertRaises(TypeError, lambda: 1 / self.psser)
    def test_rfloordiv(self):
        self.assertRaises(TypeError, lambda: "x" // self.psser)
        self.assertRaises(TypeError, lambda: 1 // self.psser)
    def test_rmod(self):
        self.assertRaises(TypeError, lambda: 1 % self.psser)
    def test_rpow(self):
        self.assertRaises(TypeError, lambda: "x" ** self.psser)
        self.assertRaises(TypeError, lambda: 1 ** self.psser)
    def test_and(self):
        # bitwise operators are undefined for a binary Series
        self.assertRaises(TypeError, lambda: self.psser & True)
        self.assertRaises(TypeError, lambda: self.psser & False)
        self.assertRaises(TypeError, lambda: self.psser & self.psser)
    def test_rand(self):
        self.assertRaises(TypeError, lambda: True & self.psser)
        self.assertRaises(TypeError, lambda: False & self.psser)
    def test_or(self):
        self.assertRaises(TypeError, lambda: self.psser | True)
        self.assertRaises(TypeError, lambda: self.psser | False)
        self.assertRaises(TypeError, lambda: self.psser | self.psser)
    def test_ror(self):
        self.assertRaises(TypeError, lambda: True | self.psser)
        self.assertRaises(TypeError, lambda: False | self.psser)
    def test_from_to_pandas(self):
        # round-trip pandas <-> pandas-on-Spark must preserve the data
        data = [b"1", b"2", b"3"]
        pser = pd.Series(data)
        psser = ps.Series(data)
        self.assert_eq(pser, psser.to_pandas())
        self.assert_eq(ps.from_pandas(pser), psser)
    def test_astype(self):
        pser = self.pser
        psser = self.psser
        self.assert_eq(pd.Series(["1", "2", "3"]), psser.astype(str))
        self.assert_eq(pser.astype("category"), psser.astype("category"))
        cat_type = CategoricalDtype(categories=[b"2", b"3", b"1"])
        self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))
# Standalone runner: emits JUnit-style XML reports when xmlrunner is
# installed, otherwise falls back to the default unittest runner.
if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.data_type_ops.test_binary_ops import *  # noqa: F401
    try:
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
shanglt/youtube-dl | youtube_dl/extractor/wistia.py | 128 | 2050 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import ExtractorError
class WistiaIE(InfoExtractor):
    """Extractor for Wistia embed iframes (fast.wistia.net/embed/iframe/<id>)."""
    _VALID_URL = r'https?://(?:fast\.)?wistia\.net/embed/iframe/(?P<id>[a-z0-9]+)'
    # JSON media description endpoint, formatted with the video id
    _API_URL = 'http://fast.wistia.com/embed/medias/{0:}.json'
    _TEST = {
        'url': 'http://fast.wistia.net/embed/iframe/sh7fpupwlt',
        'md5': 'cafeb56ec0c53c18c97405eecb3133df',
        'info_dict': {
            'id': 'sh7fpupwlt',
            'ext': 'mov',
            'title': 'Being Resourceful',
            'duration': 117,
        },
    }
    def _real_extract(self, url):
        """Fetch Wistia's JSON media description and map its assets to the
        youtube-dl info dict (formats + thumbnails)."""
        video_id = self._match_id(url)
        request = compat_urllib_request.Request(self._API_URL.format(video_id))
        request.add_header('Referer', url)  # Some videos require this.
        data_json = self._download_json(request, video_id)
        if data_json.get('error'):
            raise ExtractorError('Error while getting the playlist',
                                 expected=True)
        data = data_json['media']
        formats = []
        thumbnails = []
        # each asset is either a still image (thumbnail) or a video rendition
        for atype, a in data['assets'].items():
            if atype == 'still':
                thumbnails.append({
                    'url': a['url'],
                    'resolution': '%dx%d' % (a['width'], a['height']),
                })
                continue
            if atype == 'preview':
                # preview clips are not full renditions; skip them
                continue
            # NOTE(review): a['size'] / a['ext'] are indexed directly and
            # would raise KeyError for assets lacking them -- confirm the
            # API always provides these for non-still, non-preview assets.
            formats.append({
                'format_id': atype,
                'url': a['url'],
                'width': a['width'],
                'height': a['height'],
                'filesize': a['size'],
                'ext': a['ext'],
                # prefer the original upload over transcoded renditions
                'preference': 1 if atype == 'original' else None,
            })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': data['name'],
            'formats': formats,
            'thumbnails': thumbnails,
            'duration': data.get('duration'),
        }
| unlicense |
dbbhattacharya/kitsune | vendor/packages/logilab-astng/test/unittest_nodes.py | 6 | 10388 | # This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:thenault@gmail.com
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""tests for specific behaviour of astng nodes
"""
import sys
from logilab.common import testlib
from logilab.astng import builder, nodes, NotFoundError
from logilab.astng.nodes_as_string import as_string
from data import module as test_module
# Single shared builder instance: AST building is expensive, so every test
# case in this module reuses it.
abuilder = builder.ASTNGBuilder()
class _NodeTC(testlib.TestCase):
    """Base test case providing a lazily built, per-subclass cached AST.

    Subclasses set ``CODE``; the tree is built on first access and stored
    on the subclass itself (not inherited) so it is built exactly once.
    """
    CODE = None
    @property
    def astng(self):
        klass = self.__class__
        if 'CODE_ASTNG' not in klass.__dict__:
            klass.CODE_ASTNG = abuilder.string_build(self.CODE)
        return klass.__dict__['CODE_ASTNG']
class IfNodeTC(_NodeTC):
    """test transformation of If Node"""
    CODE = """
        if 0:
            print

        if True:
            print
        else:
            pass

        if "":
            print
        elif []:
            raise

        if 1:
            print
        elif True:
            print
        elif func():
            pass
        else:
            raise
        """
    def test_if_elif_else_node(self):
        """test transformation for If node"""
        # Four top-level statements, each an If node.
        self.assertEquals(len(self.astng.body), 4)
        for stmt in self.astng.body:
            self.assertIsInstance( stmt, nodes.If)
        self.failIf(self.astng.body[0].orelse)  # simple If
        self.assertIsInstance(self.astng.body[1].orelse[0], nodes.Pass)  # If / else
        self.assertIsInstance(self.astng.body[2].orelse[0], nodes.If)  # If / elif
        # elif chains nest: each elif is an If in the previous orelse.
        self.assertIsInstance(self.astng.body[3].orelse[0].orelse[0], nodes.If)
    def test_block_range(self):
        """Check block_range() (start, end) line pairs for If statements."""
        # XXX ensure expected values
        self.assertEquals(self.astng.block_range(1), (0, 22))
        self.assertEquals(self.astng.block_range(10), (0, 22)) # XXX (10, 22) ?
        self.assertEquals(self.astng.body[1].block_range(5), (5, 6))
        self.assertEquals(self.astng.body[1].block_range(6), (6, 6))
        self.assertEquals(self.astng.body[1].orelse[0].block_range(7), (7, 8))
        self.assertEquals(self.astng.body[1].orelse[0].block_range(8), (8, 8))
class TryExceptNodeTC(_NodeTC):
    """Block ranges for a try/except/else statement."""
    CODE = """
        try:
            print 'pouet'
        except IOError:
            pass
        except UnicodeError:
            print
        else:
            print
        """
    def test_block_range(self):
        """Check block_range() for every source line of the try/except."""
        # XXX ensure expected values
        self.assertEquals(self.astng.body[0].block_range(1), (1, 8))
        self.assertEquals(self.astng.body[0].block_range(2), (2, 2))
        self.assertEquals(self.astng.body[0].block_range(3), (3, 8))
        self.assertEquals(self.astng.body[0].block_range(4), (4, 4))
        self.assertEquals(self.astng.body[0].block_range(5), (5, 5))
        self.assertEquals(self.astng.body[0].block_range(6), (6, 6))
        self.assertEquals(self.astng.body[0].block_range(7), (7, 7))
        self.assertEquals(self.astng.body[0].block_range(8), (8, 8))
class TryFinallyNodeTC(_NodeTC):
    """Block ranges for a try/finally statement."""
    CODE = """
        try:
            print 'pouet'
        finally:
            print 'pouet'
        """
    def test_block_range(self):
        """Check block_range() for every source line of the try/finally."""
        # XXX ensure expected values
        self.assertEquals(self.astng.body[0].block_range(1), (1, 4))
        self.assertEquals(self.astng.body[0].block_range(2), (2, 2))
        self.assertEquals(self.astng.body[0].block_range(3), (3, 4))
        self.assertEquals(self.astng.body[0].block_range(4), (4, 4))
class TryFinally25NodeTC(_NodeTC):
    """Block ranges for try/except/finally (new in Python 2.5)."""
    CODE = """
        try:
            print 'pouet'
        except Exception:
            print 'oops'
        finally:
            print 'pouet'
        """
    def test_block_range(self):
        """Check block_range(); skipped when the interpreter predates 2.5."""
        if sys.version_info < (2, 5):
            self.skip('require python >= 2.5')
        # XXX ensure expected values
        self.assertEquals(self.astng.body[0].block_range(1), (1, 6))
        self.assertEquals(self.astng.body[0].block_range(2), (2, 2))
        self.assertEquals(self.astng.body[0].block_range(3), (3, 4))
        self.assertEquals(self.astng.body[0].block_range(4), (4, 4))
        self.assertEquals(self.astng.body[0].block_range(5), (5, 5))
        self.assertEquals(self.astng.body[0].block_range(6), (6, 6))
# Shared fixtures: build the data test modules once at import time, since
# building them is the expensive part of the tests below.
MODULE = abuilder.module_build(test_module)
MODULE2 = abuilder.file_build('data/module2.py', 'data.module2')
class ImportNodeTC(testlib.TestCase):
    """Resolution and stringification of Import / From nodes."""
    def test_import_self_resolve(self):
        """An 'import os as myos' style binding resolves to the os module."""
        myos = MODULE2.igetattr('myos').next()
        self.failUnless(isinstance(myos, nodes.Module), myos)
        self.failUnlessEqual(myos.name, 'os')
        self.failUnlessEqual(myos.qname(), 'os')
        self.failUnlessEqual(myos.pytype(), '__builtin__.module')
    def test_from_self_resolve(self):
        """'from x import y as z' bindings resolve to the original objects."""
        spawn = MODULE.igetattr('spawn').next()
        self.failUnless(isinstance(spawn, nodes.Class), spawn)
        self.failUnlessEqual(spawn.root().name, 'logilab.common.shellutils')
        self.failUnlessEqual(spawn.qname(), 'logilab.common.shellutils.Execute')
        self.failUnlessEqual(spawn.pytype(), '__builtin__.classobj')
        abspath = MODULE2.igetattr('abspath').next()
        self.failUnless(isinstance(abspath, nodes.Function), abspath)
        self.failUnlessEqual(abspath.root().name, 'os.path')
        self.failUnlessEqual(abspath.qname(), 'os.path.abspath')
        self.failUnlessEqual(abspath.pytype(), '__builtin__.function')
    def test_real_name(self):
        """real_name() maps local aliases back to the imported name."""
        from_ = MODULE['spawn']
        self.assertEquals(from_.real_name('spawn'), 'Execute')
        imp_ = MODULE['os']
        self.assertEquals(imp_.real_name('os'), 'os')
        self.assertRaises(NotFoundError, imp_.real_name, 'os.path')
        imp_ = MODULE['spawn']
        self.assertEquals(imp_.real_name('spawn'), 'Execute')
        # The original (non-aliased) name is unknown to the From node.
        self.assertRaises(NotFoundError, imp_.real_name, 'Execute')
        imp_ = MODULE2['YO']
        self.assertEquals(imp_.real_name('YO'), 'YO')
        self.assertRaises(NotFoundError, imp_.real_name, 'data')
    def test_as_string(self):
        """Import nodes round-trip through as_string()."""
        ast = MODULE['modutils']
        self.assertEquals(as_string(ast), "from logilab.common import modutils")
        ast = MODULE['spawn']
        self.assertEquals(as_string(ast), "from logilab.common.shellutils import Execute as spawn")
        ast = MODULE['os']
        self.assertEquals(as_string(ast), "import os.path")
    def test_module_as_string(self):
        """just check as_string on a whole module doesn't raise an exception
        """
        self.assert_(as_string(MODULE))
        self.assert_(as_string(MODULE2))
class CmpNodeTC(testlib.TestCase):
    """Comparison expressions must survive an as_string round trip."""
    def test_as_string(self):
        source = "a == 2"
        tree = abuilder.string_build(source)
        self.assertEquals(as_string(tree), source)
class ConstNodeTC(testlib.TestCase):
    """const_factory must proxy literals of every builtin type correctly."""
    def _test(self, value):
        # Shared assertions: the const node proxies the builtin class that
        # matches the literal's type, and keeps the literal object itself.
        node = nodes.const_factory(value)
        proxied = node._proxied
        self.assertIsInstance(proxied, nodes.Class)
        self.assertEquals(proxied.name, value.__class__.__name__)
        self.assertIs(node.value, value)
        self.failUnless(proxied.parent)
        self.assertEquals(proxied.root().name, value.__class__.__module__)
    def test_none(self):
        self._test(None)
    def test_bool(self):
        self._test(True)
    def test_int(self):
        self._test(1)
    def test_float(self):
        self._test(1.0)
    def test_complex(self):
        self._test(1.0j)
    def test_str(self):
        self._test('a')
    def test_unicode(self):
        self._test(u'a')
class ArgumentsNodeTC(testlib.TestCase):
    """Line numbering of Arguments nodes for functions and lambdas."""
    def test_linenumbering(self):
        """Arguments spans cover the (possibly multi-line) parameter list."""
        ast = abuilder.string_build('''
            def func(a,
                b): pass
            x = lambda x: None
            ''')
        self.assertEquals(ast['func'].args.fromlineno, 2)
        self.assertEquals(ast['func'].args.tolineno, 3)
        # The parameter list itself is not a statement.
        self.failIf(ast['func'].args.is_statement)
        xlambda = ast['x'].infer().next()
        self.assertEquals(xlambda.args.fromlineno, 4)
        self.assertEquals(xlambda.args.tolineno, 4)
        self.failIf(xlambda.args.is_statement)
class SliceNodeTC(testlib.TestCase):
    """Round-tripping of slice and subscript expressions through as_string."""
    def test(self):
        """Simple subscript/slice/del forms must round-trip unchanged."""
        for code in ('a[0]', 'a[1:3]', 'a[:-1:step]', 'a[:,newaxis]',
                     'a[newaxis,:]', 'del L[::2]', 'del A[1]', 'del Br[:]'):
            ast = abuilder.string_build(code)
            self.assertEquals(ast.as_string(), code)
    def test_slice_and_subscripts(self):
        """A mixed program of slices, extended slices and del targets."""
        code = """a[:1] = bord[2:]
a[:1] = bord[2:]
del bree[3:d]
bord[2:]
del av[d::f], a[df:]
a[:1] = bord[2:]
del SRC[::1,newaxis,1:]
tous[vals] = 1010
del thousand[key]
del a[::2], a[:-1:step]
del Fee.form[left:]
aout.vals = miles.of_stuff
del (ccok, (name.thing, foo.attrib.value)), Fee.form[left:]
if all[1] == bord[0:]:
    pass"""
        ast = abuilder.string_build(code)
        self.assertEquals(ast.as_string(), code)
class EllipsisNodeTC(testlib.TestCase):
    """Ellipsis subscripts must survive an as_string round trip."""
    def test(self):
        source = 'a[...]'
        tree = abuilder.string_build(source)
        self.assertEquals(tree.as_string(), source)
if __name__ == '__main__':
    # Allow running this test module directly.
    testlib.unittest_main()
| bsd-3-clause |
blaggacao/openupgradelib | docs/conf.py | 3 | 8554 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# openupgradelib documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
# Sphinx runs from docs/, so the package root is one directory up.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
# Imported for its __version__, used below for |version| and |release|.
import openupgradelib
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# autodoc pulls API docs from docstrings; viewcode links to highlighted source.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenUpgrade Library'
copyright = u'2015, Odoo Community Association'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# Both values come straight from the package, so the docs never drift
# from the code's version.
# The short X.Y version.
version = openupgradelib.__version__
# The full version, including alpha/beta/rc tags.
release = openupgradelib.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# '_build' is Sphinx's own output directory.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
# NOTE(review): the _static directory must exist in the repo — confirm it
# is tracked, otherwise Sphinx warns during build.
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# Base name for the files generated by the htmlhelp builder.
htmlhelp_basename = 'openupgradelibdoc'
# -- Options for LaTeX output ------------------------------------------
# All LaTeX options are left at their defaults (commented out below).
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'openupgradelib.tex',
     u'OpenUpgrade Library Documentation',
     u'Odoo Community Association', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 = user commands.
man_pages = [
    ('index', 'openupgradelib',
     u'OpenUpgrade Library Documentation',
     [u'Odoo Community Association'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'openupgradelib',
     u'OpenUpgrade Library Documentation',
     u'Odoo Community Association',
     'openupgradelib',
     'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| agpl-3.0 |
markoshorro/gem5 | src/mem/slicc/ast/FormalParamAST.py | 53 | 2683 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.AST import AST
from slicc.symbols import Var
class FormalParamAST(AST):
    """AST node representing one formal parameter of a SLICC declaration.

    ``generate()`` registers the parameter in the symbol table and returns
    the C++ declaration text for it.
    """

    def __init__(self, slicc, type_ast, ident, default = None, pointer = False):
        super(FormalParamAST, self).__init__(slicc)
        self.type_ast = type_ast
        self.ident = ident
        self.default = default
        self.pointer = pointer

    def __repr__(self):
        return "[FormalParamAST: %s]" % self.ident

    @property
    def name(self):
        return self.ident

    def generate(self):
        param_type = self.type_ast.type
        code_name = "param_%s" % self.ident

        # Register the parameter in the symbol table so the body can use it.
        symbol = Var(self.symtab, self.ident, self.location, param_type,
                     code_name, self.pairs)
        self.symtab.newSymbol(symbol)

        # Decide between pass-by-pointer and pass-by-const-reference:
        # explicit pointer flag, TBE entries, and abstract cache/directory
        # entry interfaces are all passed by pointer.
        by_pointer = self.pointer or str(param_type) == "TBE"
        if not by_pointer and "interface" in param_type:
            by_pointer = param_type["interface"] in ("AbstractCacheEntry",
                                                     "AbstractEntry")

        if by_pointer:
            return param_type, "%s* %s" % (param_type.c_ident, code_name)
        return param_type, "const %s& %s" % (param_type.c_ident, code_name)
| bsd-3-clause |
jazztpt/edx-platform | lms/djangoapps/django_comment_client/utils.py | 43 | 28663 | from collections import defaultdict
from datetime import datetime
import json
import logging
import pytz
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import connection
from django.http import HttpResponse
from django.utils.timezone import UTC
import pystache_custom as pystache
from opaque_keys.edx.locations import i4xEncoder
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from django_comment_common.models import Role, FORUM_ROLE_STUDENT
from django_comment_client.permissions import check_permissions_by_view, has_permission, get_team
from django_comment_client.settings import MAX_COMMENT_DEPTH
from edxmako import lookup_template
from courseware import courses
from courseware.access import has_access
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from openedx.core.djangoapps.course_groups.cohorts import (
get_course_cohort_settings, get_cohort_by_id, get_cohort_id, is_course_cohorted
)
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
# Module-level logger for the discussion utilities.
log = logging.getLogger(__name__)
def extract(dic, keys):
    """Return a new dict with just ``keys``; missing keys map to None."""
    return dict((key, dic.get(key)) for key in keys)
def strip_none(dic):
    """Return a copy of ``dic`` without the keys whose value is None.

    Uses a dict comprehension (consistent with ``extract`` above) instead
    of building an intermediate pair list with ``iteritems()``, which also
    keeps the helper portable across Python versions.
    """
    return {k: v for k, v in dic.items() if v is not None}
def strip_blank(dic):
    """Return a copy of ``dic`` without keys whose value is a blank string.

    Only plain strings count as blank; other falsy values (0, [], None)
    are kept. Uses a dict comprehension instead of an intermediate list
    with ``iteritems()``, matching the style of ``extract`` above.
    """
    def _is_blank(v):
        return isinstance(v, str) and len(v.strip()) == 0
    return {k: v for k, v in dic.items() if not _is_blank(v)}
# TODO should we be checking if d1 and d2 have the same keys with different values?
def merge_dict(dic1, dic2):
    """Return a new dict with ``dic2``'s entries overriding ``dic1``'s.

    Copy-then-update replaces ``dic1.items() + dic2.items()``: list
    concatenation of items() fails on Python 3's dict views, while this
    form behaves identically (later keys win) on every version.
    """
    merged = dict(dic1)
    merged.update(dic2)
    return merged
def get_role_ids(course_id):
    """Return {role_name: [user ids]} for every non-student forum role in the course."""
    privileged_roles = Role.objects.filter(course_id=course_id).exclude(name=FORUM_ROLE_STUDENT)
    return {role.name: list(role.users.values_list('id', flat=True)) for role in privileged_roles}
def has_discussion_privileges(user, course_id):
    """Returns True if the user is privileged in teams discussions for
    this course. The user must be one of Discussion Admin, Moderator,
    or Community TA.
    Args:
        user (User): The user to check privileges for.
        course_id (CourseKey): A key for the course to check privileges for.
    Returns:
        bool
    """
    # get_role_ids only returns admin, moderator and community TA roles,
    # so membership in any of them implies privilege.
    privileged_user_ids = get_role_ids(course_id)
    return any(user.id in ids for ids in privileged_user_ids.values())
def has_forum_access(uname, course_id, rolename):
    """Return True iff the user named ``uname`` holds ``rolename`` in the course."""
    try:
        role = Role.objects.get(name=rolename, course_id=course_id)
    except Role.DoesNotExist:
        # The role itself doesn't exist for this course, so nobody has it.
        return False
    return role.users.filter(username=uname).exists()
def has_required_keys(module):
    """Return True iff ``module`` has every attribute needed for get_discussion_id_map_entry()."""
    for key in ('discussion_id', 'discussion_category', 'discussion_target'):
        if getattr(module, key, None) is not None:
            continue
        log.debug("Required key '%s' not in discussion %s, leaving out of category map", key, module.location)
        return False
    return True
def get_accessible_discussion_modules(course, user, include_all=False):  # pylint: disable=invalid-name
    """
    Return every valid discussion module in ``course`` that ``user`` may load.

    With ``include_all`` set, the per-user access check is skipped and all
    valid discussion modules are returned.
    """
    candidates = modulestore().get_items(course.id, qualifiers={'category': 'discussion'})
    accessible = []
    for candidate in candidates:
        if not has_required_keys(candidate):
            continue
        if include_all or has_access(user, 'load', candidate, course.id):
            accessible.append(candidate)
    return accessible
def get_discussion_id_map_entry(module):
    """
    Build a ``(discussion_id, metadata)`` pair suitable for inclusion in the
    results of get_discussion_id_map().
    """
    # Title is "<last category path segment> / <target>".
    category_leaf = module.discussion_category.split("/")[-1].strip()
    metadata = {
        "location": module.location,
        "title": category_leaf + " / " + module.discussion_target,
    }
    return (module.discussion_id, metadata)
class DiscussionIdMapIsNotCached(Exception):
    """Raised on an attempt to read a course's discussion id map when no
    cached map exists for that course (see get_cached_discussion_key).
    """
    pass
def get_cached_discussion_key(course, discussion_id):
    """
    Return the usage key for ``discussion_id`` from the cached id map.

    Returns None when the map is cached but has no such id. Raises
    DiscussionIdMapIsNotCached when the course has no cached map at all.
    """
    try:
        structure = CourseStructure.objects.get(course_id=course.id)
    except CourseStructure.DoesNotExist:
        raise DiscussionIdMapIsNotCached()
    cached_mapping = structure.discussion_id_map
    if not cached_mapping:
        raise DiscussionIdMapIsNotCached()
    return cached_mapping.get(discussion_id)
def get_cached_discussion_id_map(course, discussion_ids, user):
    """
    Map each id in ``discussion_ids`` to its discussion module metadata,
    using the cached id map; ids the user may not see are omitted.

    Falls back to a full scan (get_discussion_id_map) when the course's id
    map has not been cached yet.
    """
    try:
        id_map = {}
        for discussion_id in discussion_ids:
            usage_key = get_cached_discussion_key(course, discussion_id)
            if not usage_key:
                continue
            module = modulestore().get_item(usage_key)
            if not has_required_keys(module):
                continue
            if not has_access(user, 'load', module, course.id):
                continue
            entry_id, metadata = get_discussion_id_map_entry(module)
            id_map[entry_id] = metadata
        return id_map
    except DiscussionIdMapIsNotCached:
        return get_discussion_id_map(course, user)
def get_discussion_id_map(course, user):
    """
    Build a dict of discussion-module metadata keyed by discussion_id,
    covering every discussion module in ``course`` visible to ``user``.
    """
    return dict(
        get_discussion_id_map_entry(module)
        for module in get_accessible_discussion_modules(course, user)
    )
def _filter_unstarted_categories(category_map):
    """Return a filtered copy of ``category_map`` with entries and
    subcategories whose start_date is still in the future removed.

    Walks the tree iteratively with two parallel queues: one of source
    nodes and one of their filtered twins being built in ``result_map``.
    """
    now = datetime.now(UTC())
    result_map = {}
    unfiltered_queue = [category_map]
    filtered_queue = [result_map]
    while unfiltered_queue:
        unfiltered_map = unfiltered_queue.pop()
        filtered_map = filtered_queue.pop()
        filtered_map["children"] = []
        filtered_map["entries"] = {}
        filtered_map["subcategories"] = {}
        for child in unfiltered_map["children"]:
            if child in unfiltered_map["entries"]:
                if unfiltered_map["entries"][child]["start_date"] <= now:
                    filtered_map["children"].append(child)
                    filtered_map["entries"][child] = {}
                    # Copy everything except the (internal) start_date key.
                    for key in unfiltered_map["entries"][child]:
                        if key != "start_date":
                            filtered_map["entries"][child][key] = unfiltered_map["entries"][child][key]
                else:
                    log.debug(u"Filtering out:%s with start_date: %s", child, unfiltered_map["entries"][child]["start_date"])
            else:
                # Use the same inclusive comparison as the entry branch above;
                # this was previously a strict '<', which inconsistently
                # dropped subcategories starting exactly at this instant.
                if unfiltered_map["subcategories"][child]["start_date"] <= now:
                    filtered_map["children"].append(child)
                    filtered_map["subcategories"][child] = {}
                    unfiltered_queue.append(unfiltered_map["subcategories"][child])
                    filtered_queue.append(filtered_map["subcategories"][child])
    return result_map
def _sort_map_entries(category_map, sort_alpha):
things = []
for title, entry in category_map["entries"].items():
if entry["sort_key"] is None and sort_alpha:
entry["sort_key"] = title
things.append((title, entry))
for title, category in category_map["subcategories"].items():
things.append((title, category))
_sort_map_entries(category_map["subcategories"][title], sort_alpha)
category_map["children"] = [x[0] for x in sorted(things, key=lambda x: x[1]["sort_key"])]
def get_discussion_category_map(course, user, cohorted_if_in_list=False, exclude_unstarted=True):
    """
    Transform the list of this course's discussion modules into a recursive dictionary structure. This is used
    to render the discussion category map in the discussion tab sidebar for a given user.
    Args:
        course: Course for which to get the ids.
        user: User to check for access.
        cohorted_if_in_list (bool): If True, inline topics are marked is_cohorted only if they are
            in course_cohort_settings.discussion_topics.
        exclude_unstarted (bool): If True (default), entries and subcategories whose
            start_date is still in the future are pruned from the result.
    Example:
        >>> example = {
        >>>     "entries": {
        >>>         "General": {
        >>>             "sort_key": "General",
        >>>             "is_cohorted": True,
        >>>             "id": "i4x-edx-eiorguegnru-course-foobarbaz"
        >>>         }
        >>>     },
        >>>     "children": ["General", "Getting Started"],
        >>>     "subcategories": {
        >>>         "Getting Started": {
        >>>             "subcategories": {},
        >>>             "children": [
        >>>                 "Working with Videos",
        >>>                 "Videos on edX"
        >>>             ],
        >>>             "entries": {
        >>>                 "Working with Videos": {
        >>>                     "sort_key": None,
        >>>                     "is_cohorted": False,
        >>>                     "id": "d9f970a42067413cbb633f81cfb12604"
        >>>                 },
        >>>                 "Videos on edX": {
        >>>                     "sort_key": None,
        >>>                     "is_cohorted": False,
        >>>                     "id": "98d8feb5971041a085512ae22b398613"
        >>>                 }
        >>>             }
        >>>         }
        >>>     }
        >>> }
    """
    # Phase 1: bucket every accessible module under its full "A / B / C"
    # category path string.
    unexpanded_category_map = defaultdict(list)
    modules = get_accessible_discussion_modules(course, user)
    course_cohort_settings = get_course_cohort_settings(course.id)
    for module in modules:
        id = module.discussion_id
        title = module.discussion_target
        sort_key = module.sort_key
        category = " / ".join([x.strip() for x in module.discussion_category.split("/")])
        # Handle case where module.start is None; datetime.max means
        # "effectively never starts" so such entries are filtered out later.
        entry_start_date = module.start if module.start else datetime.max.replace(tzinfo=pytz.UTC)
        unexpanded_category_map[category].append({"title": title, "id": id, "sort_key": sort_key, "start_date": entry_start_date})
    # Phase 2: expand each flat path into the nested subcategory tree,
    # propagating the earliest entry start date up through each level.
    category_map = {"entries": defaultdict(dict), "subcategories": defaultdict(dict)}
    for category_path, entries in unexpanded_category_map.items():
        node = category_map["subcategories"]
        path = [x.strip() for x in category_path.split("/")]
        # Find the earliest start date for the entries in this category
        category_start_date = None
        for entry in entries:
            if category_start_date is None or entry["start_date"] < category_start_date:
                category_start_date = entry["start_date"]
        for level in path[:-1]:
            if level not in node:
                node[level] = {"subcategories": defaultdict(dict),
                               "entries": defaultdict(dict),
                               "sort_key": level,
                               "start_date": category_start_date}
            else:
                if node[level]["start_date"] > category_start_date:
                    node[level]["start_date"] = category_start_date
            node = node[level]["subcategories"]
        level = path[-1]
        if level not in node:
            node[level] = {"subcategories": defaultdict(dict),
                           "entries": defaultdict(dict),
                           "sort_key": level,
                           "start_date": category_start_date}
        else:
            if node[level]["start_date"] > category_start_date:
                node[level]["start_date"] = category_start_date
        always_cohort_inline_discussions = (  # pylint: disable=invalid-name
            not cohorted_if_in_list and course_cohort_settings.always_cohort_inline_discussions
        )
        dupe_counters = defaultdict(lambda: 0)  # counts the number of times we see each title
        for entry in entries:
            is_entry_cohorted = (
                course_cohort_settings.is_cohorted and (
                    always_cohort_inline_discussions or entry["id"] in course_cohort_settings.cohorted_discussions
                )
            )
            title = entry["title"]
            if node[level]["entries"][title]:
                # If we've already seen this title, append an incrementing number to disambiguate
                # the category from other categores sharing the same title in the course discussion UI.
                dupe_counters[title] += 1
                title = u"{title} ({counter})".format(title=title, counter=dupe_counters[title])
            node[level]["entries"][title] = {"id": entry["id"],
                                             "sort_key": entry["sort_key"],
                                             "start_date": entry["start_date"],
                                             "is_cohorted": is_entry_cohorted}
    # TODO. BUG! : course location is not unique across multiple course runs!
    # (I think Kevin already noticed this) Need to send course_id with requests, store it
    # in the backend.
    # Phase 3: add the course-level (top-level) discussion topics directly
    # as entries; they are always considered started ("now").
    for topic, entry in course.discussion_topics.items():
        category_map['entries'][topic] = {
            "id": entry["id"],
            "sort_key": entry.get("sort_key", topic),
            "start_date": datetime.now(UTC()),
            "is_cohorted": (course_cohort_settings.is_cohorted and
                            entry["id"] in course_cohort_settings.cohorted_discussions)
        }
    _sort_map_entries(category_map, course.discussion_sort_alpha)
    return _filter_unstarted_categories(category_map) if exclude_unstarted else category_map
def discussion_category_id_access(course, user, discussion_id):
    """
    Return True iff ``discussion_id`` is accessible for ``user`` in ``course``.
    Assumes that the commentable identified by discussion_id has a null or
    'course' context. Prefers the cached id map, falling back to a full
    scan (get_discussion_categories_ids) when no cache exists.
    """
    if discussion_id in course.top_level_discussion_topic_ids:
        return True
    try:
        usage_key = get_cached_discussion_key(course, discussion_id)
    except DiscussionIdMapIsNotCached:
        return discussion_id in get_discussion_categories_ids(course, user)
    if not usage_key:
        return False
    module = modulestore().get_item(usage_key)
    return has_required_keys(module) and has_access(user, 'load', module, course.id)
def get_discussion_categories_ids(course, user, include_all=False):
    """
    Return the ids of every discussion category in ``course`` accessible to
    ``user``, including the course-level top-level topics.
    Args:
        course: Course for which to get the ids.
        user: User to check for access.
        include_all (bool): If True, skip access checks. Used by configuration views.
    """
    accessible_ids = []
    for module in get_accessible_discussion_modules(course, user, include_all=include_all):
        accessible_ids.append(module.discussion_id)
    return course.top_level_discussion_topic_ids + accessible_ids
class JsonResponse(HttpResponse):
    """HTTP response whose body is ``data`` serialized as JSON.

    Serializes with i4xEncoder so opaque-key/location objects embedded in
    the payload are rendered cleanly.
    """
    def __init__(self, data=None):
        content = json.dumps(data, cls=i4xEncoder)
        super(JsonResponse, self).__init__(content,
                                           mimetype='application/json; charset=utf-8')
class JsonError(HttpResponse):
    """HTTP error response whose body is ``{"errors": [...]}`` as JSON.

    Accepts either a single message string or a list of messages.
    """
    def __init__(self, error_messages=None, status=400):
        # None sentinel instead of a mutable default argument ([]).
        if error_messages is None:
            error_messages = []
        elif isinstance(error_messages, basestring):
            # A lone message string is wrapped into a one-element list.
            error_messages = [error_messages]
        content = json.dumps({'errors': error_messages}, indent=2, ensure_ascii=False)
        super(JsonError, self).__init__(content,
                                        mimetype='application/json; charset=utf-8', status=status)
class HtmlResponse(HttpResponse):
    """Response that passes ``html`` through verbatim.

    NOTE(review): despite the name, the content type is 'text/plain', so
    browsers will not render the markup — confirm callers rely on this
    before changing it.
    """
    def __init__(self, html=''):
        super(HtmlResponse, self).__init__(html, content_type='text/plain')
class ViewNameMiddleware(object):
    """Middleware that records the handling view's function name on the
    request as ``request.view_name`` (presumably read by downstream
    permission checks — confirm against callers).
    """
    def process_view(self, request, view_func, view_args, view_kwargs):
        request.view_name = view_func.__name__
class QueryCountDebugMiddleware(object):
    """
    Logs the number of queries run and their total time for each request
    answered with status 200. Single-database setups only.
    """
    def process_response(self, request, response):
        if response.status_code != 200:
            return response
        total_time = 0
        for query in connection.queries:
            query_time = query.get('time')
            if query_time is None:
                # django-debug-toolbar monkeypatches the connection cursor
                # wrapper and stores the elapsed time under 'duration'
                # (in milliseconds) rather than 'time' (in seconds).
                query_time = query.get('duration', 0) / 1000
            total_time += float(query_time)
        log.info(u'%s queries run, total %s seconds', len(connection.queries), total_time)
        return response
def get_ability(course_id, content, user):
    """Return a dict of permission flags describing what `user` may do to
    `content` (a thread or comment dict with a 'type' key)."""
    is_thread = content['type'] == 'thread'
    def can(view_name):
        # All flags funnel through the same permission check.
        return check_permissions_by_view(user, course_id, content, view_name)
    return {
        'editable': can("update_thread" if is_thread else "update_comment"),
        'can_reply': can("create_comment" if is_thread else "create_sub_comment"),
        'can_delete': can("delete_thread" if is_thread else "delete_comment"),
        'can_openclose': can("openclose_thread") if is_thread else False,
        'can_vote': can("vote_for_thread" if is_thread else "vote_for_comment"),
    }
# TODO: RENAME
def get_annotated_content_info(course_id, content, user, user_info):
    """
    Get metadata for an individual content (thread or comment)
    """
    content_id = content['id']
    if content_id in user_info['upvoted_ids']:
        voted = 'up'
    elif content_id in user_info['downvoted_ids']:
        voted = 'down'
    else:
        voted = ''
    return {
        'voted': voted,
        'subscribed': content_id in user_info['subscribed_thread_ids'],
        'ability': get_ability(course_id, content, user),
    }
# TODO: RENAME
def get_annotated_content_infos(course_id, thread, user, user_info):
    """
    Get metadata for a thread and its children
    """
    infos = {}
    # Iterative traversal of the thread tree (replaces recursion; same result).
    pending = [thread]
    while pending:
        content = pending.pop()
        infos[str(content['id'])] = get_annotated_content_info(course_id, content, user, user_info)
        pending.extend(
            content.get('children', []) +
            content.get('endorsed_responses', []) +
            content.get('non_endorsed_responses', [])
        )
    return infos
def get_metadata_for_threads(course_id, threads, user, user_info):
    """Collect annotated-content metadata for every thread, merged into one dict."""
    metadata = {}
    for thread in threads:
        metadata = merge_dict(metadata, get_annotated_content_infos(course_id, thread, user, user_info))
    return metadata
# put this method in utils.py to avoid circular import dependency between helpers and mustache_helpers
def render_mustache(template_name, dictionary, *args, **kwargs):
    """Render the named mustache template from the 'main' lookup, using
    `dictionary` as the rendering context.

    NOTE(review): *args/**kwargs are accepted but ignored -- presumably kept
    for call-site compatibility; confirm before removing.
    """
    template = lookup_template('main', template_name).source
    return pystache.render(template, dictionary)
def permalink(content):
    """Build a site-relative URL pointing at the given thread or comment dict."""
    course_id = content['course_id']
    if isinstance(course_id, CourseKey):
        course_id = course_id.to_deprecated_string()
    if content['type'] == 'thread':
        return reverse('django_comment_client.forum.views.single_thread',
                       args=[course_id, content['commentable_id'], content['id']])
    # Comments link to their parent thread with a fragment for the comment id.
    thread_url = reverse('django_comment_client.forum.views.single_thread',
                         args=[course_id, content['commentable_id'], content['thread_id']])
    return thread_url + '#' + content['id']
def extend_content(content):
    """Augment a thread/comment dict with author roles, a permalink and
    display-ready title/body fields; returns the merged dict."""
    roles = {}
    if content.get('user_id'):
        try:
            user = User.objects.get(pk=content['user_id'])
            # NOTE(review): every generated pair uses the literal key 'name',
            # so this dict collapses to at most {'name': <last role>}.  Looks
            # like it was meant to collect all role names -- confirm intent
            # before changing, since templates may rely on this shape.
            roles = dict(('name', role.name.lower()) for role in user.roles.filter(course_id=content['course_id']))
        except User.DoesNotExist:
            log.error(
                'User ID %s in comment content %s but not in our DB.',
                content.get('user_id'),
                content.get('id')
            )
    content_info = {
        # Prefer the search-highlighted variants when present.
        'displayed_title': content.get('highlighted_title') or content.get('title', ''),
        'displayed_body': content.get('highlighted_body') or content.get('body', ''),
        'permalink': permalink(content),
        'roles': roles,
        'updated': content['created_at'] != content['updated_at'],
    }
    return merge_dict(content, content_info)
def add_courseware_context(content_list, course, user, id_map=None):
    """
    Decorates `content_list` with courseware metadata using the discussion id map cache if available.
    """
    if id_map is None:
        commentable_ids = [item['commentable_id'] for item in content_list]
        id_map = get_cached_discussion_id_map(course, commentable_ids, user)
    for item in content_list:
        entry = id_map.get(item['commentable_id'])
        if entry is None:
            # Unknown commentable -- leave the item undecorated.
            continue
        url = reverse('jump_to', kwargs={"course_id": course.id.to_deprecated_string(),
                                         "location": entry["location"].to_deprecated_string()})
        item.update({"courseware_url": url, "courseware_title": entry["title"]})
def prepare_content(content, course_key, is_staff=False, course_is_cohorted=None):
    """
    This function is used to pre-process thread and comment models in various
    ways before adding them to the HTTP response. This includes fixing empty
    attribute fields, enforcing author anonymity, and enriching metadata around
    group ownership and response endorsement.

    @TODO: not all response pre-processing steps are currently integrated into
    this function.

    Arguments:
        content (dict): A thread or comment.
        course_key (CourseKey): The course key of the course.
        is_staff (bool): Whether the user is a staff member.
        course_is_cohorted (bool): Whether the course is cohorted.

    Returns:
        dict: `content` restricted to the whitelisted fields, with None values
        stripped, children recursively prepared, and endorsement/cohort
        metadata adjusted.
    """
    # Whitelist of keys that may appear in the HTTP response.
    fields = [
        'id', 'title', 'body', 'course_id', 'anonymous', 'anonymous_to_peers',
        'endorsed', 'parent_id', 'thread_id', 'votes', 'closed', 'created_at',
        'updated_at', 'depth', 'type', 'commentable_id', 'comments_count',
        'at_position_list', 'children', 'highlighted_title', 'highlighted_body',
        'courseware_title', 'courseware_url', 'unread_comments_count',
        'read', 'group_id', 'group_name', 'pinned', 'abuse_flaggers',
        'stats', 'resp_skip', 'resp_limit', 'resp_total', 'thread_type',
        'endorsed_responses', 'non_endorsed_responses', 'non_endorsed_resp_total',
        'endorsement', 'context'
    ]
    # Author identity is exposed only for non-anonymous posts; staff also see
    # authors of posts that are anonymous to peers only.
    if (content.get('anonymous') is False) and ((content.get('anonymous_to_peers') is False) or is_staff):
        fields += ['username', 'user_id']
    content = strip_none(extract(content, fields))
    if content.get("endorsement"):
        endorsement = content["endorsement"]
        endorser = None
        if endorsement["user_id"]:
            try:
                endorser = User.objects.get(pk=endorsement["user_id"])
            except User.DoesNotExist:
                log.error(
                    "User ID %s in endorsement for comment %s but not in our DB.",
                    content.get('user_id'),
                    content.get('id')
                )
        # Only reveal endorser if requester can see author or if endorser is staff
        if (
            endorser and
            ("username" in fields or has_permission(endorser, "endorse_comment", course_key))
        ):
            endorsement["username"] = endorser.username
        else:
            del endorsement["user_id"]
    # Memoize the cohort lookup so the recursive calls below don't repeat it.
    if course_is_cohorted is None:
        course_is_cohorted = is_course_cohorted(course_key)
    # Recursively prepare every nested response list, forwarding the memoized
    # cohort flag.
    for child_content_key in ["children", "endorsed_responses", "non_endorsed_responses"]:
        if child_content_key in content:
            children = [
                prepare_content(child, course_key, is_staff, course_is_cohorted=course_is_cohorted)
                for child in content[child_content_key]
            ]
            content[child_content_key] = children
    if course_is_cohorted:
        # Augment the specified thread info to include the group name if a group id is present.
        if content.get('group_id') is not None:
            content['group_name'] = get_cohort_by_id(course_key, content.get('group_id')).name
    else:
        # Remove any cohort information that might remain if the course had previously been cohorted.
        content.pop('group_id', None)
    return content
def get_group_id_for_comments_service(request, course_key, commentable_id=None):
    """
    Given a user requesting content within a `commentable_id`, determine the
    group_id which should be passed to the comments service.

    Returns:
        int: the group_id to pass to the comments service or None if nothing
        should be passed

    Raises:
        ValueError if the requested group_id is invalid
    """
    if commentable_id is None or is_commentable_cohorted(course_key, commentable_id):
        # Fix: default to None so HTTP methods other than GET/POST (e.g.
        # DELETE) no longer raise UnboundLocalError below.
        requested_group_id = None
        if request.method == "GET":
            requested_group_id = request.GET.get('group_id')
        elif request.method == "POST":
            requested_group_id = request.POST.get('group_id')
        if has_permission(request.user, "see_all_cohorts", course_key):
            if not requested_group_id:
                return None
            try:
                group_id = int(requested_group_id)
                # Validate the cohort exists; raises if the id is unknown.
                get_cohort_by_id(course_key, group_id)
            except CourseUserGroup.DoesNotExist:
                raise ValueError
        else:
            # regular users always query with their own id.
            group_id = get_cohort_id(request.user, course_key)
        return group_id
    else:
        # Never pass a group_id to the comments service for a non-cohorted
        # commentable
        return None
def is_comment_too_deep(parent):
    """
    Determine whether a comment with the given parent violates MAX_COMMENT_DEPTH

    parent can be None to determine whether root comments are allowed

    Note: a MAX_COMMENT_DEPTH of None disables the check; a negative value
    forbids all comments.  The result is truthy/falsy rather than a strict
    bool (it can be e.g. None when `parent` is None) -- callers must only
    rely on truthiness.
    """
    return (
        MAX_COMMENT_DEPTH is not None and (
            # A negative limit means no comments are allowed at all.
            MAX_COMMENT_DEPTH < 0 or
            (parent and parent["depth"] >= MAX_COMMENT_DEPTH)
        )
    )
def is_commentable_cohorted(course_key, commentable_id):
    """
    Args:
        course_key: CourseKey
        commentable_id: string

    Returns:
        Bool: is this commentable cohorted?

    Raises:
        Http404 if the course doesn't exist.
    """
    course = courses.get_course_by_id(course_key)
    cohort_settings = get_course_cohort_settings(course_key)
    if not cohort_settings.is_cohorted or get_team(commentable_id):
        # Uncohorted courses and team discussions are never cohorted.
        result = False
    elif (commentable_id in course.top_level_discussion_topic_ids or
          cohort_settings.always_cohort_inline_discussions is False):
        # Top-level discussions must be explicitly configured as cohorted
        # (the default is not).  The same applies to inline discussions when
        # auto-cohorting is explicitly disabled in settings.
        result = commentable_id in cohort_settings.cohorted_discussions
    else:
        # Inline discussions are cohorted by default.
        result = True
    log.debug(u"is_commentable_cohorted(%s, %s) = {%s}", course_key, commentable_id, result)
    return result
| agpl-3.0 |
ishay2b/tensorflow | tensorflow/python/framework/device.py | 150 | 9078 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to represent a device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
class DeviceSpec(object):
  """Represents a (possibly partial) specification for a TensorFlow device.

  A `DeviceSpec` describes where state is stored and computations occur.  It
  can parse device spec strings, validate them, merge them, or compose them
  programmatically.

  Example:

  ```python
  # Place the operations on device "GPU:0" in the "ps" job.
  device_spec = DeviceSpec(job="ps", device_type="GPU", device_index=0)
  with tf.device(device_spec):
    # Both my_var and squared_var will be placed on /job:ps/device:GPU:0.
    my_var = tf.Variable(..., name="my_variable")
    squared_var = tf.square(my_var)
  ```

  A partially specified `DeviceSpec` is merged with the specs of enclosing
  scopes; components set in inner scopes take precedence over outer ones.

  The five optional components are:

  * Job: The job name.
  * Replica: The replica index.
  * Task: The task index.
  * Device type: The device type string (e.g. "CPU" or "GPU").
  * Device index: The device index.
  """

  def __init__(self, job=None, replica=None, task=None, device_type=None,
               device_index=None):
    """Create a new `DeviceSpec` object.

    Args:
      job: string. Optional job name.
      replica: int. Optional replica index.
      task: int. Optional task index.
      device_type: Optional device type string (e.g. "CPU" or "GPU")
      device_index: int. Optional device index.  If left
        unspecified, device represents 'any' device_index.
    """
    self.job = job
    self.replica = replica
    self.task = task
    # Legacy lowercase "cpu"/"gpu" spellings are canonicalized to uppercase;
    # any other device type string is stored verbatim.
    if device_type in ("cpu", "gpu"):
      self.device_type = device_type.upper()
    else:
      self.device_type = device_type
    self.device_index = device_index

  def _clear(self):
    """Reset every component to the unspecified state."""
    self._job = None
    self._replica = None
    self._task = None
    self.device_type = None
    self.device_index = None

  @property
  def job(self):
    return self._job

  @job.setter
  def job(self, job):
    # Normalize to string; None means "unspecified".
    self._job = str(job) if job is not None else None

  @property
  def replica(self):
    return self._replica

  @replica.setter
  def replica(self, replica):
    self._replica = int(replica) if replica is not None else None

  @property
  def task(self):
    return self._task

  @task.setter
  def task(self, task):
    self._task = int(task) if task is not None else None

  def parse_from_string(self, spec):
    """Parse a `DeviceSpec` name into its components.

    Args:
      spec: a string of the form
       /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
      or
       /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
      as cpu and gpu are mutually exclusive.
      All entries are optional.

    Returns:
      The `DeviceSpec`.

    Raises:
      ValueError: if the spec was not valid.
    """
    self._clear()
    for part in spec.split("/"):
      fields = part.split(":")
      n = len(fields)
      # Empty parts (e.g. the leading "/") fall through every branch.
      if n == 2 and fields[0] == "job":
        self.job = fields[1]
      elif n == 2 and fields[0] == "replica":
        self.replica = fields[1]
      elif n == 2 and fields[0] == "task":
        self.task = fields[1]
      elif n in (1, 2) and fields[0].upper() in ("GPU", "CPU"):
        # Bare "gpu:0" / "CPU" style entries; case-insensitive.
        if self.device_type is not None:
          raise ValueError("Cannot specify multiple device types: %s" % spec)
        self.device_type = fields[0].upper()
        if n == 2 and fields[1] != "*":
          self.device_index = int(fields[1])
      elif n == 3 and fields[0] == "device":
        # "device:<type>:<index>" entries; NOTE(review): the type is kept
        # verbatim here (no uppercasing), unlike the bare form above.
        if self.device_type is not None:
          raise ValueError("Cannot specify multiple device types: %s" % spec)
        self.device_type = fields[1]
        if fields[2] != "*":
          self.device_index = int(fields[2])
      elif fields[0]:  # pylint: disable=g-explicit-bool-comparison
        raise ValueError("Unknown attribute: '%s' in '%s'" % (fields[0], spec))
    return self

  def merge_from(self, dev):
    """Merge the properties of "dev" into this `DeviceSpec`.

    Args:
      dev: a `DeviceSpec`.
    """
    # Each component of `dev` that is set overrides ours; setattr routes
    # through the validating properties for job/replica/task.
    for name in ("job", "replica", "task", "device_type", "device_index"):
      value = getattr(dev, name)
      if value is not None:
        setattr(self, name, value)

  def to_string(self):
    """Return a string representation of this `DeviceSpec`.

    Returns:
      a string of the form
      /job:<name>/replica:<id>/task:<id>/device:<device_type>:<id>.
    """
    parts = []
    if self.job is not None:
      parts.append("/job:" + self.job)
    if self.replica is not None:
      parts.append("/replica:" + str(self.replica))
    if self.task is not None:
      parts.append("/task:" + str(self.task))
    if self.device_type is not None:
      index = "*" if self.device_index is None else str(self.device_index)
      parts.append("/device:%s:%s" % (self.device_type, index))
    return "".join(parts)

  @staticmethod
  def from_string(spec):
    """Construct a `DeviceSpec` from a string.

    Args:
      spec: a string of the form
       /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
      or
       /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
      as cpu and gpu are mutually exclusive.
      All entries are optional.

    Returns:
      A DeviceSpec.
    """
    return DeviceSpec().parse_from_string(spec)
def check_valid(spec):
  """Check that a device spec is valid.

  Args:
    spec: a string.

  Raises:
    An exception if the spec is invalid.
  """
  # Construct a DeviceSpec. It will assert a failure if spec is invalid.
  # (The constructed object is discarded; only the side-effect matters.)
  DeviceSpec.from_string(spec)
def canonical_name(device):
  """Returns a canonical name for the given `DeviceSpec` or device name."""
  if device is None:
    return ""
  # Parse strings into a DeviceSpec so both input kinds render identically.
  if not isinstance(device, DeviceSpec):
    device = DeviceSpec.from_string(device)
  return device.to_string()
def merge_device(spec):
  """Returns a device function that merges devices specifications.

  This can be used to merge partial specifications of devices. The
  innermost setting for a device field takes precedence. For example:

    with tf.device(merge_device("/device:GPU:0"))
      # Nodes created here have device "/device:GPU:0"
      with tf.device(merge_device("/job:worker")):
        # Nodes created here have device "/job:worker/device:GPU:0"
        with tf.device(merge_device("/device:CPU:0")):
          # Nodes created here have device "/job:worker/device:CPU:0"
          with tf.device(merge_device("/job:ps")):
            # Nodes created here have device "/job:ps/device:CPU:0"

  Args:
    spec: A `DeviceSpec` or a device spec string (partially) describing the
      device that should be used for all nodes created in the scope of
      the returned device function's with block.

  Returns:
    A device function with the above-described behavior.

  Raises:
    ValueError: if the spec was not valid.
  """
  if not isinstance(spec, DeviceSpec):
    spec = DeviceSpec.from_string(spec or "")

  def _device_function(node_def):
    # Start from the outer spec, then let the node's own (inner) assignment
    # override any fields it already sets.
    merged = copy.copy(spec)
    merged.merge_from(DeviceSpec.from_string(node_def.device or ""))
    return merged

  return _device_function
| apache-2.0 |
z123/build-a-saas-app-with-flask | catwatch/tests/billing/test_views.py | 1 | 7627 | from flask import url_for, json
from flask_babel import gettext as _
from catwatch.tests.lib.util import ViewTestMixin
from catwatch.tests.lib.assertions import assert_status_with_message
class TestBilling(ViewTestMixin):
    """View tests for the billing blueprint: pricing page, coupon codes,
    subscription create/update/cancel, payment-method updates and billing
    history.  The `subscriptions` and `mock_stripe` fixtures stand in for
    real subscription records and Stripe API calls.
    """
    def test_pricing_page(self):
        """ Pricing page renders successfully. """
        response = self.client.get(url_for('billing.pricing'))
        assert_status_with_message(200, response, 'Sign up')
    def test_pricing_page_logged_in(self):
        """ Pricing page renders successfully for a logged-in user. """
        self.login()
        response = self.client.get(url_for('billing.pricing'))
        assert_status_with_message(200, response, 'Continue')
    def test_pricing_page_as_subscriber(self, subscriptions):
        """ Pricing page for subscribers should redirect to update. """
        self.login(identity='subscriber@localhost.com')
        response = self.client.get(url_for('billing.pricing'),
                                   follow_redirects=True)
        assert_status_with_message(200, response, 'Change plan')
    def test_coupon_code_not_valid(self):
        """ Coupon code should not be processed. """
        self.login()
        params = {'coupon_code': None}
        response = self.client.post(url_for('billing.coupon_code'),
                                    data=params, follow_redirects=True)
        data = json.loads(response.data)
        assert response.status_code == 422
        assert data['error'] == _('Discount code cannot be processed.')
    def test_coupon_code_not_redeemable(self):
        """ Coupon code should be redeemable. """
        self.login()
        params = {'coupon_code': 'foo'}
        response = self.client.post(url_for('billing.coupon_code'),
                                    data=params, follow_redirects=True)
        data = json.loads(response.data)
        assert response.status_code == 404
        assert data['error'] == _('Discount code not found.')
    def test_subscription_create_page(self):
        """ Subscription create page renders successfully. """
        self.login()
        response = self.client.get(url_for('billing.create'),
                                   follow_redirects=True)
        assert response.status_code == 200
    def test_subscription_create_as_subscriber(self, subscriptions):
        """ Subscribers should not be allowed to create a subscription. """
        self.login(identity='subscriber@localhost.com')
        response = self.client.get(url_for('billing.create'),
                                   follow_redirects=True)
        assert_status_with_message(200, response,
                                   _('You already have an active subscription'
                                     '.'))
    def test_subscription_create(self, users, mock_stripe):
        """ Subscription create requires javascript. """
        self.login()
        params = {
            'stripe_key': 'cus_000',
            'plan': 'gold',
            'name': 'Foobar Johnson'
        }
        response = self.client.post(url_for('billing.create'),
                                    data=params, follow_redirects=True)
        assert_status_with_message(200, response,
                                   _('You must enable Javascript for this '
                                     'request.'))
    def test_subscription_update_page_without_subscription(self):
        """ Subscription update page redirects to pricing page. """
        self.login()
        response = self.client.get(url_for('billing.update'),
                                   follow_redirects=True)
        assert_status_with_message(200, response, "You're moments away")
    def test_subscription_update_page(self, subscriptions):
        """ Subscription update page renders successfully. """
        self.login(identity='subscriber@localhost.com')
        response = self.client.get(url_for('billing.update'),
                                   follow_redirects=True)
        assert_status_with_message(200, response,
                                   "You're about to change plans")
    def test_subscription_update(self, subscriptions, mock_stripe):
        """ Subscription create adds a new subscription. """
        self.login(identity='subscriber@localhost.com')
        params = {
            'submit_gold': ''
        }
        response = self.client.post(url_for('billing.update'),
                                    data=params, follow_redirects=True)
        assert response.status_code == 200
    def test_subscription_cancel_page_without_subscription(self):
        """ Subscription cancel page redirects to settings. """
        self.login()
        response = self.client.get(url_for('billing.cancel'),
                                   follow_redirects=True)
        assert_status_with_message(200, response,
                                   _('You do not have an active subscription'
                                     '.'))
    def test_subscription_cancel_page(self, subscriptions):
        """ Subscription cancel page renders successfully. """
        self.login(identity='subscriber@localhost.com')
        response = self.client.get(url_for('billing.cancel'),
                                   follow_redirects=True)
        assert response.status_code == 200
    def test_subscription_cancel(self, subscriptions, mock_stripe):
        """ Subscription cancel is successful. """
        self.login(identity='subscriber@localhost.com')
        response = self.client.post(url_for('billing.cancel'),
                                    data={}, follow_redirects=True)
        assert_status_with_message(200, response,
                                   _('Sorry to see you go, your subscription '
                                     'has been cancelled.'))
    def test_subscription_update_payment_method_without_card(self):
        """ Subscription update method without card should fail. """
        self.login()
        response = self.client.post(url_for('billing.update_payment_method'),
                                    data={}, follow_redirects=True)
        assert_status_with_message(200, response,
                                   _('You do not have a payment method on '
                                     'file.'))
    def test_subscription_update_payment_method(self, subscriptions,
                                                mock_stripe):
        """ Subscription update payment requires javascript. """
        self.login(identity='subscriber@localhost.com')
        response = self.client.post(url_for('billing.update_payment_method'),
                                    data={}, follow_redirects=True)
        assert_status_with_message(200, response,
                                   _('You must enable Javascript for this '
                                     'request.'))
    def test_subscription_billing_history(self, subscriptions, mock_stripe):
        """ Subscription billing history should render successfully. """
        self.login(identity='subscriber@localhost.com')
        response = self.client.get(url_for('billing.billing_history'))
        assert_status_with_message(200, response,
                                   'Billing details and history')
    def test_subscription_billing_history_without_sub(self, mock_stripe):
        """ Subscription billing history without sub should still work. """
        self.login()
        response = self.client.get(url_for('billing.billing_history'))
        assert_status_with_message(200, response,
                                   'Billing details and history')
| mit |
teamtuga4/teamtuga4ever.repository | plugin.video.pancas/resources/lib/libraries/unwise.py | 23 | 2106 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
def execute(str_eval):
    """Evaluate an "unwise"-packed payload string of the form "w,i,s,e".

    Returns the decoded page value, or None when decoding fails (best effort).
    """
    page_value=""
    try:
        # SECURITY: `exec` runs text scraped from a remote page.  str_eval must
        # be a plain 4-tuple literal of strings (the packer's argument list);
        # never feed arbitrary input here.
        ss="w,i,s,e=("+str_eval+')'
        exec (ss)
        page_value=__unwise(w,i,s,e)
    except Exception:
        # Fix: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; decode failures still return None.
        return
    return page_value
def __unwise(w, i, s, e):
    """Decode a string obfuscated by the JS "unwise" packer.

    NOTE(review): behaviour inferred from this code only -- the first five
    characters of each of `w`, `i` and `s` (interleaved) appear to form a
    15-character key; the remaining characters (also interleaved) are the
    payload, read two at a time as base-36 numbers and shifted by +/-1
    according to the parity of the corresponding key character.  `e` only
    participates in the termination length check.  Confirm against the JS
    unpacker original before changing.
    """
    lIll = 0;   # read index into w
    ll1I = 0;   # read index into i
    Il1l = 0;   # read index into s
    ll1l = [];  # payload characters (interleaved remainders of w, i, s)
    l1lI = [];  # key characters (first five of each of w, i, s, interleaved)
    while True:
        if (lIll < 5):
            l1lI.append(w[lIll])
        elif (lIll < len(w)):
            ll1l.append(w[lIll]);
        lIll+=1;
        if (ll1I < 5):
            l1lI.append(i[ll1I])
        elif (ll1I < len(i)):
            ll1l.append(i[ll1I])
        ll1I+=1;
        if (Il1l < 5):
            l1lI.append(s[Il1l])
        elif (Il1l < len(s)):
            ll1l.append(s[Il1l]);
        Il1l+=1;
        # Stop once every character of w, i and s has been distributed.
        if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
            break;
    lI1l = ''.join(ll1l)
    I1lI = ''.join(l1lI)
    ll1I = 0;   # reused as the cyclic key position
    l1ll = [];  # decoded output characters
    # Decode the payload two characters (one base-36 number) at a time.
    for lIll in range(0,len(ll1l),2):
        ll11 = -1;
        if ( ord(I1lI[ll1I]) % 2):
            ll11 = 1;
        l1ll.append(chr( int(lI1l[lIll: lIll+2], 36) - ll11));
        ll1I+=1;
        # The key repeats cyclically over the payload.
        if (ll1I >= len(l1lI)):
            ll1I = 0;
    ret=''.join(l1ll)
    if 'eval(function(w,i,s,e)' in ret:
        # Nested packer: extract its argument tuple and decode recursively.
        ret=re.compile('eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
        return execute(ret)
    else:
        return ret
| gpl-2.0 |
SickRage/SickRage | tests/test_name_parser.py | 2 | 21121 | #!/usr/bin/env python2.7
# Author: echel0n <echel0n@sickrage.ca>
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import unittest
from datetime import date
import sickrage
import tests
from sickrage.core.nameparser import ParseResult, NameParser, InvalidNameException, InvalidShowException
from sickrage.core.tv.show import TVShow
# The tests run outside the normal app bootstrap, so pin the encoding here.
sickrage.app.sys_encoding = 'UTF-8'
# Module-level switches for extra parser output when debugging failures.
DEBUG = VERBOSE = False
# Parser regex category -> {release name: expected ParseResult}.  A value of
# None means the name must NOT parse under that category.
simple_test_cases = {
    'standard': {
        'Mr Show Name.S00E01E02E03E04E05E06E07E08.Source.Quality.Etc-Group': ParseResult(None, 'Mr Show Name', 0,
                                                                                         [1, 2, 3, 4, 5, 6, 7, 8],
                                                                                         'Source.Quality.Etc',
                                                                                         'Group'),
        'Mr.Show.Name.S01E02.Source.Quality.Etc-Group': ParseResult(None, 'Mr Show Name', 1, [2], 'Source.Quality.Etc',
                                                                    'Group'),
        'Show.Name.S01E02': ParseResult(None, 'Show Name', 1, [2]),
        'Show Name - S01E02 - My Ep Name': ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
        'Show.1.0.Name.S01.E03.My.Ep.Name-Group': ParseResult(None, 'Show 1.0 Name', 1, [3], 'My.Ep.Name', 'Group'),
        'Show.Name.S01E02E03.Source.Quality.Etc-Group': ParseResult(None, 'Show Name', 1, [2, 3], 'Source.Quality.Etc',
                                                                    'Group'),
        'Mr. Show Name - S01E02-03 - My Ep Name': ParseResult(None, 'Mr. Show Name', 1, [2, 3], 'My Ep Name'),
        'Show.Name.S01.E02.E03': ParseResult(None, 'Show Name', 1, [2, 3]),
        'Show.Name-0.2010.S01E02.Source.Quality.Etc-Group': ParseResult(None, 'Show Name-0 2010', 1, [2],
                                                                        'Source.Quality.Etc', 'Group'),
        'S01E02 Ep Name': ParseResult(None, None, 1, [2], 'Ep Name'),
        'Show Name - S06E01 - 2009-12-20 - Ep Name': ParseResult(None, 'Show Name', 6, [1], '2009-12-20 - Ep Name'),
        'Show Name - S06E01 - -30-': ParseResult(None, 'Show Name', 6, [1], '30-'),
        'Show-Name-S06E01-720p': ParseResult(None, 'Show-Name', 6, [1], '720p'),
        'Show-Name-S06E01-1080i': ParseResult(None, 'Show-Name', 6, [1], '1080i'),
        'Show.Name.S06E01.Other.WEB-DL': ParseResult(None, 'Show Name', 6, [1], 'Other.WEB-DL'),
        'Show.Name.S06E01 Some-Stuff Here': ParseResult(None, 'Show Name', 6, [1], 'Some-Stuff Here'),
        'Show Name - S03E14-36! 24! 36! Hut! (1)': ParseResult(None, 'Show Name', 3, [14], '36! 24! 36! Hut! (1)'),
    },
    'fov': {
        'Show_Name.1x02.Source_Quality_Etc-Group': ParseResult(None, 'Show Name', 1, [2], 'Source_Quality_Etc',
                                                               'Group'),
        'Show Name 1x02': ParseResult(None, 'Show Name', 1, [2]),
        'Show Name 1x02 x264 Test': ParseResult(None, 'Show Name', 1, [2], 'x264 Test'),
        'Show Name - 1x02 - My Ep Name': ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
        'Show_Name.1x02x03x04.Source_Quality_Etc-Group': ParseResult(None, 'Show Name', 1, [2, 3, 4],
                                                                     'Source_Quality_Etc', 'Group'),
        'Show Name - 1x02-03-04 - My Ep Name': ParseResult(None, 'Show Name', 1, [2, 3, 4], 'My Ep Name'),
        '1x02 Ep Name': ParseResult(None, None, 1, [2], 'Ep Name'),
        'Show-Name-1x02-720p': ParseResult(None, 'Show-Name', 1, [2], '720p'),
        'Show-Name-1x02-1080i': ParseResult(None, 'Show-Name', 1, [2], '1080i'),
        'Show Name [05x12] Ep Name': ParseResult(None, 'Show Name', 5, [12], 'Ep Name'),
        'Show.Name.1x02.WEB-DL': ParseResult(None, 'Show Name', 1, [2], 'WEB-DL')
    },
    'standard_repeat': {
        'Show.Name.S01E02.S01E03.Source.Quality.Etc-Group': ParseResult(None, 'Show Name', 1, [2, 3],
                                                                        'Source.Quality.Etc', 'Group'),
        'Show.Name.S01E02.S01E03': ParseResult(None, 'Show Name', 1, [2, 3]),
        'Show Name - S01E02 - S01E03 - S01E04 - Ep Name': ParseResult(None, 'Show Name', 1, [2, 3, 4], 'Ep Name'),
        'Show.Name.S01E02.S01E03.WEB-DL': ParseResult(None, 'Show Name', 1, [2, 3], 'WEB-DL')
    },
    'fov_repeat': {
        'Show.Name.1x02.1x03.Source.Quality.Etc-Group': ParseResult(None, 'Show Name', 1, [2, 3], 'Source.Quality.Etc',
                                                                    'Group'),
        'Show.Name.1x02.1x03': ParseResult(None, 'Show Name', 1, [2, 3]),
        'Show Name - 1x02 - 1x03 - 1x04 - Ep Name': ParseResult(None, 'Show Name', 1, [2, 3, 4], 'Ep Name'),
        'Show.Name.1x02.1x03.WEB-DL': ParseResult(None, 'Show Name', 1, [2, 3], 'WEB-DL')
    },
    'bare': {
        'Show.Name.102.Source.Quality.Etc-Group': ParseResult(None, 'Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
        'show.name.2010.123.source.quality.etc-group': ParseResult(None, 'show name 2010', 1, [23],
                                                                   'source.quality.etc', 'group'),
        'show.name.2010.222.123.source.quality.etc-group': ParseResult(None, 'show name 2010.222', 1, [23],
                                                                       'source.quality.etc', 'group'),
        'Show.Name.102': ParseResult(None, 'Show Name', 1, [2]),
        'Show.Name.01e02': ParseResult(None, 'Show Name', 1, [2]),
        'the.event.401.hdtv-group': ParseResult(None, 'the event', 4, [1], 'hdtv', 'group'),
        'show.name.2010.special.hdtv-blah': None,
        'show.ex-name.102.hdtv-group': ParseResult(None, 'show ex-name', 1, [2], 'hdtv', 'group'),
    },
    'stupid': {
        'tpz-abc102': ParseResult(None, 'abc', 1, [2], None, 'tpz'),
        'tpz-abc.102': ParseResult(None, 'abc', 1, [2], None, 'tpz')
    },
    'no_season': {
        'Show Name - 01 - Ep Name': ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
        '01 - Ep Name': ParseResult(None, None, None, [1], 'Ep Name'),
        'Show Name - 01 - Ep Name - WEB-DL': ParseResult(None, 'Show Name', None, [1], 'Ep Name - WEB-DL')
    },
    'no_season_general': {
        'Show.Name.E23.Source.Quality.Etc-Group': ParseResult(None, 'Show Name', None, [23], 'Source.Quality.Etc',
                                                              'Group'),
        'Show Name - Episode 01 - Ep Name': ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
        'Show.Name.Part.3.Source.Quality.Etc-Group': ParseResult(None, 'Show Name', None, [3], 'Source.Quality.Etc',
                                                                 'Group'),
        'Show.Name.Part.1.and.Part.2.Blah-Group': ParseResult(None, 'Show Name', None, [1, 2], 'Blah', 'Group'),
        'Show.Name.Part.IV.Source.Quality.Etc-Group': ParseResult(None, 'Show Name', None, [4], 'Source.Quality.Etc',
                                                                  'Group'),
        'Deconstructed.E07.1080i.HDTV.DD5.1.MPEG2-TrollHD': ParseResult(None, 'Deconstructed', None, [7],
                                                                        '1080i.HDTV.DD5.1.MPEG2', 'TrollHD'),
        'Show.Name.E23.WEB-DL': ParseResult(None, 'Show Name', None, [23], 'WEB-DL'),
    },
    'no_season_multi_ep': {
        'Show.Name.E23-24.Source.Quality.Etc-Group': ParseResult(None, 'Show Name', None, [23, 24],
                                                                 'Source.Quality.Etc', 'Group'),
        'Show Name - Episode 01-02 - Ep Name': ParseResult(None, 'Show Name', None, [1, 2], 'Ep Name'),
        'Show.Name.E23-24.WEB-DL': ParseResult(None, 'Show Name', None, [23, 24], 'WEB-DL')
    },
    'season_only': {
        'Show.Name.S02.Source.Quality.Etc-Group': ParseResult(None, 'Show Name', 2, [], 'Source.Quality.Etc', 'Group'),
        'Show Name Season 2': ParseResult(None, 'Show Name', 2),
        'Season 02': ParseResult(None, None, 2)
    },
    'scene_date_format': {
        'Show.Name.2010.11.23.Source.Quality.Etc-Group': ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc',
                                                                     'Group', date(2010, 11, 23)),
        'Show Name - 2010.11.23': ParseResult(None, 'Show Name', air_date=date(2010, 11, 23)),
        'Show.Name.2010.23.11.Source.Quality.Etc-Group': ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc',
                                                                     'Group', date(2010, 11, 23)),
        'Show Name - 2010-11-23 - Ep Name': ParseResult(None, 'Show Name', extra_info='Ep Name',
                                                        air_date=date(2010, 11, 23)),
        '2010-11-23 - Ep Name': ParseResult(None, extra_info='Ep Name', air_date=date(2010, 11, 23)),
        'Show.Name.2010.11.23.WEB-DL': ParseResult(None, 'Show Name', None, [], 'WEB-DL', None, date(2010, 11, 23))
    },
}
# Same shape as simple_test_cases, but for shows flagged as anime
# (ParseResult carries a `version` field for anime releases).
anime_test_cases = {
    'anime_SxxExx': {
        'Show Name - S01E02 - Ep Name': ParseResult(None, 'Show Name', 1, [2], 'Ep Name', version=-1),
        'Show Name - S01E02-03 - My Ep Name': ParseResult(None, 'Show Name', 1, [2, 3], 'My Ep Name', version=-1),
        'Show Name - S01E02': ParseResult(None, 'Show Name', 1, [2], version=-1),
        'Show Name - S01E02-03': ParseResult(None, 'Show Name', 1, [2, 3], version=-1),
    },
}
# (full path, expected ParseResult, set of regex categories expected to match).
combination_test_cases = [
    ('/test/path/to/Season 02/03 - Ep Name.avi',
     ParseResult(None, None, 2, [3], 'Ep Name', version=-1),
     {'no_season', 'season_only'}),
    ('Show.Name.S02.Source.Quality.Etc-Group/tpz-sn203.avi',
     ParseResult(None, 'Show Name', 2, [3], 'Source.Quality.Etc', 'Group', version=-1),
     {'stupid', 'season_only'}),
    ('MythBusters.S08E16.720p.HDTV.x264-aAF/aaf-mb.s08e16.720p.mkv',
     ParseResult(None, 'MythBusters', 8, [16], '720p.HDTV.x264', 'aAF', version=-1),
     {'standard'}),
    (
        '/home/drop/storage/TV/Terminator The Sarah Connor Chronicles/Season 2/S02E06 The Tower is Tall, But the Fall is Short.mkv',
        ParseResult(None, None, 2, [6], 'The Tower is Tall, But the Fall is Short', version=-1),
        {'standard', 'season_only'}),
    (r'/Test/TV/Jimmy Fallon/Season 2/Jimmy Fallon - 2010-12-15 - blah.avi',
     ParseResult(None, 'Jimmy Fallon', extra_info='blah', air_date=date(2010, 12, 15), version=-1),
     {'scene_date_format', 'season_only'}),
    (r'/X/30 Rock/Season 4/30 Rock - 4x22 -.avi',
     ParseResult(None, '30 Rock', 4, [22], version=-1),
     {'fov', 'season_only'}),
    ('Season 2\\Show Name - 03-04 - Ep Name.avi',
     ParseResult(None, 'Show Name', 2, [3, 4], extra_info='Ep Name', version=-1),
     {'no_season', 'season_only'}),
    ('Season 02\\03-04-05 - Ep Name.avi',
     ParseResult(None, None, 2, [3, 4, 5], extra_info='Ep Name', version=-1),
     {'no_season', 'season_only'}),
]
# Release names containing non-ASCII characters; only extra_info is compared.
unicode_test_cases = [
    ('The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
     ParseResult(None, 'The.Big.Bang.Theory', 2, [7],
                 'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3',
                 'SHELDON')),
    ('The.Big.Bang.Theory.2x07.The.Panty.Pi\xc3\xb1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
     ParseResult(None, 'The.Big.Bang.Theory', 2, [7],
                 'The.Panty.Pi\xc3\xb1ata.Polarization.720p.HDTV.x264.AC3',
                 'SHELDON'))
]
# Names that must raise InvalidNameException/InvalidShowException when parsed.
failure_cases = ['7sins-jfcs01e09-720p-bluray-x264']
class UnicodeTests(tests.SiCKRAGETestDBCase):
    """Checks that names with non-ASCII characters parse without raising."""

    def setUp(self):
        super(UnicodeTests, self).setUp()
        self.show = TVShow(1, 1, 'en')
        self.show.name = "The Big Bang Theory"
        self.show.save_to_db()

    def _test_unicode(self, name, result):
        parser = NameParser(True, showObj=self.show, validate_show=False)
        parsed = parser.parse(name)
        # Rendering the result must not blow up on non-ASCII characters.
        repr(str(parsed))
        self.assertEqual(parsed.extra_info, result.extra_info)

    def test_unicode(self):
        for name, expected in unicode_test_cases:
            self._test_unicode(name, expected)
class FailureCaseTests(tests.SiCKRAGETestDBCase):
    """Names listed in ``failure_cases`` must be rejected by the parser."""

    @staticmethod
    def _test_name(name):
        parser = NameParser(True)
        try:
            result = parser.parse(name)
        except (InvalidNameException, InvalidShowException):
            # Rejection is the expected outcome for these names.
            return True
        if VERBOSE:
            print('Actual: ', result.which_regex, result)
        return False

    def test_failures(self):
        for bad_name in failure_cases:
            self.assertTrue(self._test_name(bad_name))
class ComboTests(tests.SiCKRAGETestDBCase):
    """Exercises names that must match an exact combination of regexes."""

    def _test_combo(self, name, expected, expected_regexes):
        if VERBOSE:
            print()
            print('Testing', name)
        parser = NameParser(True, validate_show=False)
        actual = parser.parse(name)
        if DEBUG:
            print(actual, actual.which_regex)
            print(expected, expected_regexes)
        self.assertEqual(actual, expected)
        for regex_name in expected_regexes:
            self.assertTrue(regex_name in actual.which_regex)
        # No extra regexes may have matched beyond the expected set.
        self.assertEqual(len(expected_regexes), len(actual.which_regex))

    def test_combos(self):
        for name, expected, expected_regexes in combination_test_cases:
            # Normalise the paths. Converts UNIX-style paths into Windows-style
            # paths when test is run on Windows.
            self._test_combo(os.path.normpath(name), expected, expected_regexes)
class AnimeTests(tests.SiCKRAGETestDBCase):
    """Parser tests for shows flagged as anime."""

    def setUp(self):
        super(AnimeTests, self).setUp()
        self.show = TVShow(1, 1, 'en')
        self.show.anime = True
        self.show.save_to_db()

    def _test_names(self, np, section, transform=None, verbose=False):
        """
        Run every case in ``anime_test_cases[section]`` through *np*.

        :param np: NameParser instance to use for the test
        :param section: key into ``anime_test_cases`` selecting the cases
        :param transform: optional callable applied to each raw test name
            (e.g. appending a file extension) before parsing
        :param verbose: force per-case progress output even when VERBOSE is off
        """
        if VERBOSE or verbose:
            print()
            print('Running', section, 'tests')
        for cur_test_base in anime_test_cases[section]:
            if transform:
                cur_test = transform(cur_test_base)
                np.file_name = cur_test
            else:
                cur_test = cur_test_base
            if VERBOSE or verbose:
                print('Testing', cur_test)
            result = anime_test_cases[section][cur_test_base]
            np.showObj.name = result.series_name if result else None
            if not result:
                self.assertRaises(InvalidNameException, np.parse, cur_test)
                # BUG FIX: this was ``return``, which silently skipped every
                # case after the first expected-failure entry; keep iterating.
                continue
            result.which_regex = {section}
            test_result = np.parse(cur_test)
            if DEBUG or verbose:
                print('air_by_date:', test_result.is_air_by_date, 'air_date:', test_result.air_date)
                print('anime:', test_result.is_anime, 'ab_episode_numbers:', test_result.ab_episode_numbers)
                print(test_result)
                print(result)
            self.assertEqual(test_result.which_regex, {section},
                             '{} : {} != {}'.format(cur_test, test_result.which_regex, {section}))
            self.assertEqual(test_result, result, '{} : {} != {}'.format(cur_test, test_result, result))

    def test_anime_sxxexx_file_names(self):
        """
        Test anime SxxExx file names
        """
        np = NameParser(showObj=self.show, validate_show=False)
        self._test_names(np, 'anime_SxxExx', lambda x: x + '.avi')
class BasicTests(tests.SiCKRAGETestDBCase):
    """Parser tests covering each supported (non-anime) naming convention."""

    def setUp(self):
        super(BasicTests, self).setUp()
        self.show = TVShow(1, 1, 'en')
        self.show.save_to_db()

    def _test_names(self, np, section, transform=None, verbose=False):
        """
        Run every case in ``simple_test_cases[section]`` through *np*.

        :param np: NameParser instance to use for the test
        :param section: key into ``simple_test_cases`` selecting the cases
        :param transform: optional callable applied to each raw test name
            (e.g. appending a file extension) before parsing
        :param verbose: force per-case progress output even when VERBOSE is off
        """
        if VERBOSE or verbose:
            print('Running', section, 'tests')
        for cur_test_base in simple_test_cases[section]:
            if transform:
                cur_test = transform(cur_test_base)
                np.file_name = cur_test
            else:
                cur_test = cur_test_base
            if VERBOSE or verbose:
                print('Testing', cur_test)
            result = simple_test_cases[section][cur_test_base]
            np.showObj.name = result.series_name if result else None
            if not result:
                self.assertRaises(InvalidNameException, np.parse, cur_test)
                # BUG FIX: this was ``return``, which silently skipped every
                # case after the first expected-failure entry; keep iterating.
                continue
            result.which_regex = {section}
            test_result = np.parse(cur_test)
            if DEBUG or verbose:
                print('air_by_date:', test_result.is_air_by_date, 'air_date:', test_result.air_date)
                print('anime:', test_result.is_anime, 'ab_episode_numbers:', test_result.ab_episode_numbers)
                print(test_result)
                print(result)
            self.assertEqual(test_result.which_regex, {section})
            self.assertEqual(str(test_result), str(result))

    def test_standard_names(self):
        np = NameParser(True, showObj=self.show, validate_show=False)
        self._test_names(np, 'standard')

    def test_standard_repeat_names(self):
        np = NameParser(False, showObj=self.show, validate_show=False)
        self._test_names(np, 'standard_repeat')

    def test_fov_names(self):
        np = NameParser(False, showObj=self.show, validate_show=False)
        self._test_names(np, 'fov')

    def test_fov_repeat_names(self):
        np = NameParser(False, showObj=self.show, validate_show=False)
        self._test_names(np, 'fov_repeat')

    def test_stupid_names(self):
        np = NameParser(False, showObj=self.show, validate_show=False)
        self._test_names(np, 'stupid')

    def test_no_season_names(self):
        np = NameParser(False, showObj=self.show, validate_show=False)
        self._test_names(np, 'no_season')

    def test_no_season_general_names(self):
        np = NameParser(False, showObj=self.show, validate_show=False)
        self._test_names(np, 'no_season_general')

    def test_no_season_multi_ep_names(self):
        np = NameParser(False, showObj=self.show, validate_show=False)
        self._test_names(np, 'no_season_multi_ep')

    def test_season_only_names(self):
        np = NameParser(False, showObj=self.show, validate_show=False)
        self._test_names(np, 'season_only')

    def test_standard_file_names(self):
        np = NameParser(showObj=self.show, validate_show=False)
        self._test_names(np, 'standard', lambda x: x + '.avi')

    def test_standard_repeat_file_names(self):
        np = NameParser(showObj=self.show, validate_show=False)
        self._test_names(np, 'standard_repeat', lambda x: x + '.avi')

    def test_fov_file_names(self):
        np = NameParser(showObj=self.show, validate_show=False)
        self._test_names(np, 'fov', lambda x: x + '.avi')

    def test_fov_repeat_file_names(self):
        np = NameParser(showObj=self.show, validate_show=False)
        self._test_names(np, 'fov_repeat', lambda x: x + '.avi')

    def test_stupid_file_names(self):
        np = NameParser(showObj=self.show, validate_show=False)
        self._test_names(np, 'stupid', lambda x: x + '.avi')

    def test_no_season_file_names(self):
        np = NameParser(showObj=self.show, validate_show=False)
        self._test_names(np, 'no_season', lambda x: x + '.avi')

    def test_no_season_general_file_names(self):
        np = NameParser(showObj=self.show, validate_show=False)
        self._test_names(np, 'no_season_general', lambda x: x + '.avi')

    def test_no_season_multi_ep_file_names(self):
        np = NameParser(showObj=self.show, validate_show=False)
        self._test_names(np, 'no_season_multi_ep', lambda x: x + '.avi')

    def test_season_only_file_names(self):
        np = NameParser(showObj=self.show, validate_show=False)
        self._test_names(np, 'season_only', lambda x: x + '.avi')

    def test_combination_names(self):
        # Combination cases are exercised by ComboTests instead.
        pass
if __name__ == '__main__':
    # Allow the suite to be run directly: python <this file>
    print("==================")
    print("STARTING - NAME PARSER TESTS")
    print("==================")
    print("######################################################################")
    unittest.main()
| gpl-3.0 |
youknowone/instantauth | python/instantauth/cryptors/aes.py | 1 | 2071 |
"""
Install 'pycrypto' package to use this module
"""
from Crypto.Cipher import AES
from . import BaseCryptor
def add_padding(data, block_size):
    """Append PKCS#7-style padding so len(result) is a multiple of block_size.

    Each pad character encodes the pad length. A full block of padding is
    appended when the input already fills a block, so padding is always
    present and unambiguous to strip.
    """
    pad_len = block_size - (len(data) % block_size)
    return data + chr(pad_len) * pad_len
def strip_padding(data):
    """Remove padding added by add_padding.

    The final character encodes how many pad characters to drop.
    """
    return data[:-ord(data[-1])]
def cut_key(key, key_size):
    """Fit *key* to exactly *key_size* characters.

    A short key is right-padded with NUL characters; a long key is truncated.
    Replaces the old grow-in-a-loop version, which appended a whole
    key_size worth of NULs per iteration only to slice them off again.
    """
    return key.ljust(key_size, chr(0))[:key_size]
class AESCryptor(BaseCryptor):
    """AES-CBC cryptor backed by PyCrypto.

    Streams are encrypted with the fixed IV supplied at construction time;
    per-payload data derives its IV from the caller's private key instead.
    NOTE(review): ``bits / 8`` relies on Python 2 integer division — under
    Python 3 it would produce a float key size; confirm before porting.
    """
    # AES block size in bytes; fixed regardless of key length.
    BLOCK_SIZE = 16
    def __init__(self, bits=128, iv=''): # iv is useless for temporary data in usual case
        if not bits in (128, 192, 256):
            raise ValueError(bits) # make one
        # Key size in bytes (Python 2 integer division).
        self.key_size = bits / 8
        self.iv = cut_key(iv, self.BLOCK_SIZE)
    def encrypt_stream(self, stream, secret_key):
        """Encrypt *stream* with AES-CBC using the instance-wide IV."""
        secret_key = cut_key(secret_key, self.key_size)
        cipher = AES.new(secret_key, AES.MODE_CBC, self.iv)
        padded = add_padding(stream, self.BLOCK_SIZE)
        encrypted = cipher.encrypt(padded)
        return encrypted
    def decrypt_stream(self, stream, secret_key):
        """Decrypt a stream produced by encrypt_stream and strip its padding."""
        secret_key = cut_key(secret_key, self.key_size)
        cipher = AES.new(secret_key, AES.MODE_CBC, self.iv)
        decrypted = cipher.decrypt(stream)
        return strip_padding(decrypted)
    def encrypt_data(self, data, private_key, secret_key):
        """Encrypt *data* using an IV derived from *private_key*."""
        secret_key = cut_key(secret_key, self.key_size)
        # Per-payload IV: the private key cut to one block.
        iv = cut_key(private_key, self.BLOCK_SIZE)
        cipher = AES.new(secret_key, AES.MODE_CBC, iv)
        padded = add_padding(data, self.BLOCK_SIZE)
        encrypted = cipher.encrypt(padded)
        return encrypted
    def decrypt_data(self, data, private_key, secret_key):
        """Decrypt data produced by encrypt_data with the same key pair."""
        secret_key = cut_key(secret_key, self.key_size)
        iv = cut_key(private_key, self.BLOCK_SIZE)
        cipher = AES.new(secret_key, AES.MODE_CBC, iv)
        decrypted = cipher.decrypt(data)
        return strip_padding(decrypted)
| bsd-2-clause |
jdar/phantomjs-modified | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/environment_unittest.py | 124 | 1853 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from .environment import Environment
class EnvironmentTest(unittest.TestCase):
    def test_disable_gcc_smartquotes(self):
        # Disabling smartquotes must pin LC_ALL to the C locale so gcc
        # emits plain ASCII quotes in its diagnostics.
        env_obj = Environment({})
        env_obj.disable_gcc_smartquotes()
        as_dict = env_obj.to_dictionary()
        self.assertEqual(as_dict['LC_ALL'], 'C')
| bsd-3-clause |
HadiOfBBG/pegasusrises | gdata/docs/service.py | 53 | 23149 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DocsService extends the GDataService to streamline Google Documents
operations.
DocsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DocumentQuery: Queries a Google Document list feed.
DocumentAclQuery: Queries a Google Document Acl feed.
"""
__author__ = ('api.jfisher (Jeff Fisher), '
'e.bidelman (Eric Bidelman)')
import re
import atom
import gdata.service
import gdata.docs
import urllib
# XML Namespaces used in Google Documents entities.
DATA_KIND_SCHEME = gdata.GDATA_NAMESPACE + '#kind'
# Labels naming the kind of a Document List entry.
DOCUMENT_LABEL = 'document'
SPREADSHEET_LABEL = 'spreadsheet'
PRESENTATION_LABEL = 'presentation'
FOLDER_LABEL = 'folder'
PDF_LABEL = 'pdf'
# Category terms used to tag/filter entries in the feed.
LABEL_SCHEME = gdata.GDATA_NAMESPACE + '/labels'
STARRED_LABEL_TERM = LABEL_SCHEME + '#starred'
TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed'
HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden'
MINE_LABEL_TERM = LABEL_SCHEME + '#mine'
PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private'
SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain'
VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed'
# Scheme prefix for per-user named-folder categories (see DocumentQuery).
FOLDERS_SCHEME_PREFIX = gdata.docs.DOCUMENTS_NAMESPACE + '/folders/'
# File extensions of documents that are permitted to be uploaded or downloaded,
# mapped (upper-cased) to their MIME content types.
SUPPORTED_FILETYPES = {
    'CSV': 'text/csv',
    'TSV': 'text/tab-separated-values',
    'TAB': 'text/tab-separated-values',
    'DOC': 'application/msword',
    'DOCX': ('application/vnd.openxmlformats-officedocument.'
             'wordprocessingml.document'),
    'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet',
    'ODT': 'application/vnd.oasis.opendocument.text',
    'RTF': 'application/rtf',
    'SXW': 'application/vnd.sun.xml.writer',
    'TXT': 'text/plain',
    'XLS': 'application/vnd.ms-excel',
    'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    'PDF': 'application/pdf',
    'PNG': 'image/png',
    'PPT': 'application/vnd.ms-powerpoint',
    'PPS': 'application/vnd.ms-powerpoint',
    'HTM': 'text/html',
    'HTML': 'text/html',
    'ZIP': 'application/zip',
    'SWF': 'application/x-shockwave-flash'
}
class DocsService(gdata.service.GDataService):
  """Client extension for the Google Documents service Document List feed."""
  # Matches a trailing file extension of three or more letters ('name.docx').
  __FILE_EXT_PATTERN = re.compile('.*\.([a-zA-Z]{3,}$)')
  # Matches a resource id such as 'document:123' or URL-escaped 'document%3A123';
  # group(1) is the kind label, group(3) the bare document id.
  __RESOURCE_ID_PATTERN = re.compile('^([a-z]*)(:|%3A)([\w-]*)$')
  def __init__(self, email=None, password=None, source=None,
               server='docs.google.com', additional_headers=None, **kwargs):
    """Creates a client for the Google Documents service.
    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'docs.google.com'.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='writely', source=source,
        server=server, additional_headers=additional_headers, **kwargs)
    # Always talk to the service over HTTPS.
    self.ssl = True
  def _MakeKindCategory(self, label):
    # Builds the atom 'kind' category for an entry; a None label means no kind.
    if label is None:
      return None
    return atom.Category(scheme=DATA_KIND_SCHEME,
        term=gdata.docs.DOCUMENTS_NAMESPACE + '#' + label, label=label)
  def _MakeContentLinkFromId(self, resource_id):
    # Derives the export/download URL for a resource id like 'document:123'.
    match = self.__RESOURCE_ID_PATTERN.match(resource_id)
    label = match.group(1)
    doc_id = match.group(3)
    if label == DOCUMENT_LABEL:
      return '/feeds/download/documents/Export?docId=%s' % doc_id
    if label == PRESENTATION_LABEL:
      return '/feeds/download/presentations/Export?docId=%s' % doc_id
    if label == SPREADSHEET_LABEL:
      # Spreadsheets export from a different host than docs/presentations.
      return ('https://spreadsheets.google.com/feeds/download/spreadsheets/'
              'Export?key=%s' % doc_id)
    raise ValueError, 'Invalid resource id: %s' % resource_id
  def _UploadFile(self, media_source, title, category, folder_or_uri=None):
    """Uploads a file to the Document List feed.
    Args:
      media_source: A gdata.MediaSource object containing the file to be
          uploaded.
      title: string The title of the document on the server after being
          uploaded.
      category: An atom.Category object specifying the appropriate document
          type.
      folder_or_uri: DocumentListEntry or string (optional) An object with a
          link to a folder or a uri to a folder to upload to.
          Note: A valid uri for a folder is of the form:
                /feeds/folders/private/full/folder%3Afolder_id
    Returns:
      A DocumentListEntry containing information about the document created on
      the Google Documents service.
    """
    if folder_or_uri:
      try:
        # Accept either an entry (use its folder content link) or a raw uri.
        uri = folder_or_uri.content.src
      except AttributeError:
        uri = folder_or_uri
    else:
      uri = '/feeds/documents/private/full'
    entry = gdata.docs.DocumentListEntry()
    entry.title = atom.Title(text=title)
    if category is not None:
      entry.category.append(category)
    entry = self.Post(entry, uri, media_source=media_source,
                      extra_headers={'Slug': media_source.file_name},
                      converter=gdata.docs.DocumentListEntryFromString)
    return entry
  def _DownloadFile(self, uri, file_path):
    """Downloads a file.
    Args:
      uri: string The full Export URL to download the file from.
      file_path: string The full path to save the file to.
    Raises:
      RequestError: on error response from server.
    """
    server_response = self.request('GET', uri)
    response_body = server_response.read()
    # Manually follow up to five 302 redirects before giving up.
    timeout = 5
    while server_response.status == 302 and timeout > 0:
      server_response = self.request('GET',
                                     server_response.getheader('Location'))
      response_body = server_response.read()
      timeout -= 1
    if server_response.status != 200:
      raise gdata.service.RequestError, {'status': server_response.status,
                                         'reason': server_response.reason,
                                         'body': response_body}
    f = open(file_path, 'wb')
    f.write(response_body)
    f.flush()
    f.close()
  def MoveIntoFolder(self, source_entry, folder_entry):
    """Moves a document into a folder in the Document List Feed.
    Args:
      source_entry: DocumentListEntry An object representing the source
          document/folder.
      folder_entry: DocumentListEntry An object with a link to the destination
          folder.
    Returns:
      A DocumentListEntry containing information about the document created on
      the Google Documents service.
    """
    entry = gdata.docs.DocumentListEntry()
    entry.id = source_entry.id
    entry = self.Post(entry, folder_entry.content.src,
                      converter=gdata.docs.DocumentListEntryFromString)
    return entry
  def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString):
    """Queries the Document List feed and returns the resulting feed of
       entries.
    Args:
      uri: string The full URI to be queried. This can contain query
           parameters, a hostname, or simply the relative path to a Document
           List feed. The DocumentQuery object is useful when constructing
           query parameters.
      converter: func (optional) A function which will be executed on the
          retrieved item, generally to render it into a Python object.
          By default the DocumentListFeedFromString function is used to
          return a DocumentListFeed object. This is because most feed
          queries will result in a feed and not a single entry.
    """
    return self.Get(uri, converter=converter)
  def QueryDocumentListFeed(self, uri):
    """Retrieves a DocumentListFeed by retrieving a URI based off the Document
       List feed, including any query parameters. A DocumentQuery object can
       be used to construct these parameters.
    Args:
      uri: string The URI of the feed being retrieved possibly with query
           parameters.
    Returns:
      A DocumentListFeed object representing the feed returned by the server.
    """
    return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString)
  def GetDocumentListEntry(self, uri):
    """Retrieves a particular DocumentListEntry by its unique URI.
    Args:
      uri: string The unique URI of an entry in a Document List feed.
    Returns:
      A DocumentListEntry object representing the retrieved entry.
    """
    return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString)
  def GetDocumentListFeed(self, uri=None):
    """Retrieves a feed containing all of a user's documents.
    Args:
      uri: string A full URI to query the Document List feed.
    """
    if not uri:
      # Default to the standard private/full Document List feed.
      uri = gdata.docs.service.DocumentQuery().ToUri()
    return self.QueryDocumentListFeed(uri)
  def GetDocumentListAclEntry(self, uri):
    """Retrieves a particular DocumentListAclEntry by its unique URI.
    Args:
      uri: string The unique URI of an entry in a Document List feed.
    Returns:
      A DocumentListAclEntry object representing the retrieved entry.
    """
    return self.Get(uri, converter=gdata.docs.DocumentListAclEntryFromString)
  def GetDocumentListAclFeed(self, uri):
    """Retrieves a feed containing all of a user's documents.
    Args:
      uri: string The URI of a document's Acl feed to retrieve.
    Returns:
      A DocumentListAclFeed object representing the ACL feed
      returned by the server.
    """
    return self.Get(uri, converter=gdata.docs.DocumentListAclFeedFromString)
  def Upload(self, media_source, title, folder_or_uri=None, label=None):
    """Uploads a document inside of a MediaSource object to the Document List
       feed with the given title.
    Args:
      media_source: MediaSource The gdata.MediaSource object containing a
          document file to be uploaded.
      title: string The title of the document on the server after being
          uploaded.
      folder_or_uri: DocumentListEntry or string (optional) An object with a
          link to a folder or a uri to a folder to upload to.
          Note: A valid uri for a folder is of the form:
                /feeds/folders/private/full/folder%3Afolder_id
      label: optional label describing the type of the document to be created.
    Returns:
      A DocumentListEntry containing information about the document created
      on the Google Documents service.
    """
    return self._UploadFile(media_source, title, self._MakeKindCategory(label),
                            folder_or_uri)
  def Download(self, entry_or_id_or_url, file_path, export_format=None,
               gid=None, extra_params=None):
    """Downloads a document from the Document List.
    Args:
      entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
          or a url to download from (such as the content src).
      file_path: string The full path to save the file to.
      export_format: the format to convert to, if conversion is required.
      gid: grid id, for downloading a single grid of a spreadsheet
      extra_params: a map of any further parameters to control how the document
          is downloaded
    Raises:
      RequestError if the service does not respond with success
    """
    if isinstance(entry_or_id_or_url, gdata.docs.DocumentListEntry):
      url = entry_or_id_or_url.content.src
    else:
      if self.__RESOURCE_ID_PATTERN.match(entry_or_id_or_url):
        # A bare resource id: derive the export URL from it.
        url = self._MakeContentLinkFromId(entry_or_id_or_url)
      else:
        url = entry_or_id_or_url
    if export_format is not None:
      if url.find('/Export?') == -1:
        raise gdata.service.Error, ('This entry cannot be exported '
                                    'as a different format')
      url += '&exportFormat=%s' % export_format
    if gid is not None:
      # Grid (worksheet) selection only applies to spreadsheet exports.
      if url.find('spreadsheets') == -1:
        raise gdata.service.Error, 'grid id param is not valid for this entry'
      url += '&gid=%s' % gid
    if extra_params:
      url += '&' + urllib.urlencode(extra_params)
    self._DownloadFile(url, file_path)
  def Export(self, entry_or_id_or_url, file_path, gid=None, extra_params=None):
    """Downloads a document from the Document List in a different format.
    Args:
      entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
          or a url to download from (such as the content src).
      file_path: string The full path to save the file to.  The export
          format is inferred from the the file extension.
      gid: grid id, for downloading a single grid of a spreadsheet
      extra_params: a map of any further parameters to control how the document
          is downloaded
    Raises:
      RequestError if the service does not respond with success
    """
    ext = None
    # Infer the export format from the destination file's extension.
    match = self.__FILE_EXT_PATTERN.match(file_path)
    if match:
      ext = match.group(1)
    self.Download(entry_or_id_or_url, file_path, ext, gid, extra_params)
  def CreateFolder(self, title, folder_or_uri=None):
    """Creates a folder in the Document List feed.
    Args:
      title: string The title of the folder on the server after being created.
      folder_or_uri: DocumentListEntry or string (optional) An object with a
          link to a folder or a uri to a folder to upload to.
          Note: A valid uri for a folder is of the form:
                /feeds/folders/private/full/folder%3Afolder_id
    Returns:
      A DocumentListEntry containing information about the folder created on
      the Google Documents service.
    """
    if folder_or_uri:
      try:
        # Accept either an entry (use its folder content link) or a raw uri.
        uri = folder_or_uri.content.src
      except AttributeError:
        uri = folder_or_uri
    else:
      uri = '/feeds/documents/private/full'
    folder_entry = gdata.docs.DocumentListEntry()
    folder_entry.title = atom.Title(text=title)
    folder_entry.category.append(self._MakeKindCategory(FOLDER_LABEL))
    folder_entry = self.Post(folder_entry, uri,
                             converter=gdata.docs.DocumentListEntryFromString)
    return folder_entry
  def MoveOutOfFolder(self, source_entry):
    """Moves a document into a folder in the Document List Feed.
    Args:
      source_entry: DocumentListEntry An object representing the source
          document/folder.
    Returns:
      True if the entry was moved out.
    """
    return self.Delete(source_entry.GetEditLink().href)
  # Deprecated methods
  #@atom.deprecated('Please use Upload instead')
  def UploadPresentation(self, media_source, title, folder_or_uri=None):
    """Uploads a presentation inside of a MediaSource object to the Document
       List feed with the given title.
    This method is deprecated, use Upload instead.
    Args:
      media_source: MediaSource The MediaSource object containing a
          presentation file to be uploaded.
      title: string The title of the presentation on the server after being
          uploaded.
      folder_or_uri: DocumentListEntry or string (optional) An object with a
          link to a folder or a uri to a folder to upload to.
          Note: A valid uri for a folder is of the form:
                /feeds/folders/private/full/folder%3Afolder_id
    Returns:
      A DocumentListEntry containing information about the presentation created
      on the Google Documents service.
    """
    return self._UploadFile(
        media_source, title, self._MakeKindCategory(PRESENTATION_LABEL),
        folder_or_uri=folder_or_uri)
  # Wrap so callers get a deprecation warning (decorator syntax avoided for
  # compatibility with older Python versions).
  UploadPresentation = atom.deprecated('Please use Upload instead')(
      UploadPresentation)
  #@atom.deprecated('Please use Upload instead')
  def UploadSpreadsheet(self, media_source, title, folder_or_uri=None):
    """Uploads a spreadsheet inside of a MediaSource object to the Document
       List feed with the given title.
    This method is deprecated, use Upload instead.
    Args:
      media_source: MediaSource The MediaSource object containing a spreadsheet
          file to be uploaded.
      title: string The title of the spreadsheet on the server after being
          uploaded.
      folder_or_uri: DocumentListEntry or string (optional) An object with a
          link to a folder or a uri to a folder to upload to.
          Note: A valid uri for a folder is of the form:
                /feeds/folders/private/full/folder%3Afolder_id
    Returns:
      A DocumentListEntry containing information about the spreadsheet created
      on the Google Documents service.
    """
    return self._UploadFile(
        media_source, title, self._MakeKindCategory(SPREADSHEET_LABEL),
        folder_or_uri=folder_or_uri)
  UploadSpreadsheet = atom.deprecated('Please use Upload instead')(
      UploadSpreadsheet)
  #@atom.deprecated('Please use Upload instead')
  def UploadDocument(self, media_source, title, folder_or_uri=None):
    """Uploads a document inside of a MediaSource object to the Document List
       feed with the given title.
    This method is deprecated, use Upload instead.
    Args:
      media_source: MediaSource The gdata.MediaSource object containing a
          document file to be uploaded.
      title: string The title of the document on the server after being
          uploaded.
      folder_or_uri: DocumentListEntry or string (optional) An object with a
          link to a folder or a uri to a folder to upload to.
          Note: A valid uri for a folder is of the form:
                /feeds/folders/private/full/folder%3Afolder_id
    Returns:
      A DocumentListEntry containing information about the document created
      on the Google Documents service.
    """
    return self._UploadFile(
        media_source, title, self._MakeKindCategory(DOCUMENT_LABEL),
        folder_or_uri=folder_or_uri)
  UploadDocument = atom.deprecated('Please use Upload instead')(
      UploadDocument)
  """Calling any of these functions is the same as calling Export"""
  DownloadDocument = atom.deprecated('Please use Export instead')(Export)
  DownloadPresentation = atom.deprecated('Please use Export instead')(Export)
  DownloadSpreadsheet = atom.deprecated('Please use Export instead')(Export)
  """Calling any of these functions is the same as calling MoveIntoFolder"""
  MoveDocumentIntoFolder = atom.deprecated(
      'Please use MoveIntoFolder instead')(MoveIntoFolder)
  MovePresentationIntoFolder = atom.deprecated(
      'Please use MoveIntoFolder instead')(MoveIntoFolder)
  MoveSpreadsheetIntoFolder = atom.deprecated(
      'Please use MoveIntoFolder instead')(MoveIntoFolder)
  MoveFolderIntoFolder = atom.deprecated(
      'Please use MoveIntoFolder instead')(MoveIntoFolder)
class DocumentQuery(gdata.service.Query):
  """Builds URIs for querying the Google Document List feed."""
  def __init__(self, feed='/feeds/documents', visibility='private',
               projection='full', text_query=None, params=None,
               categories=None):
    """Constructor for Document List Query
    Args:
      feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
      visibility: string (optional) Feed visibility ('private' by default).
      projection: string (optional) Feed projection ('full' by default).
      text_query: string (optional) Full-text query; URL escaped when the
          query is converted to a URI.
      params: dict (optional) Extra key/value pairs appended as URL params.
      categories: list (optional) Category strings to include as query
          categories (see gdata.service.Query).
    Yields:
      A DocumentQuery object used to construct a URI based on the Document
      List feed.
    """
    self.projection = projection
    self.visibility = visibility
    gdata.service.Query.__init__(self, feed, text_query, params, categories)
  def ToUri(self):
    """Generates the URI for this query.

    Temporarily expands self.feed with the visibility and projection
    segments, delegates to the base class, then restores the original feed.
    Returns:
      A string containing the URI used to retrieve entries from the
      Document List feed.
    """
    saved_feed = self.feed
    self.feed = '/'.join((saved_feed, self.visibility, self.projection))
    full_uri = gdata.service.Query.ToUri(self)
    self.feed = saved_feed
    return full_uri
  def AddNamedFolder(self, email, folder_name):
    """Adds a schema-qualified named-folder category to the query.

    Qualifying with the owner's email avoids collisions with other
    categories that happen to share the folder's name.
    Args:
      email: string The email of the user who owns the folder.
      folder_name: string The name of the folder.
    Returns:
      The string of the category that was added to the object.
    """
    folder_category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
    self.categories.append(folder_category)
    return folder_category
  def RemoveNamedFolder(self, email, folder_name):
    """Removes a schema-qualified named-folder category from the query.
    Args:
      email: string The email of the user who owns the folder.
      folder_name: string The name of the folder.
    Returns:
      The string of the category that was removed to the object.
    """
    folder_category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
    self.categories.remove(folder_category)
    return folder_category
class DocumentAclQuery(gdata.service.Query):
  """Builds the URI used to query a single document's ACL feed."""
  def __init__(self, resource_id, feed='/feeds/acl/private/full'):
    """Constructor for Document ACL Query
    Args:
      resource_id: string The resource id. (e.g. 'document%3Adocument_id',
          'spreadsheet%3Aspreadsheet_id', etc.)
      feed: string (optional) The path for the feed.
          (e.g. '/feeds/acl/private/full')
    Yields:
      A DocumentAclQuery object used to construct a URI based on the
      Document ACL feed.
    """
    self.resource_id = resource_id
    gdata.service.Query.__init__(self, feed)
  def ToUri(self):
    """Generates the ACL feed URI for the configured resource.
    Returns:
      A string containing the URI used to retrieve entries from the
      Document ACL feed.
    """
    # Append the resource id as the final path segment of the base query URI.
    return '/'.join((gdata.service.Query.ToUri(self), self.resource_id))
| apache-2.0 |
Jumpscale/web | pythonlib/wtforms/ext/i18n/form.py | 38 | 1560 | import warnings
from wtforms import form
from wtforms.ext.i18n.utils import get_translations
# Cache of gettext translations objects, keyed by language tuple (or None),
# shared by all Form instances to avoid re-loading the same catalogs.
translations_cache = {}
class Form(form.Form):
    """
    Localized WTForms form (deprecated shim).

    **NOTE** i18n support now lives in WTForms core, so this class only
    exists for backward compatibility and will disappear in WTForms 3.0.

    A stdlib gettext translations object is selected for the form, using
    locale information from the environment by default. To pin the
    languages instead, either override the LANGUAGES class variable with a
    priority-ordered sequence of language codes, e.g.::

        LANGUAGES = ['en_GB', 'en']

    or pass `LANGUAGES=` to the form's constructor.

    Translations objects are cached per language tuple so repeated
    instantiation does not re-load the same catalogs.
    """
    LANGUAGES = None

    def __init__(self, *args, **kwargs):
        warnings.warn('i18n is now in core, wtforms.ext.i18n will be removed in WTForms 3.0', DeprecationWarning)
        try:
            self.LANGUAGES = kwargs.pop('LANGUAGES')
        except KeyError:
            pass
        super(Form, self).__init__(*args, **kwargs)

    def _get_translations(self):
        if self.LANGUAGES:
            languages = tuple(self.LANGUAGES)
        else:
            languages = self.meta.locales or None
        try:
            return translations_cache[languages]
        except KeyError:
            translations = translations_cache[languages] = get_translations(languages)
            return translations
| apache-2.0 |
h2oai/h2o-dev | py2/jenkins_h2o_port_allocate.py | 30 | 8319 | #!/usr/bin/python
# "Avoid locker or centralized resource by hard-wiring the port mapping within range"
# "implied by max # of ports used per job, max # of executors per machine, and # of machines."
# "Map of source determines port. in/out using env variables"
print "\njenkins_h2o_port_allocate...."
import socket, os, subprocess
# Machines that participate in the jenkins groups.  The index of a hostname in
# this list determines its port slot, so APPEND new machines at the end --
# reordering shifts every machine's allocated port range.
USED_HOSTNAMES = [
    'mr-0xb1',
    'mr-0xb4',
    'mr-0x2',
    'mr-0x3',
    'mr-0x4',
    'mr-0x5',
    'mr-0x6',
    'mr-0x7',
    'mr-0x8',
    'mr-0x9',
    'mr-0x10',
    'mr-0xd4',
    'mr-0xd5',
    'mr-0xd6',
    'mr-0xd7',
    'mr-0xd8',
    'mr-0xd9',
    'mr-0xd10',
    'Kevin-Ubuntu3',
]

# maximum number of ports a job uses 10 = 5 jvms * 2 ports per h2o jvm (current max known)
PORTS_PER_SLOT = 10
DEFAULT_BASE_PORT = 54340
# max number of jenkins executors per machine (EXECUTOR_NUMBER ranges 0..7)
EXECUTOR_NUM = 8


def jenkins_h2o_port_allocate():
    """
    input: jenkins environment variable EXECUTOR_NUMBER
    output: creates ./H2O_BASE_PORT.sh, that you should 'source ./H2O_BASE_PORT.sh'
        (can't see the env. variables directly from python?)
        which will create os environment variables H2O_PORT and H2O_PORT_OFFSET (legacy)

    internal state for this script that can be updated:
        USED_HOSTNAMES (list of machine names),
        PORTS_PER_SLOT (max per any job),
        DEFAULT_BASE_PORT

    If you modify any of the internal state, you may introduce contention between
    new jenkins jobs and running jenkins jobs. (might not!)
    You should stop/start all jobs (or ignore failures) if you modify internal state here.
    Hence, no parameters to avoid living dangerously!
    """
    if "EXECUTOR_NUMBER" in os.environ:
        # this will fail if it's not an integer
        executor = int(os.environ["EXECUTOR_NUMBER"])
    else:
        executor = 1  # default when not running under jenkins
    print("jenkins EXECUTOR_NUMBER: %s" % executor)
    if executor < 0 or executor >= EXECUTOR_NUM:
        raise Exception("executor: %s wrong? Expecting 1-8 jenkins executors on a machine (0-7 exp.)" % executor)

    h2oPort = DEFAULT_BASE_PORT
    h2oPortOffset = 0
    hostname = socket.gethostname()
    if hostname not in USED_HOSTNAMES:
        print("WARNING: this hostname: %s isn't in my list. You should add it?" % hostname)
        print("Will use default base port")
    else:
        hostnameIndex = USED_HOSTNAMES.index(hostname)
        # BUG FIX: the old formula PORTS_PER_SLOT * (executor + hostnameIndex)
        # collided across machines: (machine i, executor e) got the same port
        # as (machine i+1, executor e-1).  Stride the hostname index by
        # EXECUTOR_NUM so every (machine, executor) pair is unique -- this also
        # matches the total range printed below.
        h2oPortOffset = PORTS_PER_SLOT * (executor + hostnameIndex * EXECUTOR_NUM)
        h2oPort += h2oPortOffset

    print("Possible h2o base_port range is %s to %s" % (
        DEFAULT_BASE_PORT,
        DEFAULT_BASE_PORT + (PORTS_PER_SLOT * EXECUTOR_NUM * len(USED_HOSTNAMES)) - 2))
    print("Possible h2o ports used ranged is %s to %s" % (
        DEFAULT_BASE_PORT,
        DEFAULT_BASE_PORT + (PORTS_PER_SLOT * EXECUTOR_NUM * len(USED_HOSTNAMES)) - 1))

    print("want to 'export H2O_PORT=%s'" % h2oPort)
    print("want to 'export H2O_PORT_OFFSET=%s # legacy'" % h2oPortOffset)
    # with-statement guarantees the file is flushed/closed even on error
    with open('H2O_BASE_PORT.sh', 'w') as f:
        f.write('export H2O_PORT=%s\n' % h2oPort)
        f.write('export H2O_PORT_OFFSET=%s # legacy\n' % h2oPortOffset)
    print("\nNow please:\nsource ./H2O_BASE_PORT.sh")


if __name__ == "__main__":
    jenkins_h2o_port_allocate()
"""
This auto-magics the manual allocation I did when parallelized the current 8-way jenkins jobs,
2 per machine, on the jenkins mr-0xd4 that dispatches to mr-0xd5 thru mr-0xd9
The rationale for a global allocate requires understanding what machines a jenkins master/slave can be on,
and what machines they send h2o jars to.
at 0xdata:
A jenkins master is a member of a group of machines. Jenkins can send the python or other test to another slave machine, and then the test can dispatch h2o either locally, or to other machines in the group.
it can target h2o.jar's anywhere in that group, or dispatch a job to a slave in that group that might do the same.
We currently have two such groups, with one jenkins master in each group (mr-0xb4 and mr-0xd4)
(update: let's just say it's all one big group. Not worth optimizing for subgroup knowledge)
So using
(hostname offset in the list of total hostnames) * (EXECUTOR_NUMBER-1 * PORTS_PER_SLOT)
Will give a unique offset from the default 54340 base, for the job, regardless of which jenkins (master or slave) starts it in the group and where the h2o targest are (which is controlled by the config.json used in the job)
all cloud builds done in a job (one or more) use the same offset.
Dispatching tests from your laptop..will they collide with jenkins?
If the host machine is not in the list, like a laptop, then the offset is 0. (54340 will be used). I suppose jenkins could shift it's base_port to be at least 10 above 54340, so existing scripts that users have, that use 54340, won't be stepped on by jenkins. 54340 could be the jenkins base port.
EC2:
I suppose if the tests are used in ec2, we only do one h2o jar per machine, (or multijvm) so no conflict if 54340 is used. (or 54340). We typically want fast EC2 results, so don't overload target machines?. I suppose an EC2 machine list could be created in this script if we started overloading EC2 machines also
PORTS_PER_SLOT is 10 right now, since the most a job will do is 5 h2o jvms.
I guess to ease the transition, I could leave the H2O_PORT_OFFSET as the api to build_cloud(), and have another python script look at the current h2o IP and EXECUTOR_NUMBER env variable from jenkins
Notes:
Right now, assuming the subnet octet range from a group is 160-180 or 181-190 works. 164 is an oddball case (out of the ten range for it's group)
I guess I could just put a list of IPs for the jenkins groups that exist, and find the group your in, and then get a "group index" from that list. That's robust and easily maintainable.
This algorithm keeps the total port range in use = (max # of executors per jenkins master or slave) * PORTS_PER_SLOT * (# of machines in a group)
Using 2 executors per machine is nice. 4 is about the max that works well with h2o. so 4 * 10 * 10 = 400 ports
that would be 54340 thru 54721
NICE POSSIBILITES: If we know that ubuntu or other services need to reserve ports that are in our range, we can put in mappings to other ports for those values, or shift the port range or whatever...i.e. we can adjust the algorithm in one place. If the 54340 base is not good, that's set in h2o.py..currently tests don't modify base_port (except for some cloud tests we don't run in jenkins, that do more than 5 jvms on a single machine)
I suppose the tool could output the exact port to use, rather than an offset to h2o.py's default. Maybe initially will output both, so h2o.py can migrate
i.e. environment variables H2O_PORT_OFFSET and H2O_PORT (= 5321 + H2O_PORT_OFFSET)
UPDATE: To allow for dispatching h2o to any machine in any jenkins group, we can have just one group list that has all possible machines. Makes the used port range twice as big (800) but that's okay. It's like going to a 255.255.0.0 network!
Detail:
Jenkins has global environment variables
This one is useful
EXECUTOR_NUMBER The unique number that identifies the current executor (among executors of the same machine) that's carrying out this build. This is the number you see in the "build executor status", except that the number starts from 0, not 1.
Now each slave machine can have multiple executors, in addition to the master.
So since in a grand scheme, we don't know who's creating h2o.jars on target machines, from which machine, (jenkins master or slave)...
it means we want a global h2o port allocation (assuming that scraping an h2o port from OS allocation is ugly)
I have cases on 164 jenkins that send the python job to jenkins slave 174, which dispatches h2o jars to 175-180, Or dispatch to YARN on hadoop clusters, but we don't care about ports there, we get told the ip/port by the h2odriver.
Since the pool of machines in a group is fixed, we have the EXECUTOR_NUMBER which is the parallelism per machine (jenkins master or slave), and we
Will give a unique offset to 54340
We can call it a "PORT_SLOT" and pass it as a environment variable like the current "export H2O_PORT_OFFSET=40"
that the build_cloud() uses to offset the default base_port. I suppose PORTS_PER_SLOT can be fixed in build_cloud() so it's the same for all jobs (so jobs don't step over each other.
"""
| apache-2.0 |
manasgarg/flask-sauth | flask_sauth/forms.py | 1 | 3424 | #!/usr/bin/env python
from datetime import datetime
from wtforms import Form, TextField, PasswordField, HiddenField, ValidationError
from wtforms import validators as v
from models import User, authenticate
class RegistrationForm(Form):
    """Sign-up form collecting name, email and password for a new account."""
    name = TextField(validators=[v.DataRequired(), v.Length(max=256)])
    # Fixed: v.Email() appeared twice in this validator list; once is enough.
    email = TextField(validators=[v.DataRequired(), v.Email(), v.Length(max=256)])
    password = PasswordField(validators=[v.DataRequired(), v.Length(max=256)])
    next = HiddenField()

    def validate_email(form, field):
        """Reject addresses that already belong to an existing user."""
        email = field.data.lower().strip()
        if User.objects(email=email).count():
            raise ValidationError("Hey! This email is already registered with us. Did you forget your password?")

    def save(self):
        """Create, persist and return the new user (email pre-verified)."""
        user = User.create_user(self.name.data, self.email.data, self.password.data, email_verified=True)
        user.save()
        return user
class LoginForm(Form):
    """Login form; successful validation caches the user on ``self.user_cache``."""
    email = TextField(u"Email Address", validators=[v.DataRequired(), v.Email()])
    password = PasswordField(validators=[v.DataRequired()])
    next = HiddenField()

    def validate_email(self, field):
        """Reject addresses that do not belong to any registered user."""
        registered = User.objects(email=field.data).count()
        if not registered:
            raise ValidationError("This email address is not registered.")

    def validate_password(self, field):
        """Authenticate and cache the user; reject bad credentials or inactive accounts."""
        self.user_cache = authenticate(email=self.email.data, password=field.data)
        if self.user_cache is None:
            raise ValidationError("Please enter correct information. Note that password is case-sensitive.")
        if not self.user_cache.is_email_activated:
            raise ValidationError("This account is inactive.")
class ResetPasswordForm(Form):
    """Form asking for the email of the account whose password should be reset."""
    email = TextField(u"Email Address", validators=[v.DataRequired(), v.Email()])

    def validate_email(self, field):
        """Normalize the address, ensure it is registered, and store it back on the field."""
        normalized = field.data.lower().strip()
        if User.objects.filter(email=normalized).count() == 0:
            raise ValidationError("This email address is not registered with us.")
        field.data = normalized
        return True
class NewPasswordForm(Form):
    """Form used to choose a new password (entered twice for confirmation)."""
    password1 = PasswordField("Please enter your password", validators=[v.DataRequired()])
    password2 = PasswordField("Please re-enter your password", validators=[v.DataRequired()])

    def validate_password2(self, field):
        """Ensure both password entries are identical."""
        if self.password1.data != self.password2.data:
            raise ValidationError("The passwords don't match.")
class ChangePasswordForm(Form):
    """Form for changing the password of the currently logged-in user."""
    password = PasswordField("Current Password", validators=[v.DataRequired()])
    password1 = PasswordField("New Password", validators=[v.DataRequired()])
    password2 = PasswordField("Re-enter New Password", validators=[v.DataRequired()])

    # A redundant __init__ override (it only forwarded *args/**kwargs to
    # super with no other work) has been removed; the inherited constructor
    # behaves identically.

    def validate_password(self, field):
        """Check the submitted current password against the logged-in user."""
        from flask_login import current_user
        user_cache = authenticate(email=current_user.email, password=field.data)
        if not user_cache:
            raise ValidationError("The current password that you entered is wrong.")

    def validate_password2(self, field):
        """Ensure the two new-password entries are identical."""
        password1 = self.password1.data
        password2 = self.password2.data
        if password1 != password2:
            raise ValidationError("The passwords don't match.")
| bsd-3-clause |
tanmaykm/edx-platform | lms/djangoapps/teams/search_indexes.py | 63 | 5030 | """ Search index used to load data into elasticsearch"""
import logging
from elasticsearch.exceptions import ConnectionError
from django.conf import settings
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils import translation
from functools import wraps
from search.search_engine_base import SearchEngine
from request_cache import get_request_or_stub
from .errors import ElasticSearchConnectionError
from lms.djangoapps.teams.models import CourseTeam
from .serializers import CourseTeamSerializer
def if_search_enabled(f):
    """Decorator: invoke `f` only when the indexer class reports search enabled.

    When search is disabled the wrapped call becomes a no-op returning None.
    """
    @wraps(f)
    def wrapper(cls, *args, **kwargs):
        """Gate the call on cls.search_is_enabled()."""
        if not cls.search_is_enabled():
            return None
        return f(cls, *args, **kwargs)
    return wrapper
class CourseTeamIndexer(object):
    """
    This is the index object for searching and storing CourseTeam model instances.
    """
    # Name of the elasticsearch index and of the document type stored in it.
    INDEX_NAME = "course_team_index"
    DOCUMENT_TYPE_NAME = "course_team"
    # settings.FEATURES key that gates all indexing (see search_is_enabled).
    ENABLE_SEARCH_KEY = "ENABLE_TEAMS"

    def __init__(self, course_team):
        # The CourseTeam model instance this indexer serializes.
        self.course_team = course_team

    def data(self):
        """
        Uses the CourseTeamSerializer to create a serialized course_team object.
        Adds in additional text and pk fields.
        Removes membership relation.
        Returns serialized object with additional search fields.
        """
        # Django Rest Framework v3.1 requires that we pass the request to the serializer
        # so it can construct hyperlinks. To avoid changing the interface of this object,
        # we retrieve the request from the request cache.
        context = {
            "request": get_request_or_stub()
        }
        serialized_course_team = CourseTeamSerializer(self.course_team, context=context).data
        # Save the primary key so we can load the full objects easily after we search
        serialized_course_team['pk'] = self.course_team.pk
        # Don't save the membership relations in elasticsearch
        serialized_course_team.pop('membership', None)

        # add generally searchable content
        serialized_course_team['content'] = {
            'text': self.content_text()
        }
        return serialized_course_team

    def content_text(self):
        """
        Generate the text field used for general search.
        """
        # Always use the English version of any localizable strings (see TNL-3239)
        with translation.override('en'):
            return u"{name}\n{description}\n{country}\n{language}".format(
                name=self.course_team.name,
                description=self.course_team.description,
                country=self.course_team.country.name.format(),
                language=self._language_name()
            )

    def _language_name(self):
        """
        Convert the language from code to long name.
        """
        languages = dict(settings.ALL_LANGUAGES)
        try:
            return languages[self.course_team.language]
        except KeyError:
            # Unknown code: fall back to returning the raw language code.
            return self.course_team.language

    @classmethod
    @if_search_enabled
    def index(cls, course_team):
        """
        Update index with course_team object (if feature is enabled).
        """
        search_engine = cls.engine()
        serialized_course_team = CourseTeamIndexer(course_team).data()
        search_engine.index(cls.DOCUMENT_TYPE_NAME, [serialized_course_team])

    @classmethod
    @if_search_enabled
    def remove(cls, course_team):
        """
        Remove course_team from the index (if feature is enabled).
        """
        cls.engine().remove(cls.DOCUMENT_TYPE_NAME, [course_team.team_id])

    @classmethod
    @if_search_enabled
    def engine(cls):
        """
        Return course team search engine (if feature is enabled).
        """
        try:
            return SearchEngine.get_search_engine(index=cls.INDEX_NAME)
        except ConnectionError as err:
            # Log and convert to the project-level error so callers need not
            # depend on the elasticsearch client's exception types.
            logging.error('Error connecting to elasticsearch: %s', err)
            raise ElasticSearchConnectionError

    @classmethod
    def search_is_enabled(cls):
        """
        Return boolean of whether course team indexing is enabled.
        """
        return settings.FEATURES.get(cls.ENABLE_SEARCH_KEY, False)
@receiver(post_save, sender=CourseTeam, dispatch_uid='teams.signals.course_team_post_save_callback')
def course_team_post_save_callback(**kwargs):
    """
    Reindex object after save.
    """
    team = kwargs['instance']
    try:
        CourseTeamIndexer.index(team)
    except ElasticSearchConnectionError:
        # Indexing is best-effort: an unreachable search cluster must not
        # block the save itself.
        pass
@receiver(post_delete, sender=CourseTeam, dispatch_uid='teams.signals.course_team_post_delete_callback')
def course_team_post_delete_callback(**kwargs):  # pylint: disable=invalid-name
    """
    Reindex object after delete.
    """
    team = kwargs['instance']
    try:
        CourseTeamIndexer.remove(team)
    except ElasticSearchConnectionError:
        # Best-effort removal: an unreachable search cluster must not
        # block the delete itself.
        pass
| agpl-3.0 |
emmett9001/schema.to | schemato/schemato.py | 2 | 2790 | import urllib
import logging
import re
from pkg_resources import iter_entry_points
from six import StringIO
from six.moves import xrange
from .compound_graph import CompoundGraph
from .schemas.parselypage import ParselyPageValidator
log = logging.getLogger(__name__)
class Schemato(object):
    def __init__(self, source, url=None, loglevel="ERROR"):
        """init with a local filepath or a URI

        :param source: local file path, URL, or the raw document text itself
        :param url: optional explicit URL; when given, ``source`` is treated
            as raw text and wrapped for the graph parser
        :param loglevel: name of a ``logging`` level (e.g. "DEBUG", "ERROR")
        """
        super(Schemato, self).__init__()
        self.set_loglevel(loglevel)

        graph_source = source
        if url is not None:
            # Raw document text was passed in; wrap it in a file-like object.
            graph_source = StringIO(source)
        self.graph = CompoundGraph(graph_source)

        # Inlined the old single-use nested _read_stream() helper.
        text, parsed_url = self._get_document(source)
        self.doc_lines = self._document_lines(text)

        self.url = url if url is not None else parsed_url

        validator = ParselyPageValidator(self.graph, self.doc_lines)
        self.parsely_page = validator.data

    def validate(self):
        """Run every registered validator and return the list of results."""
        self._load_validators()
        results = [v.validate() for v in self.validators]
        log.info("returned from validate() : %s", results)
        for res in results:
            log.info(res.to_json())
        return results

    def set_loglevel(self, loglevel):
        """Set the module logger's level; fall back to ERROR on unknown names."""
        if hasattr(logging, loglevel):
            log.setLevel(loglevel)
        else:
            log.setLevel(logging.ERROR)
            log.error(
                "Unrecognized loglevel %s, defaulting to ERROR", loglevel)

    def _load_validators(self):
        """Instantiate every validator advertised on the 'schemato_validators' entry point."""
        self.validators = set()
        for entry_point in iter_entry_points('schemato_validators'):
            validator_fn = entry_point.load()
            validator = validator_fn(self.graph, self.doc_lines, url=self.url)
            self.validators.add(validator)

    def _document_lines(self, text):
        """helper, get a list of (linetext, linenum) from a string with
        newlines

        Leading/trailing spaces are stripped from each line; numbering is 1-based.
        """
        # enumerate(..., 1) replaces the old zip(..., xrange(...)) pairing.
        return [(re.sub(r'^ +| +$', '', line), num)
                for num, line in enumerate(text.split('\n'), 1)]

    def _get_document(self, source):
        """helper, open a file or url and return the content and identifier

        Tries, in order: fetching ``source`` as a URL (prefixing http:// when
        missing), reading it as a local file path, and finally treating the
        string itself as the document (identifier None).
        """
        scheme_url = source
        if not source.startswith("http"):
            scheme_url = "http://%s" % source
        text = source
        try:
            text = urllib.urlopen(scheme_url).read()
        except Exception:
            # narrowed from a bare except: don't swallow KeyboardInterrupt
            pass
        else:
            return (text, scheme_url)
        try:
            # with-statement fixes a leaked file handle in the original
            with open(source, "r") as doc_file:
                text = doc_file.read()
        except Exception:
            pass
        else:
            return (text, source)
        return (text, None)
| apache-2.0 |
CopeX/odoo | addons/project_issue/project_issue.py | 217 | 29319 | #-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp import api
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import html2plaintext
from openerp.tools.translate import _
class project_issue_version(osv.Model):
    # Lookup table of version numbers an issue can be tagged with
    # (referenced by project.issue's version_id many2one).
    _name = "project.issue.version"
    _order = "name desc"
    _columns = {
        'name': fields.char('Version Number', required=True),
        # standard soft-delete flag: inactive versions are hidden from pickers
        'active': fields.boolean('Active', required=False),
    }
    _defaults = {
        'active': 1,
    }
class project_issue(osv.Model):
_name = "project.issue"
_description = "Project Issue"
_order = "priority desc, create_date desc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
_track = {
'stage_id': {
# this is only an heuristics; depending on your particular stage configuration it may not match all 'new' stages
'project_issue.mt_issue_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
'project_issue.mt_issue_stage': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence > 1,
},
'user_id': {
'project_issue.mt_issue_assigned': lambda self, cr, uid, obj, ctx=None: obj.user_id and obj.user_id.id,
},
'kanban_state': {
'project_issue.mt_issue_blocked': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'blocked',
'project_issue.mt_issue_ready': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'done',
},
}
def _get_default_partner(self, cr, uid, context=None):
    """Default partner: the partner of the context's default project, else False."""
    project_id = self._get_default_project_id(cr, uid, context)
    if not project_id:
        return False
    project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
    if not (project and project.partner_id):
        return False
    return project.partner_id.id
def _get_default_project_id(self, cr, uid, context=None):
    """ Gives default project by checking if present in the context

    Returns the project id (int) or None when it cannot be resolved.
    """
    return self._resolve_project_id_from_context(cr, uid, context=context)
def _get_default_stage_id(self, cr, uid, context=None):
    """ Gives default stage_id

    Delegates to stage_find, restricted to stages that are not folded,
    for the context's default project (if any).
    """
    project_id = self._get_default_project_id(cr, uid, context=context)
    return self.stage_find(cr, uid, [], project_id, [('fold', '=', False)], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
    """ Returns ID of project based on the value of 'default_project_id'
        context key, or None if it cannot be resolved to a single
        project.
    """
    if context is None:
        context = {}
    if type(context.get('default_project_id')) in (int, long):
        return context.get('default_project_id')
    if isinstance(context.get('default_project_id'), basestring):
        # The context may carry a project *name* instead of an id; resolve it
        # only when the name matches exactly one project.
        project_name = context['default_project_id']
        project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
        if len(project_ids) == 1:
            return int(project_ids[0][0])
    return None
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
    """Expand the stage_id group-by for kanban views.

    Returns (name_get-style [(id, name)] list, {stage_id: folded?}):
    the stages already present in ``ids`` plus, when a project can be
    resolved from the context, every stage attached to that project.
    """
    access_rights_uid = access_rights_uid or uid
    stage_obj = self.pool.get('project.task.type')
    order = stage_obj._order
    # lame hack to allow reverting search, should just work in the trivial case
    if read_group_order == 'stage_id desc':
        order = "%s desc" % order
    # build the domain:
    # - ('id', 'in', ids): stages that must be present
    # - OR ('project_ids', '=', project_id): stages of the context's project
    search_domain = []
    project_id = self._resolve_project_id_from_context(cr, uid, context=context)
    if project_id:
        search_domain += ['|', ('project_ids', '=', project_id)]
    search_domain += [('id', 'in', ids)]
    # perform search
    stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
    result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
    # restore order of the search.
    # FIX: sort with a key function instead of the Python-2-only positional
    # cmp-function form of list.sort() (removed in Python 3).
    result.sort(key=lambda entry: stage_ids.index(entry[0]))

    fold = {}
    for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
        fold[stage.id] = stage.fold or False
    return result, fold
def _compute_day(self, cr, uid, ids, fields, args, context=None):
    """Function field: compute the duration statistics of each issue.

    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param ids: List of issue IDs
    @param fields: list of field names to compute (subset of the keys below)
    @return: {issue_id: {field: value}} with per-issue day/hour durations
    @param context: A standard dictionary for contextual values
    """
    Calendar = self.pool['resource.calendar']

    res = dict((res_id, {}) for res_id in ids)
    for issue in self.browse(cr, uid, ids, context=context):
        # start from zeros so every requested field has a value
        values = {
            'day_open': 0.0, 'day_close': 0.0,
            'working_hours_open': 0.0, 'working_hours_close': 0.0,
            'days_since_creation': 0.0, 'inactivity_days': 0.0,
        }
        # if the working hours on the project are not defined, use default ones (8 -> 12 and 13 -> 17 * 5), represented by None
        calendar_id = None
        if issue.project_id and issue.project_id.resource_calendar_id:
            calendar_id = issue.project_id.resource_calendar_id.id

        dt_create_date = datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)

        if issue.date_open:
            # elapsed calendar days (float) and working hours until assignment
            dt_date_open = datetime.strptime(issue.date_open, DEFAULT_SERVER_DATETIME_FORMAT)
            values['day_open'] = (dt_date_open - dt_create_date).total_seconds() / (24.0 * 3600)
            values['working_hours_open'] = Calendar._interval_hours_get(
                cr, uid, calendar_id, dt_create_date, dt_date_open,
                timezone_from_uid=issue.user_id.id or uid,
                exclude_leaves=False, context=context)

        if issue.date_closed:
            # elapsed calendar days (float) and working hours until closing
            dt_date_closed = datetime.strptime(issue.date_closed, DEFAULT_SERVER_DATETIME_FORMAT)
            values['day_close'] = (dt_date_closed - dt_create_date).total_seconds() / (24.0 * 3600)
            values['working_hours_close'] = Calendar._interval_hours_get(
                cr, uid, calendar_id, dt_create_date, dt_date_closed,
                timezone_from_uid=issue.user_id.id or uid,
                exclude_leaves=False, context=context)

        days_since_creation = datetime.today() - dt_create_date
        values['days_since_creation'] = days_since_creation.days
        # inactivity is measured from the most recent of: last action,
        # last stage change, or creation
        if issue.date_action_last:
            inactive_days = datetime.today() - datetime.strptime(issue.date_action_last, DEFAULT_SERVER_DATETIME_FORMAT)
        elif issue.date_last_stage_update:
            inactive_days = datetime.today() - datetime.strptime(issue.date_last_stage_update, DEFAULT_SERVER_DATETIME_FORMAT)
        else:
            inactive_days = datetime.today() - datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)
        values['inactivity_days'] = inactive_days.days

        # filter only required values
        for field in fields:
            res[issue.id][field] = values[field]
    return res
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
    """Function field: mirror the progress (%) of the linked task, 0.0 if none."""
    task_pool = self.pool.get('project.task')
    res = {}
    for issue in self.browse(cr, uid, ids, context=context):
        if issue.task_id:
            task_values = task_pool._hours_get(
                cr, uid, [issue.task_id.id], field_names, args, context=context)
            progress = task_values[issue.task_id.id]['progress']
        else:
            progress = 0.0
        res[issue.id] = {'progress': progress}
    return res
def on_change_project(self, cr, uid, ids, project_id, context=None):
    """Onchange: propose the project's partner when a project is selected."""
    if not project_id:
        return {}
    project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
    if not (project and project.partner_id):
        return {}
    return {'value': {'partner_id': project.partner_id.id}}
def _get_issue_task(self, cr, uid, ids, context=None):
    """Store trigger: ids of issues whose linked task is one of the given tasks."""
    issue_pool = self.pool.get('project.issue')
    task_pool = self.pool.get('project.task')
    issues = []
    for task in task_pool.browse(cr, uid, ids, context=context):
        issues.extend(issue_pool.search(cr, uid, [('task_id', '=', task.id)]))
    return issues
def _get_issue_work(self, cr, uid, ids, context=None):
    """Store trigger: ids of issues whose task owns one of the given work lines."""
    issue_pool = self.pool.get('project.issue')
    work_pool = self.pool.get('project.task.work')
    issues = []
    for work in work_pool.browse(cr, uid, ids, context=context):
        if work.task_id:
            issues.extend(issue_pool.search(cr, uid, [('task_id', '=', work.task_id.id)]))
    return issues
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Issue', required=True),
'active': fields.boolean('Active', required=False),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'write_date': fields.datetime('Update Date', readonly=True),
'days_since_creation': fields.function(_compute_day, string='Days since creation date', \
multi='compute_day', type="integer", help="Difference in days between creation date and current date"),
'date_deadline': fields.date('Deadline'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which Case belongs to.\
Define Responsible user and Email account for mail gateway.'),
'partner_id': fields.many2one('res.partner', 'Contact', select=1),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Private Note'),
'kanban_state': fields.selection([('normal', 'Normal'),('blocked', 'Blocked'),('done', 'Ready for next stage')], 'Kanban State',
track_visibility='onchange',
help="A Issue's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this issue\n"
" * Ready for next stage indicates the issue is ready to be pulled to the next stage",
required=False),
'email_from': fields.char('Email', size=128, help="These people will receive email.", select=1),
'email_cc': fields.char('Watchers Emails', size=256, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'date_open': fields.datetime('Assigned', readonly=True, select=True),
# Project Issue fields
'date_closed': fields.datetime('Closed', readonly=True, select=True),
'date': fields.datetime('Date'),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True),
'channel': fields.char('Channel', help="Communication channel."),
'categ_ids': fields.many2many('project.category', string='Tags'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority', select=True),
'version_id': fields.many2one('project.issue.version', 'Version'),
'stage_id': fields.many2one ('project.task.type', 'Stage',
track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'project_id': fields.many2one('project.project', 'Project', track_visibility='onchange', select=True),
'duration': fields.float('Duration'),
'task_id': fields.many2one('project.task', 'Task', domain="[('project_id','=',project_id)]"),
'day_open': fields.function(_compute_day, string='Days to Assign',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'day_close': fields.function(_compute_day, string='Days to Close',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'user_id': fields.many2one('res.users', 'Assigned to', required=False, select=1, track_visibility='onchange'),
'working_hours_open': fields.function(_compute_day, string='Working Hours to assign the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'working_hours_close': fields.function(_compute_day, string='Working Hours to close the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'inactivity_days': fields.function(_compute_day, string='Days since last action',
multi='compute_day', type="integer", help="Difference in days between last action and current date"),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="Computed as: Time Spent / Total Time.",
store = {
'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['task_id'], 10),
'project.task': (_get_issue_task, ['work_ids', 'remaining_hours', 'planned_hours', 'state', 'stage_id'], 10),
'project.task.work': (_get_issue_work, ['hours'], 10),
}),
}
_defaults = {
    'active': 1,
    # stage and company defaults are lambdas so they are evaluated per-record,
    # honoring the creation context
    'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
    'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
    'priority': '0',
    'kanban_state': 'normal',
    'date_last_stage_update': fields.datetime.now,
    # new issues are assigned to their creator by default
    'user_id': lambda obj, cr, uid, context: uid,
}

# group-by expansion hook: stage_id groups are produced by _read_group_stage_ids
_group_by_full = {
    'stage_id': _read_group_stage_ids
}
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate the issue, suffixing its name with '(copy)'."""
    issue = self.read(cr, uid, [id], ['name'], context=context)[0]
    new_default = dict(default or {})
    new_default['name'] = _('%s (copy)') % (issue['name'],)
    return super(project_issue, self).copy(cr, uid, id, default=new_default, context=context)
def create(self, cr, uid, vals, context=None):
    """Create an issue: propagate the project into the context, stamp the
    assignment date, and apply stage on-change side effects before
    delegating to the ORM."""
    context = dict(context or {})
    # Make the project the default for any sub-record created in this call.
    if vals.get('project_id') and not context.get('default_project_id'):
        context['default_project_id'] = vals.get('project_id')
    # An issue created already assigned counts as opened now.
    if vals.get('user_id'):
        vals['date_open'] = fields.datetime.now()
    if 'stage_id' in vals:
        # Folded stages also set date_closed (see onchange_stage_id).
        vals.update(self.onchange_stage_id(cr, uid, None, vals.get('stage_id'), context=context)['value'])
    # context: no_log, because subtype already handle this
    create_context = dict(context, mail_create_nolog=True)
    return super(project_issue, self).create(cr, uid, vals, context=create_context)
def write(self, cr, uid, ids, vals, context=None):
    """Write issues, tracking stage changes and (re-)assignment dates."""
    # stage change: update date_last_stage_update
    if 'stage_id' in vals:
        vals.update(self.onchange_stage_id(cr, uid, ids, vals.get('stage_id'), context=context)['value'])
        vals['date_last_stage_update'] = fields.datetime.now()
        # Moving to another stage resets the kanban state unless the caller
        # explicitly set one in the same write.
        if 'kanban_state' not in vals:
            vals['kanban_state'] = 'normal'
    # user_id change: update date_start
    if vals.get('user_id'):
        vals['date_open'] = fields.datetime.now()
    return super(project_issue, self).write(cr, uid, ids, vals, context)
def onchange_task_id(self, cr, uid, ids, task_id, context=None):
    """Propagate the linked task's assignee onto the issue form."""
    if not task_id:
        return {'value': {}}
    linked_task = self.pool.get('project.task').browse(cr, uid, task_id, context=context)
    return {'value': {'user_id': linked_task.user_id.id}}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
    """ This function returns value of partner email address based on partner
    :param part: Partner's id
    """
    values = {}
    if partner_id:
        partner_record = self.pool['res.partner'].browse(cr, uid, partner_id, context)
        values['email_from'] = partner_record.email
    return {'value': values}
def get_empty_list_help(self, cr, uid, help, context=None):
    """Customize the helper message shown when the issue list is empty."""
    help_context = dict(context or {})
    help_context['empty_list_help_model'] = 'project.project'
    help_context['empty_list_help_id'] = help_context.get('default_project_id')
    help_context['empty_list_help_document_name'] = _("issues")
    return super(project_issue, self).get_empty_list_help(cr, uid, help, context=help_context)
# -------------------------------------------------------
# Stage management
# -------------------------------------------------------
def onchange_stage_id(self, cr, uid, ids, stage_id, context=None):
    """Set or clear date_closed depending on whether the target stage is folded."""
    if not stage_id:
        return {'value': {}}
    target_stage = self.pool['project.task.type'].browse(cr, uid, stage_id, context=context)
    closed_at = fields.datetime.now() if target_stage.fold else False
    return {'value': {'date_closed': closed_at}}
def stage_find(self, cr, uid, cases, section_id, domain=None, order='sequence', context=None):
    """ Override of the base.stage method
    Parameter of the stage search taken from the issue:
    - type: stage type must be the same or 'both'
    - section_id: if set, stages must belong to this section or
    be a default case

    :param domain: optional extra search criteria, ANDed with the project filter
    :return: id of the first matching project.task.type, or False
    """
    # Bug fix: 'domain' previously defaulted to a mutable [] shared across
    # all calls; default to None and build a fresh list per call instead.
    if domain is None:
        domain = []
    if isinstance(cases, (int, long)):  # Python 2: record ids may be int or long
        cases = self.browse(cr, uid, cases, context=context)
    # collect all section_ids
    section_ids = []
    if section_id:
        section_ids.append(section_id)
    for task in cases:
        if task.project_id:
            section_ids.append(task.project_id.id)
    # OR all section_ids and OR with case_default
    search_domain = []
    if section_ids:
        # n OR'ed terms need n-1 prefix '|' operators (previously written as
        # the parenthesized string ('|'), which is just '|').
        search_domain += ['|'] * (len(section_ids) - 1)
    for section_id in section_ids:
        search_domain.append(('project_ids', '=', section_id))
    search_domain += list(domain)
    # perform search, return the first found
    stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
    if stage_ids:
        return stage_ids[0]
    return False
def case_escalate(self, cr, uid, ids, context=None):  # FIXME rename this method to issue_escalate
    """Move each issue (and its linked task) to its project's escalation project."""
    for issue in self.browse(cr, uid, ids, context=context):
        escalation_project = issue.project_id.project_escalation_id
        if not escalation_project:
            raise osv.except_osv(_('Warning!'), _('You cannot escalate this issue.\nThe relevant Project has not configured the Escalation Project!'))
        updates = {'project_id': escalation_project.id}
        if escalation_project.user_id:
            updates['user_id'] = escalation_project.user_id.id
        issue.write(updates)
        if issue.task_id:
            # The task follows the issue but loses its assignee.
            issue.task_id.write({'project_id': escalation_project.id, 'user_id': False})
    return True
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
    """ Override to get the reply_to of the parent project. """
    issues = self.browse(cr, SUPERUSER_ID, ids, context=context)
    project_ids = set(issue.project_id.id for issue in issues if issue.project_id)
    aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), context=context)
    result = {}
    for issue in issues:
        key = issue.project_id.id if issue.project_id else 0
        result[issue.id] = aliases.get(key, False)
    return result
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
    """Suggest the issue's customer (partner record, or raw email address
    when no partner is linked) as an additional follower."""
    recipients = super(project_issue, self).message_get_suggested_recipients(cr, uid, ids, context=context)
    try:
        for issue in self.browse(cr, uid, ids, context=context):
            if issue.partner_id:
                self._message_add_suggested_recipient(cr, uid, recipients, issue, partner=issue.partner_id, reason=_('Customer'))
            elif issue.email_from:
                self._message_add_suggested_recipient(cr, uid, recipients, issue, email=issue.email_from, reason=_('Customer Email'))
    except (osv.except_osv, orm.except_orm):  # no read access rights -> just ignore suggested recipients because this imply modifying followers
        pass
    return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
    """ Overrides mail_thread message_new that is called by the mailgateway
    through message_process.
    This override updates the document according to the email.
    """
    # Base values extracted from the incoming email; explicit custom_values
    # always win over them.
    values = {
        'name': msg.get('subject') or _("No Subject"),
        'email_from': msg.get('from'),
        'email_cc': msg.get('cc'),
        'partner_id': msg.get('author_id', False),
        'user_id': False,
    }
    if custom_values:
        values.update(custom_values)
    gateway_context = dict(context or {}, state_to='draft')
    return super(project_issue, self).message_new(cr, uid, msg, custom_values=values, context=gateway_context)
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, content_subtype='html', **kwargs):
    """ Overrides mail_thread message_post so that we can set the date of last action field when
    a new message is posted on the issue.
    """
    if context is None:
        context = {}
    res = super(project_issue, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
    # Only messages carrying a subtype count as an "action"; subtype-less
    # chatter must not bump date_action_last.  Written as SUPERUSER_ID so
    # followers without write access on the issue can still post.
    if thread_id and subtype:
        self.write(cr, SUPERUSER_ID, thread_id, {'date_action_last': fields.datetime.now()}, context=context)
    return res
class project(osv.Model):
    """project.project extension: issue counter, alias models and the
    escalation-project configuration."""
    _inherit = "project.project"

    def _get_alias_models(self, cr, uid, context=None):
        """Models a project mail alias is allowed to create records for."""
        return [('project.task', "Tasks"), ("project.issue", "Issues")]

    def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
        """Count open (non-folded-stage) issues for each project."""
        Issue = self.pool['project.issue']
        return {
            project_id: Issue.search_count(cr, uid, [('project_id', '=', project_id), ('stage_id.fold', '=', False)], context=context)
            for project_id in ids
        }

    _columns = {
        'project_escalation_id': fields.many2one('project.project', 'Project Escalation',
            help='If any issue is escalated from the current Project, it will be listed under the project selected here.',
            states={'close': [('readonly', True)], 'cancelled': [('readonly', True)]}),
        'issue_count': fields.function(_issue_count, type='integer', string="Issues",),
        'issue_ids': fields.one2many('project.issue', 'project_id',
            domain=[('stage_id.fold', '=', False)])
    }

    def _check_escalation(self, cr, uid, ids, context=None):
        """Constraint: a project may not escalate to itself.

        Bug fix: the previous implementation only validated ids[0]; the ORM
        may pass several record ids, so every record is checked now.
        """
        for project_record in self.browse(cr, uid, ids, context=context):
            if project_record.project_escalation_id and \
                    project_record.project_escalation_id.id == project_record.id:
                return False
        return True

    _constraints = [
        (_check_escalation, 'Error! You cannot assign escalation to the same project!', ['project_escalation_id'])
    ]
class account_analytic_account(osv.Model):
    """Analytic account extension: flag accounts whose projects track issues."""
    _inherit = 'account.analytic.account'
    _description = 'Analytic Account'

    _columns = {
        'use_issues': fields.boolean('Issues', help="Check this field if this project manages issues"),
    }

    def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
        """Copy the use_issues flag from the selected contract template."""
        res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
        if template_id and 'value' in res:
            template_record = self.browse(cr, uid, template_id, context=context)
            res['value']['use_issues'] = template_record.use_issues
        return res

    def _trigger_project_creation(self, cr, uid, vals, context=None):
        """A project must also be created when issue tracking is enabled."""
        if context is None:
            context = {}
        triggered = super(account_analytic_account, self)._trigger_project_creation(cr, uid, vals, context=context)
        return triggered or (vals.get('use_issues') and 'project_creation_in_progress' not in context)
class project_project(osv.Model):
    """project.project extension keeping the mail alias model consistent
    with the use_tasks / use_issues checkboxes."""
    _inherit = 'project.project'

    _defaults = {
        'use_issues': True
    }

    def _check_create_write_values(self, cr, uid, vals, context=None):
        """ Perform some check on values given to create or write. """
        # Handle use_tasks / use_issues: if only one is checked, alias should take the same model
        tasks_on, issues_on = vals.get('use_tasks'), vals.get('use_issues')
        if tasks_on and not issues_on:
            vals['alias_model'] = 'project.task'
        elif issues_on and not tasks_on:
            vals['alias_model'] = 'project.issue'

    def on_change_use_tasks_or_issues(self, cr, uid, ids, use_tasks, use_issues, context=None):
        """Form-view counterpart of _check_create_write_values."""
        if use_tasks and not use_issues:
            return {'value': {'alias_model': 'project.task'}}
        if use_issues and not use_tasks:
            return {'value': {'alias_model': 'project.issue'}}
        return {'value': {}}

    def create(self, cr, uid, vals, context=None):
        self._check_create_write_values(cr, uid, vals, context=context)
        return super(project_project, self).create(cr, uid, vals, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        self._check_create_write_values(cr, uid, vals, context=context)
        return super(project_project, self).write(cr, uid, ids, vals, context=context)
class res_partner(osv.osv):
    """ Inherits partner and adds Issue information in the partner form """
    # Note: this string was previously a stray expression after _issue_count;
    # placed first it becomes the actual class docstring.
    _inherit = 'res.partner'

    def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
        """Count the issues linked to each partner."""
        Issue = self.pool['project.issue']
        return {
            # Consistency fix: forward the context, as project._issue_count does.
            partner_id: Issue.search_count(cr, uid, [('partner_id', '=', partner_id)], context=context)
            for partner_id in ids
        }

    _columns = {
        'issue_count': fields.function(_issue_count, string='# Issues', type='integer'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AlexDoul/PyQt4 | examples/xmlpatterns/schema/ui_schema.py | 7 | 3609 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'schema.ui'
#
# Created: Fri Feb 5 15:27:54 2010
# by: PyQt4 UI code generator snapshot-4.7.1-c39e85a8e2ec
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_SchemaMainWindow(object):
    """pyuic4-generated builder for the 'XML Schema Validation' main window.

    Generated from 'schema.ui' -- regenerate with pyuic4 instead of editing
    by hand ("WARNING! All changes made in this file will be lost!").
    """
    def setupUi(self, SchemaMainWindow):
        """Instantiate, name and grid-lay-out every widget on the window."""
        SchemaMainWindow.setObjectName("SchemaMainWindow")
        SchemaMainWindow.resize(417, 594)
        self.centralwidget = QtGui.QWidget(SchemaMainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtGui.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        # Row 0: schema selector
        self.schemaLabel = QtGui.QLabel(self.centralwidget)
        self.schemaLabel.setObjectName("schemaLabel")
        self.gridLayout.addWidget(self.schemaLabel, 0, 0, 1, 2)
        self.schemaSelection = QtGui.QComboBox(self.centralwidget)
        self.schemaSelection.setObjectName("schemaSelection")
        self.gridLayout.addWidget(self.schemaSelection, 0, 2, 1, 2)
        # Row 1: read-only schema text
        self.schemaView = QtGui.QTextBrowser(self.centralwidget)
        self.schemaView.setObjectName("schemaView")
        self.gridLayout.addWidget(self.schemaView, 1, 0, 1, 4)
        # Row 2: instance selector
        self.instanceLabel = QtGui.QLabel(self.centralwidget)
        self.instanceLabel.setObjectName("instanceLabel")
        self.gridLayout.addWidget(self.instanceLabel, 2, 0, 1, 2)
        self.instanceSelection = QtGui.QComboBox(self.centralwidget)
        self.instanceSelection.setObjectName("instanceSelection")
        self.gridLayout.addWidget(self.instanceSelection, 2, 2, 1, 2)
        # Row 3: editable instance document
        self.instanceEdit = QtGui.QTextEdit(self.centralwidget)
        self.instanceEdit.setObjectName("instanceEdit")
        self.gridLayout.addWidget(self.instanceEdit, 3, 0, 1, 4)
        # Row 4: status line plus Validate button
        self.label = QtGui.QLabel(self.centralwidget)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 4, 0, 1, 1)
        self.validationStatus = QtGui.QLabel(self.centralwidget)
        self.validationStatus.setObjectName("validationStatus")
        self.gridLayout.addWidget(self.validationStatus, 4, 1, 1, 2)
        self.validateButton = QtGui.QPushButton(self.centralwidget)
        self.validateButton.setObjectName("validateButton")
        self.gridLayout.addWidget(self.validateButton, 4, 3, 1, 1)
        SchemaMainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtGui.QStatusBar(SchemaMainWindow)
        self.statusbar.setObjectName("statusbar")
        SchemaMainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(SchemaMainWindow)
        QtCore.QMetaObject.connectSlotsByName(SchemaMainWindow)

    def retranslateUi(self, SchemaMainWindow):
        """Install the translatable UI strings."""
        SchemaMainWindow.setWindowTitle(QtGui.QApplication.translate("SchemaMainWindow", "XML Schema Validation", None, QtGui.QApplication.UnicodeUTF8))
        self.schemaLabel.setText(QtGui.QApplication.translate("SchemaMainWindow", "XML Schema Document:", None, QtGui.QApplication.UnicodeUTF8))
        self.instanceLabel.setText(QtGui.QApplication.translate("SchemaMainWindow", "XML Instance Document:", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("SchemaMainWindow", "Status:", None, QtGui.QApplication.UnicodeUTF8))
        self.validationStatus.setText(QtGui.QApplication.translate("SchemaMainWindow", "not validated", None, QtGui.QApplication.UnicodeUTF8))
        self.validateButton.setText(QtGui.QApplication.translate("SchemaMainWindow", "Validate", None, QtGui.QApplication.UnicodeUTF8))
| gpl-2.0 |
rhauch/modeshape | modeshape-jcr/src/test/resources/mimetype/test.py | 18 | 12091 | """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
On other platforms (mainly Mac and Windows), it uses just sys.prefix
(and sys.exec_prefix, if different, but this is unlikely). The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/site-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.3/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.3/site-packages/bar
/usr/local/lib/python2.3/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys, os
def makepath(*paths):
    """Join *paths*, absolutize the result, and return the pair
    (absolute_dir, case-normalized absolute_dir)."""
    joined = os.path.abspath(os.path.join(*paths))
    return joined, os.path.normcase(joined)
# Absolutize __file__ on every already-imported module so later os.chdir()
# calls cannot break tracebacks or reload()s that rely on relative paths.
for m in sys.modules.values():
    if hasattr(m, "__file__") and m.__file__:
        m.__file__ = os.path.abspath(m.__file__)
del m
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
# Maps case-normalized absolute directory -> 1 for O(1) duplicate tests.
_dirs_in_sys_path = {}
dir = dircase = None  # sys.path may be empty at this point
for dir in sys.path:
    # Filter out duplicate paths (on case-insensitive file systems also
    # if they only differ in case); turn relative paths into absolute
    # paths.
    dir, dircase = makepath(dir)
    if not dircase in _dirs_in_sys_path:
        L.append(dir)
        _dirs_in_sys_path[dircase] = 1
# Assign in place so code holding a reference to sys.path sees the change.
sys.path[:] = L
del dir, dircase, L
# Append ./build/lib.<platform> in case we're running in the build dir
# (especially for Guido :-)
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
if (os.name == "posix" and sys.path and
        os.path.basename(sys.path[-1]) == "Modules"):
    from distutils.util import get_platform
    # e.g. "build/lib.linux-x86_64-2.3" next to the Modules directory.
    s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
    s = os.path.join(os.path.dirname(sys.path[-1]), s)
    sys.path.append(s)
    del get_platform, s
def _init_pathinfo():
    """Rebuild the global table of case-normalized directories on sys.path."""
    global _dirs_in_sys_path
    known = {}
    for entry in sys.path:
        # Skip entries that exist but are not directories; keep '' (cwd).
        if entry and not os.path.isdir(entry):
            continue
        _, normalized = makepath(entry)
        known[normalized] = 1
    _dirs_in_sys_path = known
def addsitedir(sitedir):
    """Add 'sitedir' to sys.path (if new) and process its .pth files."""
    global _dirs_in_sys_path
    if _dirs_in_sys_path is None:
        _init_pathinfo()
        reset = 1   # table built here, so tear it down again on exit
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if not sitedircase in _dirs_in_sys_path:
        sys.path.append(sitedir)        # Add path component
    try:
        names = os.listdir(sitedir)
    except os.error:
        # Directory unreadable or missing: nothing to process.
        return
    names.sort()   # deterministic, alphabetical .pth processing order
    for name in names:
        if name[-4:] == os.extsep + "pth":
            addpackage(sitedir, name)
    if reset:
        _dirs_in_sys_path = None
def addpackage(sitedir, name):
    """Process one .pth file inside 'sitedir'.

    Each line names a directory (relative to sitedir) to append to
    sys.path; '#' comment and blank lines are skipped, and lines starting
    with 'import' are executed verbatim (Python 2 'exec' statement).
    """
    global _dirs_in_sys_path
    if _dirs_in_sys_path is None:
        _init_pathinfo()
        reset = 1   # table built here, so tear it down again on exit
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname)
    except IOError:
        return
    while 1:
        dir = f.readline()
        if not dir:
            break
        if dir[0] == '#':
            continue
        if dir.startswith("import"):
            exec dir
            continue
        if dir[-1] == '\n':
            dir = dir[:-1]
        dir, dircase = makepath(sitedir, dir)
        # Only add existing, not-yet-seen directories.
        if not dircase in _dirs_in_sys_path and os.path.exists(dir):
            sys.path.append(dir)
            _dirs_in_sys_path[dircase] = 1
    if reset:
        _dirs_in_sys_path = None
# Walk the candidate installation prefixes (Debian layout: /usr/local first)
# and register every existing site-packages directory found beneath them.
prefixes = [os.path.join(sys.prefix, "local"), sys.prefix]
sitedir = None  # make sure sitedir is initialized because of later 'del'
if sys.exec_prefix != sys.prefix:
    prefixes.append(os.path.join(sys.exec_prefix, "local"))
for prefix in prefixes:
    if prefix:
        if sys.platform in ('os2emx', 'riscos'):
            sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
        elif os.sep == '/':
            # Unix: versioned site-packages plus the shared site-python dir.
            sitedirs = [os.path.join(prefix,
                                     "lib",
                                     "python" + sys.version[:3],
                                     "site-packages"),
                        os.path.join(prefix, "lib", "site-python")]
        else:
            sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
        if sys.platform == 'darwin':
            # for framework builds *only* we add the standard Apple
            # locations. Currently only per-user, but /Library and
            # /Network/Library could be added too
            if 'Python.framework' in prefix:
                home = os.environ.get('HOME')
                if home:
                    sitedirs.append(
                        os.path.join(home,
                                     'Library',
                                     'Python',
                                     sys.version[:3],
                                     'site-packages'))
        for sitedir in sitedirs:
            if os.path.isdir(sitedir):
                addsitedir(sitedir)
del prefix, sitedir
# Force addsitedir()/addpackage() to rebuild their table on next use.
_dirs_in_sys_path = None
# Define new built-ins 'quit' and 'exit'.
# These are simply strings that display a hint on how to exit.
if os.sep == ':':
    # Classic Mac OS
    exit = 'Use Cmd-Q to quit.'
elif os.sep == '\\':
    # Windows: Ctrl-Z is EOF on the console
    exit = 'Use Ctrl-Z plus Return to exit.'
else:
    exit = 'Use Ctrl-D (i.e. EOF) to exit.'
import __builtin__
__builtin__.quit = __builtin__.exit = exit
del exit
# interactive prompt objects for printing the license text, a list of
# contributors and the copyright notice.
class _Printer:
    """Interactive prompt object backing the 'license', 'copyright' and
    'credits' built-ins: repr() shows the text (or a hint when it is long),
    calling the object pages through the full text.

    Python 2 module kept verbatim ('print' statement, raw_input).
    """
    MAXLINES = 23   # page size used by the interactive pager

    def __init__(self, name, data, files=(), dirs=()):
        # Text is resolved lazily: first from dirs/files on disk, then the
        # in-memory 'data' fallback.
        self.__name = name
        self.__data = data
        self.__files = files
        self.__dirs = dirs
        self.__lines = None

    def __setup(self):
        # Load and split the text once; subsequent calls are no-ops.
        if self.__lines:
            return
        data = None
        for dir in self.__dirs:
            for file in self.__files:
                file = os.path.join(dir, file)
                try:
                    fp = open(file)
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        self.__setup()
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)

    def __call__(self):
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print self.__lines[i]
            except IndexError:
                # Ran past the last line: done paging.
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    key = raw_input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
# Wire the _Printer instances onto the corresponding built-in names.
__builtin__.copyright = _Printer("copyright", sys.copyright)
if sys.platform[:4] == 'java':
    __builtin__.credits = _Printer(
        "credits",
        "Jython is maintained by the Jython developers (www.jython.org).")
else:
    __builtin__.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
# 'license' prefers the on-disk LICENSE file next to the stdlib, falling
# back to a pointer at the python.org license page.
__builtin__.license = _Printer(
    "license", "See http://www.python.org/%.3s/license.html" % sys.version,
    ["LICENSE.txt", "LICENSE"],
    [os.path.join(here, os.pardir), here, os.curdir])
# Define new built-in 'help'.
# This is a wrapper around pydoc.help (with a twist).
class _Helper:
    """Interactive 'help' built-in: repr() prints a usage hint and calling
    the object delegates to pydoc.help."""
    def __repr__(self):
        return ("Type help() for interactive help, "
                "or help(object) for help about object.")

    def __call__(self, *args, **kwds):
        # Imported lazily so plain interpreter startup stays cheap.
        import pydoc
        return pydoc.help(*args, **kwds)
__builtin__.help = _Helper()
# On Windows, some default encodings are not provided
# by Python (e.g. "cp932" in Japanese locale), while they
# are always available as "mbcs" in each locale.
# Make them usable by aliasing to "mbcs" in such a case.
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
# Set the string encoding used by the Unicode implementation. The
# default is 'ascii', but if you're willing to experiment, you can
# change this.  (The two 'if 0:' blocks below are deliberately disabled
# experiments, kept for documentation.)
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
    # Enable to support locale aware default string encodings.
    import locale
    loc = locale.getdefaultlocale()
    if loc[1]:
        encoding = loc[1]
if 0:
    # Enable to switch off string to Unicode coercion and implicit
    # Unicode to string conversion.
    encoding = "undefined"
if encoding != "ascii":
    # On Non-Unicode builds this will raise an AttributeError...
    sys.setdefaultencoding(encoding) # Needs Python Unicode build !
#
# Run custom site specific code, if available.
#
try:
    import sitecustomize
except ImportError:
    # A missing sitecustomize module is the normal case, not an error.
    pass
#
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
#
if hasattr(sys, "setdefaultencoding"):
    del sys.setdefaultencoding
def _test():
    """Debug helper: dump sys.path (Python 2 'print' statement; the
    backquotes around 'dir' are the Python 2 repr() shorthand)."""
    print "sys.path = ["
    for dir in sys.path:
        print "    %s," % `dir`
    print "]"

if __name__ == '__main__':
    _test()
| apache-2.0 |
FCP-INDI/nipype | nipype/interfaces/spm/tests/test_preprocess.py | 10 | 5180 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
from tempfile import mkdtemp
from shutil import rmtree
import numpy as np
from nipype.testing import (assert_equal, assert_false, assert_true,
assert_raises, skipif)
import nibabel as nb
import nipype.interfaces.spm as spm
from nipype.interfaces.spm import no_spm
import nipype.interfaces.matlab as mlab
# Allow overriding the MATLAB executable through the MATLABCMD environment
# variable, falling back to plain 'matlab' on the PATH.
# Bug fix: the previous bare 'except:' around os.environ['MATLABCMD'] also
# swallowed SystemExit/KeyboardInterrupt; dict.get expresses the intent.
matlab_cmd = os.environ.get('MATLABCMD', 'matlab')

mlab.MatlabCommand.set_default_matlab_cmd(matlab_cmd)
def create_files_in_directory():
    """Create two small random 4D NIfTI files in a fresh temp directory.

    Returns (filenames, tempdir, previous_cwd); the process is left chdir'ed
    into tempdir, and the caller restores previous_cwd via clean_directory().
    """
    outdir = mkdtemp()
    old_wd = os.getcwd()
    os.chdir(outdir)
    filelist = ['a.nii', 'b.nii']
    shape = (3, 3, 3, 4)
    for fname in filelist:
        header = nb.Nifti1Header()
        header.set_data_shape(shape)
        voxels = np.random.random(shape)
        nb.save(nb.Nifti1Image(voxels, np.eye(4), header),
                os.path.join(outdir, fname))
    return filelist, outdir, old_wd
def clean_directory(outdir, old_wd):
    """Remove the scratch directory made by create_files_in_directory()
    (if it still exists) and chdir back to the previous working directory."""
    if os.path.exists(outdir):
        rmtree(outdir)
    os.chdir(old_wd)
def test_slicetiming():
    """SliceTiming registers under SPM's temporal/st batch job."""
    for actual, expected in [(spm.SliceTiming._jobtype, 'temporal'),
                             (spm.SliceTiming._jobname, 'st')]:
        yield assert_equal, actual, expected
def test_slicetiming_list_outputs():
    """Time-corrected output filenames are prefixed with 'a'."""
    files, tmpdir, old_wd = create_files_in_directory()
    interface = spm.SliceTiming(in_files=files[0])
    yield assert_equal, interface._list_outputs()['timecorrected_files'][0][0], 'a'
    clean_directory(tmpdir, old_wd)
def test_realign():
    """Realign registers as spatial/realign and defaults to estwrite."""
    for actual, expected in [(spm.Realign._jobtype, 'spatial'),
                             (spm.Realign._jobname, 'realign'),
                             (spm.Realign().inputs.jobtype, 'estwrite')]:
        yield assert_equal, actual, expected
def test_realign_list_outputs():
    """Realign outputs follow SPM's rp_/r/mean naming conventions."""
    files, tmpdir, old_wd = create_files_in_directory()
    realigner = spm.Realign(in_files=files[0])
    outputs = realigner._list_outputs()
    yield assert_true, outputs['realignment_parameters'][0].startswith('rp_')
    yield assert_true, outputs['realigned_files'][0].startswith('r')
    yield assert_true, outputs['mean_image'].startswith('mean')
    clean_directory(tmpdir, old_wd)
def test_coregister():
    """Coregister registers as spatial/coreg and defaults to estwrite."""
    for actual, expected in [(spm.Coregister._jobtype, 'spatial'),
                             (spm.Coregister._jobname, 'coreg'),
                             (spm.Coregister().inputs.jobtype, 'estwrite')]:
        yield assert_equal, actual, expected
def test_coregister_list_outputs():
    """Coregistered outputs are prefixed with 'r'."""
    files, tmpdir, old_wd = create_files_in_directory()
    registration = spm.Coregister(source=files[0])
    yield assert_true, registration._list_outputs()['coregistered_source'][0].startswith('r')
    registration = spm.Coregister(source=files[0], apply_to_files=files[1])
    yield assert_true, registration._list_outputs()['coregistered_files'][0].startswith('r')
    clean_directory(tmpdir, old_wd)
def test_normalize():
    """Normalize registers as spatial/normalise and defaults to estwrite."""
    for actual, expected in [(spm.Normalize._jobtype, 'spatial'),
                             (spm.Normalize._jobname, 'normalise'),
                             (spm.Normalize().inputs.jobtype, 'estwrite')]:
        yield assert_equal, actual, expected
def test_normalize_list_outputs():
    """Normalized outputs are prefixed with 'w'."""
    files, tmpdir, old_wd = create_files_in_directory()
    normalizer = spm.Normalize(source=files[0])
    yield assert_true, normalizer._list_outputs()['normalized_source'][0].startswith('w')
    normalizer = spm.Normalize(source=files[0], apply_to_files=files[1])
    yield assert_true, normalizer._list_outputs()['normalized_files'][0].startswith('w')
    clean_directory(tmpdir, old_wd)
def test_normalize12():
    """Normalize12 registers as spatial/normalise and defaults to estwrite."""
    for actual, expected in [(spm.Normalize12._jobtype, 'spatial'),
                             (spm.Normalize12._jobname, 'normalise'),
                             (spm.Normalize12().inputs.jobtype, 'estwrite')]:
        yield assert_equal, actual, expected
def test_normalize12_list_outputs():
    """Normalize12 outputs are prefixed with 'w'."""
    files, tmpdir, old_wd = create_files_in_directory()
    normalizer = spm.Normalize12(image_to_align=files[0])
    yield assert_true, normalizer._list_outputs()['normalized_image'][0].startswith('w')
    normalizer = spm.Normalize12(image_to_align=files[0],
                                 apply_to_files=files[1])
    yield assert_true, normalizer._list_outputs()['normalized_files'][0].startswith('w')
    clean_directory(tmpdir, old_wd)
@skipif(no_spm)
def test_segment():
    """Segment moved to tools/oldseg in SPM12; earlier SPMs use spatial/preproc."""
    if spm.Info.version()['name'] == "SPM12":
        expected_type, expected_name = 'tools', 'oldseg'
    else:
        expected_type, expected_name = 'spatial', 'preproc'
    yield assert_equal, spm.Segment()._jobtype, expected_type
    yield assert_equal, spm.Segment()._jobname, expected_name
@skipif(no_spm)
def test_newsegment():
    """NewSegment is spatial/preproc in SPM12, tools/preproc8 before it."""
    if spm.Info.version()['name'] == "SPM12":
        expected_type, expected_name = 'spatial', 'preproc'
    else:
        expected_type, expected_name = 'tools', 'preproc8'
    yield assert_equal, spm.NewSegment()._jobtype, expected_type
    yield assert_equal, spm.NewSegment()._jobname, expected_name
def test_smooth():
    """Smooth registers under the SPM spatial/smooth batch job."""
    for actual, expected in [(spm.Smooth._jobtype, 'spatial'),
                             (spm.Smooth._jobname, 'smooth')]:
        yield assert_equal, actual, expected
def test_dartel():
    """DARTEL registers under the SPM tools/dartel batch job."""
    for actual, expected in [(spm.DARTEL._jobtype, 'tools'),
                             (spm.DARTEL._jobname, 'dartel')]:
        yield assert_equal, actual, expected
def test_dartelnorm2mni():
    """DARTELNorm2MNI registers under the SPM tools/dartel batch job."""
    for actual, expected in [(spm.DARTELNorm2MNI._jobtype, 'tools'),
                             (spm.DARTELNorm2MNI._jobname, 'dartel')]:
        yield assert_equal, actual, expected
| bsd-3-clause |
nitesh1989/tools-iuc | tools/transtermhp/transtermhp.py | 4 | 2773 | #!/usr/bin/env python
import sys
import re
import subprocess
from Bio import SeqIO
from BCBio import GFF
from Bio.SeqFeature import SeqFeature, FeatureLocation
def main(expterm, fasta, gff3):
    """Run TransTermHP per GFF3 record and yield records whose features are
    replaced by the predicted terminators.

    expterm -- path to TransTermHP's expterm.dat scoring file
    fasta   -- genome FASTA file (must contain the GFF3 sequence ids)
    gff3    -- gene annotations used to build the per-record coords file
    """
    with open(fasta, 'r') as handle:
        seq_dict = SeqIO.to_dict(SeqIO.parse(handle, "fasta"))
    # Build coords file
    with open(gff3, 'r') as handle:
        for rec in GFF.parse(handle, base_dict=seq_dict):
            # TransTermHP expects 1-based inclusive gene coordinates.
            with open('tmp.coords', 'w') as coords:
                for feat in rec.features:
                    if feat.type == 'gene':
                        coords.write('\t'.join([
                            feat.id,
                            str(feat.location.start + 1),
                            str(feat.location.end),
                            rec.id,
                        ]) + '\n')
            with open('tmp.fasta', 'w') as fasta_handle:
                SeqIO.write(rec, fasta_handle, 'fasta')
            # NOTE(review): the command passes the original multi-record
            # 'fasta' although 'tmp.fasta' was just written for this record
            # and is otherwise unused -- confirm whether 'tmp.fasta' was
            # intended here.
            cmd = ['transterm', '-p', expterm, fasta, 'tmp.coords']
            output = subprocess.check_output(cmd)
            # TERM 1 4342 - 4366 + F 93 -11.5 -3.22878 | opp_overlap 4342, overlap 4340 4357
            ttre = re.compile(
                '^ (?P<name>.*) (?P<start>\d+) - (?P<end>\d+)\s+'
                '(?P<strand>[-+])\s+(?P<loc>[GFRTHNgfr]+)\s+'
                '(?P<conf>\d+)\s+(?P<hp>[0-9.-]+)\s+(?P<tail>[0-9.-]+)'
            )
            # Replace the gene annotations with the predicted terminators.
            rec.features = []
            batches = output.split('SEQUENCE ')
            for batch in batches[1:]:
                batch_lines = batch.split('\n')
                # Strip the header
                interesting = batch_lines[2:]
                # Every other indented line is the terminator record; the
                # alternating lines are sequence context, hence [0::2].
                unformatted = [x for x in interesting if x.startswith(' ')][0::2]
                for terminator in unformatted:
                    m = ttre.match(terminator)
                    if m:
                        # Convert to 0-based half-open coordinates.
                        start = int(m.group('start')) - 1
                        end = int(m.group('end'))
                        if m.group('strand') == '+':
                            strand = 1
                        else:
                            strand = 0
                        # NOTE(review): Biopython uses -1 for the reverse
                        # strand and 0 for "unknown" -- confirm whether the
                        # '-' branch should be -1 instead of 0.
                        feature = SeqFeature(
                            FeatureLocation(start, end),
                            type="terminator",
                            strand=strand,
                            qualifiers={
                                "source": "TransTermHP_2.09",
                                "score": m.group('conf'),
                                "ID": m.group('name'),
                            }
                        )
                        rec.features.append(feature)
            yield rec
if __name__ == '__main__':
    # CLI: transtermhp.py <expterm.dat> <genome.fasta> <annotations.gff3>
    for annotated in main(*sys.argv[1:4]):
        GFF.write([annotated], sys.stdout)
| mit |
torkelsson/monitor-core | gmetad-python/Gmetad/gmetad_element.py | 15 | 5497 | #/*******************************************************************************
#* Portions Copyright (C) 2008 Novell, Inc. All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions are met:
#*
#* - Redistributions of source code must retain the above copyright notice,
#* this list of conditions and the following disclaimer.
#*
#* - Redistributions in binary form must reproduce the above copyright notice,
#* this list of conditions and the following disclaimer in the documentation
#* and/or other materials provided with the distribution.
#*
#* - Neither the name of Novell, Inc. nor the names of its
#* contributors may be used to endorse or promote products derived from this
#* software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
#* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#* ARE DISCLAIMED. IN NO EVENT SHALL Novell, Inc. OR THE CONTRIBUTORS
#* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#*
#* Authors: Matt Ryan (mrayn novell.com)
#* Brad Nicholes (bnicholes novell.com)
#******************************************************************************/
import copy
class Element:
    ''' This class implements the node element that is used to create the data store tree structure.'''

    def generateKey(vals):
        ''' Generate a node key from a node id (string) or an [id, name] list. '''
        if isinstance(vals, list):
            return ':'.join(vals)
        return vals
    generateKey = staticmethod(generateKey)

    def __init__(self, id, attrs, tag=None):
        ''' Initialize the node with an id, an attribute dict and an optional tag.

            Attribute keys are normalized to lower case.  If no tag is given
            the id doubles as the tag. '''
        self.id = id
        if tag is None:
            self.tag = id
        else:
            self.tag = tag
        # If any attributes were given during initialization, add them here
        # (keys stored lower-cased).
        self.attrs = {}
        self.lastReportedTime = 0
        for k, v in attrs.items():
            self.attrs[k.lower()] = v
        self.children = {}
        self.gridDepth = -1

    def __setitem__(self, k, v):
        ''' Add a child node, or refresh an existing child stored under key k. '''
        try:
            self.children[k].update(v)
        except KeyError:
            self.children[k] = v

    def __getitem__(self, k):
        ''' This method retrieves a specific child node. '''
        return self.children[k]

    def update(self, elem):
        ''' Refresh this node's attribute values from another node.

            Attributes missing from elem keep their current value. '''
        for k in self.attrs.keys():
            try:
                self.attrs[k] = elem.attrs[k]
            # BUG FIX: a missing key raises KeyError, not ValueError, so the
            # old "except ValueError" never fired and update() crashed when
            # the other node lacked one of our attributes.
            except KeyError:
                pass

    def __str__(self):
        ''' This method generates a string representation (the key) of a node. '''
        # "k in dict" replaces the Python2-only dict.has_key().
        if 'name' in self.attrs:
            return Element.generateKey([self.id, self.attrs['name']])
        return Element.generateKey(self.id)

    def __iter__(self):
        ''' This method allows the class to be an iterator over its children. '''
        # iter(values()) instead of the Python2-only itervalues() keeps this
        # working on both Python 2 and Python 3.
        return iter(self.children.values())

    def __copy__(self):
        ''' Shallow copy method, may not be used. '''
        cp = Element(self.id, {})
        for k in self.attrs.keys():
            try:
                cp.attrs[k.lower()] = copy.copy(self.attrs[k])
            except KeyError:  # was ValueError, which a dict miss never raises
                pass
        return cp

    def summaryCopy(self, id=None, tag=None):
        ''' This method creates a copy of the node that can be used as a summary node. '''
        attrs = {}
        # Copy only the attributes that are necessary for a summary node and
        # reset the aggregation counters.
        for k in self.attrs.keys():
            try:
                if k.lower() in ['name', 'sum', 'num', 'type', 'units', 'slope', 'source']:
                    attrs[k.lower()] = self.attrs[k]
                attrs['sum'] = 0
                attrs['num'] = 0
            except KeyError:  # was ValueError, which a dict miss never raises
                pass
        # Create a new node from the attributes that were copied from the existing node.
        cp = Element(self.id, attrs, tag)
        # Make sure that the summary node references the original children
        cp.children = self.children
        return cp

    def getAttr(self, attr):
        ''' Return the attribute value for attr (case-insensitive) or None. '''
        return self.attrs.get(attr.lower())

    def getAttrs(self):
        ''' Return the underlying attribute dictionary. '''
        return self.attrs

    def setAttr(self, attr, val):
        ''' Set attribute attr (case-insensitive) to val. '''
        self.attrs[attr.lower()] = val

    def incAttr(self, attr, val):
        ''' Increment attribute attr by val; log failures instead of raising. '''
        try:
            self.attrs[attr.lower()] += val
        except Exception as e:  # py3-compatible syntax (was "Exception, e")
            print('Can not increment attribute ' + str(e))

    def getSummaryData(self):
        ''' Return the summary data attached to this node, or None if absent. '''
        try:
            return self.summaryData
        except AttributeError:  # narrowed from a bare except
            return None
| bsd-3-clause |
fkarb/xltable | xltable/chart.py | 2 | 3266 | """
Chart objects reference data from Table instances and are written
to Excel worksheets as Excel charts.
"""
import datetime as dt
class Chart(object):
    """
    Chart objects reference data from Table instances and are written
    to Excel worksheets as Excel charts.

    :param str type: Chart type (see below).
    :param str subtype: Chart sub type (see below).
    :param str title: Chart title
    :param str legend_position: right (default), left, top, bottom or 'none' for no legend.
    :param dict x_axis: X axis options; date/datetime values are converted
                        to Excel serial dates.
    :param dict y_axis: Y axis options; date/datetime values are converted
                        to Excel serial dates.
    :param str show_blanks: How blank cells are plotted: 'gap', 'zero' or 'span'.
    :param int width: Chart width.
    :param int height: Chart height.

    Chart types and sub-types:

    - area:
        - stacked
        - percent_stacked
    - bar:
        - stacked
        - perecent_stacked
    - column:
        - stacked
        - perecent_stacked
    - line
    - scatter:
        - straight_with_markers
        - straight
        - smooth_with_markers
        - smooth
    - stock
    - radar:
        - with_markers
        - filled
    """
    def __init__(self,
                 type,
                 subtype=None,
                 title=None,
                 legend_position=None,
                 x_axis=None,
                 y_axis=None,
                 show_blanks=None,  # set to 'gap', 'zero' or 'span'
                 width=480,
                 height=288):
        self.type = type
        self.subtype = subtype
        self.title = title
        self.legend_position = legend_position
        # Copy the axis dicts so the conversion below never mutates the
        # caller's dictionaries.
        self.x_axis = dict(x_axis) if x_axis else x_axis
        self.y_axis = dict(y_axis) if y_axis else y_axis
        self.show_blanks = show_blanks
        self.width = width
        self.height = height
        self.__series = []

        # convert dates in the axis args to Excel serial dates
        for axis in (self.x_axis, self.y_axis):
            if axis:
                for key, value in list(axis.items()):
                    # BUG FIX: datetime is a subclass of date, but
                    # "datetime - date" raises TypeError, so reduce a
                    # datetime to its date component before subtracting.
                    if isinstance(value, dt.datetime):
                        value = value.date()
                    if isinstance(value, dt.date):
                        axis[key] = (value - dt.date(1900, 1, 1)).days + 2

    def add_series(self, values, **kwargs):
        """
        Adds a series to the chart.

        :param values: A :py:class:`xltable.Expression` object that evaluates to the data series.
        :param categories: A :py:class:`xltable.Expression` object that evaluates to the data series.
        :param name: Name to show in the legend for the series
        :param line: Line style, eg {'color': 'blue', 'width': 3.25} or {'none': True}
        :param marker: dict specifying how the markers should look, eg {type: square}.
        :param trendline: dict specifying how the trendline should be drawn, eg {type: linear}.
        """
        series = {"values": values}
        series.update(kwargs)
        self.__series.append(series)

    def iter_series(self, workbook, row, col):
        """
        Yield series dictionaries with values resolved to the final excel formulas.
        """
        for series in self.__series:
            # Work on a copy so the stored series definition stays reusable.
            series = dict(series)
            series["values"] = series["values"].get_formula(workbook, row, col)
            if "categories" in series:
                series["categories"] = series["categories"].get_formula(workbook, row, col)
            yield series
| mit |
deepaklukose/grpc | tools/distrib/python/make_grpcio_tools.py | 2 | 6198 | #!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import errno
import filecmp
import glob
import os
import os.path
import shutil
import subprocess
import sys
import traceback
import uuid
DEPS_FILE_CONTENT = """
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED BY make_grpcio_tools.py!
CC_FILES={cc_files}
PROTO_FILES={proto_files}
CC_INCLUDE={cc_include}
PROTO_INCLUDE={proto_include}
{commit_hash}
"""
COMMIT_HASH_PREFIX = 'PROTOBUF_SUBMODULE_VERSION="'
COMMIT_HASH_SUFFIX = '"'
# Bazel query result prefix for expected source files in protobuf.
PROTOBUF_CC_PREFIX = '//:src/'
PROTOBUF_PROTO_PREFIX = '//:src/'
GRPC_ROOT = os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..'))
GRPC_PYTHON_ROOT = os.path.join(GRPC_ROOT, 'tools', 'distrib', 'python',
'grpcio_tools')
GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT = os.path.join('third_party', 'protobuf',
'src')
GRPC_PROTOBUF = os.path.join(GRPC_ROOT, GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT)
GRPC_PROTOBUF_SUBMODULE_ROOT = os.path.join(GRPC_ROOT, 'third_party',
'protobuf')
GRPC_PROTOC_PLUGINS = os.path.join(GRPC_ROOT, 'src', 'compiler')
GRPC_PYTHON_PROTOBUF = os.path.join(GRPC_PYTHON_ROOT, 'third_party', 'protobuf',
'src')
GRPC_PYTHON_PROTOC_PLUGINS = os.path.join(GRPC_PYTHON_ROOT, 'grpc_root', 'src',
'compiler')
GRPC_PYTHON_PROTOC_LIB_DEPS = os.path.join(GRPC_PYTHON_ROOT,
'protoc_lib_deps.py')
GRPC_INCLUDE = os.path.join(GRPC_ROOT, 'include')
GRPC_PYTHON_INCLUDE = os.path.join(GRPC_PYTHON_ROOT, 'grpc_root', 'include')
BAZEL_DEPS = os.path.join(GRPC_ROOT, 'tools', 'distrib', 'python',
'bazel_deps.sh')
BAZEL_DEPS_PROTOC_LIB_QUERY = '//:protoc_lib'
BAZEL_DEPS_COMMON_PROTOS_QUERY = '//:well_known_protos'
def protobuf_submodule_commit_hash():
    """Return the commit hash (as a native string) for the HEAD of the
    protobuf submodule currently checked out."""
    # Run git inside the submodule via check_output's cwd argument instead of
    # the chdir dance: the original left the process in the submodule
    # directory if check_output raised.
    output = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                     cwd=GRPC_PROTOBUF_SUBMODULE_ROOT)
    # check_output returns bytes on Python 3; decode so callers can
    # concatenate the hash with str prefixes/suffixes.
    return output.decode('ascii').splitlines()[0].strip()
def bazel_query(query):
    """Run the bazel-deps helper script for *query* and return the output
    split into a list of lines."""
    raw_output = subprocess.check_output([BAZEL_DEPS, query])
    return raw_output.splitlines()
def get_deps():
    """Build and return the contents of the protoc_lib_deps.py file by
    querying bazel for protobuf's C++ sources and well-known protos."""
    # C++ sources needed to build the protoc library.
    cc_files_output = bazel_query(BAZEL_DEPS_PROTOC_LIB_QUERY)
    cc_files = [
        name[len(PROTOBUF_CC_PREFIX):] for name in cc_files_output
        if name.endswith('.cc') and name.startswith(PROTOBUF_CC_PREFIX)
    ]
    # Well-known .proto files shipped alongside the compiler.
    proto_files_output = bazel_query(BAZEL_DEPS_COMMON_PROTOS_QUERY)
    proto_files = [
        name[len(PROTOBUF_PROTO_PREFIX):] for name in proto_files_output
        if name.endswith('.proto') and name.startswith(PROTOBUF_PROTO_PREFIX)
    ]
    commit_hash = protobuf_submodule_commit_hash()
    # Interpolate everything into the generated-file template.
    deps_file_content = DEPS_FILE_CONTENT.format(
        cc_files=cc_files,
        proto_files=proto_files,
        cc_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT),
        proto_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT),
        commit_hash=COMMIT_HASH_PREFIX + commit_hash + COMMIT_HASH_SUFFIX)
    return deps_file_content
def long_path(path):
    """Return *path* in a form safe for long paths on Windows.

    On Windows the extended-length prefix '\\\\?\\' lifts the MAX_PATH
    limit; on every other platform the path is returned unchanged.
    """
    if os.name != 'nt':
        return path
    return '\\\\?\\' + path
def main():
    """Mirror gRPC/protobuf sources into the grpcio_tools package tree and
    regenerate protoc_lib_deps.py when bazel is available."""
    os.chdir(GRPC_ROOT)
    # Copy each (source, target) tree file-by-file, creating directories
    # on demand.
    for source, target in [(GRPC_PROTOBUF, GRPC_PYTHON_PROTOBUF),
                           (GRPC_PROTOC_PLUGINS, GRPC_PYTHON_PROTOC_PLUGINS),
                           (GRPC_INCLUDE, GRPC_PYTHON_INCLUDE)]:
        for source_dir, _, files in os.walk(source):
            target_dir = os.path.abspath(
                os.path.join(target, os.path.relpath(source_dir, source)))
            try:
                os.makedirs(target_dir)
            except OSError as error:
                # Directory already existing is fine; re-raise anything else.
                if error.errno != errno.EEXIST:
                    raise
            for relative_file in files:
                source_file = os.path.abspath(
                    os.path.join(source_dir, relative_file))
                target_file = os.path.abspath(
                    os.path.join(target_dir, relative_file))
                shutil.copyfile(source_file, target_file)
    try:
        protoc_lib_deps_content = get_deps()
    except Exception as error:
        # We allow this script to succeed even if we couldn't get the dependencies,
        # as then we can assume that even without a successful bazel run the
        # dependencies currently in source control are 'good enough'.
        sys.stderr.write("Got non-fatal error:\n")
        traceback.print_exc(file=sys.stderr)
        return
    # If we successfully got the dependencies, truncate and rewrite the deps file.
    with open(GRPC_PYTHON_PROTOC_LIB_DEPS, 'w') as deps_file:
        deps_file.write(protoc_lib_deps_content)
# Allow running this file directly as a script.
if __name__ == '__main__':
    main()
| apache-2.0 |
k-zen/Ama | ama/file_listener.py | 1 | 2550 | # -*- coding: utf-8 -*-
"""
Clase perteneciente al mรณdulo de procesamiento de datos e inferencias Ama.
.. module:: file_listener
:platform: Unix
:synopsis: Funciones รบtiles para la detecciรณn de cambios en directorios. Ej. cuando se agrega un nuevo archivo de radar.
.. moduleauthor:: Andreas P. Koenzen <akc@apkc.net>
"""
import ama.utils as utils
import ama.processor as processor
import os
import time
from watchdog.events import FileSystemEventHandler
__author__ = "Andreas P. Koenzen"
__copyright__ = "Copyright 2016, Proyecto de Tesis / Universidad Catรณlica de Asunciรณn."
__credits__ = "Andreas P. Koenzen"
__license__ = "BSD"
__version__ = "0.1"
__maintainer__ = "Andreas P. Koenzen"
__email__ = "akc@apkc.net"
__status__ = "Prototype"
class FileListener(FileSystemEventHandler):
    """
    Handler for changes in a previously configured directory
    (e.g. a new radar file being added).
    """

    # Data layer to process (int); forwarded to the processor.
    layer = 0

    def __init__(self, layer):
        self.layer = layer

    def on_created(self, event):
        """Process a newly created file, then optionally delete it."""
        # Sleep this thread for 15 seconds to wait for the file to be
        # completely copied before reading it.
        time.sleep(15)

        print(utils.Colors.BOLD + "INFO: Detectado archivo nuevo. Procesando..." + utils.Colors.ENDC)

        try:
            if utils.Utils.should_process_file(event.src_path, processor.Processor.FILE_SIZE_LIMIT, True):
                print(utils.Colors.BOLD + "ARCHIVO: {0}".format(event.src_path) + utils.Colors.ENDC)

                # Process the file.
                processor.Processor().single_correlate_dbz_to_location_to_json(event.src_path, self.layer)
            else:
                print(utils.Colors.FAIL + "ERROR: El archivo detectado no cumple con los requisitos de procesamiento." + utils.Colors.ENDC)
                print(utils.Colors.FAIL + "ARCHIVO: {0}".format(event.src_path) + utils.Colors.ENDC)
        except Exception as e:
            print(utils.Colors.FAIL + "ERROR: Procesando archivo nuevo." + utils.Colors.ENDC)
            print(utils.Colors.FAIL + "DESC: {0}".format(e) + utils.Colors.ENDC)
        finally:
            # Always delete the file that was processed (when configured).
            if processor.Processor.SHOULD_REMOVE_PROCESSED_FILES == 1:
                try:
                    os.remove(event.src_path)
                except Exception as e:
                    print(utils.Colors.FAIL + "ERROR: Borrando archivo original." + utils.Colors.ENDC)
                    print(utils.Colors.FAIL + "DESC: {0}".format(e) + utils.Colors.ENDC)
| bsd-2-clause |
slayerjain/servo | tests/wpt/harness/wptrunner/executors/executorservodriver.py | 21 | 8682 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import socket
import threading
import time
import traceback
from .base import (Protocol,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
strip_server)
import webdriver
from ..testrunner import Stop
here = os.path.join(os.path.split(__file__)[0])
extra_timeout = 5
class ServoWebDriverProtocol(Protocol):
    """WebDriver-based protocol handle used to drive a Servo browser."""

    def __init__(self, executor, browser, capabilities, **kwargs):
        Protocol.__init__(self, executor, browser)
        self.capabilities = capabilities
        self.host = browser.webdriver_host
        self.port = browser.webdriver_port
        self.session = None

    def setup(self, runner):
        """Connect to browser via WebDriver."""
        self.runner = runner

        session_started = False
        try:
            self.session = webdriver.Session(self.host, self.port,
                                             extension=webdriver.ServoExtensions)
            self.session.start()
        # Catch Exception rather than a bare except so KeyboardInterrupt
        # and SystemExit still propagate.
        except Exception:
            self.logger.warning(
                "Connecting with WebDriver failed:\n%s" % traceback.format_exc())
        else:
            self.logger.debug("session started")
            session_started = True

        if not session_started:
            self.logger.warning("Failed to connect via WebDriver")
            self.executor.runner.send_message("init_failed")
        else:
            self.executor.runner.send_message("init_succeeded")

    def teardown(self):
        """End the WebDriver session; errors are ignored (the browser may
        already be gone)."""
        self.logger.debug("Hanging up on WebDriver session")
        try:
            self.session.end()
        except Exception:  # narrowed from a bare except
            pass

    def is_alive(self):
        """Return True if the WebDriver connection still responds."""
        try:
            # Get a simple property over the connection
            self.session.handle
        # TODO what exception?
        except Exception:
            return False
        return True

    def after_connect(self):
        pass

    def wait(self):
        """Block until the WebDriver connection drops or errors out."""
        while True:
            try:
                self.session.execute_async_script("")
            except webdriver.TimeoutException:
                pass
            except (socket.timeout, IOError):
                break
            except Exception:
                # BUG FIX: format_exc takes no exception argument (passing
                # one raises TypeError on Python 3).
                self.logger.error(traceback.format_exc())
                break

    def on_environment_change(self, old_environment, new_environment):
        # Unset all the old prefs before applying the new ones.
        self.session.extension.reset_prefs(*old_environment.get("prefs", {}).keys())
        self.session.extension.set_prefs(new_environment.get("prefs", {}))
class ServoWebDriverRun(object):
    """Run a callable against a WebDriver session on a worker thread,
    capturing the outcome as a (success, data) pair with a timeout."""

    def __init__(self, func, session, url, timeout, current_timeout=None):
        self.func = func
        self.result = None
        self.session = session
        self.url = url
        self.timeout = timeout
        # Set by _run() once a result (or an error) has been recorded.
        self.result_flag = threading.Event()

    def run(self):
        """Execute func on a thread and wait (bounded) for its result."""
        executor = threading.Thread(target=self._run)
        executor.start()

        flag = self.result_flag.wait(self.timeout + extra_timeout)
        if self.result is None:
            # The worker never produced a result within the grace period.
            assert not flag
            self.result = False, ("EXTERNAL-TIMEOUT", None)

        return self.result

    def _run(self):
        try:
            self.result = True, self.func(self.session, self.url, self.timeout)
        except webdriver.TimeoutException:
            self.result = False, ("EXTERNAL-TIMEOUT", None)
        except (socket.timeout, IOError):
            self.result = False, ("CRASH", None)
        except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            # BUG FIX: format_exc takes no exception argument (TypeError on
            # Python 3), and the assembled message was previously discarded
            # in favour of the raw exception object; report the message, as
            # the reftest executor's do_test does.
            message += traceback.format_exc()
            self.result = False, ("ERROR", message)
        finally:
            self.result_flag.set()
def timeout_func(timeout):
    """Return a zero-argument predicate reporting whether *timeout* (plus
    the module-level extra_timeout grace period) has elapsed since this
    call.  A falsy timeout yields a predicate that is always False."""
    if not timeout:
        return lambda: False

    start = time.time()
    return lambda: time.time() - start > timeout + extra_timeout
class ServoWebDriverTestharnessExecutor(TestharnessExecutor):
    """Servo WebDriver-based executor for testharness.js tests."""

    def __init__(self, browser, server_config, timeout_multiplier=1,
                 close_after_done=True, capabilities=None, debug_info=None):
        # NOTE(review): the timeout_multiplier and debug_info arguments are
        # not forwarded to the base class (hard-coded 1/None below) — looks
        # like a bug; confirm before relying on non-default values.
        TestharnessExecutor.__init__(self, browser, server_config, timeout_multiplier=1,
                                     debug_info=None)
        self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities)
        # JavaScript shim injected into the page to run the harness.
        with open(os.path.join(here, "testharness_servodriver.js")) as f:
            self.script = f.read()
        self.timeout = None

    def on_protocol_change(self, new_protocol):
        pass

    def is_alive(self):
        return self.protocol.is_alive()

    def do_test(self, test):
        """Run a single testharness test and convert its result."""
        url = self.test_url(test)

        timeout = test.timeout * self.timeout_multiplier + extra_timeout

        # Only push the script timeout to the session when it changed.
        if timeout != self.timeout:
            try:
                self.protocol.session.timeouts.script = timeout
                self.timeout = timeout
            except IOError:
                self.logger.error("Lost webdriver connection")
                return Stop

        success, data = ServoWebDriverRun(self.do_testharness,
                                          self.protocol.session,
                                          url,
                                          timeout).run()

        if success:
            return self.convert_result(test, data)

        # On failure, data is an (status, message) pair for result_cls.
        return (test.result_cls(*data), [])

    def do_testharness(self, session, url, timeout):
        """Navigate to url, execute the harness script and parse its JSON."""
        session.url = url
        result = json.loads(
            session.execute_async_script(
                self.script % {"abs_url": url,
                               "url": strip_server(url),
                               "timeout_multiplier": self.timeout_multiplier,
                               "timeout": timeout * 1000}))
        # Prevent leaking every page in history until Servo develops a more sane
        # page cache
        session.back()
        return result
class TimeoutError(Exception):
    """Timeout signal used by the reftest executor (do_test maps it to a
    TIMEOUT result).  NOTE: shadows the Python 3 builtin TimeoutError."""
    pass
class ServoWebDriverRefTestExecutor(RefTestExecutor):
    """Servo WebDriver-based executor for reftests."""

    def __init__(self, browser, server_config, timeout_multiplier=1,
                 screenshot_cache=None, capabilities=None, debug_info=None):
        """Selenium WebDriver-based executor for reftests"""
        RefTestExecutor.__init__(self,
                                 browser,
                                 server_config,
                                 screenshot_cache=screenshot_cache,
                                 timeout_multiplier=timeout_multiplier,
                                 debug_info=debug_info)
        self.protocol = ServoWebDriverProtocol(self, browser,
                                               capabilities=capabilities)
        self.implementation = RefTestImplementation(self)
        self.timeout = None
        # JavaScript snippet that waits for the page to be ready to shoot.
        with open(os.path.join(here, "reftest-wait_servodriver.js")) as f:
            self.wait_script = f.read()

    def is_alive(self):
        return self.protocol.is_alive()

    def do_test(self, test):
        """Run a reftest, mapping known failure modes to result statuses."""
        try:
            result = self.implementation.run_test(test)
            return self.convert_result(test, result)
        except IOError:
            return test.result_cls("CRASH", None), []
        except TimeoutError:
            return test.result_cls("TIMEOUT", None), []
        except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            # BUG FIX: format_exc takes no exception argument; passing one
            # raises TypeError on Python 3.
            message += traceback.format_exc()
            return test.result_cls("ERROR", message), []

    def screenshot(self, test, viewport_size, dpi):
        """Take a screenshot of the test page via the WebDriver session."""
        # https://github.com/w3c/wptrunner/issues/166
        assert viewport_size is None
        assert dpi is None

        timeout = (test.timeout * self.timeout_multiplier + extra_timeout
                   if self.debug_info is None else None)

        # Only push the script timeout to the session when it changed.
        if self.timeout != timeout:
            try:
                self.protocol.session.timeouts.script = timeout
                self.timeout = timeout
            except IOError:
                self.logger.error("Lost webdriver connection")
                return Stop

        return ServoWebDriverRun(self._screenshot,
                                 self.protocol.session,
                                 self.test_url(test),
                                 timeout).run()

    def _screenshot(self, session, url, timeout):
        session.url = url
        session.execute_async_script(self.wait_script)
        return session.screenshot()
| mpl-2.0 |
jhd/spunout | flask/lib/python2.7/site-packages/sqlparse/__init__.py | 4 | 2333 | # Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""Parse SQL statements."""
__version__ = '0.1.13'
# Setup namespace
from sqlparse import engine
from sqlparse import filters
from sqlparse import formatter
# Deprecated in 0.1.5. Will be removed in 0.2.0
from sqlparse.exceptions import SQLParseError
def parse(sql, encoding=None):
    """Parse sql and return a list of statements.

    :param sql: A string containing one or more SQL statements.
    :param encoding: The encoding of the statement (optional).
    :returns: A tuple of :class:`~sqlparse.sql.Statement` instances.
    """
    statements = parsestream(sql, encoding)
    return tuple(statements)
def parsestream(stream, encoding=None):
    """Lazily parse SQL statements from a file-like object.

    :param stream: A file-like object.
    :param encoding: The encoding of the stream contents (optional).
    :returns: A generator of :class:`~sqlparse.sql.Statement` instances.
    """
    pipeline = engine.FilterStack()
    pipeline.full_analyze()
    return pipeline.run(stream, encoding)
def format(sql, **options):
    """Format *sql* according to *options*.

    Available options are documented in :ref:`formatting`.

    In addition to the formatting options this function accepts the
    keyword "encoding" which determines the encoding of the statement.

    :returns: The formatted SQL statement as string.
    """
    # "encoding" is not a formatter option, so pull it out first.
    encoding = options.pop('encoding', None)
    pipeline = engine.FilterStack()
    validated_options = formatter.validate_options(options)
    pipeline = formatter.build_filter_stack(pipeline, validated_options)
    pipeline.postprocess.append(filters.SerializerUnicode())
    return ''.join(pipeline.run(sql, encoding))
def split(sql, encoding=None):
    """Split *sql* into single statements.

    :param sql: A string containing one or more SQL statements.
    :param encoding: The encoding of the statement (optional).
    :returns: A list of strings.
    """
    pipeline = engine.FilterStack()
    pipeline.split_statements = True
    return [unicode(statement).strip()
            for statement in pipeline.run(sql, encoding)]
from sqlparse.engine.filter import StatementFilter
def split2(stream):
    """Split a token *stream* into statements using a StatementFilter."""
    statement_filter = StatementFilter()
    return list(statement_filter.process(None, stream))
| gpl-3.0 |
ArcherCraftStore/ArcherVMPeridot | Python/Lib/turtledemo/planet_and_moon.py | 65 | 2866 | #!/usr/bin/env python3
""" turtle-example-suite:
tdemo_planets_and_moon.py
Gravitational system simulation using the
approximation method from Feynman-lectures,
p.9-8, using turtlegraphics.
Example: heavy central body, light planet,
very light moon!
Planet has a circular orbit, moon a stable
orbit around the planet.
You can hold the movement temporarily by pressing
the left mouse button with mouse over the
scrollbar of the canvas.
"""
from turtle import Shape, Turtle, mainloop, Vec2D as Vec
from time import sleep
G = 8
class GravSys(object):
    """Driver for the gravitational simulation: holds the bodies, the
    simulation clock and the integration step size."""

    def __init__(self):
        self.planets = []   # every body taking part in the simulation
        self.t = 0          # simulation time
        self.dt = 0.01      # integration time step

    def init(self):
        """Prime the integrator state of every body."""
        for body in self.planets:
            body.init()

    def start(self):
        """Advance the whole system through 10000 integration steps."""
        for _ in range(10000):
            self.t += self.dt
            for body in self.planets:
                body.step()
class Star(Turtle):
    """A gravitating body drawn as a turtle.

    Motion is integrated with the half-step scheme from the Feynman
    lectures (p.9-8): init() advances the velocity half a step so that
    velocities stay offset from positions by dt/2.
    """

    def __init__(self, m, x, v, gravSys, shape):
        Turtle.__init__(self, shape=shape)
        self.penup()
        self.m = m           # mass
        self.setpos(x)       # position (turtle position doubles as state)
        self.v = v           # velocity vector
        gravSys.planets.append(self)
        self.gravSys = gravSys
        self.resizemode("user")
        self.pendown()

    def init(self):
        """Kick the velocity forward by half a time step (leapfrog start)."""
        dt = self.gravSys.dt
        self.a = self.acc()
        self.v = self.v + 0.5*dt*self.a

    def acc(self):
        """Sum the gravitational acceleration from every other body:
        G*m/|r|^3 * r for each of them."""
        a = Vec(0,0)
        for planet in self.gravSys.planets:
            if planet != self:
                v = planet.pos()-self.pos()
                a += (G*planet.m/abs(v)**3)*v
        return a

    def step(self):
        """Advance position by one step, then update the acceleration and
        velocity for the next step."""
        dt = self.gravSys.dt
        self.setpos(self.pos() + dt*self.v)
        # Every body except the central one (index 0) is drawn pointing at
        # the central body.
        if self.gravSys.planets.index(self) != 0:
            self.setheading(self.towards(self.gravSys.planets[0]))
        self.a = self.acc()
        self.v = self.v + dt*self.a
## create compound yellow/blue turtleshape for planets
def main():
    """Build the compound planet shape, set up the sun/earth/moon system
    and run the simulation."""
    # Trace a half-circle twice to get the two halves of the planet shape.
    s = Turtle()
    s.reset()
    s.getscreen().tracer(0,0)
    s.ht()
    s.pu()
    s.fd(6)
    s.lt(90)
    s.begin_poly()
    s.circle(6, 180)
    s.end_poly()
    m1 = s.get_poly()
    s.begin_poly()
    s.circle(6,180)
    s.end_poly()
    m2 = s.get_poly()
    # Combine the two halves into one registered compound shape.
    planetshape = Shape("compound")
    planetshape.addcomponent(m1,"orange")
    planetshape.addcomponent(m2,"blue")
    s.getscreen().register_shape("planet", planetshape)
    s.getscreen().tracer(1,0)

    ## setup gravitational system
    gs = GravSys()
    # Heavy central body with a small drift so total momentum balances.
    sun = Star(1000000, Vec(0,0), Vec(0,-2.5), gs, "circle")
    sun.color("yellow")
    sun.shapesize(1.8)
    sun.pu()
    earth = Star(12500, Vec(210,0), Vec(0,195), gs, "planet")
    earth.pencolor("green")
    earth.shapesize(0.8)
    moon = Star(1, Vec(220,0), Vec(0,295), gs, "planet")
    moon.pencolor("blue")
    moon.shapesize(0.5)
    gs.init()
    gs.start()
    return "Done!"
if __name__ == '__main__':
    # Run the demo directly; the turtle screen closes when main() returns.
    msg = main()
    print(msg)
    #mainloop()
| apache-2.0 |
danieljaouen/ansible | lib/ansible/modules/network/cloudengine/ce_netstream_global.py | 43 | 36150 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_netstream_global
version_added: "2.4"
short_description: Manages global parameters of NetStream on HUAWEI CloudEngine switches.
description:
- Manages global parameters of NetStream on HUAWEI CloudEngine switches.
author: YangYang (@CloudEngine-Ansible)
options:
type:
description:
- Specifies the type of netstream global.
choices: ['ip', 'vxlan']
default: 'ip'
state:
description:
- Specify desired state of the resource.
choices: ['present', 'absent']
default: present
interface:
description:
- Netstream global interface.
required: true
sampler_interval:
description:
- Specifies the netstream sampler interval, length is 1 - 65535.
sampler_direction:
description:
- Specifies the netstream sampler direction.
choices: ['inbound', 'outbound']
statistics_direction:
description:
- Specifies the netstream statistic direction.
choices: ['inbound', 'outbound']
statistics_record:
description:
- Specifies the flexible netstream statistic record, length is 1 - 32.
index_switch:
description:
- Specifies the netstream index-switch.
choices: ['16', '32']
default: '16'
"""
EXAMPLES = '''
- name: netstream global module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure a netstream sampler at interface 10ge1/0/2, direction is outbound,interval is 30.
ce_netstream_global:
interface: 10ge1/0/2
type: ip
sampler_interval: 30
sampler_direction: outbound
state: present
provider: "{{ cli }}"
- name: Configure a netstream flexible statistic at interface 10ge1/0/2, record is test1, type is ip.
ce_netstream_global:
type: ip
interface: 10ge1/0/2
statistics_record: test1
provider: "{{ cli }}"
- name: Set the vxlan index-switch to 32.
ce_netstream_global:
type: vxlan
interface: all
index_switch: 32
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"index_switch": "16",
"interface": "10ge1/0/2",
"state": "present",
"statistics_record": "test",
"type": "vxlan"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"flexible_statistic": [
{
"interface": "10ge1/0/2",
"statistics_record": [],
"type": "ip"
},
{
"interface": "10ge1/0/2",
"statistics_record": [],
"type": "vxlan"
}
],
"index-switch": [
{
"index-switch": "16",
"type": "ip"
},
{
"index-switch": "16",
"type": "vxlan"
}
],
"ip_record": [
"test",
"test1"
],
"sampler": [
{
"interface": "all",
"sampler_direction": "null",
"sampler_interval": "null"
}
],
"statistic": [
{
"interface": "10ge1/0/2",
"statistics_direction": [],
"type": "null"
}
],
"vxlan_record": [
"test"
]}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"flexible_statistic": [
{
"interface": "10ge1/0/2",
"statistics_record": [],
"type": "ip"
},
{
"interface": "10ge1/0/2",
"statistics_record": [
"test"
],
"type": "vxlan"
}
],
"index-switch": [
{
"index-switch": "16",
"type": "ip"
},
{
"index-switch": "16",
"type": "vxlan"
}
],
"sampler": [
{
"interface": "all",
"sampler_direction": "null",
"sampler_interval": "null"
}
],
"statistic": [
{
"interface": "10ge1/0/2",
"statistics_direction": [],
"type": "null"
}
]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface 10ge1/0/2",
"netstream record test vxlan inner-ip"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
def get_interface_type(interface):
    """Map an interface name to its type keyword, e.g. '10GE1/0/2' ->
    '10ge'.  Returns None for an unrecognized name or None input."""
    if interface is None:
        return None

    # (upper-case prefix, type keyword) pairs; prefixes are mutually
    # exclusive so match order does not matter.
    prefix_to_type = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('ALL', 'all'),
    )

    upper_name = interface.upper()
    for prefix, iftype in prefix_to_type:
        if upper_name.startswith(prefix):
            return iftype
    return None
class NetStreamGlobal(object):
"""
Manages netstream global parameters.
"""
    def __init__(self, argument_spec):
        """Build the AnsibleModule, then cache all input parameters and
        initialize the result/state containers used by the run."""
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # module input info
        self.type = self.module.params['type']
        self.interface = self.module.params['interface']
        self.sampler_interval = self.module.params['sampler_interval']
        self.sampler_direction = self.module.params['sampler_direction']
        self.statistics_direction = self.module.params['statistics_direction']
        self.statistics_record = self.module.params['statistics_record']
        self.index_switch = self.module.params['index_switch']
        self.state = self.module.params['state']

        # host info
        self.host = self.module.params['host']
        self.username = self.module.params['username']
        self.port = self.module.params['port']

        # state (module result: change flag, commands sent, before/after views)
        self.changed = False
        self.updates_cmd = list()
        self.commands = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

        # local parameters: per-feature views of the existing/final config
        self.existing["sampler"] = list()
        self.existing["statistic"] = list()
        self.existing["flexible_statistic"] = list()
        self.existing["index-switch"] = list()
        self.existing["ip_record"] = list()
        self.existing["vxlan_record"] = list()
        self.end_state["sampler"] = list()
        self.end_state["statistic"] = list()
        self.end_state["flexible_statistic"] = list()
        self.end_state["index-switch"] = list()
        # Dirty flags: which feature groups need configuration pushed.
        self.sampler_changed = False
        self.statistic_changed = False
        self.flexible_changed = False
        self.index_switch_changed = False
def init_module(self):
"""init module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd)
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd)
    def get_exist_sampler_interval(self):
        """Read the current netstream sampler configuration from the device.

        Appends one entry (interface "all") for the global sampler, plus one
        entry per matching line for the target interface when it is not
        "all".  Missing configuration is recorded as "null" markers.
        """
        sampler_tmp = dict()
        sampler_tmp1 = dict()
        flags = list()
        # Global sampler lines look like:
        # "netstream sampler random-packets <interval> <direction>"
        exp = " | ignore-case include ^netstream sampler random-packets"
        flags.append(exp)
        config = get_config(self.module, flags)
        if not config:
            # No global sampler configured.
            sampler_tmp["sampler_interval"] = "null"
            sampler_tmp["sampler_direction"] = "null"
        else:
            # Direction is the last token, interval the one before it.
            config_list = config.split(' ')
            config_num = len(config_list)
            sampler_tmp["sampler_direction"] = config_list[config_num - 1]
            sampler_tmp["sampler_interval"] = config_list[config_num - 2]
        sampler_tmp["interface"] = "all"
        self.existing["sampler"].append(sampler_tmp)
        if self.interface != "all":
            # Repeat the lookup scoped to the target interface section.
            flags = list()
            exp = " | ignore-case section include ^interface %s$" \
                  " | include netstream sampler random-packets" % self.interface
            flags.append(exp)
            config = get_config(self.module, flags)
            if not config:
                sampler_tmp1["sampler_interval"] = "null"
                sampler_tmp1["sampler_direction"] = "null"
            else:
                config = config.lstrip()
                config_list = config.split('\n')
                # NOTE(review): only the last matching line survives the
                # loop; presumably at most one inbound/outbound pair is
                # expected per interface -- confirm against device output.
                for config_mem in config_list:
                    sampler_tmp1 = dict()
                    config_mem_list = config_mem.split(' ')
                    config_num = len(config_mem_list)
                    sampler_tmp1["sampler_direction"] = config_mem_list[
                        config_num - 1]
                    sampler_tmp1["sampler_interval"] = config_mem_list[
                        config_num - 2]
            sampler_tmp1["interface"] = self.interface
            self.existing["sampler"].append(sampler_tmp1)
    def get_exist_statistic_record(self):
        """Read the flexible statistic (netstream record) config per type.

        Fails fast when both statistics_record and statistics_direction were
        supplied, then appends one "ip" entry and one "vxlan" entry to
        self.existing["flexible_statistic"] describing the records bound to
        the target interface.
        """
        if self.statistics_record and self.statistics_direction:
            self.module.fail_json(
                msg='Error: The statistic direction and record can not exist at the same time.')
        statistic_tmp = dict()
        statistic_tmp1 = dict()
        statistic_tmp["statistics_record"] = list()
        statistic_tmp["interface"] = self.interface
        statistic_tmp1["statistics_record"] = list()
        statistic_tmp1["interface"] = self.interface
        flags = list()
        # Matching lines look like: "netstream record <name> <ip|vxlan> ..."
        exp = " | ignore-case section include ^interface %s$" \
              " | include netstream record"\
              % (self.interface)
        flags.append(exp)
        config = get_config(self.module, flags)
        if not config:
            # No records bound: report empty entries for both types.
            statistic_tmp["type"] = "ip"
            self.existing["flexible_statistic"].append(statistic_tmp)
            statistic_tmp1["type"] = "vxlan"
            self.existing["flexible_statistic"].append(statistic_tmp1)
        else:
            config = config.lstrip()
            config_list = config.split('\n')
            # NOTE(review): the same statistic_tmp dict object is appended
            # on every iteration (and its record list is reset each pass),
            # so all appended "ip" entries alias one dict holding only the
            # last line's record -- verify this matches upstream intent.
            for config_mem in config_list:
                config_mem = config_mem.lstrip()
                statistic_tmp["statistics_record"] = list()
                config_mem_list = config_mem.split(' ')
                if str(config_mem_list[3]) == "ip":
                    statistic_tmp["statistics_record"].append(
                        str(config_mem_list[2]))
                statistic_tmp["type"] = "ip"
                self.existing["flexible_statistic"].append(statistic_tmp)
            # Same scan again for vxlan-typed records.
            for config_mem in config_list:
                statistic_tmp1["statistics_record"] = list()
                config_mem = config_mem.lstrip()
                config_mem_list = config_mem.split(' ')
                if str(config_mem_list[3]) == "vxlan":
                    statistic_tmp1["statistics_record"].append(
                        str(config_mem_list[2]))
                statistic_tmp1["type"] = "vxlan"
                self.existing["flexible_statistic"].append(statistic_tmp1)
    def get_exist_interface_statistic(self):
        """Read per-interface inbound/outbound netstream statistic config.

        Appends one entry to self.existing["statistic"] whose
        "statistics_direction" list holds every configured direction on the
        target interface ("null" type when nothing is configured).
        """
        statistic_tmp1 = dict()
        statistic_tmp1["statistics_direction"] = list()
        flags = list()
        # Matching lines look like: "netstream <inbound|outbound> ip"
        exp = " | ignore-case section include ^interface %s$" \
              " | include netstream inbound|outbound"\
              % self.interface
        flags.append(exp)
        config = get_config(self.module, flags)
        if not config:
            statistic_tmp1["type"] = "null"
        else:
            statistic_tmp1["type"] = "ip"
            config = config.lstrip()
            config_list = config.split('\n')
            for config_mem in config_list:
                config_mem = config_mem.lstrip()
                config_mem_list = config_mem.split(' ')
                # Token 1 is the direction keyword.
                statistic_tmp1["statistics_direction"].append(
                    str(config_mem_list[1]))
        statistic_tmp1["interface"] = self.interface
        self.existing["statistic"].append(statistic_tmp1)
    def get_exist_index_switch(self):
        """Read the current netstream export index-switch settings.

        Records an ip entry and a vxlan entry in
        self.existing["index-switch"]; "16" is the device default and is
        upgraded to "32" when an explicit index-switch line is present.
        """
        index_switch_tmp = dict()
        index_switch_tmp1 = dict()
        # Defaults when no explicit configuration exists.
        index_switch_tmp["index-switch"] = "16"
        index_switch_tmp["type"] = "ip"
        index_switch_tmp1["index-switch"] = "16"
        index_switch_tmp1["type"] = "vxlan"
        flags = list()
        exp = " | ignore-case include index-switch"
        flags.append(exp)
        config = get_config(self.module, flags)
        if not config:
            self.existing["index-switch"].append(index_switch_tmp)
            self.existing["index-switch"].append(index_switch_tmp1)
        else:
            config = config.lstrip()
            config_list = config.split('\n')
            # Lines look like: "netstream export <ip|vxlan> ... index-switch 32";
            # token 2 carries the type.
            for config_mem in config_list:
                config_mem_list = config_mem.split(' ')
                if str(config_mem_list[2]) == "ip":
                    index_switch_tmp["index-switch"] = "32"
                    index_switch_tmp["type"] = "ip"
                if str(config_mem_list[2]) == "vxlan":
                    index_switch_tmp1["index-switch"] = "32"
                    index_switch_tmp1["type"] = "vxlan"
            self.existing["index-switch"].append(index_switch_tmp)
            self.existing["index-switch"].append(index_switch_tmp1)
def get_exist_record(self):
"""get exist netstream record"""
flags = list()
exp = " | ignore-case include netstream record"
flags.append(exp)
config = get_config(self.module, flags)
if config:
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
config_mem_list = config_mem.split(' ')
if config_mem_list[3] == "ip":
self.existing["ip_record"].append(config_mem_list[2])
if config_mem_list[3] == "vxlan":
self.existing["vxlan_record"].append(config_mem_list[2])
    def get_end_sampler_interval(self):
        """Re-read the sampler configuration after applying changes.

        Mirrors get_exist_sampler_interval() but stores results in
        self.end_state instead of self.existing.
        """
        sampler_tmp = dict()
        sampler_tmp1 = dict()
        flags = list()
        # Global sampler lines:
        # "netstream sampler random-packets <interval> <direction>"
        exp = " | ignore-case include ^netstream sampler random-packets"
        flags.append(exp)
        config = get_config(self.module, flags)
        if not config:
            sampler_tmp["sampler_interval"] = "null"
            sampler_tmp["sampler_direction"] = "null"
        else:
            # Direction is the last token, interval the one before it.
            config_list = config.split(' ')
            config_num = len(config_list)
            sampler_tmp["sampler_direction"] = config_list[config_num - 1]
            sampler_tmp["sampler_interval"] = config_list[config_num - 2]
        sampler_tmp["interface"] = "all"
        self.end_state["sampler"].append(sampler_tmp)
        if self.interface != "all":
            # Repeat the lookup scoped to the target interface section.
            flags = list()
            exp = " | ignore-case section include ^interface %s$" \
                  " | include netstream sampler random-packets" % self.interface
            flags.append(exp)
            config = get_config(self.module, flags)
            if not config:
                sampler_tmp1["sampler_interval"] = "null"
                sampler_tmp1["sampler_direction"] = "null"
            else:
                config = config.lstrip()
                config_list = config.split('\n')
                # NOTE(review): only the last matching line survives the
                # loop -- same behaviour as the "exist" variant.
                for config_mem in config_list:
                    sampler_tmp1 = dict()
                    config_mem_list = config_mem.split(' ')
                    config_num = len(config_mem_list)
                    sampler_tmp1["sampler_direction"] = config_mem_list[
                        config_num - 1]
                    sampler_tmp1["sampler_interval"] = config_mem_list[
                        config_num - 2]
            sampler_tmp1["interface"] = self.interface
            self.end_state["sampler"].append(sampler_tmp1)
    def get_end_statistic_record(self):
        """Re-read flexible statistic (netstream record) config per type.

        Mirrors get_exist_statistic_record() but stores results in
        self.end_state instead of self.existing.
        """
        if self.statistics_record and self.statistics_direction:
            self.module.fail_json(
                msg='Error: The statistic direction and record can not exist at the same time.')
        statistic_tmp = dict()
        statistic_tmp1 = dict()
        statistic_tmp["statistics_record"] = list()
        statistic_tmp["interface"] = self.interface
        statistic_tmp1["statistics_record"] = list()
        statistic_tmp1["interface"] = self.interface
        flags = list()
        # Matching lines look like: "netstream record <name> <ip|vxlan> ..."
        exp = " | ignore-case section include ^interface %s$" \
              " | include netstream record"\
              % (self.interface)
        flags.append(exp)
        config = get_config(self.module, flags)
        if not config:
            # No records bound: report empty entries for both types.
            statistic_tmp["type"] = "ip"
            self.end_state["flexible_statistic"].append(statistic_tmp)
            statistic_tmp1["type"] = "vxlan"
            self.end_state["flexible_statistic"].append(statistic_tmp1)
        else:
            config = config.lstrip()
            config_list = config.split('\n')
            # NOTE(review): as in the "exist" variant, the same dict object
            # is appended on every iteration -- the entries alias one dict.
            for config_mem in config_list:
                config_mem = config_mem.lstrip()
                statistic_tmp["statistics_record"] = list()
                config_mem_list = config_mem.split(' ')
                if str(config_mem_list[3]) == "ip":
                    statistic_tmp["statistics_record"].append(
                        str(config_mem_list[2]))
                statistic_tmp["type"] = "ip"
                self.end_state["flexible_statistic"].append(statistic_tmp)
            # Same scan again for vxlan-typed records.
            for config_mem in config_list:
                statistic_tmp1["statistics_record"] = list()
                config_mem = config_mem.lstrip()
                config_mem_list = config_mem.split(' ')
                if str(config_mem_list[3]) == "vxlan":
                    statistic_tmp1["statistics_record"].append(
                        str(config_mem_list[2]))
                statistic_tmp1["type"] = "vxlan"
                self.end_state["flexible_statistic"].append(statistic_tmp1)
    def get_end_interface_statistic(self):
        """Re-read per-interface inbound/outbound statistic configuration.

        Mirrors get_exist_interface_statistic() but stores results in
        self.end_state instead of self.existing.
        """
        statistic_tmp1 = dict()
        statistic_tmp1["statistics_direction"] = list()
        flags = list()
        # Matching lines look like: "netstream <inbound|outbound> ip"
        exp = " | ignore-case section include ^interface %s$" \
              " | include netstream inbound|outbound"\
              % self.interface
        flags.append(exp)
        config = get_config(self.module, flags)
        if not config:
            statistic_tmp1["type"] = "null"
        else:
            statistic_tmp1["type"] = "ip"
            config = config.lstrip()
            config_list = config.split('\n')
            for config_mem in config_list:
                config_mem = config_mem.lstrip()
                config_mem_list = config_mem.split(' ')
                # Token 1 is the direction keyword.
                statistic_tmp1["statistics_direction"].append(
                    str(config_mem_list[1]))
        statistic_tmp1["interface"] = self.interface
        self.end_state["statistic"].append(statistic_tmp1)
    def get_end_index_switch(self):
        """Re-read the netstream export index-switch settings.

        Mirrors get_exist_index_switch() but stores results in
        self.end_state instead of self.existing.
        """
        index_switch_tmp = dict()
        index_switch_tmp1 = dict()
        # Defaults when no explicit configuration exists.
        index_switch_tmp["index-switch"] = "16"
        index_switch_tmp["type"] = "ip"
        index_switch_tmp1["index-switch"] = "16"
        index_switch_tmp1["type"] = "vxlan"
        flags = list()
        exp = " | ignore-case include index-switch"
        flags.append(exp)
        config = get_config(self.module, flags)
        if not config:
            self.end_state["index-switch"].append(index_switch_tmp)
            self.end_state["index-switch"].append(index_switch_tmp1)
        else:
            config = config.lstrip()
            config_list = config.split('\n')
            # Token 2 of each matching line carries the type.
            for config_mem in config_list:
                config_mem_list = config_mem.split(' ')
                if str(config_mem_list[2]) == "ip":
                    index_switch_tmp["index-switch"] = "32"
                    index_switch_tmp["type"] = "ip"
                if str(config_mem_list[2]) == "vxlan":
                    index_switch_tmp1["index-switch"] = "32"
                    index_switch_tmp1["type"] = "vxlan"
            self.end_state["index-switch"].append(index_switch_tmp)
            self.end_state["index-switch"].append(index_switch_tmp1)
    def check_params(self):
        """Validate all input parameters, failing the module on any error."""
        # netstream parameters check
        if not get_interface_type(self.interface):
            self.module.fail_json(
                msg='Error: Interface name of %s is error.' % self.interface)
        if self.sampler_interval:
            # sampler_interval arrives as a string; must be a plain integer.
            if not str(self.sampler_interval).isdigit():
                self.module.fail_json(
                    msg='Error: Active interval should be numerical.')
            if int(self.sampler_interval) < 1 or int(self.sampler_interval) > 65535:
                self.module.fail_json(
                    msg="Error: Sampler interval should between 1 - 65535.")
        if self.statistics_record:
            # The record *name* (a string) must be 1-32 characters long.
            if len(self.statistics_record) < 1 or len(self.statistics_record) > 32:
                self.module.fail_json(
                    msg="Error: Statistic record length should between 1 - 32.")
        if self.interface == "all":
            # Statistic settings are interface-scoped only.
            if self.statistics_record or self.statistics_direction:
                self.module.fail_json(
                    msg="Error: Statistic function should be used at interface.")
        if self.statistics_direction:
            if self.type == "vxlan":
                self.module.fail_json(
                    msg="Error: Vxlan do not support inbound or outbound statistic.")
        # Sampler interval and direction are a paired setting.
        if (self.sampler_interval and not self.sampler_direction) \
                or (self.sampler_direction and not self.sampler_interval):
            self.module.fail_json(
                msg="Error: Sampler interval and direction must be set at the same time.")
        if self.statistics_record and not self.type:
            self.module.fail_json(
                msg="Error: Statistic type and record must be set at the same time.")
        # A requested record must already be defined on the device.
        self.get_exist_record()
        if self.statistics_record:
            if self.type == "ip":
                if self.statistics_record not in self.existing["ip_record"]:
                    self.module.fail_json(
                        msg="Error: The statistic record is not exist.")
            if self.type == "vxlan":
                if self.statistics_record not in self.existing["vxlan_record"]:
                    self.module.fail_json(
                        msg="Error: The statistic record is not exist.")
def get_proposed(self):
"""get proposed info"""
if self.type:
self.proposed["type"] = self.type
if self.interface:
self.proposed["interface"] = self.interface
if self.sampler_interval:
self.proposed["sampler_interval"] = self.sampler_interval
if self.sampler_direction:
self.proposed["sampler_direction"] = self.sampler_direction
if self.statistics_direction:
self.proposed["statistics_direction"] = self.statistics_direction
if self.statistics_record:
self.proposed["statistics_record"] = self.statistics_record
if self.index_switch:
self.proposed["index_switch"] = self.index_switch
if self.state:
self.proposed["state"] = self.state
    def get_existing(self):
        """Snapshot the device state and flag which features must change.

        Fills self.existing via the get_exist_* helpers, then compares the
        requested parameters against it.  For state=present a mismatch sets
        the corresponding *_changed flag; for state=absent a *match* sets it
        (the matching config must be removed).
        """
        sampler_tmp = dict()
        statistic_tmp = dict()
        statistic_tmp1 = dict()
        index_tmp = dict()
        # Tracks whether a sampler entry exists for the target interface.
        temp = False
        self.get_exist_sampler_interval()
        self.get_exist_interface_statistic()
        self.get_exist_statistic_record()
        self.get_exist_index_switch()
        if self.state == "present":
            for sampler_tmp in self.existing["sampler"]:
                if self.interface == str(sampler_tmp["interface"]):
                    temp = True
                    # Requested interval/direction differs from the device.
                    if (self.sampler_interval and str(sampler_tmp["sampler_interval"]) != self.sampler_interval) \
                            or (self.sampler_direction and
                                str(sampler_tmp["sampler_direction"]) != self.sampler_direction):
                        self.sampler_changed = True
            if not temp:
                # No entry for this interface at all: any request is a change.
                if self.sampler_direction or self.sampler_interval:
                    self.sampler_changed = True
            for statistic_tmp in self.existing["statistic"]:
                if str(statistic_tmp["interface"]) == self.interface and self.interface != "all":
                    if self.type == "vxlan":
                        # An outbound ip statistic on the port conflicts with
                        # a vxlan record.
                        if statistic_tmp["statistics_direction"] \
                                and 'outbound' in statistic_tmp["statistics_direction"]:
                            self.module.fail_json(
                                msg='Error: The NetStream record vxlan '
                                    'cannot be configured because the port has been configured NetStream outbound ip.')
                    if statistic_tmp["statistics_direction"] and self.statistics_direction:
                        if self.statistics_direction not in statistic_tmp["statistics_direction"]:
                            self.statistic_changed = True
                    else:
                        if self.statistics_direction:
                            self.statistic_changed = True
            for statistic_tmp1 in self.existing["flexible_statistic"]:
                if self.interface != "all" \
                        and self.type == str(statistic_tmp1["type"]) \
                        and self.interface == str(statistic_tmp1["interface"]):
                    if statistic_tmp1["statistics_record"] and self.statistics_record:
                        if self.statistics_record not in statistic_tmp1["statistics_record"]:
                            self.flexible_changed = True
                    else:
                        if self.statistics_record:
                            self.flexible_changed = True
            for index_tmp in self.existing["index-switch"]:
                if self.type == str(index_tmp["type"]):
                    if self.index_switch != str(index_tmp["index-switch"]):
                        self.index_switch_changed = True
        else:
            # state == "absent": a change is needed only when the requested
            # configuration is currently present and must be removed.
            for sampler_tmp in self.existing["sampler"]:
                if self.interface == str(sampler_tmp["interface"]):
                    if (self.sampler_interval and str(sampler_tmp["sampler_interval"]) == self.sampler_interval) \
                            and (self.sampler_direction and str(sampler_tmp["sampler_direction"]) == self.sampler_direction):
                        self.sampler_changed = True
            for statistic_tmp in self.existing["statistic"]:
                if str(statistic_tmp["interface"]) == self.interface and self.interface != "all":
                    if len(statistic_tmp["statistics_direction"]) and self.statistics_direction:
                        if self.statistics_direction in statistic_tmp["statistics_direction"]:
                            self.statistic_changed = True
            for statistic_tmp1 in self.existing["flexible_statistic"]:
                if self.interface != "all" \
                        and self.type == str(statistic_tmp1["type"]) \
                        and self.interface == str(statistic_tmp1["interface"]):
                    if len(statistic_tmp1["statistics_record"]) and self.statistics_record:
                        if self.statistics_record in statistic_tmp1["statistics_record"]:
                            self.flexible_changed = True
            for index_tmp in self.existing["index-switch"]:
                if self.type == str(index_tmp["type"]):
                    if self.index_switch == str(index_tmp["index-switch"]):
                        # The default of "16" cannot be removed.
                        if self.index_switch != "16":
                            self.index_switch_changed = True
    def operate_ns_gloabl(self):
        """Build and push the CLI commands for every flagged change.

        NOTE: the method name misspells "global"; it is kept as-is because
        work() calls it by this exact name.
        """
        cmd = ""
        # Nothing flagged -> nothing to configure.
        if not self.sampler_changed and not self.statistic_changed \
                and not self.flexible_changed and not self.index_switch_changed:
            self.changed = False
            return
        # Sampler (random-packets) configuration.
        if self.sampler_changed is True:
            if self.type == "vxlan":
                self.module.fail_json(
                    msg="Error: Netstream do not support vxlan sampler.")
            if self.interface != "all":
                # Enter the interface view first.
                cmd = "interface %s" % self.interface
                self.cli_add_command(cmd)
            cmd = "netstream sampler random-packets %s %s" % (
                self.sampler_interval, self.sampler_direction)
            if self.state == "present":
                self.cli_add_command(cmd)
            else:
                self.cli_add_command(cmd, undo=True)
            if self.interface != "all":
                cmd = "quit"
                self.cli_add_command(cmd)
        # Interface inbound/outbound statistic configuration.
        if self.statistic_changed is True:
            if self.interface != "all":
                cmd = "interface %s" % self.interface
                self.cli_add_command(cmd)
            cmd = "netstream %s ip" % self.statistics_direction
            if self.state == "present":
                self.cli_add_command(cmd)
            else:
                self.cli_add_command(cmd, undo=True)
            if self.interface != "all":
                cmd = "quit"
                self.cli_add_command(cmd)
        # Flexible statistic (netstream record) configuration.
        if self.flexible_changed is True:
            if self.interface != "all":
                cmd = "interface %s" % self.interface
                self.cli_add_command(cmd)
            if self.state == "present":
                for statistic_tmp in self.existing["flexible_statistic"]:
                    tmp_list = statistic_tmp["statistics_record"]
                    if self.type == statistic_tmp["type"]:
                        if self.type == "ip":
                            # Remove a previously bound record before
                            # binding the requested one.
                            if len(tmp_list):
                                cmd = "netstream record %s ip" % tmp_list[0]
                                self.cli_add_command(cmd, undo=True)
                            cmd = "netstream record %s ip" % self.statistics_record
                            self.cli_add_command(cmd)
                        if self.type == "vxlan":
                            if len(tmp_list):
                                cmd = "netstream record %s vxlan inner-ip" % tmp_list[
                                    0]
                                self.cli_add_command(cmd, undo=True)
                            cmd = "netstream record %s vxlan inner-ip" % self.statistics_record
                            self.cli_add_command(cmd)
            else:
                if self.type == "ip":
                    cmd = "netstream record %s ip" % self.statistics_record
                    self.cli_add_command(cmd, undo=True)
                if self.type == "vxlan":
                    cmd = "netstream record %s vxlan inner-ip" % self.statistics_record
                    self.cli_add_command(cmd, undo=True)
            if self.interface != "all":
                cmd = "quit"
                self.cli_add_command(cmd)
        # Export index-switch configuration (global view only).
        if self.index_switch_changed is True:
            if self.interface != "all":
                self.module.fail_json(
                    msg="Error: Index-switch function should be used globally.")
            if self.type == "ip":
                cmd = "netstream export ip index-switch %s" % self.index_switch
            else:
                cmd = "netstream export vxlan inner-ip index-switch %s" % self.index_switch
            if self.state == "present":
                self.cli_add_command(cmd)
            else:
                self.cli_add_command(cmd, undo=True)
        # Push everything that was queued.
        if self.commands:
            self.cli_load_config(self.commands)
            self.changed = True
def get_end_state(self):
"""get end state info"""
self.get_end_sampler_interval()
self.get_end_interface_statistic()
self.get_end_statistic_record()
self.get_end_index_switch()
def work(self):
"""worker"""
self.check_params()
self.get_existing()
self.get_proposed()
self.operate_ns_gloabl()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
    """Module entry point: build the argument spec and run the worker."""
    argument_spec = {
        'type': dict(required=False, choices=['ip', 'vxlan'], default='ip'),
        'interface': dict(required=True, type='str'),
        'sampler_interval': dict(required=False, type='str'),
        'sampler_direction': dict(required=False,
                                  choices=['inbound', 'outbound']),
        'statistics_direction': dict(required=False,
                                     choices=['inbound', 'outbound']),
        'statistics_record': dict(required=False, type='str'),
        'index_switch': dict(required=False, choices=['16', '32'],
                             default='16'),
        'state': dict(required=False, choices=['present', 'absent'],
                      default='present'),
    }
    # Add the common CloudEngine connection arguments.
    argument_spec.update(ce_argument_spec)
    netstream_module = NetStreamGlobal(argument_spec)
    netstream_module.work()
# Run the module only when executed directly (not on import).
if __name__ == '__main__':
    main()
| gpl-3.0 |
ntoll/foox | foox/species/third.py | 1 | 12608 | """
This module encapsulates the behaviour of third species counterpoint.
"""
import random
import foox.ga as ga
from .utils import is_parallel, make_generate_function, is_stepwise_motion
# Some sane defaults for the genetic algorithm run.
DEFAULT_POPULATION_SIZE = 4000
DEFAULT_MAX_GENERATION = 200
DEFAULT_MUTATION_RANGE = 3
DEFAULT_MUTATION_RATE = 0.4
# Intervals between notes that are allowed in third species counterpoint,
# expressed as diatonic offsets above the cantus firmus note
# (4 = a fifth, 7 = an octave -- see the first/last interval checks in
# make_fitness_function).
CONSONANCES = [2, 4, 5, 7, 9, 11]
DISSONANCES = [3, 6, 8, 10]
# Odd beats (1 and 3 of a bar) must be consonant; even beats (2 and 4)
# may also carry passing dissonances.
VALID_ODD_BEAT_INTERVALS = CONSONANCES
VALID_EVEN_BEAT_INTERVALS = CONSONANCES + DISSONANCES
# Various rewards and punishments used with different aspects of the solution.
# Reward / punishment to ensure the solution starts correctly (5th or 8ve).
REWARD_FIRST = 1
PUNISH_FIRST = 0.1
# Reward / punishment to ensure the solution finishes correctly (at an 8ve).
REWARD_LAST = 1
PUNISH_LAST = 0.1
# Reward / punishment to ensure the penultimate note is step wise onto the
# final note.
REWARD_LAST_STEP = 2
PUNISH_LAST_STEP = 0.7
# Reward / punish contrary motion onto the final note.
REWARD_LAST_MOTION = 3
PUNISH_LAST_MOTION = 0.1
# Punishment if the penultimate note is a repeated note.
PUNISH_REPEATED_PENULTIMATE = 0.1
# Make sure the movement to the penultimate note isn't from too
# far away (not greater than a third).
REWARD_PENULTIMATE_PREPARATION = 1
PUNISH_PENULTIMATE_PREPARATION = 0.7
# Punish parallel fifths or octaves.
PUNISH_PARALLEL_FIFTHS_OCTAVES = 0.5
# Punishment for too many parallel/similar movements.
PUNISH_PARALLEL = 0.1
# Reward / punish correct stepwise movement around dissonances.
REWARD_STEPWISE_MOTION = 0.5
PUNISH_STEPWISE_MOTION = 0.1
# Punishment for too many repeated notes.
PUNISH_REPEATS = 1
# Punishment for too many large leaps in the melody.
PUNISH_LEAPS = 0.7
# The highest fixed score a candidate may achieve; make_halt_function adds
# further per-note stepwise-motion rewards on top of this. (Hack!)
MAX_REWARD = (
    REWARD_FIRST
    + REWARD_LAST
    + REWARD_LAST_STEP
    + REWARD_LAST_MOTION
    + REWARD_PENULTIMATE_PREPARATION
)
def create_population(number, cantus_firmus):
    """
    Return a list of `number` random candidate solutions (Genome instances)
    generated against the given cantus firmus.
    """
    population = []
    for _ in range(number):
        chromosome = []
        for note in cantus_firmus:
            # Restrict each beat to intervals that keep the pitch below 17.
            odd_choices = [
                interval
                for interval in VALID_ODD_BEAT_INTERVALS
                if (interval + note) < 17
            ]
            even_choices = [
                interval
                for interval in VALID_EVEN_BEAT_INTERVALS
                if (interval + note) < 17
            ]
            # Four beats per bar: odd beats (1, 3) draw from the consonant
            # set, even beats (2, 4) may also be dissonant.
            for choices in (odd_choices, even_choices,
                            odd_choices, even_choices):
                chromosome.append(note + random.choice(choices))
        # The final bar carries a single note, so drop the three surplus
        # beats generated for it.
        population.append(Genome(chromosome[:-3]))
    return population
def make_fitness_function(cantus_firmus):
    """
    Given the cantus firmus, will return a function that takes a single Genome
    instance and returns a fitness score.
    """
    # Melody wide measures (thresholds are derived from the cantus firmus
    # length; the contrapunctus itself is roughly four notes per bar).
    repeat_threshold = len(cantus_firmus) * 0.5
    jump_threshold = len(cantus_firmus) * 0.3

    def fitness_function(genome):
        """
        Given a candidate solution will return its fitness score assuming
        the cantus_firmus in this closure. Caches the fitness score in the
        genome.
        """
        # Save some time!
        if genome.fitness is not None:
            return genome.fitness
        # The fitness score to be returned.
        fitness_score = 0.0
        # Counts the number of repeated notes.
        repeats = 0
        # Counts the amount of parallel motion.
        parallel_motion = 0
        # Counts the number of jumps in the melodic contour.
        jump_contour = 0

        contrapunctus = genome.chromosome

        # Make sure the solution starts correctly (at a 5th or octave;
        # intervals 4 and 7 respectively).
        first_interval = contrapunctus[0] - cantus_firmus[0]
        if first_interval == 7 or first_interval == 4:
            fitness_score += REWARD_FIRST
        else:
            fitness_score -= PUNISH_FIRST
        # Make sure the solution finishes correctly (at an octave).
        if contrapunctus[-1] - cantus_firmus[-1] == 7:
            fitness_score += REWARD_LAST
        else:
            fitness_score -= PUNISH_LAST
        # Ensure the penultimate note is step wise onto the final note.
        if abs(contrapunctus[-1] - contrapunctus[-2]) == 1:
            fitness_score += REWARD_LAST_STEP
        else:
            fitness_score -= PUNISH_LAST_STEP
        # Reward contrary motion onto the final note.
        cantus_firmus_motion = cantus_firmus[-1] - cantus_firmus[-2]
        contrapunctus_motion = contrapunctus[-1] - contrapunctus[-2]
        if (cantus_firmus_motion < 0 and contrapunctus_motion > 0) or (
            cantus_firmus_motion > 0 and contrapunctus_motion < 0
        ):
            fitness_score += REWARD_LAST_MOTION
        else:
            fitness_score -= PUNISH_LAST_MOTION
        # Make sure the penultimate note isn't a repeated note.
        penultimate_preparation = abs(contrapunctus[-2] - contrapunctus[-3])
        if penultimate_preparation == 0:
            fitness_score -= PUNISH_REPEATED_PENULTIMATE
        else:
            # Make sure the movement to the penultimate note isn't from too
            # far away (not greater than a third).
            # NOTE(review): "< 2" only rewards a step (interval 1); if
            # interval 2 is "a third" this arguably should be "<= 2" --
            # confirm against the species rules before changing.
            if penultimate_preparation < 2:
                fitness_score += REWARD_PENULTIMATE_PREPARATION
            else:
                fitness_score -= PUNISH_PENULTIMATE_PREPARATION
        # Check the fitness of the body of the solution.
        last_notes = (contrapunctus[0], cantus_firmus[0])
        last_interval = last_notes[0] - last_notes[1]
        for i in range(1, len(contrapunctus) - 1):
            contrapunctus_note = contrapunctus[i]
            # Four contrapunctus beats share each cantus firmus bar note.
            cantus_firmus_note = cantus_firmus[i // 4]
            current_notes = (contrapunctus_note, cantus_firmus_note)
            current_interval = contrapunctus_note - cantus_firmus_note
            # Punish parallel fifths or octaves.
            if (current_interval == 4 or current_interval == 7) and (
                last_interval == 4 or last_interval == 7
            ):
                fitness_score -= PUNISH_PARALLEL_FIFTHS_OCTAVES
            # Check for parallel motion.
            if is_parallel(last_notes, current_notes):
                parallel_motion += 1
            # Check if the melody is a repeating note.
            if contrapunctus_note == last_notes[0]:
                repeats += 1
            # Check the melodic contour.
            contour_leap = abs(current_notes[0] - last_notes[0])
            if contour_leap >= 2:
                jump_contour += contour_leap - 2
            # Ensure dissonances are part of a step-wise movement.
            # NOTE(review): the original comment says "third beat" but
            # "i % 2" selects the even (2nd/4th) beats of each bar --
            # confirm which beats dissonances are meant to be allowed on.
            if i % 2 and current_interval in DISSONANCES:
                # The current_note is a dissonance on the third beat of a bar.
                # Check that both the adjacent notes are only a step away.
                if is_stepwise_motion(contrapunctus, i):
                    fitness_score += REWARD_STEPWISE_MOTION
                else:
                    fitness_score -= PUNISH_STEPWISE_MOTION
            else:
                if is_stepwise_motion(contrapunctus, i):
                    fitness_score += REWARD_STEPWISE_MOTION
            last_notes = current_notes
            last_interval = current_interval
        # Punish too many (> 1/3) repeated notes.
        if repeats > repeat_threshold:
            fitness_score -= PUNISH_REPEATS
        # Punish too many (> 1/3) parallel movements.
        # NOTE(review): this reuses repeat_threshold rather than a separate
        # parallel threshold -- possibly intentional, worth confirming.
        if parallel_motion > repeat_threshold:
            fitness_score -= PUNISH_PARALLEL
        # Punish too many large leaps in the melody.
        if jump_contour > jump_threshold:
            fitness_score -= PUNISH_LEAPS
        genome.fitness = fitness_score
        return fitness_score

    return fitness_function
def make_halt_function(cantus_firmus):
    """
    Returns a halt function for the given cantus firmus and third species
    counterpoint.
    """

    def halt(population, generation_count):
        """
        Given a population of candidate solutions and generation count (the
        number of epochs the algorithm has run) will return a boolean to
        indicate if an acceptable solution has been found within the
        referenced population.
        """
        # Assumes the population is sorted fittest-first -- confirm against
        # the caller in foox.ga.
        fittest = population[0]
        # Start from the fixed maximum and add the per-note stepwise-motion
        # rewards this particular melody could have earned.
        max_fitness = MAX_REWARD
        for i in range(len(fittest.chromosome)):
            # Check for dissonances. Each dissonance should have incremented
            # the fitness because it has been "placed" correctly.
            cantus_firmus_note = cantus_firmus[i // 4]
            melody_note = fittest.chromosome[i]
            interval = melody_note - cantus_firmus_note
            if interval in DISSONANCES:
                max_fitness += REWARD_STEPWISE_MOTION
            else:
                # Interior consonances may also earn the stepwise reward.
                if i > 0 and i < (len(fittest.chromosome) - 2):
                    if is_stepwise_motion(fittest.chromosome, i):
                        max_fitness += REWARD_STEPWISE_MOTION
        return (
            fittest.fitness >= max_fitness
            or generation_count > DEFAULT_MAX_GENERATION
        )

    return halt
class Genome(ga.Genome):
    """
    A class to represent a candidate solution for third species counterpoint.
    """

    def mutate(self, mutation_range, mutation_rate, context):
        """
        Mutates the genotypes no more than the mutation_range depending on the
        mutation_rate given and the cantus_firmus passed in as the context (to
        ensure the mutation is valid).
        """
        # Candidate intervals for each beat type, capped by mutation_range.
        odd_beat_mutation_intervals = [
            interval
            for interval in VALID_ODD_BEAT_INTERVALS
            if interval <= mutation_range
        ]
        even_beat_mutation_intervals = [
            interval
            for interval in VALID_EVEN_BEAT_INTERVALS
            if interval <= mutation_range
        ]
        chromosome_length = len(self.chromosome)
        for locus in range(chromosome_length):
            if mutation_rate >= random.random():
                # Four beats of melody per cantus firmus note.
                cantus_firmus_note = context[locus // 4]
                # The pitch of the notes immediately before and after the
                # current note (used to avoid mutations that result in a
                # repeated pitch).
                pitches_to_avoid = []
                if locus > 0:
                    pre_pitch = self.chromosome[locus - 1]
                    pitches_to_avoid.append(pre_pitch)
                # NOTE(review): "- 2" skips the next-to-last locus even
                # though chromosome[locus + 1] would still be valid there
                # (last index is chromosome_length - 1) -- confirm whether
                # this is deliberate protection of the final note.
                if locus < chromosome_length - 2:
                    post_pitch = self.chromosome[locus + 1]
                    pitches_to_avoid.append(post_pitch)
                if locus % 2:
                    # Current melody note is on an even beat of the bar
                    mutation_intervals = [
                        i
                        for i in even_beat_mutation_intervals
                        if cantus_firmus_note + i not in pitches_to_avoid
                    ]
                    # Fall back to the full set if avoidance empties it.
                    if not mutation_intervals:
                        mutation_intervals = even_beat_mutation_intervals
                else:
                    # Current melody note is on an odd beat of the bar.
                    mutation_intervals = [
                        i
                        for i in odd_beat_mutation_intervals
                        if cantus_firmus_note + i not in pitches_to_avoid
                    ]
                    if not mutation_intervals:
                        mutation_intervals = odd_beat_mutation_intervals
                # Keep the mutated pitch below 17 (same cap as
                # create_population); if this filter ever empties the list,
                # random.choice will raise -- presumably unreachable given
                # the interval tables, but worth confirming.
                valid_mutation_range = [
                    interval
                    for interval in mutation_intervals
                    if (interval + cantus_firmus_note) < 17
                ]
                mutation = random.choice(valid_mutation_range)
                new_allele = cantus_firmus_note + mutation
                self.chromosome[locus] = new_allele
                # Resets fitness score
                self.fitness = None
| mit |
JazzeYoung/VeryDeepAutoEncoder | pylearn2/pylearn2/utils/tests/test_serial.py | 44 | 4438 | """
Tests for the pylearn2.utils.serial module. Currently only tests
read_bin_lush_matrix and load_train_file methods.
"""
from theano.compat.six.moves import xrange
import pylearn2
from pylearn2.utils.serial import read_bin_lush_matrix, load_train_file
import numpy as np
# Locations of the test fixtures, resolved relative to the installed package.
pylearn2_path = pylearn2.__path__[0]
example_bin_lush_path = pylearn2_path + '/utils/tests/example_bin_lush/'
yaml_path = pylearn2_path + '/utils/tests/'
def test_read_bin_lush_matrix_ubyte_scalar():
    """
    Read data from a lush file with uint8 data (scalar).
    Note: When you write a scalar from Koray's matlab code it always makes
    everything 3D. Writing it straight from lush you might be able to get
    a true scalar
    """
    path = example_bin_lush_path + 'ubyte_scalar.lushbin'
    result = read_bin_lush_matrix(path)
    assert str(result.dtype) == 'uint8'
    assert len(result.shape) == 3
    assert result.shape[0] == 1
    assert result.shape[1] == 1
    # Fixed: the original asserted shape[1] twice and never checked shape[2].
    assert result.shape[2] == 1
    # Index all three axes so the comparison yields a plain scalar.
    assert result[0, 0, 0] == 12
def test_read_bin_lush_matrix_ubyte_3tensor():
    """
    Read data from a lush file with uint8 data (3D-tensor)
    """
    result = read_bin_lush_matrix(example_bin_lush_path +
                                  'ubyte_3tensor.lushbin')
    assert str(result.dtype) == 'uint8'
    assert len(result.shape) == 3
    if result.shape != (2, 3, 4):
        raise AssertionError(
            "ubyte_3tensor.lushbin stores a 3-tensor "
            "of shape (2,3,4), but read_bin_lush_matrix thinks it has "
            "shape " + str(result.shape)
        )
    # Element (i,j,k) (0-based) must equal (i+1) + 3*(j+1) + 12*(k+1).
    for i, j, k in np.ndindex(*result.shape):
        assert result[i, j, k] == (i + 1) + 3 * (j + 1) + 12 * (k + 1)
def test_read_bin_lush_matrix_int_3tensor():
    """
    Read data from a lush file with int32 data (3D-tensor)
    """
    path = example_bin_lush_path + 'int_3tensor.lushbin'
    result = read_bin_lush_matrix(path)
    assert str(result.dtype) == 'int32'
    assert len(result.shape) == 3
    if result.shape != (3, 2, 4):
        # Fixed: the failure message previously named ubyte_3tensor.lushbin.
        raise AssertionError(
            "int_3tensor.lushbin stores a 3-tensor "
            "of shape (3,2,4), but read_bin_lush_matrix thinks it has "
            "shape " + str(result.shape)
        )
    # Element (i,j,k) (1-based) must equal (i + 10000**j) * (-2)**k.
    for i in xrange(1, result.shape[0]+1):
        for j in xrange(1, result.shape[1]+1):
            for k in xrange(1, result.shape[2]+1):
                assert (result[i - 1, j - 1, k - 1] ==
                        (i + 10000 ** j) * ((-2) ** k))
def test_read_bin_lush_matrix_float_3tensor():
    """
    Read data from a lush file with float32 data (3D-tensor)
    """
    path = example_bin_lush_path + 'float_3tensor.lushbin'
    result = read_bin_lush_matrix(path)
    assert str(result.dtype) == 'float32'
    assert len(result.shape) == 3
    if result.shape != (4, 3, 2):
        # Fixed: the failure message previously named ubyte_3tensor.lushbin.
        raise AssertionError(
            "float_3tensor.lushbin stores a 3-tensor "
            "of shape (4,3,2), but read_bin_lush_matrix thinks it has "
            "shape " + str(result.shape)
        )
    # Element (i,j,k) (1-based) must be close to i + 1.5*j + 1.7*k.
    for i in xrange(1, result.shape[0] + 1):
        for j in xrange(1, result.shape[1] + 1):
            for k in xrange(1, result.shape[2] + 1):
                assert np.allclose(result[i - 1, j - 1, k - 1],
                                   i + 1.5 * j + 1.7 * k)
def test_read_bin_lush_matrix_double_3tensor():
    """
    Read data from a lush file with float64 data (3D-tensor)
    """
    path = example_bin_lush_path + 'double_3tensor.lushbin'
    result = read_bin_lush_matrix(path)
    assert str(result.dtype) == 'float64'
    assert len(result.shape) == 3
    if result.shape != (4, 2, 3):
        # Fixed copy-paste error: the message previously named
        # ubyte_3tensor.lushbin although this test reads double_3tensor.lushbin.
        raise AssertionError(
            "double_3tensor.lushbin stores a 3-tensor "
            "of shape (4,2,3), but read_bin_lush_matrix thinks it has "
            "shape " + str(result.shape)
        )
    # Fixture value at 1-based (i, j, k) is i + 1.5*j + (-1.7)**k.
    for i in xrange(1, result.shape[0]+1):
        for j in xrange(1, result.shape[1]+1):
            for k in xrange(1, result.shape[2]+1):
                assert np.allclose(result[i - 1, j - 1, k - 1],
                                   i + 1.5 * j + (-1.7) ** k)
def test_load_train_file():
    """
    Loads a YAML file with and without environment variables.
    """
    custom_environ = {'PYLEARN2_DATA_PATH': '/just/a/test/path/'}
    model_yaml = yaml_path + 'test_model.yaml'
    # Once using the process environment, once with an explicit override.
    load_train_file(model_yaml)
    load_train_file(model_yaml, environ=custom_environ)
| bsd-3-clause |
gchp/django | django/contrib/gis/maps/google/__init__.py | 67 | 2767 | """
This module houses the GoogleMap object, used for generating
the needed javascript to embed Google Maps in a Web page.
Google(R) is a registered trademark of Google, Inc. of Mountain View, California.
Example:
* In the view:
return render(request, 'template.html', {'google': GoogleMap(key="abcdefg")})
* In the template:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{{ google.xhtml }}
<head>
<title>Google Maps via GeoDjango</title>
{{ google.style }}
{{ google.scripts }}
</head>
{{ google.body }}
<div id="{{ google.dom_id }}" style="width:600px;height:400px;"></div>
</body>
</html>
Note: If you want to be more explicit in your templates, the following are
equivalent:
{{ google.body }} => "<body {{ google.onload }} {{ google.onunload }}>"
{{ google.xhtml }} => "<html xmlns="http://www.w3.org/1999/xhtml" {{ google.xmlns }}>"
{{ google.style }} => "<style>{{ google.vml_css }}</style>"
Explanation:
- The `xhtml` property provides the correct XML namespace needed for
Google Maps to operate in IE using XHTML. Google Maps on IE uses
VML to draw polylines. Returns, by default:
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
- The `style` property provides the correct style tag for the CSS
properties required by Google Maps on IE:
<style type="text/css">v\:* {behavior:url(#default#VML);}</style>
- The `scripts` property provides the necessary <script> tags for
including the Google Maps javascript, as well as including the
generated javascript.
- The `body` property provides the correct attributes for the
body tag to load the generated javascript. By default, returns:
<body onload="gmap_load()" onunload="GUnload()">
- The `dom_id` property returns the DOM id for the map. Defaults to "map".
The following attributes may be set or customized in your local settings:
* GOOGLE_MAPS_API_KEY: String of your Google Maps API key. These are tied
to a domain. May be obtained from http://www.google.com/apis/maps/
* GOOGLE_MAPS_API_VERSION (optional): Defaults to using "2.x"
* GOOGLE_MAPS_URL (optional): Must have a substitution ('%s') for the API
version.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap, GoogleMapSet
from django.contrib.gis.maps.google.overlays import (
GEvent, GIcon, GMarker, GPolygon, GPolyline,
)
from django.contrib.gis.maps.google.zoom import GoogleZoom
__all__ = [
'GoogleMap', 'GoogleMapSet', 'GEvent', 'GIcon', 'GMarker', 'GPolygon',
'GPolyline', 'GoogleZoom',
]
| bsd-3-clause |
denibertovic/django-client-certificates | client_certs/admin.py | 1 | 1094 | from django.contrib import admin
from .models import Cert
from .cert import revoke_certificates
class CertAdmin(admin.ModelAdmin):
    """Admin interface for client certificates with a bulk revocation action."""

    list_display = ('user', 'install_link', 'is_valid', 'valid_until')
    fields = ('user', 'country', 'state', 'locality', 'organization',
              'organizational_unit', 'common_name', 'description', 'valid_until')

    def install_link(self, obj):
        """Render a link to the certificate's installation page."""
        return '<a href="%s">Install Link</a>' % obj.get_absolute_url()
    install_link.allow_tags = True

    def revoke_certificate(self, request, queryset):
        """Admin action: revoke every valid, installed certificate selected."""
        revocable = [cert.x509 for cert in queryset
                     if cert.is_valid and cert.is_installed]
        revoke_certificates(revocable)
        updated = queryset.update(is_valid=False)
        message = ('1 Certificate was revoked.' if updated == 1
                   else '%s Certificates were revoked.' % updated)
        self.message_user(request, message)
    revoke_certificate.short_description = "Revoke selected Client Certificates"

    actions = [revoke_certificate]


admin.site.register(Cert, CertAdmin)
| bsd-2-clause |
tplusx/ns3-gpsr | bindings/python/ns3modulegen-modular.py | 18 | 3971 | import warnings
import sys
import os
import pybindgen.settings
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
from pybindgen.module import MultiSectionFactory
import ns3modulegen_core_customizations
pybindgen.settings.wrapper_registry = pybindgen.settings.StdMapWrapperRegistry
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Error handler that downgrades wrapper-generation failures to warnings."""

    def handle_error(self, wrapper, exception, traceback_):
        # Report the failing wrapper but tell pybindgen to keep going.
        message = "exception %r in wrapper %s" % (exception, wrapper)
        warnings.warn(message)
        return True

pybindgen.settings.error_handler = ErrorHandler()
#print >> sys.stderr, ">>>>>>>>>>>>>>>>>>>>>>>>>>>> ", bool(eval(os.environ["GCC_RTTI_ABI_COMPLETE"]))
pybindgen.settings.gcc_rtti_abi_complete = bool(eval(os.environ["GCC_RTTI_ABI_COMPLETE"]))
class MyMultiSectionFactory(MultiSectionFactory):
    """Code-sink factory emitting one .cc file plus a shared ns3module.h header."""

    def __init__(self, main_file_name):
        super(MyMultiSectionFactory, self).__init__()
        self.main_file_name = main_file_name
        self.header_name = "ns3module.h"
        # Both sinks are opened eagerly; close() releases the file handles.
        self.main_sink = FileCodeSink(open(main_file_name, "wt"))
        header_path = os.path.join(os.path.dirname(self.main_file_name), self.header_name)
        #print >> sys.stderr, ">>>>>>>>>>>>>>>>>", header_path, main_file_name
        self.header_sink = FileCodeSink(open(header_path, "wt"))

    def get_section_code_sink(self, section_name):
        # All sections share the single main file.
        return self.main_sink

    def get_main_code_sink(self):
        return self.main_sink

    def get_common_header_code_sink(self):
        return self.header_sink

    def get_common_header_include(self):
        return '"%s"' % self.header_name

    def close(self):
        # Close the underlying files so the generated sources are flushed.
        self.header_sink.file.close()
        self.main_sink.file.close()
def main(argv):
    """Generate the pybindgen C++ bindings for one ns-3 module.

    argv: [prog, module_abs_src_path, target, extension_name, output_cc_file_name]
    The module's bindings/ directory is temporarily prepended to sys.path so the
    per-module apidefs and customization modules can be imported by name.
    """
    module_abs_src_path, target, extension_name, output_cc_file_name = argv[1:]
    module_name = os.path.basename(module_abs_src_path)
    out = MyMultiSectionFactory(output_cc_file_name)
    sys.path.insert(0, os.path.join(module_abs_src_path, "bindings"))
    try:
        # Import and immediately drop from sys.modules so the next module's
        # identically-named files are re-imported, not served from cache.
        module_apidefs = __import__("modulegen__%s" % target)
        del sys.modules["modulegen__%s" % target]
        try:
            module_customization = __import__("modulegen_customizations")
            del sys.modules["modulegen_customizations"]
        except ImportError:
            # No customizations for this module; a bare object() makes the
            # hasattr() checks below fail cleanly.
            module_customization = object()
        try:
            from callbacks_list import callback_classes
        except ImportError, ex:
            print >> sys.stderr, "***************", repr(ex)
            callback_classes = []
        else:
            print >> sys.stderr, ">>>>>>>>>>>>>>>>", repr(callback_classes)
    finally:
        sys.path.pop(0)
    root_module = module_apidefs.module_init()
    root_module.set_name(extension_name)
    root_module.add_include('"ns3/%s-module.h"' % module_name)
    ns3modulegen_core_customizations.add_std_ios_openmode(root_module)
    # -----------
    module_apidefs.register_types(root_module)
    if hasattr(module_customization, 'post_register_types'):
        module_customization.post_register_types(root_module)
    # register Callback<...> type handlers
    ns3modulegen_core_customizations.generate_callback_classes(root_module.after_forward_declarations,
                                                               callback_classes)
    # -----------
    module_apidefs.register_methods(root_module)
    if hasattr(module_customization, 'post_register_methods'):
        module_customization.post_register_methods(root_module)
    ns3modulegen_core_customizations.Object_customizations(root_module)
    ns3modulegen_core_customizations.Attribute_customizations(root_module)
    # -----------
    module_apidefs.register_functions(root_module)
    if hasattr(module_customization, 'post_register_functions'):
        module_customization.post_register_functions(root_module)
    # -----------
    root_module.generate(out)
if __name__ == '__main__':
    # `sys` is already imported at module scope; the former local re-import
    # was redundant and has been removed.
    main(sys.argv)
| gpl-2.0 |
DPaaS-Raksha/horizon | openstack_dashboard/dashboards/project/volumes/urls.py | 9 | 1332 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns, url
from .views import (IndexView, CreateView, EditAttachmentsView, DetailView,
CreateSnapshotView)
urlpatterns = patterns('openstack_dashboard.dashboards.project.volumes.views',
url(r'^$', IndexView.as_view(), name='index'),
url(r'^create/$', CreateView.as_view(), name='create'),
url(r'^(?P<volume_id>[^/]+)/attach/$',
EditAttachmentsView.as_view(),
name='attach'),
url(r'^(?P<volume_id>[^/]+)/create_snapshot/$',
CreateSnapshotView.as_view(),
name='create_snapshot'),
url(r'^(?P<volume_id>[^/]+)/$',
DetailView.as_view(),
name='detail'),
)
| apache-2.0 |
forgeservicelab/forge.insightly-sync | ldap_updater.py | 1 | 27000 | """Push updates to LDAP."""
import ldap as _ldap
import ldap.modlist as _modlist
import logging
from __init__ import sanitize, fileToRedmine
from unidecode import unidecode
from canned_mailer import CannedMailer
from insightly_updater import InsightlyUpdater
from fuzzywuzzy.process import extractOne
class ForgeLDAP(object):
"""LDAP connection wrapper.
Represents an LDAP connection and exposes LDAP CRUD operation funtions.
"""
_c = None
_logger = None
_redmine_key = None
username = None
def __init__(self, user, pwd, host, redmine_key=None):
"""Initialize the LDAP connection.
Initialize an LDAP object and bind it to the specified host.
Args:
user (str): The cn attribute of the account to use for binding. Must have administrator rights.
pwd (str): The password for the specified user.
host (str): The FQDN or IP of the host running the LDAP server. Connection uses ldaps protocol.
"""
self._logger = logging.getLogger(self.__class__.__name__)
self._redmine_key = redmine_key
dn = 'cn=%s,%s' % (user, LDAPUpdater._LDAP_TREE['accounts'])
_ldap.set_option(_ldap.OPT_X_TLS_REQUIRE_CERT, _ldap.OPT_X_TLS_ALLOW)
self._c = _ldap.initialize("ldaps://%s" % host)
self.username = user
self._c.bind_s(dn, pwd, _ldap.AUTH_SIMPLE)
def destroy(self):
"""Unbind the underlying LDAP connection.
Ensures that the LDAP conection does not remain open.
"""
self._c.unbind_s()
def ldap_search(self, *args, **kwargs):
"""Search LDAP.
Performs an LDAP search.
Args:
*args: positional arguments for ldap synchronous search, as per python ldap module.
*kwargs: keyword arguments for ldap synchronous search, as per python ldap module.
Returns:
List: A list containing the results from the LDAP search.
None: If there are no results.
"""
try:
ldap_res = self._c.search_s(*args, **kwargs)
except _ldap.NO_SUCH_OBJECT:
return None
return ldap_res
def ldap_add(self, *args):
"""Add entries to LDAP.
Performs an LDAP add operation.
Args:
*args: positional arguments for ldap synchronous add, as per python ldap module.
*kwargs: keyword arguments for ldap synchronous add, as per python ldap module.
"""
try:
self._c.add_s(*args)
except _ldap.ALREADY_EXISTS, err:
self._logger.info('%s; %s' % (err, 'Ignoring.'))
except _ldap.LDAPError, err:
self._logger.error('Try LDAPadd: %s' % list(args))
self._logger.error(err)
if self._redmine_key:
fileToRedmine(key=self._redmine_key, subject=err.__class__.__name__, message='%s\nTry LDAPadd: %s'
% (err, args))
def ldap_update(self, *args):
"""Modify entries on LDAP.
Performs an LDAP modify operation.
Args:
*args: positional arguments for ldap synchronous modify, as per python ldap module.
*kwargs: keyword arguments for ldap synchronous modify, as per python ldap module.
"""
try:
self._c.modify_s(*args)
except _ldap.LDAPError, err:
self._logger.error('Try LDAPmodify: %s' % list(args))
self._logger.error(err)
if self._redmine_key:
fileToRedmine(key=self._redmine_key, subject=err.__class__.__name__, message='%s\nTry LDAPmodify: %s'
% (err, args))
def ldap_delete(self, *args):
"""Delete entries from LDAP.
Performs an LDAP delete operation.
Args:
*args: positional arguments for ldap synchronous delete, as per python ldap module.
*kwargs: keyword arguments for ldap synchronous delete, as per python ldap module.
"""
try:
self._c.delete_s(*args)
except _ldap.LDAPError, err:
self._logger.error('Try LDAPdelete: %s' % list(args))
self._logger.error(err)
if self._redmine_key:
fileToRedmine(key=self._redmine_key, subject=err.__class__.__name__, message='%s\nTry LDAPdelete: %s'
% (err, args))
class LDAPUpdater:
    """Update LDAP server to represent identity and membership relations stated on Insightly.

    Attributes:
        SDA: Constant representing the name of the SDA category on Insightly.
        FPA: Constant representing the name of the FPA category on Insightly.
        FPA_CRA: Constant representing the name of the FPA with CRA category on Insightly.
        OS_TENANT: Constant representing the name of the OpenStack tenant category on Insightly.
        PIPELINE_NAME: Constant representing the name of the Project execution pipeline on Insightly.
        ACTION_CREATE: Constant representing the create key for the Action function.
        ACTION_DELETE: Constant representing the delete key for the Action function.
        ACTION_UPDATE: Constant representing the update key for the Action function.
    """
    SDA = 'SDA'
    FPA = 'FPA'
    FPA_CRA = 'FPA (CRA)'
    OS_TENANT = 'OpenStack Tenant'
    PIPELINE_NAME = 'Project execution'
    ACTION_CREATE = 'create'
    ACTION_DELETE = 'delete'
    ACTION_UPDATE = 'update'
    # DN suffixes for the subtrees this updater manages.
    _LDAP_TREE = {'accounts': "ou=accounts,dc=forgeservicelab,dc=fi",
                  'projects': "ou=projects,dc=forgeservicelab,dc=fi",
                  'admins': "cn=ldap_admins,ou=roles,dc=forgeservicelab,dc=fi"}
    # Accounts that must never be disabled by _pruneAccounts.
    _PROTECTED_ACCOUNTS = ['admin', 'binder', 'pwdchanger', 'syncer']
    # Filter template matching every group (other than {project_cn}) that lists
    # {user_cn} as a member; the literal's whitespace is layout-only and is
    # stripped by .replace(' ', '') before the accounts base DN is substituted.
    _ALL_OTHER_GROUPS_FILTER = '(&(|(objectClass=groupOfNames)\
                                   (objectClass=groupOfUniqueNames))\
                                 (|(member=cn={user_cn},%(s)s)\
                                   (uniqueMember=cn={user_cn},%(s)s))\
                                 (!(cn:dn:={project_cn})))'.replace(' ', '') % {'s': _LDAP_TREE['accounts']}
    # Placeholder given name/surname values compared against in _createCN.
    _PLACEHOLDER_NAME = 'FirstName'
    _PLACEHOLDER_SN = 'LastName'
    def __init__(self, insightlyUpdater, args):
        """Initialize instance.

        Args:
            insightlyUpdater (InsightlyUpdater): Client used to push project and
                tenant status changes back to Insightly.
            args: Configuration forwarded to the CannedMailer used for all
                notification emails.
        """
        self.mailer = CannedMailer(args)
        self.updater = insightlyUpdater
def _parseName(self, name):
"""Return the first element of a compound name that is not a known particle.
Args:
name (str): The name to be parsed.
Returns:
str: The transliterated first non-particle element of a name, capped to 10 characters.
"""
PARTICLES = ['de', 'della', 'von', 'und']
SPECIAL_CHARS = ['\'', '.', '!']
splitName = reduce(list.__add__, map(lambda n: n.split('-'), name.split()))
try:
while splitName[0].lower() in PARTICLES:
splitName.pop(0)
except IndexError:
pass
return unidecode(filter(lambda c: c not in SPECIAL_CHARS,
splitName[0].decode('utf-8').lower()[:10])) if splitName else None
    def _ldapCN(self, userID, ldap_conn):
        # Resolve an Insightly contact id (stored as employeeNumber) to the DN
        # of the matching account. Assumes exactly one match exists; a missing
        # account makes ldap_search return None and this raises TypeError.
        return ldap_conn.ldap_search(self._LDAP_TREE['accounts'], _ldap.SCOPE_ONELEVEL,
                                     filterstr='employeeNumber=%s' % userID,
                                     attrsonly=1)[0][0]
def _createCN(self, user, ldap_conn):
firstName = None if user['givenName'] is self._PLACEHOLDER_NAME else self._parseName(user['givenName'])
lastName = None if user['sn'] is self._PLACEHOLDER_SN else self._parseName(user['sn'])
cn = '.'.join(filter(lambda n: n, [firstName, lastName]))
suffix = 0
while ldap_conn.ldap_search('cn=%s,%s' % (cn, self._LDAP_TREE['accounts']), _ldap.SCOPE_BASE, attrsonly=1):
cn = '%s.%s' % (cn[:-2], suffix)
suffix += 1
return cn
    def _disableAndNotify(self, dn, ldap_conn):
        # Mark the account at `dn` as disabled and email its addresses, unless
        # employeeType already fuzzy-matches 'disabled' (score_cutoff=80
        # tolerates variant spellings).
        account = ldap_conn.ldap_search(dn, _ldap.SCOPE_BASE, attrlist=['employeeType', 'cn', 'mail'])[0][1]
        if account and ('employeeType' not in account or not extractOne(account['employeeType'][0],
                                                                        ['disabled'], score_cutoff=80)):
            ldap_conn.ldap_update(dn, [(_ldap.MOD_REPLACE, 'employeeType', 'disabled')])
            # map() is used for its side effect; eager under Python 2.
            map(lambda e: self.mailer.sendCannedMail(e, self.mailer.CANNED_MESSAGES['disabled_account'],
                                                     account['cn'][0]), account['mail'])
    def _pruneAccounts(self, ldap_conn):
        # Disable orphans
        # (accounts that belong to no group and are not protected).
        map(lambda entry: self._disableAndNotify(entry, ldap_conn),
            map(lambda dn: dn[0],
                filter(lambda a: 'memberOf' not in a[1].keys() and not any(cn in a[0] for cn in
                                                                           self._PROTECTED_ACCOUNTS),
                       ldap_conn.ldap_search(self._LDAP_TREE['accounts'],
                                             _ldap.SCOPE_ONELEVEL,
                                             attrlist=['memberOf']))))
        # Re-enable non orphans
        # (disabled accounts that have regained at least one group membership).
        map(lambda entry: ldap_conn.ldap_update(entry, [(_ldap.MOD_REPLACE, 'employeeType', None)]),
            map(lambda dn: dn[0],
                filter(lambda a: 'memberOf' in a[1].keys(),
                       ldap_conn.ldap_search(self._LDAP_TREE['accounts'],
                                             _ldap.SCOPE_ONELEVEL,
                                             attrlist=['memberOf'],
                                             filterstr='(employeeType=disabled)'))))
def _getLDAPCompatibleProject(self, project, objectClass, ldap_conn):
project = project.copy()
project['objectClass'] = objectClass
project['owner'] = [self._ldapCN(owner['employeeNumber'], ldap_conn) for owner in project.pop('owner', [])]
project['member'] = [self._ldapCN(member['employeeNumber'], ldap_conn) for member in project.pop('member', [])]
project['seeAlso'] = [self._ldapCN(seeAlso['employeeNumber'],
ldap_conn) for seeAlso in project.pop('seeAlso', [])]
project['uniqueMember'] = project['member']
project.pop('tenants')
project.pop('member' if objectClass is 'groupOfUniqueNames' else 'uniqueMember')
return project
    def _getLDAPCompatibleAccount(self, account):
        # Copy an Insightly contact dict into an inetOrgPerson attribute dict;
        # a fuzzy-true 'isHidden' flag becomes employeeType=hidden.
        account = account.copy()
        account['objectClass'] = 'inetOrgPerson'
        if extractOne('True', account.pop('isHidden'), score_cutoff=75):
            account['employeeType'] = 'hidden'
        return account
    # deprecated
    def _createRecord(self, project, ldap_conn):
        # Build a groupOfNames modlist (attribute, values) tuple list, dropping
        # attributes with no values.
        return filter(lambda r: len(r[1]), [
            ('objectClass', ['groupOfNames']),
            ('cn', [project['cn']]),
            ('o', project['o']),
            ('owner', map(lambda o: self._ldapCN(o['uid'], ldap_conn), project['owner'])),
            ('seeAlso', map(lambda a: self._ldapCN(a['uid'], ldap_conn), project['seeAlso'])),
            ('member', map(lambda m: self._ldapCN(m['uid'], ldap_conn), project['members'])),
            ('description', ['type:%s' % item for item in project['description']])
        ])
    # deprecated
    def _createTenantRecord(self, tenant, ldap_conn):
        # Derive a groupOfUniqueNames record from _createRecord's output,
        # folding owner/member into uniqueMember.
        record = self._createRecord(tenant, ldap_conn)
        record = map(lambda r: r if r[0] != 'objectClass' else (r[0], ['groupOfUniqueNames']), record)
        if len(record) == 7:
            record = map(lambda r: r if r[0] != 'owner' else ('uniqueMember', r[1]), record)
            record.pop(4)
            record.pop(4)
        else:
            record = map(lambda r: r if r[0] != 'member' else ('uniqueMember', r[1]), record)
        return record
    def _createOrUpdate(self, member_list, ldap_conn):
        """Create missing LDAP accounts and update changed ones.

        Returns the subset of member_list that had no pre-existing account
        (matched by employeeNumber) so callers can send welcome emails.
        """
        new_records = filter(lambda m: not ldap_conn.ldap_search(self._LDAP_TREE['accounts'],
                                                                 _ldap.SCOPE_ONELEVEL,
                                                                 filterstr='employeeNumber=%s' % m['employeeNumber'],
                                                                 attrsonly=1),
                             member_list)
        # Add the brand-new accounts (cn is generated, not taken from input).
        map(lambda c: ldap_conn.ldap_add('cn=%s,%s' % (self._createCN(c, ldap_conn), self._LDAP_TREE['accounts']),
                                         _modlist.addModlist(self._getLDAPCompatibleAccount(c),
                                                             ignore_attr_types=['cn'])),
            new_records)
        # Modify accounts whose tracked attributes differ from Insightly's data
        # (cmp() is Python 2 only); passwords and cn are never touched.
        map(lambda u: ldap_conn.ldap_update('%s' % self._ldapCN(u['employeeNumber'], ldap_conn),
                                            _modlist.modifyModlist(ldap_conn.ldap_search(self._LDAP_TREE['accounts'],
                                                                                         _ldap.SCOPE_ONELEVEL,
                                                                                         filterstr='employeeNumber=%s'
                                                                                         % u['employeeNumber'])[0][1],
                                                                   self._getLDAPCompatibleAccount(u),
                                                                   ignore_attr_types=['userPassword', 'cn'])),
            filter(lambda m: cmp(dict(self._getLDAPCompatibleAccount(m)),
                                 ldap_conn.ldap_search(self._LDAP_TREE['accounts'],
                                                       _ldap.SCOPE_ONELEVEL,
                                                       filterstr='employeeNumber=%s' % m['employeeNumber'],
                                                       attrlist=['displayName', 'objectClass', 'employeeType',
                                                                 'mobile', 'employeeNumber', 'sn',
                                                                 'mail', 'givenName'])[0][1]),
                   member_list))
        return new_records
    def _sendNewAccountEmails(self, new_accounts, project_type, ldap_conn):
        # Email every address of each newly created account; developer-type
        # projects (SDA / OpenStack tenant) get the devel template, others the
        # partner template.
        map(lambda d: map(lambda t: self.mailer.sendCannedMail(t,
                                                               self.mailer.CANNED_MESSAGES['new_devel_account'] if
                                                               project_type in [self.SDA, self.OS_TENANT] else
                                                               self.mailer.CANNED_MESSAGES['new_partner_account'],
                                                               d['cn'][0]),
                          d['mail']),
            map(lambda a: ldap_conn.ldap_search('ou=accounts,dc=forgeservicelab,dc=fi',
                                                _ldap.SCOPE_ONELEVEL,
                                                filterstr='employeeNumber=%s' % a['employeeNumber'],
                                                attrlist=['cn', 'mail'])[0][1],
                new_accounts))
    # deprecated
    def _ensureButlerService(self, record):
        # Prepend the butler.service account to uniqueMember when missing.
        if not any([member.startswith('cn=butler.service') for
                    member in filter(lambda attribute: attribute[0] == 'uniqueMember', record)[0][1]]):
            record = map(lambda r: r if r[0] != 'uniqueMember'
                         else ('uniqueMember',
                               ['cn=butler.service,ou=accounts,dc=forgeservicelab,dc=fi'] + r[1]), record)
        return record
    def _addAndNotify(self, dn, tenant, ldap_conn):
        """Add a tenant group at `dn` and mail every member of the new group."""
        add_butler = False
        if 'Digile.Platform' in dn:
            # Digile.Platform tenants always carry the butler.service account,
            # both on Insightly and in the LDAP group.
            self.updater\
                .addUserToProject(ldap_conn.ldap_search('cn=butler.service,ou=accounts,dc=forgeservicelab,dc=fi',
                                                        _ldap.SCOPE_BASE,
                                                        attrlist=['employeeNumber'])[0][1]['employeeNumber'][0],
                                  tenant)
            add_butler = all([member['displayName'] != 'Butler Service' for member in tenant['member']])
        ldap_tenant = self._getLDAPCompatibleProject(tenant, 'groupOfUniqueNames', ldap_conn)
        if add_butler:
            ldap_tenant['uniqueMember'] += ['cn=butler.service,ou=accounts,dc=forgeservicelab,dc=fi']
        ldap_conn.ldap_add(dn, _modlist.addModlist(ldap_tenant))
        map(lambda ml: map(lambda e: self.mailer.sendCannedMail(e,
                                                                self.mailer.CANNED_MESSAGES['added_to_tenant'],
                                                                ldap_tenant['cn']),
                           ml),
            [ldap_conn.ldap_search(s, _ldap.SCOPE_BASE,
                                   attrlist=['mail'])[0][1]['mail'] for s in ldap_tenant['uniqueMember']])
    def _createTenants(self, tenant_list, project, ldap_conn):
        """Create the project's tenant groups, or a default one if none given."""
        if tenant_list:
            map(lambda t: self._sendNewAccountEmails(self._createOrUpdate(t['member'], ldap_conn),
                                                     self.OS_TENANT, ldap_conn), tenant_list)
            map(lambda c: self._addAndNotify('cn=%s,cn=%s,%s' % (c['cn'], project['cn'], self._LDAP_TREE['projects']),
                                             c, ldap_conn),
                tenant_list)
        else:
            # No tenants defined: create a default tenant on Insightly and
            # mirror it under the project using the project owners as members.
            insightly_tenant = self.updater.createDefaultTenantFor(project)
            tenant = project.copy()
            tenant['o'] = str(insightly_tenant['PROJECT_ID'])
            tenant['uniqueMember'] = tenant.pop('owner', [])
            tenant.pop('seeAlso')
            self._sendNewAccountEmails(self._createOrUpdate(tenant['uniqueMember'], ldap_conn),
                                       self.OS_TENANT, ldap_conn)
            self._addAndNotify('cn=%(cn)s,cn=%(cn)s,%(sf)s' %
                               {'cn': project['cn'], 'sf': self._LDAP_TREE['projects']}, tenant, ldap_conn)
    def _create(self, project, project_type, ldap_conn):
        """Create the project group (and tenants for SDA/FPA-CRA) in LDAP,
        mark it running on Insightly, and notify admins and members."""
        self._sendNewAccountEmails(self._createOrUpdate(project['member'], ldap_conn), project_type, ldap_conn)
        ldap_conn.ldap_add(
            'cn=%s,%s' % (project['cn'], self._LDAP_TREE['projects']),
            _modlist.addModlist(self._getLDAPCompatibleProject(project, 'groupOfNames', ldap_conn)))
        if project_type in [self.SDA, self.FPA_CRA]:
            self._createTenants(project['tenants'], project, ldap_conn)
        self.updater.updateProject(project, status=self.updater.STATUS_RUNNING)
        map(lambda a: map(lambda m: self.mailer.sendCannedMail(m, self.mailer.CANNED_MESSAGES['notify_admin_contact'],
                                                               a['displayName']),
                          a['mail']),
            project['seeAlso'])
        map(lambda a: map(lambda m: self.mailer.sendCannedMail(m, self.mailer.CANNED_MESSAGES['added_to_project'],
                                                               project['cn']), a['mail']), project['member'])
    def _updateAndNotify(self, dn, record, ldap_conn, is_tenant=False):
        """Diff `record` against the LDAP entry at `dn`; apply the change and
        email users who were added to or removed from the group."""
        ldap_record = ldap_conn.ldap_search(dn, _ldap.SCOPE_BASE)[0][1]
        dict_record = self._getLDAPCompatibleProject(record,
                                                     'groupOfUniqueNames' if is_tenant else 'groupOfNames',
                                                     ldap_conn)
        # cmp() (Python 2) is non-zero when the records differ in any way.
        if cmp(dict_record, ldap_record):
            ldap_conn.ldap_update(dn, _modlist.modifyModlist(ldap_record, dict_record))
            new_users = filter(lambda m: m not in (ldap_record['uniqueMember'] if 'uniqueMember' in ldap_record.keys()
                                                   else ldap_record['member']),
                               (dict_record['uniqueMember'] if 'uniqueMember' in dict_record.keys()
                                else dict_record['member']))
            gone_users = filter(lambda m: m not in (dict_record['uniqueMember'] if 'uniqueMember' in dict_record.keys()
                                                    else dict_record['member']),
                                (ldap_record['uniqueMember'] if 'uniqueMember' in ldap_record.keys()
                                 else ldap_record['member']))
            if any(member_attribute in dict_record.keys() for member_attribute in ['member', 'uniqueMember']):
                # NOTE(review): dict_record['cn'][0] indexes the cn value —
                # when cn is a plain string this yields its first character;
                # confirm whether cn is expected to be a list here.
                map(lambda email_list: map(lambda e: self.mailer
                                           .sendCannedMail(e,
                                                           self.mailer.CANNED_MESSAGES['added_to_tenant']
                                                           if any(self.OS_TENANT in s for s in
                                                                  dict_record['description']) else
                                                           self.mailer.CANNED_MESSAGES[
                                                               'added_to_project'],
                                                           dict_record['cn'][0]), email_list),
                    map(lambda s: ldap_conn.ldap_search(s, _ldap.SCOPE_BASE, attrlist=['mail'])[0][1]['mail'],
                        new_users))
                map(lambda email_list: map(lambda e: self.mailer
                                           .sendCannedMail(e,
                                                           self.mailer.CANNED_MESSAGES[
                                                               'deleted_from_tenant']
                                                           if any(self.OS_TENANT in s for s in
                                                                  dict_record['description']) else
                                                           self.mailer.CANNED_MESSAGES[
                                                               'deleted_from_project'],
                                                           dict_record['cn'][0]), email_list),
                    map(lambda s: ldap_conn.ldap_search(s, _ldap.SCOPE_BASE, attrlist=['mail'])[0][1]['mail'],
                        gone_users))
    def _updateTenants(self, tenant_list, project, ldap_conn):
        """Reconcile the project's tenant subgroups: create new ones, delete
        removed ones and update the membership of the rest."""
        map(lambda t: self._sendNewAccountEmails(self._createOrUpdate(t['member'], ldap_conn),
                                                 self.OS_TENANT, ldap_conn), tenant_list)
        ldap_tenant_cns = [cn[1]['cn'][0] for cn in ldap_conn.ldap_search('cn=%s,%s' %
                                                                          (project['cn'],
                                                                           self._LDAP_TREE['projects']),
                                                                          _ldap.SCOPE_ONELEVEL, attrlist=['cn'])]
        new_tenants = filter(lambda t: t['cn'] not in ldap_tenant_cns, tenant_list)
        removed_tenant_cns = filter(lambda cn: cn not in [tenant['cn'] for tenant in tenant_list], ldap_tenant_cns)
        # An empty tenant_list still triggers creation of the default tenant.
        if new_tenants or not tenant_list:
            self._createTenants(new_tenants, project, ldap_conn)
        if removed_tenant_cns:
            map(lambda cn: ldap_conn.ldap_delete('cn=%s,cn=%s,%s' % (cn, project['cn'], self._LDAP_TREE['projects'])),
                removed_tenant_cns)
        # Update only tenants that already exist in LDAP and were not just created.
        map(lambda u: self._updateAndNotify('cn=%s,cn=%s,%s' % (u['cn'], project['cn'], self._LDAP_TREE['projects']),
                                            u, ldap_conn, is_tenant=True),
            filter(lambda nonews: nonews not in new_tenants,
                   filter(lambda t: ldap_conn.ldap_search('cn=%s,cn=%s,%s' %
                                                          (t['cn'], project['cn'],
                                                           self._LDAP_TREE['projects']),
                                                          _ldap.SCOPE_BASE), tenant_list)))
    def _update(self, project, project_type, ldap_conn):
        """Update an existing project group (falling back to _create when the
        project is not yet present in LDAP)."""
        ldap_record = ldap_conn.ldap_search('cn=%s,%s' % (project['cn'], self._LDAP_TREE['projects']),
                                            _ldap.SCOPE_BASE)
        if ldap_record:
            self._sendNewAccountEmails(self._createOrUpdate(project['member'], ldap_conn), project_type, ldap_conn)
            self._updateAndNotify('cn=%s,%s' % (project['cn'], self._LDAP_TREE['projects']),
                                  project,
                                  # map(lambda t: (_ldap.MOD_REPLACE, t[0], t[1]),
                                  #     self._createRecord(project, ldap_conn)),
                                  ldap_conn)
            if project_type in [self.SDA, self.FPA_CRA]:
                self._updateTenants(project['tenants'], project, ldap_conn)
        else:
            self._create(project, project_type, ldap_conn)
def _deleteTenants(self, tenant_list, project, ldap_conn):
former_members = []
map(lambda tenant: members.extend(ldap_conn.ldap_search(tenant, _ldap.SCOPE_BASE,
attrlist=['uniqueMember'])[0][1]['uniqueMember']),
tenant_list)
map(lambda tenant: ldap_conn.ldap_delete(tenant), tenant_list)
    def _delete(self, project, project_type, ldap_conn):
        """Remove the project group and its tenant subgroups from LDAP and mark
        them all completed on Insightly."""
        tenant_list = ldap_conn.ldap_search('cn=%s,' % project['cn'] + self._LDAP_TREE['projects'],
                                            _ldap.SCOPE_SUBORDINATE, attrlist=['o'])
        for tenant in tenant_list or []:
            # Unwrap the single-valued 'o' attribute for updateProject's benefit.
            tenant[1]['o'] = tenant[1]['o'][0]
        map(lambda tenant: ldap_conn.ldap_delete(tenant[0]), tenant_list or [])
        ldap_conn.ldap_delete('cn=%s,%s' % (project['cn'], self._LDAP_TREE['projects']))
        map(lambda tenant: self.updater.updateProject(tenant[1], updateStage=False,
                                                      status=self.updater.STATUS_COMPLETED), tenant_list or [])
        self.updater.updateProject(project, updateStage=False, status=self.updater.STATUS_COMPLETED)
    # Dispatch table used by Action(); populated with the unbound methods above.
    _actions = {
        ACTION_CREATE: _create,
        ACTION_DELETE: _delete,
        ACTION_UPDATE: _update
    }
    def Action(self, action, data_list, ldap_conn):
        """Perform a CRUD action against LDAP.

        Triggers the generation of LDAP payload and executes the requested action against the LDAP connection.

        Args:
            action (str): The action to perform, one of ACTION_CREATE, ACTION_DELETE or ACTION_UPDATE.
            data_list (List): A list of the elements to use as payload for the CRUD action against LDAP.
            ldap_conn (ForgeLDAP): An initialized LDAP connection to perform actions against.
        """
        # data_list maps project type -> projects; each project is dispatched
        # through _actions, then orphaned accounts are pruned in one pass.
        map(lambda k: map(lambda p: self._actions[action](self, p, k, ldap_conn), data_list[k]), data_list.keys())
        self._pruneAccounts(ldap_conn)
| mit |
twister2016/twister | twister/mk/dpdk_nic_bind.py | 9 | 20853 | #! /usr/bin/python
#
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import os
import getopt
import subprocess
from os.path import exists, abspath, dirname, basename
# The PCI device class for ETHERNET devices
ETHERNET_CLASS = "0200"
# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
devices = {}
# list of supported DPDK drivers
dpdk_drivers = ["igb_uio", "vfio-pci", "uio_pci_generic"]
# command-line arg flags
b_flag = None
status_flag = False
force_flag = False
args = []
def usage():
    '''Print usage information for the program'''
    argv0 = basename(sys.argv[0])
    # The %(argv0)s placeholders are substituted from locals() below; the text
    # itself is user-facing output and must be kept verbatim.
    print("""
Usage:
------
     %(argv0)s [options] DEVICE1 DEVICE2 ....
where DEVICE1, DEVICE2 etc, are specified via PCI "domain:bus:slot.func" syntax
or "bus:slot.func" syntax. For devices bound to Linux kernel drivers, they may
also be referred to by Linux interface name e.g. eth0, eth1, em0, em1, etc.
Options:
    --help, --usage:
        Display usage information and quit
    -s, --status:
        Print the current status of all known network interfaces.
        For each device, it displays the PCI domain, bus, slot and function,
        along with a text description of the device. Depending upon whether the
        device is being used by a kernel driver, the igb_uio driver, or no
        driver, other relevant information will be displayed:
        * the Linux interface name e.g. if=eth0
        * the driver being used e.g. drv=igb_uio
        * any suitable drivers not currently using that device
            e.g. unused=igb_uio
        NOTE: if this flag is passed along with a bind/unbind option, the
        status display will always occur after the other operations have taken
        place.
    -b driver, --bind=driver:
        Select the driver to use or \"none\" to unbind the device
    -u, --unbind:
        Unbind a device (Equivalent to \"-b none\")
    --force:
        By default, devices which are used by Linux - as indicated by having
        routes in the routing table - cannot be modified. Using the --force
        flag overrides this behavior, allowing active links to be forcibly
        unbound.
        WARNING: This can lead to loss of network connection and should be used
        with caution.
Examples:
---------
To display current device status:
        %(argv0)s --status
To bind eth1 from the current driver and move to use igb_uio
        %(argv0)s --bind=igb_uio eth1
To unbind 0000:01:00.0 from using any driver
        %(argv0)s -u 0000:01:00.0
To bind 0000:02:00.0 and 0000:02:00.1 to the ixgbe kernel driver
        %(argv0)s -b ixgbe 02:00.0 02:00.1
""" % locals()) # replace items from local variables
# This is roughly compatible with check_output function in subprocess module
# which is only available in python 2.7.
def check_output(args, stderr=None):
'''Run a command and capture its output'''
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=stderr).communicate()[0]
def find_module(mod):
    '''Find the .ko file for kernel module named mod.

    Searches the $RTE_SDK/$RTE_TARGET directory, the kernel modules
    directory (via modinfo) and finally under the parent directory of
    the script. Returns the path as a str, or None if not found.
    '''
    # 1) check $RTE_SDK/$RTE_TARGET directory
    if 'RTE_SDK' in os.environ and 'RTE_TARGET' in os.environ:
        path = "%s/%s/kmod/%s.ko" % (os.environ['RTE_SDK'],
                                     os.environ['RTE_TARGET'], mod)
        if exists(path):
            return path
    # 2) check using modinfo. check_output returns bytes on Python 3,
    # so decode before the substring test -- the original compared a
    # str against bytes, raising TypeError that the bare except
    # silently swallowed, disabling this lookup path entirely.
    try:
        depmod_out = check_output(["modinfo", "-n", mod],
                                  stderr=subprocess.STDOUT).decode().lower()
        if "error" not in depmod_out:
            path = depmod_out.strip()
            if exists(path):
                return path
    except Exception:  # if modinfo can't find module, it fails, so continue
        pass
    # 3) check for a copy based off current path
    tools_dir = dirname(abspath(sys.argv[0]))
    if tools_dir.endswith("tools"):
        base_dir = dirname(tools_dir)
        find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
        if len(find_out) > 0:  # something matched
            path = find_out.decode().splitlines()[0]
            if exists(path):
                return path
    return None
def check_modules():
    '''Checks that at least one supported DPDK kernel module (e.g. igb_uio)
    is loaded, and trims the global dpdk_drivers list to the loaded ones.
    Exits with an error if a DPDK bind was requested but no module is loaded.'''
    global dpdk_drivers
    # list of supported modules
    mods = [{"Name": driver, "Found": False} for driver in dpdk_drivers]
    # first check if module is loaded
    try:
        # Get list of sysfs modules (both built-in and dynamically loaded)
        sysfs_path = '/sys/module/'
        # Get the list of directories in sysfs_path
        sysfs_mods = [os.path.join(sysfs_path, o) for o
                      in os.listdir(sysfs_path)
                      if os.path.isdir(os.path.join(sysfs_path, o))]
        # Extract the last element of '/sys/module/abc' in the array
        sysfs_mods = [a.split('/')[-1] for a in sysfs_mods]
        # special case for vfio_pci (module is named vfio-pci,
        # but its .ko is named vfio_pci). Use a list comprehension:
        # on Python 3 map() returns a one-shot iterator, so the original
        # membership test below only worked for the first driver and
        # silently failed for all subsequent ones.
        sysfs_mods = [a if a != 'vfio_pci' else 'vfio-pci'
                      for a in sysfs_mods]
        for mod in mods:
            if mod["Name"] in sysfs_mods:
                mod["Found"] = True
    except OSError:
        # /sys/module missing or unreadable: treat as "nothing loaded"
        pass
    # check if we have at least one loaded module
    if True not in [mod["Found"] for mod in mods] and b_flag is not None:
        if b_flag in dpdk_drivers:
            print("Error - no supported modules(DPDK driver) are loaded")
            sys.exit(1)
        else:
            print("Warning - no supported modules(DPDK driver) are loaded")
    # change DPDK driver list to only contain drivers that are loaded
    dpdk_drivers = [mod["Name"] for mod in mods if mod["Found"]]
def has_driver(dev_id):
    '''return true if a device is assigned to a driver. False otherwise'''
    # "Driver_str" is only inserted into a device's dict (by lspci parsing
    # in get_pci_device_details) when a kernel driver is bound to it.
    return "Driver_str" in devices[dev_id]
def get_pci_device_details(dev_id):
    '''Return a dict of extra details for the PCI device at address dev_id,
    parsed from "lspci -vmmks" output (driver, description, interface, ...).'''
    device = {}
    lspci_lines = check_output(["lspci", "-vmmks", dev_id]).splitlines()
    # each non-empty lspci line is a "Key:\tValue" pair; store as "<Key>_str"
    for raw_line in lspci_lines:
        if not raw_line:
            continue
        key, val = raw_line.decode().split("\t", 1)
        device[key.strip(":") + "_str"] = val
    # check for a unix interface name
    sys_path = "/sys/bus/pci/devices/%s/net/" % dev_id
    if exists(sys_path):
        device["Interface"] = ",".join(os.listdir(sys_path))
    else:
        device["Interface"] = ""
    # check if a port is used for ssh connection (filled in by the caller)
    device["Ssh_if"] = False
    device["Active"] = ""
    return device
def get_nic_details():
    '''This function populates the "devices" dictionary. The keys used are
    the pci addresses (domain:bus:slot.func). The values are themselves
    dictionaries - one for each NIC.'''
    global devices
    global dpdk_drivers
    # clear any old data
    devices = {}
    # first loop through and read details for all devices
    # request machine readable format, with numeric IDs
    dev = {}
    dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
    # lspci -Dvmmn emits "Key:\tValue" lines; a blank line ends each record.
    # NOTE(review): dev is not reset between records, so a record relies on
    # overwriting all keys of the previous one -- presumably safe for lspci
    # output, but worth confirming.
    for dev_line in dev_lines:
        if (len(dev_line) == 0):
            # end of one device record: keep it only if it is an Ethernet NIC
            if dev["Class"] == ETHERNET_CLASS:
                # convert device and vendor ids to numbers, then add to global
                dev["Vendor"] = int(dev["Vendor"], 16)
                dev["Device"] = int(dev["Device"], 16)
                # use dict to make copy of dev
                devices[dev["Slot"]] = dict(dev)
        else:
            name, value = dev_line.decode().split("\t", 1)
            dev[name.rstrip(":")] = value
    # check what is the interface if any for an ssh connection if
    # any to this host, so we can mark it later.
    ssh_if = []
    route = check_output(["ip", "-o", "route"])
    # filter out all lines for 169.254 routes
    route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
                             route.decode().splitlines()))
    rt_info = route.split()
    # every "dev <ifname>" token pair in the routing table marks an
    # interface that carries live routes
    for i in range(len(rt_info) - 1):
        if rt_info[i] == "dev":
            ssh_if.append(rt_info[i+1])
    # based on the basic info, get extended text details
    for d in devices.keys():
        # get additional info and add it to existing data
        devices[d] = devices[d].copy()
        devices[d].update(get_pci_device_details(d).items())
        # mark devices whose interface appears in the routing table as
        # active, so bind/unbind can refuse to touch them without --force
        for _if in ssh_if:
            if _if in devices[d]["Interface"].split(","):
                devices[d]["Ssh_if"] = True
                devices[d]["Active"] = "*Active*"
                break
        # add igb_uio to list of supporting modules if needed
        if "Module_str" in devices[d]:
            for driver in dpdk_drivers:
                if driver not in devices[d]["Module_str"]:
                    devices[d]["Module_str"] = \
                        devices[d]["Module_str"] + ",%s" % driver
        else:
            devices[d]["Module_str"] = ",".join(dpdk_drivers)
        # make sure the driver and module strings do not have any duplicates
        if has_driver(d):
            modules = devices[d]["Module_str"].split(",")
            if devices[d]["Driver_str"] in modules:
                modules.remove(devices[d]["Driver_str"])
            devices[d]["Module_str"] = ",".join(modules)
def dev_id_from_dev_name(dev_name):
    '''Take a device "name" - a string passed in by user to identify a NIC
    device, and determine the device id - i.e. the domain:bus:slot.func - for
    it, which can then be used to index into the devices array'''
    # already a full "domain:bus:slot.func" index?
    if dev_name in devices:
        return dev_name
    # an index that is only missing the leading domain part?
    domain_qualified = "0000:" + dev_name
    if domain_qualified in devices:
        return domain_qualified
    # otherwise try to resolve it as a Linux interface name, e.g. eth1
    for dev in devices.values():
        if dev_name in dev["Interface"].split(","):
            return dev["Slot"]
    # if nothing else matches - error
    print("Unknown device: %s. "
          "Please specify device in \"bus:slot.func\" format" % dev_name)
    sys.exit(1)
def unbind_one(dev_id, force):
    '''Unbind the device identified by "dev_id" from its current driver.
    Skips devices carrying live routes unless force is true. Requires
    write access to /sys (i.e. root).'''
    dev = devices[dev_id]
    if not has_driver(dev_id):
        print("%s %s %s is not currently managed by any driver\n" %
              (dev["Slot"], dev["Device_str"], dev["Interface"]))
        return
    # prevent us disconnecting ourselves
    if dev["Ssh_if"] and not force:
        print("Routing table indicates that interface %s is active. "
              "Skipping unbind" % (dev_id))
        return
    # write to /sys to unbind
    filename = "/sys/bus/pci/drivers/%s/unbind" % dev["Driver_str"]
    try:
        f = open(filename, "a")
    except:
        print("Error: unbind failed for %s - Cannot open %s"
              % (dev_id, filename))
        sys.exit(1)
    # writing the PCI address to the driver's "unbind" file detaches it
    f.write(dev_id)
    f.close()
def bind_one(dev_id, driver, force):
    '''Bind the device given by "dev_id" to the driver "driver". If the device
    is already bound to a different driver, it will be unbound first.
    On failure the previously-bound driver is restored when possible.'''
    dev = devices[dev_id]
    saved_driver = None  # used to rollback any unbind in case of failure
    # prevent disconnection of our ssh session
    if dev["Ssh_if"] and not force:
        print("Routing table indicates that interface %s is active. "
              "Not modifying" % (dev_id))
        return
    # unbind any existing drivers we don't want
    if has_driver(dev_id):
        if dev["Driver_str"] == driver:
            print("%s already bound to driver %s, skipping\n"
                  % (dev_id, driver))
            return
        else:
            saved_driver = dev["Driver_str"]
            unbind_one(dev_id, force)
            dev["Driver_str"] = ""  # clear driver string
    # if we are binding to one of DPDK drivers, add PCI id's to that driver
    # (DPDK drivers have no built-in PCI ID table, so the id must be
    # registered via the driver's "new_id" sysfs file first)
    if driver in dpdk_drivers:
        filename = "/sys/bus/pci/drivers/%s/new_id" % driver
        try:
            f = open(filename, "w")
        except:
            print("Error: bind failed for %s - Cannot open %s"
                  % (dev_id, filename))
            return
        try:
            f.write("%04x %04x" % (dev["Vendor"], dev["Device"]))
            f.close()
        except:
            print("Error: bind failed for %s - Cannot write new PCI ID to "
                  "driver %s" % (dev_id, driver))
            return
    # do the bind by writing to /sys
    filename = "/sys/bus/pci/drivers/%s/bind" % driver
    try:
        f = open(filename, "a")
    except:
        print("Error: bind failed for %s - Cannot open %s"
              % (dev_id, filename))
        if saved_driver is not None:  # restore any previous driver
            bind_one(dev_id, saved_driver, force)
        return
    try:
        f.write(dev_id)
        f.close()
    except:
        # for some reason, closing dev_id after adding a new PCI ID to new_id
        # results in IOError. however, if the device was successfully bound,
        # we don't care for any errors and can safely ignore IOError
        tmp = get_pci_device_details(dev_id)
        if "Driver_str" in tmp and tmp["Driver_str"] == driver:
            return
        print("Error: bind failed for %s - Cannot bind to driver %s"
              % (dev_id, driver))
        if saved_driver is not None:  # restore any previous driver
            bind_one(dev_id, saved_driver, force)
        return
def unbind_all(dev_list, force=False):
    """Unbind method, takes a list of device locations"""
    # resolve each user-supplied name to a PCI id and unbind it
    for dev_name in dev_list:
        unbind_one(dev_id_from_dev_name(dev_name), force)
def bind_all(dev_list, driver, force=False):
    """Bind method, takes a list of device locations"""
    global devices
    # materialize the resolved list: on Python 3, map() returns a one-shot
    # iterator which would be exhausted by the loop below, making the later
    # "d in dev_list" membership test always False.
    dev_list = [dev_id_from_dev_name(name) for name in dev_list]
    for d in dev_list:
        bind_one(d, driver, force)
    # when binding devices to a generic driver (i.e. one that doesn't have a
    # PCI ID table), some devices that are not bound to any other driver could
    # be bound even if no one has asked them to. hence, we check the list of
    # drivers again, and see if some of the previously-unbound devices were
    # erroneously bound.
    for d in devices.keys():
        # skip devices that were already bound or that we know should be bound
        if "Driver_str" in devices[d] or d in dev_list:
            continue
        # update information about this device. Use dict.update() rather
        # than adding dict_items views together, which is a TypeError on
        # Python 3.
        devices[d] = dict(devices[d])
        devices[d].update(get_pci_device_details(d).items())
        # check if updated information indicates that the device was bound
        if "Driver_str" in devices[d]:
            unbind_one(d, force)
def display_devices(title, dev_list, extra_params=None):
    '''Displays to the user the details of a list of devices given in
    "dev_list". The "extra_params" parameter, if given, should contain a string
    with %()s fields in it for replacement by the named fields in each
    device's dictionary.'''
    print("\n%s" % title)
    print("=" * len(title))
    if not dev_list:
        lines = ["<none>"]
    elif extra_params is not None:
        lines = ["%s '%s' %s" % (dev["Slot"], dev["Device_str"],
                                 extra_params % dev)
                 for dev in dev_list]
    else:
        lines = ["%s '%s'" % (dev["Slot"], dev["Device_str"])
                 for dev in dev_list]
    # sort before printing, so that the entries appear in PCI order
    lines.sort()
    print("\n".join(lines))  # print one per line
def show_status():
    '''Function called when the script is passed the "--status" option.
    Displays to the user what devices are bound to the igb_uio driver, the
    kernel driver or to no driver'''
    global dpdk_drivers
    kernel_drv = []
    dpdk_drv = []
    no_drv = []
    # split our list of devices into the three categories above
    for d in devices.keys():
        if not has_driver(d):
            no_drv.append(devices[d])
            continue
        if devices[d]["Driver_str"] in dpdk_drivers:
            dpdk_drv.append(devices[d])
        else:
            kernel_drv.append(devices[d])
    # print each category separately, so we can clearly see what's used by DPDK
    display_devices("Network devices using DPDK-compatible driver", dpdk_drv,
                    "drv=%(Driver_str)s unused=%(Module_str)s")
    display_devices("Network devices using kernel driver", kernel_drv,
                    "if=%(Interface)s drv=%(Driver_str)s "
                    "unused=%(Module_str)s %(Active)s")
    display_devices("Other network devices", no_drv, "unused=%(Module_str)s")
def parse_args():
    '''Parses the command-line arguments given by the user and takes the
    appropriate action for each. Sets the module-level flags b_flag,
    status_flag and force_flag, and leaves the positional device
    arguments in the global "args" list.'''
    global b_flag
    global status_flag
    global force_flag
    global args
    # with no arguments at all, just show usage and exit successfully
    if len(sys.argv) <= 1:
        usage()
        sys.exit(0)
    try:
        opts, args = getopt.getopt(sys.argv[1:], "b:us",
                                   ["help", "usage", "status", "force",
                                    "bind=", "unbind"])
    except getopt.GetoptError as error:
        print(str(error))
        print("Run '%s --usage' for further information" % sys.argv[0])
        sys.exit(1)
    for opt, arg in opts:
        if opt == "--help" or opt == "--usage":
            usage()
            sys.exit(0)
        if opt == "--status" or opt == "-s":
            status_flag = True
        if opt == "--force":
            force_flag = True
        if opt == "-b" or opt == "-u" or opt == "--bind" or opt == "--unbind":
            # bind and unbind are mutually exclusive; only one may be given
            if b_flag is not None:
                print("Error - Only one bind or unbind may be specified\n")
                sys.exit(1)
            # unbind is expressed internally as binding to "none"
            if opt == "-u" or opt == "--unbind":
                b_flag = "none"
            else:
                b_flag = arg
def do_arg_actions():
    '''Carry out the bind/unbind and/or status actions requested on the
    command line, using the flags already set by parse_args(). Exits with
    an error if no action or no devices were specified.'''
    global b_flag
    global status_flag
    global force_flag
    global args
    if b_flag is None and not status_flag:
        # note the trailing space: the adjacent literals are concatenated
        # into a single message (the original printed "devices.Please")
        print("Error: No action specified for devices. "
              "Please give a -b or -u option")
        print("Run '%s --usage' for further information" % sys.argv[0])
        sys.exit(1)
    if b_flag is not None and len(args) == 0:
        print("Error: No devices specified.")
        print("Run '%s --usage' for further information" % sys.argv[0])
        sys.exit(1)
    # perform the requested bind/unbind
    if b_flag == "none" or b_flag == "None":
        unbind_all(args, force_flag)
    elif b_flag is not None:
        bind_all(args, b_flag, force_flag)
    # the status display always comes after any bind/unbind operations
    if status_flag:
        if b_flag is not None:
            get_nic_details()  # refresh if we have changed anything
        show_status()
def main():
    '''program main function'''
    # order matters: parse flags first, then discover loaded modules and
    # NICs, and finally act on the parsed arguments
    parse_args()
    check_modules()
    get_nic_details()
    do_arg_actions()
if __name__ == "__main__":
    main()
| apache-2.0 |
hlange/LogSoCR | pysc/usi/log/console_reporter.py | 1 | 4184 | import json
import colorlog
import logging
import logging.handlers
from elloghandler.handler import ElLogHandler
from socr_streamhandler.handler import SoCR_StreamHandler
from socr_filehandler.handler import SoCR_FileHandler
from datetime import datetime, date, time
def logger_conf(loglevel,index=None):
    """Configure the root logger with up to three handlers: ElasticSearch,
    file and terminal/console.

    loglevel is a string indexed positionally: loglevel[0] selects the
    ElasticSearch level, loglevel[2] the file level and loglevel[4] the
    console level ('0'->INFO, '1'->WARNING, '2'->ERROR, '3'->CRITICAL,
    'x' disables the handler). Positions 1 and 3 are presumably
    separator characters -- TODO confirm against callers.
    index, if given, is forwarded to ElLogHandler.
    """
    #Formatter
    COLORLOG_FORMAT = '@%(time)s ns /%(delta_count)s (%(blue)s%(filename)s%(white)s): %(log_color)s%(levelname)s%(reset)s: %(message)s %(parameters)s'
    STD_FORMAT = '@%(time)s ns /%(delta_count)s (%(filename)s): %(levelname)s: %(message)s %(parameters)s '
    FILE_FORMAT = '%(asctime)s @%(time)s ns /%(delta_count)s (%(filename)s): %(levelname)s: %(message)s %(parameters)s'
    #Logger initialization
    log_root = logging.getLogger()
    log_root.setLevel(logging.DEBUG)
    #disable elastic search logger, makes problem when activated
    es_log = logging.getLogger("elasticsearch")
    es_log.propagate = False
    #ElasticSearch Handler
    # NOTE(review): the handler is instantiated even when loglevel[0] is
    # 'x' and it will never be added -- harmless but wasteful if
    # ElLogHandler opens a connection on construction; verify.
    if index is not None:
        eh = ElLogHandler(index)
    else:
        eh = ElLogHandler()
    #console handler
    ch = SoCR_StreamHandler()
    ch.setFormatter(colorlog.ColoredFormatter(
        COLORLOG_FORMAT,
        datefmt=None,
        reset=True,
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red,bg_white',
        }))
    #file handler (only created when file logging is not disabled)
    # NOTE(review): for an unexpected digit (e.g. '5') fh is created and
    # added below without an explicit level -- confirm this is intended.
    if loglevel[2] != 'x':
        fh = SoCR_FileHandler(datetime.now().strftime('log_%H:%M_%d_%m_%Y.log'))
        fh.setFormatter(logging.Formatter(FILE_FORMAT))
    #setting the priorities of the handler
    #Elasticsearch
    if loglevel[0] == '0':
        eh.setLevel(logging.INFO)
    elif loglevel[0] == '1':
        eh.setLevel(logging.WARNING)
    elif loglevel[0] == '2':
        eh.setLevel(logging.ERROR)
    elif loglevel[0] == '3':
        eh.setLevel(logging.CRITICAL)
    if loglevel[0] == 'x':
        pass
    else:
        log_root.addHandler(eh)
    #File
    if loglevel[2] == '0':
        fh.setLevel(logging.INFO)
    elif loglevel[2] == '1':
        fh.setLevel(logging.WARNING)
    elif loglevel[2] == '2':
        fh.setLevel(logging.ERROR)
    elif loglevel[2] == '3':
        fh.setLevel(logging.CRITICAL)
    if loglevel[2] == 'x':
        pass
    else:
        log_root.addHandler(fh)
    #Terminal
    if loglevel[4] == '0':
        ch.setLevel(logging.INFO)
    elif loglevel[4] == '1':
        ch.setLevel(logging.WARNING)
    elif loglevel[4] == '2':
        ch.setLevel(logging.ERROR)
    elif loglevel[4] == '3':
        ch.setLevel(logging.CRITICAL)
    if loglevel[4] == 'x':
        pass
    else:
        log_root.addHandler(ch)
logger = logging.getLogger(__name__)
#function for log messages
def report(
        message_type=None,
        message_text=None,
        severity=None,
        file_name=None,
        line_number=None,
        time=None,
        delta_count=None,
        process_name=None,
        verbosity=None,
        what=None,
        actions=None,
        phase=None,
        **kwargs):
    """Forward a simulation log message to the module logger.

    severity selects the log level: 0 -> info, 1 -> warning, 2 -> error,
    >= 3 -> critical. Any other value (including the default None) logs
    nothing. All remaining keyword arguments are rendered into the
    "parameters" string and also merged into the record's extra dict.
    """
    # render **kwargs as "key=value" pairs; ints are shown in hex
    parameters = " "
    for key, value in kwargs.items():
        if isinstance(value, int):
            parameters += "{0}={1:#x} ".format(key, value)
        else:
            parameters += "{0}={1} ".format(key, value)
    extra={
        'message_type':message_type,
        'severity': severity,
        'file_name': file_name,
        'line_number': line_number,
        'time': time,
        'delta_count': delta_count,
        'process_name': process_name,
        'verbosity': verbosity,
        'what': what,
        'actions': actions,
        'phase': phase,
        'parameters':parameters}
    #Collection log information
    extra.update(kwargs)
    if severity is None:
        # comparing None with ">=" raises TypeError on Python 3, so the
        # original crashed whenever severity was omitted; bail out instead
        # (nothing is logged, matching other unrecognized severities).
        return
    if severity == 0:
        logger.info(message_text, extra=extra)
    elif severity == 1:
        logger.warning(message_text, extra=extra)
    elif severity == 2:
        logger.error(message_text, extra=extra)
    elif severity >= 3:
        logger.critical(message_text, extra=extra)
| agpl-3.0 |
mafiya69/sympy | sympy/printing/tests/test_ccode.py | 13 | 14112 | from sympy.core import (pi, oo, symbols, Rational, Integer,
GoldenRatio, EulerGamma, Catalan, Lambda, Dummy, Eq)
from sympy.functions import (Piecewise, sin, cos, Abs, exp, ceiling, sqrt,
gamma, sign)
from sympy.logic import ITE
from sympy.utilities.pytest import raises
from sympy.printing.ccode import CCodePrinter
from sympy.utilities.lambdify import implemented_function
from sympy.tensor import IndexedBase, Idx
from sympy.matrices import Matrix, MatrixSymbol
from sympy import ccode
x, y, z = symbols('x,y,z')
# A class-level _ccode hook on an expression overrides default printing.
def test_printmethod():
    class fabs(Abs):
        def _ccode(self, printer):
            return "fabs(%s)" % printer._print(self.args[0])
    assert ccode(fabs(x)) == "fabs(x)"
# sqrt() and x**0.5 both print as the C sqrt() call.
def test_ccode_sqrt():
    assert ccode(sqrt(x)) == "sqrt(x)"
    assert ccode(x**0.5) == "sqrt(x)"
    assert ccode(sqrt(x)) == "sqrt(x)"
# Powers: pow() nesting, rational/negative exponents, conditional
# user-supplied pow replacements.
def test_ccode_Pow():
    assert ccode(x**3) == "pow(x, 3)"
    assert ccode(x**(y**3)) == "pow(x, pow(y, 3))"
    g = implemented_function('g', Lambda(x, 2*x))
    assert ccode(1/(g(x)*3.5)**(x - y**x)/(x**2 + y)) == \
        "pow(3.5*2*x, -x + pow(y, x))/(pow(x, 2) + y)"
    assert ccode(x**-1.0) == '1.0/x'
    assert ccode(x**Rational(2, 3)) == 'pow(x, 2.0L/3.0L)'
    _cond_cfunc = [(lambda base, exp: exp.is_integer, "dpowi"),
                   (lambda base, exp: not exp.is_integer, "pow")]
    assert ccode(x**3, user_functions={'Pow': _cond_cfunc}) == 'dpowi(x, 3)'
    assert ccode(x**3.2, user_functions={'Pow': _cond_cfunc}) == 'pow(x, 3.2)'
# Known constants map to math.h macros.
def test_ccode_constants_mathh():
    assert ccode(exp(1)) == "M_E"
    assert ccode(pi) == "M_PI"
    assert ccode(oo) == "HUGE_VAL"
    assert ccode(-oo) == "-HUGE_VAL"
# Constants without math.h macros are emitted as named const doubles.
def test_ccode_constants_other():
    assert ccode(2*GoldenRatio) == "double const GoldenRatio = 1.61803398874989;\n2*GoldenRatio"
    assert ccode(
        2*Catalan) == "double const Catalan = 0.915965594177219;\n2*Catalan"
    assert ccode(2*EulerGamma) == "double const EulerGamma = 0.577215664901533;\n2*EulerGamma"
# Rationals print as long-double literals (except integral values).
def test_ccode_Rational():
    assert ccode(Rational(3, 7)) == "3.0L/7.0L"
    assert ccode(Rational(18, 9)) == "2"
    assert ccode(Rational(3, -7)) == "-3.0L/7.0L"
    assert ccode(Rational(-3, -7)) == "3.0L/7.0L"
    assert ccode(x + Rational(3, 7)) == "x + 3.0L/7.0L"
    assert ccode(Rational(3, 7)*x) == "(3.0L/7.0L)*x"
# Integers print verbatim.
def test_ccode_Integer():
    assert ccode(Integer(67)) == "67"
    assert ccode(Integer(-1)) == "-1"
# Function powers print through pow().
def test_ccode_functions():
    assert ccode(sin(x) ** cos(x)) == "pow(sin(x), cos(x))"
# implemented_function bodies are inlined, including loop-generated
# assignments for Indexed arguments.
def test_ccode_inline_function():
    x = symbols('x')
    g = implemented_function('g', Lambda(x, 2*x))
    assert ccode(g(x)) == "2*x"
    g = implemented_function('g', Lambda(x, 2*x/Catalan))
    assert ccode(
        g(x)) == "double const Catalan = %s;\n2*x/Catalan" % Catalan.n()
    A = IndexedBase('A')
    i = Idx('i', symbols('n', integer=True))
    g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
    assert ccode(g(A[i]), assign_to=A[i]) == (
        "for (int i=0; i<n; i++){\n"
        "   A[i] = (A[i] + 1)*(A[i] + 2)*A[i];\n"
        "}"
    )
# SymPy names with different C equivalents (ceiling->ceil, Abs->fabs, ...).
def test_ccode_exceptions():
    assert ccode(ceiling(x)) == "ceil(x)"
    assert ccode(Abs(x)) == "fabs(x)"
    assert ccode(gamma(x)) == "tgamma(x)"
# user_functions overrides, including predicate-based dispatch.
def test_ccode_user_functions():
    x = symbols('x', integer=False)
    n = symbols('n', integer=True)
    custom_functions = {
        "ceiling": "ceil",
        "Abs": [(lambda x: not x.is_integer, "fabs"), (lambda x: x.is_integer, "abs")],
    }
    assert ccode(ceiling(x), user_functions=custom_functions) == "ceil(x)"
    assert ccode(Abs(x), user_functions=custom_functions) == "fabs(x)"
    assert ccode(Abs(n), user_functions=custom_functions) == "abs(n)"
# Boolean operators map to C logical operators with C precedence.
def test_ccode_boolean():
    assert ccode(x & y) == "x && y"
    assert ccode(x | y) == "x || y"
    assert ccode(~x) == "!x"
    assert ccode(x & y & z) == "x && y && z"
    assert ccode(x | y | z) == "x || y || z"
    assert ccode((x & y) | z) == "z || x && y"
    assert ccode((x | y) & z) == "z && (x || y)"
# Piecewise prints as ternary chains, or if/else when assign_to is given;
# a Piecewise with no default (True) branch must raise.
def test_ccode_Piecewise():
    expr = Piecewise((x, x < 1), (x**2, True))
    assert ccode(expr) == (
            "((x < 1) ? (\n"
            "   x\n"
            ")\n"
            ": (\n"
            "   pow(x, 2)\n"
            "))")
    assert ccode(expr, assign_to="c") == (
            "if (x < 1) {\n"
            "   c = x;\n"
            "}\n"
            "else {\n"
            "   c = pow(x, 2);\n"
            "}")
    expr = Piecewise((x, x < 1), (x + 1, x < 2), (x**2, True))
    assert ccode(expr) == (
            "((x < 1) ? (\n"
            "   x\n"
            ")\n"
            ": ((x < 2) ? (\n"
            "   x + 1\n"
            ")\n"
            ": (\n"
            "   pow(x, 2)\n"
            ")))")
    assert ccode(expr, assign_to='c') == (
            "if (x < 1) {\n"
            "   c = x;\n"
            "}\n"
            "else if (x < 2) {\n"
            "   c = x + 1;\n"
            "}\n"
            "else {\n"
            "   c = pow(x, 2);\n"
            "}")
    # Check that Piecewise without a True (default) condition error
    expr = Piecewise((x, x < 1), (x**2, x > 1), (sin(x), x > 0))
    raises(ValueError, lambda: ccode(expr))
# Piecewise nested inside larger expressions keeps its ternary form.
def test_ccode_Piecewise_deep():
    p = ccode(2*Piecewise((x, x < 1), (x + 1, x < 2), (x**2, True)))
    assert p == (
            "2*((x < 1) ? (\n"
            "   x\n"
            ")\n"
            ": ((x < 2) ? (\n"
            "   x + 1\n"
            ")\n"
            ": (\n"
            "   pow(x, 2)\n"
            ")))")
    expr = x*y*z + x**2 + y**2 + Piecewise((0, x < 0.5), (1, True)) + cos(z) - 1
    assert ccode(expr) == (
            "pow(x, 2) + x*y*z + pow(y, 2) + ((x < 0.5) ? (\n"
            "   0\n"
            ")\n"
            ": (\n"
            "   1\n"
            ")) + cos(z) - 1")
    assert ccode(expr, assign_to='c') == (
            "c = pow(x, 2) + x*y*z + pow(y, 2) + ((x < 0.5) ? (\n"
            "   0\n"
            ")\n"
            ": (\n"
            "   1\n"
            ")) + cos(z) - 1;")
# ITE prints like a two-branch Piecewise ternary.
def test_ccode_ITE():
    expr = ITE(x < 1, x, x**2)
    assert ccode(expr) == (
            "((x < 1) ? (\n"
            "   x\n"
            ")\n"
            ": (\n"
            "   pow(x, 2)\n"
            "))")
# Unknown keyword settings are rejected.
def test_ccode_settings():
    raises(TypeError, lambda: ccode(sin(x), method="garbage"))
# Indexed objects flatten multi-dimensional indices into row-major offsets.
def test_ccode_Indexed():
    from sympy.tensor import IndexedBase, Idx
    from sympy import symbols
    n, m, o = symbols('n m o', integer=True)
    i, j, k = Idx('i', n), Idx('j', m), Idx('k', o)
    p = CCodePrinter()
    p._not_c = set()
    x = IndexedBase('x')[j]
    assert p._print_Indexed(x) == 'x[j]'
    A = IndexedBase('A')[i, j]
    assert p._print_Indexed(A) == 'A[%s]' % (m*i+j)
    B = IndexedBase('B')[i, j, k]
    assert p._print_Indexed(B) == 'B[%s]' % (i*o*m+j*o+k)
    assert p._not_c == set()
# contract=False emits a plain assignment instead of summation loops.
def test_ccode_Indexed_without_looking_for_contraction():
    len_y = 5
    y = IndexedBase('y', shape=(len_y,))
    x = IndexedBase('x', shape=(len_y,))
    Dy = IndexedBase('Dy', shape=(len_y-1,))
    i = Idx('i', len_y-1)
    e=Eq(Dy[i], (y[i+1]-y[i])/(x[i+1]-x[i]))
    code0 = ccode(e.rhs, assign_to=e.lhs, contract=False)
    assert code0 == 'Dy[i] = (y[%s] - y[i])/(x[%s] - x[i]);' % (i + 1, i + 1)
# Matrix-vector contraction generates a zeroing loop plus a nested sum loop.
def test_ccode_loops_matrix_vector():
    n, m = symbols('n m', integer=True)
    A = IndexedBase('A')
    x = IndexedBase('x')
    y = IndexedBase('y')
    i = Idx('i', m)
    j = Idx('j', n)
    s = (
        'for (int i=0; i<m; i++){\n'
        '   y[i] = 0;\n'
        '}\n'
        'for (int i=0; i<m; i++){\n'
        '   for (int j=0; j<n; j++){\n'
        '      y[i] = x[j]*A[%s] + y[i];\n' % (i*n + j) +\
        '   }\n'
        '}'
    )
    c = ccode(A[i, j]*x[j], assign_to=y[i])
    assert c == s
# Dummy symbols keep their dummy_index suffix in generated loop variables.
def test_dummy_loops():
    i, m = symbols('i m', integer=True, cls=Dummy)
    x = IndexedBase('x')
    y = IndexedBase('y')
    i = Idx(i, m)
    expected = (
        'for (int i_%(icount)i=0; i_%(icount)i<m_%(mcount)i; i_%(icount)i++){\n'
        '   y[i_%(icount)i] = x[i_%(icount)i];\n'
        '}'
    ) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
    code = ccode(x[i], assign_to=y[i])
    assert code == expected
# Sum of contracted and non-contracted terms: plain assignment loop plus
# accumulation loop.
def test_ccode_loops_add():
    from sympy.tensor import IndexedBase, Idx
    from sympy import symbols
    n, m = symbols('n m', integer=True)
    A = IndexedBase('A')
    x = IndexedBase('x')
    y = IndexedBase('y')
    z = IndexedBase('z')
    i = Idx('i', m)
    j = Idx('j', n)
    s = (
        'for (int i=0; i<m; i++){\n'
        '   y[i] = x[i] + z[i];\n'
        '}\n'
        'for (int i=0; i<m; i++){\n'
        '   for (int j=0; j<n; j++){\n'
        '      y[i] = x[j]*A[%s] + y[i];\n' % (i*n + j) +\
        '   }\n'
        '}'
    )
    c = ccode(A[i, j]*x[j] + x[i] + z[i], assign_to=y[i])
    assert c == s
# Multiple simultaneous contractions nest one loop per contracted index.
def test_ccode_loops_multiple_contractions():
    from sympy.tensor import IndexedBase, Idx
    from sympy import symbols
    n, m, o, p = symbols('n m o p', integer=True)
    a = IndexedBase('a')
    b = IndexedBase('b')
    y = IndexedBase('y')
    i = Idx('i', m)
    j = Idx('j', n)
    k = Idx('k', o)
    l = Idx('l', p)
    s = (
        'for (int i=0; i<m; i++){\n'
        '   y[i] = 0;\n'
        '}\n'
        'for (int i=0; i<m; i++){\n'
        '   for (int j=0; j<n; j++){\n'
        '      for (int k=0; k<o; k++){\n'
        '         for (int l=0; l<p; l++){\n'
        '            y[i] = y[i] + b[%s]*a[%s];\n' % (j*o*p + k*p + l, i*n*o*p + j*o*p + k*p + l) +\
        '         }\n'
        '      }\n'
        '   }\n'
        '}'
    )
    c = ccode(b[j, k, l]*a[i, j, k, l], assign_to=y[i])
    assert c == s
# A factored sum inside a contraction stays factored in the loop body.
def test_ccode_loops_addfactor():
    from sympy.tensor import IndexedBase, Idx
    from sympy import symbols
    n, m, o, p = symbols('n m o p', integer=True)
    a = IndexedBase('a')
    b = IndexedBase('b')
    c = IndexedBase('c')
    y = IndexedBase('y')
    i = Idx('i', m)
    j = Idx('j', n)
    k = Idx('k', o)
    l = Idx('l', p)
    s = (
        'for (int i=0; i<m; i++){\n'
        '   y[i] = 0;\n'
        '}\n'
        'for (int i=0; i<m; i++){\n'
        '   for (int j=0; j<n; j++){\n'
        '      for (int k=0; k<o; k++){\n'
        '         for (int l=0; l<p; l++){\n'
        '            y[i] = (a[%s] + b[%s])*c[%s] + y[i];\n' % (i*n*o*p + j*o*p + k*p + l, i*n*o*p + j*o*p + k*p + l, j*o*p + k*p + l) +\
        '         }\n'
        '      }\n'
        '   }\n'
        '}'
    )
    c = ccode((a[i, j, k, l] + b[i, j, k, l])*c[j, k, l], assign_to=y[i])
    assert c == s
# Terms with different contraction structure each get their own loop nest;
# the emission order of the nests is unspecified, so all orderings are
# accepted below.
def test_ccode_loops_multiple_terms():
    from sympy.tensor import IndexedBase, Idx
    from sympy import symbols
    n, m, o, p = symbols('n m o p', integer=True)
    a = IndexedBase('a')
    b = IndexedBase('b')
    c = IndexedBase('c')
    y = IndexedBase('y')
    i = Idx('i', m)
    j = Idx('j', n)
    k = Idx('k', o)
    s0 = (
        'for (int i=0; i<m; i++){\n'
        '   y[i] = 0;\n'
        '}\n'
    )
    s1 = (
        'for (int i=0; i<m; i++){\n'
        '   for (int j=0; j<n; j++){\n'
        '      for (int k=0; k<o; k++){\n'
        '         y[i] = b[j]*b[k]*c[%s] + y[i];\n' % (i*n*o + j*o + k) +\
        '      }\n'
        '   }\n'
        '}\n'
    )
    s2 = (
        'for (int i=0; i<m; i++){\n'
        '   for (int k=0; k<o; k++){\n'
        '      y[i] = b[k]*a[%s] + y[i];\n' % (i*o + k) +\
        '   }\n'
        '}\n'
    )
    s3 = (
        'for (int i=0; i<m; i++){\n'
        '   for (int j=0; j<n; j++){\n'
        '      y[i] = b[j]*a[%s] + y[i];\n' % (i*n + j) +\
        '   }\n'
        '}\n'
    )
    c = ccode(
        b[j]*a[i, j] + b[k]*a[i, k] + b[j]*b[k]*c[i, j, k], assign_to=y[i])
    assert (c == s0 + s1 + s2 + s3[:-1] or
            c == s0 + s1 + s3 + s2[:-1] or
            c == s0 + s2 + s1 + s3[:-1] or
            c == s0 + s2 + s3 + s1[:-1] or
            c == s0 + s3 + s1 + s2[:-1] or
            c == s0 + s3 + s2 + s1[:-1])
# dereference= prints the given symbols as pointer dereferences.
def test_dereference_printing():
    expr = x + y + sin(z) + z
    assert ccode(expr, dereference=[z]) == "x + y + (*z) + sin((*z))"
# Matrices assign element-by-element; MatrixElements index flat arrays.
def test_Matrix_printing():
    # Test returning a Matrix
    mat = Matrix([x*y, Piecewise((2 + x, y>0), (y, True)), sin(z)])
    A = MatrixSymbol('A', 3, 1)
    assert ccode(mat, A) == (
        "A[0] = x*y;\n"
        "if (y > 0) {\n"
        "   A[1] = x + 2;\n"
        "}\n"
        "else {\n"
        "   A[1] = y;\n"
        "}\n"
        "A[2] = sin(z);")
    # Test using MatrixElements in expressions
    expr = Piecewise((2*A[2, 0], x > 0), (A[2, 0], True)) + sin(A[1, 0]) + A[0, 0]
    assert ccode(expr) == (
        "((x > 0) ? (\n"
        "   2*A[2]\n"
        ")\n"
        ": (\n"
        "   A[2]\n"
        ")) + sin(A[1]) + A[0]")
    # Test using MatrixElements in a Matrix
    q = MatrixSymbol('q', 5, 1)
    M = MatrixSymbol('M', 3, 3)
    m = Matrix([[sin(q[1,0]), 0, cos(q[2,0])],
        [q[1,0] + q[2,0], q[3, 0], 5],
        [2*q[4, 0]/q[1,0], sqrt(q[0,0]) + 4, 0]])
    assert ccode(m, M) == (
        "M[0] = sin(q[1]);\n"
        "M[1] = 0;\n"
        "M[2] = cos(q[2]);\n"
        "M[3] = q[1] + q[2];\n"
        "M[4] = q[3];\n"
        "M[5] = 5;\n"
        "M[6] = 2*q[4]*1.0/q[1];\n"
        "M[7] = 4 + sqrt(q[0]);\n"
        "M[8] = 0;")
# C reserved words used as symbol names get a suffix (or raise on demand).
def test_ccode_reserved_words():
    x, y = symbols('x, if')
    assert ccode(y**2) == 'pow(if_, 2)'
    assert ccode(x * y**2, dereference=[y]) == 'pow((*if_), 2)*x'
    expected = 'pow(if_unreserved, 2)'
    assert ccode(y**2, reserved_word_suffix='_unreserved') == expected
    with raises(ValueError):
        ccode(y**2, error_on_reserved=True)
# sign(x) prints as the comparison-difference idiom.
def test_ccode_sign():
    expr = sign(x) * y
    assert ccode(expr) == 'y*(((x) > 0) - ((x) < 0))'
    assert ccode(expr, 'z') == 'z = y*(((x) > 0) - ((x) < 0));'
    assert ccode(sign(2 * x + x**2) * x + x**2) == \
        'pow(x, 2) + x*(((pow(x, 2) + 2*x) > 0) - ((pow(x, 2) + 2*x) < 0))'
    expr = sign(cos(x))
    assert ccode(expr) == '(((cos(x)) > 0) - ((cos(x)) < 0))'
yarikoptic/vcrpy | vcr/cassette.py | 3 | 10918 | import sys
import inspect
import logging
import wrapt
from .compat import contextlib, collections
from .errors import UnhandledHTTPRequestError
from .matchers import requests_match, uri, method
from .patch import CassettePatcherBuilder
from .persist import load_cassette, save_cassette
from .serializers import yamlserializer
from .util import partition_dict
log = logging.getLogger(__name__)
class CassetteContextDecorator(object):
    """Context manager/decorator that handles installing the cassette and
    removing cassettes.

    This class defers the creation of a new cassette instance until
    the point at which it is installed by context manager or
    decorator. The fact that a new cassette is used with each
    application prevents the state of any cassette from interfering
    with another.

    Instances of this class are NOT reentrant as context managers.
    However, functions that are decorated by
    ``CassetteContextDecorator`` instances ARE reentrant. See the
    implementation of ``__call__`` on this class for more details.
    There is also a guard against attempts to reenter instances of
    this class as a context manager in ``__exit__``.
    """

    # kwargs consumed here rather than passed on to the Cassette constructor
    _non_cassette_arguments = ('path_transformer', 'func_path_generator')

    @classmethod
    def from_args(cls, cassette_class, **kwargs):
        """Build a decorator whose cassette kwargs are fixed up front."""
        return cls(cassette_class, lambda: dict(kwargs))

    def __init__(self, cls, args_getter):
        self.cls = cls
        self._args_getter = args_getter
        self.__finish = None

    def _patch_generator(self, cassette):
        """Generator driven by __enter__/__exit__: patches on first next(),
        unpatches and saves the cassette on the second."""
        with contextlib.ExitStack() as exit_stack:
            for patcher in CassettePatcherBuilder(cassette).build():
                exit_stack.enter_context(patcher)
            log_format = '{action} context for cassette at {path}.'
            log.debug(log_format.format(
                action="Entering", path=cassette._path
            ))
            yield cassette
            log.debug(log_format.format(
                action="Exiting", path=cassette._path
            ))
            # TODO(@IvanMalison): Hmmm. it kind of feels like this should be
            # somewhere else.
            cassette._save()

    def __enter__(self):
        # This assertion is here to prevent the dangerous behavior
        # that would result from forgetting about a __finish before
        # completing it.
        # How might this condition be met? Here is an example:
        # context_decorator = Cassette.use('whatever')
        # with context_decorator:
        #     with context_decorator:
        #         pass
        assert self.__finish is None, "Cassette already open."
        other_kwargs, cassette_kwargs = partition_dict(
            lambda key, _: key in self._non_cassette_arguments,
            self._args_getter()
        )
        if other_kwargs.get('path_transformer'):
            transformer = other_kwargs['path_transformer']
            cassette_kwargs['path'] = transformer(cassette_kwargs['path'])
        self.__finish = self._patch_generator(self.cls.load(**cassette_kwargs))
        return next(self.__finish)

    def __exit__(self, *args):
        next(self.__finish, None)
        self.__finish = None

    @wrapt.decorator
    def __call__(self, function, instance, args, kwargs):
        # This awkward cloning thing is done to ensure that decorated
        # functions are reentrant. This is required for thread
        # safety and the correct operation of recursive functions.
        args_getter = self._build_args_getter_for_decorator(function)
        return type(self)(self.cls, args_getter)._execute_function(
            function, args, kwargs
        )

    def _execute_function(self, function, args, kwargs):
        # coroutines need per-step cassette management; plain functions
        # just run inside one with-block
        if inspect.isgeneratorfunction(function):
            handler = self._handle_coroutine
        else:
            handler = self._handle_function
        return handler(function, args, kwargs)

    def _handle_coroutine(self, function, args, kwargs):
        """Wraps a coroutine so that we're inside the cassette context for the
        duration of the coroutine.
        """
        with self as cassette:
            coroutine = self.__handle_function(cassette, function, args, kwargs)
            # We don't need to catch StopIteration. The caller (Tornado's
            # gen.coroutine, for example) will handle that.
            to_yield = next(coroutine)
            while True:
                try:
                    to_send = yield to_yield
                except Exception:
                    to_yield = coroutine.throw(*sys.exc_info())
                else:
                    to_yield = coroutine.send(to_send)

    def __handle_function(self, cassette, function, args, kwargs):
        # inject=True passes the live cassette as the first argument
        if cassette.inject:
            return function(cassette, *args, **kwargs)
        else:
            return function(*args, **kwargs)

    def _handle_function(self, function, args, kwargs):
        with self as cassette:
            # Propagate the wrapped function's result: without this explicit
            # return, every non-generator function decorated with a cassette
            # silently returned None.
            return self.__handle_function(cassette, function, args, kwargs)

    @staticmethod
    def get_function_name(function):
        """Default cassette-path generator: the function's own name."""
        return function.__name__

    def _build_args_getter_for_decorator(self, function):
        def new_args_getter():
            kwargs = self._args_getter()
            if 'path' not in kwargs:
                name_generator = (kwargs.get('func_path_generator') or
                                  self.get_function_name)
                path = name_generator(function)
                kwargs['path'] = path
            return kwargs
        return new_args_getter
class Cassette(object):
    """A container for recorded requests and responses"""
    @classmethod
    def load(cls, **kwargs):
        """Instantiate and load the cassette stored at the specified path."""
        new_cassette = cls(**kwargs)
        new_cassette._load()
        return new_cassette
    @classmethod
    def use_arg_getter(cls, arg_getter):
        # Like use(), but the cassette kwargs are produced lazily.
        return CassetteContextDecorator(cls, arg_getter)
    @classmethod
    def use(cls, **kwargs):
        # Returns an object usable both as a context manager and decorator.
        return CassetteContextDecorator.from_args(cls, **kwargs)
    def __init__(self, path, serializer=yamlserializer, record_mode='once',
                 match_on=(uri, method), before_record_request=None,
                 before_record_response=None, custom_patches=(),
                 inject=False):
        self._path = path
        self._serializer = serializer
        # Matcher callables used to pair incoming requests with recordings.
        self._match_on = match_on
        # Filters default to identity functions when not supplied.
        self._before_record_request = before_record_request or (lambda x: x)
        self._before_record_response = before_record_response or (lambda x: x)
        self.inject = inject
        self.record_mode = record_mode
        self.custom_patches = custom_patches
        # self.data is the list of (req, resp) tuples
        self.data = []
        # Playback count per recorded interaction index.
        self.play_counts = collections.Counter()
        # True once unsaved interactions have been appended.
        self.dirty = False
        # True once the cassette has been (re)loaded from disk.
        self.rewound = False
    @property
    def play_count(self):
        # Total number of playbacks across all recorded responses.
        return sum(self.play_counts.values())
    @property
    def all_played(self):
        """Returns True if all responses have been played, False otherwise."""
        return self.play_count == len(self)
    @property
    def requests(self):
        return [request for (request, response) in self.data]
    @property
    def responses(self):
        return [response for (request, response) in self.data]
    @property
    def write_protected(self):
        # 'once' cassettes become read-only after a rewind; 'none' always is.
        return self.rewound and self.record_mode == 'once' or \
            self.record_mode == 'none'
    def append(self, request, response):
        """Add a request, response pair to this cassette"""
        request = self._before_record_request(request)
        if not request:
            # The filter vetoed recording this request entirely.
            return
        response = self._before_record_response(response)
        self.data.append((request, response))
        self.dirty = True
    def filter_request(self, request):
        # Apply the before_record_request filter; may return a falsy value.
        return self._before_record_request(request)
    def _responses(self, request):
        """
        internal API, returns an iterator with all responses matching
        the request.
        """
        request = self._before_record_request(request)
        for index, (stored_request, response) in enumerate(self.data):
            if requests_match(request, stored_request, self._match_on):
                yield index, response
    def can_play_response_for(self, request):
        # Playback requires: the request survives filtering, an unplayed
        # recording exists, we are not re-recording ('all'), and the
        # cassette has actually been loaded from disk.
        request = self._before_record_request(request)
        return request and request in self and \
            self.record_mode != 'all' and \
            self.rewound
    def play_response(self, request):
        """
        Get the response corresponding to a request, but only if it
        hasn't been played back before, and mark it as played
        """
        for index, response in self._responses(request):
            if self.play_counts[index] == 0:
                self.play_counts[index] += 1
                return response
        # The cassette doesn't contain the request asked for.
        raise UnhandledHTTPRequestError(
            "The cassette (%r) doesn't contain the request (%r) asked for"
            % (self._path, request)
        )
    def responses_of(self, request):
        """
        Find the responses corresponding to a request.
        This function isn't actually used by VCR internally, but is
        provided as an external API.
        """
        responses = [response for index, response in self._responses(request)]
        if responses:
            return responses
        # The cassette doesn't contain the request asked for.
        raise UnhandledHTTPRequestError(
            "The cassette (%r) doesn't contain the request (%r) asked for"
            % (self._path, request)
        )
    def _as_dict(self):
        # Serializer-friendly view of the recorded interactions.
        return {"requests": self.requests, "responses": self.responses}
    def _save(self, force=False):
        # Persist to disk; skipped when nothing changed unless forced.
        if force or self.dirty:
            save_cassette(
                self._path,
                self._as_dict(),
                serializer=self._serializer
            )
            self.dirty = False
    def _load(self):
        try:
            requests, responses = load_cassette(
                self._path,
                serializer=self._serializer
            )
            for request, response in zip(requests, responses):
                self.append(request, response)
            # Loading alone must not mark the cassette dirty.
            self.dirty = False
            self.rewound = True
        except IOError:
            # No cassette file yet: start empty and record from scratch.
            pass
    def __str__(self):
        return "<Cassette containing {0} recorded response(s)>".format(
            len(self)
        )
    def __len__(self):
        """Return the number of request,response pairs stored in here"""
        return len(self.data)
    def __contains__(self, request):
        """Return whether or not a request has been stored"""
        # Only recordings with an unplayed response count as "contained".
        for index, response in self._responses(request):
            if self.play_counts[index] == 0:
                return True
        return False
| mit |
p12tic/zeroclickinfo-fathead | lib/fathead/iso_3166_codes/parse.py | 8 | 1518 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Released under the GPL v2 license
# https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
import lxml.etree, lxml.html
import re
from unidecode import unidecode
url = "http://www.iso.org/iso/list-en1-semic-3.txt"
title = "ISO 3166 Country Codes"
article_type = "A"
outp = "output.txt"
inp = "download/raw.data"
#Open input file
input_file = open( inp, "r" )
#Read and throw out first line
input_file.readline()
output_file = open( outp, "w")
#Loop thru the remainder of the file, format each line
#and print it to the output file.
for line in input_file.readlines() :
line = line.strip();
pair = line.split( ';' );
if len( pair ) < 2 :
continue;
pair[0] = unidecode(pair[0])
abstract = "\"" + pair[1] + "\" is the ISO 3166 country code for \"" + pair[0].title() + ".\""
output_file.write( "\t".join([
pair[1], # Title
article_type, # Type
'', # Redirect
'', # Other uses
'', # Categories
'', # References
'', # See also
'', # Further reading
'', # External links
'', # Disambiguation
'', # Images
abstract, # Abstract
url, # Source URL
] ))
output_file.write( "\n" );
input_file.close();
output_file.close();
| apache-2.0 |
Elettronik/SickRage | lib/pyasn1/type/base.py | 185 | 9499 | # Base classes for ASN.1 types
import sys
from pyasn1.type import constraint, tagmap
from pyasn1 import error
class Asn1Item: pass
class Asn1ItemBase(Asn1Item):
    """Common plumbing for ASN.1 type objects: tag set plus value constraints."""
    # Set of tags for this ASN.1 type
    tagSet = ()
    # A list of constraint.Constraint instances for checking values
    subtypeSpec = constraint.ConstraintsIntersection()
    # Used for ambiguous ASN.1 types identification
    typeId = None

    def __init__(self, tagSet=None, subtypeSpec=None):
        # Per-instance overrides fall back to the class-level defaults.
        self._tagSet = self.tagSet if tagSet is None else tagSet
        self._subtypeSpec = self.subtypeSpec if subtypeSpec is None else subtypeSpec

    def _verifySubtypeSpec(self, value, idx=None):
        try:
            self._subtypeSpec(value, idx)
        except error.PyAsn1Error:
            exc_class, exc_value, _ = sys.exc_info()
            # Re-raise the same exception class, annotated with our type name.
            raise exc_class('%s at %s' % (exc_value, self.__class__.__name__))

    def getSubtypeSpec(self):
        return self._subtypeSpec

    def getTagSet(self):
        return self._tagSet

    def getEffectiveTagSet(self):
        # used by untagged types
        return self._tagSet

    def getTagMap(self):
        return tagmap.TagMap({self._tagSet: self})

    def isSameTypeWith(self, other):
        if self is other:
            return True
        return self._tagSet == other.getTagSet() and \
            self._subtypeSpec == other.getSubtypeSpec()

    def isSuperTypeOf(self, other):
        """Returns true if argument is a ASN1 subtype of ourselves"""
        return self._tagSet.isSuperTagSetOf(other.getTagSet()) and \
            self._subtypeSpec.isSuperTypeOf(other.getSubtypeSpec())
class __NoValue:
    # Sentinel type: any attempt to use the value fails loudly, so an
    # uninitialized ASN.1 value cannot be consumed by accident.
    def __getattr__(self, attr):
        raise error.PyAsn1Error('No value for %s()' % attr)
    def __getitem__(self, i):
        raise error.PyAsn1Error('No value')
# Module-wide singleton standing in for "no value assigned yet".
noValue = __NoValue()
# Base class for "simple" ASN.1 objects. These are immutable.
class AbstractSimpleAsn1Item(Asn1ItemBase):
defaultValue = noValue
def __init__(self, value=None, tagSet=None, subtypeSpec=None):
Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
if value is None or value is noValue:
value = self.defaultValue
if value is None or value is noValue:
self.__hashedValue = value = noValue
else:
value = self.prettyIn(value)
self._verifySubtypeSpec(value)
self.__hashedValue = hash(value)
self._value = value
self._len = None
def __repr__(self):
if self._value is noValue:
return self.__class__.__name__ + '()'
else:
return self.__class__.__name__ + '(%s)' % (self.prettyOut(self._value),)
def __str__(self): return str(self._value)
def __eq__(self, other):
return self is other and True or self._value == other
def __ne__(self, other): return self._value != other
def __lt__(self, other): return self._value < other
def __le__(self, other): return self._value <= other
def __gt__(self, other): return self._value > other
def __ge__(self, other): return self._value >= other
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._value)
else:
def __bool__(self): return bool(self._value)
def __hash__(self): return self.__hashedValue
def clone(self, value=None, tagSet=None, subtypeSpec=None):
if value is None and tagSet is None and subtypeSpec is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
return self.__class__(value, tagSet, subtypeSpec)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
return self.__class__(value, tagSet, subtypeSpec)
def prettyIn(self, value): return value
def prettyOut(self, value): return str(value)
def prettyPrint(self, scope=0):
if self._value is noValue:
return '<no value>'
else:
return self.prettyOut(self._value)
# XXX Compatibility stub
def prettyPrinter(self, scope=0): return self.prettyPrint(scope)
#
# Constructed types:
# * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice
# * ASN1 types and values are represented by Python class instances
# * Value initialization is made for defaulted components only
# * Primary method of component addressing is by-position. Data model for base
# type is Python sequence. Additional type-specific addressing methods
# may be implemented for particular types.
# * SequenceOf and SetOf types do not implement any additional methods
# * Sequence, Set and Choice types also implement by-identifier addressing
# * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing
# * Sequence and Set types may include optional and defaulted
# components
# * Constructed types hold a reference to component types used for value
# verification and ordering.
# * Component type is a scalar type for SequenceOf/SetOf types and a list
# of types for Sequence/Set/Choice.
#
class AbstractConstructedAsn1Item(Asn1ItemBase):
    """Base class for constructed ASN.1 objects holding component values."""
    componentType = None
    sizeSpec = constraint.ConstraintsIntersection()
    def __init__(self, componentType=None, tagSet=None,
                 subtypeSpec=None, sizeSpec=None):
        Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
        if componentType is None:
            self._componentType = self.componentType
        else:
            self._componentType = componentType
        if sizeSpec is None:
            self._sizeSpec = self.sizeSpec
        else:
            self._sizeSpec = sizeSpec
        # Per-position component values; None marks an unset position.
        self._componentValues = []
        self._componentValuesSet = 0
    def __repr__(self):
        # Reconstruct a setComponentByPosition() call chain for set slots.
        r = self.__class__.__name__ + '()'
        for idx in range(len(self._componentValues)):
            if self._componentValues[idx] is None:
                continue
            r = r + '.setComponentByPosition(%s, %r)' % (
                idx, self._componentValues[idx]
            )
        return r
    def __eq__(self, other):
        return self is other and True or self._componentValues == other
    def __ne__(self, other): return self._componentValues != other
    def __lt__(self, other): return self._componentValues < other
    def __le__(self, other): return self._componentValues <= other
    def __gt__(self, other): return self._componentValues > other
    def __ge__(self, other): return self._componentValues >= other
    # Truth protocol differs by Python major version.
    if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(self._componentValues)
    else:
        def __bool__(self): return bool(self._componentValues)
    def getComponentTagMap(self):
        raise error.PyAsn1Error('Method not implemented')
    def _cloneComponentValues(self, myClone, cloneValueFlag): pass
    def clone(self, tagSet=None, subtypeSpec=None, sizeSpec=None,
              cloneValueFlag=None):
        # None means "keep current"; cloneValueFlag controls whether the
        # component values are copied into the clone as well.
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if sizeSpec is None:
            sizeSpec = self._sizeSpec
        r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec)
        if cloneValueFlag:
            self._cloneComponentValues(r, cloneValueFlag)
        return r
    def subtype(self, implicitTag=None, explicitTag=None, subtypeSpec=None,
                sizeSpec=None, cloneValueFlag=None):
        # Like clone(), but tags are added and constraints combined.
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        if sizeSpec is None:
            sizeSpec = self._sizeSpec
        else:
            sizeSpec = sizeSpec + self._sizeSpec
        r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec)
        if cloneValueFlag:
            self._cloneComponentValues(r, cloneValueFlag)
        return r
    def _verifyComponent(self, idx, value): pass
    def verifySizeSpec(self): self._sizeSpec(self)
    def getComponentByPosition(self, idx):
        raise error.PyAsn1Error('Method not implemented')
    def setComponentByPosition(self, idx, value, verifyConstraints=True):
        raise error.PyAsn1Error('Method not implemented')
    def getComponentType(self): return self._componentType
    def __getitem__(self, idx): return self.getComponentByPosition(idx)
    def __setitem__(self, idx, value): self.setComponentByPosition(idx, value)
    def __len__(self): return len(self._componentValues)
    def clear(self):
        # Drop all components and reset the set-components counter.
        self._componentValues = []
        self._componentValuesSet = 0
    def setDefaultComponents(self): pass
| gpl-3.0 |
shankari/e-mission-server | emission/net/auth/google_auth.py | 2 | 3146 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
import logging
import json
import traceback
import requests
# For decoding JWTs on the client side
import google.oauth2.id_token as goi
import google.auth.transport.requests as gatr
class GoogleAuthMethod(object):
    """Validate Google sign-in ID tokens for Android and iOS clients.

    Tokens are verified locally with the google-auth library first; on
    any failure we fall back to Google's tokeninfo HTTP endpoint.
    """
    def __init__(self):
        # `with` guarantees the key file is closed even if JSON parsing
        # raises (the previous explicit close() would be skipped then).
        with open('conf/net/auth/google_auth.json') as key_file:
            key_data = json.load(key_file)
        self.client_key = key_data["client_key"]
        self.client_key_old = key_data["client_key_old"]
        self.ios_client_key = key_data["ios_client_key"]
        self.ios_client_key_new = key_data["ios_client_key_new"]
        self.valid_keys = [self.client_key, self.client_key_old,
                           self.ios_client_key, self.ios_client_key_new]
    # Code snippet from
    # https://developers.google.com/identity/sign-in/android/backend-auth
    def __verifyTokenFields(self, tokenFields, audienceKey, issKey):
        """Check audience and issuer of decoded token fields; return the email.

        Raises ValueError if the token is malformed, aimed at a client
        we don't know, or issued by an unexpected issuer.
        """
        if audienceKey not in tokenFields:
            raise ValueError("Invalid token %s, does not contain %s" %
                (tokenFields, audienceKey))
        in_client_key = tokenFields[audienceKey]
        if in_client_key not in self.valid_keys:
            raise ValueError("Incoming client key %s not in valid list %s" %
                (in_client_key, self.valid_keys))
        if issKey not in tokenFields:
            raise ValueError("Invalid token %s" % tokenFields)
        in_issuer = tokenFields[issKey]
        issuer_valid_list = ['accounts.google.com', 'https://accounts.google.com']
        if in_issuer not in issuer_valid_list:
            raise ValueError('Wrong issuer %s, expected %s' % (in_issuer, issuer_valid_list))
        return tokenFields['email']
    def verifyUserToken(self, token):
        """Return the verified email address for *token*, or raise."""
        try:
            # attempt to validate token on the client-side
            logging.debug("Using the google auth library to verify id token of length %d from android phones" % len(token))
            tokenFields = goi.verify_oauth2_token(token, gatr.Request())
            logging.debug("tokenFields from library = %s" % tokenFields)
            verifiedEmail = self.__verifyTokenFields(tokenFields, "aud", "iss")
            logging.debug("Found user email %s" % tokenFields['email'])
            return verifiedEmail
        except Exception:
            # NOTE: was a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt; `Exception` keeps the
            # fallback behavior while letting those propagate.
            logging.debug("OAuth failed to verify id token, falling back to constructedURL")
            # fallback to verifying using Google API
            constructedURL = ("https://www.googleapis.com/oauth2/v1/tokeninfo?id_token=%s" % token)
            r = requests.get(constructedURL)
            tokenFields = json.loads(r.content)
            logging.debug("tokenFields from constructedURL= %s" % tokenFields)
            verifiedEmail = self.__verifyTokenFields(tokenFields, "audience", "issuer")
            logging.debug("Found user email %s" % tokenFields['email'])
            return verifiedEmail
| bsd-3-clause |
cstrap/python-vuejs | docs/conf.py | 1 | 8556 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# python_vuejs documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
# Resolve the repository root so the in-tree package is importable below.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import python_vuejs
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python and Vue.js integration'
copyright = u"2017, Christian Strappazzon"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = python_vuejs.__version__
# The full version, including alpha/beta/rc tags.
release = python_vuejs.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'python_vuejsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'python_vuejs.tex',
     u'Python and Vue.js integration Documentation',
     u'Christian Strappazzon', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'python_vuejs',
     u'Python and Vue.js integration Documentation',
     [u'Christian Strappazzon'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'python_vuejs',
     u'Python and Vue.js integration Documentation',
     u'Christian Strappazzon',
     'python_vuejs',
     'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
JackGavin13/octoprint-test-not-finished | src/octoprint_setuptools/__init__.py | 4 | 17753 | # coding=utf-8
from __future__ import absolute_import, division, print_function
__author__ = "Gina Hรคuรge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2015 The OctoPrint Project - Released under terms of the AGPLv3 License"
import os
import shutil
import glob
from setuptools import Command
from distutils.command.clean import clean as _clean
def package_data_dirs(source, sub_folders):
    """Collect all file paths below ``source/<sub_folder>``, relative to *source*.

    Intended for building ``package_data`` entries for setup().  Sub
    folders that don't exist are silently skipped.  Returns a (possibly
    empty) list of relative file paths.
    """
    # NOTE: the redundant function-local `import os` was removed; `os`
    # is already imported at module level.
    dirs = []
    for d in sub_folders:
        folder = os.path.join(source, d)
        if not os.path.exists(folder):
            continue
        for dirname, _, files in os.walk(folder):
            dirname = os.path.relpath(dirname, source)
            for f in files:
                dirs.append(os.path.join(dirname, f))
    return dirs
def recursively_handle_files(directory, file_matcher, folder_matcher=None, folder_handler=None, file_handler=None):
    """Depth-first walk over *directory*, applying the given handlers.

    ``file_handler`` is called with the path of every file whose name
    satisfies ``file_matcher``.  ``folder_matcher`` (if given) decides
    which sub folders are descended into; ``folder_handler`` is invoked
    for each visited sub folder *after* its contents were processed,
    together with a flag telling whether any handler fired inside it.

    Returns True if at least one handler was applied somewhere below
    *directory*, False otherwise.
    """
    applied_handler = False
    for filename in os.listdir(directory):
        path = os.path.join(directory, filename)
        if file_handler is not None and file_matcher(filename):
            file_handler(path)
            applied_handler = True
        elif os.path.isdir(path) and (folder_matcher is None or folder_matcher(directory, filename, path)):
            # Forward folder_matcher into the recursion (it was dropped
            # before, so e.g. ".git" exclusion only applied at the top
            # level instead of at every depth).
            sub_applied_handler = recursively_handle_files(path, file_matcher, folder_matcher=folder_matcher, folder_handler=folder_handler, file_handler=file_handler)
            if sub_applied_handler:
                applied_handler = True
            if folder_handler is not None:
                folder_handler(path, sub_applied_handler)
    return applied_handler
class CleanCommand(_clean):
    """Extended distutils clean: also removes eggs and stray ``*.pyc`` files."""
    user_options = _clean.user_options + [("orig", None, "behave like original clean command"),
                                          ("noeggs", None, "don't clean up eggs"),
                                          ("nopyc", None, "don't clean up pyc files")]
    boolean_options = _clean.boolean_options + ["orig", "noeggs", "nopyc"]
    # Folder scanned for *.pyc files; overridable via for_options().
    source_folder = "src"
    # Glob patterns of egg artifacts to delete; set via for_options().
    eggs =None
    @classmethod
    def for_options(cls, source_folder="src", eggs=None):
        # Build a pre-configured subclass for use in setup(cmdclass=...).
        if eggs is None:
            eggs = []
        return type(cls)(cls.__name__, (cls,), dict(
            source_folder=source_folder,
            eggs=eggs
        ))
    def initialize_options(self):
        _clean.initialize_options(self)
        self.orig = None
        self.noeggs = None
        self.nopyc = None
    def finalize_options(self):
        _clean.finalize_options(self)
        if not self.orig:
            # Unless --orig was given, behave like `clean --all`.
            self.all = True
    def run(self):
        _clean.run(self)
        if self.orig:
            # --orig: stop after the stock distutils clean behavior.
            return
        # eggs
        if not self.noeggs:
            for egg in self.eggs:
                globbed_eggs = glob.glob(egg)
                for globbed_egg in globbed_eggs:
                    print("deleting '%s' egg" % globbed_egg)
                    if not self.dry_run:
                        shutil.rmtree(globbed_egg)
        # pyc files
        if not self.nopyc:
            def delete_folder_if_empty(path, applied_handler):
                # Only prune folders in which something was actually removed.
                if not applied_handler:
                    return
                if len(os.listdir(path)) == 0:
                    if not self.dry_run:
                        shutil.rmtree(path)
                    print("removed %s since it was empty" % path[len(self.source_folder):])
            def delete_file(path):
                print("removing '%s'" % path[len(self.source_folder):])
                if not self.dry_run:
                    os.remove(path)
            import fnmatch
            print("recursively removing *.pyc from '%s'" % self.source_folder)
            recursively_handle_files(
                os.path.abspath(self.source_folder),
                lambda name: fnmatch.fnmatch(name.lower(), "*.pyc"),
                folder_matcher=lambda dir, name, path: name != ".git",
                folder_handler=delete_folder_if_empty,
                file_handler=delete_file
            )
class NewTranslation(Command):
    """Setuptools command creating a new translation catalog via babel."""
    description = "create a new translation"
    user_options = [
        ('locale=', 'l', 'locale for the new translation'),
    ]
    boolean_options = []
    # Filled in via for_options(): POT template path and target directory.
    pot_file = None
    output_dir = None
    @classmethod
    def for_options(cls, pot_file=None, output_dir=None):
        # Build a pre-configured subclass for use in setup(cmdclass=...).
        if pot_file is None:
            raise ValueError("pot_file must not be None")
        if output_dir is None:
            raise ValueError("output_dir must not be None")
        return type(cls)(cls.__name__, (cls,), dict(
            pot_file=pot_file,
            output_dir=output_dir
        ))
    def __init__(self, dist, **kw):
        # The actual work is delegated to babel's init_catalog command.
        from babel.messages import frontend as babel
        self.babel_init_messages = babel.init_catalog(dist)
        Command.__init__(self, dist, **kw)
    def initialize_options(self):
        self.locale = None
        self.babel_init_messages.initialize_options()
    def finalize_options(self):
        self.babel_init_messages.locale = self.locale
        self.babel_init_messages.input_file = self.__class__.pot_file
        self.babel_init_messages.output_dir = self.__class__.output_dir
        self.babel_init_messages.finalize_options()
    def run(self):
        self.babel_init_messages.run()
class ExtractTranslation(Command):
    """Setuptools command extracting translatable strings into a POT file."""
    description = "extract translations"
    user_options = []
    boolean_options = []
    # Defaults; overridden per project via for_options().
    mail_address = "i18n@octoprint.org"
    copyright_holder = "The OctoPrint Project"
    mapping_file = None
    pot_file = None
    input_dirs = None
    @classmethod
    def for_options(cls, mail_address="i18n@octoprint.org", copyright_holder="The OctoPrint Project", mapping_file=None, pot_file=None, input_dirs=None):
        # Build a pre-configured subclass for use in setup(cmdclass=...).
        if mapping_file is None:
            raise ValueError("mapping_file must not be None")
        if pot_file is None:
            raise ValueError("pot_file must not be None")
        if input_dirs is None:
            raise ValueError("input_dirs must not be None")
        return type(cls)(cls.__name__, (cls,), dict(
            mapping_file=mapping_file,
            pot_file=pot_file,
            input_dirs=input_dirs,
            mail_address=mail_address,
            copyright_holder=copyright_holder
        ))
    def __init__(self, dist, **kw):
        # The actual work is delegated to babel's extract_messages command.
        from babel.messages import frontend as babel
        self.babel_extract_messages = babel.extract_messages(dist)
        Command.__init__(self, dist, **kw)
    def initialize_options(self):
        self.babel_extract_messages.initialize_options()
    def finalize_options(self):
        self.babel_extract_messages.mapping_file = self.__class__.mapping_file
        self.babel_extract_messages.output_file = self.__class__.pot_file
        self.babel_extract_messages.input_dirs = self.__class__.input_dirs
        self.babel_extract_messages.msgid_bugs_address = self.__class__.mail_address
        self.babel_extract_messages.copyright_holder = self.__class__.copyright_holder
        self.babel_extract_messages.finalize_options()
    def run(self):
        self.babel_extract_messages.run()
class RefreshTranslation(Command):
    """Setuptools command re-extracting strings and updating existing catalogs."""
    description = "refresh translations"
    user_options = [
        ('locale=', 'l', 'locale for the translation to refresh'),
    ]
    boolean_options = []
    # Defaults; overridden per project via for_options().
    mail_address = "i18n@octoprint.org"
    copyright_holder = "The OctoPrint Project"
    mapping_file = None
    pot_file = None
    input_dirs = None
    output_dir = None
    @classmethod
    def for_options(cls, mail_address="i18n@octoprint.org", copyright_holder="The OctoPrint Project", mapping_file=None, pot_file=None, input_dirs=None, output_dir=None):
        # Build a pre-configured subclass for use in setup(cmdclass=...).
        if mapping_file is None:
            raise ValueError("mapping_file must not be None")
        if pot_file is None:
            raise ValueError("pot_file must not be None")
        if input_dirs is None:
            raise ValueError("input_dirs must not be None")
        if output_dir is None:
            raise ValueError("output_dir must not be None")
        return type(cls)(cls.__name__, (cls,), dict(
            mapping_file=mapping_file,
            pot_file=pot_file,
            input_dirs=input_dirs,
            mail_address=mail_address,
            copyright_holder=copyright_holder,
            output_dir=output_dir
        ))
    def __init__(self, dist, **kw):
        # Refresh = extract_messages followed by update_catalog.
        from babel.messages import frontend as babel
        self.babel_extract_messages = babel.extract_messages(dist)
        self.babel_update_messages = babel.update_catalog(dist)
        Command.__init__(self, dist, **kw)
    def initialize_options(self):
        self.locale = None
        self.babel_extract_messages.initialize_options()
        self.babel_update_messages.initialize_options()
    def finalize_options(self):
        self.babel_extract_messages.mapping_file = self.__class__.mapping_file
        self.babel_extract_messages.output_file = self.__class__.pot_file
        self.babel_extract_messages.input_dirs = self.__class__.input_dirs
        self.babel_extract_messages.msgid_bugs_address = self.__class__.mail_address
        self.babel_extract_messages.copyright_holder = self.__class__.copyright_holder
        self.babel_extract_messages.finalize_options()
        self.babel_update_messages.input_file = self.__class__.pot_file
        self.babel_update_messages.output_dir = self.__class__.output_dir
        self.babel_update_messages.locale = self.locale
        self.babel_update_messages.finalize_options()
    def run(self):
        # Extract fresh strings first, then merge them into the catalogs.
        self.babel_extract_messages.run()
        self.babel_update_messages.run()
class CompileTranslation(Command):
	"""Distutils command that compiles all PO catalogs to MO files.

	Thin wrapper around babel's ``compile_catalog``; use
	:meth:`for_options` to bake the translations directory into a
	concrete subclass.
	"""

	description = "compile translations"
	user_options = []
	boolean_options = []
	# Set on the subclass generated by for_options().
	output_dir = None
	@classmethod
	def for_options(cls, output_dir=None):
		"""Return a subclass preconfigured with the translations directory."""
		if output_dir is None:
			raise ValueError("output_dir must not be None")
		return type(cls)(cls.__name__, (cls,), dict(
			output_dir=output_dir
		))
	def __init__(self, dist, **kw):
		# Deferred import so babel is only needed when the command runs.
		from babel.messages import frontend as babel
		self.babel_compile_messages = babel.compile_catalog(dist)
		Command.__init__(self, dist, **kw)
	def initialize_options(self):
		self.babel_compile_messages.initialize_options()
	def finalize_options(self):
		self.babel_compile_messages.directory = self.__class__.output_dir
		self.babel_compile_messages.finalize_options()
	def run(self):
		self.babel_compile_messages.run()
class BundleTranslation(Command):
	"""Distutils command that copies a finished translation from the
	source tree into the bundled (shipped) translation folder.

	Use :meth:`for_options` to create a concrete subclass with
	``source_dir`` and ``target_dir`` baked in as class attributes.
	"""

	description = "bundles translations"
	user_options = [
		('locale=', 'l', 'locale for the translation to bundle')
	]
	boolean_options = []
	# Set on the subclass generated by for_options().
	source_dir = None
	target_dir = None
	@classmethod
	def for_options(cls, source_dir=None, target_dir=None):
		"""Return a subclass preconfigured with source and target dirs."""
		if source_dir is None:
			raise ValueError("source_dir must not be None")
		if target_dir is None:
			raise ValueError("target_dir must not be None")
		return type(cls)(cls.__name__, (cls,), dict(
			source_dir=source_dir,
			target_dir=target_dir
		))
	def initialize_options(self):
		self.locale = None
	def finalize_options(self):
		# Fail early with a clear message instead of letting run() crash
		# with a TypeError from os.path.join(..., None). Mirrors the
		# validation contract of PackTranslation.
		if self.locale is None:
			raise ValueError("locale must be provided")
	def run(self):
		"""Replace the bundled copy of the locale with the source copy."""
		locale = self.locale
		source_path = os.path.join(self.__class__.source_dir, locale)
		target_path = os.path.join(self.__class__.target_dir, locale)
		if not os.path.exists(source_path):
			raise RuntimeError("source path " + source_path + " does not exist")
		if os.path.exists(target_path):
			if not os.path.isdir(target_path):
				raise RuntimeError("target path " + target_path + " exists and is not a directory")
			# Remove the stale copy first so deleted files don't linger.
			shutil.rmtree(target_path)
		print("Copying translations for locale {locale} from {source_path} to {target_path}...".format(**locals()))
		shutil.copytree(source_path, target_path)
class PackTranslation(Command):
	"""Distutils command that zips one translation into a language pack.

	The pack contains the locale folder (prefixed with
	``pack_path_prefix``) plus a generated ``meta.yaml`` holding the last
	update timestamp and, if given, the ``--author``. Use
	:meth:`for_options` to bake ``source_dir`` and the prefixes into a
	concrete subclass.
	"""

	description = "creates language packs for translations"
	user_options = [
		('locale=', 'l', 'locale for the translation to pack'),
		('author=', 'a', 'author of the translation'),
		('target=', 't', 'target folder for the pack')
	]
	boolean_options = []
	# Set on the subclass generated by for_options().
	source_dir = None
	pack_name_prefix = None
	pack_path_prefix = None
	@classmethod
	def for_options(cls, source_dir=None, pack_name_prefix=None, pack_path_prefix=None):
		"""Return a subclass preconfigured with source dir and prefixes."""
		if source_dir is None:
			raise ValueError("source_dir must not be None")
		if pack_name_prefix is None:
			raise ValueError("pack_name_prefix must not be None")
		if pack_path_prefix is None:
			raise ValueError("pack_path_prefix must not be None")
		return type(cls)(cls.__name__, (cls,), dict(
			source_dir=source_dir,
			pack_name_prefix=pack_name_prefix,
			pack_path_prefix=pack_path_prefix
		))
	def initialize_options(self):
		self.locale = None
		self.author = None
		self.target = None
	def finalize_options(self):
		if self.locale is None:
			raise ValueError("locale must be provided")
	def run(self):
		"""Create ``<target>/<prefix><locale>_<timestamp>.zip``."""
		locale = self.locale
		locale_dir = os.path.join(self.__class__.source_dir, locale)
		if not os.path.isdir(locale_dir):
			raise RuntimeError("translation does not exist, please create it first")
		import datetime
		# Timestamp is used both in the archive name and in meta.yaml.
		now = datetime.datetime.utcnow().replace(microsecond=0)
		if self.target is None:
			self.target = self.__class__.source_dir
		zip_path = os.path.join(self.target, "{prefix}{locale}_{date}.zip".format(prefix=self.__class__.pack_name_prefix, locale=locale, date=now.strftime("%Y%m%d%H%M%S")))
		print("Packing translation to {zip_path}".format(**locals()))
		def add_recursively(zipf, path, prefix):
			# Walks the locale folder; named "zipf" so we do not shadow
			# the builtin zip().
			if not os.path.isdir(path):
				return
			for entry in os.listdir(path):
				entry_path = os.path.join(path, entry)
				new_prefix = prefix + "/" + entry
				if os.path.isdir(entry_path):
					add_recursively(zipf, entry_path, new_prefix)
				elif os.path.isfile(entry_path):
					print("Adding {entry_path} as {new_prefix}".format(**locals()))
					zipf.write(entry_path, new_prefix)
		meta_str = "last_update: {date}\n".format(date=now.isoformat())
		if self.author:
			meta_str += "author: {author}\n".format(author=self.author)
		zip_locale_root = self.__class__.pack_path_prefix + locale
		import zipfile
		with zipfile.ZipFile(zip_path, "w") as zipf:
			add_recursively(zipf, locale_dir, zip_locale_root)
			print("Adding meta.yaml as {zip_locale_root}/meta.yaml".format(**locals()))
			zipf.writestr(zip_locale_root + "/meta.yaml", meta_str)
def get_babel_commandclasses(pot_file=None,
                             mapping_file="babel.cfg",
                             input_dirs=".",
                             output_dir=None,
                             pack_name_prefix=None,
                             pack_path_prefix=None,
                             bundled_dir=None,
                             mail_address="i18n@octoprint.org",
                             copyright_holder="The OctoPrint Project"):
	"""Assemble the dict of babel related command classes for setup().

	Each entry is a subclass of the respective translation command,
	preconfigured via its ``for_options`` factory. ``babel_bundle`` is
	only included when ``bundled_dir`` is given.
	"""
	commandclasses = {
		"babel_new": NewTranslation.for_options(pot_file=pot_file,
		                                        output_dir=output_dir),
		"babel_extract": ExtractTranslation.for_options(mapping_file=mapping_file,
		                                                pot_file=pot_file,
		                                                input_dirs=input_dirs,
		                                                mail_address=mail_address,
		                                                copyright_holder=copyright_holder),
		"babel_refresh": RefreshTranslation.for_options(mapping_file=mapping_file,
		                                                pot_file=pot_file,
		                                                input_dirs=input_dirs,
		                                                output_dir=output_dir,
		                                                mail_address=mail_address,
		                                                copyright_holder=copyright_holder),
		"babel_compile": CompileTranslation.for_options(output_dir=output_dir),
		"babel_pack": PackTranslation.for_options(source_dir=output_dir,
		                                          pack_name_prefix=pack_name_prefix,
		                                          pack_path_prefix=pack_path_prefix),
	}
	if bundled_dir is not None:
		commandclasses["babel_bundle"] = BundleTranslation.for_options(source_dir=output_dir,
		                                                               target_dir=bundled_dir)
	return commandclasses
def create_plugin_setup_parameters(identifier="todo", name="TODO", version="0.1", description="TODO", author="TODO",
                                   mail="todo@example.com", url="TODO", license="AGPLv3", source_folder=".", additional_data=None,
                                   additional_packages=None, ignored_packages=None, requires=None, extra_requires=None,
                                   cmdclass=None, eggs=None, package=None, dependency_links=None):
	"""Build the keyword argument dict for setuptools.setup() for an
	OctoPrint plugin.

	Normalizes all list/dict arguments (never mutating shared defaults),
	forces "OctoPrint" into ``requires``, wires up the clean and - if a
	translations folder with a messages.pot exists - the babel commands,
	and auto-discovers the plugin's sub-packages.

	Raises ValueError when requires/extra_requires/cmdclass/eggs have the
	wrong type.
	"""
	import pkg_resources
	if package is None:
		package = "octoprint_{identifier}".format(**locals())
	if additional_data is None:
		additional_data = list()
	if additional_packages is None:
		additional_packages = list()
	if ignored_packages is None:
		ignored_packages = list()
	if dependency_links is None:
		dependency_links = list()
	if requires is None:
		requires = ["OctoPrint"]
	if not isinstance(requires, list):
		raise ValueError("requires must be a list")
	if "OctoPrint" not in requires:
		requires = ["OctoPrint"] + list(requires)
	if extra_requires is None:
		extra_requires = dict()
	if not isinstance(extra_requires, dict):
		raise ValueError("extra_requires must be a dict")
	if cmdclass is None:
		cmdclass = dict()
	if not isinstance(cmdclass, dict):
		raise ValueError("cmdclass must be a dict")
	if eggs is None:
		eggs = []
	if not isinstance(eggs, list):
		raise ValueError("eggs must be a list")
	egg = "{name}*.egg-info".format(name=pkg_resources.to_filename(pkg_resources.safe_name(name)))
	if egg not in eggs:
		eggs = [egg] + eggs
	cmdclass.update(dict(
		clean=CleanCommand.for_options(source_folder=os.path.join(source_folder, package), eggs=eggs)
	))
	translation_dir = os.path.join(source_folder, "translations")
	pot_file = os.path.join(translation_dir, "messages.pot")
	bundled_dir = os.path.join(source_folder, package, "translations")
	if os.path.isdir(translation_dir) and os.path.isfile(pot_file):
		cmdclass.update(get_babel_commandclasses(pot_file=pot_file, output_dir=translation_dir, bundled_dir=bundled_dir, pack_name_prefix="{name}-i18n-".format(**locals()), pack_path_prefix="_plugins/{identifier}/".format(**locals())))
	from setuptools import find_packages
	# list(...) around filter() keeps this working on Python 3, where
	# filter() returns an iterator and cannot be concatenated to a list.
	sub_packages = list(filter(lambda x: x.startswith("{package}.".format(package=package)),
	                           find_packages(where=source_folder, exclude=ignored_packages)))
	packages = list(set([package] + sub_packages + additional_packages))
	print("Found packages: {packages!r}".format(**locals()))
	return dict(
		name=name,
		version=version,
		description=description,
		author=author,
		author_email=mail,
		url=url,
		license=license,
		# adding new commands
		cmdclass=cmdclass,
		# we only have our plugin package to install
		packages=packages,
		# we might have additional data files in sub folders that need to be installed too
		package_data={package: package_data_dirs(os.path.join(source_folder, package), ["static", "templates", "translations"] + additional_data)},
		include_package_data=True,
		# If you have any package data that needs to be accessible on the file system, such as templates or static assets
		# this plugin is not zip_safe.
		zip_safe=False,
		install_requires=requires,
		extras_require=extra_requires,
		dependency_links=dependency_links,
		# Hook the plugin into the "octoprint.plugin" entry point, mapping the plugin_identifier to the plugin_package.
		# That way OctoPrint will be able to find the plugin and load it.
		entry_points={
			"octoprint.plugin": ["{identifier} = {package}".format(**locals())]
		}
	)
| agpl-3.0 |
jvkersch/hsmmlearn | docs/conf.py | 1 | 9948 | # -*- coding: utf-8 -*-
#
# hsmmlearn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 1 17:33:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Avoid using C libraries on RTD
# Read the Docs sets READTHEDOCS=True in the build environment; in that
# case the compiled dependencies below are not importable, so we stub
# them out for autodoc.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    from mock import Mock as MagicMock
    class Mock(MagicMock):
        # Any attribute access on the mock yields a fresh MagicMock, so
        # "from numpy import <anything>" succeeds during the doc build.
        @classmethod
        def __getattr__(cls, name):
            return MagicMock()
    MOCK_MODULES = ['numpy', 'scipy', 'scipy.stats', 'matplotlib',
                    'matplotlib.pyplot', 'hsmmlearn.base']
    for mod_name in MOCK_MODULES:
        sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hsmmlearn'
copyright = u'2016, Joris Vankerschaver'
author = u'Joris Vankerschaver'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    # Read the Docs injects its own theme; locally we must install and
    # point Sphinx at sphinx_rtd_theme ourselves.
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'hsmmlearndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'hsmmlearn.tex', u'hsmmlearn Documentation',
u'Joris Vankerschaver', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hsmmlearn', u'hsmmlearn Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'hsmmlearn', u'hsmmlearn Documentation',
author, 'hsmmlearn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
zyga/snapd | tests/lib/tinyproxy/tinyproxy.py | 3 | 4050 | #!/usr/bin/python3
# Tiny HTTP Proxy. Based on the work of SUZUKI Hisao.
#
# Ported to py3 and modified to remove the bits we don't need
# and modernized.
import os
import http.server
import select
import socket
import socketserver
import sys
import urllib.parse
class ProxyHandler (http.server.BaseHTTPRequestHandler):
    """A minimal forwarding HTTP proxy handler.

    Plain requests (GET/HEAD/POST/PUT/DELETE) are re-issued to the origin
    server and the response is streamed back verbatim. CONNECT requests
    are answered with "200 Connection established" and both sockets are
    then pumped byte-for-byte in either direction until idle.
    """

    server_version = "testsproxy/1.0"

    def log_request(self, m=""):
        """Log as usual, but flush stdout/stderr so harnesses that watch
        the output see requests immediately."""
        super().log_request(m)
        sys.stdout.flush()
        sys.stderr.flush()

    def _connect_to(self, netloc, soc):
        """Connect *soc* to a "host[:port]" netloc (default port 80).

        On failure, sends a 404 to the client and returns False;
        returns True on success.
        """
        i = netloc.find(':')
        if i >= 0:
            host_port = netloc[:i], int(netloc[i+1:])
        else:
            host_port = netloc, 80
        try:
            soc.connect(host_port)
        except socket.error as err:
            # socket.error is OSError on Python 3 and is not subscriptable
            # (the old arg[1] idiom raised TypeError and fell into a bare
            # except); use the strerror attribute for the client message.
            self.send_error(404, err.strerror or str(err))
            return False
        return True

    def do_CONNECT(self):
        """Handle tunnelling (the CONNECT method, e.g. for HTTPS)."""
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if self._connect_to(self.path, soc):
                self.log_request(200)
                s = self.protocol_version + " 200 Connection established\r\n"
                self.wfile.write(s.encode())
                s = "Proxy-agent: {}\r\n".format(self.version_string())
                self.wfile.write(s.encode())
                self.wfile.write("\r\n".encode())
                # Tunnels may legitimately stay quiet longer (handshakes,
                # keep-alive), hence the larger idle budget.
                self._read_write(soc, 300)
        finally:
            soc.close()
            self.connection.close()

    def do_GET(self):
        """Forward a plain HTTP request to the origin server."""
        (scm, netloc, path, params, query, fragment) = urllib.parse.urlparse(
            self.path, 'http')
        if scm != 'http' or fragment or not netloc:
            # send_error expects a str message, not bytes.
            self.send_error(400, "bad url {}".format(self.path))
            return
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if self._connect_to(netloc, soc):
                self.log_request()
                s = "{} {} {}\r\n".format(
                    self.command,
                    urllib.parse.urlunparse(('', '', path, params, query, '')),
                    self.request_version)
                # sendall(): a bare send() may transmit only part of the
                # buffer and silently drop the rest of the request.
                soc.sendall(s.encode())
                # Force a single-use upstream connection and strip the
                # hop-by-hop proxy header.
                self.headers['Connection'] = 'close'
                del self.headers['Proxy-Connection']
                for key, val in self.headers.items():
                    soc.sendall("{}: {}\r\n".format(key, val).encode())
                soc.sendall("\r\n".encode())
                self._read_write(soc)
        finally:
            soc.close()
            self.connection.close()

    def _read_write(self, soc, max_idling=20):
        """Pump data between the client connection and *soc* until an
        exceptional condition occurs or ~3*max_idling seconds pass with
        no traffic."""
        iw = [self.connection, soc]
        ow = []
        count = 0
        while True:
            count += 1
            (ins, _, exs) = select.select(iw, ow, iw, 3)
            if exs:
                break
            if ins:
                for i in ins:
                    out = self.connection if i is soc else soc
                    data = i.recv(8192)
                    if data:
                        out.sendall(data)
                        count = 0
            if count == max_idling:
                break

    # All plain methods are forwarded identically.
    do_HEAD = do_GET
    do_POST = do_GET
    do_PUT = do_GET
    do_DELETE = do_GET
def maybe_sd_notify(s: str) -> None:
    """Send an sd_notify(3) style state message if running under systemd.

    No-op when NOTIFY_SOCKET is unset. Handles abstract-namespace socket
    addresses (a leading '@' stands for a NUL byte) and always closes the
    datagram socket instead of leaking it.
    """
    addr = os.getenv('NOTIFY_SOCKET')
    if not addr:
        return
    if addr.startswith('@'):
        # systemd encodes abstract sockets with a leading '@'.
        addr = '\0' + addr[1:]
    soc = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        soc.connect(addr)
        soc.sendall(s.encode())
    finally:
        soc.close()
class ThreadingHTTPServer (socketserver.ThreadingMixIn,
                           http.server.HTTPServer):
    # HTTPServer that serves each request on its own thread and, once the
    # listening socket is bound, notifies systemd (if present) that the
    # service is ready.
    def __init__(self, *args):
        super().__init__(*args)
        maybe_sd_notify("READY=1")
if __name__ == '__main__':
    # http.server.test() wires the handler into the server class, binds
    # the port and serves until interrupted.
    port=3128
    print("starting tinyproxy on port {}".format(port))
    http.server.test(ProxyHandler, ThreadingHTTPServer, port=port)
| gpl-3.0 |
hguemar/cinder | cinder/tests/test_drbdmanagedrv.py | 1 | 11637 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo.utils import importutils
from oslo.utils import timeutils
from cinder import context
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
class mock_dbus():
    """Minimal stand-in for the ``dbus`` module used by drbdmanagedrv."""
    def __init__(self):
        pass
    @staticmethod
    def Array(defaults, signature=None):
        # dbus.Array is treated as a plain sequence; the signature is
        # irrelevant for the fake driver below.
        return defaults
class mock_dm_utils():
    """Stand-in for ``drbdmanage.utils``."""
    @staticmethod
    def dict_to_aux_props(x):
        # Identity: the tests only check that the props dict is passed
        # through to the fake driver unchanged.
        return x
class mock_dm_const():
    """Stand-in for ``drbdmanage.consts``."""
    TQ_GET_PATH = "get_path"
class mock_dm_exc():
    """Stand-in for ``drbdmanage.exceptions`` (return-code constants)."""
    DM_SUCCESS = 0
    DM_EEXIST = 1
    DM_ENOENT = 2
    DM_ERROR = 1000
import sys
# Install the fakes into sys.modules *before* importing the driver so
# that drbdmanagedrv's own "import dbus" / "import drbdmanage.*" pick
# them up. namedtuple classes are used as cheap attribute-bearing module
# objects.
sys.modules['dbus'] = mock_dbus
sys.modules['drbdmanage'] = collections.namedtuple(
    'module', ['consts', 'exceptions', 'utils'])
sys.modules['drbdmanage.utils'] = collections.namedtuple(
    'module', ['dict_to_aux_props'])
sys.modules['drbdmanage.consts'] = collections.namedtuple(
    'module', [])
sys.modules['drbdmanage.exceptions'] = collections.namedtuple(
    'module', ['DM_EEXIST'])
from cinder.volume.drivers.drbdmanagedrv import DrbdManageDriver
LOG = logging.getLogger(__name__)
def create_configuration():
    """Build a fake volume driver configuration object.

    NOTE(review): mock.MockObject and mock.IgnoreArg look like mox APIs,
    not part of the ``mock`` library — this helper appears unused by the
    tests below and would raise AttributeError if called; TODO confirm.
    """
    configuration = mock.MockObject(conf.Configuration)
    configuration.san_is_local = False
    configuration.append_config_values(mock.IgnoreArg())
    return configuration
class DrbdManageFakeDriver():
    """Fake replacement for the drbdmanage DBus server object.

    Every method records its invocation (name plus arguments) in
    ``self.calls`` so tests can assert the exact call sequence, and
    returns canned drbdmanage-style (return-codes, payload) tuples.
    Convention: a 'cinder-id' property starting with "deadbeef" simulates
    a missing object (DM_ENOENT); anything else succeeds.
    """
    resources = {}
    def __init__(self):
        self.calls = []
    def list_resources(self, res, serial, prop, req):
        self.calls.append(["list_resources", res, prop, req])
        if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
            return ([mock_dm_exc.DM_ENOENT, "none", []],
                    [])
        else:
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("res", dict(prop))])
    def create_resource(self, res, props):
        self.calls.append(["create_resource", res, props])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
    def create_volume(self, res, size, props):
        self.calls.append(["create_volume", res, size, props])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
    def auto_deploy(self, res, red, delta, site_clients):
        self.calls.append(["auto_deploy", res, red, delta, site_clients])
        # One ack per requested redundancy level.
        return [[mock_dm_exc.DM_SUCCESS, "ack", []] * red]
    def list_volumes(self, res, ser, prop, req):
        self.calls.append(["list_volumes", res, ser, prop, req])
        if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
            return ([mock_dm_exc.DM_ENOENT, "none", []],
                    [])
        else:
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("res", dict(), [(2, dict(prop))])
                     ])
    def remove_volume(self, res, nr, force):
        self.calls.append(["remove_volume", res, nr, force])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
    def text_query(self, cmd):
        self.calls.append(["text_query", cmd])
        # Only the "get path" query is understood by the fake.
        if cmd[0] == mock_dm_const.TQ_GET_PATH:
            return ([(mock_dm_exc.DM_SUCCESS, "ack", [])], ['/dev/drbd0'])
        return ([(mock_dm_exc.DM_ERROR, 'unknown command', [])], [])
    def list_assignments(self, nodes, res, ser, prop, req):
        self.calls.append(["list_assignments", nodes, res, ser, prop, req])
        if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
            return ([mock_dm_exc.DM_ENOENT, "none", []],
                    [])
        else:
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("node", "res", dict(), [(2, dict(prop))])
                     ])
    def create_snapshot(self, res, snap, nodes, props):
        self.calls.append(["create_snapshot", res, snap, nodes, props])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
    def list_snapshots(self, res, sn, prop, req):
        self.calls.append(["list_snapshots", res, sn, prop, req])
        if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
            return ([mock_dm_exc.DM_ENOENT, "none", []],
                    [])
        else:
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("res", [("snap", dict(prop))])
                     ])
    def remove_snapshot(self, res, snap, force):
        self.calls.append(["remove_snapshot", res, snap, force])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
    def resize_volume(self, res, vol, ser, size, delta):
        self.calls.append(["resize_volume", res, vol, ser, size, delta])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
    def restore_snapshot(self, res, snap, new, rprop, vprops):
        self.calls.append(["restore_snapshot", res, snap, new, rprop, vprops])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
class DrbdManageTestCase(test.TestCase):
    """Unit tests for DrbdManageDriver, run against DrbdManageFakeDriver.

    setUp replaces the driver's DBus plumbing with the fake so each test
    can inspect the recorded call list.
    """

    def setUp(self):
        self.ctxt = context.get_admin_context()
        self._mock = mock.Mock()
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.san_is_local = True
        self.configuration.reserved_percentage = 1
        super(DrbdManageTestCase, self).setUp()
        self.stubs.Set(importutils, 'import_object',
                       self.fake_import_object)
        self.stubs.Set(DrbdManageDriver, 'call_or_reconnect',
                       self.fake_issue_dbus_call)
        self.stubs.Set(DrbdManageDriver, 'dbus_connect',
                       self.fake_issue_dbus_connect)
        # Point the driver module at the fake drbdmanage submodules.
        sys.modules['cinder.volume.drivers.drbdmanagedrv'].dm_const \
            = mock_dm_const
        sys.modules['cinder.volume.drivers.drbdmanagedrv'].dm_utils \
            = mock_dm_utils
        sys.modules['cinder.volume.drivers.drbdmanagedrv'].dm_exc \
            = mock_dm_exc
        self.configuration.safe_get = lambda x: 'fake'
    # Infrastructure
    def fake_import_object(self, what, configuration, db, executor):
        return None
    def fake_issue_dbus_call(self, fn, *args):
        # apply() no longer exists on Python 3; star-unpacking is the
        # direct equivalent.
        return fn(*args)
    def fake_issue_dbus_connect(self):
        self.odm = DrbdManageFakeDriver()
    def call_or_reconnect(self, method, *params):
        return method(*params)
    # Tests per se
    def test_create_volume(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}
        dmd = DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.create_volume(testvol)
        self.assertEqual(dmd.odm.calls[0][0], "create_resource")
        self.assertEqual(dmd.odm.calls[1][0], "create_volume")
        # 1 GiB expressed in KiB.
        self.assertEqual(dmd.odm.calls[1][2], 1048576)
        self.assertEqual(dmd.odm.calls[2][0], "auto_deploy")
    def test_delete_volume(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}
        dmd = DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.delete_volume(testvol)
        self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
        self.assertEqual(dmd.odm.calls[0][3]["cinder-id"], testvol['id'])
        self.assertEqual(dmd.odm.calls[1][0], "remove_volume")
    def test_local_path(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}
        dmd = DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        data = dmd.local_path(testvol)
        self.assertTrue(data.startswith("/dev/drbd"))
    def test_create_snapshot(self):
        testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111',
                    'volume_id': 'ba253fd0-8068-11e4-98c0-5254008ea111'}
        dmd = DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.create_snapshot(testsnap)
        self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
        self.assertEqual(dmd.odm.calls[1][0], "list_assignments")
        self.assertEqual(dmd.odm.calls[2][0], "create_snapshot")
        self.assertTrue('node' in dmd.odm.calls[2][3])
    def test_delete_snapshot(self):
        testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'}
        dmd = DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.delete_snapshot(testsnap)
        self.assertEqual(dmd.odm.calls[0][0], "list_snapshots")
        self.assertEqual(dmd.odm.calls[1][0], "remove_snapshot")
    def test_extend_volume(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}
        dmd = DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.extend_volume(testvol, 5)
        self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
        self.assertEqual(dmd.odm.calls[0][3]["cinder-id"], testvol['id'])
        self.assertEqual(dmd.odm.calls[1][0], "resize_volume")
        self.assertEqual(dmd.odm.calls[1][1], "res")
        self.assertEqual(dmd.odm.calls[1][2], 2)
        self.assertEqual(dmd.odm.calls[1][3], -1)
        # 5 GiB expressed in KiB.
        self.assertEqual(dmd.odm.calls[1][4]['size'], 5242880)
    def test_create_cloned_volume(self):
        srcvol = {'project_id': 'testprjid',
                  'name': 'testvol',
                  'size': 1,
                  'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                  'volume_type_id': 'drbdmanage',
                  'created_at': timeutils.utcnow()}
        newvol = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'}
        dmd = DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.create_cloned_volume(newvol, srcvol)
        # Clone is implemented as snapshot -> restore -> remove snapshot.
        self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
        self.assertEqual(dmd.odm.calls[1][0], "list_assignments")
        self.assertEqual(dmd.odm.calls[2][0], "create_snapshot")
        self.assertEqual(dmd.odm.calls[3][0], "list_snapshots")
        self.assertEqual(dmd.odm.calls[4][0], "restore_snapshot")
        self.assertEqual(dmd.odm.calls[5][0], "list_snapshots")
        self.assertEqual(dmd.odm.calls[6][0], "remove_snapshot")
        self.assertEqual(dmd.odm.calls[6][0], "remove_snapshot")
| apache-2.0 |
madan96/sympy | sympy/physics/unitsystems/tests/test_units.py | 92 | 3071 | # -*- coding: utf-8 -*-
from __future__ import division
from sympy.physics.unitsystems.units import Unit
from sympy.physics.unitsystems.systems.mks import length, time
from sympy.physics.unitsystems.prefixes import PREFIXES
from sympy.utilities.pytest import raises
k = PREFIXES['k']
def test_definition():
    """A Unit stores its dimension, scale factor, abbreviation and prefix."""
    dm = Unit(length, factor=10, abbrev="dm")
    assert dm.dim == length
    assert dm._factor == 10
    assert dm._abbrev == "dm"
    assert dm.prefix is None

    km = Unit(length, prefix=k)
    assert km.prefix == k

    # Building a unit on top of another unit multiplies the factors.
    derived = Unit(dm, factor=5)
    assert derived.dim == length
    assert derived._factor == 5 * 10
def test_error_definition():
    # The first argument must be a Dimension or a Unit, not a plain string.
    raises(TypeError, lambda: Unit("m"))
def test_factor():
    """factor combines the raw factor with any prefix factor."""
    assert Unit(length, factor=10, abbrev="dm").factor == 10
    # A kilo prefix multiplies the factor by 1000.
    assert Unit(length, factor=5, prefix=k).factor == 5000
def test_abbrev():
    """abbrev is empty by default; prefixes are prepended to it."""
    assert Unit(length).abbrev == ""
    assert Unit(length, abbrev="m").abbrev == "m"
    assert Unit(length, abbrev="m", prefix=k).abbrev == "km"
def test_abbrev_dim():
    """Without an abbreviation, fall back to the "(factor dimension)" form."""
    assert Unit(length, factor=10).abbrev_dim == "(10 L)"
def test_str():
    """str() prefers the abbreviation, and abbrev_dim otherwise."""
    anonymous = Unit(length, factor=10)
    assert str(anonymous) == anonymous.abbrev_dim

    named = Unit(length, factor=10, abbrev="m")
    assert str(named) == "m"
def test_repr():
    """repr() always shows the dimensional form, even with an abbrev."""
    metre = Unit(length, factor=10, abbrev="m")
    assert repr(metre) == metre.abbrev_dim
def test_eq():
    """Units compare equal iff dimension and factor match; abbrev is ignored."""
    dm = Unit(length, factor=10, abbrev="dm")
    # Same dimension and factor, different abbreviation: still equal.
    assert (dm == Unit(length, factor=10)) is True
    # Different dimension: not equal.
    assert (dm == Unit(time, factor=10, abbrev="ds")) is False
    # Different factor: not equal.
    assert (dm == Unit(length, factor=1, abbrev="dm")) is False
def test_add_sub():
    """add/sub need matching dimensions and operate on the factors."""
    u10 = Unit(length, factor=10)
    u5 = Unit(length, factor=5)
    t2 = Unit(time, factor=2)

    assert u10.add(u5) == Unit(length, factor=15)
    assert u10.sub(u5) == Unit(length, factor=5)

    # Mismatched dimensions are rejected...
    raises(ValueError, lambda: u10.add(t2))
    raises(ValueError, lambda: u10.sub(t2))
    # ...and so are non-unit operands.
    raises(TypeError, lambda: u10.add(1))
    raises(TypeError, lambda: u10.sub(1))
def test_pow():
    """pow raises both the dimension and the factor to the power."""
    u10 = Unit(length, factor=10)
    assert u10.pow(0) == 1
    assert u10.pow(1) == u10
    assert u10.pow(2) == Unit(length.pow(2), factor=100)
    assert u10.pow(-1) == Unit(length.pow(-1), factor=0.1)
def test_mul():
    """mul combines dimensions; a dimensionless result collapses to a number."""
    u10 = Unit(length, factor=10)
    assert u10.mul(1) == u10
    assert u10.mul(Unit(time, factor=2)) == Unit(length.mul(time), factor=20)
    # length * length**-1 is dimensionless -> plain number.
    assert u10.mul(Unit(length.pow(-1), factor=2)) == 20
def test_div():
    """div/rdiv combine dimensions; dimensionless results become numbers."""
    u10 = Unit(length, factor=10)
    assert u10.rdiv(1) == u10.pow(-1)
    assert u10.div(1) == u10
    assert u10.div(Unit(time, factor=2)) == Unit(length.div(time), factor=5)
    # length / length is dimensionless -> plain number.
    assert u10.div(Unit(length, factor=2)) == 5
def test_is_compatible():
    """Units are compatible iff they share a dimension."""
    u10 = Unit(length, factor=10)
    assert u10.is_compatible(Unit(length)) is True
    assert u10.is_compatible(Unit(time)) is False
    # Non-unit objects are never compatible.
    assert u10.is_compatible(2) is False
def test_as_quantity():
    """as_quantity converts a unit into factor * reference unit."""
    from sympy.physics.unitsystems.quantities import Quantity
    u10 = Unit(length, factor=10)
    assert u10.as_quantity == Quantity(10, Unit(length))
| bsd-3-clause |
cloakedcode/CouchPotatoServer | libs/guessit/transfo/__init__.py | 6 | 3799 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import base_text_type, Guess
from guessit.patterns import canonical_form
from guessit.textutils import clean_string
import logging
log = logging.getLogger(__name__)
def found_property(node, name, confidence):
    """Store a single-property guess on *node* and log the discovery."""
    node.guess = Guess({name: node.clean_value}, confidence=confidence)
    # Lazy %-args: the message is only formatted when debug is enabled.
    log.debug('Found with confidence %.2f: %s', confidence, node.guess)
def format_guess(guess):
    """Format all the found values to their natural type.

    For instance, a year would be stored as an int value, etc...

    Note that this modifies the dictionary given as input.
    """
    # Properties whose values are naturally integers.
    int_properties = ('season', 'episodeNumber', 'year', 'cdNumber',
                      'cdNumberTotal', 'bonusNumber', 'filmNumber')
    for prop, value in guess.items():
        if prop in int_properties:
            guess[prop] = int(guess[prop])
        elif isinstance(value, base_text_type):
            # 'edition' values get cleaned before canonicalization.
            if prop in ('edition',):
                value = clean_string(value)
            guess[prop] = canonical_form(value)
    return guess
def find_and_split_node(node, strategy, logger):
    """Run each (matcher, confidence) pair of *strategy* over *node*.

    On the first match, the node is partitioned around the matched span,
    the matching child receives the guess, and the remaining children are
    processed recursively with the same strategy.
    """
    string = ' %s ' % node.value # add sentinels
    for matcher, confidence in strategy:
        # Some matchers need positional context from the node itself;
        # others only inspect the raw string.
        if getattr(matcher, 'use_node', False):
            result, span = matcher(string, node)
        else:
            result, span = matcher(string)
        if result:
            # readjust span to compensate for sentinels
            span = (span[0] - 1, span[1] - 1)
            # A Guess result carries its own confidence unless the strategy
            # overrides it; plain dict results default to 1.0.
            if isinstance(result, Guess):
                if confidence is None:
                    confidence = result.confidence(list(result.keys())[0])
            else:
                if confidence is None:
                    confidence = 1.0
            guess = format_guess(Guess(result, confidence=confidence))
            msg = 'Found with confidence %.2f: %s' % (confidence, guess)
            (logger or log).debug(msg)
            node.partition(span)
            # Child spans are absolute offsets within the whole filename.
            absolute_span = (span[0] + node.offset, span[1] + node.offset)
            for child in node.children:
                if child.span == absolute_span:
                    child.guess = guess
                else:
                    find_and_split_node(child, strategy, logger)
            return
class SingleNodeGuesser(object):
    """Apply one guesser function to every unidentified leaf of a tree."""

    def __init__(self, guess_func, confidence, logger=None):
        self.guess_func = guess_func
        self.confidence = confidence
        self.logger = logger

    def process(self, mtree):
        # The strategy is a single (guesser, confidence) pair:
        # - if the guesser returns a guessit.Guess and confidence is given,
        #   it overrides the guess's own confidence;
        # - if the guesser returns a plain dict, the given confidence is
        #   used, defaulting to 1.0.
        strategy = [(self.guess_func, self.confidence)]
        for leaf in mtree.unidentified_leaves():
            find_and_split_node(leaf, strategy, self.logger)
| gpl-3.0 |
2015fallproject/2015fallcase2 | static/Brython3.2.0-20150701-214155/Lib/test/unittests/test_timeout.py | 27 | 11379 | """Unit tests for socket timeout feature."""
import functools
import unittest
from test import support
# This requires the 'network' resource as given on the regrtest command line.
skip_expected = not support.is_resource_enabled('network')
import time
import errno
import socket
@functools.lru_cache()
def resolve_address(host, port):
    """Resolve (host, port) to a concrete sockaddr, caching the result.

    Name resolution must happen before the timeout tests run; otherwise it
    would be performed by connect() and skew the measured delays.
    """
    with support.transient_internet(host):
        infos = socket.getaddrinfo(host, port, socket.AF_INET,
                                   socket.SOCK_STREAM)
        return infos[0][4]
class CreationTestCase(unittest.TestCase):
    """Test case for socket.gettimeout() and socket.settimeout().

    Fix: testTypeCheck and testRangeCheck contained verbatim duplicate
    checks (settimeout(0) and settimeout("") twice, settimeout(-1) twice);
    each accepted/rejected value is now checked exactly once.
    """

    def setUp(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def tearDown(self):
        self.sock.close()

    def testObjectCreation(self):
        # A fresh socket is blocking: no timeout configured.
        self.assertEqual(self.sock.gettimeout(), None,
                         "timeout not disabled by default")

    def testFloatReturnValue(self):
        # gettimeout() reports back exactly what settimeout() stored.
        self.sock.settimeout(7.345)
        self.assertEqual(self.sock.gettimeout(), 7.345)

        self.sock.settimeout(3)
        self.assertEqual(self.sock.gettimeout(), 3)

        self.sock.settimeout(None)
        self.assertEqual(self.sock.gettimeout(), None)

    def testReturnType(self):
        # gettimeout() always returns a float, even when an int was set.
        self.sock.settimeout(1)
        self.assertEqual(type(self.sock.gettimeout()), type(1.0))

        self.sock.settimeout(3.9)
        self.assertEqual(type(self.sock.gettimeout()), type(1.0))

    def testTypeCheck(self):
        # settimeout() accepts ints, floats and None...
        self.sock.settimeout(0)
        self.sock.settimeout(0.0)
        self.sock.settimeout(None)
        # ...and rejects everything else with TypeError.
        self.assertRaises(TypeError, self.sock.settimeout, "")
        self.assertRaises(TypeError, self.sock.settimeout, ())
        self.assertRaises(TypeError, self.sock.settimeout, [])
        self.assertRaises(TypeError, self.sock.settimeout, {})
        self.assertRaises(TypeError, self.sock.settimeout, 0j)

    def testRangeCheck(self):
        # Negative timeouts are rejected, whether int or float.
        self.assertRaises(ValueError, self.sock.settimeout, -1)
        self.assertRaises(ValueError, self.sock.settimeout, -1.0)

    def testTimeoutThenBlocking(self):
        # setblocking() overrides a previously configured timeout.
        self.sock.settimeout(10)
        self.sock.setblocking(1)
        self.assertEqual(self.sock.gettimeout(), None)
        self.sock.setblocking(0)
        self.assertEqual(self.sock.gettimeout(), 0.0)

        self.sock.settimeout(10)
        self.sock.setblocking(0)
        self.assertEqual(self.sock.gettimeout(), 0.0)
        self.sock.setblocking(1)
        self.assertEqual(self.sock.gettimeout(), None)

    def testBlockingThenTimeout(self):
        # settimeout() overrides a previously configured blocking mode.
        self.sock.setblocking(0)
        self.sock.settimeout(1)
        self.assertEqual(self.sock.gettimeout(), 1)

        self.sock.setblocking(1)
        self.sock.settimeout(1)
        self.assertEqual(self.sock.gettimeout(), 1)
class TimeoutTestCase(unittest.TestCase):
    # There are a number of tests here trying to make sure that an operation
    # doesn't take too much longer than expected.  But competing machine
    # activity makes it inevitable that such tests will fail at times.
    # When fuzz was at 1.0, I (tim) routinely saw bogus failures on Win2K
    # and Win98SE.  Boosting it to 2.0 helped a lot, but isn't a real
    # solution.
    fuzz = 2.0

    localhost = support.HOST

    def setUp(self):
        # Subclasses must create self.sock.
        raise NotImplementedError()

    tearDown = setUp

    def _sock_operation(self, count, timeout, method, *args):
        """
        Test the specified socket method.

        The method is run at most `count` times and must raise a socket.timeout
        within `timeout` + self.fuzz seconds.
        """
        self.sock.settimeout(timeout)
        method = getattr(self.sock, method)
        for i in range(count):
            # time.monotonic is immune to system clock adjustments, unlike
            # time.time, so elapsed-time measurements cannot go backwards.
            t1 = time.monotonic()
            try:
                method(*args)
            except socket.timeout:
                delta = time.monotonic() - t1
                break
        else:
            self.fail('socket.timeout was not raised')
        # These checks should account for timing unprecision
        self.assertLess(delta, timeout + self.fuzz)
        self.assertGreater(delta, timeout - 1.0)
class TCPTimeoutTestCase(TimeoutTestCase):
    """TCP test case for socket.socket() timeout functions"""
    def setUp(self):
        # One fresh TCP socket per test; the remote address is resolved (and
        # cached by resolve_address) up front to keep DNS out of the timing.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addr_remote = resolve_address('www.python.org.', 80)
    def tearDown(self):
        self.sock.close()
    def testConnectTimeout(self):
        # Testing connect timeout is tricky: we need to have IP connectivity
        # to a host that silently drops our packets.  We can't simulate this
        # from Python because it's a function of the underlying TCP/IP stack.
        # So, the following Snakebite host has been defined:
        blackhole = resolve_address('blackhole.snakebite.net', 56666)
        # Blackhole has been configured to silently drop any incoming packets.
        # No RSTs (for TCP) or ICMP UNREACH (for UDP/ICMP) will be sent back
        # to hosts that attempt to connect to this address: which is exactly
        # what we need to confidently test connect timeout.
        # However, we want to prevent false positives.  It's not unreasonable
        # to expect certain hosts may not be able to reach the blackhole, due
        # to firewalling or general network configuration.  In order to improve
        # our confidence in testing the blackhole, a corresponding 'whitehole'
        # has also been set up using one port higher:
        whitehole = resolve_address('whitehole.snakebite.net', 56667)
        # This address has been configured to immediately drop any incoming
        # packets as well, but it does it respectfully with regards to the
        # incoming protocol.  RSTs are sent for TCP packets, and ICMP UNREACH
        # is sent for UDP/ICMP packets.  This means our attempts to connect to
        # it should be met immediately with ECONNREFUSED.  The test case has
        # been structured around this premise: if we get an ECONNREFUSED from
        # the whitehole, we proceed with testing connect timeout against the
        # blackhole.  If we don't, we skip the test (with a message about not
        # getting the required RST from the whitehole within the required
        # timeframe).
        # For the records, the whitehole/blackhole configuration has been set
        # up using the 'pf' firewall (available on BSDs), using the following:
        #
        #   ext_if="bge0"
        #
        #   blackhole_ip="35.8.247.6"
        #   whitehole_ip="35.8.247.6"
        #   blackhole_port="56666"
        #   whitehole_port="56667"
        #
        #   block return in log quick on $ext_if proto { tcp udp } \
        #       from any to $whitehole_ip port $whitehole_port
        #   block drop in log quick on $ext_if proto { tcp udp } \
        #       from any to $blackhole_ip port $blackhole_port
        #
        skip = True
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Use a timeout of 3 seconds.  Why 3?  Because it's more than 1, and
        # less than 5.  i.e. no particular reason.  Feel free to tweak it if
        # you feel a different value would be more appropriate.
        timeout = 3
        sock.settimeout(timeout)
        try:
            sock.connect((whitehole))
        except socket.timeout:
            pass
        except IOError as err:
            # ECONNREFUSED means the whitehole answered with an RST, so the
            # network path works and the blackhole test is meaningful.
            if err.errno == errno.ECONNREFUSED:
                skip = False
        finally:
            sock.close()
            del sock
        if skip:
            self.skipTest(
                "We didn't receive a connection reset (RST) packet from "
                "{}:{} within {} seconds, so we're unable to test connect "
                "timeout against the corresponding {}:{} (which is "
                "configured to silently drop packets)."
                    .format(
                        whitehole[0],
                        whitehole[1],
                        timeout,
                        blackhole[0],
                        blackhole[1],
                    )
            )
        # All that hard work just to test if connect times out in 0.001s ;-)
        self.addr_remote = blackhole
        with support.transient_internet(self.addr_remote[0]):
            self._sock_operation(1, 0.001, 'connect', self.addr_remote)
    def testRecvTimeout(self):
        # Test recv() timeout
        with support.transient_internet(self.addr_remote[0]):
            self.sock.connect(self.addr_remote)
            self._sock_operation(1, 1.5, 'recv', 1024)
    def testAcceptTimeout(self):
        # Test accept() timeout
        support.bind_port(self.sock, self.localhost)
        self.sock.listen(5)
        self._sock_operation(1, 1.5, 'accept')
    def testSend(self):
        # Test send() timeout
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
            support.bind_port(serv, self.localhost)
            serv.listen(5)
            self.sock.connect(serv.getsockname())
            # Send a lot of data in order to bypass buffering in the TCP stack.
            self._sock_operation(100, 1.5, 'send', b"X" * 200000)
    def testSendto(self):
        # Test sendto() timeout
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
            support.bind_port(serv, self.localhost)
            serv.listen(5)
            self.sock.connect(serv.getsockname())
            # The address argument is ignored since we already connected.
            self._sock_operation(100, 1.5, 'sendto', b"X" * 200000,
                                 serv.getsockname())
    def testSendall(self):
        # Test sendall() timeout
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
            support.bind_port(serv, self.localhost)
            serv.listen(5)
            self.sock.connect(serv.getsockname())
            # Send a lot of data in order to bypass buffering in the TCP stack.
            self._sock_operation(100, 1.5, 'sendall', b"X" * 200000)
class UDPTimeoutTestCase(TimeoutTestCase):
    """UDP test case for socket.socket() timeout functions"""
    def setUp(self):
        # Datagram socket; UDP needs no connection setup.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def tearDown(self):
        self.sock.close()
    def testRecvfromTimeout(self):
        # Test recvfrom() timeout
        # Prevent "Address already in use" socket exceptions
        support.bind_port(self.sock, self.localhost)
        self._sock_operation(1, 1.5, 'recvfrom', 1024)
def test_main():
    # Every case in this module uses real sockets (some over the public
    # internet), so require the 'network' regrtest resource up front.
    support.requires('network')
    support.run_unittest(
        CreationTestCase,
        TCPTimeoutTestCase,
        UDPTimeoutTestCase,
    )
if __name__ == "__main__":
    test_main()
| agpl-3.0 |
laurent-george/weboob | modules/inrocks/module.py | 7 | 1376 | # -*- coding: utf-8 -*-
# Copyright(C) 2011 Julien Hebert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
"backend for http://www.lesinrocks.com"
from weboob.capabilities.messages import CapMessages
from weboob.tools.capabilities.messages.GenericModule import GenericNewspaperModule
from .browser import NewspaperInrocksBrowser
from .tools import rssid
class NewspaperInrocksModule(GenericNewspaperModule, CapMessages):
    # Weboob newspaper backend for http://www.lesinrocks.com: exposes the
    # site's RSS feed through the generic newspaper machinery.
    MAINTAINER = u'Julien Hebert'
    EMAIL = 'juke@free.fr'
    VERSION = '1.1'
    LICENSE = 'AGPLv3+'
    # Per-backend persistent storage of already-seen articles.
    STORAGE = {'seen': {}}
    NAME = 'inrocks'
    DESCRIPTION = u'Les Inrocks French news website'
    BROWSER = NewspaperInrocksBrowser
    # Main RSS feed, and the helper extracting a stable id from its entries.
    RSS_FEED = 'http://www.lesinrocks.com/fileadmin/rss/actus.xml'
    RSSID = rssid
| agpl-3.0 |
jeasoft/odoo | comunity_modules/attachment_preview/model/ir_attachment.py | 4 | 2843 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import collections
import os.path
import mimetypes
import base64
from openerp.osv.orm import Model
class IrAttachment(Model):
    """Extend ir.attachment with helpers that report a binary field's file
    extension, so the preview widget can decide how to render it."""
    _inherit = 'ir.attachment'

    def get_binary_extension(
            self, cr, uid, model, ids, binary_field, filename_field=None,
            context=None):
        """Return {record id: extension} for *binary_field* on *model*.

        The extension (lowercase, without the leading dot) is taken from
        *filename_field* when set; otherwise the content is sniffed with
        python-magic, falling back to the mimetypes module.  Records without
        binary content map to False.  If *ids* is a scalar, the bare value
        is returned instead of a mapping.

        Fixes: guard against a None mimetype (mimetypes.guess_type can
        return (None, None)), which previously crashed on .split(';'); and
        close the libmagic cookie, which was previously leaked per record.
        """
        result = {}
        for this in self.pool[model].browse(
                cr, uid,
                ids if isinstance(ids, collections.Iterable) else [ids],
                context=context):
            if not this.id:
                result[this.id] = False
                continue
            extension = ''
            if filename_field and this[filename_field]:
                filename, extension = os.path.splitext(this[filename_field])
            if not this[binary_field]:
                result[this.id] = False
                continue
            if not extension:
                mimetype = None
                try:
                    import magic
                    ms = magic.open(
                        hasattr(magic, 'MAGIC_MIME_TYPE')
                        and magic.MAGIC_MIME_TYPE or magic.MAGIC_MIME)
                    ms.load()
                    mimetype = ms.buffer(
                        base64.b64decode(this[binary_field]))
                    # Release the libmagic cookie (was leaked before).
                    ms.close()
                except ImportError:
                    (mimetype, encoding) = mimetypes.guess_type(
                        'data:;base64,' + this[binary_field], strict=False)
                # Both detectors may fail to identify the payload; only
                # derive an extension when we actually got a mimetype.
                if mimetype:
                    extension = mimetypes.guess_extension(
                        mimetype.split(';')[0], strict=False)
            result[this.id] = (extension or '').lstrip('.').lower()
        return result if isinstance(ids, collections.Iterable) else result[ids]

    def get_attachment_extension(self, cr, uid, ids, context=None):
        """Shortcut: extension of the attachment's own 'datas' field."""
        return self.get_binary_extension(
            cr, uid, self._name, ids, 'datas', 'datas_fname', context=context)
| agpl-3.0 |
RobertWWong/WebDev | djangoApp/ENV/lib/python3.5/site-packages/setuptools/msvc9_support.py | 429 | 2187 | try:
import distutils.msvc9compiler
except ImportError:
pass
unpatched = dict()
def patch_for_specialized_compiler():
    """
    Patch functions in distutils.msvc9compiler to use the standalone compiler
    build for Python (Windows only). Fall back to original behavior when the
    standalone compiler is not available.
    """
    if 'distutils' not in globals():
        # The module isn't available to be patched
        return
    if unpatched:
        # Already patched
        return
    # Remember the originals so the replacements can delegate to them.
    unpatched.update(vars(distutils.msvc9compiler))
    distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
    distutils.msvc9compiler.query_vcvarsall = query_vcvarsall
def find_vcvarsall(version):
    """Locate vcvarsall.bat for the "VC for Python" standalone compiler,
    delegating to the stock distutils implementation when it's absent."""
    Reg = distutils.msvc9compiler.Reg
    VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
    productdir = None
    # Per-user installs register directly; all-user installs on a 64-bit
    # system register under the Wow6432Node branch.
    for arch_branch in ('', 'Wow6432Node\\'):
        key = VC_BASE % (arch_branch, version)
        try:
            productdir = Reg.get_value(key, "installdir")
            break
        except KeyError:
            continue
    if productdir:
        import os
        vcvarsall = os.path.join(productdir, "vcvarsall.bat")
        if os.path.isfile(vcvarsall):
            return vcvarsall
    # Not installed (or batch file missing): fall back to stock behavior.
    return unpatched['find_vcvarsall'](version)
def query_vcvarsall(version, *args, **kwargs):
    """Run vcvarsall.bat via the original implementation, enriching the
    "compiler missing" error with a download hint."""
    try:
        return unpatched['query_vcvarsall'](version, *args, **kwargs)
    except distutils.errors.DistutilsPlatformError as exc:
        # distutils signals a missing compiler by mentioning vcvarsall.bat
        # in the error text; anything else is re-raised untouched.
        if exc and "vcvarsall.bat" in exc.args[0]:
            message = 'Microsoft Visual C++ %0.1f is required (%s).' % (version, exc.args[0])
            if int(version) == 9:
                # This redirection link is maintained by Microsoft.
                # Contact vspython@microsoft.com if it needs updating.
                raise distutils.errors.DistutilsPlatformError(
                    message + ' Get it from http://aka.ms/vcpython27'
                )
            raise distutils.errors.DistutilsPlatformError(message)
        raise
| mit |
cloudbase/cinder | cinder/api/v3/views/group_snapshots.py | 5 | 2432 | # Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model group_snapshot API responses as a python dictionary."""

    _collection_name = "group_snapshots"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, group_snapshots):
        """Show a list of group_snapshots without many details."""
        return self._list_view(self.summary, request, group_snapshots)

    def detail_list(self, request, group_snapshots):
        """Detailed view of a list of group_snapshots."""
        return self._list_view(self.detail, request, group_snapshots)

    def summary(self, request, group_snapshot):
        """Generic, non-detailed view of a group_snapshot."""
        view = {
            'id': group_snapshot.id,
            'name': group_snapshot.name,
        }
        return {'group_snapshot': view}

    def detail(self, request, group_snapshot):
        """Detailed view of a single group_snapshot."""
        view = {
            'id': group_snapshot.id,
            'group_id': group_snapshot.group_id,
            'status': group_snapshot.status,
            'created_at': group_snapshot.created_at,
            'name': group_snapshot.name,
            'description': group_snapshot.description,
        }
        return {'group_snapshot': view}

    def _list_view(self, func, request, group_snapshots):
        """Provide a view for a list of group_snapshots."""
        views = [func(request, snapshot)['group_snapshot']
                 for snapshot in group_snapshots]
        return {'group_snapshots': views}
| apache-2.0 |
samithaj/headphones | lib/unidecode/x080.py | 252 | 4651 | data = (
'Yao ', # 0x00
'Lao ', # 0x01
'[?] ', # 0x02
'Kao ', # 0x03
'Mao ', # 0x04
'Zhe ', # 0x05
'Qi ', # 0x06
'Gou ', # 0x07
'Gou ', # 0x08
'Gou ', # 0x09
'Die ', # 0x0a
'Die ', # 0x0b
'Er ', # 0x0c
'Shua ', # 0x0d
'Ruan ', # 0x0e
'Er ', # 0x0f
'Nai ', # 0x10
'Zhuan ', # 0x11
'Lei ', # 0x12
'Ting ', # 0x13
'Zi ', # 0x14
'Geng ', # 0x15
'Chao ', # 0x16
'Hao ', # 0x17
'Yun ', # 0x18
'Pa ', # 0x19
'Pi ', # 0x1a
'Chi ', # 0x1b
'Si ', # 0x1c
'Chu ', # 0x1d
'Jia ', # 0x1e
'Ju ', # 0x1f
'He ', # 0x20
'Chu ', # 0x21
'Lao ', # 0x22
'Lun ', # 0x23
'Ji ', # 0x24
'Tang ', # 0x25
'Ou ', # 0x26
'Lou ', # 0x27
'Nou ', # 0x28
'Gou ', # 0x29
'Pang ', # 0x2a
'Ze ', # 0x2b
'Lou ', # 0x2c
'Ji ', # 0x2d
'Lao ', # 0x2e
'Huo ', # 0x2f
'You ', # 0x30
'Mo ', # 0x31
'Huai ', # 0x32
'Er ', # 0x33
'Zhe ', # 0x34
'Ting ', # 0x35
'Ye ', # 0x36
'Da ', # 0x37
'Song ', # 0x38
'Qin ', # 0x39
'Yun ', # 0x3a
'Chi ', # 0x3b
'Dan ', # 0x3c
'Dan ', # 0x3d
'Hong ', # 0x3e
'Geng ', # 0x3f
'Zhi ', # 0x40
'[?] ', # 0x41
'Nie ', # 0x42
'Dan ', # 0x43
'Zhen ', # 0x44
'Che ', # 0x45
'Ling ', # 0x46
'Zheng ', # 0x47
'You ', # 0x48
'Wa ', # 0x49
'Liao ', # 0x4a
'Long ', # 0x4b
'Zhi ', # 0x4c
'Ning ', # 0x4d
'Tiao ', # 0x4e
'Er ', # 0x4f
'Ya ', # 0x50
'Die ', # 0x51
'Gua ', # 0x52
'[?] ', # 0x53
'Lian ', # 0x54
'Hao ', # 0x55
'Sheng ', # 0x56
'Lie ', # 0x57
'Pin ', # 0x58
'Jing ', # 0x59
'Ju ', # 0x5a
'Bi ', # 0x5b
'Di ', # 0x5c
'Guo ', # 0x5d
'Wen ', # 0x5e
'Xu ', # 0x5f
'Ping ', # 0x60
'Cong ', # 0x61
'Shikato ', # 0x62
'[?] ', # 0x63
'Ting ', # 0x64
'Yu ', # 0x65
'Cong ', # 0x66
'Kui ', # 0x67
'Tsuraneru ', # 0x68
'Kui ', # 0x69
'Cong ', # 0x6a
'Lian ', # 0x6b
'Weng ', # 0x6c
'Kui ', # 0x6d
'Lian ', # 0x6e
'Lian ', # 0x6f
'Cong ', # 0x70
'Ao ', # 0x71
'Sheng ', # 0x72
'Song ', # 0x73
'Ting ', # 0x74
'Kui ', # 0x75
'Nie ', # 0x76
'Zhi ', # 0x77
'Dan ', # 0x78
'Ning ', # 0x79
'Qie ', # 0x7a
'Ji ', # 0x7b
'Ting ', # 0x7c
'Ting ', # 0x7d
'Long ', # 0x7e
'Yu ', # 0x7f
'Yu ', # 0x80
'Zhao ', # 0x81
'Si ', # 0x82
'Su ', # 0x83
'Yi ', # 0x84
'Su ', # 0x85
'Si ', # 0x86
'Zhao ', # 0x87
'Zhao ', # 0x88
'Rou ', # 0x89
'Yi ', # 0x8a
'Le ', # 0x8b
'Ji ', # 0x8c
'Qiu ', # 0x8d
'Ken ', # 0x8e
'Cao ', # 0x8f
'Ge ', # 0x90
'Di ', # 0x91
'Huan ', # 0x92
'Huang ', # 0x93
'Yi ', # 0x94
'Ren ', # 0x95
'Xiao ', # 0x96
'Ru ', # 0x97
'Zhou ', # 0x98
'Yuan ', # 0x99
'Du ', # 0x9a
'Gang ', # 0x9b
'Rong ', # 0x9c
'Gan ', # 0x9d
'Cha ', # 0x9e
'Wo ', # 0x9f
'Chang ', # 0xa0
'Gu ', # 0xa1
'Zhi ', # 0xa2
'Han ', # 0xa3
'Fu ', # 0xa4
'Fei ', # 0xa5
'Fen ', # 0xa6
'Pei ', # 0xa7
'Pang ', # 0xa8
'Jian ', # 0xa9
'Fang ', # 0xaa
'Zhun ', # 0xab
'You ', # 0xac
'Na ', # 0xad
'Hang ', # 0xae
'Ken ', # 0xaf
'Ran ', # 0xb0
'Gong ', # 0xb1
'Yu ', # 0xb2
'Wen ', # 0xb3
'Yao ', # 0xb4
'Jin ', # 0xb5
'Pi ', # 0xb6
'Qian ', # 0xb7
'Xi ', # 0xb8
'Xi ', # 0xb9
'Fei ', # 0xba
'Ken ', # 0xbb
'Jing ', # 0xbc
'Tai ', # 0xbd
'Shen ', # 0xbe
'Zhong ', # 0xbf
'Zhang ', # 0xc0
'Xie ', # 0xc1
'Shen ', # 0xc2
'Wei ', # 0xc3
'Zhou ', # 0xc4
'Die ', # 0xc5
'Dan ', # 0xc6
'Fei ', # 0xc7
'Ba ', # 0xc8
'Bo ', # 0xc9
'Qu ', # 0xca
'Tian ', # 0xcb
'Bei ', # 0xcc
'Gua ', # 0xcd
'Tai ', # 0xce
'Zi ', # 0xcf
'Ku ', # 0xd0
'Zhi ', # 0xd1
'Ni ', # 0xd2
'Ping ', # 0xd3
'Zi ', # 0xd4
'Fu ', # 0xd5
'Pang ', # 0xd6
'Zhen ', # 0xd7
'Xian ', # 0xd8
'Zuo ', # 0xd9
'Pei ', # 0xda
'Jia ', # 0xdb
'Sheng ', # 0xdc
'Zhi ', # 0xdd
'Bao ', # 0xde
'Mu ', # 0xdf
'Qu ', # 0xe0
'Hu ', # 0xe1
'Ke ', # 0xe2
'Yi ', # 0xe3
'Yin ', # 0xe4
'Xu ', # 0xe5
'Yang ', # 0xe6
'Long ', # 0xe7
'Dong ', # 0xe8
'Ka ', # 0xe9
'Lu ', # 0xea
'Jing ', # 0xeb
'Nu ', # 0xec
'Yan ', # 0xed
'Pang ', # 0xee
'Kua ', # 0xef
'Yi ', # 0xf0
'Guang ', # 0xf1
'Gai ', # 0xf2
'Ge ', # 0xf3
'Dong ', # 0xf4
'Zhi ', # 0xf5
'Xiao ', # 0xf6
'Xiong ', # 0xf7
'Xiong ', # 0xf8
'Er ', # 0xf9
'E ', # 0xfa
'Xing ', # 0xfb
'Pian ', # 0xfc
'Neng ', # 0xfd
'Zi ', # 0xfe
'Gui ', # 0xff
)
| gpl-3.0 |
Toms88/Evasion | node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 2214 | 1347 | #!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
    """Return the character for *codePoint*, as a surrogate pair for
    astral code points (Python 2 narrow-build style), or 'Error' when the
    value is outside the Unicode range."""
    # http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
    # http://stackoverflow.com/a/13436167/96656
    if 0x0000 <= codePoint <= 0xFFFF:
        return unichr(codePoint)
    elif 0x010000 <= codePoint <= 0x10FFFF:
        offset = codePoint - 0x10000
        highSurrogate = int(offset / 0x400) + 0xD800
        lowSurrogate = int(offset % 0x400) + 0xDC00
        return unichr(highSurrogate) + unichr(lowSurrogate)
    else:
        return 'Error'
def hexify(codePoint):
    """Format a code point as zero-padded 'U+XXXXXX' notation."""
    return 'U+{0:06X}'.format(codePoint)
def writeFile(filename, contents):
    """Write *contents* to *filename*, normalised to end in one newline.

    Fix: use the parenthesized print call, which is valid (and behaves
    identically) on both Python 2 and Python 3; the bare print statement
    was a syntax error under Python 3.
    """
    print(filename)  # progress output while generating files
    with open(filename, 'w') as f:
        f.write(contents.strip() + '\n')
# Build one record per Unicode code point (U+0000 .. U+10FFFF), pairing the
# decoded symbol with its UTF-8 byte sequence (stored latin1-decoded so the
# raw bytes survive JSON serialization).
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
    symbol = unisymbol(codePoint)
    # http://stackoverflow.com/a/17199950/96656
    utf8_bytes = symbol.encode('utf8').decode('latin1')  # renamed: don't shadow builtin `bytes`
    data.append({
        'codePoint': codePoint,
        'decoded': symbol,
        'encoded': utf8_bytes
    })
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation.
# Fix: replace two-space indent units, not every single space -- replacing
# single spaces would also mangle spaces inside the JSON string values.
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
	r'\\u([a-fA-F0-9]{4})',
	lambda match: r'\u{}'.format(match.group(1).upper()),
	jsonData
)
writeFile('data.json', jsonData)
| mit |
coagulant/django-storages-py3 | storages/tests/s3boto.py | 3 | 8464 | #import os
#import mock
#from uuid import uuid4
#from urllib2 import urlopen
#
#from django.test import TestCase
#from django.core.files.base import ContentFile
#from django.conf import settings
#from django.core.files.storage import FileSystemStorage
#
#from boto.s3.key import Key
#
#from storages.backends import s3boto
#
#__all__ = (
# 'SafeJoinTest',
# 'S3BotoStorageTests',
# #'S3BotoStorageFileTests',
#)
#
#class S3BotoTestCase(TestCase):
# @mock.patch('storages.backends.s3boto.S3Connection')
# def setUp(self, S3Connection):
# self.storage = s3boto.S3BotoStorage()
#
#
#class SafeJoinTest(TestCase):
# def test_normal(self):
# path = s3boto.safe_join("", "path/to/somewhere", "other", "path/to/somewhere")
# self.assertEquals(path, "path/to/somewhere/other/path/to/somewhere")
#
# def test_with_dot(self):
# path = s3boto.safe_join("", "path/./somewhere/../other", "..",
# ".", "to/./somewhere")
# self.assertEquals(path, "path/to/somewhere")
#
# def test_base_url(self):
# path = s3boto.safe_join("base_url", "path/to/somewhere")
# self.assertEquals(path, "base_url/path/to/somewhere")
#
# def test_base_url_with_slash(self):
# path = s3boto.safe_join("base_url/", "path/to/somewhere")
# self.assertEquals(path, "base_url/path/to/somewhere")
#
# def test_suspicious_operation(self):
# self.assertRaises(ValueError,
# s3boto.safe_join, "base", "../../../../../../../etc/passwd")
#
#class S3BotoStorageTests(S3BotoTestCase):
#
# def test_storage_save(self):
# """
# Test saving a file
# """
# name = 'test_storage_save.txt'
# content = ContentFile('new content')
# self.storage.save(name, content)
# self.storage.bucket.get_key.assert_called_once_with(name)
#
# key = self.storage.bucket.get_key.return_value
# key.set_metadata.assert_called_with('Content-Type', 'text/plain')
# key.set_contents_from_file.assert_called_with(
# content,
# headers={},
# policy=self.storage.acl,
# reduced_redundancy=self.storage.reduced_redundancy,
# )
#
# def test_storage_save_gzip(self):
# """
# Test saving a file with gzip enabled.
# """
# if not s3boto.IS_GZIPPED: # Gzip not available.
# return
# name = 'test_storage_save.css'
# content = ContentFile("I should be gzip'd")
# self.storage.save(name, content)
# key = self.storage.bucket.get_key.return_value
# key.set_metadata.assert_called_with('Content-Type', 'text/css')
# key.set_contents_from_file.assert_called_with(
# content,
# headers={'Content-Encoding': 'gzip'},
# policy=self.storage.acl,
# reduced_redundancy=self.storage.reduced_redundancy,
# )
#
# def test_compress_content_len(self):
# """
# Test that file returned by _compress_content() is readable.
# """
# if not s3boto.IS_GZIPPED: # Gzip not available.
# return
# content = ContentFile("I should be gzip'd")
# content = self.storage._compress_content(content)
# self.assertTrue(len(content.read()) > 0)
#
# def test_storage_open_write(self):
# """
# Test opening a file in write mode
# """
# name = 'test_open_for_writing.txt'
# content = 'new content'
#
# # Set the ACL header used when creating/writing data.
# self.storage.bucket.connection.provider.acl_header = 'x-amz-acl'
# # Set the mocked key's bucket
# self.storage.bucket.get_key.return_value.bucket = self.storage.bucket
# # Set the name of the mock object
# self.storage.bucket.get_key.return_value.name = name
#
# file = self.storage.open(name, 'w')
# self.storage.bucket.get_key.assert_called_with(name)
#
# file.write(content)
# self.storage.bucket.initiate_multipart_upload.assert_called_with(
# name,
# headers={'x-amz-acl': 'public-read'},
# reduced_redundancy=self.storage.reduced_redundancy,
# )
#
# # Save the internal file before closing
# _file = file.file
# file.close()
# file._multipart.upload_part_from_file.assert_called_with(
# _file, 1, headers=self.storage.headers,
# )
# file._multipart.complete_upload.assert_called_once()
#
# #def test_storage_exists_and_delete(self):
# # # show file does not exist
# # name = self.prefix_path('test_exists.txt')
# # self.assertFalse(self.storage.exists(name))
# #
# # # create the file
# # content = 'new content'
# # file = self.storage.open(name, 'w')
# # file.write(content)
# # file.close()
# #
# # # show file exists
# # self.assertTrue(self.storage.exists(name))
# #
# # # delete the file
# # self.storage.delete(name)
# #
# # # show file does not exist
# # self.assertFalse(self.storage.exists(name))
#
# def test_storage_listdir_base(self):
# file_names = ["some/path/1.txt", "2.txt", "other/path/3.txt", "4.txt"]
#
# self.storage.bucket.list.return_value = []
# for p in file_names:
# key = mock.MagicMock(spec=Key)
# key.name = p
# self.storage.bucket.list.return_value.append(key)
#
# dirs, files = self.storage.listdir("")
#
# self.assertEqual(len(dirs), 2)
# for directory in ["some", "other"]:
# self.assertTrue(directory in dirs,
# """ "%s" not in directory list "%s".""" % (
# directory, dirs))
#
# self.assertEqual(len(files), 2)
# for filename in ["2.txt", "4.txt"]:
# self.assertTrue(filename in files,
# """ "%s" not in file list "%s".""" % (
# filename, files))
#
# def test_storage_listdir_subdir(self):
# file_names = ["some/path/1.txt", "some/2.txt"]
#
# self.storage.bucket.list.return_value = []
# for p in file_names:
# key = mock.MagicMock(spec=Key)
# key.name = p
# self.storage.bucket.list.return_value.append(key)
#
# dirs, files = self.storage.listdir("some/")
# self.assertEqual(len(dirs), 1)
# self.assertTrue('path' in dirs,
# """ "path" not in directory list "%s".""" % (dirs,))
#
# self.assertEqual(len(files), 1)
# self.assertTrue('2.txt' in files,
# """ "2.txt" not in files list "%s".""" % (files,))
#
# #def test_storage_size(self):
# # name = self.prefix_path('test_storage_size.txt')
# # content = 'new content'
# # f = ContentFile(content)
# # self.storage.save(name, f)
# # self.assertEqual(self.storage.size(name), f.size)
# #
# #def test_storage_url(self):
# # name = self.prefix_path('test_storage_size.txt')
# # content = 'new content'
# # f = ContentFile(content)
# # self.storage.save(name, f)
# # self.assertEqual(content, urlopen(self.storage.url(name)).read())
#
##class S3BotoStorageFileTests(S3BotoTestCase):
## def test_multipart_upload(self):
## nparts = 2
## name = self.prefix_path("test_multipart_upload.txt")
## mode = 'w'
## f = s3boto.S3BotoStorageFile(name, mode, self.storage)
## content_length = 1024 * 1024# 1 MB
## content = 'a' * content_length
##
## bytes = 0
## target = f._write_buffer_size * nparts
## while bytes < target:
## f.write(content)
## bytes += content_length
##
## # make the buffer roll over so f._write_counter
## # is incremented
## f.write("finished")
##
## # verify upload was multipart and correctly partitioned
## self.assertEqual(f._write_counter, nparts)
##
## # complete the upload
## f.close()
##
## # verify that the remaining buffered bytes were
## # uploaded when the file was closed.
## self.assertEqual(f._write_counter, nparts+1)
| bsd-3-clause |
Javiercerna/MissionPlanner | Lib/site-packages/numpy/core/code_generators/genapi.py | 54 | 15182 | """
Get API information encoded in C files.
See ``find_function`` for how functions should be formatted, and
``read_order`` for how the order of the functions should be
specified.
"""
import sys, os, re
try:
import hashlib
md5new = hashlib.md5
except ImportError:
import md5
md5new = md5.new
if sys.version_info[:2] < (2, 6):
from sets import Set as set
import textwrap
from os.path import join
__docformat__ = 'restructuredtext'
# The files under src/ that are scanned for API functions
API_FILES = [join('multiarray', 'methods.c'),
             join('multiarray', 'arrayobject.c'),
             join('multiarray', 'flagsobject.c'),
             join('multiarray', 'descriptor.c'),
             join('multiarray', 'iterators.c'),
             join('multiarray', 'getset.c'),
             join('multiarray', 'number.c'),
             join('multiarray', 'sequence.c'),
             join('multiarray', 'ctors.c'),
             join('multiarray', 'convert.c'),
             join('multiarray', 'shape.c'),
             join('multiarray', 'item_selection.c'),
             join('multiarray', 'convert_datatype.c'),
             join('multiarray', 'arraytypes.c.src'),
             join('multiarray', 'multiarraymodule.c'),
             join('multiarray', 'scalartypes.c.src'),
             join('multiarray', 'scalarapi.c'),
             join('multiarray', 'calculation.c'),
             join('multiarray', 'usertypes.c'),
             join('multiarray', 'refcount.c'),
             join('multiarray', 'conversion_utils.c'),
             join('multiarray', 'buffer.c'),
             join('multiarray', 'datetime.c'),
#            join('libnumpy', 'npy_arraytypes.c.src'),
             join('umath', 'ufunc_object.c'),
             join('umath', 'loops.c.src'),
            ]
THIS_DIR = os.path.dirname(__file__)
# Re-anchor the relative entries onto the src/ tree that sits next to this script.
API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES]
def file_in_this_dir(filename):
    """Return *filename* joined onto the directory containing this script."""
    return os.path.join(THIS_DIR, filename)
def remove_whitespace(s):
    """Return *s* with every whitespace character removed."""
    return ''.join(ch for ch in s if not ch.isspace())
def _repl(str):
return str.replace(' intp', ' npy_intp').replace('Bool','npy_bool')
class Function(object):
    """A C API function signature scanned from the numpy sources."""

    def __init__(self, name, return_type, args, doc=''):
        self.name = name
        # Normalize legacy type spellings (' intp', 'Bool') to npy_ names.
        self.return_type = _repl(return_type)
        self.args = args
        self.doc = doc

    def _format_arg(self, typename, name):
        # Pointer types bind the '*' directly to the variable name.
        if typename.endswith('*'):
            return typename + name
        else:
            return typename + ' ' + name

    def __str__(self):
        """Render the signature as a C declaration, preceded by an optional
        /* doc */ comment."""
        argstr = ', '.join([self._format_arg(*a) for a in self.args])
        if self.doc:
            doccomment = '/* %s */\n' % self.doc
        else:
            doccomment = ''
        return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr)

    def to_ReST(self):
        """Render the signature as a reStructuredText literal block."""
        lines = ['::', '', '  ' + self.return_type]
        # \000 is a placeholder so textwrap only breaks between arguments;
        # it is turned back into a space below.
        argstr = ',\000'.join([self._format_arg(*a) for a in self.args])
        name = '  %s' % (self.name,)
        s = textwrap.wrap('(%s)' % (argstr,), width=72,
                          initial_indent=name,
                          subsequent_indent=' ' * (len(name)+1),
                          break_long_words=False)
        for l in s:
            lines.append(l.replace('\000', ' ').rstrip())
        lines.append('')
        if self.doc:
            lines.append(textwrap.dedent(self.doc))
        return '\n'.join(lines)

    def api_hash(self):
        """Return an 8-hex-digit checksum of the signature.

        Whitespace is stripped first so cosmetic edits to the C sources do
        not change the hash.
        """
        m = md5new()
        # Encode to bytes: hashlib rejects str on Python 3 (encode is a
        # no-op for ASCII str on Python 2, so this stays compatible).
        m.update(remove_whitespace(self.return_type).encode('ascii'))
        m.update(b'\000')
        m.update(self.name.encode('ascii'))
        m.update(b'\000')
        for typename, name in self.args:
            m.update(remove_whitespace(typename).encode('ascii'))
            m.update(b'\000')
        return m.hexdigest()[:8]
class ParseError(Exception):
    """Raised when a tagged function cannot be parsed out of a source file."""

    def __init__(self, filename, lineno, msg):
        self.filename = filename
        self.lineno = lineno
        self.msg = msg

    def __str__(self):
        return '{}:{}:{}'.format(self.filename, self.lineno, self.msg)
def skip_brackets(s, lbrac, rbrac):
    """Return the index in *s* at which the bracket nesting level first
    returns to zero; raise ValueError if it never does."""
    depth = 0
    for pos, ch in enumerate(s):
        if ch == lbrac:
            depth += 1
        elif ch == rbrac:
            depth -= 1
        if depth == 0:
            return pos
    raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s))
def split_arguments(argstr):
    """Split a C argument-list string into (typename, name) pairs.

    Commas nested inside parentheses (e.g. function-pointer arguments) do
    not split the list.  An argument with no identifiable trailing name
    yields ``(typename, '')``.
    """
    # NOTE: the original also kept unused ``bracket_counts`` and ``state``
    # locals; they are removed here.
    arguments = []
    current_argument = []
    i = 0

    def finish_arg():
        # Flush the characters collected so far as one (typename, name) pair.
        if current_argument:
            argstr = ''.join(current_argument).strip()
            # The name is the trailing identifier preceded by whitespace or '*'.
            m = re.match(r'(.*(\s+|[*]))(\w+)$', argstr)
            if m:
                typename = m.group(1).strip()
                name = m.group(3)
            else:
                typename = argstr
                name = ''
            arguments.append((typename, name))
            del current_argument[:]

    while i < len(argstr):
        c = argstr[i]
        if c == ',':
            finish_arg()
        elif c == '(':
            # Copy a parenthesized group wholesale so its inner commas do
            # not terminate the current argument.
            p = skip_brackets(argstr[i:], '(', ')')
            current_argument += argstr[i:i+p]
            i += p-1
        else:
            current_argument += c
        i += 1
    finish_arg()
    return arguments
def find_functions(filename, tag='API'):
    """
    Scan the file, looking for tagged functions.

    Assuming ``tag=='API'``, a tagged function looks like::

        /*API*/
        static returntype*
        function_name(argtype1 arg1, argtype2 arg2)
        {
        }

    where the return type must be on a separate line, the function
    name must start the line, and the opening ``{`` must start the line.

    An optional documentation comment in ReST format may follow the tag,
    as in::

        /*API
          This function does foo...
         */

    Returns a list of Function objects.
    """
    fo = open(filename, 'r')
    functions = []
    return_type = None
    function_name = None
    function_args = []
    doclist = []
    # Line-by-line state machine: SCANNING looks for the tag comment, the
    # other states consume (optional doc, return type, name, args) in order.
    SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = range(5)
    state = SCANNING
    tagcomment = '/*' + tag
    for lineno, line in enumerate(fo):
        try:
            line = line.strip()
            if state == SCANNING:
                if line.startswith(tagcomment):
                    # A one-line '/*API*/' tag has no doc comment to read.
                    if line.endswith('*/'):
                        state = STATE_RETTYPE
                    else:
                        state = STATE_DOC
            elif state == STATE_DOC:
                if line.startswith('*/'):
                    state = STATE_RETTYPE
                else:
                    # Strip the C-comment decoration before recording the doc.
                    line = line.lstrip(' *')
                    doclist.append(line)
            elif state == STATE_RETTYPE:
                # first line of declaration with return type
                m = re.match(r'NPY_NO_EXPORT\s+(.*)$', line)
                if m:
                    line = m.group(1)
                return_type = line
                state = STATE_NAME
            elif state == STATE_NAME:
                # second line, with function name
                m = re.match(r'(\w+)\s*\(', line)
                if m:
                    function_name = m.group(1)
                else:
                    raise ParseError(filename, lineno+1,
                                     'could not find function name')
                # Arguments may continue over several lines until the '{'.
                function_args.append(line[m.end():])
                state = STATE_ARGS
            elif state == STATE_ARGS:
                if line.startswith('{'):
                    # finished: assemble the argument text and reset state.
                    fargs_str = ' '.join(function_args).rstrip(' )')
                    fargs = split_arguments(fargs_str)
                    f = Function(function_name, return_type, fargs,
                                 '\n'.join(doclist))
                    functions.append(f)
                    return_type = None
                    function_name = None
                    function_args = []
                    doclist = []
                    state = SCANNING
                else:
                    function_args.append(line)
        except:
            # Intentionally broad: report the offending file/line, then
            # re-raise the original error unchanged.
            print(filename, lineno+1)
            raise
    fo.close()
    return functions
def should_rebuild(targets, source_files):
    """Return True when any target is missing or older than its sources.

    The sources considered are the scanned C API files, *source_files*,
    and this script itself, so editing any of them triggers regeneration.
    """
    from distutils.dep_util import newer_group
    for t in targets:
        if not os.path.exists(t):
            return True
    sources = API_FILES + list(source_files) + [__file__]
    # missing='newer' treats absent sources as newer, forcing a rebuild.
    if newer_group(sources, targets[0], missing='newer'):
        return True
    return False
# Those *Api classes instances know how to output strings for the generated code
class TypeApi:
    """Generates the C snippets for a PyTypeObject slot of the array-API table."""

    def __init__(self, name, index, ptr_cast, api_name):
        self.index = index
        self.name = name
        self.ptr_cast = ptr_cast
        self.api_name = api_name

    def define_from_array_api_string(self):
        # Client-side macro: dereference the table slot as a type object.
        parts = (self.name, self.ptr_cast, self.api_name, self.index)
        return "#define %s (*(%s *)%s[%d])" % parts

    def array_api_define(self):
        # Entry placed in the API table initializer.
        return " (void *) &%s" % self.name

    def internal_define(self):
        # Declaration compiled into numpy itself.
        return "extern NPY_NO_EXPORT PyTypeObject %s;\n" % self.name
class GlobalVarApi:
    """Generates the C snippets for a global-variable slot of the array-API table."""

    def __init__(self, name, index, type, api_name):
        self.name = name
        self.index = index
        self.type = type
        self.api_name = api_name

    def define_from_array_api_string(self):
        # Client-side macro: dereference the table slot as the variable.
        parts = (self.name, self.type, self.api_name, self.index)
        return "#define %s (*(%s *)%s[%d])" % parts

    def array_api_define(self):
        # Entry placed in the API table initializer.
        return " (%s *) &%s" % (self.type, self.name)

    def internal_define(self):
        # Declaration compiled into numpy itself.
        return "extern NPY_NO_EXPORT %s %s;\n" % (self.type, self.name)
# Dummy to be able to consistently use *Api instances for all items in the
# array api
class BoolValuesApi:
    """Generates the C snippets for the bool scalar singletons.

    Exists so every item in the array API table is handled through the
    same *Api interface.
    """

    def __init__(self, name, index, api_name):
        self.name = name
        self.index = index
        self.type = 'PyBoolScalarObject'
        self.api_name = api_name

    def define_from_array_api_string(self):
        # Unlike TypeApi, the slot is taken by address (no dereference).
        return "#define %s ((%s *)%s[%d])" % (self.name, self.type,
                                              self.api_name, self.index)

    def array_api_define(self):
        return " (void *) &%s" % self.name

    def internal_define(self):
        return ("extern NPY_NO_EXPORT PyBoolScalarObject "
                "_PyArrayScalar_BoolValues[2];\n")
class FunctionApi:
    """Generates the C snippets for a function-pointer slot of the
    array-API table."""

    def __init__(self, name, index, return_type, args, api_name):
        self.name = name
        self.index = index
        self.return_type = return_type
        self.args = args
        self.api_name = api_name

    def _argtypes_string(self):
        # C spelling of the argument types only (names dropped), run
        # through _repl; an empty argument list means a (void) prototype.
        if not self.args:
            return 'void'
        argstr = ', '.join([_repl(a[0]) for a in self.args])
        return argstr

    def define_from_array_api_string(self):
        # Client-side macro: cast the table slot to the correct function
        # pointer type and call through it.
        define = """\
#define %s \\\n        (*(%s (*)(%s)) \\
         %s[%d])""" % (self.name,
                       self.return_type,
                       self._argtypes_string(),
                       self.api_name,
                       self.index)
        return define

    def array_api_define(self):
        # Entry placed in the API table initializer.
        return " (void *) %s" % self.name

    def internal_define(self):
        # Declaration compiled into numpy itself.
        astr = """\
NPY_NO_EXPORT %s %s \\\n       (%s);""" % (self.return_type,
                                           self.name,
                                           self._argtypes_string())
        return astr
def order_dict(d):
    """Return d's items as a list sorted by (value, key)."""
    return sorted(d.items(), key=lambda item: (item[1], item[0]))
def merge_api_dicts(dicts):
    """Merge a sequence of dicts into one; later dicts win on duplicate keys."""
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged
def check_api_dict(d):
    """Check that an api dict is valid (does not use the same index twice).

    Raises ValueError if an index is duplicated or if the indexes do not
    form the contiguous range 0..len-1.
    """
    # We have if a same index is used twice: we 'revert' the dict so that index
    # become keys. If the length is different, it means one index has been used
    # at least twice
    revert_dict = dict([(v, k) for k, v in d.items()])
    if not len(revert_dict) == len(d):
        # We compute a dict index -> list of associated items
        doubled = {}
        for name, index in d.items():
            try:
                doubled[index].append(name)
            except KeyError:
                doubled[index] = [name]
        msg = """\
Same index has been used twice in api definition: %s
""" % ['index %d -> %s' % (index, names) for index, names in doubled.items() \
            if len(names) != 1]
        raise ValueError(msg)
    # No 'hole' in the indexes may be allowed, and it must starts at 0
    indexes = set(d.values())
    expected = set(range(len(indexes)))
    if not indexes == expected:
        diff = expected.symmetric_difference(indexes)
        msg = "There are some holes in the API indexing: " \
              "(symmetric diff is %s)" % diff
        raise ValueError(msg)
def get_api_functions(tagname, api_dict):
    """Parse the API source files and return the functions carrying
    *tagname*, ordered by their index in *api_dict*."""
    functions = []
    for path in API_FILES:
        functions.extend(find_functions(path, tagname))
    return sorted(functions, key=lambda func: api_dict[func.name])
def fullapi_hash(api_dicts):
    """Given a list of api dicts defining the numpy C API, compute a checksum
    of the list of items in the API (as a string)."""
    pieces = []
    for d in api_dicts:
        # Deterministic order: sort each dict's items by (index, name).
        for name, index in sorted(d.items(), key=lambda kv: (kv[1], kv[0])):
            pieces.append(name)
            pieces.append(str(index))
    return md5new(''.join(pieces).encode('ascii')).hexdigest()
# To parse strings like 'hex = checksum' where hex is e.g. 0x1234567F and
# checksum a 128 bits md5 checksum (hex format as well)
# (raw string: \d and \s are invalid escapes in a plain str literal and
# warn on modern Pythons)
VERRE = re.compile(r'(^0x[\da-f]{8})\s*=\s*([\da-f]{32})')
def get_versions_hash():
    """Return a dict mapping C API version number -> full md5 checksum,
    read from the cversions.txt file next to this script."""
    d = []
    file = os.path.join(os.path.dirname(__file__), 'cversions.txt')
    fid = open(file, 'r')
    try:
        for line in fid.readlines():
            m = VERRE.match(line)
            if m:
                # group(1) is the hex version, group(2) the 32-digit checksum.
                d.append((int(m.group(1), 16), m.group(2)))
    finally:
        fid.close()
    return dict(d)
def main():
    """Command-line driver: print each tagged function and its hash."""
    tagname = sys.argv[1]
    order_file = sys.argv[2]
    # NOTE(review): get_api_functions expects an api *dict* as its second
    # argument (it does api_dict[func.name]), but here it is handed the
    # order-file *path* -- this driver looks stale; confirm before use.
    functions = get_api_functions(tagname, order_file)
    # NOTE(review): on Python 3, md5 requires bytes; md5new(tagname) and
    # m.update(ah) below would need .encode('ascii') there.
    m = md5new(tagname)
    for func in functions:
        print(func)
        ah = func.api_hash()
        m.update(ah)
        print(hex(int(ah,16)))
    print(hex(int(m.hexdigest()[:8],16)))
if __name__ == '__main__':
main()
| gpl-3.0 |
thewizardplusplus/micro | micro/utilities.py | 1 | 1209 | import os.path
from . import function_type
MICRO_VERSION = '2.3'
HEXADECIMAL_NUMBER = '[A-Fa-f0-9]'
def extract_and_add_function(entity, functions):
    """Build a FunctionType for a parsed function definition and register it.

    Returns the constructed type.
    """
    # assumes entity.children[0] is the function-signature node with
    # [0]=name token, [1]=parameter list, [2]=return-type node -- TODO
    # confirm against the grammar used by the parser.
    entity_type = function_type.FunctionType(
        len(entity.children[0].children[1].children),
        function_type.make_type(entity.children[0].children[2].children[0]),
    )
    _add_to_functions(
        functions,
        entity.children[0].children[0].value,
        entity_type,
    )
    return entity_type
def extract_and_add_assignment(entity, functions):
    """Determine the declared type of a parsed assignment and register it.

    Returns the constructed type.
    """
    # assumes entity.children[0] is the assignment node with [0]=name token
    # and [1]=type/value node -- TODO confirm against the grammar.
    entity_type = function_type.make_type(entity.children[0].children[1])
    _add_to_functions(
        functions,
        entity.children[0].children[0].value,
        entity_type,
    )
    return entity_type
def make_arguments_processor(argument_handler):
    """Return a decorator that applies *argument_handler* to every
    positional argument before calling the wrapped function."""
    def decorate(function):
        def wrapper(*arguments):
            return function(*[argument_handler(arg) for arg in arguments])
        return wrapper
    return decorate
def get_base_path(filename):
    """Return the directory of *filename*, or None for stdin ('-') or no file."""
    if filename is None or filename == '-':
        return None
    return os.path.dirname(filename)
def _add_to_functions(functions, entity_name, entity_type):
if entity_name != '':
functions[entity_name] = entity_type
| mit |
bev-a-tron/pledge_service | lib/requests/packages/urllib3/exceptions.py | 330 | 3364 | # urllib3/exceptions.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
## Base Exceptions
class HTTPError(Exception):
    "Base exception for all errors raised by this module."
    pass
class PoolError(HTTPError):
    "Base exception for errors caused within a pool."
    def __init__(self, pool, message):
        self.pool = pool
        HTTPError.__init__(self, "%s: %s" % (pool, message))

    def __reduce__(self):
        # For pickling purposes: the pool itself is not picklable, so
        # reconstruct with placeholder arguments instead.
        return self.__class__, (None, None)
class RequestError(PoolError):
    "Base exception for PoolErrors that have associated URLs."
    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)

    def __reduce__(self):
        # For pickling purposes: drop the unpicklable pool, keep the URL.
        return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
    "Raised when SSL certificate validation or the TLS handshake fails in an HTTPS connection."
    pass
class ProxyError(HTTPError):
    "Raised when the connection to a proxy fails."
    pass
class ConnectionError(HTTPError):
    "Raised when a normal connection fails."
    # NOTE(review): shadows the Python 3 builtin ConnectionError within this
    # module; importers must qualify which one they mean.
    pass
class DecodeError(HTTPError):
    "Raised when automatic response-body decoding based on Content-Type fails."
    pass
## Leaf Exceptions
class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded."""

    def __init__(self, pool, url, reason=None):
        self.reason = reason
        if reason:
            cause = " (Caused by %s: %s)" % (type(reason), reason)
        else:
            cause = " (Caused by redirect)"
        message = "Max retries exceeded with url: %s" % url + cause
        RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
    "Raised when an existing pool gets a request for a foreign host."
    def __init__(self, pool, url, retries=3):
        message = "Tried to open a foreign host with url: %s" % url
        RequestError.__init__(self, pool, url, message)
        # Remaining retry budget the caller may use when re-dispatching.
        self.retries = retries
class TimeoutStateError(HTTPError):
    """ Raised when passing an invalid state to a timeout """
    pass
class TimeoutError(HTTPError):
    """ Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
    # NOTE(review): shadows the Python 3 builtin TimeoutError within this
    # module; importers must qualify which one they mean.
    pass
class ReadTimeoutError(TimeoutError, RequestError):
    "Raised when a socket timeout occurs while receiving data from a server"
    pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
    # No URL is available at connect time, hence no RequestError base.
    "Raised when a socket timeout occurs while connecting to a server"
    pass
class EmptyPoolError(PoolError):
    "Raised when a pool runs out of connections and no more are allowed."
    pass
class ClosedPoolError(PoolError):
    "Raised when a request enters a pool after the pool has been closed."
    pass
class LocationParseError(ValueError, HTTPError):
    # Inherits ValueError so callers catching ValueError keep working.
    "Raised when get_host or similar fails to parse the URL input."
    def __init__(self, location):
        message = "Failed to parse: %s" % location
        HTTPError.__init__(self, message)

        self.location = location
| apache-2.0 |
deerwalk/voltdb | tests/sqlcoverage/schema/sql-grammar-gen-schema.py | 1 | 14110 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# This file contains the schema for running SqlCoverage tests against a
# file of SQL statements randomly generated by the SQL-grammar-generator
# test application; as such, it is consistent with:
# voltdb/tests/sqlcoverage/ddl/sql-grammar-gen-DDL.sql, i.e.,
# voltdb/tests/sqlgrammar/DDL.sql.
{
"P0": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ()
},
"R0": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ()
},
"P1": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"R1": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"P2": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"R2": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"P3": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"R3": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"P4": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"R4": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"P5": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"R5": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
}
}
| agpl-3.0 |
juanyaw/python | cpython/Doc/includes/sqlite3/converter_point.py | 55 | 1192 | import sqlite3
class Point:
    """A 2-D point used to demonstrate sqlite3 adapters and converters."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return f"({self.x:f};{self.y:f})"
def adapt_point(point):
    """Serialize a Point to the ASCII b"x;y" bytes stored in the database."""
    text = "%f;%f" % (point.x, point.y)
    return text.encode('ascii')
def convert_point(s):
    """Deserialize bytes ``b"x;y"`` (adapt_point's format) into a Point."""
    x, y = (float(part) for part in s.split(b";"))
    return Point(x, y)
# Register the adapter (Point -> ASCII bytes) used when binding parameters.
sqlite3.register_adapter(Point, adapt_point)
# Register the converter (bytes -> Point) used when reading columns back.
sqlite3.register_converter("point", convert_point)
p = Point(4.0, -3.2)
#########################
# 1) Using declared types
con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
cur = con.cursor()
cur.execute("create table test(p point)")
cur.execute("insert into test(p) values (?)", (p,))
cur.execute("select p from test")
print("with declared types:", cur.fetchone()[0])
cur.close()
con.close()
#######################
# 2) Using column names
con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_COLNAMES)
cur = con.cursor()
cur.execute("create table test(p)")
cur.execute("insert into test(p) values (?)", (p,))
cur.execute('select p as "p [point]" from test')
print("with column names:", cur.fetchone()[0])
cur.close()
con.close()
| bsd-3-clause |
jgeewax/gcloud-python | vision/google/cloud/vision/safe.py | 3 | 3852 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Safe search class for information returned from annotating an image."""
from google.cloud.vision.likelihood import Likelihood
class SafeSearchAnnotation(object):
    """Representation of a SafeSearchAnnotation.

    :type adult_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood`
    :param adult_likelihood: Likelihood that image contains adult material.

    :type spoof_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood`
    :param spoof_likelihood: Likelihood that image is a spoof.

    :type medical_likelihood:
        :class:`~google.cloud.vision.likelihood.Likelihood`
    :param medical_likelihood: Likelihood that image contains medical material.

    :type violence_likelihood:
        :class:`~google.cloud.vision.likelihood.Likelihood`
    :param violence_likelihood: Likelihood that image contains violence.
    """

    def __init__(self, adult_likelihood, spoof_likelihood, medical_likelihood,
                 violence_likelihood):
        self._adult_likelihood = adult_likelihood
        self._spoof_likelihood = spoof_likelihood
        # Fixed attribute-name typo (was ``_medical_likeliehood``); the
        # attribute is private, so no caller is affected.
        self._medical_likelihood = medical_likelihood
        self._violence_likelihood = violence_likelihood

    @classmethod
    def from_api_repr(cls, response):
        """Factory: construct SafeSearchAnnotation from Vision API response.

        :type response: dict
        :param response: Dictionary response from Vision API with safe search
                         data.

        :rtype: :class:`~google.cloud.vision.safe.SafeSearchAnnotation`
        :returns: Instance of ``SafeSearchAnnotation``.
        """
        adult_likelihood = getattr(Likelihood, response['adult'])
        spoof_likelihood = getattr(Likelihood, response['spoof'])
        medical_likelihood = getattr(Likelihood, response['medical'])
        violence_likelihood = getattr(Likelihood, response['violence'])
        return cls(adult_likelihood, spoof_likelihood, medical_likelihood,
                   violence_likelihood)

    @property
    def adult(self):
        """Represents the adult contents likelihood for the image.

        :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`
        :returns: ``Likelihood`` of the image containing adult content.
        """
        return self._adult_likelihood

    @property
    def spoof(self):
        """The likelihood that an obvious modification was made to the image.

        :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`
        :returns: The ``Likelihood`` that an obvious modification was made to
                  the image's canonical version to make it appear funny or
                  offensive.
        """
        return self._spoof_likelihood

    @property
    def medical(self):
        """Likelihood this is a medical image.

        :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`
        :returns: The ``Likelihood`` that the image is medical in origin.
        """
        return self._medical_likelihood

    @property
    def violence(self):
        """Likelihood that this image contains violence.

        :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`
        :returns: The ``Likelihood`` that the image contains violence.
        """
        return self._violence_likelihood
| apache-2.0 |
dreispt/project-service | project_sla/analytic_account.py | 21 | 2967 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Daniel Reis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
import logging
_logger = logging.getLogger(__name__)
class AnalyticAccount(orm.Model):
    """ Add SLA to Analytic Accounts """
    _inherit = 'account.analytic.account'

    _columns = {
        'sla_ids': fields.many2many(
            'project.sla', string='Service Level Agreement'),
    }

    def _reapply_sla(self, cr, uid, ids, recalc_closed=False, context=None):
        """
        Force SLA recalculation on open documents that already are subject to
        this SLA Definition.
        To use after changing a Contract SLA or its Definitions.
        The ``recalc_closed`` flag allows to also recompute closed documents.
        """
        ctrl_obj = self.pool['project.sla.control']
        for contract in self.browse(cr, uid, ids, context=context):
            # for each contract, and for each model under SLA control ...
            ctrl_models = set([sla.control_model for sla in contract.sla_ids])
            for model_name in ctrl_models:
                model = self.pool[model_name]
                # Restrict to open documents (unfolded stages) unless the
                # caller asked to recompute closed ones too.
                base = [] if recalc_closed else [('stage_id.fold', '=', 0)]
                doc_ids = []
                # Documents may reference the contract directly ...
                if 'analytic_account_id' in model._columns:
                    domain = base + [
                        ('analytic_account_id', '=', contract.id)]
                    doc_ids += model.search(cr, uid, domain, context=context)
                # ... or indirectly, through their project.
                if 'project_id' in model._columns:
                    domain = base + [
                        ('project_id.analytic_account_id', '=', contract.id)]
                    doc_ids += model.search(cr, uid, domain, context=context)
                if doc_ids:
                    # (removed a redundant re-lookup of ``model`` here; it is
                    # already bound at the top of this loop iteration)
                    docs = model.browse(cr, uid, doc_ids, context=context)
                    ctrl_obj.store_sla_control(cr, uid, docs, context=context)
        return True

    def reapply_sla(self, cr, uid, ids, context=None):
        """ Reapply SLAs button action """
        return self._reapply_sla(cr, uid, ids, context=context)
| agpl-3.0 |
ychen820/microblog | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/mws/connection.py | 6 | 49712 | # Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import hashlib
import base64
import string
import collections
from boto.connection import AWSQueryConnection
from boto.exception import BotoServerError
import boto.mws.exception
import boto.mws.response
from boto.handler import XmlHandler
__all__ = ['MWSConnection']
api_version_path = {
'Feeds': ('2009-01-01', 'Merchant', '/'),
'Reports': ('2009-01-01', 'Merchant', '/'),
'Orders': ('2013-09-01', 'SellerId', '/Orders/2013-09-01'),
'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'),
'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'),
'Inbound': ('2010-10-01', 'SellerId',
'/FulfillmentInboundShipment/2010-10-01'),
'Outbound': ('2010-10-01', 'SellerId',
'/FulfillmentOutboundShipment/2010-10-01'),
'Inventory': ('2010-10-01', 'SellerId',
'/FulfillmentInventory/2010-10-01'),
'Recommendations': ('2013-04-01', 'SellerId',
'/Recommendations/2013-04-01'),
'CustomerInfo': ('2014-03-01', 'SellerId',
'/CustomerInformation/2014-03-01'),
'CartInfo': ('2014-03-01', 'SellerId',
'/CartInformation/2014-03-01'),
'Subscriptions': ('2013-07-01', 'SellerId',
'/Subscriptions/2013-07-01'),
'OffAmazonPayments': ('2013-01-01', 'SellerId',
'/OffAmazonPayments/2013-01-01'),
}
content_md5 = lambda c: base64.encodestring(hashlib.md5(c).digest()).strip()
decorated_attrs = ('action', 'response', 'section',
'quota', 'restore', 'version')
api_call_map = {}
def add_attrs_from(func, to):
    """Copy the MWS call-metadata attributes (``decorated_attrs``) from
    *func* onto *to* and return *to*; keeps action/quota/etc. visible
    through every decorator layer below.
    """
    for attr in decorated_attrs:
        setattr(to, attr, getattr(func, attr, None))
    return to
def structured_lists(*fields):
    """Decorator factory: expand list-valued keyword arguments into the
    numbered dotted keys MWS expects.

    Each entry in *fields* is ``'Key.Accessor'``; a kw item ``Key=[a, b]``
    is rewritten to ``Key.Accessor.1=a, Key.Accessor.2=b``.
    """
    def decorator(func):
        def wrapper(self, *args, **kw):
            for key, acc in [f.split('.') for f in fields]:
                if key in kw:
                    # Trailing '.' only when an accessor segment exists.
                    newkey = key + '.' + acc + (acc and '.' or '')
                    for i in range(len(kw[key])):
                        kw[newkey + str(i + 1)] = kw[key][i]
                    kw.pop(key)
            return func(self, *args, **kw)
        wrapper.__doc__ = "{0}\nLists: {1}".format(func.__doc__,
                                                   ', '.join(fields))
        return add_attrs_from(func, to=wrapper)
    return decorator
def http_body(field):
    """Decorator factory: move the keyword argument named *field* into the
    HTTP request body, setting Content-Type (caller must pass
    ``content_type``) and a Content-MD5 integrity header.
    Raises KeyError when *field* or ``content_type`` is missing.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            if any([f not in kw for f in (field, 'content_type')]):
                message = "{0} requires {1} and content_type arguments for " \
                          "building HTTP body".format(func.action, field)
                raise KeyError(message)
            kw['body'] = kw.pop(field)
            kw['headers'] = {
                'Content-Type': kw.pop('content_type'),
                'Content-MD5': content_md5(kw['body']),
            }
            return func(*args, **kw)
        wrapper.__doc__ = "{0}\nRequired HTTP Body: " \
                          "{1}".format(func.__doc__, field)
        return add_attrs_from(func, to=wrapper)
    return decorator
def destructure_object(value, into, prefix, members=False):
    """Recursively flatten *value* into the ``into`` dict using MWS dotted
    keys rooted at *prefix*.

    ResponseElements recurse over their ``__dict__``; mappings recurse per
    key (skipping private names); iterables are numbered from 1 (with an
    extra ``.member`` segment when *members* is true); booleans become
    'true'/'false'; other scalars are stored as-is.
    NOTE(review): relies on Python 2 ``basestring`` and the pre-3.10
    ``collections.Mapping``/``Iterable`` aliases.
    """
    if isinstance(value, boto.mws.response.ResponseElement):
        destructure_object(value.__dict__, into, prefix, members=members)
    elif isinstance(value, collections.Mapping):
        for name in value:
            # Skip private/internal attributes.
            if name.startswith('_'):
                continue
            destructure_object(value[name], into, prefix + '.' + name,
                               members=members)
    elif isinstance(value, basestring):
        into[prefix] = value
    elif isinstance(value, collections.Iterable):
        for index, element in enumerate(value):
            suffix = (members and '.member.' or '.') + str(index + 1)
            destructure_object(element, into, prefix + suffix,
                               members=members)
    elif isinstance(value, bool):
        into[prefix] = str(value).lower()
    else:
        into[prefix] = value
def structured_objects(*fields, **kwargs):
    """Decorator factory: flatten any of the named keyword arguments with
    ``destructure_object()`` before the call; pass ``members=True`` to add
    '.member' segments for list elements.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            members = kwargs.get('members', False)
            for field in filter(kw.has_key, fields):
                destructure_object(kw.pop(field), kw, field, members=members)
            return func(*args, **kw)
        wrapper.__doc__ = "{0}\nElement|Iter|Map: {1}\n" \
                          "(ResponseElement or anything iterable/dict-like)" \
                          .format(func.__doc__, ', '.join(fields))
        return add_attrs_from(func, to=wrapper)
    return decorator
def requires(*groups):
    """Decorator factory: require that exactly ONE of the argument *groups*
    (each a list of keyword names that must appear together) is satisfied;
    otherwise raise KeyError.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            # True when every name in the group was supplied.
            hasgroup = lambda x: len(x) == len(filter(kw.has_key, x))
            if 1 != len(filter(hasgroup, groups)):
                message = ' OR '.join(['+'.join(g) for g in groups])
                message = "{0} requires {1} argument(s)" \
                          "".format(func.action, message)
                raise KeyError(message)
            return func(*args, **kw)
        message = ' OR '.join(['+'.join(g) for g in groups])
        wrapper.__doc__ = "{0}\nRequired: {1}".format(func.__doc__,
                                                      message)
        return add_attrs_from(func, to=wrapper)
    return decorator
def exclusive(*groups):
    """Decorator factory: allow AT MOST one of the argument *groups* to be
    fully supplied (zero is also fine); otherwise raise KeyError.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            hasgroup = lambda x: len(x) == len(filter(kw.has_key, x))
            if len(filter(hasgroup, groups)) not in (0, 1):
                message = ' OR '.join(['+'.join(g) for g in groups])
                message = "{0} requires either {1}" \
                          "".format(func.action, message)
                raise KeyError(message)
            return func(*args, **kw)
        message = ' OR '.join(['+'.join(g) for g in groups])
        wrapper.__doc__ = "{0}\nEither: {1}".format(func.__doc__,
                                                    message)
        return add_attrs_from(func, to=wrapper)
    return decorator
def dependent(field, *groups):
    """Decorator factory: when *field* is supplied, at least one of the
    argument *groups* must be fully supplied as well; otherwise raise
    KeyError.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            hasgroup = lambda x: len(x) == len(filter(kw.has_key, x))
            if field in kw and 1 > len(filter(hasgroup, groups)):
                message = ' OR '.join(['+'.join(g) for g in groups])
                message = "{0} argument {1} requires {2}" \
                          "".format(func.action, field, message)
                raise KeyError(message)
            return func(*args, **kw)
        message = ' OR '.join(['+'.join(g) for g in groups])
        wrapper.__doc__ = "{0}\n{1} requires: {2}".format(func.__doc__,
                                                          field,
                                                          message)
        return add_attrs_from(func, to=wrapper)
    return decorator
def requires_some_of(*fields):
    """Decorator factory: require that AT LEAST one of *fields* appears in
    the keyword arguments; otherwise raise KeyError.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            if not filter(kw.has_key, fields):
                message = "{0} requires at least one of {1} argument(s)" \
                          "".format(func.action, ', '.join(fields))
                raise KeyError(message)
            return func(*args, **kw)
        wrapper.__doc__ = "{0}\nSome Required: {1}".format(func.__doc__,
                                                           ', '.join(fields))
        return add_attrs_from(func, to=wrapper)
    return decorator
def boolean_arguments(*fields):
    """Decorator factory: convert boolean values of the named keyword
    arguments to the lowercase strings 'true'/'false' expected by MWS.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            for field in [f for f in fields if isinstance(kw.get(f), bool)]:
                kw[field] = str(kw[field]).lower()
            return func(*args, **kw)
        wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__,
                                                      ', '.join(fields))
        return add_attrs_from(func, to=wrapper)
    return decorator
def api_action(section, quota, restore, *api):
    """Decorator factory binding a method to an MWS API action.

    Looks up version/access-key/path for *section* in ``api_version_path``,
    injects Action/Version and the seller access key into the request,
    builds a response parser via the connection's response factory, and
    registers the action in ``api_call_map``.  *api*, when given, overrides
    the CamelCase action name derived from the function name.
    """
    def decorator(func, quota=int(quota), restore=float(restore)):
        version, accesskey, path = api_version_path[section]
        action = ''.join(api or map(str.capitalize, func.func_name.split('_')))
        def wrapper(self, *args, **kw):
            # Default the access key (Merchant/SellerId) from the connection.
            kw.setdefault(accesskey, getattr(self, accesskey, None))
            if kw[accesskey] is None:
                message = "{0} requires {1} argument. Set the " \
                          "MWSConnection.{2} attribute?" \
                          "".format(action, accesskey, accesskey)
                raise KeyError(message)
            kw['Action'] = action
            kw['Version'] = version
            response = self._response_factory(action, connection=self)
            request = dict(path=path, quota=quota, restore=restore)
            return func(self, request, response, *args, **kw)
        # Expose metadata (action, version, quota, ...) on the wrapper.
        for attr in decorated_attrs:
            setattr(wrapper, attr, locals().get(attr))
        wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \
                          "{4}".format(action, version, quota, restore,
                                       func.__doc__)
        api_call_map[action] = func.func_name
        return wrapper
    return decorator
class MWSConnection(AWSQueryConnection):
    # Pluggable factories for parsed responses and error objects; both can
    # be overridden via constructor kwargs (see _setup_factories).
    ResponseFactory = boto.mws.response.ResponseFactory
    ResponseErrorFactory = boto.mws.exception.ResponseErrorFactory
    def __init__(self, *args, **kw):
        """Connection to Amazon MWS.  Accepts the usual AWSQueryConnection
        arguments plus ``sandbox``, ``Merchant``/``SellerId`` (aliases) and
        optional ``factory_scopes`` / factory overrides.
        """
        kw.setdefault('host', 'mws.amazonservices.com')
        self._sandboxed = kw.pop('sandbox', False)
        # Merchant and SellerId are interchangeable; either may be given.
        self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId')
        self.SellerId = kw.pop('SellerId', None) or self.Merchant
        kw = self._setup_factories(kw.pop('factory_scopes', []), **kw)
        super(MWSConnection, self).__init__(*args, **kw)
    def _setup_factories(self, extrascopes, **kw):
        """Install response/error factories from kwargs, or build the
        defaults searching *extrascopes* before the built-in scope.
        Returns the kwargs with the factory entries consumed.
        """
        for factory, (scope, Default) in {
            'response_factory':
                (boto.mws.response, self.ResponseFactory),
            'response_error_factory':
                (boto.mws.exception, self.ResponseErrorFactory),
        }.items():
            if factory in kw:
                # Caller supplied a ready-made factory instance.
                setattr(self, '_' + factory, kw.pop(factory))
            else:
                scopes = extrascopes + [scope]
                setattr(self, '_' + factory, Default(scopes=scopes))
        return kw
def _sandboxify(self, path):
if not self._sandboxed:
return path
splat = path.split('/')
splat[-2] += '_Sandbox'
return splat.join('/')
    def _required_auth_capability(self):
        # MWS requests use boto's dedicated 'mws' request-signing scheme.
        return ['mws']
def _post_request(self, request, params, parser, body='', headers=None):
"""Make a POST request, optionally with a content body,
and return the response, optionally as raw text.
"""
headers = headers or {}
path = self._sandboxify(request['path'])
request = self.build_base_http_request('POST', path, None, data=body,
params=params, headers=headers,
host=self.host)
try:
response = self._mexe(request, override_num_retries=None)
except BotoServerError, bs:
raise self._response_error_factor(bs.status, bs.reason, bs.body)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self._response_error_factory(response.status,
response.reason, body)
if response.status != 200:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self._response_error_factory(response.status,
response.reason, body)
digest = response.getheader('Content-MD5')
if digest is not None:
assert content_md5(body) == digest
contenttype = response.getheader('Content-Type')
return self._parse_response(parser, contenttype, body)
    def _parse_response(self, parser, contenttype, body):
        """Parse an XML response body into *parser* via SAX; non-XML
        payloads (e.g. flat-file report contents) are returned as raw text.
        """
        if not contenttype.startswith('text/xml'):
            return body
        handler = XmlHandler(parser, self)
        xml.sax.parseString(body, handler)
        return parser
def method_for(self, name):
"""Return the MWS API method referred to in the argument.
The named method can be in CamelCase or underlined_lower_case.
This is the complement to MWSConnection.any_call.action
"""
action = '_' in name and string.capwords(name, '_') or name
if action in api_call_map:
return getattr(self, api_call_map[action])
return None
    def iter_call(self, call, *args, **kw):
        """Pass a call name as the first argument and a generator
        is returned for the initial response and any continuation
        call responses made using the NextToken.
        """
        method = self.method_for(call)
        # NOTE(review): assert is stripped under python -O; an unknown call
        # name would then fail later with a TypeError instead.
        assert method, 'No call named "{0}"'.format(call)
        return self.iter_response(method(*args, **kw))
    def iter_response(self, response):
        """Pass a call's response as the initial argument and a
        generator is returned for the initial response and any
        continuation call responses made using the NextToken.
        """
        yield response
        # Follow the matching ...ByNextToken call while HasNext is 'true'.
        more = self.method_for(response._action + 'ByNextToken')
        while more and response._result.HasNext == 'true':
            response = more(NextToken=response._result.NextToken)
            yield response
    # ---- Feeds API section: thin wrappers around _post_request; the
    # decorators declare required arguments, list/body handling, and the
    # per-action throttling quota/restore rate. ----
    @boolean_arguments('PurgeAndReplace')
    @http_body('FeedContent')
    @structured_lists('MarketplaceIdList.Id')
    @requires(['FeedType'])
    @api_action('Feeds', 15, 120)
    def submit_feed(self, request, response, headers=None, body='', **kw):
        """Uploads a feed for processing by Amazon MWS.
        """
        headers = headers or {}
        return self._post_request(request, kw, response, body=body,
                                  headers=headers)
    @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type',
                      'FeedProcessingStatusList.Status')
    @api_action('Feeds', 10, 45)
    def get_feed_submission_list(self, request, response, **kw):
        """Returns a list of all feed submissions submitted in the
        previous 90 days.
        """
        return self._post_request(request, kw, response)
    @requires(['NextToken'])
    @api_action('Feeds', 0, 0)
    def get_feed_submission_list_by_next_token(self, request, response, **kw):
        """Returns a list of feed submissions using the NextToken parameter.
        """
        return self._post_request(request, kw, response)
    @structured_lists('FeedTypeList.Type', 'FeedProcessingStatusList.Status')
    @api_action('Feeds', 10, 45)
    def get_feed_submission_count(self, request, response, **kw):
        """Returns a count of the feeds submitted in the previous 90 days.
        """
        return self._post_request(request, kw, response)
    @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type')
    @api_action('Feeds', 10, 45)
    def cancel_feed_submissions(self, request, response, **kw):
        """Cancels one or more feed submissions and returns a
        count of the feed submissions that were canceled.
        """
        return self._post_request(request, kw, response)
    @requires(['FeedSubmissionId'])
    @api_action('Feeds', 15, 60)
    def get_feed_submission_result(self, request, response, **kw):
        """Returns the feed processing report.
        """
        return self._post_request(request, kw, response)
    def get_service_status(self, **kw):
        """Instruct the user on how to get service status.
        """
        # Deliberately raises: each API section has its own
        # get_<section>_service_status method; this tells the caller which.
        sections = ', '.join(map(str.lower, api_version_path.keys()))
        message = "Use {0}.get_(section)_service_status(), " \
                  "where (section) is one of the following: " \
                  "{1}".format(self.__class__.__name__, sections)
        raise AttributeError(message)
    # ---- Reports API section: thin wrappers around _post_request; the
    # decorators declare required arguments and throttling quotas. ----
    @structured_lists('MarketplaceIdList.Id')
    @boolean_arguments('ReportOptions=ShowSalesChannel')
    @requires(['ReportType'])
    @api_action('Reports', 15, 60)
    def request_report(self, request, response, **kw):
        """Creates a report request and submits the request to Amazon MWS.
        """
        return self._post_request(request, kw, response)
    @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type',
                      'ReportProcessingStatusList.Status')
    @api_action('Reports', 10, 45)
    def get_report_request_list(self, request, response, **kw):
        """Returns a list of report requests that you can use to get the
        ReportRequestId for a report.
        """
        return self._post_request(request, kw, response)
    @requires(['NextToken'])
    @api_action('Reports', 0, 0)
    def get_report_request_list_by_next_token(self, request, response, **kw):
        """Returns a list of report requests using the NextToken,
        which was supplied by a previous request to either
        GetReportRequestListByNextToken or GetReportRequestList, where
        the value of HasNext was true in that previous request.
        """
        return self._post_request(request, kw, response)
    @structured_lists('ReportTypeList.Type',
                      'ReportProcessingStatusList.Status')
    @api_action('Reports', 10, 45)
    def get_report_request_count(self, request, response, **kw):
        """Returns a count of report requests that have been submitted
        to Amazon MWS for processing.
        """
        return self._post_request(request, kw, response)
    @api_action('Reports', 10, 45)
    def cancel_report_requests(self, request, response, **kw):
        """Cancel one or more report requests, returning the count of the
        canceled report requests and the report request information.
        """
        return self._post_request(request, kw, response)
    @boolean_arguments('Acknowledged')
    @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type')
    @api_action('Reports', 10, 60)
    def get_report_list(self, request, response, **kw):
        """Returns a list of reports that were created in the previous
        90 days that match the query parameters.
        """
        return self._post_request(request, kw, response)
    @requires(['NextToken'])
    @api_action('Reports', 0, 0)
    def get_report_list_by_next_token(self, request, response, **kw):
        """Returns a list of reports using the NextToken, which
        was supplied by a previous request to either
        GetReportListByNextToken or GetReportList, where the
        value of HasNext was true in the previous call.
        """
        return self._post_request(request, kw, response)
    @boolean_arguments('Acknowledged')
    @structured_lists('ReportTypeList.Type')
    @api_action('Reports', 10, 45)
    def get_report_count(self, request, response, **kw):
        """Returns a count of the reports, created in the previous 90 days,
        with a status of _DONE_ and that are available for download.
        """
        return self._post_request(request, kw, response)
    @requires(['ReportId'])
    @api_action('Reports', 15, 60)
    def get_report(self, request, response, **kw):
        """Returns the contents of a report.
        """
        return self._post_request(request, kw, response)
    @requires(['ReportType', 'Schedule'])
    @api_action('Reports', 10, 45)
    def manage_report_schedule(self, request, response, **kw):
        """Creates, updates, or deletes a report request schedule for
        a specified report type.
        """
        return self._post_request(request, kw, response)
    @structured_lists('ReportTypeList.Type')
    @api_action('Reports', 10, 45)
    def get_report_schedule_list(self, request, response, **kw):
        """Returns a list of order report requests that are scheduled
        to be submitted to Amazon MWS for processing.
        """
        return self._post_request(request, kw, response)
    @requires(['NextToken'])
    @api_action('Reports', 0, 0)
    def get_report_schedule_list_by_next_token(self, request, response, **kw):
        """Returns a list of report requests using the NextToken,
        which was supplied by a previous request to either
        GetReportScheduleListByNextToken or GetReportScheduleList,
        where the value of HasNext was true in that previous request.
        """
        return self._post_request(request, kw, response)
    @structured_lists('ReportTypeList.Type')
    @api_action('Reports', 10, 45)
    def get_report_schedule_count(self, request, response, **kw):
        """Returns a count of order report requests that are scheduled
        to be submitted to Amazon MWS.
        """
        return self._post_request(request, kw, response)
    @boolean_arguments('Acknowledged')
    @requires(['ReportIdList'])
    @structured_lists('ReportIdList.Id')
    @api_action('Reports', 10, 45)
    def update_report_acknowledgements(self, request, response, **kw):
        """Updates the acknowledged status of one or more reports.
        """
        return self._post_request(request, kw, response)
    # ---- Fulfillment Inbound Shipment API section. ----
    @requires(['ShipFromAddress', 'InboundShipmentPlanRequestItems'])
    @structured_objects('ShipFromAddress', 'InboundShipmentPlanRequestItems')
    @api_action('Inbound', 30, 0.5)
    def create_inbound_shipment_plan(self, request, response, **kw):
        """Returns the information required to create an inbound shipment.
        """
        return self._post_request(request, kw, response)
    @requires(['ShipmentId', 'InboundShipmentHeader', 'InboundShipmentItems'])
    @structured_objects('InboundShipmentHeader', 'InboundShipmentItems')
    @api_action('Inbound', 30, 0.5)
    def create_inbound_shipment(self, request, response, **kw):
        """Creates an inbound shipment.
        """
        return self._post_request(request, kw, response)
    @requires(['ShipmentId'])
    @structured_objects('InboundShipmentHeader', 'InboundShipmentItems')
    @api_action('Inbound', 30, 0.5)
    def update_inbound_shipment(self, request, response, **kw):
        """Updates an existing inbound shipment. Amazon documentation
        is ambiguous as to whether the InboundShipmentHeader and
        InboundShipmentItems arguments are required.
        """
        return self._post_request(request, kw, response)
    @requires_some_of('ShipmentIdList', 'ShipmentStatusList')
    @structured_lists('ShipmentIdList.Id', 'ShipmentStatusList.Status')
    @api_action('Inbound', 30, 0.5)
    def list_inbound_shipments(self, request, response, **kw):
        """Returns a list of inbound shipments based on criteria that
        you specify.
        """
        return self._post_request(request, kw, response)
    @requires(['NextToken'])
    @api_action('Inbound', 30, 0.5)
    def list_inbound_shipments_by_next_token(self, request, response, **kw):
        """Returns the next page of inbound shipments using the NextToken
        parameter.
        """
        return self._post_request(request, kw, response)
    @requires(['ShipmentId'], ['LastUpdatedAfter', 'LastUpdatedBefore'])
    @api_action('Inbound', 30, 0.5)
    def list_inbound_shipment_items(self, request, response, **kw):
        """Returns a list of items in a specified inbound shipment, or a
        list of items that were updated within a specified time frame.
        """
        return self._post_request(request, kw, response)
    @requires(['NextToken'])
    @api_action('Inbound', 30, 0.5)
    def list_inbound_shipment_items_by_next_token(self, request, response, **kw):
        """Returns the next page of inbound shipment items using the
        NextToken parameter.
        """
        return self._post_request(request, kw, response)
    @api_action('Inbound', 2, 300, 'GetServiceStatus')
    def get_inbound_service_status(self, request, response, **kw):
        """Returns the operational status of the Fulfillment Inbound
        Shipment API section.
        """
        return self._post_request(request, kw, response)
    # ---- Fulfillment Inventory API section. ----
    @requires(['SellerSkus'], ['QueryStartDateTime'])
    @structured_lists('SellerSkus.member')
    @api_action('Inventory', 30, 0.5)
    def list_inventory_supply(self, request, response, **kw):
        """Returns information about the availability of a seller's
        inventory.
        """
        return self._post_request(request, kw, response)
    @requires(['NextToken'])
    @api_action('Inventory', 30, 0.5)
    def list_inventory_supply_by_next_token(self, request, response, **kw):
        """Returns the next page of information about the availability
        of a seller's inventory using the NextToken parameter.
        """
        return self._post_request(request, kw, response)
    @api_action('Inventory', 2, 300, 'GetServiceStatus')
    def get_inventory_service_status(self, request, response, **kw):
        """Returns the operational status of the Fulfillment Inventory
        API section.
        """
        return self._post_request(request, kw, response)
    # ---- Fulfillment Outbound Shipment API section. ----
    @requires(['PackageNumber'])
    @api_action('Outbound', 30, 0.5)
    def get_package_tracking_details(self, request, response, **kw):
        """Returns delivery tracking information for a package in
        an outbound shipment for a Multi-Channel Fulfillment order.
        """
        return self._post_request(request, kw, response)
    @structured_objects('Address', 'Items')
    @requires(['Address', 'Items'])
    @api_action('Outbound', 30, 0.5)
    def get_fulfillment_preview(self, request, response, **kw):
        """Returns a list of fulfillment order previews based on items
        and shipping speed categories that you specify.
        """
        return self._post_request(request, kw, response)
    @structured_objects('DestinationAddress', 'Items')
    @requires(['SellerFulfillmentOrderId', 'DisplayableOrderId',
               'ShippingSpeedCategory', 'DisplayableOrderDateTime',
               'DestinationAddress', 'DisplayableOrderComment',
               'Items'])
    @api_action('Outbound', 30, 0.5)
    def create_fulfillment_order(self, request, response, **kw):
        """Requests that Amazon ship items from the seller's inventory
        to a destination address.
        """
        return self._post_request(request, kw, response)
    @requires(['SellerFulfillmentOrderId'])
    @api_action('Outbound', 30, 0.5)
    def get_fulfillment_order(self, request, response, **kw):
        """Returns a fulfillment order based on a specified
        SellerFulfillmentOrderId.
        """
        return self._post_request(request, kw, response)
    @api_action('Outbound', 30, 0.5)
    def list_all_fulfillment_orders(self, request, response, **kw):
        """Returns a list of fulfillment orders fulfilled after (or
        at) a specified date or by fulfillment method.
        """
        return self._post_request(request, kw, response)
    @requires(['NextToken'])
    @api_action('Outbound', 30, 0.5)
    def list_all_fulfillment_orders_by_next_token(self, request, response, **kw):
        """Returns the next page of fulfillment orders using the
        NextToken parameter.
        """
        return self._post_request(request, kw, response)
    @requires(['SellerFulfillmentOrderId'])
    @api_action('Outbound', 30, 0.5)
    def cancel_fulfillment_order(self, request, response, **kw):
        """Requests that Amazon stop attempting to fulfill an existing
        fulfillment order.
        """
        return self._post_request(request, kw, response)
    @api_action('Outbound', 2, 300, 'GetServiceStatus')
    def get_outbound_service_status(self, request, response, **kw):
        """Returns the operational status of the Fulfillment Outbound
        API section.
        """
        return self._post_request(request, kw, response)
    # ---- Orders API section. ----
    @requires(['CreatedAfter'], ['LastUpdatedAfter'])
    @exclusive(['CreatedAfter'], ['LastUpdatedAfter'])
    @dependent('CreatedBefore', ['CreatedAfter'])
    @exclusive(['LastUpdatedAfter'], ['BuyerEmail'], ['SellerOrderId'])
    @dependent('LastUpdatedBefore', ['LastUpdatedAfter'])
    @exclusive(['CreatedAfter'], ['LastUpdatedBefore'])
    @requires(['MarketplaceId'])
    @structured_objects('OrderTotal', 'ShippingAddress',
                        'PaymentExecutionDetail')
    @structured_lists('MarketplaceId.Id', 'OrderStatus.Status',
                      'FulfillmentChannel.Channel', 'PaymentMethod.')
    @api_action('Orders', 6, 60)
    def list_orders(self, request, response, **kw):
        """Returns a list of orders created or updated during a time
        frame that you specify.
        """
        # Extra mutual-exclusion checks the decorators can't express:
        # BuyerEmail/SellerOrderId cannot be combined with each other or
        # with the filters below (already expanded to their dotted keys).
        toggle = set(('FulfillmentChannel.Channel.1',
                      'OrderStatus.Status.1', 'PaymentMethod.1',
                      'LastUpdatedAfter', 'LastUpdatedBefore'))
        for do, dont in {
            'BuyerEmail': toggle.union(['SellerOrderId']),
            'SellerOrderId': toggle.union(['BuyerEmail']),
        }.items():
            if do in kw and filter(kw.has_key, dont):
                message = "Don't include {0} when specifying " \
                          "{1}".format(' or '.join(dont), do)
                raise AssertionError(message)
        return self._post_request(request, kw, response)
    @requires(['NextToken'])
    @api_action('Orders', 6, 60)
    def list_orders_by_next_token(self, request, response, **kw):
        """Returns the next page of orders using the NextToken value
        that was returned by your previous request to either
        ListOrders or ListOrdersByNextToken.
        """
        return self._post_request(request, kw, response)
    @requires(['AmazonOrderId'])
    @structured_lists('AmazonOrderId.Id')
    @api_action('Orders', 6, 60)
    def get_order(self, request, response, **kw):
        """Returns an order for each AmazonOrderId that you specify.
        """
        return self._post_request(request, kw, response)
    @requires(['AmazonOrderId'])
    @api_action('Orders', 30, 2)
    def list_order_items(self, request, response, **kw):
        """Returns order item information for an AmazonOrderId that
        you specify.
        """
        return self._post_request(request, kw, response)
    @requires(['NextToken'])
    @api_action('Orders', 30, 2)
    def list_order_items_by_next_token(self, request, response, **kw):
        """Returns the next page of order items using the NextToken
        value that was returned by your previous request to either
        ListOrderItems or ListOrderItemsByNextToken.
        """
        return self._post_request(request, kw, response)
    @api_action('Orders', 2, 300, 'GetServiceStatus')
    def get_orders_service_status(self, request, response, **kw):
        """Returns the operational status of the Orders API section.
        """
        return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Query'])
@api_action('Products', 20, 20)
def list_matching_products(self, request, response, **kw):
"""Returns a list of products and their attributes, ordered
by relevancy, based on a search query that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 20)
def get_matching_product(self, request, response, **kw):
"""Returns a list of products and their attributes, based on
a list of ASIN values that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'IdType', 'IdList'])
@structured_lists('IdList.Id')
@api_action('Products', 20, 20)
def get_matching_product_for_id(self, request, response, **kw):
"""Returns a list of products and their attributes, based on
a list of Product IDs that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKUList'])
@structured_lists('SellerSKUList.SellerSKU')
@api_action('Products', 20, 10, 'GetCompetitivePricingForSKU')
def get_competitive_pricing_for_sku(self, request, response, **kw):
"""Returns the current competitive pricing of a product,
based on the SellerSKUs and MarketplaceId that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 10, 'GetCompetitivePricingForASIN')
def get_competitive_pricing_for_asin(self, request, response, **kw):
"""Returns the current competitive pricing of a product,
based on the ASINs and MarketplaceId that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKUList'])
@structured_lists('SellerSKUList.SellerSKU')
@api_action('Products', 20, 5, 'GetLowestOfferListingsForSKU')
def get_lowest_offer_listings_for_sku(self, request, response, **kw):
"""Returns the lowest price offer listings for a specific
product by item condition and SellerSKUs.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 5, 'GetLowestOfferListingsForASIN')
def get_lowest_offer_listings_for_asin(self, request, response, **kw):
"""Returns the lowest price offer listings for a specific
product by item condition and ASINs.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKU'])
@api_action('Products', 20, 20, 'GetProductCategoriesForSKU')
def get_product_categories_for_sku(self, request, response, **kw):
"""Returns the product categories that a SellerSKU belongs to.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASIN'])
@api_action('Products', 20, 20, 'GetProductCategoriesForASIN')
def get_product_categories_for_asin(self, request, response, **kw):
"""Returns the product categories that an ASIN belongs to.
"""
return self._post_request(request, kw, response)
@api_action('Products', 2, 300, 'GetServiceStatus')
def get_products_service_status(self, request, response, **kw):
"""Returns the operational status of the Products API section.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKUList'])
@structured_lists('SellerSKUList.SellerSKU')
@api_action('Products', 20, 10, 'GetMyPriceForSKU')
def get_my_price_for_sku(self, request, response, **kw):
"""Returns pricing information for your own offer listings, based on SellerSKU.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 10, 'GetMyPriceForASIN')
def get_my_price_for_asin(self, request, response, **kw):
"""Returns pricing information for your own offer listings, based on ASIN.
"""
return self._post_request(request, kw, response)
@api_action('Sellers', 15, 60)
def list_marketplace_participations(self, request, response, **kw):
"""Returns a list of marketplaces that the seller submitting
the request can sell in, and a list of participations that
include seller-specific information in that marketplace.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Sellers', 15, 60)
def list_marketplace_participations_by_next_token(self, request, response,
**kw):
"""Returns the next page of marketplaces and participations
using the NextToken value that was returned by your
previous request to either ListMarketplaceParticipations
or ListMarketplaceParticipationsByNextToken.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@api_action('Recommendations', 5, 2)
def get_last_updated_time_for_recommendations(self, request, response,
**kw):
"""Checks whether there are active recommendations for each category
for the given marketplace, and if there are, returns the time when
recommendations were last updated for each category.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@structured_lists('CategoryQueryList.CategoryQuery')
@api_action('Recommendations', 5, 2)
def list_recommendations(self, request, response, **kw):
"""Returns your active recommendations for a specific category or for
all categories for a specific marketplace.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Recommendations', 5, 2)
def list_recommendations_by_next_token(self, request, response, **kw):
"""Returns the next page of recommendations using the NextToken
parameter.
"""
return self._post_request(request, kw, response)
@api_action('Recommendations', 2, 300, 'GetServiceStatus')
def get_recommendations_service_status(self, request, response, **kw):
"""Returns the operational status of the Recommendations API section.
"""
return self._post_request(request, kw, response)
@api_action('CustomerInfo', 15, 12)
def list_customers(self, request, response, **kw):
"""Returns a list of customer accounts based on search criteria that
you specify.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('CustomerInfo', 50, 3)
def list_customers_by_next_token(self, request, response, **kw):
"""Returns the next page of customers using the NextToken parameter.
"""
return self._post_request(request, kw, response)
@requires(['CustomerIdList'])
@structured_lists('CustomerIdList.CustomerId')
@api_action('CustomerInfo', 15, 12)
    def get_customers_for_customer_id(self, request, response, **kw):
        """Returns customer accounts for the CustomerId values supplied
        in CustomerIdList (see the @requires and @structured_lists
        decorators above) — a direct id lookup, not a criteria search.
        """
        return self._post_request(request, kw, response)
@api_action('CustomerInfo', 2, 300, 'GetServiceStatus')
def get_customerinfo_service_status(self, request, response, **kw):
"""Returns the operational status of the Customer Information API
section.
"""
return self._post_request(request, kw, response)
@requires(['DateRangeStart'])
@api_action('CartInfo', 15, 12)
def list_carts(self, request, response, **kw):
"""Returns a list of shopping carts in your Webstore that were last
updated during the time range that you specify.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('CartInfo', 50, 3)
def list_carts_by_next_token(self, request, response, **kw):
"""Returns the next page of shopping carts using the NextToken
parameter.
"""
return self._post_request(request, kw, response)
@requires(['CartIdList'])
@structured_lists('CartIdList.CartId')
@api_action('CartInfo', 15, 12)
def get_carts(self, request, response, **kw):
"""Returns shopping carts based on the CartId values that you specify.
"""
return self._post_request(request, kw, response)
@api_action('CartInfo', 2, 300, 'GetServiceStatus')
def get_cartinfo_service_status(self, request, response, **kw):
"""Returns the operational status of the Cart Information API section.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def register_destination(self, request, response, **kw):
"""Specifies a new destination where you want to receive notifications.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def deregister_destination(self, request, response, **kw):
"""Removes an existing destination from the list of registered
destinations.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@api_action('Subscriptions', 25, 0.5)
def list_registered_destinations(self, request, response, **kw):
"""Lists all current destinations that you have registered.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def send_test_notification_to_destination(self, request, response, **kw):
"""Sends a test notification to an existing destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Subscription'])
@structured_objects('Subscription', members=True)
@api_action('Subscriptions', 25, 0.5)
def create_subscription(self, request, response, **kw):
"""Creates a new subscription for the specified notification type
and destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'NotificationType', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def get_subscription(self, request, response, **kw):
"""Gets the subscription for the specified notification type and
destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'NotificationType', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def delete_subscription(self, request, response, **kw):
"""Deletes the subscription for the specified notification type and
destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@api_action('Subscriptions', 25, 0.5)
def list_subscriptions(self, request, response, **kw):
"""Returns a list of all your current subscriptions.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Subscription'])
@structured_objects('Subscription', members=True)
@api_action('Subscriptions', 25, 0.5)
def update_subscription(self, request, response, **kw):
"""Updates the subscription for the specified notification type and
destination.
"""
return self._post_request(request, kw, response)
@api_action('Subscriptions', 2, 300, 'GetServiceStatus')
def get_subscriptions_service_status(self, request, response, **kw):
"""Returns the operational status of the Subscriptions API section.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId', 'OrderReferenceAttributes'])
@structured_objects('OrderReferenceAttributes')
@api_action('OffAmazonPayments', 10, 1)
def set_order_reference_details(self, request, response, **kw):
"""Sets order reference details such as the order total and a
description for the order.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 20, 2)
def get_order_reference_details(self, request, response, **kw):
"""Returns details about the Order Reference object and its current
state.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 10, 1)
def confirm_order_reference(self, request, response, **kw):
"""Confirms that the order reference is free of constraints and all
required information has been set on the order reference.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 10, 1)
def cancel_order_reference(self, request, response, **kw):
"""Cancel an order reference; all authorizations associated with
this order reference are also closed.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 10, 1)
def close_order_reference(self, request, response, **kw):
"""Confirms that an order reference has been fulfilled (fully
or partially) and that you do not expect to create any new
authorizations on this order reference.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId', 'AuthorizationReferenceId',
'AuthorizationAmount'])
@structured_objects('AuthorizationAmount')
@api_action('OffAmazonPayments', 10, 1)
def authorize(self, request, response, **kw):
"""Reserves a specified amount against the payment method(s) stored in
the order reference.
"""
return self._post_request(request, kw, response)
@requires(['AmazonAuthorizationId'])
@api_action('OffAmazonPayments', 20, 2)
def get_authorization_details(self, request, response, **kw):
"""Returns the status of a particular authorization and the total
amount captured on the authorization.
"""
return self._post_request(request, kw, response)
@requires(['AmazonAuthorizationId', 'CaptureReferenceId', 'CaptureAmount'])
@structured_objects('CaptureAmount')
@api_action('OffAmazonPayments', 10, 1)
def capture(self, request, response, **kw):
"""Captures funds from an authorized payment instrument.
"""
return self._post_request(request, kw, response)
@requires(['AmazonCaptureId'])
@api_action('OffAmazonPayments', 20, 2)
def get_capture_details(self, request, response, **kw):
"""Returns the status of a particular capture and the total amount
refunded on the capture.
"""
return self._post_request(request, kw, response)
@requires(['AmazonAuthorizationId'])
@api_action('OffAmazonPayments', 10, 1)
def close_authorization(self, request, response, **kw):
"""Closes an authorization.
"""
return self._post_request(request, kw, response)
@requires(['AmazonCaptureId', 'RefundReferenceId', 'RefundAmount'])
@structured_objects('RefundAmount')
@api_action('OffAmazonPayments', 10, 1)
def refund(self, request, response, **kw):
"""Refunds a previously captured amount.
"""
return self._post_request(request, kw, response)
@requires(['AmazonRefundId'])
@api_action('OffAmazonPayments', 20, 2)
def get_refund_details(self, request, response, **kw):
"""Returns the status of a particular refund.
"""
return self._post_request(request, kw, response)
@api_action('OffAmazonPayments', 2, 300, 'GetServiceStatus')
def get_offamazonpayments_service_status(self, request, response, **kw):
"""Returns the operational status of the Off-Amazon Payments API
section.
"""
return self._post_request(request, kw, response)
| bsd-3-clause |
tornadozou/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/sugar_test.py | 157 | 4205 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops
from tensorflow.contrib.labeled_tensor.python.ops import sugar
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class Base(test_util.Base):
  """Shared fixture: provides a minimal one-element LabeledTensor."""

  def setUp(self):
    super(Base, self).setUp()
    # Single-element tensor labeled with one axis 'x' of size 1.
    self.small_lt = core.LabeledTensor(constant_op.constant([1]), [('x', 1)])
class ReshapeCoderTest(Base):
  """Tests for sugar.ReshapeCoder round-tripping labeled image tensors."""

  def setUp(self):
    """Builds a 5-axis (batch, row, column, channel, mask) LabeledTensor."""
    super(ReshapeCoderTest, self).setUp()
    self.batch_size = 8
    self.num_rows = 50
    self.num_columns = 100
    self.channels = ['red', 'green', 'blue']
    self.masks = [False, True]
    # Sequential values so every element is distinguishable after reshapes.
    tensor = math_ops.range(0,
                            self.batch_size * self.num_rows * self.num_columns *
                            len(self.channels) * len(self.masks))
    tensor = array_ops.reshape(tensor, [
        self.batch_size, self.num_rows, self.num_columns, len(self.channels),
        len(self.masks)
    ])
    self.batch_axis = ('batch', range(self.batch_size))
    self.row_axis = ('row', range(self.num_rows))
    self.column_axis = ('column', range(self.num_columns))
    self.channel_axis = ('channel', self.channels)
    self.mask_axis = ('mask', self.masks)
    axes = [
        self.batch_axis, self.row_axis, self.column_axis, self.channel_axis,
        self.mask_axis
    ]
    self.masked_image_lt = core.LabeledTensor(tensor, axes)

  def test_name(self):
    """Encode/decode ops carry the expected name scopes."""
    rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
    encode_lt = rc.encode(self.masked_image_lt)
    decode_lt = rc.decode(encode_lt)
    self.assertIn('lt_reshape_encode', encode_lt.name)
    self.assertIn('lt_reshape_decode', decode_lt.name)

  def test_bijection_flat(self):
    """Folding (channel, mask) into a flat 'depth' axis is invertible."""
    rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
    encode_lt = rc.encode(self.masked_image_lt)
    golden_axes = core.Axes([
        self.batch_axis, self.row_axis, self.column_axis,
        ('depth', len(self.channels) * len(self.masks))
    ])
    self.assertEqual(encode_lt.axes, golden_axes)
    decode_lt = rc.decode(encode_lt)
    self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt)

  def test_bijection_with_labels(self):
    """Round trip also holds when the new axes carry explicit labels."""
    depth_axis = core.Axis('depth', range(len(self.channels) * len(self.masks)))
    rc = sugar.ReshapeCoder(['channel', 'mask'],
                            [depth_axis, ('other', ['label'])])
    encode_lt = rc.encode(self.masked_image_lt)
    golden_axes = core.Axes([
        self.batch_axis, self.row_axis, self.column_axis, depth_axis,
        ('other', ['label'])
    ])
    self.assertEqual(encode_lt.axes, golden_axes)
    decode_lt = rc.decode(encode_lt)
    self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt)

  def test_invalid_input(self):
    """decode() before encode(), or on mismatched axes, raises ValueError."""
    with self.assertRaises(ValueError):
      rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
      rc.decode(self.masked_image_lt)
    with self.assertRaises(ValueError):
      rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
      rc.encode(self.masked_image_lt)
      rc.encode(ops.select(self.masked_image_lt, {'channel': 'red'}))
if __name__ == '__main__':
test.main()
| apache-2.0 |
webOS-ports/qtwebkit | Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py | 124 | 3032 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.checkout.commitinfo import CommitInfo
from webkitpy.common.config.committers import CommitterList, Committer, Reviewer
class CommitInfoTest(unittest.TestCase):
    """Tests for CommitInfo construction from parsed ChangeLog data."""

    def test_commit_info_creation(self):
        """CommitInfo exposes the changelog fields directly and resolves
        the committer object from the committer list by email.
        """
        author = Committer("Author", "author@example.com")
        committer = Committer("Committer", "committer@example.com")
        reviewer = Reviewer("Reviewer", "reviewer@example.com")
        committer_list = CommitterList(committers=[author, committer], reviewers=[reviewer])

        changelog_data = {
            "bug_id": 1234,
            "author_name": "Committer",
            "author_email": "author@example.com",
            "author": author,
            "reviewer_text": "Reviewer",
            "reviewer": reviewer,
        }
        # Revision 123, landed by committer@example.com.
        commit = CommitInfo(123, "committer@example.com", changelog_data, committer_list)

        self.assertEqual(commit.revision(), 123)
        self.assertEqual(commit.bug_id(), 1234)
        self.assertEqual(commit.author_name(), "Committer")
        self.assertEqual(commit.author_email(), "author@example.com")
        self.assertEqual(commit.author(), author)
        self.assertEqual(commit.reviewer_text(), "Reviewer")
        self.assertEqual(commit.reviewer(), reviewer)
        # committer is looked up via the committer email, not changelog data.
        self.assertEqual(commit.committer(), committer)
        self.assertEqual(commit.committer_email(), "committer@example.com")
        self.assertEqual(commit.responsible_parties(), set([author, committer, reviewer]))
| gpl-2.0 |
Code-Fly/stackalytics | stackalytics/processor/vcs.py | 7 | 10961 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
from oslo_log import log as logging
import sh
import six
from stackalytics.processor import utils
LOG = logging.getLogger(__name__)
class Vcs(object):
    """Base adapter for a version control system.

    The base class only validates the checkout root directory; the
    fetch / log / get_last_id hooks are no-ops meant to be overridden
    by concrete backends such as Git.
    """

    def __init__(self, repo, sources_root):
        self.repo = repo
        self.sources_root = sources_root
        if os.path.exists(sources_root):
            # Root already present -- it just has to be writable.
            if not os.access(sources_root, os.W_OK):
                raise Exception('Sources root folder %s is not writable' %
                                sources_root)
        else:
            os.mkdir(sources_root)

    def fetch(self):
        """Synchronize the local copy; no-op in the base class."""
        pass

    def log(self, branch, head_commit_id):
        """Iterate commits of *branch*; no-op in the base class."""
        pass

    def get_last_id(self, branch):
        """Return the newest commit id on *branch*; no-op here."""
        pass
# (field name, git pretty-format placeholder) pairs extracted per commit.
GIT_LOG_PARAMS = [
    ('commit_id', '%H'),
    ('date', '%at'),
    ('author_name', '%an'),
    ('author_email', '%ae'),
    ('subject', '%s'),
    ('message', '%b'),
]
# --pretty format: one 'name:value' line per param, terminated by the
# 'diff_stat:' marker that precedes the --shortstat output.
GIT_LOG_FORMAT = ''.join([(r[0] + ':' + r[1] + '%n')
                          for r in GIT_LOG_PARAMS]) + 'diff_stat:'
# Parses shortstat lines like "2 files changed, 5 insertions(+), 1 deletion(-)".
DIFF_STAT_PATTERN = ('[^\d]+(\d+)\s+[^\s]*\s+changed'
                     '(,\s+(\d+)\s+([^\d\s]*)\s+(\d+)?)?')
# Splits the raw 'git log' output back into per-commit records.
GIT_LOG_PATTERN = re.compile(''.join([(r[0] + ':(.*?)\n')
                                      for r in GIT_LOG_PARAMS]) +
                             'diff_stat:(?P<diff_stat>.+?)(?=commit|\Z)',
                             re.DOTALL)

# Matches "Name <email>" (angle brackets optional) in co-author credits.
CO_AUTHOR_PATTERN_RAW = ('(?P<author_name>.+?)\s*'
                         '<?(?P<author_email>[\w\.-]+@[\w\.-]+)>?')
CO_AUTHOR_PATTERN = re.compile(CO_AUTHOR_PATTERN_RAW, re.IGNORECASE)

# Commit-message markers: Launchpad bug ids, blueprint slugs, Gerrit
# Change-Ids and co-author credits.
MESSAGE_PATTERNS = {
    'bug_id': re.compile(r'bug[\s#:]*(?P<id>\d+)', re.IGNORECASE),
    'blueprint_id': re.compile(r'\b(?:blueprint|bp)\b[ \t]*[#:]?[ \t]*'
                               r'(?P<id>[a-z0-9-]+)', re.IGNORECASE),
    'change_id': re.compile('Change-Id: (?P<id>I[0-9a-f]{40})', re.IGNORECASE),
    'coauthor': re.compile(r'(?:Co-Authored-By|Also-By|Co-Author):'
                           r'\s*(?P<id>%s)\s' % CO_AUTHOR_PATTERN_RAW,
                           re.IGNORECASE)
}
class Git(Vcs):
    """Vcs adapter that mirrors a git repository (via the ``sh`` module)
    and parses its history into commit dictionaries.

    NOTE(review): methods use os.chdir() to move into the clone, so this
    class is not safe for concurrent use from multiple threads.
    """

    def __init__(self, repo, sources_root):
        """Derive the local clone folder from the repo uri.

        :raises Exception: if the uri does not end in '<name>.git'.
        """
        super(Git, self).__init__(repo, sources_root)
        uri = self.repo['uri']
        match = re.search(r'([^/]+)\.git$', uri)
        if match:
            self.folder = os.path.normpath(self.sources_root + '/' +
                                           match.group(1))
        else:
            raise Exception('Unexpected uri %s for git' % uri)
        # commit_id -> release name; filled lazily by _get_release_index().
        self.release_index = {}

    def _checkout(self, branch):
        """Hard-reset the working copy and check out origin/<branch>.

        Returns True on success, False (after logging) if any git
        command fails. Assumes the cwd is already the clone folder.
        """
        try:
            sh.git('clean', '-d', '--force')
            sh.git('reset', '--hard')
            sh.git('checkout', 'origin/' + branch)
            return True
        except sh.ErrorReturnCode as e:
            LOG.error('Unable to checkout branch %(branch)s from repo '
                      '%(uri)s. Ignore it',
                      {'branch': branch, 'uri': self.repo['uri']})
            LOG.exception(e)
            return False

    def fetch(self):
        """Clone or update the local mirror, then build the release index.

        If an existing clone points at a different origin url, it is
        deleted and re-cloned. All git failures are logged and swallowed
        so one broken repo does not abort processing.

        :returns: the commit_id -> release mapping ({} on failure).
        """
        LOG.debug('Fetching repo uri %s', self.repo['uri'])
        if os.path.exists(self.folder):
            os.chdir(self.folder)
            try:
                uri = str(
                    sh.git('config', '--get', 'remote.origin.url')).strip()
            except sh.ErrorReturnCode as e:
                LOG.error('Unable to get config for git repo %s. Ignore it',
                          self.repo['uri'])
                LOG.exception(e)
                return {}
            if uri != self.repo['uri']:
                # Origin changed: drop the stale clone and start over.
                LOG.warn('Repo uri %(uri)s differs from cloned %(old)s',
                         {'uri': self.repo['uri'], 'old': uri})
                os.chdir('..')
                shutil.rmtree(self.folder)
        if not os.path.exists(self.folder):
            os.chdir(self.sources_root)
            try:
                sh.git('clone', self.repo['uri'])
                os.chdir(self.folder)
            except sh.ErrorReturnCode as e:
                LOG.error('Unable to clone git repo %s. Ignore it',
                          self.repo['uri'])
                LOG.exception(e)
        else:
            os.chdir(self.folder)
            try:
                sh.git('fetch')
            except sh.ErrorReturnCode as e:
                LOG.error('Unable to fetch git repo %s. Ignore it',
                          self.repo['uri'])
                LOG.exception(e)
        return self._get_release_index()

    def _get_release_index(self):
        """Map every commit id to the release that contains it.

        Walks each configured release (branch defaults to 'master'),
        listing commits over tag_from..tag_to (or just tag_to). The
        result is cached in self.release_index; a non-empty cache is
        returned as-is.
        """
        if not os.path.exists(self.folder):
            return {}
        LOG.debug('Get release index for repo uri: %s', self.repo['uri'])
        os.chdir(self.folder)
        if not self.release_index:
            for release in self.repo.get('releases', []):
                release_name = release['release_name'].lower()
                if 'branch' in release:
                    branch = release['branch']
                else:
                    branch = 'master'
                if not self._checkout(branch):
                    continue
                if 'tag_from' in release:
                    tag_range = release['tag_from'] + '..' + release['tag_to']
                else:
                    tag_range = release['tag_to']
                try:
                    git_log_iterator = sh.git('log', '--pretty=%H', tag_range,
                                              _tty_out=False)
                    for commit_id in git_log_iterator:
                        self.release_index[commit_id.strip()] = release_name
                except sh.ErrorReturnCode as e:
                    LOG.error('Unable to get log of git repo %s. Ignore it',
                              self.repo['uri'])
                    LOG.exception(e)
        return self.release_index

    def log(self, branch, head_commit_id):
        """Yield commit dicts parsed from 'git log' on *branch*.

        :param branch: branch to walk (checked out as origin/<branch>)
        :param head_commit_id: last already-processed commit; when set,
            only newer commits (head_commit_id..HEAD) are yielded
        :returns: generator of commit dicts with author, diff-stat,
            message-marker and release fields; commits whose release is
            'ignored' or whose author email is empty are skipped
        """
        LOG.debug('Parsing git log for repo uri %s', self.repo['uri'])
        os.chdir(self.folder)
        if not self._checkout(branch):
            return
        commit_range = 'HEAD'
        if head_commit_id:
            commit_range = head_commit_id + '..HEAD'
        try:
            output = sh.git('log', '--pretty=' + GIT_LOG_FORMAT, '--shortstat',
                            '-M', '--no-merges', commit_range, _tty_out=False,
                            _decode_errors='ignore', _encoding='utf8')
        except sh.ErrorReturnCode as e:
            LOG.error('Unable to get log of git repo %s. Ignore it',
                      self.repo['uri'])
            LOG.exception(e)
            return
        for rec in re.finditer(GIT_LOG_PATTERN, six.text_type(output)):
            # Groups 1..n line up with GIT_LOG_PARAMS order.
            i = 1
            commit = {}
            for param in GIT_LOG_PARAMS:
                commit[param[0]] = rec.group(i)
                i += 1
            if not commit['author_email']:
                # ignore commits with empty email (there are some < Essex)
                continue
            commit['author_email'] = utils.keep_safe_chars(
                commit['author_email'])
            diff_stat_str = rec.group('diff_stat')
            diff_rec = re.search(DIFF_STAT_PATTERN, diff_stat_str)
            if diff_rec:
                files_changed = int(diff_rec.group(1))
                lines_changed_group = diff_rec.group(2)
                lines_changed = diff_rec.group(3)
                deleted_or_inserted = diff_rec.group(4)
                lines_deleted = diff_rec.group(5)
                if lines_changed_group:  # there inserted or deleted lines
                    if not lines_deleted:
                        # shortstat printed only one of the two counters;
                        # decide by the word ('deletions' vs 'insertions').
                        if deleted_or_inserted[0] == 'd':  # deleted
                            lines_deleted = lines_changed
                            lines_changed = 0
            else:
                files_changed = 0
                lines_changed = 0
                lines_deleted = 0
            commit['files_changed'] = files_changed
            commit['lines_added'] = int(lines_changed or 0)
            commit['lines_deleted'] = int(lines_deleted or 0)

            # Extract bug/blueprint/change-id/coauthor markers from the
            # commit message body.
            for pattern_name, pattern in six.iteritems(MESSAGE_PATTERNS):
                collection = set()
                for item in re.finditer(pattern, commit['message']):
                    collection.add(item.group('id'))
                if collection:
                    commit[pattern_name] = list(collection)

            commit['date'] = int(commit['date'])
            commit['module'] = self.repo['module']
            commit['branches'] = set([branch])
            if commit['commit_id'] in self.release_index:
                commit['release'] = self.release_index[commit['commit_id']]
            else:
                commit['release'] = None
            if commit['release'] == 'ignored':
                # drop commits that are marked by 'ignored' release
                continue

            if 'blueprint_id' in commit:
                # Qualify blueprint names with the module.
                commit['blueprint_id'] = [(commit['module'] + ':' + bp_name)
                                          for bp_name
                                          in commit['blueprint_id']]

            if 'coauthor' in commit:
                # Keep only coauthors with a parseable, valid email.
                verified_coauthors = []
                for coauthor in commit['coauthor']:
                    m = re.match(CO_AUTHOR_PATTERN, coauthor)
                    if m and utils.check_email_validity(
                            m.group("author_email")):
                        verified_coauthors.append(m.groupdict())

                if verified_coauthors:
                    commit['coauthor'] = verified_coauthors
                else:
                    del commit['coauthor']  # no valid authors

            yield commit

    def get_last_id(self, branch):
        """Return the HEAD commit id of origin/<branch>, or None on error."""
        LOG.debug('Get head commit for repo uri: %s', self.repo['uri'])
        os.chdir(self.folder)
        if not self._checkout(branch):
            return None
        try:
            return str(sh.git('rev-parse', 'HEAD')).strip()
        except sh.ErrorReturnCode as e:
            LOG.error('Unable to get HEAD for git repo %s. Ignore it',
                      self.repo['uri'])
            LOG.exception(e)
            return None
def get_vcs(repo, sources_root):
    """Instantiate the VCS adapter appropriate for *repo*.

    :param repo: repo descriptor dict; repo['uri'] selects the backend
    :param sources_root: directory under which working copies are kept
    :returns: a Git adapter for '*.git' uris, otherwise a no-op Vcs
    """
    uri = repo['uri']
    LOG.debug('Factory is asked for VCS uri: %s', uri)
    match = re.search(r'\.git$', uri)
    if match:
        return Git(repo, sources_root)
    else:
        LOG.warning('Unsupported VCS, fallback to dummy')
        # Bug fix: the fallback previously passed the uri as the second
        # argument, but Vcs.__init__(repo, sources_root) treats it as
        # the checkout root and would try os.mkdir(uri).
        return Vcs(repo, sources_root)
| apache-2.0 |
Jgarcia-IAS/SITE | openerp/addons/base/module/wizard/__init__.py | 365 | 1250 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_module_update
import base_language_install
import base_import_language
import base_module_upgrade
import base_module_configuration
import base_export_language
import base_update_translations
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rven/odoo | addons/test_mass_mailing/tests/test_blacklist_mixin.py | 5 | 3060 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.test_mass_mailing.models.mailing_models import MailingBLacklist
from odoo.addons.test_mass_mailing.tests import common
from odoo.exceptions import UserError
from odoo.tests.common import users
class TestBLMixin(common.TestMassMailCommon):
    """Tests for the mail blacklist mixin: ``is_blacklisted`` computation
    and its custom search implementation."""

    @classmethod
    def setUpClass(cls):
        super(TestBLMixin, cls).setUpClass()
        # Seed one active and one archived blacklist entry; the mixed-case
        # addresses also exercise case-insensitive matching.
        cls.env['mail.blacklist'].create([{
            'email': 'Arya.Stark@example.com',
            'active': True,
        }, {
            'email': 'Sansa.Stark@example.com',
            'active': False,
        }])

    @users('employee')
    def test_bl_mixin_primary_field_consistency(self):
        """_primary_email must name an existing field (as a string);
        anything else raises when the blacklist search runs.

        NOTE(review): this mutates a class attribute shared across tests;
        it is reset to 'email_from' at the end, with no try/finally guard.
        """
        MailingBLacklist._primary_email = 'not_a_field'
        with self.assertRaises(UserError):
            self.env['mailing.test.blacklist'].search([('is_blacklisted', '=', False)])

        MailingBLacklist._primary_email = ['not_a_str']
        with self.assertRaises(UserError):
            self.env['mailing.test.blacklist'].search([('is_blacklisted', '=', False)])

        # Valid configuration: the search must complete without raising.
        MailingBLacklist._primary_email = 'email_from'
        self.env['mailing.test.blacklist'].search([('is_blacklisted', '=', False)])

    @users('employee')
    def test_bl_mixin_is_blacklisted(self):
        """ Test is_blacklisted field computation """
        record = self.env['mailing.test.blacklist'].create({'email_from': 'arya.stark@example.com'})
        self.assertTrue(record.is_blacklisted)

        record = self.env['mailing.test.blacklist'].create({'email_from': 'not.arya.stark@example.com'})
        self.assertFalse(record.is_blacklisted)

    @users('employee')
    def test_bl_mixin_search_blacklisted(self):
        """ Test is_blacklisted field search implementation """
        record1 = self.env['mailing.test.blacklist'].create({'email_from': 'arya.stark@example.com'})
        record2 = self.env['mailing.test.blacklist'].create({'email_from': 'not.arya.stark@example.com'})

        # '= False' and '!= True' must behave identically (negative forms).
        search_res = self.env['mailing.test.blacklist'].search([('is_blacklisted', '=', False)])
        self.assertEqual(search_res, record2)

        search_res = self.env['mailing.test.blacklist'].search([('is_blacklisted', '!=', True)])
        self.assertEqual(search_res, record2)

        # '= True' and '!= False' must behave identically (positive forms).
        search_res = self.env['mailing.test.blacklist'].search([('is_blacklisted', '=', True)])
        self.assertEqual(search_res, record1)

        search_res = self.env['mailing.test.blacklist'].search([('is_blacklisted', '!=', False)])
        self.assertEqual(search_res, record1)

    @users('employee')
    def test_bl_mixin_search_blacklisted_format(self):
        """ Test is_blacklisted field search using email parsing """
        # The stored value is a full "Name <email>" pair; the search must
        # still match on the parsed email address alone.
        record1 = self.env['mailing.test.blacklist'].create({'email_from': 'Arya Stark <arya.stark@example.com>'})
        self.assertTrue(record1.is_blacklisted)

        search_res = self.env['mailing.test.blacklist'].search([('is_blacklisted', '=', True)])
        self.assertEqual(search_res, record1)
| agpl-3.0 |
davidbgk/udata | udata/tests/frontend/test_territories.py | 1 | 1165 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from flask import url_for
from udata.tests.features.territories.test_territories_process import (
create_geozones_fixtures
)
from udata.tests.frontend import FrontTestCase
class TerritoriesTest(FrontTestCase):
    """Territory pages must 404 while territory levels are deactivated."""

    modules = ['features.territories', 'admin', 'search', 'core.dataset',
               'core.reuse', 'core.site', 'core.organization']

    def setUp(self):
        super(TerritoriesTest, self).setUp()
        self.paca, self.bdr, self.arles = create_geozones_fixtures()

    def _get_territory_page(self, zone):
        # Fetch the public territory page for the given geozone.
        return self.client.get(
            url_for('territories.territory', territory=zone))

    def test_towns(self):
        # By default towns are deactivated.
        self.assert404(self._get_territory_page(self.arles))

    def test_counties(self):
        # By default counties are deactivated.
        self.assert404(self._get_territory_page(self.bdr))

    def test_regions(self):
        # By default regions are deactivated.
        self.assert404(self._get_territory_page(self.paca))
| agpl-3.0 |
kpbochenek/empireofcode | call_base.py | 1 | 1180 | # kpbochenek@gmail.com
def total_cost(calls):
    """Return the total cost of a sequence of phone calls.

    Each call is a string ``"YYYY-MM-DD HH:MM:SS duration"`` where the
    duration is in seconds.  Billing rules (encoded by the asserts in
    ``__main__``):

    * every started minute is billed, i.e. duration is rounded UP to
      whole minutes;
    * per calendar day, the first 100 minutes cost 1 each and every
      minute above 100 costs 2.

    The previous implementation rounded half-down, applied the 100-minute
    threshold per call instead of per day, mis-capped the cheap minutes
    with ``min(100, mins - 100)`` and left a debug ``print`` behind — it
    failed its own asserts.

    :param calls: iterable of call description strings
    :return: total cost as an int
    """
    minutes_per_day = {}
    for call in calls:
        date, _time, seconds = call.split(' ')
        # Ceiling division without floats: bill every started minute.
        minutes = -(-int(seconds) // 60)
        minutes_per_day[date] = minutes_per_day.get(date, 0) + minutes
    return sum(min(mins, 100) + max(0, mins - 100) * 2
               for mins in minutes_per_day.values())
if __name__ == '__main__':
    # These "asserts" using for checking and not necessary for auto-testing
    # The expected values encode the billing rules: minutes are rounded UP
    # (181 s -> 4 min) and the 100-minute threshold applies per calendar day.
    assert total_cost(("2014-01-01 01:12:13 181",
                       "2014-01-02 20:11:10 600",
                       "2014-01-03 01:12:13 6009",
                       "2014-01-03 12:13:55 200")) == 124, "Base example"
    # Four one-second calls each bill one started minute.
    assert total_cost(("2014-02-05 01:00:00 1",
                       "2014-02-05 02:00:00 1",
                       "2014-02-05 03:00:00 1",
                       "2014-02-05 04:00:00 1")) == 4, "Short calls but money"
    # 103 minutes in one day: 100 at the base rate + 3 at the doubled rate.
    assert total_cost(("2014-02-05 01:00:00 60",
                       "2014-02-05 02:00:00 60",
                       "2014-02-05 03:00:00 60",
                       "2014-02-05 04:00:00 6000")) == 106, "Precise calls"
    print("All set? Click 'Check' to review your code and earn rewards!")
| apache-2.0 |
gauravbose/digital-menu | django/core/management/commands/dumpdata.py | 82 | 8474 | import warnings
from collections import OrderedDict
from django.apps import apps
from django.core import serializers
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, router
from django.utils.deprecation import RemovedInDjango19Warning
class Command(BaseCommand):
    """``dumpdata`` management command: serialize database contents into a
    fixture on stdout or in a file."""

    help = ("Output the contents of the database as a fixture of the given "
            "format (using each model's default manager unless --all is "
            "specified).")

    def add_arguments(self, parser):
        """Register the command-line options on the argparse parser."""
        parser.add_argument('args', metavar='app_label[.ModelName]', nargs='*',
            help='Restricts dumped data to the specified app_label or app_label.ModelName.')
        parser.add_argument('--format', default='json', dest='format',
            help='Specifies the output serialization format for fixtures.')
        parser.add_argument('--indent', default=None, dest='indent', type=int,
            help='Specifies the indent level to use when pretty-printing output.')
        parser.add_argument('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Nominates a specific database to dump fixtures from. '
                 'Defaults to the "default" database.')
        parser.add_argument('-e', '--exclude', dest='exclude', action='append', default=[],
            help='An app_label or app_label.ModelName to exclude '
                 '(use multiple --exclude to exclude multiple apps/models).')
        parser.add_argument('-n', '--natural', action='store_true', dest='use_natural_keys', default=False,
            help='Use natural keys if they are available (deprecated: use --natural-foreign instead).')
        parser.add_argument('--natural-foreign', action='store_true', dest='use_natural_foreign_keys', default=False,
            help='Use natural foreign keys if they are available.')
        parser.add_argument('--natural-primary', action='store_true', dest='use_natural_primary_keys', default=False,
            help='Use natural primary keys if they are available.')
        parser.add_argument('-a', '--all', action='store_true', dest='use_base_manager', default=False,
            help="Use Django's base manager to dump all models stored in the database, "
                 "including those that would otherwise be filtered or modified by a custom manager.")
        parser.add_argument('--pks', dest='primary_keys',
            help="Only dump objects with given primary keys. "
                 "Accepts a comma separated list of keys. "
                 "This option will only work when you specify one model.")
        parser.add_argument('-o', '--output', default=None, dest='output',
            help='Specifies file to which the output is written.')

    def handle(self, *app_labels, **options):
        """Resolve the requested apps/models, then serialize them.

        Raises CommandError on unknown apps/models/formats or on invalid
        option combinations (e.g. --pks with more than one model).
        """
        format = options.get('format')
        indent = options.get('indent')
        using = options.get('database')
        excludes = options.get('exclude')
        output = options.get('output')
        show_traceback = options.get('traceback')
        use_natural_keys = options.get('use_natural_keys')
        if use_natural_keys:
            # Legacy -n/--natural is deprecated; it now simply implies
            # --natural-foreign (see the "or" below).
            warnings.warn("``--natural`` is deprecated; use ``--natural-foreign`` instead.",
                RemovedInDjango19Warning)
        use_natural_foreign_keys = options.get('use_natural_foreign_keys') or use_natural_keys
        use_natural_primary_keys = options.get('use_natural_primary_keys')
        use_base_manager = options.get('use_base_manager')
        pks = options.get('primary_keys')

        if pks:
            primary_keys = pks.split(',')
        else:
            primary_keys = []

        # Resolve each --exclude entry into either an excluded model
        # ("app.Model") or an excluded app config ("app").
        excluded_apps = set()
        excluded_models = set()
        for exclude in excludes:
            if '.' in exclude:
                try:
                    model = apps.get_model(exclude)
                except LookupError:
                    raise CommandError('Unknown model in excludes: %s' % exclude)
                excluded_models.add(model)
            else:
                try:
                    app_config = apps.get_app_config(exclude)
                except LookupError:
                    raise CommandError('Unknown app in excludes: %s' % exclude)
                excluded_apps.add(app_config)

        if len(app_labels) == 0:
            if primary_keys:
                raise CommandError("You can only use --pks option with one model")
            # No labels given: dump every installed app that has models.
            # A value of None means "all models of this app".
            app_list = OrderedDict((app_config, None)
                for app_config in apps.get_app_configs()
                if app_config.models_module is not None and app_config not in excluded_apps)
        else:
            if len(app_labels) > 1 and primary_keys:
                raise CommandError("You can only use --pks option with one model")
            app_list = OrderedDict()
            for label in app_labels:
                try:
                    # "app_label.ModelName" form; ValueError if no dot.
                    app_label, model_label = label.split('.')
                    try:
                        app_config = apps.get_app_config(app_label)
                    except LookupError:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app_config.models_module is None or app_config in excluded_apps:
                        continue
                    try:
                        model = app_config.get_model(model_label)
                    except LookupError:
                        raise CommandError("Unknown model: %s.%s" % (app_label, model_label))

                    app_list_value = app_list.setdefault(app_config, [])

                    # We may have previously seen a "all-models" request for
                    # this app (no model qualifier was given). In this case
                    # there is no need adding specific models to the list.
                    if app_list_value is not None:
                        if model not in app_list_value:
                            app_list_value.append(model)
                except ValueError:
                    if primary_keys:
                        raise CommandError("You can only use --pks option with one model")
                    # This is just an app - no model qualifier
                    app_label = label
                    try:
                        app_config = apps.get_app_config(app_label)
                    except LookupError:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app_config.models_module is None or app_config in excluded_apps:
                        continue
                    app_list[app_config] = None

        # Check that the serialization format exists; this is a shortcut to
        # avoid collating all the objects and _then_ failing.
        if format not in serializers.get_public_serializer_formats():
            try:
                serializers.get_serializer(format)
            except serializers.SerializerDoesNotExist:
                pass

            # Reached for any non-public format, whether or not a serializer
            # exists for it: only public formats are usable for dumpdata.
            raise CommandError("Unknown serialization format: %s" % format)

        def get_objects():
            # Collate the objects to be serialized.
            for model in serializers.sort_dependencies(app_list.items()):
                if model in excluded_models:
                    continue
                if not model._meta.proxy and router.allow_migrate_model(using, model):
                    if use_base_manager:
                        objects = model._base_manager
                    else:
                        objects = model._default_manager

                    queryset = objects.using(using).order_by(model._meta.pk.name)
                    if primary_keys:
                        queryset = queryset.filter(pk__in=primary_keys)
                    for obj in queryset.iterator():
                        yield obj

        try:
            self.stdout.ending = None
            stream = open(output, 'w') if output else None
            try:
                serializers.serialize(format, get_objects(), indent=indent,
                        use_natural_foreign_keys=use_natural_foreign_keys,
                        use_natural_primary_keys=use_natural_primary_keys,
                        stream=stream or self.stdout)
            finally:
                if stream:
                    stream.close()
        except Exception as e:
            if show_traceback:
                raise
            raise CommandError("Unable to serialize database: %s" % e)
| bsd-3-clause |
kvar/ansible | test/units/modules/network/icx/test_icx_user.py | 21 | 7584 | # Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.icx import icx_user
from units.modules.utils import set_module_args
from .icx_module import TestICXModule, load_fixture
class TestICXSCPModule(TestICXModule):
    """Unit tests for the ``icx_user`` module: creating, updating and
    deleting users, including aggregate and purge handling.

    Fix over the previous revision: the dead locals ``compares = None``
    and ``module = args`` in ``load_fixtures``/``load_file`` (assigned,
    never read) have been removed.
    """

    module = icx_user

    def setUp(self):
        """Patch the module's config/load/exec entry points with mocks."""
        super(TestICXSCPModule, self).setUp()
        self.mock_get_config = patch('ansible.modules.network.icx.icx_user.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch('ansible.modules.network.icx.icx_user.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_exec_command = patch('ansible.modules.network.icx.icx_user.exec_command')
        self.exec_command = self.mock_exec_command.start()
        self.set_running_config()

    def tearDown(self):
        """Stop every mock started in setUp."""
        super(TestICXSCPModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_exec_command.stop()

    def load_fixtures(self, commands=None):
        """Wire get_config to return the username fixture only when the
        module asked for the running config."""
        def load_file(*args, **kwargs):
            for arg in args:
                if arg.params['check_running_config'] is True:
                    return load_fixture('show_running-config_include_username.txt').strip()
                else:
                    return ''

        self.get_config.side_effect = load_file
        self.load_config.return_value = None

    def test_icx_user_create_new_with_password(self):
        """A new user is created with the given password."""
        set_module_args(dict(name='ale6', configured_password='alethea123'))
        if not self.ENV_ICX_USE_DIFF:
            commands = ['username ale6 password alethea123']
            self.execute_module(commands=commands, changed=True)
        else:
            commands = ['username ale6 password alethea123']
            self.execute_module(commands=commands, changed=True)

    def test_icx_user_create_new_with_password_and_privilege(self):
        """A new user is created with both privilege and password."""
        set_module_args(dict(name='ale6', privilege="5", configured_password='alethea123'))
        if not self.ENV_ICX_USE_DIFF:
            commands = ['username ale6 privilege 5 password alethea123']
            self.execute_module(commands=commands, changed=True)
        else:
            commands = ['username ale6 privilege 5 password alethea123']
            self.execute_module(commands=commands, changed=True)

    def test_icx_user_update_privilege(self):
        """An existing user's privilege can be changed."""
        set_module_args(dict(name='ale1', privilege="0", configured_password='alethea123'))
        if not self.ENV_ICX_USE_DIFF:
            commands = ['username ale1 privilege 0 password alethea123']
            self.execute_module(commands=commands, changed=True)
        else:
            commands = ['username ale1 privilege 0 password alethea123']
            self.execute_module(commands=commands, changed=True)

    def test_icx_user_update_password(self):
        """Updating only the password keeps the existing privilege when the
        running config is consulted (diff mode)."""
        set_module_args(dict(name='ale1', configured_password='alethea123'))
        if not self.ENV_ICX_USE_DIFF:
            commands = ['username ale1 password alethea123']  # previous privilege will be added
            self.execute_module(commands=commands, changed=True)
        else:
            commands = ['username ale1 privilege 5 password alethea123']  # previous privilege will be added
            self.execute_module(commands=commands, changed=True)

    def test_icx_user_update_password_compare(self):
        """check_running_config=True forces the running-config comparison in
        both environments."""
        set_module_args(dict(name='ale1', configured_password='alethea123', check_running_config=True))
        if not self.ENV_ICX_USE_DIFF:
            commands = ['username ale1 privilege 5 password alethea123']  # previous privilege will be added
            self.execute_module(commands=commands, changed=True)
        else:
            commands = ['username ale1 privilege 5 password alethea123']  # previous privilege will be added
            self.execute_module(commands=commands, changed=True)

    def test_icx_user_delete_user(self):
        """state=absent removes the user."""
        set_module_args(dict(name='ale1', state="absent"))
        if not self.ENV_ICX_USE_DIFF:
            commands = ['no username ale1']
            self.execute_module(commands=commands, changed=True)
        else:
            commands = ['no username ale1']
            self.execute_module(commands=commands, changed=True)

    def test_icx_user_agregate(self):
        """Aggregate input creates every listed user."""
        set_module_args(dict(aggregate=[
            {
                "name": 'ale6',
                "configured_password": 'alethea123'
            },
            {
                "name": 'ale7',
                "configured_password": 'alethea123'
            }
        ]))
        if not self.ENV_ICX_USE_DIFF:
            commands = [
                'username ale6 password alethea123',
                'username ale7 password alethea123'
            ]
            self.execute_module(commands=commands, changed=True)
        else:
            commands = [
                'username ale6 password alethea123',
                'username ale7 password alethea123'
            ]
            self.execute_module(commands=commands, changed=True)

    def test_icx_user_not_update_old_user_password(self):
        """update_password=on_create must not touch pre-existing users'
        passwords when the running config is known (diff mode)."""
        set_module_args(dict(aggregate=[
            {
                "name": 'ale6',
                "configured_password": 'alethea123'
            },
            {
                "name": 'ale1',
                "configured_password": 'alethea123',
            },
        ],
            update_password='on_create'
        ))
        if not self.ENV_ICX_USE_DIFF:
            commands = [
                'username ale1 password alethea123',
                'username ale6 password alethea123',
            ]
            self.execute_module(commands=commands, changed=True)
        else:
            commands = [
                'username ale6 password alethea123',
            ]
            self.execute_module(commands=commands, changed=True)

    def test_icx_user_only_update_changed_settings(self):
        """Only settings that differ from the running config are pushed."""
        set_module_args(dict(aggregate=[
            {
                "name": 'ale1'
            },
            {
                "name": 'ale2',
                "privilege": 5,
                "configured_password": "ale123"
            },
            {
                "name": 'ale3',
                "privilege": 4,
                "configured_password": "ale123"
            }
        ],
            update_password="on_create"
        ))
        if not self.ENV_ICX_USE_DIFF:
            commands = [
                'username ale2 privilege 5 password ale123',
                'username ale3 privilege 4 password ale123'
            ]
            self.execute_module(commands=commands, changed=True)
        else:
            commands = [
                'username ale3 privilege 4 password ale123'
            ]
            self.execute_module(commands=commands, changed=True)

    def test_icx_user_purge(self):
        """purge=True removes every user not listed in the aggregate."""
        set_module_args(dict(aggregate=[
            {
                "name": 'ale1'
            }
        ],
            purge=True
        ))
        if not self.ENV_ICX_USE_DIFF:
            commands = [
            ]
            self.execute_module(commands=commands, changed=False)
        else:
            commands = [
                'no username ale2',
                'no username ale3',
                'no username ale4'
            ]
            self.execute_module(commands=commands, changed=True)
| gpl-3.0 |
uclouvain/OSIS-Louvain | learning_unit/tests/api/serializers/test_component.py | 1 | 2250 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Universitรฉ catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.test import TestCase
from base.tests.factories.learning_component_year import LearningComponentYearFactory
from learning_unit.api.serializers.component import LearningUnitComponentSerializer
class LearningUnitComponentSerializerTestCase(TestCase):
    """Checks the payload produced by LearningUnitComponentSerializer."""

    @classmethod
    def setUpTestData(cls):
        # One serialized component shared by every test of the case.
        cls.component = LearningComponentYearFactory()
        cls.serializer = LearningUnitComponentSerializer(cls.component)

    def test_contains_expected_fields(self):
        # Field names and their order are part of the API contract.
        serialized_keys = list(self.serializer.data.keys())
        self.assertListEqual(serialized_keys, [
            'type',
            'type_text',
            'planned_classes',
            'hourly_volume_total_annual',
            'hourly_volume_total_annual_computed'
        ])

    def test_ensure_compute_correct_volume(self):
        # The computed annual volume must mirror the component's vol_global.
        computed_volume = self.serializer.data['hourly_volume_total_annual_computed']
        self.assertEqual(computed_volume, str(self.component.vol_global))
| agpl-3.0 |
waristo/opensource | tensorflow/tensorflow.py | 1 | 1806 | import tensorflow as tf #Tensorflow์ฌ์ฉ์ ์ํ import
from tensorflow.examples.tutorials.mnist import input_data
# Dataset loading
mnist = input_data.read_data_sets("./samples/MNIST_data/", one_hot=True)

# Set up model
x = tf.placeholder(tf.float32, [None, 784])  # placeholder for the flattened input images (28x28 = 784)
W = tf.Variable(tf.zeros([784, 10]))  # weight matrix, initialised to zeros
b = tf.Variable(tf.zeros([10]))  # bias vector, initialised to zeros
y = tf.nn.softmax(tf.matmul(x, W) + b)  # softmax regression model built from x, W and b
y_ = tf.placeholder(tf.float32, [None, 10])  # placeholder for the one-hot ground-truth labels
cross_entropy = -tf.reduce_sum(y_*tf.log(y))  # cross-entropy loss: -sum(y' * log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)  # one gradient-descent step minimising the loss

# Session
init = tf.initialize_all_variables()  # op that initialises all variables
sess = tf.Session()  # open a session to run the graph
sess.run(init)  # run the initialisation op

# Learning
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# Validation
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))  # compare predicted vs. true class index along axis 1
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # fraction of correct predictions

# Result should be approximately 91%.
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))  # accuracy on the test set
| gpl-3.0 |
ayr-ton/ansible-modules-extras | monitoring/rollbar_deployment.py | 138 | 3898 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Max Riveiro, <kavu13@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rollbar_deployment
version_added: 1.6
author: "Max Riveiro (@kavu)"
short_description: Notify Rollbar about app deployments
description:
- Notify Rollbar about app deployments
(see https://rollbar.com/docs/deploys_other/)
options:
token:
description:
- Your project access token.
required: true
environment:
description:
- Name of the environment being deployed, e.g. 'production'.
required: true
revision:
description:
- Revision number/sha being deployed.
required: true
user:
description:
- User who deployed.
required: false
rollbar_user:
description:
- Rollbar username of the user who deployed.
required: false
comment:
description:
- Deploy comment (e.g. what is being deployed).
required: false
url:
description:
- Optional URL to submit the notification to.
required: false
default: 'https://api.rollbar.com/api/1/deploy/'
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated.
This should only be used on personally controlled sites using
self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
- rollbar_deployment: token=AAAAAA
environment='staging'
user='ansible'
revision=4.2,
rollbar_user='admin',
comment='Test Deploy'
'''
import urllib
def main():
    """Entry point: report a deployment to the Rollbar deploy API.

    Builds the POST payload from the module arguments, submits it with
    fetch_url, and exits via exit_json/fail_json.

    Fix: the exception handler used the Python 2-only ``except Exception, e``
    syntax, which is a SyntaxError on Python 3; ``except ... as e`` is valid
    on Python 2.6+ and 3.x alike.
    """
    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=True),
            environment=dict(required=True),
            revision=dict(required=True),
            user=dict(required=False),
            rollbar_user=dict(required=False),
            comment=dict(required=False),
            url=dict(
                required=False,
                default='https://api.rollbar.com/api/1/deploy/'
            ),
            validate_certs=dict(default='yes', type='bool'),
        ),
        supports_check_mode=True
    )

    # In check mode, report "changed" without contacting Rollbar.
    if module.check_mode:
        module.exit_json(changed=True)

    # Mandatory fields of the deploy notification.
    params = dict(
        access_token=module.params['token'],
        environment=module.params['environment'],
        revision=module.params['revision']
    )

    # Optional fields are only included when provided.
    if module.params['user']:
        params['local_username'] = module.params['user']

    if module.params['rollbar_user']:
        params['rollbar_username'] = module.params['rollbar_user']

    if module.params['comment']:
        params['comment'] = module.params['comment']

    url = module.params.get('url')

    try:
        data = urllib.urlencode(params)
        response, info = fetch_url(module, url, data=data)
    except Exception as e:
        module.fail_json(msg='Unable to notify Rollbar: %s' % e)
    else:
        if info['status'] == 200:
            module.exit_json(changed=True)
        else:
            module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

# Run the module when Ansible executes this file.
main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.