repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
atosatto/ansible | lib/ansible/modules/cloud/vmware/vmware_dvswitch.py | 60 | 7365 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_dvswitch
short_description: Create or remove a distributed vSwitch
description:
- Create or remove a distributed vSwitch
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- The name of the datacenter that will contain the dvSwitch
required: True
switch_name:
description:
- The name of the switch to create or remove
required: True
mtu:
description:
- The switch maximum transmission unit
required: True
uplink_quantity:
description:
- Quantity of uplink per ESXi host added to the switch
required: True
discovery_proto:
description:
- Link discovery protocol between Cisco and Link Layer discovery
choices:
- 'cdp'
- 'lldp'
required: True
discovery_operation:
description:
- Select the discovery operation
choices:
- 'both'
- 'none'
- 'advertise'
- 'listen'
state:
description:
- Create or remove dvSwitch
default: 'present'
choices:
- 'present'
- 'absent'
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create dvswitch
local_action:
module: vmware_dvswitch
hostname: vcenter_ip_or_hostname
username: vcenter_username
password: vcenter_password
datacenter_name: datacenter
switch_name: dvSwitch
mtu: 9000
uplink_quantity: 2
discovery_proto: lldp
discovery_operation: both
state: present
'''
# pyVmomi is an optional third-party dependency. Record its availability
# so main() can fail with a clean JSON error instead of an ImportError
# traceback when it is missing.
try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False
class VMwareDVSwitch(object):
    """Create, delete or report on a VMware distributed virtual switch.

    The actual action taken is selected by a (desired state, current
    state) lookup table built in process_state().
    """

    def __init__(self, module):
        """Cache module parameters and connect to the vCenter API.

        :param module: the AnsibleModule instance carrying user parameters.
        """
        self.module = module
        self.dvs = None
        # Fix: the original assigned self.switch_name twice; keep one.
        self.switch_name = self.module.params['switch_name']
        self.datacenter_name = self.module.params['datacenter_name']
        self.mtu = self.module.params['mtu']
        self.uplink_quantity = self.module.params['uplink_quantity']
        self.discovery_proto = self.module.params['discovery_proto']
        self.discovery_operation = self.module.params['discovery_operation']
        self.state = self.module.params['state']
        # connect_to_api comes from ansible.module_utils.vmware, which is
        # star-imported at the bottom of this module (legacy Ansible style).
        self.content = connect_to_api(module)

    def process_state(self):
        """Dispatch to the handler matching desired vs. current state.

        Any vSphere fault or unexpected exception is converted into a
        module failure so Ansible reports a clean error.
        """
        try:
            # Outer key: desired state; inner key: current state on vCenter.
            dvs_states = {
                'absent': {
                    'present': self.state_destroy_dvs,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'update': self.state_update_dvs,
                    'present': self.state_exit_unchanged,
                    'absent': self.state_create_dvs,
                }
            }
            dvs_states[self.state][self.check_dvs_configuration()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    def create_dvswitch(self, network_folder):
        """Build the CreateSpec and run the CreateDVS task.

        :param network_folder: the datacenter's network folder that will
            own the new switch.
        :returns: (changed, task_result) as produced by wait_for_task().
        """
        spec = vim.DistributedVirtualSwitch.CreateSpec()
        spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
        spec.configSpec.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig()
        spec.configSpec.name = self.switch_name
        spec.configSpec.maxMtu = self.mtu
        spec.configSpec.linkDiscoveryProtocolConfig.protocol = self.discovery_proto
        spec.configSpec.linkDiscoveryProtocolConfig.operation = self.discovery_operation
        spec.productInfo = vim.dvs.ProductSpec()
        spec.productInfo.name = "DVS"
        spec.productInfo.vendor = "VMware"
        # Name the uplink ports uplink1..uplinkN.
        for count in range(1, self.uplink_quantity + 1):
            spec.configSpec.uplinkPortPolicy.uplinkPortName.append("uplink%d" % count)
        task = network_folder.CreateDVS_Task(spec)
        # Fix: removed the dead 'result = None' / 'changed = False'
        # initializers that were immediately overwritten here.
        changed, result = wait_for_task(task)
        return changed, result

    def state_exit_unchanged(self):
        """Nothing to do: report no change and exit."""
        self.module.exit_json(changed=False)

    def state_destroy_dvs(self):
        """Delete the existing dvSwitch and report the task result."""
        task = self.dvs.Destroy_Task()
        changed, result = wait_for_task(task)
        self.module.exit_json(changed=changed, result=str(result))

    def state_update_dvs(self):
        """Reconfiguring an existing dvSwitch is not supported yet."""
        self.module.exit_json(changed=False, msg="Currently not implemented.")

    def state_create_dvs(self):
        """Create the dvSwitch; a no-op (but changed=True) in check mode."""
        changed = True
        result = None
        if not self.module.check_mode:
            dc = find_datacenter_by_name(self.content, self.datacenter_name)
            changed, result = self.create_dvswitch(dc.networkFolder)
        self.module.exit_json(changed=changed, result=str(result))

    def check_dvs_configuration(self):
        """Return 'present' if the switch already exists, else 'absent'.

        Side effect: caches the found switch object on self.dvs for use
        by state_destroy_dvs().
        """
        self.dvs = find_dvs_by_name(self.content, self.switch_name)
        if self.dvs is None:
            return 'absent'
        else:
            return 'present'
def main():
    """Module entry point: build the argument spec and run the state machine."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(datacenter_name=dict(required=True, type='str'),
                              switch_name=dict(required=True, type='str'),
                              mtu=dict(required=True, type='int'),
                              uplink_quantity=dict(required=True, type='int'),
                              discovery_proto=dict(required=True, choices=['cdp', 'lldp'], type='str'),
                              discovery_operation=dict(required=True, choices=['both', 'none', 'advertise', 'listen'], type='str'),
                              state=dict(default='present', choices=['present', 'absent'], type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Fail early with a friendly message if the optional dependency is absent.
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmware_dvswitch = VMwareDVSwitch(module)
    vmware_dvswitch.process_state()

# Imports intentionally live at the bottom: legacy Ansible modules
# star-import module_utils after all definitions so the module loader can
# splice in its replacements. Do not move these to the top of the file.
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
sgallagher/reviewboard | reviewboard/webapi/resources/review_reply_file_attachment_comment.py | 6 | 6670 | from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.base_file_attachment_comment import \
BaseFileAttachmentCommentResource
from reviewboard.webapi.resources.review_file_attachment_comment import \
ReviewFileAttachmentCommentResource
class ReviewReplyFileAttachmentCommentResource(
        BaseFileAttachmentCommentResource):
    """Provides information on replies to file comments made on a
    review reply.

    If the reply is a draft, then comments can be added, deleted, or
    changed on this list. However, if the reply is already published,
    then no changes can be made.
    """
    added_in = '1.6'
    allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
    policy_id = 'review_reply_file_attachment_comment'
    model_parent_key = 'review'

    # Extend the base resource's fields with a link back to the comment
    # being replied to.
    fields = dict({
        'reply_to': {
            'type': ReviewFileAttachmentCommentResource,
            'description': 'The comment being replied to.',
        },
    }, **BaseFileAttachmentCommentResource.fields)

    mimetype_list_resource_name = 'review-reply-file-attachment-comments'
    mimetype_item_resource_name = 'review-reply-file-attachment-comment'

    def get_queryset(self, request, review_id, reply_id, *args, **kwargs):
        """Return the comments on the given reply to the given review."""
        q = super(ReviewReplyFileAttachmentCommentResource, self).get_queryset(
            request, *args, **kwargs)
        # review=reply_id: the comment belongs to the reply itself;
        # review__base_reply_to=review_id: that reply targets this review.
        q = q.filter(review=reply_id, review__base_reply_to=review_id)
        return q

    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, INVALID_FORM_DATA,
                            NOT_LOGGED_IN, PERMISSION_DENIED)
    @webapi_request_fields(
        required=
            BaseFileAttachmentCommentResource.REPLY_REQUIRED_CREATE_FIELDS,
        optional=
            BaseFileAttachmentCommentResource.REPLY_OPTIONAL_CREATE_FIELDS,
        allow_unknown=True
    )
    def create(self, request, reply_to_id, *args, **kwargs):
        """Creates a reply to a file comment on a review.

        This will create a reply to a file comment on a review.
        The new comment will contain the same dimensions of the comment
        being replied to, but may contain new text.
        """
        try:
            resources.review_request.get_object(request, *args, **kwargs)
            reply = resources.review_reply.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        # Only the draft reply's owner may add comments to it.
        if not resources.review_reply.has_modify_permissions(request, reply):
            return self.get_no_access_error(request)

        try:
            comment = resources.review_file_attachment_comment.get_object(
                request,
                comment_id=reply_to_id,
                *args, **kwargs)
        except ObjectDoesNotExist:
            return INVALID_FORM_DATA, {
                'fields': {
                    'reply_to_id': ['This is not a valid file comment ID'],
                }
            }

        # NOTE(review): _get_queryset looks like the framework-level wrapper
        # around get_queryset (djblets WebAPIResource) -- confirm.
        q = self._get_queryset(request, *args, **kwargs)
        q = q.filter(Q(reply_to=comment) & Q(review=reply))

        try:
            new_comment = q.get()

            # This already exists. Go ahead and update, but we're going to
            # redirect the user to the right place.
            is_new = False
        except self.model.DoesNotExist:
            new_comment = self.model(file_attachment=comment.file_attachment,
                                     reply_to=comment)
            is_new = True

        self.update_comment(new_comment, is_reply=True, **kwargs)

        data = {
            self.item_result_key: new_comment,
        }

        if is_new:
            reply.file_attachment_comments.add(new_comment)
            reply.save()

            return 201, data
        else:
            # Point the caller at the pre-existing reply comment.
            return 303, data, {
                'Location': self.get_href(new_comment, request, *args,
                                          **kwargs)
            }

    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
    @webapi_request_fields(
        optional=
            BaseFileAttachmentCommentResource.REPLY_OPTIONAL_UPDATE_FIELDS,
        allow_unknown=True
    )
    def update(self, request, *args, **kwargs):
        """Updates a reply to a file comment.

        This can only update the text in the comment. The comment being
        replied to cannot change.
        """
        try:
            resources.review_request.get_object(request, *args, **kwargs)
            reply = resources.review_reply.get_object(request, *args, **kwargs)
            file_comment = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        if not resources.review_reply.has_modify_permissions(request, reply):
            return self.get_no_access_error(request)

        self.update_comment(file_comment, is_reply=True, **kwargs)

        return 200, {
            self.item_result_key: file_comment,
        }

    @augment_method_from(BaseFileAttachmentCommentResource)
    def delete(self, *args, **kwargs):
        """Deletes a file comment from a draft reply.

        This will remove the comment from the reply. This cannot be undone.
        Only comments on draft replies can be deleted. Attempting to delete
        a published comment will return a Permission Denied error.

        Instead of a payload response, this will return :http:`204`.
        """
        # Behavior is inherited from the base resource via
        # augment_method_from; only the docstring differs.
        pass

    @augment_method_from(BaseFileAttachmentCommentResource)
    def get(self, *args, **kwargs):
        """Returns information on a reply to a file comment.

        Much of the information will be identical to that of the comment
        being replied to.
        """
        pass

    @augment_method_from(BaseFileAttachmentCommentResource)
    def get_list(self, *args, **kwargs):
        """Returns the list of replies to file comments made on a review reply.
        """
        pass

# Singleton instance used by the resource registry.
review_reply_file_attachment_comment_resource = \
    ReviewReplyFileAttachmentCommentResource()
| mit |
sahana/Turkey | controllers/vehicle.py | 16 | 6780 | # -*- coding: utf-8 -*-
"""
Vehicle Management Functionality
http://eden.sahanafoundation.org/wiki/BluePrint/Vehicle
"""
# web2py controller globals: derive the module/resource names from the
# current request so the checks below match the URL being served.
module = request.controller
resourcename = request.function

if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)

# Vehicle Module depends on Assets
if not settings.has_module("asset"):
    raise HTTP(404, body="Module disabled: %s" % "asset")
# -----------------------------------------------------------------------------
def index():
    """ Module Home Page """

    # Use the module's human-readable name for both the page title and
    # the view context.
    nice_name = settings.modules[module].name_nice
    response.title = nice_name
    return {"module_name": nice_name}
# -----------------------------------------------------------------------------
def create():
    """ Redirect to vehicle/create """

    # Creation is handled by the filtered asset controller below.
    target = URL(f="vehicle", args="create")
    redirect(target)
# -----------------------------------------------------------------------------
def vehicle():
    """
        RESTful CRUD controller

        Filtered version of the asset_asset resource
    """

    tablename = "asset_asset"
    table = s3db[tablename]

    # NOTE(review): this configures "vehicle_vehicle" while the rest of
    # the function operates on asset_asset -- confirm it is intentional.
    s3db.configure("vehicle_vehicle",
                   deletable = False,
                   )

    # Wire the assign / check-in / check-out methods onto the asset resource.
    set_method = s3db.set_method
    set_method("asset", "asset", method="assign",
               action = s3db.hrm_AssignMethod(component="human_resource"))
    set_method("asset", "asset", method="check-in",
               action = s3base.S3CheckInMethod())
    set_method("asset", "asset", method="check-out",
               action = s3base.S3CheckOutMethod())

    # Type is Vehicle: fix the hidden type field to the VEHICLE constant.
    VEHICLE = s3db.asset_types["VEHICLE"]
    field = table.type
    field.default = VEHICLE
    field.readable = False
    field.writable = False

    # Only show vehicles
    s3.filter = (field == VEHICLE)

    # Remove type from list_fields (it is constant here)
    list_fields = s3db.get_config("asset_asset", "list_fields")
    if "type" in list_fields:
        list_fields.remove("type")

    field = table.item_id
    field.label = T("Vehicle Type")
    field.comment = S3PopupLink(f="item",
                                # Use this controller for options.json rather than looking for one called 'asset'
                                vars=dict(parent="vehicle"),
                                label=T("Add Vehicle Type"),
                                info=T("Add a new vehicle type"),
                                title=T("Vehicle Type"),
                                tooltip=T("Only Items whose Category are of type 'Vehicle' will be seen in the dropdown."))

    # Use this controller for options.json rather than looking for one called 'asset'
    table.organisation_id.comment[0].vars = dict(parent="vehicle")

    # Only select from vehicles: restrict item_id to items whose category
    # is flagged is_vehicle.
    field.widget = None # We want a simple dropdown
    ctable = s3db.supply_item_category
    itable = s3db.supply_item
    query = (ctable.is_vehicle == True) & \
            (itable.item_category_id == ctable.id)
    field.requires = IS_ONE_OF(db(query),
                               "supply_item.id",
                               "%(name)s",
                               sort=True)

    # Label changes
    table.sn.label = T("License Plate")
    s3db.asset_log.room_id.label = T("Parking Area")

    # CRUD strings
    s3.crud_strings[tablename] = Storage(
        label_create = T("Add Vehicle"),
        title_display = T("Vehicle Details"),
        title_list = T("Vehicles"),
        title_update = T("Edit Vehicle"),
        title_map = T("Map of Vehicles"),
        label_list_button = T("List Vehicles"),
        label_delete_button = T("Delete Vehicle"),
        msg_record_created = T("Vehicle added"),
        msg_record_modified = T("Vehicle updated"),
        msg_record_deleted = T("Vehicle deleted"),
        msg_list_empty = T("No Vehicles currently registered"))

    # @ToDo: Tweak the search comment

    # Defined in Model
    return s3db.asset_controller()
# =============================================================================
def vehicle_type():
    """ RESTful CRUD controller (plain REST handling for this resource) """

    return s3_rest_controller()
# =============================================================================
def item():
    """ RESTful CRUD controller """

    # Filter to just Vehicles: only items whose category is flagged
    # as a vehicle category.
    table = s3db.supply_item
    ctable = s3db.supply_item_category
    s3.filter = (table.item_category_id == ctable.id) & \
                (ctable.is_vehicle == True)

    # Limit the Categories to just those with vehicles in
    # - make category mandatory so that filter works
    field = s3db.supply_item.item_category_id
    field.requires = IS_ONE_OF(db,
                               "supply_item_category.id",
                               s3db.supply_item_category_represent,
                               sort=True,
                               filterby = "is_vehicle",
                               filter_opts = [True]
                               )

    field.label = T("Vehicle Categories")
    field.comment = S3PopupLink(f="item_category",
                                label=T("Add Vehicle Category"),
                                info=T("Add a new vehicle category"),
                                title=T("Vehicle Category"),
                                tooltip=T("Only Categories of type 'Vehicle' will be seen in the dropdown."))

    # CRUD strings
    s3.crud_strings["supply_item"] = Storage(
        label_create = T("Add New Vehicle Type"),
        title_display = T("Vehicle Type Details"),
        title_list = T("Vehicle Types"),
        title_update = T("Edit Vehicle Type"),
        label_list_button = T("List Vehicle Types"),
        label_delete_button = T("Delete Vehicle Type"),
        msg_record_created = T("Vehicle Type added"),
        msg_record_modified = T("Vehicle Type updated"),
        msg_record_deleted = T("Vehicle Type deleted"),
        msg_list_empty = T("No Vehicle Types currently registered"),
        msg_match = T("Matching Vehicle Types"),
        msg_no_match = T("No Matching Vehicle Types")
    )

    # Defined in the Model for use from Multiple Controllers for unified menus
    return s3db.supply_item_controller()
# =============================================================================
def item_category():
    """ RESTful CRUD controller """

    ctable = s3db.supply_item_category

    # Show only categories flagged as vehicle categories.
    s3.filter = (ctable.is_vehicle == True)

    # Hide the asset/vehicle flags from the form and force them to the
    # vehicle defaults for any new record.
    for flag in (ctable.can_be_asset, ctable.is_vehicle):
        flag.readable = flag.writable = False
        flag.default = True

    return s3_rest_controller("supply", "item_category")
# END =========================================================================
| mit |
ronekko/chainer | tests/chainer_tests/test_configuration.py | 18 | 2789 | import io
import threading
import unittest
import chainer
from chainer import configuration
from chainer import testing
class TestLocalConfig(unittest.TestCase):
    """Tests for LocalConfig attribute lookup falling back to GlobalConfig."""

    def setUp(self):
        # Fixture layout: 'x' exists only globally, 'y' exists in both
        # (the local value shadows the global), 'z' exists only locally.
        self.global_config = configuration.GlobalConfig()
        self.config = configuration.LocalConfig(self.global_config)
        self.global_config.x = 'global x'
        self.global_config.y = 'global y'
        self.config.y = 'local y'
        self.config.z = 'local z'

    def test_attr(self):
        self.assertTrue(hasattr(self.config, 'x'))
        self.assertEqual(self.config.x, 'global x')
        self.assertTrue(hasattr(self.config, 'y'))
        self.assertEqual(self.config.y, 'local y')
        self.assertTrue(hasattr(self.config, 'z'))
        self.assertEqual(self.config.z, 'local z')
        self.assertFalse(hasattr(self.config, 'w'))

        # Deleting the local entry re-exposes the global value.
        del self.config.y
        self.assertTrue(hasattr(self.config, 'y'))
        self.assertEqual(self.config.y, 'global y')

        # 'x' has no local override, so local deletion must fail.
        with self.assertRaises(AttributeError):
            del self.config.x

    def test_multi_thread_attr(self):
        # Local writes made in another thread must not leak into this
        # thread's view, while global writes are shared across threads.
        def target():
            self.config.y = 'local y2'
            self.global_config.x = 'global x2'
            self.global_config.z = 'global z2'

        thread = threading.Thread(target=target)
        thread.start()
        thread.join()

        self.assertEqual(self.config.y, 'local y')
        self.assertEqual(self.config.x, 'global x2')
        self.assertEqual(self.config.z, 'local z')
        self.assertEqual(self.global_config.z, 'global z2')

    def test_using_config_local_did_not_exist(self):
        # using_config sets a temporary local value and removes it on
        # exit; afterwards lookup falls back to the (mutable) global.
        with chainer.using_config('x', 'temporary x', self.config):
            self.assertEqual(self.config.x, 'temporary x')
            self.assertEqual(self.global_config.x, 'global x')
        self.assertEqual(self.config.x, 'global x')
        self.global_config.x = 'global x2'
        self.assertEqual(self.config.x, 'global x2')

    def test_using_config_local_existed(self):
        # The pre-existing local 'y' is restored when the context exits.
        with chainer.using_config('y', 'temporary y', self.config):
            self.assertEqual(self.config.y, 'temporary y')
            self.assertEqual(self.global_config.y, 'global y')
        self.assertEqual(self.config.y, 'local y')

    def test_print_config(self):
        # show() writes the effective (local-over-global) entries.
        self.config.abc = 1
        sio = io.StringIO()
        self.config.show(sio)
        contents = sio.getvalue()
        self.assertEqual(
            contents, 'abc 1\nx global x\ny local y\nz local z\n')

    def test_print_global_config(self):
        # The global config's show() ignores local overrides entirely.
        self.global_config.abc = 1
        sio = io.StringIO()
        self.global_config.show(sio)
        contents = sio.getvalue()
        self.assertEqual(contents, 'abc 1\nx global x\ny global y\n')
# Discover and run this module's tests when executed directly.
testing.run_module(__name__, __file__)
| mit |
mdrumond/tensorflow | tensorflow/contrib/data/python/kernel_tests/range_dataset_op_test.py | 4 | 20666 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test RangeDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.contrib.data.python.ops import enumerate_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
class RangeDatasetTest(test.TestCase):
def tearDown(self):
# Remove all checkpoint files.
prefix = self._iterator_checkpoint_prefix()
pattern = prefix + "*"
files = gfile.Glob(pattern)
map(gfile.Remove, files)
def testStop(self):
stop = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op, feed_dict={stop: 5})
for i in range(5):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testStartStop(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start,
stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op, feed_dict={start: 2, stop: 5})
for i in range(2, 5):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testStartStopStep(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
step = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start, stop,
step).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op, feed_dict={start: 2, stop: 10, step: 2})
for i in range(2, 10, 2):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testZeroStep(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
step = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start, stop,
step).make_initializable_iterator()
init_op = iterator.initializer
with self.test_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={start: 2, stop: 10, step: 0})
def testNegativeStep(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
step = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start, stop,
step).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op, feed_dict={start: 2, stop: 10, step: -1})
# This for loop is a no-op but will ensure that the implementation is
# consistent with range if it ever changes.
for i in range(2, 10, -1):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testStopLessThanStart(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start,
stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op, feed_dict={start: 10, stop: 2})
# This for loop is a no-op but will ensure that the implementation is
# consistent with range if it ever changes.
for i in range(10, 2):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testStopLessThanStartWithPositiveStep(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
step = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start, stop,
step).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op, feed_dict={start: 10, stop: 2, step: 2})
# This for loop is a no-op but will ensure that the implementation is
# consistent with range if it ever changes.
for i in range(10, 2, 2):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testStopLessThanStartWithNegativeStep(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
step = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start, stop,
step).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op, feed_dict={start: 10, stop: 2, step: -1})
for i in range(10, 2, -1):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testEnumerateDataset(self):
components = (["a", "b"], [1, 2], [37.0, 38])
start = constant_op.constant(20, dtype=dtypes.int64)
iterator = (dataset_ops.Dataset.from_tensor_slices(components).apply(
enumerate_ops.enumerate_dataset(start)).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual(dtypes.int64, get_next[0].dtype)
self.assertEqual((), get_next[0].shape)
self.assertEqual([tensor_shape.TensorShape([])] * 3,
[t.shape for t in get_next[1]])
with self.test_session() as sess:
sess.run(init_op)
self.assertEqual((20, (b"a", 1, 37.0)), sess.run(get_next))
self.assertEqual((21, (b"b", 2, 38.0)), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def _iterator_checkpoint_prefix(self):
return os.path.join(self.get_temp_dir(), "iterator")
def testSaveRestore(self):
def _build_graph(start, stop):
iterator = dataset_ops.Dataset.range(start,
stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
path = self._iterator_checkpoint_prefix()
save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
path)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
break_point = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.test_session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop)
with self.test_session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Saving and restoring in same session.
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.test_session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testRestoreWithoutBuildingDatasetGraph(self):
def _build_graph(start, stop, num_epochs, path):
dataset = dataset_ops.Dataset.range(start, stop).repeat(num_epochs)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
path)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
num_epochs = 5
break_point = 5
break_epoch = 3
path = self._iterator_checkpoint_prefix()
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop, num_epochs,
path)
with self.test_session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for _ in range(break_epoch):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
# Create an empty IteratorResource and restore the Iterator into it.
output_types = dtypes.int64
output_shapes = tensor_shape.scalar()
iterator = iterator_ops.Iterator.from_structure(output_types,
output_shapes)
restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
path)
get_next = iterator.get_next()
with self.test_session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
for _ in range(break_epoch + 1, num_epochs):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testRestoreInModifiedGraph(self):
def _build_graph(start, stop):
dataset = dataset_ops.Dataset.range(start, stop)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
path = self._iterator_checkpoint_prefix()
save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
path)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
stop_1 = 8
break_point = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.test_session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
# Intentionally build a graph with a different value for stop to make sure
# the original dataset graph is actually getting loaded.
init_op, get_next, _, restore_op = _build_graph(start, stop_1)
with self.test_session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
  def testInitThenRestore(self):
    # Note: Calling init_op before restore_op is redundant. This test just makes
    # sure we do not fail if restore is called on an already initialized
    # iterator resource.

    def _build_graph(start, stop):
      # Builds a range dataset iterator plus ops that save/restore its state.
      dataset = dataset_ops.Dataset.range(start, stop)
      iterator = dataset.make_initializable_iterator()
      init_op = iterator.initializer
      get_next = iterator.get_next()
      path = self._iterator_checkpoint_prefix()
      save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
      restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
                                                    path)
      return init_op, get_next, save_op, restore_op

    # Saving and restoring in different sessions.
    start = 2
    stop = 10
    break_point = 5
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, _ = _build_graph(start, stop)
      with self.test_session(graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        sess.run(init_op)
        for i in range(start, break_point):
          self.assertEqual(i, sess.run(get_next))
        sess.run(save_op)

    with ops.Graph().as_default() as g:
      init_op, get_next, _, restore_op = _build_graph(start, stop)
      with self.test_session(graph=g) as sess:
        # Initialize first, then restore: the restored state must win, so
        # iteration resumes at break_point rather than restarting at start.
        sess.run(init_op)
        sess.run(restore_op)
        for i in range(break_point, stop):
          self.assertEqual(i, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
def testMultipleSaves(self):
def _build_graph(start, stop):
iterator = dataset_ops.Dataset.range(start,
stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
path = self._iterator_checkpoint_prefix()
save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
path)
return init_op, get_next, save_op, restore_op
start = 2
stop = 10
break_point1 = 5
break_point2 = 7
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.test_session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point1):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.test_session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point1, break_point2):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
break_point2 = 7
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.test_session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point2, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
  def testSaveRestoreWithRepeat(self):
    """Saves mid-epoch of a repeated dataset and resumes across epochs."""

    def _build_graph(start, stop, num_epochs):
      # range(start, stop) repeated num_epochs times, plus checkpoint ops.
      iterator = dataset_ops.Dataset.range(
          start, stop).repeat(num_epochs).make_initializable_iterator()
      init_op = iterator.initializer
      get_next = iterator.get_next()
      path = self._iterator_checkpoint_prefix()
      save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
      restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
                                                    path)
      return init_op, get_next, save_op, restore_op

    start = 2
    stop = 10
    num_epochs = 5
    break_range = 5
    break_epoch = 3
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, restore_op = _build_graph(
          start, stop, num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        sess.run(init_op)
        # Note: There is no checkpoint saved currently so a NotFoundError is
        # raised.
        with self.assertRaises(errors.NotFoundError):
          sess.run(restore_op)
        # Advance to element `break_range` of epoch `break_epoch`, then save.
        for _ in range(break_epoch - 1):
          for i in range(start, stop):
            self.assertEqual(i, sess.run(get_next))
        for i in range(start, break_range):
          self.assertEqual(i, sess.run(get_next))
        sess.run(save_op)

    with ops.Graph().as_default() as g:
      init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(restore_op)
        # Finish the interrupted epoch, then the remaining full epochs.
        for i in range(break_range, stop):
          self.assertEqual(i, sess.run(get_next))
        for _ in range(break_epoch, num_epochs):
          for i in range(start, stop):
            self.assertEqual(i, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
  def testSaveRestoreExhaustedIterator(self):
    """Saves a fully-consumed iterator; the restored one must stay exhausted."""

    def _build_graph(start, stop, num_epochs):
      # range(start, stop) repeated num_epochs times, plus checkpoint ops.
      iterator = dataset_ops.Dataset.range(
          start, stop).repeat(num_epochs).make_initializable_iterator()
      init_op = iterator.initializer
      get_next = iterator.get_next()
      path = self._iterator_checkpoint_prefix()
      save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
      restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
                                                    path)
      return init_op, get_next, save_op, restore_op

    start = 2
    stop = 10
    num_epochs = 5
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, restore_op = _build_graph(
          start, stop, num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        sess.run(init_op)
        # Note: There is no checkpoint saved currently so a NotFoundError is
        # raised.
        with self.assertRaises(errors.NotFoundError):
          sess.run(restore_op)
        # Drain the iterator completely, then checkpoint the exhausted state.
        for _ in range(num_epochs):
          for i in range(start, stop):
            self.assertEqual(i, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
        sess.run(save_op)

    with ops.Graph().as_default() as g:
      init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(restore_op)
        # Restored iterator must immediately report end-of-sequence.
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
if __name__ == "__main__":
  # Run all test cases in this module via the TensorFlow test runner.
  test.main()
| apache-2.0 |
lukeiwanski/tensorflow | tensorflow/python/profiler/internal/model_analyzer_testlib.py | 46 | 4078 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A test lib that defines some models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import variable_scope
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
def BuildSmallModel():
  """Build a small forward conv model."""
  image = array_ops.zeros([2, 6, 6, 3])
  # Unused scalar variable; presumably kept so it appears in profiler
  # parameter statistics -- TODO confirm against the profiler tests.
  _ = variable_scope.get_variable(
      'ScalarW', [],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  # First conv layer: 3x3 kernel, 3 -> 6 channels, stride 2.
  kernel = variable_scope.get_variable(
      'DW', [3, 3, 3, 6],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
  # Second conv layer: 2x2 kernel, 6 -> 12 channels, stride 2.
  kernel = variable_scope.get_variable(
      'DW2', [2, 2, 6, 12],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  x = nn_ops.conv2d(x, kernel, [1, 2, 2, 1], padding='SAME')
  return x
def BuildFullModel():
  """Build the full model with conv,rnn,opt.

  Returns the SGD minimize op for an L2 loss over an RNN that consumes four
  small conv towers as a length-4 sequence.
  """
  seq = []
  for i in range(4):
    # Each tower gets its own variable scope so variables do not collide.
    with variable_scope.variable_scope('inp_%d' % i):
      seq.append(array_ops.reshape(BuildSmallModel(), [2, 1, -1]))
  cell = rnn_cell.BasicRNNCell(16)
  out = rnn.dynamic_rnn(
      cell, array_ops.concat(seq, axis=1), dtype=dtypes.float32)[0]
  # L2 loss against an all-ones target, minimized with plain SGD.
  target = array_ops.ones_like(out)
  loss = nn_ops.l2_loss(math_ops.reduce_mean(target - out))
  sgd_op = gradient_descent.GradientDescentOptimizer(1e-2)
  return sgd_op.minimize(loss)
def BuildSplitableModel():
  """Build a small model that can be run partially in each step.

  Returns (r1, r2, r3): two independent conv branches over the same input
  and their sum, so a step can run any subset of the graph.
  """
  image = array_ops.zeros([2, 6, 6, 3])
  kernel1 = variable_scope.get_variable(
      'DW', [3, 3, 3, 6],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  r1 = nn_ops.conv2d(image, kernel1, [1, 2, 2, 1], padding='SAME')
  kernel2 = variable_scope.get_variable(
      'DW2', [2, 3, 3, 6],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  r2 = nn_ops.conv2d(image, kernel2, [1, 2, 2, 1], padding='SAME')
  # r3 depends on both branches, so evaluating it forces r1 and r2 to run.
  r3 = r1 + r2
  return r1, r2, r3
def SearchTFProfNode(node, name):
  """Return the first node named `name` in a pre-order walk, else None."""
  if node.name == name:
    return node
  # Recurse into children in order; the first hit anywhere wins.
  for child in node.children:
    match = SearchTFProfNode(child, name)
    if match:
      return match
  return None
@contextlib.contextmanager
def ProfilerFromFile(profile_file):
  """Initialize a profiler from profile file."""
  # Load the native profiler state from disk first ...
  print_mdl.ProfilerFromFile(compat.as_bytes(profile_file))
  # ... then bypass Profiler.__init__ (which would create fresh native
  # state) by allocating the wrapper object directly.
  profiler = model_analyzer.Profiler.__new__(model_analyzer.Profiler)
  yield profiler
  # NOTE(review): DeleteProfiler is skipped if the with-body raises --
  # confirm whether a try/finally is needed here.
  print_mdl.DeleteProfiler()
def CheckAndRemoveDoc(profile):
  """Assert the profile contains a 'Doc:' section and strip its header.

  Returns everything after the 'Profile:' marker line.
  """
  assert 'Doc:' in profile
  marker = 'Profile:'
  # Skip the marker itself plus the newline that follows it.
  cut = profile.find(marker) + len(marker) + 1
  return profile[cut:]
| apache-2.0 |
5GExchange/escape | test/testframework/generator/sg_generator.py | 2 | 5842 | #!/usr/bin/python -u
# Copyright 2017 Balazs Nemeth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generates requests that which can be used as standard test SG-s to cover
most/all functionalities of ESCAPE.
"""
import random
import string
# noinspection PyUnresolvedReferences
from nffg_lib.nffg import NFFG
class NameGenerator(object):
  """Generates unique, sequentially numbered names per ID prefix.

  get_name("port") yields "port0", "port1", ... independently per prefix.
  """

  def __init__ (self):
    self.prefixes = {}  # prefix -> infinite generator of numbered names

  def _get_gen_for_name (self, prefix):
    """Infinite generator yielding prefix + 0, prefix + 1, ..."""
    number = 0
    while True:
      yield prefix + str(number)
      number += 1

  def get_name (self, prefix):
    """Return the next unused name for `prefix`, creating its counter lazily."""
    if prefix not in self.prefixes:
      self.prefixes[prefix] = self._get_gen_for_name(prefix)
    # Use the next() builtin instead of the generator's .next() method so
    # this works on both Python 2.6+ and Python 3 (where .next() is gone).
    return next(self.prefixes[prefix])

  def reset_name (self, prefix):
    """Drop the counter for `prefix`; the next get_name() restarts at 0."""
    if prefix in self.prefixes:
      del self.prefixes[prefix]
def get_8loop_request (abc_nf_types_len=10, seed=0, eightloops=1):
  """
  Generates simple request NFFGs in all combinations of sap1-->vnf1-->...-->
  vnfn-->sap1. Creates the requests for augmented-dfn-gwin.nffg

  :param abc_nf_types_len: list of VNF **Types** which should be instantiated
  :type abc_nf_types_len: list
  :param seed: seed for random generator
  :type seed: int
  :param eightloops: the number of eight loops
  :type eightloops: int
  :return: an 8loop NFFG
  :rtype: :any:`NFFG`
  """
  # Candidate SAP ids: sap0..sap19; each loop picks one at random.
  saps = []
  for i in xrange(0, 20):
    saps.append("sap" + str(i))
  rnd = random.Random()
  rnd.seed(seed)
  gen = NameGenerator()
  nffg = NFFG(id="8loops-req")
  nffg.mode = NFFG.MODE_ADD
  # NF types are single letters 'A', 'B', ... limited to abc_nf_types_len.
  nf_types = list(string.ascii_uppercase)[:abc_nf_types_len]
  i = 1  # running id for NFs and SG links across all loops
  for j in xrange(0, eightloops):
    # Reuse the SAP (and its first port) if a previous loop already added it.
    sap = rnd.choice(saps)
    if sap not in nffg:
      sapo = nffg.add_sap(id=sap, name=sap + "_name")
    else:
      sapo = nffg.network.node[sap]
    if len(sapo.ports) > 0:
      for sapp in sapo.ports:
        break  # take the first existing port
    else:
      sapp = sapo.add_port(id=gen.get_name("port"))
    # Two random NF-type chains form the two circles of the "eight".
    vnfs1 = rnd.sample(nf_types, rnd.randint(1, len(nf_types)))
    vnfs2 = rnd.sample(nf_types, rnd.randint(1, len(nf_types)))
    # The middle NF is shared by both circles.
    nfmiddle = nffg.add_nf(id="nf0" + str(j), name="nf_middle" + str(j),
                           func_type=rnd.choice(vnfs1),
                           cpu=1, mem=1, storage=1)
    # The middle NF's type must not reappear inside either chain.
    try:
      vnfs1.remove(nfmiddle.functional_type)
    except ValueError:
      pass
    try:
      vnfs2.remove(nfmiddle.functional_type)
    except ValueError:
      pass
    once = True
    for vnf_list in (vnfs1, vnfs2):
      nf0 = nfmiddle
      for vnf in vnf_list:
        nf1 = nffg.add_nf(id="-".join(("nf", str(j), str(i))),
                          name="nf" + str(i) + "_" + vnf, func_type=vnf,
                          cpu=1, mem=1, storage=1)
        nffg.add_sglink(src_port=nf0.add_port(id=gen.get_name("port")),
                        dst_port=nf1.add_port(id=gen.get_name("port")),
                        flowclass="HTTP", id=i)
        nf0 = nf1
        i += 1
      if once:
        # Close the first circle back onto the middle NF.
        nffg.add_sglink(src_port=nf0.add_port(id=gen.get_name("port")),
                        dst_port=nfmiddle.add_port(id=gen.get_name("port")),
                        flowclass="HTTP", id=i)
        once = False
        i += 1
    # NOTE(review): nf1 here is the last NF added in the loops above; if the
    # second chain became empty after removing the middle NF's type, nf1
    # still refers to the first chain's last NF (or is undefined if both are
    # empty) -- confirm this is intended.
    nffg.add_sglink(src_port=nf1.add_port(id=gen.get_name("port")),
                    dst_port=sapp,
                    flowclass="HTTP", id=i)
    nffg.add_sglink(src_port=sapp,
                    dst_port=nfmiddle.add_port(id=gen.get_name("port")),
                    flowclass="HTTP", id=i + 1)
    i += 2
  return nffg
def get_balanced_tree (r=2, h=3, seed=0, max_cpu=4, max_mem=1600,
                       max_storage=3, max_link_bw=5, min_link_delay=2,
                       abc_nf_types_len=10, max_link_delay=4):
  """
  Gets a balanced tree which has SAPs in the root and the leaves, directed
  from the root to the leaves.

  :param r: branching factor of the tree
  :param h: height of the tree
  :param seed: seed for the random generator
  :param max_cpu: upper bound for random NF cpu requirement
  :param max_mem: upper bound for random NF memory requirement
  :param max_storage: upper bound for random NF storage requirement
  :param max_link_bw: upper bound for random leaf link bandwidth
  :param min_link_delay: lower bound for random leaf link delay
  :param abc_nf_types_len: number of NF types ('A', 'B', ...) to choose from
  :param max_link_delay: upper bound for random leaf link delay
  :return: NFFG
  """
  nf_types = list(string.ascii_uppercase)[:abc_nf_types_len]
  nffg = NFFG(id="req-tree-branching-" + str(r) + "-height-" + str(h))
  nffg.mode = NFFG.MODE_ADD
  rnd = random.Random()
  rnd.seed(seed)
  gen = NameGenerator()
  # Root SAP; its single port is the "parent port" set for level 0.
  sap_obj = nffg.add_sap(id=gen.get_name("sap"))
  prev_level_nf_ports = [sap_obj.add_port(id=gen.get_name("port"))]
  for level in xrange(0, h):
    curr_level_nf_ports = []
    for prev_level_port in prev_level_nf_ports:
      for j in xrange(0, r):
        # Each child NF gets a random type and random resource requirements.
        nf = nffg.add_nf(id=gen.get_name("nf"), func_type=rnd.choice(nf_types),
                         cpu=rnd.random() * max_cpu,
                         mem=rnd.random() * max_mem,
                         storage=rnd.random() * max_storage)
        nffg.add_sglink(prev_level_port, nf.add_port(gen.get_name("port")),
                        id=gen.get_name("sghop"))
        # A second port on the NF feeds the next level down.
        curr_level_nf_ports.append(nf.add_port(gen.get_name("port")))
    prev_level_nf_ports = curr_level_nf_ports
  # Terminate every leaf NF in its own SAP with random delay/bandwidth.
  for port in prev_level_nf_ports:
    sap = nffg.add_sap(id=gen.get_name("sap"))
    nffg.add_sglink(port, sap.add_port(id=gen.get_name("port")),
                    id=gen.get_name("delay_sghop"),
                    delay=rnd.uniform(min_link_delay, max_link_delay),
                    bandwidth=rnd.random() * max_link_bw)
  return nffg
if __name__ == '__main__':
  # Ad-hoc smoke test: build a small request graph and dump it.
  # nffg = get_8loop_request(eightloops=3)
  nffg = get_balanced_tree(r=2, h=2)
  print nffg.dump()  # Python 2 print statement; this module targets Python 2
| apache-2.0 |
kustomzone/Fuzium | core/src/Worker/WorkerManager.py | 2 | 21392 | import time
import logging
import random
import collections
import gevent
from Worker import Worker
from Config import config
from util import helper
from Plugin import PluginManager
import util
@PluginManager.acceptPlugins
class WorkerManager(object):
    """Creates and supervises Worker objects that download a site's files.

    Tasks are plain dicts (see the key list in __init__) queued in
    self.tasks; one Worker is started per peer up to getMaxWorkers().  A
    background greenlet (checkTasks) retries slow tasks and fails
    timed-out ones.
    """

    def __init__(self, site):
        self.site = site
        self.workers = {}  # Key: ip:port, Value: Worker.Worker
        self.tasks = []
        # {"evt": evt, "workers_num": 0, "site": self.site, "inner_path": inner_path, "done": False, "optional_hash_id": None,
        # "time_started": None, "time_added": time.time(), "peers": peers, "priority": 0, "failed": peer_ids}
        self.started_task_num = 0  # Last added task num
        self.asked_peers = []  # Peers already queried for optional hash ids
        self.running = True
        self.time_task_added = 0
        self.log = logging.getLogger("WorkerManager:%s" % self.site.address_short)
        self.process_taskchecker = gevent.spawn(self.checkTasks)

    def __str__(self):
        return "WorkerManager %s" % self.site.address_short

    def __repr__(self):
        return "<%s>" % self.__str__()

    # Check expired tasks
    def checkTasks(self):
        """Background loop: every 15s skip/fail timed-out tasks and look for
        more peers for slow ones.  Runs until self.running is cleared."""
        while self.running:
            tasks = task = worker = workers = None  # Cleanup local variables
            time.sleep(15)  # Check every 15 sec

            # Clean up workers
            for worker in self.workers.values():
                if worker.task and worker.task["done"]:
                    worker.skip()  # Stop workers with task done

            if not self.tasks:
                continue

            tasks = self.tasks[:]  # Copy it so removing elements wont cause any problem
            for task in tasks:
                # Larger files get proportionally more time before timing out.
                size_extra_time = task["size"] / (1024 * 100)  # 1 second for every 100k
                if task["time_started"] and time.time() >= task["time_started"] + 60 + size_extra_time:
                    self.log.debug("Timeout, Skipping: %s" % task)  # Task taking too long time, skip it
                    # Skip to next file workers
                    workers = self.findWorkers(task)
                    if workers:
                        for worker in workers:
                            worker.skip()
                    else:
                        self.failTask(task)
                elif time.time() >= task["time_added"] + 60 + size_extra_time and not self.workers:  # No workers left
                    self.log.debug("Timeout, Cleanup task: %s" % task)
                    # Remove task
                    self.failTask(task)
                elif (task["time_started"] and time.time() >= task["time_started"] + 15) or not self.workers:
                    # Find more workers: Task started more than 15 sec ago or no workers
                    workers = self.findWorkers(task)
                    self.log.debug(
                        "Slow task: %s 15+%ss, (workers: %s, optional_hash_id: %s, peers: %s, failed: %s, asked: %s)" %
                        (
                            task["inner_path"], size_extra_time, len(workers), task["optional_hash_id"],
                            len(task["peers"] or []), len(task["failed"]), len(self.asked_peers)
                        )
                    )
                    task["site"].announce(mode="more")  # Find more peers
                    if task["optional_hash_id"]:
                        # How many peers we are willing to ask scales with
                        # how long the task has been running and its priority.
                        if not task["time_started"]:
                            ask_limit = 20
                        elif task["priority"] > 0:
                            ask_limit = max(10, time.time() - task["time_started"])
                        else:
                            ask_limit = max(10, (time.time() - task["time_started"]) / 2)
                        if len(self.asked_peers) < ask_limit and len(task["peers"] or []) <= len(task["failed"]) * 2:
                            # Re-search for high priority
                            self.startFindOptional(find_more=True)
                    else:
                        if task["peers"]:  # Release the peer lock
                            self.log.debug("Task peer lock release: %s" % task["inner_path"])
                            task["peers"] = []
                            self.startWorkers()
                    break  # One reannounce per loop
        self.log.debug("checkTasks stopped running")

    # Returns the next free or less worked task
    def getTask(self, peer):
        """Return the next task this peer may work on, or None if nothing fits."""
        # Sort tasks by priority and worker numbers
        self.tasks.sort(key=lambda task: task["priority"] - task["workers_num"] * 5, reverse=True)
        for task in self.tasks:  # Find a task
            if task["peers"] and peer not in task["peers"]:
                continue  # This peer not allowed to pick this task
            if peer in task["failed"]:
                continue  # Peer already tried to solve this, but failed
            if task["optional_hash_id"] and task["peers"] is None:
                continue  # No peers found yet for the optional task
            return task

    def removeGoodFileTasks(self):
        """Complete (as success) queued tasks whose file left site.bad_files."""
        for task in self.tasks[:]:
            if task["inner_path"] not in self.site.bad_files:
                self.log.debug("No longer in bad_files, marking as good: %s" % task["inner_path"])
                task["done"] = True
                task["evt"].set(True)
                self.tasks.remove(task)
        if not self.tasks:
            self.started_task_num = 0
        self.site.updateWebsocket()

    # New peers added to site
    def onPeers(self):
        self.startWorkers()

    def getMaxWorkers(self):
        """Max simultaneous workers; doubled when the task queue is long."""
        if len(self.tasks) > 100:
            return config.connected_limit * 2
        else:
            return config.connected_limit

    # Add new worker
    def addWorker(self, peer):
        """Start a Worker for the peer, or return False if one exists or the
        worker limit is reached."""
        key = peer.key
        if key not in self.workers and len(self.workers) < self.getMaxWorkers():
            # We dont have worker for that peer and workers num less than max
            worker = Worker(self, peer)
            self.workers[key] = worker
            worker.key = key
            worker.start()
            return worker
        else:  # We have woker for this peer or its over the limit
            return False

    # Start workers to process tasks
    def startWorkers(self, peers=None):
        """Spawn workers for the given peers (or for connected/known peers)."""
        if not self.tasks:
            return False  # No task for workers
        self.log.debug("Starting workers, tasks: %s, peers: %s, workers: %s" % (len(self.tasks), len(peers or []), len(self.workers)))
        if len(self.workers) >= self.getMaxWorkers() and not peers:
            return False  # Workers number already maxed and no starting peers defined
        if not peers:
            peers = self.site.getConnectedPeers()
            if len(peers) < self.getMaxWorkers():
                peers += self.site.peers.values()[0:self.getMaxWorkers()]
        if type(peers) is set:
            peers = list(peers)

        random.shuffle(peers)
        for peer in peers:  # One worker for every peer
            # NOTE(review): `peer not in peers` can never be true while
            # iterating `peers` itself -- this check looks vestigial; confirm.
            if peers and peer not in peers:
                continue  # If peers defined and peer not valid
            worker = self.addWorker(peer)
            if worker:
                self.log.debug("Added worker: %s, workers: %s/%s" % (peer.key, len(self.workers), self.getMaxWorkers()))

    # Find peers for optional hash in local hash tables and add to task peers
    def findOptionalTasks(self, optional_tasks, reset_task=False):
        """Match already-known peer hashfields against optional tasks.

        Returns {optional_hash_id: [peers]} and records matching peers on
        each task's "peers" list.
        """
        found = collections.defaultdict(list)  # { found_hash: [peer1, peer2...], ...}

        for peer in self.site.peers.values():
            if not peer.has_hashfield:
                continue

            hashfield_set = set(peer.hashfield)  # Finding in set is much faster
            for task in optional_tasks:
                optional_hash_id = task["optional_hash_id"]
                if optional_hash_id in hashfield_set:
                    if reset_task and len(task["failed"]) > 0:
                        task["failed"] = []
                    if peer in task["failed"]:
                        continue
                    found[optional_hash_id].append(peer)
                    # NOTE(review): if this peer is already in task["peers"],
                    # the else branch resets the list to just [peer],
                    # discarding the others -- confirm this is intended.
                    if task["peers"] and peer not in task["peers"]:
                        task["peers"].append(peer)
                    else:
                        task["peers"] = [peer]

        return found

    # Find peers for optional hash ids in local hash tables
    def findOptionalHashIds(self, optional_hash_ids, limit=0):
        """Search local peer hashfields for hash ids; returns {hash_id: [peers]}.

        With a nonzero limit, a hash id is removed from the (caller-owned)
        input collection once `limit` peers are found for it.
        """
        found = collections.defaultdict(list)  # { found_hash_id: [peer1, peer2...], ...}

        for peer in self.site.peers.values():
            if not peer.has_hashfield:
                continue

            hashfield_set = set(peer.hashfield)  # Finding in set is much faster
            for optional_hash_id in optional_hash_ids:
                if optional_hash_id in hashfield_set:
                    found[optional_hash_id].append(peer)
                    # NOTE(review): this mutates optional_hash_ids while it is
                    # being iterated -- verify the caller's container type
                    # tolerates that.
                    if limit and len(found[optional_hash_id]) >= limit:
                        optional_hash_ids.remove(optional_hash_id)

        return found

    # Add peers to tasks from found result
    def addOptionalPeers(self, found_ips):
        """Register found (ip, port) pairs as peers on the matching tasks.

        Returns {hash_id: [peer objects]} for the pairs that resolved.
        """
        found = collections.defaultdict(list)
        for hash_id, peer_ips in found_ips.iteritems():
            task = [task for task in self.tasks if task["optional_hash_id"] == hash_id]
            if task:  # Found task, lets take the first
                task = task[0]
            else:
                continue
            for peer_ip in peer_ips:
                peer = self.site.addPeer(peer_ip[0], peer_ip[1], return_peer=True)
                if not peer:
                    continue
                if task["peers"] is None:
                    task["peers"] = []
                if peer not in task["peers"]:
                    task["peers"].append(peer)
                found[hash_id].append(peer)
                if peer.hashfield.appendHashId(hash_id):  # Peer has this file
                    peer.time_hashfield = None  # Peer hashfield probably outdated

        return found

    # Start find peers for optional files
    @util.Noparallel(blocking=False, ignore_args=True)
    def startFindOptional(self, reset_task=False, find_more=False, high_priority=False):
        """Locate peers that hold the queued optional files.

        Escalates through three stages: local hashfield knowledge, hashfield
        queries to connected peers, then findHashIds queries to random peers.
        Starts workers for every peer found at each stage.
        """
        # Wait for more file requests
        if len(self.tasks) < 20 or high_priority:
            time.sleep(0.01)
        if len(self.tasks) > 90:
            time.sleep(5)
        else:
            time.sleep(0.5)

        optional_tasks = [task for task in self.tasks if task["optional_hash_id"]]
        if not optional_tasks:
            return False
        optional_hash_ids = set([task["optional_hash_id"] for task in optional_tasks])
        time_tasks = self.time_task_added

        self.log.debug(
            "Finding peers for optional files: %s (reset_task: %s, find_more: %s)" %
            (optional_hash_ids, reset_task, find_more)
        )
        found = self.findOptionalTasks(optional_tasks, reset_task=reset_task)

        if found:
            found_peers = set([peer for peers in found.values() for peer in peers])
            self.startWorkers(found_peers)

        if len(found) < len(optional_hash_ids) or find_more or (high_priority and any(len(peers) < 10 for peers in found.itervalues())):
            self.log.debug("No local result for optional files: %s" % (optional_hash_ids - set(found)))

            # Query hashfield from connected peers
            threads = []
            peers = self.site.getConnectedPeers()
            if not peers:
                peers = self.site.getConnectablePeers()
            for peer in peers:
                if not peer.time_hashfield:
                    threads.append(gevent.spawn(peer.updateHashfield))
            gevent.joinall(threads, timeout=5)

            if time_tasks != self.time_task_added:  # New task added since start
                optional_tasks = [task for task in self.tasks if task["optional_hash_id"]]
                optional_hash_ids = set([task["optional_hash_id"] for task in optional_tasks])

            found = self.findOptionalTasks(optional_tasks)
            self.log.debug("Found optional files after query hashtable connected peers: %s/%s" % (
                len(found), len(optional_hash_ids)
            ))

            if found:
                found_peers = set([peer for hash_id_peers in found.values() for peer in hash_id_peers])
                self.startWorkers(found_peers)

        if len(found) < len(optional_hash_ids) or find_more:
            self.log.debug("No connected hashtable result for optional files: %s" % (optional_hash_ids - set(found)))

            # Try to query connected peers
            threads = []
            peers = [peer for peer in self.site.getConnectedPeers() if peer not in self.asked_peers]
            if not peers:
                peers = self.site.getConnectablePeers()

            for peer in peers:
                threads.append(gevent.spawn(peer.findHashIds, list(optional_hash_ids)))
                self.asked_peers.append(peer)

            # Poll the spawned queries for up to ~5 seconds.
            for i in range(5):
                time.sleep(1)
                thread_values = [thread.value for thread in threads if thread.value]
                if not thread_values:
                    continue

                found_ips = helper.mergeDicts(thread_values)
                found = self.addOptionalPeers(found_ips)
                self.log.debug("Found optional files after findhash connected peers: %s/%s (asked: %s)" % (
                    len(found), len(optional_hash_ids), len(threads)
                ))

                if found:
                    found_peers = set([peer for hash_id_peers in found.values() for peer in hash_id_peers])
                    self.startWorkers(found_peers)

                if len(thread_values) == len(threads):
                    # Got result from all started thread
                    break

            if len(found) < len(optional_hash_ids):
                self.log.debug("No findHash result, try random peers: %s" % (optional_hash_ids - set(found)))
                # Try to query random peers

                if time_tasks != self.time_task_added:  # New task added since start
                    optional_tasks = [task for task in self.tasks if task["optional_hash_id"]]
                    optional_hash_ids = set([task["optional_hash_id"] for task in optional_tasks])

                threads = []
                peers = self.site.getConnectablePeers(ignore=self.asked_peers)

                for peer in peers:
                    threads.append(gevent.spawn(peer.findHashIds, list(optional_hash_ids)))
                    self.asked_peers.append(peer)

                gevent.joinall(threads, timeout=15)

                found_ips = helper.mergeDicts([thread.value for thread in threads if thread.value])
                found = self.addOptionalPeers(found_ips)
                self.log.debug("Found optional files after findhash random peers: %s/%s" % (len(found), len(optional_hash_ids)))

                if found:
                    found_peers = set([peer for hash_id_peers in found.values() for peer in hash_id_peers])
                    self.startWorkers(found_peers)

        if len(found) < len(optional_hash_ids):
            self.log.debug("No findhash result for optional files: %s" % (optional_hash_ids - set(found)))

    # Stop all worker
    def stopWorkers(self):
        """Stop every worker and fail all queued tasks."""
        for worker in self.workers.values():
            worker.stop()
        tasks = self.tasks[:]  # Copy
        for task in tasks:  # Mark all current task as failed
            self.failTask(task)

    # Find workers by task
    def findWorkers(self, task):
        """Return the workers currently assigned to the given task."""
        workers = []
        for worker in self.workers.values():
            if worker.task == task:
                workers.append(worker)
        return workers

    # Ends and remove a worker
    def removeWorker(self, worker):
        """Unregister a worker; re-query optional files when workers run low."""
        worker.running = False
        if worker.key in self.workers:
            del(self.workers[worker.key])
            self.log.debug("Removed worker, workers: %s/%s" % (len(self.workers), self.getMaxWorkers()))
        if len(self.workers) <= self.getMaxWorkers() / 3 and len(self.asked_peers) < 10:
            important_task = (task for task in self.tasks if task["priority"] > 0)
            if next(important_task, None) or len(self.asked_peers) == 0:
                self.startFindOptional(find_more=True)
            else:
                self.startFindOptional()

    # Tasks sorted by this
    def getPriorityBoost(self, inner_path):
        """Return a priority bonus for a file based on its path/extension."""
        if inner_path == "content.json":
            return 9999  # Content.json always priority
        if inner_path == "index.html":
            return 9998  # index.html also important
        if "-default" in inner_path:
            return -4  # Default files are cloning not important
        elif inner_path.endswith(".css"):
            return 5  # boost css files priority
        elif inner_path.endswith(".js"):
            return 4  # boost js files priority
        elif inner_path.endswith("dbschema.json"):
            return 3  # boost database specification
        elif inner_path.endswith("content.json"):
            return 1  # boost included content.json files priority a bit
        elif inner_path.endswith(".json"):
            return 2  # boost data json files priority more
        return 0

    # Create new task and return asyncresult
    def addTask(self, inner_path, peer=None, priority=0):
        """Queue a download task for inner_path and return its AsyncResult.

        If a task for the file already exists it is updated (peer added,
        priority boosted) and its existing event is returned.
        """
        self.site.onFileStart(inner_path)  # First task, trigger site download started

        task = self.findTask(inner_path)
        if task:  # Already has task for that file
            if peer and task["peers"]:  # This peer also has new version, add it to task possible peers
                task["peers"].append(peer)
                self.log.debug("Added peer %s to %s" % (peer.key, task["inner_path"]))
                self.startWorkers([peer])
            elif peer and peer in task["failed"]:
                task["failed"].remove(peer)  # New update arrived, remove the peer from failed peers
                self.log.debug("Removed peer %s from failed %s" % (peer.key, task["inner_path"]))
                self.startWorkers([peer])
            if priority:
                task["priority"] += priority  # Boost on priority
            return task["evt"]
        else:  # No task for that file yet
            evt = gevent.event.AsyncResult()
            if peer:
                peers = [peer]  # Only download from this peer
            else:
                peers = None
            file_info = self.site.content_manager.getFileInfo(inner_path)
            if file_info and file_info["optional"]:
                optional_hash_id = helper.toHashId(file_info["sha512"])
            else:
                optional_hash_id = None
            if file_info:
                size = file_info.get("size", 0)
            else:
                size = 0
            priority += self.getPriorityBoost(inner_path)
            task = {
                "evt": evt, "workers_num": 0, "site": self.site, "inner_path": inner_path, "done": False,
                "optional_hash_id": optional_hash_id, "time_added": time.time(), "time_started": None,
                "time_action": None, "peers": peers, "priority": priority, "failed": [], "size": size
            }
            self.tasks.append(task)
            self.started_task_num += 1
            self.log.debug(
                "New task: %s, peer lock: %s, priority: %s, optional_hash_id: %s, tasks started: %s" %
                (task["inner_path"], peers, priority, optional_hash_id, self.started_task_num)
            )
            self.time_task_added = time.time()
            if optional_hash_id:
                # Optional files need a peer search first; workers only make
                # sense immediately when a concrete peer was supplied.
                if self.asked_peers:
                    del self.asked_peers[:]  # Reset asked peers
                self.startFindOptional(high_priority=priority > 0)

                if peers:
                    self.startWorkers(peers)

            else:
                self.startWorkers(peers)
            return evt

    # Find a task using inner_path
    def findTask(self, inner_path):
        """Return the queued task for inner_path, or None."""
        for task in self.tasks:
            if task["inner_path"] == inner_path:
                return task
        return None  # Not found

    # Wait for other tasks
    def checkComplete(self):
        time.sleep(0.1)
        if not self.tasks:
            self.log.debug("Check compelte: No tasks")
            self.onComplete()

    def onComplete(self):
        self.started_task_num = 0
        del self.asked_peers[:]
        self.site.onComplete()  # No more task trigger site complete

    # Mark a task done
    def doneTask(self, task):
        """Mark a task finished, fire its event, update hashfield if optional."""
        task["done"] = True
        self.tasks.remove(task)  # Remove from queue
        if task["optional_hash_id"]:
            self.log.debug("Downloaded optional file, adding to hashfield: %s" % task["inner_path"])
            self.site.content_manager.optionalDownloaded(task["inner_path"], task["optional_hash_id"], task["size"])
        self.site.onFileDone(task["inner_path"])
        task["evt"].set(True)
        if not self.tasks:
            gevent.spawn(self.checkComplete)

    # Mark a task failed
    def failTask(self, task):
        """Mark a task failed and fire its event with False."""
        if task in self.tasks:
            task["done"] = True
            self.tasks.remove(task)  # Remove from queue
            self.site.onFileFail(task["inner_path"])
            task["evt"].set(False)
            if not self.tasks:
                self.started_task_num = 0
| mit |
cristiana214/cristianachavez214-cristianachavez | python-build/python-libs/gdata/samples/oauth/oauth_on_appengine/appengine_utilities/cron.py | 129 | 18386 | """
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import calendar
import cgi
import datetime
import os
import pickle
import re

from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import db
APPLICATION_PORT = '8080'
CRON_PORT = '8081'
class _AppEngineUtilities_Cron(db.Model):
    """
    Model for the tasks in the datastore. This contains the scheduling and
    url information, as well as a field that sets the next time the instance
    should run.
    """
    # raw crontab line as entered ("min hour day mon dow url")
    cron_entry = db.StringProperty()
    # next datetime at which this task is due
    next_run = db.DateTimeProperty()
    # pickled dict of validated/expanded fields (see Cron._validate_cron)
    cron_compiled = db.BlobProperty()
    # task url fetched when the entry fires
    url = db.LinkProperty()
class Cron(object):
"""
Cron is a scheduling utility built for appengine, modeled after
crontab for unix systems. While true scheduled tasks are not
possible within the Appengine environment currently, this
is an attmempt to provide a request based alternate. You
configure the tasks in an included interface, and the import
the class on any request you want capable of running tasks.
On each request where Cron is imported, the list of tasks
that need to be run will be pulled and run. A task is a url
within your application. It's important to make sure that these
requests fun quickly, or you could risk timing out the actual
request.
See the documentation for more information on configuring
your application to support Cron and setting up tasks.
"""
    def __init__(self):
        """Run every stored task whose next_run time has passed.

        Instantiating Cron on a request is what drives the scheduler:
        due tasks are fetched, dispatched via urlfetch on the cron port,
        then rescheduled via _get_next_run.
        """
        # Check if any tasks need to be run
        query = _AppEngineUtilities_Cron.all()
        query.filter('next_run <= ', datetime.datetime.now())
        results = query.fetch(1000)
        if len(results) > 0:
            one_second = datetime.timedelta(seconds = 1)
            before = datetime.datetime.now()
            for r in results:
                # rewrite the task url onto the dedicated cron port so the
                # fetch doesn't recurse into the public application port
                if re.search(':' + APPLICATION_PORT, r.url):
                    r.url = re.sub(':' + APPLICATION_PORT, ':' + CRON_PORT, r.url)
                #result = urlfetch.fetch(r.url)
                diff = datetime.datetime.now() - before
                # stop dispatching after ~1 second so the hosting request
                # isn't at risk of timing out; the rest run on a later hit
                if int(diff.seconds) < 1:
                    # memcache.add acts as a best-effort lock so concurrent
                    # requests don't fire the same task twice
                    if memcache.add(str(r.key), "running"):
                        result = urlfetch.fetch(r.url)
                        r.next_run = self._get_next_run(pickle.loads(r.cron_compiled))
                        r.put()
                        memcache.delete(str(r.key))
                else:
                    break
def add_cron(self, cron_string):
cron = cron_string.split(" ")
if len(cron) is not 6:
raise ValueError, 'Invalid cron string. Format: * * * * * url'
cron = {
'min': cron[0],
'hour': cron[1],
'day': cron[2],
'mon': cron[3],
'dow': cron[4],
'url': cron[5],
}
cron_compiled = self._validate_cron(cron)
next_run = self._get_next_run(cron_compiled)
cron_entry = _AppEngineUtilities_Cron()
cron_entry.cron_entry = cron_string
cron_entry.next_run = next_run
cron_entry.cron_compiled = pickle.dumps(cron_compiled)
cron_entry.url = cron["url"]
cron_entry.put()
    def _validate_cron(self, cron):
        """
        Parse the field to determine whether it is an integer or lists,
        also converting strings to integers where necessary. If passed bad
        values, raises a ValueError.
        """
        # dispatch each raw field to its dedicated validator; every
        # validator returns the normalized form (list of ints, "*" for
        # dow, or an absolute url), which replaces the raw value in place
        parsers = {
            'dow': self._validate_dow,
            'mon': self._validate_mon,
            'day': self._validate_day,
            'hour': self._validate_hour,
            'min': self._validate_min,
            'url': self. _validate_url,
        }
        for el in cron:
            parse = parsers[el]
            cron[el] = parse(cron[el])
        return cron
def _validate_type(self, v, t):
"""
Validates that the number (v) passed is in the correct range for the
type (t). Raise ValueError, if validation fails.
Valid ranges:
day of week = 0-7
month = 1-12
day = 1-31
hour = 0-23
minute = 0-59
All can * which will then return the range for that entire type.
"""
if t == "dow":
if v >= 0 and v <= 7:
return [v]
elif v == "*":
return "*"
else:
raise ValueError, "Invalid day of week."
elif t == "mon":
if v >= 1 and v <= 12:
return [v]
elif v == "*":
return range(1, 12)
else:
raise ValueError, "Invalid month."
elif t == "day":
if v >= 1 and v <= 31:
return [v]
elif v == "*":
return range(1, 31)
else:
raise ValueError, "Invalid day."
elif t == "hour":
if v >= 0 and v <= 23:
return [v]
elif v == "*":
return range(0, 23)
else:
raise ValueError, "Invalid hour."
elif t == "min":
if v >= 0 and v <= 59:
return [v]
elif v == "*":
return range(0, 59)
else:
raise ValueError, "Invalid minute."
def _validate_list(self, l, t):
"""
Validates a crontab list. Lists are numerical values seperated
by a comma with no spaces. Ex: 0,5,10,15
Arguments:
l: comma seperated list of numbers
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = l.split(",")
return_list = []
# we have a list, validate all of them
for e in elements:
if "-" in e:
return_list.extend(self._validate_range(e, t))
else:
try:
v = int(e)
self._validate_type(v, t)
return_list.append(v)
except:
raise ValueError, "Names are not allowed in lists."
# return a list of integers
return return_list
def _validate_range(self, r, t):
"""
Validates a crontab range. Ranges are 2 numerical values seperated
by a dash with no spaces. Ex: 0-10
Arguments:
r: dash seperated list of 2 numbers
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = r.split('-')
# a range should be 2 elements
if len(elements) is not 2:
raise ValueError, "Invalid range passed: " + str(r)
# validate the minimum and maximum are valid for the type
for e in elements:
self._validate_type(int(e), t)
# return a list of the numbers in the range.
# +1 makes sure the end point is included in the return value
return range(int(elements[0]), int(elements[1]) + 1)
def _validate_step(self, s, t):
"""
Validates a crontab step. Steps are complicated. They can
be based on a range 1-10/2 or just step through all valid
*/2. When parsing times you should always check for step first
and see if it has a range or not, before checking for ranges because
this will handle steps of ranges returning the final list. Steps
of lists is not supported.
Arguments:
s: slash seperated string
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = s.split('/')
# a range should be 2 elements
if len(elements) is not 2:
raise ValueError, "Invalid step passed: " + str(s)
try:
step = int(elements[1])
except:
raise ValueError, "Invalid step provided " + str(s)
r_list = []
# if the first element is *, use all valid numbers
if elements[0] is "*" or elements[0] is "":
r_list.extend(self._validate_type('*', t))
# check and see if there is a list of ranges
elif "," in elements[0]:
ranges = elements[0].split(",")
for r in ranges:
# if it's a range, we need to manage that
if "-" in r:
r_list.extend(self._validate_range(r, t))
else:
try:
r_list.extend(int(r))
except:
raise ValueError, "Invalid step provided " + str(s)
elif "-" in elements[0]:
r_list.extend(self._validate_range(elements[0], t))
return range(r_list[0], r_list[-1] + 1, step)
def _validate_dow(self, dow):
"""
"""
# if dow is * return it. This is for date parsing where * does not mean
# every day for crontab entries.
if dow is "*":
return dow
days = {
'mon': 1,
'tue': 2,
'wed': 3,
'thu': 4,
'fri': 5,
'sat': 6,
# per man crontab sunday can be 0 or 7.
'sun': [0, 7],
}
if dow in days:
dow = days[dow]
return [dow]
# if dow is * return it. This is for date parsing where * does not mean
# every day for crontab entries.
elif dow is "*":
return dow
elif "/" in dow:
return(self._validate_step(dow, "dow"))
elif "," in dow:
return(self._validate_list(dow, "dow"))
elif "-" in dow:
return(self._validate_range(dow, "dow"))
else:
valid_numbers = range(0, 8)
if not int(dow) in valid_numbers:
raise ValueError, "Invalid day of week " + str(dow)
else:
return [int(dow)]
def _validate_mon(self, mon):
months = {
'jan': 1,
'feb': 2,
'mar': 3,
'apr': 4,
'may': 5,
'jun': 6,
'jul': 7,
'aug': 8,
'sep': 9,
'oct': 10,
'nov': 11,
'dec': 12,
}
if mon in months:
mon = months[mon]
return [mon]
elif mon is "*":
return range(1, 13)
elif "/" in mon:
return(self._validate_step(mon, "mon"))
elif "," in mon:
return(self._validate_list(mon, "mon"))
elif "-" in mon:
return(self._validate_range(mon, "mon"))
else:
valid_numbers = range(1, 13)
if not int(mon) in valid_numbers:
raise ValueError, "Invalid month " + str(mon)
else:
return [int(mon)]
def _validate_day(self, day):
if day is "*":
return range(1, 32)
elif "/" in day:
return(self._validate_step(day, "day"))
elif "," in day:
return(self._validate_list(day, "day"))
elif "-" in day:
return(self._validate_range(day, "day"))
else:
valid_numbers = range(1, 31)
if not int(day) in valid_numbers:
raise ValueError, "Invalid day " + str(day)
else:
return [int(day)]
def _validate_hour(self, hour):
if hour is "*":
return range(0, 24)
elif "/" in hour:
return(self._validate_step(hour, "hour"))
elif "," in hour:
return(self._validate_list(hour, "hour"))
elif "-" in hour:
return(self._validate_range(hour, "hour"))
else:
valid_numbers = range(0, 23)
if not int(hour) in valid_numbers:
raise ValueError, "Invalid hour " + str(hour)
else:
return [int(hour)]
def _validate_min(self, min):
if min is "*":
return range(0, 60)
elif "/" in min:
return(self._validate_step(min, "min"))
elif "," in min:
return(self._validate_list(min, "min"))
elif "-" in min:
return(self._validate_range(min, "min"))
else:
valid_numbers = range(0, 59)
if not int(min) in valid_numbers:
raise ValueError, "Invalid min " + str(min)
else:
return [int(min)]
def _validate_url(self, url):
# kludge for issue 842, right now we use request headers
# to set the host.
if url[0] is not "/":
url = "/" + url
url = 'http://' + str(os.environ['HTTP_HOST']) + url
return url
# content below is for when that issue gets fixed
#regex = re.compile("^(http|https):\/\/([a-z0-9-]\.+)*", re.IGNORECASE)
#if regex.match(url) is not None:
# return url
#else:
# raise ValueError, "Invalid url " + url
def _calc_month(self, next_run, cron):
while True:
if cron["mon"][-1] < next_run.month:
next_run = next_run.replace(year=next_run.year+1, \
month=cron["mon"][0], \
day=1,hour=0,minute=0)
else:
if next_run.month in cron["mon"]:
return next_run
else:
one_month = datetime.timedelta(months=1)
next_run = next_run + one_month
def _calc_day(self, next_run, cron):
# start with dow as per cron if dow and day are set
# then dow is used if it comes before day. If dow
# is *, then ignore it.
if str(cron["dow"]) != str("*"):
# convert any integers to lists in order to easily compare values
m = next_run.month
while True:
if next_run.month is not m:
next_run = next_run.replace(hour=0, minute=0)
next_run = self._calc_month(next_run, cron)
if next_run.weekday() in cron["dow"] or next_run.day in cron["day"]:
return next_run
else:
one_day = datetime.timedelta(days=1)
next_run = next_run + one_day
else:
m = next_run.month
while True:
if next_run.month is not m:
next_run = next_run.replace(hour=0, minute=0)
next_run = self._calc_month(next_run, cron)
# if cron["dow"] is next_run.weekday() or cron["day"] is next_run.day:
if next_run.day in cron["day"]:
return next_run
else:
one_day = datetime.timedelta(days=1)
next_run = next_run + one_day
def _calc_hour(self, next_run, cron):
m = next_run.month
d = next_run.day
while True:
if next_run.month is not m:
next_run = next_run.replace(hour=0, minute=0)
next_run = self._calc_month(next_run, cron)
if next_run.day is not d:
next_run = next_run.replace(hour=0)
next_run = self._calc_day(next_run, cron)
if next_run.hour in cron["hour"]:
return next_run
else:
m = next_run.month
d = next_run.day
one_hour = datetime.timedelta(hours=1)
next_run = next_run + one_hour
def _calc_minute(self, next_run, cron):
one_minute = datetime.timedelta(minutes=1)
m = next_run.month
d = next_run.day
h = next_run.hour
while True:
if next_run.month is not m:
next_run = next_run.replace(minute=0)
next_run = self._calc_month(next_run, cron)
if next_run.day is not d:
next_run = next_run.replace(minute=0)
next_run = self._calc_day(next_run, cron)
if next_run.hour is not h:
next_run = next_run.replace(minute=0)
next_run = self._calc_day(next_run, cron)
if next_run.minute in cron["min"]:
return next_run
else:
m = next_run.month
d = next_run.day
h = next_run.hour
next_run = next_run + one_minute
    def _get_next_run(self, cron):
        """Return the next datetime at which the compiled cron entry
        should fire, starting one minute from now.

        cron: dict of validated/expanded fields as produced by
        _validate_cron.
        """
        one_minute = datetime.timedelta(minutes=1)
        # go up 1 minute because it shouldn't happen right when added
        now = datetime.datetime.now() + one_minute
        next_run = now.replace(second=0, microsecond=0)
        # narrow from the coarsest field down: month (which also settles
        # the year), then day, hour and minute
        next_run = self._calc_month(next_run, cron)
        next_run = self._calc_day(next_run, cron)
        next_run = self._calc_hour(next_run, cron)
        next_run = self._calc_minute(next_run, cron)
        return next_run
| apache-2.0 |
RNAcentral/rnacentral-import-pipeline | tests/rnacentral/pgloader_test.py | 1 | 1031 | # -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
from rnacentral_pipeline.rnacentral import pgloader
@pytest.mark.parametrize(
    "output,expected",
    [
        ("data/pgloader/failed.txt", False),
        ("data/pgloader/success.txt", True),
        ("data/pgloader/success-with-check.txt", True),
    ],
)
def test_can_validate_output(output, expected):
    """pgloader.validate must reject captured output of failed runs and
    accept successful ones (including runs ending with a post-load
    check)."""
    with open(output, "r") as handle:
        assert pgloader.validate(handle) is expected
| apache-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django_1_2/tests/regressiontests/string_lookup/models.py | 92 | 1199 | # -*- coding: utf-8 -*-
from django.db import models
class Foo(models.Model):
    """Regression-test model targeted by string-based FK lookups."""
    name = models.CharField(max_length=50)
    # optional free-text field exercised by lookup tests
    friend = models.CharField(max_length=50, blank=True)
    def __unicode__(self):
        return "Foo %s" % self.name
class Bar(models.Model):
    """Model exercising the three FK declaration styles."""
    name = models.CharField(max_length=50)
    # direct model reference, plus forward ("Whiz") and backward ("Foo")
    # string references
    normal = models.ForeignKey(Foo, related_name='normal_foo')
    fwd = models.ForeignKey("Whiz")
    back = models.ForeignKey("Foo")
    def __unicode__(self):
        # NOTE(review): Bar declares no "place" field -- this would raise
        # AttributeError if ever called; probably meant a declared FK
        # (e.g. self.normal.name). Confirm before fixing.
        return "Bar %s" % self.place.name
class Whiz(models.Model):
    """Target of Bar's forward string reference ("Whiz")."""
    name = models.CharField(max_length=50)
    def __unicode__(self):
        return "Whiz %s" % self.name
class Child(models.Model):
    """One-to-one child declared via a forward string reference."""
    parent = models.OneToOneField('Base')
    name = models.CharField(max_length=50)
    def __unicode__(self):
        return "Child %s" % self.name
class Base(models.Model):
    """Target of Child's one-to-one string reference."""
    name = models.CharField(max_length=50)
    def __unicode__(self):
        return "Base %s" % self.name
class Article(models.Model):
    """Model mixing text and IP-address fields for lookup tests."""
    name = models.CharField(max_length=50)
    text = models.TextField()
    # nullable sender address; IPAddressField is the legacy IPv4-only type
    submitted_from = models.IPAddressField(blank=True, null=True)
    def __str__(self):
        return "Article %s" % self.name
| bsd-3-clause |
msbone/LCS | gondul_tools/gondul2pdns.py | 1 | 3160 | import requests
import json
import pprint
import os
from pdns import PowerDNS
# Settings to be changed before use
apiswitchmanagementurl = 'http://192.168.88.224/api/read/switches-management'
tempfile = 'temp.json'
pdnsapiurl = 'http://10.0.1.2:8081/api/v1'
pdnsapikey = 'fun'
zonename = 'lan.sdok.no.'


def _replace_rrset(fqdn, rtype, address):
    """Build an rrset that creates/updates one A or AAAA record."""
    record = {'content': address, 'disabled': False, 'type': rtype}
    return {'name': fqdn, 'changetype': 'replace', 'type': rtype,
            'records': [record], 'ttl': 900}


def _delete_rrset(fqdn, rtype):
    """Build an rrset that removes the record of the given type."""
    return {'name': fqdn, 'changetype': 'delete', 'type': rtype, 'ttl': 900}


# Fetch current switch management info from Gondul
r = requests.get(apiswitchmanagementurl)
switches = r.json()['switches'].items()
new = {}
old = {}
pdns = PowerDNS(pdnsapiurl, pdnsapikey)
rrsets = []

# Load the previous state if present; os.stat on a missing file raises,
# so check existence first (the original crashed on first run)
if os.path.exists(tempfile) and os.stat(tempfile).st_size != 0:
    with open(tempfile) as data_file:
        old = json.load(data_file)

for name, data in switches:
    fqdn = name + '.' + zonename
    if name in old:
        # Known switch: sync whichever address families changed
        for field, rtype, label in (('mgmt_v4_addr', 'A', 'IPv4'),
                                    ('mgmt_v6_addr', 'AAAA', 'IPv6')):
            if old[name][field] != data[field]:
                if data[field] is not None:
                    print(name + ': New ' + label + ' found, will update DNS')
                    rrsets.append(_replace_rrset(fqdn, rtype, data[field]))
                else:
                    print(name + ': Blank ' + label + ' found, removing from DNS')
                    rrsets.append(_delete_rrset(fqdn, rtype))
    else:
        # New switch: add whichever addresses are present
        if data['mgmt_v4_addr'] is not None:
            rrsets.append(_replace_rrset(fqdn, 'A', data['mgmt_v4_addr']))
        if data['mgmt_v6_addr'] is not None:
            rrsets.append(_replace_rrset(fqdn, 'AAAA', data['mgmt_v6_addr']))
        print(name + ': New switch found, will add to DNS')
    new[name] = data

# Push the accumulated changes to PowerDNS in one call
if rrsets:
    print(json.dumps(rrsets))
    print(pdns.set_zone_records(zonename, rrsets))

# Persist the current state for the next run
with open(tempfile, 'w') as outfile:
    json.dump(new, outfile)
boompieman/iim_project | project_python2/lib/python2.7/site-packages/zmq/eventloop/zmqstream.py | 15 | 18977 | #
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A utility class to send to and recv from a non-blocking socket."""
from __future__ import with_statement
import sys
import zmq
from zmq.utils import jsonapi
try:
import cPickle as pickle
except ImportError:
import pickle
from .ioloop import IOLoop
try:
# gen_log will only import from >= 3.0
from tornado.log import gen_log
from tornado import stack_context
except ImportError:
from .minitornado.log import gen_log
from .minitornado import stack_context
try:
from queue import Queue
except ImportError:
from Queue import Queue
from zmq.utils.strtypes import bytes, unicode, basestring
try:
callable
except NameError:
callable = lambda obj: hasattr(obj, '__call__')
class ZMQStream(object):
"""A utility class to register callbacks when a zmq socket sends and receives
For use with zmq.eventloop.ioloop
There are three main methods
Methods:
* **on_recv(callback, copy=True):**
register a callback to be run every time the socket has something to receive
* **on_send(callback):**
register a callback to be run every time you call send
* **send(self, msg, flags=0, copy=False, callback=None):**
perform a send that will trigger the callback
if callback is passed, on_send is also called.
There are also send_multipart(), send_json(), send_pyobj()
Three other methods for deactivating the callbacks:
* **stop_on_recv():**
turn off the recv callback
* **stop_on_send():**
turn off the send callback
which simply call ``on_<evt>(None)``.
The entire socket interface, excluding direct recv methods, is also
provided, primarily through direct-linking the methods.
e.g.
>>> stream.bind is stream.socket.bind
True
"""
socket = None
io_loop = None
poller = None
_send_queue = None
_recv_callback = None
_send_callback = None
_close_callback = None
_state = 0
_flushed = False
_recv_copy = False
def __init__(self, socket, io_loop=None):
self.socket = socket
self.io_loop = io_loop or IOLoop.instance()
self.poller = zmq.Poller()
self._send_queue = Queue()
self._recv_callback = None
self._send_callback = None
self._close_callback = None
self._recv_copy = False
self._flushed = False
self._state = self.io_loop.ERROR
self._init_io_state()
# shortcircuit some socket methods
self.bind = self.socket.bind
self.bind_to_random_port = self.socket.bind_to_random_port
self.connect = self.socket.connect
self.setsockopt = self.socket.setsockopt
self.getsockopt = self.socket.getsockopt
self.setsockopt_string = self.socket.setsockopt_string
self.getsockopt_string = self.socket.getsockopt_string
self.setsockopt_unicode = self.socket.setsockopt_unicode
self.getsockopt_unicode = self.socket.getsockopt_unicode
def stop_on_recv(self):
"""Disable callback and automatic receiving."""
return self.on_recv(None)
def stop_on_send(self):
"""Disable callback on sending."""
return self.on_send(None)
def stop_on_err(self):
"""DEPRECATED, does nothing"""
gen_log.warn("on_err does nothing, and will be removed")
def on_err(self, callback):
"""DEPRECATED, does nothing"""
gen_log.warn("on_err does nothing, and will be removed")
def on_recv(self, callback, copy=True):
"""Register a callback for when a message is ready to recv.
There can be only one callback registered at a time, so each
call to `on_recv` replaces previously registered callbacks.
on_recv(None) disables recv event polling.
Use on_recv_stream(callback) instead, to register a callback that will receive
both this ZMQStream and the message, instead of just the message.
Parameters
----------
callback : callable
callback must take exactly one argument, which will be a
list, as returned by socket.recv_multipart()
if callback is None, recv callbacks are disabled.
copy : bool
copy is passed directly to recv, so if copy is False,
callback will receive Message objects. If copy is True,
then callback will receive bytes/str objects.
Returns : None
"""
self._check_closed()
assert callback is None or callable(callback)
self._recv_callback = stack_context.wrap(callback)
self._recv_copy = copy
if callback is None:
self._drop_io_state(self.io_loop.READ)
else:
self._add_io_state(self.io_loop.READ)
def on_recv_stream(self, callback, copy=True):
"""Same as on_recv, but callback will get this stream as first argument
callback must take exactly two arguments, as it will be called as::
callback(stream, msg)
Useful when a single callback should be used with multiple streams.
"""
if callback is None:
self.stop_on_recv()
else:
self.on_recv(lambda msg: callback(self, msg), copy=copy)
def on_send(self, callback):
"""Register a callback to be called on each send
There will be two arguments::
callback(msg, status)
* `msg` will be the list of sendable objects that was just sent
* `status` will be the return result of socket.send_multipart(msg) -
MessageTracker or None.
Non-copying sends return a MessageTracker object whose
`done` attribute will be True when the send is complete.
This allows users to track when an object is safe to write to
again.
The second argument will always be None if copy=True
on the send.
Use on_send_stream(callback) to register a callback that will be passed
this ZMQStream as the first argument, in addition to the other two.
on_send(None) disables recv event polling.
Parameters
----------
callback : callable
callback must take exactly two arguments, which will be
the message being sent (always a list),
and the return result of socket.send_multipart(msg) -
MessageTracker or None.
if callback is None, send callbacks are disabled.
"""
self._check_closed()
assert callback is None or callable(callback)
self._send_callback = stack_context.wrap(callback)
def on_send_stream(self, callback):
"""Same as on_send, but callback will get this stream as first argument
Callback will be passed three arguments::
callback(stream, msg, status)
Useful when a single callback should be used with multiple streams.
"""
if callback is None:
self.stop_on_send()
else:
self.on_send(lambda msg, status: callback(self, msg, status))
def send(self, msg, flags=0, copy=True, track=False, callback=None):
"""Send a message, optionally also register a new callback for sends.
See zmq.socket.send for details.
"""
return self.send_multipart([msg], flags=flags, copy=copy, track=track, callback=callback)
def send_multipart(self, msg, flags=0, copy=True, track=False, callback=None):
"""Send a multipart message, optionally also register a new callback for sends.
See zmq.socket.send_multipart for details.
"""
kwargs = dict(flags=flags, copy=copy, track=track)
self._send_queue.put((msg, kwargs))
callback = callback or self._send_callback
if callback is not None:
self.on_send(callback)
else:
# noop callback
self.on_send(lambda *args: None)
self._add_io_state(self.io_loop.WRITE)
def send_string(self, u, flags=0, encoding='utf-8', callback=None):
"""Send a unicode message with an encoding.
See zmq.socket.send_unicode for details.
"""
if not isinstance(u, basestring):
raise TypeError("unicode/str objects only")
return self.send(u.encode(encoding), flags=flags, callback=callback)
send_unicode = send_string
def send_json(self, obj, flags=0, callback=None):
"""Send json-serialized version of an object.
See zmq.socket.send_json for details.
"""
if jsonapi is None:
raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
else:
msg = jsonapi.dumps(obj)
return self.send(msg, flags=flags, callback=callback)
def send_pyobj(self, obj, flags=0, protocol=-1, callback=None):
"""Send a Python object as a message using pickle to serialize.
See zmq.socket.send_json for details.
"""
msg = pickle.dumps(obj, protocol)
return self.send(msg, flags, callback=callback)
def _finish_flush(self):
"""callback for unsetting _flushed flag."""
self._flushed = False
def flush(self, flag=zmq.POLLIN|zmq.POLLOUT, limit=None):
"""Flush pending messages.
This method safely handles all pending incoming and/or outgoing messages,
bypassing the inner loop, passing them to the registered callbacks.
A limit can be specified, to prevent blocking under high load.
flush will return the first time ANY of these conditions are met:
* No more events matching the flag are pending.
* the total number of events handled reaches the limit.
Note that if ``flag|POLLIN != 0``, recv events will be flushed even if no callback
is registered, unlike normal IOLoop operation. This allows flush to be
used to remove *and ignore* incoming messages.
Parameters
----------
flag : int, default=POLLIN|POLLOUT
0MQ poll flags.
If flag|POLLIN, recv events will be flushed.
If flag|POLLOUT, send events will be flushed.
Both flags can be set at once, which is the default.
limit : None or int, optional
The maximum number of messages to send or receive.
Both send and recv count against this limit.
Returns
-------
int : count of events handled (both send and recv)
"""
self._check_closed()
# unset self._flushed, so callbacks will execute, in case flush has
# already been called this iteration
already_flushed = self._flushed
self._flushed = False
# initialize counters
count = 0
def update_flag():
"""Update the poll flag, to prevent registering POLLOUT events
if we don't have pending sends."""
return flag & zmq.POLLIN | (self.sending() and flag & zmq.POLLOUT)
flag = update_flag()
if not flag:
# nothing to do
return 0
self.poller.register(self.socket, flag)
events = self.poller.poll(0)
while events and (not limit or count < limit):
s,event = events[0]
if event & zmq.POLLIN: # receiving
self._handle_recv()
count += 1
if self.socket is None:
# break if socket was closed during callback
break
if event & zmq.POLLOUT and self.sending():
self._handle_send()
count += 1
if self.socket is None:
# break if socket was closed during callback
break
flag = update_flag()
if flag:
self.poller.register(self.socket, flag)
events = self.poller.poll(0)
else:
events = []
if count: # only bypass loop if we actually flushed something
# skip send/recv callbacks this iteration
self._flushed = True
# reregister them at the end of the loop
if not already_flushed: # don't need to do it again
self.io_loop.add_callback(self._finish_flush)
elif already_flushed:
self._flushed = True
# update ioloop poll state, which may have changed
self._rebuild_io_state()
return count
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed."""
self._close_callback = stack_context.wrap(callback)
def close(self, linger=None):
"""Close this stream."""
if self.socket is not None:
self.io_loop.remove_handler(self.socket)
self.socket.close(linger)
self.socket = None
if self._close_callback:
self._run_callback(self._close_callback)
    def receiving(self):
        """Returns True if we are currently receiving from the stream."""
        # i.e. an on_recv callback is registered, so READ is being polled
        return self._recv_callback is not None
    def sending(self):
        """Returns True if we are currently sending to the stream."""
        # i.e. there are queued outgoing messages not yet written
        return not self._send_queue.empty()
    def closed(self):
        """Returns True once close() has cleared the underlying socket."""
        return self.socket is None
def _run_callback(self, callback, *args, **kwargs):
"""Wrap running callbacks in try/except to allow us to
close our socket."""
try:
# Use a NullContext to ensure that all StackContexts are run
# inside our blanket exception handler rather than outside.
with stack_context.NullContext():
callback(*args, **kwargs)
except:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close()
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
def _handle_events(self, fd, events):
"""This method is the actual handler for IOLoop, that gets called whenever
an event on my socket is posted. It dispatches to _handle_recv, etc."""
# print "handling events"
if not self.socket:
gen_log.warning("Got events for closed stream %s", fd)
return
try:
# dispatch events:
if events & IOLoop.ERROR:
gen_log.error("got POLLERR event on ZMQStream, which doesn't make sense")
return
if events & IOLoop.READ:
self._handle_recv()
if not self.socket:
return
if events & IOLoop.WRITE:
self._handle_send()
if not self.socket:
return
# rebuild the poll state
self._rebuild_io_state()
except:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close()
raise
    def _handle_recv(self):
        """Handle a recv event: drain one multipart message and dispatch
        it to the registered on_recv callback (if any)."""
        if self._flushed:
            # flush() already serviced events this iteration; skip
            return
        try:
            msg = self.socket.recv_multipart(zmq.NOBLOCK, copy=self._recv_copy)
        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                # state changed since poll event
                pass
            else:
                gen_log.error("RECV Error: %s"%zmq.strerror(e.errno))
        else:
            if self._recv_callback:
                callback = self._recv_callback
                # self._recv_callback = None
                self._run_callback(callback, msg)
        
        # self.update_state()
    def _handle_send(self):
        """Handle a send event: pop one queued message, write it to the
        socket and invoke the on_send callback with the send status."""
        if self._flushed:
            # flush() already serviced events this iteration; skip
            return
        if not self.sending():
            gen_log.error("Shouldn't have handled a send event")
            return
        
        msg, kwargs = self._send_queue.get()
        try:
            status = self.socket.send_multipart(msg, **kwargs)
        except zmq.ZMQError as e:
            gen_log.error("SEND Error: %s", e)
            # the error is reported to the callback as the status value
            status = e
        if self._send_callback:
            callback = self._send_callback
            self._run_callback(callback, msg, status)
        
        # self.update_state()
def _check_closed(self):
if not self.socket:
raise IOError("Stream is closed")
def _rebuild_io_state(self):
"""rebuild io state based on self.sending() and receiving()"""
if self.socket is None:
return
state = self.io_loop.ERROR
if self.receiving():
state |= self.io_loop.READ
if self.sending():
state |= self.io_loop.WRITE
if state != self._state:
self._state = state
self._update_handler(state)
def _add_io_state(self, state):
"""Add io_state to poller."""
if not self._state & state:
self._state = self._state | state
self._update_handler(self._state)
def _drop_io_state(self, state):
"""Stop poller from watching an io_state."""
if self._state & state:
self._state = self._state & (~state)
self._update_handler(self._state)
def _update_handler(self, state):
"""Update IOLoop handler with state."""
if self.socket is None:
return
self.io_loop.update_handler(self.socket, state)
def _init_io_state(self):
"""initialize the ioloop event handler"""
with stack_context.NullContext():
self.io_loop.add_handler(self.socket, self._handle_events, self._state)
| gpl-3.0 |
bbusemeyer/mainline | utils/autogen/job_control.py | 3 | 8655 | from __future__ import print_function
import os
import json
import shutil
def default_job_record(filename):
    """Build a job record (nested dict) populated with default settings.

    filename: path to a structure file; the suffix picks the storage key:
      '.cif' -> job_record['cif'], '.xyz' -> job_record['xyz'].  Any other
      suffix prints an error and calls quit().
    Returns the record with 'dft', 'qmc', and 'control' sub-dictionaries.
    """
    job_record = {}
    job_record['dft'] = {}
    job_record['qmc'] = {}
    job_record['control'] = {}

    # Set up Hamiltonian: read the raw structure file into the record.
    with open(filename, "r") as f:
        suffix = filename.split('.')[-1]
        if suffix == 'cif':
            job_record['cif'] = f.read()
        elif suffix == 'xyz':
            job_record['xyz'] = f.read()
        else:
            print("ERROR: didn't understand file suffix", suffix)
            # NOTE(review): quit() exits the whole interpreter, not just this call.
            quit()
    job_record['supercell'] = [[1,0,0],[0,1,0],[0,0,1]]
    job_record['pseudopotential'] = 'BFD'
    job_record['charge'] = 0
    job_record['total_spin'] = 0
    job_record['assert_nochanges'] = True  # Assert no changes in CRYSTAL input.

    # DFT-specific options
    job_record['dft']['symmetrized'] = False  # True to use spacegroup symmetry, False for primitive symmetry
    job_record['dft']['levshift'] = None  # [shift,lock]; shift is the shift in Hartree, lock (0/1) is whether to lock into a non-conducting state
    job_record['dft']['functional'] = {'exchange':'PBE','correlation':'PBE','hybrid':25}
    job_record['dft']['basis'] = [0.2,3,3]
    job_record['dft']['kmesh'] = [8,8,8]
    job_record['dft']['tolinteg'] = [12,12,12,12,20]
    job_record['dft']['spin_polarized'] = True
    job_record['dft']['initial_spin'] = []
    job_record['dft']['initial_charges'] = {}  # For example, 'O':-2,'Mg':2
    job_record['dft']['edifftol'] = 10
    job_record['dft']['fmixing'] = 99
    job_record['dft']['broyden'] = [0.01,60,8]
    job_record['dft']['maxcycle'] = 200
    # None = fresh run, else copy this path to fort.20;
    # e.g. job_record['dft']['restart_from'] = ../successful_run/fort.9
    job_record['dft']['restart_from'] = None
    job_record['dft']['smear'] = None
    # Values:
    # 'stubborn'     : resume if job is killed or ran out of SCF steps.
    # 'optimistic'   : resume if job is killed.
    # 'conservative' : never resume job.
    job_record['dft']['resume_mode'] = 'conservative'

    # QMC-specific options
    job_record['qmc']['kpoints'] = 'real'  # or 'all' for both real and complex valued k-points
    job_record['qmc']['vmc'] = {}
    job_record['qmc']['vmc']['jastrow'] = ['twobody']  # or 'threebody'
    job_record['qmc']['vmc']['nblock'] = 100
    job_record['qmc']['vmc']['optimizer'] = ['variance']  # or 'energy' or None
    job_record['qmc']['vmc']['target_error'] = 0.01
    job_record['qmc']['dmc'] = {}
    job_record['qmc']['dmc']['timestep'] = [0.02]
    job_record['qmc']['dmc']['jastrow'] = ['twobody']  # or 'threebody'
    job_record['qmc']['dmc']['nblock'] = 16
    job_record['qmc']['dmc']['optimizer'] = ['variance']  # or energy
    job_record['qmc']['dmc']['localization'] = ['tmoves']
    job_record['qmc']['dmc']['target_error'] = 0.01
    job_record['qmc']['dmc']['excitations'] = 'no'  # VBM-CBM or other..
    job_record['qmc']['dmc']['save_trace'] = True
    job_record['qmc']['postprocess'] = {}
    job_record['qmc']['postprocess']['region_fluctuation'] = True
    job_record['qmc']['postprocess']['density'] = False  # True
    job_record['qmc']['postprocess']['obdm'] = False
    job_record['qmc']['postprocess']['basis'] = None
    job_record['qmc']['postprocess']['orb'] = None
    job_record['qmc']['postprocess']['swap_endian'] = False
    job_record['qmc']['variance_optimize'] = {}
    job_record['qmc']['variance_optimize']['niterations'] = 10
    job_record['qmc']['variance_optimize']['nruns'] = 3
    job_record['qmc']['variance_optimize']['reltol'] = 0.1
    job_record['qmc']['variance_optimize']['abstol'] = 1e3  # TODO better default.
    job_record['qmc']['variance_optimize']['jastrow'] = ['twobody']
    job_record['qmc']['energy_optimize'] = {}
    job_record['qmc']['energy_optimize']['threshold'] = 0.001
    job_record['qmc']['energy_optimize']['total_nstep'] = 16384
    job_record['qmc']['energy_optimize']['jastrow'] = ['twobody']
    job_record['qmc']['maximize'] = {}
    job_record['qmc']['maximize']['nconfig'] = [100]
    job_record['qmc']['maximize']['jastrow'] = ['twobody']

    # Control options: bookkeeping used by the job driver.
    job_record['control']['id'] = 1
    job_record['control']['elements'] = []
    job_record['control']['pretty_formula'] = ''
    job_record['control']['queue_id'] = []
    return job_record
def execute(record, element_list):
    """
    Run element_list tasks on this job record.

    Works inside a directory named after record['control']['id'] (created if
    missing).  Each element is checked, run, or resumed as appropriate; the
    loop stops at the first element whose status is not 'ok'.  The updated
    record is dumped to record.json and returned.

    Fixes over the original: the caller's working directory is restored even
    if an element raises (try/finally), the bare `except:` is narrowed to
    OSError, and file handles are closed via `with`.
    """
    currwd = os.getcwd()
    d = str(record['control']['id'])
    try:
        os.mkdir(d)
    except OSError:
        # Directory already exists from a previous run; reuse it.
        pass
    os.chdir(d)
    try:
        jsonfile = "record.json"
        if os.path.isfile(jsonfile):
            # We could do checking to make sure the
            # definition hasn't changed.
            with open(jsonfile, 'r') as f:
                record_read = json.load(f)
            record['control'] = record_read['control']
        print("#######################ID", record['control']['id'])
        for element in element_list:
            status = element.check_status(record)
            print(element._name_, status)
            if status == 'not_started':
                status = element.run(record)
                print(element._name_, status)
            if status == 'not_finished':
                status = element.resume(record)
                print(element._name_, status)
            if status != 'ok':
                break
            record = element.output(record)
        with open(jsonfile, 'w') as f:
            json.dump(record, f)
    finally:
        # Always restore the caller's working directory, even on error.
        os.chdir(currwd)
    return record
# This will be moved to RunCrystal after our bigger merge.
def restart_job(jobname):
    """
    Restart a crystal job from scratch. This means deleting all progress. Use it
    for redoing a job that may have been corrupted or you'd like to change
    something important.

    Fix: the original called raw_input(), which does not exist on Python 3;
    fall back to input() so the prompt works on both interpreters.
    """
    try:
        _input = raw_input  # Python 2.
    except NameError:
        _input = input      # Python 3: raw_input was renamed to input.
    do_it = _input("Restart %s? (y/n)" % jobname)
    if do_it == 'y':
        for tail in ("/autogen.d12", "/autogen.d12.o"):
            try:
                os.remove(jobname + tail)
            except OSError:
                # Already gone; nothing to delete.
                pass
    else:
        print("Didn't do it")
# Currently only defined for CRYSTAL runs.
# This will be moved to RunCrystal after our bigger merge.
def check_continue(jobname, qchecker, reasonable_lastSCF=50.0):
    """
    Look at CRYSTAL output, and report results.
    Current return values:
      no_record, running, no_output, success, too_many_cycles, finished (fall-back),
      scf_fail, not_enough_decrease, divergence, continue
    "continue" suggests the calculation should call continue_job(), and is only
    returned when no other condition is found.

    Fixes over the original: both files are opened with `with` so handles are
    always closed, and the "DETOT" lines are split only once instead of twice.
    """
    try:
        with open(jobname + "/record.json", 'r') as f:
            jobrecord = json.load(f)
    except IOError:
        print("JOB CONTROL: Shouldn't continue %s has no record." % jobname)
        return "no_record"
    qstatus = qchecker.status(jobrecord)
    if qstatus == 'running':
        print("JOB CONTROL: Shouldn't continue %s because still running" % jobname)
        return "running"
    try:
        with open(jobname + "/autogen.d12.o", 'r') as outf:
            outlines = outf.read().split('\n')
    except IOError:
        print("JOB CONTROL: Can't continue %s because no output" % jobname)
        return "no_output"
    reslines = [line for line in outlines if "ENDED" in line]
    if len(reslines) > 0:
        if "CONVERGENCE" in reslines[0]:
            print("JOB CONTROL: Shouldn't continue %s because successful." % jobname)
            return "success"
        elif "TOO MANY CYCLES" in reslines[0]:
            print("JOB CONTROL: check_continue found %s has 'too many cycles'." % jobname)
            return "too_many_cycles"
        else:  # What else can happen?
            return "finished"
    # Tokenize the SCF iteration lines once; column 5 is the energy change
    # (DETOT) and column 3 is the total energy of that iteration.
    scfwords = [line.split() for line in outlines if "DETOT" in line]
    detots = [float(words[5]) for words in scfwords]
    if len(detots) == 0:
        print("JOB CONTROL: Shouldn't continue %s because no SCF last time." % jobname)
        return "scf_fail"
    detots_net = sum(detots[1:])
    if detots_net > reasonable_lastSCF:
        print("JOB CONTROL: Shouldn't continue %s because not enough decrease (%.2f>%.2f)." %
              (jobname, detots_net, reasonable_lastSCF))
        return "not_enough_decrease"
    etots = [float(words[3]) for words in scfwords]
    if etots[-1] > 0:
        # This case probably won't happen if this works as expected.
        print("JOB CONTROL: Shouldn't continue %s because divergence (%.2f)." %
              (jobname, etots[-1]))
        return "divergence"
    print("JOB CONTROL: Should continue %s." % jobname)
    return "continue"
# Currently only defined for CRYSTAL runs. Returns name of restart file.
# This will be moved to RunCrystal after our bigger merge.
# TODO: Add max_continues option.
# TODO: The stdout of this is off because the line between jobs is drawn between
# this output and the execute() output.
def continue_job(jobname):
    """Continue a job that ran out of time.

    Archives autogen.d12 / autogen.d12.o under the next free numbered prefix,
    copies fort.79 alongside them, and returns the path of the archived
    fort.79 restart file.

    Fix: the original's `json.load(open(...))` leaked a file handle; the load
    is kept (it doubles as an existence/validity check on record.json) but
    now closes the file.
    """
    # Validate that the job record exists and is parseable JSON; raises
    # IOError / ValueError otherwise, matching the original behavior.
    with open(jobname + "/record.json", 'r') as f:
        json.load(f)
    # Find the first unused archive index.
    trynum = 0
    while os.path.isfile(jobname + "/" + str(trynum) + ".autogen.d12.o"):
        trynum += 1
    prefix = jobname + "/" + str(trynum) + "."
    for filename in ["autogen.d12", "autogen.d12.o"]:
        shutil.move(jobname + "/" + filename, prefix + filename)
    for filename in ["fort.79"]:
        # Copy (not move): the live fort.79 is still needed by the restart.
        shutil.copy(jobname + "/" + filename, prefix + filename)
    return prefix + "fort.79"
| gpl-2.0 |
falau/pogom | pogom/pgoapi/protos/POGOProtos/Settings/LevelSettings_pb2.py | 16 | 2541 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/LevelSettings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# NOTE: this module is protoc-generated (see header); any comments added here
# will be lost if the file is regenerated from LevelSettings.proto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Settings/LevelSettings.proto',
  package='POGOProtos.Settings',
  syntax='proto3',
  # Serialized form of the original .proto definition.
  serialized_pb=_b('\n\'POGOProtos/Settings/LevelSettings.proto\x12\x13POGOProtos.Settings\"Q\n\rLevelSettings\x12\x1b\n\x13trainer_cp_modifier\x18\x02 \x01(\x01\x12#\n\x1btrainer_difficulty_modifier\x18\x03 \x01(\x01\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Descriptor for the LevelSettings message: two optional double fields
# (proto type=1 is double; cpp_type=5 is the generated C++ double type).
_LEVELSETTINGS = _descriptor.Descriptor(
  name='LevelSettings',
  full_name='POGOProtos.Settings.LevelSettings',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='trainer_cp_modifier', full_name='POGOProtos.Settings.LevelSettings.trainer_cp_modifier', index=0,
      number=2, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='trainer_difficulty_modifier', full_name='POGOProtos.Settings.LevelSettings.trainer_difficulty_modifier', index=1,
      number=3, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=64,
  serialized_end=145,
)

DESCRIPTOR.message_types_by_name['LevelSettings'] = _LEVELSETTINGS

# Concrete message class backed by the descriptor above.
LevelSettings = _reflection.GeneratedProtocolMessageType('LevelSettings', (_message.Message,), dict(
  DESCRIPTOR = _LEVELSETTINGS,
  __module__ = 'POGOProtos.Settings.LevelSettings_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Settings.LevelSettings)
  ))
_sym_db.RegisterMessage(LevelSettings)

# @@protoc_insertion_point(module_scope)
| mit |
unicri/edx-platform | common/lib/xmodule/xmodule/tests/test_graders.py | 102 | 12238 | """Grading tests"""
import unittest
from xmodule import graders
from xmodule.graders import Score, aggregate_scores
class GradesheetTest(unittest.TestCase):
    '''Tests the aggregate_scores method'''

    def test_weighted_grading(self):
        """Check all/graded totals as ungraded and graded scores accumulate."""
        scores = []
        # Give Score a subtraction operator so assertAlmostEqual (which
        # compares via round(a - b, 7)) can diff two Score tuples directly.
        # NOTE(review): this mutates the shared Score class for the rest of
        # the test process — confirm no other test relies on the default.
        Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)

        # No scores at all: both totals are empty.
        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(all_total, Score(earned=0, possible=0, graded=False, section="summary"))
        self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary"))

        # An ungraded score contributes to the overall total only.
        scores.append(Score(earned=0, possible=5, graded=False, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(all_total, Score(earned=0, possible=5, graded=False, section="summary"))
        self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary"))

        # Graded scores contribute to both totals.
        scores.append(Score(earned=3, possible=5, graded=True, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(all_total, Score(earned=3, possible=10, graded=False, section="summary"))
        self.assertAlmostEqual(graded_total, Score(earned=3, possible=5, graded=True, section="summary"))

        scores.append(Score(earned=2, possible=5, graded=True, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(all_total, Score(earned=5, possible=15, graded=False, section="summary"))
        self.assertAlmostEqual(graded_total, Score(earned=5, possible=10, graded=True, section="summary"))
class GraderTest(unittest.TestCase):
    '''Tests grader implementations'''

    # No sections at all.
    empty_gradesheet = {
    }

    # Section types exist but contain no scores yet.
    incomplete_gradesheet = {
        'Homework': [],
        'Lab': [],
        'Midterm': [],
    }

    # A realistic sheet: 2 of 12 homeworks, 7 of 7 labs, and the midterm.
    test_gradesheet = {
        'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'),
                     Score(earned=16, possible=16.0, graded=True, section='hw2')],
        # The dropped scores should be from the assignments that don't exist yet
        'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'),  # Dropped
                Score(earned=1, possible=1.0, graded=True, section='lab2'),
                Score(earned=1, possible=1.0, graded=True, section='lab3'),
                Score(earned=5, possible=25.0, graded=True, section='lab4'),  # Dropped
                Score(earned=3, possible=4.0, graded=True, section='lab5'),  # Dropped
                Score(earned=6, possible=7.0, graded=True, section='lab6'),
                Score(earned=5, possible=6.0, graded=True, section='lab7')],
        'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ],
    }

    def test_single_section_grader(self):
        """SingleSectionGrader grades one named section; missing sections give 0."""
        midterm_grader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
        lab4_grader = graders.SingleSectionGrader("Lab", "lab4")
        bad_lab_grader = graders.SingleSectionGrader("Lab", "lab42")

        # Empty/missing sections grade to 0 with a one-entry breakdown.
        for graded in [midterm_grader.grade(self.empty_gradesheet),
                       midterm_grader.grade(self.incomplete_gradesheet),
                       bad_lab_grader.grade(self.test_gradesheet)]:
            self.assertEqual(len(graded['section_breakdown']), 1)
            self.assertEqual(graded['percent'], 0.0)

        graded = midterm_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.505)
        self.assertEqual(len(graded['section_breakdown']), 1)

        graded = lab4_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.2)
        self.assertEqual(len(graded['section_breakdown']), 1)

    def test_assignment_format_grader(self):
        """AssignmentFormatGrader averages min_count sections, dropping drop_count."""
        homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
        no_drop_grader = graders.AssignmentFormatGrader("Homework", 12, 0)
        # Even though the minimum number is 3, this should grade correctly when 7 assignments are found
        overflow_grader = graders.AssignmentFormatGrader("Lab", 3, 2)
        lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3)

        # Test the grading of an empty gradesheet
        for graded in [homework_grader.grade(self.empty_gradesheet),
                       no_drop_grader.grade(self.empty_gradesheet),
                       homework_grader.grade(self.incomplete_gradesheet),
                       no_drop_grader.grade(self.incomplete_gradesheet)]:
            self.assertAlmostEqual(graded['percent'], 0.0)
            # Make sure the breakdown includes 12 sections, plus one summary
            self.assertEqual(len(graded['section_breakdown']), 12 + 1)

        graded = homework_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.11)  # 100% + 10% / 10 assignments
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)

        graded = no_drop_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0916666666666666)  # 100% + 10% / 12 assignments
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)

        graded = overflow_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.8880952380952382)  # 100% + 10% / 5 assignments
        self.assertEqual(len(graded['section_breakdown']), 7 + 1)

        graded = lab_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.9226190476190477)
        self.assertEqual(len(graded['section_breakdown']), 7 + 1)

    def test_assignment_format_grader_on_single_section_entry(self):
        """With min_count=1 AssignmentFormatGrader acts like a single-section grader."""
        midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)
        # Test the grading on a section with one item:
        for graded in [midterm_grader.grade(self.empty_gradesheet),
                       midterm_grader.grade(self.incomplete_gradesheet)]:
            self.assertAlmostEqual(graded['percent'], 0.0)
            # Make sure the breakdown includes just the one summary
            self.assertEqual(len(graded['section_breakdown']), 0 + 1)
            self.assertEqual(graded['section_breakdown'][0]['label'], 'Midterm')

        graded = midterm_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.505)
        self.assertEqual(len(graded['section_breakdown']), 0 + 1)

    def test_weighted_subsections_grader(self):
        """WeightedSubsectionsGrader combines sub-graders by their weights."""
        # First, a few sub graders
        homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
        lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3)
        # phasing out the use of SingleSectionGraders, and instead using AssignmentFormatGraders that
        # will act like SingleSectionGraders on single sections.
        midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)

        weighted_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.25),
                                                             (lab_grader, lab_grader.category, 0.25),
                                                             (midterm_grader, midterm_grader.category, 0.5)])

        # Weights are not required to sum to 1.
        over_one_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.5),
                                                                     (lab_grader, lab_grader.category, 0.5),
                                                                     (midterm_grader, midterm_grader.category, 0.5)])

        # The midterm should have all weight on this one
        zero_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.0),
                                                                 (lab_grader, lab_grader.category, 0.0),
                                                                 (midterm_grader, midterm_grader.category, 0.5)])

        # This should always have a final percent of zero
        all_zero_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.0),
                                                                     (lab_grader, lab_grader.category, 0.0),
                                                                     (midterm_grader, midterm_grader.category, 0.0)])

        empty_grader = graders.WeightedSubsectionsGrader([])

        graded = weighted_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = over_one_weights_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.7688095238095238)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = zero_weights_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.2525)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = all_zero_weights_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        for graded in [weighted_grader.grade(self.empty_gradesheet),
                       weighted_grader.grade(self.incomplete_gradesheet),
                       zero_weights_grader.grade(self.empty_gradesheet),
                       all_zero_weights_grader.grade(self.empty_gradesheet)]:
            self.assertAlmostEqual(graded['percent'], 0.0)
            self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
            self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = empty_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), 0)
        self.assertEqual(len(graded['grade_breakdown']), 0)

    def test_grader_from_conf(self):
        """grader_from_conf builds a WeightedSubsectionsGrader equivalent to the above."""
        # Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test
        # in test_graders.WeightedSubsectionsGrader, but generate the graders with confs.
        weighted_grader = graders.grader_from_conf([
            {
                'type': "Homework",
                'min_count': 12,
                'drop_count': 2,
                'short_label': "HW",
                'weight': 0.25,
            },
            {
                'type': "Lab",
                'min_count': 7,
                'drop_count': 3,
                'category': "Labs",
                'weight': 0.25
            },
            {
                'type': "Midterm",
                'name': "Midterm Exam",
                'short_label': "Midterm",
                'weight': 0.5,
            },
        ])

        empty_grader = graders.grader_from_conf([])

        graded = weighted_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = empty_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), 0)
        self.assertEqual(len(graded['grade_breakdown']), 0)

        # Test that graders can also be used instead of lists of dictionaries
        homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
        homework_grader2 = graders.grader_from_conf(homework_grader)

        graded = homework_grader2.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.11)
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)
# TODO: How do we test failure cases? The parser only logs an error when
# it can't parse something. Maybe it should throw exceptions?
| agpl-3.0 |
whummer/moto | tests/test_ecr/test_ecr_boto3.py | 1 | 37480 | from __future__ import unicode_literals
import hashlib
import json
from datetime import datetime
from freezegun import freeze_time
import os
from random import random
import re
import sure # noqa
import boto3
from botocore.exceptions import ClientError, ParamValidationError
from dateutil.tz import tzlocal
from moto import mock_ecr
from nose import SkipTest
def _create_image_digest(contents=None):
if not contents:
contents = 'docker_image{0}'.format(int(random() * 10 ** 6))
return "sha256:%s" % hashlib.sha256(contents.encode('utf-8')).hexdigest()
def _create_image_manifest():
    """Return a minimal docker v2 schema-2 manifest with fake digests."""
    def _layer(size, digest):
        # All layers share the same media type; only size/digest vary.
        return {
            "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
            "size": size,
            "digest": digest,
        }
    return {
        "schemaVersion": 2,
        "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
        "config": {
            "mediaType": "application/vnd.docker.container.image.v1+json",
            "size": 7023,
            "digest": _create_image_digest("config"),
        },
        "layers": [
            _layer(32654, _create_image_digest("layer1")),
            _layer(16724, _create_image_digest("layer2")),
            # Last layer digest is randomized so every manifest is unique.
            _layer(73109, _create_image_digest()),
        ],
    }
@mock_ecr
def test_create_repository():
    """Creating a repository returns the expected name, ARN, URI and registry."""
    ecr = boto3.client('ecr', region_name='us-east-1')
    repo = ecr.create_repository(repositoryName='test_ecr_repository')['repository']
    repo['repositoryName'].should.equal('test_ecr_repository')
    repo['repositoryArn'].should.equal(
        'arn:aws:ecr:us-east-1:012345678910:repository/test_ecr_repository')
    repo['registryId'].should.equal('012345678910')
    repo['repositoryUri'].should.equal(
        '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_ecr_repository')
@mock_ecr
def test_describe_repositories():
    """describe_repositories lists every repository in the registry."""
    ecr = boto3.client('ecr', region_name='us-east-1')
    names = ('test_repository1', 'test_repository0')
    for name in names:
        ecr.create_repository(repositoryName=name)
    repos = ecr.describe_repositories()['repositories']
    len(repos).should.equal(2)
    expected_arns = set(
        'arn:aws:ecr:us-east-1:012345678910:repository/' + name for name in names)
    set(r['repositoryArn'] for r in repos).should.equal(expected_arns)
    expected_uris = set(
        '012345678910.dkr.ecr.us-east-1.amazonaws.com/' + name for name in names)
    set(r['repositoryUri'] for r in repos).should.equal(expected_uris)
@mock_ecr
def test_describe_repositories_1():
    """Filtering by the default registry id still returns both repositories."""
    ecr = boto3.client('ecr', region_name='us-east-1')
    names = ('test_repository1', 'test_repository0')
    for name in names:
        ecr.create_repository(repositoryName=name)
    repos = ecr.describe_repositories(registryId='012345678910')['repositories']
    len(repos).should.equal(2)
    expected_arns = set(
        'arn:aws:ecr:us-east-1:012345678910:repository/' + name for name in names)
    set(r['repositoryArn'] for r in repos).should.equal(expected_arns)
    expected_uris = set(
        '012345678910.dkr.ecr.us-east-1.amazonaws.com/' + name for name in names)
    set(r['repositoryUri'] for r in repos).should.equal(expected_uris)
@mock_ecr
def test_describe_repositories_2():
    """An unknown registry id matches no repositories."""
    ecr = boto3.client('ecr', region_name='us-east-1')
    for name in ('test_repository1', 'test_repository0'):
        ecr.create_repository(repositoryName=name)
    repos = ecr.describe_repositories(registryId='109876543210')['repositories']
    len(repos).should.equal(0)
@mock_ecr
def test_describe_repositories_3():
    """Filtering by repository name returns only the matching repository."""
    ecr = boto3.client('ecr', region_name='us-east-1')
    for name in ('test_repository1', 'test_repository0'):
        ecr.create_repository(repositoryName=name)
    repos = ecr.describe_repositories(repositoryNames=['test_repository1'])['repositories']
    len(repos).should.equal(1)
    repos[0]['repositoryArn'].should.equal(
        'arn:aws:ecr:us-east-1:012345678910:repository/test_repository1')
    repos[0]['repositoryUri'].should.equal(
        '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1')
@mock_ecr
def test_describe_repositories_with_image():
    """A repository that contains an image is still described exactly once."""
    ecr = boto3.client('ecr', region_name='us-east-1')
    ecr.create_repository(repositoryName='test_repository')
    ecr.put_image(
        repositoryName='test_repository',
        imageManifest=json.dumps(_create_image_manifest()),
        imageTag='latest',
    )
    repos = ecr.describe_repositories(repositoryNames=['test_repository'])['repositories']
    len(repos).should.equal(1)
@mock_ecr
def test_delete_repository():
    """Deleting a repository returns its metadata and actually removes it."""
    ecr = boto3.client('ecr', region_name='us-east-1')
    ecr.create_repository(repositoryName='test_repository')
    deleted = ecr.delete_repository(repositoryName='test_repository')['repository']
    deleted['repositoryName'].should.equal('test_repository')
    deleted['repositoryArn'].should.equal(
        'arn:aws:ecr:us-east-1:012345678910:repository/test_repository')
    deleted['registryId'].should.equal('012345678910')
    deleted['repositoryUri'].should.equal(
        '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository')
    # The registry must now be empty.
    len(ecr.describe_repositories()['repositories']).should.equal(0)
@mock_ecr
def test_put_image():
    """put_image returns the stored tag, digest and repository metadata."""
    ecr = boto3.client('ecr', region_name='us-east-1')
    ecr.create_repository(repositoryName='test_repository')
    image = ecr.put_image(
        repositoryName='test_repository',
        imageManifest=json.dumps(_create_image_manifest()),
        imageTag='latest',
    )['image']
    image['imageId']['imageTag'].should.equal('latest')
    image['imageId']['imageDigest'].should.contain("sha")
    image['repositoryName'].should.equal('test_repository')
    image['registryId'].should.equal('012345678910')
@mock_ecr
def test_put_image_with_push_date():
    """imagePushedAt reflects the (frozen) wall-clock time of each push."""
    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
        raise SkipTest('Cant manipulate time in server mode')

    ecr = boto3.client('ecr', region_name='us-east-1')
    ecr.create_repository(repositoryName='test_repository')

    push_dates = []
    for frozen in ('2018-08-28 00:00:00', '2019-05-31 00:00:00'):
        with freeze_time(frozen):
            push_dates.append(datetime.now())
            ecr.put_image(
                repositoryName='test_repository',
                imageManifest=json.dumps(_create_image_manifest()),
                imageTag='latest',
            )

    details = ecr.describe_images(repositoryName='test_repository')['imageDetails']
    type(details).should.be(list)
    len(details).should.be(2)
    set(d['imagePushedAt'] for d in details).should.equal(set(push_dates))
@mock_ecr
def test_put_image_with_multiple_tags():
    """Pushing one manifest under two tags yields one image with both tags."""
    ecr = boto3.client('ecr', region_name='us-east-1')
    ecr.create_repository(repositoryName='test_repository')
    manifest = _create_image_manifest()

    first = ecr.put_image(
        repositoryName='test_repository',
        imageManifest=json.dumps(manifest),
        imageTag='v1',
    )['image']
    first['imageId']['imageTag'].should.equal('v1')
    first['imageId']['imageDigest'].should.contain("sha")
    first['repositoryName'].should.equal('test_repository')
    first['registryId'].should.equal('012345678910')

    # Same manifest, second tag.
    second = ecr.put_image(
        repositoryName='test_repository',
        imageManifest=json.dumps(manifest),
        imageTag='latest',
    )['image']
    second['imageId']['imageTag'].should.equal('latest')
    second['imageId']['imageDigest'].should.contain("sha")
    second['repositoryName'].should.equal('test_repository')
    second['registryId'].should.equal('012345678910')

    details = ecr.describe_images(repositoryName='test_repository')['imageDetails']
    type(details).should.be(list)
    len(details).should.be(1)
    details[0]['imageDigest'].should.contain("sha")
    details[0]['registryId'].should.equal("012345678910")
    details[0]['repositoryName'].should.equal("test_repository")
    len(details[0]['imageTags']).should.be(2)
    details[0]['imageTags'].should.be.equal(['v1', 'latest'])
@mock_ecr
def test_list_images():
    """list_images returns exactly the tags pushed to each repository."""
    ecr = boto3.client('ecr', region_name='us-east-1')
    ecr.create_repository(repositoryName='test_repository_1')
    ecr.create_repository(repositoryName='test_repository_2')

    for tag in ('latest', 'v1', 'v2'):
        ecr.put_image(
            repositoryName='test_repository_1',
            imageManifest=json.dumps(_create_image_manifest()),
            imageTag=tag,
        )
    ecr.put_image(
        repositoryName='test_repository_2',
        imageManifest=json.dumps(_create_image_manifest()),
        imageTag='oldest',
    )

    listed = ecr.list_images(repositoryName='test_repository_1')['imageIds']
    type(listed).should.be(list)
    len(listed).should.be(3)
    set(i['imageTag'] for i in listed).should.equal(set(['latest', 'v1', 'v2']))

    listed = ecr.list_images(repositoryName='test_repository_2')['imageIds']
    type(listed).should.be(list)
    len(listed).should.be(1)
    listed[0]['imageTag'].should.equal('oldest')
@mock_ecr
def test_list_images_from_repository_that_doesnt_exist():
    """list_images raises when the repository or registry id doesn't match."""
    ecr = boto3.client('ecr', region_name='us-east-1')
    ecr.create_repository(repositoryName='test_repository_1')

    # Repository missing entirely from the given registry.
    missing_repo_msg = re.compile(
        r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*",
        re.MULTILINE)
    ecr.list_images.when.called_with(
        repositoryName='repo-that-doesnt-exist',
        registryId='123',
    ).should.throw(Exception, missing_repo_msg)

    # Repository exists, but not under the requested registry id.
    wrong_registry_msg = re.compile(
        r".*The repository with name 'test_repository_1' does not exist in the registry with id '222'.*",
        re.MULTILINE)
    ecr.list_images.when.called_with(
        repositoryName='test_repository_1',
        registryId='222',
    ).should.throw(Exception, wrong_registry_msg)
@mock_ecr
def test_describe_images():
    """describe_images returns one detail per pushed image, tagged or not."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    # One untagged push followed by three tagged pushes.
    client.put_image(
        repositoryName='test_repository',
        imageManifest=json.dumps(_create_image_manifest())
    )
    for tag in ('latest', 'v1', 'v2'):
        client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(_create_image_manifest()),
            imageTag=tag
        )

    response = client.describe_images(repositoryName='test_repository')
    details = response['imageDetails']
    type(details).should.be(list)
    len(details).should.be(4)

    for detail in details:
        detail['imageDigest'].should.contain("sha")
        detail['registryId'].should.equal("012345678910")
        detail['repositoryName'].should.equal("test_repository")

    # The untagged image must not expose an imageTags key at all.
    details[0].should_not.have.key('imageTags')
    for detail in details[1:]:
        len(detail['imageTags']).should.be(1)
    set(detail['imageTags'][0] for detail in details[1:]).should.equal(
        set(['latest', 'v1', 'v2']))

    for detail in details:
        detail['imageSizeInBytes'].should.equal(52428800)
@mock_ecr
def test_describe_images_by_tag():
    """Each tag filter must resolve to exactly the image pushed under it."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    pushed = {}
    for tag in ['latest', 'v1', 'v2']:
        result = client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(_create_image_manifest()),
            imageTag=tag
        )
        pushed[tag] = result['image']

    for tag, image in pushed.items():
        response = client.describe_images(
            repositoryName='test_repository', imageIds=[{'imageTag': tag}])
        len(response['imageDetails']).should.be(1)
        detail = response['imageDetails'][0]
        detail['registryId'].should.equal("012345678910")
        detail['repositoryName'].should.equal("test_repository")
        detail['imageTags'].should.equal([image['imageId']['imageTag']])
        detail['imageDigest'].should.equal(image['imageId']['imageDigest'])
@mock_ecr
def test_describe_images_tags_should_not_contain_empty_tag1():
    """Tagging an initially-untagged manifest must not leave an empty tag behind."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    manifest = _create_image_manifest()
    # Push the manifest untagged first, then apply three tags to it.
    client.put_image(
        repositoryName='test_repository',
        imageManifest=json.dumps(manifest)
    )
    tags = ['v1', 'v2', 'latest']
    for tag in tags:
        client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(manifest),
            imageTag=tag
        )

    # Look up by the last tag applied; all three tags must be present.
    response = client.describe_images(
        repositoryName='test_repository', imageIds=[{'imageTag': tags[-1]}])
    len(response['imageDetails']).should.be(1)
    detail = response['imageDetails'][0]
    len(detail['imageTags']).should.equal(3)
    detail['imageTags'].should.be.equal(tags)
@mock_ecr
def test_describe_images_tags_should_not_contain_empty_tag2():
    """An untagged re-push between tagged pushes must not inject an empty tag."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    manifest = _create_image_manifest()
    tags = ['v1', 'v2']
    for tag in tags:
        client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(manifest),
            imageTag=tag
        )
    # Re-push the same manifest untagged, then tag it as latest.
    client.put_image(
        repositoryName='test_repository',
        imageManifest=json.dumps(manifest)
    )
    client.put_image(
        repositoryName='test_repository',
        imageManifest=json.dumps(manifest),
        imageTag='latest'
    )

    # Look up by the last tag of the first loop ('v2', as in the original).
    response = client.describe_images(
        repositoryName='test_repository', imageIds=[{'imageTag': tags[-1]}])
    len(response['imageDetails']).should.be(1)
    detail = response['imageDetails'][0]
    len(detail['imageTags']).should.equal(3)
    detail['imageTags'].should.be.equal(['v1', 'v2', 'latest'])
@mock_ecr
def test_describe_repository_that_doesnt_exist():
    """describe_repositories must raise ClientError for an unknown repo."""
    client = boto3.client('ecr', region_name='us-east-1')

    expected_msg = re.compile(
        r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*",
        re.MULTILINE)
    client.describe_repositories.when.called_with(
        repositoryNames=['repo-that-doesnt-exist'],
        registryId='123',
    ).should.throw(ClientError, expected_msg)
@mock_ecr
def test_describe_image_that_doesnt_exist():
    """describe_images must distinguish a missing image from a missing repo."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    # Known repository, unknown tag.
    missing_image_msg = re.compile(
        r".*The image with imageId {imageDigest:'null', imageTag:'testtag'} does not exist within "
        r"the repository with name 'test_repository' in the registry with id '123'.*",
        re.MULTILINE)
    client.describe_images.when.called_with(
        repositoryName='test_repository', imageIds=[{'imageTag': 'testtag'}], registryId='123',
    ).should.throw(ClientError, missing_image_msg)

    # Unknown repository altogether.
    missing_repo_msg = re.compile(
        r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*",
        re.MULTILINE)
    client.describe_images.when.called_with(
        repositoryName='repo-that-doesnt-exist', imageIds=[{'imageTag': 'testtag'}], registryId='123',
    ).should.throw(ClientError, missing_repo_msg)
@mock_ecr
def test_delete_repository_that_doesnt_exist():
    """delete_repository must raise ClientError for an unknown repo."""
    client = boto3.client('ecr', region_name='us-east-1')

    expected_msg = re.compile(
        r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*",
        re.MULTILINE)
    client.delete_repository.when.called_with(
        repositoryName='repo-that-doesnt-exist',
        registryId='123',
    ).should.throw(ClientError, expected_msg)
@mock_ecr
def test_describe_images_by_digest():
    """Each digest filter must resolve to exactly the image pushed under it."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    pushed_by_digest = {}
    for tag in ['latest', 'v1', 'v2']:
        result = client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(_create_image_manifest()),
            imageTag=tag
        )
        digest = result['image']['imageId']['imageDigest']
        pushed_by_digest[digest] = result['image']

    for digest, image in pushed_by_digest.items():
        response = client.describe_images(
            repositoryName='test_repository',
            imageIds=[{'imageDigest': digest}])
        len(response['imageDetails']).should.be(1)
        detail = response['imageDetails'][0]
        detail['registryId'].should.equal("012345678910")
        detail['repositoryName'].should.equal("test_repository")
        detail['imageTags'].should.equal([image['imageId']['imageTag']])
        detail['imageDigest'].should.equal(digest)
@mock_ecr
def test_get_authorization_token_assume_region():
    """With no registryIds, the token is issued for the default registry."""
    client = boto3.client('ecr', region_name='us-east-1')
    response = client.get_authorization_token()

    response.should.contain('authorizationData')
    response.should.contain('ResponseMetadata')
    response['authorizationData'].should.equal([
        {
            'authorizationToken': 'QVdTOjAxMjM0NTY3ODkxMC1hdXRoLXRva2Vu',
            'proxyEndpoint': 'https://012345678910.dkr.ecr.us-east-1.amazonaws.com',
            'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal())
        },
    ])
@mock_ecr
def test_get_authorization_token_explicit_regions():
    """One authorizationData entry is returned per requested registry id."""
    client = boto3.client('ecr', region_name='us-east-1')
    response = client.get_authorization_token(
        registryIds=['10987654321', '878787878787'])

    response.should.contain('authorizationData')
    response.should.contain('ResponseMetadata')
    response['authorizationData'].should.equal([
        {
            'authorizationToken': 'QVdTOjEwOTg3NjU0MzIxLWF1dGgtdG9rZW4=',
            'proxyEndpoint': 'https://10987654321.dkr.ecr.us-east-1.amazonaws.com',
            'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()),
        },
        {
            'authorizationToken': 'QVdTOjg3ODc4Nzg3ODc4Ny1hdXRoLXRva2Vu',
            'proxyEndpoint': 'https://878787878787.dkr.ecr.us-east-1.amazonaws.com',
            'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal())
        }
    ])
@mock_ecr
def test_batch_get_image():
    """batch_get_image returns the matching image and no failures."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')
    for tag in ('latest', 'v1', 'v2'):
        client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(_create_image_manifest()),
            imageTag=tag
        )

    response = client.batch_get_image(
        repositoryName='test_repository',
        imageIds=[{'imageTag': 'v2'}],
    )

    images = response['images']
    type(images).should.be(list)
    len(images).should.be(1)
    images[0]['imageManifest'].should.contain("vnd.docker.distribution.manifest.v2+json")
    images[0]['registryId'].should.equal("012345678910")
    images[0]['repositoryName'].should.equal("test_repository")
    images[0]['imageId']['imageTag'].should.equal("v2")
    images[0]['imageId']['imageDigest'].should.contain("sha")

    type(response['failures']).should.be(list)
    len(response['failures']).should.be(0)
@mock_ecr
def test_batch_get_image_that_doesnt_exist():
    """Requesting an unknown tag yields no images and one ImageNotFound failure."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')
    for tag in ('latest', 'v1', 'v2'):
        client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(_create_image_manifest()),
            imageTag=tag
        )

    response = client.batch_get_image(
        repositoryName='test_repository',
        imageIds=[{'imageTag': 'v5'}],
    )

    type(response['images']).should.be(list)
    len(response['images']).should.be(0)

    failures = response['failures']
    type(failures).should.be(list)
    len(failures).should.be(1)
    failures[0]['failureReason'].should.equal("Requested image not found")
    failures[0]['failureCode'].should.equal("ImageNotFound")
    failures[0]['imageId']['imageTag'].should.equal("v5")
@mock_ecr
def test_batch_get_image_no_tags():
    """Omitting imageIds entirely is a client-side parameter validation error."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')
    client.put_image(
        repositoryName='test_repository',
        imageManifest=json.dumps(_create_image_manifest()),
        imageTag='latest'
    )

    expected_msg = re.compile(
        r".*Missing required parameter in input: \"imageIds\".*",
        re.MULTILINE)
    client.batch_get_image.when.called_with(
        repositoryName='test_repository',
    ).should.throw(ParamValidationError, expected_msg)
@mock_ecr
def test_batch_delete_image_by_tag():
    """Deleting one tag removes only that tag and reports it as deleted."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    manifest = _create_image_manifest()
    for tag in ['v1', 'v1.0', 'latest']:
        client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(manifest),
            imageTag=tag,
        )

    before = client.describe_images(repositoryName='test_repository')
    delete_response = client.batch_delete_image(
        registryId='012345678910',
        repositoryName='test_repository',
        imageIds=[{'imageTag': 'latest'}],
    )
    after = client.describe_images(repositoryName='test_repository')

    # Three tags before the delete, two afterwards.
    type(before['imageDetails'][0]['imageTags']).should.be(list)
    len(before['imageDetails'][0]['imageTags']).should.be(3)
    type(after['imageDetails'][0]['imageTags']).should.be(list)
    len(after['imageDetails'][0]['imageTags']).should.be(2)

    type(delete_response['imageIds']).should.be(list)
    len(delete_response['imageIds']).should.be(1)
    delete_response['imageIds'][0]['imageTag'].should.equal("latest")
    type(delete_response['failures']).should.be(list)
    len(delete_response['failures']).should.be(0)
@mock_ecr
def test_batch_delete_image_delete_last_tag():
    """Deleting the only tag removes the image from the repository entirely."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')
    client.put_image(
        repositoryName='test_repository',
        imageManifest=json.dumps(_create_image_manifest()),
        imageTag='v1',
    )

    before = client.describe_images(repositoryName='test_repository')
    delete_response = client.batch_delete_image(
        registryId='012345678910',
        repositoryName='test_repository',
        imageIds=[{'imageTag': 'v1'}],
    )
    after = client.describe_images(repositoryName='test_repository')

    type(before['imageDetails'][0]['imageTags']).should.be(list)
    len(before['imageDetails'][0]['imageTags']).should.be(1)
    # The image itself is gone once its last tag is deleted.
    type(after['imageDetails']).should.be(list)
    len(after['imageDetails']).should.be(0)

    type(delete_response['imageIds']).should.be(list)
    len(delete_response['imageIds']).should.be(1)
    delete_response['imageIds'][0]['imageTag'].should.equal("v1")
    type(delete_response['failures']).should.be(list)
    len(delete_response['failures']).should.be(0)
@mock_ecr
def test_batch_delete_image_with_nonexistent_tag():
    """Deleting a tag that was never pushed reports ImageNotFound, removes nothing."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    manifest = _create_image_manifest()
    for tag in ['v1', 'v1.0', 'latest']:
        client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(manifest),
            imageTag=tag,
        )

    describe_response = client.describe_images(repositoryName='test_repository')
    missing_tag = "missing-tag"
    delete_response = client.batch_delete_image(
        registryId='012345678910',
        repositoryName='test_repository',
        imageIds=[{'imageTag': missing_tag}],
    )

    # All three tags are still present.
    type(describe_response['imageDetails'][0]['imageTags']).should.be(list)
    len(describe_response['imageDetails'][0]['imageTags']).should.be(3)

    type(delete_response['imageIds']).should.be(list)
    len(delete_response['imageIds']).should.be(0)
    delete_response['failures'][0]['imageId']['imageTag'].should.equal(missing_tag)
    delete_response['failures'][0]['failureCode'].should.equal("ImageNotFound")
    delete_response['failures'][0]['failureReason'].should.equal("Requested image not found")
    type(delete_response['failures']).should.be(list)
    len(delete_response['failures']).should.be(1)
@mock_ecr
def test_batch_delete_image_by_digest():
    """Deleting by digest removes every tag that points at that digest."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    manifest = _create_image_manifest()
    tags = ['v1', 'v2', 'latest']
    for tag in tags:
        client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(manifest),
            imageTag=tag
        )

    describe_response = client.describe_images(repositoryName='test_repository')
    image_digest = describe_response['imageDetails'][0]['imageDigest']

    delete_response = client.batch_delete_image(
        registryId='012345678910',
        repositoryName='test_repository',
        imageIds=[{'imageDigest': image_digest}],
    )

    # Nothing should be left in the repository.
    describe_response = client.describe_images(repositoryName='test_repository')
    type(describe_response['imageDetails']).should.be(list)
    len(describe_response['imageDetails']).should.be(0)

    # One deletion record per tag, all sharing the digest.
    deleted = delete_response['imageIds']
    type(deleted).should.be(list)
    len(deleted).should.be(3)
    for entry in deleted:
        entry['imageDigest'].should.equal(image_digest)
    set(entry['imageTag'] for entry in deleted).should.equal(set(tags))

    type(delete_response['failures']).should.be(list)
    len(delete_response['failures']).should.be(0)
@mock_ecr
def test_batch_delete_image_with_invalid_digest():
    """A malformed digest is rejected with InvalidImageDigest."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    manifest = _create_image_manifest()
    for tag in ['v1', 'v2', 'latest']:
        client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(manifest),
            imageTag=tag
        )

    invalid_image_digest = 'sha256:invalid-digest'
    delete_response = client.batch_delete_image(
        registryId='012345678910',
        repositoryName='test_repository',
        imageIds=[{'imageDigest': invalid_image_digest}],
    )

    type(delete_response['imageIds']).should.be(list)
    len(delete_response['imageIds']).should.be(0)
    type(delete_response['failures']).should.be(list)
    len(delete_response['failures']).should.be(1)
    failure = delete_response['failures'][0]
    failure['imageId']['imageDigest'].should.equal(invalid_image_digest)
    failure['failureCode'].should.equal("InvalidImageDigest")
    failure['failureReason'].should.equal("Invalid request parameters: image digest should satisfy the regex '[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+'")
@mock_ecr
def test_batch_delete_image_with_missing_parameters():
    """An imageId with neither tag nor digest fails with MissingDigestAndTag."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    delete_response = client.batch_delete_image(
        registryId='012345678910',
        repositoryName='test_repository',
        imageIds=[{}],
    )

    type(delete_response['imageIds']).should.be(list)
    len(delete_response['imageIds']).should.be(0)
    type(delete_response['failures']).should.be(list)
    len(delete_response['failures']).should.be(1)
    failure = delete_response['failures'][0]
    failure['failureCode'].should.equal("MissingDigestAndTag")
    failure['failureReason'].should.equal("Invalid request parameters: both tag and digest cannot be null")
@mock_ecr
def test_batch_delete_image_with_matching_digest_and_tag():
    """A digest plus one of its own tags deletes the image and all its tags."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    manifest = _create_image_manifest()
    tags = ['v1', 'v1.0', 'latest']
    for tag in tags:
        client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(manifest),
            imageTag=tag
        )

    describe_response = client.describe_images(repositoryName='test_repository')
    image_digest = describe_response['imageDetails'][0]['imageDigest']

    delete_response = client.batch_delete_image(
        registryId='012345678910',
        repositoryName='test_repository',
        imageIds=[{'imageDigest': image_digest, 'imageTag': 'v1'}],
    )

    # The whole image (with all its tags) is gone.
    describe_response = client.describe_images(repositoryName='test_repository')
    type(describe_response['imageDetails']).should.be(list)
    len(describe_response['imageDetails']).should.be(0)

    deleted = delete_response['imageIds']
    type(deleted).should.be(list)
    len(deleted).should.be(3)
    for entry in deleted:
        entry['imageDigest'].should.equal(image_digest)
    set(entry['imageTag'] for entry in deleted).should.equal(set(tags))

    type(delete_response['failures']).should.be(list)
    len(delete_response['failures']).should.be(0)
@mock_ecr
def test_batch_delete_image_with_mismatched_digest_and_tag():
    """A digest paired with a tag it doesn't carry is reported as ImageNotFound."""
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test_repository')

    manifest = _create_image_manifest()
    for tag in ['v1', 'latest']:
        client.put_image(
            repositoryName='test_repository',
            imageManifest=json.dumps(manifest),
            imageTag=tag
        )

    describe_response = client.describe_images(repositoryName='test_repository')
    image_digest = describe_response['imageDetails'][0]['imageDigest']

    delete_response = client.batch_delete_image(
        registryId='012345678910',
        repositoryName='test_repository',
        imageIds=[{'imageDigest': image_digest, 'imageTag': 'v2'}],
    )

    type(delete_response['imageIds']).should.be(list)
    len(delete_response['imageIds']).should.be(0)
    type(delete_response['failures']).should.be(list)
    len(delete_response['failures']).should.be(1)
    failure = delete_response['failures'][0]
    failure['imageId']['imageDigest'].should.equal(image_digest)
    failure['imageId']['imageTag'].should.equal("v2")
    failure['failureCode'].should.equal("ImageNotFound")
    failure['failureReason'].should.equal("Requested image not found")
| apache-2.0 |
ToontownUprising/src | toontown/classicchars/DistributedGoofySpeedway.py | 5 | 3651 | from pandac.PandaModules import *
import DistributedCCharBase
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
import CharStateDatas
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.hood import DLHood
class DistributedGoofySpeedway(DistributedCCharBase.DistributedCCharBase):
    """Client-side classic character: Goofy wandering around Goofy Speedway.

    Drives a two-state ClassicFSM (Neutral <-> Walk); distributed setWalk
    updates from the AI push the character between walk points.
    """

    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGoofySpeedway')

    def __init__(self, cr):
        # Guard against double-initialization when the distributed object is
        # regenerated; the attribute acts as a once-only flag.
        try:
            self.DistributedGoofySpeedway_initialized
        except AttributeError:
            self.DistributedGoofySpeedway_initialized = 1
            DistributedCCharBase.DistributedCCharBase.__init__(self, cr, TTLocalizer.Goofy, 'g')
            self.fsm = ClassicFSM.ClassicFSM(self.getName(), [
                State.State('Off', self.enterOff, self.exitOff, ['Neutral']),
                State.State('Neutral', self.enterNeutral, self.exitNeutral, ['Walk']),
                State.State('Walk', self.enterWalk, self.exitWalk, ['Neutral'])],
                'Off', 'Off')
            self.fsm.enterInitialState()
            self.handleHolidays()

    def disable(self):
        """Drive the FSM to its final state and drop the per-state data.

        Fix: the original requested the final state a second time after the
        state data had been deleted; the FSM is already in its final state by
        then, so the repeated call was redundant and has been removed.
        """
        self.fsm.requestFinalState()
        DistributedCCharBase.DistributedCCharBase.disable(self)
        del self.neutralDoneEvent
        del self.neutral
        del self.walkDoneEvent
        del self.walk

    def delete(self):
        """Delete the character exactly once, discarding the FSM."""
        try:
            self.DistributedGoofySpeedway_deleted
        except AttributeError:
            del self.fsm
            self.DistributedGoofySpeedway_deleted = 1
            DistributedCCharBase.DistributedCCharBase.delete(self)

    def generate(self):
        """Create the neutral/walk state data and enter Neutral."""
        DistributedCCharBase.DistributedCCharBase.generate(self, self.diffPath)
        name = self.getName()
        self.neutralDoneEvent = self.taskName(name + '-neutral-done')
        self.neutral = CharStateDatas.CharNeutralState(self.neutralDoneEvent, self)
        self.walkDoneEvent = self.taskName(name + '-walk-done')
        if self.diffPath is None:
            self.walk = CharStateDatas.CharWalkState(self.walkDoneEvent, self)
        else:
            # diffPath is set by handleHolidays() when the April Fools
            # costume swap is active (see below).
            self.walk = CharStateDatas.CharWalkState(self.walkDoneEvent, self, self.diffPath)
        self.fsm.request('Neutral')

    def enterOff(self):
        pass

    def exitOff(self):
        pass

    def enterNeutral(self):
        self.neutral.enter()
        self.acceptOnce(self.neutralDoneEvent, self.__decideNextState)

    def exitNeutral(self):
        self.ignore(self.neutralDoneEvent)
        self.neutral.exit()

    def enterWalk(self):
        self.walk.enter()
        self.acceptOnce(self.walkDoneEvent, self.__decideNextState)

    def exitWalk(self):
        self.ignore(self.walkDoneEvent)
        self.walk.exit()

    def __decideNextState(self, doneStatus):
        # Goofy only idles and walks, so always fall back to Neutral.
        self.fsm.request('Neutral')

    def setWalk(self, srcNode, destNode, timestamp):
        """Distributed update from the AI: begin walking src -> dest."""
        if destNode and not destNode == srcNode:
            self.walk.setWalk(srcNode, destNode, timestamp)
            self.fsm.request('Walk')

    def walkSpeed(self):
        return ToontownGlobals.GoofySpeed

    def handleHolidays(self):
        """Apply holiday overrides; April Fools swaps in Donald's walk path."""
        DistributedCCharBase.DistributedCCharBase.handleHolidays(self)
        if hasattr(base.cr, 'newsManager') and base.cr.newsManager:
            holidayIds = base.cr.newsManager.getHolidayIdList()
            if ToontownGlobals.APRIL_FOOLS_COSTUMES in holidayIds and isinstance(self.cr.playGame.hood, DLHood.DLHood):
                self.diffPath = TTLocalizer.Donald

    def getCCLocation(self):
        """Return 1 when the regular Goofy path is in use, 0 when swapped.

        Fix: the original had an unreachable ``return`` after the if/else;
        it has been removed.
        """
        if self.diffPath is None:
            return 1
        return 0
| mit |
kawamon/hue | desktop/core/ext-py/jaeger-client-4.0.0/tests/test_span.py | 2 | 9045 | # Copyright (c) 2016 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import collections
import json
import mock
from opentracing.ext import tags as ext_tags
from jaeger_client import Span, SpanContext, ConstSampler
def test_baggage():
    """set/get/override/delete of baggage items on a span."""
    tracer = mock.MagicMock()
    tracer.max_tag_value_length = 100
    ctx = SpanContext(trace_id=1, span_id=2, parent_id=None, flags=1)
    span = Span(context=ctx, operation_name='x', tracer=tracer)
    assert span.get_baggage_item('x') is None

    # Setter is fluent, so calls can be chained.
    span.set_baggage_item('x', 'y').set_baggage_item('z', 'why')
    assert span.get_baggage_item('x') == 'y'
    assert span.get_baggage_item('z') == 'why'
    assert span.get_baggage_item('tt') is None
    assert len(span.context.baggage) == 2

    # Overriding replaces the value without growing the baggage.
    span.set_baggage_item('x', 'b')
    assert span.get_baggage_item('x') == 'b'
    assert len(span.context.baggage) == 2

    # Keys are case-sensitive.
    span.set_baggage_item('X_y', '123')
    assert span.get_baggage_item('X_y') == '123'
    assert span.get_baggage_item('x-Y') is None

    # Setting None removes an existing key and ignores a missing one.
    span.set_baggage_item('nonExistingKey', None).set_baggage_item('z', None)
    assert 'z' not in span.context.baggage
def _fields_to_dict(span_log):
    """Flatten a span log's key/value fields into a plain dict."""
    return dict((field.key, field.vStr) for field in span_log.fields)
def test_baggage_logs():
    """Every baggage mutation is mirrored as a structured span log."""
    tracer = mock.MagicMock()
    tracer.max_tag_value_length = 100
    ctx = SpanContext(trace_id=1, span_id=2, parent_id=None, flags=1)
    span = Span(context=ctx, operation_name='x', tracer=tracer)

    span.set_baggage_item('x', 'a')
    assert span.get_baggage_item('x') == 'a'
    assert len(span.logs) == 1
    assert _fields_to_dict(span.logs[0]) == {
        'event': 'baggage', 'key': 'x', 'value': 'a',
    }

    # Overriding an existing key is flagged in the log entry.
    span.set_baggage_item('x', 'b')
    assert span.get_baggage_item('x') == 'b'
    assert len(span.logs) == 2
    assert _fields_to_dict(span.logs[1]) == {
        'event': 'baggage', 'key': 'x', 'value': 'b', 'override': 'true',
    }

    # Deletion is logged as an override to the string 'None'.
    span.set_baggage_item('x', None)
    assert span.get_baggage_item('x') is None
    assert len(span.logs) == 3
    assert _fields_to_dict(span.logs[2]) == {
        'event': 'baggage', 'key': 'x', 'value': 'None', 'override': 'true'
    }
def test_is_rpc():
    """span.kind tags drive is_rpc() / is_rpc_client()."""
    tracer = mock.MagicMock()
    tracer.max_tag_value_length = 100
    ctx = SpanContext(trace_id=1, span_id=2, parent_id=None, flags=1)

    # No span.kind tag: not an RPC at all.
    span = Span(context=ctx, operation_name='x', tracer=tracer)
    assert span.is_rpc() is False
    assert span.is_rpc_client() is False

    # Server-side RPC span.
    span = Span(context=ctx, operation_name='x', tracer=tracer)
    span.set_tag(ext_tags.SPAN_KIND, ext_tags.SPAN_KIND_RPC_SERVER)
    assert span.is_rpc() is True
    assert span.is_rpc_client() is False

    # Client-side RPC span.
    span = Span(context=ctx, operation_name='x', tracer=tracer)
    span.set_tag(ext_tags.SPAN_KIND, ext_tags.SPAN_KIND_RPC_CLIENT)
    assert span.is_rpc() is True
    assert span.is_rpc_client() is True
def test_sampling_priority(tracer):
    """The sampling.priority tag toggles sampling/debug on an unsampled span."""
    tracer.sampler = ConstSampler(False)
    span = tracer.start_span(operation_name='x')
    assert span.is_sampled() is False

    # Priority 1 enables both sampling and the debug flag; the original
    # applied it twice, which also checks the operation is idempotent.
    for _ in range(2):
        span.set_tag(ext_tags.SAMPLING_PRIORITY, 1)
        assert span.is_sampled()
        assert span.is_debug()

    # Priority 0 turns sampling back off ...
    span.set_tag(ext_tags.SAMPLING_PRIORITY, 0)
    assert span.is_sampled() is False

    # ... and a non-numeric priority leaves it off.
    span.set_tag(ext_tags.SAMPLING_PRIORITY, 'test')
    assert span.is_sampled() is False
def test_span_logging(tracer):
    """Table-driven coverage of Span logging APIs.

    Exercises the deprecated info()/error()/log_event()/log() methods and the
    current log_kv() method, checking the resulting log fields and timestamps.
    """
    # Test-case record: which Span method to call, its positional/keyword
    # args, the expected log fields, whether the call implies an error, and
    # the expected timestamp (microseconds) when one was supplied.
    tpl = collections.namedtuple(
        'Test',
        ['method', 'args', 'kwargs', 'expected', 'error', 'timestamp'])

    def test(method, expected,
             args=None, kwargs=None, error=False, timestamp=None):
        # Case builder: a bare-string expectation is shorthand for
        # {'event': expected}.
        if isinstance(expected, str):
            expected = {'event': expected}
        return tpl(
            method=method,
            args=args if args else [],
            expected=expected,
            kwargs=kwargs if kwargs else {},
            error=error,
            timestamp=timestamp,
        )

    def event_payload(event, payload):
        # Expected field dict for an event carrying a payload.
        return {'event': event, 'payload': payload}

    def from_json(val):
        # NOTE(review): this helper appears unused by the cases below.
        return json.loads(val)

    tests = [
        # deprecated info() method
        test(method='info',
             args=['msg'],
             expected='msg'),
        test(method='info',
             args=['msg', 'data'],
             expected=event_payload('msg', 'data')),
        # deprecated error() method
        test(method='error',
             args=['msg'],
             expected='msg', error=True),
        test(method='error',
             args=['msg', 'data'],
             expected=event_payload('msg', 'data'), error=True),
        # deprecated log_event() method
        test(method='log_event',
             args=['msg'],
             expected='msg'),
        test(method='log_event',
             args=['msg', 'data'],
             expected=event_payload('msg', 'data')),
        # deprecated log() method
        test(method='log',
             kwargs={'event': 'msg'},
             expected='msg'),
        test(method='log',
             kwargs={'event': 'msg', 'payload': 'data'},
             expected=event_payload('msg', 'data')),
        test(method='log',
             kwargs={'event': 'msg', 'payload': 'data', 'ignored': 'blah'},
             expected=event_payload('msg', 'data')),
        test(method='log',
             kwargs={'event': 'msg', 'payload': 'data', 'timestamp': 123},
             expected=event_payload('msg', 'data'),
             timestamp=123 * 1000 * 1000),  # in microseconds
        # log_kv()
        test(method='log_kv',
             args=[{'event': 'msg'}],
             expected='msg'),
        test(method='log_kv',
             args=[{'event': 'msg', 'x': 'y'}],
             expected={'event': 'msg', 'x': 'y'}),
        test(method='log_kv',
             args=[{'event': 'msg', 'x': 'y'}, 123],  # all args positional
             expected={'event': 'msg', 'x': 'y'},
             timestamp=123 * 1000 * 1000),
        test(method='log_kv',
             args=[{'event': 'msg', 'x': 'y'}],  # positional and kwargs
             kwargs={'timestamp': 123},
             expected={'event': 'msg', 'x': 'y'},
             timestamp=123 * 1000 * 1000),
        test(method='log_kv',
             args=[],  # kwargs only
             kwargs={
                 'key_values': {'event': 'msg', 'x': 'y'},
                 'timestamp': 123,
             },
             expected={'event': 'msg', 'x': 'y'},
             timestamp=123 * 1000 * 1000),  # to microseconds
    ]

    for test in tests:
        name = '%s' % (test,)
        # Fresh span per case; clear any logs/tags added at span creation.
        span = tracer.start_span(operation_name='x')
        span.logs = []
        span.tags = []
        if test.method == 'info':
            span.info(*test.args, **test.kwargs)
        elif test.method == 'error':
            span.error(*test.args, **test.kwargs)
        elif test.method == 'log':
            span.log(*test.args, **test.kwargs)
        elif test.method == 'log_event':
            span.log_event(*test.args, **test.kwargs)
        elif test.method == 'log_kv':
            span.log_kv(*test.args, **test.kwargs)
        else:
            raise ValueError('Unknown method %s' % test.method)
        # Exactly one log entry per call, carrying the expected fields.
        assert len(span.logs) == 1, name
        log = span.logs[0]
        log_fields = _fields_to_dict(log)
        assert log_fields == test.expected
        if test.timestamp:
            assert log.timestamp == test.timestamp
def test_span_to_string(tracer):
    """str(span) renders 'trace:span:parent:flags service.operation'."""
    tracer.service_name = 'unittest'
    ctx = SpanContext(trace_id=1, span_id=1, parent_id=1, flags=1)
    span = Span(context=ctx, operation_name='crypt', tracer=tracer)
    assert str(span) == '1:1:1:1 unittest.crypt'
def test_span_tag_value_max_length(tracer):
    """Tag values longer than max_tag_value_length are truncated."""
    tracer.max_tag_value_length = 42
    span = tracer.start_span(operation_name='x')
    span.set_tag('x', 'x' * 50)
    last_tag = span.tags[-1]
    assert last_tag.key == 'x'
    assert last_tag.vStr == 'x' * 42
def test_span_tag_bool(tracer):
    """Boolean tag values are stored in the vBool field."""
    span = tracer.start_span(operation_name='y')
    span.set_tag('y', True)
    last_tag = span.tags[-1]
    assert last_tag.key == 'y'
    assert last_tag.vBool is True
def test_span_tag_long(tracer):
    """Integer tag values are stored in the vLong field."""
    span = tracer.start_span(operation_name='z')
    span.set_tag('z', 200)
    last_tag = span.tags[-1]
    assert last_tag.key == 'z'
    assert last_tag.vLong == 200
| apache-2.0 |
pquentin/django | tests/postgres_tests/migrations/0002_create_test_models.py | 26 | 5556 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.fields.hstore
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the models exercised by the postgres_tests suite.

    ``operations`` holds the models every supported PostgreSQL version can
    create; ``pg_92_operations`` holds range-field models that require
    PostgreSQL >= 9.2 and are merged in at apply time.
    """

    dependencies = [
        ('postgres_tests', '0001_setup_extensions'),
    ]

    operations = [
        migrations.CreateModel(
            name='CharArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', django.contrib.postgres.fields.ArrayField(models.CharField(max_length=10), size=None)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='DateTimeArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('datetimes', django.contrib.postgres.fields.ArrayField(models.DateTimeField(), size=None)),
                ('dates', django.contrib.postgres.fields.ArrayField(models.DateField(), size=None)),
                ('times', django.contrib.postgres.fields.ArrayField(models.TimeField(), size=None)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='HStoreModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', django.contrib.postgres.fields.hstore.HStoreField(blank=True, null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OtherTypesArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ips', django.contrib.postgres.fields.ArrayField(models.GenericIPAddressField(), size=None)),
                ('uuids', django.contrib.postgres.fields.ArrayField(models.UUIDField(), size=None)),
                ('decimals', django.contrib.postgres.fields.ArrayField(models.DecimalField(max_digits=5, decimal_places=2), size=None)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='IntegerArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', django.contrib.postgres.fields.ArrayField(models.IntegerField(), size=None)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='NestedIntegerArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', django.contrib.postgres.fields.ArrayField(django.contrib.postgres.fields.ArrayField(models.IntegerField(), size=None), size=None)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='NullableIntegerArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', django.contrib.postgres.fields.ArrayField(models.IntegerField(), size=None, null=True, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CharFieldModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', models.CharField(max_length=16)),
            ],
            options=None,
            bases=None,
        ),
        migrations.CreateModel(
            name='TextFieldModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', models.TextField()),
            ],
            options=None,
            bases=None,
        ),
    ]

    # Range-field models only creatable on PostgreSQL >= 9.2.
    pg_92_operations = [
        migrations.CreateModel(
            name='RangesModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ints', django.contrib.postgres.fields.IntegerRangeField(null=True, blank=True)),
                ('bigints', django.contrib.postgres.fields.BigIntegerRangeField(null=True, blank=True)),
                ('floats', django.contrib.postgres.fields.FloatRangeField(null=True, blank=True)),
                ('timestamps', django.contrib.postgres.fields.DateTimeRangeField(null=True, blank=True)),
                ('dates', django.contrib.postgres.fields.DateRangeField(null=True, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]

    def apply(self, project_state, schema_editor, collect_sql=False):
        """Apply the migration, merging in ``pg_92_operations`` on PG >= 9.2.

        The membership guard makes the merge idempotent: previously, applying
        the same Migration instance more than once appended (and therefore
        re-ran) ``pg_92_operations`` each time.
        """
        PG_VERSION = schema_editor.connection.pg_version
        if PG_VERSION >= 90200 and self.pg_92_operations[0] not in self.operations:
            self.operations = self.operations + self.pg_92_operations
        return super(Migration, self).apply(project_state, schema_editor, collect_sql)
| bsd-3-clause |
samuelsmiles/samuel | svksurvey/settings.py | 4 | 2336 | """
Django settings for svksurvey project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# BASE_DIR is the repository root (two levels up from this settings file).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vi-)7v9(e_@^mfmb2vyne=a+nd@8+*x@s!m5d^3t11y@lgn7(*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Empty ALLOWED_HOSTS is fine while DEBUG is True; must be set for production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'scaffold_gen',
    'household',
    'land',
    'landlease',
    'cropping',
    'labourwages',
    'housing',
    'expenditure',
    'adminsortable',
    'list',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'svksurvey.urls'
WSGI_APPLICATION = 'svksurvey.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Development database: local SQLite file next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db.sqlite3',
    }
}
# Alternative PostgreSQL configuration, currently disabled.
#DATABASES = {
#    'default': {
#    'ENGINE': 'django.db.backends.postgresql_psycopg2',
#    'NAME': 'surveydb',
#    'USER': 'survey',
#    'PASSWORD': '1234',
#    'HOST': ''
#    }
#}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| agpl-3.0 |
mancoast/CPythonPyc_test | cpython/242_test_isinstance.py | 15 | 9764 | # Tests some corner cases with isinstance() and issubclass(). While these
# tests use new style classes and properties, they actually do whitebox
# testing of error conditions uncovered when using extension types.
import unittest
from test import test_support
import sys
class TestIsInstanceExceptions(unittest.TestCase):
    """Whitebox tests: isinstance() must mask AttributeError (and only
    AttributeError) raised while probing __bases__/__class__, and must let
    any other exception propagate unchanged."""
    # Test to make sure that an AttributeError when accessing the instance's
    # class's bases is masked.  This was actually a bug in Python 2.2 and
    # 2.2.1 where the exception wasn't caught but it also wasn't being cleared
    # (leading to an "undetected error" in the debug build).  Set up is,
    # isinstance(inst, cls) where:
    #
    #  - inst isn't an InstanceType
    #  - cls isn't a ClassType, a TypeType, or a TupleType
    #  - cls has a __bases__ attribute
    #  - inst has a __class__ attribute
    #  - inst.__class__ as no __bases__ attribute
    #
    # Sounds complicated, I know, but this mimics a situation where an
    # extension type raises an AttributeError when its __bases__ attribute is
    # gotten.  In that case, isinstance() should return False.
    def test_class_has_no_bases(self):
        class I(object):
            def getclass(self):
                # This must return an object that has no __bases__ attribute
                return None
            __class__ = property(getclass)
        class C(object):
            def getbases(self):
                return ()
            __bases__ = property(getbases)
        self.assertEqual(False, isinstance(I(), C()))
    # Like above except that inst.__class__.__bases__ raises an exception
    # other than AttributeError
    def test_bases_raises_other_than_attribute_error(self):
        class E(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        class I(object):
            def getclass(self):
                return E()
            __class__ = property(getclass)
        class C(object):
            def getbases(self):
                return ()
            __bases__ = property(getbases)
        self.assertRaises(RuntimeError, isinstance, I(), C())
    # Here's a situation where getattr(cls, '__bases__') raises an exception.
    # If that exception is not AttributeError, it should not get masked
    def test_dont_mask_non_attribute_error(self):
        class I: pass
        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        self.assertRaises(RuntimeError, isinstance, I(), C())
    # Like above, except that getattr(cls, '__bases__') raises an
    # AttributeError, which /should/ get masked as a TypeError
    def test_mask_attribute_error(self):
        class I: pass
        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)
        self.assertRaises(TypeError, isinstance, I(), C())
# These tests are similar to above, but tickle certain code paths in
# issubclass() instead of isinstance() -- really PyObject_IsSubclass()
# vs. PyObject_IsInstance().
class TestIsSubclassExceptions(unittest.TestCase):
    """Mirror of the isinstance() exception-masking tests for issubclass():
    AttributeError from __bases__ becomes TypeError, everything else
    propagates."""
    def test_dont_mask_non_attribute_error(self):
        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        class S(C): pass
        self.assertRaises(RuntimeError, issubclass, C(), S())
    def test_mask_attribute_error(self):
        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)
        class S(C): pass
        self.assertRaises(TypeError, issubclass, C(), S())
    # Like above, but test the second branch, where the __bases__ of the
    # second arg (the cls arg) is tested.  This means the first arg must
    # return a valid __bases__, and it's okay for it to be a normal --
    # unrelated by inheritance -- class.
    def test_dont_mask_non_attribute_error_in_cls_arg(self):
        class B: pass
        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        self.assertRaises(RuntimeError, issubclass, B, C())
    def test_mask_attribute_error_in_cls_arg(self):
        class B: pass
        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)
        self.assertRaises(TypeError, issubclass, B, C())
# meta classes for creating abstract classes and instances
class AbstractClass(object):
    """Fake 'class': an ordinary object exposing __bases__ via a property so
    that issubclass()/isinstance() treat it like a class object."""
    def __init__(self, bases):
        self.bases = bases
    def getbases(self):
        return self.bases
    # Must be assigned after getbases is defined; exposes it as __bases__.
    __bases__ = property(getbases)
    def __call__(self):
        # "Instantiating" an abstract class yields an abstract instance.
        return AbstractInstance(self)
class AbstractInstance(object):
    """Fake 'instance': exposes its AbstractClass via a __class__ property."""
    def __init__(self, klass):
        self.klass = klass
    def getclass(self):
        return self.klass
    # Exposes the stored fake class as __class__ for isinstance() probing.
    __class__ = property(getclass)
# abstract classes
AbstractSuper = AbstractClass(bases=())
AbstractChild = AbstractClass(bases=(AbstractSuper,))
# normal classes
# NOTE: under Python 2 these are old-style (classic) classes, which is a
# deliberately different code path from the new-style classes below.
class Super:
    pass
class Child(Super):
    pass
# new-style classes
class NewSuper(object):
    pass
class NewChild(NewSuper):
    pass
class TestIsInstanceIsSubclass(unittest.TestCase):
    """Cross-product checks of isinstance()/issubclass() over 'normal' and
    'abstract' (fake __class__/__bases__) classes and instances."""
    # Tests to ensure that isinstance and issubclass work on abstract
    # classes and instances. Before the 2.2 release, TypeErrors were
    # raised when boolean values should have been returned.  The bug was
    # triggered by mixing 'normal' classes and instances were with
    # 'abstract' classes and instances.  This case tries to test all
    # combinations.
    def test_isinstance_normal(self):
        # normal instances
        self.assertEqual(True, isinstance(Super(), Super))
        self.assertEqual(False, isinstance(Super(), Child))
        self.assertEqual(False, isinstance(Super(), AbstractSuper))
        self.assertEqual(False, isinstance(Super(), AbstractChild))
        self.assertEqual(True, isinstance(Child(), Super))
        self.assertEqual(False, isinstance(Child(), AbstractSuper))
    def test_isinstance_abstract(self):
        # abstract instances
        self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
        self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
        self.assertEqual(False, isinstance(AbstractSuper(), Super))
        self.assertEqual(False, isinstance(AbstractSuper(), Child))
        self.assertEqual(True, isinstance(AbstractChild(), AbstractChild))
        self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
        self.assertEqual(False, isinstance(AbstractChild(), Super))
        self.assertEqual(False, isinstance(AbstractChild(), Child))
    def test_subclass_normal(self):
        # normal classes
        self.assertEqual(True, issubclass(Super, Super))
        self.assertEqual(False, issubclass(Super, AbstractSuper))
        self.assertEqual(False, issubclass(Super, Child))
        self.assertEqual(True, issubclass(Child, Child))
        self.assertEqual(True, issubclass(Child, Super))
        self.assertEqual(False, issubclass(Child, AbstractSuper))
    def test_subclass_abstract(self):
        # abstract classes
        self.assertEqual(True, issubclass(AbstractSuper, AbstractSuper))
        self.assertEqual(False, issubclass(AbstractSuper, AbstractChild))
        self.assertEqual(False, issubclass(AbstractSuper, Child))
        self.assertEqual(True, issubclass(AbstractChild, AbstractChild))
        self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
        self.assertEqual(False, issubclass(AbstractChild, Super))
        self.assertEqual(False, issubclass(AbstractChild, Child))
    def test_subclass_tuple(self):
        # test with a tuple as the second argument classes
        self.assertEqual(True, issubclass(Child, (Child,)))
        self.assertEqual(True, issubclass(Child, (Super,)))
        self.assertEqual(False, issubclass(Super, (Child,)))
        self.assertEqual(True, issubclass(Super, (Child, Super)))
        self.assertEqual(False, issubclass(Child, ()))
        self.assertEqual(True, issubclass(Super, (Child, (Super,))))
        self.assertEqual(True, issubclass(NewChild, (NewChild,)))
        self.assertEqual(True, issubclass(NewChild, (NewSuper,)))
        self.assertEqual(False, issubclass(NewSuper, (NewChild,)))
        self.assertEqual(True, issubclass(NewSuper, (NewChild, NewSuper)))
        self.assertEqual(False, issubclass(NewChild, ()))
        self.assertEqual(True, issubclass(NewSuper, (NewChild, (NewSuper,))))
        # Python 2 only: long/unicode/basestring do not exist on Python 3.
        self.assertEqual(True, issubclass(int, (long, (float, int))))
        self.assertEqual(True, issubclass(str, (unicode, (Child, NewChild, basestring))))
    def test_subclass_recursion_limit(self):
        # make sure that issubclass raises RuntimeError before the C stack is
        # blown
        self.assertRaises(RuntimeError, blowstack, issubclass, str, str)
    def test_isinstance_recursion_limit(self):
        # make sure that issubclass raises RuntimeError before the C stack is
        # blown
        self.assertRaises(RuntimeError, blowstack, isinstance, '', str)
def blowstack(fxn, arg, compare_to):
    """Call fxn(arg, <deeply nested tuple>) so that the recursive tuple walk
    inside isinstance()/issubclass() exceeds the recursion limit."""
    # Make sure that calling isinstance with a deeply nested tuple for its
    # argument will raise RuntimeError eventually.
    tuple_arg = (compare_to,)
    # xrange: Python 2 file; nest a few levels past the recursion limit.
    for cnt in xrange(sys.getrecursionlimit()+5):
        tuple_arg = (tuple_arg,)
    fxn(arg, tuple_arg)
def test_main():
    """Entry point used by regrtest: run all three test case classes."""
    test_support.run_unittest(
        TestIsInstanceExceptions,
        TestIsSubclassExceptions,
        TestIsInstanceIsSubclass
    )
if __name__ == '__main__':
    test_main()
| gpl-3.0 |
originell/jpype | test/jpypetest/test_ref.py | 2 | 2285 | # *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See NOTICE file for details.
#
# *****************************************************************************
import sys
import _jpype
import jpype
from jpype import JImplements, JOverride
from jpype.types import *
import common
class ReferenceQueueTestCase(common.JPypeTestCase):
    """Tests for the JPype Java-side reference queue that cleans up
    Python objects referenced from Java."""
    def setUp(self):
        common.JPypeTestCase.setUp(self)
        # Singleton queue shared with the running JVM.
        self.refqueue = jpype.JClass(
            'org.jpype.ref.JPypeReferenceQueue').getInstance()
    def testAccess(self):
        # Make sure we can get the instance
        self.assertTrue(self.refqueue != None)
    def testRunning(self):
        # The queue's cleanup thread should be alive after startup.
        self.assertTrue(self.refqueue.isRunning())
    def testRefs(self):
        # This routine will exercise each of the clean up paths once
        fixture = JClass("jpype.common.Fixture")()
        def f():
            # Create a proxy to test the proxy path
            @JImplements("java.util.function.Supplier")
            class MySupplier(object):
                @JOverride
                def get(self):
                    # Send a Python exc to trigger Python ref path
                    raise RuntimeError("foo")
            try:
                u = MySupplier()
                fixture.callSupplier(u)
            except RuntimeError as ex:
                pass
        f()
        # Force a direct buffer and then trash it
        b = bytearray([1, 2, 3])
        _jpype.convertToDirectBuffer(b)
        # Then force a GC to clean it up
        jpype.java.lang.System.gc()
        # We can't assert on the results here: the JVM may choose not to
        # run the GC, which would cause a spurious failure.
ajylee/gpaw-rtxs | gpaw/cluster.py | 1 | 6097 | """Extensions to the ase Atoms class
"""
import numpy as np
from ase import Atoms
from ase.io import read, write
from ase.data import covalent_radii
from ase.calculators.neighborlist import NeighborList
class Cluster(Atoms):
    """An ase.Atoms subclass for cluster structures, adding convenience
    helpers for reading/writing, bounding boxes and connectivity."""

    def __init__(self, *args, **kwargs):
        # Free-form per-structure metadata used by get()/set() fallbacks.
        self.data = {}

        if len(args) > 0:
            filename = args[0]
            if isinstance(filename, str):
                # Cluster('file.xyz', filetype=...) -> read from disk.
                self.read(filename, kwargs.get('filetype'))
                return
        else:
            Atoms.__init__(self, [])

        if kwargs.get('filename') is not None:
            filename = kwargs.pop('filename')
            Atoms.__init__(self, *args, **kwargs)
            self.read(filename, kwargs.get('filetype'))
        else:
            Atoms.__init__(self, *args, **kwargs)

    def extreme_positions(self):
        """Return [[min_x, min_y, min_z], [max_x, max_y, max_z]]."""
        pos = self.get_positions()
        return np.array([np.minimum.reduce(pos), np.maximum.reduce(pos)])

    def find_connected(self, index, dmax=None, scale=1.5):
        """Find the atoms connected to self[index] and return them.

        If dmax is not None:
        Atoms are defined to be connected if they are nearer than dmax
        to each other.

        If dmax is None:
        Atoms are defined to be connected if they are nearer than the
        sum of their covalent radii * scale to each other.
        """
        # set neighbor lists
        neighborlist = []
        if dmax is None:
            # define neighbors according to covalent radii
            radii = scale * covalent_radii[self.get_atomic_numbers()]
            for atom in self:
                positions = self.positions - atom.position
                distances = np.sqrt(np.sum(positions**2, axis=1))
                radius = scale * covalent_radii[atom.number]
                neighborlist.append(np.where(distances < radii + radius)[0])
        else:
            # define neighbors according to distance
            nl = NeighborList([0.5 * dmax] * len(self), skin=0)
            nl.update(self)
            for i, atom in enumerate(self):
                neighborlist.append(list(nl.get_neighbors(i)[0]))

        # Breadth-first worklist: keep sweeping until no new neighbor of a
        # connected atom is found.
        connected = list(neighborlist[index])
        isolated = False
        while not isolated:
            isolated = True
            for i in connected:
                for j in neighborlist[i]:
                    if j not in connected:
                        connected.append(j)
                        isolated = False

        atoms = Cluster()
        for i in connected:
            atoms.append(self[i])

        return atoms

    def minimal_box(self, border=0, h=None):
        """The box needed to fit the structure in.

        The structure is moved to fit into the box [(0,x),(0,y),(0,z)]
        with x,y,z > 0 (fitting the ASE constriction).
        The border argument can be used to add a border of empty space
        around the structure (scalar, or a 3-sequence per axis).

        If h is set, the box is extended to ensure that box/h is
        a multiple of 4.  This ensures that GPAW uses the desired h.

        The shift applied to the structure is returned.
        """
        if len(self) == 0:
            return None

        extr = self.extreme_positions()

        # add borders (accept any 3-sequence, not just a list)
        if isinstance(border, (list, tuple, np.ndarray)):
            b = border
        else:
            b = [border, border, border]

        for c in range(3):
            extr[0][c] -= b[c]
            extr[1][c] += b[c] - extr[0][c]  # shifted already

        # check for multiple of 4
        if h is not None:
            if not hasattr(h, '__len__'):
                h = np.array([h, h, h])
            for c in range(3):
                # apply the same as in paw.py
                L = extr[1][c]  # shifted already
                N = max(4, int(L / h[c] / 4 + 0.5) * 4)
                # correct L
                dL = N * h[c] - L
                # move accordingly
                extr[1][c] += dL  # shifted already
                extr[0][c] -= dL / 2.

        # move lower corner to (0, 0, 0)
        shift = tuple(-1. * np.array(extr[0]))
        self.translate(shift)
        self.set_cell(tuple(extr[1]))

        return shift

    def get(self, name):
        """General get: dispatch to get_<name>() if it exists, otherwise fall
        back to the data dictionary (None if absent)."""
        attr = 'get_' + name
        if hasattr(self, attr):
            # Bug fix: this used to call getattr(self, attr)(data) with an
            # undefined name 'data' and dropped the return value.
            return getattr(self, attr)()
        elif name in self.data:
            return self.data[name]
        else:
            return None

    def set(self, name, data):
        """General set: dispatch to set_<name>(data) if it exists, otherwise
        store the value in the data dictionary."""
        attr = 'set_' + name
        if hasattr(self, attr):
            getattr(self, attr)(data)
        else:
            self.data[name] = data

    def read(self, filename, format=None):
        """Read the structure from some file. The type can be given
        or it will be guessed from the filename."""
        self.__init__(read(filename, format=format))
        return len(self)

    def write(self, filename=None, format=None, repeat=None):
        """Write the structure to file.

        Parameters
        ----------
        format: string
          can be given or it will be guessed from the filename
        repeat: array, eg.: [1,0,1]
          can be used to repeat the structure
        """
        if filename is None:
            if format is None:
                raise RuntimeError('Please specify either filename or format.')
            else:
                # NOTE(review): relies on get_name() being provided elsewhere
                # (it is not defined on Cluster or modern ase.Atoms) - confirm.
                filename = self.get_name() + '.' + format

        if repeat is None:
            out = self
        else:
            out = Cluster([])
            cell = self.get_cell().diagonal()
            for i in range(repeat[0] + 1):
                for j in range(repeat[1] + 1):
                    for k in range(repeat[2] + 1):
                        copy = self.copy()
                        copy.translate(np.array([i, j, k]) * cell)
                        out += copy

        write(filename, out, format)
| gpl-3.0 |
evilpie/servo | components/script/dom/bindings/codegen/parser/tests/test_const.py | 134 | 3000 | import WebIDL
def WebIDLTest(parser, harness):
    """Parse an interface with one const of every numeric/boolean flavour and
    verify each IDLConst's QName, name, type and value."""
    parser.parse("""
        interface TestConsts {
          const byte zero = 0;
          const byte b = -1;
          const octet o = 2;
          const short s = -3;
          const unsigned short us = 0x4;
          const long l = -0X5;
          const unsigned long ul = 6;
          const unsigned long long ull = 7;
          const long long ll = -010;
          const boolean t = true;
          const boolean f = false;
          const boolean? n = null;
          const boolean? nt = true;
          const boolean? nf = false;
        };
    """)
    results = parser.finish()
    harness.ok(True, "TestConsts interface parsed without error.")
    harness.check(len(results), 1, "Should be one production.")
    iface = results[0]
    harness.ok(isinstance(iface, WebIDL.IDLInterface),
               "Should be an IDLInterface")
    harness.check(iface.identifier.QName(), "::TestConsts", "Interface has the right QName")
    harness.check(iface.identifier.name, "TestConsts", "Interface has the right name")
    harness.check(len(iface.members), 14, "Expect 14 members")
    consts = iface.members
    def checkConst(const, QName, name, type, value):
        # Shared assertions for a single const member.
        harness.ok(isinstance(const, WebIDL.IDLConst),
                   "Should be an IDLConst")
        harness.ok(const.isConst(), "Const is a const")
        harness.ok(not const.isAttr(), "Const is not an attr")
        harness.ok(not const.isMethod(), "Const is not a method")
        harness.check(const.identifier.QName(), QName, "Const has the right QName")
        harness.check(const.identifier.name, name, "Const has the right name")
        harness.check(str(const.type), type, "Const has the right type")
        harness.ok(const.type.isPrimitive(), "All consts should be primitive")
        harness.check(str(const.value.type), str(const.type),
                      "Const's value has the same type as the type")
        harness.check(const.value.value, value, "Const value has the right value.")
    # Note: hex (0x4, -0X5) and octal (-010) literals must already be decoded.
    checkConst(consts[0], "::TestConsts::zero", "zero", "Byte", 0)
    checkConst(consts[1], "::TestConsts::b", "b", "Byte", -1)
    checkConst(consts[2], "::TestConsts::o", "o", "Octet", 2)
    checkConst(consts[3], "::TestConsts::s", "s", "Short", -3)
    checkConst(consts[4], "::TestConsts::us", "us", "UnsignedShort", 4)
    checkConst(consts[5], "::TestConsts::l", "l", "Long", -5)
    checkConst(consts[6], "::TestConsts::ul", "ul", "UnsignedLong", 6)
    checkConst(consts[7], "::TestConsts::ull", "ull", "UnsignedLongLong", 7)
    checkConst(consts[8], "::TestConsts::ll", "ll", "LongLong", -8)
    checkConst(consts[9], "::TestConsts::t", "t", "Boolean", True)
    checkConst(consts[10], "::TestConsts::f", "f", "Boolean", False)
    checkConst(consts[11], "::TestConsts::n", "n", "BooleanOrNull", None)
    checkConst(consts[12], "::TestConsts::nt", "nt", "BooleanOrNull", True)
    checkConst(consts[13], "::TestConsts::nf", "nf", "BooleanOrNull", False)
| mpl-2.0 |
yhpeng-git/mxnet | example/ssd/tools/visualize_net.py | 10 | 1148 | from __future__ import print_function
import find_mxnet
import mxnet as mx
import importlib
import argparse
import sys
# Command-line options selecting the SSD network variant to visualize.
parser = argparse.ArgumentParser(description='network visualization')
parser.add_argument('--network', type=str, default='vgg16_ssd_300',
                    choices = ['vgg16_ssd_300', 'vgg16_ssd_512'],
                    help = 'the cnn to use')
parser.add_argument('--num-classes', type=int, default=20,
                    help='the number of classes')
parser.add_argument('--data-shape', type=int, default=300,
                    help='set image\'s shape')
parser.add_argument('--train', action='store_true', default=False, help='show train net')
args = parser.parse_args()
# Symbol definitions live next to this tools directory.
sys.path.append('../symbol')
if not args.train:
    # Inference graph: render it to a graphviz file named after the network.
    net = importlib.import_module("symbol_" + args.network).get_symbol(args.num_classes)
    a = mx.viz.plot_network(net, shape={"data":(1,3,args.data_shape,args.data_shape)}, \
        node_attrs={"shape":'rect', "fixedsize":'false'})
    a.render("ssd_" + args.network)
else:
    # Training graph: just dump its JSON representation to stdout.
    net = importlib.import_module("symbol_" + args.network).get_symbol_train(args.num_classes)
    print(net.tojson())
| apache-2.0 |
zhouyejoe/spark | examples/src/main/python/mllib/normalizer_example.py | 128 | 1756 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.feature import Normalizer
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
    sc = SparkContext(appName="NormalizerExample")
    # $example on$
    data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
    labels = data.map(lambda x: x.label)
    features = data.map(lambda x: x.features)
    # p defaults to 2 (L2 norm); the second normalizer uses the max norm.
    normalizer1 = Normalizer()
    normalizer2 = Normalizer(p=float("inf"))
    # Each sample in data1 will be normalized using $L^2$ norm.
    data1 = labels.zip(normalizer1.transform(features))
    # Each sample in data2 will be normalized using $L^\infty$ norm.
    data2 = labels.zip(normalizer2.transform(features))
    # $example off$
    print("data1:")
    for each in data1.collect():
        print(each)
    print("data2:")
    for each in data2.collect():
        print(each)
    sc.stop()
| apache-2.0 |
sudheesh001/oh-mainline | vendor/packages/twisted/twisted/cred/pamauth.py | 63 | 1916 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for asynchronously authenticating using PAM.
"""
import PAM
import getpass, threading, os
from twisted.internet import threads, defer
def pamAuthenticateThread(service, user, conv):
    """Run PAM authentication in a worker thread, bridging each PAM
    conversation request back to the reactor thread via *conv* (which
    must return a Deferred)."""
    def _conv(items):
        from twisted.internet import reactor
        try:
            d = conv(items)
        except:
            import traceback
            traceback.print_exc()
            return
        # Block this worker thread until the Deferred fires in the
        # reactor thread.
        ev = threading.Event()
        def cb(r):
            ev.r = (1, r)
            ev.set()
        def eb(e):
            ev.r = (0, e)
            ev.set()
        reactor.callFromThread(d.addCallbacks, cb, eb)
        ev.wait()
        done = ev.r
        if done[0]:
            return done[1]
        else:
            # Python 2 re-raise of the failure's type/value
            # (done[1] is presumably a twisted Failure - confirm).
            raise done[1].type, done[1].value
    return callIntoPAM(service, user, _conv)
def callIntoPAM(service, user, conv):
    """A testing hook.

    Perform the actual PAM authenticate/acct_mgmt calls for *service* and
    *user*, using *conv* as the PAM conversation function.  Returns 1 on
    success; the PAM calls raise on failure.
    """
    pam = PAM.pam()
    pam.start(service)
    pam.set_item(PAM.PAM_USER, user)
    pam.set_item(PAM.PAM_CONV, conv)
    # Temporarily escalate to root: PAM modules typically need it to read
    # the shadow database.  Restored in the finally block.
    gid = os.getegid()
    uid = os.geteuid()
    os.setegid(0)
    os.seteuid(0)
    try:
        pam.authenticate() # these will raise
        pam.acct_mgmt()
        return 1
    finally:
        os.setegid(gid)
        os.seteuid(uid)
def defConv(items):
    """Default interactive PAM conversation: prompt on the terminal for each
    (message, kind) item and return a Deferred of (response, 0) pairs."""
    resp = []
    for i in range(len(items)):
        message, kind = items[i]
        if kind == 1: # password
            p = getpass.getpass(message)
            resp.append((p, 0))
        elif kind == 2: # text
            p = raw_input(message)
            resp.append((p, 0))
        elif kind in (3,4):
            # informational / error message: just display it
            print message
            resp.append(("", 0))
        else:
            # unknown message kind: abort the conversation
            return defer.fail('foo')
    d = defer.succeed(resp)
    return d
def pamAuthenticate(service, user, conv):
    """Asynchronously authenticate via PAM; returns a Deferred firing with
    the result of pamAuthenticateThread run in the reactor's thread pool."""
    return threads.deferToThread(pamAuthenticateThread, service, user, conv)
| agpl-3.0 |
jtauber/online-reader | prototypes/static-paginated-perseus4/generate.py | 1 | 1178 | #!/usr/bin/env python3
import os
from reader import fs, templates
from reader.pagination import paginate
from parse_tei2 import tei_chapters
OUTPUT_DIR = "output"
template = templates.load("chapter.html")
def chapter_filename(num_content):
    """Return the output filename for a (chapter_num, chapter_content) pair.

    Returns None when *num_content* is falsy (e.g. there is no previous or
    next chapter at the ends of the pagination).
    """
    if not num_content:
        return None
    return "{}.html".format(num_content[0])
def generate(chapter, prev, nxt, output_filename):
    """Render one chapter page to *output_filename*.

    *prev* and *nxt* are the neighbouring (chapter_num, content) pairs (or
    falsy at the ends) used to build the pagination links.
    """
    chapter_num, chapter_contents = chapter
    html = template.render(
        title=f"Histories 2.{chapter_num}",
        content=chapter_contents,
        prev_file=chapter_filename(prev),
        next_file=chapter_filename(nxt),
    )
    with open(output_filename, "w") as output:
        print(html, file=output)
if __name__ == "__main__":
    fs.create_dir(OUTPUT_DIR)
    # Parse the TEI source into (chapter_num, content) pairs.
    chapters = tei_chapters(os.path.join("data", "histories2.xml"))
    # paginate() yields each chapter with its neighbours for prev/next links.
    for prev, item, nxt in paginate(chapters):
        output_filename = os.path.join(OUTPUT_DIR, chapter_filename(item))
        generate(item, prev, nxt, output_filename)
        print(f"wrote {output_filename}")
    fs.copy_css(["skolar.css"], OUTPUT_DIR)
    fs.copy_files(["reader.css"], "css", OUTPUT_DIR)
| mit |
muku42/bokeh | examples/glyphs/maps_cities.py | 12 | 1081 | from __future__ import print_function
from bokeh.browserlib import view
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle
from bokeh.models import (
GMapPlot, Range1d, ColumnDataSource,
PanTool, WheelZoomTool, GMapOptions)
from bokeh.resources import INLINE
from bokeh.sampledata.world_cities import data
# Plot extents in lon/lat degrees.
x_range = Range1d(-160, 160)
y_range = Range1d(-80, 80)
map_options = GMapOptions(lat=15, lng=0, zoom=2)
plot = GMapPlot(
    x_range=x_range,
    y_range=y_range,
    plot_width=1000,
    plot_height=500,
    map_options=map_options,
    title="Cities of the world with a population over 5,000 people.",
    webgl=True,
)
# One semi-transparent dot per city; 'lng'/'lat' are columns of the
# world_cities sample data.
circle = Circle(x="lng", y="lat", size=5, line_color=None, fill_color='firebrick', fill_alpha=0.2)
plot.add_glyph(ColumnDataSource(data), circle)
plot.add_tools(PanTool(), WheelZoomTool())
if __name__ == "__main__":
    filename = "maps_cities.html"
    with open(filename, "w") as f:
        f.write(file_html(plot, INLINE, "Google Maps - World cities Example"))
    print("Wrote %s" % filename)
    # Open the generated page in a browser.
    view(filename)
| bsd-3-clause |
sferukshtu/hppi | admin/Pubs_Loader.py | 1 | 1468 | import datetime, xlrd
from pymongo import MongoClient
# import re
def fields(titles):
    """Return how many column names differ from the required header set.

    A result of 0 means *titles* matches the expected spreadsheet header
    exactly; any other value indicates missing and/or unexpected columns.
    """
    expected = {"title", "authors", "abstract", "url", "date", "journal", "pubinfo"}
    return len(titles.symmetric_difference(expected))
def main():
    """Interactively load an author's publication list from an Excel file
    into the 'staff' collection of the local 'hppi' MongoDB database,
    replacing any existing 'publist' for that author.  (Python 2 script:
    uses raw_input and the print statement.)"""
    client = MongoClient('localhost', 27017)
    db = client.hppi
    excel = raw_input("Enter excel file of publications to load: ")
    email = raw_input("Enter author's email: ")
    rd = xlrd.open_workbook(excel)
    sheet = rd.sheet_by_index(0)
    header = sheet.row_values(0) # column headers
    # Drop the author's previous publication list before re-loading.
    db.staff.update({'email': email}, {'$unset': {'publist': []}})
    if fields(set(header)) == 0:
        for rownum in range(1, sheet.nrows):
            row = sheet.row_values(rownum) # row values
            # print row
            data = {}
            for el in range(len(header)):
                if header[el] == "date":
                    dt = datetime.datetime(*xlrd.xldate_as_tuple(row[el], rd.datemode)) # convert date from excel
                else:
                    dt = row[el]
                data[header[el]] = dt # pack data in a dictionary data[name] = value
            # 1-based row number doubles as the article id.
            data["art_id"] = rownum
            db.staff.update_one({'email': email}, {'$push': {'publist': data}})
    else:
        print "Column names do not correspond to the specification!"
if __name__ == '__main__':
    main()
egaxegax/django-dbcartajs | django/conf/locale/cs/formats.py | 107 | 1580 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Czech (cs) locale formats for Django's l10n machinery.
DATE_FORMAT = 'j. E Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j. E Y G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y', # '05.01.2006', '05.01.06'
    '%d. %m. %Y', '%d. %m. %y', # '5. 1. 2006', '5. 1. 06'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
# Kept ISO formats as one is in first position
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '04:30:59'
    '%H.%M', # '04.30'
    '%H:%M', # '04:30'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S', # '05.01.2006 04:30:59'
    '%d.%m.%Y %H.%M', # '05.01.2006 04.30'
    '%d.%m.%Y %H:%M', # '05.01.2006 04:30'
    '%d.%m.%Y', # '05.01.2006'
    '%d. %m. %Y %H:%M:%S', # '05. 01. 2006 04:30:59'
    '%d. %m. %Y %H.%M', # '05. 01. 2006 04.30'
    '%d. %m. %Y %H:%M', # '05. 01. 2006 04:30'
    '%d. %m. %Y', # '05. 01. 2006'
    '%Y-%m-%d %H.%M', # '2006-01-05 04.30'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| gpl-2.0 |
liangazhou/django-rdp | packages/Django-1.8.6/tests/template_tests/syntax_tests/test_list_index.py | 521 | 2694 | from django.test import SimpleTestCase
from ..utils import setup
class ListIndexTests(SimpleTestCase):
    """Tests for the numeric-index lookup syntax ``{{ var.N }}``."""

    def _assert_silent_failure(self, template_name, context):
        """Render and expect the engine's invalid-variable placeholder."""
        rendered = self.engine.render_to_string(template_name, context)
        expected = 'INVALID' if self.engine.string_if_invalid else ''
        self.assertEqual(rendered, expected)

    @setup({'list-index01': '{{ var.1 }}'})
    def test_list_index01(self):
        """
        List-index syntax allows a template to access a certain item of a
        subscriptable object.
        """
        rendered = self.engine.render_to_string(
            'list-index01', {'var': ['first item', 'second item']})
        self.assertEqual(rendered, 'second item')

    @setup({'list-index02': '{{ var.5 }}'})
    def test_list_index02(self):
        """
        Fail silently when the list index is out of range.
        """
        self._assert_silent_failure(
            'list-index02', {'var': ['first item', 'second item']})

    @setup({'list-index03': '{{ var.1 }}'})
    def test_list_index03(self):
        """
        Fail silently when the variable is None.
        """
        self._assert_silent_failure('list-index03', {'var': None})

    @setup({'list-index04': '{{ var.1 }}'})
    def test_list_index04(self):
        """
        Fail silently when variable is a dict without the specified key.
        """
        self._assert_silent_failure('list-index04', {'var': {}})

    @setup({'list-index05': '{{ var.1 }}'})
    def test_list_index05(self):
        """
        Dictionary lookup wins out when dict's key is a string.
        """
        rendered = self.engine.render_to_string('list-index05', {'var': {'1': "hello"}})
        self.assertEqual(rendered, 'hello')

    @setup({'list-index06': '{{ var.1 }}'})
    def test_list_index06(self):
        """
        But list-index lookup wins out when dict's key is an int, which
        behind the scenes is really a dictionary lookup (for a dict)
        after converting the key to an int.
        """
        rendered = self.engine.render_to_string('list-index06', {"var": {1: "hello"}})
        self.assertEqual(rendered, 'hello')

    @setup({'list-index07': '{{ var.1 }}'})
    def test_list_index07(self):
        """
        Dictionary lookup wins out when there is a string and int version
        of the key.
        """
        rendered = self.engine.render_to_string('list-index07', {"var": {'1': "hello", 1: "world"}})
        self.assertEqual(rendered, 'hello')
| apache-2.0 |
javiergarridomellado/Empresa_django | devcodela/lib/python2.7/site-packages/django/contrib/admin/models.py | 100 | 2831 | from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin.util import quote
from django.utils.translation import ugettext, ugettext_lazy as _
from django.utils.encoding import smart_text
from django.utils.encoding import python_2_unicode_compatible
# Values stored in LogEntry.action_flag: the kind of admin action logged.
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
    """Manager adding a convenience method for recording admin actions."""

    def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
        """Create and persist one LogEntry row describing an admin action."""
        entry = self.model(
            None,                      # pk: assigned by the database
            None,                      # action_time: filled by auto_now
            user_id,
            content_type_id,
            smart_text(object_id),
            object_repr[:200],         # column is capped at 200 chars
            action_flag,
            change_message,
        )
        entry.save()
@python_2_unicode_compatible
class LogEntry(models.Model):
    """One row per admin action (add/change/delete) on any object."""
    action_time = models.DateTimeField(_('action time'), auto_now=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    # content_type/object_id identify the edited object; nullable because the
    # object (or its type) may have been deleted since the action was logged.
    content_type = models.ForeignKey(ContentType, blank=True, null=True)
    object_id = models.TextField(_('object id'), blank=True, null=True)
    object_repr = models.CharField(_('object repr'), max_length=200)
    # One of ADDITION / CHANGE / DELETION (module-level constants).
    action_flag = models.PositiveSmallIntegerField(_('action flag'))
    change_message = models.TextField(_('change message'), blank=True)
    objects = LogEntryManager()
    class Meta:
        verbose_name = _('log entry')
        verbose_name_plural = _('log entries')
        db_table = 'django_admin_log'
        ordering = ('-action_time',)
    def __repr__(self):
        return smart_text(self.action_time)
    def __str__(self):
        """Human-readable, translated summary of the logged action."""
        if self.action_flag == ADDITION:
            return ugettext('Added "%(object)s".') % {'object': self.object_repr}
        elif self.action_flag == CHANGE:
            return ugettext('Changed "%(object)s" - %(changes)s') % {
                'object': self.object_repr,
                'changes': self.change_message,
            }
        elif self.action_flag == DELETION:
            return ugettext('Deleted "%(object)s."') % {'object': self.object_repr}
        # Fallback for unknown/unset action flags.
        return ugettext('LogEntry Object')
    def is_addition(self):
        return self.action_flag == ADDITION
    def is_change(self):
        return self.action_flag == CHANGE
    def is_deletion(self):
        return self.action_flag == DELETION
    def get_edited_object(self):
        "Returns the edited object represented by this log entry"
        return self.content_type.get_object_for_this_type(pk=self.object_id)
    def get_admin_url(self):
        """
        Returns the admin URL to edit the object represented by this log entry.
        This is relative to the Django admin index page.
        """
        if self.content_type and self.object_id:
            return "%s/%s/%s/" % (self.content_type.app_label, self.content_type.model, quote(self.object_id))
        return None
| gpl-2.0 |
gooofy/zamia-ai | data-tools/csv/align_model.py | 3 | 8534 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Guenter Bartsch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# train keras module alignment model
#
import codecs
import logging
import os
import shutil
import sys

import numpy as np
from tensorflow import keras

from zamiaai import model

from nltools import misc
from nltools.tokenizer import tokenize
# DEBUG_LIMIT = 5000
DEBUG_LIMIT = 0  # when >0, cap the number of training samples loaded (debug aid)
LANG = 'en'
MODEL_DIR = 'model'  # directory holding weights + dictionary CSV files
# Reserved padding token and its fixed id 0 in both dictionaries.
_PAD = '__pad'
_PAD_ID = 0
INPUT_MAX_LEN = 30  # utterances are clipped/padded to this many tokens
# model / keras
EMB_DIM = 32
DENSE1_DIM = 32
DENSE2_DIM = 32
EPOCHS = 30
BATCH_SIZE = 512
VALIDATION_SPLIT = 0.1
class AlignModel(object):
    """Keras classifier aligning a tokenized utterance with the name of the
    zamia-ai module that should handle it.

    Architecture: token embedding -> flatten -> two dense ReLU layers ->
    softmax over module names.  Token and module-name vocabularies are
    integer dictionaries persisted as ``id;token`` CSV files in MODEL_DIR
    next to the keras weights.
    """

    def __init__(self, session):
        """Store the DB session and derive the model artifact paths."""
        self.session = session

        self.keras_weights_fn = '%s/keras_weights.hdf5' % (MODEL_DIR)
        self.in_dict_fn = '%s/in_dict.csv' % (MODEL_DIR)
        self.out_dict_fn = '%s/out_dict.csv' % (MODEL_DIR)

    def _setup_model(self):
        """Build and compile the keras model (input/output dicts must be set)."""
        self.keras_model = keras.Sequential()
        self.keras_model.add(keras.layers.Embedding(len(self.input_dict), EMB_DIM, input_length=INPUT_MAX_LEN))
        self.keras_model.add(keras.layers.Flatten())
        self.keras_model.add(keras.layers.Dense(DENSE1_DIM, activation='relu'))
        self.keras_model.add(keras.layers.Dense(DENSE2_DIM, activation='relu'))
        self.keras_model.add(keras.layers.Dense(len(self.output_dict), activation='softmax'))
        self.keras_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
        logging.info(self.keras_model.summary())

    def train(self, num_steps, incremental):
        """Train the alignment model on all non-bot training data (LANG).

        num_steps   -- unused, kept for call-site compatibility
        incremental -- True: reuse the model dir and existing dicts;
                       False: wipe the model dir and recompute the dicts
        """

        # load discourses from db, deduplicated by input string
        logging.info('load discourses from db...')

        self.training_data = []
        tds = set()

        for td in self.session.query(model.TrainingData).filter(model.TrainingData.lang==LANG).filter(model.TrainingData.module!='bots'):
            if td.inp in tds:
                continue
            tds.add(td.inp)
            inp = tokenize(td.inp, lang=LANG)
            if len(inp) > INPUT_MAX_LEN:
                inp = inp[:INPUT_MAX_LEN]
            self.training_data.append((inp, td.module))
            if DEBUG_LIMIT and len(tds) > DEBUG_LIMIT:
                break

        shuffle(self.training_data)

        #
        # set up model dir
        #

        if not incremental:
            # FIX: 'shutil' was never imported, so this rmtree used to raise
            # NameError which the bare 'except: pass' silently swallowed --
            # a stale model dir was never actually removed.
            try:
                shutil.rmtree(MODEL_DIR)
            except OSError:
                pass  # model dir simply did not exist yet
        misc.mkdirs(MODEL_DIR)

        #
        # load or create input/output dicts
        #

        if incremental:
            logging.info("loading input and output dicts...")
            self.load_dicts()
        else:
            logging.info("computing input and output dicts...")
            self.compute_dicts()
            self.save_dicts()

        #
        # compute datasets
        #

        logging.info("computing datasets...")

        train_x = []
        train_y = []
        for inp, mn in self.training_data:
            train_x.append(self.compute_x(inp))
            train_y.append(self.compute_y(mn))

        self.train_x = np.array(train_x, np.int32)
        self.train_y = keras.utils.to_categorical(train_y, len(self.output_dict))

        logging.info("computing datasets done. train:x=%s,y=%s" % (self.train_x.shape, self.train_y.shape))

        #
        # define the keras model
        #

        self._setup_model()

        #
        # fit training data, keeping only the weights of the best epoch
        #

        best_loss = 100.0
        best_epoch = 0

        for epoch in range(EPOCHS):
            h = self.keras_model.fit(self.train_x, self.train_y,
                                     epochs=1,
                                     validation_split=VALIDATION_SPLIT,
                                     batch_size=BATCH_SIZE)

            cur_loss = h.history['val_loss'][0]
            if cur_loss < best_loss:
                best_loss = cur_loss
                best_epoch = epoch
                logging.info("%3d/%3d *** BEST LOSS SO FAR IN THIS RUN: %f FROM THIS EPOCH" % (epoch+1, EPOCHS, best_loss))

                # save the result
                self.keras_model.save_weights(self.keras_weights_fn, overwrite=True)
                logging.info ('%s written.' % self.keras_weights_fn)
            else:
                logging.info("%3d/%3d --- BEST LOSS SO FAR IN THIS RUN: %f FROM EPOCH %d" % (epoch+1, EPOCHS, best_loss, best_epoch))

    def load(self):
        """Load dicts, rebuild the model and restore the trained weights."""
        self.load_dicts()
        self._setup_model()
        self.keras_model.load_weights(self.keras_weights_fn)
        logging.info ('%s loaded.' % self.keras_weights_fn)

    def compute_dicts(self):
        """Build input/output dicts from self.training_data (pad id is 0)."""
        self.input_dict = {_PAD: _PAD_ID}
        self.output_dict = {_PAD: _PAD_ID}
        for inp, mn in self.training_data:
            # input tokens
            for token in inp:
                if not token in self.input_dict:
                    self.input_dict[token] = len(self.input_dict)
            # output module name
            if not mn in self.output_dict:
                self.output_dict[mn] = len(self.output_dict)
        logging.info ('dicts done. input: %d entries, output: %d entries' %
                      (len(self.input_dict), len(self.output_dict)))

    def _write_dict(self, fn, d):
        """Write one dict as 'id;token' CSV lines, sorted by token."""
        with codecs.open(fn, 'w', 'utf8') as f:
            for k in sorted(d):
                f.write(u"%d;%s\n" % (d[k], k))
        logging.info ('%s written.', fn)

    def save_dicts(self):
        """Persist input and output dicts next to the weights."""
        self._write_dict(self.in_dict_fn, self.input_dict)
        self._write_dict(self.out_dict_fn, self.output_dict)

    def _read_dict(self, fn):
        """Read one 'id;token' CSV dict file and return {token: id}."""
        d = {}
        with codecs.open(fn, 'r', 'utf8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue  # tolerate blank lines (e.g. trailing newline)
                parts = line.split(';')
                d[parts[1]] = int(parts[0])
        logging.info ('%s read, %d entries.' % (fn, len(d)))
        return d

    def load_dicts(self):
        """Restore input and output dicts from their CSV files."""
        self.input_dict = self._read_dict(self.in_dict_fn)
        self.output_dict = self._read_dict(self.out_dict_fn)

    def compute_x(self, inp):
        """Map tokens to ids (unknown -> _PAD_ID), pad/clip to INPUT_MAX_LEN."""
        # NOTE(review): 'unicode' is the Python 2 builtin -- this module
        # appears to target Python 2; confirm before porting.
        x = list(map(lambda token: self.input_dict[unicode(token)] if unicode(token) in self.input_dict else _PAD_ID, inp))
        while len(x) < INPUT_MAX_LEN:
            x.append(_PAD_ID)
        if len(x) > INPUT_MAX_LEN:
            x = x[:INPUT_MAX_LEN]
        return x

    def compute_y(self, mn):
        """Map a module name to its output id (_PAD_ID when unknown)."""
        # FIX: was 'PAD_ID' (undefined name) -> NameError for unknown modules.
        return self.output_dict[mn] if mn in self.output_dict else _PAD_ID

    def predict(self, inp):
        """Return the predicted module name for the raw utterance *inp*."""
        x = np.array([self.compute_x(tokenize(inp, lang=LANG))], np.int32)
        y = np.argmax(self.keras_model.predict(x))
        # reverse-lookup the module name belonging to the winning output id
        for m in self.output_dict:
            if self.output_dict[m] == y:
                return m
        return None
| apache-2.0 |
apark263/tensorflow | tensorflow/python/data/kernel_tests/cache_test.py | 5 | 9091 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.cache()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import shutil
import tempfile
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FileCacheTest(test_base.DatasetTestBase):
  """Tests for file-backed `tf.data.Dataset.cache(filename)`."""

  def setUp(self):
    # Fresh temp dir per test; the cache files share the "cache" prefix.
    self.tmp_dir = tempfile.mkdtemp()
    self.cache_prefix = path.join(self.tmp_dir, "cache")

  def tearDown(self):
    if self.tmp_dir:
      shutil.rmtree(self.tmp_dir, ignore_errors=True)

  def testCacheDatasetPassthrough(self):
    """Caching must not change the elements, and a warm cache must replay
    them even when the upstream dataset is empty."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))

    def dataset_fn(count=5, filename=None):
      repeat_dataset = (
          dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
      if filename:
        return repeat_dataset.cache(filename)
      else:
        return repeat_dataset

    self.assertEqual(
        tuple([c.shape[1:] for c in components]),
        dataset_fn().output_shapes)

    get_next = self.getNext(dataset_fn())

    # First run without caching to collect the "ground truth".
    elements = []
    for _ in range(20):
      elements.append(self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

    # Assert that the cached dataset has the same elements as the
    # "ground truth".
    get_next = self.getNext(dataset_fn(filename=self.cache_prefix))
    cached_elements = []
    for _ in range(20):
      cached_elements.append(self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    self.assertAllEqual(elements, cached_elements)

    # Re-initialize with an empty upstream (to throw errors.OutOfRangeError
    # if we didn't use the cache).
    get_next = self.getNext(dataset_fn(count=0, filename=self.cache_prefix))
    replayed_elements = []
    for _ in range(20):
      replayed_elements.append(self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    self.assertEqual(cached_elements, replayed_elements)

    # Re-initialize with an empty upstream and a missing cache file (should
    # throw errors.OutOfRangeError immediately).
    get_next = self.getNext(
        dataset_fn(count=0, filename=self.cache_prefix + "nonsense"))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

  def testConcurrentWriters(self):
    """Two iterators writing to the same cache file: the second must fail."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))

    cache_dataset1 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))
    cache_dataset2 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))

    get_next1 = self.getNext(cache_dataset1)
    get_next2 = self.getNext(cache_dataset2)

    self.evaluate(get_next1())  # this should succeed
    with self.assertRaises(errors.AlreadyExistsError):
      self.evaluate(get_next2())
    self.evaluate(get_next1())  # this should continue to succeed

  def testConcurrentReaders(self):
    """Once the cache is fully written, multiple readers may interleave."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))

    cache_dataset1 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))
    cache_dataset2 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))

    get_next1 = self.getNext(cache_dataset1)
    get_next2 = self.getNext(cache_dataset2)

    # Exhaust the first iterator so the cache file is completely written.
    elements = []
    for _ in range(4):
      elements.append(self.evaluate(get_next1()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next1())

    # Re-initialize
    get_next1 = self.getNext(cache_dataset1, requires_initialization=True)
    get_next2 = self.getNext(cache_dataset2, requires_initialization=True)

    # Reading concurrently should succeed.
    elements_itr1 = []
    elements_itr2 = []
    elements_itr2.append(self.evaluate(get_next2()))
    elements_itr1.append(self.evaluate(get_next1()))
    elements_itr2.append(self.evaluate(get_next2()))
    elements_itr1.append(self.evaluate(get_next1()))
    # Intentionally reversing the order
    elements_itr1.append(self.evaluate(get_next1()))
    elements_itr2.append(self.evaluate(get_next2()))
    elements_itr1.append(self.evaluate(get_next1()))
    elements_itr2.append(self.evaluate(get_next2()))

    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next2())

    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next1())

    self.assertAllEqual(elements, elements_itr1)
    self.assertAllEqual(elements, elements_itr2)
@test_util.run_all_in_graph_and_eager_modes
class MemoryCacheTest(test_base.DatasetTestBase):
  """Tests for in-memory `tf.data.Dataset.cache()` (no filename)."""

  def testCacheDatasetPassthrough(self):
    """A warm in-memory cache must replay elements even after the upstream
    dataset is made empty (by zeroing the captured repeat_count variable)."""
    with ops.device("cpu:0"):
      repeat_count = variables.Variable(constant_op.constant(10, dtypes.int64))
      dataset = dataset_ops.Dataset.range(3).flat_map(
          lambda x: dataset_ops.Dataset.from_tensors(x).repeat(repeat_count))

      cached_dataset = dataset.cache().repeat(2)
      uncached_dataset = dataset.repeat(2)

      self.evaluate(repeat_count.initializer)
      # Needs to be initializable to capture the variable.
      cached_next = self.getNext(cached_dataset, requires_initialization=True)
      uncached_next = self.getNext(
          uncached_dataset, requires_initialization=True)
      # First pass: both pipelines produce 0..2, each value 10 times.
      for i in range(3):
        for _ in range(10):
          self.assertEqual(self.evaluate(cached_next()), i)
          self.assertEqual(self.evaluate(uncached_next()), i)

      self.evaluate(repeat_count.assign(0))

      # The uncached iterator should now be empty.
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(uncached_next())

      # The cached iterator replays from cache.
      for i in range(3):
        for _ in range(10):
          self.assertEqual(self.evaluate(cached_next()), i)

      # The cached iterator should now be empty.
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(cached_next())

  def testEmptyCacheReading(self):
    """Caching an empty dataset yields an empty dataset."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))

    repeat_dataset = (
        dataset_ops.Dataset.from_tensor_slices(components).repeat(0))
    cache_dataset = repeat_dataset.cache()

    # Create initialization ops for iterators without and with
    # caching, respectively.
    self.assertDatasetProduces(cache_dataset, expected_output=[])

  def testConcurrentReaders(self):
    """Two datasets built on the same cache may be read interleaved."""
    dataset = dataset_ops.Dataset.range(5).cache()
    d1 = dataset.map(lambda x: x + 1)
    d2 = dataset.map(lambda x: x + 6)

    get_next1 = self.getNext(d1)

    self.assertEqual(1, self.evaluate(get_next1()))
    self.assertEqual(2, self.evaluate(get_next1()))
    self.assertEqual(3, self.evaluate(get_next1()))

    get_next2 = self.getNext(d2)

    self.assertEqual(6, self.evaluate(get_next2()))
    self.assertEqual(7, self.evaluate(get_next2()))
    self.assertEqual(4, self.evaluate(get_next1()))  # interleave execution
    self.assertEqual([8, 5],
                     [self.evaluate(get_next2()),
                      self.evaluate(get_next1())])
    self.assertEqual(9, self.evaluate(get_next2()))
    self.assertEqual(10, self.evaluate(get_next2()))

    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next2())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next1())

  def testCacheTakeRepeat(self):
    """cache() composed with take() and repeat() replays the taken prefix."""
    dataset = dataset_ops.Dataset.range(10).cache().take(5).repeat(2)

    expected_output = [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
    self.assertDatasetProduces(dataset, expected_output=expected_output)


if __name__ == "__main__":
  test.main()
| apache-2.0 |
disruptek/boto | tests/integration/ec2/elb/test_connection.py | 14 | 12328 | # Copyright (c) 2010 Hunter Blanks http://artifex.org/~hblanks/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Initial, and very limited, unit tests for ELBConnection.
"""
import boto
import time
from tests.compat import unittest
from boto.ec2.elb import ELBConnection
class ELBConnectionTest(unittest.TestCase):
    """Integration tests for boto's ELBConnection.

    NOTE(review): these tests talk to live AWS (ELB + S3) using the
    credentials in the environment; they create and delete real resources.
    """
    ec2 = True

    def setUp(self):
        """Creates a named load balancer that can be safely
        deleted at the end of each test"""
        self.conn = ELBConnection()
        self.name = 'elb-boto-unit-test'
        self.availability_zones = ['us-east-1a']
        self.listeners = [(80, 8000, 'HTTP')]
        self.balancer = self.conn.create_load_balancer(
            self.name, self.availability_zones, self.listeners)

        # S3 bucket for log tests
        self.s3 = boto.connect_s3()
        self.timestamp = str(int(time.time()))
        self.bucket_name = 'boto-elb-%s' % self.timestamp
        self.bucket = self.s3.create_bucket(self.bucket_name)
        self.bucket.set_canned_acl('public-read-write')
        self.addCleanup(self.cleanup_bucket, self.bucket)

    def cleanup_bucket(self, bucket):
        # Buckets must be emptied before they can be deleted.
        for key in bucket.get_all_keys():
            key.delete()
        bucket.delete()

    def tearDown(self):
        """ Deletes the test load balancer after every test.
        It does not delete EVERY load balancer in your account"""
        self.balancer.delete()

    def test_build_list_params(self):
        """build_list_params expands a list into numbered request params."""
        params = {}
        self.conn.build_list_params(
            params, ['thing1', 'thing2', 'thing3'], 'ThingName%d')
        expected_params = {
            'ThingName1': 'thing1',
            'ThingName2': 'thing2',
            'ThingName3': 'thing3'
        }
        self.assertEqual(params, expected_params)

    # TODO: for these next tests, consider sleeping until our load
    # balancer comes up, then testing for connectivity to
    # balancer.dns_name, along the lines of the existing EC2 unit tests.

    def test_create_load_balancer(self):
        self.assertEqual(self.balancer.name, self.name)
        self.assertEqual(self.balancer.availability_zones,
                         self.availability_zones)
        self.assertEqual(self.balancer.listeners, self.listeners)

        balancers = self.conn.get_all_load_balancers()
        self.assertEqual([lb.name for lb in balancers], [self.name])

    def test_create_load_balancer_listeners(self):
        more_listeners = [(443, 8001, 'HTTP')]
        self.conn.create_load_balancer_listeners(self.name, more_listeners)
        balancers = self.conn.get_all_load_balancers()
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(
            sorted(l.get_tuple() for l in balancers[0].listeners),
            sorted(self.listeners + more_listeners)
        )

    def test_delete_load_balancer_listeners(self):
        mod_listeners = [(80, 8000, 'HTTP'), (443, 8001, 'HTTP')]
        mod_name = self.name + "-mod"
        self.mod_balancer = self.conn.create_load_balancer(
            mod_name, self.availability_zones, mod_listeners)

        mod_balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[mod_name])
        self.assertEqual([lb.name for lb in mod_balancers], [mod_name])
        self.assertEqual(
            sorted([l.get_tuple() for l in mod_balancers[0].listeners]),
            sorted(mod_listeners))

        self.conn.delete_load_balancer_listeners(self.mod_balancer.name, [443])
        mod_balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[mod_name])
        self.assertEqual([lb.name for lb in mod_balancers], [mod_name])
        self.assertEqual([l.get_tuple() for l in mod_balancers[0].listeners],
                         mod_listeners[:1])
        self.mod_balancer.delete()

    def test_create_load_balancer_listeners_with_policies(self):
        more_listeners = [(443, 8001, 'HTTP')]
        self.conn.create_load_balancer_listeners(self.name, more_listeners)

        lb_policy_name = 'lb-policy'
        self.conn.create_lb_cookie_stickiness_policy(
            1000, self.name, lb_policy_name)
        self.conn.set_lb_policies_of_listener(
            self.name, self.listeners[0][0], lb_policy_name)

        app_policy_name = 'app-policy'
        self.conn.create_app_cookie_stickiness_policy(
            'appcookie', self.name, app_policy_name)
        self.conn.set_lb_policies_of_listener(
            self.name, more_listeners[0][0], app_policy_name)

        balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(
            sorted(l.get_tuple() for l in balancers[0].listeners),
            sorted(self.listeners + more_listeners)
        )
        # Policy names should be checked here once they are supported
        # in the Listener object.

    def test_create_load_balancer_backend_with_policies(self):
        other_policy_name = 'enable-proxy-protocol'
        backend_port = 8081
        self.conn.create_lb_policy(
            self.name, other_policy_name,
            'ProxyProtocolPolicyType', {'ProxyProtocol': True})
        self.conn.set_lb_policies_of_backend_server(
            self.name, backend_port, [other_policy_name])

        balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(len(balancers[0].policies.other_policies), 1)
        self.assertEqual(balancers[0].policies.other_policies[0].policy_name,
                         other_policy_name)
        self.assertEqual(len(balancers[0].backends), 1)
        self.assertEqual(balancers[0].backends[0].instance_port, backend_port)
        self.assertEqual(balancers[0].backends[0].policies[0].policy_name,
                         other_policy_name)

        # Removing the policy from the backend leaves it on the balancer.
        self.conn.set_lb_policies_of_backend_server(self.name, backend_port,
                                                    [])

        balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(len(balancers[0].policies.other_policies), 1)
        self.assertEqual(len(balancers[0].backends), 0)

    def test_create_load_balancer_complex_listeners(self):
        complex_listeners = [
            (8080, 80, 'HTTP', 'HTTP'),
            (2525, 25, 'TCP', 'TCP'),
        ]
        self.conn.create_load_balancer_listeners(
            self.name,
            complex_listeners=complex_listeners
        )
        balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[self.name]
        )
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(
            sorted(l.get_complex_tuple() for l in balancers[0].listeners),
            # We need an extra 'HTTP' here over what ``self.listeners`` uses.
            sorted([(80, 8000, 'HTTP', 'HTTP')] + complex_listeners)
        )

    def test_load_balancer_access_log(self):
        attributes = self.balancer.get_attributes()

        self.assertEqual(False, attributes.access_log.enabled)

        attributes.access_log.enabled = True
        attributes.access_log.s3_bucket_name = self.bucket_name
        attributes.access_log.s3_bucket_prefix = 'access-logs'
        attributes.access_log.emit_interval = 5

        self.conn.modify_lb_attribute(self.balancer.name, 'accessLog',
                                      attributes.access_log)

        new_attributes = self.balancer.get_attributes()

        self.assertEqual(True, new_attributes.access_log.enabled)
        self.assertEqual(self.bucket_name,
                         new_attributes.access_log.s3_bucket_name)
        self.assertEqual('access-logs',
                         new_attributes.access_log.s3_bucket_prefix)
        self.assertEqual(5, new_attributes.access_log.emit_interval)

    def test_load_balancer_get_attributes(self):
        # Single-attribute getters must agree with the bulk get_attributes().
        attributes = self.balancer.get_attributes()
        connection_draining = self.conn.get_lb_attribute(self.balancer.name,
                                                         'ConnectionDraining')
        self.assertEqual(connection_draining.enabled,
                         attributes.connection_draining.enabled)
        self.assertEqual(connection_draining.timeout,
                         attributes.connection_draining.timeout)

        access_log = self.conn.get_lb_attribute(self.balancer.name,
                                                'AccessLog')
        self.assertEqual(access_log.enabled, attributes.access_log.enabled)
        self.assertEqual(access_log.s3_bucket_name,
                         attributes.access_log.s3_bucket_name)
        self.assertEqual(access_log.s3_bucket_prefix,
                         attributes.access_log.s3_bucket_prefix)
        self.assertEqual(access_log.emit_interval,
                         attributes.access_log.emit_interval)

        cross_zone_load_balancing = self.conn.get_lb_attribute(
            self.balancer.name, 'CrossZoneLoadBalancing')
        self.assertEqual(cross_zone_load_balancing,
                         attributes.cross_zone_load_balancing.enabled)

    def change_and_verify_load_balancer_connection_draining(
            self, enabled, timeout=None):
        # Helper (not a test): set connection draining and read it back.
        attributes = self.balancer.get_attributes()

        attributes.connection_draining.enabled = enabled
        if timeout is not None:
            attributes.connection_draining.timeout = timeout

        self.conn.modify_lb_attribute(
            self.balancer.name, 'ConnectionDraining',
            attributes.connection_draining)

        attributes = self.balancer.get_attributes()
        self.assertEqual(enabled, attributes.connection_draining.enabled)
        if timeout is not None:
            self.assertEqual(timeout, attributes.connection_draining.timeout)

    def test_load_balancer_connection_draining_config(self):
        self.change_and_verify_load_balancer_connection_draining(True, 128)
        self.change_and_verify_load_balancer_connection_draining(True, 256)
        self.change_and_verify_load_balancer_connection_draining(False)
        self.change_and_verify_load_balancer_connection_draining(True, 64)

    def test_set_load_balancer_policies_of_listeners(self):
        more_listeners = [(443, 8001, 'HTTP')]
        self.conn.create_load_balancer_listeners(self.name, more_listeners)

        lb_policy_name = 'lb-policy'
        self.conn.create_lb_cookie_stickiness_policy(
            1000,
            self.name,
            lb_policy_name
        )
        self.conn.set_lb_policies_of_listener(
            self.name,
            self.listeners[0][0],
            lb_policy_name
        )

        # Try to remove the policy by passing empty list.
        # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_SetLoadBalancerPoliciesOfListener.html
        # documents this as the way to remove policies.
        self.conn.set_lb_policies_of_listener(
            self.name,
            self.listeners[0][0],
            []
        )


if __name__ == '__main__':
    unittest.main()
| mit |
tobiajo/hops-tensorflow | yarntf/examples/slim/datasets/download_and_convert_cifar10.py | 16 | 6215 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts cifar10 data to TFRecords of TF-Example protos.
This module downloads the cifar10 data, uncompresses it, reads the files
that make up the cifar10 data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take several minutes to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cPickle
import os
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from datasets import dataset_utils
# The URL where the CIFAR data can be downloaded.
_DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
# The number of training files.
_NUM_TRAIN_FILES = 5
# The height and width of each image.
_IMAGE_SIZE = 32
# The names of the classes.
_CLASS_NAMES = [
'airplane',
'automobile',
'bird',
'cat',
'deer',
'dog',
'frog',
'horse',
'ship',
'truck',
]
def _add_to_tfrecord(filename, tfrecord_writer, offset=0):
  """Loads data from the cifar10 pickle files and writes files to a TFRecord.
  Args:
    filename: The filename of the cifar10 pickle file.
    tfrecord_writer: The TFRecord writer to use for writing.
    offset: An offset into the absolute number of images previously written.
  Returns:
    The new offset.
  """
  # NOTE(review): text-mode 'r' plus cPickle is Python 2 only; Python 3 would
  # need 'rb' and pickle.load(..., encoding='bytes') -- confirm target runtime.
  with tf.gfile.Open(filename, 'r') as f:
    data = cPickle.load(f)
  images = data['data']
  num_images = images.shape[0]
  # CIFAR-10 stores each image as a flat 3072-byte row; restore to CHW.
  images = images.reshape((num_images, 3, 32, 32))
  labels = data['labels']
  with tf.Graph().as_default():
    image_placeholder = tf.placeholder(dtype=tf.uint8)
    encoded_image = tf.image.encode_png(image_placeholder)
    with tf.Session('') as sess:
      for j in range(num_images):
        sys.stdout.write('\r>> Reading file [%s] image %d/%d' % (
            filename, offset + j + 1, offset + num_images))
        sys.stdout.flush()
        # Convert CHW -> HWC, the layout expected by encode_png.
        image = np.squeeze(images[j]).transpose((1, 2, 0))
        label = labels[j]
        png_string = sess.run(encoded_image,
                              feed_dict={image_placeholder: image})
        example = dataset_utils.image_to_tfexample(
            png_string, 'png', _IMAGE_SIZE, _IMAGE_SIZE, label)
        tfrecord_writer.write(example.SerializeToString())
  return offset + num_images
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The dataset directory where the dataset is stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/cifar10_%s.tfrecord' % (dataset_dir, split_name)
def _download_and_uncompress_dataset(dataset_dir):
  """Fetches the cifar10 tarball (if not already present) and unpacks it.
  Args:
    dataset_dir: Directory in which the temporary files are stored.
  """
  tarball_name = _DATA_URL.split('/')[-1]
  tarball_path = os.path.join(dataset_dir, tarball_name)
  if not os.path.exists(tarball_path):
    def _report_progress(count, block_size, total_size):
      # Overwrite the same console line with the download percentage.
      percent = float(count * block_size) / float(total_size) * 100.0
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (tarball_name, percent))
      sys.stdout.flush()
    tarball_path, _ = urllib.request.urlretrieve(
        _DATA_URL, tarball_path, _report_progress)
    print()
    size_bytes = os.stat(tarball_path).st_size
    print('Successfully downloaded', tarball_name, size_bytes, 'bytes.')
  tarfile.open(tarball_path, 'r:gz').extractall(dataset_dir)
def _clean_up_temporary_files(dataset_dir):
  """Deletes the downloaded tarball and the unpacked batches directory.
  Args:
    dataset_dir: Directory in which the temporary files are stored.
  """
  tarball = os.path.join(dataset_dir, _DATA_URL.split('/')[-1])
  tf.gfile.Remove(tarball)
  tf.gfile.DeleteRecursively(os.path.join(dataset_dir, 'cifar-10-batches-py'))
def run(dataset_dir):
  """Runs the download and conversion operation.
  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)
  # Idempotency guard: skip all work if both output files already exist.
  training_filename = _get_output_filename(dataset_dir, 'train')
  testing_filename = _get_output_filename(dataset_dir, 'test')
  if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
    print('Dataset files already exist. Exiting without re-creating them.')
    return
  # Uses the shared helper from dataset_utils rather than the local
  # _download_and_uncompress_dataset defined above.
  dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
  # First, process the training data:
  with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
    offset = 0
    for i in range(_NUM_TRAIN_FILES):
      filename = os.path.join(dataset_dir,
                              'cifar-10-batches-py',
                              'data_batch_%d' % (i + 1))  # 1-indexed.
      offset = _add_to_tfrecord(filename, tfrecord_writer, offset)
  # Next, process the testing data:
  with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
    filename = os.path.join(dataset_dir,
                            'cifar-10-batches-py',
                            'test_batch')
    _add_to_tfrecord(filename, tfrecord_writer)
  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
  _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the Cifar10 dataset!')
| apache-2.0 |
Oxygem/canaryd | canaryd_packages/requests/packages/chardet/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every supported multi-byte charset in turn."""

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # Instantiate one prober per multi-byte encoding; order matches the
        # original upstream detector.
        prober_classes = (
            UTF8Prober,
            SJISProber,
            EUCJPProber,
            GB2312Prober,
            EUCKRProber,
            CP949Prober,
            Big5Prober,
            EUCTWProber,
        )
        self._mProbers = [klass() for klass in prober_classes]
        self.reset()
| mit |
JT5D/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 22 | 9838 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
    """End-to-end checks for PLSCanonical / PLSRegression.

    Covers: agreement of the nipals and svd algorithms, mathematical
    properties of the decomposition (orthogonality, reconstruction), and
    non-regression against reference values from the R package plspm.
    """
    d = load_linnerud()
    X = d.data
    Y = d.target
    # 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
    # ===========================================================
    # Compare 2 algo.: nipals vs. svd
    # ------------------------------
    pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
    pls_bynipals.fit(X, Y)
    pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
    pls_bysvd.fit(X, Y)
    # check equalities of loading (up to the sign of the second column)
    assert_array_almost_equal(
        pls_bynipals.x_loadings_,
        np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
        err_msg="nipals and svd implementation lead to different x loadings")
    assert_array_almost_equal(
        pls_bynipals.y_loadings_,
        np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
        err_msg="nipals and svd implementation lead to different y loadings")
    # Check PLS properties (with n_components=X.shape[1])
    # ---------------------------------------------------
    plsca = pls_.PLSCanonical(n_components=X.shape[1])
    plsca.fit(X, Y)
    T = plsca.x_scores_
    P = plsca.x_loadings_
    Wx = plsca.x_weights_
    U = plsca.y_scores_
    Q = plsca.y_loadings_
    Wy = plsca.y_weights_
    def check_ortho(M, err_msg):
        # M'M must be diagonal when M's columns are mutually orthogonal.
        K = np.dot(M.T, M)
        assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
    # Orthogonality of weights
    # ~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(Wx, "x weights are not orthogonal")
    check_ortho(Wy, "y weights are not orthogonal")
    # Orthogonality of latent scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(T, "x scores are not orthogonal")
    check_ortho(U, "y scores are not orthogonal")
    # Check X = TP' and Y = UQ' (with (p == q) components)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # center scale X, Y
    Xc, Yc, x_mean, y_mean, x_std, y_std =\
        pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
    assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
    assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
    # Check that rotations on training data lead to scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Xr = plsca.transform(X)
    assert_array_almost_equal(Xr, plsca.x_scores_,
                              err_msg="rotation on X failed")
    Xr, Yr = plsca.transform(X, Y)
    assert_array_almost_equal(Xr, plsca.x_scores_,
                              err_msg="rotation on X failed")
    assert_array_almost_equal(Yr, plsca.y_scores_,
                              err_msg="rotation on Y failed")
    # "Non regression test" on canonical PLS
    # --------------------------------------
    # The results were checked against the R-package plspm
    pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
    pls_ca.fit(X, Y)
    x_weights = np.array(
        [[-0.61330704, 0.25616119, -0.74715187],
         [-0.74697144, 0.11930791, 0.65406368],
         [-0.25668686, -0.95924297, -0.11817271]])
    assert_array_almost_equal(pls_ca.x_weights_, x_weights)
    x_rotations = np.array(
        [[-0.61330704, 0.41591889, -0.62297525],
         [-0.74697144, 0.31388326, 0.77368233],
         [-0.25668686, -0.89237972, -0.24121788]])
    assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
    y_weights = np.array(
        [[+0.58989127, 0.7890047, 0.1717553],
         [+0.77134053, -0.61351791, 0.16920272],
         [-0.23887670, -0.03267062, 0.97050016]])
    assert_array_almost_equal(pls_ca.y_weights_, y_weights)
    y_rotations = np.array(
        [[+0.58989127, 0.7168115, 0.30665872],
         [+0.77134053, -0.70791757, 0.19786539],
         [-0.23887670, -0.00343595, 0.94162826]])
    assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
    # 2) Regression PLS (PLS2): "Non regression test"
    # ===============================================
    # The results were checked against the R-packages plspm, misOmics and pls
    pls_2 = pls_.PLSRegression(n_components=X.shape[1])
    pls_2.fit(X, Y)
    x_weights = np.array(
        [[-0.61330704, -0.00443647, 0.78983213],
         [-0.74697144, -0.32172099, -0.58183269],
         [-0.25668686, 0.94682413, -0.19399983]])
    assert_array_almost_equal(pls_2.x_weights_, x_weights)
    x_loadings = np.array(
        [[-0.61470416, -0.24574278, 0.78983213],
         [-0.65625755, -0.14396183, -0.58183269],
         [-0.51733059, 1.00609417, -0.19399983]])
    assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
    y_weights = np.array(
        [[+0.32456184, 0.29892183, 0.20316322],
         [+0.42439636, 0.61970543, 0.19320542],
         [-0.13143144, -0.26348971, -0.17092916]])
    assert_array_almost_equal(pls_2.y_weights_, y_weights)
    y_loadings = np.array(
        [[+0.32456184, 0.29892183, 0.20316322],
         [+0.42439636, 0.61970543, 0.19320542],
         [-0.13143144, -0.26348971, -0.17092916]])
    assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
    # 3) Another non-regression test of Canonical PLS on random dataset
    # =================================================================
    # The results were checked against the R-package plspm
    n = 500
    p_noise = 10
    q_noise = 5
    # 2 latents vars:
    # Fixed seed so the reference arrays below stay valid.
    np.random.seed(11)
    l1 = np.random.normal(size=n)
    l2 = np.random.normal(size=n)
    latents = np.array([l1, l1, l2, l2]).T
    X = latents + np.random.normal(size=4 * n).reshape((n, 4))
    Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
    X = np.concatenate(
        (X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
    Y = np.concatenate(
        (Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
    # Re-seed from the OS so later tests are not deterministic by accident.
    np.random.seed(None)
    pls_ca = pls_.PLSCanonical(n_components=3)
    pls_ca.fit(X, Y)
    x_weights = np.array(
        [[0.65803719, 0.19197924, 0.21769083],
         [0.7009113, 0.13303969, -0.15376699],
         [0.13528197, -0.68636408, 0.13856546],
         [0.16854574, -0.66788088, -0.12485304],
         [-0.03232333, -0.04189855, 0.40690153],
         [0.1148816, -0.09643158, 0.1613305],
         [0.04792138, -0.02384992, 0.17175319],
         [-0.06781, -0.01666137, -0.18556747],
         [-0.00266945, -0.00160224, 0.11893098],
         [-0.00849528, -0.07706095, 0.1570547],
         [-0.00949471, -0.02964127, 0.34657036],
         [-0.03572177, 0.0945091, 0.3414855],
         [0.05584937, -0.02028961, -0.57682568],
         [0.05744254, -0.01482333, -0.17431274]])
    assert_array_almost_equal(pls_ca.x_weights_, x_weights)
    x_loadings = np.array(
        [[0.65649254, 0.1847647, 0.15270699],
         [0.67554234, 0.15237508, -0.09182247],
         [0.19219925, -0.67750975, 0.08673128],
         [0.2133631, -0.67034809, -0.08835483],
         [-0.03178912, -0.06668336, 0.43395268],
         [0.15684588, -0.13350241, 0.20578984],
         [0.03337736, -0.03807306, 0.09871553],
         [-0.06199844, 0.01559854, -0.1881785],
         [0.00406146, -0.00587025, 0.16413253],
         [-0.00374239, -0.05848466, 0.19140336],
         [0.00139214, -0.01033161, 0.32239136],
         [-0.05292828, 0.0953533, 0.31916881],
         [0.04031924, -0.01961045, -0.65174036],
         [0.06172484, -0.06597366, -0.1244497]])
    assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
    y_weights = np.array(
        [[0.66101097, 0.18672553, 0.22826092],
         [0.69347861, 0.18463471, -0.23995597],
         [0.14462724, -0.66504085, 0.17082434],
         [0.22247955, -0.6932605, -0.09832993],
         [0.07035859, 0.00714283, 0.67810124],
         [0.07765351, -0.0105204, -0.44108074],
         [-0.00917056, 0.04322147, 0.10062478],
         [-0.01909512, 0.06182718, 0.28830475],
         [0.01756709, 0.04797666, 0.32225745]])
    assert_array_almost_equal(pls_ca.y_weights_, y_weights)
    y_loadings = np.array(
        [[0.68568625, 0.1674376, 0.0969508],
         [0.68782064, 0.20375837, -0.1164448],
         [0.11712173, -0.68046903, 0.12001505],
         [0.17860457, -0.6798319, -0.05089681],
         [0.06265739, -0.0277703, 0.74729584],
         [0.0914178, 0.00403751, -0.5135078],
         [-0.02196918, -0.01377169, 0.09564505],
         [-0.03288952, 0.09039729, 0.31858973],
         [0.04287624, 0.05254676, 0.27836841]])
    assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
    # Orthogonality of weights
    # ~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
    check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
    # Orthogonality of latent scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
    check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Check that PLSSVD (and friends) return only the requested number of
    # components, not every possible one.
    dataset = load_linnerud()
    X, Y = dataset.data, dataset.target
    n_components = 2
    estimator_classes = [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]
    for estimator_class in estimator_classes:
        estimator = estimator_class(n_components=n_components)
        estimator.fit(X, Y)
        assert_equal(n_components, estimator.y_scores_.shape[1])
def test_scale():
    # Fitting with scale=True must not blow up when a feature has zero
    # standard deviation.
    dataset = load_linnerud()
    X, Y = dataset.data, dataset.target
    # Make the last column constant so X[:, -1].std() is zero.
    X[:, -1] = 1.0
    estimators = (pls_.PLSCanonical(), pls_.PLSRegression(), pls_.PLSSVD())
    for estimator in estimators:
        estimator.set_params(scale=True)
        estimator.fit(X, Y)
| bsd-3-clause |
windygu/youku-lixian | flv_join.py | 8 | 9173 | #!/usr/bin/env python
import struct
from cStringIO import StringIO
TAG_TYPE_METADATA = 18
##################################################
# AMF0
##################################################
AMF_TYPE_NUMBER = 0x00
AMF_TYPE_BOOLEAN = 0x01
AMF_TYPE_STRING = 0x02
AMF_TYPE_OBJECT = 0x03
AMF_TYPE_MOVIECLIP = 0x04
AMF_TYPE_NULL = 0x05
AMF_TYPE_UNDEFINED = 0x06
AMF_TYPE_REFERENCE = 0x07
AMF_TYPE_MIXED_ARRAY = 0x08
AMF_TYPE_END_OF_OBJECT = 0x09
AMF_TYPE_ARRAY = 0x0A
AMF_TYPE_DATE = 0x0B
AMF_TYPE_LONG_STRING = 0x0C
AMF_TYPE_UNSUPPORTED = 0x0D
AMF_TYPE_RECORDSET = 0x0E
AMF_TYPE_XML = 0x0F
AMF_TYPE_CLASS_OBJECT = 0x10
AMF_TYPE_AMF3_OBJECT = 0x11
class ECMAObject:
    """An AMF ECMA (mixed) array: ordered key/value pairs plus a dict index."""

    def __init__(self, max_number):
        self.max_number = max_number  # element count declared in the stream
        self.data = []                # (key, value) pairs in insertion order
        self.map = {}                 # key -> value index for O(1) lookup

    def put(self, k, v):
        """Append a new key/value pair (duplicate keys are not checked)."""
        self.data.append((k, v))
        self.map[k] = v

    def get(self, k):
        """Return the value for key *k*; raises KeyError if absent."""
        return self.map[k]

    def set(self, k, v):
        """Replace the value of an existing key; raises KeyError if absent."""
        for index, (key, _value) in enumerate(self.data):
            if key == k:
                self.data[index] = (k, v)
                break
        else:
            raise KeyError(k)
        self.map[k] = v

    def keys(self):
        return self.map.keys()

    def __str__(self):
        return 'ECMAObject<' + repr(self.map) + '>'

    def __eq__(self, other):
        return self.max_number == other.max_number and self.data == other.data
def read_amf_number(stream):
    """Read an AMF0 number payload: a big-endian IEEE-754 double (8 bytes)."""
    return struct.unpack('>d', stream.read(8))[0]
def read_amf_boolean(stream):
    """Read an AMF0 boolean payload stored as a single 0/1 byte."""
    b = read_byte(stream)
    assert b in (0, 1)
    return bool(b)
def read_amf_string(stream):
    """Read an AMF0 short string: 16-bit big-endian length prefix + UTF-8 bytes.

    Returns None on a truncated stream (workaround for invalid Qiyi FLVs).
    """
    xx = stream.read(2)
    if xx == '':
        # dirty fix for the invalid Qiyi flv
        return None
    n = struct.unpack('>H', xx)[0]
    s = stream.read(n)
    assert len(s) == n
    return s.decode('utf-8')
def read_amf_object(stream):
    """Read an AMF0 anonymous object: key/value pairs until the empty-key
    end-of-object marker."""
    obj = {}
    while True:
        k = read_amf_string(stream)
        if not k:
            # An empty key terminates the object; the next byte must be
            # the end-of-object marker.
            assert read_byte(stream) == AMF_TYPE_END_OF_OBJECT
            break
        v = read_amf(stream)
        obj[k] = v
    return obj
def read_amf_mixed_array(stream):
    """Read an AMF0 ECMA (mixed) array into an ECMAObject, preserving order."""
    max_number = read_uint(stream)
    mixed_results = ECMAObject(max_number)
    while True:
        k = read_amf_string(stream)
        if k is None:
            # dirty fix for the invalid Qiyi flv
            break
        if not k:
            assert read_byte(stream) == AMF_TYPE_END_OF_OBJECT
            break
        v = read_amf(stream)
        mixed_results.put(k, v)
    # The declared element count must match what was actually read.
    assert len(mixed_results.data) == max_number
    return mixed_results
def read_amf_array(stream):
    """Read an AMF0 strict array: a 32-bit count followed by that many values."""
    n = read_uint(stream)
    v = []
    for i in range(n):
        v.append(read_amf(stream))
    return v
# Dispatch table: AMF0 type tag -> reader function for that payload.
amf_readers = {
    AMF_TYPE_NUMBER: read_amf_number,
    AMF_TYPE_BOOLEAN: read_amf_boolean,
    AMF_TYPE_STRING: read_amf_string,
    AMF_TYPE_OBJECT: read_amf_object,
    AMF_TYPE_MIXED_ARRAY: read_amf_mixed_array,
    AMF_TYPE_ARRAY: read_amf_array,
}
def read_amf(stream):
    """Read one complete AMF0 value: a one-byte type tag, then the payload."""
    return amf_readers[read_byte(stream)](stream)
def write_amf_number(stream, v):
    """Write an AMF0 number payload: big-endian IEEE-754 double."""
    stream.write(struct.pack('>d', v))
def write_amf_boolean(stream, v):
    """Write an AMF0 boolean payload as a single 0/1 byte."""
    if v:
        stream.write('\x01')
    else:
        stream.write('\x00')
def write_amf_string(stream, s):
    """Write an AMF0 short string: 16-bit length prefix + UTF-8 bytes."""
    s = s.encode('utf-8')
    stream.write(struct.pack('>H', len(s)))
    stream.write(s)
def write_amf_object(stream, o):
    """Write an AMF0 object payload, terminated by the empty-key marker."""
    for k in o:
        write_amf_string(stream, k)
        write_amf(stream, o[k])
    write_amf_string(stream, '')
    write_byte(stream, AMF_TYPE_END_OF_OBJECT)
def write_amf_mixed_array(stream, o):
    """Write an AMF0 ECMA array payload from an ECMAObject (keeps order)."""
    write_uint(stream, o.max_number)
    for k, v in o.data:
        write_amf_string(stream, k)
        write_amf(stream, v)
    write_amf_string(stream, '')
    write_byte(stream, AMF_TYPE_END_OF_OBJECT)
def write_amf_array(stream, o):
    """Write an AMF0 strict array payload: 32-bit count, then each value."""
    write_uint(stream, len(o))
    for v in o:
        write_amf(stream, v)
# Maps Python types to the AMF0 type tag used to serialize them.
# NOTE: the 'unicode' key makes this table Python 2 only.
amf_writers_tags = {
    float: AMF_TYPE_NUMBER,
    bool: AMF_TYPE_BOOLEAN,
    unicode: AMF_TYPE_STRING,
    dict: AMF_TYPE_OBJECT,
    ECMAObject: AMF_TYPE_MIXED_ARRAY,
    list: AMF_TYPE_ARRAY,
}
# Maps AMF0 type tag -> writer function for the payload.
amf_writers = {
    AMF_TYPE_NUMBER: write_amf_number,
    AMF_TYPE_BOOLEAN: write_amf_boolean,
    AMF_TYPE_STRING: write_amf_string,
    AMF_TYPE_OBJECT: write_amf_object,
    AMF_TYPE_MIXED_ARRAY: write_amf_mixed_array,
    AMF_TYPE_ARRAY: write_amf_array,
}
def write_amf(stream, v):
    """Write one complete AMF0 value: type tag byte, then the payload."""
    # isinstance check first so ECMAObject subclasses do not fall through
    # to the exact-type lookup below.
    if isinstance(v, ECMAObject):
        tag = amf_writers_tags[ECMAObject]
    else:
        tag = amf_writers_tags[type(v)]
    write_byte(stream, tag)
    amf_writers[tag](stream, v)
##################################################
# FLV
##################################################
def read_int(stream):
    """Read a signed 32-bit big-endian integer."""
    (value,) = struct.unpack('>i', stream.read(4))
    return value
def read_uint(stream):
    """Read an unsigned 32-bit big-endian integer."""
    (value,) = struct.unpack('>I', stream.read(4))
    return value
def write_uint(stream, n):
    """Write *n* as an unsigned 32-bit big-endian integer."""
    stream.write(struct.pack('>I', n))
def read_byte(stream):
    """Read a single byte and return it as an int."""
    return ord(stream.read(1))
def write_byte(stream, b):
    """Write the int *b* as a single byte."""
    stream.write(chr(b))
def read_unsigned_medium_int(stream):
    """Read an unsigned 24-bit big-endian integer (FLV 'UI24')."""
    hi, mid, lo = struct.unpack('BBB', stream.read(3))
    return (hi << 16) | (mid << 8) | lo
def read_tag(stream):
    """Read one FLV tag; returns None (implicit) at end of file.

    Returns a (data_type, timestamp, body_size, body, previous_tag_size)
    tuple on success.
    """
    # header size: 15 bytes
    # (4-byte PreviousTagSize field + 11-byte tag header)
    header = stream.read(15)
    if len(header) == 4:
        # Only the trailing PreviousTagSize remains: end of file.
        return
    x = struct.unpack('>IBBBBBBBBBBB', header)
    previous_tag_size = x[0]
    data_type = x[1]
    # 24-bit big-endian body size.
    body_size = (x[2] << 16) | (x[3] << 8) | x[4]
    assert body_size < 1024*1024*128, 'tag body size too big (> 128MB)'
    # 24-bit timestamp plus an extended (high) byte.
    timestamp = (x[5] << 16) | (x[6] << 8) | x[7]
    timestamp += x[8] << 24
    # Stream ID must always be zero.
    assert x[9:] == (0, 0, 0)
    body = stream.read(body_size)
    return (data_type, timestamp, body_size, body, previous_tag_size)
    # Equivalent field-by-field implementation, kept for reference:
    #previous_tag_size = read_uint(stream)
    #data_type = read_byte(stream)
    #body_size = read_unsigned_medium_int(stream)
    #assert body_size < 1024*1024*128, 'tag body size too big (> 128MB)'
    #timestamp = read_unsigned_medium_int(stream)
    #timestamp += read_byte(stream) << 24
    #assert read_unsigned_medium_int(stream) == 0
    #body = stream.read(body_size)
    #return (data_type, timestamp, body_size, body, previous_tag_size)
def write_tag(stream, tag):
    """Serialize one FLV tag, preceded by the previous tag's size field."""
    data_type, timestamp, body_size, body, previous_tag_size = tag
    write_uint(stream, previous_tag_size)
    write_byte(stream, data_type)
    # Body size as a 24-bit big-endian integer.
    for shift in (16, 8, 0):
        write_byte(stream, (body_size >> shift) & 0xff)
    # Lower 24 bits of the timestamp, then the extended (high) byte.
    for shift in (16, 8, 0):
        write_byte(stream, (timestamp >> shift) & 0xff)
    write_byte(stream, (timestamp >> 24) & 0xff)
    # Stream ID is always zero.
    stream.write('\0\0\0')
    stream.write(body)
def read_flv_header(stream):
    """Consume and validate the 9-byte FLV file header."""
    signature = stream.read(3)
    assert signature == 'FLV'
    assert read_byte(stream) == 1  # header version
    assert read_byte(stream) == 5  # type flags: audio + video present
    assert read_uint(stream) == 9  # data offset (header length)
def write_flv_header(stream):
    """Emit the standard 9-byte FLV file header (audio + video)."""
    stream.write('FLV')
    write_byte(stream, 1)
    write_byte(stream, 5)
    write_uint(stream, 9)
def read_meta_data(stream):
    """Read a metadata event: an AMF string naming the event (normally
    'onMetaData') followed by the metadata value itself."""
    meta_type = read_amf(stream)
    meta = read_amf(stream)
    return meta_type, meta
def read_meta_tag(tag):
    """Extract (meta_type, meta) from a parsed FLV metadata tag tuple."""
    data_type, timestamp, body_size, body, previous_tag_size = tag
    assert data_type == TAG_TYPE_METADATA
    assert timestamp == 0
    assert previous_tag_size == 0
    return read_meta_data(StringIO(body))
def write_meta_data(stream, meta_type, meta_data):
    """Write a metadata event (event name + value) as two AMF values.

    Fixes the original implementation, which misspelled ``basestring`` and
    forgot to pass ``stream`` to ``write_amf`` (so every call raised a
    NameError/TypeError).
    """
    assert isinstance(meta_type, basestring)
    write_amf(stream, meta_type)
    write_amf(stream, meta_data)
def write_meta_tag(stream, meta_type, meta_data):
    """Serialize the metadata as a complete FLV tag at timestamp 0."""
    serialized = StringIO()
    write_amf(serialized, meta_type)
    write_amf(serialized, meta_data)
    body = serialized.getvalue()
    write_tag(stream, (TAG_TYPE_METADATA, 0, len(body), body, 0))
##################################################
# main
##################################################
def guess_output(inputs):
    """Guess a joined-output filename from the input filenames.

    Returns the longest common basename prefix of *inputs* plus '.flv',
    or 'output.flv' when there is no common prefix.
    """
    import os.path
    # Materialize a list: a lazy map() object (Python 3 semantics) would be
    # exhausted by the len() pass below and break the later indexing.
    inputs = [os.path.basename(p) for p in inputs]
    n = min(len(name) for name in inputs)
    for i in reversed(range(1, n)):
        if len(set(s[:i] for s in inputs)) == 1:
            return inputs[0][:i] + '.flv'
    return 'output.flv'
def concat_flvs(flvs, output=None):
    """Join several FLV files into one, rebasing timestamps and merging
    the metadata 'duration' field."""
    assert flvs, 'no flv file found'
    import os.path
    if not output:
        output = guess_output(flvs)
    elif os.path.isdir(output):
        output = os.path.join(output, guess_output(flvs))
    print 'Joining %s into %s' % (', '.join(flvs), output)
    ins = [open(flv, 'rb') for flv in flvs]
    # Validate and skip each input's 9-byte file header.
    for stream in ins:
        read_flv_header(stream)
    # The first tag of each file must be its metadata tag.
    meta_tags = map(read_tag, ins)
    metas = map(read_meta_tag, meta_tags)
    meta_types, metas = zip(*metas)
    # All inputs must use the same metadata event name (e.g. 'onMetaData').
    assert len(set(meta_types)) == 1
    meta_type = meta_types[0]
    # must merge fields: duration
    # TODO: check other meta info, update other meta info
    total_duration = sum(meta.get('duration') for meta in metas)
    meta_data = metas[0]
    meta_data.set('duration', total_duration)
    out = open(output, 'wb')
    write_flv_header(out)
    write_meta_tag(out, meta_type, meta_data)
    timestamp_start = 0
    for stream in ins:
        while True:
            tag = read_tag(stream)
            if tag:
                data_type, timestamp, body_size, body, previous_tag_size = tag
                # Rebase this file's timestamps onto the end of the
                # previous file.
                timestamp += timestamp_start
                tag = data_type, timestamp, body_size, body, previous_tag_size
                write_tag(out, tag)
            else:
                break
        # NOTE(review): uses the last tag's (already rebased) timestamp as
        # the offset for the next file -- assumes tags arrive in timestamp
        # order; verify for out-of-order streams.
        timestamp_start = timestamp
    # Trailing PreviousTagSize field for the final tag written.
    write_uint(out, previous_tag_size)
    return output
def usage():
    """Print command-line usage."""
    print 'python flv_join.py --output target.flv flv...'
def main():
    """Parse command-line options and join the given FLV files."""
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ho:", ["help", "output="])
    # Python 2 'except ExcType, name' syntax: this module is Python 2 only.
    except getopt.GetoptError, err:
        usage()
        sys.exit(1)
    output = None
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-o", "--output"):
            output = a
        else:
            # Unreachable in practice: getopt rejects unknown options above.
            usage()
            sys.exit(1)
    if not args:
        usage()
        sys.exit(1)
    concat_flvs(args, output)
if __name__ == '__main__':
    main()
| mit |
aperigault/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance.py | 4 | 10390 | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachinescalesetinstance
version_added: "2.8"
short_description: Get Azure Virtual Machine Scale Set Instance facts
description:
- Get facts of Azure Virtual Machine Scale Set VMs.
options:
resource_group:
description:
- The name of the resource group.
required: True
vmss_name:
description:
- The name of the VM scale set.
required: True
instance_id:
description:
- The instance ID of the virtual machine.
required: True
latest_model:
type: bool
description:
- Set to C(yes) to upgrade to the latest model.
power_state:
description:
- Use this option to change power state of the instance.
required: True
choices:
- 'running'
- 'stopped'
- 'deallocated'
state:
description:
- State of the VMSS instance. Use C(present) to update an instance and C(absent) to delete an instance.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Upgrade instance to the latest image
  azure_rm_virtualmachinescalesetinstance:
resource_group: myResourceGroup
vmss_name: myVMSS
instance_id: "2"
latest_model: yes
'''
RETURN = '''
instances:
description:
- A list of instances.
returned: always
type: complex
contains:
id:
description:
- Instance resource ID.
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.Compute/scalesets/myscaleset/vms/myvm
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute import ComputeManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMVirtualMachineScaleSetInstance(AzureRMModuleBase):
    """Manage a single instance of an Azure virtual machine scale set.

    Supports deleting the instance, upgrading it to the scale set's latest
    model, and changing its power state (running / stopped / deallocated).
    """

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            vmss_name=dict(
                type='str',
                required=True
            ),
            instance_id=dict(
                type='str'
            ),
            latest_model=dict(
                type='bool'
            ),
            power_state=dict(
                type='str',
                choices=['running', 'stopped', 'deallocated']
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.mgmt_client = None
        self.resource_group = None
        self.vmss_name = None
        self.instance_id = None
        self.latest_model = None
        self.power_state = None
        self.state = None
        super(AzureRMVirtualMachineScaleSetInstance, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Reconcile the instance with the requested state and return results."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(ComputeManagementClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)
        instances = self.get()
        if self.state == 'absent':
            for item in instances:
                if not self.check_mode:
                    self.delete(item['instance_id'])
                self.results['changed'] = True
            self.results['instances'] = []
        else:
            if self.latest_model is not None:
                # Upgrade any instance not already on the latest model.
                for item in instances:
                    if not item.get('latest_model', None):
                        if not self.check_mode:
                            self.apply_latest_model(item['instance_id'])
                        item['latest_model'] = True
                        self.results['changed'] = True
            if self.power_state is not None:
                # Only issue a power operation when the instance is not
                # already in (or transitioning to) the requested state.
                for item in instances:
                    if self.power_state == 'stopped' and item['power_state'] not in ['stopped', 'stopping']:
                        if not self.check_mode:
                            self.stop(item['instance_id'])
                        self.results['changed'] = True
                    elif self.power_state == 'deallocated' and item['power_state'] not in ['deallocated']:
                        if not self.check_mode:
                            self.deallocate(item['instance_id'])
                        self.results['changed'] = True
                    elif self.power_state == 'running' and item['power_state'] not in ['running']:
                        if not self.check_mode:
                            self.start(item['instance_id'])
                        self.results['changed'] = True
            self.results['instances'] = [{'id': item['id']} for item in instances]
        return self.results

    def get(self):
        """Fetch the target instance; returns a list of zero or one dicts."""
        response = None
        results = []
        try:
            response = self.mgmt_client.virtual_machine_scale_set_vms.get(resource_group_name=self.resource_group,
                                                                          vm_scale_set_name=self.vmss_name,
                                                                          instance_id=self.instance_id)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Could not get facts for Virtual Machine Scale Set VM.')
        if response:
            results.append(self.format_response(response))
        return results

    def apply_latest_model(self, instance_id):
        """Upgrade one instance to the scale set's latest model."""
        try:
            # Bug fix: the original referenced self.compute_client and
            # self.name, neither of which exists on this class; use the
            # mgmt client and the scale set name instead.
            poller = self.mgmt_client.virtual_machine_scale_sets.update_instances(resource_group_name=self.resource_group,
                                                                                  vm_scale_set_name=self.vmss_name,
                                                                                  instance_ids=[instance_id])
            self.get_poller_result(poller)
        except CloudError as exc:
            self.log("Error applying latest model {0} - {1}".format(self.vmss_name, str(exc)))
            self.fail("Error applying latest model {0} - {1}".format(self.vmss_name, str(exc)))

    def delete(self, instance_id):
        """Delete one instance from the scale set."""
        try:
            self.mgmt_client.virtual_machine_scale_set_vms.delete(resource_group_name=self.resource_group,
                                                                  vm_scale_set_name=self.vmss_name,
                                                                  instance_id=instance_id)
        except CloudError as e:
            self.log('Could not delete instance of Virtual Machine Scale Set VM.')
            self.fail('Could not delete instance of Virtual Machine Scale Set VM.')

    def start(self, instance_id):
        """Start one instance of the scale set."""
        try:
            self.mgmt_client.virtual_machine_scale_set_vms.start(resource_group_name=self.resource_group,
                                                                 vm_scale_set_name=self.vmss_name,
                                                                 instance_id=instance_id)
        except CloudError as e:
            self.log('Could not start instance of Virtual Machine Scale Set VM.')
            self.fail('Could not start instance of Virtual Machine Scale Set VM.')

    def stop(self, instance_id):
        """Power off one instance of the scale set (still billed)."""
        try:
            self.mgmt_client.virtual_machine_scale_set_vms.power_off(resource_group_name=self.resource_group,
                                                                     vm_scale_set_name=self.vmss_name,
                                                                     instance_id=instance_id)
        except CloudError as e:
            self.log('Could not stop instance of Virtual Machine Scale Set VM.')
            self.fail('Could not stop instance of Virtual Machine Scale Set VM.')

    def deallocate(self, instance_id):
        """Deallocate one instance of the scale set (releases compute)."""
        try:
            self.mgmt_client.virtual_machine_scale_set_vms.deallocate(resource_group_name=self.resource_group,
                                                                      vm_scale_set_name=self.vmss_name,
                                                                      instance_id=instance_id)
        except CloudError as e:
            self.log('Could not deallocate instance of Virtual Machine Scale Set VM.')
            self.fail('Could not deallocate instance of Virtual Machine Scale Set VM.')

    def format_response(self, item):
        """Condense an SDK VM object into the dict shape used by this module."""
        d = item.as_dict()
        iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
                                                                              vm_scale_set_name=self.vmss_name,
                                                                              instance_id=d.get('instance_id', None)).as_dict()
        # Power state arrives as a status code like 'PowerState/running'.
        power_state = ""
        for status in iv['statuses']:
            code = status['code'].split('/')
            if code[0] == 'PowerState':
                power_state = code[1]
                break
        d = {
            'id': d.get('id'),
            'tags': d.get('tags'),
            'instance_id': d.get('instance_id'),
            'latest_model': d.get('latest_model_applied'),
            'power_state': power_state
        }
        return d
def main():
    """Module entry point: instantiating the class runs the module."""
    AzureRMVirtualMachineScaleSetInstance()
if __name__ == '__main__':
    main()
| gpl-3.0 |
hyqneuron/pylearn2-maxsom | pylearn2/scripts/papers/maxout/mytests/mytest2.py | 1 | 7661 | from pylearn2.models.mlp import MLP
from pylearn2.models.maxout import Maxout
from pylearn2.training_algorithms.sgd import SGD
import logging
import warnings
import sys
import numpy as np
from theano.compat import six
from theano import config
from theano import function
from theano.gof.op import get_debug_values
import theano.tensor as T
from pylearn2.compat import OrderedDict, first_key
from pylearn2.monitor import Monitor
from pylearn2.space import CompositeSpace, NullSpace
from pylearn2.train_extensions import TrainExtension
from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm
from pylearn2.training_algorithms.learning_rule import Momentum
from pylearn2.training_algorithms.learning_rule import MomentumAdjustor \
as LRMomentumAdjustor
from pylearn2.utils.iteration import is_stochastic, has_uniform_batch_size
from pylearn2.utils import py_integer_types, py_float_types
from pylearn2.utils import safe_zip
from pylearn2.utils import serial
from pylearn2.utils import sharedX
from pylearn2.utils import contains_nan
from pylearn2.utils import contains_inf
from pylearn2.utils import isfinite
from pylearn2.utils.data_specs import DataSpecsMapping
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.timing import log_timing
from pylearn2.utils.rng import make_np_rng
log = logging.getLogger(__name__)
class TestAlgo(SGD):
    # SGD subclass whose train() is instrumented for weight tracking.
    # The triple-quoted string blocks inside train() below are DISABLED
    # debugging code (Python 2 print statements), kept verbatim; they are
    # string-literal expression statements, not docstrings.
    # this train function mainly to hack into weight tracking
    def train(self, dataset):
        """
        Runs one epoch of SGD training on the specified dataset.
        Parameters
        ----------
        dataset : Dataset
        """
        self.first = False
        rng = self.rng
        # Non-stochastic iteration modes must not receive an RNG.
        if not is_stochastic(self.train_iteration_mode):
            rng = None
        data_specs = self.cost.get_data_specs(self.model)
        # The iterator should be built from flat data specs, so it returns
        # flat, non-redundant tuples of data.
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
        if len(space_tuple) == 0:
            # No data will be returned by the iterator, and it is impossible
            # to know the size of the actual batch.
            # It is not decided yet what the right thing to do should be.
            raise NotImplementedError("Unable to train with SGD, because "
                    "the cost does not actually use data from the data set. "
                    "data_specs: %s" % str(data_specs))
        flat_data_specs = (CompositeSpace(space_tuple), source_tuple)
        iterator = dataset.iterator(mode=self.train_iteration_mode,
                batch_size=self.batch_size,
                data_specs=flat_data_specs, return_tuple=True,
                rng = rng, num_batches = self.batches_per_iter)
        # Disabled debug block: would initialize batch counting / parameter
        # record keeping on the first call.
        """
        if not hasattr(self, 'batch_count'):
            self.batch_count=0
            self.param_records=[]
            print "Going into first batch"
            param_init = self.model.get_param_values()
        """
        on_load_batch = self.on_load_batch
        for batch in iterator:
            for callback in on_load_batch:
                callback(*batch)
            self.sgd_update(*batch)
            # iterator might return a smaller batch if dataset size
            # isn't divisible by batch_size
            # Note: if data_specs[0] is a NullSpace, there is no way to know
            # how many examples would actually have been in the batch,
            # since it was empty, so actual_batch_size would be reported as 0.
            actual_batch_size = flat_data_specs[0].np_batch_size(batch)
            self.monitor.report_batch(actual_batch_size)
            for callback in self.update_callbacks:
                callback(self)
        # Disabled debug block: would snapshot parameters after the first
        # batch, then periodically save parameter records and exit.
        """
        param_first = self.model.get_param_values()
        with log_timing(log, "Saving initial param and first param"):
            serial.save("param_init_first.pkl", (param_init, param_first))
        sys.exit(0)
        # Now, we record the weights every 50 minibatches
        # So 10 records per epoch
        self.batch_count+=1
        if self.batch_count%50==0:
            self.param_records.append(self.model.get_param_values())
        # for every 2 epochs, we save the param_records
        if self.batch_count%(50*20)==0:
            record_path = './mytest/'+str(self.batch_count)+'.pkl'
            print "We are now about to same lots of param records"
            with log_timing(log, 'Saving param records to'+record_path):
                serial.save(record_path, self.param_records)
                self.param_records=[]
        """
class SOMaxout(Maxout):
    """
    A SOM-Maxout layer based on Maxout.
    Each maxout unit is a group, and units within the same group learn
    "together" by copying each other's update in an SOM-like manner.
    Usually, in a maxout group, if a unit is winning/maxing all the time, the
    other units in its group will never be used, never get updated, and thus get
    stuck forever. This wastes maxout's capacity.
    SOM-Maxout solves this problem by asking units within the same somaxout
    group to be each others' buddies. The winners will help their neighbours to
    learn "together". That is, if the winner gets a delta w, it will ask its
    neighbours to get a SOM_factor * delta w.
    """
    def __init__(self, *args, **kwargs):
        # Same constructor signature as Maxout; only adds the copy matrix.
        super(SOMaxout, self).__init__(*args, **kwargs)
        # num_pieces x num_pieces matrix with 1.0 on the diagonal and 0.5
        # off-diagonal: each unit keeps its own gradient and receives half
        # of every group-mate's gradient.
        matrix_value = 0.5 * np.eye(self.num_pieces) + \
                       0.5 * np.ones([self.num_pieces,self.num_pieces])
        self.SOM_copy_matrix = sharedX(matrix_value)
        # When True, _modify_updates equalizes column norms within a group.
        self.standardize_norm = True
        print "SOM_copy_matrix established"
        print matrix_value
    def modify_grads(self, grads):
        """
        W is a matrix n-input by n-maxout unit.
        The objective of this function is to ask nearby units in the same SOM
        group to learn from each other by asking them to copy each other's
        grads, i.e. mixing gradients with a matrix of the form::

            [1,   0.8]
            [0.8, 1  ]
        """
        W, = self.transformer.get_params()
        grad_old = grads[W]
        npi = self.num_pieces
        # within each Maxout unit, we perform a within-group copy of grads.
        # each within-group copy produces an input-size by num_pieces matrix.
        grad_list= [ T.dot(grad_old[:, i*npi:(i+1)*npi ], self.SOM_copy_matrix)
                     for i in xrange(self.num_units)]
        # we then concatenate all those matrices into an input-size by
        # num_units*num_pieces matrix
        grads[W] = T.concatenate(grad_list, axis=1)
        print "Gradients for layer "+self.layer_name+" modified."
    def _modify_updates(self, updates):
        """
        At each update, make sure all units in the same somaxout group has equal
        norm
        """
        W, = self.transformer.get_params()
        update_old = updates[W]
        npi = self.num_pieces
        if self.standardize_norm:
            # Per-column L2 norms, averaged within each group, then each
            # column is rescaled to its group's mean norm.
            norms = T.sqrt(T.sum(T.sqr(update_old), axis=0))
            norm_mean = norms.reshape([self.num_units, self.num_pieces]).mean(axis=1)
            norm_desired=T.repeat(norm_mean, npi)
            # Respect the layer's max column norm constraint, if any.
            if self.max_col_norm is not None:
                norm_desired = T.clip(norm_desired, 0, self.max_col_norm)
            updates[W] = update_old * norm_desired / norms
        print "Updates for layer "+self.layer_name+" modified with within-group norm standardization"
| bsd-3-clause |
gxx/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/generic_relations/models.py | 90 | 2521 | """
34. Generic relations
Generic relations let an object have a foreign key to any object through a
content-type/object-id field. A ``GenericForeignKey`` field can point to any
object, be it animal, vegetable, or mineral.
The canonical example is tags (although this example implementation is *far*
from complete).
"""
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
class TaggedItem(models.Model):
    """A tag on an item, attached to any model via a generic foreign key."""
    tag = models.SlugField()
    # Generic FK plumbing: target model + its primary key.
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey()
    class Meta:
        ordering = ["tag", "content_type__name"]
    def __unicode__(self):
        return self.tag
class ValuableTaggedItem(TaggedItem):
    """A TaggedItem subclass (multi-table inheritance) carrying a value."""
    value = models.PositiveIntegerField()
class Comparison(models.Model):
    """
    A model that tests having multiple GenericForeignKeys, each with its
    own content-type/object-id field pair.
    """
    comparative = models.CharField(max_length=50)
    content_type1 = models.ForeignKey(ContentType, related_name="comparative1_set")
    object_id1 = models.PositiveIntegerField()
    content_type2 = models.ForeignKey(ContentType, related_name="comparative2_set")
    object_id2 = models.PositiveIntegerField()
    first_obj = generic.GenericForeignKey(ct_field="content_type1", fk_field="object_id1")
    other_obj = generic.GenericForeignKey(ct_field="content_type2", fk_field="object_id2")
    def __unicode__(self):
        return u"%s is %s than %s" % (self.first_obj, self.comparative, self.other_obj)
class Animal(models.Model):
    """A taggable model that also participates in Comparisons via the
    first (content_type1/object_id1) generic FK pair."""
    common_name = models.CharField(max_length=150)
    latin_name = models.CharField(max_length=150)
    tags = generic.GenericRelation(TaggedItem)
    comparisons = generic.GenericRelation(Comparison,
                                          object_id_field="object_id1",
                                          content_type_field="content_type1")
    def __unicode__(self):
        return self.common_name
class Vegetable(models.Model):
    """A taggable model with a reverse GenericRelation to TaggedItem."""
    name = models.CharField(max_length=150)
    is_yucky = models.BooleanField(default=True)
    tags = generic.GenericRelation(TaggedItem)
    def __unicode__(self):
        return self.name
class Mineral(models.Model):
    """A model that can still be tagged generically, but deliberately has
    no reverse GenericRelation declared."""
    name = models.CharField(max_length=150)
    hardness = models.PositiveSmallIntegerField()
    # note the lack of an explicit GenericRelation here...
    def __unicode__(self):
        return self.name
| gpl-3.0 |
songmonit/CTTMSONLINE | addons/mail/res_users.py | 314 | 10337 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
from openerp import api
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
import openerp
class res_users(osv.Model):
    """ Update of res.users class
        - add a preference about sending emails about notifications
        - make a new user follow itself
        - add a welcome message
        - add suggestion preference
    """
    _name = 'res.users'
    _inherit = ['res.users']
    # Delegation inheritance: every user owns a mail.alias record.
    _inherits = {'mail.alias': 'alias_id'}
    _columns = {
        'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
            help="Email address internally associated with this user. Incoming "\
                 "emails will appear in the user's notifications.", copy=False, auto_join=True),
        'display_groups_suggestions': fields.boolean("Display Groups Suggestions"),
    }
    _defaults = {
        'display_groups_suggestions': True,
    }
    def __init__(self, pool, cr):
        """ Override of __init__ to add access rights on notification_email_send
            and alias fields. Access rights are disabled by default, but allowed
            on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.
        """
        init_res = super(res_users, self).__init__(pool, cr)
        # duplicate list to avoid modifying the original reference
        self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
        self.SELF_WRITEABLE_FIELDS.extend(['notify_email', 'display_groups_suggestions'])
        # duplicate list to avoid modifying the original reference
        self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)
        self.SELF_READABLE_FIELDS.extend(['notify_email', 'alias_domain', 'alias_name', 'display_groups_suggestions'])
        return init_res
    def _auto_init(self, cr, context=None):
        """ Installation hook: aliases, partner following themselves """
        # create aliases for all users and avoid constraint errors
        return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(res_users, self)._auto_init,
            self._name, self._columns['alias_id'], 'login', alias_force_key='id', context=context)
    def create(self, cr, uid, data, context=None):
        """Create a user: enforce a login, create its mail alias, and post a
        welcome message on the related partner."""
        if not data.get('login', False):
            model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'action_res_users')
            msg = _("You cannot create a new user from here.\n To create new user please go to configuration panel.")
            raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
        if context is None:
            context = {}
        create_context = dict(context, alias_model_name=self._name, alias_parent_model_name=self._name)
        user_id = super(res_users, self).create(cr, uid, data, context=create_context)
        user = self.browse(cr, uid, user_id, context=context)
        # Point the alias back at the newly created user record.
        self.pool.get('mail.alias').write(cr, SUPERUSER_ID, [user.alias_id.id], {"alias_force_thread_id": user_id, "alias_parent_thread_id": user_id}, context)
        # create a welcome message
        self._create_welcome_message(cr, uid, user, context=context)
        return user_id
    def copy_data(self, *args, **kwargs):
        """When duplicating a user, reuse the (copied) login as alias name."""
        data = super(res_users, self).copy_data(*args, **kwargs)
        if data and data.get('alias_name'):
            data['alias_name'] = data['login']
        return data
    def _create_welcome_message(self, cr, uid, user, context=None):
        """Post a '<user> has joined' message on the user's partner, for
        employees (base.group_user) only."""
        if not self.has_group(cr, uid, 'base.group_user'):
            return False
        company_name = user.company_id.name if user.company_id else ''
        body = _('%s has joined the %s network.') % (user.name, company_name)
        # TODO change SUPERUSER_ID into user.id but catch errors
        return self.pool.get('res.partner').message_post(cr, SUPERUSER_ID, [user.partner_id.id],
            body=body, context=context)
    def unlink(self, cr, uid, ids, context=None):
        # Cascade-delete mail aliases as well, as they should not exist without the user.
        alias_pool = self.pool.get('mail.alias')
        alias_ids = [user.alias_id.id for user in self.browse(cr, uid, ids, context=context) if user.alias_id]
        res = super(res_users, self).unlink(cr, uid, ids, context=context)
        alias_pool.unlink(cr, uid, alias_ids, context=context)
        return res
    def _message_post_get_pid(self, cr, uid, thread_id, context=None):
        """Return the partner id behind the user given as thread_id."""
        assert thread_id, "res.users does not support posting global messages"
        if context and 'thread_model' in context:
            context['thread_model'] = 'res.users'
        if isinstance(thread_id, (list, tuple)):
            thread_id = thread_id[0]
        return self.browse(cr, SUPERUSER_ID, thread_id).partner_id.id
    @api.cr_uid_ids_context
    def message_post(self, cr, uid, thread_id, context=None, **kwargs):
        """ Redirect the posting of message on res.users as a private discussion.
            This is done because when giving the context of Chatter on the
            various mailboxes, we do not have access to the current partner_id. """
        if isinstance(thread_id, (list, tuple)):
            thread_id = thread_id[0]
        current_pids = []
        partner_ids = kwargs.get('partner_ids', [])
        user_pid = self._message_post_get_pid(cr, uid, thread_id, context=context)
        # Collect partner ids already present, whatever the command format:
        # (4, id) link, (6, 0, [ids]) replace, or a bare integer id.
        for partner_id in partner_ids:
            if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
                current_pids.append(partner_id[1])
            elif isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
                current_pids.append(partner_id[2])
            elif isinstance(partner_id, (int, long)):
                current_pids.append(partner_id)
        # Make sure the target user's partner is among the recipients.
        if user_pid not in current_pids:
            partner_ids.append(user_pid)
        kwargs['partner_ids'] = partner_ids
        if context and context.get('thread_model') == 'res.partner':
            return self.pool['res.partner'].message_post(cr, uid, user_pid, **kwargs)
        return self.pool['mail.thread'].message_post(cr, uid, uid, **kwargs)
    def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
        # Intentional no-op override: incoming mail updates do not apply to users.
        return True
    def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
        # Intentional no-op override: users are not followable threads.
        return True
    def message_get_partner_info_from_emails(self, cr, uid, emails, link_mail=False, context=None):
        # Delegate email-to-partner resolution to the generic mail.thread logic.
        return self.pool.get('mail.thread').message_get_partner_info_from_emails(cr, uid, emails, link_mail=link_mail, context=context)
    def message_get_suggested_recipients(self, cr, uid, ids, context=None):
        # No suggested recipients for users: one empty list per record.
        return dict((res_id, list()) for res_id in ids)
    def stop_showing_groups_suggestions(self, cr, uid, user_id, context=None):
        """Update display_groups_suggestions value to False"""
        if context is None:
            context = {}
        self.write(cr, uid, user_id, {"display_groups_suggestions": False}, context)
class res_users_mail_group(osv.Model):
    """ Update of res.users class
        - if adding groups to an user, check mail.groups linked to this user
          group, and the user. This is done by overriding the write method.
    """
    _name = 'res.users'
    _inherit = ['res.users']
    # FP Note: to improve, post processing may be better ?
    def write(self, cr, uid, ids, vals, context=None):
        """On group assignment, auto-subscribe the users to the mail.groups
        attached to their new res.groups."""
        write_res = super(res_users_mail_group, self).write(cr, uid, ids, vals, context=context)
        if vals.get('groups_id'):
            # form: {'group_ids': [(3, 10), (3, 3), (4, 10), (4, 3)]} or {'group_ids': [(6, 0, [ids]}
            # (4, id) adds a single group; (6, 0, [ids]) replaces the set.
            user_group_ids = [command[1] for command in vals['groups_id'] if command[0] == 4]
            user_group_ids += [id for command in vals['groups_id'] if command[0] == 6 for id in command[2]]
            mail_group_obj = self.pool.get('mail.group')
            mail_group_ids = mail_group_obj.search(cr, uid, [('group_ids', 'in', user_group_ids)], context=context)
            mail_group_obj.message_subscribe_users(cr, uid, mail_group_ids, ids, context=context)
        return write_res
class res_groups_mail_group(osv.Model):
    """ Update of res.groups class
        - if adding users from a group, check mail.groups linked to this user
          group and subscribe them. This is done by overriding the write method.
    """
    _name = 'res.groups'
    _inherit = 'res.groups'
    # FP Note: to improve, post processing, after the super may be better
    def write(self, cr, uid, ids, vals, context=None):
        """On user assignment to a group, auto-subscribe those users to the
        mail.groups attached to the modified res.groups."""
        write_res = super(res_groups_mail_group, self).write(cr, uid, ids, vals, context=context)
        if vals.get('users'):
            # form: {'group_ids': [(3, 10), (3, 3), (4, 10), (4, 3)]} or {'group_ids': [(6, 0, [ids]}
            # (4, id) adds a single user; (6, 0, [ids]) replaces the set.
            user_ids = [command[1] for command in vals['users'] if command[0] == 4]
            user_ids += [id for command in vals['users'] if command[0] == 6 for id in command[2]]
            mail_group_obj = self.pool.get('mail.group')
            mail_group_ids = mail_group_obj.search(cr, uid, [('group_ids', 'in', ids)], context=context)
            mail_group_obj.message_subscribe_users(cr, uid, mail_group_ids, user_ids, context=context)
        return write_res
| agpl-3.0 |
doduytrung/odoo-8.0 | addons/hw_escpos/escpos/constants.py | 65 | 7409 | # -*- coding: utf-8 -*-
""" ESC/POS Commands (Constants) """
# Feed control sequences
CTL_LF = '\x0a' # Print and line feed
CTL_FF = '\x0c' # Form feed
CTL_CR = '\x0d' # Carriage return
CTL_HT = '\x09' # Horizontal tab
CTL_VT = '\x0b' # Vertical tab
# RT Status commands
DLE_EOT_PRINTER = '\x10\x04\x01' # Transmit printer status
DLE_EOT_OFFLINE = '\x10\x04\x02'
DLE_EOT_ERROR = '\x10\x04\x03'
DLE_EOT_PAPER = '\x10\x04\x04'
# Printer hardware
HW_INIT = '\x1b\x40' # Clear data in buffer and reset modes
HW_SELECT = '\x1b\x3d\x01' # Printer select
HW_RESET = '\x1b\x3f\x0a\x00' # Reset printer hardware
# Cash Drawer
CD_KICK_2 = '\x1b\x70\x00' # Sends a pulse to pin 2 []
CD_KICK_5 = '\x1b\x70\x01' # Sends a pulse to pin 5 []
# Paper
PAPER_FULL_CUT = '\x1d\x56\x00' # Full cut paper
PAPER_PART_CUT = '\x1d\x56\x01' # Partial cut paper
# Text format
TXT_NORMAL = '\x1b\x21\x00' # Normal text
TXT_2HEIGHT = '\x1b\x21\x10' # Double height text
TXT_2WIDTH = '\x1b\x21\x20' # Double width text
TXT_DOUBLE = '\x1b\x21\x30' # Double height & Width
TXT_UNDERL_OFF = '\x1b\x2d\x00' # Underline font OFF
TXT_UNDERL_ON = '\x1b\x2d\x01' # Underline font 1-dot ON
TXT_UNDERL2_ON = '\x1b\x2d\x02' # Underline font 2-dot ON
TXT_BOLD_OFF = '\x1b\x45\x00' # Bold font OFF
TXT_BOLD_ON = '\x1b\x45\x01' # Bold font ON
TXT_FONT_A = '\x1b\x4d\x00' # Font type A
TXT_FONT_B = '\x1b\x4d\x01' # Font type B
TXT_ALIGN_LT = '\x1b\x61\x00' # Left justification
TXT_ALIGN_CT = '\x1b\x61\x01' # Centering
TXT_ALIGN_RT = '\x1b\x61\x02' # Right justification
TXT_COLOR_BLACK = '\x1b\x72\x00' # Default Color
TXT_COLOR_RED = '\x1b\x72\x01' # Alternative Color ( Usually Red )
# Text Encoding
TXT_ENC_PC437 = '\x1b\x74\x00' # PC437 USA
TXT_ENC_KATAKANA= '\x1b\x74\x01' # KATAKANA (JAPAN)
TXT_ENC_PC850 = '\x1b\x74\x02' # PC850 Multilingual
TXT_ENC_PC860 = '\x1b\x74\x03' # PC860 Portuguese
TXT_ENC_PC863 = '\x1b\x74\x04' # PC863 Canadian-French
TXT_ENC_PC865 = '\x1b\x74\x05' # PC865 Nordic
TXT_ENC_KANJI6 = '\x1b\x74\x06' # One-pass Kanji, Hiragana
TXT_ENC_KANJI7 = '\x1b\x74\x07' # One-pass Kanji
TXT_ENC_KANJI8 = '\x1b\x74\x08' # One-pass Kanji
TXT_ENC_PC851 = '\x1b\x74\x0b' # PC851 Greek
TXT_ENC_PC853 = '\x1b\x74\x0c' # PC853 Turkish
TXT_ENC_PC857 = '\x1b\x74\x0d' # PC857 Turkish
TXT_ENC_PC737 = '\x1b\x74\x0e' # PC737 Greek
TXT_ENC_8859_7 = '\x1b\x74\x0f' # ISO8859-7 Greek
TXT_ENC_WPC1252 = '\x1b\x74\x10' # WPC1252
TXT_ENC_PC866 = '\x1b\x74\x11' # PC866 Cyrillic #2
TXT_ENC_PC852 = '\x1b\x74\x12' # PC852 Latin2
TXT_ENC_PC858 = '\x1b\x74\x13' # PC858 Euro
TXT_ENC_KU42 = '\x1b\x74\x14' # KU42 Thai
TXT_ENC_TIS11 = '\x1b\x74\x15' # TIS11 Thai
TXT_ENC_TIS18 = '\x1b\x74\x1a' # TIS18 Thai
TXT_ENC_TCVN3 = '\x1b\x74\x1e' # TCVN3 Vietnamese
TXT_ENC_TCVN3B = '\x1b\x74\x1f' # TCVN3 Vietnamese
TXT_ENC_PC720 = '\x1b\x74\x20' # PC720 Arabic
TXT_ENC_WPC775 = '\x1b\x74\x21' # WPC775 Baltic Rim
TXT_ENC_PC855 = '\x1b\x74\x22' # PC855 Cyrillic
TXT_ENC_PC861 = '\x1b\x74\x23' # PC861 Icelandic
TXT_ENC_PC862 = '\x1b\x74\x24' # PC862 Hebrew
TXT_ENC_PC864 = '\x1b\x74\x25' # PC864 Arabic
TXT_ENC_PC869 = '\x1b\x74\x26' # PC869 Greek
TXT_ENC_8859_2 = '\x1b\x74\x27' # ISO8859-2 Latin2
TXT_ENC_8859_9 = '\x1b\x74\x28' # ISO8859-2 Latin9
TXT_ENC_PC1098 = '\x1b\x74\x29' # PC1098 Farsi
TXT_ENC_PC1118 = '\x1b\x74\x2a' # PC1118 Lithuanian
TXT_ENC_PC1119 = '\x1b\x74\x2b' # PC1119 Lithuanian
TXT_ENC_PC1125 = '\x1b\x74\x2c' # PC1125 Ukrainian
TXT_ENC_WPC1250 = '\x1b\x74\x2d' # WPC1250 Latin2
TXT_ENC_WPC1251 = '\x1b\x74\x2e' # WPC1251 Cyrillic
TXT_ENC_WPC1253 = '\x1b\x74\x2f' # WPC1253 Greek
TXT_ENC_WPC1254 = '\x1b\x74\x30' # WPC1254 Turkish
TXT_ENC_WPC1255 = '\x1b\x74\x31' # WPC1255 Hebrew
TXT_ENC_WPC1256 = '\x1b\x74\x32' # WPC1256 Arabic
TXT_ENC_WPC1257 = '\x1b\x74\x33' # WPC1257 Baltic Rim
TXT_ENC_WPC1258 = '\x1b\x74\x34' # WPC1258 Vietnamese
TXT_ENC_KZ1048 = '\x1b\x74\x35' # KZ-1048 Kazakhstan
TXT_ENC_KATAKANA_MAP = {
# Maps UTF-8 Katakana symbols to KATAKANA Page Codes
# Half-Width Katakanas
'\xef\xbd\xa1':'\xa1', # 。
'\xef\xbd\xa2':'\xa2', # 「
'\xef\xbd\xa3':'\xa3', # 」
'\xef\xbd\xa4':'\xa4', # 、
'\xef\xbd\xa5':'\xa5', # ・
'\xef\xbd\xa6':'\xa6', # ヲ
'\xef\xbd\xa7':'\xa7', # ァ
'\xef\xbd\xa8':'\xa8', # ィ
'\xef\xbd\xa9':'\xa9', # ゥ
'\xef\xbd\xaa':'\xaa', # ェ
'\xef\xbd\xab':'\xab', # ォ
'\xef\xbd\xac':'\xac', # ャ
'\xef\xbd\xad':'\xad', # ュ
'\xef\xbd\xae':'\xae', # ョ
'\xef\xbd\xaf':'\xaf', # ッ
'\xef\xbd\xb0':'\xb0', # ー
'\xef\xbd\xb1':'\xb1', # ア
'\xef\xbd\xb2':'\xb2', # イ
'\xef\xbd\xb3':'\xb3', # ウ
'\xef\xbd\xb4':'\xb4', # エ
'\xef\xbd\xb5':'\xb5', # オ
'\xef\xbd\xb6':'\xb6', # カ
'\xef\xbd\xb7':'\xb7', # キ
'\xef\xbd\xb8':'\xb8', # ク
'\xef\xbd\xb9':'\xb9', # ケ
'\xef\xbd\xba':'\xba', # コ
'\xef\xbd\xbb':'\xbb', # サ
'\xef\xbd\xbc':'\xbc', # シ
'\xef\xbd\xbd':'\xbd', # ス
'\xef\xbd\xbe':'\xbe', # セ
'\xef\xbd\xbf':'\xbf', # ソ
'\xef\xbe\x80':'\xc0', # タ
'\xef\xbe\x81':'\xc1', # チ
'\xef\xbe\x82':'\xc2', # ツ
'\xef\xbe\x83':'\xc3', # テ
'\xef\xbe\x84':'\xc4', # ト
'\xef\xbe\x85':'\xc5', # ナ
'\xef\xbe\x86':'\xc6', # ニ
'\xef\xbe\x87':'\xc7', # ヌ
'\xef\xbe\x88':'\xc8', # ネ
'\xef\xbe\x89':'\xc9', # ノ
'\xef\xbe\x8a':'\xca', # ハ
'\xef\xbe\x8b':'\xcb', # ヒ
'\xef\xbe\x8c':'\xcc', # フ
'\xef\xbe\x8d':'\xcd', # ヘ
'\xef\xbe\x8e':'\xce', # ホ
'\xef\xbe\x8f':'\xcf', # マ
'\xef\xbe\x90':'\xd0', # ミ
'\xef\xbe\x91':'\xd1', # ム
'\xef\xbe\x92':'\xd2', # メ
'\xef\xbe\x93':'\xd3', # モ
'\xef\xbe\x94':'\xd4', # ヤ
'\xef\xbe\x95':'\xd5', # ユ
'\xef\xbe\x96':'\xd6', # ヨ
'\xef\xbe\x97':'\xd7', # ラ
'\xef\xbe\x98':'\xd8', # リ
'\xef\xbe\x99':'\xd9', # ル
'\xef\xbe\x9a':'\xda', # レ
'\xef\xbe\x9b':'\xdb', # ロ
'\xef\xbe\x9c':'\xdc', # ワ
'\xef\xbe\x9d':'\xdd', # ン
'\xef\xbe\x9e':'\xde', # ゙
'\xef\xbe\x9f':'\xdf', # ゚
}
# Barcode format
BARCODE_TXT_OFF = '\x1d\x48\x00' # HRI barcode chars OFF
BARCODE_TXT_ABV = '\x1d\x48\x01' # HRI barcode chars above
BARCODE_TXT_BLW = '\x1d\x48\x02' # HRI barcode chars below
BARCODE_TXT_BTH = '\x1d\x48\x03' # HRI barcode chars both above and below
BARCODE_FONT_A = '\x1d\x66\x00' # Font type A for HRI barcode chars
BARCODE_FONT_B = '\x1d\x66\x01' # Font type B for HRI barcode chars
BARCODE_HEIGHT = '\x1d\x68\x64' # Barcode Height [1-255]
BARCODE_WIDTH = '\x1d\x77\x03' # Barcode Width [2-6]
BARCODE_UPC_A = '\x1d\x6b\x00' # Barcode type UPC-A
BARCODE_UPC_E = '\x1d\x6b\x01' # Barcode type UPC-E
BARCODE_EAN13 = '\x1d\x6b\x02' # Barcode type EAN13
BARCODE_EAN8 = '\x1d\x6b\x03' # Barcode type EAN8
BARCODE_CODE39 = '\x1d\x6b\x04' # Barcode type CODE39
BARCODE_ITF = '\x1d\x6b\x05' # Barcode type ITF
BARCODE_NW7 = '\x1d\x6b\x06' # Barcode type NW7
# Image format
S_RASTER_N = '\x1d\x76\x30\x00' # Set raster image normal size
S_RASTER_2W = '\x1d\x76\x30\x01' # Set raster image double width
S_RASTER_2H = '\x1d\x76\x30\x02' # Set raster image double height
S_RASTER_Q = '\x1d\x76\x30\x03' # Set raster image quadruple
| agpl-3.0 |
agx/linux-wpan-next | scripts/gdb/linux/modules.py | 774 | 2718 | #
# gdb helper commands and functions for Linux kernel debugging
#
# module tools
#
# Copyright (c) Siemens AG, 2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import cpus, utils
module_type = utils.CachedType("struct module")
def module_list():
    """Yield a pointer to each currently loaded kernel module.

    Walks the kernel's global ``modules`` list head in the inferior and
    converts each list entry back to its containing ``struct module``.
    """
    global module_type
    module_ptr_type = module_type.get_type().pointer()
    modules = gdb.parse_and_eval("modules")
    entry = modules['next']
    end_of_list = modules.address
    while entry != end_of_list:
        yield utils.container_of(entry, module_ptr_type, "list")
        entry = entry['next']
def find_module_by_name(name):
    """Return the loaded module whose name equals *name*, or None."""
    matches = (mod for mod in module_list()
               if mod['name'].string() == name)
    return next(matches, None)
class LxModule(gdb.Function):
    """Find module by name and return the module variable.
$lx_module("MODULE"): Given the name MODULE, iterate over all loaded modules
of the target and return that module variable which MODULE matches."""
    def __init__(self):
        # Registers the convenience function as $lx_module in gdb.
        super(LxModule, self).__init__("lx_module")
    def invoke(self, mod_name):
        """Called by gdb when $lx_module(...) is evaluated; raises GdbError
        if no loaded module matches."""
        mod_name = mod_name.string()
        module = find_module_by_name(mod_name)
        if module:
            return module.dereference()
        else:
            raise gdb.GdbError("Unable to find MODULE " + mod_name)
LxModule()
class LxLsmod(gdb.Command):
    """List currently loaded modules."""
    _module_use_type = utils.CachedType("struct module_use")
    def __init__(self):
        # Registers the command as 'lx-lsmod' in gdb.
        super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)
    def invoke(self, arg, from_tty):
        """Print one line per module: address, name, size and dependent
        modules (mirrors the format of /proc/modules + lsmod)."""
        gdb.write(
            "Address{0}    Module                  Size  Used by\n".format(
                "        " if utils.get_long_type().sizeof == 8 else ""))
        for module in module_list():
            gdb.write("{address} {name:<19} {size:>8}  {ref}".format(
                address=str(module['module_core']).split()[0],
                name=module['name'].string(),
                size=str(module['core_size']),
                ref=str(module['refcnt']['counter'])))
            # Walk this module's source_list to print who depends on it.
            source_list = module['source_list']
            t = self._module_use_type.get_type().pointer()
            entry = source_list['next']
            first = True
            while entry != source_list.address:
                use = utils.container_of(entry, t, "source_list")
                gdb.write("{separator}{name}".format(
                    separator=" " if first else ",",
                    name=use['source']['name'].string()))
                first = False
                entry = entry['next']
            gdb.write("\n")
LxLsmod()
| gpl-2.0 |
XiaosongWei/blink-crosswalk | Tools/Scripts/webkitpy/style/optparser_unittest.py | 48 | 11027 | # Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for parser.py."""
import unittest
from webkitpy.common.system.logtesting import LoggingTestCase
from webkitpy.style.optparser import ArgumentParser
from webkitpy.style.optparser import ArgumentPrinter
from webkitpy.style.optparser import CommandOptionValues as ProcessorOptions
from webkitpy.style.optparser import DefaultCommandOptionValues
class ArgumentPrinterTest(unittest.TestCase):
    """Tests the ArgumentPrinter class."""
    _printer = ArgumentPrinter()
    def _create_options(self,
                        output_format='emacs',
                        min_confidence=3,
                        filter_rules=None,
                        git_commit=None):
        """Return a ProcessorOptions instance built from keyword overrides."""
        # Use a None sentinel instead of a mutable default argument ([]):
        # a shared default list could leak state between calls if it were
        # ever mutated. Behavior is unchanged for all existing callers.
        if filter_rules is None:
            filter_rules = []
        return ProcessorOptions(filter_rules=filter_rules,
                                git_commit=git_commit,
                                min_confidence=min_confidence,
                                output_format=output_format)
    def test_to_flag_string(self):
        options = self._create_options('vs7', 5, ['+foo', '-bar'], 'git')
        self.assertEqual('--filter=+foo,-bar --git-commit=git '
                         '--min-confidence=5 --output=vs7',
                         self._printer.to_flag_string(options))
        # This is to check that --filter and --git-commit do not
        # show up when not user-specified.
        options = self._create_options()
        self.assertEqual('--min-confidence=3 --output=emacs',
                         self._printer.to_flag_string(options))
class ArgumentParserTest(LoggingTestCase):
"""Test the ArgumentParser class."""
class _MockStdErr(object):
def write(self, message):
# We do not want the usage string or style categories
# to print during unit tests, so print nothing.
return
def _parse(self, args):
"""Call a test parser.parse()."""
parser = self._create_parser()
return parser.parse(args)
def _create_defaults(self):
"""Return a DefaultCommandOptionValues instance for testing."""
base_filter_rules = ["-", "+whitespace"]
return DefaultCommandOptionValues(min_confidence=3,
output_format="vs7")
def _create_parser(self):
"""Return an ArgumentParser instance for testing."""
default_options = self._create_defaults()
all_categories = ["build" ,"whitespace"]
mock_stderr = self._MockStdErr()
return ArgumentParser(all_categories=all_categories,
base_filter_rules=[],
default_options=default_options,
mock_stderr=mock_stderr,
usage="test usage")
def test_parse_documentation(self):
parse = self._parse
# FIXME: Test both the printing of the usage string and the
# filter categories help.
# Request the usage string.
self.assertRaises(SystemExit, parse, ['--help'])
# Request default filter rules and available style categories.
self.assertRaises(SystemExit, parse, ['--filter='])
def test_parse_bad_values(self):
parse = self._parse
# Pass an unsupported argument.
self.assertRaises(SystemExit, parse, ['--bad'])
self.assertLog(['ERROR: no such option: --bad\n'])
self.assertRaises(SystemExit, parse, ['--min-confidence=bad'])
self.assertLog(['ERROR: option --min-confidence: '
"invalid integer value: 'bad'\n"])
self.assertRaises(SystemExit, parse, ['--min-confidence=0'])
self.assertLog(['ERROR: option --min-confidence: invalid integer: 0: '
'value must be between 1 and 5\n'])
self.assertRaises(SystemExit, parse, ['--min-confidence=6'])
self.assertLog(['ERROR: option --min-confidence: invalid integer: 6: '
'value must be between 1 and 5\n'])
parse(['--min-confidence=1']) # works
parse(['--min-confidence=5']) # works
self.assertRaises(SystemExit, parse, ['--output=bad'])
self.assertLog(['ERROR: option --output-format: invalid choice: '
"'bad' (choose from 'emacs', 'vs7')\n"])
parse(['--output=vs7']) # works
# Pass a filter rule not beginning with + or -.
self.assertRaises(SystemExit, parse, ['--filter=build'])
self.assertLog(['ERROR: Invalid filter rule "build": '
'every rule must start with + or -.\n'])
parse(['--filter=+build']) # works
def test_parse_default_arguments(self):
parse = self._parse
(files, options) = parse([])
self.assertEqual(files, [])
self.assertEqual(options.filter_rules, [])
self.assertIsNone(options.git_commit)
self.assertFalse(options.diff_files)
self.assertFalse(options.is_verbose)
self.assertEqual(options.min_confidence, 3)
self.assertEqual(options.output_format, 'vs7')
def test_parse_explicit_arguments(self):
    """Each explicitly passed option must override its default value."""
    parse = self._parse

    # Pass non-default explicit values.
    (files, options) = parse(['--min-confidence=4'])
    self.assertEqual(options.min_confidence, 4)
    (files, options) = parse(['--output=emacs'])
    self.assertEqual(options.output_format, 'emacs')
    # -g, --git-commit and --git-diff all populate options.git_commit.
    (files, options) = parse(['-g', 'commit'])
    self.assertEqual(options.git_commit, 'commit')
    (files, options) = parse(['--git-commit=commit'])
    self.assertEqual(options.git_commit, 'commit')
    (files, options) = parse(['--git-diff=commit'])
    self.assertEqual(options.git_commit, 'commit')
    (files, options) = parse(['--verbose'])
    self.assertTrue(options.is_verbose)
    (files, options) = parse(['--diff-files', 'file.txt'])
    self.assertTrue(options.diff_files)

    # Pass user_rules.
    (files, options) = parse(['--filter=+build,-whitespace'])
    self.assertEqual(options.filter_rules,
                     ["+build", "-whitespace"])

    # Pass spurious white space in user rules.
    (files, options) = parse(['--filter=+build, -whitespace'])
    self.assertEqual(options.filter_rules,
                     ["+build", "-whitespace"])
def test_parse_files(self):
    """Positional path arguments are returned verbatim and in order."""
    files, _ = self._parse(['foo.cpp'])
    self.assertEqual(['foo.cpp'], files)

    # Multiple paths survive alongside an option.
    files, _ = self._parse(['--output=emacs', 'foo.cpp', 'bar.cpp'])
    self.assertEqual(['foo.cpp', 'bar.cpp'], files)
class CommandOptionValuesTest(unittest.TestCase):
    """Tests CommandOptionValues (ProcessorOptions) construction and equality."""

    def test_init(self):
        """Test __init__ constructor: defaults, validation, and attributes."""
        # Check default parameters.
        options = ProcessorOptions()
        self.assertEqual(options.filter_rules, [])
        self.assertIsNone(options.git_commit)
        self.assertFalse(options.is_verbose)
        self.assertEqual(options.min_confidence, 1)
        self.assertEqual(options.output_format, "emacs")

        # Check argument validation.
        self.assertRaises(ValueError, ProcessorOptions, output_format="bad")
        ProcessorOptions(output_format="emacs")  # No ValueError: works
        ProcessorOptions(output_format="vs7")  # works
        self.assertRaises(ValueError, ProcessorOptions, min_confidence=0)
        self.assertRaises(ValueError, ProcessorOptions, min_confidence=6)
        ProcessorOptions(min_confidence=1)  # works
        ProcessorOptions(min_confidence=5)  # works

        # Check attributes.
        options = ProcessorOptions(filter_rules=["+"],
                                   git_commit="commit",
                                   is_verbose=True,
                                   min_confidence=3,
                                   output_format="vs7")
        self.assertEqual(options.filter_rules, ["+"])
        self.assertEqual(options.git_commit, "commit")
        self.assertTrue(options.is_verbose)
        self.assertEqual(options.min_confidence, 3)
        self.assertEqual(options.output_format, "vs7")

    def test_eq(self):
        """Test __eq__ equality function.

        __eq__ is invoked directly (rather than via assertEqual) so the
        test exercises exactly the method under test.
        """
        self.assertTrue(ProcessorOptions().__eq__(ProcessorOptions()))

        # Also verify that a difference in any argument causes equality to fail.
        # Explicitly create a ProcessorOptions instance with all default
        # values. We do this to be sure we are assuming the right default
        # values in our self.assertFalse() calls below.
        options = ProcessorOptions(filter_rules=[],
                                   git_commit=None,
                                   is_verbose=False,
                                   min_confidence=1,
                                   output_format="emacs")
        # Verify that we created options correctly.
        self.assertTrue(options.__eq__(ProcessorOptions()))

        self.assertFalse(options.__eq__(ProcessorOptions(filter_rules=["+"])))
        self.assertFalse(options.__eq__(ProcessorOptions(git_commit="commit")))
        self.assertFalse(options.__eq__(ProcessorOptions(is_verbose=True)))
        self.assertFalse(options.__eq__(ProcessorOptions(min_confidence=2)))
        self.assertFalse(options.__eq__(ProcessorOptions(output_format="vs7")))

    def test_ne(self):
        """Test __ne__ inequality function."""
        # By default, __ne__ always returns true on different objects.
        # Thus, just check the distinguishing case to verify that the
        # code defines __ne__.
        self.assertFalse(ProcessorOptions().__ne__(ProcessorOptions()))
| bsd-3-clause |
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/compat/tests/__init__.py | 128 | 1267 | # (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
This module contains things that are only needed for compat in the testsuites,
not in ansible itself. If you are not installing the test suite, you can
safely remove this subdirectory.
'''
#
# Compat for python2.7
#
# One unittest needs to import builtins via __import__() so we need to have
# the string that represents it
try:
    # Python 2 keeps builtins in the '__builtin__' module.
    import __builtin__
except ImportError:
    # Python 3: the module was renamed, so expose the new name.
    BUILTINS = 'builtins'
else:
    # Python 2: the import succeeded; keep the legacy module name.
    BUILTINS = '__builtin__'
| bsd-3-clause |
pabloborrego93/edx-platform | cms/djangoapps/contentstore/management/commands/force_publish.py | 61 | 3385 | """
Script for force publishing a course
"""
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from .prompt import query_yes_no
from .utils import get_course_versions
# To run from command line: ./manage.py cms force_publish course-v1:org+course+run
class Command(BaseCommand):
    """Force publish a course"""
    help = '''
    Force publish a course. Takes two arguments:
    <course_id>: the course id of the course you want to publish forcefully
    --commit: do the force publish

    If you do not specify '--commit', the command will print out what changes would be made.
    '''

    def add_arguments(self, parser):
        # Positional course id plus an opt-in flag; without --commit the
        # command is a dry run that only reports what would change.
        parser.add_argument('course_key', help="ID of the Course to force publish")
        parser.add_argument('--commit', action='store_true', help="Pull updated metadata from external IDPs")

    def handle(self, *args, **options):
        """Execute the command"""
        try:
            course_key = CourseKey.from_string(options['course_key'])
        except InvalidKeyError:
            raise CommandError("Invalid course key.")

        if not modulestore().get_course(course_key):
            raise CommandError("Course not found.")

        # for now only support on split mongo
        owning_store = modulestore()._get_modulestore_for_courselike(course_key)  # pylint: disable=protected-access
        if hasattr(owning_store, 'force_publish_course'):
            versions = get_course_versions(options['course_key'])
            print "Course versions : {0}".format(versions)

            if options['commit']:
                # Interactive confirmation before mutating course data.
                if query_yes_no("Are you sure to publish the {0} course forcefully?".format(course_key), default="no"):
                    # publish course forcefully
                    updated_versions = owning_store.force_publish_course(
                        course_key, ModuleStoreEnum.UserID.mgmt_command, options['commit']
                    )
                    if updated_versions:
                        # if publish and draft were different
                        if versions['published-branch'] != versions['draft-branch']:
                            print "Success! Published the course '{0}' forcefully.".format(course_key)
                            print "Updated course versions : \n{0}".format(updated_versions)
                        else:
                            print "Course '{0}' is already in published state.".format(course_key)
                    else:
                        print "Error! Could not publish course {0}.".format(course_key)
            else:
                # Dry-run path: report without touching data.
                # if publish and draft were different
                if versions['published-branch'] != versions['draft-branch']:
                    print "Dry run. Following would have been changed : "
                    print "Published branch version {0} changed to draft branch version {1}".format(
                        versions['published-branch'], versions['draft-branch']
                    )
                else:
                    print "Dry run. Course '{0}' is already in published state.".format(course_key)
        else:
            raise CommandError("The owning modulestore does not support this command.")
| agpl-3.0 |
cosmiclattes/TPBviz | torrent/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/version.py | 331 | 1502 | """
Extracts the version of the PostgreSQL server.
"""
import re
# This reg-exp is intentionally fairly flexible here.
# Needs to be able to handle stuff like:
# PostgreSQL 8.3.6
# EnterpriseDB 8.3
# PostgreSQL 8.3 beta4
# PostgreSQL 8.4beta1
VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?')
def _parse_version(text):
"Internal parsing method. Factored out for testing purposes."
major, major2, minor = VERSION_RE.search(text).groups()
try:
return int(major) * 10000 + int(major2) * 100 + int(minor)
except (ValueError, TypeError):
return int(major) * 10000 + int(major2) * 100
def get_version(connection):
    """
    Returns an integer representing the major, minor and revision number of the
    server. Format is the one used for the return value of libpq
    PQServerVersion()/``server_version`` connection attribute (available in
    newer psycopg2 versions.)

    For example, 80304 for 8.3.4. The last two digits will be 00 in the case of
    releases (e.g., 80400 for 'PostgreSQL 8.4') or in the case of beta and
    prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2').

    PQServerVersion()/``server_version`` doesn't execute a query so try that
    first, then fallback to a ``SELECT version()`` query.
    """
    if hasattr(connection, 'server_version'):
        return connection.server_version
    else:
        # Fallback: ask the server directly.  Close the cursor explicitly so
        # the fallback path does not leak a server-side resource.
        cursor = connection.cursor()
        try:
            cursor.execute("SELECT version()")
            return _parse_version(cursor.fetchone()[0])
        finally:
            cursor.close()
| gpl-3.0 |
stuffandthings/linux | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
    """Perf entry hook: announce the start and create the result tables."""
    print "In trace_begin:\n"
    #
    # Will create several tables at the start, pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general event.
    #
    con.execute("""
        create table if not exists gen_events (
            name text,
            symbol text,
            comm text,
            dso text
        );""")
    con.execute("""
        create table if not exists pebs_ll (
            name text,
            symbol text,
            comm text,
            dso text,
            flags integer,
            ip integer,
            status integer,
            dse integer,
            dla integer,
            lat integer
        );""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
    """Perf per-sample callback: build an event object and store it in the DB.

    Symbol and dso info are not always resolved by perf, so placeholder
    names are used when the keys are absent.
    """
    event_attr = param_dict["attr"]
    sample = param_dict["sample"]
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]

    # dict.has_key() was removed in Python 3; dict.get() with a default is
    # equivalent here and works on both Python 2 and 3.
    dso = param_dict.get("dso", "Unknown_dso")
    symbol = param_dict.get("symbol", "Unknown_symbol")

    # Create the event object and insert it to the right table in database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)
def insert_db(event):
    """Insert *event* into the table matching its type (generic vs PEBS-LL)."""
    if event.ev_type == EVTYPE_GENERIC:
        con.execute("insert into gen_events values(?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso))
    elif event.ev_type == EVTYPE_PEBS_LL:
        # Mask off the top bit of ip/dla before storing -- presumably so the
        # 64-bit addresses fit SQLite's signed INTEGER; TODO confirm.
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso, event.flags,
                     event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
    """Perf exit hook: print both summaries, then close the database."""
    print "In trace_end:\n"
    # We show the basic info for the 2 type of event classes
    show_general_events()
    show_pebs_ll()
    con.close()
#
# As the event number may be very big, so we can't use linear way
# to show the histogram in real number, but use a log2 algorithm.
#
def num2sym(num):
    """Render *num* as a histogram bar on a log2 scale.

    Every positive count gets at least one '#'; each doubling adds one more.
    """
    bar_length = int(math.log(num, 2) + 1)
    return '#' * bar_length
def show_general_events():
    """Print log2 histograms of generic events grouped by comm/symbol/dso."""
    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print "There is %d records in gen_events table" % t[0]
        if t[0] == 0:
            return

    print "Statistics about the general events grouped by thread/symbol/dso: \n"

    # Group by thread
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by dso
    print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
    """Print log2 histograms of PEBS load-latency events grouped by
    comm/symbol/dse/latency."""
    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print "There is %d records in pebs_ll table" % t[0]
        if t[0] == 0:
            return

    print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"

    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
    for row in dseq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by latency (ascending, unlike the count-ordered groups above)
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
    for row in latq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
alvaroaleman/ansible | lib/ansible/modules/network/nxos/nxos_static_route.py | 12 | 14632 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_static_route
version_added: "2.2"
short_description: Manages static route configuration
description:
- Manages static route configuration
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
- If no vrf is supplied, vrf is set to default.
- If C(state=absent), the route will be removed, regardless of the
non-required parameters.
options:
prefix:
description:
- Destination prefix of static route.
required: true
next_hop:
description:
- Next hop address or interface of static route.
If interface, it must be the fully-qualified interface name.
required: true
vrf:
description:
- VRF for static route.
required: false
default: default
tag:
description:
- Route tag value (numeric).
required: false
default: null
route_name:
description:
- Name of the route. Used with the name parameter on the CLI.
required: false
default: null
pref:
description:
- Preference or administrative difference of route (range 1-255).
required: false
default: null
state:
description:
- Manage the state of the resource.
required: true
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_static_route:
prefix: "192.168.20.64/24"
next_hop: "3.3.3.3"
route_name: testing
pref: 100
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"next_hop": "3.3.3.3", "pref": "100",
"prefix": "192.168.20.64/24", "route_name": "testing",
"vrf": "default"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"next_hop": "3.3.3.3", "pref": "100",
"prefix": "192.168.20.0/24", "route_name": "testing",
"tag": null}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["ip route 192.168.20.0/24 3.3.3.3 name testing 100"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
import ansible.module_utils.nxos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine, dumps
from ansible.module_utils.network import NetworkModule
def to_list(val):
    """Normalize *val* to a list.

    Lists/tuples are copied into a new list, None becomes an empty list,
    and any other single value is wrapped in a one-element list.
    """
    if isinstance(val, (list, tuple)):
        return list(val)
    if val is None:
        return list()
    return [val]
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass with section-oriented helpers (lookup,
    expansion, and hierarchical add) used by the migration shim."""

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Return the item whose text AND full ancestry match *path*;
        # implicitly returns None when nothing matches.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of config line objects back to raw text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Return the config block at *path* as text, or an empty list
        # when the path does not exist.
        try:
            section = self.get_section_objects(path)
            if self._device_os == 'junos':
                return dumps(section, output='lines')
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Like get_section but returns the expanded object list; raises
        # ValueError when the path is absent.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)

    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """
        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)
        else:
            # Walk/create the parent chain one level at a time, indenting
            # each level by self.indent.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)
                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
def get_network_module(**kwargs):
    """Compatibility shim: use the legacy get_module() factory when the
    surrounding runtime defines it, otherwise build a NetworkModule.

    Relies on the NameError raised when get_module is not defined at all.
    """
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
    """Return the device configuration wrapped in CustomNetworkConfig.

    An explicitly supplied 'config' module param wins; otherwise the
    config is pulled from the module, falling back to the newer
    module.config API when module.get_config() is unavailable.
    NOTE: the include_defaults *argument* is unused; the module param
    of the same name is what controls the fallback call.
    """
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            contents = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=contents)
def load_config(module, candidate):
    """Diff *candidate* against the running config, push the missing
    commands (unless in check mode), optionally save, and return a
    result dict with 'changed' and 'updates' keys."""
    config = get_config(module)
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                module.configure(commands)
            except AttributeError:
                # Newer module objects expose config() instead of configure().
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
# END OF COMMON CODE
def invoke(name, *args, **kwargs):
    """Look up *name* in this module's globals and call it with the given
    arguments; silently return None when no such callable exists."""
    func = globals().get(name)
    if not func:
        return None
    return func(*args, **kwargs)
def state_present(module, candidate, prefix):
    """Add the config lines that create the static route to *candidate*.

    Routes in the default VRF are added at the top level; any other VRF
    nests the route under its 'vrf context' stanza.
    """
    commands = list()
    invoke('set_route', module, commands, prefix)
    if commands:
        if module.params['vrf'] == 'default':
            candidate.add(commands, parents=[])
        else:
            candidate.add(commands, parents=['vrf context {0}'.format(module.params['vrf'])])
def state_absent(module, candidate, prefix):
    """Add 'no ip route ...' removal commands to *candidate*, but only when
    the route actually exists in the current configuration."""
    netcfg = get_config(module)
    commands = list()
    parents = 'vrf context {0}'.format(module.params['vrf'])
    # set_route builds the positive command first; commands[0] is then used
    # as the lookup key before the negated command is prepended/added.
    invoke('set_route', module, commands, prefix)
    if module.params['vrf'] == 'default':
        config = netcfg.get_section(commands[0])
        if config:
            invoke('remove_route', module, commands, config, prefix)
            candidate.add(commands, parents=[])
    else:
        config = netcfg.get_section(parents)
        splitted_config = config.split('\n')
        # get_section returns indented lines; strip them before matching.
        splitted_config = map(str.strip, splitted_config)
        if commands[0] in splitted_config:
            invoke('remove_route', module, commands, config, prefix)
            candidate.add(commands, parents=[parents])
def fix_prefix_to_regex(prefix):
    """Escape the '.' and '/' characters so *prefix* can be embedded
    literally inside a regular expression."""
    return prefix.replace('.', r'\.').replace('/', r'\/')
def get_existing(module, prefix, warnings):
    """Parse the device config and return the current static-route settings
    for *prefix* as a dict; empty dict when the route (or VRF) is absent."""
    key_map = ['tag', 'pref', 'route_name', 'next_hop']
    netcfg = get_config(module)
    parents = 'vrf context {0}'.format(module.params['vrf'])
    prefix_to_regex = fix_prefix_to_regex(prefix)

    route_regex = ('.*ip\sroute\s{0}\s(?P<next_hop>\S+)(\sname\s(?P<route_name>\S+))?'
                   '(\stag\s(?P<tag>\d+))?(\s(?P<pref>\d+)).*'.format(prefix_to_regex))

    if module.params['vrf'] == 'default':
        config = str(netcfg)
    else:
        config = netcfg.get_section(parents)

    if config:
        try:
            match_route = re.match(route_regex, config, re.DOTALL)
            group_route = match_route.groupdict()

            # Normalise: make sure every expected key exists in the result.
            for key in key_map:
                if key not in group_route:
                    group_route[key] = ''
            group_route['prefix'] = prefix
            group_route['vrf'] = module.params['vrf']
        except (AttributeError, TypeError):
            # match_route is None -> the route does not exist in the config.
            group_route = {}
    else:
        group_route = {}
        msg = ("VRF {0} didn't exist.".format(module.params['vrf']))
        if msg not in warnings:
            warnings.append(msg)

    return group_route
def remove_route(module, commands, config, prefix):
    """Append the CLI command that deletes the static route for *prefix*."""
    next_hop = module.params['next_hop']
    commands.append('no ip route {0} {1}'.format(prefix, next_hop))
def set_route(module, commands, prefix):
    """Append the 'ip route' CLI command for *prefix*, including the
    optional name/tag/preference suffixes when they are supplied."""
    params = module.params
    parts = ['ip route {0} {1}'.format(prefix, params['next_hop'])]
    if params['route_name']:
        parts.append('name {0}'.format(params['route_name']))
    if params['tag']:
        parts.append('tag {0}'.format(params['tag']))
    if params['pref']:
        parts.append('{0}'.format(params['pref']))
    commands.append(' '.join(parts))
def get_dotted_mask(mask):
    """Convert a CIDR prefix length (0-32) to dotted-quad netmask notation.

    E.g. 24 -> '255.255.255.0'.  Uses range() instead of the Python-2-only
    xrange() so the helper works unchanged on Python 2 and 3.
    """
    bits = 0
    # Set the top `mask` bits of a 32-bit word.
    for i in range(32 - mask, 32):
        bits |= (1 << i)
    return "%d.%d.%d.%d" % ((bits & 0xff000000) >> 24,
                            (bits & 0xff0000) >> 16,
                            (bits & 0xff00) >> 8,
                            (bits & 0xff))
def get_network_start(address, netmask):
    """AND each of the four octets of *address* with *netmask*, returning
    the network start address as a list of decimal-string octets."""
    addr_octets = address.split('.')
    mask_octets = netmask.split('.')
    return [str(int(addr_octets[i]) & int(mask_octets[i])) for i in range(4)]
def network_from_string(address, mask, module):
    """Validate *address*/*mask* and return the dotted network start address.

    On any malformed octet or mask value, module.fail_json is called
    (which terminates the Ansible module run).
    """
    octects = address.split('.')

    if len(octects) > 4:
        module.fail_json(msg='Incorrect address format.', address=address)

    for octect in octects:
        try:
            # Each octet must be an integer in 0..255.
            if int(octect) < 0 or int(octect) > 255:
                module.fail_json(msg='Address may contain invalid values.',
                                 address=address)
        except ValueError:
            module.fail_json(msg='Address may contain non-integer values.',
                             address=address)

    try:
        # The prefix length must be an integer in 0..32.
        if int(mask) < 0 or int(mask) > 32:
            module.fail_json(msg='Incorrect mask value.', mask=mask)
    except ValueError:
        module.fail_json(msg='Mask may contain non-integer values.', mask=mask)

    netmask = get_dotted_mask(int(mask))
    return '.'.join(get_network_start(address, netmask))
def normalize_prefix(module, prefix):
    """Return *prefix* in canonical 'network/mask' form.

    A bare host address gets an implicit /32; a prefixed address is
    reduced to its network start (e.g. 10.0.0.5/24 -> 10.0.0.0/24).
    """
    parts = prefix.split('/')
    if len(parts) > 2:
        module.fail_json(msg='Incorrect address format.', address=parts[0])
    if len(parts) == 2:
        network = network_from_string(parts[0], parts[1], module)
        return '{0}/{1}'.format(network, parts[1])
    return prefix + '/32'
def main():
    """Module entry point: build the argument spec, compare desired vs
    existing route state, and push config changes when required."""
    argument_spec = dict(
        prefix=dict(required=True, type='str'),
        next_hop=dict(required=True, type='str'),
        vrf=dict(type='str', default='default'),
        tag=dict(type='str'),
        route_name=dict(type='str'),
        pref=dict(type='str'),
        state=dict(choices=['absent', 'present'],
                   default='present'),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )

    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    state = module.params['state']

    result = dict(changed=False)
    warnings = list()

    # Canonicalise the prefix (host address -> /32, network start address).
    prefix = invoke('normalize_prefix', module, module.params['prefix'])

    existing = invoke('get_existing', module, prefix, warnings)
    end_state = existing

    args = ['route_name', 'vrf', 'pref', 'tag', 'next_hop', 'prefix']
    proposed = dict((k, v) for k, v in module.params.items() if v is not None and k in args)

    # 'absent' with no existing route is a no-op.
    if state == 'present' or (state == 'absent' and existing):
        candidate = CustomNetworkConfig(indent=3)
        # Dispatch to state_present / state_absent via invoke().
        invoke('state_%s' % state, module, candidate, prefix)

        try:
            response = load_config(module, candidate)
            result.update(response)
        except Exception:
            exc = get_exception()
            module.fail_json(msg=str(exc))
    else:
        result['updates'] = []

    result['warnings'] = warnings

    if module._verbosity > 0:
        # Verbose runs also report before/after state for debugging.
        end_state = invoke('get_existing', module, prefix, warnings)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed

    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
techdragon/django | tests/custom_columns/tests.py | 55 | 4002 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from django.utils import six
from .models import Article, Author
class CustomColumnsTests(TestCase):
    """Exercises models whose fields and tables use custom db_column /
    db_table names: lookups and attribute access must use the Python
    field names, never the underlying column names."""

    def setUp(self):
        # Two authors; one article authored by both, with a1 as primary.
        self.a1 = Author.objects.create(first_name="John", last_name="Smith")
        self.a2 = Author.objects.create(first_name="Peter", last_name="Jones")
        self.authors = [self.a1, self.a2]

        self.article = Article.objects.create(headline="Django lets you build Web apps easily", primary_author=self.a1)
        self.article.authors.set(self.authors)

    def test_query_all_available_authors(self):
        self.assertQuerysetEqual(
            Author.objects.all(), [
                "Peter Jones", "John Smith",
            ],
            six.text_type
        )

    def test_get_first_name(self):
        self.assertEqual(
            Author.objects.get(first_name__exact="John"),
            self.a1,
        )

    def test_filter_first_name(self):
        self.assertQuerysetEqual(
            Author.objects.filter(first_name__exact="John"), [
                "John Smith",
            ],
            six.text_type
        )

    def test_field_error(self):
        # Lookups must use the model field name, not the db column name.
        with self.assertRaises(FieldError):
            Author.objects.filter(firstname__exact="John")

    def test_attribute_error(self):
        # Attribute access likewise uses field names, not column names.
        with self.assertRaises(AttributeError):
            self.a1.firstname

        with self.assertRaises(AttributeError):
            self.a1.last

    def test_get_all_authors_for_an_article(self):
        self.assertQuerysetEqual(
            self.article.authors.all(), [
                "Peter Jones",
                "John Smith",
            ],
            six.text_type
        )

    def test_get_all_articles_for_an_author(self):
        self.assertQuerysetEqual(
            self.a1.article_set.all(), [
                "Django lets you build Web apps easily",
            ],
            lambda a: a.headline
        )

    def test_get_author_m2m_relation(self):
        self.assertQuerysetEqual(
            self.article.authors.filter(last_name='Jones'), [
                "Peter Jones"
            ],
            six.text_type
        )

    def test_author_querying(self):
        self.assertQuerysetEqual(
            Author.objects.all().order_by('last_name'),
            ['<Author: Peter Jones>', '<Author: John Smith>']
        )

    def test_author_filtering(self):
        self.assertQuerysetEqual(
            Author.objects.filter(first_name__exact='John'),
            ['<Author: John Smith>']
        )

    def test_author_get(self):
        self.assertEqual(self.a1, Author.objects.get(first_name__exact='John'))

    def test_filter_on_nonexistent_field(self):
        # The error message lists the real field names as valid choices.
        msg = (
            "Cannot resolve keyword 'firstname' into field. Choices are: "
            "Author_ID, article, first_name, last_name, primary_set"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Author.objects.filter(firstname__exact='John')

    def test_author_get_attributes(self):
        a = Author.objects.get(last_name__exact='Smith')
        self.assertEqual('John', a.first_name)
        self.assertEqual('Smith', a.last_name)
        with self.assertRaisesMessage(AttributeError, "'Author' object has no attribute 'firstname'"):
            getattr(a, 'firstname')

        with self.assertRaisesMessage(AttributeError, "'Author' object has no attribute 'last'"):
            getattr(a, 'last')

    def test_m2m_table(self):
        # The m2m join table also uses custom column names; ordering and
        # filtering must still work through it in both directions.
        self.assertQuerysetEqual(
            self.article.authors.all().order_by('last_name'),
            ['<Author: Peter Jones>', '<Author: John Smith>']
        )
        self.assertQuerysetEqual(
            self.a1.article_set.all(),
            ['<Article: Django lets you build Web apps easily>']
        )
        self.assertQuerysetEqual(
            self.article.authors.filter(last_name='Jones'),
            ['<Author: Peter Jones>']
        )
| bsd-3-clause |
GoSteven/Diary | django/core/management/__init__.py | 13 | 17843 | import os
import sys
from optparse import OptionParser, NO_DEFAULT
import imp
import django
from django.core.management.base import BaseCommand, CommandError, handle_default_options
from django.utils.importlib import import_module
# For backwards compatibility: get_version() used to be in this module.
get_version = django.get_version
# A cache of loaded commands, so that call_command
# doesn't have to reload every time it's called.
_commands = None
def find_commands(management_dir):
    """
    Given a path to a management directory, returns a list of all the command
    names that are available.

    Returns an empty list if no commands are defined.
    """
    command_dir = os.path.join(management_dir, 'commands')
    try:
        entries = os.listdir(command_dir)
    except OSError:
        # Missing or unreadable directory: no commands available.
        return []
    return [name[:-3] for name in entries
            if name.endswith('.py') and not name.startswith('_')]
def find_management_module(app_name):
    """
    Determines the path to the management module for the given app_name,
    without actually importing the application or the management module.

    Raises ImportError if the management module cannot be found for any reason.
    """
    # Walk the dotted path from the innermost component outwards, e.g.
    # 'proj.app' -> look up 'proj', then 'app', then 'management'.
    parts = app_name.split('.')
    parts.append('management')
    parts.reverse()
    part = parts.pop()
    path = None

    # When using manage.py, the project module is added to the path,
    # loaded, then removed from the path. This means that
    # testproject.testapp.models can be loaded in future, even if
    # testproject isn't in the path. When looking for the management
    # module, we need look for the case where the project name is part
    # of the app_name but the project directory itself isn't on the path.
    try:
        f, path, descr = imp.find_module(part, path)
    except ImportError, e:
        # Python 2 'except X, e' syntax.  Tolerate a missing top-level
        # package only when the cwd *is* that package's directory.
        if os.path.basename(os.getcwd()) != part:
            raise e

    while parts:
        part = parts.pop()
        # imp.find_module wants a list of paths (or None for sys.path).
        f, path, descr = imp.find_module(part, path and [path] or None)
    return path
def load_command_class(app_name, name):
    """
    Given a command name and an application name, returns the Command
    class instance. All errors raised by the import process
    (ImportError, AttributeError) are allowed to propagate.
    """
    command_module = import_module(
        '%s.management.commands.%s' % (app_name, name))
    return command_module.Command()
def get_commands():
    """
    Returns a dictionary mapping command names to their callback applications.
    This works by looking for a management.commands package in django.core, and
    in each installed application -- if a commands package exists, all commands
    in that package are registered.
    Core commands are always included. If a settings module has been
    specified, user-defined commands will also be included, the
    startproject command will be disabled, and the startapp command
    will be modified to use the directory in which the settings module appears.
    The dictionary is in the format {command_name: app_name}. Key-value
    pairs from this dictionary can then be used in calls to
    load_command_class(app_name, command_name)
    If a specific version of a command must be loaded (e.g., with the
    startapp command), the instantiated module can be placed in the
    dictionary in place of the application name.
    The dictionary is cached on the first call and reused on subsequent
    calls.
    """
    global _commands
    if _commands is None:
        # Seed the registry with Django's own built-in commands.
        _commands = dict([(name, 'django.core') for name in find_commands(__path__[0])])
        # Find the installed apps
        try:
            from django.conf import settings
            apps = settings.INSTALLED_APPS
        except (AttributeError, EnvironmentError, ImportError):
            apps = []
        # Find the project directory
        try:
            from django.conf import settings
            module = import_module(settings.SETTINGS_MODULE)
            # setup_environ is defined later in this module; the forward
            # reference is fine because it is resolved at call time.
            project_directory = setup_environ(module, settings.SETTINGS_MODULE)
        except (AttributeError, EnvironmentError, ImportError, KeyError):
            project_directory = None
        # Find and load the management module for each installed app.
        for app_name in apps:
            try:
                path = find_management_module(app_name)
                _commands.update(dict([(name, app_name)
                                       for name in find_commands(path)]))
            except ImportError:
                pass # No management module - ignore this app
        if project_directory:
            # Remove the "startproject" command from self.commands, because
            # that's a django-admin.py command, not a manage.py command.
            del _commands['startproject']
            # Override the startapp command so that it always uses the
            # project_directory, not the current working directory
            # (which is default).
            from django.core.management.commands.startapp import ProjectCommand
            _commands['startapp'] = ProjectCommand(project_directory)
    return _commands
def call_command(name, *args, **options):
    """
    Calls the given command, with the given options and args/kwargs.
    This is the primary API you should use for calling specific commands.
    Some examples:
      call_command('syncdb')
      call_command('shell', plain=True)
      call_command('sqlall', 'myapp')
    """
    # Resolve the command name via the cached registry; the registry may hold
    # either an app name to load from, or an already-instantiated command.
    try:
        app_name = get_commands()[name]
        if isinstance(app_name, BaseCommand):
            klass = app_name
        else:
            klass = load_command_class(app_name, name)
    except KeyError:
        raise CommandError("Unknown command: %r" % name)
    # Grab out a list of defaults from the options. optparse does this for us
    # when the script runs from the command line, but since call_command can
    # be called programatically, we need to simulate the loading and handling
    # of defaults (see #10080 for details).
    defaults = {}
    for opt in klass.option_list:
        if opt.default is not NO_DEFAULT:
            defaults[opt.dest] = opt.default
    defaults.update(options)
    return klass.execute(*args, **defaults)
class LaxOptionParser(OptionParser):
    """
    An option parser that doesn't raise any errors on unknown options.
    This is needed because the --settings and --pythonpath options affect
    the commands (and thus the options) that are available to the user.
    """
    def error(self, msg):
        # Swallow parse errors on purpose: unknown options may belong to the
        # subcommand's own parser, which runs later.
        pass
    def print_help(self):
        """Output nothing.
        The lax options are included in the normal option parser, so under
        normal usage, we don't need to print the lax options.
        """
        pass
    def print_lax_help(self):
        """Output the basic options available to every command.
        This just redirects to the default print_help() behaviour.
        """
        OptionParser.print_help(self)
    def _process_args(self, largs, rargs, values):
        """
        Overrides OptionParser._process_args to exclusively handle default
        options and ignore args and other options.
        This overrides the behavior of the super class, which stop parsing
        at the first unrecognized option.
        """
        while rargs:
            arg = rargs[0]
            try:
                if arg[0:2] == "--" and len(arg) > 2:
                    # process a single long option (possibly with value(s))
                    # the superclass code pops the arg off rargs
                    self._process_long_opt(rargs, values)
                elif arg[:1] == "-" and len(arg) > 1:
                    # process a cluster of short options (possibly with
                    # value(s) for the last one only)
                    # the superclass code pops the arg off rargs
                    self._process_short_opts(rargs, values)
                else:
                    # it's either a non-default option or an arg
                    # either way, add it to the args list so we can keep
                    # dealing with options
                    del rargs[0]
                    raise Exception
            except:
                # NOTE: the bare except is deliberate -- any failure to
                # process a token (including the Exception raised above)
                # keeps it as a leftover arg instead of aborting parsing.
                largs.append(arg)
class ManagementUtility(object):
    """
    Encapsulates the logic of the django-admin.py and manage.py utilities.
    A ManagementUtility has a number of commands, which can be manipulated
    by editing the self.commands dictionary.
    """
    def __init__(self, argv=None):
        # Copy sys.argv so later mutation of the real argv cannot affect us;
        # prog_name is used in help/error messages.
        self.argv = argv or sys.argv[:]
        self.prog_name = os.path.basename(self.argv[0])
    def main_help_text(self):
        """
        Returns the script's main help text, as a string.
        """
        usage = ['',"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,'']
        usage.append('Available subcommands:')
        commands = get_commands().keys()
        commands.sort()
        for cmd in commands:
            usage.append('  %s' % cmd)
        return '\n'.join(usage)
    def fetch_command(self, subcommand):
        """
        Tries to fetch the given subcommand, printing a message with the
        appropriate command called from the command line (usually
        "django-admin.py" or "manage.py") if it can't be found.
        """
        try:
            app_name = get_commands()[subcommand]
        except KeyError:
            sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" % \
                (subcommand, self.prog_name))
            sys.exit(1)
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            klass = app_name
        else:
            klass = load_command_class(app_name, subcommand)
        return klass
    def autocomplete(self):
        """
        Output completion suggestions for BASH.
        The output of this function is passed to BASH's `COMREPLY` variable and
        treated as completion suggestions. `COMREPLY` expects a space
        separated string as the result.
        The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
        to get information about the cli input. Please refer to the BASH
        man-page for more information about this variables.
        Subcommand options are saved as pairs. A pair consists of
        the long option string (e.g. '--exclude') and a boolean
        value indicating if the option requires arguments. When printing to
        stdout, a equal sign is appended to options which require arguments.
        Note: If debugging this function, it is recommended to write the debug
        output in a separate file. Otherwise the debug output will be treated
        and formatted as potential completion suggestions.
        """
        # Don't complete if user hasn't sourced bash_completion file.
        if not os.environ.has_key('DJANGO_AUTO_COMPLETE'):
            return
        cwords = os.environ['COMP_WORDS'].split()[1:]
        cword = int(os.environ['COMP_CWORD'])
        try:
            curr = cwords[cword-1]
        except IndexError:
            curr = ''
        subcommands = get_commands().keys() + ['help']
        options = [('--help', None)]
        # subcommand -- completing the first word: offer subcommand names.
        if cword == 1:
            print ' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands)))
        # subcommand options
        # special case: the 'help' subcommand has no options
        elif cwords[0] in subcommands and cwords[0] != 'help':
            subcommand_cls = self.fetch_command(cwords[0])
            # special case: 'runfcgi' stores additional options as
            # 'key=value' pairs
            if cwords[0] == 'runfcgi':
                from django.core.servers.fastcgi import FASTCGI_OPTIONS
                options += [(k, 1) for k in FASTCGI_OPTIONS]
            # special case: add the names of installed apps to options
            elif cwords[0] in ('dumpdata', 'reset', 'sql', 'sqlall',
                               'sqlclear', 'sqlcustom', 'sqlindexes',
                               'sqlreset', 'sqlsequencereset', 'test'):
                try:
                    from django.conf import settings
                    # Get the last part of the dotted path as the app name.
                    options += [(a.split('.')[-1], 0) for a in settings.INSTALLED_APPS]
                except ImportError:
                    # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
                    # user will find out once they execute the command.
                    pass
            options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
                        subcommand_cls.option_list]
            # filter out previously specified options from available options
            prev_opts = [x.split('=')[0] for x in cwords[1:cword-1]]
            options = filter(lambda (x, v): x not in prev_opts, options)
            # filter options by current input
            options = sorted([(k, v) for k, v in options if k.startswith(curr)])
            for option in options:
                opt_label = option[0]
                # append '=' to options which require args
                if option[1]:
                    opt_label += '='
                print opt_label
        # Exit unconditionally so bash gets only the suggestions above.
        sys.exit(1)
    def execute(self):
        """
        Given the command-line arguments, this figures out which subcommand is
        being run, creates a parser appropriate to that command, and runs it.
        """
        # Preprocess options to extract --settings and --pythonpath.
        # These options could affect the commands that are available, so they
        # must be processed early.
        parser = LaxOptionParser(usage="%prog subcommand [options] [args]",
                                 version=get_version(),
                                 option_list=BaseCommand.option_list)
        self.autocomplete()
        try:
            options, args = parser.parse_args(self.argv)
            handle_default_options(options)
        except:
            pass # Ignore any option errors at this point.
        try:
            subcommand = self.argv[1]
        except IndexError:
            subcommand = 'help' # Display help if no arguments were given.
        if subcommand == 'help':
            if len(args) > 2:
                self.fetch_command(args[2]).print_help(self.prog_name, args[2])
            else:
                parser.print_lax_help()
                sys.stderr.write(self.main_help_text() + '\n')
                sys.exit(1)
        # Special-cases: We want 'django-admin.py --version' and
        # 'django-admin.py --help' to work, for backwards compatibility.
        elif self.argv[1:] == ['--version']:
            # LaxOptionParser already takes care of printing the version.
            pass
        elif self.argv[1:] == ['--help']:
            parser.print_lax_help()
            sys.stderr.write(self.main_help_text() + '\n')
        else:
            self.fetch_command(subcommand).run_from_argv(self.argv)
def setup_environ(settings_mod, original_settings_path=None):
    """
    Configures the runtime environment. This can also be used by external
    scripts wanting to set up a similar environment to manage.py.
    Returns the project directory (assuming the passed settings module is
    directly in the project directory).
    The "original_settings_path" parameter is optional, but recommended, since
    trying to work out the original path from the module can be problematic.
    """
    # Add this project to sys.path so that it's importable in the conventional
    # way. For example, if this file (manage.py) lives in a directory
    # "myproject", this code would add "/path/to/myproject" to sys.path.
    settings_file = settings_mod.__file__
    if '__init__.py' in settings_file:
        # The settings module is a package; use its directory.
        settings_path = os.path.dirname(settings_file)
    else:
        settings_path = settings_file
    project_directory, settings_filename = os.path.split(settings_path)
    if not project_directory or project_directory == os.curdir:
        project_directory = os.getcwd()
    project_name = os.path.basename(project_directory)
    # Strip the filename suffix to get the module name, and the "$py"
    # marker on Jython compiled files (like settings$py.class).
    settings_name = os.path.splitext(settings_filename)[0]
    if settings_name.endswith("$py"):
        settings_name = settings_name[:-3]
    # Set DJANGO_SETTINGS_MODULE appropriately.
    if original_settings_path:
        os.environ['DJANGO_SETTINGS_MODULE'] = original_settings_path
    else:
        os.environ['DJANGO_SETTINGS_MODULE'] = '%s.%s' % (project_name, settings_name)
    # Import the project module. We add the parent directory to PYTHONPATH to
    # avoid some of the path errors new users can have.
    sys.path.append(os.path.join(project_directory, os.pardir))
    import_module(project_name)
    sys.path.pop()
    return project_directory
def execute_from_command_line(argv=None):
    """
    A simple method that runs a ManagementUtility.
    """
    ManagementUtility(argv).execute()
def execute_manager(settings_mod, argv=None):
    """
    Like execute_from_command_line(), but for use by manage.py, a
    project-specific django-admin.py utility.
    """
    # Configure DJANGO_SETTINGS_MODULE and sys.path first, then dispatch.
    setup_environ(settings_mod)
    ManagementUtility(argv).execute()
| bsd-3-clause |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/email/_parseaddr.py | 150 | 15733 | # Copyright (C) 2002-2007 Python Software Foundation
# Contact: email-sig@python.org
"""Email address parsing code.
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
# Public parsing helpers re-exported by the email package.
__all__ = [
    'mktime_tz',
    'parsedate',
    'parsedate_tz',
    'quote',
    ]
import time, calendar

SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '

# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
               'aug', 'sep', 'oct', 'nov', 'dec',
               'january', 'february', 'march', 'april', 'may', 'june', 'july',
               'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']

# The timezone table does not include the military time zones defined
# in RFC822, other than Z.  According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones.  RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
              'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
              'EST': -500, 'EDT': -400, # Eastern
              'CST': -600, 'CDT': -500, # Central
              'MST': -700, 'MDT': -600, # Mountain
              'PST': -800, 'PDT': -700  # Pacific
              }

def parsedate_tz(data):
    """Convert a date string to a time tuple.
    Accounts for military timezones.

    Returns None if `data' cannot be parsed; otherwise a 10-tuple whose
    first 9 items match time.struct_time and whose last item is the
    timezone offset in seconds east of UTC (or None if no numeric or
    known symbolic zone was present).
    """
    if not data:
        # Guard against None/empty input: the token indexing below assumes
        # at least one word (fix matching later stdlib versions).
        return None
    data = data.split()
    if not data:
        # Whitespace-only input yields no tokens.
        return None
    # The FWS after the comma after the day-of-week is optional, so search and
    # adjust for this.
    if data[0].endswith(',') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        i = data[0].rfind(',')
        if i >= 0:
            data[0] = data[0][i+1:]
    if len(data) == 3: # RFC 850 date, deprecated
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('') # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    if mm not in _monthnames:
        # Perhaps day and month were swapped (e.g. "Nov 16 ...").
        dd, mm = mm, dd.lower()
        if mm not in _monthnames:
            return None
    mm = _monthnames.index(mm) + 1
    if mm > 12:
        # Full month names occupy indices 12-23 of _monthnames.
        mm -= 12
    if dd[-1] == ',':
        dd = dd[:-1]
    i = yy.find(':')
    if i > 0:
        # Year and time fields were swapped.
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        # Year and timezone fields were swapped.
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    # Check for a yy specified in two-digit format, then convert it to the
    # appropriate four-digit format, according to the POSIX standard. RFC 822
    # calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
    # mandates a 4-digit yy. For more information, see the documentation for
    # the time module.
    if yy < 100:
        # The year is between 1969 and 1999 (inclusive).
        if yy > 68:
            yy += 1900
        # The year is between 2000 and 2068 (inclusive).
        else:
            yy += 2000
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    # Daylight Saving Time flag is set to -1, since DST is unknown.
    return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
def parsedate(data):
    """Convert a time string to a time tuple."""
    parsed = parsedate_tz(data)
    if not isinstance(parsed, tuple):
        # Pass through the failure value (None) unchanged.
        return parsed
    # Drop the trailing timezone offset to get a 9-tuple.
    return parsed[:9]
def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
    tz_offset = data[9]
    if tz_offset is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    return calendar.timegm(data) - tz_offset
def quote(str):
    """Prepare string to be used in a quoted string.
    Turns backslash and double quote characters into quoted pairs. These
    are the only characters that need to be quoted inside a quoted string.
    Does not add the surrounding double quotes.
    """
    # Escape backslashes first so the quote escapes are not doubled.
    escaped = str.replace('\\', '\\\\')
    return escaped.replace('"', '\\"')
class AddrlistClass:
    """Address parser class by Ben Escoto.
    To understand what this class does, it helps to have a copy of RFC 2822 in
    front of you.
    Note: this class interface is deprecated and may be removed in the future.
    Use rfc822.AddressList instead.
    """
    def __init__(self, field):
        """Initialize a new instance.
        `field' is an unparsed address header field, containing
        one or more addresses.
        """
        # Character classes used by the scanner; self.pos always indexes the
        # next unparsed character of self.field.
        self.specials = '()<>@,:;.\"[]'
        self.pos = 0
        self.LWS = ' \t'
        self.CR = '\r\n'
        self.FWS = self.LWS + self.CR
        self.atomends = self.specials + self.LWS + self.CR
        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
        # is obsolete syntax. RFC 2822 requires that we recognize obsolete
        # syntax, so allow dots in phrases.
        self.phraseends = self.atomends.replace('.', '')
        self.field = field
        self.commentlist = []
    def gotonext(self):
        """Parse up to the start of the next address."""
        # Skips whitespace/newlines and collects any comments encountered.
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS + '\n\r':
                self.pos += 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            else:
                break
    def getaddrlist(self):
        """Parse all addresses.
        Returns a list containing all of the addresses.
        """
        result = []
        while self.pos < len(self.field):
            ad = self.getaddress()
            if ad:
                result += ad
            else:
                # Unparseable chunk: record an empty (name, address) pair.
                result.append(('', ''))
        return result
    def getaddress(self):
        """Parse the next address."""
        self.commentlist = []
        self.gotonext()
        # Remember where we started so we can re-parse as a bare addr-spec.
        oldpos = self.pos
        oldcl = self.commentlist
        plist = self.getphraselist()
        self.gotonext()
        returnlist = []
        if self.pos >= len(self.field):
            # Bad email address technically, no domain.
            if plist:
                returnlist = [(SPACE.join(self.commentlist), plist[0])]
        elif self.field[self.pos] in '.@':
            # email address is just an addrspec
            # this isn't very efficient since we start over
            self.pos = oldpos
            self.commentlist = oldcl
            addrspec = self.getaddrspec()
            returnlist = [(SPACE.join(self.commentlist), addrspec)]
        elif self.field[self.pos] == ':':
            # address is a group
            returnlist = []
            fieldlen = len(self.field)
            self.pos += 1
            while self.pos < len(self.field):
                self.gotonext()
                if self.pos < fieldlen and self.field[self.pos] == ';':
                    self.pos += 1
                    break
                returnlist = returnlist + self.getaddress()
        elif self.field[self.pos] == '<':
            # Address is a phrase then a route addr
            routeaddr = self.getrouteaddr()
            if self.commentlist:
                returnlist = [(SPACE.join(plist) + ' (' +
                               ' '.join(self.commentlist) + ')', routeaddr)]
            else:
                returnlist = [(SPACE.join(plist), routeaddr)]
        else:
            if plist:
                returnlist = [(SPACE.join(self.commentlist), plist[0])]
            elif self.field[self.pos] in self.specials:
                # Skip the stray special character and resynchronize.
                self.pos += 1
        self.gotonext()
        if self.pos < len(self.field) and self.field[self.pos] == ',':
            self.pos += 1
        return returnlist
    def getrouteaddr(self):
        """Parse a route address (Return-path value).
        This method just skips all the route stuff and returns the addrspec.
        """
        if self.field[self.pos] != '<':
            return
        expectroute = False
        self.pos += 1
        self.gotonext()
        adlist = ''
        while self.pos < len(self.field):
            if expectroute:
                # After '@' in a route, a domain follows; discard it.
                self.getdomain()
                expectroute = False
            elif self.field[self.pos] == '>':
                self.pos += 1
                break
            elif self.field[self.pos] == '@':
                self.pos += 1
                expectroute = True
            elif self.field[self.pos] == ':':
                self.pos += 1
            else:
                adlist = self.getaddrspec()
                self.pos += 1
                break
            self.gotonext()
        return adlist
    def getaddrspec(self):
        """Parse an RFC 2822 addr-spec."""
        aslist = []
        self.gotonext()
        # Accumulate the local part (dot-atoms and quoted strings).
        while self.pos < len(self.field):
            if self.field[self.pos] == '.':
                aslist.append('.')
                self.pos += 1
            elif self.field[self.pos] == '"':
                aslist.append('"%s"' % quote(self.getquote()))
            elif self.field[self.pos] in self.atomends:
                break
            else:
                aslist.append(self.getatom())
        self.gotonext()
        if self.pos >= len(self.field) or self.field[self.pos] != '@':
            # No domain part; return the local part alone.
            return EMPTYSTRING.join(aslist)
        aslist.append('@')
        self.pos += 1
        self.gotonext()
        return EMPTYSTRING.join(aslist) + self.getdomain()
    def getdomain(self):
        """Get the complete domain name from an address."""
        sdlist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos += 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] == '[':
                sdlist.append(self.getdomainliteral())
            elif self.field[self.pos] == '.':
                self.pos += 1
                sdlist.append('.')
            elif self.field[self.pos] in self.atomends:
                break
            else:
                sdlist.append(self.getatom())
        return EMPTYSTRING.join(sdlist)
    def getdelimited(self, beginchar, endchars, allowcomments=True):
        """Parse a header fragment delimited by special characters.
        `beginchar' is the start character for the fragment.
        If self is not looking at an instance of `beginchar' then
        getdelimited returns the empty string.
        `endchars' is a sequence of allowable end-delimiting characters.
        Parsing stops when one of these is encountered.
        If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
        within the parsed fragment.
        """
        if self.field[self.pos] != beginchar:
            return ''
        slist = ['']
        # NOTE: this local `quote' is a backslash-escape flag; it deliberately
        # shadows the module-level quote() within this method.
        quote = False
        self.pos += 1
        while self.pos < len(self.field):
            if quote:
                # Previous character was a backslash: take this one literally.
                slist.append(self.field[self.pos])
                quote = False
            elif self.field[self.pos] in endchars:
                self.pos += 1
                break
            elif allowcomments and self.field[self.pos] == '(':
                slist.append(self.getcomment())
                continue        # have already advanced pos from getcomment
            elif self.field[self.pos] == '\\':
                quote = True
            else:
                slist.append(self.field[self.pos])
            self.pos += 1
        return EMPTYSTRING.join(slist)
    def getquote(self):
        """Get a quote-delimited fragment from self's field."""
        return self.getdelimited('"', '"\r', False)
    def getcomment(self):
        """Get a parenthesis-delimited fragment from self's field."""
        return self.getdelimited('(', ')\r', True)
    def getdomainliteral(self):
        """Parse an RFC 2822 domain-literal."""
        return '[%s]' % self.getdelimited('[', ']\r', False)
    def getatom(self, atomends=None):
        """Parse an RFC 2822 atom.
        Optional atomends specifies a different set of end token delimiters
        (the default is to use self.atomends).  This is used e.g. in
        getphraselist() since phrase endings must not include the `.' (which
        is legal in phrases)."""
        atomlist = ['']
        if atomends is None:
            atomends = self.atomends
        while self.pos < len(self.field):
            if self.field[self.pos] in atomends:
                break
            else:
                atomlist.append(self.field[self.pos])
            self.pos += 1
        return EMPTYSTRING.join(atomlist)
    def getphraselist(self):
        """Parse a sequence of RFC 2822 phrases.
        A phrase is a sequence of words, which are in turn either RFC 2822
        atoms or quoted-strings.  Phrases are canonicalized by squeezing all
        runs of continuous whitespace into one space.
        """
        plist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.FWS:
                self.pos += 1
            elif self.field[self.pos] == '"':
                plist.append(self.getquote())
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] in self.phraseends:
                break
            else:
                plist.append(self.getatom(self.phraseends))
        return plist
class AddressList(AddrlistClass):
    """An AddressList encapsulates a list of parsed RFC 2822 addresses."""
    def __init__(self, field):
        AddrlistClass.__init__(self, field)
        if field:
            self.addresslist = self.getaddrlist()
        else:
            self.addresslist = []
    def __len__(self):
        return len(self.addresslist)
    def __add__(self, other):
        # Set union: our entries first, then any entries of `other' we
        # don't already have.
        combined = AddressList(None)
        combined.addresslist = self.addresslist[:]
        combined.addresslist.extend(
            addr for addr in other.addresslist
            if addr not in self.addresslist)
        return combined
    def __iadd__(self, other):
        # Set union, in-place (mutates our existing list object).
        for addr in other.addresslist:
            if addr not in self.addresslist:
                self.addresslist.append(addr)
        return self
    def __sub__(self, other):
        # Set difference.
        remaining = AddressList(None)
        remaining.addresslist = [addr for addr in self.addresslist
                                 if addr not in other.addresslist]
        return remaining
    def __isub__(self, other):
        # Set difference, in-place.
        for addr in other.addresslist:
            if addr in self.addresslist:
                self.addresslist.remove(addr)
        return self
    def __getitem__(self, index):
        # Make indexing, slices, and 'in' work.
        return self.addresslist[index]
| mit |
suku248/nest-simulator | pynest/nest/tests/test_connect_fixed_total_number.py | 10 | 4717 | # -*- coding: utf-8 -*-
#
# test_connect_fixed_total_number.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import unittest
import scipy.stats
from . import test_connect_helpers as hf
from .test_connect_parameters import TestParams
class TestFixedTotalNumber(TestParams):
    # specify connection pattern and specific params
    rule = 'fixed_total_number'
    conn_dict = {'rule': rule}
    # sizes of source-, target-population and outdegree for connection test
    N1 = 50
    N2 = 70
    Nconn = 100
    conn_dict['N'] = Nconn
    # sizes of source-, target-population and total number of connections for
    # statistical test
    N_s = 20
    N_t = 20
    N = 100
    # Critical values and number of iterations of two level test
    stat_dict = {'alpha2': 0.05, 'n_runs': 200}
    # tested on each mpi process separately
    def testErrorMessages(self):
        # Asking for more than N1*N2 connections with multapses disabled is
        # impossible and must raise a NESTError.
        got_error = False
        conn_params = self.conn_dict.copy()
        conn_params['allow_autapses'] = True
        conn_params['allow_multapses'] = False
        conn_params['N'] = self.N1 * self.N2 + 1
        try:
            self.setUpNetwork(conn_params)
        except hf.nest.kernel.NESTError:
            got_error = True
        self.assertTrue(got_error)
    def testTotalNumberOfConnections(self):
        # Exactly Nconn connections must be created, all from pop1 to pop2.
        conn_params = self.conn_dict.copy()
        self.setUpNetwork(conn_params)
        total_conn = len(hf.nest.GetConnections(self.pop1, self.pop2))
        hf.mpi_assert(total_conn, self.Nconn, self)
        # make sure no connections were drawn from the target to the source
        # population
        M = hf.get_connectivity_matrix(self.pop2, self.pop1)
        M_none = np.zeros((len(self.pop1), len(self.pop2)))
        hf.mpi_assert(M, M_none, self)
    def testStatistics(self):
        # Two-level test: chi-squared on degree distributions per run, then a
        # KS test for uniformity of the resulting p-values.
        conn_params = self.conn_dict.copy()
        conn_params['allow_autapses'] = True
        conn_params['allow_multapses'] = True
        conn_params['N'] = self.N
        for fan in ['in', 'out']:
            expected = hf.get_expected_degrees_totalNumber(
                self.N, fan, self.N_s, self.N_t)
            pvalues = []
            for i in range(self.stat_dict['n_runs']):
                hf.reset_seed(i + 1, self.nr_threads)
                self.setUpNetwork(conn_dict=conn_params,
                                  N1=self.N_s, N2=self.N_t)
                degrees = hf.get_degrees(fan, self.pop1, self.pop2)
                degrees = hf.gather_data(degrees)
                if degrees is not None:
                    chi, p = hf.chi_squared_check(degrees, expected)
                    pvalues.append(p)
                hf.mpi_barrier()
            p = None
            if degrees is not None:
                ks, p = scipy.stats.kstest(pvalues, 'uniform')
            p = hf.bcast_data(p)
            self.assertGreater(p, self.stat_dict['alpha2'])
    def testAutapsesTrue(self):
        conn_params = self.conn_dict.copy()
        N = 3
        # test that autapses exist
        conn_params['N'] = N * N * N
        conn_params['allow_autapses'] = True
        pop = hf.nest.Create('iaf_psc_alpha', N)
        hf.nest.Connect(pop, pop, conn_params)
        # make sure all connections do exist
        M = hf.get_connectivity_matrix(pop, pop)
        M = hf.gather_data(M)
        if M is not None:
            self.assertTrue(np.sum(np.diag(M)) > N)
    def testAutapsesFalse(self):
        conn_params = self.conn_dict.copy()
        N = 3
        # test that autapses were excluded
        conn_params['N'] = N * (N - 1)
        conn_params['allow_autapses'] = False
        pop = hf.nest.Create('iaf_psc_alpha', N)
        hf.nest.Connect(pop, pop, conn_params)
        # make sure all connections do exist
        M = hf.get_connectivity_matrix(pop, pop)
        hf.mpi_assert(np.diag(M), np.zeros(N), self)
def suite():
    """Build the test suite for this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestFixedTotalNumber)
def run():
    """Run the suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())


if __name__ == '__main__':
    run()
| gpl-2.0 |
DinoCow/airflow | tests/providers/microsoft/azure/hooks/test_azure_fileshare.py | 7 | 8227 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains integration with Azure File Share.
Cloud variant of a SMB file share. Make sure that a Airflow connection of
type `wasb` exists. Authorization can be done by supplying a login (=Storage account name)
and password (=Storage account key), or login and SAS token in the extra field
(see connection `wasb_default` for an example).
"""
import json
import unittest
from unittest import mock
from azure.storage.file import Directory, File
from airflow.models import Connection
from airflow.providers.microsoft.azure.hooks.azure_fileshare import AzureFileShareHook
from airflow.utils import db
class TestAzureFileshareHook(unittest.TestCase):
def setUp(self):
db.merge_conn(Connection(conn_id='wasb_test_key', conn_type='wasb', login='login', password='key'))
db.merge_conn(
Connection(
conn_id='wasb_test_sas_token',
conn_type='wasb',
login='login',
extra=json.dumps({'sas_token': 'token'}),
)
)
def test_key_and_connection(self):
from azure.storage.file import FileService
hook = AzureFileShareHook(wasb_conn_id='wasb_test_key')
self.assertEqual(hook.conn_id, 'wasb_test_key')
self.assertIsNone(hook._conn)
self.assertIsInstance(hook.get_conn(), FileService)
def test_sas_token(self):
from azure.storage.file import FileService
hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
self.assertEqual(hook.conn_id, 'wasb_test_sas_token')
self.assertIsInstance(hook.get_conn(), FileService)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_check_for_file(self, mock_service):
mock_instance = mock_service.return_value
mock_instance.exists.return_value = True
hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
self.assertTrue(hook.check_for_file('share', 'directory', 'file', timeout=3))
mock_instance.exists.assert_called_once_with('share', 'directory', 'file', timeout=3)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_check_for_directory(self, mock_service):
mock_instance = mock_service.return_value
mock_instance.exists.return_value = True
hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
self.assertTrue(hook.check_for_directory('share', 'directory', timeout=3))
mock_instance.exists.assert_called_once_with('share', 'directory', timeout=3)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_load_file(self, mock_service):
mock_instance = mock_service.return_value
hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
hook.load_file('path', 'share', 'directory', 'file', max_connections=1)
mock_instance.create_file_from_path.assert_called_once_with(
'share', 'directory', 'file', 'path', max_connections=1
)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_load_string(self, mock_service):
    """load_string() maps to FileService.create_file_from_text."""
    service = mock_service.return_value
    share_hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
    share_hook.load_string('big string', 'share', 'directory', 'file', timeout=1)
    # The payload string is passed last, after the remote location.
    service.create_file_from_text.assert_called_once_with(
        'share', 'directory', 'file', 'big string', timeout=1)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_load_stream(self, mock_service):
    """load_stream() maps to FileService.create_file_from_stream."""
    service = mock_service.return_value
    share_hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
    share_hook.load_stream('stream', 'share', 'directory', 'file', 42, timeout=1)
    service.create_file_from_stream.assert_called_once_with(
        'share', 'directory', 'file', 'stream', 42, timeout=1)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_list_directories_and_files(self, mock_service):
    """list_directories_and_files() is a straight pass-through."""
    service = mock_service.return_value
    share_hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
    share_hook.list_directories_and_files('share', 'directory', timeout=1)
    service.list_directories_and_files.assert_called_once_with('share', 'directory', timeout=1)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_list_files(self, mock_service):
    """list_files() keeps only File entries and returns their names."""
    service = mock_service.return_value
    listing = [File("file1"), File("file2"), Directory("dir1"), Directory("dir2")]
    service.list_directories_and_files.return_value = listing
    share_hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
    files = share_hook.list_files('share', 'directory', timeout=1)
    # Directories must be filtered out of the result.
    self.assertEqual(files, ["file1", "file2"])
    service.list_directories_and_files.assert_called_once_with('share', 'directory', timeout=1)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_create_directory(self, mock_service):
    """create_directory() is a straight pass-through."""
    service = mock_service.return_value
    share_hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
    share_hook.create_directory('share', 'directory', timeout=1)
    service.create_directory.assert_called_once_with('share', 'directory', timeout=1)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_get_file(self, mock_service):
    """get_file() maps to FileService.get_file_to_path."""
    service = mock_service.return_value
    share_hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
    share_hook.get_file('path', 'share', 'directory', 'file', max_connections=1)
    service.get_file_to_path.assert_called_once_with(
        'share', 'directory', 'file', 'path', max_connections=1)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_get_file_to_stream(self, mock_service):
    """get_file_to_stream() maps to FileService.get_file_to_stream."""
    service = mock_service.return_value
    share_hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
    share_hook.get_file_to_stream('stream', 'share', 'directory', 'file', max_connections=1)
    service.get_file_to_stream.assert_called_once_with(
        'share', 'directory', 'file', 'stream', max_connections=1)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_create_share(self, mock_service):
    """create_share() is a straight pass-through."""
    service = mock_service.return_value
    share_hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
    share_hook.create_share('my_share')
    service.create_share.assert_called_once_with('my_share')
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_fileshare.FileService', autospec=True)
def test_delete_share(self, mock_service):
    """delete_share() is a straight pass-through."""
    service = mock_service.return_value
    share_hook = AzureFileShareHook(wasb_conn_id='wasb_test_sas_token')
    share_hook.delete_share('my_share')
    service.delete_share.assert_called_once_with('my_share')
| apache-2.0 |
PySide/PySide | doc/codesnippets/examples/dbus/example-server.py | 6 | 2275 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# DBUS Server Example of use PySide with PyDBus library
import dbus
import dbus.service
import dbus.mainloop.glib
import random
from PySide.QtCore import *
from PySide.QtGui import QPushButton, QApplication
# The adaptor, MUST inherit dbus.service.Object
class DBusWidget(dbus.service.Object):
    """D-Bus adaptor exporting a QPushButton (methods and signals).

    MUST inherit dbus.service.Object so python-dbus can export it.
    """

    def __init__(self, name, session):
        # NOTE(review): despite the parameter names, dbus.service.Object is
        # effectively called with (connection, object_path) here — the caller
        # passes the session bus first and the object path second.
        # Export this object to dbus.
        dbus.service.Object.__init__(self, name, session)
        # Create a simple widget.
        self.widget = QPushButton()
        self.widget.resize(200, 50)
        # To export a Qt signal as a DBus-signal, connect it to a method of
        # this class. The method MUST carry the dbus signal annotation, so
        # python-dbus will export it as a dbus-signal.
        QObject.connect(self.widget, SIGNAL("clicked()"), self.clicked)
        QObject.connect(QApplication.instance(), SIGNAL("lastWindowClosed()"), self.lastWindowClosed)

    # You can export methods to dbus like you do in python-dbus.
    @dbus.service.method("com.example.SampleWidget", in_signature='', out_signature='')
    def show(self):
        """Show the widget."""
        self.widget.show()

    # Another method... now with a parameter.
    @dbus.service.method("com.example.SampleWidget", in_signature='s', out_signature='')
    def setText(self, value):
        """Set the button label to *value*."""
        self.widget.setText(value)

    @dbus.service.method("com.example.SampleWidget", in_signature='', out_signature='')
    def exit(self):
        """Quit the application.

        Bug fix: ``qApp`` is the QApplication *instance*, not a callable,
        so the original ``qApp().quit()`` raised TypeError when invoked
        over D-Bus. Use the application instance directly.
        """
        QApplication.instance().quit()

    # A signal that will be exported to dbus.
    @dbus.service.signal("com.example.SampleWidget", signature='')
    def clicked(self):
        pass

    # Another signal that will be exported to dbus.
    @dbus.service.signal("com.example.SampleWidget", signature='')
    def lastWindowClosed(self):
        pass
if __name__ == '__main__':
    app = QApplication([])
    # Use qt/glib mainloop integration to get dbus mainloop working.
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    session_bus = dbus.SessionBus()
    # Export the service. NOTE(review): the BusName object presumably must
    # stay referenced for the bus name to remain claimed — verify before
    # removing the (otherwise unused) `name` binding.
    name = dbus.service.BusName("com.example.SampleService", session_bus)
    # Export the object at path /DBusWidget on the session bus.
    widget = DBusWidget(session_bus, '/DBusWidget')
    print "Running example service."
    # Blocks until exit() is called over D-Bus or the window is closed.
    app.exec_()
| lgpl-2.1 |
roadmapper/ansible | lib/ansible/modules/cloud/vultr/_vultr_account_facts.py | 21 | 3670 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_account_facts
short_description: Gather facts about the Vultr account.
description:
- Gather facts about account balance, charges and payments.
version_added: "2.5"
deprecated:
removed_in: "2.12"
why: Transformed into an info module.
alternative: Use M(vultr_account_info) instead.
author: "René Moser (@resmo)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr account facts
local_action:
module: vultr_account_facts
- name: Print the gathered facts
debug:
var: ansible_facts.vultr_account_facts
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_account_facts:
description: Response from Vultr API
returned: success
type: complex
contains:
balance:
description: Your account balance.
returned: success
type: float
sample: -214.69
pending_charges:
description: Charges pending.
returned: success
type: float
sample: 57.03
last_payment_date:
description: Date of the last payment.
returned: success
type: str
sample: "2017-08-26 12:47:48"
last_payment_amount:
description: The amount of the last payment transaction.
returned: success
type: float
sample: -250.0
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrAccountFacts(Vultr):
    """Thin Vultr API wrapper that fetches account information."""

    def __init__(self, module):
        super(AnsibleVultrAccountFacts, self).__init__(module, "vultr_account_facts")
        # Fields copied from the API payload into the returned facts;
        # monetary values are normalised to floats.
        as_float = dict(convert_to='float')
        self.returns = {
            'balance': dict(as_float),
            'pending_charges': dict(as_float),
            'last_payment_date': dict(),
            'last_payment_amount': dict(as_float),
        }

    def get_account_info(self):
        """Query /v1/account/info and return the raw API payload."""
        return self.api_query(path="/v1/account/info")
def main():
    """Module entry point: gather Vultr account facts and exit the module."""
    module = AnsibleModule(
        argument_spec=vultr_argument_spec(),
        supports_check_mode=True,
    )
    account_facts = AnsibleVultrAccountFacts(module)
    result = account_facts.get_result(account_facts.get_account_info())
    # Expose the facts both at the top level and under ansible_facts.
    module.exit_json(
        ansible_facts={'vultr_account_facts': result['vultr_account_facts']},
        **result
    )


if __name__ == '__main__':
    main()
| gpl-3.0 |
nt/code-jam-ruby | lib/multipart_data.py | 1 | 3424 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module exposes one public function Login(), that given the password of
the Code Jam contestant should log him in and return a cookie."""
import mimetypes
from lib import error
class MultipartData(object):
    """Class to manage multipart/form-data bodies for HTTP requests."""

    def __init__(self, boundary):
        """Initialize the object with no parts and store the used boundary.

        Args:
          boundary: Boundary used to separate multipart data elements.
        """
        self.data = []
        self.boundary = boundary

    def __str__(self):
        """Convert this multipart data to a readable string.

        Returns:
          A string with the body of the HTTP packet that will contain the
          multipart data (parts joined by CRLF, closed by the final boundary).
        """
        return "\r\n".join(self.data + ['--' + self.boundary + '--', ''])

    def _GetContentType(self, filename):
        """Guess the content type of a file given its name.

        Args:
          filename: Name of the file whose mimetype should be guessed.

        Returns:
          The guessed mimetype for the file, or 'application/octet-stream'
          if no guess could be made.
        """
        guessed_type = mimetypes.guess_type(filename)[0]
        # Idiom fix: compare with None using "is not", not "!=".
        return guessed_type if guessed_type is not None else 'application/octet-stream'

    def AddFile(self, name, filename):
        """Add a file's contents to this multipart data.

        Args:
          name: Name of the element to add to the multipart data.
          filename: Name of the file with the contents to add.

        Raises:
          error.InternalError: If a problem occurs when reading the file.
        """
        try:
            # Bug fix: use a context manager so the handle is closed even if
            # read() raises (the original leaked it on a read error), and
            # avoid shadowing the builtin name "file".
            with open(filename, 'rb') as input_file:
                file_data = input_file.read()
            # Append the metadata and then the read file data.
            self.data.append('--' + self.boundary)
            self.data.append('Content-Disposition: form-data; name="{0}"; '
                             'filename="{1}"'.format(name, filename))
            self.data.append('Content-Type: {0}'.format(
                self._GetContentType(filename)))
            self.data.append('')
            self.data.append(file_data)
        except IOError as e:
            raise error.InternalError('I/O error while reading file "{0}": '
                                      '{1}.\n'.format(filename, e))

    def AddString(self, name, value):
        """Add a string value to this multipart data.

        Args:
          name: Name of the element to add to the multipart data.
          value: String with the contents to add to the multipart data.
        """
        # Append the field metadata, a blank separator line, then the value.
        self.data.append('--' + self.boundary)
        self.data.append('Content-Disposition: form-data; name="{0}"'.format(name))
        self.data.append('')
        self.data.append(str(value))
| apache-2.0 |
geekaia/edx-platform | common/lib/xmodule/xmodule/errortracker.py | 74 | 1472 | import logging
import sys
import traceback
from collections import namedtuple
log = logging.getLogger(__name__)

# (tracker, errors): ``tracker`` is a callable that records into ``errors``.
ErrorLog = namedtuple('ErrorLog', 'tracker errors')


def exc_info_to_str(exc_info):
    """Render an (exc_type, value, traceback) triple as one string,
    using the traceback.format_exception() function.
    """
    formatted_lines = traceback.format_exception(*exc_info)
    return ''.join(formatted_lines)


def in_exception_handler():
    '''Return True when an exception is currently being handled.'''
    return sys.exc_info() != (None, None, None)


def make_error_tracker():
    '''Return an ErrorLog (named tuple) with fields (tracker, errors).

    Every call to ``tracker(msg)`` appends a ``(msg, exception_str)`` tuple
    to the shared ``errors`` list; ``exception_str`` is the formatted active
    exception (empty string when none is active), in the format produced by
    traceback.format_exception.  The list is plain — if the caller modifies
    it, info will be lost.
    '''
    recorded = []

    def track(msg):
        '''Append msg plus any active exception text to the shared list.'''
        exc_text = exc_info_to_str(sys.exc_info()) if in_exception_handler() else ''
        # Don't display the irrelevant gunicorn sync-worker error.
        if ('python2.7/site-packages/gunicorn/workers/sync.py' in exc_text
                and '[Errno 11] Resource temporarily unavailable' in exc_text):
            exc_text = ''
        recorded.append((msg, exc_text))

    return ErrorLog(track, recorded)
def null_error_tracker(msg):
    '''Error tracker that silently discards every message (no-op).'''
    return None
| agpl-3.0 |
ecreall/nova-ideo | novaideo/testing.py | 1 | 4835 | # Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import pytz
import unittest
from pyramid import testing
try:
    from pyramid_robot.layer import Layer
except ImportError:
    # pyramid_robot is an optional test dependency; fall back to an empty
    # stand-in class so RobotLayer (defined below) can still be declared.
    class Layer():
        pass
from substanced.db import root_factory
from dace.subscribers import stop_ioloop
from novaideo import searchable_contents
class BaseFunctionalTests(object):
    """Shared fixture: boots a throw-away novaideo app on a temp ZODB."""

    def setUp(self):
        """Create a temporary ZODB-backed application and a dummy request."""
        import tempfile
        import os.path
        self.tmpdir = tempfile.mkdtemp()
        dbpath = os.path.join(self.tmpdir, 'test.db')
        # file:// URI pointing blob storage at the same temp directory.
        uri = 'file://' + dbpath + '?blobstorage_dir=' + self.tmpdir
        settings = {'zodbconn.uri': uri,
                    'sms.service': 'pyramid_sms.ovh.OvhService',
                    'substanced.secret': 'sosecret',
                    'substanced.initial_login': 'admin',
                    'substanced.initial_password': 'admin',
                    'novaideo.secret': 'seekri1',
                    'substanced.uploads_tempdir': self.tmpdir,
                    'mail.default_sender': 'admin@example.com',
                    'pyramid.includes': [
                        'substanced',
                        'pyramid_chameleon',
                        'pyramid_layout',
                        'pyramid_mailer.testing',  # has to be after substanced to override the mailer
                        'pyramid_tm',
                        'dace',
                        'pontus',
                        'daceui'
                    ]}
        testing.setUp()
        from novaideo import main
        self.app = app = main({}, **settings)
        self.db = app.registry._zodb_databases['']
        self.request = request = testing.DummyRequest()
        self.request.invalidate_cache = True
        # Re-run setUp with the real registry so config/registry match the app.
        self.config = testing.setUp(registry=app.registry, request=request)
        self.registry = self.config.registry
        from .catalog import (
            NovaideoIndexes, Text,
            Lexicon, Splitter,
            CaseNormalizer, StopWordRemover)
        # lexicon is a persistent object, we need to be sure it's a fresh one
        # between tests
        NovaideoIndexes.relevant_data = Text(
            lexicon=Lexicon(Splitter(), CaseNormalizer(), StopWordRemover()))
        self.root = root_factory(request)
        request.root = self.root

    def tearDown(self):
        """Stop the ioloop, close the DB and remove the temp directory."""
        stop_ioloop()
        import shutil
        testing.tearDown()
        self.db.close()
        shutil.rmtree(self.tmpdir)
class FunctionalTests(BaseFunctionalTests, unittest.TestCase):
    """unittest flavour of the functional fixture, with per-test site configs."""

    def setUp(self):
        super(FunctionalTests, self).setUp()

    def default_novaideo_config(self):
        """Baseline site config: support enabled, no moderation/examination."""
        self.request.get_time_zone = pytz.timezone('Europe/Paris')
        self.request.moderate_ideas = False
        self.request.moderate_proposals = False
        self.request.examine_ideas = False
        self.request.examine_proposals = False
        self.request.support_ideas = True
        self.request.support_proposals = True
        self.request.root.content_to_support = ['idea', 'proposal']
        self.request.content_to_examine = []
        self.request.content_to_support = ['idea', 'proposal']
        self.request.accessible_to_anonymous = True
        self.request.content_to_manage = [
            'question', 'idea', 'proposal']
        self.request.root.content_to_manage = [
            'challenge', 'question', 'idea', 'proposal']
        self.request.searchable_contents = searchable_contents(
            self.request)
        # Run the tests as the built-in admin user (without an email address).
        self.request.user = self.request.root['principals']['users']['admin']
        self.request.user.email = None

    def moderation_novaideo_config(self):
        """Baseline config plus moderation of ideas and proposals."""
        self.default_novaideo_config()
        self.request.moderate_ideas = True
        self.request.moderate_proposals = True
        self.request.root.content_to_moderate = ['idea', 'proposal']

    def no_support_novaideo_config(self):
        """Baseline config with support switched off entirely."""
        self.default_novaideo_config()
        self.request.support_ideas = False
        self.request.support_proposals = False
        self.request.root.content_to_support = []

    def examination_novaideo_config(self):
        """Baseline config plus examination of ideas and proposals."""
        self.default_novaideo_config()
        self.request.examine_ideas = True
        self.request.examine_proposals = True
        self.request.content_to_examine = ['idea', 'proposal']
        self.request.root.content_to_examine = ['idea', 'proposal']
class RobotLayer(BaseFunctionalTests, Layer):
    """pyramid_robot layer: same fixture, served over HTTP for robot tests."""

    defaultBases = ()

    def setUp(self):
        super(RobotLayer, self).setUp()
        from webtest import http
        # Serve the app on a fixed port so robot tests can reach it.
        self.server = http.StopableWSGIServer.create(self.app, port=8080)

    def tearDown(self):
        super(RobotLayer, self).tearDown()
        self.server.shutdown()


# Singleton layer instance used by the robot test suite.
ROBOT_LAYER = RobotLayer()
| agpl-3.0 |
xguse/ete | ete_dev/__init__.py | 1 | 1200 | # #START_LICENSE###########################################################
#
#
#
# #END_LICENSE#############################################################
# Note that the use of "from x import *" is safe here. Modules include
# the __all__ variable.
from sys import stderr
from coretype.tree import *
from coretype.seqgroup import *
from phylo.phylotree import *
from webplugin.webapp import *
from phyloxml import Phyloxml, PhyloxmlTree
from nexml import Nexml, NexmlTree
# Optional subsystems: each import below is tolerated to fail so the core
# package stays usable when the corresponding dependency is missing.
try:
    from coretype.arraytable import *
except ImportError, e:
    print >>stderr, "Clustering module could not be loaded"
    print e
else:
    from clustering.clustertree import *
try:
    from phylomedb.phylomeDB3 import *
except ImportError, e:
    print >>stderr, " MySQLdb module could not be loaded"
    print e
try:
    from treeview.main import *
    from treeview.faces import *
    from treeview import faces
    from treeview import layouts
    from treeview.svg_colors import SVG_COLORS
except ImportError, e:
    # Treeview needs a working Qt stack; skip GUI features without it.
    print >>stderr, "Treeview module could not be loaded"
    print e
# Do not modify the following line. It will be checked during
# installation
__ETEID__="643cc3270c842b3cf4990861a1126060"
| gpl-3.0 |
linked67/p2pool-sfr-scrypt | nattraverso/utils.py | 288 | 1563 | """
Various utility functions used in the nattraverso package.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
def is_rfc1918_ip(ip):
    """
    Checks if the given ip address is a rfc1918 (private LAN) one.

    @param ip: The ip address to test
    @type ip: a string "x.x.x.x" (a packed number is also accepted)
    @return: True if it's a LAN address, False otherwise
    """
    if isinstance(ip, basestring):
        ip = _ip_to_number(ip)
    # Match the address against every known private network/mask pair.
    return any(ip & mask == net for net, mask in _nets)
def is_bogus_ip(ip):
    """
    Checks if the given ip address is bogus, i.e. 0.x.x.x or 127.x.x.x.

    @param ip: The ip address to test
    @type ip: a string "x.x.x.x"
    @return: True if it's bogus, False otherwise
    """
    # startswith accepts a tuple of prefixes: one call covers both cases.
    return ip.startswith(('0.', '127.'))
def _ip_to_number(ipstr):
    """
    Translate a string ip address to a packed number.

    @param ipstr: the ip address to transform
    @type ipstr: a string "x.x.x.x"
    @return: an int32 number representing the ip address
    """
    # Pad with zeros so short forms like "10" or "172.16" parse as full
    # network addresses ("10.0.0.0", "172.16.0.0").
    net = [ int(digit) for digit in ipstr.split('.') ] + [ 0, 0, 0 ]
    net = net[:4]
    # Fold the four octets big-endian into a single (long) integer.
    return ((((((0L+net[0])<<8) + net[1])<<8) + net[2])<<8) +net[3]
# List of rfc1918 net/mask (plus the 127/8 loopback block).
_rfc1918_networks = [('127', 8), ('192.168', 16), ('10', 8), ('172.16', 12)]
# Machine readable form of the above: (network-number, netmask) pairs,
# with the mask built as "high `mask` bits set" of a 32-bit word.
_nets = [(_ip_to_number(net), (2L**32 -1)^(2L**(32-mask)-1))
         for net, mask in _rfc1918_networks]
| gpl-3.0 |
rdelval/aurora | src/test/python/apache/aurora/client/api/test_restarter.py | 5 | 4562 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mox import IgnoreArg, MoxTestBase
from apache.aurora.client.api.instance_watcher import InstanceWatcher
from apache.aurora.client.api.restarter import Restarter, RestartSettings
from apache.aurora.common.aurora_job_key import AuroraJobKey
from ...api_util import SchedulerProxyApiSpec
from gen.apache.aurora.api.ttypes import (
AssignedTask,
Response,
ResponseCode,
ResponseDetail,
Result,
ScheduledTask,
ScheduleStatus,
ScheduleStatusResult,
ServerInfo,
TaskConfig
)
# Shared fixtures for the restarter tests.
CLUSTER = 'east'
JOB = AuroraJobKey(CLUSTER, 'johndoe', 'test', 'test_job')
# Restart two instances per batch; fail fast (no tolerated failures).
RESTART_SETTINGS = RestartSettings(
    batch_size=2,
    watch_secs=45,
    max_per_instance_failures=0,
    max_total_failures=0,
    health_check_interval_seconds=5)
def make_response(code=ResponseCode.OK, message='test', result=None):
    """Build a thrift Response carrying a single detail message."""
    detail = ResponseDetail(message=message)
    server_info = ServerInfo(clusterName='test')
    return Response(
        responseCode=code,
        details=[detail],
        result=result,
        serverInfo=server_info)
class TestRestarter(MoxTestBase):
    """Mox-based tests for Restarter; mock call order is significant."""

    def setUp(self):
        """Build a Restarter over mocked scheduler proxy and instance watcher."""
        super(TestRestarter, self).setUp()
        self.mock_instance_watcher = self.mox.CreateMock(InstanceWatcher)
        self.mock_scheduler_proxy = self.mox.CreateMock(SchedulerProxyApiSpec)
        self.restarter = Restarter(
            JOB,
            RESTART_SETTINGS,
            self.mock_scheduler_proxy,
            self.mock_instance_watcher)

    def mock_restart_instances(self, instances):
        """Expect one successful restartShards + watch cycle for *instances*."""
        self.mock_scheduler_proxy.restartShards(JOB.to_thrift(), instances, retry=True).AndReturn(
            make_response())
        # Empty return value means no instance failed the watch.
        self.mock_instance_watcher.watch(instances).AndReturn([])

    def test_restart_one_iteration(self):
        """A single batch restarts exactly the requested instances."""
        self.mock_status_active_tasks([0, 1, 3, 4, 5])
        self.mock_restart_instances([0, 1])
        self.mox.ReplayAll()
        self.restarter.restart([0, 1])

    def mock_three_iterations(self):
        """Expect three batches covering instances 0,1 / 3,4 / 5 (batch_size=2)."""
        self.mock_restart_instances([0, 1])
        self.mock_restart_instances([3, 4])
        self.mock_restart_instances([5])

    def test_rolling_restart(self):
        """All requested instances are restarted in batch_size groups."""
        self.mock_status_active_tasks([0, 1, 3, 4, 5])
        self.mock_three_iterations()
        self.mox.ReplayAll()
        self.restarter.restart([0, 1, 3, 4, 5])

    def mock_status_active_tasks(self, instance_ids):
        """Expect a status query returning one RUNNING task per instance id."""
        tasks = []
        for i in instance_ids:
            tasks.append(ScheduledTask(
                status=ScheduleStatus.RUNNING,
                assignedTask=AssignedTask(task=TaskConfig(), instanceId=i)
            ))
        response = make_response(result=Result(scheduleStatusResult=ScheduleStatusResult(tasks=tasks)))
        self.mock_scheduler_proxy.getTasksWithoutConfigs(IgnoreArg(), retry=True).AndReturn(response)

    def test_restart_all_instances(self):
        """restart(None) discovers active instances and restarts them all."""
        self.mock_status_active_tasks([0, 1, 3, 4, 5])
        self.mock_three_iterations()
        self.mox.ReplayAll()
        self.restarter.restart(None)

    def mock_status_no_active_task(self):
        """Expect a status query that fails with INVALID_REQUEST."""
        response = make_response(code=ResponseCode.INVALID_REQUEST)
        self.mock_scheduler_proxy.getTasksWithoutConfigs(IgnoreArg(), retry=True).AndReturn(response)

    def test_restart_no_instance_active(self):
        """With no active instances, restart() issues no restartShards call."""
        self.mock_status_no_active_task()
        self.mox.ReplayAll()
        self.restarter.restart(None)

    def mock_restart_fails(self):
        """Expect restartShards to fail with an ERROR response."""
        response = make_response(code=ResponseCode.ERROR, message='test error')
        self.mock_scheduler_proxy.restartShards(JOB.to_thrift(), IgnoreArg(), retry=True).AndReturn(
            response)

    def test_restart_instance_fails(self):
        """A failing restartShards call surfaces as an ERROR response."""
        self.mock_status_active_tasks([0, 1])
        self.mock_restart_fails()
        self.mox.ReplayAll()
        assert self.restarter.restart(None).responseCode == ResponseCode.ERROR

    def mock_restart_watch_fails(self, instances):
        """Expect a successful restart whose watch reports all instances failed."""
        self.mock_scheduler_proxy.restartShards(JOB.to_thrift(), instances, retry=True).AndReturn(
            make_response())
        self.mock_instance_watcher.watch(instances).AndReturn(instances)

    def test_restart_instances_watch_fails(self):
        """Watch failures stop the rolling restart after the first batch."""
        instances = [0, 1]
        self.mock_status_active_tasks(instances)
        self.mock_restart_watch_fails(instances)
        self.mox.ReplayAll()
        self.restarter.restart(None)
| apache-2.0 |
mbjadhav/AliPhysics | PWGJE/EMCALJetTasks/Tracks/analysis/base/MonteCarloFileHandler.py | 41 | 4856 | #**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
"""
Handler module for Monte-Carlo output from the ALICE Lego Trains. Lego train output
can be min. bias events or productions in pt-hat bins
@author: Markus Fasel
"""
from PWGJE.EMCALJetTasks.Tracks.analysis.base.WeightHandler import WeightHandler
from PWGJE.EMCALJetTasks.Tracks.analysis.base.FileHandler import LegoTrainFileReader
from PWGJE.EMCALJetTasks.Tracks.analysis.base.SpectraSum import SpectraSum
class MonteCarloDataCollection(object):
    """
    Collection of Monte-Carlo based outputs
    """

    def __init__(self, isPtHat = False):
        """
        Constructor

        When isPtHat is True a WeightHandler is created so per-bin results
        can be reweighted by cross section / trials.
        """
        self.__weighthandler = None
        if isPtHat:
            self.__weighthandler = WeightHandler()
        self.__data = {"All":None}

    def AddData(self, results, pthatbin = -1, weightdata = None):
        """
        Add new data (with or without pthat bins)

        NOTE(review): for pthatbin >= 0 this assumes the collection was built
        with isPtHat=True and that weightdata carries "crosssection" and
        "trials" keys — otherwise this raises. Confirm with callers.
        """
        if pthatbin >= 0:
            self.__data[pthatbin] = results
            self.__weighthandler.AddPtHatBin(pthatbin, weightdata["crosssection"], weightdata["trials"])
        else:
            self.__data["All"] = results

    def GetData(self, pthatbin = -1):
        """
        Access to data (if necessary in a given pt-hat bin)
        """
        if pthatbin >= 0:
            return self.__data[pthatbin]
        return self.__data["All"]

    def GetWeigthHandler(self):
        """
        Access to the weight handler (None when built without pt-hat bins).
        """
        return self.__weighthandler

    def SumWeightedData(self):
        """
        Sum weighted containers from the different pthat bins.

        Returns None (after printing a warning) when no weight handler exists.
        """
        if not self.__weighthandler:
            print "No weight handler"
            return None
        summer = SpectraSum()
        for pthatbin in self.__data.keys():
            # "All" holds unbinned data and is excluded from the weighted sum.
            if pthatbin == "All":
                continue
            # Reweighting mutates the stored spectrum before it is summed.
            self.__weighthandler.ReweightSpectrum(pthatbin, self.__data[pthatbin])
            summer.AddSpectrum(self.__data[pthatbin])
        return summer.GetSummedSpectrum()
class MonteCarloFileHandler(object):
    """
    Reads one file or a set of Monte-Carlo lego-train files into a
    MonteCarloDataCollection.
    """

    def __init__(self, hasPtHardBins = False):
        """Create the backing collection (pt-hard-bin aware when requested)."""
        self.__datacollection = MonteCarloDataCollection(hasPtHardBins)
        self.__histlist = ""

    def GetCollection(self):
        """Expose the underlying data collection."""
        return self.__datacollection

    def AddFile(self, filename, pthatbin = -1, isNew = True):
        """Read one lego-train file; pthatbin >= 0 also extracts weights."""
        reader = LegoTrainFileReader(filename, isMC = True, isNew = isNew)
        if pthatbin >= 0:
            reader.SetReadWeights()
        results = reader.ReadFile()
        weights = reader.GetWeightHistograms()
        self.__datacollection.AddData(results, pthatbin, weights)
class MonteCarloFileMerger(object):
    """
    Merges Monte-Carlo files from pt-hat bins, weighted by cross section.
    """

    def __init__(self):
        """Set up the file handler in pt-hard-bin mode."""
        self.__reader = MonteCarloFileHandler(True)

    def AddFile(self, filename, pthatbin):
        """Register the output file of one pt-hat bin."""
        self.__reader.AddFile(filename, pthatbin)

    def MergeAndWrite(self, outputfile):
        """Sum the weighted spectra of all registered bins and write them."""
        merged = self.__reader.GetCollection().SumWeightedData()
        merged.Write(outputfile)
def MergePtHardBins(outputfile, basedir, firstbin, lastbin):
    """
    Merge files from different pt-hard bins (inclusive range firstbin..lastbin),
    weighted by the cross section, into one output file.
    """
    merger = MonteCarloFileMerger()
    for binnumber in range(firstbin, lastbin + 1):
        # Each bin lives in a zero-padded subdirectory of basedir.
        inputfile = "%s/%02d/AnalysisResults.root" % (basedir, binnumber)
        merger.AddFile(inputfile, binnumber)
    merger.MergeAndWrite(outputfile)
| bsd-3-clause |
ghtmtt/QGIS | python/plugins/db_manager/db_plugins/info_model.py | 33 | 18079 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import object
from qgis.PyQt.QtWidgets import QApplication
from .html_elems import HtmlContent, HtmlSection, HtmlParagraph, HtmlList, HtmlTable, HtmlTableHeader, HtmlTableCol
class DatabaseInfo(object):
    """Builds the HTML information page for a database connection."""

    def __init__(self, db):
        self.db = db

    def __del__(self):
        self.db = None

    def generalInfo(self):
        """Return an HtmlTable with the server version."""
        info = self.db.connector.getInfo()
        tbl = [
            (QApplication.translate("DBManagerPlugin", "Server version: "), info[0])
        ]
        return HtmlTable(tbl)

    def connectionDetails(self):
        """Return an HtmlTable with host and user of the connection."""
        tbl = [
            (QApplication.translate("DBManagerPlugin", "Host:"), self.db.connector.host),
            (QApplication.translate("DBManagerPlugin", "User:"), self.db.connector.user)
        ]
        return HtmlTable(tbl)

    def spatialInfo(self):
        """Return spatial-extension info, or None when unavailable."""
        ret = []
        info = self.db.connector.getSpatialInfo()
        if info is None:
            return
        tbl = [
            (QApplication.translate("DBManagerPlugin", "Library:"), info[0]),
            ("GEOS:", info[1]),
            ("Proj:", info[2])
        ]
        ret.append(HtmlTable(tbl))
        # Warn when the geometry_columns metadata table is missing.
        if not self.db.connector.has_geometry_columns:
            ret.append(HtmlParagraph(
                QApplication.translate("DBManagerPlugin", "<warning> geometry_columns table doesn't exist!\n"
                                                          "This table is essential for many GIS applications for enumeration of tables.")))
        return ret

    def privilegesDetails(self):
        """Return an HtmlList of database-level privileges of the user."""
        details = self.db.connector.getDatabasePrivileges()
        lst = []
        if details[0]:
            lst.append(QApplication.translate("DBManagerPlugin", "create new schemas"))
        if details[1]:
            lst.append(QApplication.translate("DBManagerPlugin", "create temporary tables"))
        return HtmlList(lst)

    def toHtml(self):
        """Assemble the full info page; each section is skipped when None."""
        if self.db is None:
            return HtmlSection(QApplication.translate("DBManagerPlugin", 'Not connected')).toHtml()
        ret = []
        # connection details
        conn_details = self.connectionDetails()
        if conn_details is None:
            pass
        else:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Connection details'), conn_details))
        # database information
        general_info = self.generalInfo()
        if general_info is None:
            pass
        else:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'General info'), general_info))
        # has spatial enabled?
        spatial_info = self.spatialInfo()
        if spatial_info is None:
            pass
        else:
            typename = self.db.connection().typeNameString()
            spatial_info = HtmlContent(spatial_info)
            if not spatial_info.hasContents():
                spatial_info = QApplication.translate("DBManagerPlugin", '<warning> {0} support not enabled!').format(typename)
            ret.append(HtmlSection(typename, spatial_info))
        # privileges
        priv_details = self.privilegesDetails()
        if priv_details is None:
            pass
        else:
            priv_details = HtmlContent(priv_details)
            if not priv_details.hasContents():
                priv_details = QApplication.translate("DBManagerPlugin", '<warning> This user has no privileges!')
            else:
                priv_details = [QApplication.translate("DBManagerPlugin", "User has privileges:"), priv_details]
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Privileges'), priv_details))
        return HtmlContent(ret).toHtml()
class SchemaInfo(object):
    """Builds an HTML report describing a database schema."""

    def __init__(self, schema):
        self.schema = schema

    def __del__(self):
        # Drop the reference so the schema object can be released.
        self.schema = None

    def generalInfo(self):
        """Return an HtmlTable with the schema's owner and comment, when present."""
        rows = []
        if self.schema.owner:
            rows.append((QApplication.translate("DBManagerPlugin", "Owner:"), self.schema.owner))
        if self.schema.comment:
            rows.append((QApplication.translate("DBManagerPlugin", "Comment:"), self.schema.comment))
        return HtmlTable(rows)

    def privilegesDetails(self):
        """Return an HtmlList naming this user's privileges on the schema."""
        can_create, can_access = self.schema.database().connector.getSchemaPrivileges(self.schema.name)[:2]
        items = []
        if can_create:
            items.append(QApplication.translate("DBManagerPlugin", "create new objects"))
        if can_access:
            items.append(QApplication.translate("DBManagerPlugin", "access objects"))
        return HtmlList(items)

    def toHtml(self):
        """Assemble the schema's HTML report (details + privileges sections)."""
        sections = []

        general_info = self.generalInfo()
        if general_info is not None:
            sections.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Schema details'), general_info))

        priv_details = self.privilegesDetails()
        if priv_details is not None:
            priv_details = HtmlContent(priv_details)
            if not priv_details.hasContents():
                priv_details = QApplication.translate("DBManagerPlugin",
                                                      '<warning> This user has no privileges to access this schema!')
            else:
                priv_details = [QApplication.translate("DBManagerPlugin", "User has privileges:"), priv_details]
            sections.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Privileges'), priv_details))

        return HtmlContent(sections).toHtml()
class TableInfo(object):
    """Builds the HTML information report for a single table or view.

    Subclasses (vector/raster) override spatialInfo() to contribute
    geometry-specific sections.
    """

    def __init__(self, table):
        self.table = table

    def __del__(self):
        # Drop the reference so the table object can be released.
        self.table = None

    def generalInfo(self):
        """Return an HtmlTable with relation type, row count and comment."""
        if self.table.rowCount is None:
            # row count information is not displayed yet, so just block
            # table signals to avoid double refreshing (infoViewer->refreshRowCount->tableChanged->infoViewer)
            self.table.blockSignals(True)
            self.table.refreshRowCount()
            self.table.blockSignals(False)
        tbl = [
            (QApplication.translate("DBManagerPlugin", "Relation type:"),
             QApplication.translate("DBManagerPlugin", "View") if self.table.isView else QApplication.translate(
                 "DBManagerPlugin", "Table")),
            (QApplication.translate("DBManagerPlugin", "Rows:"),
             self.table.rowCount if self.table.rowCount is not None else QApplication.translate(
                 "DBManagerPlugin", 'Unknown (<a href="action:rows/count">find out</a>)'))
        ]
        if self.table.comment:
            tbl.append((QApplication.translate("DBManagerPlugin", "Comment:"), self.table.comment))
        return HtmlTable(tbl)

    def spatialInfo(self):  # implemented in subclasses
        """Return spatial metadata items; the base class has none."""
        return None

    def fieldsDetails(self):
        """Return an HtmlTable listing fields (primary-key names underlined)."""
        tbl = []
        # define the table header
        header = (
            "#", QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"),
            QApplication.translate("DBManagerPlugin", "Null"), QApplication.translate("DBManagerPlugin", "Default"))
        tbl.append(HtmlTableHeader(header))
        # add table contents
        for fld in self.table.fields():
            is_null_txt = "N" if fld.notNull else "Y"
            # make primary key field underlined
            attrs = {"class": "underline"} if fld.primaryKey else None
            name = HtmlTableCol(fld.name, attrs)
            tbl.append((fld.num, name, fld.type2String(), is_null_txt, fld.default2String()))
        return HtmlTable(tbl, {"class": "header"})

    def constraintsDetails(self):
        """Return an HtmlTable of constraints, or None when there are none."""
        if self.table.constraints() is None or len(self.table.constraints()) <= 0:
            return None
        tbl = []
        # define the table header
        header = (QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"),
                  QApplication.translate("DBManagerPlugin", "Column(s)"))
        tbl.append(HtmlTableHeader(header))
        # add table contents
        for con in self.table.constraints():
            # get the fields the constraint is defined on
            cols = [p[1].name if p[1] is not None else u"??? (#%d)" % p[0] for p in iter(list(con.fields().items()))]
            tbl.append((con.name, con.type2String(), u'\n'.join(cols)))
        return HtmlTable(tbl, {"class": "header"})

    def indexesDetails(self):
        """Return an HtmlTable of indexes, or None when there are none."""
        if self.table.indexes() is None or len(self.table.indexes()) <= 0:
            return None
        tbl = []
        # define the table header
        header = (
            QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Column(s)"))
        tbl.append(HtmlTableHeader(header))
        # add table contents
        for idx in self.table.indexes():
            # get the fields the index is defined on
            cols = [p[1].name if p[1] is not None else u"??? (#%d)" % p[0] for p in iter(list(idx.fields().items()))]
            tbl.append((idx.name, u'\n'.join(cols)))
        return HtmlTable(tbl, {"class": "header"})

    def triggersDetails(self):
        """Return an HtmlTable of triggers (with a delete action link), or None."""
        if self.table.triggers() is None or len(self.table.triggers()) <= 0:
            return None
        tbl = []
        # define the table header
        header = (
            QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Function"))
        tbl.append(HtmlTableHeader(header))
        # add table contents
        for trig in self.table.triggers():
            name = u'%(name)s (<a href="action:trigger/%(name)s/%(action)s">%(action)s</a>)' % {"name": trig.name,
                                                                                                "action": "delete"}
            tbl.append((name, trig.function))
        return HtmlTable(tbl, {"class": "header"})

    def getViewDefinition(self):
        """Return the SQL definition for views; None for regular tables."""
        if not self.table.isView:
            return None
        return self.table.database().connector.getViewDefinition((self.table.schemaName(), self.table.name))

    def getTableInfo(self):
        """Collect the list of HtmlSections shared by tables and views."""
        ret = []
        general_info = self.generalInfo()
        if general_info is None:
            pass
        else:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'General info'), general_info))
        # spatial info
        spatial_info = self.spatialInfo()
        if spatial_info is None:
            pass
        else:
            spatial_info = HtmlContent(spatial_info)
            if not spatial_info.hasContents():
                spatial_info = QApplication.translate("DBManagerPlugin", '<warning> This is not a spatial table.')
            ret.append(HtmlSection(self.table.database().connection().typeNameString(), spatial_info))
        # fields
        fields_details = self.fieldsDetails()
        if fields_details is None:
            pass
        else:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Fields'), fields_details))
        # constraints
        constraints_details = self.constraintsDetails()
        if constraints_details is None:
            pass
        else:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Constraints'), constraints_details))
        # indexes
        indexes_details = self.indexesDetails()
        if indexes_details is None:
            pass
        else:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Indexes'), indexes_details))
        # triggers
        triggers_details = self.triggersDetails()
        if triggers_details is None:
            pass
        else:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Triggers'), triggers_details))
        return ret

    def getViewInfo(self):
        """Like getTableInfo() but with the view definition section appended."""
        if not self.table.isView:
            return []
        ret = self.getTableInfo()
        # view definition
        view_def = self.getViewDefinition()
        if view_def is None:
            pass
        else:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'View definition'), view_def))
        return ret

    def toHtml(self):
        """Render the complete HTML report for this table or view."""
        if self.table.isView:
            ret = self.getViewInfo()
        else:
            ret = self.getTableInfo()
        return HtmlContent(ret).toHtml()
class VectorTableInfo(TableInfo):
    """TableInfo specialisation that renders geometry metadata for vector tables."""

    def __init__(self, table):
        TableInfo.__init__(self, table)

    def spatialInfo(self):
        """Return HTML items describing geometry column, SRS, extents and index.

        Returns an empty list when the table has no geometry type.
        """
        ret = []
        if self.table.geomType is None:
            return ret
        tbl = [
            (QApplication.translate("DBManagerPlugin", "Column:"), self.table.geomColumn),
            (QApplication.translate("DBManagerPlugin", "Geometry:"), self.table.geomType)
        ]
        # only if we have info from geometry_columns
        if self.table.geomDim:
            tbl.append((QApplication.translate("DBManagerPlugin", "Dimension:"), self.table.geomDim))
        # srid None/0 is normalized to -1, which skips the spatial-ref lookup
        srid = self.table.srid if self.table.srid not in (None, 0) else -1
        sr_info = self.table.database().connector.getSpatialRefInfo(srid) if srid != -1 else QApplication.translate(
            "DBManagerPlugin", "Undefined")
        if sr_info:
            tbl.append((QApplication.translate("DBManagerPlugin", "Spatial ref:"), u"%s (%d)" % (sr_info, srid)))
        # estimated extent
        if not self.table.isView:
            if self.table.estimatedExtent is None:
                # estimated extent information is not displayed yet, so just block
                # table signals to avoid double refreshing (infoViewer->refreshEstimatedExtent->tableChanged->infoViewer)
                self.table.blockSignals(True)
                self.table.refreshTableEstimatedExtent()
                self.table.blockSignals(False)
            if self.table.estimatedExtent is not None and self.table.estimatedExtent[0] is not None:
                # extent may arrive either as a flat list or a 4-tuple
                if isinstance(self.table.estimatedExtent, list):
                    estimated_extent_str = ', '.join('%.5f' % e for e in self.table.estimatedExtent)
                else:
                    estimated_extent_str = '%.5f, %.5f - %.5f, %.5f' % self.table.estimatedExtent
                tbl.append((QApplication.translate("DBManagerPlugin", "Estimated extent:"), estimated_extent_str))
        # extent
        if self.table.extent is not None and self.table.extent[0] is not None:
            if isinstance(self.table.extent, list):
                extent_str = ', '.join('%.5f' % e for e in self.table.extent)
            else:
                extent_str = '%.5f, %.5f - %.5f, %.5f' % self.table.extent
        else:
            extent_str = QApplication.translate("DBManagerPlugin",
                                                '(unknown) (<a href="action:extent/get">find out</a>)')
        tbl.append((QApplication.translate("DBManagerPlugin", "Extent:"), extent_str))
        ret.append(HtmlTable(tbl))
        # is there an entry in geometry_columns?
        if self.table.geomType.lower() == 'geometry':
            ret.append(HtmlParagraph(
                QApplication.translate("DBManagerPlugin", "<warning> There is no entry in geometry_columns!")))
        # find out whether the geometry column has spatial index on it
        if not self.table.isView:
            if not self.table.hasSpatialIndex():
                ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
                                                                '<warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)')))
        return ret
class RasterTableInfo(TableInfo):
    """TableInfo specialisation that renders spatial metadata for raster tables."""

    def __init__(self, table):
        TableInfo.__init__(self, table)

    def spatialInfo(self):
        """Return HTML items describing the raster column, spatial reference and extent."""
        items = []
        if self.table.geomType is None:
            return items

        rows = [
            (QApplication.translate("DBManagerPlugin", "Column:"), self.table.geomColumn),
            (QApplication.translate("DBManagerPlugin", "Geometry:"), self.table.geomType)
        ]

        # spatial reference (only if we have info from geometry_columns)
        srid = self.table.srid if self.table.srid is not None else -1
        if srid != -1:
            sr_info = self.table.database().connector.getSpatialRefInfo(srid)
        else:
            sr_info = QApplication.translate("DBManagerPlugin", "Undefined")
        if sr_info:
            rows.append((QApplication.translate("DBManagerPlugin", "Spatial ref:"), u"%s (%d)" % (sr_info, srid)))

        # extent (offer the lookup action when unknown)
        if self.table.extent is not None and self.table.extent[0] is not None:
            extent_str = '%.5f, %.5f - %.5f, %.5f' % self.table.extent
        else:
            extent_str = QApplication.translate("DBManagerPlugin",
                                                '(unknown) (<a href="action:extent/get">find out</a>)')
        rows.append((QApplication.translate("DBManagerPlugin", "Extent:"), extent_str))

        items.append(HtmlTable(rows))
        return items
"""Default tags used by the template system, available to all templates."""
import re
import sys
import warnings
from collections import namedtuple
from datetime import datetime
from itertools import cycle as itertools_cycle, groupby
from django.conf import settings
from django.utils import timezone
from django.utils.html import conditional_escape, format_html
from django.utils.lorem_ipsum import paragraphs, words
from django.utils.safestring import mark_safe
from .base import (
BLOCK_TAG_END, BLOCK_TAG_START, COMMENT_TAG_END, COMMENT_TAG_START,
FILTER_SEPARATOR, SINGLE_BRACE_END, SINGLE_BRACE_START,
VARIABLE_ATTRIBUTE_SEPARATOR, VARIABLE_TAG_END, VARIABLE_TAG_START,
Context, Node, NodeList, TemplateSyntaxError, VariableDoesNotExist,
kwarg_re, render_value_in_context, token_kwargs,
)
from .defaultfilters import date
from .library import Library
from .smartif import IfParser, Literal
register = Library()
class AutoEscapeControlNode(Node):
    """Implement the actions of the autoescape tag."""

    def __init__(self, setting, nodelist):
        self.setting = setting
        self.nodelist = nodelist

    def render(self, context):
        # Render the body with autoescaping forced to `setting`, then restore
        # the previous value.
        previous = context.autoescape
        context.autoescape = self.setting
        rendered = self.nodelist.render(context)
        context.autoescape = previous
        return mark_safe(rendered) if self.setting else rendered
class CommentNode(Node):
    """Node for {% comment %}: always renders to the empty string."""

    def render(self, context):
        return ''
class CsrfTokenNode(Node):
    """Render the hidden CSRF input for the {% csrf_token %} tag."""

    def render(self, context):
        csrf_token = context.get('csrf_token')
        if not csrf_token:
            # It's very probable that the token is missing because of
            # misconfiguration, so we raise a warning
            if settings.DEBUG:
                warnings.warn(
                    "A {% csrf_token %} was used in a template, but the context "
                    "did not provide the value. This is usually caused by not "
                    "using RequestContext."
                )
            return ''
        if csrf_token == 'NOTPROVIDED':
            return format_html("")
        return format_html('<input type="hidden" name="csrfmiddlewaretoken" value="{}">', csrf_token)
class CycleNode(Node):
    """Cycle through a fixed list of values, advancing once per render."""

    def __init__(self, cyclevars, variable_name=None, silent=False):
        self.cyclevars = cyclevars
        self.variable_name = variable_name
        self.silent = silent

    def render(self, context):
        if self not in context.render_context:
            # First time the node is rendered in template
            context.render_context[self] = itertools_cycle(self.cyclevars)
        value = next(context.render_context[self]).resolve(context)
        if self.variable_name:
            context.set_upward(self.variable_name, value)
        if self.silent:
            return ''
        return render_value_in_context(value, context)

    def reset(self, context):
        """Reset the cycle iteration back to the beginning."""
        context.render_context[self] = itertools_cycle(self.cyclevars)
class DebugNode(Node):
    """Dump the context layers and loaded modules for the {% debug %} tag."""

    def render(self, context):
        from pprint import pformat
        pieces = [pformat(layer) for layer in context]
        pieces.append('\n\n')
        pieces.append(pformat(sys.modules))
        return ''.join(pieces)
class FilterNode(Node):
    """Render a block and pipe the result through a filter expression."""

    def __init__(self, filter_expr, nodelist):
        self.filter_expr = filter_expr
        self.nodelist = nodelist

    def render(self, context):
        rendered = self.nodelist.render(context)
        # Expose the rendered block as 'var' so the compiled "var|..." chain
        # can consume it.
        with context.push(var=rendered):
            return self.filter_expr.resolve(context)
class FirstOfNode(Node):
    """Output the first truthy variable for the {% firstof %} tag."""

    def __init__(self, variables, asvar=None):
        self.vars = variables
        self.asvar = asvar

    def render(self, context):
        first = ''
        for candidate in self.vars:
            resolved = candidate.resolve(context, ignore_failures=True)
            if resolved:
                first = render_value_in_context(resolved, context)
                break
        if self.asvar:
            # Store into the context instead of emitting output.
            context[self.asvar] = first
            return ''
        return first
class ForNode(Node):
    """Node for the {% for %} tag: iterate a sequence rendering the loop body.

    Renders ``nodelist_empty`` instead when the sequence is empty or missing.
    """

    child_nodelists = ('nodelist_loop', 'nodelist_empty')

    def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None):
        self.loopvars, self.sequence = loopvars, sequence
        self.is_reversed = is_reversed
        self.nodelist_loop = nodelist_loop
        if nodelist_empty is None:
            self.nodelist_empty = NodeList()
        else:
            self.nodelist_empty = nodelist_empty

    def __repr__(self):
        reversed_text = ' reversed' if self.is_reversed else ''
        return '<%s: for %s in %s, tail_len: %d%s>' % (
            self.__class__.__name__,
            ', '.join(self.loopvars),
            self.sequence,
            len(self.nodelist_loop),
            reversed_text,
        )

    def render(self, context):
        # Keep the enclosing loop's state so it can be exposed as
        # forloop.parentloop inside this loop.
        if 'forloop' in context:
            parentloop = context['forloop']
        else:
            parentloop = {}
        with context.push():
            values = self.sequence.resolve(context, ignore_failures=True)
            if values is None:
                values = []
            if not hasattr(values, '__len__'):
                # Materialize iterators/generators so len() and the reverse
                # counters below can be computed.
                values = list(values)
            len_values = len(values)
            if len_values < 1:
                return self.nodelist_empty.render(context)
            nodelist = []
            if self.is_reversed:
                values = reversed(values)
            num_loopvars = len(self.loopvars)
            unpack = num_loopvars > 1
            # Create a forloop value in the context. We'll update counters on each
            # iteration just below.
            loop_dict = context['forloop'] = {'parentloop': parentloop}
            for i, item in enumerate(values):
                # Shortcuts for current loop iteration number.
                loop_dict['counter0'] = i
                loop_dict['counter'] = i + 1
                # Reverse counter iteration numbers.
                loop_dict['revcounter'] = len_values - i
                loop_dict['revcounter0'] = len_values - i - 1
                # Boolean values designating first and last times through loop.
                loop_dict['first'] = (i == 0)
                loop_dict['last'] = (i == len_values - 1)
                pop_context = False
                if unpack:
                    # If there are multiple loop variables, unpack the item into
                    # them.
                    try:
                        len_item = len(item)
                    except TypeError:  # not an iterable
                        len_item = 1
                    # Check loop variable count before unpacking
                    if num_loopvars != len_item:
                        raise ValueError(
                            "Need {} values to unpack in for loop; got {}. "
                            .format(num_loopvars, len_item),
                        )
                    unpacked_vars = dict(zip(self.loopvars, item))
                    pop_context = True
                    context.update(unpacked_vars)
                else:
                    context[self.loopvars[0]] = item
                for node in self.nodelist_loop:
                    nodelist.append(node.render_annotated(context))
                if pop_context:
                    # Pop the loop variables pushed on to the context to avoid
                    # the context ending up in an inconsistent state when other
                    # tags (e.g., include and with) push data to context.
                    context.pop()
        return mark_safe(''.join(nodelist))
class IfChangedNode(Node):
    """Node for {% ifchanged %}: render the block only when its value changed.

    Comparison state is kept per rendering frame: the innermost forloop when
    inside a loop, otherwise the render_context.
    """

    child_nodelists = ('nodelist_true', 'nodelist_false')

    def __init__(self, nodelist_true, nodelist_false, *varlist):
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
        self._varlist = varlist

    def render(self, context):
        # Init state storage
        state_frame = self._get_context_stack_frame(context)
        state_frame.setdefault(self)
        nodelist_true_output = None
        if self._varlist:
            # Consider multiple parameters. This behaves like an OR evaluation
            # of the multiple variables.
            compare_to = [var.resolve(context, ignore_failures=True) for var in self._varlist]
        else:
            # The "{% ifchanged %}" syntax (without any variables) compares
            # the rendered output.
            compare_to = nodelist_true_output = self.nodelist_true.render(context)
        if compare_to != state_frame[self]:
            state_frame[self] = compare_to
            # render true block if not already rendered
            return nodelist_true_output or self.nodelist_true.render(context)
        elif self.nodelist_false:
            return self.nodelist_false.render(context)
        return ''

    def _get_context_stack_frame(self, context):
        """Return the mutable mapping that holds this node's comparison state."""
        # The Context object behaves like a stack where each template tag can create a new scope.
        # Find the place where to store the state to detect changes.
        if 'forloop' in context:
            # Ifchanged is bound to the local for loop.
            # When there is a loop-in-loop, the state is bound to the inner loop,
            # so it resets when the outer loop continues.
            return context['forloop']
        else:
            # Using ifchanged outside loops. Effectively this is a no-op because the state is associated with 'self'.
            return context.render_context
class IfEqualNode(Node):
    """Render one of two blocks depending on (in)equality of two variables."""

    child_nodelists = ('nodelist_true', 'nodelist_false')

    def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
        self.var1 = var1
        self.var2 = var2
        self.nodelist_true = nodelist_true
        self.nodelist_false = nodelist_false
        self.negate = negate

    def __repr__(self):
        return '<%s>' % self.__class__.__name__

    def render(self, context):
        first = self.var1.resolve(context, ignore_failures=True)
        second = self.var2.resolve(context, ignore_failures=True)
        # negate selects the {% ifnotequal %} behavior; use the matching
        # comparison operator in each case.
        if self.negate:
            take_true = first != second
        else:
            take_true = first == second
        if take_true:
            return self.nodelist_true.render(context)
        return self.nodelist_false.render(context)
class IfNode(Node):
    """Node for {% if %}/{% elif %}/{% else %}: render the first matching branch."""

    def __init__(self, conditions_nodelists):
        self.conditions_nodelists = conditions_nodelists

    def __repr__(self):
        return '<%s>' % self.__class__.__name__

    def __iter__(self):
        for _, nodelist in self.conditions_nodelists:
            yield from nodelist

    @property
    def nodelist(self):
        return NodeList(self)

    def render(self, context):
        for condition, nodelist in self.conditions_nodelists:
            if condition is None:
                # else clause: always matches.
                match = True
            else:
                # if / elif clause: a missing variable counts as no match.
                try:
                    match = condition.eval(context)
                except VariableDoesNotExist:
                    match = None
            if match:
                return nodelist.render(context)
        return ''
class LoremNode(Node):
    """Emit lorem-ipsum words or paragraphs for the {% lorem %} tag."""

    def __init__(self, count, method, common):
        self.count = count
        self.method = method
        self.common = common

    def render(self, context):
        try:
            count = int(self.count.resolve(context))
        except (ValueError, TypeError):
            # Fall back to a single item on a non-numeric count.
            count = 1
        if self.method == 'w':
            return words(count, common=self.common)
        paras = paragraphs(count, common=self.common)
        if self.method == 'p':
            paras = ['<p>%s</p>' % p for p in paras]
        return '\n\n'.join(paras)
GroupedResult = namedtuple('GroupedResult', ['grouper', 'list'])


class RegroupNode(Node):
    """Node for {% regroup %}: group a sequence by a common attribute expression."""

    def __init__(self, target, expression, var_name):
        self.target = target
        self.expression = expression
        self.var_name = var_name

    def resolve_expression(self, obj, context):
        # This method is called for each object in self.target. See regroup()
        # for the reason why we temporarily put the object in the context.
        context[self.var_name] = obj
        return self.expression.resolve(context, ignore_failures=True)

    def render(self, context):
        obj_list = self.target.resolve(context, ignore_failures=True)
        if obj_list is None:
            # target variable wasn't found in context; fail silently.
            context[self.var_name] = []
            return ''
        # Build [GroupedResult(grouper=key, list=[members]), ...] preserving
        # the input order (groupby only merges adjacent equal keys).
        grouped = groupby(obj_list, lambda obj: self.resolve_expression(obj, context))
        context[self.var_name] = [
            GroupedResult(grouper=key, list=list(members))
            for key, members in grouped
        ]
        return ''
class LoadNode(Node):
    """Node for {% load %}: the tag has only parse-time effects, so render nothing."""

    def render(self, context):
        return ''
class NowNode(Node):
    """Render the current date/time, formatted, for the {% now %} tag."""

    def __init__(self, format_string, asvar=None):
        self.format_string = format_string
        self.asvar = asvar

    def render(self, context):
        tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None
        formatted = date(datetime.now(tz=tzinfo), self.format_string)
        if not self.asvar:
            return formatted
        # {% now ... as var %}: store instead of emitting.
        context[self.asvar] = formatted
        return ''
class ResetCycleNode(Node):
    """Node for {% resetcycle %}: rewind the referenced CycleNode."""

    def __init__(self, node):
        self.node = node

    def render(self, context):
        self.node.reset(context)
        return ''
class SpacelessNode(Node):
    """Node for {% spaceless %}: strip whitespace between HTML tags."""

    def __init__(self, nodelist):
        self.nodelist = nodelist

    def render(self, context):
        from django.utils.html import strip_spaces_between_tags
        rendered = self.nodelist.render(context)
        return strip_spaces_between_tags(rendered.strip())
class TemplateTagNode(Node):
    """Node for {% templatetag %}: output one of the template syntax markers."""

    # Maps the tag's argument to the literal marker string it stands for.
    mapping = {
        'openblock': BLOCK_TAG_START,
        'closeblock': BLOCK_TAG_END,
        'openvariable': VARIABLE_TAG_START,
        'closevariable': VARIABLE_TAG_END,
        'openbrace': SINGLE_BRACE_START,
        'closebrace': SINGLE_BRACE_END,
        'opencomment': COMMENT_TAG_START,
        'closecomment': COMMENT_TAG_END,
    }

    def __init__(self, tagtype):
        self.tagtype = tagtype

    def render(self, context):
        # Unknown tag types render as the empty string.
        return self.mapping.get(self.tagtype, '')
class URLNode(Node):
    """Node for {% url %}: reverse a named URL, optionally storing it as a variable."""

    def __init__(self, view_name, args, kwargs, asvar):
        self.view_name = view_name
        self.args = args
        self.kwargs = kwargs
        self.asvar = asvar

    def render(self, context):
        from django.urls import reverse, NoReverseMatch
        args = [arg.resolve(context) for arg in self.args]
        kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()}
        view_name = self.view_name.resolve(context)
        # Determine the current application namespace from the request, if set.
        try:
            current_app = context.request.current_app
        except AttributeError:
            try:
                current_app = context.request.resolver_match.namespace
            except AttributeError:
                current_app = None
        # Try to look up the URL. If it fails, raise NoReverseMatch unless the
        # {% url ... as var %} construct is used, in which case return nothing.
        url = ''
        try:
            url = reverse(view_name, args=args, kwargs=kwargs, current_app=current_app)
        except NoReverseMatch:
            if self.asvar is None:
                raise
        if self.asvar:
            context[self.asvar] = url
            return ''
        else:
            if context.autoescape:
                url = conditional_escape(url)
            return url
class VerbatimNode(Node):
    """Node for {% verbatim %}: output the captured content without rendering."""

    def __init__(self, content):
        self.content = content

    def render(self, context):
        return self.content
class WidthRatioNode(Node):
    """Node for {% widthratio %}: scale value/max_value to max_width."""

    def __init__(self, val_expr, max_expr, max_width, asvar=None):
        self.val_expr = val_expr
        self.max_expr = max_expr
        self.max_width = max_width
        self.asvar = asvar

    def render(self, context):
        try:
            value = self.val_expr.resolve(context)
            max_value = self.max_expr.resolve(context)
            max_width = int(self.max_width.resolve(context))
        except VariableDoesNotExist:
            # Missing variables render as nothing.
            return ''
        except (ValueError, TypeError):
            raise TemplateSyntaxError("widthratio final argument must be a number")
        try:
            ratio = (float(value) / float(max_value)) * max_width
            result = str(round(ratio))
        except ZeroDivisionError:
            result = '0'
        except (ValueError, TypeError, OverflowError):
            result = ''
        if not self.asvar:
            return result
        context[self.asvar] = result
        return ''
class WithNode(Node):
    """Node for {% with %}: render the body with extra context variables."""

    def __init__(self, var, name, nodelist, extra_context=None):
        self.nodelist = nodelist
        # var and name are legacy attributes, being left in case they are used
        # by third-party subclasses of this Node.
        self.extra_context = extra_context or {}
        if name:
            self.extra_context[name] = var

    def __repr__(self):
        return '<%s>' % self.__class__.__name__

    def render(self, context):
        resolved = {key: expr.resolve(context) for key, expr in self.extra_context.items()}
        with context.push(**resolved):
            return self.nodelist.render(context)
@register.tag
def autoescape(parser, token):
    """
    Force autoescape behavior for this block.
    """
    # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
    bits = token.contents.split()
    if len(bits) != 2:
        raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.")
    setting = bits[1]
    if setting not in ('on', 'off'):
        raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'")
    nodelist = parser.parse(('endautoescape',))
    parser.delete_first_token()
    return AutoEscapeControlNode(setting == 'on', nodelist)
@register.tag
def comment(parser, token):
    """
    Ignore everything between ``{% comment %}`` and ``{% endcomment %}``.
    """
    # The body is discarded at parse time; the node renders nothing.
    parser.skip_past('endcomment')
    return CommentNode()
@register.tag
def cycle(parser, token):
    """
    Cycle among the given strings each time this tag is encountered.

    Within a loop, cycles among the given strings each time through
    the loop::

        {% for o in some_list %}
            <tr class="{% cycle 'row1' 'row2' %}">
                ...
            </tr>
        {% endfor %}

    Outside of a loop, give the values a unique name the first time you call
    it, then use that name each successive time through::

        <tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
        <tr class="{% cycle rowcolors %}">...</tr>
        <tr class="{% cycle rowcolors %}">...</tr>

    You can use any number of values, separated by spaces. Commas can also
    be used to separate values; if a comma is used, the cycle values are
    interpreted as literal strings.

    The optional flag "silent" can be used to prevent the cycle declaration
    from returning any value::

        {% for o in some_list %}
            {% cycle 'row1' 'row2' as rowcolors silent %}
            <tr class="{{ rowcolors }}">{% include "subtemplate.html " %}</tr>
        {% endfor %}
    """
    # Note: This returns the exact same node on each {% cycle name %} call;
    # that is, the node object returned from {% cycle a b c as name %} and the
    # one returned from {% cycle name %} are the exact same object. This
    # shouldn't cause problems (heh), but if it does, now you know.
    #
    # Ugly hack warning: This stuffs the named template dict into parser so
    # that names are only unique within each template (as opposed to using
    # a global variable, which would make cycle names have to be unique across
    # *all* templates.
    #
    # It keeps the last node in the parser to be able to reset it with
    # {% resetcycle %}.
    args = token.split_contents()
    if len(args) < 2:
        raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
    if len(args) == 2:
        # {% cycle foo %} case.
        name = args[1]
        if not hasattr(parser, '_named_cycle_nodes'):
            raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name)
        if name not in parser._named_cycle_nodes:
            raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
        return parser._named_cycle_nodes[name]
    as_form = False
    if len(args) > 4:
        # {% cycle ... as foo [silent] %} case.
        # NOTE(review): a 4-token "{% cycle val as name %}" does not enter this
        # branch and is treated as three plain cycle values — confirm intended.
        if args[-3] == "as":
            if args[-1] != "silent":
                raise TemplateSyntaxError("Only 'silent' flag is allowed after cycle's name, not '%s'." % args[-1])
            as_form = True
            silent = True
            args = args[:-1]
        elif args[-2] == "as":
            as_form = True
            silent = False
    if as_form:
        name = args[-1]
        values = [parser.compile_filter(arg) for arg in args[1:-2]]
        node = CycleNode(values, name, silent=silent)
        if not hasattr(parser, '_named_cycle_nodes'):
            parser._named_cycle_nodes = {}
        parser._named_cycle_nodes[name] = node
    else:
        values = [parser.compile_filter(arg) for arg in args[1:]]
        node = CycleNode(values)
    # Remember the node so {% resetcycle %} can find it.
    parser._last_cycle_node = node
    return node
@register.tag
def csrf_token(parser, token):
    """Return a CsrfTokenNode for the {% csrf_token %} tag."""
    return CsrfTokenNode()
@register.tag
def debug(parser, token):
    """
    Output a whole load of debugging information, including the current
    context and imported modules.

    Sample usage::

        <pre>
            {% debug %}
        </pre>
    """
    return DebugNode()
@register.tag('filter')
def do_filter(parser, token):
    """
    Filter the contents of the block through variable filters.

    Filters can also be piped through each other, and they can have
    arguments -- just like in variable syntax.

    Sample usage::

        {% filter force_escape|lower %}
            This text will be HTML-escaped, and will appear in lowercase.
        {% endfilter %}

    Note that the ``escape`` and ``safe`` filters are not acceptable arguments.
    Instead, use the ``autoescape`` tag to manage autoescaping for blocks of
    template code.
    """
    # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
    _, filter_spec = token.contents.split(None, 1)
    filter_expr = parser.compile_filter("var|%s" % (filter_spec,))
    # Reject filters that would interfere with autoescaping.
    for func, _unused in filter_expr.filters:
        name = getattr(func, '_filter_name', None)
        if name in ('escape', 'safe'):
            raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % name)
    nodelist = parser.parse(('endfilter',))
    parser.delete_first_token()
    return FilterNode(filter_expr, nodelist)
@register.tag
def firstof(parser, token):
    """
    Output the first variable passed that is not False; output nothing when
    every variable is False.

    Sample usage::

        {% firstof var1 var2 var3 as myvar %}

    This is equivalent to an ``{% if %}/{% elif %}`` chain over the variables,
    but much cleaner. A literal string may be passed last as a fallback value.
    Escaping of the output is controlled with ``{% autoescape %}`` or the
    ``safe`` filter on individual arguments.
    """
    bits = token.split_contents()[1:]
    if not bits:
        raise TemplateSyntaxError("'firstof' statement requires at least one argument")
    asvar = None
    # Trailing "as var" stores the result instead of outputting it.
    if len(bits) >= 2 and bits[-2] == 'as':
        asvar = bits[-1]
        bits = bits[:-2]
    return FirstOfNode([parser.compile_filter(bit) for bit in bits], asvar)
@register.tag('for')
def do_for(parser, token):
    """
    Loop over each item in an array.

    For example, to display a list of athletes given ``athlete_list``::

        <ul>
        {% for athlete in athlete_list %}
            <li>{{ athlete.name }}</li>
        {% endfor %}
        </ul>

    You can loop over a list in reverse by using
    ``{% for obj in list reversed %}``.

    You can also unpack multiple values from a two-dimensional array::

        {% for key,value in dict.items %}
            {{ key }}: {{ value }}
        {% endfor %}

    The ``for`` tag can take an optional ``{% empty %}`` clause that will
    be displayed if the given array is empty or could not be found::

        <ul>
          {% for athlete in athlete_list %}
            <li>{{ athlete.name }}</li>
          {% empty %}
            <li>Sorry, no athletes in this list.</li>
          {% endfor %}
        <ul>

    The above is equivalent to -- but shorter, cleaner, and possibly faster
    than -- the following::

        <ul>
          {% if athlete_list %}
            {% for athlete in athlete_list %}
              <li>{{ athlete.name }}</li>
            {% endfor %}
          {% else %}
            <li>Sorry, no athletes in this list.</li>
          {% endif %}
        </ul>

    The for loop sets a number of variables available within the loop:

        ==========================  ================================================
        Variable                    Description
        ==========================  ================================================
        ``forloop.counter``         The current iteration of the loop (1-indexed)
        ``forloop.counter0``        The current iteration of the loop (0-indexed)
        ``forloop.revcounter``      The number of iterations from the end of the
                                    loop (1-indexed)
        ``forloop.revcounter0``     The number of iterations from the end of the
                                    loop (0-indexed)
        ``forloop.first``           True if this is the first time through the loop
        ``forloop.last``            True if this is the last time through the loop
        ``forloop.parentloop``      For nested loops, this is the loop "above" the
                                    current one
        ==========================  ================================================
    """
    bits = token.split_contents()
    if len(bits) < 4:
        raise TemplateSyntaxError("'for' statements should have at least four"
                                  " words: %s" % token.contents)
    # A trailing 'reversed' shifts the position of the 'in' keyword.
    is_reversed = bits[-1] == 'reversed'
    in_index = -3 if is_reversed else -2
    if bits[in_index] != 'in':
        raise TemplateSyntaxError("'for' statements should use the format"
                                  " 'for x in y': %s" % token.contents)
    # Loop variable names may not contain spaces, quotes or the filter separator.
    invalid_chars = frozenset((' ', '"', "'", FILTER_SEPARATOR))
    loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
    for var in loopvars:
        if not var or not invalid_chars.isdisjoint(var):
            raise TemplateSyntaxError("'for' tag received an invalid argument:"
                                      " %s" % token.contents)
    sequence = parser.compile_filter(bits[in_index + 1])
    nodelist_loop = parser.parse(('empty', 'endfor',))
    token = parser.next_token()
    if token.contents == 'empty':
        nodelist_empty = parser.parse(('endfor',))
        parser.delete_first_token()
    else:
        nodelist_empty = None
    return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)
def do_ifequal(parser, token, negate):
    """Shared implementation for the ``ifequal``/``ifnotequal`` tags.

    Parse the two comparison arguments and the optional ``{% else %}``
    branch, returning an ``IfEqualNode`` that negates the comparison when
    ``negate`` is true.
    """
    pieces = list(token.split_contents())
    if len(pieces) != 3:
        raise TemplateSyntaxError("%r takes two arguments" % pieces[0])
    closing_tag = 'end' + pieces[0]
    true_branch = parser.parse(('else', closing_tag))
    next_tok = parser.next_token()
    false_branch = NodeList()
    if next_tok.contents == 'else':
        false_branch = parser.parse((closing_tag,))
        parser.delete_first_token()
    first = parser.compile_filter(pieces[1])
    second = parser.compile_filter(pieces[2])
    return IfEqualNode(first, second, true_branch, false_branch, negate)
@register.tag
def ifequal(parser, token):
    """
    Render the enclosed block when the two arguments compare equal.

    Examples::

        {% ifequal user.id comment.user_id %}
            ...
        {% endifequal %}

        {% ifnotequal user.id comment.user_id %}
            ...
        {% else %}
            ...
        {% endifnotequal %}
    """
    return do_ifequal(parser, token, False)
@register.tag
def ifnotequal(parser, token):
    """
    Render the enclosed block when the two arguments are not equal.

    Accepts the same syntax as ``ifequal`` (which see).
    """
    return do_ifequal(parser, token, True)
class TemplateLiteral(Literal):
    """Leaf node wrapping a compiled filter expression for the if-parser.

    The raw source fragment is kept alongside the compiled expression so
    error messages can show exactly what the template author wrote.
    """

    def __init__(self, value, text):
        # 'value' is the compiled FilterExpression; 'text' its source form.
        self.value = value
        self.text = text

    def display(self):
        return self.text

    def eval(self, context):
        # ignore_failures=True: unresolvable variables yield None instead
        # of raising, matching the template language's forgiving lookups.
        return self.value.resolve(context, ignore_failures=True)
class TemplateIfParser(IfParser):
    """IfParser specialization whose operands are template filter expressions."""

    error_class = TemplateSyntaxError

    def __init__(self, parser, *args, **kwargs):
        # Keep the template parser around so create_var() can compile
        # filter expressions with the template's registered filters.
        self.template_parser = parser
        super().__init__(*args, **kwargs)

    def create_var(self, value):
        return TemplateLiteral(self.template_parser.compile_filter(value), value)
@register.tag('if')
def do_if(parser, token):
    """
    Evaluate a variable and render the enclosed block when it is "true"
    (exists, is non-empty, and is not a false boolean value)::

        {% if athlete_list %}
            Number of athletes: {{ athlete_list|count }}
        {% elif athlete_in_locker_room_list %}
            Athletes should be out of the locker room soon!
        {% else %}
            No athletes.
        {% endif %}

    Any number of ``{% elif %}`` clauses and a final ``{% else %}`` clause
    are accepted; all are optional.  Conditions may combine variables with
    ``and``, ``or`` and ``not``, use the comparison operators ``==``,
    ``!=``, ``>``, ``>=``, ``<``, ``<=``, ``in`` and ``not in``, and apply
    filters, for example ``{% if articles|length >= 5 %}``.  Arguments and
    operators must be separated by spaces, so ``{% if 1>2 %}`` is not a
    valid if tag.  Operator precedence follows Python.
    """
    def parse_branch(tok):
        # Compile the condition of an {% if %}/{% elif %} token and parse
        # its body up to the next branch marker.
        args = tok.split_contents()[1:]
        cond = TemplateIfParser(parser, args).parse()
        body = parser.parse(('elif', 'else', 'endif'))
        return cond, body

    # {% if ... %}
    branches = [parse_branch(token)]
    token = parser.next_token()

    # Zero or more {% elif ... %} branches.
    while token.contents.startswith('elif'):
        branches.append(parse_branch(token))
        token = parser.next_token()

    # Optional {% else %} branch, stored with a None condition.
    if token.contents == 'else':
        branches.append((None, parser.parse(('endif',))))
        token = parser.next_token()

    # The closing {% endif %} is mandatory.
    if token.contents != 'endif':
        raise TemplateSyntaxError('Malformed template tag at line {0}: "{1}"'.format(token.lineno, token.contents))

    return IfNode(branches)
@register.tag
def ifchanged(parser, token):
    """
    Render content only when a value differs from the previous loop iteration.

    With no arguments, the block's own rendered output is compared against
    the previous iteration's output::

        <h1>Archive for {{ year }}</h1>
        {% for date in days %}
            {% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
            <a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a>
        {% endfor %}

    With one or more arguments, the resolved argument values are compared
    instead::

        {% for date in days %}
            {% ifchanged date.date %} {{ date.date }} {% endifchanged %}
            {% ifchanged date.hour date.date %}
                {{ date.hour }}
            {% endifchanged %}
        {% endfor %}

    An optional ``{% else %}`` branch is rendered when nothing changed.
    """
    args = token.split_contents()
    changed_branch = parser.parse(('else', 'endifchanged'))
    following = parser.next_token()
    unchanged_branch = NodeList()
    if following.contents == 'else':
        unchanged_branch = parser.parse(('endifchanged',))
        parser.delete_first_token()
    tracked = [parser.compile_filter(arg) for arg in args[1:]]
    return IfChangedNode(changed_branch, unchanged_branch, *tracked)
def find_library(parser, name):
    """Return the tag library registered under ``name`` on ``parser``.

    Raises ``TemplateSyntaxError`` listing every registered library when no
    library with that name exists.
    """
    try:
        return parser.libraries[name]
    except KeyError as e:
        # Chain the original KeyError so tracebacks show the failed lookup.
        raise TemplateSyntaxError(
            "'%s' is not a registered tag library. Must be one of:\n%s" % (
                name, "\n".join(sorted(parser.libraries)),
            ),
        ) from e
def load_from_library(library, label, names):
    """
    Return a new ``Library`` holding only the named tags/filters of ``library``.

    ``label`` is the library name used in error messages.  A name may match
    a tag, a filter, or both; if it matches neither, ``TemplateSyntaxError``
    is raised.
    """
    subset = Library()
    for name in names:
        found = False
        if name in library.tags:
            found = True
            subset.tags[name] = library.tags[name]
        if name in library.filters:
            found = True
            subset.filters[name] = library.filters[name]
        # Idiomatic truth test instead of the original 'found is False'.
        if not found:
            raise TemplateSyntaxError(
                "'%s' is not a valid tag or filter in tag library '%s'" % (
                    name, label,
                ),
            )
    return subset
@register.tag
def load(parser, token):
    """
    Make the tags/filters of one or more custom libraries available.

    Load whole libraries::

        {% load news.photos %}

    or cherry-pick individual tags/filters from a single library::

        {% load byline from news %}
    """
    # Plain split: this tag takes literal names, never variables.
    args = token.contents.split()
    if len(args) >= 4 and args[-2] == "from":
        # "{% load a b from lib %}": expose only the named items.
        library_name = args[-1]
        library = find_library(parser, library_name)
        parser.add_library(load_from_library(library, library_name, args[1:-2]))
    else:
        # "{% load lib1 lib2 %}": expose every listed library in full.
        for library_name in args[1:]:
            parser.add_library(find_library(parser, library_name))
    return LoadNode()
@register.tag
def lorem(parser, token):
    """
    Generate Latin placeholder text, useful for test data in templates.

    Usage::

        {% lorem [count] [method] [random] %}

    ``count`` is a number (or variable) of paragraphs or words (default 1).
    ``method`` is ``w`` for words, ``p`` for HTML paragraphs or ``b`` for
    plain-text paragraph blocks (default ``b``).  The literal word
    ``random`` suppresses the common opening paragraph ("Lorem ipsum dolor
    sit amet, consectetuer...").

    Examples:

    * ``{% lorem %}`` outputs the common "lorem ipsum" paragraph
    * ``{% lorem 3 p %}`` outputs the common paragraph plus two random
      paragraphs, each wrapped in HTML ``<p>`` tags
    * ``{% lorem 2 w random %}`` outputs two random latin words
    """
    args = list(token.split_contents())
    tag_name = args[0]
    # Trailing 'random' flag disables the fixed opening paragraph.
    use_common = args[-1] != 'random'
    if not use_common:
        args.pop()
    # Optional output method, defaulting to plain-text blocks.
    method = args.pop() if args[-1] in ('w', 'p', 'b') else 'b'
    # Optional count, defaulting to a single paragraph/word.
    count = args.pop() if len(args) > 1 else '1'
    count = parser.compile_filter(count)
    if len(args) != 1:
        raise TemplateSyntaxError("Incorrect format for %r tag" % tag_name)
    return LoremNode(count, method, use_common)
@register.tag
def now(parser, token):
    """
    Output the current date/time, formatted with PHP ``date()`` codes
    (see https://php.net/date for all possible values).

    Sample usage::

        It is {% now "jS F Y H:i" %}

    A trailing ``as varname`` stores the value in the context instead of
    outputting it.
    """
    args = token.split_contents()
    target_var = None
    # Optional trailing "as varname" captures the value into the context.
    if len(args) == 4 and args[-2] == 'as':
        target_var = args[-1]
        args = args[:-2]
    if len(args) != 2:
        raise TemplateSyntaxError("'now' statement takes one argument")
    # Strip the surrounding quote characters from the format string.
    format_string = args[1][1:-1]
    return NowNode(format_string, target_var)
@register.tag
def regroup(parser, token):
    """
    Regroup a list of alike objects by a common attribute::

        {% regroup musicians by instrument as grouped %}
        <ul>
        {% for group in grouped %}
            <li>{{ group.grouper }}
            <ul>
                {% for musician in group.list %}
                <li>{{ musician.name }}</li>
                {% endfor %}
            </ul>
        {% endfor %}
        </ul>

    The populated variable is a list of objects with ``grouper`` (the value
    grouped by, e.g. ``Guitar``) and ``list`` (the objects sharing that
    value) attributes.

    Note that ``{% regroup %}`` does not work when the list is not already
    sorted by the grouping key!  Sort it first if necessary, e.g.::

        {% regroup musicians|dictsort:"instrument" by instrument as grouped %}
    """
    args = token.split_contents()
    if len(args) != 6:
        raise TemplateSyntaxError("'regroup' tag takes five arguments")
    target = parser.compile_filter(args[1])
    if args[2] != 'by':
        raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
    if args[4] != 'as':
        raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must"
                                  " be 'as'")
    var_name = args[5]
    # RegroupNode stores each item in the context under 'var_name' and
    # evaluates 'var_name'.'expression' there to obtain the grouping value;
    # the final grouped result then replaces the temporary values.  This
    # hack is needed because the template engine has no context-aware
    # equivalent of Python's getattr.
    expression = parser.compile_filter(
        var_name + VARIABLE_ATTRIBUTE_SEPARATOR + args[3])
    return RegroupNode(target, expression, var_name)
@register.tag
def resetcycle(parser, token):
    """
    Reset a ``{% cycle %}`` tag.

    With an argument, reset the last rendered cycle tag whose name matches
    it; without one, reset the last rendered cycle tag (named or unnamed).
    """
    args = token.split_contents()
    if len(args) > 2:
        raise TemplateSyntaxError("%r tag accepts at most one argument." % args[0])
    if len(args) == 2:
        cycle_name = args[1]
        # Named form: the parser tracks named cycle nodes in a dict that
        # only exists once a named cycle has been seen.
        named_nodes = getattr(parser, '_named_cycle_nodes', {})
        if cycle_name not in named_nodes:
            raise TemplateSyntaxError("Named cycle '%s' does not exist." % cycle_name)
        return ResetCycleNode(named_nodes[cycle_name])
    # Unnamed form: reset whichever cycle tag rendered most recently.
    if not hasattr(parser, '_last_cycle_node'):
        raise TemplateSyntaxError("No cycles in template.")
    return ResetCycleNode(parser._last_cycle_node)
@register.tag
def spaceless(parser, token):
    """
    Strip whitespace (including tabs and newlines) between HTML tags::

        {% spaceless %}
            <p>
                <a href="foo/">Foo</a>
            </p>
        {% endspaceless %}

    renders as ``<p><a href="foo/">Foo</a></p>``.

    Only space between *tags* is normalized -- not space between tags and
    text, so e.g. the space around ``Hello`` in ``<strong> Hello </strong>``
    is preserved.
    """
    body = parser.parse(('endspaceless',))
    # Consume the {% endspaceless %} token itself.
    parser.delete_first_token()
    return SpacelessNode(body)
@register.tag
def templatetag(parser, token):
    """
    Output one of the syntax bits used to compose template tags.

    The template language has no escaping mechanism, so this tag is the way
    to display its own delimiters.  The single argument selects which bit:
    ``openblock`` (``{%``), ``closeblock`` (``%}``), ``openvariable``
    (``{{``), ``closevariable`` (``}}``), ``openbrace`` (``{``),
    ``closebrace`` (``}``), ``opencomment`` (``{#``) or ``closecomment``
    (``#}``).
    """
    # Plain split: this tag takes a literal keyword, never a variable.
    args = token.contents.split()
    if len(args) != 2:
        raise TemplateSyntaxError("'templatetag' statement takes one argument")
    keyword = args[1]
    if keyword not in TemplateTagNode.mapping:
        raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
                                  " Must be one of: %s" %
                                  (keyword, list(TemplateTagNode.mapping)))
    return TemplateTagNode(keyword)
@register.tag
def url(parser, token):
    r"""
    Reverse a named URL pattern into an absolute URL, decoupling templates
    from any particular URL configuration::

        {% url "url_name" arg1 arg2 %}

    or

        {% url "url_name" name1=value1 name2=value2 %}

    The first argument is a URL pattern name, or a template variable that
    resolves to one::

        {% with url_name="client-detail-view" %}
            {% url url_name client.id %}
        {% endwith %}

    The remaining space-separated arguments fill the pattern's positional
    or keyword parameters; all of them must be supplied, and positional and
    keyword styles must not be mixed.  For example, with
    ``path('client/<int:id>/', views.client_details,
    name='client-detail-view')`` included under ``clients/``,
    ``{% url "client-detail-view" client.id %}`` renders something like
    ``/clients/client/123/``.
    """
    pieces = token.split_contents()
    if len(pieces) < 2:
        raise TemplateSyntaxError("'%s' takes at least one argument, a URL pattern name." % pieces[0])
    view_name = parser.compile_filter(pieces[1])
    positional = []
    keyword = {}
    store_as = None

    remainder = pieces[2:]
    # Optional trailing "as varname" captures the URL into the context.
    if len(remainder) >= 2 and remainder[-2] == 'as':
        store_as = remainder[-1]
        remainder = remainder[:-2]

    for piece in remainder:
        match = kwarg_re.match(piece)
        if not match:
            raise TemplateSyntaxError("Malformed arguments to url tag")
        name, value = match.groups()
        if name:
            keyword[name] = parser.compile_filter(value)
        else:
            positional.append(parser.compile_filter(value))

    return URLNode(view_name, positional, keyword, store_as)
@register.tag
def verbatim(parser, token):
    """
    Output the enclosed block without any template processing::

        {% verbatim %}
            {% don't process this %}
        {% endverbatim %}

    A label after ``verbatim`` selects a matching closing tag, which makes
    it possible to include a literal ``{% endverbatim %}`` in the block::

        {% verbatim myblock %}
            ...
        {% endverbatim myblock %}
    """
    contents = parser.parse(('endverbatim',))
    # Swallow the {% endverbatim %} token.
    parser.delete_first_token()
    # Render once against an empty context; the result is emitted verbatim.
    return VerbatimNode(contents.render(Context()))
@register.tag
def widthratio(parser, token):
    """
    Scale a value against a maximum -- handy for bar charts and the like.

    Computes ``this_value / max_value * max_width`` (rounded)::

        <img src="bar.png" alt="Bar"
             height="10" width="{% widthratio this_value max_value max_width %}">

    With ``this_value`` 175, ``max_value`` 200 and ``max_width`` 100 the
    image above is 88 pixels wide (175/200 = .875; .875 * 100 = 87.5,
    rounded up to 88).  A trailing ``as varname`` captures the result::

        {% widthratio this_value max_value max_width as width %}
        {% blocktrans %}The width is: {{ width }}{% endblocktrans %}
    """
    pieces = token.split_contents()
    capture_var = None
    if len(pieces) == 4:
        _tag, value_expr, max_expr, width_expr = pieces
    elif len(pieces) == 6:
        _tag, value_expr, max_expr, width_expr, as_keyword, capture_var = pieces
        if as_keyword != 'as':
            raise TemplateSyntaxError("Invalid syntax in widthratio tag. Expecting 'as' keyword")
    else:
        raise TemplateSyntaxError("widthratio takes at least three arguments")
    return WidthRatioNode(parser.compile_filter(value_expr),
                          parser.compile_filter(max_expr),
                          parser.compile_filter(width_expr),
                          asvar=capture_var)
@register.tag('with')
def do_with(parser, token):
    """
    Bind one or more values to names for the duration of the block, for
    caching and easy access::

        {% with total=person.some_sql_method %}
            {{ total }} object{{ total|pluralize }}
        {% endwith %}

    Several bindings may be given at once::

        {% with foo=1 bar=2 %}
            ...
        {% endwith %}

    The legacy ``{% with person.some_sql_method as total %}`` form is still
    accepted.
    """
    pieces = token.split_contents()
    leftover = pieces[1:]
    # token_kwargs consumes the assignments it understands from 'leftover',
    # so anything remaining afterwards is an invalid token.
    bindings = token_kwargs(leftover, parser, support_legacy=True)
    if not bindings:
        raise TemplateSyntaxError("%r expected at least one variable "
                                  "assignment" % pieces[0])
    if leftover:
        raise TemplateSyntaxError("%r received an invalid token: %r" %
                                  (pieces[0], leftover[0]))
    body = parser.parse(('endwith',))
    parser.delete_first_token()
    return WithNode(None, None, body, extra_context=bindings)
| mit |
jungle90/Openstack-Swift-I-O-throttler | build/lib.linux-x86_64-2.7/swift/common/middleware/container_sync.py | 40 | 6054 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.swob import HTTPBadRequest, HTTPUnauthorized, wsgify
from swift.common.utils import (
config_true_value, get_logger, register_swift_info, streq_const_time)
from swift.proxy.controllers.base import get_container_info
class ContainerSync(object):
    """
    WSGI middleware that validates an incoming container sync request
    using the container-sync-realms.conf style of container sync.
    """
    def __init__(self, app, conf, logger=None):
        # app: next WSGI app/filter in the pipeline; conf: raw middleware
        # configuration mapping; logger: optional, created from conf if
        # omitted.
        self.app = app
        self.conf = conf
        self.logger = logger or get_logger(conf, log_route='container_sync')
        # Realm/cluster definitions are read from container-sync-realms.conf
        # in the configured swift_dir.
        self.realms_conf = ContainerSyncRealms(
            os.path.join(
                conf.get('swift_dir', '/etc/swift'),
                'container-sync-realms.conf'),
            self.logger)
        # When false, X-Container-Sync-To must use the //realm/cluster/...
        # form rather than a full URL.
        self.allow_full_urls = config_true_value(
            conf.get('allow_full_urls', 'true'))
        # configure current realm/cluster for /info
        self.realm = self.cluster = None
        current = conf.get('current', None)
        if current:
            try:
                self.realm, self.cluster = (p.upper() for p in
                                            current.strip('/').split('/'))
            except ValueError:
                self.logger.error('Invalid current //REALM/CLUSTER (%s)',
                                  current)
        self.register_info()
    def register_info(self):
        # Publish the known realms and their clusters for the /info
        # endpoint, flagging this cluster as 'current' when configured.
        dct = {}
        for realm in self.realms_conf.realms():
            clusters = self.realms_conf.clusters(realm)
            if clusters:
                dct[realm] = {'clusters': dict((c, {}) for c in clusters)}
        if self.realm and self.cluster:
            try:
                dct[self.realm]['clusters'][self.cluster]['current'] = True
            except KeyError:
                self.logger.error('Unknown current //REALM/CLUSTER (%s)',
                                  '//%s/%s' % (self.realm, self.cluster))
        register_swift_info('container_sync', realms=dct)
    @wsgify
    def __call__(self, req):
        """
        Validate a container sync request, then pass it down the pipeline.

        Rejects full-URL X-Container-Sync-To values when disallowed, and
        checks any X-Container-Sync-Auth header against the realm key(s)
        and the container's sync key.  Valid requests get
        'swift.authorize_override' set; invalid ones get a 401.
        """
        if not self.allow_full_urls:
            sync_to = req.headers.get('x-container-sync-to')
            if sync_to and not sync_to.startswith('//'):
                raise HTTPBadRequest(
                    body='Full URLs are not allowed for X-Container-Sync-To '
                         'values. Only realm values of the format '
                         '//realm/cluster/account/container are allowed.\n',
                    request=req)
        auth = req.headers.get('x-container-sync-auth')
        if auth:
            # Header format: "<realm> <nonce> <signature>".  Each failure
            # mode appends a marker to swift.log_info for diagnosis.
            valid = False
            auth = auth.split()
            if len(auth) != 3:
                req.environ.setdefault('swift.log_info', []).append(
                    'cs:not-3-args')
            else:
                realm, nonce, sig = auth
                realm_key = self.realms_conf.key(realm)
                realm_key2 = self.realms_conf.key2(realm)
                if not realm_key:
                    req.environ.setdefault('swift.log_info', []).append(
                        'cs:no-local-realm-key')
                else:
                    info = get_container_info(
                        req.environ, self.app, swift_source='CS')
                    user_key = info.get('sync_key')
                    if not user_key:
                        req.environ.setdefault('swift.log_info', []).append(
                            'cs:no-local-user-key')
                    else:
                        # A signature made with either realm key is
                        # accepted, so keys can be rotated without
                        # breaking syncing.
                        expected = self.realms_conf.get_sig(
                            req.method, req.path,
                            req.headers.get('x-timestamp', '0'), nonce,
                            realm_key, user_key)
                        expected2 = self.realms_conf.get_sig(
                            req.method, req.path,
                            req.headers.get('x-timestamp', '0'), nonce,
                            realm_key2, user_key) if realm_key2 else expected
                        # Constant-time comparison avoids timing attacks.
                        if not streq_const_time(sig, expected) and \
                                not streq_const_time(sig, expected2):
                            req.environ.setdefault(
                                'swift.log_info', []).append('cs:invalid-sig')
                        else:
                            req.environ.setdefault(
                                'swift.log_info', []).append('cs:valid')
                            valid = True
            if not valid:
                exc = HTTPUnauthorized(
                    body='X-Container-Sync-Auth header not valid; '
                         'contact cluster operator for support.',
                    headers={'content-type': 'text/plain'},
                    request=req)
                exc.headers['www-authenticate'] = ' '.join([
                    'SwiftContainerSync',
                    exc.www_authenticate().split(None, 1)[1]])
                raise exc
            else:
                # Authenticated sync requests bypass normal authorization.
                req.environ['swift.authorize_override'] = True
        if req.path == '/info':
            # Ensure /info requests get the freshest results
            self.register_info()
        return self.app
def filter_factory(global_conf, **local_conf):
    """paste.deploy filter factory for the container_sync middleware."""
    # Local settings override the global paste.deploy configuration.
    conf = dict(global_conf)
    conf.update(local_conf)
    register_swift_info('container_sync')

    def container_sync_filter(app):
        return ContainerSync(app, conf)
    return container_sync_filter
| apache-2.0 |
SoCo/SoCo | soco/data_structure_quirks.py | 2 | 1729 | """This module implements 'quirks' for the DIDL-Lite data structures
A quirk, in this context, means that a specific music service does not follow
a specific part of the DIDL-Lite specification. In order not to clutter the
primary implementation of DIDL-Lite for SoCo (in :mod:`soco.data_structures`)
up with all these service specific exception, they are implemented separately
in this module. Besides from keeping the main implementation clean and
following the specification, this has the added advantage of making it easier
to track how many quiks are out there.
The implementation of the quirks at this point is just a single function which
applies quirks to the DIDL-Lite resources, with the options of adding one that
applies them to DIDL-Lite objects.
"""
import logging
_LOG = logging.getLogger(__name__)
def apply_resource_quirks(resource):
    """Work around known service deviations on a DIDL-Lite resource element."""
    # Some services (seen with Spotify Direct and Amazon via Alexa) omit
    # the mandatory protocolInfo attribute; substitute a placeholder so
    # the data structures can still be built.
    if "protocolInfo" not in resource.attrib:
        if resource.text and resource.text.startswith("x-sonos-spotify"):
            # Spotify Direct: the protocol can be inferred from the URI.
            protocol_info = "sonos.com-spotify:*:audio/x-spotify.*"
        else:
            protocol_info = "DUMMY_ADDED_BY_QUIRK"
        _LOG.debug(
            "Resource quirk applied for missing protocolInfo, setting to '%s'",
            protocol_info,
        )
        resource.set("protocolInfo", protocol_info)
    # Normalize a missing URI to the empty string.
    if not resource.text:
        resource.text = ""
    return resource
| mit |
argentumproject/electrum-arg | gui/qt/seed_dialog.py | 1 | 6853 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2013 ecdsa@github
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_arg.i18n import _
from util import *
from qrtextedit import ShowQRTextEdit, ScanQRTextEdit
def seed_warning_msg(seed):
    """Build the HTML warning shown alongside a freshly generated seed."""
    word_count = len(seed.split())
    parts = [
        "<p>",
        _("Please save these %d words on paper (order is important). "),
        _("This seed will allow you to recover your wallet in case "
          "of computer failure."),
        "</p>",
        "<b>" + _("WARNING") + ":</b>",
        "<ul>",
        "<li>" + _("Never disclose your seed.") + "</li>",
        "<li>" + _("Never type it on a website.") + "</li>",
        "<li>" + _("Do not store it electronically.") + "</li>",
        "</ul>",
    ]
    return ''.join(parts) % word_count
class SeedLayout(QVBoxLayout):
    """Layout used by the install wizard to display or collect a seed.

    With ``seed`` given it is a read-only display (with QR button);
    otherwise it is a scannable input whose text is validated by
    ``is_seed`` on every edit.
    """
    #options
    # Option flags toggled via the Options dialog (seed_options).
    is_bip39 = False
    is_ext = False
    def seed_options(self):
        # Show the options dialog (seed extension / BIP39 toggle) and
        # store the user's choices on self.
        dialog = QDialog()
        vbox = QVBoxLayout(dialog)
        if 'ext' in self.options:
            cb_ext = QCheckBox(_('Extend this seed with custom words'))
            cb_ext.setChecked(self.is_ext)
            vbox.addWidget(cb_ext)
        if 'bip39' in self.options:
            def f(b):
                # Toggling BIP39 swaps the validator: any non-empty text
                # is accepted, since BIP39 seeds carry no Electrum
                # version information.
                if b:
                    msg = ' '.join([
                        '<b>' + _('Warning') + '</b>' + ': ',
                        _('BIP39 seeds may not be supported in the future.'),
                        '<br/><br/>',
                        _('As technology matures, Bitcoin address generation may change.'),
                        _('However, BIP39 seeds do not include a version number.'),
                        _('As a result, it is not possible to infer your wallet type from a BIP39 seed.'),
                        '<br/><br/>',
                        _('We do not guarantee that BIP39 seeds will be supported in future versions of Electrum.'),
                        _('We recommend to use seeds generated by Electrum or compatible wallets.'),
                        ])
                #self.parent.show_warning(msg)
                self.seed_type_label.setVisible(not b)
                self.is_seed = (lambda x: bool(x)) if b else self.saved_is_seed
                self.on_edit()
            cb_bip39 = QCheckBox(_('BIP39 seed'))
            cb_bip39.toggled.connect(f)
            cb_bip39.setChecked(self.is_bip39)
            vbox.addWidget(cb_bip39)
        vbox.addLayout(Buttons(OkButton(dialog)))
        if not dialog.exec_():
            return None
        self.is_ext = cb_ext.isChecked() if 'ext' in self.options else False
        self.is_bip39 = cb_bip39.isChecked() if 'bip39' in self.options else False
    def __init__(self, seed=None, title=None, icon=True, msg=None, options=None, is_seed=None, passphrase=None, parent=None):
        QVBoxLayout.__init__(self)
        self.parent = parent
        self.options = options
        if title:
            self.addWidget(WWLabel(title))
        if seed:
            # Show mode: read-only text with a QR display button.
            self.seed_e = ShowQRTextEdit()
            self.seed_e.setText(seed)
        else:
            # Entry mode: scannable input re-validated on every edit.
            self.seed_e = ScanQRTextEdit()
            self.seed_e.setTabChangesFocus(True)
            self.is_seed = is_seed
            self.saved_is_seed = self.is_seed
            self.seed_e.textChanged.connect(self.on_edit)
        self.seed_e.setMaximumHeight(75)
        hbox = QHBoxLayout()
        if icon:
            logo = QLabel()
            logo.setPixmap(QPixmap(":icons/seed.png").scaledToWidth(64))
            logo.setMaximumWidth(60)
            hbox.addWidget(logo)
        hbox.addWidget(self.seed_e)
        self.addLayout(hbox)
        hbox = QHBoxLayout()
        hbox.addStretch(1)
        self.seed_type_label = QLabel('')
        hbox.addWidget(self.seed_type_label)
        if options:
            opt_button = EnterButton(_('Options'), self.seed_options)
            hbox.addWidget(opt_button)
        self.addLayout(hbox)
        if passphrase:
            hbox = QHBoxLayout()
            passphrase_e = QLineEdit()
            passphrase_e.setText(passphrase)
            passphrase_e.setReadOnly(True)
            hbox.addWidget(QLabel(_("Your seed extension is") + ':'))
            hbox.addWidget(passphrase_e)
            self.addLayout(hbox)
        self.addStretch(1)
        if msg:
            # NOTE: any truthy 'msg' is replaced by the standard warning
            # text for this seed; callers pass msg=True.
            msg = seed_warning_msg(seed)
            self.addWidget(WWLabel(msg))
    def get_seed(self):
        # Return the entered seed with whitespace collapsed to single
        # spaces.
        text = unicode(self.seed_e.text())
        return ' '.join(text.split())
    def on_edit(self):
        # Re-validate on every change, updating the seed-type label and
        # the wizard's Next button.
        from electrum_arg.bitcoin import seed_type
        s = self.get_seed()
        b = self.is_seed(s)
        t = seed_type(s)
        label = _('Seed Type') + ': ' + t if t else ''
        self.seed_type_label.setText(label)
        self.parent.next_button.setEnabled(b)
class KeysLayout(QVBoxLayout):
    """Layout with a scan/paste text box for entering keys, validated live."""

    def __init__(self, parent=None, title=None, is_valid=None):
        QVBoxLayout.__init__(self)
        self.parent = parent
        self.is_valid = is_valid
        self.text_e = ScanQRTextEdit()
        self.text_e.textChanged.connect(self.on_edit)
        self.addWidget(WWLabel(title))
        self.addWidget(self.text_e)

    def get_text(self):
        return unicode(self.text_e.text())

    def on_edit(self):
        # Enable the wizard's Next button only while the input validates.
        self.parent.next_button.setEnabled(self.is_valid(self.get_text()))
class SeedDialog(WindowModalDialog):
    """Modal dialog that displays a wallet's generation seed (and extension)."""

    def __init__(self, parent, seed, passphrase):
        WindowModalDialog.__init__(self, parent, ('Electrum - ' + _('Seed')))
        self.setMinimumWidth(400)
        layout = QVBoxLayout(self)
        # msg=True makes SeedLayout show the standard seed warning text.
        seed_box = SeedLayout(title=_("Your wallet generation seed is:"),
                              seed=seed, msg=True, passphrase=passphrase)
        layout.addLayout(seed_box)
        layout.addLayout(Buttons(CloseButton(self)))
| mit |
sgiavasis/nipype | examples/fmri_fsl_reuse.py | 8 | 9974 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
=========================
fMRI: FSL reuse workflows
=========================
A workflow that uses fsl to perform a first level analysis on the nipype
tutorial data set::
python fmri_fsl_reuse.py
First tell python where to find the appropriate functions.
"""
from __future__ import print_function
from __future__ import division
from builtins import range
import os # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.algorithms.modelgen as model # model generation
import nipype.algorithms.rapidart as ra # artifact detection
from nipype.workflows.fmri.fsl import (create_featreg_preproc,
create_modelfit_workflow,
create_fixed_effects_flow)
"""
Preliminaries
-------------
Setup any package specific configuration. The output file format for FSL
routines is being set to compressed NIFTI.
"""
# FSL outputs: use compressed NIfTI everywhere.
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
# Top-level workflow assembled from three canned FSL sub-workflows.
level1_workflow = pe.Workflow(name='level1flow')
preproc = create_featreg_preproc(whichvol='first')
modelfit = create_modelfit_workflow()
fixed_fx = create_fixed_effects_flow()
"""
Add artifact detection and model specification nodes between the preprocessing
and modelfitting workflows.
"""
# One ArtifactDetect per run (MapNode over the iterfield entries).
art = pe.MapNode(interface=ra.ArtifactDetect(use_differences=[True, False],
                                             use_norm=True,
                                             norm_threshold=1,
                                             zintensity_threshold=3,
                                             parameter_source='FSL',
                                             mask_type='file'),
                 iterfield=['realigned_files', 'realignment_parameters', 'mask_file'],
                 name="art")
modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")
# Wire preprocessing outputs into artifact detection and model setup,
# then feed the resulting session info into the model fitting workflow.
level1_workflow.connect([(preproc, art, [('outputspec.motion_parameters',
                                          'realignment_parameters'),
                                         ('outputspec.realigned_files',
                                          'realigned_files'),
                                         ('outputspec.mask', 'mask_file')]),
                         (preproc, modelspec, [('outputspec.highpassed_files',
                                                'functional_runs'),
                                               ('outputspec.motion_parameters',
                                                'realignment_parameters')]),
                         (art, modelspec, [('outlier_files', 'outlier_files')]),
                         (modelspec, modelfit, [('session_info', 'inputspec.session_info')]),
                         (preproc, modelfit, [('outputspec.highpassed_files', 'inputspec.functional_data')])
                         ])
"""
Set up first-level workflow
---------------------------
"""
def sort_copes(files):
    """Transpose a runs-by-contrasts nested list into contrasts-by-runs.

    ``files`` is a list with one inner list per run (one entry per
    contrast); the result groups the i-th element of every run together so
    the fixed-effects nodes receive one file list per contrast.
    """
    n_contrasts = len(files[0])
    return [[run[i] for run in files] for i in range(n_contrasts)]
def num_copes(files):
    """Return the number of runs (length of the outer list) in *files*."""
    return len(files)
# Inline helper for connect(): take the first element of a list-valued output.
pickfirst = lambda x: x[0]
# Feed per-run model-fit outputs into the fixed-effects workflow, regrouped
# per contrast by sort_copes.
level1_workflow.connect([(preproc, fixed_fx, [(('outputspec.mask', pickfirst),
                                               'flameo.mask_file')]),
                         (modelfit, fixed_fx, [(('outputspec.copes', sort_copes),
                                                'inputspec.copes'),
                                               ('outputspec.dof_file',
                                                'inputspec.dof_files'),
                                               (('outputspec.varcopes',
                                                 sort_copes),
                                                'inputspec.varcopes'),
                                               (('outputspec.copes', num_copes),
                                                'l2model.num_copes'),
                                               ])
                         ])
"""
Experiment specific components
------------------------------
The nipype tutorial contains data for two subjects. Subject data
is in two subdirectories, ``s1`` and ``s2``. Each subject directory
contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And
one anatomical volume named struct.nii.
Below we set some variables to inform the ``datasource`` about the
layout of our data. We specify the location of the data, the subject
sub-directories and a dictionary that maps each run to a mnemonic (or
field) for the run type (``struct`` or ``func``). These fields become
the output fields of the ``datasource`` node in the pipeline.
In the example below, run 'f3' is of type 'func' and gets mapped to a
nifti filename through a template '%s.nii'. So 'f3' would become
'f3.nii'.
"""
# Specify the location of the data.
data_dir = os.path.abspath('data')
# Specify the subject directories
subject_list = ['s1'] # , 's3']
# Map field names to individual subject runs.
info = dict(func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]],
struct=[['subject_id', 'struct']])
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
name="infosource")
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``datasource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataSource` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.NodeWrapper` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=['func', 'struct']),
name='datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
Use the get_node function to retrieve an internal node by name. Then set the
iterables on this node to perform two different extents of smoothing.
"""
# Iterate the preprocessing over two smoothing kernels (FWHM, mm).
inputnode = level1_workflow.get_node('featpreproc.inputspec')
inputnode.iterables = ('fwhm', [5., 10.])
# Highpass cutoff of 120 s with TR = 3 s; FEAT expects it in volumes,
# hence seconds / (2 * TR).
hpcutoff = 120.
TR = 3.
inputnode.inputs.highpass = hpcutoff / (2. * TR)
"""
Setup a function that returns subject-specific information about the
experimental paradigm. This is used by the
:class:`nipype.modelgen.SpecifyModel` to create the information necessary
to generate an SPM design matrix. In this tutorial, the same paradigm was used
for every participant. Other examples of this function are available in the
`doc/examples` folder. Note: Python knowledge required here.
"""
def subjectinfo(subject_id):
    """Build the per-run paradigm description for *subject_id*.

    Every subject and every one of the four runs uses the same two-condition
    block design: 15 s 'Task-Odd' blocks starting at 15 s and 'Task-Even'
    blocks starting at 45 s, repeating every 60 s up to 240 s.  Returns a
    list of four ``Bunch`` objects, one per run.
    """
    from nipype.interfaces.base import Bunch
    from copy import deepcopy
    print("Subject ID: %s\n" % str(subject_id))
    condition_names = ['Task-Odd', 'Task-Even']
    run_onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))]
    # deepcopy per run so each Bunch owns an independent onset list.
    return [Bunch(conditions=condition_names,
                  onsets=deepcopy(run_onsets),
                  durations=[[15] for _ in condition_names])
            for _ in range(4)]
"""
Setup the contrast structure that needs to be evaluated. This is a list of
lists. The inner list specifies the contrasts and has the following format -
[Name,Stat,[list of condition names],[weights on those conditions]. The
condition names must match the `names` listed in the `subjectinfo` function
described above.
"""
# T-contrasts over the two conditions.  cont3 defines an F-test over both
# but is NOT included in `contrasts` below -- presumably intentional; confirm
# if the F-test is wanted.
cont1 = ['Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5]]
cont2 = ['Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]]
cont3 = ['Task', 'F', [cont1, cont2]]
contrasts = [cont1, cont2]
modelspec.inputs.input_units = 'secs'
modelspec.inputs.time_repetition = TR
modelspec.inputs.high_pass_filter_cutoff = hpcutoff
modelfit.inputs.inputspec.interscan_interval = TR
# Double-gamma HRF without temporal derivatives.
modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': False}}
modelfit.inputs.inputspec.contrasts = contrasts
modelfit.inputs.inputspec.model_serial_correlations = True
modelfit.inputs.inputspec.film_threshold = 1000
level1_workflow.base_dir = os.path.abspath('./fsl/workingdir')
level1_workflow.config['execution'] = dict(crashdump_dir=os.path.abspath('./fsl/crashdumps'))
# Subject iteration drives both data grabbing and model specification.
level1_workflow.connect([(infosource, datasource, [('subject_id', 'subject_id')]),
                         (infosource, modelspec, [(('subject_id', subjectinfo),
                                                   'subject_info')]),
                         (datasource, preproc, [('func', 'inputspec.func')]),
                         ])
"""
Execute the pipeline
--------------------
The code discussed above sets up all the necessary data structures with
appropriate parameters and the connectivity between the processes, but does not
generate any output. To actually run the analysis on the data the
``nipype.pipeline.engine.Pipeline.Run`` function needs to be called.
"""
if __name__ == '__main__':
    # Runs serially by default; uncomment the alternatives below for a graph
    # dump or multi-process execution.
    # level1_workflow.write_graph()
    level1_workflow.run()
    # level1_workflow.run(plugin='MultiProc', plugin_args={'n_procs':2})
| bsd-3-clause |
meganbkratz/acq4 | acq4/devices/DAQGeneric/InputChannelTemplate.py | 3 | 3283 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'InputChannelTemplate.ui'
#
# Created: Sun Feb 22 13:29:16 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4 for different PyQt4 API versions.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 / Python 3: QString is gone, plain str passes through unchanged.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer Qt drops the encoding argument from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Widget layout generated by pyuic4 from 'InputChannelTemplate.ui'.

    Do not edit by hand -- regenerate from the .ui file (see file header).
    """
    def setupUi(self, Form):
        """Create and lay out the input-channel widgets on *Form*."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(427, 220)
        self.verticalLayout = QtGui.QVBoxLayout(Form)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # Bold-titled group box holds the three channel checkboxes.
        self.groupBox = GroupBox(Form)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.groupBox.setFont(font)
        self.groupBox.setCheckable(False)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout = QtGui.QGridLayout(self.groupBox)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setContentsMargins(5, 0, 0, 0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # Each checkbox resets to a non-bold font so only the title is bold.
        self.recordCheck = QtGui.QCheckBox(self.groupBox)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.recordCheck.setFont(font)
        self.recordCheck.setChecked(True)
        self.recordCheck.setObjectName(_fromUtf8("recordCheck"))
        self.gridLayout.addWidget(self.recordCheck, 0, 0, 1, 1)
        self.displayCheck = QtGui.QCheckBox(self.groupBox)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.displayCheck.setFont(font)
        self.displayCheck.setChecked(True)
        self.displayCheck.setObjectName(_fromUtf8("displayCheck"))
        self.gridLayout.addWidget(self.displayCheck, 0, 1, 1, 1)
        self.recordInitCheck = QtGui.QCheckBox(self.groupBox)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.recordInitCheck.setFont(font)
        self.recordInitCheck.setObjectName(_fromUtf8("recordInitCheck"))
        self.gridLayout.addWidget(self.recordInitCheck, 1, 0, 1, 2)
        # Expanding spacer pushes the checkboxes to the top of the group box.
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem, 2, 0, 1, 1)
        self.verticalLayout.addWidget(self.groupBox)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Apply (re)translated label strings to the widgets."""
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.groupBox.setTitle(_translate("Form", "GroupBox", None))
        self.recordCheck.setText(_translate("Form", "Record Trace", None))
        self.displayCheck.setText(_translate("Form", "Display", None))
        self.recordInitCheck.setText(_translate("Form", "Record Initial State", None))
from acq4.pyqtgraph import GroupBox
| mit |
awduda/awduda.github.io | venv/lib/python2.7/site-packages/setuptools/command/build_clib.py | 314 | 4484 | import distutils.command.build_clib as orig
from distutils.errors import DistutilsSetupError
from distutils import log
from setuptools.dep_util import newer_pairwise_group
class build_clib(orig.build_clib):
    """
    Override the default build_clib behaviour to do the following:

    1. Implement a rudimentary timestamp-based dependency system
       so 'compile()' doesn't run every time.
    2. Add more keys to the 'build_info' dictionary:
       * obj_deps - specify dependencies for each object compiled.
         This should be a dictionary mapping a key with the source
         filename to a list of dependencies. Use an empty string for
         global dependencies.
       * cflags - specify a list of additional flags to pass to
         the compiler.
    """
    def build_libraries(self, libraries):
        """Compile and archive each (lib_name, build_info) pair.

        Objects are recompiled only when a source or one of its declared
        dependencies is newer than the existing object file; the static
        library is then (re)created from the expected object files.
        Raises DistutilsSetupError on malformed 'sources' or 'obj_deps'.
        """
        for (lib_name, build_info) in libraries:
            sources = build_info.get('sources')
            if sources is None or not isinstance(sources, (list, tuple)):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'sources' must be present and must be "
                    "a list of source filenames" % lib_name)
            sources = list(sources)
            log.info("building '%s' library", lib_name)
            # Make sure everything is the correct type.
            # obj_deps should be a dictionary of keys as sources
            # and a list/tuple of files that are its dependencies.
            obj_deps = build_info.get('obj_deps', dict())
            if not isinstance(obj_deps, dict):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'" % lib_name)
            dependencies = []
            # Get the global dependencies that are specified by the '' key.
            # These will go into every source's dependency list.
            global_deps = obj_deps.get('', list())
            if not isinstance(global_deps, (list, tuple)):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'" % lib_name)
            # Build the list to be used by newer_pairwise_group
            # each source will be auto-added to its dependencies.
            for source in sources:
                src_deps = [source]
                src_deps.extend(global_deps)
                extra_deps = obj_deps.get(source, list())
                if not isinstance(extra_deps, (list, tuple)):
                    raise DistutilsSetupError(
                        "in 'libraries' option (library '%s'), "
                        "'obj_deps' must be a dictionary of "
                        "type 'source: list'" % lib_name)
                src_deps.extend(extra_deps)
                dependencies.append(src_deps)
            expected_objects = self.compiler.object_filenames(
                sources,
                output_dir=self.build_temp
            )
            # Skip compilation entirely when every object is up to date.
            if newer_pairwise_group(dependencies, expected_objects) != ([], []):
                # First, compile the source code to object files in the library
                # directory.  (This should probably change to putting object
                # files in a temporary build directory.)
                macros = build_info.get('macros')
                include_dirs = build_info.get('include_dirs')
                cflags = build_info.get('cflags')
                objects = self.compiler.compile(
                    sources,
                    output_dir=self.build_temp,
                    macros=macros,
                    include_dirs=include_dirs,
                    extra_postargs=cflags,
                    debug=self.debug
                )
            # Now "link" the object files together into a static library.
            # (On Unix at least, this isn't really linking -- it just
            # builds an archive.  Whatever.)
            self.compiler.create_static_lib(
                expected_objects,
                lib_name,
                output_dir=self.build_clib,
                debug=self.debug
            )
| mit |
drglove/SickRage | lib/sqlalchemy/dialects/sybase/pyodbc.py | 79 | 2162 | # sybase/pyodbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: sybase+pyodbc://<username>:<password>@<dsnname>[/<database>]
:url: http://pypi.python.org/pypi/pyodbc/
Unicode Support
---------------
The pyodbc driver currently supports usage of these Sybase types with
Unicode or multibyte strings::
CHAR
NCHAR
NVARCHAR
TEXT
VARCHAR
Currently *not* supported are::
UNICHAR
UNITEXT
UNIVARCHAR
"""
from sqlalchemy.dialects.sybase.base import SybaseDialect,\
SybaseExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy import types as sqltypes, processors
import decimal
class _SybNumeric_pyodbc(sqltypes.Numeric):
    """Numeric variant that binds very small Decimals as floats.

    pyodbc cannot reliably transmit Decimals with ``adjusted() < -6`` (or
    very large adjusted values) to Sybase, so those values are converted to
    floats before binding; everything else goes through the standard
    Numeric bind processing.
    """

    def bind_processor(self, dialect):
        parent_process = super(_SybNumeric_pyodbc, self).bind_processor(dialect)

        def process(value):
            # Only intervene for Decimal inputs in asdecimal mode.
            if self.asdecimal and isinstance(value, decimal.Decimal):
                if value.adjusted() < -6:
                    return processors.to_float(value)
            if parent_process:
                return parent_process(value)
            return value

        return process
class SybaseExecutionContext_pyodbc(SybaseExecutionContext):
    def set_ddl_autocommit(self, connection, value):
        """Toggle DBAPI-level autocommit around DDL statements."""
        connection.autocommit = True if value else False
class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect):
    # Use the pyodbc-specific execution context (DDL autocommit handling).
    execution_ctx_cls = SybaseExecutionContext_pyodbc
    # Route Numeric binds through the Decimal-to-float workaround.
    colspecs = {
        sqltypes.Numeric: _SybNumeric_pyodbc,
    }
# Entry point looked up by SQLAlchemy's dialect loader.
dialect = SybaseDialect_pyodbc
| gpl-3.0 |
dario-ramos/tp3_cloud-events | src/controller/cancel_attendance_controller.py | 1 | 1650 | #!/usr/bin/env python
import webapp2
import cgi
import environment
class CancelAttendanceController(webapp2.RequestHandler):
    """Handler for cancelling a guest's attendance at an event."""
    def get(self):
        """Render the cancellation form from the 'cancel.html' template."""
        template_values = {
            "title": "Cancel Attendance"
        }
        template = environment.JINJA_ENVIRONMENT.get_template('cancel.html')
        self.response.out.write(template.render(template_values))
    def renderAttendanceNotConfirmedScreen(self, eventName, guestEmail):
        """Tell the user no subscription existed, so nothing was deleted.

        NOTE(review): values are written into HTML verbatim -- callers must
        pass already-escaped strings (post() does via cgi.escape).
        """
        self.response.out.write('<html><body>')
        self.response.out.write('<h1> Guest ' + guestEmail + ' was not subscribed to event ' +
                                eventName + ', so nothing needs to be done </h1>')
        self.response.out.write('<a href="/"> Back to portal </a>')
        self.response.out.write('</html></body>')
    def post(self):
        """Delete the (event, guest) attendance record if it exists."""
        # cgi.escape protects the HTML below; note it does not escape
        # quote characters by default.
        eventName = cgi.escape(self.request.get('event_name'))
        guestEmail = cgi.escape(self.request.get('guest_email'))
        attendanceRepo = environment.MODEL.getAttendanceRepository()
        attendance = attendanceRepo.getByEventNameAndGuestEmail(
            eventName, guestEmail)
        if attendance is None:
            self.renderAttendanceNotConfirmedScreen(eventName, guestEmail)
        else:
            attendanceRepo.deleteByEventNameAndGuestEmail(
                eventName, guestEmail)
            self.response.out.write('<html><body>')
            self.response.out.write(
                '<h1> Guest ' + guestEmail + ' successfully unsubscribed from event ' + eventName + '! </h1>')
            self.response.out.write('<a href="/"> Back to portal </a>')
            self.response.out.write('</html></body>')
| gpl-2.0 |
samarthmed/emacs-config | .python-environments/default/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py | 1002 | 25650 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
from . import tarfile
try:
import bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
# Public API of this backported shutil module.
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
           "copytree", "move", "rmtree", "Error", "SpecialFileError",
           "ExecError", "make_archive", "get_archive_formats",
           "register_archive_format", "unregister_archive_format",
           "get_unpack_formats", "register_unpack_format",
           "unregister_unpack_format", "unpack_archive", "ignore_patterns"]
class Error(EnvironmentError):
    # Aggregate error raised by copytree/move: args[0] is a list of
    # (src, dst, reason) tuples.
    pass
class SpecialFileError(EnvironmentError):
    """Raised when trying to do a kind of operation (e.g. copying) which is
    not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
    """Raised when a command could not be executed"""
class ReadError(EnvironmentError):
    """Raised when an archive cannot be read"""
class RegistryError(Exception):
    """Raised when a registry operation with the archiving
    and unpacking registries fails"""
try:
    WindowsError
except NameError:
    # Non-Windows platforms: make later ``isinstance(why, WindowsError)``
    # checks harmless.
    WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
    """Copy data from file-like object *fsrc* to file-like object *fdst*.

    Reads and writes in chunks of *length* bytes until EOF.
    """
    while True:
        chunk = fsrc.read(length)
        if not chunk:
            return
        fdst.write(chunk)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
    """Copy the contents of the file named *src* to a file named *dst*.

    Raises Error if both names refer to the same file and SpecialFileError
    if either side is a named pipe.
    """
    if _samefile(src, dst):
        raise Error("`%s` and `%s` are the same file" % (src, dst))
    for name in (src, dst):
        try:
            mode = os.stat(name).st_mode
        except OSError:
            # File most likely does not exist -- nothing to check.
            continue
        # XXX What about other special files? (sockets, devices...)
        if stat.S_ISFIFO(mode):
            raise SpecialFileError("`%s` is a named pipe" % name)
    with open(src, 'rb') as fin, open(dst, 'wb') as fout:
        copyfileobj(fin, fout)
def copymode(src, dst):
    """Copy permission bits from *src* to *dst* (no-op without os.chmod)."""
    if not hasattr(os, 'chmod'):
        return
    os.chmod(dst, stat.S_IMODE(os.stat(src).st_mode))
def copystat(src, dst):
    """Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
    st = os.stat(src)
    mode = stat.S_IMODE(st.st_mode)
    # Each capability is feature-tested: not every platform has utime/chmod.
    if hasattr(os, 'utime'):
        os.utime(dst, (st.st_atime, st.st_mtime))
    if hasattr(os, 'chmod'):
        os.chmod(dst, mode)
    if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
        try:
            os.chflags(dst, st.st_flags)
        except OSError as why:
            # Some filesystems refuse chflags; EOPNOTSUPP is expected there
            # and deliberately swallowed, anything else propagates.
            if (not hasattr(errno, 'EOPNOTSUPP') or
                why.errno != errno.EOPNOTSUPP):
                raise
def copy(src, dst):
    """Copy data and mode bits ("cp src dst").

    The destination may be a directory, in which case the file is copied
    into it under its original basename.
    """
    target = os.path.join(dst, os.path.basename(src)) if os.path.isdir(dst) else dst
    copyfile(src, target)
    copymode(src, target)
def copy2(src, dst):
    """Copy data and all stat info ("cp -p src dst").

    The destination may be a directory, in which case the file is copied
    into it under its original basename.
    """
    target = os.path.join(dst, os.path.basename(src)) if os.path.isdir(dst) else dst
    copyfile(src, target)
    copystat(src, target)
def ignore_patterns(*patterns):
    """Build a callable usable as the copytree() *ignore* parameter.

    The returned function receives a directory path and its name list and
    returns the set of names matching any of the given glob-style patterns.
    """
    def _ignore_patterns(path, names):
        ignored = set()
        for pattern in patterns:
            ignored.update(fnmatch.filter(names, pattern))
        return ignored
    return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
             ignore_dangling_symlinks=False):
    """Recursively copy a directory tree.
    The destination directory must not already exist.
    If exception(s) occur, an Error is raised with a list of reasons
    (a list of (srcname, dstname, why) tuples).
    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied. If the file pointed by the symlink doesn't
    exist, an exception will be added in the list of errors raised in
    an Error exception at the end of the copy process.
    You can set the optional ignore_dangling_symlinks flag to true if you
    want to silence this exception. Notice that this has no effect on
    platforms that don't support os.symlink.
    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():
        callable(src, names) -> ignored_names
    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.
    The optional copy_function argument is a callable that will be used
    to copy each file. It will be called with the source path and the
    destination path as arguments. By default, copy2() is used, but any
    function that supports the same signature (like copy()) can be used.
    """
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()
    os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.islink(srcname):
                linkto = os.readlink(srcname)
                if symlinks:
                    os.symlink(linkto, dstname)
                else:
                    # ignore dangling symlink if the flag is on
                    if not os.path.exists(linkto) and ignore_dangling_symlinks:
                        continue
                    # otherwise let the copy occur. copy2 will raise an error
                    copy_function(srcname, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore, copy_function)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy_function(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        copystat(src, dst)
    except OSError as why:
        if WindowsError is not None and isinstance(why, WindowsError):
            # Copying file access times may fail on Windows
            pass
        else:
            # BUG FIX: this used errors.extend(...), which flattened the
            # single (src, dst, why) tuple into three separate list items.
            # Every other error path appends one 3-tuple per failure, and
            # Error's args[0] is documented as a list of such tuples.
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
def rmtree(path, ignore_errors=False, onerror=None):
    """Recursively delete a directory tree.
    If ignore_errors is set, errors are ignored; otherwise, if onerror
    is set, it is called to handle the error with arguments (func,
    path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
    path is the argument to that function that caused it to fail; and
    exc_info is a tuple returned by sys.exc_info().  If ignore_errors
    is false and onerror is None, an exception is raised.
    """
    # Normalize error handling into an onerror callable: swallow, raise,
    # or the caller-supplied hook.
    if ignore_errors:
        def onerror(*args):
            pass
    elif onerror is None:
        def onerror(*args):
            raise
    try:
        if os.path.islink(path):
            # symlinks to directories are forbidden, see bug #1669
            raise OSError("Cannot call rmtree on a symbolic link")
    except OSError:
        onerror(os.path.islink, path, sys.exc_info())
        # can't continue even if onerror hook returns
        return
    names = []
    try:
        names = os.listdir(path)
    except os.error:
        onerror(os.listdir, path, sys.exc_info())
    for name in names:
        fullname = os.path.join(path, name)
        try:
            mode = os.lstat(fullname).st_mode
        except os.error:
            # Treat unstat-able entries as non-directories below.
            mode = 0
        if stat.S_ISDIR(mode):
            rmtree(fullname, ignore_errors, onerror)
        else:
            try:
                os.remove(fullname)
            except os.error:
                onerror(os.remove, fullname, sys.exc_info())
    try:
        os.rmdir(path)
    except os.error:
        onerror(os.rmdir, path, sys.exc_info())
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
    """Recursively move a file or directory to another location. This is
    similar to the Unix "mv" command.
    If the destination is a directory or a symlink to a directory, the source
    is moved inside the directory. The destination path must not already
    exist.
    If the destination already exists but is not a directory, it may be
    overwritten depending on os.rename() semantics.
    If the destination is on our current filesystem, then rename() is used.
    Otherwise, src is copied to the destination and then removed.
    A lot more could be done here...  A look at a mv.c shows a lot of
    the issues this implementation glosses over.
    """
    real_dst = dst
    if os.path.isdir(dst):
        if _samefile(src, dst):
            # We might be on a case insensitive filesystem,
            # perform the rename anyway.
            os.rename(src, dst)
            return
        real_dst = os.path.join(dst, _basename(src))
        if os.path.exists(real_dst):
            raise Error("Destination path '%s' already exists" % real_dst)
    try:
        os.rename(src, real_dst)
    except OSError:
        # Cross-filesystem move: rename() fails, so fall back to a full
        # copy followed by deletion of the source.
        if os.path.isdir(src):
            if _destinsrc(src, dst):
                raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
            copytree(src, real_dst, symlinks=True)
            rmtree(src)
        else:
            copy2(src, real_dst)
            os.unlink(src)
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
                  owner=None, group=None, logger=None):
    """Create a (possibly compressed) tar file from all the files under
    'base_dir'.
    'compress' must be "gzip" (the default), "bzip2", or None.
    'owner' and 'group' can be used to define an owner and a group for the
    archive that is being built. If not provided, the current owner and group
    will be used.
    The output tar file will be named 'base_name' + ".tar", possibly plus
    the appropriate compression extension (".gz", or ".bz2").
    Returns the output filename.
    """
    tar_compression = {'gzip': 'gz', None: ''}
    compress_ext = {'gzip': '.gz'}
    # bzip2 support is optional -- only offered when the bz2 module imported.
    if _BZ2_SUPPORTED:
        tar_compression['bzip2'] = 'bz2'
        compress_ext['bzip2'] = '.bz2'
    # flags for compression program, each element of list will be an argument
    if compress is not None and compress not in compress_ext:
        raise ValueError("bad value for 'compress', or compression format not "
                         "supported : {0}".format(compress))
    archive_name = base_name + '.tar' + compress_ext.get(compress, '')
    archive_dir = os.path.dirname(archive_name)
    if not os.path.exists(archive_dir):
        if logger is not None:
            logger.info("creating %s", archive_dir)
        if not dry_run:
            os.makedirs(archive_dir)
    # creating the tarball
    if logger is not None:
        logger.info('Creating tar archive')
    uid = _get_uid(owner)
    gid = _get_gid(group)
    def _set_uid_gid(tarinfo):
        # tarfile filter: force the requested owner/group onto each member.
        if gid is not None:
            tarinfo.gid = gid
            tarinfo.gname = group
        if uid is not None:
            tarinfo.uid = uid
            tarinfo.uname = owner
        return tarinfo
    if not dry_run:
        tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
        try:
            tar.add(base_dir, filter=_set_uid_gid)
        finally:
            tar.close()
    return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
    """Create *zip_filename* from *base_dir* by spawning the external
    ``zip`` utility.

    Fallback used by _make_zipfile() when the ``zipfile`` module cannot be
    imported.  Raises ExecError when the external tool fails or is missing.
    """
    # XXX see if we want to keep an external call here
    zipoptions = "-r" if verbose else "-rq"
    from distutils.errors import DistutilsExecError
    from distutils.spawn import spawn
    try:
        spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
    except DistutilsExecError:
        # XXX really should distinguish between "couldn't find
        # external 'zip' command" and "zip failed".
        # BUG FIX: the '%' interpolation used to be applied to the raised
        # ExecError *instance* (``raise ExecError(...) % zip_filename``),
        # which raised TypeError instead of the intended ExecError.  Format
        # the message before constructing the exception.
        raise ExecError("unable to create zip file '%s': "
                        "could neither import the 'zipfile' module nor "
                        "find a standalone zip utility" % zip_filename)
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
    """Create a zip file from all the files under 'base_dir'.
    The output zip file will be named 'base_name' + ".zip".  Uses either the
    "zipfile" Python module (if available) or the InfoZIP "zip" utility
    (if installed and found on the default search path).  If neither tool is
    available, raises ExecError.  Returns the name of the output zip
    file.
    """
    zip_filename = base_name + ".zip"
    archive_dir = os.path.dirname(base_name)
    if not os.path.exists(archive_dir):
        if logger is not None:
            logger.info("creating %s", archive_dir)
        if not dry_run:
            os.makedirs(archive_dir)
    # If zipfile module is not available, try spawning an external 'zip'
    # command.
    try:
        import zipfile
    except ImportError:
        zipfile = None
    if zipfile is None:
        _call_external_zip(base_dir, zip_filename, verbose, dry_run)
    else:
        if logger is not None:
            logger.info("creating '%s' and adding '%s' to it",
                        zip_filename, base_dir)
        if not dry_run:
            zip = zipfile.ZipFile(zip_filename, "w",
                                  compression=zipfile.ZIP_DEFLATED)
            # Walk the tree and store each regular file under its own path;
            # directories and special files are skipped.
            for dirpath, dirnames, filenames in os.walk(base_dir):
                for name in filenames:
                    path = os.path.normpath(os.path.join(dirpath, name))
                    if os.path.isfile(path):
                        zip.write(path, path)
                        if logger is not None:
                            logger.info("adding '%s'", path)
            zip.close()
    return zip_filename
# Registry: format name -> (builder function, default kwargs, description).
_ARCHIVE_FORMATS = {
    'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
    'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
    'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
    'zip': (_make_zipfile, [], "ZIP file"),
    }
if _BZ2_SUPPORTED:
    # NOTE(review): 'bztar' is already registered unconditionally above, so
    # this re-registration is redundant -- confirm whether the unconditional
    # entry should instead be removed when bz2 is unavailable.
    _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
                                "bzip2'ed tar-file")
def get_archive_formats():
    """Returns a list of supported formats for archiving and unarchiving.

    Each element of the returned (sorted) list is a tuple
    (name, description).
    """
    return sorted((name, registry[2])
                  for name, registry in _ARCHIVE_FORMATS.items())
def register_archive_format(name, function, extra_args=None, description=''):
    """Registers an archive format.

    *name* is the name of the format.  *function* is the callable that will
    be used to create archives.  If provided, *extra_args* is a sequence of
    (name, value) tuples that will be passed as arguments to the callable.
    *description* can be provided to describe the format, and will be
    returned by the get_archive_formats() function.
    """
    extra_args = [] if extra_args is None else extra_args
    if not isinstance(function, collections.Callable):
        raise TypeError('The %s object is not callable' % function)
    if not isinstance(extra_args, (tuple, list)):
        raise TypeError('extra_args needs to be a sequence')
    for pair in extra_args:
        # Each entry must be a two-element (arg_name, value) pair.
        if not isinstance(pair, (tuple, list)) or len(pair) != 2:
            raise TypeError('extra_args elements are : (arg_name, value)')
    _ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
    """Forget the archive format registered under *name* (KeyError if absent)."""
    _ARCHIVE_FORMATS.pop(name)
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
                 dry_run=0, owner=None, group=None, logger=None):
    """Create an archive file (eg. zip or tar).
    'base_name' is the name of the file to create, minus any format-specific
    extension; 'format' is the archive format: one of "zip", "tar", "bztar"
    or "gztar".
    'root_dir' is a directory that will be the root directory of the
    archive; ie. we typically chdir into 'root_dir' before creating the
    archive.  'base_dir' is the directory where we start archiving from;
    ie. 'base_dir' will be the common prefix of all files and
    directories in the archive.  'root_dir' and 'base_dir' both default
    to the current directory.  Returns the name of the archive file.
    'owner' and 'group' are used when creating a tar archive. By default,
    uses the current owner and group.
    """
    # NOTE: this function changes the process-wide cwd while building;
    # not safe to run concurrently with code that depends on cwd.
    save_cwd = os.getcwd()
    if root_dir is not None:
        if logger is not None:
            logger.debug("changing into '%s'", root_dir)
        base_name = os.path.abspath(base_name)
        if not dry_run:
            os.chdir(root_dir)
    if base_dir is None:
        base_dir = os.curdir
    kwargs = {'dry_run': dry_run, 'logger': logger}
    try:
        format_info = _ARCHIVE_FORMATS[format]
    except KeyError:
        raise ValueError("unknown archive format '%s'" % format)
    func = format_info[0]
    for arg, val in format_info[1]:
        kwargs[arg] = val
    # owner/group only make sense for tar-based formats.
    if format != 'zip':
        kwargs['owner'] = owner
        kwargs['group'] = group
    try:
        filename = func(base_name, base_dir, **kwargs)
    finally:
        # Always restore the caller's working directory, even on failure.
        if root_dir is not None:
            if logger is not None:
                logger.debug("changing back to '%s'", save_cwd)
            os.chdir(save_cwd)
    return filename
def get_unpack_formats():
    """Return the supported unpack formats, sorted by name.

    Each element of the returned list is a tuple
    ``(name, extensions, description)``.
    """
    return sorted((name, info[0], info[3])
                  for name, info in _UNPACK_FORMATS.items())
def _check_unpack_options(extensions, function, extra_args):
    """Checks what gets registered as an unpacker.

    Raises RegistryError if any extension in *extensions* is already
    claimed by a registered unpack format, and TypeError if *function*
    is not callable.
    """
    # first make sure no other unpacker is registered for this extension
    existing_extensions = {}
    for name, info in _UNPACK_FORMATS.items():
        for ext in info[0]:
            existing_extensions[ext] = name
    for extension in extensions:
        if extension in existing_extensions:
            msg = '%s is already registered for "%s"'
            raise RegistryError(msg % (extension,
                                       existing_extensions[extension]))
    # Use the callable() builtin rather than isinstance(..., collections.
    # Callable): the Callable ABC lives in collections.abc on Python 3 and
    # the collections alias was removed in Python 3.10, while callable()
    # works on every supported version (2.6+ and 3.2+).
    if not callable(function):
        raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
                           description=''):
    """Register a new unpack format.

    `name` is the name of the format and `extensions` is a list of
    filename extensions that select it.

    `function` is the callable used to unpack archives; it must raise a
    ReadError for archives it cannot handle.  If provided, `extra_args`
    is a sequence of (name, value) tuples passed to the callable as
    keyword arguments.  `description` is human-readable text reported by
    get_unpack_formats().
    """
    extra_args = [] if extra_args is None else extra_args
    _check_unpack_options(extensions, function, extra_args)
    _UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
    """Removes the unpack format *name* from the registry.

    Raises KeyError if no format with that name was registered.
    """
    del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
    """Unpack zip `filename` to `extract_dir`.

    Entries with absolute paths or '..' components are skipped to avoid
    writing outside `extract_dir`.  Raises ReadError if the zip module is
    unavailable or `filename` is not a zip file.
    """
    try:
        import zipfile
    except ImportError:
        raise ReadError('zlib not supported, cannot unpack this archive.')
    if not zipfile.is_zipfile(filename):
        raise ReadError("%s is not a zip file" % filename)
    # 'zf' rather than 'zip' -- don't shadow the builtin.
    zf = zipfile.ZipFile(filename)
    try:
        for info in zf.infolist():
            name = info.filename
            # don't extract absolute paths or ones with .. in them
            if name.startswith('/') or '..' in name:
                continue
            target = os.path.join(extract_dir, *name.split('/'))
            if not target:
                continue
            _ensure_directory(target)
            if not name.endswith('/'):
                # file entry: 'with' guarantees the handle is closed even
                # if the write fails
                data = zf.read(info.filename)
                with open(target, 'wb') as f:
                    f.write(data)
                # drop the reference promptly; archive members can be large
                del data
    finally:
        zf.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
# Registry of built-in unpack formats, keyed by format name.  Each value is
# a tuple (extensions, unpack function, extra_args, description) -- the same
# layout produced by register_unpack_format().
_UNPACK_FORMATS = {
    'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
    'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
    'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
    }
# bztar is only available when the optional bz2 module imported cleanly.
# NOTE(review): the registered extension is '.bz2', not '.tar.bz2' -- any
# filename ending in '.bz2' is routed to the tar unpacker; confirm intended.
if _BZ2_SUPPORTED:
    _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
                                "bzip2'ed tar-file")
def _find_unpack_format(filename):
    """Return the name of the registered unpack format whose extension
    matches *filename*, or None when nothing matches."""
    for name, info in _UNPACK_FORMATS.items():
        if any(filename.endswith(extension) for extension in info[0]):
            return name
    return None
def unpack_archive(filename, extract_dir=None, format=None):
    """Unpack an archive.

    `filename` is the name of the archive and `extract_dir` the target
    directory, defaulting to the current working directory.

    `format` may name any registered unpack format ("zip", "tar", "gztar",
    ...).  When omitted, the format is deduced from the filename extension
    of a registered unpacker.

    Raises ValueError for an unknown explicit `format`, and ReadError when
    no registered extension matches `filename`.
    """
    if extract_dir is None:
        extract_dir = os.getcwd()
    if format is not None:
        if format not in _UNPACK_FORMATS:
            raise ValueError("Unknown unpack format '{0}'".format(format))
        info = _UNPACK_FORMATS[format]
    else:
        # we need to look at the registered unpackers supported extensions
        format = _find_unpack_format(filename)
        if format is None:
            raise ReadError("Unknown archive format '{0}'".format(filename))
        info = _UNPACK_FORMATS[format]
    # info is (extensions, function, extra_args, description)
    func = info[1]
    func(filename, extract_dir, **dict(info[2]))
| gpl-2.0 |
benspaulding/django | django/contrib/databrowse/datastructures.py | 4 | 9272 | """
These classes are light wrappers around Django's database API that provide
convenience functionality and permalink functions for the databrowse app.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils import formats
from django.utils.text import capfirst
from django.utils.encoding import smart_unicode, smart_str, iri_to_uri
from django.utils.safestring import mark_safe
from django.db.models.query import QuerySet
# Placeholder text rendered for NULL/None field values.
EMPTY_VALUE = '(None)'
# Maximum number of characters shown before a value is truncated with '...'.
DISPLAY_SIZE = 100
class EasyModel(object):
    """Convenience wrapper around a Django model class registered with a
    databrowse site, providing permalink URLs and EasyInstance access."""
    def __init__(self, site, model):
        self.site = site
        self.model = model
        # All models registered on the site; used to decide which related
        # models get hyperlinks.
        self.model_list = site.registry.keys()
        self.verbose_name = model._meta.verbose_name
        self.verbose_name_plural = model._meta.verbose_name_plural
    def __repr__(self):
        return '<EasyModel for %s>' % smart_str(self.model._meta.object_name)
    def model_databrowse(self):
        "Returns the ModelDatabrowse class for this model."
        return self.site.registry[self.model]
    def url(self):
        # Permalink to this model's databrowse listing page.
        return mark_safe('%s%s/%s/' % (self.site.root_url, self.model._meta.app_label, self.model._meta.module_name))
    def objects(self, **kwargs):
        # Filtered EasyQuerySet over this model.
        return self.get_query_set().filter(**kwargs)
    def get_query_set(self):
        # Clone the default queryset as an EasyQuerySet so iteration yields
        # EasyInstance objects instead of raw model instances.
        easy_qs = self.model._default_manager.get_query_set()._clone(klass=EasyQuerySet)
        easy_qs._easymodel = self
        return easy_qs
    def object_by_pk(self, pk):
        # May raise the model's DoesNotExist, like Manager.get().
        return EasyInstance(self, self.model._default_manager.get(pk=pk))
    def sample_objects(self):
        # Yield up to three EasyInstances, e.g. for preview listings.
        for obj in self.model._default_manager.all()[:3]:
            yield EasyInstance(self, obj)
    def field(self, name):
        """Return an EasyField for the model field *name*, or None if the
        model has no such field."""
        try:
            f = self.model._meta.get_field(name)
        except models.FieldDoesNotExist:
            return None
        return EasyField(self, f)
    def fields(self):
        """Return EasyFields for every local and many-to-many field."""
        return [EasyField(self, f) for f in (self.model._meta.fields + self.model._meta.many_to_many)]
class EasyField(object):
    """Wrapper pairing an EasyModel with one of its Django model fields."""
    def __init__(self, easy_model, field):
        self.model, self.field = easy_model, field
    def __repr__(self):
        return smart_str('<EasyField for %s.%s>' % (self.model.model._meta.object_name, self.field.name))
    def choices(self):
        """Yield an EasyChoice for each (value, label) pair of the field."""
        for value, label in self.field.choices:
            yield EasyChoice(self.model, self, value, label)
    def url(self):
        """Databrowse URL for this field's listing page.

        NOTE(review): fields with neither choices nor a relation fall
        through and return None implicitly -- confirm callers expect that.
        """
        if self.field.choices:
            return mark_safe('%s%s/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, self.field.name))
        elif self.field.rel:
            return mark_safe('%s%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name))
class EasyChoice(object):
    """A single (value, label) choice belonging to an EasyField."""
    def __init__(self, easy_model, field, value, label):
        # 'field' is an EasyField wrapper; the underlying Django model
        # field is field.field.
        self.model, self.field = easy_model, field
        self.value, self.label = value, label
    def __repr__(self):
        # self.field is an EasyField (no .name attribute); the underlying
        # Django field name lives at self.field.field.name, as url() below
        # already does -- the previous self.field.name raised AttributeError.
        return smart_str('<EasyChoice for %s.%s>' % (self.model.model._meta.object_name, self.field.field.name))
    def url(self):
        """Databrowse URL listing objects having this choice value."""
        return mark_safe('%s%s/%s/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, self.field.field.name, iri_to_uri(self.value)))
class EasyInstance(object):
    """Wrapper pairing an EasyModel with a concrete model instance."""
    def __init__(self, easy_model, instance):
        self.model, self.instance = easy_model, instance
    def __repr__(self):
        return smart_str('<EasyInstance for %s (%s)>' % (self.model.model._meta.object_name, self.instance._get_pk_val()))
    def __unicode__(self):
        # Display string, truncated to DISPLAY_SIZE characters.
        val = smart_unicode(self.instance)
        if len(val) > DISPLAY_SIZE:
            return val[:DISPLAY_SIZE] + '...'
        return val
    def __str__(self):
        # Python 2 bytes representation; delegates to __unicode__.
        return self.__unicode__().encode('utf-8')
    def pk(self):
        """Primary-key value of the wrapped instance."""
        return self.instance._get_pk_val()
    def url(self):
        """Databrowse permalink for this object."""
        return mark_safe('%s%s/%s/objects/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, iri_to_uri(self.pk())))
    def fields(self):
        """
        Generator that yields EasyInstanceFields for each field in this
        EasyInstance's model.
        """
        for f in self.model.model._meta.fields + self.model.model._meta.many_to_many:
            yield EasyInstanceField(self.model, self, f)
    def related_objects(self):
        """
        Generator that yields dictionaries of all models that have this
        EasyInstance's model as a ForeignKey or ManyToManyField, along with
        lists of related objects.
        """
        for rel_object in self.model.model._meta.get_all_related_objects() + self.model.model._meta.get_all_related_many_to_many_objects():
            if rel_object.model not in self.model.model_list:
                continue # Skip models that aren't in the model_list
            em = EasyModel(self.model.site, rel_object.model)
            yield {
                'model': em,
                'related_field': rel_object.field.verbose_name,
                'object_list': [EasyInstance(em, i) for i in getattr(self.instance, rel_object.get_accessor_name()).all()],
            }
class EasyInstanceField(object):
    """One field's value on a specific instance, with display values and
    databrowse URLs."""
    def __init__(self, easy_model, instance, field):
        # 'instance' is an EasyInstance; 'field' is the raw Django field.
        self.model, self.field, self.instance = easy_model, field, instance
        self.raw_value = getattr(instance.instance, field.name)
    def __repr__(self):
        return smart_str('<EasyInstanceField for %s.%s>' % (self.model.model._meta.object_name, self.field.name))
    def values(self):
        """
        Returns a list of values for this field for this instance. It's a list
        so we can accommodate many-to-many fields.

        NOTE(review): if self.field.rel is neither a ManyToOneRel nor a
        ManyToManyRel, 'objs' is never bound and the final return raises
        NameError -- presumably unreachable in practice; confirm.
        """
        if self.field.rel:
            if isinstance(self.field.rel, models.ManyToOneRel):
                objs = getattr(self.instance.instance, self.field.name)
            elif isinstance(self.field.rel, models.ManyToManyRel): # ManyToManyRel
                return list(getattr(self.instance.instance, self.field.name).all())
        elif self.field.choices:
            # Map the stored value to its human-readable label.
            objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE)
        elif isinstance(self.field, models.DateField) or isinstance(self.field, models.TimeField):
            if self.raw_value:
                # Check DateTimeField before DateField: it is a subclass.
                if isinstance(self.field, models.DateTimeField):
                    objs = capfirst(formats.date_format(self.raw_value, 'DATETIME_FORMAT'))
                elif isinstance(self.field, models.TimeField):
                    objs = capfirst(formats.time_format(self.raw_value, 'TIME_FORMAT'))
                else:
                    objs = capfirst(formats.date_format(self.raw_value, 'DATE_FORMAT'))
            else:
                objs = EMPTY_VALUE
        elif isinstance(self.field, models.BooleanField) or isinstance(self.field, models.NullBooleanField):
            objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value]
        else:
            objs = self.raw_value
        return [objs]
    def urls(self):
        "Returns a list of (value, URL) tuples."
        # First, check the urls() method for each plugin; the first plugin
        # that supplies URLs wins.  (Dead 'plugin_urls' accumulator and
        # commented-out code removed.)
        for plugin_name, plugin in self.model.model_databrowse().plugins.items():
            urls = plugin.urls(plugin_name, self)
            if urls is not None:
                return zip(self.values(), urls)
        if self.field.rel:
            m = EasyModel(self.model.site, self.field.rel.to)
            if self.field.rel.to in self.model.model_list:
                # Related model is browsable: link each value to its page.
                lst = []
                for value in self.values():
                    if value is None:
                        continue
                    url = mark_safe('%s%s/%s/objects/%s/' % (self.model.site.root_url, m.model._meta.app_label, m.model._meta.module_name, iri_to_uri(value._get_pk_val())))
                    lst.append((smart_unicode(value), url))
            else:
                lst = [(value, None) for value in self.values()]
        elif self.field.choices:
            lst = []
            for value in self.values():
                url = mark_safe('%s%s/%s/fields/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, self.field.name, iri_to_uri(self.raw_value)))
                lst.append((value, url))
        elif isinstance(self.field, models.URLField):
            # URL fields link straight to their own value.
            val = self.values()[0]
            lst = [(val, iri_to_uri(val))]
        else:
            lst = [(self.values()[0], None)]
        return lst
class EasyQuerySet(QuerySet):
    """
    When creating (or cloning to) an `EasyQuerySet`, make sure to set the
    `_easymodel` variable to the related `EasyModel`.
    """
    def iterator(self, *args, **kwargs):
        # Wrap every raw model instance produced by the base queryset in an
        # EasyInstance, so templates get the databrowse API.
        for obj in super(EasyQuerySet, self).iterator(*args, **kwargs):
            yield EasyInstance(self._easymodel, obj)
    def _clone(self, *args, **kwargs):
        # Propagate _easymodel through Django's queryset cloning protocol;
        # without this, filtered/sliced clones would lose the wrapper.
        c = super(EasyQuerySet, self)._clone(*args, **kwargs)
        c._easymodel = self._easymodel
        return c
| bsd-3-clause |
jgrivolla/mongo-connector | tests/test_solr.py | 12 | 15537 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Solr search using the synchronizer, i.e. as it would be used by an user
"""
import logging
import os
import time
import sys
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
sys.path[0:0] = [""]
from pymongo import MongoClient
from tests import solr_pair, mongo_host, STRESS_COUNT
from tests.setup_cluster import (start_replica_set,
kill_replica_set,
restart_mongo_proc,
kill_mongo_proc)
from tests.util import assert_soon
from pysolr import Solr, SolrError
from mongo_connector.connector import Connector
from mongo_connector.util import retry_until_ok
from pymongo.errors import OperationFailure, AutoReconnect
class TestSynchronizer(unittest.TestCase):
    """Integration tests for the Solr doc manager.

    These are not unit tests: they drive a live MongoDB replica set
    (started by setUpClass) and a running Solr server, and rely on wall
    clock sleeps for replication to settle.
    """
    @classmethod
    def setUpClass(cls):
        # Spin up a 1-primary/1-secondary replica set and wipe Solr.
        _, cls.secondary_p, cls.primary_p = start_replica_set('test-solr')
        cls.conn = MongoClient(mongo_host, cls.primary_p,
                               replicaSet='test-solr')
        cls.solr_conn = Solr('http://%s/solr' % solr_pair)
        cls.solr_conn.delete(q='*:*')
    @classmethod
    def tearDownClass(cls):
        """ Kills cluster instance
        """
        kill_replica_set('test-solr')
    def setUp(self):
        # Start each test from a fresh oplog checkpoint file, empty
        # collection and empty Solr index.
        try:
            os.unlink("config.txt")
        except OSError:
            pass
        open("config.txt", "w").close()
        self.connector = Connector(
            address='%s:%s' % (mongo_host, self.primary_p),
            oplog_checkpoint='config.txt',
            target_url='http://localhost:8983/solr',
            ns_set=['test.test'],
            u_key='_id',
            auth_key=None,
            doc_manager='mongo_connector/doc_managers/solr_doc_manager.py',
            auto_commit_interval=0
        )
        self.connector.start()
        assert_soon(lambda: len(self.connector.shard_set) > 0)
        retry_until_ok(self.conn.test.test.remove)
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search('*:*')) == 0)
    def tearDown(self):
        self.connector.join()
    def test_shard_length(self):
        """Tests the shard_length to see if the shard set was recognized
        """
        self.assertEqual(len(self.connector.shard_set), 1)
    def test_insert(self):
        """Tests insert
        """
        self.conn['test']['test'].insert({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search('*:*')) > 0)
        result_set_1 = list(self.solr_conn.search('paulie'))
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))
            self.assertEqual(item['name'], result_set_2['name'])
    def test_remove(self):
        """Tests remove
        """
        self.conn['test']['test'].insert({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 1)
        self.conn['test']['test'].remove({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 0)
    def test_update(self):
        """Test update operations on Solr.
        Need to have the following defined in schema.xml:
        <field name="a" type="int" indexed="true" stored="true" />
        <field name="b.0.c" type="int" indexed="true" stored="true" />
        <field name="b.0.e" type="int" indexed="true" stored="true" />
        <field name="b.1.d" type="int" indexed="true" stored="true" />
        <field name="b.1.f" type="int" indexed="true" stored="true" />
        <field name="b.2.e" type="int" indexed="true" stored="true" />
        """
        docman = self.connector.doc_managers[0]
        # Insert
        self.conn.test.test.insert({"a": 0})
        assert_soon(lambda: sum(1 for _ in docman._search("*:*")) == 1)
        # Helper: apply an update in Mongo, then verify the replicated
        # (flattened) Solr document matches the cleaned Mongo document.
        def check_update(update_spec):
            updated = self.conn.test.test.find_and_modify(
                {"a": 0},
                update_spec,
                new=True
            )
            # Stringify _id to match what will be retrieved from Solr
            updated['_id'] = str(updated['_id'])
            # Flatten the MongoDB document to match Solr
            updated = docman._clean_doc(updated)
            # Allow some time for update to propagate
            time.sleep(1)
            replicated = list(docman._search("a:0"))[0]
            # Remove add'l fields until these are stored in a separate Solr core
            replicated.pop("_ts")
            replicated.pop("ns")
            # Remove field added by Solr
            replicated.pop("_version_")
            self.assertEqual(replicated, docman._clean_doc(updated))
        # Update by adding a field.
        # Note that Solr can't mix types within an array
        check_update({"$set": {"b": [{"c": 10}, {"d": 11}]}})
        # Update by changing a value within a sub-document (contains array)
        check_update({"$inc": {"b.0.c": 1}})
        # Update by changing the value within an array
        check_update({"$inc": {"b.1.f": 12}})
        # Update by adding new bucket to list
        check_update({"$push": {"b": {"e": 12}}})
        # Update by replacing an entire sub-document
        check_update({"$set": {"b.0": {"e": 4}}})
        # Update by adding a sub-document
        check_update({"$set": {"b": {"0": {"c": 100}}}})
        # Update whole document
        check_update({"a": 0, "b": {"1": {"d": 10000}}})
    def test_rollback(self):
        """Tests rollback. We force a rollback by inserting one doc, killing
        primary, adding another doc, killing the new primary, and
        restarting both the servers.
        """
        primary_conn = MongoClient(mongo_host, self.primary_p)
        self.conn['test']['test'].insert({'name': 'paul'})
        assert_soon(
            lambda: self.conn.test.test.find({'name': 'paul'}).count() == 1)
        assert_soon(
            lambda: sum(1 for _ in self.solr_conn.search('*:*')) == 1)
        kill_mongo_proc(self.primary_p, destroy=False)
        # Wait for the secondary to be elected primary.
        new_primary_conn = MongoClient(mongo_host, self.secondary_p)
        admin_db = new_primary_conn['admin']
        while admin_db.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        # 'pauline' is only written to the new primary; it must be rolled
        # back once the original primary returns.
        retry_until_ok(self.conn.test.test.insert,
                       {'name': 'pauline'})
        assert_soon(
            lambda: sum(1 for _ in self.solr_conn.search('*:*')) == 2)
        result_set_1 = list(self.solr_conn.search('pauline'))
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))
        kill_mongo_proc(self.secondary_p, destroy=False)
        restart_mongo_proc(self.primary_p)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        restart_mongo_proc(self.secondary_p)
        time.sleep(2)
        # After the rollback only 'paul' should remain in Solr.
        result_set_1 = self.solr_conn.search('pauline')
        self.assertEqual(sum(1 for _ in result_set_1), 0)
        result_set_2 = self.solr_conn.search('paul')
        self.assertEqual(sum(1 for _ in result_set_2), 1)
    def test_stress(self):
        """Test stress by inserting and removing a large amount of docs.
        """
        #stress test
        for i in range(0, STRESS_COUNT):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        time.sleep(5)
        assert_soon(
            lambda: sum(1 for _ in self.solr_conn.search(
                '*:*', rows=STRESS_COUNT)) == STRESS_COUNT)
        for i in range(0, STRESS_COUNT):
            result_set_1 = self.solr_conn.search('Paul ' + str(i))
            for item in result_set_1:
                # NOTE(review): this assertion compares a value to itself
                # and can never fail -- presumably it was meant to compare
                # against the MongoDB _id; confirm and fix upstream.
                self.assertEqual(item['_id'], item['_id'])
    def test_stressed_rollback(self):
        """Test stressed rollback with a large number of documents"""
        for i in range(0, STRESS_COUNT):
            self.conn['test']['test'].insert(
                {'name': 'Paul ' + str(i)})
        assert_soon(
            lambda: sum(1 for _ in self.solr_conn.search(
                '*:*', rows=STRESS_COUNT)) == STRESS_COUNT)
        primary_conn = MongoClient(mongo_host, self.primary_p)
        kill_mongo_proc(self.primary_p, destroy=False)
        new_primary_conn = MongoClient(mongo_host, self.secondary_p)
        admin_db = new_primary_conn['admin']
        while admin_db.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        # Keep inserting through the failover; retries absorb transient
        # "not master" / reconnect errors.
        count = -1
        while count + 1 < STRESS_COUNT:
            try:
                count += 1
                self.conn['test']['test'].insert(
                    {'name': 'Pauline ' + str(count)})
            except (OperationFailure, AutoReconnect):
                time.sleep(1)
        collection_size = self.conn['test']['test'].find().count()
        assert_soon(
            lambda: sum(1 for _ in self.solr_conn.search(
                '*:*', rows=STRESS_COUNT * 2)) == collection_size)
        result_set_1 = self.solr_conn.search(
            'Pauline',
            rows=STRESS_COUNT * 2, sort='_id asc'
        )
        for item in result_set_1:
            result_set_2 = self.conn['test']['test'].find_one(
                {'name': item['name']})
            self.assertEqual(item['_id'], str(result_set_2['_id']))
        kill_mongo_proc(self.secondary_p, destroy=False)
        restart_mongo_proc(self.primary_p)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        restart_mongo_proc(self.secondary_p)
        # All 'Pauline' docs were only on the temporary primary and must
        # have been rolled back; the original 'Paul' docs survive.
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search(
            'Pauline', rows=STRESS_COUNT * 2)) == 0)
        result_set_1 = list(self.solr_conn.search(
            'Pauline',
            rows=STRESS_COUNT * 2
        ))
        self.assertEqual(len(result_set_1), 0)
        result_set_2 = list(self.solr_conn.search(
            'Paul',
            rows=STRESS_COUNT * 2
        ))
        self.assertEqual(len(result_set_2), STRESS_COUNT)
    def test_valid_fields(self):
        """ Tests documents with field definitions
        """
        inserted_obj = self.conn['test']['test'].insert(
            {'name': 'test_valid'})
        self.conn['test']['test'].update(
            {'_id': inserted_obj},
            {'$set': {'popularity': 1}}
        )
        docman = self.connector.doc_managers[0]
        assert_soon(lambda: sum(1 for _ in docman._search("*:*")) > 0)
        result = docman.get_last_doc()
        self.assertIn('popularity', result)
        self.assertEqual(sum(1 for _ in docman._search(
            "name=test_valid")), 1)
    def test_invalid_fields(self):
        """ Tests documents without field definitions
        """
        inserted_obj = self.conn['test']['test'].insert(
            {'name': 'test_invalid'})
        self.conn['test']['test'].update(
            {'_id': inserted_obj},
            {'$set': {'break_this_test': 1}}
        )
        docman = self.connector.doc_managers[0]
        assert_soon(lambda: sum(1 for _ in docman._search("*:*")) > 0)
        result = docman.get_last_doc()
        # Fields absent from schema.xml must be dropped, not replicated.
        self.assertNotIn('break_this_test', result)
        self.assertEqual(sum(1 for _ in docman._search(
            "name=test_invalid")), 1)
    def test_dynamic_fields(self):
        """ Tests dynamic field definitions
        The following fields are supplied in the provided schema.xml:
        <dynamicField name="*_i" type="int" indexed="true" stored="true"/>
        <dynamicField name="i_*" type="int" indexed="true" stored="true"/>
        Cases:
        1. Match on first definition
        2. Match on second definition
        3. No match
        """
        self.solr_conn.delete(q='*:*')
        match_first = {"_id": 0, "foo_i": 100}
        match_second = {"_id": 1, "i_foo": 200}
        match_none = {"_id": 2, "foo": 300}
        # Connector is already running
        self.conn["test"]["test"].insert(match_first)
        self.conn["test"]["test"].insert(match_second)
        self.conn["test"]["test"].insert(match_none)
        # Should have documents in Solr now
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) > 0,
                    "Solr doc manager should allow dynamic fields")
        # foo_i and i_foo should be indexed, foo field should not exist
        self.assertEqual(sum(1 for _ in self.solr_conn.search("foo_i:100")), 1)
        self.assertEqual(sum(1 for _ in self.solr_conn.search("i_foo:200")), 1)
        # SolrError: "undefined field foo"
        logger = logging.getLogger("pysolr")
        logger.error("You should see an ERROR log message from pysolr here. "
                     "This indicates success, not an error in the test.")
        with self.assertRaises(SolrError):
            self.solr_conn.search("foo:300")
    def test_nested_fields(self):
        """Test indexing fields that are sub-documents in MongoDB
        The following fields are defined in the provided schema.xml:
        <field name="person.address.street" type="string" ... />
        <field name="person.address.state" type="string" ... />
        <dynamicField name="numbers.*" type="string" ... />
        <dynamicField name="characters.*" type="string" ... />
        """
        # Connector is already running
        self.conn["test"]["test"].insert({
            "name": "Jeb",
            "billing": {
                "address": {
                    "street": "12345 Mariposa Street",
                    "state": "California"
                }
            }
        })
        self.conn["test"]["test"].insert({
            "numbers": ["one", "two", "three"],
            "characters": [
                {"name": "Big Bird",
                 "color": "yellow"},
                {"name": "Elmo",
                 "color": "red"},
                "Cookie Monster"
            ]
        })
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) > 0,
                    "documents should have been replicated to Solr")
        # Search for first document
        results = self.solr_conn.search(
            "billing.address.street:12345\ Mariposa\ Street")
        self.assertEqual(len(results), 1)
        self.assertEqual(next(iter(results))["billing.address.state"],
                         "California")
        # Search for second document
        results = self.solr_conn.search(
            "characters.1.color:red")
        self.assertEqual(len(results), 1)
        self.assertEqual(next(iter(results))["numbers.2"], "three")
        results = self.solr_conn.search("characters.2:Cookie\ Monster")
        self.assertEqual(len(results), 1)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
YangSongzhou/django | tests/auth_tests/test_tokens.py | 297 | 2551 | import unittest
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.test import TestCase
from django.utils.six import PY3
class TokenGeneratorTest(TestCase):
    """Tests for django.contrib.auth.tokens.PasswordResetTokenGenerator.

    Runs against Django's test database (TestCase), creating throwaway
    users per test.
    """
    def test_make_token(self):
        """
        Ensure that we can make a token and that it is valid
        """
        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        self.assertTrue(p0.check_token(user, tk1))
    def test_10265(self):
        """
        Ensure that the token generated for a user created in the same request
        will work correctly.
        """
        # See ticket #10265
        user = User.objects.create_user('comebackkid', 'test3@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        # 'reload' shadows the Python 2 builtin of the same name; harmless
        # here, but avoid the pattern in new code.
        reload = User.objects.get(username='comebackkid')
        tk2 = p0.make_token(reload)
        self.assertEqual(tk1, tk2)
    def test_timeout(self):
        """
        Ensure we can use the token after n days, but no greater.
        """
        # Uses a mocked version of PasswordResetTokenGenerator so we can change
        # the value of 'today'
        class Mocked(PasswordResetTokenGenerator):
            def __init__(self, today):
                self._today_val = today
            def _today(self):
                return self._today_val
        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        # Valid exactly at the timeout boundary ...
        p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS))
        self.assertTrue(p1.check_token(user, tk1))
        # ... and rejected one day past it.
        p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
        self.assertFalse(p2.check_token(user, tk1))
    @unittest.skipIf(PY3, "Unnecessary test with Python 3")
    def test_date_length(self):
        """
        Make sure we don't allow overly long dates, causing a potential DoS.
        """
        user = User.objects.create_user('ima1337h4x0r', 'test4@example.com', 'p4ssw0rd')
        p0 = PasswordResetTokenGenerator()
        # This will put a 14-digit base36 timestamp into the token, which is too large.
        self.assertRaises(ValueError,
                          p0._make_token_with_timestamp,
                          user, 175455491841851871349)
reedloden/ansible | lib/ansible/plugins/lookup/dict.py | 220 | 1203 | # (c) 2014, Kent R. Spillner <kspillner@acm.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
    """Ansible ``with_dict`` lookup: flattens a mapping into a list of
    {key, value} items for iteration."""
    # NOTE(review): the 'varibles' parameter name is a typo, but it is part
    # of the public run() signature -- renaming would break keyword callers.
    # NOTE(review): collections.Mapping was removed from the top-level
    # collections namespace in Python 3.10 (use collections.abc.Mapping);
    # fine for the Python versions this file targets, but worth confirming.
    def run(self, terms, varibles=None, **kwargs):
        # Expect any type of Mapping, notably hostvars
        if not isinstance(terms, collections.Mapping):
            raise AnsibleError("with_dict expects a dict")
        return self._flatten_hash_to_list(terms)
| gpl-3.0 |
swapnakrishnan2k/tp-qemu | qemu/tests/timedrift_check_when_crash.py | 11 | 3724 | import logging
import time
import re
from autotest.client.shared import error
from autotest.client.shared import utils
from virttest.env_process import preprocess
from virttest.virt_vm import VMDeadKernelCrashError
@error.context_aware
def run(test, params, env):
    """
    Time clock offset check when guest crash/bsod test:
    1) boot guest with '-rtc base=utc,clock=host,driftfix=slew';
    2) sync host system time with "ntpdate clock.redhat.com";
    3) inject nmi to guest/ make linux kernel crash;
    4) sleep long time, then reset vm via system_reset;
    5) query clock offset from ntp server;

    Note: this file uses Python 2-only 'except Exc, var' syntax.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    ntp_server = params.get("ntp_server", "clock.redhat.com")
    ntp_cmd = params["ntp_cmd"]
    ntp_query_cmd = params["ntp_query_cmd"]
    # nmi_cmd is "<target>:<command>"; target is either 'monitor' (QMP/HMP)
    # or anything else, meaning run the command inside the guest session.
    nmi_cmd = params.get("nmi_cmd", "inject-nmi")
    sleep_time = float(params.get("sleep_time", 1800))
    # Maximum acceptable clock offset (seconds) reported by ntp.
    deviation = float(params.get("deviation", 5))
    error.context("sync host time with ntp server", logging.info)
    utils.system("ntpdate %s" % ntp_server)
    error.context("start guest", logging.info)
    params["start_vm"] = "yes"
    preprocess(test, params, env)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    error.context("sync time in guest", logging.info)
    session.cmd(ntp_cmd)
    error.context("inject nmi interrupt in vm", logging.info)
    target, cmd = re.split("\s*:\s*", nmi_cmd)
    if target == "monitor":
        vm.monitor.send_args_cmd(cmd)
    else:
        session.sendline(cmd)
    # After the NMI/crash the guest must be dead: any shell command should
    # fail.  A successful "dir" means the crash did not happen.
    try:
        session.cmd("dir")
    except Exception:
        pass
    else:
        raise error.TestFail("Guest OS still alive ...")
    error.context("sleep %s seconds" % sleep_time, logging.info)
    time.sleep(sleep_time)
    # Autotest parses serial output and could raise VMDeadKernelCrash
    # we generated using sysrq. Ignore one "BUG:" line
    try:
        session = vm.reboot(method="system_reset")
    except VMDeadKernelCrashError, details:
        details = str(details)
        if (re.findall(r"Trigger a crash\s.*BUG:", details, re.M) and
                details.count("BUG:") != 1):
            raise error.TestFail("Got multiple kernel crashes. Please "
                                 "note that one of them was "
                                 "intentionally generated by sysrq in "
                                 "this test.\n%s" % details)
        # Retry the login until timeout, again tolerating the single
        # intentional crash report.
        end_time = time.time() + timeout
        while time.time() < end_time:
            try:
                session = vm.wait_for_login(timeout=timeout)
            except VMDeadKernelCrashError, details:
                details = str(details)
                if (re.findall(r"Trigger a crash\s.*BUG:", details,
                               re.M) and details.count("BUG:") != 1):
                    raise error.TestFail("Got multiple kernel crashes. "
                                         "Please note that one of them was "
                                         "intentionally generated by sysrq "
                                         "in this test.\n%s" % details)
            else:
                break
    error.context("check time offset via ntp", logging.info)
    output = session.cmd_output(ntp_query_cmd)
    # Take the last signed decimal offset in the ntp output; no match is
    # treated as zero offset.
    try:
        offset = re.findall(r"[+-](\d+\.\d+)", output, re.M)[-1]
    except IndexError:
        offset = 0.0
    if float(offset) > deviation:
        raise error.TestFail("Unacceptable offset '%s', " % offset +
                             "deviation '%s'" % deviation)
svn2github/libtorrent-trunk | tools/parse_disk_buffer_log.py | 11 | 2013 | #! /usr/bin/env python
import os, sys, time
# Parse a libtorrent disk-buffer log (path given as argv[1]) into a
# gnuplot-friendly data file and render it as disk_buffer.png.
lines = open(sys.argv[1], 'rb').readlines()
# logfile format:
# <time(ms)> <key>: <value>
# example:
# 16434 read cache: 17
# Buffer categories in column/stacking order for the plot.
key_order = ['receive buffer', 'send buffer', 'released send buffer', 'posted send buffer',
	'received send buffer', 'dispatched send buffer', 'queued send buffer',
	'write cache', 'read cache', 'hash temp']
# One fill colour per category, same order as key_order.
colors = ['30f030', '001070', '101080', '2040a0',
	'4070d0', '80a0f0', 'f03030',
	'80f080', 'f08080', '4040ff']
keys = []
fields = {}  # current value per category
maximum = {}  # per-timestamp maximum per category
out = open('disk_buffer_log.dat', 'w+')
field_sum = {}  # time-weighted sum per category, used for the average report
field_num_samples = {}
field_timestamp = {}  # last timestamp seen per category
for c in key_order:
	keys.append(c)
	fields[c] = 0
	maximum[c] = 0
	field_sum[c] = 0
	field_num_samples[c] = 0
	field_timestamp[c] = 0
last_t = 0
for l in lines:
	try:
		t = int(l[0:l.find(' ')])
		c = l[l.find(' ')+1:l.find(':')]
		n = int(l[l.find(':')+1:-1])
	except:
		# Malformed line: echo it and carry on.
		print l
		continue
	if last_t != t:
		# Timestamp changed: flush one data row for the previous timestamp.
		print >>out, '%d\t' % last_t,
		for i in keys:
			print >>out, '%d\t' % maximum[i],
		print >>out, '\n',
	if not c in keys: continue
	# Accumulate the time-weighted value for the average report below.
	field_sum[c] += fields[c] * float(t - field_timestamp[c])
	field_timestamp[c] = t
	fields[c] = n
	if n > maximum[c]: maximum[c] = n
	if last_t != t:
		last_t = t
		maximum = fields
for i in keys:
	print '%s: avg: %f' % (i, field_sum[i] / last_t)
print
out.close()
# Emit the gnuplot script that stacks the categories as filled curves.
out = open('disk_buffer.gnuplot', 'wb')
print >>out, "set term png size 1200,700"
print >>out, 'set output "disk_buffer.png"'
print >>out, 'set xrange [0:*]'
print >>out, 'set xlabel "time (ms)"'
print >>out, 'set ylabel "buffers"'
print >>out, "set style data lines"
print >>out, "set key box"
print >>out, 'plot',
count = 1 + len(keys)
keys.reverse()
comma = ''
for k in keys:
	# Each curve plots the cumulative sum of data columns 2..count so the
	# filled areas stack on top of each other.
	expr = "$%d" % count
	for i in xrange(2, count): expr += "+$%d" % i
	count -= 1
	print >>out, ' %s"disk_buffer_log.dat" using 1:(%s) title "%s" with filledcurves x1 lt rgb "#%s"' % (comma, expr, k, colors[count-1]),
	comma = ','
out.close()
os.system('gnuplot disk_buffer.gnuplot')
| bsd-3-clause |
newerthcom/savagerebirth | libs/python-2.72/Lib/test/test_urllib2net.py | 14 | 11769 | #!/usr/bin/env python
import unittest
from test import test_support
from test.test_urllib2 import sanepathname2url
import socket
import urllib2
import os
import sys
TIMEOUT = 60 # seconds
def _retry_thrice(func, exc, *args, **kwargs):
for i in range(3):
try:
return func(*args, **kwargs)
except exc, last_exc:
continue
except:
raise
raise last_exc
def _wrap_with_retry_thrice(func, exc):
    """Return a callable that invokes *func* through _retry_thrice,
    retrying on exceptions of type *exc*."""
    def wrapped(*call_args, **call_kwargs):
        return _retry_thrice(func, exc, *call_args, **call_kwargs)
    return wrapped
# Connecting to remote hosts is flaky. Make it more robust by retrying
# the connection several times.
_urlopen_with_retry = _wrap_with_retry_thrice(urllib2.urlopen, urllib2.URLError)
class AuthTests(unittest.TestCase):
    """Tests urllib2 authentication features."""

    ## Disabled at the moment since there is no page under python.org which
    ## could be used for HTTP authentication.
    #
    # def test_basic_auth(self):
    #     import httplib
    #
    #     test_url = "http://www.python.org/test/test_urllib2/basic_auth"
    #     test_hostport = "www.python.org"
    #     test_realm = 'Test Realm'
    #     test_user = 'test.test_urllib2net'
    #     test_password = 'blah'
    #
    #     # failure
    #     try:
    #         _urlopen_with_retry(test_url)
    #     except urllib2.HTTPError, exc:
    #         self.assertEqual(exc.code, 401)
    #     else:
    #         self.fail("urlopen() should have failed with 401")
    #
    #     # success
    #     auth_handler = urllib2.HTTPBasicAuthHandler()
    #     auth_handler.add_password(test_realm, test_hostport,
    #                               test_user, test_password)
    #     opener = urllib2.build_opener(auth_handler)
    #     f = opener.open('http://localhost/')
    #     response = _urlopen_with_retry("http://www.python.org/")
    #
    #     # The 'userinfo' URL component is deprecated by RFC 3986 for security
    #     # reasons, let's not implement it!  (it's already implemented for proxy
    #     # specification strings (that is, URLs or authorities specifying a
    #     # proxy), so we must keep that)
    #     self.assertRaises(httplib.InvalidURL,
    #                       urllib2.urlopen, "http://evil:thing@example.com")
class CloseSocketTest(unittest.TestCase):
    """Verifies that closing a urllib2 response closes the real socket."""

    def test_close(self):
        import httplib

        # calling .close() on urllib2's response objects should close the
        # underlying socket

        # delve deep into response to fetch socket._socketobject
        response = _urlopen_with_retry("http://www.python.org/")
        abused_fileobject = response.fp
        # The isinstance-style identity checks below pin the exact internal
        # layering (addinfourl -> socket._fileobject -> HTTPResponse) this
        # test relies on; if an intermediate wrapper changes, fail loudly.
        self.assertTrue(abused_fileobject.__class__ is socket._fileobject)
        httpresponse = abused_fileobject._sock
        self.assertTrue(httpresponse.__class__ is httplib.HTTPResponse)
        fileobject = httpresponse.fp
        self.assertTrue(fileobject.__class__ is socket._fileobject)
        # The innermost file object must only be closed by response.close().
        self.assertTrue(not fileobject.closed)
        response.close()
        self.assertTrue(fileobject.closed)
class OtherNetworkTests(unittest.TestCase):
    """Smoke tests that exercise urllib2 against live network resources."""

    def setUp(self):
        if 0:  # for debugging
            import logging
            logger = logging.getLogger("test_urllib2net")
            logger.addHandler(logging.StreamHandler())

    # XXX The rest of these tests aren't very good -- they don't check much.
    # They do sometimes catch some major disasters, though.

    def test_ftp(self):
        # Mix of existing and non-existent FTP resources; _test_urls only
        # checks that fetching does not blow up unexpectedly.
        urls = [
            'ftp://ftp.kernel.org/pub/linux/kernel/README',
            'ftp://ftp.kernel.org/pub/linux/kernel/non-existent-file',
            #'ftp://ftp.kernel.org/pub/leenox/kernel/test',
            'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC'
            '/research-reports/00README-Legal-Rules-Regs',
            ]
        self._test_urls(urls, self._extra_handlers())

    def test_file(self):
        # file: URLs for an existing temp file and a bogus path (the latter
        # is expected to raise urllib2.URLError).
        TESTFN = test_support.TESTFN
        f = open(TESTFN, 'w')
        try:
            f.write('hi there\n')
            f.close()
            urls = [
                'file:'+sanepathname2url(os.path.abspath(TESTFN)),
                ('file:///nonsensename/etc/passwd', None, urllib2.URLError),
                ]
            self._test_urls(urls, self._extra_handlers(), retry=True)
        finally:
            os.remove(TESTFN)

    # XXX Following test depends on machine configurations that are internal
    # to CNRI.  Need to set up a public server with the right authentication
    # configuration for test purposes.

##     def test_cnri(self):
##         if socket.gethostname() == 'bitdiddle':
##             localhost = 'bitdiddle.cnri.reston.va.us'
##         elif socket.gethostname() == 'bitdiddle.concentric.net':
##             localhost = 'localhost'
##         else:
##             localhost = None
##         if localhost is not None:
##             urls = [
##                 'file://%s/etc/passwd' % localhost,
##                 'http://%s/simple/' % localhost,
##                 'http://%s/digest/' % localhost,
##                 'http://%s/not/found.h' % localhost,
##                 ]

##             bauth = HTTPBasicAuthHandler()
##             bauth.add_password('basic_test_realm', localhost, 'jhylton',
##                                'password')
##             dauth = HTTPDigestAuthHandler()
##             dauth.add_password('digest_test_realm', localhost, 'jhylton',
##                                'password')

##             self._test_urls(urls, self._extra_handlers()+[bauth, dauth])

    def test_urlwithfrag(self):
        # geturl() must preserve the fragment of the requested URL.
        urlwith_frag = "http://docs.python.org/glossary.html#glossary"
        with test_support.transient_internet(urlwith_frag):
            req = urllib2.Request(urlwith_frag)
            res = urllib2.urlopen(req)
            self.assertEqual(res.geturl(),
                             "http://docs.python.org/glossary.html#glossary")

    def test_fileno(self):
        # The response object must expose the underlying socket's fileno().
        req = urllib2.Request("http://www.python.org")
        opener = urllib2.build_opener()
        res = opener.open(req)
        try:
            res.fileno()
        except AttributeError:
            self.fail("HTTPResponse object should return a valid fileno")
        finally:
            res.close()

    def test_custom_headers(self):
        # Opening a request populates default headers; an explicitly added
        # User-Agent must survive a subsequent open unchanged.
        url = "http://www.example.com"
        with test_support.transient_internet(url):
            opener = urllib2.build_opener()
            request = urllib2.Request(url)
            self.assertFalse(request.header_items())
            opener.open(request)
            self.assertTrue(request.header_items())
            self.assertTrue(request.has_header('User-agent'))
            request.add_header('User-Agent','Test-Agent')
            opener.open(request)
            self.assertEqual(request.get_header('User-agent'),'Test-Agent')

    def _test_urls(self, urls, handlers, retry=True):
        """Fetch each url; entries may be (url, request, expected_error)
        tuples.  Timeouts are reported and skipped, not treated as failures."""
        import time
        import logging
        debug = logging.getLogger("test_urllib2").debug

        urlopen = urllib2.build_opener(*handlers).open
        if retry:
            urlopen = _wrap_with_retry_thrice(urlopen, urllib2.URLError)

        for url in urls:
            if isinstance(url, tuple):
                url, req, expected_err = url
            else:
                req = expected_err = None
            with test_support.transient_internet(url):
                debug(url)
                try:
                    f = urlopen(url, req, TIMEOUT)
                except EnvironmentError as err:
                    debug(err)
                    if expected_err:
                        msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" %
                               (expected_err, url, req, type(err), err))
                        self.assertIsInstance(err, expected_err, msg)
                except urllib2.URLError as err:
                    # A timeout is environmental, not a bug: note it and move on.
                    if isinstance(err[0], socket.timeout):
                        print >>sys.stderr, "<timeout: %s>" % url
                        continue
                    else:
                        raise
                else:
                    try:
                        with test_support.transient_internet(url):
                            buf = f.read()
                            debug("read %d bytes" % len(buf))
                    except socket.timeout:
                        print >>sys.stderr, "<timeout: %s>" % url
                    f.close()
            debug("******** next url coming up...")
            # Brief pause so consecutive fetches don't hammer the servers.
            time.sleep(0.1)

    def _extra_handlers(self):
        """Extra opener handlers used by the URL tests (short-timeout FTP cache)."""
        handlers = []

        cfh = urllib2.CacheFTPHandler()
        cfh.setTimeout(1)
        handlers.append(cfh)

        return handlers
class TimeoutTest(unittest.TestCase):
    """Checks how urlopen's timeout argument interacts with the global
    socket default timeout, for both HTTP and FTP.

    The assertions reach through urllib2/httplib internals
    (u.fp._sock.fp._sock for HTTP, u.fp.fp._sock for FTP) to read the
    timeout actually set on the underlying socket.
    """

    def test_http_basic(self):
        # No default timeout, none passed: socket must be timeout-less.
        self.assertTrue(socket.getdefaulttimeout() is None)
        url = "http://www.python.org"
        with test_support.transient_internet(url, timeout=None):
            u = _urlopen_with_retry(url)
            self.assertTrue(u.fp._sock.fp._sock.gettimeout() is None)

    def test_http_default_timeout(self):
        # A global default timeout must be inherited by the connection.
        self.assertTrue(socket.getdefaulttimeout() is None)
        url = "http://www.python.org"
        with test_support.transient_internet(url):
            socket.setdefaulttimeout(60)
            try:
                u = _urlopen_with_retry(url)
            finally:
                socket.setdefaulttimeout(None)
            self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 60)

    def test_http_no_timeout(self):
        # An explicit timeout=None must override the global default.
        self.assertTrue(socket.getdefaulttimeout() is None)
        url = "http://www.python.org"
        with test_support.transient_internet(url):
            socket.setdefaulttimeout(60)
            try:
                u = _urlopen_with_retry(url, timeout=None)
            finally:
                socket.setdefaulttimeout(None)
            self.assertTrue(u.fp._sock.fp._sock.gettimeout() is None)

    def test_http_timeout(self):
        # An explicit numeric timeout must reach the socket untouched.
        url = "http://www.python.org"
        with test_support.transient_internet(url):
            u = _urlopen_with_retry(url, timeout=120)
            self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 120)

    FTP_HOST = "ftp://ftp.mirror.nl/pub/gnu/"

    def test_ftp_basic(self):
        self.assertTrue(socket.getdefaulttimeout() is None)
        with test_support.transient_internet(self.FTP_HOST, timeout=None):
            u = _urlopen_with_retry(self.FTP_HOST)
            self.assertTrue(u.fp.fp._sock.gettimeout() is None)

    def test_ftp_default_timeout(self):
        self.assertTrue(socket.getdefaulttimeout() is None)
        with test_support.transient_internet(self.FTP_HOST):
            socket.setdefaulttimeout(60)
            try:
                u = _urlopen_with_retry(self.FTP_HOST)
            finally:
                socket.setdefaulttimeout(None)
            self.assertEqual(u.fp.fp._sock.gettimeout(), 60)

    def test_ftp_no_timeout(self):
        self.assertTrue(socket.getdefaulttimeout() is None)
        with test_support.transient_internet(self.FTP_HOST):
            socket.setdefaulttimeout(60)
            try:
                u = _urlopen_with_retry(self.FTP_HOST, timeout=None)
            finally:
                socket.setdefaulttimeout(None)
            self.assertTrue(u.fp.fp._sock.gettimeout() is None)

    def test_ftp_timeout(self):
        with test_support.transient_internet(self.FTP_HOST):
            u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
            self.assertEqual(u.fp.fp._sock.gettimeout(), 60)
def test_main():
    """Run the networked urllib2 test suite.

    Requires the 'network' test resource to be enabled.
    """
    test_support.requires("network")
    suite = (AuthTests,
             OtherNetworkTests,
             CloseSocketTest,
             TimeoutTest)
    test_support.run_unittest(*suite)

if __name__ == "__main__":
    test_main()
| gpl-2.0 |
gibiansky/tensorflow | tensorflow/contrib/learn/python/learn/summary_writer_cache.py | 144 | 1196 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for a Session-like object that handles threads and recovery.
Based on an original design of Illia Polosukhin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import summary_io
# Re-export so callers importing SummaryWriterCache from this module keep
# working after the implementation moved to summary_io.
SummaryWriterCache = summary_io.SummaryWriterCache # pylint: disable=invalid-name
# Backward compatible interface. Remove?
# Legacy function-style aliases predating the class-based API.
clear_summary_writers = SummaryWriterCache.clear
get_summary_writer = SummaryWriterCache.get
| apache-2.0 |
hujiajie/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/wpt/wpt/tools/wptserve/wptserve/constants.py | 326 | 4616 | import utils
# Maps file extension -> MIME content type, built by inverting the
# MIME-type -> extension-list table below (utils.invert_dict).
content_types = utils.invert_dict({"text/html": ["htm", "html"],
                                   "application/json": ["json"],
                                   "application/xhtml+xml": ["xht", "xhtm", "xhtml"],
                                   "application/xml": ["xml"],
                                   "application/x-xpinstall": ["xpi"],
                                   "text/javascript": ["js"],
                                   "text/css": ["css"],
                                   "text/plain": ["txt", "md"],
                                   "image/svg+xml": ["svg"],
                                   "image/gif": ["gif"],
                                   "image/jpeg": ["jpg", "jpeg"],
                                   "image/png": ["png"],
                                   "image/bmp": ["bmp"],
                                   "text/event-stream": ["event_stream"],
                                   "text/cache-manifest": ["manifest"],
                                   "video/mp4": ["mp4", "m4v"],
                                   "audio/mp4": ["m4a"],
                                   "audio/mpeg": ["mp3"],
                                   "video/webm": ["webm"],
                                   "audio/webm": ["weba"],
                                   "video/ogg": ["ogg", "ogv"],
                                   "audio/ogg": ["oga"],
                                   "audio/x-wav": ["wav"],
                                   "text/vtt": ["vtt"],})
# HTTP status code -> (reason phrase, human-readable description) used when
# building response status lines.
response_codes = {
    100: ('Continue', 'Request received, please continue'),
    101: ('Switching Protocols',
          'Switching to new protocol; obey Upgrade header'),

    200: ('OK', 'Request fulfilled, document follows'),
    201: ('Created', 'Document created, URL follows'),
    202: ('Accepted',
          'Request accepted, processing continues off-line'),
    203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
    204: ('No Content', 'Request fulfilled, nothing follows'),
    205: ('Reset Content', 'Clear input form for further input.'),
    206: ('Partial Content', 'Partial content follows.'),

    300: ('Multiple Choices',
          'Object has several resources -- see URI list'),
    301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
    302: ('Found', 'Object moved temporarily -- see URI list'),
    303: ('See Other', 'Object moved -- see Method and URL list'),
    304: ('Not Modified',
          'Document has not changed since given time'),
    305: ('Use Proxy',
          'You must use proxy specified in Location to access this '
          'resource.'),
    307: ('Temporary Redirect',
          'Object moved temporarily -- see URI list'),

    400: ('Bad Request',
          'Bad request syntax or unsupported method'),
    401: ('Unauthorized',
          'No permission -- see authorization schemes'),
    402: ('Payment Required',
          'No payment -- see charging schemes'),
    403: ('Forbidden',
          'Request forbidden -- authorization will not help'),
    404: ('Not Found', 'Nothing matches the given URI'),
    405: ('Method Not Allowed',
          'Specified method is invalid for this resource.'),
    406: ('Not Acceptable', 'URI not available in preferred format.'),
    407: ('Proxy Authentication Required', 'You must authenticate with '
          'this proxy before proceeding.'),
    408: ('Request Timeout', 'Request timed out; try again later.'),
    409: ('Conflict', 'Request conflict.'),
    410: ('Gone',
          'URI no longer exists and has been permanently removed.'),
    411: ('Length Required', 'Client must specify Content-Length.'),
    412: ('Precondition Failed', 'Precondition in headers is false.'),
    413: ('Request Entity Too Large', 'Entity is too large.'),
    414: ('Request-URI Too Long', 'URI is too long.'),
    415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
    416: ('Requested Range Not Satisfiable',
          'Cannot satisfy request range.'),
    417: ('Expectation Failed',
          'Expect condition could not be satisfied.'),

    500: ('Internal Server Error', 'Server got itself in trouble'),
    501: ('Not Implemented',
          'Server does not support this operation'),
    502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
    503: ('Service Unavailable',
          'The server cannot process the request due to a high load'),
    504: ('Gateway Timeout',
          'The gateway server did not receive a timely response'),
    505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
| bsd-3-clause |
swegener/libsigrokdecode | decoders/xfp/__init__.py | 6 | 1556 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2013 Bert Vermeulen <bert@biot.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
This PD decodes the XFP I²C management interface structures/protocol.
XFP modules include an I²C interface, used to monitor and control various
aspects of the module. The specification defines an I²C slave at address
0x50 (0xa0) which returns 128 bytes of a standard structure ("lower memory"),
and, after setting a table number in lower memory, a set of 256 "higher
memory" tables, which can be mapped to different subdevices on the XFP.
Only one table is defined in the specification: table 0x01, the default on
module startup. Other tables are either reserved for future expansion, or
available for vendor-specific extensions. This decoder supports both lower
memory and table 0x01.
Details:
ftp://ftp.seagate.com/sff/INF-8077.PDF (XFP specification)
'''
from .pd import Decoder
| gpl-3.0 |
placiflury/gridmonitor-sft | sft/publisher.py | 1 | 12109 | #!/usr/bin/env python
"""
Deals with Querying and fetching SFT jobs
that got submitted.
"""
import logging
import os, os.path, hashlib
import subprocess
from sqlalchemy import and_ as AND
from sqlalchemy import or_ as OR
from datetime import datetime
from sft.utils import helpers
import db.sft_meta as meta
import db.sft_schema as schema
import sft_globals as g
from errors import publisher
from nagios_notifier import NagiosNotification
from indexer import HTMLIndexer, HTMLIndexerError
import sft.db.sft_schema as schema
__author__ = "Placi Flury placi.flury@switch.ch"
__date__ = "20.04.2010"
__version__ = "0.2.0"
class Publisher(object):
    """Queries submitted SFT (site functional test) jobs and fetches the
    results of those that reached a final grid state.
    """

    # Final states as reported by the grid middleware (arcstat).
    GRID_FIN_STATES = ['FAILED', 'FINISHED',
                       'DELETED', 'KILLED']
    # All states from which a job is never polled again.
    FIN_STATES = ['failed', 'fetched', 'fetched_failed',
                  'timeout'] + GRID_FIN_STATES
    # Seconds granted to each arcstat/arcget invocation.
    TIMEOUT = 20

    def __init__(self):
        self.log = logging.getLogger(__name__)
        self.session = meta.Session
        self.jobsdir = g.config.jobsdir
        self.joblist = os.path.join(self.jobsdir, 'jobs.xml')
        self.log.info("Jobs download directory set to '%s'" % self.jobsdir)
        try:
            self.html_indexer = HTMLIndexer(g.config.url_root)
        except HTMLIndexerError as e:
            self.log.error("%s: %s" % (e.expression, e.message))
            # Bug fix: leave a sentinel instead of an unset attribute, so
            # fetch_final_jobs does not die with AttributeError later.
            self.html_indexer = None
        _arcstat = os.path.join(g.config.arc_clients, 'arcstat')
        _arcget = os.path.join(g.config.arc_clients, 'arcget')
        if os.path.isfile(_arcstat):
            self.arcstat = _arcstat
            self.log.debug("'arcstat' command path set to '%s'" % self.arcstat)
        else:
            # Bug fix: format the local candidate path; self.arcstat is not
            # assigned in this branch (the old code raised AttributeError
            # instead of PublisherError).
            raise publisher.PublisherError("arcstat path error",
                    "'%s' is not a valid file/path" % _arcstat)
        if os.path.isfile(_arcget):
            self.arcget = _arcget
            self.log.debug("'arcget' command path set to '%s'" % self.arcget)
        else:
            # Bug fix: same as above, use _arcget rather than self.arcget.
            raise publisher.PublisherError("arcget path error",
                    "'%s' is not a valid file/path" % _arcget)
        self.pos_dn_vos = []  # (DN, VO) pairs with a known-good proxy
        self.neg_dn_vos = []  # (DN, VO) pairs whose proxy creation failed
        self.log.debug("Initialization finished")

    def reset_proxy_cache(self):
        """ resets (user,VO) caches. """
        self.pos_dn_vos = []
        self.neg_dn_vos = []

    def __set_x509_user_proxy(self, DN, vo_name):
        """
        Try to set X509_USER_PROXY environment variable
        to a vomsproxy for user DN and VO vo.

        returns True, if environment variable could be set
                False, else
        """
        if (DN, vo_name) in self.neg_dn_vos:
            # Known-bad pair; do not retry within this run.
            return False

        user = self.session.query(schema.User).filter_by(DN=DN).first()
        if not user:
            return False
        passwd = user.get_passwd()
        # Proxy files are keyed by a digest of the DN to get safe filenames.
        file_prefix = hashlib.md5(DN).hexdigest()
        myproxy_file = os.path.join(g.config.proxy_dir, file_prefix)
        voms_proxy_file = os.path.join(g.config.proxy_dir,
                                       file_prefix + '_' + vo_name)

        if (DN, vo_name) not in self.pos_dn_vos:
            try:
                g.pxhandle.check_create_myproxy(DN, passwd, myproxy_file)
                g.pxhandle.check_create_vomsproxy(DN, file_prefix, vo_name)
            except Exception as exp2:
                # Proxy creation failed: raise a Nagios alert and blacklist
                # the pair for the remainder of this run.
                _notification = NagiosNotification(g.config.localhost, 'publisher')
                _msg = DN + ': ' + exp2.__repr__()
                _notification.set_message(_msg)
                _notification.set_status('CRITICAL')
                g.notifier.add_notification(_notification)
                self.neg_dn_vos.append((DN, vo_name))
                return False
            self.pos_dn_vos.append((DN, vo_name))

        os.environ['X509_USER_PROXY'] = voms_proxy_file
        return True

    def check_submitted_jobs(self):
        """ checking whether submitted jobs can be fetched."""
        # Poll every job that is not yet in a final state.
        for entry in self.session.query(schema.SFTJob).\
                filter(AND(schema.SFTJob.status != 'failed',
                    schema.SFTJob.status != 'fetched',
                    schema.SFTJob.status != 'success',
                    schema.SFTJob.status != 'fetched_failed',
                    schema.SFTJob.status != 'fetch_failed',
                    schema.SFTJob.status != 'test_failed',
                    schema.SFTJob.status != 'timeout',
                    schema.SFTJob.status != 'FAILED',
                    schema.SFTJob.status != 'FINISHED',
                    schema.SFTJob.status != 'KILLED',
                    schema.SFTJob.status != 'DELETED')).all():
            self.log.debug("Checking job in state: %s" % entry.status)
            DN = entry.DN
            vo_name = entry.vo_name
            if not self.__set_x509_user_proxy(DN, vo_name):
                continue
            self.log.debug("Querying status of job: %s" % entry.jobid.strip())
            cmd = '%s -j %s %s' % (self.arcstat, self.joblist, entry.jobid.strip())
            try:
                outdata, err, return_code = helpers.timeout_call(cmd, Publisher.TIMEOUT)
            except helpers.Alarm:
                # arcstat timed out; the job is retried on the next round.
                continue
            if return_code == 0:
                output = outdata.split('\n')  # Job, Job Name, Status, [Exit code]
                if len(output) > 2:
                    # some dirty hacking, as Status sometimes not present, no clue why.
                    if not output[2]:
                        continue
                    status = output[2].split('(')[1].strip(') ')  # state is presented in ()
                    self.log.debug("Refreshed job status:>%s<" % status)
                else:
                    self.log.error("Parsing job status: '%s'" % output)
                    continue
                entry.status = status
                entry.db_lastmodified = datetime.utcnow()
                self.session.flush()
            else:
                _error_msg = outdata + 'Error: ' + err
                self.log.debug("Quering job status failed with %s" % _error_msg)
                # hack to intercept jobs that got 'lost'
                if 'No jobs given' in err:
                    entry.status = 'fetch_failed'
                    entry.error_type = 'sft'
                    entry.error_msg = "Job '%s' not found anymore" % entry.jobid.strip()
        self.session.commit()

    def fetch_final_jobs(self):
        """ fetching all jobs in final state, that were not yet fetched. """
        for entry in self.session.query(schema.SFTJob).\
                filter(OR(schema.SFTJob.status == 'FAILED',
                    schema.SFTJob.status == 'FINISHED')).all():
            # since filter is case insensitive, let's skip 'failed' status
            jobid = entry.jobid
            if not jobid:
                continue
            jobid = jobid.strip()
            DN = entry.DN
            vo_name = entry.vo_name
            if not self.__set_x509_user_proxy(DN, vo_name):
                continue
            nstatus = self.fetch_job(jobid)
            self.log.debug("Fetching job status: %s" % nstatus)
            if nstatus == 'fetched':
                outdir = os.path.join(self.jobsdir, jobid.split('/jobs/')[1])
                if entry.status == 'FAILED':
                    entry.status = 'fetched_failed'
                    entry.error_type = 'lrms'
                    entry.error_msg = "Fetching job '%s' failed" % jobid
                else:
                    # check whether test logically failed.
                    entry.status, entry.error_type, entry.error_msg = \
                        self.check_test_succeeded(outdir)
                if self.html_indexer is None:
                    # Indexer could not be created in __init__.
                    entry.outputdir = outdir + '(indexer error)'
                else:
                    try:
                        self.html_indexer.set_path(outdir)
                        self.html_indexer.generate()
                        entry.outputdir = self.html_indexer.get_logical_path()
                    except HTMLIndexerError as e:
                        self.log.error("%s: %s" % (e.expression, e.message))
                        entry.outputdir = outdir + '(indexer error)'
                entry.db_lastmodified = datetime.utcnow()
                self.session.flush()
            else:
                entry.status = 'fetch_failed'
                entry.error_type = 'lrms'
                entry.error_msg = 'Job could not be retrieved anymore '
            # Notify Nagios about the final outcome of this SFT.
            # NOTE(review): the notification is created with
            # entry.sft_test_name but the message uses entry.test_name --
            # confirm both attributes exist on SFTJob and are intended here.
            _notification = NagiosNotification(entry.cluster_name, entry.sft_test_name)
            if entry.status == 'success':
                _notification.set_status('OK')
                _msg = '(%s) successfully executed' % entry.test_name
            else:
                _notification.set_status('CRITICAL')
                _msg = '(%s) execution failed' % entry.test_name
            _notification.set_message(_msg)
            g.notifier.add_notification(_notification)
        self.session.commit()

    def fetch_job(self, jobid):
        """ fetching the specified job.
            Returns: fetched - if job could be fetched
                     failed  - is something went wrong.
            if failed: with get_last_error(), an error message
            can be fetched.
        """
        cmd = '%s -j %s -D %s %s' % (self.arcget, self.joblist, self.jobsdir, jobid.strip())
        # as ret.wait() sets return codes != 0 even for success, we therefore
        # need to parse the output ;-(
        try:
            output, stderr, return_code = helpers.timeout_call(cmd, Publisher.TIMEOUT)
        except helpers.Alarm:
            # Bug fix: the old code fell through with output/stderr/
            # return_code unbound and raised NameError. A timed-out arcget
            # is reported as a failed fetch instead.
            self.log.error("Fetching job '%s' timed out" % jobid)
            return 'failed'
        self.log.debug("arcget output:>%s<, stderr:>%s<" % (output.strip('\n'), stderr))
        self.log.debug("return-code: %d" % return_code)
        if output and ('successfully' in output):
            self.log.info("Stored job results at %s" % output.strip('\n'))
            return 'fetched'
        else:
            _error_msg = stderr
            self.log.error("Fetching job '%s' failed with %s" %
                           (jobid, _error_msg))
            # XXX need to intercept different kind of errors -> deal with them individually
            return 'failed'

    def check_test_succeeded(self, outdir):
        """
        Checking whether the site functional test of given output directory
        was 'logically' successful.
        Underlying assumptions are, that the test was written following one of the
        following conventions (options):
        option a.) in xrls of job:  (stderr = 'error.log'
        option b.) in xrls of job: (gmlog = '.arc')
        option c.) in xrls of job: (gmlog = 'log')
        and that the output directory exists

        returns 'success', None, None  if job got executed correctly
                'test_failed', error_type, error_msg, if job got executed by failed
        """
        if not os.path.isdir(outdir):
            return 'test_failed', 'output', 'No job output directory'

        if '.arc' in os.listdir(outdir):
            _gmlog = os.path.join(outdir, '.arc')
        elif 'log' in os.listdir(outdir):
            _gmlog = os.path.join(outdir, 'log')
        else:
            _gmlog = None

        if 'error.log' in os.listdir(outdir):
            # Option a: a non-empty error.log means the test logically failed.
            elog = os.path.join(outdir, 'error.log')
            if os.path.getsize(elog) > 0:
                return 'test_failed', 'logical', 'Test failed, see error.log for details'

        if not _gmlog:
            # No gmlog convention in use; an (empty or absent) error.log is
            # the only indicator, so the test counts as successful.
            return 'success', None, None

        self.log.debug('Checking gmlog file: >%s< in output dir: %s' % (_gmlog, outdir))
        if not _gmlog or not os.path.isdir(_gmlog):
            return 'test_failed', 'output', "gmlog missing, or it's not set to '.arc' or 'log'."

        if 'failed' in os.listdir(_gmlog):
            # Options b/c: the grid manager drops a 'failed' file on failure.
            return 'test_failed', 'logical', "Test failed, see gmlog 'failed' file"

        return 'success', None, None

    def main(self):
        """ main method """
        self.reset_proxy_cache()
        self.check_submitted_jobs()
        self.fetch_final_jobs()
| bsd-3-clause |
dpc/rust | src/etc/generate-keyword-tests.py | 24 | 1981 | #!/usr/bin/env python
#
# Copyright 2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
This script takes a list of keywords and generates a testcase, that checks
if using the keyword as identifier fails, for every keyword. The generate
test files are set read-only.
Test for https://github.com/rust-lang/rust/issues/2275
sample usage: src/etc/generate-keyword-tests.py as break
"""
import sys
import os
import datetime
import stat
template = """// Copyright %d The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This file was auto-generated using 'src/etc/generate-keyword-tests.py %s'
fn main() {
let %s = "foo"; //~ error: ident
}
"""
test_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../test/compile-fail')
)
for kw in sys.argv[1:]:
test_file = os.path.join(test_dir, 'keyword-%s-as-identifier.rs' % kw)
# set write permission if file exists, so it can be changed
if os.path.exists(test_file):
os.chmod(test_file, stat.S_IWUSR)
with open(test_file, 'wt') as f:
f.write(template % (datetime.datetime.now().year, kw, kw))
# mark file read-only
os.chmod(test_file, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
| apache-2.0 |
gangadharkadam/stfrappe | frappe/widgets/report_dump.py | 39 | 2844 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import json
import copy
@frappe.whitelist()
def get_data(doctypes, last_modified):
	"""Dump report data for the requested doctypes as a nested dict.

	`doctypes` and `last_modified` arrive as JSON strings. For each
	doctype the result carries 'data', 'columns', 'last_modified' and,
	optionally, 'links' and (for incremental dumps) 'modified_names'.
	"""
	# Collect the column/condition configuration contributed by hooks.
	data_map = {}
	for dump_report_map in frappe.get_hooks().dump_report_map:
		data_map.update(frappe.get_attr(dump_report_map))

	import datetime
	out = {}

	doctypes = json.loads(doctypes)
	last_modified = json.loads(last_modified)

	start = datetime.datetime.now()
	for d in doctypes:
		args = copy.deepcopy(data_map[d])
		# A doctype key may carry a "[...]" suffix; strip it for the output key.
		dt = d.find("[") != -1 and d[:d.find("[")] or d
		out[dt] = {}

		if args.get("from"):
			modified_table = "item."
		else:
			modified_table = ""

		conditions = order_by = ""
		table = args.get("from") or ("`tab%s`" % dt)

		if d in last_modified:
			# Incremental dump: restrict to rows changed since the client's
			# last sync and report which names changed.
			# NOTE(review): last_modified values are concatenated straight
			# into SQL on a whitelisted endpoint -- verify they are trusted
			# or switch to parameterized queries.
			if not args.get("conditions"):
				args['conditions'] = []
			args['conditions'].append(modified_table + "modified > '" + last_modified[d] + "'")

			out[dt]["modified_names"] = frappe.db.sql_list("""select %sname from %s
				where %smodified > %s""" % (modified_table, table, modified_table, "%s"), last_modified[d])

		if args.get("force_index"):
			conditions = " force index (%s) " % args["force_index"]
		if args.get("conditions"):
			conditions += " where " + " and ".join(args["conditions"])
		if args.get("order_by"):
			order_by = " order by " + args["order_by"]

		out[dt]["data"] = [list(t) for t in frappe.db.sql("""select %s from %s %s %s""" \
			% (",".join(args["columns"]), table, conditions, order_by))]

		# last modified
		modified_table = table
		if "," in table:
			modified_table = " ".join(table.split(",")[0].split(" ")[:-1])
		tmp = frappe.db.sql("""select `modified`
			from %s order by modified desc limit 1""" % modified_table)
		out[dt]["last_modified"] = tmp and tmp[0][0] or ""
		# Strip "as" aliases so only the exposed column names remain.
		out[dt]["columns"] = map(lambda c: c.split(" as ")[-1], args["columns"])

		if args.get("links"):
			out[dt]["links"] = args["links"]

	for d in out:
		unused_links = []
		# only compress full dumps (not partial)
		if out[d].get("links") and (d not in last_modified):
			for link_key in out[d]["links"]:
				link = out[d]["links"][link_key]
				if link[0] in out and (link[0] not in last_modified):
					# make a map of link ids
					# to index
					link_map = {}
					doctype_data = out[link[0]]
					col_idx = doctype_data["columns"].index(link[1])
					for row_idx in xrange(len(doctype_data["data"])):
						row = doctype_data["data"][row_idx]
						link_map[row[col_idx]] = row_idx

					for row in out[d]["data"]:
						col_idx = out[d]["columns"].index(link_key)
						# replace by id
						if row[col_idx]:
							row[col_idx] = link_map.get(row[col_idx])
				else:
					unused_links.append(link_key)

			for link in unused_links:
				del out[d]["links"][link]

	return out
| mit |
yasserglez/tagfs | packages/tagfs/contrib/django/contrib/admin/views/template.py | 9 | 3236 | from django import template, forms
from django.contrib.admin.views.decorators import staff_member_required
from django.template import loader
from django.shortcuts import render_to_response
from django.contrib.sites.models import Site
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
def template_validator(request):
    """
    Displays the template validator form, which finds and displays template
    syntax errors.
    """
    # Map SITE_ID -> imported settings module for every site this admin
    # instance manages (listed in settings.ADMIN_FOR).
    settings_modules = {}
    for module_path in settings.ADMIN_FOR:
        module = import_module(module_path)
        settings_modules[module.SITE_ID] = module
    site_list = Site.objects.in_bulk(settings_modules.keys()).values()
    if not request.POST:
        form = TemplateValidatorForm(settings_modules, site_list)
    else:
        form = TemplateValidatorForm(settings_modules, site_list,
                                     data=request.POST)
        # Validation happens in the form's clean_template(); a valid
        # template just produces a user message.
        if form.is_valid():
            request.user.message_set.create(message='The template is valid.')
    context = {
        'title': 'Template validator',
        'form': form,
    }
    return render_to_response('admin/template_validator.html', context,
                              context_instance=template.RequestContext(request))
template_validator = staff_member_required(template_validator)
class TemplateValidatorForm(forms.Form):
    # Form that validates a template string against one site's settings.
    # NOTE: this is Python 2 era code (`except X, e` / `raise E, args`).
    site = forms.ChoiceField(_('site'))
    template = forms.CharField(
        _('template'), widget=forms.Textarea({'rows': 25, 'cols': 80}))

    def __init__(self, settings_modules, site_list, *args, **kwargs):
        # settings_modules: {site_id: settings module}, used by clean_template
        # to find the TEMPLATE_DIRS of the selected site.
        self.settings_modules = settings_modules
        super(TemplateValidatorForm, self).__init__(*args, **kwargs)
        self.fields['site'].choices = [(s.id, s.name) for s in site_list]

    def clean_template(self):
        """Validate the submitted template in the selected site's context."""
        # Get the settings module. If the site isn't set, we don't raise an
        # error since the site field will.
        try:
            site_id = int(self.cleaned_data.get('site', None))
        except (ValueError, TypeError):
            return
        settings_module = self.settings_modules.get(site_id, None)
        if settings_module is None:
            return
        # So that inheritance works in the site's context, register a new
        # function for "extends" that uses the site's TEMPLATE_DIRS instead.
        def new_do_extends(parser, token):
            node = loader.do_extends(parser, token)
            node.template_dirs = settings_module.TEMPLATE_DIRS
            return node
        register = template.Library()
        register.tag('extends', new_do_extends)
        # Mutates the process-global builtins list; removed again below.
        template.builtins.append(register)
        # Now validate the template using the new TEMPLATE_DIRS, making sure to
        # reset the extends function in any case.
        error = None
        template_string = self.cleaned_data['template']
        try:
            tmpl = loader.get_template_from_string(template_string)
            tmpl.render(template.Context({}))
        except template.TemplateSyntaxError, e:
            error = e
        # Always unregister the custom "extends" tag, error or not.
        template.builtins.remove(register)
        if error:
            raise forms.ValidationError, e.args
| mit |
bgyori/indra | indra/statements/concept.py | 3 | 6441 | import logging
from collections import OrderedDict as _o
logger = logging.getLogger(__name__)
default_ns_order = ['WM', 'UN', 'HUME', 'SOFIA', 'CWMS']
class Concept(object):
    """A concept/entity of interest that is the argument of a Statement

    Parameters
    ----------
    name : str
        The name of the concept, possibly a canonicalized name.
    db_refs : dict
        Dictionary of database identifiers associated with this concept.
    """
    def __init__(self, name, db_refs=None):
        self.name = name
        # Avoid the shared-mutable-default trap: each instance gets its
        # own dict when db_refs is falsy.
        self.db_refs = db_refs if db_refs else {}

    def matches(self, other):
        # Full match is currently identical to entity match (see matches_key).
        return self.matches_key() == other.matches_key()

    def matches_key(self):
        """Return a string key used for exact matching of Concepts."""
        key = self.entity_matches_key()
        return str(key)

    def entity_matches(self, other):
        return self.entity_matches_key() == other.entity_matches_key()

    def entity_matches_key(self):
        """Return a key based on grounding, falling back to the name."""
        # Get the grounding first
        db_ns, db_id = self.get_grounding()
        # If there's no grounding, just use the name as key
        if not db_ns and not db_id:
            return self.name
        return str((db_ns, db_id))

    def equals(self, other):
        """Strict equality: both the name and all db_refs must match."""
        matches = (self.name == other.name) and \
                  (self.db_refs == other.db_refs)
        return matches

    def get_grounding(self, ns_order=None):
        """Return the preferred (namespace, id) grounding or (None, None).

        Namespaces are tried in ns_order (default: default_ns_order); the
        stored value for a namespace may come in three shapes, handled below.
        """
        # There are the following possibilities here:
        # 1. a single unscored entry (str)
        # 2. a list of scored entries with one element per entry
        #    (list of tuple(str, float))
        # 3. a list of entries with each entry cosisting of a tuple
        #    of 4 scored groundings (list of tuple(tuple(str, float)))
        ns_order = ns_order if ns_order else default_ns_order
        for db_ns in ns_order:
            # If there is no such entry, we continue
            db_id = self.db_refs.get(db_ns)
            # Note, this includes an empty list in case that comes up
            if not db_id:
                continue
            # Case 1, a simple string ID
            if isinstance(db_id, str):
                return db_ns, db_id
            # Cases 2 and 3 where db_id here is a list
            elif isinstance(db_id, (list, tuple)):
                first_entry = db_id[0]
                # Case 2: each entry is a grounding and a score
                if len(first_entry) == 2:
                    # Pick the entry with the highest score.
                    top_entry = \
                        sorted(db_id, key=lambda x: x[1],
                               reverse=True)[0][0]
                    return db_ns, top_entry
                # Case 3: each entry is a tuple with 4 elements
                # each of which is a tuple consisting of a grounding
                # and a score
                else:
                    top_entry = get_top_compositional_grounding(db_id)
                    # Strip the scores, keeping only the grounding strings
                    # (None slots stay None).
                    return db_ns, tuple([gr[0] if gr is not None else None
                                         for gr in top_entry])
            else:
                continue
        return None, None

    def isa(self, other, ontology):
        """Return True if this Concept is-a `other` per the ontology."""
        # Get the namespaces for the comparison
        (self_ns, self_id) = self.get_grounding()
        (other_ns, other_id) = other.get_grounding()
        # If one of the agents isn't grounded to a relevant namespace,
        # there can't be an isa relationship
        if not all((self_ns, self_id, other_ns, other_id)):
            return False
        # Check for isa relationship
        return ontology.isa(self_ns, self_id, other_ns, other_id)

    def is_opposite(self, other, ontology):
        """Return True if this Concept is the opposite of `other`."""
        # Get the namespaces for the comparison
        (self_ns, self_id) = self.get_grounding()
        (other_ns, other_id) = other.get_grounding()
        # If one of the agents isn't grounded to a relevant namespace,
        # there can't be an is_opposite relationship
        if not all((self_ns, self_id, other_ns, other_id)):
            return False
        # Check for is_opposite relationship
        return ontology.is_opposite(self_ns, self_id,
                                    other_ns, other_id)

    def refinement_of(self, other, ontology, entities_refined=False):
        """Return True if this Concept refines (is at least as specific
        as) `other`: entity match or an isa relation in the ontology."""
        # Make sure the Agent types match
        if type(self) != type(other):
            return False
        # Check that the basic entity of the agent either matches or is related
        # to the entity of the other agent. If not, no match.
        # If the entities, match, then we can continue
        if entities_refined:
            return True
        if self.entity_matches(other):
            return True
        if self.isa(other, ontology):
            return True
        return False

    def to_json(self):
        """Serialize to a JSON-compatible ordered dict."""
        json_dict = _o({'name': self.name})
        json_dict['db_refs'] = self.db_refs
        return json_dict

    @classmethod
    def _from_json(cls, json_dict):
        """Alternate constructor from a to_json()-style dict; returns None
        (with an error log) if the name is missing."""
        name = json_dict.get('name')
        db_refs = json_dict.get('db_refs', {})
        if not name:
            logger.error('Concept missing name.')
            return None
        # This fixes the fact that scored lists of groundings
        # are deserialized as lists of lists instead of lists
        # of tuples.
        for key, val in db_refs.items():
            if isinstance(val, list):
                db_refs[key] = [tuple(v) for v in val]
        concept = Concept(name, db_refs=db_refs)
        return concept

    def __str__(self):
        return self.name

    def __repr__(self):
        return str(self)
def compositional_sort_key(entry):
    """Return a sort key from a compositional grounding entry."""
    # Drop the ungrounded (None) slots, keeping grounding/score pairs.
    groundings = [slot for slot in entry if slot is not None]
    concepts = [grounding for grounding, _ in groundings]
    scores = [score for _, score in groundings]
    # Rank by: the theme grounding's score, then the number of grounded
    # slots, then the mean score, and finally an alphabetical component
    # to keep ordering deterministic.
    theme_score = scores[0]
    num_groundings = len(scores)
    mean_score = sum(scores) / len(scores)
    alphabetical = '|'.join(concepts)
    return theme_score, num_groundings, mean_score, alphabetical
def get_top_compositional_grounding(groundings):
    """Return the highest ranking compositional grounding entry."""
    # Stable descending sort: among equal keys the earliest entry wins,
    # matching max()'s first-maximal behavior.
    ranked = sorted(groundings, key=compositional_sort_key, reverse=True)
    return ranked[0]
def get_sorted_compositional_groundings(groundings):
    """Return the compositional groundings sorted starting from the top."""
    # reverse=True with a stable sort keeps the original relative order
    # of entries whose keys tie.
    return sorted(groundings, key=compositional_sort_key, reverse=True)
| bsd-2-clause |
theflofly/tensorflow | tensorflow/compiler/tests/scan_ops_test.py | 12 | 7727 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def numpy_reverse(x, axis):
  """Return *x* reversed along `axis` (may be negative)."""
  length = len(x.shape)
  if axis < 0:
    axis = length + axis
  # Per-dimension index: a backwards slice on `axis`, ':' everywhere else.
  # Must be a tuple: indexing with a *list* of slices is deprecated and
  # removed in modern NumPy.
  ix = tuple(
      slice(None, None, -1) if i == axis else slice(None) for i in range(length)
  )
  return x[ix]


def handle_options(func, x, axis, exclusive, reverse):
  """Adds tf options to numpy scan ops.

  Args:
    func: np.cumsum or np.cumprod.
    x: input ndarray.
    axis: scan axis (may be negative).
    exclusive: if True, shift the scan so element i excludes x[i].
    reverse: if True, scan from the end towards the beginning.

  Returns:
    The ndarray produced by the configured scan.

  Raises:
    ValueError: if `func` is not a supported scan function.
  """
  length = len(x.shape)
  if axis < 0:
    axis = length + axis

  if reverse:
    x = numpy_reverse(x, axis)

  if exclusive:
    # Tuples (not lists) of slices -- see numpy_reverse above.
    # ix_head selects the first element along `axis`; ix_init drops the last.
    ix_head = tuple(
        slice(0, 1) if i == axis else slice(None) for i in range(length))
    ix_init = tuple(
        slice(0, -1) if i == axis else slice(None) for i in range(length))
    if func == np.cumsum:
      init = np.zeros_like(x[ix_head])
    elif func == np.cumprod:
      init = np.ones_like(x[ix_head])
    else:
      raise ValueError("Unknown scan function.")
    # Prepend the identity element and scan everything but the last slice.
    x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
  else:
    x = func(x, axis=axis)

  if reverse:
    x = numpy_reverse(x, axis)
  return x
class CumsumTest(xla_test.XLATestCase):
  # Tests tf.cumsum under XLA against the numpy reference (handle_options).
  valid_dtypes = [np.float32, np.int32]

  def axis_dtypes(self):
    # Integer dtypes supported for the `axis` argument on this backend.
    return set(self.int_types).intersection([np.int32, np.int64])

  def _compare(self, x, axis, exclusive, reverse):
    """Compare one tf.cumsum configuration against the numpy reference."""
    np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
    with self.cached_session(), self.test_scope():
      # Feed through a placeholder so the op sees a dynamic input.
      p = array_ops.placeholder(x.dtype)
      tf_out = math_ops.cumsum(p, axis, exclusive, reverse).eval(
          feed_dict={p: x})
    self.assertAllClose(np_out, tf_out)

  def _compareAll(self, x, axis):
    # Exercise all four exclusive/reverse combinations.
    for exclusive in [True, False]:
      for reverse in [True, False]:
        self._compare(x, axis, exclusive, reverse)

  def testEmpty(self):
    # Zero-length input must not crash.
    for dtype in self.valid_dtypes:
      x = np.zeros([0]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def testAxisType(self):
    # The axis argument should be accepted as any supported integer dtype.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis_dtype in self.axis_dtypes():
        with self.cached_session(), self.test_scope():
          p = array_ops.placeholder(x.dtype)
          axis = constant_op.constant(0, axis_dtype)
          math_ops.cumsum(p, axis).eval(feed_dict={p: x})

  def test1D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def test2D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
      for axis in (-2, -1, 0, 1):
        self._compareAll(x, axis)

  def test3D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(x, axis)

  def test6D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
      for axis in range(-6, 6, 3):
        self._compareAll(x, axis)

  def testInvalidAxis(self):
    # Out-of-range or non-scalar axes must raise InvalidArgumentError.
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.cached_session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(x)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumsum(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumsum(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumsum(input_tensor, [0]).eval()
class CumprodTest(xla_test.XLATestCase):
  # Tests tf.cumprod under XLA against the numpy reference (handle_options).
  valid_dtypes = [np.float32, np.int32]

  def axis_dtypes(self):
    # Integer dtypes supported for the `axis` argument on this backend.
    return set(self.int_types).intersection([np.int32, np.int64])

  def _compare(self, x, axis, exclusive, reverse):
    """Compare one tf.cumprod configuration against the numpy reference."""
    np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
    with self.cached_session(), self.test_scope():
      p = array_ops.placeholder(x.dtype)
      prod = math_ops.cumprod(p, axis, exclusive, reverse)
      tf_out = prod.eval(feed_dict={p: x})
    self.assertAllClose(np_out, tf_out)

  def _compareAll(self, x, axis):
    # Exercise all four exclusive/reverse combinations.
    for exclusive in [True, False]:
      for reverse in [True, False]:
        self._compare(x, axis, exclusive, reverse)

  def testEmpty(self):
    # Zero-length input must not crash.
    for dtype in self.valid_dtypes:
      x = np.zeros([0]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def testAxisType(self):
    # The axis argument should be accepted as any supported integer dtype.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis_dtype in self.axis_dtypes():
        with self.cached_session(), self.test_scope():
          p = array_ops.placeholder(x.dtype)
          axis = constant_op.constant(0, axis_dtype)
          # BUG FIX: previously ran cumprod on the numpy array `x` directly,
          # so the placeholder path was never exercised (cf. CumsumTest).
          math_ops.cumprod(p, axis).eval(feed_dict={p: x})

  def test1D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def test2D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
      for axis in (-2, -1, 0, 1):
        self._compareAll(x, axis)

  def test3D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(x, axis)

  def test6D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
      for axis in range(-6, 6, 3):
        self._compareAll(x, axis)

  def testInvalidAxis(self):
    # Out-of-range or non-scalar axes must raise InvalidArgumentError.
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.cached_session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(x)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumprod(input_tensor, [0]).eval()
if __name__ == "__main__":
test.main()
| apache-2.0 |
narunlifescience/Clementine | dist/update_desktop_languages.py | 24 | 3408 | import codecs
import glob
import logging
import os
import polib
import re
PO_GLOB = 'src/translations/*.po'
DESKTOP_PATH = 'dist/clementine.desktop'
class ConfigParser(object):
  """
  Better version of the ConfigParser from the stdlib that handles unicode.

  Sections are kept as an ordered list of {'name': ..., 'items': [(k, v)]}
  dicts so that write() preserves the original file order.
  """

  # Raw strings: '\[' and '\s' are invalid string escapes in Python 3
  # and would trigger SyntaxWarning/DeprecationWarning otherwise.
  SECTION_RE = re.compile(r'^\[(.*)\]\s*$')
  VALUE_RE = re.compile(r'^([^\s=]+)\s*=\s*(.*)')

  def __init__(self, encoding='utf-8'):
    self._sections = []
    self.encoding = encoding

  def read(self, filename):
    """Parse `filename`, appending its sections and key/value items."""
    with codecs.open(filename, 'r', self.encoding) as handle:
      current_section = None
      for line in handle:
        match = self.SECTION_RE.match(line)
        if match:
          current_section = self._add_section(match.group(1))
          continue
        match = self.VALUE_RE.match(line)
        # Guard against key=value lines appearing before any [section]
        # header; previously this crashed with a TypeError on None.
        if match and current_section is not None:
          current_section['items'].append(
              (match.group(1), match.group(2).strip()))

  def _add_section(self, section_name):
    """Append a new (possibly duplicate-named) section and return it."""
    section = {
        'name': section_name,
        'items': [],
    }
    self._sections.append(section)
    return section

  def _find_section(self, section_name):
    # Raises IndexError when the section does not exist (callers catch it).
    return [
        section
        for section in self._sections
        if section['name'] == section_name
    ][0]

  def _find_item_value(self, section, key):
    # Raises IndexError when the key does not exist (callers catch it).
    return [
        item[1]
        for item in section['items']
        if item[0] == key
    ][0]

  def sections(self):
    """Return the section names in file order."""
    return [x['name'] for x in self._sections]

  def get(self, section_name, key):
    """Return the value for `key` in `section_name`, or None if absent."""
    try:
      return self._find_item_value(self._find_section(section_name), key)
    except IndexError:
      return None

  def set(self, section_name, key, value):
    """Set `key` to `value`, creating the section or key if needed."""
    try:
      section = self._find_section(section_name)
    except IndexError:
      section = self._add_section(section_name)
    for index, (existing_key, _existing_value) in enumerate(section['items']):
      if existing_key == key:
        section['items'][index] = (key, value)
        return
    section['items'].append((key, value))

  def write(self, filename):
    """Write all sections back out in INI format."""
    with codecs.open(filename, 'w', self.encoding) as handle:
      for section in self._sections:
        handle.write('[%s]\n' % section['name'])
        for key, value in section['items']:
          handle.write('%s=%s\n' % (key, value))
        handle.write('\n')
def main():
    """Copy translated shortcut names from the .po files into the
    [... Shortcut Group] sections of the .desktop file."""
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s %(levelname)-7s %(message)s")
    parser = ConfigParser()
    parser.read(DESKTOP_PATH)
    # Collect every shortcut section together with its untranslated name.
    shortcuts = []
    for section_name in parser.sections():
        if not section_name.endswith('Shortcut Group'):
            continue
        shortcuts.append({
            'section_name': section_name,
            'original': parser.get(section_name, 'Name'),
        })
    for po_filename in glob.glob(PO_GLOB):
        language = os.path.splitext(os.path.basename(po_filename))[0]
        config_key = 'Name[%s]' % language
        po_file = polib.pofile(po_filename)
        logging.info('Reading language %s', po_filename)
        for shortcut in shortcuts:
            original = shortcut['original']
            entry = po_file.find(original)
            # Only record real translations that differ from the source text.
            if entry is None or not entry.msgstr or entry.msgstr == original:
                continue
            logging.info('Setting %s for %s to %s',
                         config_key, shortcut['section_name'], entry.msgstr)
            parser.set(shortcut['section_name'], config_key, entry.msgstr)
    parser.write(DESKTOP_PATH)
if __name__ == '__main__':
main()
| gpl-3.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/jedi/api/keywords.py | 1 | 3698 | import pydoc
import keyword
from jedi._compatibility import is_py3, is_py35
from jedi import common
from jedi.evaluate.filters import AbstractNameDefinition
from parso.python.tree import Leaf
try:
from pydoc_data import topics as pydoc_topics
except ImportError:
# Python 2
try:
import pydoc_topics
except ImportError:
# This is for Python 3 embeddable version, which dont have
# pydoc_data module in its file python3x.zip.
pydoc_topics = None
if is_py3:
    if is_py35:
        # In Python 3.5 'async' and 'await' are not proper keywords, but for
        # completion purposes they should act as though they are.
        keys = keyword.kwlist + ["async", "await"]
    else:
        keys = keyword.kwlist
else:
    # Python 2: None/False/True are constants, not keywords, so add them.
    keys = keyword.kwlist + ['None', 'False', 'True']
def has_inappropriate_leaf_keyword(pos, module):
    """Return True if a syntax error on the same line as `pos` expects a
    keyword next (i.e. a leaf-only keyword would be appropriate here)."""
    for error in module.error_statement_stacks:
        # Only errors on the same line as the completion position count.
        if error.first_pos[0] == pos[0] and error.next_token in keys:
            return True
    return False
def completion_names(evaluator, stmt, pos, module):
    """Return the KeywordName objects valid for completion at `pos`."""
    candidates = all_keywords(evaluator)
    # Leaf-only keywords ('continue', 'break') are filtered out unless the
    # completion happens on a leaf in an appropriate error context.
    if not isinstance(stmt, Leaf) or has_inappropriate_leaf_keyword(pos, module):
        candidates = (kw for kw in candidates if not kw.only_valid_as_leaf)
    return [kw.name for kw in candidates]
def all_keywords(evaluator, pos=(0, 0)):
    """Return a set with one Keyword object for every known keyword."""
    return {Keyword(evaluator, word, pos) for word in keys}
def keyword(evaluator, string, pos=(0, 0)):
    """Return a Keyword for `string`, or None if it is not a keyword."""
    if string not in keys:
        return None
    return Keyword(evaluator, string, pos)
def get_operator(evaluator, string, pos):
    # Operators are wrapped in the same Keyword type as real keywords.
    return Keyword(evaluator, string, pos)
keywords_only_valid_as_leaf = (
'continue',
'break',
)
class KeywordName(AbstractNameDefinition):
    # Name-definition wrapper for a keyword string; keywords live in the
    # builtins context.
    api_type = 'keyword'

    def __init__(self, evaluator, name):
        self.evaluator = evaluator
        self.string_name = name
        self.parent_context = evaluator.BUILTINS

    def eval(self):
        # Keywords do not evaluate to any value.
        return set()

    def infer(self):
        # A keyword name infers to a single Keyword object.
        return [Keyword(self.evaluator, self.string_name, (0, 0))]
class Keyword(object):
    # Completion/definition object representing a Python keyword.
    api_type = 'keyword'

    def __init__(self, evaluator, name, pos):
        self.name = KeywordName(evaluator, name)
        self.start_pos = pos
        self.parent = evaluator.BUILTINS

    @property
    def only_valid_as_leaf(self):
        # True for keywords like 'break'/'continue' that are only offered
        # in specific leaf positions.
        return self.name.value in keywords_only_valid_as_leaf

    @property
    def names(self):
        """ For a `parsing.Name` like comparision """
        return [self.name]

    def py__doc__(self, include_call_signature=False):
        # Documentation is scraped from pydoc's topic tables.
        return imitate_pydoc(self.name.string_name)

    def __repr__(self):
        return '<%s: %s>' % (type(self).__name__, self.name)
def imitate_pydoc(string):
    """
    It's not possible to get the pydoc's without starting the annoying pager
    stuff.

    Looks the keyword/symbol up in pydoc's topic tables and returns the
    raw help text, or '' when nothing is found.
    """
    if pydoc_topics is None:
        return ''
    # str needed because of possible unicode stuff in py2k (pydoc doesn't work
    # with unicode strings)
    string = str(string)
    h = pydoc.help
    with common.ignored(KeyError):
        # try to access symbols
        string = h.symbols[string]
        string, _, related = string.partition(' ')
    get_target = lambda s: h.topics.get(s, h.keywords.get(s))
    # Follow alias chains: a string entry points at another topic name
    # until we reach a (label, xrefs) tuple.
    while isinstance(string, str):
        string = get_target(string)
    try:
        # is a tuple now
        label, related = string
    except TypeError:
        return ''
    try:
        return pydoc_topics.topics[label].strip() if pydoc_topics else ''
    except KeyError:
        return ''
| apache-2.0 |
thomas-schmid-ubnt/avocado | avocado/plugins/jsonresult.py | 1 | 4544 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2014
# Authors: Ruda Moura <rmoura@redhat.com>
# Cleber Rosa <crosa@redhat.com>
"""
JSON output module.
"""
import json
import logging
import os
from avocado.core.parser import FileOrStdoutAction
from avocado.core.plugin_interfaces import CLI, Result
UNKNOWN = '<unknown>'
class JSONResult(Result):

    """Result plugin that renders a finished job as a JSON document."""

    name = 'json'
    description = 'JSON result support'

    def _render(self, result):
        """Serialize `result` (a core Result object) into a JSON string."""
        tests = []
        for test in result.tests:
            tests.append({'id': str(test.get('name', UNKNOWN)),
                          'start': test.get('time_start', -1),
                          'end': test.get('time_end', -1),
                          'time': test.get('time_elapsed', -1),
                          'status': test.get('status', {}),
                          'whiteboard': test.get('whiteboard', UNKNOWN),
                          'logdir': test.get('logdir', UNKNOWN),
                          'logfile': test.get('logfile', UNKNOWN),
                          'fail_reason': str(test.get('fail_reason', UNKNOWN)),
                          # COMPATIBILITY: `test` and `url` are backward
                          # compatibility key names for the test ID,
                          # as defined by the test name RFC. `url` is
                          # not a test reference, as it's recorded
                          # after it has been processed by the test resolver
                          # (currently called test loader in the code).
                          # Expect them to be removed in the future.
                          'test': str(test.get('name', UNKNOWN)),
                          'url': str(test.get('name', UNKNOWN))})
        content = {'job_id': result.job_unique_id,
                   'debuglog': result.logfile,
                   'tests': tests,
                   'total': result.tests_total,
                   'pass': result.passed,
                   'errors': result.errors,
                   'failures': result.failed,
                   'skip': result.skipped,
                   'cancel': result.cancelled,
                   'time': result.tests_total_time}
        return json.dumps(content,
                          sort_keys=True,
                          indent=4,
                          separators=(',', ': '))

    def render(self, result, job):
        """Write results.json and/or the --json target, if requested.

        Does nothing when neither JSON option is configured or when the
        job ran no tests.
        """
        if not (hasattr(job.args, 'json_job_result') or
                hasattr(job.args, 'json_output')):
            return

        if not result.tests_total:
            return

        content = self._render(result)
        if getattr(job.args, 'json_job_result', 'off') == 'on':
            json_path = os.path.join(job.logdir, 'results.json')
            with open(json_path, 'w') as json_file:
                json_file.write(content)

        # BUG FIX: the fallback used to be the *string* 'None', which passes
        # the `is not None` check below and would create a file literally
        # named "None" when the attribute was missing.
        json_path = getattr(job.args, 'json_output', None)
        if json_path is not None:
            if json_path == '-':
                log = logging.getLogger("avocado.app")
                log.debug(content)
            else:
                with open(json_path, 'w') as json_file:
                    json_file.write(content)
class JSONCLI(CLI):

    """
    JSON output
    """

    name = 'json'
    description = "JSON output options for 'run' command"

    def configure(self, parser):
        """Register the --json options on the 'run' subcommand, if present."""
        run_parser = parser.subcommands.choices.get('run', None)
        if run_parser is None:
            return

        output = run_parser.output
        output.add_argument(
            '--json', type=str, action=FileOrStdoutAction,
            dest='json_output', metavar='FILE',
            help='Enable JSON result format and write it to FILE. '
                 "Use '-' to redirect to the standard output.")

        output.add_argument(
            '--json-job-result', dest='json_job_result',
            choices=('on', 'off'), default='on',
            help=('Enables default JSON result in the job results directory. '
                  'File will be named "results.json".'))

    def run(self, args):
        """Nothing to do at run time; rendering happens in JSONResult."""
| gpl-2.0 |
vjmac15/Lyilis | lib/youtube_dl/extractor/teletask (VJ Washington's conflicted copy 2017-08-29).py | 215 | 1739 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import unified_strdate
class TeleTaskIE(InfoExtractor):
    # Extractor for tele-task.de lecture archive pages; each lecture is a
    # playlist of two streams (speaker video and slides).
    _VALID_URL = r'https?://(?:www\.)?tele-task\.de/archive/video/html5/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.tele-task.de/archive/video/html5/26168/',
        'info_dict': {
            'id': '26168',
            'title': 'Duplicate Detection',
        },
        'playlist': [{
            'md5': '290ef69fb2792e481169c3958dbfbd57',
            'info_dict': {
                'id': '26168-speaker',
                'ext': 'mp4',
                'title': 'Duplicate Detection',
                'upload_date': '20141218',
            }
        }, {
            'md5': 'e1e7218c5f0e4790015a437fcf6c71b4',
            'info_dict': {
                'id': '26168-slides',
                'ext': 'mp4',
                'title': 'Duplicate Detection',
                'upload_date': '20141218',
            }
        }]
    }

    def _real_extract(self, url):
        lecture_id = self._match_id(url)
        webpage = self._download_webpage(url, lecture_id)
        # Title and date are scraped from the lecture's HTML metadata.
        title = self._html_search_regex(
            r'itemprop="name">([^<]+)</a>', webpage, 'title')
        upload_date = unified_strdate(self._html_search_regex(
            r'Date:</td><td>([^<]+)</td>', webpage, 'date', fatal=False))
        # One playlist entry per <video> element; the CSS class
        # (e.g. "speaker"/"slides") becomes the format_id suffix.
        entries = [{
            'id': '%s-%s' % (lecture_id, format_id),
            'url': video_url,
            'title': title,
            'upload_date': upload_date,
        } for format_id, video_url in re.findall(
            r'<video class="([^"]+)"[^>]*>\s*<source src="([^"]+)"', webpage)]
        return self.playlist_result(entries, lecture_id, title)
| gpl-3.0 |
chatelak/RMG-Py | rmgpy/cantherm/__init__.py | 13 | 1548 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# CanTherm -
#
# Copyright (c) 2010 by Joshua W. Allen (jwallen@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
from .main import CanTherm
from .statmech import StatMechJob
from .thermo import ThermoJob
from .kinetics import KineticsJob
from .pdep import PressureDependenceJob
| mit |
ngvoice/android-client | phone/jni/pjsip/sources/tests/pjsua/scripts-sendto/999_asterisk_err.py | 59 | 1312 | # $Id: 999_asterisk_err.py 2081 2008-06-27 21:59:15Z bennylp $
import inc_sip as sip
import inc_sdp as sdp
# http://lists.pjsip.org/pipermail/pjsip_lists.pjsip.org/2008-June/003426.html:
#
# Report in pjsip mailing list on 27/6/2008 that this message will
# cause pjsip to respond with 500 and then second request will cause
# segfault.
complete_msg = \
"""INVITE sip:5001@192.168.1.200:5060;transport=UDP SIP/2.0
Via: SIP/2.0/UDP 192.168.1.11:5060;branch=z9hG4bK74a60ee5;rport
From: \"A user\" <sip:66660000@192.168.1.11>;tag=as2858a32c
To: <sip:5001@192.168.1.200:5060;transport=UDP>
Contact: <sip:66660000@192.168.1.11>
Call-ID: 0bc7612c665e875a4a46411442b930a6@192.168.1.11
CSeq: 102 INVITE
User-Agent: Asterisk PBX
Max-Forwards: 70
Date: Fri, 27 Jun 2008 08:46:47 GMT
Allow: INVITE, ACK, CANCEL, OPTIONS, BYE, REFER, SUBSCRIBE, NOTIFY
Supported: replaces
Content-Type: application/sdp
Content-Length: 285
v=0
o=root 4236 4236 IN IP4 192.168.1.11
s=session
c=IN IP4 192.168.1.11
t=0 0
m=audio 14390 RTP/AVP 0 3 8 101
a=rtpmap:0 PCMU/8000
a=rtpmap:3 GSM/8000
a=rtpmap:8 PCMA/8000
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-16
a=silenceSupp:off - - - -
a=ptime:20
a=sendrecv
"""
sendto_cfg = sip.SendtoCfg( "Asterisk 500", "--null-audio --auto-answer 200",
"", 200, complete_msg=complete_msg)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.