repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
brain-tec/connector-magento | magentoerpconnect/tests/test_related_action.py | 11 | 3591 | # -*- coding: utf-8 -*-
import mock
import openerp
import openerp.tests.common as common
from openerp.addons.connector.queue.job import (
Job,
OpenERPJobStorage,
)
from openerp.addons.connector.session import (
ConnectorSession)
from .common import mock_api
from .data_base import magento_base_responses
from ..unit.import_synchronizer import import_batch, import_record
from ..unit.export_synchronizer import export_record
class TestRelatedActionStorage(common.TransactionCase):
    """ Test related actions on stored jobs.

    Uses a fake Magento backend (canned API responses from
    ``magento_base_responses``) so no live Magento instance is required.
    """

    def setUp(self):
        """Create a test Magento backend and import its base records."""
        super(TestRelatedActionStorage, self).setUp()
        backend_model = self.env['magento.backend']
        self.session = ConnectorSession(self.env.cr, self.env.uid,
                                        context=self.env.context)
        warehouse = self.env.ref('stock.warehouse0')
        self.backend = backend_model.create(
            {'name': 'Test Magento',
             'version': '1.7',
             'location': 'http://anyurl',
             'username': 'username',
             'warehouse_id': warehouse.id,
             'password': '42'})
        # Import the base information (websites, stores, storeviews) from
        # the canned responses so bindings can be created in the tests.
        with mock_api(magento_base_responses):
            import_batch(self.session, 'magento.website', self.backend.id)
            import_batch(self.session, 'magento.store', self.backend.id)
            import_batch(self.session, 'magento.storeview', self.backend.id)
        self.MagentoProduct = self.env['magento.product.product']
        self.QueueJob = self.env['queue.job']

    def _create_job(self, func, *args):
        """Store a job for ``func(*args)`` and return its queue.job record."""
        job = Job(func=func, args=args)
        storage = OpenERPJobStorage(self.session)
        storage.store(job)
        stored = self.QueueJob.search([('uuid', '=', job.uuid)])
        self.assertEqual(len(stored), 1)
        return stored

    def test_unwrap_binding(self):
        """ Open a related action opening an unwrapped binding """
        product = self.env.ref('product.product_product_7')
        magento_product = self.MagentoProduct.create(
            {'openerp_id': product.id,
             'backend_id': self.backend.id})
        stored = self._create_job(export_record, 'magento.product.product',
                                  magento_product.id)
        expected = {
            'name': mock.ANY,
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_id': product.id,
            'res_model': 'product.product',
        }
        # assertEqual: ``assertEquals`` is a deprecated unittest alias.
        self.assertEqual(stored.open_related_action(), expected)

    def test_link(self):
        """ Open a related action opening an url on Magento """
        self.backend.write({'admin_location': 'http://www.example.com/admin'})
        stored = self._create_job(import_record, 'magento.product.product',
                                  self.backend.id, 123456)
        url = 'http://www.example.com/admin/catalog_product/edit/id/123456'
        expected = {
            'type': 'ir.actions.act_url',
            'target': 'new',
            'url': url,
        }
        self.assertEqual(stored.open_related_action(), expected)

    def test_link_no_location(self):
        """ Related action opening an url, admin location is not configured """
        self.backend.write({'admin_location': False})
        self.backend.refresh()
        stored = self._create_job(import_record, 'magento.product.product',
                                  self.backend.id, 123456)
        with self.assertRaises(openerp.exceptions.Warning):
            stored.open_related_action()
| agpl-3.0 |
johankaito/fufuka | microblog/old-flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langhungarianmodel.py | 2763 | 12536 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# Map of ISO-8859-2 (Latin-2) byte values to frequency-rank "orders" for the
# Hungarian language model below.  Sentinel values (see header comments):
# 255 = control char, 254 = CR/LF, 253 = symbol/punctuation, 252 = digit.
# Lower order numbers correspond to more frequent letters in Hungarian text.
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
 46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253,  2, 18, 26, 17,  1, 27, 12, 20,  9, 22,  7,  6, 13,  4,  8,
 23, 67, 10,  5,  3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
 79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
 82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Same mapping as above, but for bytes in the windows-1250 encoding.  Both
# tables feed the shared HungarianLangModel precedence matrix below.
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
 46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253,  2, 18, 26, 17,  1, 27, 12, 20,  9, 22,  7,  6, 13,  4,  8,
 23, 67, 10,  5,  3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
 81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
 84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
# Character-pair precedence matrix for Hungarian.  Each cell scores how
# likely the (row, column) order-pair is in real Hungarian text (0 = negative
# / never seen, 3 = very frequent).  Indexed by the "order" values produced
# by the CharToOrder maps above; consumed by chardet's single-byte prober.
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
# Model descriptor for Hungarian text in ISO-8859-2 (Latin-2), consumed by
# chardet's SingleByteCharSetProber.  'keepEnglishLetter' keeps ASCII letters
# in the stream since Hungarian uses the Latin alphabet.
Latin2HungarianModel = {
  'charToOrderMap': Latin2_HungarianCharToOrderMap,
  'precedenceMatrix': HungarianLangModel,
  'mTypicalPositiveRatio': 0.947368,
  'keepEnglishLetter': True,
  'charsetName': "ISO-8859-2"
}
# Model descriptor for Hungarian text in windows-1250; shares the same
# precedence matrix as the Latin-2 model, only the byte->order map differs.
Win1250HungarianModel = {
  'charToOrderMap': win1250HungarianCharToOrderMap,
  'precedenceMatrix': HungarianLangModel,
  'mTypicalPositiveRatio': 0.947368,
  'keepEnglishLetter': True,
  'charsetName': "windows-1250"
}
# flake8: noqa
| apache-2.0 |
EvanK/ansible | lib/ansible/modules/network/onyx/onyx_mlag_vip.py | 52 | 5854 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_mlag_vip
version_added: "2.5"
author: "Samer Deeb (@samerd)"
short_description: Configures MLAG VIP on Mellanox ONYX network devices
description:
- This module provides declarative management of MLAG virtual IPs
on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.4000
options:
ipaddress:
description:
- Virtual IP address of the MLAG. Required if I(state=present).
group_name:
description:
- MLAG group name. Required if I(state=present).
mac_address:
description:
- MLAG system MAC address. Required if I(state=present).
state:
description:
- MLAG VIP state.
choices: ['present', 'absent']
delay:
description:
- Delay interval, in seconds, waiting for the changes on mlag VIP to take
effect.
default: 12
"""
EXAMPLES = """
- name: configure mlag-vip
onyx_mlag_vip:
ipaddress: 50.3.3.1/24
group_name: ansible-test-group
mac_address: 00:11:12:23:34:45
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- mlag-vip ansible_test_group ip 50.3.3.1 /24 force
- no mlag shutdown
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
from ansible.module_utils.network.onyx.onyx import show_cmd
class OnyxMLagVipModule(BaseOnyxModule):
    """Declarative management of the MLAG virtual IP on ONYX devices.

    The BaseOnyxModule framework drives the flow: init_module() ->
    get_required_config() -> load_current_config() -> generate_commands(),
    then pushes self._commands to the device.
    """

    def init_module(self):
        """ initialize module
        """
        element_spec = dict(
            ipaddress=dict(),
            group_name=dict(),
            mac_address=dict(),
            delay=dict(type='int', default=12),
            state=dict(choices=['present', 'absent'], default='present'),
        )
        argument_spec = dict()
        argument_spec.update(element_spec)
        self._module = AnsibleModule(
            argument_spec=argument_spec,
            supports_check_mode=True)

    def get_required_config(self):
        """Copy the user-supplied parameters into self._required_config."""
        module_params = self._module.params
        lag_params = {
            'ipaddress': module_params['ipaddress'],
            'group_name': module_params['group_name'],
            'mac_address': module_params['mac_address'],
            'delay': module_params['delay'],
            'state': module_params['state'],
        }
        self.validate_param_values(lag_params)
        self._required_config = lag_params

    def _show_mlag_cmd(self, cmd):
        # fail_on_error=False: the show command fails when MLAG is not yet
        # configured; in that case the current config is treated as empty.
        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)

    def _show_mlag(self):
        """Return parsed output of 'show mlag' (or None/empty on failure)."""
        cmd = "show mlag"
        return self._show_mlag_cmd(cmd)

    def _show_mlag_vip(self):
        """Return parsed output of 'show mlag-vip' (or None/empty on failure)."""
        cmd = "show mlag-vip"
        return self._show_mlag_cmd(cmd)

    def load_current_config(self):
        """Read the device's current MLAG VIP state into _current_config."""
        self._current_config = dict()
        mlag_config = self._show_mlag()
        mlag_vip_config = self._show_mlag_vip()
        if mlag_vip_config:
            mlag_vip = mlag_vip_config.get("MLAG-VIP", {})
            self._current_config['group_name'] = \
                mlag_vip.get("MLAG group name")
            self._current_config['ipaddress'] = \
                mlag_vip.get("MLAG VIP address")
        if mlag_config:
            self._current_config['mac_address'] = \
                mlag_config.get("System-mac")

    def generate_commands(self):
        """Dispatch to the present/absent command generators."""
        state = self._required_config['state']
        if state == 'present':
            self._generate_mlag_vip_cmds()
        else:
            self._generate_no_mlag_vip_cmds()

    def _generate_mlag_vip_cmds(self):
        """Append config commands for the desired (present) VIP state.

        Only emits commands for attributes that differ from the device's
        current config; MAC addresses are compared case-insensitively.
        """
        current_group = self._current_config.get('group_name')
        current_ip = self._current_config.get('ipaddress')
        current_mac = self._current_config.get('mac_address')
        if current_mac:
            current_mac = current_mac.lower()
        req_group = self._required_config.get('group_name')
        req_ip = self._required_config.get('ipaddress')
        req_mac = self._required_config.get('mac_address')
        if req_mac:
            req_mac = req_mac.lower()
        if req_ip is not None:
            # An IP address is only meaningful together with a group name.
            if req_group is None:
                self._module.fail_json(msg='In order to configure Mlag-Vip you must send '
                                           'group name param beside IPaddress')
            ipaddr, mask = req_ip.split('/')
            if req_group != current_group or req_ip != current_ip:
                self._commands.append('mlag-vip %s ip %s /%s force' % (req_group, ipaddr, mask))
        elif req_group and req_group != current_group:
            self._commands.append('mlag-vip %s' % req_group)
        if req_mac and req_mac != current_mac:
            self._commands.append(
                'mlag system-mac %s' % (req_mac))
        if self._commands:
            # Any change re-enables MLAG.
            self._commands.append('no mlag shutdown')

    def _generate_no_mlag_vip_cmds(self):
        """Append the removal command if a VIP group is currently configured."""
        if self._current_config.get('group_name'):
            self._commands.append('no mlag-vip')

    def check_declarative_intent_params(self, result):
        """After a change, wait `delay` seconds for the VIP to take effect."""
        if not result['changed']:
            return
        delay_interval = self._required_config.get('delay')
        if delay_interval > 0:
            time.sleep(delay_interval)
            # NOTE(review): the empty string sends a bare newline after the
            # show command — presumably to flush/refresh the CLI session;
            # confirm against device behavior before changing.
            for cmd in ("show mlag-vip", ""):
                show_cmd(self._module, cmd, json_fmt=False, fail_on_error=False)
def main():
    """Module entry point: delegate execution to OnyxMLagVipModule."""
    OnyxMLagVipModule.main()
if __name__ == '__main__':
main()
| gpl-3.0 |
dufresnedavid/hr | __unported__/hr_report_payroll_attendance_summary/report/attendance_summary.py | 21 | 9460 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DATEFORMAT
from report import report_sxw
class Parser(report_sxw.rml_parse):
    """Attendance summary report parser.

    Aggregates payslip "worked days" lines per employee over the selected
    period and exposes per-code totals (worked days, overtime categories,
    leaves, AWOL, ...) to the report template via ``localcontext``.
    """

    def __init__(self, cr, uid, name, context):
        super(Parser, self).__init__(cr, uid, name, context)
        self.localcontext.update({
            'get_employee_data': self.get_employee_data,
            'get_worked_days': self.get_worked_days,
            'get_daily_ot': self.get_daily_ot,
            'get_nightly_ot': self.get_nightly_ot,
            'get_restday_ot': self.get_restday_ot,
            'get_holiday_ot': self.get_holiday_ot,
            'get_bunch_no': self.get_bunch_no,
            'get_awol': self.get_awol,
            'get_sickleave': self.get_sickleave,
            'get_no': self.get_no,
            'get_start': self.get_start,
            'get_end': self.get_end,
            'lose_bonus': self.lose_bonus,
            'get_paid_leave': self.get_paid_leave,
            'get_employee_list': self.get_employee_list,
        })
        # Report period boundaries (server date strings), set in set_context().
        self.start_date = False
        self.end_date = False
        # employee_id -> list of payslip worked-day line dicts.
        self.ee_lines = {}
        # Running row number, reset per department (see get_no()).
        self.no = 0
        self.department_id = False
        # Hours in a regular working day, used to convert hours to days.
        self.regular_hours = 8.0

    def set_context(self, objects, data, ids, report_type=None):
        """Capture the period start/end dates from the wizard form."""
        if data.get('form', False) and data['form'].get('start_date', False):
            self.start_date = data['form']['start_date']
        if data.get('form', False) and data['form'].get('end_date', False):
            self.end_date = data['form']['end_date']
        return super(Parser, self).set_context(
            objects, data, ids, report_type=report_type)

    def get_employee_list(self, department_id):
        """Return browse records of active employees of the department
        (including employees whose saved department matches)."""
        ee_obj = self.pool.get('hr.employee')
        ee_ids = ee_obj.search(
            self.cr, self.uid, [
                ('active', '=', True),
                '|',
                ('department_id.id', '=', department_id),
                ('saved_department_id.id', '=', department_id)
            ])
        ees = ee_obj.browse(self.cr, self.uid, ee_ids)
        return ees

    def get_employee_data(self, department_id):
        """Populate self.ee_lines with payslip worked-day lines for every
        active employee of the department, clipped to the report period.

        An employee may have several contracts overlapping the period; the
        lines of each overlapping contract segment are concatenated.
        """
        payslip_obj = self.pool.get('hr.payslip')
        ee_obj = self.pool.get('hr.employee')
        dtStart = datetime.strptime(self.start_date, OE_DATEFORMAT).date()
        dtEnd = datetime.strptime(self.end_date, OE_DATEFORMAT).date()
        ee_ids = ee_obj.search(
            self.cr, self.uid, [
                ('active', '=', True),
                '|',
                ('department_id.id', '=', department_id),
                ('saved_department_id.id', '=', department_id)
            ])
        for ee in ee_obj.browse(self.cr, self.uid, ee_ids):
            datas = []
            for c in ee.contract_ids:
                dtCStart = False
                dtCEnd = False
                if c.date_start:
                    dtCStart = datetime.strptime(
                        c.date_start, OE_DATEFORMAT).date()
                if c.date_end:
                    dtCEnd = datetime.strptime(
                        c.date_end, OE_DATEFORMAT).date()
                # Keep only contracts overlapping the report period and clip
                # their start/end to the period boundaries.
                if (dtCStart and dtCStart <= dtEnd) and (
                    (dtCEnd and dtCEnd >= dtStart) or not dtCEnd
                ):
                    datas.append({
                        'contract_id': c.id,
                        'date_start': (dtCStart > dtStart
                                       and dtCStart.strftime(OE_DATEFORMAT)
                                       or dtStart.strftime(OE_DATEFORMAT)),
                        'date_end': ((dtCEnd and dtCEnd < dtEnd)
                                     and dtCEnd.strftime(OE_DATEFORMAT)
                                     or dtEnd.strftime(OE_DATEFORMAT)),
                    })
            wd_lines = []
            for d in datas:
                wd_lines += payslip_obj.get_worked_day_lines(
                    self.cr, self.uid, [d['contract_id']],
                    d['date_start'], d['date_end'])
            self.ee_lines.update({ee.id: wd_lines})

    def get_start(self):
        """Return the period start formatted for display (e.g. 'March 01, 2014')."""
        return datetime.strptime(self.start_date, OE_DATEFORMAT).strftime(
            '%B %d, %Y')

    def get_end(self):
        """Return the period end formatted for display."""
        return datetime.strptime(self.end_date, OE_DATEFORMAT).strftime(
            '%B %d, %Y')

    def get_no(self, department_id):
        """Return the next row number, restarting from 1 per department."""
        if not self.department_id or self.department_id != department_id:
            self.department_id = department_id
            self.no = 1
        else:
            self.no += 1
        return self.no

    def get_employee_start_date(self, employee_id):
        """Return the earliest contract start date of the employee
        (False if the employee has no contracts)."""
        first_day = False
        c_obj = self.pool.get('hr.contract')
        c_ids = c_obj.search(
            self.cr, self.uid, [('employee_id', '=', employee_id)])
        for contract in c_obj.browse(self.cr, self.uid, c_ids):
            if not first_day or contract.date_start < first_day:
                first_day = contract.date_start
        return first_day

    def get_worked_days(self, employee_id):
        """Return the number of days worked in the period, including paid
        leave and minus AWOL days; capped at 26 for full-month employees who
        reached the maximum possible days (MAX lines)."""
        total = 0.0
        maxw = 0.0
        for line in self.ee_lines[employee_id]:
            if line['code'] in ['WORK100']:
                total += float(line['number_of_hours']) / self.regular_hours
            # Bug fix: the original compared the code string to the *list*
            # ['MAX'] with '==', which is always False, so MAX hours were
            # never accumulated and the 26-day cap below always triggered.
            elif line['code'] in ['MAX']:
                maxw += float(line['number_of_hours']) / self.regular_hours
        total += self.get_paid_leave(employee_id)
        awol = self.get_awol(employee_id)

        # Take care to identify and handle employee's who didn't work the
        # full month: newly hired and terminated employees
        #
        hire_date = self.get_employee_start_date(employee_id)
        term_ids = self.pool.get(
            'hr.employee.termination').search(
                self.cr, self.uid, [
                    ('name', '<', self.end_date),
                    ('name', '>=', self.start_date),
                    ('employee_id', '=', employee_id),
                    ('employee_id.status', 'in', [
                        'pending_inactive', 'inactive']),
                    ('state', 'not in', ['cancel'])])
        if hire_date <= self.start_date and len(term_ids) == 0:
            if total >= maxw:
                total = 26
            total = total - awol
        return total

    def get_paid_leave(self, employee_id):
        """Return the number of paid-leave days taken during the period."""
        total = 0
        paid_leaves = ['LVANNUAL', 'LVBEREAVEMENT', 'LVCIVIC', 'LVMATERNITY',
                       'LVMMEDICAL', 'LVPTO', 'LVWEDDING', 'LVSICK']
        for line in self.ee_lines[employee_id]:
            if line['code'] in paid_leaves:
                total += float(line['number_of_hours']) / self.regular_hours
        return total

    def get_daily_ot(self, employee_id):
        """Return daytime overtime hours (code WORKOTD)."""
        total = 0
        for line in self.ee_lines[employee_id]:
            if line['code'] in ['WORKOTD']:
                total += line['number_of_hours']
        return total

    def get_nightly_ot(self, employee_id):
        """Return night overtime hours (code WORKOTN)."""
        total = 0
        for line in self.ee_lines[employee_id]:
            if line['code'] in ['WORKOTN']:
                total += line['number_of_hours']
        return total

    def get_restday_ot(self, employee_id):
        """Return rest-day overtime hours (code WORKOTR)."""
        total = 0
        for line in self.ee_lines[employee_id]:
            if line['code'] in ['WORKOTR']:
                total += line['number_of_hours']
        return total

    def get_holiday_ot(self, employee_id):
        """Return holiday overtime hours (code WORKOTH)."""
        total = 0
        for line in self.ee_lines[employee_id]:
            if line['code'] in ['WORKOTH']:
                total += line['number_of_hours']
        return total

    def get_bunch_no(self, employee_id):
        """Return the total of BUNCH lines (stored in number_of_hours)."""
        total = 0
        for line in self.ee_lines[employee_id]:
            if line['code'] in ['BUNCH']:
                total += int(line['number_of_hours'])
        return total

    def get_awol(self, employee_id):
        """Return the number of AWOL (absent-without-leave) days."""
        total = 0
        for line in self.ee_lines[employee_id]:
            if line['code'] in ['AWOL']:
                total += float(line['number_of_hours']) / self.regular_hours
        return total

    def get_sickleave(self, employee_id):
        """Return sick-leave days; half-paid sick leave (LVSICK50) counts at
        50%.  NOTE(review): LVSICK50 multiplies hours by 0.5 without dividing
        by regular_hours — confirm whether those lines store days, not hours.
        """
        total = 0
        for line in self.ee_lines[employee_id]:
            if line['code'] in ['LVSICK']:
                total += float(line['number_of_hours']) / self.regular_hours
            elif line['code'] in ['LVSICK50']:
                total += float(line['number_of_hours']) * 0.5
        return total

    def lose_bonus(self, employee_id):
        """Return True if the employee has any disciplinary/absence line
        (AWOL, tardiness, NFRA, warning) that forfeits the bonus."""
        loseit = False
        for line in self.ee_lines[employee_id]:
            if line['code'] in ['AWOL', 'TARDY', 'NFRA', 'WARN'] and line[
                'number_of_hours'
            ] > 0.01:
                loseit = True
        return loseit
| agpl-3.0 |
faust64/ansible | lib/ansible/modules/cloud/openstack/os_user_group.py | 15 | 3391 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: os_user_group
short_description: Associate OpenStack Identity users and groups
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add and remove users from groups
options:
user:
description:
- Name or id for the user
required: true
group:
description:
- Name or id for the group.
required: true
state:
description:
- Should the user be present or absent in the group
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatability
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Add the demo user to the demo group
- os_user_group:
cloud: mycloud
user: demo
group: demo
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _system_state_change(state, in_group):
if state == 'present' and not in_group:
return True
if state == 'absent' and in_group:
return True
return False
def main():
    """Ansible entry point: ensure an OpenStack user's group membership.

    Adds the user to (state=present) or removes it from (state=absent) the
    given group, reporting 'changed' accordingly; supports check mode.
    """
    argument_spec = openstack_full_argument_spec(
        user=dict(required=True),
        group=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    user = module.params['user']
    group = module.params['group']
    state = module.params['state']

    try:
        cloud = shade.operator_cloud(**module.params)
        in_group = cloud.is_user_in_group(user, group)

        # In check mode, only report whether a change would be made.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(state, in_group))

        changed = False
        if state == 'present':
            if not in_group:
                cloud.add_user_to_group(user, group)
                changed = True
        elif state == 'absent':
            if in_group:
                cloud.remove_user_from_group(user, group)
                # PEP8 fix: was "changed=True" (missing spaces around '=').
                changed = True
        module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
cuboxi/android_external_chromium_org | media/tools/layout_tests/bug.py | 144 | 1863 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bug module that is necessary for the layout analyzer."""
import re
from webkitpy.layout_tests.models.test_expectations import *
class Bug(object):
    """A class representing a bug.

    TODO(imasaki): add more functionalities here if bug-tracker API is
    available. For example, you can get the name of a bug owner.
    """
    # Type enum for the bug.
    WEBKIT = 0
    CHROMIUM = 1
    OTHERS = 2

    def __init__(self, bug_modifier):
        """Initialize the object using a raw bug modifier string.

        The bug modifier used in the test expectation file.

        Args:
          bug_modifier: a string representing a bug modifier. According to
              http://www.chromium.org/developers/testing/webkit-layout-tests/\
testexpectations
              Bug identifiers are of the form "webkit.org/b/12345",
              "crbug.com/12345",
              "code.google.com/p/v8/issues/detail?id=12345" or
              "Bug(username)".
        """
        # "Bug(username)" entries are owner annotations, not tracker URLs;
        # map them to a mailto: link for the named chromium.org account.
        # (re.match anchors at the start; '$' anchors the end.)
        match = re.match(r'Bug\((\w+)\)$', bug_modifier)
        if match:
            self.type = self.OTHERS
            self.url = 'mailto:%s@chromium.org' % match.group(1).lower()
            self.bug_txt = bug_modifier
            return
        self.type = self.GetBugType(bug_modifier)
        self.url = bug_modifier
        self.bug_txt = bug_modifier

    def GetBugType(self, bug_modifier):
        """Return the bug type enum based on the modifier's URL prefix."""
        # Fix: removed stray trailing semicolons from the return statements.
        if bug_modifier.startswith(WEBKIT_BUG_PREFIX):
            return self.WEBKIT
        if bug_modifier.startswith(CHROMIUM_BUG_PREFIX):
            return self.CHROMIUM
        return self.OTHERS

    def __str__(self):
        """Get a string representation of a bug object.

        Returns:
          a string for HTML link representation of a bug.
        """
        return '<a href="%s">%s</a>' % (self.url, self.bug_txt)
| bsd-3-clause |
ctk3b/mbuild | mbuild/examples/alkane_monolayer/alkane_monolayer.py | 4 | 1378 | # -*- coding: utf-8 -*-
# -- ==alkane_monolayer== --
import mbuild as mb
from mbuild.lib.surfaces import Betacristobalite
from mbuild.lib.atoms import H
from mbuild.examples.alkane_monolayer.alkylsilane import AlkylSilane
class AlkaneMonolayer(mb.Monolayer):
    """Alkylsilane chains grafted onto a beta-cristobalite silica surface."""

    def __init__(self, pattern, tile_x=1, tile_y=1, chain_length=10):
        """Create an alkylsilane monolayer on beta-cristobalite.

        Parameters
        ----------
        pattern : np.ndarray, shape=(n, 3), optional, default=None
            An array of planar binding locations. If not provided, the entire
            surface will be filled with `chain`.
        tile_x : int, optional, default=1
            Number of times to replicate substrate in x-direction.
        tile_y : int, optional, default=1
            Number of times to replicate substrate in y-direction.
        chain_length : int, optional, default=10
            Number of carbon atoms per chain.
        """
        # Build the three ingredients: the silica substrate, the prototype
        # chain to graft, and the hydrogen used to cap unoccupied sites.
        substrate = Betacristobalite()
        chain_prototype = AlkylSilane(chain_length)
        capping_atom = H()
        super(AlkaneMonolayer, self).__init__(
            substrate, chain_prototype, backfill=capping_atom,
            pattern=pattern, tile_x=tile_x, tile_y=tile_y)
# -- ==alkane_monolayer== -- | mit |
eflowbeach/draw-your-taf | Pmw/Pmw_1_3/doc/ScrolledText_test.py | 5 | 3247 | # Based on iwidgets2.2.0/tests/scrolledtext.test code.
import Test
import Pmw
Test.initialise()
c = Pmw.ScrolledText
def _testYView(doBottom):
    """Check the current widget's vertical view fractions.

    Returns an error-message string on failure; implicitly returns None on
    success (the Test framework treats a string result as a failure).
    """
    w = Test.currentWidget()
    top, bottom = w.yview()
    # yview() must return a pair of floats; isinstance is the idiomatic form
    # of the original ``type(x) != type(0.0)`` comparison.
    if not isinstance(top, float) or not isinstance(bottom, float):
        return 'bad type ' + str(top) + ' ' + str(bottom)
    if doBottom:
        # Scrolled to the bottom: lower fraction must be exactly 1.0.
        if bottom != 1.0:
            return 'bottom is ' + str(bottom)
    else:
        # Scrolled to the top: upper fraction must be exactly 0.0.
        if top != 0.0:
            return 'top is ' + str(top)
kw_1 = {'labelpos': 'n', 'label_text': 'ScrolledText'}
tests_1 = (
(c.pack, (), {'padx' : 10, 'pady' : 10, 'fill' : 'both', 'expand' : 1}),
(Test.num_options, (), 10),
(c.importfile, 'ScrolledText_test.py'),
('hull_background', 'aliceblue'),
('text_borderwidth', 3),
('Scrollbar_borderwidth', 3),
('hull_cursor', 'gumby'),
('text_exportselection', 0),
('text_exportselection', 1),
('text_foreground', 'Black'),
('text_height', 10),
('text_width', 20),
('text_insertbackground', 'Black'),
('text_insertborderwidth', 1),
('text_insertofftime', 200),
('text_insertontime', 500),
('text_insertwidth', 3),
('label_text', 'Label'),
('text_relief', 'raised'),
('text_relief', 'sunken'),
('Scrollbar_repeatdelay', 200),
('Scrollbar_repeatinterval', 105),
('vscrollmode', 'none'),
('vscrollmode', 'static'),
('vscrollmode', 'dynamic'),
('hscrollmode', 'none'),
('hscrollmode', 'static'),
('hscrollmode', 'dynamic'),
('Scrollbar_width', 20),
('text_selectborderwidth', 2),
('text_state', 'disabled'),
('text_state', 'normal'),
('text_background', 'GhostWhite'),
('text_wrap', 'char'),
('text_wrap', 'none'),
('vscrollmode', 'bogus', 'ValueError: bad vscrollmode ' +
'option "bogus": should be static, dynamic, or none'),
('hscrollmode', 'bogus', 'ValueError: bad hscrollmode ' +
'option "bogus": should be static, dynamic, or none'),
(c.cget, 'vscrollmode', 'bogus'),
(c.cget, 'hscrollmode', 'bogus'),
('vscrollmode', 'dynamic'),
('hscrollmode', 'dynamic'),
(c.insert, ('end', 'Hello there\n')),
(_testYView, 0),
(c.yview, ('moveto', 0.02)),
(c.yview, ('moveto', 0.04)),
(c.yview, ('moveto', 0.06)),
(c.yview, ('moveto', 0.08)),
(c.yview, ('moveto', 0.10)),
(c.yview, ('moveto', 0.12)),
(c.yview, ('moveto', 0.14)),
(c.yview, ('moveto', 0.16)),
(c.yview, ('moveto', 0.18)),
(c.yview, ('moveto', 0.20)),
(c.yview, ('moveto', 0.22)),
(c.yview, ('moveto', 0.24)),
(c.yview, ('moveto', 0.26)),
(c.yview, ('moveto', 0.28)),
(c.yview, ('moveto', 0.98)),
(_testYView, 1),
(c.yview, ('scroll', -1, 'page')),
(c.yview, ('scroll', -50, 'page')),
(_testYView, 0),
(c.yview, ('scroll', 1, 'page')),
(c.yview, ('scroll', 50, 'page')),
(_testYView, 1),
(c.clear, ()),
(c.get, (), '\n'),
)
kw_2 = {
'hscrollmode' : 'dynamic',
'label_text' : 'Label',
'labelpos' : 'n',
'scrollmargin': 20,
}
tests_2 = (
(c.pack, (), {'padx' : 10, 'pady' : 10, 'fill' : 'both', 'expand' : 1}),
(c.importfile, 'ScrolledText_test.py'),
('text_relief', 'raised'),
('text_relief', 'sunken'),
)
alltests = (
(tests_1, kw_1),
(tests_2, kw_2),
)
testData = ((Pmw.ScrolledText, alltests),)
if __name__ == '__main__':
Test.runTests(testData)
| mit |
SCSSG/Odoo-SCS | addons/account/report/account_aged_partner_balance.py | 73 | 21186 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class aged_trial_report(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context):
    """Register the report-rendering helper callables in the RML context."""
    super(aged_trial_report, self).__init__(cr, uid, name, context=context)
    # Running totals per report column: indexes 0-4 are the five aging
    # periods, 5 the grand total, 6 the future/past 'direction' column.
    # Filled lazily by _get_lines()/_get_lines_with_out_partner().
    self.total_account = []
    # Every key below becomes callable from the report template.
    self.localcontext.update({
        'time': time,
        'get_lines_with_out_partner': self._get_lines_with_out_partner,
        'get_lines': self._get_lines,
        'get_total': self._get_total,
        'get_direction': self._get_direction,
        'get_for_period': self._get_for_period,
        'get_company': self._get_company,
        'get_currency': self._get_currency,
        'get_partners':self._get_partners,
        'get_account': self._get_account,
        'get_fiscalyear': self._get_fiscalyear,
        'get_target_move': self._get_target_move,
    })
def set_context(self, objects, data, ids, report_type=None):
    """Cache the wizard form values used by the line-building queries."""
    move_line_obj = self.pool.get('account.move.line')
    ctx = data['form'].get('used_context', {})
    ctx.update({'fiscalyear': False, 'all_fiscalyear': True})
    # SQL fragment restricting move lines to the selected context.
    self.query = move_line_obj._query_get(self.cr, self.uid, obj='l', context=ctx)
    self.direction_selection = data['form'].get('direction_selection', 'past')
    self.target_move = data['form'].get('target_move', 'all')
    self.date_from = data['form'].get('date_from', time.strftime('%Y-%m-%d'))
    # Map the wizard's partner selection onto account types to aggregate.
    selection = data['form']['result_selection']
    if selection == 'customer':
        self.ACCOUNT_TYPE = ['receivable']
    elif selection == 'supplier':
        self.ACCOUNT_TYPE = ['payable']
    else:
        self.ACCOUNT_TYPE = ['payable', 'receivable']
    return super(aged_trial_report, self).set_context(objects, data, ids, report_type=report_type)
def _get_lines(self, form):
    """Build one aged-balance line (dict) per partner.

    Each returned dict carries: 'name', 'total', 'direction' (amount falling
    before/after the analyzed window) and keys '0'..'4' for the five aging
    periods defined in *form*.  Column totals are accumulated into
    self.total_account as a side effect.
    """
    res = []
    move_state = ['draft','posted']
    if self.target_move == 'posted':
        move_state = ['posted']
    # All partners having at least one unreconciled (or recently reconciled)
    # move line on the selected account types.
    self.cr.execute('SELECT DISTINCT res_partner.id AS id,\
        res_partner.name AS name \
        FROM res_partner,account_move_line AS l, account_account, account_move am\
        WHERE (l.account_id=account_account.id) \
        AND (l.move_id=am.id) \
        AND (am.state IN %s)\
        AND (account_account.type IN %s)\
        AND account_account.active\
        AND ((reconcile_id IS NULL)\
        OR (reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
        AND (l.partner_id=res_partner.id)\
        AND (l.date <= %s)\
        AND ' + self.query + ' \
        ORDER BY res_partner.name', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
    partners = self.cr.dictfetchall()
    ## reset the running column totals (5 periods + total + direction) to 0
    for i in range(7):
        self.total_account.append(0)
    #
    # Build a string like (1,2,3) for easy use in SQL query
    partner_ids = [x['id'] for x in partners]
    if not partner_ids:
        return []
    # This dictionary will store the debit-credit for all partners, using partner_id as key.
    totals = {}
    self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
        FROM account_move_line AS l, account_account, account_move am \
        WHERE (l.account_id = account_account.id) AND (l.move_id=am.id) \
        AND (am.state IN %s)\
        AND (account_account.type IN %s)\
        AND (l.partner_id IN %s)\
        AND ((l.reconcile_id IS NULL)\
        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
        AND ' + self.query + '\
        AND account_account.active\
        AND (l.date <= %s)\
        GROUP BY l.partner_id ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids), self.date_from, self.date_from,))
    t = self.cr.fetchall()
    for i in t:
        totals[i[0]] = i[1]
    # This dictionary will store the future or past of all partners
    future_past = {}
    if self.direction_selection == 'future':
        self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
            FROM account_move_line AS l, account_account, account_move am \
            WHERE (l.account_id=account_account.id) AND (l.move_id=am.id) \
            AND (am.state IN %s)\
            AND (account_account.type IN %s)\
            AND (COALESCE(l.date_maturity, l.date) < %s)\
            AND (l.partner_id IN %s)\
            AND ((l.reconcile_id IS NULL)\
            OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
            AND '+ self.query + '\
            AND account_account.active\
            AND (l.date <= %s)\
            GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids),self.date_from, self.date_from,))
        t = self.cr.fetchall()
        for i in t:
            future_past[i[0]] = i[1]
    elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
        self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
            FROM account_move_line AS l, account_account, account_move am \
            WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
            AND (am.state IN %s)\
            AND (account_account.type IN %s)\
            AND (COALESCE(l.date_maturity,l.date) > %s)\
            AND (l.partner_id IN %s)\
            AND ((l.reconcile_id IS NULL)\
            OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
            AND '+ self.query + '\
            AND account_account.active\
            AND (l.date <= %s)\
            GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids), self.date_from, self.date_from,))
        t = self.cr.fetchall()
        for i in t:
            future_past[i[0]] = i[1]
    # Use one query per period and store results in history (a list variable)
    # Each history will contain: history[1] = {'<partner_id>': <partner_debit-credit>}
    history = []
    for i in range(5):
        args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids),self.date_from,)
        # Build the maturity-date filter for this period from the wizard form.
        dates_query = '(COALESCE(l.date_maturity,l.date)'
        if form[str(i)]['start'] and form[str(i)]['stop']:
            dates_query += ' BETWEEN %s AND %s)'
            args_list += (form[str(i)]['start'], form[str(i)]['stop'])
        elif form[str(i)]['start']:
            dates_query += ' >= %s)'
            args_list += (form[str(i)]['start'],)
        else:
            dates_query += ' <= %s)'
            args_list += (form[str(i)]['stop'],)
        args_list += (self.date_from,)
        self.cr.execute('''SELECT l.partner_id, SUM(l.debit-l.credit), l.reconcile_partial_id
            FROM account_move_line AS l, account_account, account_move am
            WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)
            AND (am.state IN %s)
            AND (account_account.type IN %s)
            AND (l.partner_id IN %s)
            AND ((l.reconcile_id IS NULL)
            OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))
            AND ''' + self.query + '''
            AND account_account.active
            AND ''' + dates_query + '''
            AND (l.date <= %s)
            GROUP BY l.partner_id, l.reconcile_partial_id''', args_list)
        partners_partial = self.cr.fetchall()
        partners_amount = dict((i[0],0) for i in partners_partial)
        for partner_info in partners_partial:
            if partner_info[2]:
                # in case of partial reconciliation, we want to keep the left amount in the oldest period
                self.cr.execute('''SELECT MIN(COALESCE(date_maturity,date)) FROM account_move_line WHERE reconcile_partial_id = %s''', (partner_info[2],))
                date = self.cr.fetchall()
                partial = False
                if 'BETWEEN' in dates_query:
                    partial = date and args_list[-3] <= date[0][0] <= args_list[-2]
                elif '>=' in dates_query:
                    partial = date and date[0][0] >= form[str(i)]['start']
                else:
                    partial = date and date[0][0] <= form[str(i)]['stop']
                if partial:
                    # partial reconcilation
                    # NOTE(review): operator precedence makes this the whole
                    # template when direction is 'past' but the bare string
                    # '>=' otherwise — confirm the 'future' branch is intended.
                    limit_date = 'COALESCE(l.date_maturity,l.date) %s %%s' % '<=' if self.direction_selection == 'past' else '>='
                    self.cr.execute('''SELECT SUM(l.debit-l.credit)
                        FROM account_move_line AS l, account_move AS am
                        WHERE l.move_id = am.id AND am.state in %s
                        AND l.reconcile_partial_id = %s
                        AND ''' + limit_date, (tuple(move_state), partner_info[2], self.date_from))
                    unreconciled_amount = self.cr.fetchall()
                    partners_amount[partner_info[0]] += unreconciled_amount[0][0]
            else:
                partners_amount[partner_info[0]] += partner_info[1]
        history.append(partners_amount)
    for partner in partners:
        values = {}
        ## If the chosen selection is in the future
        if self.direction_selection == 'future':
            # Query here is replaced by one query which gets the all the partners their 'before' value
            before = False
            if future_past.has_key(partner['id']):
                before = [ future_past[partner['id']] ]
            self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
            values['direction'] = before and before[0] or 0.0
        elif self.direction_selection == 'past': # Changed this so people could in the future create new direction_selections
            # Query here is replaced by one query which gets the all the partners their 'after' value
            after = False
            if future_past.has_key(partner['id']): # Making sure this partner actually was found by the query
                after = [ future_past[partner['id']] ]
            self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
            values['direction'] = after and after[0] or 0.0
        for i in range(5):
            during = False
            if history[i].has_key(partner['id']):
                during = [ history[i][partner['id']] ]
            # Accumulate into the per-period column counter
            self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
            values[str(i)] = during and during[0] or 0.0
        total = False
        if totals.has_key( partner['id'] ):
            total = [ totals[partner['id']] ]
        values['total'] = total and total[0] or 0.0
        ## Add for total
        self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
        values['name'] = partner['name']
        res.append(values)
    # NOTE(review): the totals computed below are never used before the
    # return — apparently dead code kept for compatibility.
    total = 0.0
    totals = {}
    for r in res:
        total += float(r['total'] or 0.0)
        for i in range(5)+['direction']:
            totals.setdefault(str(i), 0.0)
            totals[str(i)] += float(r[str(i)] or 0.0)
    return res
def _get_lines_with_out_partner(self, form):
    """Build the single 'Unknown Partner' aged-balance line.

    Same shape as _get_lines() but aggregates the move lines whose
    partner_id IS NULL; returns a one-element (or empty) list and
    accumulates into self.total_account as a side effect.
    """
    res = []
    move_state = ['draft','posted']
    if self.target_move == 'posted':
        move_state = ['posted']
    ## reset the running column totals (5 periods + total + direction) to 0
    for i in range(7):
        self.total_account.append(0)
    totals = {}
    # Overall balance of partner-less lines.
    self.cr.execute('SELECT SUM(l.debit-l.credit) \
        FROM account_move_line AS l, account_account, account_move am \
        WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
        AND (am.state IN %s)\
        AND (l.partner_id IS NULL)\
        AND (account_account.type IN %s)\
        AND ((l.reconcile_id IS NULL) \
        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
        AND ' + self.query + '\
        AND (l.date <= %s)\
        AND account_account.active ',(tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
    t = self.cr.fetchall()
    for i in t:
        totals['Unknown Partner'] = i[0]
    # Amount falling before/after the analyzed window ('direction' column).
    future_past = {}
    if self.direction_selection == 'future':
        self.cr.execute('SELECT SUM(l.debit-l.credit) \
            FROM account_move_line AS l, account_account, account_move am\
            WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
            AND (am.state IN %s)\
            AND (l.partner_id IS NULL)\
            AND (account_account.type IN %s)\
            AND (COALESCE(l.date_maturity, l.date) < %s)\
            AND ((l.reconcile_id IS NULL)\
            OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
            AND '+ self.query + '\
            AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
        t = self.cr.fetchall()
        for i in t:
            future_past['Unknown Partner'] = i[0]
    elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
        self.cr.execute('SELECT SUM(l.debit-l.credit) \
            FROM account_move_line AS l, account_account, account_move am \
            WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
            AND (am.state IN %s)\
            AND (l.partner_id IS NULL)\
            AND (account_account.type IN %s)\
            AND (COALESCE(l.date_maturity,l.date) > %s)\
            AND ((l.reconcile_id IS NULL)\
            OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
            AND '+ self.query + '\
            AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
        t = self.cr.fetchall()
        for i in t:
            future_past['Unknown Partner'] = i[0]
    # One query per aging period; history[i] maps 'Unknown Partner' to the
    # period's balance.
    history = []
    for i in range(5):
        args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from,)
        dates_query = '(COALESCE(l.date_maturity,l.date)'
        # NOTE(review): bounds here are exclusive (>, <) whereas _get_lines
        # uses inclusive bounds (>=, <=) — confirm whether intentional.
        if form[str(i)]['start'] and form[str(i)]['stop']:
            dates_query += ' BETWEEN %s AND %s)'
            args_list += (form[str(i)]['start'], form[str(i)]['stop'])
        elif form[str(i)]['start']:
            dates_query += ' > %s)'
            args_list += (form[str(i)]['start'],)
        else:
            dates_query += ' < %s)'
            args_list += (form[str(i)]['stop'],)
        args_list += (self.date_from,)
        self.cr.execute('SELECT SUM(l.debit-l.credit)\
            FROM account_move_line AS l, account_account, account_move am \
            WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
            AND (am.state IN %s)\
            AND (account_account.type IN %s)\
            AND (l.partner_id IS NULL)\
            AND ((l.reconcile_id IS NULL)\
            OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
            AND '+ self.query + '\
            AND account_account.active\
            AND ' + dates_query + '\
            AND (l.date <= %s)\
            GROUP BY l.partner_id', args_list)
        t = self.cr.fetchall()
        d = {}
        for i in t:
            d['Unknown Partner'] = i[0]
        history.append(d)
    values = {}
    ## If the chosen selection is in the future
    if self.direction_selection == 'future':
        before = False
        if future_past.has_key('Unknown Partner'):
            before = [ future_past['Unknown Partner'] ]
        self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
        values['direction'] = before and before[0] or 0.0
    elif self.direction_selection == 'past':
        after = False
        if future_past.has_key('Unknown Partner'):
            after = [ future_past['Unknown Partner'] ]
        self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
        values['direction'] = after and after[0] or 0.0
    for i in range(5):
        during = False
        if history[i].has_key('Unknown Partner'):
            during = [ history[i]['Unknown Partner'] ]
        # Accumulate into the per-period column counter
        self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
        values[str(i)] = during and during[0] or 0.0
    total = False
    if totals.has_key( 'Unknown Partner' ):
        total = [ totals['Unknown Partner'] ]
    values['total'] = total and total[0] or 0.0
    ## Add for total
    self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
    values['name'] = 'Unknown Partner'
    if values['total']:
        res.append(values)
    # NOTE(review): the totals computed below are never used before the
    # return — apparently dead code kept for compatibility.
    total = 0.0
    totals = {}
    for r in res:
        total += float(r['total'] or 0.0)
        for i in range(5)+['direction']:
            totals.setdefault(str(i), 0.0)
            totals[str(i)] += float(r[str(i)] or 0.0)
    return res
def _get_total(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_direction(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_for_period(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_partners(self, data):
    """Translated label for the selected account-type filter ('' if unknown)."""
    # TODO: deprecated, to remove in trunk
    labels = {
        'customer': 'Receivable Accounts',
        'supplier': 'Payable Accounts',
        'customer_supplier': 'Receivable and Payable Accounts',
    }
    selection = data['form']['result_selection']
    if selection in labels:
        return self._translate(labels[selection])
    return ''
class report_agedpartnerbalance(osv.AbstractModel):
    """QWeb report model exposing the aged partner balance."""
    _name = 'report.account.report_agedpartnerbalance'
    _inherit = 'report.abstract_report'
    # QWeb template rendered with the values computed by the parser below.
    _template = 'account.report_agedpartnerbalance'
    # Legacy RML parser reused to compute the report values.
    _wrapped_report_class = aged_trial_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ihsanudin/odoo | addons/mail/mail_vote.py | 439 | 1647 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class mail_vote(osv.Model):
    ''' Mail vote feature allows users to like and unlike messages attached
        to a document. This allows for example to build a ranking-based
        displaying of messages, for FAQ. '''
    _name = 'mail.vote'
    _description = 'Mail Vote'
    _columns = {
        # Voted message; deleting the message also deletes its votes.
        'message_id': fields.many2one('mail.message', 'Message', select=1,
            ondelete='cascade', required=True),
        # Voting user; deleting the user also deletes his/her votes.
        'user_id': fields.many2one('res.users', 'User', select=1,
            ondelete='cascade', required=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
a-tsvetkov/lucid-python-django-admin-tools | admin_tools/dashboard/tests.py | 17 | 1284 | from tempfile import mktemp
from unittest import TestCase
from django.test import TestCase as DjangoTestCase
from django.core import management
from django.contrib.auth import models as auth_models
from admin_tools.dashboard import AppIndexDashboard
from admin_tools.dashboard.modules import DashboardModule, Group
class ManagementCommandTest(DjangoTestCase):
    """Tests for the ``customdashboard`` management command."""

    def test_customdashboard(self):
        """The command succeeds on a new path and fails on an existing file."""
        file_name = mktemp()
        # First run must not raise: the target file does not exist yet.
        management.call_command('customdashboard', file=file_name)
        # Second run must fail because the file is already there.
        # The original wrapped ``call_command(...); assert False`` in a bare
        # ``except:``, which also swallowed the AssertionError from
        # ``assert False`` — the test could never fail.  assertRaises checks
        # the failure without that hole.
        with self.assertRaises(Exception):
            management.call_command('customdashboard', file=file_name)
class AppIndexDashboardTest(TestCase):
    """Tests for AppIndexDashboard model-path resolution."""

    def test_models(self):
        """Dotted model paths resolve to the actual model classes."""
        model_paths = ['django.contrib.auth.models.User',
                       'django.contrib.auth.models.Group']
        dashboard = AppIndexDashboard('Auth', model_paths)
        expected = [auth_models.User, auth_models.Group]
        self.assertEqual(dashboard.get_app_model_classes(), expected)
__test__ = {
'DashboardModule.is_empty': DashboardModule.is_empty,
'DashboardModule.render_css_classes': DashboardModule.render_css_classes,
'Group.is_empty': Group.is_empty,
}
| mit |
le9i0nx/ansible | lib/ansible/modules/commands/telnet.py | 24 | 2332 | # this is a virtual module that is entirely implemented server side
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: telnet
short_description: Executes a low-down and dirty telnet command
version_added: 2.4
description:
- Executes a low-down and dirty telnet command, not going through the module subsystem.
- This is mostly to be used for enabling ssh on devices that only have telnet enabled by default.
options:
command:
description:
- List of commands to be executed in the telnet session.
required: True
aliases: ['commands']
host:
description:
- The host/target on which to execute the command
required: False
default: remote_addr
user:
description:
- The user for login
required: False
default: remote_user
password:
description:
- The password for login
port:
description:
- Remote port to use
default: 23
timeout:
description:
- timeout for remote operations
default: 120
prompts:
description:
- List of prompts expected before sending next command
required: False
default: ['$']
pause:
description:
- Seconds to pause between each command issued
required: False
default: 1
notes:
- The C(environment) keyword does not work with this task
author:
- Ansible Core Team
'''
EXAMPLES = '''
- name: send configuration commands to IOS
telnet:
user: cisco
password: cisco
login_prompt: "Username: "
prompts:
- "[>|#]"
command:
- terminal length 0
- configure terminal
- hostname ios01
- name: run show commands
telnet:
user: cisco
password: cisco
login_prompt: "Username: "
prompts:
- "[>|#]"
command:
- terminal length 0
- show version
'''
RETURN = '''
output:
description: output of each command is an element in this list
type: list
returned: always
sample: [ 'success', 'success', '', 'warning .. something' ]
'''
| gpl-3.0 |
mlavin/django | django/middleware/clickjacking.py | 82 | 1733 | """
Clickjacking Protection Middleware.
This module provides a middleware that implements protection against a
malicious site loading resources from your site in a hidden frame.
"""
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin
class XFrameOptionsMiddleware(MiddlewareMixin):
    """
    Set the X-Frame-Options HTTP header in HTTP responses.

    The header is left alone when the response already carries one, or when
    the response was marked exempt (xframe_options_exempt = True).

    By default the header value is 'SAMEORIGIN', so the response may only be
    framed by pages of the same site; set X_FRAME_OPTIONS = 'DENY' in the
    project settings to forbid framing entirely.
    """

    def process_response(self, request, response):
        """Attach X-Frame-Options unless the response opted out or set it."""
        header_present = response.get('X-Frame-Options') is not None
        exempt = getattr(response, 'xframe_options_exempt', False)
        if not header_present and not exempt:
            response['X-Frame-Options'] = self.get_xframe_options_value(
                request, response)
        return response

    def get_xframe_options_value(self, request, response):
        """
        Return the header value: the X_FRAME_OPTIONS setting upper-cased,
        or 'SAMEORIGIN' when the setting is absent.  Override to vary the
        value per request/response.
        """
        return getattr(settings, 'X_FRAME_OPTIONS', 'SAMEORIGIN').upper()
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/scipy/io/wavfile.py | 22 | 12402 | """
Module to read / write wav files using numpy arrays
Functions
---------
`read`: Return the sample rate (in samples/sec) and data from a WAV file.
`write`: Write a numpy array as a WAV file.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy
import struct
import warnings
__all__ = [
'WavFileWarning',
'read',
'write'
]
class WavFileWarning(UserWarning):
    """Warning category for readable but non-compliant WAV files."""
    pass
WAVE_FORMAT_PCM = 0x0001
WAVE_FORMAT_IEEE_FLOAT = 0x0003
WAVE_FORMAT_EXTENSIBLE = 0xfffe
KNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT)
# assumes file pointer is immediately
# after the 'fmt ' id
def _read_fmt_chunk(fid, is_big_endian):
"""
Returns
-------
size : int
size of format subchunk in bytes (minus 8 for "fmt " and itself)
format_tag : int
PCM, float, or compressed format
channels : int
number of channels
fs : int
sampling frequency in samples per second
bytes_per_second : int
overall byte rate for the file
block_align : int
bytes per sample, including all channels
bit_depth : int
bits per sample
"""
if is_big_endian:
fmt = '>'
else:
fmt = '<'
size = res = struct.unpack(fmt+'I', fid.read(4))[0]
bytes_read = 0
if size < 16:
raise ValueError("Binary structure of wave file is not compliant")
res = struct.unpack(fmt+'HHIIHH', fid.read(16))
bytes_read += 16
format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res
if format_tag == WAVE_FORMAT_EXTENSIBLE and size >= (16+2):
ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
bytes_read += 2
if ext_chunk_size >= 22:
extensible_chunk_data = fid.read(22)
bytes_read += 22
raw_guid = extensible_chunk_data[2+4:2+4+16]
# GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)
# MS GUID byte order: first three groups are native byte order,
# rest is Big Endian
if is_big_endian:
tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
else:
tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
if raw_guid.endswith(tail):
format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]
else:
raise ValueError("Binary structure of wave file is not compliant")
if format_tag not in KNOWN_WAVE_FORMATS:
raise ValueError("Unknown wave file format")
# move file pointer to next chunk
if size > (bytes_read):
fid.read(size - bytes_read)
return (size, format_tag, channels, fs, bytes_per_second, block_align,
bit_depth)
# assumes file pointer is immediately after the 'data' id
def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian,
mmap=False):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
# Size of the data subchunk in bytes
size = struct.unpack(fmt, fid.read(4))[0]
# Number of bytes per sample
bytes_per_sample = bit_depth//8
if bit_depth == 8:
dtype = 'u1'
else:
if is_big_endian:
dtype = '>'
else:
dtype = '<'
if format_tag == WAVE_FORMAT_PCM:
dtype += 'i%d' % bytes_per_sample
else:
dtype += 'f%d' % bytes_per_sample
if not mmap:
data = numpy.fromstring(fid.read(size), dtype=dtype)
else:
start = fid.tell()
data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
shape=(size//bytes_per_sample,))
fid.seek(start + size)
if channels > 1:
data = data.reshape(-1, channels)
return data
def _skip_unknown_chunk(fid, is_big_endian):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
data = fid.read(4)
# call unpack() and seek() only if we have really read data from file
# otherwise empty read at the end of the file would trigger
# unnecessary exception at unpack() call
# in case data equals somehow to 0, there is no need for seek() anyway
if data:
size = struct.unpack(fmt, data)[0]
fid.seek(size, 1)
def _read_riff_chunk(fid):
str1 = fid.read(4) # File signature
if str1 == b'RIFF':
is_big_endian = False
fmt = '<I'
elif str1 == b'RIFX':
is_big_endian = True
fmt = '>I'
else:
# There are also .wav files with "FFIR" or "XFIR" signatures?
raise ValueError("File format {}... not "
"understood.".format(repr(str1)))
# Size of entire file
file_size = struct.unpack(fmt, fid.read(4))[0] + 8
str2 = fid.read(4)
if str2 != b'WAVE':
raise ValueError("Not a WAV file.")
return file_size, is_big_endian
def read(filename, mmap=False):
    """
    Open a WAV file
    Return the sample rate (in samples/sec) and data from a WAV file.
    Parameters
    ----------
    filename : string or open file handle
        Input wav file.
    mmap : bool, optional
        Whether to read data as memory-mapped.
        Only to be used on real files (Default: False).
        .. versionadded:: 0.12.0
    Returns
    -------
    rate : int
        Sample rate of wav file.
    data : numpy array
        Data read from wav file. Data-type is determined from the file;
        see Notes.
    Notes
    -----
    This function cannot read wav files with 24-bit data.
    Common data types: [1]_
    ===================== =========== =========== =============
         WAV format            Min          Max       NumPy dtype
    ===================== =========== =========== =============
    32-bit floating-point -1.0         +1.0        float32
    32-bit PCM            -2147483648  +2147483647 int32
    16-bit PCM            -32768       +32767      int16
    8-bit PCM             0            255         uint8
    ===================== =========== =========== =============
    Note that 8-bit PCM is unsigned.
    References
    ----------
    .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
       Interface and Data Specifications 1.0", section "Data Format of the
       Samples", August 1991
       http://www-mmsp.ece.mcgill.ca/documents/audioformats/wave/Docs/riffmci.pdf
    """
    # Accept either a path or an already-open file object; mmap requires a
    # real file, so it is disabled for file-like objects.
    if hasattr(filename, 'read'):
        fid = filename
        mmap = False
    else:
        fid = open(filename, 'rb')
    try:
        file_size, is_big_endian = _read_riff_chunk(fid)
        fmt_chunk_received = False
        # Defaults, overwritten once a 'fmt ' chunk is parsed.
        # NOTE(review): if the file contains no 'fmt ' chunk, `fs` is never
        # bound and the final return raises NameError; likewise `data` if
        # there is no 'data' chunk.  Left as-is to preserve behavior.
        channels = 1
        bit_depth = 8
        format_tag = WAVE_FORMAT_PCM
        # Walk the chunk list until the declared end of the RIFF container.
        while fid.tell() < file_size:
            # read the next chunk
            chunk_id = fid.read(4)
            if chunk_id == b'fmt ':
                fmt_chunk_received = True
                fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
                format_tag, channels, fs = fmt_chunk[1:4]
                bit_depth = fmt_chunk[6]
                if bit_depth not in (8, 16, 32, 64, 96, 128):
                    raise ValueError("Unsupported bit depth: the wav file "
                                     "has {}-bit data.".format(bit_depth))
            elif chunk_id == b'fact':
                _skip_unknown_chunk(fid, is_big_endian)
            elif chunk_id == b'data':
                # The 'fmt ' chunk must precede 'data' per the RIFF spec.
                if not fmt_chunk_received:
                    raise ValueError("No fmt chunk before data")
                data = _read_data_chunk(fid, format_tag, channels, bit_depth,
                                        is_big_endian, mmap)
            elif chunk_id == b'LIST':
                # Someday this could be handled properly but for now skip it
                _skip_unknown_chunk(fid, is_big_endian)
            elif chunk_id in (b'JUNK', b'Fake'):
                # Skip alignment chunks without warning
                _skip_unknown_chunk(fid, is_big_endian)
            else:
                warnings.warn("Chunk (non-data) not understood, skipping it.",
                              WavFileWarning)
                _skip_unknown_chunk(fid, is_big_endian)
    finally:
        # Close files we opened; rewind caller-provided file objects so the
        # caller sees a freshly-positioned stream.
        if not hasattr(filename, 'read'):
            fid.close()
        else:
            fid.seek(0)
    return fs, data
def write(filename, rate, data):
    """
    Write a numpy array as a WAV file.
    Parameters
    ----------
    filename : string or open file handle
        Output wav file.
    rate : int
        The sample rate (in samples/sec).
    data : ndarray
        A 1-D or 2-D numpy array of either integer or float data-type.
    Notes
    -----
    * Writes a simple uncompressed WAV file.
    * To write multiple-channels, use a 2-D array of shape
      (Nsamples, Nchannels).
    * The bits-per-sample and PCM/float will be determined by the data-type.
    Common data types: [1]_
    ===================== =========== =========== =============
         WAV format            Min          Max       NumPy dtype
    ===================== =========== =========== =============
    32-bit floating-point -1.0         +1.0        float32
    32-bit PCM            -2147483648  +2147483647 int32
    16-bit PCM            -32768       +32767      int16
    8-bit PCM             0            255         uint8
    ===================== =========== =========== =============
    Note that 8-bit PCM is unsigned.
    References
    ----------
    .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
       Interface and Data Specifications 1.0", section "Data Format of the
       Samples", August 1991
       http://www-mmsp.ece.mcgill.ca/documents/audioformats/wave/Docs/riffmci.pdf
    """
    # Accept either a path or an already-open (binary, writable) file object.
    if hasattr(filename, 'write'):
        fid = filename
    else:
        fid = open(filename, 'wb')
    fs = rate
    try:
        # Only signed ints, floats, and 8-bit unsigned ints are writable.
        dkind = data.dtype.kind
        if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
                                                 data.dtype.itemsize == 1)):
            raise ValueError("Unsupported data type '%s'" % data.dtype)
        header_data = b''
        header_data += b'RIFF'
        # Placeholder for the total file size; patched after writing data.
        header_data += b'\x00\x00\x00\x00'
        header_data += b'WAVE'
        # fmt chunk
        header_data += b'fmt '
        if dkind == 'f':
            format_tag = WAVE_FORMAT_IEEE_FLOAT
        else:
            format_tag = WAVE_FORMAT_PCM
        if data.ndim == 1:
            channels = 1
        else:
            channels = data.shape[1]
        bit_depth = data.dtype.itemsize * 8
        bytes_per_second = fs*(bit_depth // 8)*channels
        block_align = channels * (bit_depth // 8)
        # WAVEFORMAT header: tag, channels, rate, byte rate, align, depth,
        # always little-endian on disk.
        fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
                                     bytes_per_second, block_align, bit_depth)
        if not (dkind == 'i' or dkind == 'u'):
            # add cbSize field for non-PCM files
            fmt_chunk_data += b'\x00\x00'
        header_data += struct.pack('<I', len(fmt_chunk_data))
        header_data += fmt_chunk_data
        # fact chunk (non-PCM files)
        if not (dkind == 'i' or dkind == 'u'):
            header_data += b'fact'
            header_data += struct.pack('<II', 4, data.shape[0])
        # check data size (needs to be immediately before the data chunk)
        # RIFF sizes are 32-bit, so the whole file must fit in 4 GiB.
        if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
            raise ValueError("Data exceeds wave file size limit")
        fid.write(header_data)
        # data chunk
        fid.write(b'data')
        fid.write(struct.pack('<I', data.nbytes))
        # Samples must be little-endian on disk; swap big-endian arrays.
        if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
                                           sys.byteorder == 'big'):
            data = data.byteswap()
        _array_tofile(fid, data)
        # Determine file size and place it in correct
        # position at start of the file.
        size = fid.tell()
        fid.seek(4)
        fid.write(struct.pack('<I', size-8))
    finally:
        # Close files we opened; rewind caller-provided file objects.
        if not hasattr(filename, 'write'):
            fid.close()
        else:
            fid.seek(0)
if sys.version_info[0] >= 3:
    def _array_tofile(fid, data):
        """Write the raw bytes of *data* to *fid* (Python 3 path)."""
        # ravel() yields a C-contiguous sequence and view('b') reinterprets
        # it as raw bytes, so the underlying buffer can be written directly.
        flat_bytes = data.ravel().view('b')
        fid.write(flat_bytes.data)
else:
    def _array_tofile(fid, data):
        """Write the raw bytes of *data* to *fid* (Python 2 path)."""
        fid.write(data.tostring())
| mit |
gajim/python-nbxmpp | nbxmpp/modules/misc.py | 1 | 4348 | # Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
import logging
from nbxmpp.namespaces import Namespace
from nbxmpp.protocol import NodeProcessed
from nbxmpp.protocol import InvalidFrom
from nbxmpp.protocol import InvalidStanza
from nbxmpp.protocol import Message
from nbxmpp.structs import MAMData
from nbxmpp.structs import CarbonData
from nbxmpp.modules.delay import parse_delay
log = logging.getLogger('nbxmpp.m.misc')
def unwrap_carbon(stanza, own_jid):
    """Unwrap a XEP-0280 message carbon.

    Returns (inner_message, CarbonData) for a carbon, or (stanza, None)
    when the stanza carries no carbon.  Raises InvalidFrom for spoofed
    senders and NodeProcessed for carbons that should be dropped.
    """
    carbon = stanza.getTag('received', namespace=Namespace.CARBONS)
    if carbon is None:
        carbon = stanza.getTag('sent', namespace=Namespace.CARBONS)
        if carbon is None:
            return stanza, None

    # Carbon must be from our bare jid
    if stanza.getFrom() != own_jid.new_as_bare():
        raise InvalidFrom('Invalid from: %s' % stanza.getAttr('from'))

    forwarded = carbon.getTag('forwarded', namespace=Namespace.FORWARD)
    message = Message(node=forwarded.getTag('message'))
    type_ = carbon.getName()

    # Fill missing to/from with our own bare JID
    if message.getTo() is None:
        message.setTo(own_jid.bare)
    if message.getFrom() is None:
        message.setFrom(own_jid.bare)

    if type_ == 'received':
        if message.getFrom().bare_match(own_jid):
            # Drop 'received' Carbons from ourself, we already
            # got the message with the 'sent' Carbon or via the
            # message itself
            raise NodeProcessed('Drop "received"-Carbon from ourself')
        if message.getTag('x', namespace=Namespace.MUC_USER) is not None:
            # A MUC broadcasts messages sent to us to all resources
            # there is no need to process the received carbon
            raise NodeProcessed('Drop MUC-PM "received"-Carbon')
    return message, CarbonData(type=type_)
def unwrap_mam(stanza, own_jid):
    """Unwrap a XEP-0313 (MAM) result stanza.

    Returns (archived_message, MAMData) for a MAM result, or (stanza, None)
    when the stanza is not a MAM result.  Raises InvalidStanza when the
    required 'queryid'/'id' attributes or the delay timestamp are missing.
    """
    # Prefer the MAM:2 namespace, fall back to the older MAM:1
    result = stanza.getTag('result', namespace=Namespace.MAM_2)
    if result is None:
        result = stanza.getTag('result', namespace=Namespace.MAM_1)
    if result is None:
        return stanza, None
    query_id = result.getAttr('queryid')
    if query_id is None:
        log.warning('No queryid on MAM message')
        log.warning(stanza)
        raise InvalidStanza
    id_ = result.getAttr('id')
    if id_ is None:
        log.warning('No id on MAM message')
        log.warning(stanza)
        raise InvalidStanza
    forwarded = result.getTag('forwarded', namespace=Namespace.FORWARD)
    message = Message(node=forwarded.getTag('message'))
    # Fill missing to/from
    to = message.getTo()
    if to is None:
        message.setTo(own_jid.bare)
    frm = message.getFrom()
    if frm is None:
        message.setFrom(own_jid.bare)
    # Timestamp parsing
    # Most servers dont set the 'from' attr, so we cant check for it
    delay_timestamp = parse_delay(forwarded)
    if delay_timestamp is None:
        log.warning('No timestamp on MAM message')
        log.warning(stanza)
        raise InvalidStanza
    return message, MAMData(id=id_,
                            query_id=query_id,
                            archive=stanza.getFrom(),
                            namespace=result.getNamespace(),
                            timestamp=delay_timestamp)
def build_xhtml_body(xhtml, xmllang=None):
    """Wrap an XHTML fragment in a namespaced <body> element.

    Returns the serialized body string, or None if formatting fails
    (the error is logged).
    """
    try:
        if xmllang is None:
            body = '<body xmlns="%s">%s</body>' % (Namespace.XHTML, xhtml)
        else:
            body = '<body xmlns="%s" xml:lang="%s">%s</body>' % (
                Namespace.XHTML, xmllang, xhtml)
    except Exception as error:
        log.error('Error while building xhtml node: %s', error)
        return None
    return body
| gpl-3.0 |
hdinsight/hue | desktop/core/ext-py/Django-1.6.10/django/utils/html_parser.py | 231 | 4546 | from django.utils.six.moves import html_parser as _html_parser
import re
import sys
current_version = sys.version_info
# The stdlib HTMLParser mishandles cdata sections (<script>/<style>) in
# Python < 2.7.3 and 3.0 <= version < 3.2.3; use a patched subclass there.
use_workaround = (
    (current_version < (2, 7, 3)) or
    (current_version >= (3, 0) and current_version < (3, 2, 3))
)
# Re-export so callers can catch it regardless of which parser is used.
HTMLParseError = _html_parser.HTMLParseError
if not use_workaround:
    HTMLParser = _html_parser.HTMLParser
else:
    # Matches a tag name (letters, digits, '-', '.', ':', '_') followed by
    # whitespace or a non-closing '/'.
    tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
    class HTMLParser(_html_parser.HTMLParser):
        """
        Patched version of stdlib's HTMLParser with patch from:
        http://bugs.python.org/issue670664
        """
        def __init__(self):
            _html_parser.HTMLParser.__init__(self)
            # Name of the tag that opened the current cdata section
            # (e.g. 'script'), or None outside cdata mode.
            self.cdata_tag = None
        def set_cdata_mode(self, tag):
            try:
                self.interesting = _html_parser.interesting_cdata
            except AttributeError:
                # Newer stdlib versions dropped interesting_cdata; build an
                # equivalent "stop at matching close tag" pattern.
                self.interesting = re.compile(r'</\s*%s\s*>' % tag.lower(), re.I)
            self.cdata_tag = tag.lower()
        def clear_cdata_mode(self):
            self.interesting = _html_parser.interesting_normal
            self.cdata_tag = None
        # Internal -- handle starttag, return end or -1 if not terminated
        def parse_starttag(self, i):
            self.__starttag_text = None
            endpos = self.check_for_whole_start_tag(i)
            if endpos < 0:
                return endpos
            rawdata = self.rawdata
            self.__starttag_text = rawdata[i:endpos]
            # Now parse the data between i+1 and j into a tag and attrs
            attrs = []
            match = tagfind.match(rawdata, i + 1)
            assert match, 'unexpected call to parse_starttag()'
            k = match.end()
            self.lasttag = tag = match.group(1).lower()
            while k < endpos:
                m = _html_parser.attrfind.match(rawdata, k)
                if not m:
                    break
                attrname, rest, attrvalue = m.group(1, 2, 3)
                if not rest:
                    attrvalue = None
                elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                        attrvalue[:1] == '"' == attrvalue[-1:]:
                    attrvalue = attrvalue[1:-1]
                if attrvalue:
                    attrvalue = self.unescape(attrvalue)
                attrs.append((attrname.lower(), attrvalue))
                k = m.end()
            end = rawdata[k:endpos].strip()
            if end not in (">", "/>"):
                lineno, offset = self.getpos()
                if "\n" in self.__starttag_text:
                    lineno = lineno + self.__starttag_text.count("\n")
                    offset = len(self.__starttag_text) \
                        - self.__starttag_text.rfind("\n")
                else:
                    offset = offset + len(self.__starttag_text)
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            if end.endswith('/>'):
                # XHTML-style empty tag: <span attr="value" />
                self.handle_startendtag(tag, attrs)
            else:
                self.handle_starttag(tag, attrs)
                if tag in self.CDATA_CONTENT_ELEMENTS:
                    self.set_cdata_mode(tag)  # <--------------------------- Changed
            return endpos
        # Internal -- parse endtag, return end or -1 if incomplete
        def parse_endtag(self, i):
            rawdata = self.rawdata
            assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
            match = _html_parser.endendtag.search(rawdata, i + 1)  # >
            if not match:
                return -1
            j = match.end()
            match = _html_parser.endtagfind.match(rawdata, i)  # </ + tag + >
            if not match:
                # Inside cdata, a malformed end tag is plain character data.
                if self.cdata_tag is not None:  # *** add ***
                    self.handle_data(rawdata[i:j])  # *** add ***
                    return j  # *** add ***
                self.error("bad end tag: %r" % (rawdata[i:j],))
            # --- changed start ---------------------------------------------------
            tag = match.group(1).strip()
            # A close tag that does not match the open cdata tag stays data.
            if self.cdata_tag is not None:
                if tag.lower() != self.cdata_tag:
                    self.handle_data(rawdata[i:j])
                    return j
            # --- changed end -----------------------------------------------------
            self.handle_endtag(tag.lower())
            self.clear_cdata_mode()
            return j
| apache-2.0 |
xen0l/ansible | lib/ansible/modules/network/panos/panos_interface.py | 16 | 5401 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_interface
short_description: configure data-port network interface for DHCP
description:
- Configure data-port (DP) network interface for DHCP. By default DP interfaces are static.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.org/project/pan-python/)
notes:
- Checkmode is not supported.
options:
if_name:
description:
- Name of the interface to configure.
required: true
zone_name:
description: >
Name of the zone for the interface. If the zone does not exist it is created but if the zone exists and
it is not of the layer3 type the operation will fail.
required: true
create_default_route:
description:
- Whether or not to add default route with router learned via DHCP.
default: "false"
commit:
description:
- Commit if changed
default: true
extends_documentation_fragment: panos
'''
EXAMPLES = '''
- name: enable DHCP client on ethernet1/1 in zone public
interface:
password: "admin"
ip_address: "192.168.1.1"
if_name: "ethernet1/1"
zone_name: "public"
create_default_route: "yes"
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
try:
import pan.xapi
from pan.xapi import PanXapiError
HAS_LIB = True
except ImportError:
HAS_LIB = False
# XPath templates into the PAN-OS candidate configuration tree.
# _IF_XPATH locates one ethernet interface entry ('%s' = interface name).
_IF_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
            "/network/interface/ethernet/entry[@name='%s']"
# Base path for security zone entries.
_ZONE_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
              "/vsys/entry/zone/entry"
# Finds the zone that contains a given layer3 interface member.
_ZONE_XPATH_QUERY = _ZONE_XPATH + "[network/layer3/member/text()='%s']"
# Finds a specific interface member inside a named zone.
_ZONE_XPATH_IF = _ZONE_XPATH + "[@name='%s']/network/layer3/member[text()='%s']"
# Base path for virtual router entries.
_VR_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
            "/network/virtual-router/entry"
def add_dhcp_if(xapi, if_name, zone_name, create_default_route):
    """Create a layer3 DHCP-client interface and wire it up.

    Creates the interface entry, adds it to *zone_name* and to the
    'default' virtual router.  Always returns True.
    """
    cdr = 'yes' if create_default_route else 'no'
    # Adjacent string literals concatenate to a single XML template.
    if_xml = (
        '<entry name="%s">'
        '<layer3>'
        '<dhcp-client>'
        '<create-default-route>%s</create-default-route>'
        '</dhcp-client>'
        '</layer3>'
        '</entry>'
    ) % (if_name, cdr)
    xapi.edit(xpath=_IF_XPATH % if_name, element=if_xml)
    xapi.set(xpath=_ZONE_XPATH + "[@name='%s']/network/layer3" % zone_name,
             element='<member>%s</member>' % if_name)
    xapi.set(xpath=_VR_XPATH + "[@name='default']/interface",
             element='<member>%s</member>' % if_name)
    return True
def if_exists(xapi, if_name):
    """Return True if *if_name* already has a layer3 node in the config."""
    xapi.get(xpath=_IF_XPATH % if_name)
    return xapi.element_root.find('.//layer3') is not None
def main():
    """Ansible module entry point: ensure a DHCP-client data interface."""
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        username=dict(default='admin'),
        if_name=dict(required=True),
        zone_name=dict(required=True),
        create_default_route=dict(type='bool', default=False),
        commit=dict(type='bool', default=True)
    )
    # Check mode is unsupported: every change goes straight to the device.
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')
    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    # API session with the firewall's management interface.
    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )
    if_name = module.params['if_name']
    zone_name = module.params['zone_name']
    create_default_route = module.params['create_default_route']
    commit = module.params['commit']
    # Idempotency: an existing interface is left untouched.
    ifexists = if_exists(xapi, if_name)
    if ifexists:
        module.exit_json(changed=False, msg="interface exists, not changed")
    try:
        changed = add_dhcp_if(xapi, if_name, zone_name, create_default_route)
    except PanXapiError as exc:
        module.fail_json(msg=to_native(exc))
    # Synchronous commit so the change is active before we report success.
    if changed and commit:
        xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
    module.exit_json(changed=changed, msg="okey dokey")
if __name__ == '__main__':
    main()
| gpl-3.0 |
llenfest/programingworkshop | Python/pandas_and_parallel/plotting.py | 8 | 2827 | import pandas as pd
import os
import matplotlib.pyplot as plt
import datetime as dt
import numpy as np
from scipy import interpolate
from mpl_toolkits.basemap import Basemap, cm
def sfc_plot(starttime, endtime, variables, variablest, locations,
             met, xi, yi, xmin, xmax, ymin, ymax):
    ''' Script for plotting the mesonet data with wind barbs over a
    county map in a given time interval

    variables: short name used for the output directory/colormap choice
    variablest: [colorbar label, column name to contour]
    locations: currently unused by this function
    met: observations indexed by time
         (assumes columns 'Lon', 'Lat', 'u', 'v' -- TODO confirm schema)
    xi, yi: target grid for interpolation
    xmin..ymax: map extent in degrees
    '''
    # One frame per 5-minute step in [starttime, endtime)
    interval = int((endtime - starttime).total_seconds()/300)
    # Fixed contour levels across all frames so the colorbar is comparable
    z_max = np.max(met[variablest[1]])
    z_min = np.min(met[variablest[1]])
    levels = np.arange(z_min, z_max+0.1, 0.1)
    shapefile = 'UScounties/UScounties'
    if not os.path.exists('%s' %(variables)):
        os.makedirs('%s' %(variables))
    for i in range(interval):
        time_selection = starttime + dt.timedelta(minutes=5*i)
        # NOTE(review): DataFrame.ix was removed in pandas 1.0; these
        # lookups need .loc on modern pandas -- verify before upgrading.
        # Interpolate station observations onto the regular grid.
        zi = interpolate.griddata((met.ix[time_selection]['Lon'],
                                   met.ix[time_selection]['Lat']),
                                  met.ix[time_selection][variablest[1]],
                                  (xi, yi), method='linear')
        maps = Basemap(llcrnrlon=xmin, llcrnrlat=ymin,
                       urcrnrlon=xmax, urcrnrlat=ymax, projection='cyl')
        maps.readshapefile(shapefile, name='counties')
        # Colormap varies per field type
        if (variables == 'dew_point'):
            maps.contourf(xi, yi, zi, levels, cmap=plt.cm.gist_earth_r)
        if (variables == 'temperature'):
            maps.contourf(xi, yi, zi, levels, cmap=plt.cm.jet)
        if variables == 'rainfall':
            maps.contourf(xi, yi, zi, levels, cmap=plt.cm.YlGn)
        if ((variables == 'pressure') or (variables == 'wind_speed') or
                (variables == 'gust_speed')):
            maps.contourf(xi, yi, zi, levels, cmap=plt.cm.gist_earth)
        c = plt.colorbar()
        c.set_label(variablest[0])
        # Station locations
        maps.scatter(met.ix[time_selection]['Lon'],
                     met.ix[time_selection]['Lat'], latlon=True, marker='o', c='b', s=5)
        # Wind barbs; 1.94384 converts m/s to knots
        maps.barbs(met.ix[time_selection]['Lon'],
                   met.ix[time_selection]['Lat'],
                   met.ix[time_selection]['u'].values*1.94384,
                   met.ix[time_selection]['v'].values*1.94384, latlon=True)
        maps.drawparallels(np.arange(31.,36,1.), color='0.5',
                           labels=[1,0,0,0], fontsize=10)
        maps.drawmeridians(np.arange(-104.,-98.,1.), color='0.5',
                           labels=[0,0,0,1], fontsize=10)
        plt.title(variablest[1])
        filename = '%s_%s.png' % (variables,
                                  time_selection.strftime('%Y%m%d_%H%M'))
        plt.tight_layout()
        plt.savefig(variables + '/' + filename, dpi=150)
        plt.clf()
| mit |
jansel/opentuner | opentuner/search/manipulator.py | 1 | 59013 | from __future__ import division
# vim: tabstop=2 shiftwidth=2 softtabstop=2 expandtab autoindent smarttab
from builtins import str
from builtins import map
from builtins import range
from past.utils import old_div
from builtins import object
import abc
import collections
import copy
import hashlib
import json
import logging
import math
import os
import pickle
import random
from fn import _
import argparse
from datetime import datetime
import numpy
import inspect
import sys
from future.utils import with_metaclass
from functools import reduce
# Module-level logger for the manipulator machinery.
log = logging.getLogger(__name__)
# Command line options contributed by this module; add_help=False so it can
# be merged into a parent ArgumentParser without a duplicate -h option.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('--list-params', '-lp',
                       help='list available parameter classes')
class ConfigurationManipulatorBase(with_metaclass(abc.ABCMeta, object)):
  """
  Abstract interface for objects used by search techniques to mutate
  configurations.
  """
  # List of file formats, which can be extended by subclasses. Used in
  # save_to_file() and load_from_file(). Objects in the map must define
  # load(fd) and dump(cfg, fd).
  FILE_FORMATS = {'default': json, 'json': json,
                  'pickle': pickle, 'pk': pickle}
  # Serializers that require their files opened in binary mode; all others
  # get text-mode files.  On Python 3 json.dump() writes str (not bytes),
  # so handing it a binary-mode file raises TypeError.
  BINARY_SERIALIZERS = (pickle,)

  def validate(self, config):
    """Return True if every parameter considers the given config valid."""
    return all(p.validate(config) for p in self.parameters(config))

  def normalize(self, config):
    """Mutate config into canonical form (in place)."""
    for param in self.parameters(config):
      param.normalize(config)

  def set_search_driver(self, search_driver):
    """Hook called exactly once during setup."""
    pass

  def copy(self, config):
    """Produce a deep copy of config."""
    return copy.deepcopy(config)

  def parameters_dict(self, config):
    """Convert self.parameters() to a dictionary keyed by parameter name."""
    return dict([(p.name, p) for p in self.parameters(config)])

  def param_names(self, *args):
    """Return the sorted union of parameter names across the given configs."""
    return sorted(reduce(set.union,
                         [set(p.name for p in self.parameters(cfg))
                          for cfg in args]))

  def linear_config(self, a, cfg_a, b, cfg_b, c, cfg_c):
    """Return a configuration that is a linear combination of 3 other
    configs: a*cfg_a + b*cfg_b + c*cfg_c."""
    dst = self.copy(cfg_a)
    dst_params = self.proxy(dst)
    for k in self.param_names(dst, cfg_a, cfg_b, cfg_c):
      dst_params[k].op4_set_linear(cfg_a, cfg_b, cfg_c, a, b, c)
    return dst

  def _get_serializer(self, filename, format=None):
    """
    Extract the correct file format serializer from self.FILE_FORMATS.
    Guess the format by extension if one is not given.
    """
    if format is None:
      format = os.path.splitext(filename)[1].lower().replace('.', '')
    if format not in self.FILE_FORMATS:
      serializer = self.FILE_FORMATS['default']
      if len(self.FILE_FORMATS) > 1:
        log.warning('Unknown file format "%s", using "%s" instead', format,
                    serializer.__name__)
    else:
      serializer = self.FILE_FORMATS[format]
    return serializer

  def _open_mode(self, serializer, rw):
    """Return the open() mode for a serializer: 'r'/'w', plus 'b' for
    binary serializers such as pickle."""
    return rw + 'b' if serializer in self.BINARY_SERIALIZERS else rw

  def save_to_file(self, cfg, filename, format=None):
    """
    Write cfg to filename. Guess the format by extension if one is not given.
    """
    serializer = self._get_serializer(filename, format)
    # bug fix: previously always opened 'wb', which breaks json.dump()
    # on Python 3 (it writes str); only pickle needs binary mode.
    with open(filename, self._open_mode(serializer, 'w')) as fd:
      serializer.dump(cfg, fd)

  def load_from_file(self, filename, format=None):
    """
    Read cfg from filename. Guess the format by extension if one is not given.
    """
    serializer = self._get_serializer(filename, format)
    with open(filename, self._open_mode(serializer, 'r')) as fd:
      return serializer.load(fd)

  def proxy(self, cfg):
    """Wrap cfg in a ManipulatorProxy for name-keyed parameter access."""
    return ManipulatorProxy(self, cfg)

  @abc.abstractmethod
  def random(self):
    """produce a random initial configuration"""
    return

  @abc.abstractmethod
  def parameters(self, config):
    """return a list of Parameter objects"""
    return list()

  @abc.abstractmethod
  def hash_config(self, config):
    """produce unique hash value for the given config"""
    return
class ConfigurationManipulator(ConfigurationManipulatorBase):
  """
  a configuration manipulator using a fixed set of parameters and storing
  configs in a dict-like object
  """
  def __init__(self, params=None, config_type=dict, seed_config=None, **kwargs):
    # params: initial list of Parameter objects (copied, not aliased)
    # config_type: constructor for configuration containers (default: dict)
    # seed_config: optional configuration used as the fixed starting point
    if params is None:
      params = []
    self.params = list(params)
    self.config_type = config_type
    self.search_driver = None
    self._seed_config = seed_config
    super(ConfigurationManipulator, self).__init__(**kwargs)
    for p in self.params:
      p.parent = self
  def add_parameter(self, p):
    # Register a parameter and its (one level of) sub-parameters.
    p.set_parent(self)
    self.params.append(p)
    #TODO sub parameters should be recursed on
    # not currently an issue since no doubly-nested sub-parameters
    sub_params = p.sub_parameters()
    for sp in sub_params:
      sp.set_parent(p)
    self.params.extend(sub_params)
  def set_search_driver(self, search_driver):
    # Called exactly once during setup by the search driver.
    self.search_driver = search_driver
  def seed_config(self):
    """produce a fixed seed configuration"""
    if self._seed_config:
      cfg = copy.deepcopy(self._seed_config)
    else:
      cfg = self.config_type()
      for p in self.params:
        # Names containing '/' are nested sub-parameter paths; they live
        # inside their parent's value, not at the top level of cfg.
        if not isinstance(p.name, str) or '/' not in p.name:
          cfg[p.name] = p.seed_value()
    return cfg
  def random(self):
    """produce a random configuration"""
    cfg = self.seed_config()
    for p in self.parameters(cfg):
      p.op1_randomize(cfg)
    return cfg
  def parameters(self, config):
    """return a list of Parameter objects"""
    if type(config) is not self.config_type:
      log.error("wrong type, expected %s got %s",
                str(self.config_type),
                str(type(config)))
      raise TypeError()
    return self.params
  def parameters_to_json(self):
    """
    output information about the parameters in this manipulator in json format:
    [ConfigurationManipulator,{pinfo:count,pinfo:count ...}]
    where pinfo has a similar form to describe the parameter's sub-parameters:
    [param_name,{pinfo:count,pinfo:count ...}]
    """
    def param_info_to_json(param, sub_parameters):
      """
      recursively output information about a parameter and its subparameters in a json format:
      [parameter_name, {subparam_info:count,subparam_info:count,...}]
      or if no subparams
      [parameter_name,{}]
      where subparam_info are sorted alphabetically. Note we can't directly use json since
      sets/dictionaries aren't always ordered by key
      """
      sub_parameter_counts = {}
      # build the string
      if isinstance(param, str):
        param_name = param
      else:
        param_name = param.__class__.__name__
      out = ['[', param_name, ',{']
      if len(sub_parameters) > 0:
        # count sub params
        for sp in sub_parameters:
          spout = param_info_to_json(sp, sp.sub_parameters())
          sub_parameter_counts[spout] = sub_parameter_counts.get(spout, 0) + 1
        # add the count map in sorted order
        for sp in sorted(sub_parameter_counts):
          out.append(sp)
          out.append(':')
          out.append(str(sub_parameter_counts[sp]))
          out.append(',')
        out.pop() # remove trailing comma
      out.append('}]')
      return ''.join(out)
    # filter out subparameters to avoid double counting
    params = [p for p in self.params if p.parent is self]
    return param_info_to_json(self, params)
  def hash_config(self, config):
    """produce unique hash value for the given config"""
    m = hashlib.sha256()
    # Sort by name so the hash is independent of parameter ordering.
    params = list(self.parameters(config))
    params.sort(key=_.name)
    for i, p in enumerate(params):
      m.update(str(p.name).encode())
      m.update(p.hash_value(config))
      m.update(str(i).encode())
      m.update(b"|")
    return m.hexdigest()
  def search_space_size(self):
    """estimate the size of the search space, not precise"""
    # Product of the per-parameter sizes.
    return reduce(_ * _, [x.search_space_size() for x in self.params])
  def difference(self, cfg1, cfg2):
    # Per-parameter difference cfg1 - cfg2; only primitive parameters are
    # subtracted, all others keep cfg1's value.
    cfg = self.copy(cfg1)
    for param in self.parameters(cfg1):
      if param.is_primitive(cfg1):
        # TODO: check range
        param.set_value(cfg, param.get_value(cfg1) - param.get_value(cfg2))
      else:
        pass
    return cfg
  def applySVs(self, cfg, sv_map, args, kwargs):
    """
    Apply operators to each parameter according to given map. Updates cfg.
    Parameters with no operators specified are not updated.
    cfg: configuration data
    sv_map: python dict that maps string parameter name to class method name
    arg_map: python dict that maps string parameter name to class method
    arguments
    """
    # TODO: check consistency between sv_map and cfg
    param_dict = self.parameters_dict(cfg)
    for pname in self.param_names(cfg):
      param = param_dict[pname]
      getattr(param, sv_map[pname])(cfg, *args[pname], **kwargs[pname])
class Parameter(with_metaclass(abc.ABCMeta, object)):
  """
  abstract base class for parameters in a ConfigurationManipulator
  """
  def __init__(self, name):
    # name: key (possibly a '/'-separated path) of this parameter's value
    # inside a configuration
    self.name = name
    # parent is assigned by the owning manipulator (or parent parameter
    # for sub-parameters)
    self.parent = None
    super(Parameter, self).__init__()
  def _to_storage_type(self, val):
    """hook to support transformation applied while stored"""
    return val
  def _from_storage_type(self, sval):
    """hook to support transformation applied while stored"""
    return sval
  def _read_node(self, config):
    """hook to support different storage structures"""
    # Walks a '/'-separated path through nested dicts/lists and returns
    # (container, final_key); list indices are converted to int.
    node = config
    if not isinstance(self.name, str):
      return node, self.name
    name_parts = self.name.split('/')
    for part in name_parts[:-1]:
      if isinstance(node, list):
        part = int(part)
      node = node[part]
    part = name_parts[-1]
    if isinstance(node, list):
      part = int(part)
    return node, part
  def _get(self, config):
    """hook to support different storage structures"""
    node, part = self._read_node(config)
    return self._from_storage_type(node[part])
  def _set(self, config, v):
    """hook to support different storage structures"""
    node, part = self._read_node(config)
    node[part] = self._to_storage_type(v)
  def set_parent(self, manipulator):
    self.parent = manipulator
  def validate(self, config):
    """is the given config valid???"""
    return True
  def is_primitive(self, ignored=None):
    # ignored: kept for call-site compatibility (some callers pass a config)
    return isinstance(self, PrimitiveParameter)
  def is_permutation(self, ignored=None):
    return isinstance(self, PermutationParameter)
  def manipulators(self, config):
    """
    a list of manipulator functions to change this value in the config
    manipulators must be functions that take a config and change it in place
    default implementation just has op1_randomize as only operation
    """
    return [self.op1_randomize]
  def normalize(self, config):
    """
    mutate this parameter into a canonical form
    """
    pass
  def sub_parameters(self):
    """
    additional parameters added with this parameter
    """
    return []
  @abc.abstractmethod
  def op1_randomize(self, cfg):
    """
    Set this parameter's value in a configuration to a random value
    :param config: the configuration to be changed
    """
    pass
  @abc.abstractmethod
  def seed_value(self):
    """some legal value of this parameter (for creating initial configs)"""
    return
  @abc.abstractmethod
  def copy_value(self, src, dst):
    """copy the value of this parameter from src to dst config"""
    pass
  @abc.abstractmethod
  def same_value(self, cfg1, cfg2):
    """test if cfg1 and cfg2 have the same value of this parameter"""
    return
  @abc.abstractmethod
  def hash_value(self, config):
    """produce unique hash for this value in the config"""
    return
  @abc.abstractmethod
  def op4_set_linear(self, cfg, cfg_a, cfg_b, cfg_c, a, b, c):
    """
    Sets the parameter value in a configuration to a linear combination of 3
    other configurations: :math:`a*cfg_a + b*cfg_b + c*cfg_c`
    :param cfg: the configuration to be changed
    :param cfg_a: a parent configuration
    :param cfg_b: a parent configuration
    :param cfg_c: a parent configuration
    :param a: weight for cfg_a
    :param b: weight for cfg_b
    :param c: weight for cfg_c
    """
    pass
  def search_space_size(self):
    # Default: a parameter contributes a single point to the search space.
    return 1
  def op1_nop(self, cfg):
    """
    The 'null' operator. Does nothing.
    :param cfg: the configuration to be changed
    """
    pass
  # Stochastic variators
  def op3_swarm(self, cfg, cfg1, cfg2, c, c1, c2, *args, **kwargs):
    """
    Stochastically 'move' the parameter value in a configuration towards those
    in two parent configurations. This is done by calling :py:meth:`opn_stochastic_mix`
    :param cfg: the configuration to be changed
    :param cfg1: a parent configuration
    :param cfg2: a parent configuration
    :param c: weight of original configuration
    :param c1: weight for cfg1
    :param c2: weight for cfg2
    """
    # default to probabilistic treatment
    self.opn_stochastic_mix(cfg, [cfg, cfg1, cfg2], [c, c1, c2])
  def opn_stochastic_mix(self, cfg, cfgs, ratio, *args, **kwargs):
    """
    Stochastically recombine a list of parent values into a single result.
    This randomly copies a value from a list of parents configurations according
    to a list of weights.
    :param cfg: the configuration to be changed
    :param cfgs: a list of parent configurations
    :param ratio: a list of floats representing the weight of each configuration
    in cfgs
    """
    assert len(cfgs) == len(ratio)
    r = random.random()
    # Normalize weights to a probability vector, then pick the first parent
    # whose cumulative probability exceeds r.
    c = old_div(numpy.array(ratio, dtype=float), sum(ratio))
    for i in range(len(c)):
      if r < sum(c[:i + 1]):
        self.copy_value(cfg, cfgs[i])
        break
class PrimitiveParameter(with_metaclass(abc.ABCMeta, Parameter)):
  """
  An abstract interface implemented by parameters that represent a single
  dimension in a cartesian space in a legal range
  """
  def __init__(self, name, value_type=float, **kwargs):
    # value_type: constructor used to interpret stored values (e.g. int,
    # float); it also drives is_integer_type()
    self.value_type = value_type
    super(PrimitiveParameter, self).__init__(name, **kwargs)
  def hash_value(self, config):
    """produce unique hash for this value in the config"""
    # normalize first so equivalent configurations hash identically
    self.normalize(config)
    return hashlib.sha256(repr(self.get_value(config)).encode('utf-8')).hexdigest().encode('utf-8')
  def copy_value(self, src, dst):
    """copy the value of this parameter from src to dst config"""
    self.set_value(dst, self.get_value(src))
  def same_value(self, cfg1, cfg2):
    """test if cfg1 and cfg2 have the same value of this parameter"""
    return self.get_value(cfg1) == self.get_value(cfg2)
  def is_integer_type(self):
    """true if self.value_type can only represent integers"""
    # e.g. int(0) == int(0.1) is True, while float(0) == float(0.1) is not
    return self.value_type(0) == self.value_type(0.1)
  def get_unit_value(self, config):
    """get_value scaled such that range is between 0.0 and 1.0"""
    low, high = self.legal_range(config)
    if self.is_integer_type():
      # account for rounding: widen by just under 0.5 on each side so every
      # integer maps to an equal-width interval on the unit scale
      low -= 0.4999
      high += 0.4999
    val = self.get_value(config)
    if low < high:
      return old_div(float(val - low), float(high - low))
    else:
      if low > high:
        log.warning('invalid range for parameter %s, %s to %s',
                    self.name, low, high)
      # only a single legal value!
      return 0.0
  def set_unit_value(self, config, unit_value):
    """set_value scaled such that range is between 0.0 and 1.0"""
    assert 0.0 <= unit_value <= 1.0
    low, high = self.legal_range(config)
    if self.is_integer_type():
      # account for rounding (mirror of get_unit_value's widening)
      low -= 0.4999
      high += 0.4999
    if low < high:
      val = unit_value * float(high - low) + low
      if self.is_integer_type():
        val = round(val)
      # clamp in case rounding pushed val just outside the widened range
      val = max(low, min(val, high))
      self.set_value(config, self.value_type(val))
  def op1_normal_mutation(self, cfg, sigma=0.1, *args, **kwargs):
    """
    apply normally distributed noise to this parameter's value in a
    configuration

    :param cfg: The configuration to be changed
    :param sigma: the std. deviation of the normally distributed noise on a unit
     scale
    """
    v = self.get_unit_value(cfg)
    v += random.normalvariate(0.0, sigma)
    # handle boundary cases by reflecting off the edge
    if v < 0.0:
      v *= -1.0
    if v > 1.0:
      v = 1.0 - (v % 1)
    self.set_unit_value(cfg, v)
  def op4_set_linear(self, cfg, cfg_a, cfg_b, cfg_c, a, b, c):
    """
    set the parameter value in a configuration to a linear combination of 3
    other configurations: :math:`a*cfg_a + b*cfg_b + c*cfg_c`

    :param cfg: The configuration to be changed
    :param cfg_a: a parent configuration
    :param cfg_b: a parent configuration
    :param cfg_c: a parent configuration
    :param a: weight for cfg_a
    :param b: weight for cfg_b
    :param c: weight for cfg_c
    """
    # combine on the unit scale so differently-ranged parameters mix sanely
    va = self.get_unit_value(cfg_a)
    vb = self.get_unit_value(cfg_b)
    vc = self.get_unit_value(cfg_c)
    v = a * va + b * vb + c * vc
    v = max(0.0, min(v, 1.0))
    self.set_unit_value(cfg, v)
  def manipulators(self, config):
    """
    a list of manipulator functions to change this value in the config
    manipulators must be functions that take a config and change it in place
    for primitive params default implementation is uniform random and normal
    """
    return [self.op1_randomize, self.op1_normal_mutation]
  @abc.abstractmethod
  def set_value(self, config, value):
    """assign this value in the given configuration"""
    pass
  @abc.abstractmethod
  def get_value(self, config):
    """retrieve this value from the given configuration"""
    return 0
  @abc.abstractmethod
  def legal_range(self, config):
    """return the legal range for this parameter, inclusive"""
    return 0, 1
class NumericParameter(PrimitiveParameter):
  """
  A parameter representing a number with a minimum and maximum value
  """

  def __init__(self, name, min_value, max_value, **kwargs):
    """min/max are inclusive"""
    assert min_value <= max_value
    super(NumericParameter, self).__init__(name, **kwargs)
    # coerce the bounds after the super call, once self.value_type exists
    self.min_value = self.value_type(min_value)
    self.max_value = self.value_type(max_value)

  def seed_value(self):
    """some legal value of this parameter (for creating initial configs)"""
    return self.min_value

  def set_value(self, config, value):
    assert self.min_value <= value <= self.max_value
    self._set(config, value)

  def get_value(self, config):
    return self._get(config)

  def legal_range(self, config):
    return self.min_value, self.max_value

  def _clipped(self, v):
    """clamp v into the inclusive [min_value, max_value] range"""
    return max(self.min_value, min(self.max_value, v))

  def op1_randomize(self, config):
    """
    Replace this parameter's value in config with a uniform random value
    drawn from its legal range.

    :param config: the configuration to be changed
    """
    lo, hi = self.legal_range(config)
    if self.is_integer_type():
      self.set_value(config, random.randint(lo, hi))
    else:
      self.set_value(config, random.uniform(lo, hi))

  def op1_scale(self, cfg, k):
    """
    Multiply this parameter's value in cfg by the constant k, clamped to
    the legal range.

    :param cfg: the configuration to be changed
    :param k: the constant factor to scale the parameter value by
    """
    self.set_value(cfg, self._clipped(self.get_value(cfg) * k))

  def op3_difference(self, cfg, cfg1, cfg2):
    """
    Store (cfg2 - cfg1) for this parameter into cfg, clamped to the legal
    range.

    :param cfg: the configuration to be changed
    :param cfg1: The configuration whose parameter value is being subtracted
    :param cfg2: The configuration whose parameter value is subtracted from
    """
    diff = self.get_value(cfg2) - self.get_value(cfg1)
    self.set_value(cfg, self._clipped(diff))

  def opn_sum(self, cfg, *cfgs):
    """
    Store the sum of this parameter's values across cfgs into cfg, clamped
    to the legal range.

    :param cfg: the configuration to be changed
    :param cfgs: a list of configurations to sum
    """
    total = sum(self.get_value(c) for c in cfgs)
    self.set_value(cfg, self._clipped(total))

  def search_space_size(self):
    if self.value_type is float:
      # continuous range: report a fixed large size
      return 2 ** 32
    # inclusive integer range
    return self.max_value - self.min_value + 1
class IntegerParameter(NumericParameter):
  """
  A parameter representing an integer value in a legal range
  """
  def __init__(self, name, min_value, max_value, **kwargs):
    """min/max are inclusive"""
    # force integer storage; overrides any caller-supplied value_type
    kwargs['value_type'] = int
    super(IntegerParameter, self).__init__(name, min_value, max_value, **kwargs)
  def op3_swarm(self, cfg, cfg1, cfg2, c=1, c1=0.5,
                c2=0.5, velocity=0, sigma=0.2, *args, **kwargs):
    """
    Simulates a single update step in particle swarm optimization by updating
    the current position and returning a new velocity.
    The new velocity is given by

    .. math:: c*velocity + r1*c1*(cfg1-cfg) + r2*c2*(cfg2-cfg)

    where r1 and r2 are random values between 0 and 1.
    The new current position is the new velocity with gaussian noise added.

    :param cfg: the configuration to be changed. Represents the current position
    :param cfg1: a configuration to shift towards. Should be the local best
     position
    :param cfg2: a configuration to shift towards. Should be the global best
     position
    :param c: the weight of the current velocity
    :param c1: weight of cfg1
    :param c2: weight of cfg2
    :param velocity: the old velocity
    :param sigma: standard deviation of the gaussian noise, on a unit-scale
    :return: the new velocity, a float
    """
    vmin, vmax = self.legal_range(cfg)
    k = vmax - vmin  # range width; scales both the sigmoid and the noise
    # calculate the new velocity
    v = velocity * c + (self.get_value(cfg1) - self.get_value(
        cfg)) * c1 * random.random() + (self.get_value(
        cfg2) - self.get_value(cfg)) * c2 * random.random()
    # Map velocity to continuous space with sigmoid
    s = old_div(k, (1 + numpy.exp(-v))) + vmin
    # Add Gaussian noise
    p = random.gauss(s, sigma * k)
    # Discretize and bound
    p = int(min(vmax, max(round(p), vmin)))
    self.set_value(cfg, p)
    return v
class FloatParameter(NumericParameter):
  """A continuous numeric parameter stored as a float."""

  def __init__(self, name, min_value, max_value, **kwargs):
    """min/max are inclusive"""
    kwargs['value_type'] = float
    super(FloatParameter, self).__init__(name, min_value, max_value, **kwargs)

  def op3_swarm(self, cfg, cfg1, cfg2, c=1, c1=0.5,
                c2=0.5, velocity=0, *args, **kwargs):
    """
    Single particle-swarm-optimization update step: compute a new velocity
    and move the current position by it.
    The new velocity is

    .. math:: c*velocity + r1*c1*(cfg1-cfg) + r2*c2*(cfg2-cfg)

    where r1 and r2 are random values between 0 and 1.  The new position is
    the old position offset by the new velocity.

    :param cfg: the configuration to be changed. Represents the current position
    :param cfg1: a configuration to shift towards. Should be the local best
     position
    :param cfg2: a configuration to shift towards. Should be the global best
     position
    :param c: the weight of the current velocity
    :param c1: weight of cfg1
    :param c2: weight of cfg2
    :param velocity: the old velocity
    :return: the new velocity, a float
    """
    vmin, vmax = self.legal_range(cfg)
    # attraction to the local best, then the global best (random draw
    # order is preserved for reproducibility under a seeded RNG)
    pull1 = (self.get_value(cfg1) - self.get_value(cfg)) * c1 * random.random()
    pull2 = (self.get_value(cfg2) - self.get_value(cfg)) * c2 * random.random()
    new_velocity = velocity * c + pull1 + pull2
    position = self.get_value(cfg) + new_velocity
    position = min(vmax, max(position, vmin))
    self.set_value(cfg, position)
    return new_velocity
class ScaledNumericParameter(NumericParameter):
  """
  A Parameter stored unscaled in configurations, but exposed on a scaled
  axis through get_value/set_value.
  Because search techniques interact with Parameters through get_value,
  these parameters are searched on a different scale (e.g. log scale).
  """

  @abc.abstractmethod
  def _scale(self, v):
    """
    map a stored (actual) value onto the scale the parameter is searched on
    """
    return v

  @abc.abstractmethod
  def _unscale(self, v):
    """
    map a value from the search scale back to its stored (actual) value
    """
    return v

  def set_value(self, config, value):
    NumericParameter.set_value(self, config, self._unscale(value))

  def get_value(self, config):
    return self._scale(NumericParameter.get_value(self, config))

  def legal_range(self, config):
    return [self._scale(v) for v in NumericParameter.legal_range(self, config)]
class LogIntegerParameter(ScaledNumericParameter, FloatParameter):
  """
  an integer value that is searched on a log scale, but stored without scaling
  """

  def _scale(self, v):
    # shift so the smallest legal value maps to log2(1.0) == 0.0
    return math.log(v + 1.0 - self.min_value, 2.0)

  def _unscale(self, v):
    unscaled = 2.0 ** v - 1.0 + self.min_value
    return int(round(unscaled))

  def legal_range(self, config):
    lo, hi = NumericParameter.legal_range(self, config)
    # widen the bounds to account for the rounding done in _unscale
    return self._scale(lo - 0.4999), self._scale(hi + 0.4999)
class LogFloatParameter(ScaledNumericParameter, FloatParameter):
  """
  a float parameter that is searched on a log scale, but stored without scaling
  """

  def _scale(self, v):
    # shift so the smallest legal value maps to log2(1.0) == 0.0
    return math.log(v + 1.0 - self.min_value, 2.0)

  def _unscale(self, v):
    return 2.0 ** v - 1.0 + self.min_value
class PowerOfTwoParameter(ScaledNumericParameter, IntegerParameter):
  """
  An integer power of two, with a min and max value. Searched by the exponent
  """

  def __init__(self, name, min_value, max_value, **kwargs):
    """
    :param name: parameter name
    :param min_value: smallest legal value; must be a power of two >= 1
    :param max_value: largest legal value; must be a power of two
    """
    kwargs['value_type'] = int
    assert min_value >= 1
    assert math.log(min_value, 2) % 1 == 0  # must be power of 2
    assert math.log(max_value, 2) % 1 == 0  # must be power of 2
    super(PowerOfTwoParameter, self).__init__(name, min_value, max_value,
                                              **kwargs)

  def _scale(self, v):
    """map a power of two to its exponent"""
    # round() before int(): math.log(v, 2) can return e.g. 28.999...
    # for exact powers of two, and bare int() truncation would then
    # produce the wrong exponent
    return int(round(math.log(v, 2)))

  def _unscale(self, v):
    """map an exponent back to the stored power of two"""
    return 2 ** int(v)

  def legal_range(self, config):
    # exponent range, inclusive (rounded for the same reason as _scale)
    return (int(round(math.log(self.min_value, 2))),
            int(round(math.log(self.max_value, 2))))

  def search_space_size(self):
    # number of exponents in the inclusive range
    return (int(round(math.log(self.max_value, 2))) -
            int(round(math.log(self.min_value, 2))) + 1)
##################
class ComplexParameter(Parameter):
  """
  A non-cartesian parameter that can't be manipulated directly, but has a set
  of user defined manipulation functions
  """
  def copy_value(self, src, dst):
    """copy the value of this parameter from src to dst config"""
    # deep copy so later mutations of dst never alias src's value
    self._set(dst, copy.deepcopy(self._get(src)))
  def same_value(self, cfg1, cfg2):
    """test if cfg1 and cfg2 have the same value of this parameter"""
    return self._get(cfg1) == self._get(cfg2)
  def hash_value(self, config):
    """produce unique hash for this value in the config"""
    # normalize first so equivalent representations hash identically
    self.normalize(config)
    return hashlib.sha256(repr(self.get_value(config)).encode('utf-8')).hexdigest().encode('utf-8')
  def get_value(self, config):
    return self._get(config)
  def set_value(self, config, value):
    self._set(config, value)
  def op4_set_linear(self, cfg, cfg_a, cfg_b, cfg_c, a, b, c):
    """
    set this value to :math:`a*cfg_a + b*cfg_b + c*cfg_c`

    this operation is not possible in general with complex parameters but
    we make an attempt to "fake" it for common use cases

    basically a call to randomize unless after normalization,
    a = 1.0, b == -c, and cfg_b == cfg_c, in which case nothing is done

    :param cfg: the configuration to be changed
    :param cfg_a: a parent configuration
    :param cfg_b: a parent configuration
    :param cfg_c: a parent configuration
    :param a: weight for cfg_a
    :param b: weight for cfg_b
    :param c: weight for cfg_c
    """
    # the swaps below reorder the (weight, config) pairs without changing
    # the linear combination, trying to reach the recognizable form
    # 1.0*cfg_a + b*cfg_b - b*cfg_c
    # attempt to normalize order, we prefer a==1.0
    if a != 1.0 and b == 1.0:  # swap a and b
      a, cfg_a, b, cfg_b = b, cfg_b, a, cfg_a
    if a != 1.0 and c == 1.0:  # swap a and c
      a, cfg_a, c, cfg_c = c, cfg_c, a, cfg_a
    # attempt to normalize order, we prefer b==-c
    if b < c:  # swap b and c
      b, cfg_b, c, cfg_c = c, cfg_c, b, cfg_b
    if b != -c and a == -c:  # swap a and c
      a, cfg_a, c, cfg_c = c, cfg_c, a, cfg_a
    if a == 1.0 and b == -c:
      # recognized form: cfg_a + b*(cfg_b - cfg_c)
      self.copy_value(cfg_a, cfg)
      self.add_difference(cfg, b, cfg_b, cfg_c)  # TODO inline this logic?
    else:
      # TODO: should handle more cases
      self.op1_randomize(cfg)
  def add_difference(self, cfg_dst, scale, cfg_b, cfg_c):
    """
    add the difference cfg_b-cfg_c to cfg_dst

    this is the key operation used in differential evolution
    and some simplex techniques

    this operation is not possible in general with complex parameters but
    we make an attempt to "fake" it
    """
    # if the parents agree there is no difference to add; otherwise the
    # difference is "faked" by randomizing the destination
    if not self.same_value(cfg_b, cfg_c):
      self.op1_randomize(cfg_dst)
  @abc.abstractmethod
  def op1_randomize(self, config):
    """
    randomize this value without taking into account the current position

    :param config: the configuration to be changed
    """
    pass
  @abc.abstractmethod
  def seed_value(self):
    """some legal value of this parameter (for creating initial configs)"""
    return
class BooleanParameter(ComplexParameter):
  """A single boolean flag parameter."""

  def manipulators(self, config):
    return [self.op1_flip]

  def get_value(self, config):
    return self._get(config)

  def set_value(self, config, value):
    self._set(config, value)

  def op1_randomize(self, config):
    """
    Assign a uniformly random True/False to this parameter in config.

    :param config: the configuration to be changed
    """
    self._set(config, self.seed_value())

  def seed_value(self):
    return random.choice((True, False))

  def op1_flip(self, config):
    """
    Invert this parameter's value in config.

    :param config: the configuration to be changed
    """
    current = self._get(config)
    self._set(config, not current)

  def search_space_size(self):
    return 2

  def op3_swarm(self, cfg, cfg1, cfg2, c=1, c1=0.5,
                c2=0.5, velocity=0, *args, **kwargs):
    """
    Particle-swarm style update step for a boolean parameter.
    The new velocity is

    .. math:: c*velocity + r1*c1*(cfg1-cfg) + r2*c2*(cfg2-cfg)

    with r1 and r2 uniform in [0, 1).  The velocity is squashed through a
    sigmoid into a probability, which randomly decides the new value.

    :param cfg: the configuration to be changed. Represents the current position
    :param cfg1: a configuration to shift towards. Should be the local best position
    :param cfg2: a configuration to shift towards. Should be the global best position
    :param c: the weight of the current velocity
    :param c1: weight of cfg1
    :param c2: weight of cfg2
    :param velocity: the old velocity
    :param args:
    :param kwargs:
    :return: the new velocity, a float
    """
    # bool arithmetic (True-False == 1) pulls towards each parent; random
    # draw order is preserved for reproducibility under a seeded RNG
    pull1 = (self.get_value(cfg1) - self.get_value(cfg)) * c1 * random.random()
    pull2 = (self.get_value(cfg2) - self.get_value(cfg)) * c2 * random.random()
    v = velocity * c + pull1 + pull2
    # squash the velocity into (0, 1) with a sigmoid
    s = old_div(1, (1 + numpy.exp(-v)))
    # draw the new boolean position with probability s of being True
    p = (s - random.random()) > 0
    self.set_value(cfg, p)
    return v
class SwitchParameter(ComplexParameter):
  """
  A parameter representing an unordered collection of options with no implied
  correlation between the choices. The choices are range(option_count)
  """

  def __init__(self, name, option_count):
    self.option_count = option_count
    super(SwitchParameter, self).__init__(name)

  def op1_randomize(self, config):
    """
    Pick a uniformly random option for this parameter in config.

    :param config: the configuration to be changed
    """
    self._set(config, random.randrange(self.option_count))

  def seed_value(self):
    return random.randrange(self.option_count)

  def search_space_size(self):
    # never report less than 1, even for an empty option set
    return max(1, self.option_count)
class EnumParameter(ComplexParameter):
  """
  same as a SwitchParameter but choices are taken from an arbitrarily typed list
  """

  def __init__(self, name, options):
    super(EnumParameter, self).__init__(name)
    # private copy so later mutation of the caller's list has no effect
    self.options = list(options)

  def op1_randomize(self, config):
    """
    Pick a uniformly random option for this parameter in config.

    :param config: the configuration to be changed
    """
    self._set(config, random.choice(self.options))

  def seed_value(self):
    return random.choice(self.options)

  def search_space_size(self):
    # never report less than 1, even for an empty option list
    return max(1, len(self.options))
class PermutationParameter(ComplexParameter):
  """
  A parameter representing a permutation (or ordering) as a list of items
  """

  def __init__(self, name, items):
    super(PermutationParameter, self).__init__(name)
    self._items = list(items)
    self.size = len(items)

  def op1_randomize(self, config):
    """
    Set this parameter's value in a configuration to a random value

    :param config: the configuration to be changed
    """
    random.shuffle(self._get(config))
    self.normalize(config)

  def op1_small_random_change(self, config, p=0.25):
    """
    Iterates through the list and probabilistically swaps each element with the
    next element

    :param p: probability of swapping an element with the next element
    :param config: the configuration to be changed
    """
    cfg_item = self._get(config)
    for i in range(1, len(cfg_item)):
      if random.random() < p:
        # swap
        cfg_item[i - 1], cfg_item[i] = cfg_item[i], cfg_item[i - 1]
    self.normalize(config)

  def seed_value(self):
    return list(self._items)  # copy

  def manipulators(self, config):
    return [self.op1_randomize, self.op1_small_random_change]

  def get_value(self, config):
    return self._get(config)

  def set_value(self, config, value):
    self._set(config, value)

  def search_space_size(self):
    # all orderings of the items; note this method was previously defined
    # twice in this class with identical bodies - the duplicate is removed
    return math.factorial(max(1, len(self._items)))

  def op3_cross(self, cfg, cfg1, cfg2, xchoice='op3_cross_OX1', strength=0.3,
                *args, **kwargs):
    """
    Calls the crossover operator specified by xchoice
    Passes argument d = strength*(size of the permutation)

    :param cfg: the configuration to be changed
    :param cfg1: a parent configuration
    :param cfg2: a parent configuration
    :param xchoice: string specifying which crossover operator to use (should start with op3_cross prefix)
    :param strength: the strength of the crossover
    """
    dd = int(round(self.size * strength))
    if dd < 1:
      log.warning('Crossover length too small. Cannot create new solution.')
    if dd >= self.size:
      log.warning('Crossover length too big. Cannot create new solution.')
    getattr(self, xchoice)(cfg, cfg1, cfg2, d=dd, *args, **kwargs)

  def op3_swarm(self, cfg, cfg1, cfg2, xchoice='op3_cross_OX1', c=0.5,
                c1=0.5, c2=0.5, strength=0.3, velocity=0, *args, **kwargs):
    """
    Replacement for particle swarm optimization iterative step for permutations.
    Given a target cfg and 2 parent cfgs, probabilistically performs an
    op3_cross with one of the 2 parents.

    :param cfg: the configuration to be changed. Represents the current position
    :param cfg1: a configuration to shift towards. Should be the local best
     position
    :param cfg2: a configuration to shift towards. Should be the global best
     position
    :param xchoice: which crossover operator should be used
    :param c: the probability of not performing a crossover
    :param c1: the probability of performing a crossover with cfg1 (if a
     crossover is performed)
    :param c2: unused
    :param strength: the strength of the crossover
    :param velocity: the old velocity - unused
    """
    if random.uniform(0, 1) > c:
      if random.uniform(0, 1) < c1:
        # Select crossover operator
        self.op3_cross(cfg, cfg, cfg1, xchoice, strength)
      else:
        self.op3_cross(cfg, cfg, cfg2, xchoice, strength)

  # swap-based operators
  def op2_random_swap(self, cfg, cfg1, *args, **kwargs):
    """
    Swap a random pair of items in cfg1 and save the result into cfg

    :param cfg: the configuration to be changed
    :param cfg1: the configuration whose PermutationParameter's elements are
     swapped and copied into cfg
    """
    p = self.get_value(cfg1)[:]
    r = random.randint(0, len(p) - 1)
    s = random.randint(0, len(p) - 1)
    # tuple-assignment swap (r may equal s, in which case this is a no-op)
    p[r], p[s] = p[s], p[r]
    self.set_value(cfg, p)

  def op2_random_invert(self, cfg, cfg1, strength=0.3, *args, **kwargs):
    """
    Reverse the ordering of a random subsection of size d in cfg1 and save the
    result in cfg where d = strength*total-size

    :param cfg: the configuration to be changed
    :param cfg1: the configuration whose PermutationParameter is inverted
    :param strength: the size of the reversed subsection as a fraction of the
     total size
    """
    p = self.get_value(cfg1)[:]
    d = int(round(len(p) * strength))
    r = random.randint(0, len(p) - d)
    # reverse the chosen window in place via slice assignment
    p[r:r + d] = p[r:r + d][::-1]
    self.set_value(cfg, p)

  # Crossover operators
  def op3_cross_PX(self, cfg, cfg1, cfg2, d=0):
    """
    Partition crossover (Whitley 2009?)
    Chooses a random cut point and reorders elements in cfg1 up to the cut point
    according to their order in cfg2.
    Saves the result in cfg

    :param cfg: the configuration to be changed
    :param cfg1: the first parent configuration. The "base" configuration
    :param cfg2: the second parent configuration. Is "crossed into" cfg1
    :param d: unused
    """
    p1 = self.get_value(cfg1)
    p2 = self.get_value(cfg2)
    c1 = random.randint(2, len(p1))
    self.set_value(cfg, sorted(p1[:c1], key=lambda x: p2.index(x)) + p1[c1:])

  def op3_cross_PMX(self, cfg, cfg1, cfg2, d=0):
    """
    Partially-mapped crossover Goldberg & Lingle (1985)
    Replaces a random section of cfg1 with the corresponding section in cfg2.
    Displaced elements in cfg1 are moved to the old position of the elements
    displacing them

    :param cfg: the configuration to be changed
    :param cfg1: the first parent configuration. The "base" configuration
    :param cfg2: the second parent configuration. Is "crossed into" cfg1
    :param d: the size of the crossover
    """
    if d == 0:
      d = max(1, int(round(self.size * 0.3)))  # default to 1/3 of permutation size
    p1 = self.get_value(cfg1)[:]
    p2 = self.get_value(cfg2)[:]
    r = random.randint(0, len(p1) - d)
    c1 = p1[r:r + d]
    c2 = p2[r:r + d]
    # get new permutation by crossing over a section of p2 onto p1
    pnew = self.get_value(cfg1)[:]
    pnew[r:r + d] = c2
    # fix conflicts by taking displaced elements in crossed over section
    # displaced = (elements x in c1 where x does not have corresponding value in c2)
    # and putting them where the value that displaced them was
    # candidates for displacement
    candidate_indices = set(list(range(r)) + list(range(r + d, len(p1))))
    # Check through displaced elements to find values to swap conflicts to
    while c1 != []:
      n = c1[0]
      # try to match up a value in c1 to the equivalent value in c2
      while c2[0] in c1:
        if n == c2[0]:
          # already match up
          break
        # find position idx of c2[0] in c1
        link_idx = c1.index(c2[0])
        # get value of c2 at idx
        link = c2[link_idx]
        # remove c2[idx] and c1[idx] since they match up when we swap c2[0] with c2[idx] (this avoids an infinite loop)
        del c2[link_idx]
        del c1[link_idx]
        # swap new value into c2[0]
        c2[0] = link
      if n != c2[0]:
        # first check if we can swap in the crossed over section still
        if n in c2:
          c2[c2.index(n)] = c2[0]
        else:
          # assign first instance of c2[0] outside of the crossed over section in pnew to c1[0]
          for idx in candidate_indices:
            if pnew[idx] == c2[0]:
              pnew[idx] = c1[0]
              candidate_indices.remove(idx)  # make sure we don't override this value now
              break
      # remove first elements
      del c1[0]
      del c2[0]
    self.set_value(cfg, pnew)

  def op3_cross_CX(self, cfg, cfg1, cfg2, d=0):
    """
    Implementation of a cyclic crossover.
    Repeatedly replaces elements of cfg1 with the element at the same index in
    cfg2. This is done until a cycle is reached and cfg1 is valid again. The
    initial replacement is random.
    Saves the result in cfg.

    :param cfg: the configuration to be changed
    :param cfg1: the first parent configuration. The "base" configuration
    :param cfg2: the second parent configuration. Is "crossed into" cfg1
    :param d: unused
    """
    p1 = self.get_value(cfg1)
    p2 = self.get_value(cfg2)
    p = p1[:]
    s = random.randint(0, len(p1) - 1)
    i = s
    indices = set()
    while len(indices) < len(p1):  # should never exceed this
      indices.add(i)
      val = p1[i]
      i = p2.index(val)
      # deal with duplicate values
      while i in indices:
        if i == s:
          break
        i = p2[i + 1:].index(val) + i + 1
      if i == s:
        break
    for j in indices:
      p[j] = p2[j]
    self.set_value(cfg, p)

  def op3_cross_OX1(self, cfg, cfg1, cfg2, d=0):
    """
    Ordered Crossover (Davis 1985)
    Exchanges a subpath from cfg2 into cfg1 while maintaining the order of the
    remaining elements in cfg1.
    Saves the result in cfg.

    :param cfg: the configuration to be changed
    :param cfg1: the first parent configuration. The "base" configuration
    :param cfg2: the second parent configuration. Is "crossed into" cfg1
    :param d: size of the exchanged subpath
    """
    if d == 0:
      d = max(1, int(round(self.size * 0.3)))  # default to 1/3 of permutation size
    p1 = self.get_value(cfg1)
    p2 = self.get_value(cfg2)
    c1 = p1[:]
    # Randomly find cut points
    r = random.randint(0, len(
        p1) - d)  # Todo: treat path as circle i.e. allow cross-boundary cuts
    # remove the transplanted items (explicit loop instead of the former
    # side-effect list comprehension)
    for item in p2[r:r + d]:
      c1.remove(item)
    self.set_value(cfg, c1[:r] + p2[r:r + d] + c1[r:])

  def op3_cross_OX3(self, cfg, cfg1, cfg2, d=0):
    """
    Ordered crossover variation 3 (Deep 2010)
    Same as op3_cross_OX1, except the parents have different cut points for
    their subpaths

    :param cfg: the configuration to be changed
    :param cfg1: the first parent configuration. The "base" configuration
    :param cfg2: the second parent configuration. Is "crossed into" cfg1
    :param d: size of the exchanged subpath
    """
    if d == 0:
      d = max(1, int(round(self.size * 0.3)))  # default to 1/3 of permutation size
    p1 = self.get_value(cfg1)
    p2 = self.get_value(cfg2)
    c1 = p1[:]
    # Randomly find cut points
    # Todo: treat path as circle i.e. allow cross-boundary cuts
    r1 = random.randint(0, len(p1) - d)
    r2 = random.randint(0, len(p1) - d)
    for item in p2[r2:r2 + d]:
      c1.remove(item)
    self.set_value(cfg, c1[:r1] + p2[r2:r2 + d] + c1[r1:])
class ScheduleParameter(PermutationParameter):
  """
  A permutation parameter with precedence constraints.
  normalize() reorders values via topologically_sorted(), which places each
  item k before every member of deps[k] (after transitive expansion).
  """

  def __init__(self, name, items, deps):
    """
    :param name: parameter name
    :param items: the items to be ordered
    :param deps: dict mapping each item to a set of items it is constrained
     against; constraints are expanded transitively by _expand_deps()
    """
    super(ScheduleParameter, self).__init__(name, items)
    self.deps = dict((k, set(v)) for k, v in list(deps.items()))
    log.debug("ScheduleParameter(%s, %s, %s)", repr(name), repr(items),
              repr(deps))
    self._expand_deps()

  def _expand_deps(self):
    """expand self.deps to include recursive dependencies"""
    fixed_point = False
    while not fixed_point:
      fixed_point = True
      for k in list(self.deps.keys()):
        oldlen = len(self.deps[k])
        for dep in list(self.deps[k]):
          if dep in self.deps:
            self.deps[k].update(self.deps[dep])
        if oldlen != len(self.deps[k]):
          fixed_point = False
    # verify schedule is valid
    items = set(self._items)
    for k, v in list(self.deps.items()):
      if k in v:
        raise Exception("ScheduleParameter('%s') cycle: %s depends on itself" %
                        (self.name, k))
      if v - items:
        raise Exception("ScheduleParameter('%s'): %s is unknown" %
                        (self.name, v - items))
    if set(self.deps.keys()) - items:
      raise Exception("ScheduleParameter('%s'): %s is unknown" %
                      (self.name, set(self.deps.keys()) - items))

  def is_topologically_sorted(self, values):
    """
    True if values already satisfies the ordering produced by
    topologically_sorted(): every v appears before all members of
    self.deps[v].
    """
    used = set()
    for v in values:
      # Bug fix: this previously tested deps[v].union(used), which is
      # truthy for any constrained item, so valid orderings were never
      # recognized and the fast paths below never triggered.  A violation
      # exists only when something v must come before (a member of
      # deps[v]) has already appeared earlier in the list.
      if v in self.deps and self.deps[v].intersection(used):
        return False
      used.add(v)
    return True

  def topologically_sorted_depth_first(self, values):
    """faster but not stable enough"""
    if self.is_topologically_sorted(values):
      return values
    sorted_values = []
    used = set()
    deps = dict((k, sorted(v, key=values.index, reverse=True))
                for k, v in list(self.deps.items()))

    def visit(v):
      # post-order DFS: members of deps[v] land in sorted_values before v,
      # and the final reversal puts v ahead of them in the output
      if v in used:
        return
      if v in deps:
        for dv in deps[v]:
          visit(dv)
      used.add(v)
      sorted_values.append(v)

    for v in reversed(values):
      visit(v)
    return list(reversed(sorted_values))

  def topologically_sorted(self, values):
    if self.is_topologically_sorted(values):
      return values
    deps = copy.deepcopy(self.deps)
    queue = collections.deque(reversed(values))
    sorted_values = []
    while queue:
      v = queue.popleft()
      if v in deps and deps[v]:
        # v still has unplaced constraints; retry it later
        queue.append(v)
      else:
        for k, d in list(deps.items()):
          d.discard(v)
          if not d:
            del deps[k]
        sorted_values.append(v)
    # the final reversal places each item before the members of its deps set
    return list(reversed(sorted_values))

  def normalize(self, cfg):
    self._set(cfg, self.topologically_sorted(self._get(cfg)))
class SelectorParameter(ComplexParameter):
  """
  Composite parameter: an ordering over choices plus cutoff offsets that
  together select which choice applies at a given point in [0, max_cutoff].
  """

  def __init__(self, name, choices, max_cutoff,
               order_class=PermutationParameter,
               offset_class=LogIntegerParameter):
    super(SelectorParameter, self).__init__(name)
    self.choices = choices
    self.max_cutoff = max_cutoff
    self.order_param = order_class('{0}/order'.format(name), choices)
    self.offset_params = [
      offset_class('{0}/offsets/{1}'.format(name, i), 0, max_cutoff)
      for i in range(len(choices) - 1)]

  def sub_parameters(self):
    return [self.order_param] + self.offset_params

  def seed_value(self):
    return {'order': self.order_param.seed_value(),
            'offsets': [co.seed_value() for co in self.offset_params]}

  def op1_randomize(self, config):
    # randomize one randomly chosen sub-parameter
    chosen = random.choice(self.sub_parameters())
    chosen.op1_randomize(config)

  def selector_iter(self, config):
    """
    yield (cutoff, choice) pairs

    cutoff will be None on the first value
    """
    data = config[self.name]
    order = data['order']
    yield (None, order[0])
    cutoff = 0
    for idx, offset in enumerate(data['offsets']):
      if offset > 0:
        cutoff += offset
        yield cutoff, order[idx + 1]
class ParameterArray(ComplexParameter):
  """
  Represents an array of Parameters
  """
  def __init__(self, name, count, element_type, *args, **kwargs):
    # NOTE(review): sub-parameter i is built from args[i] and kwargs[i],
    # so callers must supply per-element argument collections indexable by
    # integer, not ordinary keyword arguments -- TODO confirm intended API
    super(ParameterArray, self).__init__(name)
    self.count = count
    self.sub_params = [
      element_type('{0}/{1}'.format(name, i), *args[i], **kwargs[i])
      for i in range(count)]
  def sub_parameters(self):
    return self.sub_params
  def seed_value(self):
    return [p.seed_value() for p in self.sub_params]
  def op1_randomize(self, config):
    """
    randomly selects a sub-parameter and randomizes it

    :param config: the configuration to be changed
    """
    random.choice(self.sub_parameters()).op1_randomize(config)
class BooleanParameterArray(ParameterArray):
  """
  Represents an array of BooleanParameters - currently unimplemented
  """
  def __init__(self, name, count):
    # NOTE(review): no per-element args are passed, but the base __init__
    # indexes args[i]/kwargs[i] -- presumably this class is not usable yet,
    # consistent with the docstring above; verify before relying on it
    super(BooleanParameterArray, self).__init__(name, count, BooleanParameter)
  def op3_swarm(self, cfg, cfg1, cfg2, *args, **kwargs):
    # TODO
    pass
  def op3_cross(self, cfg, cfg1, cfg2, *args, **kwargs):
    # TODO
    pass
class IntegerParameterArray(ParameterArray):
  """
  Represents an array of IntegerParameters - currently unimplemented
  """
  def __init__(self, name, min_values, max_values):
    assert len(min_values) == len(max_values)
    # NOTE(review): these keyword arguments reach the base __init__'s
    # kwargs[i] lookup (an integer key into a str-keyed dict) -- presumably
    # this class is not usable yet, consistent with the docstring above;
    # verify before relying on it
    super(IntegerParameterArray, self).__init__(name, len(min_values),
                                                IntegerParameter,
                                                min_value=min_values,
                                                max_value=max_values)
  def op3_swarm(self, cfg, cfg1, cfg2, *args, **kwargs):
    # TODO
    pass
  def op3_cross(self, cfg, cfg1, cfg2, *args, **kwargs):
    # TODO
    pass
class Array(ComplexParameter):
  """
  An interface for parameters representing an array of values.
  """
  # TODO: constraints? (upper & lower bound etc)
  def __init__(self, name, size):
    # size: number of elements in the array value; used by the crossover
    # operators below to scale their windows
    super(Array, self).__init__(name)
    self.size = size
def op3_cross(self, cfg, cfg1, cfg2, strength=0.3, *args, **kwargs):
"""
Crosses two arrays by replacing a random subsection of cfg1 with the
corresponding subsection of cfg2.The size of the chunk is a fixed fraction
of the total length, given by the strength
Behaves like a specialized 2-point crossover, where the first cut point is
random and the second cut is a set distance after.
:param cfg: the configuration to be changed
:param cfg1: the configuration being inserted into
:param cfg2: the configuration being inserted
:param strength: the size of the crossover, as a fraction of total array
length
"""
d = int(round(self.size * strength))
if d < 1:
log.debug('Crossover length too small. Cannot create new solution.')
if d >= self.size:
log.debug('Crossover length too big. Cannot create new solution.')
p1 = self.get_value(cfg1)
p2 = self.get_value(cfg2)
r = random.randint(0, len(
p1) - d) # Todo: treat path as circle i.e. allow cross-boundary cuts
p = numpy.concatenate([p1[:r], p2[r:r + d], p1[r + d:]])
self.set_value(cfg, p)
def op3_swarm(self, cfg, cfg1, cfg2, c=1, c1=0.5,
c2=0.5, velocity=0, strength=0.3, *args, **kwargs):
"""
Replacement for a particle swarm optimization iterative step for arrays.
Given a target cfg and 2 parent cfgs, probabilistically performs an
:py:meth:`op3_cross` with one of the 2 parents.
:param cfg: the configuration to be changed. Represents the cfg position
:param cfg1: a configuration to shift towards. Should be the local best
position
:param cfg2: a configuration to shift towards. Should be the global best
position
:param c: the probability of not performing a crossover
:param c1: the probability of performing a crossover with cfg1 (if a
crossover is performed)
:param c2: unused
:param velocity: the old velocity - unused
:param strength: the strength of the crossover
"""
if random.uniform(0, 1) > c:
if random.uniform(0, 1) < c1:
# Select crossover operator
self.op3_cross(cfg, cfg, cfg1, strength)
else:
self.op3_cross(cfg, cfg, cfg2, strength)
def get_value(self, config):
return self._get(config)
def set_value(self, config, value):
self._set(config, value)
class BooleanArray(Array):
    """
    Represents an array of boolean values which are either 0 or 1
    """

    def op3_swarm_parallel(self, cfg, cfg1, cfg2, c=1,
                           c1=0.5, c2=0.5, velocities=0):
        """
        Simulates a single particle swarm optimization step for each element in
        the array by updating each position and returning an array of new
        velocities.  The new velocities are given by

        .. math:: c*velocity + r1*c1*(cfg1-cfg) + r2*c2*(cfg2-cfg)

        where r1 and r2 are random values between 0 and 1, held constant
        across array elements within one call.  The new cfg positions are
        chosen randomly based on the new velocities.

        :param cfg: the configuration to be changed (current position)
        :param cfg1: a configuration to shift towards (local best position)
        :param cfg2: a configuration to shift towards (global best position)
        :param c: the weight of the current velocities
        :param c1: weight of cfg1
        :param c2: weight of cfg2
        :param velocities: the current velocities
        :return: a numpy array of new velocities
        """
        here = self.get_value(cfg)
        pull_local = (self.get_value(cfg1) - here) * c1 * random.random()
        pull_global = (self.get_value(cfg2) - here) * c2 * random.random()
        vs = velocities * c + pull_local + pull_global
        # Map velocity to continuous space with sigmoid
        ss = old_div(1, (1 + numpy.exp(-vs)))
        # Decide position randomly
        ps = (ss - numpy.random.rand(1, self.size)) > 0
        self.set_value(cfg, ps)
        return vs

    def op1_randomize(self, config):
        """
        Set this parameter's value in a configuration randomly

        :param config: the configuration to be changed
        """
        fresh = numpy.random.rand(1, self.size) > 0.5
        self._set(config, fresh)

    def seed_value(self):
        """Return a fresh uniform-random boolean array."""
        return numpy.random.rand(1, self.size) > 0.5
class FloatArray(Array):
    """
    Represents an array of float values
    """

    def __init__(self, name, size, fmax, fmin):
        super(FloatArray, self).__init__(name, size)
        # inclusive bounds applied when clamping swarm updates
        self.fmax = fmax
        self.fmin = fmin

    def op1_randomize(self, config):
        """
        Set this parameter's value in a configuration randomly

        :param config: the configuration to be changed
        """
        # delegate to seed_value(): identical uniform draw over [fmin, fmax)
        self._set(config, self.seed_value())

    def seed_value(self):
        """Return a uniform-random array drawn element-wise from [fmin, fmax)."""
        span = self.fmax - self.fmin
        return numpy.random.rand(1, self.size) * span + self.fmin

    def op3_swarm_parallel(self, cfg, cfg1, cfg2, c=1,
                           c1=0.5, c2=0.5, velocities=0):
        """
        Simulates a single particle swarm optimization step for each element in
        the array by updating each position and returning an array of new
        velocities.  The new velocity is given by

        .. math:: c*velocity + r1*c1*(cfg1-cfg) + r2*c2*(cfg2-cfg)

        where r1 and r2 are random values between 0 and 1, held constant
        across array elements within one call.  The new cfg positions are
        the old ones shifted by the new velocities, clamped to [fmin, fmax].

        :param cfg: the configuration to be changed (current position)
        :param cfg1: a configuration to shift towards (local best position)
        :param cfg2: a configuration to shift towards (global best position)
        :param c: the weight of the cfg velocities
        :param c1: weight of cfg1
        :param c2: weight of cfg2
        :param velocities: the cfg velocities
        :return: a numpy array of new velocities
        """
        here = self.get_value(cfg)
        pull_local = (self.get_value(cfg1) - here) * c1 * random.random()
        pull_global = (self.get_value(cfg2) - here) * c2 * random.random()
        vs = velocities * c + pull_local + pull_global
        pos = here + vs
        # clamp the new positions to the legal range
        pos[pos > self.fmax] = self.fmax
        pos[pos < self.fmin] = self.fmin
        self.set_value(cfg, pos)
        return vs
##################
class ManipulatorProxy(object):
    """
    Convenience wrapper pairing a configuration manipulator with one
    concrete configuration; indexing it yields per-parameter proxies.
    """

    def __init__(self, manipulator, cfg):
        self.cfg = cfg
        self.manipulator = manipulator
        # parameter-name -> parameter object mapping for this configuration
        self.params = manipulator.parameters_dict(self.cfg)

    def keys(self):
        """Return the parameter names as a list."""
        return [k for k in self.params]

    def __getitem__(self, k):
        """Return a ParameterProxy binding parameter *k* to the config."""
        return ParameterProxy(self.params[k], self.cfg)
class ParameterProxy(object):
    """
    Wraps a (parameter, configuration) pair: any method looked up on the
    proxy is invoked on the parameter with the configuration inserted as
    the first argument.  Non-callable attributes pass straight through.
    """

    def __init__(self, param, cfg):
        self.cfg = cfg
        self.param = param

    def __getattr__(self, key):
        """Equivalent of ``self.param.key(self.cfg, ...)``."""
        attr = getattr(self.param, key)
        if not callable(attr):
            # plain attribute (we should only hit this for key == 'name')
            return attr

        def call_with_cfg(*args, **kwargs):
            return attr(self.cfg, *args, **kwargs)
        return call_with_cfg
# Inspection Methods
def operators(param, num_parents):
    """
    Return the names of operator methods on *param* that take the
    specified number of input configurations.

    :param param: a Parameter class
    :param num_parents: a string giving the number of inputs required by
        the operator; one of '1', '2', '3', '4', or 'n'
    """
    return [name
            for name, _ in inspect.getmembers(param, inspect.ismethod)
            if is_operator(name, num_parents)]
def composable_operators(param, min_num_parents):
    """
    Return a list of operators for the given parameter that can be programatically composed
    with a composable technique generating min_num_parents.
    Programatically composable operators have no non-cfg arguments

    :param param: a Parameter class
    :param min_num_parents: the minimum number of parents passed to the operator
    """
    if min_num_parents < 1:
        return []
    # 'n'-ary operators accept any number of parents, so they are always
    # allowed; fixed-arity operators only up to min_num_parents
    allowed_num_parents = ['n']
    for i in range(1, 5):
        if i > min_num_parents:
            break
        allowed_num_parents.append(str(i))
    ops = []
    methods = inspect.getmembers(param, inspect.ismethod)
    for m in methods:
        name, obj = m
        # NOTE(review): inspect.getargspec is deprecated (removed in
        # Python 3.11); acceptable for the Python 2 era this file targets.
        argspec = inspect.getargspec(obj)
        # count of required (defaultless) arguments, including self
        numargs = len(argspec.args) - (len(argspec.defaults) if argspec.defaults else 0)
        for num_parents in allowed_num_parents:
            if is_operator(name, num_parents):
                if num_parents == 'n':
                    if numargs == 3:  # self, cfg, cfgs
                        ops.append(name)
                else:
                    # composable iff it needs exactly self + one cfg per parent
                    if numargs == (1 + int(num_parents)):
                        ops.append(name)
                # a method name matches at most one arity prefix; stop here
                break
    return ops
def is_operator(name, num_parents):
    """
    Tell whether a method name denotes an operator taking the specified
    number of input configurations.

    Operator methods follow the ``op<N>_<description>`` naming convention.

    :param name: the method name
    :param num_parents: a String specifying number of inputs required by the operator.
        should be one of '1', '2', '3', '4', or 'n'
    """
    # startswith() works for prefixes of any length, unlike the original
    # fixed-width ``name[:4]`` slice which silently assumed num_parents
    # was a single character.
    return name.startswith('op' + num_parents + '_')
def all_operators():
    """
    Return a dictionary mapping from parameter names to lists of operator
    function names.
    """
    result = {}
    for cls_name, cls in all_params():
        names = []
        for arity in ('1', '2', '3', '4', 'n'):
            names += operators(cls, arity)
        result[cls_name] = names
    return result
def all_params():
    # Discover every Parameter subclass defined in this module via
    # introspection; returns a list of (name, class) pairs.
    params = inspect.getmembers(sys.modules[__name__], lambda x: inspect.isclass(
        x) and x.__module__ == __name__ and issubclass(x, Parameter))
    return params
| mit |
igor-toga/knob2 | knob/clients/nova.py | 1 | 15416 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from novaclient import client as nova_client
from novaclient import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslo_utils import excutils
from retrying import retry
import six
from six.moves.urllib import parse as urlparse
import requests
from knob.common import exception
from knob.common.i18n import _
from knob.common.i18n import _LW
# module-level logger
LOG = logging.getLogger(__name__)
# Nova API (micro)version requested from python-novaclient
NOVA_API_VERSION = "2.1"
# symbolic name of this client (usage not shown in this chunk)
CLIENT_NAME = 'nova'
def retry_if_connection_err(exception):
    """Retry predicate for the ``retrying`` decorator.

    True only for transport-level ``requests.ConnectionError`` failures,
    which are worth retrying.
    """
    conn_error_type = requests.ConnectionError
    return isinstance(exception, conn_error_type)
def retry_if_result_is_false(result):
    """Retry predicate: retry while the wrapped call returned ``False``."""
    if result is False:
        return True
    return False
class NovaClient(object):
    """Wrapper around python-novaclient used to manage Knob service VMs.

    Provides keypair management, service-VM creation/removal, and a set of
    polling helpers (the ``check_*`` methods) that retry until Nova
    reaches the expected state.
    """

    # nova service name used to filter host listings in get_host().
    # NOTE(review): the original code referenced self.COMPUTE without ever
    # defining it (AttributeError at runtime); the constant matches Heat's
    # nova client plugin, from which this class appears to be ported.
    COMPUTE = 'compute'

    # transient states: a server in one of these may still become ACTIVE
    deferred_server_statuses = ['BUILD',
                                'HARD_REBOOT',
                                'PASSWORD',
                                'REBOOT',
                                'RESCUE',
                                'RESIZE',
                                'REVERT_RESIZE',
                                'SHUTOFF',
                                'SUSPENDED',
                                'VERIFY_RESIZE']

    def __init__(self, sess):
        # sess: authenticated keystone session used for every nova call
        self._client = nova_client.Client(NOVA_API_VERSION, session=sess)

    def client(self):
        """Return the underlying novaclient instance.

        :raises exception.NotFound: if the client was never initialized
        """
        if self._client is None:
            raise exception.NotFound('nova object not found')
        return self._client

    def keypair_create(self, key_name):
        """Create and return a new keypair named *key_name*."""
        client = self.client()
        key = client.keypairs.create(key_name)
        return key

    def keypair_delete(self, key_name):
        """Delete the keypair named *key_name*."""
        client = self.client()
        client.keypairs.delete(key_name)

    def create_service_vm(self, data):
        """Boot a service VM described by *data* and wait until it is ACTIVE.

        :param data: dict with keys 'name', 'image', 'flavor', 'key_name',
            'port-id' and 'security_groups'
        :returns: the new server id, or None on any failure
        """
        #nics = [{"net-id": net_id, "v4-fixed-ip": ''}]
        client = self.client()
        # verify keypair existence up front.  novaclient raises NotFound
        # for a missing key instead of returning None, so the original
        # ``is None`` check could never trigger.
        key_name = data['key_name']
        try:
            client.keypairs.get(key_name)
        except exceptions.NotFound:
            LOG.warning(_LW('Provided key with name (%(name)s)'),
                        {'name': key_name})
            return None
        image = client.images.find(name=data['image'])
        flavor = client.flavors.find(name=data['flavor'])
        try:
            nics = [{'port-id': data['port-id']}]
            server_ref = client.servers.create(
                name=data['name'],
                image=image,
                flavor=flavor,
                nics=nics,
                security_groups=[data['security_groups']],
                key_name=key_name)
        except Exception as exc:
            # The original returned None from a ``finally`` block, which
            # silently discarded the in-flight creation exception; log it
            # explicitly instead (same None result for callers).
            LOG.warning(_LW('Instance creation failed: %(ex)s'), {'ex': exc})
            return None
        if server_ref is None:
            return None
        server_id = server_ref.id
        try:
            # wait till server is ready
            status = self._check_active(server_id)
        except exception.ResourceInError as ex:
            status = False
            LOG.warning(_LW('Instance (%(server)s) not found: %(ex)s'),
                        {'server': server_id, 'ex': ex})
        except exception.ResourceUnknownStatus as ex:
            status = False
            LOG.warning(_LW('Instance (%(server)s) bad status while creating: %(ex)s'),
                        {'server': server_id, 'ex': ex})
        if status is True:
            return server_id
        else:
            return None

    def remove_service_vm(self, server_id):
        """Delete the server and wait until Nova reports it gone."""
        self.client().servers.delete(server_id)
        try:
            # wait till server is down
            self.check_delete_server_complete(server_id)
        except exception.ServiceNotFound as ex:
            LOG.warning(_LW('Instance (%(server)s) bad status while deleting: %(ex)s'),
                        {'server': server_id, 'ex': ex})
        # lazy %-args instead of eager string interpolation
        LOG.info('successfully removed VM with server_id: %s', server_id)
        return

    #--------------------------------------------------------------
    def is_not_found(self, ex):
        """True when *ex* is a nova NotFound error."""
        return isinstance(ex, exceptions.NotFound)

    def is_conflict(self, ex):
        """True when *ex* is a nova Conflict error."""
        return isinstance(ex, exceptions.Conflict)

    @excutils.exception_filter
    def ignore_not_found(self, ex):
        """Raises the exception unless it is a not-found."""
        return self.is_not_found(ex)

    @excutils.exception_filter
    def ignore_conflict_and_not_found(self, ex):
        """Raises the exception unless it is a conflict or not-found."""
        return self.is_conflict(ex) or self.is_not_found(ex)

    def is_unprocessable_entity(self, ex):
        """True when *ex* is an HTTP 422 Unprocessable Entity error."""
        http_status = (getattr(ex, 'http_status', None) or
                       getattr(ex, 'code', None))
        return (isinstance(ex, exceptions.ClientException) and
                http_status == 422)

    @retry(stop_max_attempt_number=max(cfg.CONF.client_retry_limit + 1, 0),
           retry_on_exception=retry_if_connection_err)
    def get_server(self, server):
        """Return fresh server object.

        Substitutes Nova's NotFound for Knob's EntityNotFound,
        to be returned to user as HTTP error.
        """
        try:
            return self.client().servers.get(server)
        except exceptions.NotFound:
            raise exception.EntityNotFound(entity='Server', name=server)

    def fetch_server(self, server_id):
        """Fetch fresh server object from Nova.

        Log warnings and return None for non-critical API errors.
        Use this method in various ``check_*_complete`` resource methods,
        where intermittent errors can be tolerated.
        """
        server = None
        try:
            server = self.client().servers.get(server_id)
        except exceptions.OverLimit as exc:
            LOG.warning(_LW("Received an OverLimit response when "
                            "fetching server (%(id)s) : %(exception)s"),
                        {'id': server_id,
                         'exception': exc})
        except exceptions.ClientException as exc:
            if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
                 (500, 503))):
                # transient server-side failures are tolerated: warn only
                LOG.warning(_LW("Received the following exception when "
                                "fetching server (%(id)s) : %(exception)s"),
                            {'id': server_id,
                             'exception': exc})
            else:
                raise
        return server

    def refresh_server(self, server):
        """Refresh *server*'s attributes in place, tolerating transient errors.

        NOTE(review): _check_active() called this method but the original
        class never defined it (lost in the port from Heat's nova plugin);
        restored here with the same error tolerance as fetch_server().
        """
        try:
            server.get()
        except exceptions.OverLimit as exc:
            LOG.warning(_LW("Received an OverLimit response when "
                            "fetching server (%(id)s) : %(exception)s"),
                        {'id': server.id,
                         'exception': exc})
        except exceptions.ClientException as exc:
            if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
                 (500, 503))):
                LOG.warning(_LW("Received the following exception when "
                                "fetching server (%(id)s) : %(exception)s"),
                            {'id': server.id,
                             'exception': exc})
            else:
                raise

    def get_ip(self, server_id, net_type, ip_version, extended_type):
        """Return the server's IP of the given type and version.

        :param net_type: network name the address must belong to
        :param ip_version: 4 or 6
        :param extended_type: the OS-EXT-IPS type, e.g. 'fixed' or 'floating'
        :returns: the address string, or None when no match exists
        """
        server = self.get_server(server_id)
        if net_type in server.addresses:
            for ip in server.addresses[net_type]:
                if ip['version'] == ip_version and \
                        ip['OS-EXT-IPS:type'] == extended_type:
                    return ip['addr']

    def get_status(self, server):
        """Return the server's status.

        :param server: server object
        :returns: status as a string
        """
        # Some clouds append extra (STATUS) strings to the status, strip it
        return server.status.split('(')[0]

    @retry(stop_max_attempt_number=cfg.CONF.max_interface_check_attempts,
           wait_fixed=500,
           retry_on_result=retry_if_result_is_false)
    def _check_active(self, server, res_name='Server'):
        """Check server status.

        Accepts both server IDs and server objects.
        Returns True if server is ACTIVE,
        raises errors when server has an ERROR or unknown status,
        returns False otherwise (the @retry decorator then polls again).

        :param res_name: name of the resource to use in the exception message
        """
        # not checking with is_uuid_like as most tests use strings e.g. '1234'
        if isinstance(server, six.string_types):
            server = self.fetch_server(server)
            if server is None:
                return False
            else:
                status = self.get_status(server)
        else:
            status = self.get_status(server)
        if status != 'ACTIVE':
            # re-read the server in case the cached object was stale
            self.refresh_server(server)
            status = self.get_status(server)
        if status in self.deferred_server_statuses:
            return False
        elif status == 'ACTIVE':
            return True
        elif status == 'ERROR':
            fault = getattr(server, 'fault', {})
            raise exception.ResourceInError(
                resource_status=status,
                status_reason=_("Message: %(message)s, Code: %(code)s") % {
                    'message': fault.get('message', _('Unknown')),
                    'code': fault.get('code', _('Unknown'))
                })
        else:
            raise exception.ResourceUnknownStatus(
                resource_status=server.status,
                result=_('%s is not active') % res_name)

    def find_flavor_by_name_or_id(self, flavor):
        """Find the specified flavor by name or id.

        :param flavor: the name of the flavor to find
        :returns: the id of :flavor:
        """
        # NOTE(review): the original passed self.context.tenant_id, but this
        # class keeps no request context (AttributeError at runtime).  The
        # tenant id is only a memoization key placeholder (see
        # _find_flavor_id), so None is a safe substitute.
        return self._find_flavor_id(None, flavor)

    def _find_flavor_id(self, tenant_id, flavor):
        # tenant id in the signature is used for the memoization key,
        # that would differentiate similar resource names across tenants.
        return self.get_flavor(flavor).id

    def get_flavor(self, flavor_identifier):
        """Get the flavor object for the specified flavor name or id.

        :param flavor_identifier: the name or id of the flavor to find
        :returns: a flavor object with name or id :flavor:
        """
        try:
            flavor = self.client().flavors.get(flavor_identifier)
        except exceptions.NotFound:
            # not an id; fall back to a lookup by name
            flavor = self.client().flavors.find(name=flavor_identifier)
        return flavor

    def get_host(self, host_name):
        """Get the host id specified by name.

        :param host_name: the name of host to find
        :returns: the list of match hosts
        :raises: exception.EntityNotFound
        """
        host_list = self.client().hosts.list()
        for host in host_list:
            if host.host_name == host_name and host.service == self.COMPUTE:
                return host
        raise exception.EntityNotFound(entity='Host', name=host_name)

    def get_keypair(self, key_name):
        """Get the public key specified by :key_name:

        :param key_name: the name of the key to look for
        :returns: the keypair (name, public_key) for :key_name:
        :raises: exception.EntityNotFound
        """
        try:
            return self.client().keypairs.get(key_name)
        except exceptions.NotFound:
            raise exception.EntityNotFound(entity='Key', name=key_name)

    @retry(stop_max_attempt_number=cfg.CONF.max_interface_check_attempts,
           wait_fixed=500,
           retry_on_result=retry_if_result_is_false)
    def check_delete_server_complete(self, server_id):
        """Wait for server to disappear from Nova."""
        try:
            server = self.fetch_server(server_id)
        except Exception as exc:
            # a NotFound here means the server is already gone: success
            self.ignore_not_found(exc)
            return True
        if not server:
            return False
        task_state_in_nova = getattr(server, 'OS-EXT-STS:task_state', None)
        # the status of server won't change until the delete task has done
        if task_state_in_nova == 'deleting':
            return False
        status = self.get_status(server)
        if status in ("DELETED", "SOFT_DELETED"):
            return True
        if status == 'ERROR':
            fault = getattr(server, 'fault', {})
            message = fault.get('message', 'Unknown')
            code = fault.get('code')
            errmsg = _("Server %(name)s delete failed: (%(code)s) "
                       "%(message)s") % dict(name=server.name,
                                             code=code,
                                             message=message)
            raise exception.ServiceNotFound(resource_status=status,
                                            status_reason=errmsg)
        return False

    def rename(self, server, name):
        """Update the name for a server."""
        # NOTE(review): novaclient's Server.update signature is
        # update(name=None); confirm positional use is intended here.
        server.update(name)

    def server_to_ipaddress(self, server):
        """Return the server's IP address, fetching it from Nova."""
        try:
            server = self.client().servers.get(server)
        except exceptions.NotFound as ex:
            LOG.warning(_LW('Instance (%(server)s) not found: %(ex)s'),
                        {'server': server, 'ex': ex})
        else:
            # prefer the lexically-last network; return its first address
            for n in sorted(server.networks, reverse=True):
                if len(server.networks[n]) > 0:
                    return server.networks[n][0]

    @retry(stop_max_attempt_number=max(cfg.CONF.client_retry_limit + 1, 0),
           retry_on_exception=retry_if_connection_err)
    def absolute_limits(self):
        """Return the absolute limits as a dictionary."""
        limits = self.client().limits.get()
        return {limit.name: limit.value for limit in list(limits.absolute)}

    def interface_detach(self, server_id, port_id):
        """Detach *port_id* from the server; False when the server is gone."""
        server = self.fetch_server(server_id)
        if server:
            server.interface_detach(port_id)
            return True
        else:
            return False

    def interface_attach(self, server_id, port_id=None, net_id=None, fip=None):
        """Attach an interface to the server; False when the server is gone."""
        server = self.fetch_server(server_id)
        if server:
            server.interface_attach(port_id, net_id, fip)
            return True
        else:
            return False

    @retry(stop_max_attempt_number=cfg.CONF.max_interface_check_attempts,
           wait_fixed=500,
           retry_on_result=retry_if_result_is_false)
    def check_interface_detach(self, server_id, port_id):
        """Poll until *port_id* no longer appears on the server's interfaces."""
        server = self.fetch_server(server_id)
        if server:
            interfaces = server.interface_list()
            for iface in interfaces:
                if iface.port_id == port_id:
                    return False
        return True

    @retry(stop_max_attempt_number=cfg.CONF.max_interface_check_attempts,
           wait_fixed=500,
           retry_on_result=retry_if_result_is_false)
    def check_interface_attach(self, server_id, port_id):
        """Poll until *port_id* appears among the server's interfaces."""
        server = self.fetch_server(server_id)
        if server:
            interfaces = server.interface_list()
            for iface in interfaces:
                if iface.port_id == port_id:
                    return True
        return False

    def _list_extensions(self):
        # set of alias strings for all extensions Nova advertises
        extensions = self.client().list_extensions.show_all()
        return set(extension.alias for extension in extensions)

    def has_extension(self, alias):
        """Check if specific extension is present."""
        return alias in self._list_extensions()
| apache-2.0 |
duyetdev/openerp-6.1.1 | openerp/addons/procurement/wizard/schedulers_all.py | 9 | 2927 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import threading
import pooler
from osv import osv, fields
class procurement_compute_all(osv.osv_memory):
    """Wizard that runs every procurement scheduler in a background thread."""
    _name = 'procurement.order.compute.all'
    _description = 'Compute all schedulers'

    _columns = {
        'automatic': fields.boolean('Automatic orderpoint',help='Triggers an automatic procurement for all products that have a virtual stock under 0. You should probably not use this option, we suggest using a MTO configuration on products.'),
    }

    _defaults = {
        'automatic': lambda *a: False,
    }

    def _procure_calculation_all(self, cr, uid, ids, context=None):
        """Run the procurement scheduler for each selected wizard record.

        Executed in a background thread (see :meth:`procure_calculation`).

        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        proc_obj = self.pool.get('procurement.order')
        #As this function is in a new thread, i need to open a new cursor, because the old one may be closed
        new_cr = pooler.get_db(cr.dbname).cursor()
        try:
            for proc in self.browse(new_cr, uid, ids, context=context):
                proc_obj.run_scheduler(new_cr, uid, automatic=proc.automatic,
                                       use_new_cursor=new_cr.dbname,
                                       context=context)
        finally:
            # close the new cursor even if the scheduler raises; the
            # original leaked it on error
            new_cr.close()
        return {}

    def procure_calculation(self, cr, uid, ids, context=None):
        """Kick off the scheduler run in a background thread and close the
        wizard window immediately.

        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        threaded_calculation = threading.Thread(
            target=self._procure_calculation_all,
            args=(cr, uid, ids, context))
        threaded_calculation.start()
        return {'type': 'ir.actions.act_window_close'}
procurement_compute_all()
| agpl-3.0 |
steedos/odoo7 | openerp/addons/purchase/purchase.py | 7 | 67983 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import pytz
from openerp import SUPERUSER_ID
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
    # Function-field getter: compute untaxed / tax / total amounts per
    # order, rounded in the order's pricelist currency.
    res = {}
    cur_obj=self.pool.get('res.currency')
    for order in self.browse(cr, uid, ids, context=context):
        res[order.id] = {
            'amount_untaxed': 0.0,
            'amount_tax': 0.0,
            'amount_total': 0.0,
        }
        # val accumulates tax, val1 accumulates the untaxed subtotal
        val = val1 = 0.0
        cur = order.pricelist_id.currency_id
        for line in order.order_line:
            val1 += line.price_subtotal
            # compute_all() returns the per-tax amounts for the line
            for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
                val += c.get('amount', 0.0)
        res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
        res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
        res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
    return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
    """Inverse of the ``minimum_planned_date`` function field.

    Pushes *value* down to every order line whose planned date is equal to
    the current minimum or older, then stores it on the order itself.

    :returns: False when no value is given (nothing written), True otherwise
    """
    if not value:
        return False
    # the ORM may pass a single id or a list of ids
    if not isinstance(ids, list):
        ids = [ids]
    for po in self.browse(cr, uid, ids, context=context):
        if po.order_line:
            # parameterized query: values are bound, never string-formatted
            cr.execute("""update purchase_order_line set
                    date_planned=%s
                where
                    order_id=%s and
                    (date_planned=%s or date_planned<%s)""",
                       (value, po.id, po.minimum_planned_date, value))
        cr.execute("""update purchase_order set
                minimum_planned_date=%s where id=%s""", (value, po.id))
    return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
    """Function-field getter: the earliest ``date_planned`` among the
    order's lines, or False for orders without lines."""
    res = {}
    for purchase in self.browse(cr, uid, ids, context=context):
        if purchase.order_line:
            # min() replaces the original hand-rolled minimum scan
            res[purchase.id] = min(line.date_planned
                                   for line in purchase.order_line)
        else:
            res[purchase.id] = False
    return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
    """Percentage of the order's untaxed amount already invoiced,
    counting only invoices that are neither draft nor cancelled."""
    res = {}
    for purchase in self.browse(cursor, user, ids, context=context):
        invoiced_total = sum(inv.amount_untaxed
                             for inv in purchase.invoice_ids
                             if inv.state not in ('draft', 'cancel'))
        if purchase.amount_untaxed:
            res[purchase.id] = invoiced_total * 100.0 / purchase.amount_untaxed
        else:
            res[purchase.id] = 0.0
    return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
    # Function-field getter: percentage of the ordered quantity already
    # received, derived from the stock moves of the order's pickings.
    if not ids: return {}
    res = {}
    for id in ids:
        # accumulator per order: [received qty, total qty]
        res[id] = [0.0,0.0]
    cr.execute('''SELECT
            p.purchase_id,sum(m.product_qty), m.state
        FROM
            stock_move m
        LEFT JOIN
            stock_picking p on (p.id=m.picking_id)
        WHERE
            p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
    for oid,nbr,state in cr.fetchall():
        if state=='cancel':
            # cancelled moves count towards neither side
            continue
        if state=='done':
            res[oid][0] += nbr or 0.0
            res[oid][1] += nbr or 0.0
        else:
            res[oid][1] += nbr or 0.0
    for r in res:
        if not res[r][1]:
            res[r] = 0.0
        else:
            # received / total as a percentage
            res[r] = 100.0 * res[r][0] / res[r][1]
    return res
def _get_order(self, cr, uid, ids, context=None):
    """Given purchase.order.line ids, return the ids of their orders
    (store-trigger helper for function fields)."""
    order_ids = {}
    line_obj = self.pool.get('purchase.order.line')
    for line in line_obj.browse(cr, uid, ids, context=context):
        order_ids[line.order_id.id] = True
    return order_ids.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
    """True for orders on which every line has been invoiced."""
    res = {}
    for purchase in self.browse(cursor, user, ids, context=context):
        fully_invoiced = True
        for line in purchase.order_line:
            if not line.invoiced:
                fully_invoiced = False
                break
        res[purchase.id] = fully_invoiced
    return res
    def _get_journal(self, cr, uid, context=None):
        # Default for 'journal_id': the first 'purchase'-type journal of the
        # current company (taken from the context when set, otherwise from
        # the user's company). Returns False when no such journal exists.
        if context is None:
            context = {}
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        company_id = context.get('company_id', user.company_id.id)
        journal_obj = self.pool.get('account.journal')
        res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
                                            ('company_id', '=', company_id)],
                                                limit=1)
        return res and res[0] or False
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ Sent'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Order'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
},
}
_columns = {
'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', size=64,
help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'warehouse_id': fields.many2one('stock.warehouse', 'Destination Warehouse'),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]} ),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency",readonly=True, required=True),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="The status of the purchase order or the quotation request. A quotation is a purchase order in a 'Draft' status. Then the order has to be confirmed by the user, the status switch to 'Confirmed'. Then the supplier must confirm the order to change the status to 'Approved'. When the purchase order is paid and received, the status becomes 'Done'. If a cancel action occurs in the invoice or in the reception of goods, the status becomes in exception.", select=True),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines', states={'approved':[('readonly',True)],'done':[('readonly',True)]}),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id', 'invoice_id', 'Invoices', help="Invoices generated for a purchase order"),
'picking_ids': fields.one2many('stock.picking.in', 'purchase_id', 'Picking List', readonly=True, help="This is the list of incoming shipments that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', help="It indicates that an invoice has been paid"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control > Based on P.O. lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Bases on incoming shipments: let you create an invoice when receptions are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums",help="The total amount"),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'product_id': fields.related('order_line','product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company','Company',required=True,select=1, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
}
_defaults = {
'date_order': fields.date.context_today,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name','/')=='/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order') or '/'
order = super(purchase_order, self).create(cr, uid, vals, context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
wf_service = netsvc.LocalService("workflow")
for id in unlink_ids:
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
    def button_dummy(self, cr, uid, ids, context=None):
        # No-op button handler; presumably used by view buttons that only
        # need to trigger a client save/refresh round-trip — confirm in views.
        return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
def onchange_dest_address_id(self, cr, uid, ids, address_id):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {'warehouse_id': False}
supplier = address.browse(cr, uid, address_id)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id):
if not warehouse_id:
return {}
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id)
return {'value':{'location_id': warehouse.lot_input_id.id, 'dest_address_id': False}}
def onchange_partner_id(self, cr, uid, ids, partner_id):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position': False,
'payment_term_id': False,
}}
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'])
supplier = partner.browse(cr, uid, partner_id)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'payment_term_id': supplier.property_supplier_payment_term.id or False,
}}
    def invoice_open(self, cr, uid, ids, context=None):
        # Button handler: open the invoices linked to the given orders,
        # reusing the account module's 'action_invoice_tree2' window action.
        # Raises when no invoice exists yet.
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context=context)[0]
        inv_ids = []
        for po in self.browse(cr, uid, ids, context=context):
            inv_ids+= [invoice.id for invoice in po.invoice_ids]
        if not inv_ids:
            raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
        #choose the view_mode accordingly
        if len(inv_ids)>1:
            # several invoices: restrict the list action's domain to them
            result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
        else:
            # single invoice: open it directly in the supplier invoice form
            res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
            result['views'] = [(res and res[1] or False, 'form')]
            result['res_id'] = inv_ids and inv_ids[0] or False
        return result
def view_invoice(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing invoices of given sales order ids. It can either be a in a list or in a form view, if there is only one invoice to show.
'''
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line]})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
    def view_picking(self, cr, uid, ids, context=None):
        '''
        This function returns an action that display existing picking orders of given purchase order ids.
        '''
        mod_obj = self.pool.get('ir.model.data')
        pick_ids = []
        for po in self.browse(cr, uid, ids, context=context):
            pick_ids += [picking.id for picking in po.picking_ids]
        # reuse the stock module's incoming-shipments window action
        action_model, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree4'))
        action = self.pool.get(action_model).read(cr, uid, action_id, context=context)
        # NOTE(review): action['context'] comes from the stored action
        # definition, not user input; eval here follows the stock OpenERP
        # pattern — confirm no user-editable data reaches it.
        ctx = eval(action['context'])
        ctx.update({
            'search_default_purchase_id': ids[0]
        })
        if pick_ids and len(pick_ids) == 1:
            # exactly one picking: open it directly in its form view
            form_view_ids = [view_id for view_id, view in action['views'] if view == 'form']
            view_id = form_view_ids and form_view_ids[0] or False
            action.update({
                'views': [],
                'view_mode': 'form',
                'view_id': view_id,
                'res_id': pick_ids[0]
            })
        action.update({
            'context': ctx,
        })
        return action
    def wkf_approve_order(self, cr, uid, ids, context=None):
        # Workflow activity: mark the orders approved and stamp the approval
        # date with today's date in the user's timezone.
        self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
        return True
    def print_confirm(self,cr,uid,ids,context=None):
        # NOTE(review): stdout-only debug output, no other effect — presumably
        # a workflow-activity hook; confirm against the workflow definition.
        print "Confirmed"
    def print_double(self,cr,uid,ids,context=None):
        # NOTE(review): stdout-only debug output, no other effect — presumably
        # a workflow-activity hook; confirm against the workflow definition.
        print "double Approval"
    def print_router(self,cr,uid,ids,context=None):
        # NOTE(review): stdout-only debug output, no other effect — presumably
        # a workflow-activity hook; confirm against the workflow definition.
        print "Routed"
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
    def print_quotation(self, cr, uid, ids, context=None):
        '''
        This function prints the request for quotation and mark it as sent, so that we can see more easily the next step of the workflow
        '''
        assert len(ids) == 1, 'This option should only be used for a single id at a time'
        # move the order to the 'sent' state through the workflow first
        wf_service = netsvc.LocalService("workflow")
        wf_service.trg_validate(uid, 'purchase.order', ids[0], 'send_rfq', cr)
        datas = {
            'model': 'purchase.order',
            'ids': ids,
            'form': self.read(cr, uid, ids[0], context=context),
        }
        # client-side report action for the RFQ printout
        return {'type': 'ir.actions.report.xml', 'report_name': 'purchase.quotation', 'datas': datas, 'nodestroy': True}
#TODO: implement messages system
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not po.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
    def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
        """Return the invoice account id for a PO line: the product's expense
        account, falling back to its category's; lines without a product use
        the default expense-account property. The result is mapped through
        the order's fiscal position.

        :raises osv.except_osv: when neither the product nor its category
            define an expense account.
        """
        fiscal_obj = self.pool.get('account.fiscal.position')
        property_obj = self.pool.get('ir.property')
        if po_line.product_id:
            acc_id = po_line.product_id.property_account_expense.id
            if not acc_id:
                # fall back to the product category's expense account
                acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
                if not acc_id:
                    raise osv.except_osv(_('Error!'), _('Define expense account for this company: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
        else:
            # no product on the line: use the company default expense account
            acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context=context).id
        fpos = po_line.order_id.fiscal_position or False
        return fiscal_obj.map_account(cr, uid, fpos, acc_id)
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
}
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
wf_service = netsvc.LocalService("workflow")
for p_id in ids:
# Deleting the existing instance of workflow for PO
wf_service.trg_delete(uid, 'purchase.order', p_id, cr)
wf_service.trg_create(uid, 'purchase.order', p_id, cr)
return True
    def action_invoice_create(self, cr, uid, ids, context=None):
        """Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
        :param ids: list of ids of purchase orders.
        :return: ID of created invoice.
        :rtype: int
        """
        if context is None:
            context = {}
        journal_obj = self.pool.get('account.journal')
        inv_obj = self.pool.get('account.invoice')
        inv_line_obj = self.pool.get('account.invoice.line')
        res = False
        uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        for order in self.browse(cr, uid, ids, context=context):
            # reset any forced company left over from the previous iteration
            context.pop('force_company', None)
            if order.company_id.id != uid_company_id:
                #if the company of the document is different than the current user company, force the company in the context
                #then re-do a browse to read the property fields for the good company.
                context['force_company'] = order.company_id.id
                order = self.browse(cr, uid, order.id, context=context)
            pay_acc_id = order.partner_id.property_account_payable.id
            journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase'), ('company_id', '=', order.company_id.id)], limit=1)
            if not journal_ids:
                raise osv.except_osv(_('Error!'),
                    _('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
            # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
            inv_lines = []
            for po_line in order.order_line:
                acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
                inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
                inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
                inv_lines.append(inv_line_id)
                po_line.write({'invoice_lines': [(4, inv_line_id)]}, context=context)
            # get invoice data and create invoice
            inv_data = {
                'name': order.partner_ref or order.name,
                'reference': order.partner_ref or order.name,
                'account_id': pay_acc_id,
                'type': 'in_invoice',
                'partner_id': order.partner_id.id,
                'currency_id': order.pricelist_id.currency_id.id,
                'journal_id': len(journal_ids) and journal_ids[0] or False,
                'invoice_line': [(6, 0, inv_lines)],
                'origin': order.name,
                'fiscal_position': order.fiscal_position.id or False,
                'payment_term': order.payment_term_id.id or False,
                'company_id': order.company_id.id,
            }
            inv_id = inv_obj.create(cr, uid, inv_data, context=context)
            # compute the invoice
            inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
            # Link this new invoice to related purchase order
            order.write({'invoice_ids': [(4, inv_id)]}, context=context)
            # when several orders are passed, only the LAST invoice id is returned
            res = inv_id
        return res
    def invoice_done(self, cr, uid, ids, context=None):
        # Workflow activity: put the orders (back) in the 'approved' state —
        # presumably after an invoice exception is resolved; confirm in the
        # purchase workflow definition.
        self.write(cr, uid, ids, {'state':'approved'}, context=context)
        return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
    def action_cancel(self, cr, uid, ids, context=None):
        # Cancel the orders: refuse when any reception or any started invoice
        # exists; otherwise cancel the linked pickings and invoices, set the
        # state to 'cancel' and fire the 'purchase_cancel' workflow signal.
        wf_service = netsvc.LocalService("workflow")
        for purchase in self.browse(cr, uid, ids, context=context):
            # validate ALL pickings first, then cancel them
            for pick in purchase.picking_ids:
                if pick.state not in ('draft','cancel'):
                    raise osv.except_osv(
                        _('Unable to cancel this purchase order.'),
                        _('First cancel all receptions related to this purchase order.'))
            for pick in purchase.picking_ids:
                wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_cancel', cr)
            for inv in purchase.invoice_ids:
                if inv and inv.state not in ('cancel','draft'):
                    raise osv.except_osv(
                        _('Unable to cancel this purchase order.'),
                        _('You must first cancel all receptions related to this purchase order.'))
                if inv:
                    wf_service.trg_validate(uid, 'account.invoice', inv.id, 'invoice_cancel', cr)
        self.write(cr,uid,ids,{'state':'cancel'})
        # name_get is only used here to iterate the ids; names are discarded
        for (id, name) in self.name_get(cr, uid, ids):
            wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
        return True
    def date_to_datetime(self, cr, uid, userdate, context=None):
        """ Convert date values expressed in user's timezone to
        server-side UTC timestamp, assuming a default arbitrary
        time of 12:00 PM (noon) - because a time is needed.
        :param str userdate: date string in user time zone
        :return: UTC datetime string for server-side use
        """
        # TODO: move to fields.datetime in server after 7.0
        user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT)
        if context and context.get('tz'):
            tz_name = context['tz']
        else:
            # fall back to the user's configured timezone (read as superuser)
            tz_name = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
        if tz_name:
            utc = pytz.timezone('UTC')
            context_tz = pytz.timezone(tz_name)
            # midnight + 12h = noon in the user's timezone; is_dst=False
            # picks the non-DST side on ambiguous local times
            user_datetime = user_date + relativedelta(hours=12.0)
            local_timestamp = context_tz.localize(user_datetime, is_dst=False)
            user_datetime = local_timestamp.astimezone(utc)
            return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        # no timezone known: return the naive date (at midnight) as-is
        return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    def _prepare_order_picking(self, cr, uid, order, context=None):
        """Build the values dict for the incoming stock.picking of *order*."""
        return {
            'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),
            'origin': order.name + ((order.origin and (':' + order.origin)) or ''),
            'date': self.date_to_datetime(cr, uid, order.date_order, context),
            'partner_id': order.partner_id.id,
            # invoice on reception only for the 'picking' invoicing policy
            'invoice_state': '2binvoiced' if order.invoice_method == 'picking' else 'none',
            'type': 'in',
            'purchase_id': order.id,
            'company_id': order.company_id.id,
            'move_lines' : [],
        }
    def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):
        """Build the values dict for the stock.move generated from
        *order_line* and attached to picking *picking_id*."""
        return {
            'name': order_line.name or '',
            'product_id': order_line.product_id.id,
            'product_qty': order_line.product_qty,
            'product_uos_qty': order_line.product_qty,
            'product_uom': order_line.product_uom.id,
            # UoS falls back to the purchase UoM here
            'product_uos': order_line.product_uom.id,
            'date': self.date_to_datetime(cr, uid, order.date_order, context),
            'date_expected': self.date_to_datetime(cr, uid, order_line.date_planned, context),
            # from the supplier's stock location to the order's destination
            'location_id': order.partner_id.property_stock_supplier.id,
            'location_dest_id': order.location_id.id,
            'picking_id': picking_id,
            'partner_id': order.dest_address_id.id or order.partner_id.id,
            'move_dest_id': order_line.move_dest_id.id,
            'state': 'draft',
            'type':'in',
            'purchase_line_id': order_line.id,
            'company_id': order.company_id.id,
            'price_unit': order_line.price_unit
        }
    def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
        """Creates pickings and appropriate stock moves for given order lines, then
        confirms the moves, makes them available, and confirms the picking.
        If ``picking_id`` is provided, the stock moves will be added to it, otherwise
        a standard outgoing picking will be created to wrap the stock moves, as returned
        by :meth:`~._prepare_order_picking`.
        Modules that wish to customize the procurements or partition the stock moves over
        multiple stock pickings may override this method and call ``super()`` with
        different subsets of ``order_lines`` and/or preset ``picking_id`` values.
        :param browse_record order: purchase order to which the order lines belong
        :param list(browse_record) order_lines: purchase order line records for which picking
                                                and moves should be created.
        :param int picking_id: optional ID of a stock picking to which the created stock moves
                               will be added. A new picking will be created if omitted.
        :return: list of IDs of pickings used/created for the given order lines (usually just one)
        """
        if not picking_id:
            picking_id = self.pool.get('stock.picking').create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
        todo_moves = []
        stock_move = self.pool.get('stock.move')
        wf_service = netsvc.LocalService("workflow")
        for order_line in order_lines:
            # lines without a product produce no stock move
            if not order_line.product_id:
                continue
            # only stockable/consumable products are physically received
            if order_line.product_id.type in ('product', 'consu'):
                move = stock_move.create(cr, uid, self._prepare_order_line_move(cr, uid, order, order_line, picking_id, context=context))
                if order_line.move_dest_id and order_line.move_dest_id.state != 'done':
                    # retarget the chained move so it starts from the PO destination
                    order_line.move_dest_id.write({'location_id': order.location_id.id})
                todo_moves.append(move)
        stock_move.action_confirm(cr, uid, todo_moves)
        stock_move.force_assign(cr, uid, todo_moves)
        wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
        return [picking_id]
def action_picking_create(self, cr, uid, ids, context=None):
picking_ids = []
for order in self.browse(cr, uid, ids):
picking_ids.extend(self._create_pickings(cr, uid, order, order.order_line, None, context=context))
# Must return one unique picking ID: the one to connect in the subflow of the purchase order.
# In case of multiple (split) pickings, we should return the ID of the critical one, i.e. the
# one that should trigger the advancement of the purchase workflow.
# By default we will consider the first one as most important, but this behavior can be overridden.
return picking_ids[0] if picking_ids else False
    def picking_done(self, cr, uid, ids, context=None):
        # Workflow activity: flag the orders as fully received and keep/put
        # them in the 'approved' state.
        self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
        return True
    def copy(self, cr, uid, id, default=None, context=None):
        # Duplicate as a fresh draft order: clear state-dependent data
        # (reception/invoice links and flags, supplier reference) and assign
        # a new sequence number.
        if not default:
            default = {}
        default.update({
            'state':'draft',
            'shipped':False,
            'invoiced':False,
            'invoice_ids': [],
            'picking_ids': [],
            'partner_ref': '',
            'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order'),
        })
        return super(purchase_order, self).copy(cr, uid, id, default, context)
    def do_merge(self, cr, uid, ids, context=None):
        """
        To merge similar type of purchase orders.
        Orders will only be merged if:
        * Purchase Orders are in draft
        * Purchase Orders belong to the same partner
        * Purchase Orders are have same stock location, same pricelist
        Lines will only be merged if:
        * Order lines are exactly the same except for the quantity and unit
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: the ID or list of IDs
        @param context: A standard dictionary
        @return: new purchase order id
        """
        #TOFIX: merged order line should be unlink
        wf_service = netsvc.LocalService("workflow")
        def make_key(br, fields):
            # Build a hashable, order-independent identity for a record from
            # the given fields, so equal orders/lines group under one dict key.
            list_key = []
            for field in fields:
                field_val = getattr(br, field)
                if field in ('product_id', 'move_dest_id', 'account_analytic_id'):
                    if not field_val:
                        field_val = False
                if isinstance(field_val, browse_record):
                    field_val = field_val.id
                elif isinstance(field_val, browse_null):
                    field_val = False
                elif isinstance(field_val, list):
                    field_val = ((6, 0, tuple([v.id for v in field_val])),)
                list_key.append((field, field_val))
            list_key.sort()
            return tuple(list_key)
        # Compute what the new orders should contain
        new_orders = {}
        # only draft orders participate in a merge
        for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
            order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id'))
            new_order = new_orders.setdefault(order_key, ({}, []))
            new_order[1].append(porder.id)
            order_infos = new_order[0]
            if not order_infos:
                # first order of the group: seed the merged order's header
                order_infos.update({
                    'origin': porder.origin,
                    'date_order': porder.date_order,
                    'partner_id': porder.partner_id.id,
                    'dest_address_id': porder.dest_address_id.id,
                    'warehouse_id': porder.warehouse_id.id,
                    'location_id': porder.location_id.id,
                    'pricelist_id': porder.pricelist_id.id,
                    'state': 'draft',
                    'order_line': {},
                    'notes': '%s' % (porder.notes or '',),
                    'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
                })
            else:
                # later orders: keep the earliest date, concatenate notes/origins
                if porder.date_order < order_infos['date_order']:
                    order_infos['date_order'] = porder.date_order
                if porder.notes:
                    order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
                if porder.origin:
                    order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
            for order_line in porder.order_line:
                line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'product_id', 'move_dest_id', 'account_analytic_id'))
                o_line = order_infos['order_line'].setdefault(line_key, {})
                if o_line:
                    # merge the line with an existing line
                    o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']
                else:
                    # append a new "standalone" line
                    for field in ('product_qty', 'product_uom'):
                        field_val = getattr(order_line, field)
                        if isinstance(field_val, browse_record):
                            field_val = field_val.id
                        o_line[field] = field_val
                    o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0
        allorders = []
        orders_info = {}
        for order_key, (order_data, old_ids) in new_orders.iteritems():
            # skip merges with only one order
            if len(old_ids) < 2:
                allorders += (old_ids or [])
                continue
            # cleanup order line data
            for key, value in order_data['order_line'].iteritems():
                del value['uom_factor']
                value.update(dict(key))
            order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]
            # create the new order
            neworder_id = self.create(cr, uid, order_data)
            orders_info.update({neworder_id: old_ids})
            allorders.append(neworder_id)
            # make triggers pointing to the old orders point to the new order
            for old_id in old_ids:
                wf_service.trg_redirect(uid, 'purchase.order', old_id, neworder_id, cr)
                wf_service.trg_validate(uid, 'purchase.order', old_id, 'purchase_cancel', cr)
        return orders_info
class purchase_order_line(osv.osv):
    def _amount_line(self, cr, uid, ids, prop, arg, context=None):
        # Function field getter for 'price_subtotal': run the tax engine on
        # the line and round the result in the order's pricelist currency.
        # NOTE(review): taxes['total'] looks like the tax-excluded total —
        # confirm against account.tax.compute_all.
        res = {}
        cur_obj=self.pool.get('res.currency')
        tax_obj = self.pool.get('account.tax')
        for line in self.browse(cr, uid, ids, context=context):
            taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)
            cur = line.order_id.pricelist_id.currency_id
            res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
        return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.date('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'move_dest_id': fields.many2one('stock.move', 'Reservation Destination', ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', required=True, readonly=True,
help=' * The \'Draft\' status is set automatically when purchase order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when purchase order in confirm status. \
\n* The \'Done\' status is set automatically when purchase order is set as done. \
\n* The \'Cancelled\' status is set automatically when user cancel purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
'invoiced': fields.boolean('Invoiced', readonly=True),
'partner_id': fields.related('order_id','partner_id',string='Partner',readonly=True,type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id','date_order',string='Order Date',readonly=True,type="date")
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'state':'draft', 'move_ids':[],'invoiced':0,'invoice_lines':[]})
return super(purchase_order_line, self).copy_data(cr, uid, id, default, context)
def unlink(self, cr, uid, ids, context=None):
procurement_ids_to_cancel = []
for line in self.browse(cr, uid, ids, context=context):
if line.move_dest_id:
procurement_ids_to_cancel.extend(procurement.id for procurement in line.move_dest_id.procurements)
if procurement_ids_to_cancel:
self.pool['procurement.order'].action_cancel(cr, uid, procurement_ids_to_cancel)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order, as a string in
DEFAULT_SERVER_DATE_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(days=supplier_delay)
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
res = [user for user in group_uom.users if user.id == uid]
return len(res) and True or False
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_supplierinfo = self.pool.get('product.supplierinfo')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
#call name_get() with partner in the context to eventually match name and description in the seller_ids field
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.date.context_today(self,cr,uid,context=context)
supplierinfo = False
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
if (qty or 0.0) < min_qty: # If the supplier quantity is greater than entered from user, set minimal.
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
# - determine price_unit and taxes_id
if pricelist_id:
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order})[pricelist_id]
else:
price = product.standard_price
taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
purchase_order_line()
class procurement_order(osv.osv):
    _inherit = 'procurement.order'
    _columns = {
        # Purchase order generated to fulfil this procurement (see make_po).
        'purchase_id': fields.many2one('purchase.order', 'Purchase Order'),
    }
    def check_buy(self, cr, uid, ids, context=None):
        ''' return True if the supply method of the mto product is 'buy'
        '''
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        # NOTE(review): 'user' is unused here; kept as-is to preserve behavior.
        for procurement in self.browse(cr, uid, ids, context=context):
            if procurement.product_id.supply_method <> 'buy':
                return False
        return True
    def check_supplier_info(self, cr, uid, ids, context=None):
        """Workflow check: each procurement's product must have supplier
        records, a default supplier, and a delivery address for that
        supplier. On failure the reason is stored in the procurement's
        'message' field (direct SQL update) and False is returned."""
        partner_obj = self.pool.get('res.partner')
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        for procurement in self.browse(cr, uid, ids, context=context):
            message = ''
            partner = procurement.product_id.seller_id #Taken Main Supplier of Product of Procurement.
            if not procurement.product_id.seller_ids:
                message = _('No supplier defined for this product !')
            elif not partner:
                message = _('No default supplier defined for this product')
            elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
                message = _('No address defined for the supplier')
            if message:
                if procurement.message != message:
                    cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
                return False
            if user.company_id and user.company_id.partner_id:
                # Guard against a product whose supplier is the user's own company.
                if partner.id == user.company_id.partner_id.id:
                    raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!' % procurement.product_id.name))
        return True
    def action_po_assign(self, cr, uid, ids, context=None):
        """ This is action which call from workflow to assign purchase order to procurements
        @return: True
        """
        res = self.make_po(cr, uid, ids, context=context)
        res = res.values()
        return len(res) and res[0] or 0 #TO CHECK: why workflow is generated error if return not integer value
    def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
        """Create the purchase order from the procurement, using
        the provided field values, after adding the given purchase
        order line in the purchase order.

        :params procurement: the procurement object generating the purchase order
        :params dict po_vals: field values for the new purchase order (the
            ``order_line`` field will be overwritten with one
            single line, as passed in ``line_vals``).
        :params dict line_vals: field values of the single purchase order line that
            the purchase order will contain.
        :return: id of the newly created purchase order
        :rtype: int
        """
        po_vals.update({'order_line': [(0,0,line_vals)]})
        return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
    def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
        """Return the datetime value to use as Schedule Date (``date_planned``) for the
        Purchase Order Lines created to satisfy the given procurement.

        :param browse_record procurement: the procurement for which a PO will be created.
        :param browse_report company: the company to which the new PO will belong to.
        :rtype: datetime
        :return: the desired Schedule Date for the PO lines
        """
        procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
        # Subtract the company's purchase lead time from the procurement date.
        schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
        return schedule_date
    def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
        """Return the datetime value to use as Order Date (``date_order``) for the
        Purchase Order created to satisfy the given procurement.

        :param browse_record procurement: the procurement for which a PO will be created.
        :param browse_report company: the company to which the new PO will belong to.
        :param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
        :rtype: datetime
        :return: the desired Order Date for the PO
        """
        seller_delay = int(procurement.product_id.seller_delay)
        return schedule_date - relativedelta(days=seller_delay)
    def _get_warehouse(self, procurement, user_company):
        """
        Return the warehouse containing the procurment stock location (or one of it ancestors)
        If none match, returns then first warehouse of the company
        """
        # TODO refactor the domain once we implement the "parent_of" domain operator
        # NOTE This method has been copied in the `purchase_requisition` module to ensure
        # retro-compatibility. This code duplication will be deleted in next stable version.
        # Do not forget to update both version in case of modification.
        company_id = (procurement.company_id or user_company).id
        domains = [
            [
                '&', ('company_id', '=', company_id),
                '|', '&', ('lot_stock_id.parent_left', '<', procurement.location_id.parent_left),
                          ('lot_stock_id.parent_right', '>', procurement.location_id.parent_right),
                     ('lot_stock_id', '=', procurement.location_id.id)
            ],
            [('company_id', '=', company_id)]
        ]
        cr, uid = procurement._cr, procurement._uid
        context = procurement._context
        Warehouse = self.pool['stock.warehouse']
        # Try the ancestor-matching domain first, then fall back to any
        # warehouse of the company.
        for domain in domains:
            ids = Warehouse.search(cr, uid, domain, context=context)
            if ids:
                return ids[0]
        return False
    def make_po(self, cr, uid, ids, context=None):
        """ Make purchase order from procurement
        @return: New created Purchase Orders procurement wise
        """
        res = {}
        if context is None:
            context = {}
        company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
        partner_obj = self.pool.get('res.partner')
        uom_obj = self.pool.get('product.uom')
        pricelist_obj = self.pool.get('product.pricelist')
        prod_obj = self.pool.get('product.product')
        acc_pos_obj = self.pool.get('account.fiscal.position')
        seq_obj = self.pool.get('ir.sequence')
        for procurement in self.browse(cr, uid, ids, context=context):
            res_id = procurement.move_id.id
            partner = procurement.product_id.seller_id # Taken Main Supplier of Product of Procurement.
            seller_qty = procurement.product_id.seller_qty
            partner_id = partner.id
            # NOTE(review): 'address_id' is computed but never used below.
            address_id = partner_obj.address_get(cr, uid, [partner_id], ['delivery'])['delivery']
            pricelist_id = partner.property_product_pricelist_purchase.id
            uom_id = procurement.product_id.uom_po_id.id
            # Convert the procured quantity into the purchase UoM.
            qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
            if seller_qty:
                # Never order less than the supplier's minimal quantity.
                qty = max(qty,seller_qty)
            price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner_id, {'uom': uom_id})[pricelist_id]
            schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
            purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
            #Passing partner_id to context for purchase order line integrity of Line name
            new_context = context.copy()
            new_context.update({'lang': partner.lang, 'partner_id': partner_id})
            product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
            taxes_ids = procurement.product_id.supplier_taxes_id
            # Map the product's supplier taxes through the supplier's fiscal position.
            taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position, taxes_ids)
            name = product.partner_ref
            if product.description_purchase:
                name += '\n'+ product.description_purchase
            line_vals = {
                'name': name,
                'product_qty': qty,
                'product_id': procurement.product_id.id,
                'product_uom': uom_id,
                'price_unit': price or 0.0,
                'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                'move_dest_id': res_id,
                'taxes_id': [(6,0,taxes)],
            }
            name = seq_obj.get(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
            po_vals = {
                'name': name,
                'origin': procurement.origin,
                'partner_id': partner_id,
                'location_id': procurement.location_id.id,
                'warehouse_id': self._get_warehouse(procurement, company),
                'pricelist_id': pricelist_id,
                'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                'company_id': procurement.company_id.id,
                'fiscal_position': partner.property_account_position and partner.property_account_position.id or False,
                'payment_term_id': partner.property_supplier_payment_term.id or False,
            }
            res[procurement.id] = self.create_procurement_purchase_order(cr, uid, procurement, po_vals, line_vals, context=new_context)
            self.write(cr, uid, [procurement.id], {'state': 'running', 'purchase_id': res[procurement.id]})
            self.message_post(cr, uid, ids, body=_("Draft Purchase Order created"), context=context)
        return res
    def _product_virtual_get(self, cr, uid, order_point):
        # Skip (return None) for order points whose procurement is already
        # covered by a pending (draft/confirmed) purchase order.
        procurement = order_point.procurement_id
        if procurement and procurement.state != 'exception' and procurement.purchase_id and procurement.purchase_id.state in ('draft', 'confirmed'):
            return None
        return super(procurement_order, self)._product_virtual_get(cr, uid, order_point)
class mail_mail(osv.Model):
    _name = 'mail.mail'
    _inherit = 'mail.mail'

    def _postprocess_sent_message(self, cr, uid, mail, context=None):
        """After a mail related to a purchase order is sent, push the order's
        workflow to 'send_rfq', then run the standard post-processing."""
        if mail.model == 'purchase.order':
            netsvc.LocalService("workflow").trg_validate(
                uid, 'purchase.order', mail.res_id, 'send_rfq', cr)
        return super(mail_mail, self)._postprocess_sent_message(
            cr, uid, mail=mail, context=context)
class product_template(osv.Model):
    _name = 'product.template'
    _inherit = 'product.template'
    # Extend product templates with a purchasability flag used by the
    # purchase order line product domain.
    _columns = {
        'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
    }
    _defaults = {
        'purchase_ok': 1,  # products are purchasable by default
    }
class mail_compose_message(osv.Model):
    _inherit = 'mail.compose.message'

    def send_mail(self, cr, uid, ids, context=None):
        """When the composer is opened on a purchase order, signal the
        order's workflow with 'send_rfq' and make the sender auto-follow,
        then delegate to the standard composer."""
        context = context or {}
        composing_po = (context.get('default_model') == 'purchase.order'
                        and context.get('default_res_id'))
        if composing_po:
            context = dict(context, mail_post_autofollow=True)
            netsvc.LocalService("workflow").trg_validate(
                uid, 'purchase.order', context['default_res_id'], 'send_rfq', cr)
        return super(mail_compose_message, self).send_mail(
            cr, uid, ids, context=context)
class account_invoice(osv.Model):
    """Validation hook: flag matching purchase order lines as invoiced and
    notify the purchase workflow when a supplier invoice is validated."""
    _inherit = 'account.invoice'

    def invoice_validate(self, cr, uid, ids, context=None):
        """On invoice validation, mark every purchase order line whose
        invoice lines are all in a non-draft/non-cancel state as invoiced,
        and fire a workflow write trigger on the related orders."""
        res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
        purchase_order_obj = self.pool.get('purchase.order')
        # read access on purchase.order object is not required
        if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
            user_id = SUPERUSER_ID
        else:
            user_id = uid
        po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
        wf_service = netsvc.LocalService("workflow")
        # Fix: browse with the same (possibly elevated) user id as the
        # search above; browsing with ``uid`` would raise an access error
        # for users without read rights, defeating the elevation.
        for order in purchase_order_obj.browse(cr, user_id, po_ids, context=context):
            # Signal purchase order workflow that an invoice has been validated.
            invoiced = []
            for po_line in order.order_line:
                if any(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines):
                    invoiced.append(po_line.id)
            if invoiced:
                self.pool['purchase.order.line'].write(cr, uid, invoiced, {'invoiced': True})
            wf_service.trg_write(uid, 'purchase.order', order.id, cr)
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aclifton/cpeg853-gem5 | ext/ply/test/lex_object.py | 174 | 1141 | # -----------------------------------------------------------------------------
# lex_object.py
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
class CalcLexer:
    # NOTE: PLY introspects this class. The ``tokens`` tuple declares the
    # token names, each ``t_<NAME>`` string attribute is that token's
    # regular expression, and each ``t_<NAME>`` method uses its docstring
    # as the regex — those docstrings are functional, not documentation.
    tokens = (
        'NAME','NUMBER',
        'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
        'LPAREN','RPAREN',
        )
    # Tokens
    t_PLUS = r'\+'
    t_MINUS = r'-'
    t_TIMES = r'\*'
    t_DIVIDE = r'/'
    t_EQUALS = r'='
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
    def t_NUMBER(self,t):
        r'\d+'
        # Convert the lexeme to an integer; fall back to 0 if the
        # conversion fails.
        try:
            t.value = int(t.value)
        except ValueError:
            print("Integer value too large %s" % t.value)
            t.value = 0
        return t
    # Characters skipped between tokens.
    t_ignore = " \t"
    def t_newline(self,t):
        r'\n+'
        # Keep line numbers accurate for error reporting.
        t.lineno += t.value.count("\n")
    def t_error(self,t):
        # Report and skip one illegal character, then resume lexing.
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)
# Instantiate the rule-holding object and hand it to PLY.
calc = CalcLexer()
# Build the lexer
lex.lex(object=calc)
# Run the lexer on a small sample input.
lex.runmain(data="3+4")
| bsd-3-clause |
tersmitten/ansible | lib/ansible/modules/cloud/azure/azure_rm_sqldatabase_facts.py | 18 | 8874 | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_sqldatabase_facts
version_added: "2.8"
short_description: Get Azure SQL Database facts.
description:
- Get facts of Azure SQL Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the database.
elastic_pool_name:
description:
- The name of the elastic pool.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Get instance of SQL Database
azure_rm_sqldatabase_facts:
resource_group: testrg
server_name: testserver
name: testdb
- name: List instances of SQL Database
azure_rm_sqldatabase_facts:
resource_group: testrg
server_name: testserver
elastic_pool_name: testep
- name: List instances of SQL Database
azure_rm_sqldatabase_facts:
resource_group: testrg
server_name: testserver
'''
RETURN = '''
databases:
description: A list of dictionaries containing facts for SQL Database.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testrg/providers/Microsoft.Sql/servers/testserver/databases/testdb
name:
description:
- Database name.
returned: always
type: str
sample: testdb
location:
description:
- Resource location.
returned: always
type: str
sample: southeastasia
tags:
description:
- Resource tags.
returned: always
type: dict
sample:
taga: aaa
tagb: bbb
sku:
description:
- The name and tier of the SKU.
returned: always
type: complex
sample: sku
contains:
name:
description:
- The name of the SKU.
returned: always
type: str
sample: BC_Gen4_2
tier:
description:
- Service tier.
returned: always
type: str
sample: BusinessCritical
capacity:
description:
- Capacity.
returned: always
type: int
sample: 2
kind:
description:
- Kind of database. This is metadata used for the Azure portal experience.
returned: always
type: str
sample: v12.0,user
collation:
description:
- The collation of the database.
returned: always
type: str
sample: SQL_Latin1_General_CP1_CI_AS
status:
description:
- The status of the database.
returned: always
type: str
sample: Online
zone_redundant:
description:
- Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones.
returned: always
type: bool
sample: true
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
# The Azure SDK may not be installed; a missing SDK is reported to the
# user by the common Azure module base, so the ImportError is swallowed.
try:
    from msrestazure.azure_exceptions import CloudError
    from azure.mgmt.sql import SqlManagementClient
    from msrest.serialization import Model
except ImportError:
    # This is handled in azure_rm_common
    pass
class AzureRMSqlDatabaseFacts(AzureRMModuleBase):
    """Facts module: fetch one SQL database, the databases of an elastic
    pool, or all databases of a server, optionally filtered by tags."""
    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            ),
            elastic_pool_name=dict(
                type='str'
            ),
            tags=dict(
                type='list'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.resource_group = None
        self.server_name = None
        self.name = None
        self.elastic_pool_name = None
        self.tags = None
        super(AzureRMSqlDatabaseFacts, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Dispatch based on supplied parameters: a database name wins,
        then an elastic pool name, else list everything on the server."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.name is not None:
            self.results['databases'] = self.get()
        elif self.elastic_pool_name is not None:
            self.results['databases'] = self.list_by_elastic_pool()
        else:
            self.results['databases'] = self.list_by_server()
        return self.results

    def get(self):
        """Fetch a single database by name. A lookup failure is only
        logged (not fatal) and yields an empty list."""
        response = None
        results = []
        try:
            # self.sql_client is provided by the Azure base class.
            response = self.sql_client.databases.get(resource_group_name=self.resource_group,
                                                     server_name=self.server_name,
                                                     database_name=self.name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Could not get facts for Databases.')

        if response and self.has_tags(response.tags, self.tags):
            results.append(self.format_item(response))

        return results

    def list_by_elastic_pool(self):
        """List the databases of one elastic pool; API errors fail the
        module. Results are filtered by the requested tags."""
        response = None
        results = []
        try:
            response = self.sql_client.databases.list_by_elastic_pool(resource_group_name=self.resource_group,
                                                                      server_name=self.server_name,
                                                                      elastic_pool_name=self.elastic_pool_name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.fail('Could not get facts for Databases.')

        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_item(item))

        return results

    def list_by_server(self):
        """List all databases of the server; API errors fail the module.
        Results are filtered by the requested tags."""
        response = None
        results = []
        try:
            response = self.sql_client.databases.list_by_server(resource_group_name=self.resource_group,
                                                                server_name=self.server_name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.fail('Could not get facts for Databases.')

        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_item(item))

        return results

    def format_item(self, item):
        """Flatten an SDK response object into the documented facts dict
        (see RETURN above)."""
        d = item.as_dict()
        d = {
            'resource_group': self.resource_group,
            'id': d.get('id', None),
            'name': d.get('name', None),
            'location': d.get('location', None),
            'tags': d.get('tags', None),
            'sku': {
                'name': d.get('sku', {}).get('name', None),
                'tier': d.get('sku', {}).get('tier', None),
                'capacity': d.get('sku', {}).get('capacity', None)
            },
            'kind': d.get('kind', None),
            'collation': d.get('collation', None),
            'status': d.get('status', None),
            'zone_redundant': d.get('zone_redundant', None)
        }
        return d
def main():
    """Entry point: instantiating the facts class runs the module
    (execution is driven by the AzureRMModuleBase constructor)."""
    AzureRMSqlDatabaseFacts()


if __name__ == '__main__':
    main()
| gpl-3.0 |
mnahm5/django-estore | Lib/site-packages/boto/gs/resumable_upload_handler.py | 153 | 31419 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import errno
import httplib
import os
import random
import re
import socket
import time
import urlparse
from hashlib import md5
from boto import config, UserAgent
from boto.connection import AWSAuthConnection
from boto.exception import InvalidUriError
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
from boto.s3.keyfile import KeyFile
"""
Handler for Google Cloud Storage resumable uploads. See
http://code.google.com/apis/storage/docs/developer-guide.html#resumable
for details.
Resumable uploads will retry failed uploads, resuming at the byte
count completed by the last upload attempt. If too many retries happen with
no progress (per configurable num_retries param), the upload will be
aborted in the current process.
The caller can optionally specify a tracker_file_name param in the
ResumableUploadHandler constructor. If you do this, that file will
save the state needed to allow retrying later, in a separate process
(e.g., in a later run of gsutil).
"""
class ResumableUploadHandler(object):

    # I/O buffer size, in bytes, used when transferring upload data.
    BUFFER_SIZE = 8192
    # Exception types treated as transient (network/I/O) and retryable.
    RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
                            socket.gaierror)

    # (start, end) response indicating server has nothing (upload protocol uses
    # inclusive numbering).
    SERVER_HAS_NOTHING = (0, -1)
    def __init__(self, tracker_file_name=None, num_retries=None):
        """
        Constructor. Instantiate once for each uploaded file.

        :type tracker_file_name: string
        :param tracker_file_name: optional file name to save tracker URI.
            If supplied and the current process fails the upload, it can be
            retried in a new process. If called with an existing file containing
            a valid tracker URI, we'll resume the upload from this URI; else
            we'll start a new resumable upload (and write the URI to this
            tracker file).

        :type num_retries: int
        :param num_retries: the number of times we'll re-try a resumable upload
            making no progress. (Count resets every time we get progress, so
            upload can span many more than this number of retries.)
        """
        self.tracker_file_name = tracker_file_name
        self.num_retries = num_retries
        self.server_has_bytes = 0  # Byte count at last server check.
        self.tracker_uri = None
        if tracker_file_name:
            # Resume from a previously saved tracker URI, if the file has one.
            self._load_tracker_uri_from_file()
        # Save upload_start_point in instance state so caller can find how
        # much was transferred by this ResumableUploadHandler (across retries).
        self.upload_start_point = None
def _load_tracker_uri_from_file(self):
f = None
try:
f = open(self.tracker_file_name, 'r')
uri = f.readline().strip()
self._set_tracker_uri(uri)
except IOError as e:
# Ignore non-existent file (happens first time an upload
# is attempted on a file), but warn user for other errors.
if e.errno != errno.ENOENT:
# Will restart because self.tracker_uri is None.
print('Couldn\'t read URI tracker file (%s): %s. Restarting '
'upload from scratch.' %
(self.tracker_file_name, e.strerror))
except InvalidUriError as e:
# Warn user, but proceed (will restart because
# self.tracker_uri is None).
print('Invalid tracker URI (%s) found in URI tracker file '
'(%s). Restarting upload from scratch.' %
(uri, self.tracker_file_name))
finally:
if f:
f.close()
def _save_tracker_uri_to_file(self):
    """
    Saves URI to tracker file if one was passed to constructor.

    Raises ResumableUploadException (ABORT) if the file cannot be written.
    """
    if not self.tracker_file_name:
        return
    try:
        # 0600 permissions: the tracker URI is effectively a capability to
        # continue this upload, so keep it private to the current user.
        with os.fdopen(os.open(self.tracker_file_name,
                               os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
            f.write(self.tracker_uri)
    except IOError as e:
        # Fixed message: the two fragments previously concatenated to
        # "This can happenif you're ..." (missing separator).
        raise ResumableUploadException(
            'Couldn\'t write URI tracker file (%s): %s.\nThis can happen '
            'if you\'re using an incorrectly configured upload tool\n'
            '(e.g., gsutil configured to save tracker files to an '
            'unwritable directory)' %
            (self.tracker_file_name, e.strerror),
            ResumableTransferDisposition.ABORT)
def _set_tracker_uri(self, uri):
    """
    Called when we start a new resumable upload or get a new tracker
    URI for the upload. Saves URI and resets upload state.

    Raises InvalidUriError if URI is syntactically invalid.
    """
    parsed = urlparse.urlparse(uri)
    scheme_ok = parsed.scheme.lower() in ('http', 'https')
    if not (scheme_ok and parsed.netloc):
        raise InvalidUriError('Invalid tracker URI (%s)' % uri)
    self.tracker_uri = uri
    self.tracker_uri_host = parsed.netloc
    self.tracker_uri_path = '%s?%s' % (parsed.path, parsed.query)
    # A new tracker URI means the server state is unknown again.
    self.server_has_bytes = 0
def get_tracker_uri(self):
    """Return the upload tracker URI, or None before the upload starts."""
    return self.tracker_uri
def get_upload_id(self):
    """
    Returns the upload ID for the resumable upload, or None if the upload
    has not yet started.
    """
    # The upload_id is parsed out of the tracker URI rather than taken
    # from response headers: when the URI was loaded from a tracker file
    # there are no headers to consult, so this parsing is needed anyway.
    marker = '?upload_id='
    uri = self.tracker_uri
    if not uri or marker not in uri:
        return None
    return uri.split(marker, 1)[1]
def _remove_tracker_file(self):
    """Delete the tracker file, tolerating a file that is already gone.

    The previous exists()/unlink() pair was racy: another process could
    remove the file between the two calls and unlink() would then raise.
    Attempt the unlink directly and treat only ENOENT as benign.
    """
    if not self.tracker_file_name:
        return
    try:
        os.unlink(self.tracker_file_name)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
def _build_content_range_header(self, range_spec='*', length_spec='*'):
    """Return a Content-Range header value of the form 'bytes RANGE/LENGTH'."""
    return 'bytes {0}/{1}'.format(range_spec, length_spec)
def _query_server_state(self, conn, file_length):
    """
    Queries server to find out state of given upload.

    Note that this method really just makes special case use of the
    fact that the upload server always returns the current start/end
    state whenever a PUT doesn't complete.

    Returns HTTP response from sending request.

    Raises ResumableUploadException if problem querying server.
    """
    # An empty PUT ('Content-Length: 0') prompts the server to reply with
    # the current state of this resumable transfer.
    put_headers = {
        'Content-Range': self._build_content_range_header('*', file_length),
        'Content-Length': '0',
    }
    return AWSAuthConnection.make_request(conn, 'PUT',
                                          path=self.tracker_uri_path,
                                          auth_path=self.tracker_uri_path,
                                          headers=put_headers,
                                          host=self.tracker_uri_host)
def _query_server_pos(self, conn, file_length):
    """
    Queries server to find out what bytes it currently has.

    Returns (server_start, server_end), where the values are inclusive.
    For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2.

    Raises ResumableUploadException if problem querying server.
    """
    resp = self._query_server_state(conn, file_length)
    if resp.status == 200:
        # To handle the boundary condition where the server has the complete
        # file, we return (server_start, file_length-1). That way the
        # calling code can always simply read up through server_end. (If we
        # didn't handle this boundary condition here, the caller would have
        # to check whether server_end == file_length and read one fewer byte
        # in that case.)
        return (0, file_length - 1)  # Completed upload.
    if resp.status != 308:
        # This means the server didn't have any state for the given
        # upload ID, which can happen (for example) if the caller saved
        # the tracker URI to a file and then tried to restart the transfer
        # after that upload ID has gone stale. In that case we need to
        # start a new transfer (and the caller will then save the new
        # tracker URI to the tracker file).
        raise ResumableUploadException(
            'Got non-308 response (%s) from server state query' %
            resp.status, ResumableTransferDisposition.START_OVER)
    got_valid_response = False
    range_spec = resp.getheader('range')
    if range_spec:
        # Parse 'bytes=<from>-<to>' range_spec.  Raw string literal avoids
        # the invalid-escape-sequence warning for '\d' on Python 3.6+.
        m = re.search(r'bytes=(\d+)-(\d+)', range_spec)
        if m:
            # NOTE(review): 'long' is presumably supplied by boto's py2/3
            # compat layer (imported above this chunk) -- confirm.
            server_start = long(m.group(1))
            server_end = long(m.group(2))
            got_valid_response = True
    else:
        # No Range header, which means the server does not yet have
        # any bytes. Note that the Range header uses inclusive 'from'
        # and 'to' values. Since Range 0-0 would mean that the server
        # has byte 0, omitting the Range header is used to indicate that
        # the server doesn't have any bytes.
        return self.SERVER_HAS_NOTHING
    if not got_valid_response:
        raise ResumableUploadException(
            'Couldn\'t parse upload server state query response (%s)' %
            str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
    if conn.debug >= 1:
        print('Server has: Range: %d - %d.' % (server_start, server_end))
    return (server_start, server_end)
def _start_new_resumable_upload(self, key, headers=None):
    """
    Starts a new resumable upload.

    Raises ResumableUploadException if any errors occur.
    """
    conn = key.bucket.connection
    if conn.debug >= 1:
        print('Starting new resumable upload.')
    self.server_has_bytes = 0
    # Start a new resumable upload by sending a POST request with an
    # empty body and the "X-Goog-Resumable: start" header. Include any
    # caller-provided headers (e.g., Content-Type) EXCEPT Content-Length
    # (and raise an exception if they tried to pass one, since it's
    # a semantic error to specify it at this point, and if we were to
    # include one now it would cause the server to expect that many
    # bytes; the POST doesn't include the actual file bytes. We set
    # the Content-Length in the subsequent PUT, based on the uploaded
    # file size.
    post_headers = {}
    # '(headers or {})' guards the default headers=None, which previously
    # raised TypeError when iterated.
    for k in (headers or {}):
        if k.lower() == 'content-length':
            raise ResumableUploadException(
                'Attempt to specify Content-Length header (disallowed)',
                ResumableTransferDisposition.ABORT)
        post_headers[k] = headers[k]
    post_headers[conn.provider.resumable_upload_header] = 'start'
    resp = conn.make_request(
        'POST', key.bucket.name, key.name, post_headers)
    # Get tracker URI from response 'Location' header.
    body = resp.read()
    # Check for various status conditions.
    if resp.status in [500, 503]:
        # Retry status 500 and 503 errors after a delay.
        raise ResumableUploadException(
            'Got status %d from attempt to start resumable upload. '
            'Will wait/retry' % resp.status,
            ResumableTransferDisposition.WAIT_BEFORE_RETRY)
    elif resp.status != 200 and resp.status != 201:
        raise ResumableUploadException(
            'Got status %d from attempt to start resumable upload. '
            'Aborting' % resp.status,
            ResumableTransferDisposition.ABORT)
    # Else we got 200 or 201 response code, indicating the resumable
    # upload was created.
    tracker_uri = resp.getheader('Location')
    if not tracker_uri:
        raise ResumableUploadException(
            'No resumable tracker URI found in resumable initiation '
            'POST response (%s)' % body,
            ResumableTransferDisposition.WAIT_BEFORE_RETRY)
    self._set_tracker_uri(tracker_uri)
    self._save_tracker_uri_to_file()
def _upload_file_bytes(self, conn, http_conn, fp, file_length,
                       total_bytes_uploaded, cb, num_cb, headers):
    """
    Makes one attempt to upload file bytes, using an existing resumable
    upload connection.

    Returns (etag, generation, metageneration) from server upon success.

    Raises ResumableUploadException if any problems occur.
    """
    # NOTE(review): self.BUFFER_SIZE is defined elsewhere on the class --
    # assumed to be a positive int chunk size.
    buf = fp.read(self.BUFFER_SIZE)
    if cb:
        # The cb_count represents the number of full buffers to send between
        # cb executions.
        if num_cb > 2:
            # Floor division is required: under Python 3 true division
            # yields a float that the integer counter 'i' below never
            # equals, silently disabling progress callbacks. '//' behaves
            # identically to '/' for ints on Python 2.
            cb_count = file_length // self.BUFFER_SIZE // (num_cb - 2)
        elif num_cb < 0:
            cb_count = -1
        else:
            cb_count = 0
        i = 0
        cb(total_bytes_uploaded, file_length)
    # Build resumable upload headers for the transfer. Don't send a
    # Content-Range header if the file is 0 bytes long, because the
    # resumable upload protocol uses an *inclusive* end-range (so, sending
    # 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
    if not headers:
        put_headers = {}
    else:
        put_headers = headers.copy()
    if file_length:
        if total_bytes_uploaded == file_length:
            range_header = self._build_content_range_header(
                '*', file_length)
        else:
            range_header = self._build_content_range_header(
                '%d-%d' % (total_bytes_uploaded, file_length - 1),
                file_length)
        put_headers['Content-Range'] = range_header
    # Set Content-Length to the total bytes we'll send with this PUT.
    put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)
    http_request = AWSAuthConnection.build_base_http_request(
        conn, 'PUT', path=self.tracker_uri_path, auth_path=None,
        headers=put_headers, host=self.tracker_uri_host)
    http_conn.putrequest('PUT', http_request.path)
    for k in put_headers:
        http_conn.putheader(k, put_headers[k])
    http_conn.endheaders()
    # Turn off debug on http connection so upload content isn't included
    # in debug stream.
    http_conn.set_debuglevel(0)
    while buf:
        http_conn.send(buf)
        for alg in self.digesters:
            self.digesters[alg].update(buf)
        total_bytes_uploaded += len(buf)
        if cb:
            i += 1
            if i == cb_count or cb_count == -1:
                cb(total_bytes_uploaded, file_length)
                i = 0
        buf = fp.read(self.BUFFER_SIZE)
    http_conn.set_debuglevel(conn.debug)
    if cb:
        cb(total_bytes_uploaded, file_length)
    if total_bytes_uploaded != file_length:
        # Abort (and delete the tracker file) so if the user retries
        # they'll start a new resumable upload rather than potentially
        # attempting to pick back up later where we left off.
        raise ResumableUploadException(
            'File changed during upload: EOF at %d bytes of %d byte file.' %
            (total_bytes_uploaded, file_length),
            ResumableTransferDisposition.ABORT)
    resp = http_conn.getresponse()
    # Restore http connection debug level.
    http_conn.set_debuglevel(conn.debug)
    if resp.status == 200:
        # Success.
        return (resp.getheader('etag'),
                resp.getheader('x-goog-generation'),
                resp.getheader('x-goog-metageneration'))
    # Retry timeout (408) and status 500 and 503 errors after a delay.
    elif resp.status in [408, 500, 503]:
        disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY
    else:
        # Catch all for any other error codes.
        disposition = ResumableTransferDisposition.ABORT
    raise ResumableUploadException('Got response code %d while attempting '
                                   'upload (%s)' %
                                   (resp.status, resp.reason), disposition)
def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,
                              num_cb):
    """
    Attempts a resumable upload.

    Returns (etag, generation, metageneration) from server upon success.

    Raises ResumableUploadException if any problems occur.
    """
    (server_start, server_end) = self.SERVER_HAS_NOTHING
    conn = key.bucket.connection
    if self.tracker_uri:
        # Try to resume existing resumable upload.
        try:
            (server_start, server_end) = (
                self._query_server_pos(conn, file_length))
            self.server_has_bytes = server_start
            if server_end:
                # If the server already has some of the content, we need to
                # update the digesters with the bytes that have already been
                # uploaded to ensure we get a complete hash in the end.
                # NOTE(review): 'if server_end:' skips catchup when the
                # server holds exactly byte 0 (server_end == 0) -- confirm
                # whether that edge case matters in practice.
                print('Catching up hash digest(s) for resumed upload')
                fp.seek(0)
                # Read local file's bytes through position server has. For
                # example, if server has (0, 3) we want to read 3-0+1=4 bytes.
                bytes_to_go = server_end + 1
                while bytes_to_go:
                    chunk = fp.read(min(key.BufferSize, bytes_to_go))
                    if not chunk:
                        raise ResumableUploadException(
                            'Hit end of file during resumable upload hash '
                            'catchup. This should not happen under\n'
                            'normal circumstances, as it indicates the '
                            'server has more bytes of this transfer\nthan'
                            ' the current file size. Restarting upload.',
                            ResumableTransferDisposition.START_OVER)
                    for alg in self.digesters:
                        self.digesters[alg].update(chunk)
                    bytes_to_go -= len(chunk)
            if conn.debug >= 1:
                print('Resuming transfer.')
        except ResumableUploadException as e:
            if conn.debug >= 1:
                print('Unable to resume transfer (%s).' % e.message)
            self._start_new_resumable_upload(key, headers)
    else:
        self._start_new_resumable_upload(key, headers)
    # upload_start_point allows the code that instantiated the
    # ResumableUploadHandler to find out the point from which it started
    # uploading (e.g., so it can correctly compute throughput).
    if self.upload_start_point is None:
        self.upload_start_point = server_end
    total_bytes_uploaded = server_end + 1
    # Corner case: Don't attempt to seek if we've already uploaded the
    # entire file, because if the file is a stream (e.g., the KeyFile
    # wrapper around input key when copying between providers), attempting
    # to seek to the end of file would result in an InvalidRange error.
    #
    # BUG FIX: this comparison was inverted (file_length <
    # total_bytes_uploaded), so a resumed upload never seeked past the
    # bytes the server already had and would resend from the current file
    # position. Seek only while there is still data left to send.
    if total_bytes_uploaded < file_length:
        fp.seek(total_bytes_uploaded)
    conn = key.bucket.connection
    # Get a new HTTP connection (vs conn.get_http_connection(), which reuses
    # pool connections) because httplib requires a new HTTP connection per
    # transaction. (Without this, calling http_conn.getresponse() would get
    # "ResponseNotReady".)
    http_conn = conn.new_http_connection(self.tracker_uri_host, conn.port,
                                         conn.is_secure)
    http_conn.set_debuglevel(conn.debug)
    # Make sure to close http_conn at end so if a local file read
    # failure occurs partway through server will terminate current upload
    # and can report that progress on next attempt.
    try:
        return self._upload_file_bytes(conn, http_conn, fp, file_length,
                                       total_bytes_uploaded, cb, num_cb,
                                       headers)
    except (ResumableUploadException, socket.error):
        resp = self._query_server_state(conn, file_length)
        if resp.status == 400:
            raise ResumableUploadException('Got 400 response from server '
                'state query after failed resumable upload attempt. This '
                'can happen for various reasons, including specifying an '
                'invalid request (e.g., an invalid canned ACL) or if the '
                'file size changed between upload attempts',
                ResumableTransferDisposition.ABORT)
        else:
            raise
    finally:
        http_conn.close()
def _check_final_md5(self, key, etag):
    """
    Checks that etag from server agrees with md5 computed before upload.

    This is important, since the upload could have spanned a number of
    hours and multiple processes (e.g., gsutil runs), and the user could
    change some of the file and not realize they have inconsistent data.

    On mismatch the just-uploaded (incorrect) object is deleted and a
    non-retryable ResumableUploadException is raised.
    """
    if key.bucket.connection.debug >= 1:
        print('Checking md5 against etag.')
    # Strip surrounding quote characters the server puts around etags.
    if key.md5 != etag.strip('"\''):
        # Call key.open_read() before attempting to delete the
        # (incorrect-content) key, so we perform that request on a
        # different HTTP connection. This is needed because httplib
        # will return a "Response not ready" error if you try to perform
        # a second transaction on the connection.
        key.open_read()
        key.close()
        key.delete()
        raise ResumableUploadException(
            'File changed during upload: md5 signature doesn\'t match etag '
            '(incorrect uploaded object deleted)',
            ResumableTransferDisposition.ABORT)
def handle_resumable_upload_exception(self, e, debug):
    """Decide, per the exception's disposition, whether to re-raise.

    ABORT_CUR_PROCESS re-raises but keeps the tracker file (a later
    process may resume); ABORT re-raises after deleting the tracker file;
    anything else falls through so the caller retries.
    """
    disposition = e.disposition
    if disposition == ResumableTransferDisposition.ABORT_CUR_PROCESS:
        if debug >= 1:
            print('Caught non-retryable ResumableUploadException (%s); '
                  'aborting but retaining tracker file' % e.message)
        raise
    if disposition == ResumableTransferDisposition.ABORT:
        if debug >= 1:
            print('Caught non-retryable ResumableUploadException (%s); '
                  'aborting and removing tracker file' % e.message)
        self._remove_tracker_file()
        raise
    if debug >= 1:
        print('Caught ResumableUploadException (%s) - will retry' %
              e.message)
def track_progress_less_iterations(self, server_had_bytes_before_attempt,
                                   roll_back_md5=True, debug=0):
    """Bookkeeping after a retryable failure.

    Resets the progress-less counter if the server gained bytes this
    attempt; otherwise increments it (optionally rolling back the hash
    digesters). Gives up with ABORT_CUR_PROCESS once num_retries
    consecutive attempts made no progress, else sleeps with jittered
    binary exponential backoff before the caller retries.
    """
    made_progress = self.server_has_bytes > server_had_bytes_before_attempt
    if made_progress:
        # Any progress resets the failure counter.
        self.progress_less_iterations = 0
    else:
        self.progress_less_iterations += 1
        if roll_back_md5:
            # No progress this attempt, so discard the hash updates it made.
            self.digesters = self.digesters_before_attempt
    if self.progress_less_iterations > self.num_retries:
        # Don't retry any longer in the current process.
        raise ResumableUploadException(
            'Too many resumable upload attempts failed without '
            'progress. You might try this upload again later',
            ResumableTransferDisposition.ABORT_CUR_PROCESS)
    # Use binary exponential backoff to desynchronize client requests.
    backoff_secs = random.random() * (2 ** self.progress_less_iterations)
    if debug >= 1:
        print('Got retryable failure (%d progress-less in a row).\n'
              'Sleeping %3.1f seconds before re-trying' %
              (self.progress_less_iterations, backoff_secs))
    time.sleep(backoff_secs)
def send_file(self, key, fp, headers, cb=None, num_cb=10, hash_algs=None):
    """
    Upload a file to a key into a bucket on GS, using GS resumable upload
    protocol.

    :type key: :class:`boto.s3.key.Key` or subclass
    :param key: The Key object to which data is to be uploaded

    :type fp: file-like object
    :param fp: The file pointer to upload

    :type headers: dict
    :param headers: The headers to pass along with the PUT request

    :type cb: function
    :param cb: a callback function that will be called to report progress on
        the upload. The callback should accept two integer parameters, the
        first representing the number of bytes that have been successfully
        transmitted to GS, and the second representing the total number of
        bytes that need to be transmitted.

    :type num_cb: int
    :param num_cb: (optional) If a callback is specified with the cb
        parameter, this parameter determines the granularity of the callback
        by defining the maximum number of times the callback will be called
        during the file transfer. Providing a negative integer will cause
        your callback to be called with each buffer read.

    :type hash_algs: dictionary
    :param hash_algs: (optional) Dictionary mapping hash algorithm
        descriptions to corresponding state-ful hashing objects that
        implement update(), digest(), and copy() (e.g. hashlib.md5()).
        Defaults to {'md5': md5()}.

    Raises ResumableUploadException if a problem occurs during the transfer.
    """
    if not headers:
        headers = {}
    # If Content-Type header is present and set to None, remove it.
    # This is gsutil's way of asking boto to refrain from auto-generating
    # that header.
    CT = 'Content-Type'
    if CT in headers and headers[CT] is None:
        del headers[CT]
    headers['User-Agent'] = UserAgent
    # Determine file size different ways for case where fp is actually a
    # wrapper around a Key vs an actual file.
    if isinstance(fp, KeyFile):
        file_length = fp.getkey().size
    else:
        fp.seek(0, os.SEEK_END)
        file_length = fp.tell()
        fp.seek(0)
    debug = key.bucket.connection.debug
    # Compute the MD5 checksum on the fly.
    if hash_algs is None:
        hash_algs = {'md5': md5}
    # hash_algs is guaranteed non-None here, so the redundant 'or {}'
    # fallback was dropped.
    self.digesters = dict(
        (alg, hash_algs[alg]()) for alg in hash_algs)
    # Use num-retries from constructor if one was provided; else check
    # for a value specified in the boto config file; else default to 5.
    # (The code previously defaulted to 6, contradicting this comment.)
    if self.num_retries is None:
        self.num_retries = config.getint('Boto', 'num_retries', 5)
    self.progress_less_iterations = 0
    while True:  # Retry as long as we're making progress.
        server_had_bytes_before_attempt = self.server_has_bytes
        # Snapshot digesters so a failed attempt can be rolled back.
        self.digesters_before_attempt = dict(
            (alg, self.digesters[alg].copy())
            for alg in self.digesters)
        try:
            # Save generation and metageneration in class state so caller
            # can find these values, for use in preconditions of future
            # operations on the uploaded object.
            (etag, self.generation, self.metageneration) = (
                self._attempt_resumable_upload(key, fp, file_length,
                                               headers, cb, num_cb))
            # Get the final digests for the uploaded content.
            for alg in self.digesters:
                key.local_hashes[alg] = self.digesters[alg].digest()
            # Upload succeeded, so remove the tracker file (if have one).
            self._remove_tracker_file()
            self._check_final_md5(key, etag)
            key.generation = self.generation
            if debug >= 1:
                print('Resumable upload complete.')
            return
        except self.RETRYABLE_EXCEPTIONS as e:
            if debug >= 1:
                print('Caught exception (%s)' % repr(e))
            if isinstance(e, IOError) and e.errno == errno.EPIPE:
                # Broken pipe error causes httplib to immediately
                # close the socket (http://bugs.python.org/issue5542),
                # so we need to close the connection before we resume
                # the upload (which will cause a new connection to be
                # opened the next time an HTTP request is sent).
                key.bucket.connection.connection.close()
        except ResumableUploadException as e:
            self.handle_resumable_upload_exception(e, debug)
        self.track_progress_less_iterations(server_had_bytes_before_attempt,
                                            True, debug)
| mit |
pskrz/ansible | lib/ansible/plugins/action/assert.py | 163 | 2353 | # Copyright 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.playbook.conditional import Conditional
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    ''' Fail with custom message '''

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        """Evaluate each 'that' conditional; fail on the first false one.

        Returns a failed result dict (with the offending assertion and the
        optional custom 'msg') on the first false conditional, otherwise a
        changed=False success result.
        """
        # task_vars previously defaulted to dict(): a mutable default
        # argument shared across calls. Use None + guard instead.
        if task_vars is None:
            task_vars = dict()

        if 'that' not in self._task.args:
            raise AnsibleError('conditional required in "that" string')

        msg = None
        if 'msg' in self._task.args:
            msg = self._task.args['msg']

        # make sure the 'that' items are a list
        thats = self._task.args['that']
        if not isinstance(thats, list):
            thats = [thats]

        # Now we iterate over the that items, temporarily assigning them
        # to the task's when value so we can evaluate the conditional using
        # the built in evaluate function. The when has already been evaluated
        # by this point, and is not used again, so we don't care about mangling
        # that value now
        cond = Conditional(loader=self._loader)
        for that in thats:
            cond.when = [that]
            test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
            if not test_result:
                result = dict(
                    failed=True,
                    evaluated_to=test_result,
                    assertion=that,
                )
                if msg:
                    result['msg'] = msg
                return result

        return dict(changed=False, msg='all assertions passed')
| gpl-3.0 |
GoogleCloudPlatform/cloud-foundation-toolkit | dm/templates/kms/kms.py | 1 | 2804 | # Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Creates a Cloud KMS KeyRing and cryptographic key resources. """
def generate_config(context):
    """
    Entry point for the deployment resources.

    Builds one Cloud KMS KeyRing plus a cryptoKey resource for every entry
    in properties['keys'], and exposes the keyring as a 'keyRing' output.
    """
    props = context.properties
    deployment_name = context.env['name']

    project_id = props.get('project', context.env['project'])
    parent = 'projects/{}/locations/{}'.format(project_id,
                                               props.get('region'))
    keyring_name = props.get('keyRingName', deployment_name)
    keyring_id = '{}/keyRings/{}'.format(parent, keyring_name)

    # https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings
    provider = 'gcp-types/cloudkms-v1:projects.locations.keyRings'

    # keyring resource
    resources = [{
        'name': deployment_name,
        'type': provider,
        'properties': {
            'parent': parent,
            'keyRingId': keyring_name,
        },
    }]

    # cryptographic key resources
    for key in props.get('keys', []):
        key_name = key['cryptoKeyName'].lower()
        key_props = {
            'parent': keyring_id,
            'cryptoKeyId': key_name,
            'purpose': key.get('cryptoKeyPurpose'),
            'labels': key.get('labels', {}),
        }
        # crypto key optional properties
        for optional in ('versionTemplate', 'nextRotationTime',
                         'rotationPeriod'):
            if optional in key:
                key_props[optional] = key[optional]
        resources.append({
            'name': '{}-{}'.format(deployment_name, key_name),
            'type': provider + '.cryptoKeys',
            'properties': key_props,
            # The key must be created after its keyring.
            'metadata': {'dependsOn': [deployment_name]},
        })

    return {
        'resources': resources,
        'outputs': [{
            'name': 'keyRing',
            'value': '$(ref.{}.name)'.format(deployment_name),
        }],
    }
| apache-2.0 |
pjdelport/django | django/contrib/formtools/tests/__init__.py | 6 | 16217 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import os
import pickle
import re
import warnings
from django import http
from django.conf import settings
from django.contrib.formtools import preview, utils
from django.contrib.formtools.wizard import FormWizard
from django.test import TestCase
from django.test.html import parse_html
from django.test.utils import override_settings
from django.utils import unittest
from django.contrib.formtools.tests.wizard import *
from django.contrib.formtools.tests.forms import *
# Sentinel body returned by the test done() hooks below; tests compare
# HTTP response bodies (bytes) against the encoded form.
success_string = "Done was called!"
success_string_encoded = success_string.encode()
class TestFormPreview(preview.FormPreview):
    """FormPreview subclass with hooks instrumented for the preview tests."""

    def get_context(self, request, form):
        # Add a marker entry so tests can verify this override ran.
        context = super(TestFormPreview, self).get_context(request, form)
        context['custom_context'] = True
        return context

    def get_initial(self, request):
        # The GET test asserts this exact initial value.
        return {'field1': 'Works!'}

    def done(self, request, cleaned_data):
        return http.HttpResponse(success_string)
@override_settings(
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(__file__), 'templates'),
    ),
)
class PreviewTests(TestCase):
    urls = 'django.contrib.formtools.tests.urls'

    def setUp(self):
        super(PreviewTests, self).setUp()
        # Create a FormPreview instance to share between tests
        self.preview = preview.FormPreview(TestForm)
        input_template = '<input type="hidden" name="%s" value="%s" />'
        self.input = input_template % (self.preview.unused_name('stage'), "%d")
        self.test_data = {'field1': 'foo', 'field1_': 'asdf'}

    def test_unused_name(self):
        """
        Verifies name mangling to get unique field name.
        """
        self.assertEqual(self.preview.unused_name('field1'), 'field1__')

    def test_form_get(self):
        """
        Test contrib.formtools.preview form retrieval.

        Use the client library to see if we can successfully retrieve
        the form (mostly testing the setup ROOT_URLCONF
        process). Verify that an additional hidden input field
        is created to manage the stage.
        """
        response = self.client.get('/preview/')
        stage = self.input % 1
        self.assertContains(response, stage, 1)
        self.assertEqual(response.context['custom_context'], True)
        self.assertEqual(response.context['form'].initial, {'field1': 'Works!'})

    def test_form_preview(self):
        """
        Test contrib.formtools.preview form preview rendering.

        Use the client library to POST to the form to see if a preview
        is returned. If we do get a form back check that the hidden
        value is correctly managing the state of the form.
        """
        # Pass strings for form submittal and add stage variable to
        # show we previously saw first stage of the form.
        self.test_data.update({'stage': 1, 'date1': datetime.date(2006, 10, 25)})
        response = self.client.post('/preview/', self.test_data)
        # Check to confirm stage is set to 2 in output form.
        stage = self.input % 2
        self.assertContains(response, stage, 1)

    def test_form_submit(self):
        """
        Test contrib.formtools.preview form submittal.

        Use the client library to POST to the form with stage set to 3
        to see if our forms done() method is called. Check first
        without the security hash, verify failure, retry with security
        hash and verify success.
        """
        # Pass strings for form submittal and add stage variable to
        # show we previously saw first stage of the form.
        self.test_data.update({'stage': 2, 'date1': datetime.date(2006, 10, 25)})
        response = self.client.post('/preview/', self.test_data)
        self.assertNotEqual(response.content, success_string_encoded)
        hash = self.preview.security_hash(None, TestForm(self.test_data))
        self.test_data.update({'hash': hash})
        response = self.client.post('/preview/', self.test_data)
        self.assertEqual(response.content, success_string_encoded)

    def test_bool_submit(self):
        """
        Test contrib.formtools.preview form submittal when form contains:
        BooleanField(required=False)

        Ticket: #6209 - When an unchecked BooleanField is previewed, the preview
        form's hash would be computed with no value for ``bool1``. However, when
        the preview form is rendered, the unchecked hidden BooleanField would be
        rendered with the string value 'False'. So when the preview form is
        resubmitted, the hash would be computed with the value 'False' for
        ``bool1``. We need to make sure the hashes are the same in both cases.
        """
        self.test_data.update({'stage': 2})
        hash = self.preview.security_hash(None, TestForm(self.test_data))
        self.test_data.update({'hash': hash, 'bool1': 'False'})
        with warnings.catch_warnings(record=True):
            response = self.client.post('/preview/', self.test_data)
            self.assertEqual(response.content, success_string_encoded)

    def test_form_submit_good_hash(self):
        """
        Test contrib.formtools.preview form submittal, using a correct
        hash
        """
        # Pass strings for form submittal and add stage variable to
        # show we previously saw first stage of the form.
        self.test_data.update({'stage': 2})
        response = self.client.post('/preview/', self.test_data)
        self.assertNotEqual(response.content, success_string_encoded)
        hash = utils.form_hmac(TestForm(self.test_data))
        self.test_data.update({'hash': hash})
        response = self.client.post('/preview/', self.test_data)
        self.assertEqual(response.content, success_string_encoded)

    def test_form_submit_bad_hash(self):
        """
        Test contrib.formtools.preview form submittal does not proceed
        if the hash is incorrect.
        """
        # Pass strings for form submittal and add stage variable to
        # show we previously saw first stage of the form.
        self.test_data.update({'stage': 2})
        response = self.client.post('/preview/', self.test_data)
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(response.content, success_string_encoded)
        hash = utils.form_hmac(TestForm(self.test_data)) + "bad"
        self.test_data.update({'hash': hash})
        # BUG FIX: this previously POSTed to '/previewpreview/', a URL that
        # the test URLconf does not define; every sibling test targets
        # '/preview/'.
        response = self.client.post('/preview/', self.test_data)
        self.assertNotEqual(response.content, success_string_encoded)
class FormHmacTests(unittest.TestCase):
    """Unit tests for the form-HMAC helper in formtools.utils."""

    def test_textfield_hash(self):
        """
        Regression test for #10034: the hash generation function should ignore
        leading/trailing whitespace so as to be friendly to broken browsers that
        submit it (usually in textareas).
        """
        trimmed = HashTestForm({'name': 'joe', 'bio': 'Speaking español.'})
        padded = HashTestForm({'name': ' joe', 'bio': 'Speaking español. '})
        self.assertEqual(utils.form_hmac(trimmed), utils.form_hmac(padded))

    def test_empty_permitted(self):
        """
        Regression test for #10643: the security hash should allow forms with
        empty_permitted = True, or forms where data has not changed.
        """
        blank_form = HashTestBlankForm({})
        empty_ok_form = HashTestForm({}, empty_permitted=True)
        self.assertEqual(utils.form_hmac(blank_form),
                         utils.form_hmac(empty_ok_form))
#
# FormWizard tests
#
class TestWizardClass(FormWizard):
    """FormWizard subclass whose done() reports via the shared sentinel."""

    def get_template(self, step):
        # Every step renders with the same template.
        return 'forms/wizard.html'

    def done(self, request, cleaned_data):
        return http.HttpResponse(success_string)
class DummyRequest(http.HttpRequest):
    """Minimal HttpRequest stand-in for driving FormWizard directly."""

    def __init__(self, POST=None):
        super(DummyRequest, self).__init__()
        # The previous ``POST and "POST" or "GET"`` idiom misclassified an
        # empty dict as a GET request; any non-None payload means POST.
        self.method = "POST" if POST is not None else "GET"
        if POST is not None:
            self.POST.update(POST)
        self._dont_enforce_csrf_checks = True
@override_settings(
    SECRET_KEY="123",
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(__file__), 'templates'),
    ),
)
class WizardTests(TestCase):
    """Tests for the legacy django.contrib.formtools FormWizard.

    Each wizard step POSTs its fields plus a ``wizard_step`` counter and an
    HMAC ``hash_N`` for every previously completed step; the wizard only
    advances when all of those hashes validate.  SECRET_KEY is pinned above
    because the hard-coded expected hashes below depend on it.
    """
    urls = 'django.contrib.formtools.tests.urls'
    # One dict of POST data per wizard step; field names carry the step
    # number prefix ("0-", "1-", ...) that the wizard expects.
    wizard_step_data = (
        {
            '0-name': 'Pony',
            '0-thirsty': '2',
        },
        {
            '1-address1': '123 Main St',
            '1-address2': 'Djangoland',
        },
        {
            '2-random_crap': 'blah blah',
        }
    )
    def setUp(self):
        super(WizardTests, self).setUp()
        self.save_warnings_state()
        # formtools' FormWizard is deprecated; silence its warning so test
        # output stays clean.
        warnings.filterwarnings('ignore', category=DeprecationWarning,
                                module='django.contrib.formtools.wizard')
    def tearDown(self):
        super(WizardTests, self).tearDown()
        self.restore_warnings_state()
    def test_step_starts_at_zero(self):
        """
        step should be zero for the first form
        """
        response = self.client.get('/wizard1/')
        self.assertEqual(0, response.context['step0'])
    def test_step_increments(self):
        """
        step should be incremented when we go to the next page
        """
        response = self.client.post('/wizard1/', {"0-field":"test", "wizard_step":"0"})
        self.assertEqual(1, response.context['step0'])
    def test_bad_hash(self):
        """
        Form should not advance if the hash is missing or bad
        """
        response = self.client.post('/wizard1/',
                                    {"0-field":"test",
                                     "1-field":"test2",
                                     "wizard_step": "1"})
        self.assertEqual(0, response.context['step0'])
    def test_good_hash(self):
        """
        Form should advance if the hash is present and good, as calculated using
        current method.
        """
        # The expected HMAC differs by the pickle protocol used to serialize
        # the form data, hence the {protocol: hash} lookup.
        data = {"0-field": "test",
                "1-field": "test2",
                "hash_0": {
                    2: "cd13b1db3e8f55174bc5745a1b1a53408d4fd1ca",
                    3: "9355d5dff22d49dbad58e46189982cec649f9f5b",
                }[pickle.HIGHEST_PROTOCOL],
                "wizard_step": "1"}
        response = self.client.post('/wizard1/', data)
        self.assertEqual(2, response.context['step0'])
    def test_11726(self):
        """
        Regression test for ticket #11726.
        Wizard should not raise Http404 when steps are added dynamically.
        """
        reached = [False]
        that = self
        class WizardWithProcessStep(TestWizardClass):
            def process_step(self, request, form, step):
                if step == 0:
                    # Append the second step lazily, at runtime.
                    if self.num_steps() < 2:
                        self.form_list.append(WizardPageTwoForm)
                if step == 1:
                    that.assertTrue(isinstance(form, WizardPageTwoForm))
                    reached[0] = True
        wizard = WizardWithProcessStep([WizardPageOneForm])
        data = {"0-field": "test",
                "1-field": "test2",
                "hash_0": {
                    2: "cd13b1db3e8f55174bc5745a1b1a53408d4fd1ca",
                    3: "9355d5dff22d49dbad58e46189982cec649f9f5b",
                }[pickle.HIGHEST_PROTOCOL],
                "wizard_step": "1"}
        wizard(DummyRequest(POST=data))
        self.assertTrue(reached[0])
        # A step number beyond the (dynamically grown) form list must 404.
        data = {"0-field": "test",
                "1-field": "test2",
                "hash_0": {
                    2: "cd13b1db3e8f55174bc5745a1b1a53408d4fd1ca",
                    3: "9355d5dff22d49dbad58e46189982cec649f9f5b",
                }[pickle.HIGHEST_PROTOCOL],
                "hash_1": {
                    2: "1e6f6315da42e62f33a30640ec7e007ad3fbf1a1",
                    3: "c33142ef9d01b1beae238adf22c3c6c57328f51a",
                }[pickle.HIGHEST_PROTOCOL],
                "wizard_step": "2"}
        self.assertRaises(http.Http404, wizard, DummyRequest(POST=data))
    def test_14498(self):
        """
        Regression test for ticket #14498.  All previous steps' forms should be
        validated.
        """
        reached = [False]
        that = self
        class WizardWithProcessStep(TestWizardClass):
            def process_step(self, request, form, step):
                that.assertTrue(form.is_valid())
                reached[0] = True
        wizard = WizardWithProcessStep([WizardPageOneForm,
                                        WizardPageTwoForm,
                                        WizardPageThreeForm])
        data = {"0-field": "test",
                "1-field": "test2",
                "hash_0": {
                    2: "cd13b1db3e8f55174bc5745a1b1a53408d4fd1ca",
                    3: "9355d5dff22d49dbad58e46189982cec649f9f5b",
                }[pickle.HIGHEST_PROTOCOL],
                "wizard_step": "1"}
        wizard(DummyRequest(POST=data))
        self.assertTrue(reached[0])
    def test_14576(self):
        """
        Regression test for ticket #14576.
        The form of the last step is not passed to the done method.
        """
        reached = [False]
        that = self
        class Wizard(TestWizardClass):
            def done(self, request, form_list):
                reached[0] = True
                that.assertTrue(len(form_list) == 2)
        wizard = Wizard([WizardPageOneForm,
                         WizardPageTwoForm])
        data = {"0-field": "test",
                "1-field": "test2",
                "hash_0": {
                    2: "cd13b1db3e8f55174bc5745a1b1a53408d4fd1ca",
                    3: "9355d5dff22d49dbad58e46189982cec649f9f5b",
                }[pickle.HIGHEST_PROTOCOL],
                "wizard_step": "1"}
        wizard(DummyRequest(POST=data))
        self.assertTrue(reached[0])
    def test_15075(self):
        """
        Regression test for ticket #15075.  Allow modifying wizard's form_list
        in process_step.
        """
        reached = [False]
        that = self
        class WizardWithProcessStep(TestWizardClass):
            def process_step(self, request, form, step):
                if step == 0:
                    # Swap the class of a later step at runtime.
                    self.form_list[1] = WizardPageTwoAlternativeForm
                if step == 1:
                    that.assertTrue(isinstance(form, WizardPageTwoAlternativeForm))
                    reached[0] = True
        wizard = WizardWithProcessStep([WizardPageOneForm,
                                        WizardPageTwoForm,
                                        WizardPageThreeForm])
        data = {"0-field": "test",
                "1-field": "test2",
                "hash_0": {
                    2: "cd13b1db3e8f55174bc5745a1b1a53408d4fd1ca",
                    3: "9355d5dff22d49dbad58e46189982cec649f9f5b",
                }[pickle.HIGHEST_PROTOCOL],
                "wizard_step": "1"}
        wizard(DummyRequest(POST=data))
        self.assertTrue(reached[0])
    def grab_field_data(self, response):
        """
        Pull the appropriate field data from the context to pass to the next wizard step
        """
        # previous_fields is rendered HTML of hidden inputs; parse it back
        # into name/value pairs so it can be re-submitted.
        previous_fields = parse_html(response.context['previous_fields'])
        fields = {'wizard_step': response.context['step0']}
        for input_field in previous_fields:
            input_attrs = dict(input_field.attributes)
            fields[input_attrs["name"]] = input_attrs["value"]
        return fields
    def check_wizard_step(self, response, step_no):
        """
        Helper function to test each step of the wizard
        - Make sure the call succeeded
        - Make sure response is the proper step number
        - return the result from the post for the next step
        """
        step_count = len(self.wizard_step_data)
        self.assertContains(response, 'Step %d of %d' % (step_no, step_count))
        data = self.grab_field_data(response)
        data.update(self.wizard_step_data[step_no - 1])
        return self.client.post('/wizard2/', data)
    def test_9473(self):
        # Walk the full wizard end to end, one step at a time.
        response = self.client.get('/wizard2/')
        for step_no in range(1, len(self.wizard_step_data) + 1):
            response = self.check_wizard_step(response, step_no)
| bsd-3-clause |
cmelange/ansible | lib/ansible/modules/cloud/vmware/vmware_dvswitch.py | 60 | 7365 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_dvswitch
short_description: Create or remove a distributed vSwitch
description:
- Create or remove a distributed vSwitch
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- The name of the datacenter that will contain the dvSwitch
required: True
switch_name:
description:
- The name of the switch to create or remove
required: True
mtu:
description:
- The switch maximum transmission unit
required: True
uplink_quantity:
description:
- Quantity of uplink per ESXi host added to the switch
required: True
discovery_proto:
description:
- Link discovery protocol between Cisco and Link Layer discovery
choices:
- 'cdp'
- 'lldp'
required: True
discovery_operation:
description:
- Select the discovery operation
choices:
- 'both'
- 'none'
- 'advertise'
- 'listen'
state:
description:
- Create or remove dvSwitch
default: 'present'
choices:
- 'present'
- 'absent'
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create dvswitch
local_action:
module: vmware_dvswitch
hostname: vcenter_ip_or_hostname
username: vcenter_username
password: vcenter_password
datacenter_name: datacenter
switch_name: dvSwitch
mtu: 9000
uplink_quantity: 2
discovery_proto: lldp
discovery_operation: both
state: present
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
class VMwareDVSwitch(object):
    """Create or remove a vSphere distributed virtual switch.

    State handling is table-driven: process_state() looks up the desired
    state ('present'/'absent') against the switch's current state and
    dispatches to the matching handler.  Every handler exits the module via
    exit_json/fail_json.
    """

    def __init__(self, module):
        self.module = module
        self.dvs = None
        self.switch_name = self.module.params['switch_name']
        self.datacenter_name = self.module.params['datacenter_name']
        self.mtu = self.module.params['mtu']
        self.uplink_quantity = self.module.params['uplink_quantity']
        self.discovery_proto = self.module.params['discovery_proto']
        self.discovery_operation = self.module.params['discovery_operation']
        # NOTE: a duplicate re-assignment of self.switch_name was removed.
        self.state = self.module.params['state']
        self.content = connect_to_api(module)

    def process_state(self):
        """Dispatch to the handler for (desired state, current state)."""
        try:
            dvs_states = {
                'absent': {
                    'present': self.state_destroy_dvs,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'update': self.state_update_dvs,
                    'present': self.state_exit_unchanged,
                    'absent': self.state_create_dvs,
                }
            }
            dvs_states[self.state][self.check_dvs_configuration()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            # Catch-all so any other pyVmomi error is reported through
            # Ansible instead of as a raw traceback.
            self.module.fail_json(msg=str(e))

    def create_dvswitch(self, network_folder):
        """Create the dvSwitch under network_folder and wait for the task.

        Returns a (changed, task_result) tuple from wait_for_task.
        """
        result = None
        changed = False

        spec = vim.DistributedVirtualSwitch.CreateSpec()
        spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
        spec.configSpec.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig()

        spec.configSpec.name = self.switch_name
        spec.configSpec.maxMtu = self.mtu
        spec.configSpec.linkDiscoveryProtocolConfig.protocol = self.discovery_proto
        spec.configSpec.linkDiscoveryProtocolConfig.operation = self.discovery_operation
        spec.productInfo = vim.dvs.ProductSpec()
        spec.productInfo.name = "DVS"
        spec.productInfo.vendor = "VMware"

        # Uplink ports are named uplink1..uplinkN.
        for count in range(1, self.uplink_quantity + 1):
            spec.configSpec.uplinkPortPolicy.uplinkPortName.append("uplink%d" % count)

        task = network_folder.CreateDVS_Task(spec)
        changed, result = wait_for_task(task)
        return changed, result

    def state_exit_unchanged(self):
        """Nothing to do; report no change."""
        self.module.exit_json(changed=False)

    def state_destroy_dvs(self):
        """Destroy the dvSwitch cached by check_dvs_configuration."""
        task = self.dvs.Destroy_Task()
        changed, result = wait_for_task(task)
        self.module.exit_json(changed=changed, result=str(result))

    def state_update_dvs(self):
        """Reconfiguring an existing switch is not implemented."""
        self.module.exit_json(changed=False, msg="Currently not implemented.")

    def state_create_dvs(self):
        """Create the dvSwitch (skipped in check mode) and report the result."""
        changed = True
        result = None

        if not self.module.check_mode:
            dc = find_datacenter_by_name(self.content, self.datacenter_name)
            changed, result = self.create_dvswitch(dc.networkFolder)

        self.module.exit_json(changed=changed, result=str(result))

    def check_dvs_configuration(self):
        """Return 'present' if the named dvSwitch exists, else 'absent'.

        Side effect: caches the found switch on self.dvs for the destroy path.
        """
        self.dvs = find_dvs_by_name(self.content, self.switch_name)
        if self.dvs is None:
            return 'absent'
        return 'present'
def main():
    """Module entry point: parse arguments and run the dvSwitch state machine."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            datacenter_name=dict(required=True, type='str'),
            switch_name=dict(required=True, type='str'),
            mtu=dict(required=True, type='int'),
            uplink_quantity=dict(required=True, type='int'),
            discovery_proto=dict(required=True, choices=['cdp', 'lldp'], type='str'),
            discovery_operation=dict(required=True, choices=['both', 'none', 'advertise', 'listen'], type='str'),
            state=dict(default='present', choices=['present', 'absent'], type='str'),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # pyVmomi availability was probed at import time.
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmware_dvswitch = VMwareDVSwitch(module)
    vmware_dvswitch.process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
jbaayen/sympy | sympy/logic/utilities/dimacs.py | 12 | 1339 | """For reading in DIMACS file format
www.cs.ubc.ca/~hoos/SATLIB/Benchmarks/SAT/satformat.ps
"""
from sympy.core import Symbol
from sympy.logic.boolalg import And, Or
import re
def load(s):
    """Parse a DIMACS CNF string into a sympy boolean expression.

    :param s: contents of a DIMACS CNF file as a single string
    :returns: an ``And`` of ``Or`` clauses over symbols named ``cnf_<n>``;
        a negative literal ``-n`` becomes the negated symbol ``~cnf_n``.
    """
    clauses = []

    # Raw strings avoid invalid-escape warnings for \s and \d.
    pComment = re.compile(r'c.*')
    pStats = re.compile(r'p\s*cnf\s*(\d*)\s*(\d*)')

    for line in s.split('\n'):
        # Skip comment lines ("c ...").
        if pComment.match(line):
            continue
        if pStats.match(line):
            # Problem line "p cnf <vars> <clauses>": the counts are not
            # needed to build the expression, so the line is just skipped.
            continue
        literals = []  # renamed from "list", which shadowed the builtin
        for lit in line.rstrip('\n').split(' '):
            if lit == '':
                continue
            value = int(lit)
            if value == 0:
                # 0 terminates a clause in DIMACS; it is not a literal.
                continue
            sym = Symbol("cnf_%s" % abs(value))
            literals.append(sym if value > 0 else ~sym)
        if literals:
            clauses.append(Or(*literals))
    return And(*clauses)
def load_file(location):
    """Load the DIMACS CNF file at *location* and parse it with :func:`load`.

    The file handle is closed deterministically via a context manager (the
    original left it open until garbage collection).
    """
    with open(location) as f:
        return load(f.read())
| bsd-3-clause |
go-bears/rally | rally/plugins/common/scenarios/requests/utils.py | 9 | 1581 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from rally.common.i18n import _
from rally.task import atomic
from rally.task import scenario
class RequestScenario(scenario.Scenario):
    """Base class for Request scenarios with basic atomic actions."""

    @atomic.action_timer("requests.check_request")
    def _check_request(self, url, method, status_code, **kwargs):
        """Issue an HTTP request and verify the response status code.

        :param status_code: Expected status code of request
        :param url: Uniform resource locator
        :param method: Type of request method (GET | POST ..)
        :param kwargs: Optional additional request parameters
        :raises: ValueError if return http status code
                 not equal to expected status code
        """
        response = requests.request(method, url, **kwargs)
        if response.status_code == status_code:
            return
        raise ValueError(
            _("Expected HTTP request code is `%s` actual `%s`")
            % (status_code, response.status_code))
| apache-2.0 |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/idlelib/configDialog.py | 37 | 53238 | """IDLE Configuration Dialog: support user customization of IDLE by GUI
Customize font faces, sizes, and colorization attributes. Set indentation
defaults. Customize keybindings. Colorization and keybindings can be
saved as user defined sets. Select startup options including shell/editor
and default window size. Define additional help sources.
Note that tab width in IDLE is currently fixed at eight due to Tk issues.
Refer to comments in EditorWindow autoindent code for details.
"""
from Tkinter import *
import tkMessageBox, tkColorChooser, tkFont
import string
from idlelib.configHandler import idleConf
from idlelib.dynOptionMenuWidget import DynOptionMenu
from idlelib.tabbedpages import TabbedPageSet
from idlelib.keybindingDialog import GetKeysDialog
from idlelib.configSectionNameDialog import GetCfgSectionNameDialog
from idlelib.configHelpSourceEdit import GetHelpSourceDialog
from idlelib import macosxSupport
class ConfigDialog(Toplevel):
    def __init__(self,parent,title):
        """Build the modal 'IDLE Preferences' dialog and block until closed.

        parent -- the Tk window this dialog is transient for.
        title -- accepted but never read; the title is hard-coded below.
        """
        Toplevel.__init__(self, parent)
        # Stay hidden while widgets are built; deiconified at the end.
        self.wm_withdraw()
        self.configure(borderwidth=5)
        self.title('IDLE Preferences')
        self.geometry("+%d+%d" % (parent.winfo_rootx()+20,
                parent.winfo_rooty()+30))
        #Theme Elements. Each theme element key is its display name.
        #The first value of the tuple is the sample area tag name.
        #The second value is the display name list sort index.
        self.themeElements={'Normal Text':('normal','00'),
            'Python Keywords':('keyword','01'),
            'Python Definitions':('definition','02'),
            'Python Builtins':('builtin', '03'),
            'Python Comments':('comment','04'),
            'Python Strings':('string','05'),
            'Selected Text':('hilite','06'),
            'Found Text':('hit','07'),
            'Cursor':('cursor','08'),
            'Error Text':('error','09'),
            'Shell Normal Text':('console','10'),
            'Shell Stdout Text':('stdout','11'),
            'Shell Stderr Text':('stderr','12'),
            }
        self.ResetChangedItems() #load initial values in changed items dict
        self.CreateWidgets()
        self.resizable(height=FALSE,width=FALSE)
        self.transient(parent)
        # Modal: route all input to this dialog until it is dismissed.
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Cancel)
        self.parent = parent
        self.tabPages.focus_set()
        #key bindings for this dialog
        #self.bind('<Escape>',self.Cancel) #dismiss dialog, no save
        #self.bind('<Alt-a>',self.Apply) #apply changes, save
        #self.bind('<F1>',self.Help) #context help
        self.LoadConfigs()
        self.AttachVarCallbacks() #avoid callbacks during LoadConfigs
        self.wm_deiconify()
        self.wait_window()
    def CreateWidgets(self):
        """Create the four tab pages and the Help/Ok/Apply/Cancel button bar."""
        self.tabPages = TabbedPageSet(self,
                page_names=['Fonts/Tabs','Highlighting','Keys','General'])
        frameActionButtons = Frame(self,pady=2)
        #action buttons
        if macosxSupport.runningAsOSXApp():
            # Changing the default padding on OSX results in unreadable
            # text in the buttons
            paddingArgs={}
        else:
            paddingArgs={'padx':6, 'pady':3}
        self.buttonHelp = Button(frameActionButtons,text='Help',
                command=self.Help,takefocus=FALSE,
                **paddingArgs)
        self.buttonOk = Button(frameActionButtons,text='Ok',
                command=self.Ok,takefocus=FALSE,
                **paddingArgs)
        self.buttonApply = Button(frameActionButtons,text='Apply',
                command=self.Apply,takefocus=FALSE,
                **paddingArgs)
        self.buttonCancel = Button(frameActionButtons,text='Cancel',
                command=self.Cancel,takefocus=FALSE,
                **paddingArgs)
        # Each CreatePage* call populates one tab of self.tabPages.
        self.CreatePageFontTab()
        self.CreatePageHighlight()
        self.CreatePageKeys()
        self.CreatePageGeneral()
        self.buttonHelp.pack(side=RIGHT,padx=5)
        self.buttonOk.pack(side=LEFT,padx=5)
        self.buttonApply.pack(side=LEFT,padx=5)
        self.buttonCancel.pack(side=LEFT,padx=5)
        frameActionButtons.pack(side=BOTTOM)
        Frame(self, height=2, borderwidth=0).pack(side=BOTTOM)
        self.tabPages.pack(side=TOP,expand=TRUE,fill=BOTH)
    def CreatePageFontTab(self):
        """Build the 'Fonts/Tabs' page: font face/size/bold selectors with a
        live sample, plus the indentation-width scale."""
        #tkVars
        self.fontSize=StringVar(self)
        self.fontBold=BooleanVar(self)
        self.fontName=StringVar(self)
        self.spaceNum=IntVar(self)
        # editFont is reconfigured by SetFontSample to preview selections.
        self.editFont=tkFont.Font(self,('courier',10,'normal'))
        ##widget creation
        #body frame
        frame=self.tabPages.pages['Fonts/Tabs'].frame
        #body section frames
        frameFont=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                text=' Base Editor Font ')
        frameIndent=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                text=' Indentation Width ')
        #frameFont
        frameFontName=Frame(frameFont)
        frameFontParam=Frame(frameFont)
        labelFontNameTitle=Label(frameFontName,justify=LEFT,
                text='Font Face :')
        self.listFontName=Listbox(frameFontName,height=5,takefocus=FALSE,
                exportselection=FALSE)
        self.listFontName.bind('<ButtonRelease-1>',self.OnListFontButtonRelease)
        scrollFont=Scrollbar(frameFontName)
        scrollFont.config(command=self.listFontName.yview)
        self.listFontName.config(yscrollcommand=scrollFont.set)
        labelFontSizeTitle=Label(frameFontParam,text='Size :')
        self.optMenuFontSize=DynOptionMenu(frameFontParam,self.fontSize,None,
                command=self.SetFontSample)
        checkFontBold=Checkbutton(frameFontParam,variable=self.fontBold,
                onvalue=1,offvalue=0,text='Bold',command=self.SetFontSample)
        frameFontSample=Frame(frameFont,relief=SOLID,borderwidth=1)
        self.labelFontSample=Label(frameFontSample,
                text='AaBbCcDdEe\nFfGgHhIiJjK\n1234567890\n#:+=(){}[]',
                justify=LEFT,font=self.editFont)
        #frameIndent
        frameIndentSize=Frame(frameIndent)
        labelSpaceNumTitle=Label(frameIndentSize, justify=LEFT,
                                 text='Python Standard: 4 Spaces!')
        self.scaleSpaceNum=Scale(frameIndentSize, variable=self.spaceNum,
                                 orient='horizontal',
                                 tickinterval=2, from_=2, to=16)
        #widget packing
        #body
        frameFont.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
        frameIndent.pack(side=LEFT,padx=5,pady=5,fill=Y)
        #frameFont
        frameFontName.pack(side=TOP,padx=5,pady=5,fill=X)
        frameFontParam.pack(side=TOP,padx=5,pady=5,fill=X)
        labelFontNameTitle.pack(side=TOP,anchor=W)
        self.listFontName.pack(side=LEFT,expand=TRUE,fill=X)
        scrollFont.pack(side=LEFT,fill=Y)
        labelFontSizeTitle.pack(side=LEFT,anchor=W)
        self.optMenuFontSize.pack(side=LEFT,anchor=W)
        checkFontBold.pack(side=LEFT,anchor=W,padx=20)
        frameFontSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
        self.labelFontSample.pack(expand=TRUE,fill=BOTH)
        #frameIndent
        frameIndentSize.pack(side=TOP,fill=X)
        labelSpaceNumTitle.pack(side=TOP,anchor=W,padx=5)
        self.scaleSpaceNum.pack(side=TOP,padx=5,fill=X)
        return frame
    def CreatePageHighlight(self):
        """Build the 'Highlighting' page: a clickable sample text for picking
        theme elements, colour choosers, and built-in/custom theme selection."""
        self.builtinTheme=StringVar(self)
        self.customTheme=StringVar(self)
        self.fgHilite=BooleanVar(self)
        self.colour=StringVar(self)
        self.fontName=StringVar(self)
        self.themeIsBuiltin=BooleanVar(self)
        self.highlightTarget=StringVar(self)
        ##widget creation
        #body frame
        frame=self.tabPages.pages['Highlighting'].frame
        #body section frames
        frameCustom=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                text=' Custom Highlighting ')
        frameTheme=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                text=' Highlighting Theme ')
        #frameCustom
        self.textHighlightSample=Text(frameCustom,relief=SOLID,borderwidth=1,
                font=('courier',12,''),cursor='hand2',width=21,height=10,
                takefocus=FALSE,highlightthickness=0,wrap=NONE)
        text=self.textHighlightSample
        # Disable selection gestures; clicks are used to pick theme elements.
        text.bind('<Double-Button-1>',lambda e: 'break')
        text.bind('<B1-Motion>',lambda e: 'break')
        # (sample text, tag) pairs; each tag matches a themeElements entry.
        textAndTags=(('#you can click here','comment'),('\n','normal'),
            ('#to choose items','comment'),('\n','normal'),('def','keyword'),
            (' ','normal'),('func','definition'),('(param):','normal'),
            ('\n ','normal'),('"""string"""','string'),('\n var0 = ','normal'),
            ("'string'",'string'),('\n var1 = ','normal'),("'selected'",'hilite'),
            ('\n var2 = ','normal'),("'found'",'hit'),
            ('\n var3 = ','normal'),('list', 'builtin'), ('(','normal'),
            ('None', 'builtin'),(')\n\n','normal'),
            (' error ','error'),(' ','normal'),('cursor |','cursor'),
            ('\n ','normal'),('shell','console'),(' ','normal'),('stdout','stdout'),
            (' ','normal'),('stderr','stderr'),('\n','normal'))
        for txTa in textAndTags:
            text.insert(END,txTa[0],txTa[1])
        # Clicking a tagged region selects that element as the highlight target.
        for element in self.themeElements.keys():
            text.tag_bind(self.themeElements[element][0],'<ButtonPress-1>',
                lambda event,elem=element: event.widget.winfo_toplevel()
                .highlightTarget.set(elem))
        text.config(state=DISABLED)
        self.frameColourSet=Frame(frameCustom,relief=SOLID,borderwidth=1)
        frameFgBg=Frame(frameCustom)
        buttonSetColour=Button(self.frameColourSet,text='Choose Colour for :',
            command=self.GetColour,highlightthickness=0)
        self.optMenuHighlightTarget=DynOptionMenu(self.frameColourSet,
            self.highlightTarget,None,highlightthickness=0)#,command=self.SetHighlightTargetBinding
        self.radioFg=Radiobutton(frameFgBg,variable=self.fgHilite,
            value=1,text='Foreground',command=self.SetColourSampleBinding)
        self.radioBg=Radiobutton(frameFgBg,variable=self.fgHilite,
            value=0,text='Background',command=self.SetColourSampleBinding)
        self.fgHilite.set(1)
        buttonSaveCustomTheme=Button(frameCustom,
            text='Save as New Custom Theme',command=self.SaveAsNewTheme)
        #frameTheme
        labelTypeTitle=Label(frameTheme,text='Select : ')
        self.radioThemeBuiltin=Radiobutton(frameTheme,variable=self.themeIsBuiltin,
            value=1,command=self.SetThemeType,text='a Built-in Theme')
        self.radioThemeCustom=Radiobutton(frameTheme,variable=self.themeIsBuiltin,
            value=0,command=self.SetThemeType,text='a Custom Theme')
        self.optMenuThemeBuiltin=DynOptionMenu(frameTheme,
            self.builtinTheme,None,command=None)
        self.optMenuThemeCustom=DynOptionMenu(frameTheme,
            self.customTheme,None,command=None)
        self.buttonDeleteCustomTheme=Button(frameTheme,text='Delete Custom Theme',
                command=self.DeleteCustomTheme)
        ##widget packing
        #body
        frameCustom.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
        frameTheme.pack(side=LEFT,padx=5,pady=5,fill=Y)
        #frameCustom
        self.frameColourSet.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=X)
        frameFgBg.pack(side=TOP,padx=5,pady=0)
        self.textHighlightSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,
            fill=BOTH)
        buttonSetColour.pack(side=TOP,expand=TRUE,fill=X,padx=8,pady=4)
        self.optMenuHighlightTarget.pack(side=TOP,expand=TRUE,fill=X,padx=8,pady=3)
        self.radioFg.pack(side=LEFT,anchor=E)
        self.radioBg.pack(side=RIGHT,anchor=W)
        buttonSaveCustomTheme.pack(side=BOTTOM,fill=X,padx=5,pady=5)
        #frameTheme
        labelTypeTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
        self.radioThemeBuiltin.pack(side=TOP,anchor=W,padx=5)
        self.radioThemeCustom.pack(side=TOP,anchor=W,padx=5,pady=2)
        self.optMenuThemeBuiltin.pack(side=TOP,fill=X,padx=5,pady=5)
        self.optMenuThemeCustom.pack(side=TOP,fill=X,anchor=W,padx=5,pady=5)
        self.buttonDeleteCustomTheme.pack(side=TOP,fill=X,padx=5,pady=5)
        return frame
    def CreatePageKeys(self):
        """Build the 'Keys' page: the action/binding list with rebind button,
        and built-in/custom key-set selection."""
        #tkVars
        self.bindingTarget=StringVar(self)
        self.builtinKeys=StringVar(self)
        self.customKeys=StringVar(self)
        self.keysAreBuiltin=BooleanVar(self)
        self.keyBinding=StringVar(self)
        ##widget creation
        #body frame
        frame=self.tabPages.pages['Keys'].frame
        #body section frames
        frameCustom=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                text=' Custom Key Bindings ')
        frameKeySets=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                text=' Key Set ')
        #frameCustom
        frameTarget=Frame(frameCustom)
        labelTargetTitle=Label(frameTarget,text='Action - Key(s)')
        scrollTargetY=Scrollbar(frameTarget)
        scrollTargetX=Scrollbar(frameTarget,orient=HORIZONTAL)
        self.listBindings=Listbox(frameTarget,takefocus=FALSE,
                exportselection=FALSE)
        self.listBindings.bind('<ButtonRelease-1>',self.KeyBindingSelected)
        scrollTargetY.config(command=self.listBindings.yview)
        scrollTargetX.config(command=self.listBindings.xview)
        self.listBindings.config(yscrollcommand=scrollTargetY.set)
        self.listBindings.config(xscrollcommand=scrollTargetX.set)
        # Enabled by KeyBindingSelected once a binding row is chosen.
        self.buttonNewKeys=Button(frameCustom,text='Get New Keys for Selection',
            command=self.GetNewKeys,state=DISABLED)
        #frameKeySets
        frames = [Frame(frameKeySets, padx=2, pady=2, borderwidth=0)
                  for i in range(2)]
        self.radioKeysBuiltin=Radiobutton(frames[0],variable=self.keysAreBuiltin,
            value=1,command=self.SetKeysType,text='Use a Built-in Key Set')
        self.radioKeysCustom=Radiobutton(frames[0],variable=self.keysAreBuiltin,
            value=0,command=self.SetKeysType,text='Use a Custom Key Set')
        self.optMenuKeysBuiltin=DynOptionMenu(frames[0],
            self.builtinKeys,None,command=None)
        self.optMenuKeysCustom=DynOptionMenu(frames[0],
            self.customKeys,None,command=None)
        self.buttonDeleteCustomKeys=Button(frames[1],text='Delete Custom Key Set',
                command=self.DeleteCustomKeys)
        buttonSaveCustomKeys=Button(frames[1],
                text='Save as New Custom Key Set',command=self.SaveAsNewKeySet)
        ##widget packing
        #body
        frameCustom.pack(side=BOTTOM,padx=5,pady=5,expand=TRUE,fill=BOTH)
        frameKeySets.pack(side=BOTTOM,padx=5,pady=5,fill=BOTH)
        #frameCustom
        self.buttonNewKeys.pack(side=BOTTOM,fill=X,padx=5,pady=5)
        frameTarget.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
        #frame target
        frameTarget.columnconfigure(0,weight=1)
        frameTarget.rowconfigure(1,weight=1)
        labelTargetTitle.grid(row=0,column=0,columnspan=2,sticky=W)
        self.listBindings.grid(row=1,column=0,sticky=NSEW)
        scrollTargetY.grid(row=1,column=1,sticky=NS)
        scrollTargetX.grid(row=2,column=0,sticky=EW)
        #frameKeySets
        self.radioKeysBuiltin.grid(row=0, column=0, sticky=W+NS)
        self.radioKeysCustom.grid(row=1, column=0, sticky=W+NS)
        self.optMenuKeysBuiltin.grid(row=0, column=1, sticky=NSEW)
        self.optMenuKeysCustom.grid(row=1, column=1, sticky=NSEW)
        self.buttonDeleteCustomKeys.pack(side=LEFT,fill=X,expand=True,padx=2)
        buttonSaveCustomKeys.pack(side=LEFT,fill=X,expand=True,padx=2)
        frames[0].pack(side=TOP, fill=BOTH, expand=True)
        frames[1].pack(side=TOP, fill=X, expand=True, pady=2)
        return frame
    def CreatePageGeneral(self):
        """Build the 'General' page: startup/autosave prefs, initial window and
        paragraph sizes, source encoding, and the extra help-sources list."""
        #tkVars
        self.winWidth=StringVar(self)
        self.winHeight=StringVar(self)
        self.paraWidth=StringVar(self)
        self.startupEdit=IntVar(self)
        self.autoSave=IntVar(self)
        self.encoding=StringVar(self)
        self.userHelpBrowser=BooleanVar(self)
        self.helpBrowser=StringVar(self)
        #widget creation
        #body
        frame=self.tabPages.pages['General'].frame
        #body section frames
        frameRun=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                text=' Startup Preferences ')
        frameSave=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                text=' Autosave Preferences ')
        frameWinSize=Frame(frame,borderwidth=2,relief=GROOVE)
        frameParaSize=Frame(frame,borderwidth=2,relief=GROOVE)
        frameEncoding=Frame(frame,borderwidth=2,relief=GROOVE)
        frameHelp=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                text=' Additional Help Sources ')
        #frameRun
        labelRunChoiceTitle=Label(frameRun,text='At Startup')
        radioStartupEdit=Radiobutton(frameRun,variable=self.startupEdit,
            value=1,command=self.SetKeysType,text="Open Edit Window")
        radioStartupShell=Radiobutton(frameRun,variable=self.startupEdit,
            value=0,command=self.SetKeysType,text='Open Shell Window')
        #frameSave
        labelRunSaveTitle=Label(frameSave,text='At Start of Run (F5)  ')
        radioSaveAsk=Radiobutton(frameSave,variable=self.autoSave,
            value=0,command=self.SetKeysType,text="Prompt to Save")
        radioSaveAuto=Radiobutton(frameSave,variable=self.autoSave,
            value=1,command=self.SetKeysType,text='No Prompt')
        #frameWinSize
        labelWinSizeTitle=Label(frameWinSize,text='Initial Window Size'+
                '  (in characters)')
        labelWinWidthTitle=Label(frameWinSize,text='Width')
        entryWinWidth=Entry(frameWinSize,textvariable=self.winWidth,
                width=3)
        labelWinHeightTitle=Label(frameWinSize,text='Height')
        entryWinHeight=Entry(frameWinSize,textvariable=self.winHeight,
                width=3)
        #paragraphFormatWidth
        labelParaWidthTitle=Label(frameParaSize,text='Paragraph reformat'+
                '  width (in characters)')
        entryParaWidth=Entry(frameParaSize,textvariable=self.paraWidth,
                width=3)
        #frameEncoding
        labelEncodingTitle=Label(frameEncoding,text="Default Source Encoding")
        radioEncLocale=Radiobutton(frameEncoding,variable=self.encoding,
            value="locale",text="Locale-defined")
        radioEncUTF8=Radiobutton(frameEncoding,variable=self.encoding,
            value="utf-8",text="UTF-8")
        radioEncNone=Radiobutton(frameEncoding,variable=self.encoding,
            value="none",text="None")
        #frameHelp
        frameHelpList=Frame(frameHelp)
        frameHelpListButtons=Frame(frameHelpList)
        scrollHelpList=Scrollbar(frameHelpList)
        self.listHelp=Listbox(frameHelpList,height=5,takefocus=FALSE,
                exportselection=FALSE)
        scrollHelpList.config(command=self.listHelp.yview)
        self.listHelp.config(yscrollcommand=scrollHelpList.set)
        self.listHelp.bind('<ButtonRelease-1>',self.HelpSourceSelected)
        # Edit/Remove start disabled; HelpSourceSelected enables them.
        self.buttonHelpListEdit=Button(frameHelpListButtons,text='Edit',
                state=DISABLED,width=8,command=self.HelpListItemEdit)
        self.buttonHelpListAdd=Button(frameHelpListButtons,text='Add',
                width=8,command=self.HelpListItemAdd)
        self.buttonHelpListRemove=Button(frameHelpListButtons,text='Remove',
                state=DISABLED,width=8,command=self.HelpListItemRemove)
        #widget packing
        #body
        frameRun.pack(side=TOP,padx=5,pady=5,fill=X)
        frameSave.pack(side=TOP,padx=5,pady=5,fill=X)
        frameWinSize.pack(side=TOP,padx=5,pady=5,fill=X)
        frameParaSize.pack(side=TOP,padx=5,pady=5,fill=X)
        frameEncoding.pack(side=TOP,padx=5,pady=5,fill=X)
        frameHelp.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
        #frameRun
        labelRunChoiceTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
        radioStartupShell.pack(side=RIGHT,anchor=W,padx=5,pady=5)
        radioStartupEdit.pack(side=RIGHT,anchor=W,padx=5,pady=5)
        #frameSave
        labelRunSaveTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
        radioSaveAuto.pack(side=RIGHT,anchor=W,padx=5,pady=5)
        radioSaveAsk.pack(side=RIGHT,anchor=W,padx=5,pady=5)
        #frameWinSize
        labelWinSizeTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
        entryWinHeight.pack(side=RIGHT,anchor=E,padx=10,pady=5)
        labelWinHeightTitle.pack(side=RIGHT,anchor=E,pady=5)
        entryWinWidth.pack(side=RIGHT,anchor=E,padx=10,pady=5)
        labelWinWidthTitle.pack(side=RIGHT,anchor=E,pady=5)
        #paragraphFormatWidth
        labelParaWidthTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
        entryParaWidth.pack(side=RIGHT,anchor=E,padx=10,pady=5)
        #frameEncoding
        labelEncodingTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
        radioEncNone.pack(side=RIGHT,anchor=E,pady=5)
        radioEncUTF8.pack(side=RIGHT,anchor=E,pady=5)
        radioEncLocale.pack(side=RIGHT,anchor=E,pady=5)
        #frameHelp
        frameHelpListButtons.pack(side=RIGHT,padx=5,pady=5,fill=Y)
        frameHelpList.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
        scrollHelpList.pack(side=RIGHT,anchor=W,fill=Y)
        self.listHelp.pack(side=LEFT,anchor=E,expand=TRUE,fill=BOTH)
        self.buttonHelpListEdit.pack(side=TOP,anchor=W,pady=5)
        self.buttonHelpListAdd.pack(side=TOP,anchor=W)
        self.buttonHelpListRemove.pack(side=TOP,anchor=W,pady=5)
        return frame
def AttachVarCallbacks(self):
self.fontSize.trace_variable('w',self.VarChanged_fontSize)
self.fontName.trace_variable('w',self.VarChanged_fontName)
self.fontBold.trace_variable('w',self.VarChanged_fontBold)
self.spaceNum.trace_variable('w',self.VarChanged_spaceNum)
self.colour.trace_variable('w',self.VarChanged_colour)
self.builtinTheme.trace_variable('w',self.VarChanged_builtinTheme)
self.customTheme.trace_variable('w',self.VarChanged_customTheme)
self.themeIsBuiltin.trace_variable('w',self.VarChanged_themeIsBuiltin)
self.highlightTarget.trace_variable('w',self.VarChanged_highlightTarget)
self.keyBinding.trace_variable('w',self.VarChanged_keyBinding)
self.builtinKeys.trace_variable('w',self.VarChanged_builtinKeys)
self.customKeys.trace_variable('w',self.VarChanged_customKeys)
self.keysAreBuiltin.trace_variable('w',self.VarChanged_keysAreBuiltin)
self.winWidth.trace_variable('w',self.VarChanged_winWidth)
self.winHeight.trace_variable('w',self.VarChanged_winHeight)
self.paraWidth.trace_variable('w',self.VarChanged_paraWidth)
self.startupEdit.trace_variable('w',self.VarChanged_startupEdit)
self.autoSave.trace_variable('w',self.VarChanged_autoSave)
self.encoding.trace_variable('w',self.VarChanged_encoding)
def VarChanged_fontSize(self,*params):
value=self.fontSize.get()
self.AddChangedItem('main','EditorWindow','font-size',value)
def VarChanged_fontName(self,*params):
value=self.fontName.get()
self.AddChangedItem('main','EditorWindow','font',value)
def VarChanged_fontBold(self,*params):
value=self.fontBold.get()
self.AddChangedItem('main','EditorWindow','font-bold',value)
def VarChanged_spaceNum(self,*params):
value=self.spaceNum.get()
self.AddChangedItem('main','Indent','num-spaces',value)
def VarChanged_colour(self,*params):
self.OnNewColourSet()
def VarChanged_builtinTheme(self,*params):
value=self.builtinTheme.get()
self.AddChangedItem('main','Theme','name',value)
self.PaintThemeSample()
def VarChanged_customTheme(self,*params):
value=self.customTheme.get()
if value != '- no custom themes -':
self.AddChangedItem('main','Theme','name',value)
self.PaintThemeSample()
def VarChanged_themeIsBuiltin(self,*params):
value=self.themeIsBuiltin.get()
self.AddChangedItem('main','Theme','default',value)
if value:
self.VarChanged_builtinTheme()
else:
self.VarChanged_customTheme()
def VarChanged_highlightTarget(self,*params):
self.SetHighlightTarget()
def VarChanged_keyBinding(self,*params):
value=self.keyBinding.get()
keySet=self.customKeys.get()
event=self.listBindings.get(ANCHOR).split()[0]
if idleConf.IsCoreBinding(event):
#this is a core keybinding
self.AddChangedItem('keys',keySet,event,value)
else: #this is an extension key binding
extName=idleConf.GetExtnNameForEvent(event)
extKeybindSection=extName+'_cfgBindings'
self.AddChangedItem('extensions',extKeybindSection,event,value)
def VarChanged_builtinKeys(self,*params):
value=self.builtinKeys.get()
self.AddChangedItem('main','Keys','name',value)
self.LoadKeysList(value)
def VarChanged_customKeys(self,*params):
value=self.customKeys.get()
if value != '- no custom keys -':
self.AddChangedItem('main','Keys','name',value)
self.LoadKeysList(value)
def VarChanged_keysAreBuiltin(self,*params):
value=self.keysAreBuiltin.get()
self.AddChangedItem('main','Keys','default',value)
if value:
self.VarChanged_builtinKeys()
else:
self.VarChanged_customKeys()
def VarChanged_winWidth(self,*params):
value=self.winWidth.get()
self.AddChangedItem('main','EditorWindow','width',value)
def VarChanged_winHeight(self,*params):
value=self.winHeight.get()
self.AddChangedItem('main','EditorWindow','height',value)
def VarChanged_paraWidth(self,*params):
value=self.paraWidth.get()
self.AddChangedItem('main','FormatParagraph','paragraph',value)
def VarChanged_startupEdit(self,*params):
value=self.startupEdit.get()
self.AddChangedItem('main','General','editor-on-startup',value)
def VarChanged_autoSave(self,*params):
value=self.autoSave.get()
self.AddChangedItem('main','General','autosave',value)
def VarChanged_encoding(self,*params):
value=self.encoding.get()
self.AddChangedItem('main','EditorWindow','encoding',value)
def ResetChangedItems(self):
#When any config item is changed in this dialog, an entry
#should be made in the relevant section (config type) of this
#dictionary. The key should be the config file section name and the
#value a dictionary, whose key:value pairs are item=value pairs for
#that config file section.
self.changedItems={'main':{},'highlight':{},'keys':{},'extensions':{}}
def AddChangedItem(self,type,section,item,value):
value=str(value) #make sure we use a string
if section not in self.changedItems[type]:
self.changedItems[type][section]={}
self.changedItems[type][section][item]=value
def GetDefaultItems(self):
dItems={'main':{},'highlight':{},'keys':{},'extensions':{}}
for configType in dItems.keys():
sections=idleConf.GetSectionList('default',configType)
for section in sections:
dItems[configType][section]={}
options=idleConf.defaultCfg[configType].GetOptionList(section)
for option in options:
dItems[configType][section][option]=(
idleConf.defaultCfg[configType].Get(section,option))
return dItems
def SetThemeType(self):
if self.themeIsBuiltin.get():
self.optMenuThemeBuiltin.config(state=NORMAL)
self.optMenuThemeCustom.config(state=DISABLED)
self.buttonDeleteCustomTheme.config(state=DISABLED)
else:
self.optMenuThemeBuiltin.config(state=DISABLED)
self.radioThemeCustom.config(state=NORMAL)
self.optMenuThemeCustom.config(state=NORMAL)
self.buttonDeleteCustomTheme.config(state=NORMAL)
def SetKeysType(self):
if self.keysAreBuiltin.get():
self.optMenuKeysBuiltin.config(state=NORMAL)
self.optMenuKeysCustom.config(state=DISABLED)
self.buttonDeleteCustomKeys.config(state=DISABLED)
else:
self.optMenuKeysBuiltin.config(state=DISABLED)
self.radioKeysCustom.config(state=NORMAL)
self.optMenuKeysCustom.config(state=NORMAL)
self.buttonDeleteCustomKeys.config(state=NORMAL)
    def GetNewKeys(self):
        """Let the user re-bind the event anchored in the bindings list.

        Opens GetKeysDialog with the current key sequences (including any
        unsaved edits).  Built-in key sets are read-only, so if one is active
        the user is first asked to name a new custom key set that the change
        is applied to; cancelling either dialog restores the selection and
        leaves everything untouched.
        """
        listIndex=self.listBindings.index(ANCHOR)
        binding=self.listBindings.get(listIndex)
        bindName=binding.split()[0] #first part, up to first space
        if self.keysAreBuiltin.get():
            currentKeySetName=self.builtinKeys.get()
        else:
            currentKeySetName=self.customKeys.get()
        currentBindings=idleConf.GetCurrentKeySet()
        if currentKeySetName in self.changedItems['keys'].keys(): #unsaved changes
            # Overlay unsaved edits so the dialog can reject clashes with them.
            keySetChanges=self.changedItems['keys'][currentKeySetName]
            for event in keySetChanges.keys():
                currentBindings[event]=keySetChanges[event].split()
        currentKeySequences=currentBindings.values()
        newKeys=GetKeysDialog(self,'Get New Keys',bindName,
                currentKeySequences).result
        if newKeys: #new keys were specified
            if self.keysAreBuiltin.get(): #current key set is a built-in
                message=('Your changes will be saved as a new Custom Key Set. '+
                         'Enter a name for your new Custom Key Set below.')
                newKeySet=self.GetNewKeysName(message)
                if not newKeySet: #user cancelled custom key set creation
                    # Restore the selection highlight and bail out unchanged.
                    self.listBindings.select_set(listIndex)
                    self.listBindings.select_anchor(listIndex)
                    return
                else: #create new custom key set based on previously active key set
                    self.CreateNewKeySet(newKeySet)
            # Rewrite the listbox row and queue the new binding.
            self.listBindings.delete(listIndex)
            self.listBindings.insert(listIndex,bindName+' - '+newKeys)
            self.listBindings.select_set(listIndex)
            self.listBindings.select_anchor(listIndex)
            self.keyBinding.set(newKeys)
        else:
            # Dialog cancelled: just restore the selection highlight.
            self.listBindings.select_set(listIndex)
            self.listBindings.select_anchor(listIndex)
def GetNewKeysName(self,message):
usedNames=(idleConf.GetSectionList('user','keys')+
idleConf.GetSectionList('default','keys'))
newKeySet=GetCfgSectionNameDialog(self,'New Custom Key Set',
message,usedNames).result
return newKeySet
def SaveAsNewKeySet(self):
newKeysName=self.GetNewKeysName('New Key Set Name:')
if newKeysName:
self.CreateNewKeySet(newKeysName)
def KeyBindingSelected(self,event):
self.buttonNewKeys.config(state=NORMAL)
    def CreateNewKeySet(self,newKeySetName):
        """Clone the active key set (plus unsaved edits) as a custom set.

        The new set is written to the user config immediately and then made
        the active selection in the GUI.
        """
        #creates new custom key set based on the previously active key set,
        #and makes the new key set active
        if self.keysAreBuiltin.get():
            prevKeySetName=self.builtinKeys.get()
        else:
            prevKeySetName=self.customKeys.get()
        prevKeys=idleConf.GetCoreKeys(prevKeySetName)
        newKeys={}
        for event in prevKeys.keys(): #add key set to changed items
            eventName=event[2:-2] #trim off the angle brackets
            binding=string.join(prevKeys[event])
            newKeys[eventName]=binding
        #handle any unsaved changes to prev key set
        if prevKeySetName in self.changedItems['keys'].keys():
            keySetChanges=self.changedItems['keys'][prevKeySetName]
            for event in keySetChanges.keys():
                newKeys[event]=keySetChanges[event]
        #save the new key set to the user config
        self.SaveNewKeySet(newKeySetName,newKeys)
        #change gui over to the new key set
        customKeyList=idleConf.GetSectionList('user','keys')
        customKeyList.sort()
        self.optMenuKeysCustom.SetMenu(customKeyList,newKeySetName)
        self.keysAreBuiltin.set(0)
        self.SetKeysType()
    def LoadKeysList(self,keySetName):
        """Repopulate the bindings listbox from the named key set.

        Unsaved edits queued in self.changedItems override the stored
        bindings.  If a row was selected before the reload, the same index is
        re-selected afterwards.
        """
        reselect=0
        newKeySet=0  # NOTE(review): set but never used here
        if self.listBindings.curselection():
            reselect=1
            listIndex=self.listBindings.index(ANCHOR)
        keySet=idleConf.GetKeySet(keySetName)
        bindNames=keySet.keys()
        bindNames.sort()
        self.listBindings.delete(0,END)
        for bindName in bindNames:
            key=string.join(keySet[bindName]) #make key(s) into a string
            bindName=bindName[2:-2] #trim off the angle brackets
            if keySetName in self.changedItems['keys'].keys():
                #handle any unsaved changes to this key set
                if bindName in self.changedItems['keys'][keySetName].keys():
                    key=self.changedItems['keys'][keySetName][bindName]
            self.listBindings.insert(END, bindName+' - '+key)
        if reselect:
            self.listBindings.see(listIndex)
            self.listBindings.select_set(listIndex)
            self.listBindings.select_anchor(listIndex)
    def DeleteCustomKeys(self):
        """Delete the selected custom key set after user confirmation.

        Removes the section from the user config (and any pending edits for
        it), saves immediately, rebuilds the custom key-set menu, and reverts
        the dialog to the default key set.  The deletion cannot be undone, so
        Apply() is called right away.
        """
        keySetName=self.customKeys.get()
        if not tkMessageBox.askyesno('Delete Key Set','Are you sure you wish '+
                                     'to delete the key set %r ?' % (keySetName),
                                     parent=self):
            return
        #remove key set from config
        idleConf.userCfg['keys'].remove_section(keySetName)
        if keySetName in self.changedItems['keys']:
            del(self.changedItems['keys'][keySetName])
        #write changes
        idleConf.userCfg['keys'].Save()
        #reload user key set list
        itemList=idleConf.GetSectionList('user','keys')
        itemList.sort()
        if not itemList:
            self.radioKeysCustom.config(state=DISABLED)
            self.optMenuKeysCustom.SetMenu(itemList,'- no custom keys -')
        else:
            self.optMenuKeysCustom.SetMenu(itemList,itemList[0])
        #revert to default key set
        self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys','default'))
        self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys','name'))
        #user can't back out of these changes, they must be applied now
        self.Apply()
        self.SetKeysType()
    def DeleteCustomTheme(self):
        """Delete the selected custom theme after user confirmation.

        Mirrors DeleteCustomKeys: the theme section is removed from the user
        config and saved at once, the custom-theme menu is rebuilt, and the
        dialog reverts to the default theme via an immediate Apply().
        """
        themeName=self.customTheme.get()
        if not tkMessageBox.askyesno('Delete Theme','Are you sure you wish '+
                                     'to delete the theme %r ?' % (themeName,),
                                     parent=self):
            return
        #remove theme from config
        idleConf.userCfg['highlight'].remove_section(themeName)
        if themeName in self.changedItems['highlight']:
            del(self.changedItems['highlight'][themeName])
        #write changes
        idleConf.userCfg['highlight'].Save()
        #reload user theme list
        itemList=idleConf.GetSectionList('user','highlight')
        itemList.sort()
        if not itemList:
            self.radioThemeCustom.config(state=DISABLED)
            self.optMenuThemeCustom.SetMenu(itemList,'- no custom themes -')
        else:
            self.optMenuThemeCustom.SetMenu(itemList,itemList[0])
        #revert to default theme
        self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme','default'))
        self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme','name'))
        #user can't back out of these changes, they must be applied now
        self.Apply()
        self.SetThemeType()
    def GetColour(self):
        """Run the colour chooser for the current highlight target.

        If a new colour is picked while a built-in theme is active, the user
        must first name a custom theme to hold the change (built-in themes
        are read-only).  Setting self.colour fires its trace, which applies
        and queues the change.
        """
        target=self.highlightTarget.get()
        prevColour=self.frameColourSet.cget('bg')
        rgbTuplet, colourString = tkColorChooser.askcolor(parent=self,
            title='Pick new colour for : '+target,initialcolor=prevColour)
        if colourString and (colourString!=prevColour):
            #user didn't cancel, and they chose a new colour
            if self.themeIsBuiltin.get(): #current theme is a built-in
                message=('Your changes will be saved as a new Custom Theme. '+
                         'Enter a name for your new Custom Theme below.')
                newTheme=self.GetNewThemeName(message)
                if not newTheme: #user cancelled custom theme creation
                    return
                else: #create new custom theme based on previously active theme
                    self.CreateNewTheme(newTheme)
                    self.colour.set(colourString)
            else: #current theme is user defined
                self.colour.set(colourString)
def OnNewColourSet(self):
newColour=self.colour.get()
self.frameColourSet.config(bg=newColour)#set sample
if self.fgHilite.get(): plane='foreground'
else: plane='background'
sampleElement=self.themeElements[self.highlightTarget.get()][0]
self.textHighlightSample.tag_config(sampleElement, **{plane:newColour})
theme=self.customTheme.get()
themeElement=sampleElement+'-'+plane
self.AddChangedItem('highlight',theme,themeElement,newColour)
def GetNewThemeName(self,message):
usedNames=(idleConf.GetSectionList('user','highlight')+
idleConf.GetSectionList('default','highlight'))
newTheme=GetCfgSectionNameDialog(self,'New Custom Theme',
message,usedNames).result
return newTheme
def SaveAsNewTheme(self):
newThemeName=self.GetNewThemeName('New Theme Name:')
if newThemeName:
self.CreateNewTheme(newThemeName)
    def CreateNewTheme(self,newThemeName):
        """Clone the active theme (plus unsaved edits) as a custom theme.

        The new theme is written to the user config immediately and then made
        the active selection in the GUI.
        """
        #creates new custom theme based on the previously active theme,
        #and makes the new theme active
        if self.themeIsBuiltin.get():
            themeType='default'
            themeName=self.builtinTheme.get()
        else:
            themeType='user'
            themeName=self.customTheme.get()
        newTheme=idleConf.GetThemeDict(themeType,themeName)
        #apply any of the old theme's unsaved changes to the new theme
        if themeName in self.changedItems['highlight'].keys():
            themeChanges=self.changedItems['highlight'][themeName]
            for element in themeChanges.keys():
                newTheme[element]=themeChanges[element]
        #save the new theme
        self.SaveNewTheme(newThemeName,newTheme)
        #change gui over to the new theme
        customThemeList=idleConf.GetSectionList('user','highlight')
        customThemeList.sort()
        self.optMenuThemeCustom.SetMenu(customThemeList,newThemeName)
        self.themeIsBuiltin.set(0)
        self.SetThemeType()
def OnListFontButtonRelease(self,event):
font = self.listFontName.get(ANCHOR)
self.fontName.set(font.lower())
self.SetFontSample()
def SetFontSample(self,event=None):
fontName=self.fontName.get()
if self.fontBold.get():
fontWeight=tkFont.BOLD
else:
fontWeight=tkFont.NORMAL
self.editFont.config(size=self.fontSize.get(),
weight=fontWeight,family=fontName)
def SetHighlightTarget(self):
if self.highlightTarget.get()=='Cursor': #bg not possible
self.radioFg.config(state=DISABLED)
self.radioBg.config(state=DISABLED)
self.fgHilite.set(1)
else: #both fg and bg can be set
self.radioFg.config(state=NORMAL)
self.radioBg.config(state=NORMAL)
self.fgHilite.set(1)
self.SetColourSample()
def SetColourSampleBinding(self,*args):
self.SetColourSample()
def SetColourSample(self):
#set the colour smaple area
tag=self.themeElements[self.highlightTarget.get()][0]
if self.fgHilite.get(): plane='foreground'
else: plane='background'
colour=self.textHighlightSample.tag_cget(tag,plane)
self.frameColourSet.config(bg=colour)
    def PaintThemeSample(self):
        """Recolour every element of the highlight sample from the active
        theme, overlaying any unsaved edits queued in self.changedItems."""
        if self.themeIsBuiltin.get(): #a default theme
            theme=self.builtinTheme.get()
        else: #a user theme
            theme=self.customTheme.get()
        for elementTitle in self.themeElements.keys():
            element=self.themeElements[elementTitle][0]
            colours=idleConf.GetHighlight(theme,element)
            if element=='cursor': #cursor sample needs special painting
                # The cursor keeps the normal text's background.
                colours['background']=idleConf.GetHighlight(theme,
                        'normal', fgBg='bg')
            #handle any unsaved changes to this theme
            if theme in self.changedItems['highlight'].keys():
                themeDict=self.changedItems['highlight'][theme]
                if element+'-foreground' in themeDict:
                    colours['foreground']=themeDict[element+'-foreground']
                if element+'-background' in themeDict:
                    colours['background']=themeDict[element+'-background']
            self.textHighlightSample.tag_config(element, **colours)
        self.SetColourSample()
def HelpSourceSelected(self,event):
self.SetHelpListButtonStates()
def SetHelpListButtonStates(self):
if self.listHelp.size()<1: #no entries in list
self.buttonHelpListEdit.config(state=DISABLED)
self.buttonHelpListRemove.config(state=DISABLED)
else: #there are some entries
if self.listHelp.curselection(): #there currently is a selection
self.buttonHelpListEdit.config(state=NORMAL)
self.buttonHelpListRemove.config(state=NORMAL)
else: #there currently is not a selection
self.buttonHelpListEdit.config(state=DISABLED)
self.buttonHelpListRemove.config(state=DISABLED)
def HelpListItemAdd(self):
helpSource=GetHelpSourceDialog(self,'New Help Source').result
if helpSource:
self.userHelpList.append( (helpSource[0],helpSource[1]) )
self.listHelp.insert(END,helpSource[0])
self.UpdateUserHelpChangedItems()
self.SetHelpListButtonStates()
def HelpListItemEdit(self):
itemIndex=self.listHelp.index(ANCHOR)
helpSource=self.userHelpList[itemIndex]
newHelpSource=GetHelpSourceDialog(self,'Edit Help Source',
menuItem=helpSource[0],filePath=helpSource[1]).result
if (not newHelpSource) or (newHelpSource==helpSource):
return #no changes
self.userHelpList[itemIndex]=newHelpSource
self.listHelp.delete(itemIndex)
self.listHelp.insert(itemIndex,newHelpSource[0])
self.UpdateUserHelpChangedItems()
self.SetHelpListButtonStates()
def HelpListItemRemove(self):
itemIndex=self.listHelp.index(ANCHOR)
del(self.userHelpList[itemIndex])
self.listHelp.delete(itemIndex)
self.UpdateUserHelpChangedItems()
self.SetHelpListButtonStates()
def UpdateUserHelpChangedItems(self):
"Clear and rebuild the HelpFiles section in self.changedItems"
self.changedItems['main']['HelpFiles'] = {}
for num in range(1,len(self.userHelpList)+1):
self.AddChangedItem('main','HelpFiles',str(num),
string.join(self.userHelpList[num-1][:2],';'))
    def LoadFontCfg(self):
        """Populate the fonts page from the config: font list with the
        configured family pre-selected, the size dropdown, the bold flag,
        and finally the rendered sample."""
        ##base editor font selection list
        fonts=list(tkFont.families(self))
        fonts.sort()
        for font in fonts:
            self.listFontName.insert(END,font)
        configuredFont=idleConf.GetOption('main','EditorWindow','font',
                                          default='courier')
        lc_configuredFont = configuredFont.lower()
        self.fontName.set(lc_configuredFont)
        # Match case-insensitively so e.g. 'Courier' finds 'courier'.
        lc_fonts = [s.lower() for s in fonts]
        if lc_configuredFont in lc_fonts:
            currentFontIndex = lc_fonts.index(lc_configuredFont)
            self.listFontName.see(currentFontIndex)
            self.listFontName.select_set(currentFontIndex)
            self.listFontName.select_anchor(currentFontIndex)
        ##font size dropdown
        fontSize=idleConf.GetOption('main','EditorWindow','font-size',
                                    default='10')
        self.optMenuFontSize.SetMenu(('7','8','9','10','11','12','13','14',
                                      '16','18','20','22'),fontSize )
        ##fontWeight
        self.fontBold.set(idleConf.GetOption('main','EditorWindow',
                                             'font-bold',default=0,type='bool'))
        ##font sample
        self.SetFontSample()
def LoadTabCfg(self):
##indent sizes
spaceNum=idleConf.GetOption('main','Indent','num-spaces',
default=4,type='int')
self.spaceNum.set(spaceNum)
    def LoadThemeCfg(self):
        """Populate the highlighting page: theme-type radio, both theme
        menus (built-in and custom), the element selector, and the sample."""
        ##current theme type radiobutton
        self.themeIsBuiltin.set(idleConf.GetOption('main','Theme','default',
                                                   type='bool',default=1))
        ##currently set theme
        currentOption=idleConf.CurrentTheme()
        ##load available theme option menus
        if self.themeIsBuiltin.get(): #default theme selected
            itemList=idleConf.GetSectionList('default','highlight')
            itemList.sort()
            self.optMenuThemeBuiltin.SetMenu(itemList,currentOption)
            itemList=idleConf.GetSectionList('user','highlight')
            itemList.sort()
            if not itemList:
                # No custom themes: disable the choice and show a placeholder.
                self.radioThemeCustom.config(state=DISABLED)
                self.customTheme.set('- no custom themes -')
            else:
                self.optMenuThemeCustom.SetMenu(itemList,itemList[0])
        else: #user theme selected
            itemList=idleConf.GetSectionList('user','highlight')
            itemList.sort()
            self.optMenuThemeCustom.SetMenu(itemList,currentOption)
            itemList=idleConf.GetSectionList('default','highlight')
            itemList.sort()
            self.optMenuThemeBuiltin.SetMenu(itemList,itemList[0])
        self.SetThemeType()
        ##load theme element option menu
        # Elements are ordered by their sample-position value.
        themeNames=self.themeElements.keys()
        themeNames.sort(key=lambda x: self.themeElements[x][1])
        self.optMenuHighlightTarget.SetMenu(themeNames,themeNames[0])
        self.PaintThemeSample()
        self.SetHighlightTarget()
    def LoadKeyCfg(self):
        """Populate the keys page: key-set-type radio, both key set menus
        (built-in and custom), and the bindings list."""
        ##current keys type radiobutton
        self.keysAreBuiltin.set(idleConf.GetOption('main','Keys','default',
                                                   type='bool',default=1))
        ##currently set keys
        currentOption=idleConf.CurrentKeys()
        ##load available keyset option menus
        if self.keysAreBuiltin.get(): #default theme selected
            itemList=idleConf.GetSectionList('default','keys')
            itemList.sort()
            self.optMenuKeysBuiltin.SetMenu(itemList,currentOption)
            itemList=idleConf.GetSectionList('user','keys')
            itemList.sort()
            if not itemList:
                # No custom key sets: disable the choice, show a placeholder.
                self.radioKeysCustom.config(state=DISABLED)
                self.customKeys.set('- no custom keys -')
            else:
                self.optMenuKeysCustom.SetMenu(itemList,itemList[0])
        else: #user key set selected
            itemList=idleConf.GetSectionList('user','keys')
            itemList.sort()
            self.optMenuKeysCustom.SetMenu(itemList,currentOption)
            itemList=idleConf.GetSectionList('default','keys')
            itemList.sort()
            self.optMenuKeysBuiltin.SetMenu(itemList,itemList[0])
        self.SetKeysType()
        ##load keyset element list
        keySetName=idleConf.CurrentKeys()
        self.LoadKeysList(keySetName)
    def LoadGeneralCfg(self):
        """Populate the general page: startup mode, autosave, window and
        paragraph sizes, source encoding, and the extra-help-source list."""
        #startup state
        self.startupEdit.set(idleConf.GetOption('main','General',
                                                'editor-on-startup',default=1,type='bool'))
        #autosave state
        self.autoSave.set(idleConf.GetOption('main', 'General', 'autosave',
                                             default=0, type='bool'))
        #initial window size
        self.winWidth.set(idleConf.GetOption('main','EditorWindow','width'))
        self.winHeight.set(idleConf.GetOption('main','EditorWindow','height'))
        #initial paragraph reformat size
        self.paraWidth.set(idleConf.GetOption('main','FormatParagraph','paragraph'))
        # default source encoding
        self.encoding.set(idleConf.GetOption('main', 'EditorWindow',
                                             'encoding', default='none'))
        # additional help sources
        self.userHelpList = idleConf.GetAllExtraHelpSourcesList()
        for helpItem in self.userHelpList:
            self.listHelp.insert(END,helpItem[0])
        self.SetHelpListButtonStates()
def LoadConfigs(self):
"""
load configuration from default and user config files and populate
the widgets on the config dialog pages.
"""
### fonts / tabs page
self.LoadFontCfg()
self.LoadTabCfg()
### highlighting page
self.LoadThemeCfg()
### keys page
self.LoadKeyCfg()
### general page
self.LoadGeneralCfg()
def SaveNewKeySet(self,keySetName,keySet):
"""
save a newly created core key set.
keySetName - string, the name of the new key set
keySet - dictionary containing the new key set
"""
if not idleConf.userCfg['keys'].has_section(keySetName):
idleConf.userCfg['keys'].add_section(keySetName)
for event in keySet.keys():
value=keySet[event]
idleConf.userCfg['keys'].SetOption(keySetName,event,value)
def SaveNewTheme(self,themeName,theme):
"""
save a newly created theme.
themeName - string, the name of the new theme
theme - dictionary containing the new theme
"""
if not idleConf.userCfg['highlight'].has_section(themeName):
idleConf.userCfg['highlight'].add_section(themeName)
for element in theme.keys():
value=theme[element]
idleConf.userCfg['highlight'].SetOption(themeName,element,value)
def SetUserValue(self,configType,section,item,value):
if idleConf.defaultCfg[configType].has_option(section,item):
if idleConf.defaultCfg[configType].Get(section,item)==value:
#the setting equals a default setting, remove it from user cfg
return idleConf.userCfg[configType].RemoveOption(section,item)
#if we got here set the option
return idleConf.userCfg[configType].SetOption(section,item,value)
    def SaveAllChangedConfigs(self):
        """Save configuration changes to the user config files.

        The HelpFiles section is replaced wholesale rather than merged, and
        the 'keys' and 'highlight' user files are always written even when
        unchanged.  Clears self.changedItems when done.
        """
        idleConf.userCfg['main'].Save()
        for configType in self.changedItems.keys():
            cfgTypeHasChanges = False
            for section in self.changedItems[configType].keys():
                if section == 'HelpFiles':
                    #this section gets completely replaced
                    idleConf.userCfg['main'].remove_section('HelpFiles')
                    cfgTypeHasChanges = True
                for item in self.changedItems[configType][section].keys():
                    value = self.changedItems[configType][section][item]
                    if self.SetUserValue(configType,section,item,value):
                        cfgTypeHasChanges = True
            if cfgTypeHasChanges:
                idleConf.userCfg[configType].Save()
        for configType in ['keys', 'highlight']:
            # save these even if unchanged!
            idleConf.userCfg[configType].Save()
        self.ResetChangedItems() #clear the changed items dict
def DeactivateCurrentConfig(self):
#Before a config is saved, some cleanup of current
#config must be done - remove the previous keybindings
winInstances=self.parent.instance_dict.keys()
for instance in winInstances:
instance.RemoveKeybindings()
def ActivateConfigChanges(self):
"Dynamically apply configuration changes"
winInstances=self.parent.instance_dict.keys()
for instance in winInstances:
instance.ResetColorizer()
instance.ResetFont()
instance.set_notabs_indentwidth()
instance.ApplyKeybindings()
instance.reset_help_menu_entries()
def Cancel(self):
self.destroy()
def Ok(self):
self.Apply()
self.destroy()
def Apply(self):
self.DeactivateCurrentConfig()
self.SaveAllChangedConfigs()
self.ActivateConfigChanges()
def Help(self):
pass
if __name__ == '__main__':
    # Manual smoke test: pop up a button that launches the dialog.
    root = Tk()
    root.instance_dict = {}   # the dialog expects this on its parent
    Button(root, text='Dialog',
           command=lambda: ConfigDialog(root, 'Settings')).pack()
    root.mainloop()
| gpl-2.0 |
googleinterns/commentaries | models.py | 1 | 7751 | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as utils
import torch.nn.functional as F
# Uses curriculum structure
class ConvNetTeacher(nn.Module):
    """Teacher CNN with curriculum structure.

    The normalised inner-loop iteration index is appended to the input as an
    extra image channel, so the per-example weight can depend on training
    progress.  forward() returns a sigmoid weight in (0, 1) per example.
    """

    def __init__(self, dataset, inner_steps):
        """dataset: 'mnist' (1-channel 28x28) or CIFAR-style (3-channel 32x32);
        inner_steps: total inner-loop steps, used to normalise `itr`."""
        super(ConvNetTeacher, self).__init__()
        # One extra input channel carries the iteration index.
        inpl = 2 if dataset == 'mnist' else 4
        self.layer1 = nn.Sequential(
            nn.Conv2d(inpl, 32, kernel_size=5, stride=2, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2))
        # Flattened feature size after the two conv stages.
        matsize = 64 if dataset == 'mnist' else 256
        self.fc_lambda = nn.Linear(matsize, 1)
        self.inner_steps = inner_steps

    def forward(self, x, itr):
        """Return a (batch, 1) weight tensor for input batch x at step itr."""
        # Normalise the iteration counter to [0, 1].
        itr = itr / self.inner_steps
        # Broadcast the progress value into a full image channel and append
        # it to the input.  Bug fix: allocate on x's device/dtype instead of
        # referencing the global `device`, which is undefined in this module.
        itrchannel = torch.ones(x.shape[0], 1, x.shape[2], x.shape[3],
                                dtype=x.dtype, device=x.device) * itr
        x = torch.cat([x, itrchannel], dim=1)
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)
        return torch.sigmoid(self.fc_lambda(out))
# No curriculum structure
class ConvNetTeacher2(nn.Module):
    """Teacher CNN without curriculum structure.

    Same backbone as ConvNetTeacher, but the raw image is used directly (no
    iteration channel); the `itr` argument of forward() is accepted only for
    API compatibility and is ignored.
    """

    def __init__(self, dataset, inner_steps):
        """dataset: 'mnist' or CIFAR-style; inner_steps is unused here but
        kept so both teachers share a constructor signature."""
        super(ConvNetTeacher2, self).__init__()
        # Bug fix: the original read the undefined global `args.dataset`
        # (NameError at construction); use the `dataset` parameter instead.
        inpl = 1 if dataset == 'mnist' else 3
        self.layer1 = nn.Sequential(
            nn.Conv2d(inpl, 32, kernel_size=5, stride=2, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2))
        # Flattened feature size after the two conv stages.
        matsize = 64 if dataset == 'mnist' else 256
        self.fc_lambda = nn.Linear(matsize, 1)

    def forward(self, x, itr):
        """Return a (batch, 1) sigmoid weight; `itr` is ignored."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)
        return torch.sigmoid(self.fc_lambda(out))
class ConvNetStudent(nn.Module):
    """Student CNN: two conv stages followed by a 10-way log-softmax head."""

    def __init__(self, dataset):
        """dataset: 'mnist' (1-channel 28x28) or CIFAR-style (3-channel 32x32)."""
        super(ConvNetStudent, self).__init__()
        inpl = 1 if dataset == 'mnist' else 3
        self.layer1 = nn.Sequential(
            nn.Conv2d(inpl, 32, kernel_size=5, stride=2, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2))
        # Flattened feature size after the two conv stages.
        matsize = 64 if dataset == 'mnist' else 256
        self.fc_pi = nn.Linear(matsize, 10)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, 10)."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)
        # Explicit dim silences the implicit-dim deprecation warning; for a
        # 2-D tensor dim=-1 matches the old implicit behaviour exactly.
        return F.log_softmax(self.fc_pi(out), dim=-1)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 padded convolution without bias (the following BatchNorm has one)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style)."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless the shape changes, in which case a 1x1
        # conv + BN projection matches channels and stride.
        out_planes = self.expansion * planes
        if stride != 1 or in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_planes))
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50+ style)."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        out_planes = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        # Identity shortcut unless the shape changes, in which case a 1x1
        # conv + BN projection matches channels and stride.
        if stride != 1 or in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_planes))
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return F.relu(out + residual)
class ResNet(nn.Module):
    """Generic ResNet backbone built from BasicBlock or Bottleneck blocks.

    block      - block class (BasicBlock or Bottleneck)
    num_blocks - list of four ints: blocks per stage
    forward() returns per-class log-probabilities; with
    return_features=True it also returns the pooled feature vector.
    """
    def __init__(self, block, num_blocks, num_classes=10, num_channels=3):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = conv3x3(num_channels,64)
        self.bn1 = nn.BatchNorm2d(64)
        # Four stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
        # Not used within this class; presumably read by external training
        # code -- TODO confirm.
        self.weight_decay = None
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage uses the requested stride.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x, return_features=False):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Global average pool to a (batch, channels) feature vector.
        out = F.adaptive_avg_pool2d(out,1)
        features = out.view(out.size(0), -1)
        out = F.log_softmax(self.linear(features), dim=-1)
        if return_features:
            return out, features
        else:
            return out
def ResNet18(num_classes=10, dataset='cifar10'):
    """Build an 18-layer ResNet (BasicBlock, [2, 2, 2, 2]); 1 input channel
    for 'mnist', otherwise 3."""
    channels = 1 if dataset == 'mnist' else 3
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes, channels)
def ResNet34(num_classes=10, dataset='cifar10'):
    """Construct a ResNet-34 (BasicBlock, 3-4-6-3 stages).

    `dataset == 'mnist'` selects single-channel input; everything else gets
    three channels.
    """
    num_channels = 1 if dataset == 'mnist' else 3
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes,
                  num_channels=num_channels)
djh4230/Apache-Phoenix | bin/daemon.py | 27 | 32440 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
# daemon/daemon.py
# Part of ‘python-daemon’, an implementation of PEP 3143.
#
# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
# Copyright © 2004–2005 Chad J. Schroeder
# Copyright © 2003 Clark Evans
# Copyright © 2002 Noah Spurrier
# Copyright © 2001 Jürgen Hermann
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Apache License, version 2.0 as published by the
# Apache Software Foundation.
# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
#
# Apache Phoenix note: this file is `daemon.py` from the package
# `python-daemon 2.0.5`, https://pypi.python.org/pypi/python-daemon/
#
# The class `PidFile` was added for adapting the `lockfile` package's interface
# without depending on yet another 3rd party package. Based on example from
# http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/
#
""" Daemon process behaviour.
"""
from __future__ import (absolute_import, unicode_literals)
import os
import sys
import resource
import errno
import signal
import socket
import atexit
import fcntl
# Python 2/3 string-type compatibility shim.  On Python 2 the self-assignments
# succeed and keep the builtin names; on Python 3 the names do not exist, the
# first self-assignment raises NameError, and both are bound to `str`.
try:
    # Python 2 has both ‘str’ (bytes) and ‘unicode’ (text).
    basestring = basestring
    unicode = unicode
except NameError:
    # Python 3 names the Unicode data type ‘str’.
    basestring = str
    unicode = str
class DaemonError(Exception):
    """ Base exception class for errors from this module. """

    def __init__(self, *args, **kwargs):
        # Capture the exception currently being handled (if any) as this
        # error's __cause__ before normal Exception initialisation, emulating
        # Python 3 `raise ... from ...` chaining on Python 2.
        self._chain_from_context()
        super(DaemonError, self).__init__(*args, **kwargs)

    def _chain_from_context(self):
        # Delegates to the module-level helper defined at the bottom of this
        # file; `as_cause=True` sets __cause__ rather than __context__.
        _chain_exception_from_existing_exception_context(self, as_cause=True)
# Raised by the change_* / prevent_core_dump / close_* helpers below.
class DaemonOSEnvironmentError(DaemonError, OSError):
    """ Exception raised when daemon OS environment setup receives error. """


# Raised by `detach_process_context` when a fork fails.
class DaemonProcessDetachError(DaemonError, OSError):
    """ Exception raised when process detach fails. """
class DaemonContext:
    """ Context for turning the current program into a daemon process.

    A `DaemonContext` instance represents the behaviour settings and
    process context for the program when it becomes a daemon. The
    behaviour and environment is customised by setting options on the
    instance, before calling the `open` method.

    Each option can be passed as a keyword argument to the `DaemonContext`
    constructor, or subsequently altered by assigning to an attribute on
    the instance at any time prior to calling `open`. That is, for
    options named `wibble` and `wubble`, the following invocation::

        foo = daemon.DaemonContext(wibble=bar, wubble=baz)
        foo.open()

    is equivalent to::

        foo = daemon.DaemonContext()
        foo.wibble = bar
        foo.wubble = baz
        foo.open()

    The following options are defined.

    `files_preserve`
        :Default: ``None``

        List of files that should *not* be closed when starting the
        daemon. If ``None``, all open file descriptors will be closed.

        Elements of the list are file descriptors (as returned by a file
        object's `fileno()` method) or Python `file` objects. Each
        specifies a file that is not to be closed during daemon start.

    `chroot_directory`
        :Default: ``None``

        Full path to a directory to set as the effective root directory of
        the process. If ``None``, specifies that the root directory is not
        to be changed.

    `working_directory`
        :Default: ``'/'``

        Full path of the working directory to which the process should
        change on daemon start.

        Since a filesystem cannot be unmounted if a process has its
        current working directory on that filesystem, this should either
        be left at default or set to a directory that is a sensible “home
        directory” for the daemon while it is running.

    `umask`
        :Default: ``0``

        File access creation mask (“umask”) to set for the process on
        daemon start.

        A daemon should not rely on the parent process's umask value,
        which is beyond its control and may prevent creating a file with
        the required access mode. So when the daemon context opens, the
        umask is set to an explicit known value.

        If the conventional value of 0 is too open, consider setting a
        value such as 0o022, 0o027, 0o077, or another specific value.
        Otherwise, ensure the daemon creates every file with an
        explicit access mode for the purpose.

    `pidfile`
        :Default: ``None``

        Context manager for a PID lock file. When the daemon context opens
        and closes, it enters and exits the `pidfile` context manager.

    `detach_process`
        :Default: ``None``

        If ``True``, detach the process context when opening the daemon
        context; if ``False``, do not detach.

        If unspecified (``None``) during initialisation of the instance,
        this will be set to ``True`` by default, and ``False`` only if
        detaching the process is determined to be redundant; for example,
        in the case when the process was started by `init`, by `initd`, or
        by `inetd`.

    `signal_map`
        :Default: system-dependent

        Mapping from operating system signals to callback actions.

        The mapping is used when the daemon context opens, and determines
        the action for each signal's signal handler:

        * A value of ``None`` will ignore the signal (by setting the
          signal action to ``signal.SIG_IGN``).

        * A string value will be used as the name of an attribute on the
          ``DaemonContext`` instance. The attribute's value will be used
          as the action for the signal handler.

        * Any other value will be used as the action for the
          signal handler. See the ``signal.signal`` documentation
          for details of the signal handler interface.

        The default value depends on which signals are defined on the
        running system. Each item from the list below whose signal is
        actually defined in the ``signal`` module will appear in the
        default map:

        * ``signal.SIGTTIN``: ``None``
        * ``signal.SIGTTOU``: ``None``
        * ``signal.SIGTSTP``: ``None``
        * ``signal.SIGTERM``: ``'terminate'``

        Depending on how the program will interact with its child
        processes, it may need to specify a signal map that
        includes the ``signal.SIGCHLD`` signal (received when a
        child process exits). See the specific operating system's
        documentation for more detail on how to determine what
        circumstances dictate the need for signal handlers.

    `uid`
        :Default: ``os.getuid()``

    `gid`
        :Default: ``os.getgid()``

        The user ID (“UID”) value and group ID (“GID”) value to switch
        the process to on daemon start.

        The default values, the real UID and GID of the process, will
        relinquish any effective privilege elevation inherited by the
        process.

    `prevent_core`
        :Default: ``True``

        If true, prevents the generation of core files, in order to avoid
        leaking sensitive information from daemons run as `root`.

    `stdin`
        :Default: ``None``

    `stdout`
        :Default: ``None``

    `stderr`
        :Default: ``None``

        Each of `stdin`, `stdout`, and `stderr` is a file-like object
        which will be used as the new file for the standard I/O stream
        `sys.stdin`, `sys.stdout`, and `sys.stderr` respectively. The file
        should therefore be open, with a minimum of mode 'r' in the case
        of `stdin`, and mimimum of mode 'w+' in the case of `stdout` and
        `stderr`.

        If the object has a `fileno()` method that returns a file
        descriptor, the corresponding file will be excluded from being
        closed during daemon start (that is, it will be treated as though
        it were listed in `files_preserve`).

        If ``None``, the corresponding system stream is re-bound to the
        file named by `os.devnull`.

    """

    # Python 2 new-style class marker; redundant (but harmless) on Python 3.
    __metaclass__ = type

    def __init__(
            self,
            chroot_directory=None,
            working_directory="/",
            umask=0,
            uid=None,
            gid=None,
            prevent_core=True,
            detach_process=None,
            files_preserve=None,
            pidfile=None,
            stdin=None,
            stdout=None,
            stderr=None,
            signal_map=None,
    ):
        """ Set up a new instance. """
        self.chroot_directory = chroot_directory
        self.working_directory = working_directory
        self.umask = umask
        self.prevent_core = prevent_core
        self.files_preserve = files_preserve
        self.pidfile = pidfile
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr

        # Defaults resolved at construction time: real UID/GID of this
        # process, so any inherited effective privilege is relinquished.
        if uid is None:
            uid = os.getuid()
        self.uid = uid
        if gid is None:
            gid = os.getgid()
        self.gid = gid

        # Auto-detect whether detaching is redundant (started by init/inetd).
        if detach_process is None:
            detach_process = is_detach_process_context_required()
        self.detach_process = detach_process

        if signal_map is None:
            signal_map = make_default_signal_map()
        self.signal_map = signal_map

        # Tracks open()/close() idempotency; exposed via the `is_open`
        # property.
        self._is_open = False

    @property
    def is_open(self):
        """ ``True`` if the instance is currently open. """
        return self._is_open

    def open(self):
        """ Become a daemon process.

        :return: ``None``.

        Open the daemon context, turning the current program into a daemon
        process. This performs the following steps:

        * If this instance's `is_open` property is true, return
          immediately. This makes it safe to call `open` multiple times on
          an instance.

        * If the `prevent_core` attribute is true, set the resource limits
          for the process to prevent any core dump from the process.

        * If the `chroot_directory` attribute is not ``None``, set the
          effective root directory of the process to that directory (via
          `os.chroot`).

          This allows running the daemon process inside a “chroot gaol”
          as a means of limiting the system's exposure to rogue behaviour
          by the process. Note that the specified directory needs to
          already be set up for this purpose.

        * Set the process UID and GID to the `uid` and `gid` attribute
          values.

        * Close all open file descriptors. This excludes those listed in
          the `files_preserve` attribute, and those that correspond to the
          `stdin`, `stdout`, or `stderr` attributes.

        * Change current working directory to the path specified by the
          `working_directory` attribute.

        * Reset the file access creation mask to the value specified by
          the `umask` attribute.

        * If the `detach_process` option is true, detach the current
          process into its own process group, and disassociate from any
          controlling terminal.

        * Set signal handlers as specified by the `signal_map` attribute.

        * If any of the attributes `stdin`, `stdout`, `stderr` are not
          ``None``, bind the system streams `sys.stdin`, `sys.stdout`,
          and/or `sys.stderr` to the files represented by the
          corresponding attributes. Where the attribute has a file
          descriptor, the descriptor is duplicated (instead of re-binding
          the name).

        * If the `pidfile` attribute is not ``None``, enter its context
          manager.

        * Mark this instance as open (for the purpose of future `open` and
          `close` calls).

        * Register the `close` method to be called during Python's exit
          processing.

        When the function returns, the running program is a daemon
        process.

        """
        if self.is_open:
            return

        # NOTE: the ordering below is deliberate — chroot happens before the
        # privilege drop (chroot needs the elevated privileges), and the
        # pidfile is entered last, after the process has forked/detached, so
        # the recorded PID is the daemon's.
        if self.chroot_directory is not None:
            change_root_directory(self.chroot_directory)

        if self.prevent_core:
            prevent_core_dump()

        change_file_creation_mask(self.umask)
        change_working_directory(self.working_directory)
        change_process_owner(self.uid, self.gid)

        if self.detach_process:
            detach_process_context()

        signal_handler_map = self._make_signal_handler_map()
        set_signal_handlers(signal_handler_map)

        exclude_fds = self._get_exclude_file_descriptors()
        close_all_open_files(exclude=exclude_fds)

        redirect_stream(sys.stdin, self.stdin)
        redirect_stream(sys.stdout, self.stdout)
        redirect_stream(sys.stderr, self.stderr)

        if self.pidfile is not None:
            self.pidfile.__enter__()

        self._is_open = True

        register_atexit_function(self.close)

    def __enter__(self):
        """ Context manager entry point. """
        self.open()
        return self

    def close(self):
        """ Exit the daemon process context.

        :return: ``None``.

        Close the daemon context. This performs the following steps:

        * If this instance's `is_open` property is false, return
          immediately. This makes it safe to call `close` multiple times
          on an instance.

        * If the `pidfile` attribute is not ``None``, exit its context
          manager.

        * Mark this instance as closed (for the purpose of future `open`
          and `close` calls).

        """
        if not self.is_open:
            return

        if self.pidfile is not None:
            # Follow the interface for telling a context manager to exit,
            # <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>.
            self.pidfile.__exit__(None, None, None)

        self._is_open = False

    def __exit__(self, exc_type, exc_value, traceback):
        """ Context manager exit point. """
        self.close()

    def terminate(self, signal_number, stack_frame):
        """ Signal handler for end-process signals.

        :param signal_number: The OS signal number received.
        :param stack_frame: The frame object at the point the
            signal was received.
        :return: ``None``.

        Signal handler for the ``signal.SIGTERM`` signal. Performs the
        following step:

        * Raise a ``SystemExit`` exception explaining the signal.

        """
        exception = SystemExit(
            "Terminating on signal {signal_number!r}".format(
                signal_number=signal_number))
        raise exception

    def _get_exclude_file_descriptors(self):
        """ Get the set of file descriptors to exclude closing.

        :return: A set containing the file descriptors for the
            files to be preserved.

        The file descriptors to be preserved are those from the
        items in `files_preserve`, and also each of `stdin`,
        `stdout`, and `stderr`. For each item:

        * If the item is ``None``, it is omitted from the return
          set.

        * If the item's ``fileno()`` method returns a value, that
          value is in the return set.

        * Otherwise, the item is in the return set verbatim.

        """
        files_preserve = self.files_preserve
        if files_preserve is None:
            files_preserve = []
        # The std streams are implicitly preserved when they are file-like.
        files_preserve.extend(
            item for item in [self.stdin, self.stdout, self.stderr]
            if hasattr(item, 'fileno'))

        exclude_descriptors = set()
        for item in files_preserve:
            if item is None:
                continue
            file_descriptor = _get_file_descriptor(item)
            if file_descriptor is not None:
                exclude_descriptors.add(file_descriptor)
            else:
                # Item has no usable descriptor; keep it verbatim so raw
                # integer descriptors in `files_preserve` are honoured.
                exclude_descriptors.add(item)
        return exclude_descriptors

    def _make_signal_handler(self, target):
        """ Make the signal handler for a specified target object.

        :param target: A specification of the target for the
            handler; see below.
        :return: The value for use by `signal.signal()`.

        If `target` is ``None``, return ``signal.SIG_IGN``. If `target`
        is a text string, return the attribute of this instance named
        by that string. Otherwise, return `target` itself.

        """
        if target is None:
            result = signal.SIG_IGN
        elif isinstance(target, unicode):
            # String values name a method on this instance, e.g. 'terminate'.
            name = target
            result = getattr(self, name)
        else:
            result = target

        return result

    def _make_signal_handler_map(self):
        """ Make the map from signals to handlers for this instance.

        :return: The constructed signal map for this instance.

        Construct a map from signal numbers to handlers for this
        context instance, suitable for passing to
        `set_signal_handlers`.

        """
        signal_handler_map = dict(
            (signal_number, self._make_signal_handler(target))
            for (signal_number, target) in self.signal_map.items())
        return signal_handler_map
def _get_file_descriptor(obj):
""" Get the file descriptor, if the object has one.
:param obj: The object expected to be a file-like object.
:return: The file descriptor iff the file supports it; otherwise
``None``.
The object may be a non-file object. It may also be a
file-like object with no support for a file descriptor. In
either case, return ``None``.
"""
file_descriptor = None
if hasattr(obj, 'fileno'):
try:
file_descriptor = obj.fileno()
except ValueError:
# The item doesn't support a file descriptor.
pass
return file_descriptor
def change_working_directory(directory):
    """Change the working directory of this process.

    :param directory: The target directory path.
    :return: ``None``.
    :raise DaemonOSEnvironmentError: If the directory change fails.
    """
    try:
        os.chdir(directory)
    except Exception as exc:
        raise DaemonOSEnvironmentError(
            "Unable to change working directory ({exc})".format(exc=exc))
def change_root_directory(directory):
    """Change the root directory of this process.

    Sets the current working directory, then the process root directory,
    to the specified `directory`. Requires appropriate OS privileges
    (normally root) for this process.

    :param directory: The target directory path.
    :return: ``None``.
    :raise DaemonOSEnvironmentError: If either step fails.
    """
    try:
        os.chdir(directory)
        os.chroot(directory)
    except Exception as exc:
        raise DaemonOSEnvironmentError(
            "Unable to change root directory ({exc})".format(exc=exc))
def change_file_creation_mask(mask):
    """Change the file creation mask ("umask") for this process.

    :param mask: The numeric file creation mask to set.
    :return: ``None``.
    :raise DaemonOSEnvironmentError: If the umask change fails.
    """
    try:
        os.umask(mask)
    except Exception as exc:
        raise DaemonOSEnvironmentError(
            "Unable to change file creation mask ({exc})".format(exc=exc))
def change_process_owner(uid, gid):
    """Change the owning UID and GID of this process.

    Sets the GID first, then the UID (that order avoids permission errors:
    once the UID drops, the process may no longer be allowed to change its
    GID). Requires appropriate OS privileges for this process.

    :param uid: The target UID for the daemon process.
    :param gid: The target GID for the daemon process.
    :return: ``None``.
    :raise DaemonOSEnvironmentError: If either change fails.
    """
    try:
        os.setgid(gid)
        os.setuid(uid)
    except Exception as exc:
        raise DaemonOSEnvironmentError(
            "Unable to change process owner ({exc})".format(exc=exc))
def prevent_core_dump():
    """Prevent this process from generating a core dump.

    Sets both the soft and hard limits for core dump size to zero; on Unix
    this entirely prevents the process from creating a core dump.

    :return: ``None``.
    :raise DaemonOSEnvironmentError: If RLIMIT_CORE is unsupported here.
    """
    core_resource = resource.RLIMIT_CORE
    try:
        # Probe the limit first so unsupported platforms are detected
        # before we attempt to set anything.
        resource.getrlimit(core_resource)
    except ValueError as exc:
        raise DaemonOSEnvironmentError(
            "System does not support RLIMIT_CORE resource limit"
            " ({exc})".format(exc=exc))

    # Zero for both the hard and soft limits: no core dump at all.
    resource.setrlimit(core_resource, (0, 0))
def detach_process_context():
    """ Detach the process context from parent and session.

    :return: ``None``.

    Detach from the parent process and session group, allowing the
    parent to exit while this process continues running.

    This is the classic double-fork: the first fork lets the parent exit,
    `setsid` makes the survivor a session leader with no controlling
    terminal, and the second fork guarantees the final process is not a
    session leader and so can never re-acquire a controlling terminal.

    Reference: “Advanced Programming in the Unix Environment”,
    section 13.3, by W. Richard Stevens, published 1993 by
    Addison-Wesley.

    """
    def fork_then_exit_parent(error_message):
        """ Fork a child process, then exit the parent process.

        :param error_message: Message for the exception in case of a
            detach failure.
        :return: ``None``.
        :raise DaemonProcessDetachError: If the fork fails.

        """
        try:
            pid = os.fork()
            if pid > 0:
                # In the parent: exit immediately without running cleanup
                # handlers (os._exit skips atexit/finally machinery).
                os._exit(0)
        except OSError as exc:
            error = DaemonProcessDetachError(
                "{message}: [{exc.errno:d}] {exc.strerror}".format(
                    message=error_message, exc=exc))
            raise error

    fork_then_exit_parent(error_message="Failed first fork")
    os.setsid()
    fork_then_exit_parent(error_message="Failed second fork")
def is_process_started_by_init():
    """Determine whether the current process is started by `init`.

    The `init` process has process ID 1; a parent PID of 1 therefore means
    this process was started (or has been re-parented) by `init`.

    :return: ``True`` iff the parent process is `init`; otherwise ``False``.
    """
    init_pid = 1
    return os.getppid() == init_pid
def is_socket(fd):
""" Determine whether the file descriptor is a socket.
:param fd: The file descriptor to interrogate.
:return: ``True`` iff the file descriptor is a socket; otherwise
``False``.
Query the socket type of `fd`. If there is no error, the file is a
socket.
"""
result = False
file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
try:
socket_type = file_socket.getsockopt(
socket.SOL_SOCKET, socket.SO_TYPE)
except socket.error as exc:
exc_errno = exc.args[0]
if exc_errno == errno.ENOTSOCK:
# Socket operation on non-socket.
pass
else:
# Some other socket error.
result = True
else:
# No error getting socket type.
result = True
return result
def is_process_started_by_superserver():
    """Determine whether the current process is started by the superserver.

    The internet superserver (`inetd`) creates a network socket and attaches
    it to the standard streams of the child process; so if our original
    stdin is a socket, we were started by the superserver.

    :return: ``True`` if started by the internet superserver, else ``False``.
    """
    stdin_fd = sys.__stdin__.fileno()
    return is_socket(stdin_fd)
def is_detach_process_context_required():
    """Determine whether detaching the process context is required.

    Detaching is redundant (returns ``False``) when the process was started
    by `init` or by the internet superserver (`inetd`), since it is already
    disassociated from a controlling terminal in those cases.

    :return: ``True`` iff the process still needs to detach; else ``False``.
    """
    already_detached = (
        is_process_started_by_init() or is_process_started_by_superserver())
    return not already_detached
def close_file_descriptor_if_open(fd):
    """Close a file descriptor if it is open, suppressing "not open" errors.

    :param fd: The file descriptor to close.
    :return: ``None``.
    :raise DaemonOSEnvironmentError: If closing fails for any reason other
        than the descriptor not being open (EBADF).
    """
    try:
        os.close(fd)
    except EnvironmentError as exc:
        if exc.errno != errno.EBADF:
            raise DaemonOSEnvironmentError(
                "Failed to close file descriptor {fd:d} ({exc})".format(
                    fd=fd, exc=exc))
# Fallback ceiling used when the OS reports an unlimited descriptor count.
MAXFD = 2048


def get_maximum_file_descriptors():
    """Get the maximum number of open file descriptors for this process.

    The maximum is the process hard resource limit for open file
    descriptors; if that limit is "infinity", the ``MAXFD`` default is
    returned instead.

    :return: The integer maximum number of open files for this process.
    """
    (_soft_limit, hard_limit) = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard_limit == resource.RLIM_INFINITY:
        return MAXFD
    return hard_limit
def close_all_open_files(exclude=frozenset()):
    """Close all open file descriptors of this process.

    Iterates every possible descriptor (from the process maximum down to 0)
    and closes each one that is open and not listed in `exclude`.

    :param exclude: Collection of file descriptors to skip when closing
        files.  Default changed from a mutable ``set()`` to ``frozenset()``
        — the old mutable default was a shared-state hazard; membership
        semantics are identical.
    :return: ``None``.
    """
    maxfd = get_maximum_file_descriptors()
    for fd in reversed(range(maxfd)):
        if fd not in exclude:
            close_file_descriptor_if_open(fd)
def redirect_stream(system_stream, target_stream):
    """Redirect a system stream to a specified file.

    `system_stream` is a standard system stream such as ``sys.stdout``;
    after the call its file descriptor refers to `target_stream`'s file.
    If `target_stream` is ``None``, the operating system's null device is
    opened and used instead.

    :param system_stream: A file object representing a standard I/O stream.
    :param target_stream: The target file object for the redirected stream,
        or ``None`` to specify the null device.
    :return: ``None``.
    """
    target_fd = (
        os.open(os.devnull, os.O_RDWR) if target_stream is None
        else target_stream.fileno())
    os.dup2(target_fd, system_stream.fileno())
def make_default_signal_map():
    """Make the default signal map for this system.

    Candidate signals not defined in the ``signal`` module on the running
    system are omitted, so the result is portable across platforms.

    :return: A mapping from signal number to handler specification
        (``None`` to ignore, or the name of a ``DaemonContext`` method).
    """
    candidates = (
        ('SIGTSTP', None),
        ('SIGTTIN', None),
        ('SIGTTOU', None),
        ('SIGTERM', 'terminate'),
    )
    return dict(
        (getattr(signal, name), target)
        for (name, target) in candidates
        if hasattr(signal, name))
def set_signal_handlers(signal_handler_map):
    """Set the signal handlers as specified.

    See the `signal` module for details on signal numbers and signal
    handlers.

    :param signal_handler_map: A map from signal number to handler object.
    :return: ``None``.
    """
    for signal_number, handler in signal_handler_map.items():
        signal.signal(signal_number, handler)
def register_atexit_function(func):
    """ Register a function for processing at program exit.

    :param func: A callable function expecting no arguments.
    :return: ``None``.

    The function `func` is registered for a call with no arguments
    at program exit.

    """
    # Thin wrapper kept as a seam so callers do not depend on `atexit`
    # directly.
    atexit.register(func)
def _chain_exception_from_existing_exception_context(exc, as_cause=False):
""" Decorate the specified exception with the existing exception context.
:param exc: The exception instance to decorate.
:param as_cause: If true, the existing context is declared to be
the cause of the exception.
:return: ``None``.
:PEP:`344` describes syntax and attributes (`__traceback__`,
`__context__`, `__cause__`) for use in exception chaining.
Python 2 does not have that syntax, so this function decorates
the exception with values from the current exception context.
"""
(existing_exc_type, existing_exc, existing_traceback) = sys.exc_info()
if as_cause:
exc.__cause__ = existing_exc
else:
exc.__context__ = existing_exc
exc.__traceback__ = existing_traceback
class PidFile(object):
    """
    Adapter between a file path string and the `lockfile` API [0]. Based example
    found at [1].

    [0]: https://pythonhosted.org/lockfile/lockfile.html
    [1]: http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/

    Fixes over the original: the magic errno ``9`` is now ``errno.EBADF``,
    and the file handle is closed before exiting when the lock is already
    held by another process.
    """

    def __init__(self, path, enter_err_msg=None):
        self.path = path
        self.enter_err_msg = enter_err_msg
        self.pidfile = None

    def __enter__(self):
        """Acquire an exclusive lock on the pid file and record our PID.

        Exits the process (via ``sys.exit``) when another process already
        holds the lock.  Returns the open pid file, positioned at offset 0.
        """
        self.pidfile = open(self.path, 'a+')
        try:
            fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            # Lock is held elsewhere: close our handle before bailing out so
            # the descriptor is not leaked into the exiting process.
            self.pidfile.close()
            sys.exit(self.enter_err_msg)
        self.pidfile.seek(0)
        self.pidfile.truncate()
        self.pidfile.write(str(os.getpid()))
        self.pidfile.flush()
        self.pidfile.seek(0)
        return self.pidfile

    def __exit__(self, exc_type, exc_value, exc_tb):
        """Release the lock by closing the file, then remove the pid file."""
        try:
            self.pidfile.close()
        except IOError as err:
            # Already-closed descriptors are fine; anything else propagates.
            if err.errno != errno.EBADF:
                raise
        os.remove(self.path)
# Local variables:
# coding: utf-8
# mode: python
# End:
# vim: fileencoding=utf-8 filetype=python :
| apache-2.0 |
peragro/peragro-rest | damn_rest/tests.py | 1 | 3050 | from __future__ import absolute_import
from __future__ import print_function
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.test import Client
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
class UploadPaperTest(TestCase):
    """Integration tests for the project's file-upload endpoint.

    NOTE(review): this test class has several latent defects, flagged
    inline below; it appears to have been committed mid-refactor.
    """

    def generate_file(self):
        # Build a small CSV fixture and return the file object.
        # NOTE(review): 'csv' is never imported in this module, so this
        # helper raises NameError as soon as it is called.  Also, on
        # Python 3 csv.writer needs a text-mode handle ('w', newline=''),
        # not 'wb', and the returned handle has already been closed by the
        # finally block, so callers re-open it by name instead.
        try:
            myfile = open('test.csv', 'wb')
            wr = csv.writer(myfile)
            wr.writerow(('Paper ID','Paper Title', 'Authors'))
            wr.writerow(('1','Title1', 'Author1'))
            wr.writerow(('2','Title2', 'Author2'))
            wr.writerow(('3','Title3', 'Author3'))
        finally:
            myfile.close()
        return myfile

    def setUp(self):
        # Fresh test client plus a superuser with a DRF auth token.
        self.client = Client()
        self.user = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
        # NOTE(review): local is unused; kept for the side effect of
        # creating the token row queried in test_paper_upload.
        token = Token.objects.create(user=self.user)

    def test_paper_upload(self):
        print(Token.objects.all())
        token = Token.objects.get(user_id = self.user.pk) # this outputs a normal looking token
        token_auth = self.client.post("/api-token-auth/", {'username': 'admin', 'password': 'admin'})
        print(token_auth.data, token_auth.status_code) # this outputs 400
        self.assertEqual(token_auth.status_code, 200, "User couldn't log in")
        response = self.client.login(username=self.user.username, password='admin')
        print(('-'*70))
        print(response)
        print(('-'*70))
        self.assertTrue(response)
        # NOTE(review): hard-coded absolute path only exists on the original
        # author's machine; this open() fails everywhere else.  Replace with
        # a small fixture shipped inside the repository.
        f = open("/home/sueastside/dev/DAMN/damn-test-files/image/jpg/crate10b.jpg", 'rb')
        post_data = {'file': None}
        #response = self.client.post(url, post_data)
        #self.assertContains(response, 'File type is not supported.')
        url = reverse('upload_file', args=['test_project'])
        print(url)
        post_data['file'] = f
        response = self.client.post(url, post_data, **{'HTTP_AUTHORIZATION':'Token '+token_auth.data['token'], })
        print(response)

    def ttest_paper_upload(self):
        # NOTE(review): disabled via the 'ttest_' prefix (not discovered by
        # the test runner).  As written it cannot run: it references the
        # undefined names 'i', 'os', 'SubmissionImportFile', and
        # 'self.event', and logs in with a password ('foz') that setUp
        # never assigns.
        response = self.client.login(username=self.user.email, password='foz')
        self.assertTrue(response)
        myfile = self.generate_file()
        file_path = myfile.name
        f = open(file_path, "r")
        url = reverse('registration_upload_papers', args=[self.event.slug])
        # post wrong data type
        post_data = {'uploaded_file': i}
        response = self.client.post(url, post_data)
        self.assertContains(response, 'File type is not supported.')
        post_data['uploaded_file'] = f
        response = self.client.post(url, post_data)
        import_file = SubmissionImportFile.objects.all()[0]
        self.assertEqual(SubmissionImportFile.objects.all().count(), 1)
        #self.assertEqual(import_file.uploaded_file.name, 'files/registration/{0}'.format(file_path))
        os.remove(myfile.name)
        file_path = import_file.uploaded_file.path
        os.remove(file_path)
| bsd-3-clause |
nimzco/Environment | Sublime/Packages/TypeScript/typescript/libs/text_helpers.py | 2 | 3640 | import sublime
from .global_vars import *
class Location:
    """One-based (line, offset) pair, as used by the server protocol.

    Both `line` and `offset` are 1-based, matching the wire format.
    """

    def __init__(self, line, offset):
        self.line = line
        self.offset = offset

    def to_dict(self):
        """Return the protocol wire representation of this location."""
        return dict(line=self.line, offset=self.offset)
class StaticRegion:
    """Immutable snapshot of a region's endpoints.

    Unlike a live `sublime.Region`, this will not shift as the buffer is
    modified; convert back with `to_region()` when needed.
    """

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def begin(self):
        """Start point of the region."""
        return self.a

    def empty(self):
        """True when the region covers no characters."""
        return self.a == self.b

    def to_region(self):
        """Materialise this snapshot as a live `sublime.Region`."""
        return sublime.Region(self.a, self.b)
def copy_region(r):
    """Snapshot a region as a new `sublime.Region` (the original may move)."""
    return sublime.Region(r.begin(), r.end())


def copy_regions(regions):
    """Copy every region in a list."""
    return [copy_region(region) for region in regions]


def region_to_static_region(r):
    """Freeze a region's endpoints into a `StaticRegion`."""
    return StaticRegion(r.begin(), r.end())


def static_regions_to_regions(static_regions):
    """Thaw a list of `StaticRegion`s back into live regions."""
    return [static.to_region() for static in static_regions]


def regions_to_static_regions(regions):
    """Freeze a list of regions into `StaticRegion`s."""
    return [region_to_static_region(region) for region in regions]
def decrease_empty_regions(empty_regions, amount):
    """Shift each region `amount` characters left (used for left_delete)."""
    return [
        sublime.Region(region.begin() - amount, region.end() - amount)
        for region in empty_regions
    ]


def decrease_locs_to_regions(locs, amount):
    """Shift locations `amount` left and wrap each as an empty region."""
    return [sublime.Region(loc - amount, loc - amount) for loc in locs]
def extract_line_offset(line_offset):
    """Destructure a LineOffset into a zero-based (line, offset) tuple.

    Accepts either a dict with ``"line"``/``"offset"`` keys or an object
    with ``line``/``offset`` attributes; both are 1-based on input and
    converted to 0-based on output.
    """
    if isinstance(line_offset, dict):
        line, offset = line_offset["line"], line_offset["offset"]
    else:
        line, offset = line_offset.line, line_offset.offset
    return line - 1, offset - 1
def escape_html(raw_string):
    """Escape html content.

    Replaces the HTML metacharacters with their entities and converts
    newlines/spaces to ``<br>``/``&nbsp;``.  The previous version's
    replacements had degenerated into no-ops (``'&' -> '&'`` etc.),
    presumably by an entity-decoding pass over the source; the proper
    entities are restored here.  ``&`` must be escaped first so the
    entities themselves are not re-escaped.

    Note: only use for short strings.
    """
    return (raw_string
            .replace('&', '&amp;')
            .replace('<', '&lt;')
            .replace('>', '&gt;')
            .replace('\n', '<br>')
            .replace(' ', '&nbsp;'))
def left_expand_empty_region(regions, number=1):
    """Grow each empty region `number` chars left (backspace change info).

    Non-empty regions are passed through unchanged.
    """
    return [
        sublime.Region(region.begin() - number, region.end())
        if region.empty() else region
        for region in regions
    ]
def right_expand_empty_region(regions, number=1):
    """Grow each empty region to the right (delete-key change info).

    Non-empty regions are passed through unchanged.

    :param number: How many characters to expand by.  Added (default 1,
        preserving the original behaviour) for parity with
        `left_expand_empty_region`.
    """
    result = []
    for region in regions:
        if region.empty():
            result.append(sublime.Region(region.begin(), region.end() + number))
        else:
            result.append(region)
    return result
def build_replace_regions(empty_regions_a, empty_regions_b):
    """
    Pair up two lists of cursor locations, connecting each begin() from the
    first list to the corresponding begin() from the second to form the
    regions used for replacement later.
    """
    replace_regions = []
    for idx, start_region in enumerate(empty_regions_a):
        replace_regions.append(
            sublime.Region(start_region.begin(),
                           empty_regions_b[idx].begin()))
    return replace_regions
| mit |
zitouni/gnuradio-3.6.1 | gnuradio-core/src/lib/filter/generate_gri_fir_filter_with_buffer_XXX.py | 17 | 1999 | #!/usr/bin/env python
# -*- python -*-
#
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import re
from generate_utils import *
roots = ['gri_fir_filter_with_buffer_XXX',]
def code3_to_acc_code(code3):
    """Pick the accumulator type code for a 3-letter signature.

    Precedence is complex > float > int; anything narrower (e.g. all
    shorts) still accumulates in an int.
    """
    codes = (i_code(code3), o_code(code3), tap_code(code3))
    for acc in ('c', 'f', 'i'):
        if acc in codes:
            return acc
    return 'i'  # even short short short needs int accumulator
def code3_to_input_cast(code3):
    """Cast applied to input samples: short-in/complex-out needs (float)."""
    short_to_complex = (i_code(code3) == 's' and o_code(code3) == 'c')
    return '(float)' if short_to_complex else ''
def expand_h_cc(root, code3):
    """Expand both the .h and .cc templates for one signature."""
    subst = init_dict(root, code3)
    for ext in ('.h.t', '.cc.t'):
        expand_template(subst, root + ext)
def init_dict(root, code3):
    """Build the template substitution dict for one (root, code3) pair."""
    name = re.sub('X+', code3, root)
    subst = standard_dict(name, code3)
    subst['INPUT_CAST'] = code3_to_input_cast(code3)
    subst['ACC_TYPE'] = char_to_type[code3_to_acc_code(code3)]
    return subst
def generate():
    """Expand every template root for every FIR signature."""
    for root in roots:
        for signature in fir_signatures:
            expand_h_cc(root, signature)
# Allow running the generator directly as a script.
if __name__ == '__main__':
    generate ()
| gpl-3.0 |
chongtianfeiyu/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/html5lib/treewalkers/pulldom.py | 1729 | 2302 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
    # Tree walker over an xml.dom.pulldom event stream, translating pulldom
    # events into html5lib tokens.  Events are buffered one step (``previous``)
    # so each token can see the following event.
    def __iter__(self):
        # ``ignore_until`` holds the DOM node of an element that was emitted
        # as an EmptyTag; events are skipped until that node's END_ELEMENT
        # event comes back around.
        ignore_until = None
        previous = None
        for event in self.tree:
            if previous is not None and \
                    (ignore_until is None or previous[1] is ignore_until):
                if previous[1] is ignore_until:
                    ignore_until = None
                for token in self.tokens(previous, event):
                    yield token
                    if token["type"] == "EmptyTag":
                        ignore_until = previous[1]
            previous = event
        # Flush the final buffered event; ``next`` is None at end of stream.
        if ignore_until is None or previous[1] is ignore_until:
            for token in self.tokens(previous, None):
                yield token
        elif ignore_until is not None:
            raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")

    def tokens(self, event, next):
        # Translate a single pulldom ``(type, node)`` event into html5lib
        # tokens.  ``next`` (the following event) is used to decide whether
        # a void element has children.
        type, node = event
        if type == START_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            attrs = {}
            for attr in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr)
                attrs[(attr.namespaceURI, attr.localName)] = attr.value
            if name in voidElements:
                # ``hasChildren`` is True when the next event does not close
                # this same node.
                for token in self.emptyTag(namespace,
                                           name,
                                           attrs,
                                           not next or next[1] is not node):
                    yield token
            else:
                yield self.startTag(namespace, name, attrs)

        elif type == END_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            if name not in voidElements:
                yield self.endTag(namespace, name)

        elif type == COMMENT:
            yield self.comment(node.nodeValue)

        elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
            for token in self.text(node.nodeValue):
                yield token

        else:
            yield self.unknown(type)
| lgpl-3.0 |
anhlt/MusicSocial | MusicSocial/account/migrations/0003_auto__add_profiles.py | 1 | 5318 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the ``account_profiles`` table."""
        # Adding model 'Profiles'
        db.create_table(u'account_profiles', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user',
             self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True, null=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=120, null=True, blank=True)),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
            ('profile_image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
            ('location', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
        ))
        db.send_create_signal(u'account', ['Profiles'])
    def backwards(self, orm):
        """Reverse the migration: drop the ``account_profiles`` table."""
        # Deleting model 'Profiles'
        db.delete_table(u'account_profiles')
models = {
u'account.profiles': {
'Meta': {'object_name': 'Profiles'},
'description': (
'django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': (
'django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'profile_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.OneToOneField', [],
{'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')",
'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': (
'django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True',
'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True',
'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['account'] | mit |
sandeepgupta2k4/tensorflow | tensorflow/python/layers/normalization.py | 6 | 21958 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the normalization layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import moving_averages
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import variables
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
class BatchNormalization(base.Layer):
"""Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Arguments:
axis: Integer, the axis that should be normalized (typically the features
axis). For instance, after a `Conv2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: A string, the name of the layer.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
"""
  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer=init_ops.zeros_initializer(),
               gamma_initializer=init_ops.ones_initializer(),
               moving_mean_initializer=init_ops.zeros_initializer(),
               moving_variance_initializer=init_ops.ones_initializer(),
               beta_regularizer=None,
               gamma_regularizer=None,
               trainable=True,
               name=None,
               renorm=False,
               renorm_clipping=None,
               renorm_momentum=0.99,
               **kwargs):
    """Store the layer configuration; see the class docstring for arguments."""
    super(BatchNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = beta_initializer
    self.gamma_initializer = gamma_initializer
    self.moving_mean_initializer = moving_mean_initializer
    self.moving_variance_initializer = moving_variance_initializer
    self.beta_regularizer = beta_regularizer
    self.gamma_regularizer = gamma_regularizer
    self.renorm = renorm
    if renorm:
      # Validate the clipping dict eagerly so unknown keys fail at
      # construction time rather than at graph-build time.
      renorm_clipping = renorm_clipping or {}
      keys = ['rmax', 'rmin', 'dmax']
      if set(renorm_clipping) - set(keys):
        raise ValueError('renorm_clipping %s contains keys not in %s' %
                         (renorm_clipping, keys))
      self.renorm_clipping = renorm_clipping
      self.renorm_momentum = renorm_momentum
  def build(self, input_shape):
    """Create the layer's variables for the given static ``input_shape``.

    Raises ValueError when the input rank is unknown, when ``axis`` is out
    of range for that rank, or when the ``axis`` dimension is undefined.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    if not input_shape.ndims:
      raise ValueError('Input has undefined rank:', input_shape)
    ndim = len(input_shape)
    # Resolve a possibly-negative ``axis`` into an absolute dimension index.
    if self.axis < 0:
      axis = ndim + self.axis
    else:
      axis = self.axis
    if axis < 0 or axis >= ndim:
      raise ValueError('Value of `axis` argument ' + str(self.axis) +
                       ' is out of range for input with rank ' + str(ndim))
    param_dim = input_shape[axis]
    if not param_dim.value:
      raise ValueError('Input has undefined `axis` dimension. Input shape: ',
                       input_shape)

    # beta/gamma are only created when center/scale are enabled.
    if self.center:
      self.beta = self.add_variable(name='beta',
                                    shape=(param_dim,),
                                    initializer=self.beta_initializer,
                                    regularizer=self.beta_regularizer,
                                    trainable=True)
    else:
      self.beta = None
    if self.scale:
      self.gamma = self.add_variable(name='gamma',
                                     shape=(param_dim,),
                                     initializer=self.gamma_initializer,
                                     regularizer=self.gamma_regularizer,
                                     trainable=True)
    else:
      self.gamma = None

    # Disable variable partitioning when creating the moving mean and variance
    partitioner = self._scope.partitioner
    try:
      self._scope.set_partitioner(None)
      self.moving_mean = self.add_variable(
          name='moving_mean',
          shape=(param_dim,),
          initializer=self.moving_mean_initializer,
          trainable=False)
      self.moving_variance = self.add_variable(
          name='moving_variance',
          shape=(param_dim,),
          initializer=self.moving_variance_initializer,
          trainable=False)
      if self.renorm:
        # Create variables to maintain the moving mean and standard deviation.
        # These are used in training and thus are different from the moving
        # averages above. The renorm variables are colocated with moving_mean
        # and moving_variance.
        # NOTE: below, the outer `with device` block causes the current device
        # stack to be cleared. The nested ones use a `lambda` to set the desired
        # device and ignore any devices that may be set by the custom getter.
        def _renorm_variable(name, shape):
          # Helper: zero-initialized, non-trainable statistics variable.
          var = self.add_variable(name=name,
                                  shape=shape,
                                  initializer=init_ops.zeros_initializer(),
                                  trainable=False)
          return var
        with ops.device(None):
          with ops.device(lambda _: self.moving_mean.device):
            self.renorm_mean = _renorm_variable('renorm_mean', (param_dim,))
            self.renorm_mean_weight = _renorm_variable('renorm_mean_weight', ())
          # We initialize renorm_stddev to 0, and maintain the (0-initialized)
          # renorm_stddev_weight. This allows us to (1) mix the average
          # stddev with the minibatch stddev early in training, and (2) compute
          # the unbiased average stddev by dividing renorm_stddev by the weight.
          with ops.device(lambda _: self.moving_variance.device):
            self.renorm_stddev = _renorm_variable('renorm_stddev', (param_dim,))
            self.renorm_stddev_weight = _renorm_variable(
                'renorm_stddev_weight', ())
    finally:
      # Always restore the scope's original partitioner.
      self._scope.set_partitioner(partitioner)
    self.built = True
  def _renorm_correction_and_moments(self, mean, variance, training):
    """Returns the correction and update values for renorm.

    Returns a ``(r, d, new_mean, new_variance)`` tuple: the (clipped)
    renorm correction factors and the updated moving moments.  When
    ``training`` is false, r=1, d=0 and decay=1, i.e. no updates occur.
    """
    stddev = math_ops.sqrt(variance + self.epsilon)
    # Compute the average mean and standard deviation, as if they were
    # initialized with this batch's moments.
    mixed_renorm_mean = (self.renorm_mean +
                         (1. - self.renorm_mean_weight) * mean)
    mixed_renorm_stddev = (self.renorm_stddev +
                           (1. - self.renorm_stddev_weight) * stddev)
    # Compute the corrections for batch renorm.
    r = stddev / mixed_renorm_stddev
    d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
    # Ensure the corrections use pre-update moving averages.
    with ops.control_dependencies([r, d]):
      mean = array_ops.identity(mean)
      stddev = array_ops.identity(stddev)
    rmin, rmax, dmax = [self.renorm_clipping.get(key)
                        for key in ['rmin', 'rmax', 'dmax']]
    if rmin is not None:
      r = math_ops.maximum(r, rmin)
    if rmax is not None:
      r = math_ops.minimum(r, rmax)
    if dmax is not None:
      d = math_ops.maximum(d, -dmax)
      d = math_ops.minimum(d, dmax)
    # When not training, use r=1, d=0, and decay=1 meaning no updates.
    r = _smart_select(training, lambda: r, lambda: array_ops.ones_like(r))
    d = _smart_select(training, lambda: d, lambda: array_ops.zeros_like(d))
    decay = _smart_select(training, lambda: self.renorm_momentum, lambda: 1.)

    def _update_renorm_variable(var, weight, value):
      """Updates a moving average and weight, returns the unbiased value."""
      # Update the variables without zero debiasing. The debiasing will be
      # accomplished by dividing the exponential moving average by the weight.
      # For example, after a single update, the moving average would be
      # (1-decay) * value. and the weight will be 1-decay, with their ratio
      # giving value.
      # Make sure the weight is not updated until before r and d computation.
      value = array_ops.identity(value)
      with ops.control_dependencies([value]):
        weight_value = array_ops.constant(1., dtype=weight.dtype)
      new_var = moving_averages.assign_moving_average(
          var, value, decay, zero_debias=False)
      new_weight = moving_averages.assign_moving_average(
          weight, weight_value, decay, zero_debias=False)
      return new_var / new_weight

    with ops.colocate_with(self.moving_mean):
      new_mean = _update_renorm_variable(self.renorm_mean,
                                         self.renorm_mean_weight,
                                         mean)
    with ops.colocate_with(self.moving_variance):
      new_stddev = _update_renorm_variable(self.renorm_stddev,
                                           self.renorm_stddev_weight,
                                           stddev)
      # Make sqrt(moving_variance + epsilon) = new_stddev.
      new_variance = math_ops.square(new_stddev) - self.epsilon
    return (r, d, new_mean, new_variance)
  def call(self, inputs, training=False):
    """Normalize ``inputs``, updating the moving statistics when training.

    ``training`` may be a Python bool or a boolean scalar tensor; when it
    is not statically False, batch moments are computed and the moving
    averages are updated (with decay 1, i.e. a no-op, outside training).
    """
    # First, compute the axes along which to reduce the mean / variance,
    # as well as the broadcast shape to be used for all parameters.
    input_shape = inputs.get_shape()
    ndim = len(input_shape)
    reduction_axes = list(range(len(input_shape)))
    del reduction_axes[self.axis]
    broadcast_shape = [1] * len(input_shape)
    broadcast_shape[self.axis] = input_shape[self.axis].value

    # Determines whether broadcasting is needed.
    needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])

    scale, offset = self.gamma, self.beta

    # Determine a boolean value for `training`: could be True, False, or None.
    training_value = utils.constant_value(training)
    if training_value is not False:
      # Some of the computations here are not necessary when training==False
      # but not a constant. However, this makes the code simpler.
      mean, variance = nn.moments(inputs, reduction_axes)
      mean = _smart_select(training,
                           lambda: mean,
                           lambda: self.moving_mean)
      variance = _smart_select(training,
                               lambda: variance,
                               lambda: self.moving_variance)
      if self.renorm:
        r, d, new_mean, new_variance = self._renorm_correction_and_moments(
            mean, variance, training)
        # When training, the normalized values (say, x) will be transformed as
        # x * gamma + beta without renorm, and (x * r + d) * gamma + beta
        # = x * (r * gamma) + (d * gamma + beta) with renorm.
        scale = array_ops.stop_gradient(r, name='renorm_r')
        offset = array_ops.stop_gradient(d, name='renorm_d')
        if self.gamma is not None:
          scale *= self.gamma
          offset *= self.gamma
        if self.beta is not None:
          offset += self.beta
      else:
        new_mean, new_variance = mean, variance

      # Update moving averages when training, and prevent updates otherwise.
      decay = _smart_select(training, lambda: self.momentum, lambda: 1.)
      mean_update = moving_averages.assign_moving_average(
          self.moving_mean, new_mean, decay, zero_debias=False)
      variance_update = moving_averages.assign_moving_average(
          self.moving_variance, new_variance, decay, zero_debias=False)
      if not self.updates:
        self.add_update(mean_update)
        self.add_update(variance_update)
    else:
      # Statically known inference mode: use the stored moving statistics.
      mean, variance = self.moving_mean, self.moving_variance

    def _broadcast(v):
      if needs_broadcasting and v is not None:
        # In this case we must explicitly broadcast all parameters.
        return array_ops.reshape(v, broadcast_shape)
      return v

    return nn.batch_normalization(inputs,
                                  _broadcast(mean),
                                  _broadcast(variance),
                                  _broadcast(offset),
                                  _broadcast(scale),
                                  self.epsilon)
def batch_normalization(inputs,
                        axis=-1,
                        momentum=0.99,
                        epsilon=1e-3,
                        center=True,
                        scale=True,
                        beta_initializer=init_ops.zeros_initializer(),
                        gamma_initializer=init_ops.ones_initializer(),
                        moving_mean_initializer=init_ops.zeros_initializer(),
                        moving_variance_initializer=init_ops.ones_initializer(),
                        beta_regularizer=None,
                        gamma_regularizer=None,
                        training=False,
                        trainable=True,
                        name=None,
                        reuse=None,
                        renorm=False,
                        renorm_clipping=None,
                        renorm_momentum=0.99):
  """Functional interface for the batch normalization layer.

  Reference: http://arxiv.org/abs/1502.03167

  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift"

  Sergey Ioffe, Christian Szegedy

  Note: the operations which update the `moving_mean` and `moving_variance`
  variables will not be added as dependencies of your training operation and so
  must be run separately. For example:

  ```
  extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  sess.run([train_op, extra_update_ops], ...)
  ```

  Alternatively, add the operations as a dependency to your training operation
  manually, and then just run your training operation as normal:

  ```
  extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  with tf.control_dependencies(extra_update_ops):
    train_op = optimizer.minimize(loss)
  ...
  sess.run([train_op], ...)
  ```

  Arguments:
    inputs: Tensor input.
    axis: Integer, the axis that should be normalized (typically the features
      axis). For instance, after a `Convolution2D` layer with
      `data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
    momentum: Momentum for the moving average.
    epsilon: Small float added to variance to avoid dividing by zero.
    center: If True, add offset of `beta` to normalized tensor. If False, `beta`
      is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
    beta_initializer: Initializer for the beta weight.
    gamma_initializer: Initializer for the gamma weight.
    moving_mean_initializer: Initializer for the moving mean.
    moving_variance_initializer: Initializer for the moving variance.
    beta_regularizer: Optional regularizer for the beta weight.
    gamma_regularizer: Optional regularizer for the gamma weight.
    training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
      (normalized with statistics of the current batch) or in inference mode
      (normalized with moving statistics). **NOTE**: make sure to set this
      parameter correctly, or else your training/inference will not work
      properly.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.
    renorm: Whether to use Batch Renormalization
      (https://arxiv.org/abs/1702.03275). This adds extra variables during
      training. The inference is the same for either value of this parameter.
    renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
      scalar `Tensors` used to clip the renorm correction. The correction
      `(r, d)` is used as `corrected_value = normalized_value * r + d`, with
      `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
      dmax are set to inf, 0, inf, respectively.
    renorm_momentum: Momentum used to update the moving means and standard
      deviations with renorm. Unlike `momentum`, this affects training
      and should be neither too small (which would add noise) nor too large
      (which would give stale estimates). Note that `momentum` is still applied
      to get the means and variances for inference.

  Returns:
    Output tensor.
  """
  # Thin wrapper: construct a BatchNormalization layer with the given
  # configuration and apply it to ``inputs``.
  layer = BatchNormalization(
      axis=axis,
      momentum=momentum,
      epsilon=epsilon,
      center=center,
      scale=scale,
      beta_initializer=beta_initializer,
      gamma_initializer=gamma_initializer,
      moving_mean_initializer=moving_mean_initializer,
      moving_variance_initializer=moving_variance_initializer,
      beta_regularizer=beta_regularizer,
      gamma_regularizer=gamma_regularizer,
      trainable=trainable,
      renorm=renorm,
      renorm_clipping=renorm_clipping,
      renorm_momentum=renorm_momentum,
      name=name,
      _reuse=reuse,
      _scope=name)
  return layer.apply(inputs, training=training)
# Aliases
# Backwards-compatible shorthand names for the layer class and the
# functional interface.
BatchNorm = BatchNormalization
batch_norm = batch_normalization
# Helper function
def _smart_select(pred, fn_then, fn_else):
  """Selects fn_then() or fn_else() based on the value of pred.

  Serves the same purpose as `utils.smart_cond`, but works around a bug
  (b/36297356) in `smart_cond`'s delegation to `tf.cond`, which can hang
  training when using parameter servers.  If `pred` is known at graph
  construction time, the chosen branch is returned directly.  Otherwise
  `tf.where` is used: both branches are computed and one is selected,
  which is redundant but cheap for the small tensors involved here
  (batchnorm means and variances).

  Args:
    pred: A boolean scalar `Tensor`.
    fn_then: A callable to use when pred==True.
    fn_else: A callable to use when pred==False.

  Returns:
    A `Tensor` whose value is fn_then() or fn_else() based on the value of pred.
  """
  known = utils.constant_value(pred)
  if known:
    return fn_then()
  if known is False:
    return fn_else()
  # Value unknown at graph-construction time: materialize both branches and
  # let tf.where pick one at run time.
  branch_then = array_ops.expand_dims(fn_then(), 0)
  branch_else = array_ops.expand_dims(fn_else(), 0)
  cond = array_ops.reshape(pred, [1])
  selected = array_ops.where(cond, branch_then, branch_else)
  return array_ops.squeeze(selected, [0])
| apache-2.0 |
RamonGuiuGou/l10n-spain | payment_redsys/controllers/main.py | 3 | 2326 | # -*- coding: utf-8 -*-
import logging
import pprint
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
from openerp.addons.website_sale.controllers.main import website_sale
_logger = logging.getLogger(__name__)
class RedsysController(http.Controller):
    """HTTP endpoints for the Redsys payment acquirer callbacks.

    Redsys redirects the customer back to one of the four URLs below
    depending on the payment outcome; all of them funnel into
    :meth:`redsys_return`.
    """

    _return_url = '/payment/redsys/return'
    _cancel_url = '/payment/redsys/cancel'
    _exception_url = '/payment/redsys/error'
    _reject_url = '/payment/redsys/reject'

    @http.route([
        '/payment/redsys/return',
        '/payment/redsys/cancel',
        '/payment/redsys/error',
        '/payment/redsys/reject',
    ], type='http', auth='none')
    def redsys_return(self, **post):
        """Process the redirect back from Redsys: forward the POST data to
        the payment.transaction feedback handler, then redirect the user
        to ``return_url`` (``/shop`` by default)."""
        _logger.info('Redsys: entering form_feedback with post data %s',
                     pprint.pformat(post))
        if post:
            request.registry['payment.transaction'].form_feedback(
                request.cr, SUPERUSER_ID, post, 'redsys',
                context=request.context)
        return_url = post.pop('return_url', '')
        if not return_url:
            return_url = '/shop'
        return werkzeug.utils.redirect(return_url)

    @http.route(
        ['/payment/redsys/result/<page>'], type='http', auth='user',
        methods=['GET'], website=True)
    def redsys_result(self, page, **vals):
        """Render the post-payment result page for the given sale order,
        falling back to the generic 404 page on any failure."""
        try:
            order_id = vals.get('order_id', 0)
            sale_obj = request.env['sale.order']
            order = sale_obj.browse(int(order_id))
            res = {
                'order': order,
            }
            return request.render('payment_redsys.%s' % str(page), res)
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed; lookup/rendering
            # failures still fall back to the 404 page (best-effort kept).
            return request.render('website.404')
class WebsiteSale(website_sale):
    # Extends the website_sale checkout controller so that choosing the
    # Redsys acquirer resets the website's sale session once the
    # transaction has been created.
    @http.route(['/shop/payment/transaction/<int:acquirer_id>'], type='json',
                auth="public", website=True)
    def payment_transaction(self, acquirer_id):
        """Create the payment transaction via the parent implementation,
        then reset the shop session when the acquirer is Redsys."""
        tx_id = super(WebsiteSale, self).payment_transaction(acquirer_id)
        cr, context = request.cr, request.context
        acquirer_obj = request.registry.get('payment.acquirer')
        acquirer = acquirer_obj.browse(
            cr, SUPERUSER_ID, acquirer_id, context=context)
        if acquirer.provider == 'redsys':
            request.website.sale_reset(context=request.context)
        return tx_id
| agpl-3.0 |
andersonresende/django | tests/queries/models.py | 15 | 15717 | """
Various complex queries that have been problematic in the past.
"""
from __future__ import unicode_literals
import threading
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class DumbCategory(models.Model):
    # Minimal concrete model; serves only as a base for proxy/inheritance.
    pass
class ProxyCategory(DumbCategory):
    # Proxy of DumbCategory: same table, different Python class.
    class Meta:
        proxy = True
@python_2_unicode_compatible
class NamedCategory(DumbCategory):
    # Multi-table inheritance child of DumbCategory that adds a name.
    name = models.CharField(max_length=10)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Tag(models.Model):
    name = models.CharField(max_length=10)
    # Self-referential FK: tags form a tree via ``children``.
    parent = models.ForeignKey('self', blank=True, null=True,
                               related_name='children')
    category = models.ForeignKey(NamedCategory, null=True, default=None)

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Note(models.Model):
    note = models.CharField(max_length=100)
    misc = models.CharField(max_length=10)

    class Meta:
        ordering = ['note']

    def __str__(self):
        return self.note

    def __init__(self, *args, **kwargs):
        super(Note, self).__init__(*args, **kwargs)
        # Regression for #13227 -- having an attribute that
        # is unpickleable doesn't stop you from cloning queries
        # that use objects of that type as an argument.
        self.lock = threading.Lock()
@python_2_unicode_compatible
class Annotation(models.Model):
    # Links a Tag to many Notes; exercises FK + M2M combinations.
    name = models.CharField(max_length=10)
    tag = models.ForeignKey(Tag)
    notes = models.ManyToManyField(Note)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class ExtraInfo(models.Model):
    info = models.CharField(max_length=100)
    note = models.ForeignKey(Note)
    value = models.IntegerField(null=True)

    class Meta:
        ordering = ['info']

    def __str__(self):
        return self.info
@python_2_unicode_compatible
class Author(models.Model):
    name = models.CharField(max_length=10)
    # Unique so it can serve as a to_field target (see Report.creator).
    num = models.IntegerField(unique=True)
    extra = models.ForeignKey(ExtraInfo)

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Item(models.Model):
    name = models.CharField(max_length=10)
    created = models.DateTimeField()
    modified = models.DateTimeField(blank=True, null=True)
    tags = models.ManyToManyField(Tag, blank=True)
    creator = models.ForeignKey(Author)
    note = models.ForeignKey(Note)

    class Meta:
        # Descending FK first: exercises ordering through a relation.
        ordering = ['-note', 'name']

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Report(models.Model):
    name = models.CharField(max_length=10)
    # FK targeting a non-PK unique column (Author.num).
    creator = models.ForeignKey(Author, to_field='num', null=True)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Ranking(models.Model):
    rank = models.IntegerField()
    author = models.ForeignKey(Author)

    class Meta:
        # A complex ordering specification. Should stress the system a bit.
        ordering = ('author__extra__note', 'author__name', 'rank')

    def __str__(self):
        return '%d: %s' % (self.rank, self.author.name)
@python_2_unicode_compatible
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __str__(self):
return self.title
@python_2_unicode_compatible
class Number(models.Model):
num = models.IntegerField()
def __str__(self):
return six.text_type(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
    y = models.ForeignKey('Y')
class Y(models.Model):
    x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
    y = models.ForeignKey('LoopY')
    class Meta:
        ordering = ['y']
class LoopY(models.Model):
    x = models.ForeignKey(LoopX)
    class Meta:
        ordering = ['x']
class LoopZ(models.Model):
    # Self-referential FK combined with self-referential default ordering.
    z = models.ForeignKey('self')
    class Meta:
        ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
    def get_queryset(self):
        # The default manager narrows results to public rows tagged 't1'.
        qs = super(CustomManager, self).get_queryset()
        return qs.filter(public=True, tag__name='t1')
@python_2_unicode_compatible
class ManagedModel(models.Model):
    data = models.CharField(max_length=10)
    tag = models.ForeignKey(Tag)
    public = models.BooleanField(default=True)
    # 'objects' filters (see CustomManager); 'normal_manager' is unfiltered.
    objects = CustomManager()
    normal_manager = models.Manager()
    def __str__(self):
        return self.data
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
    data = models.CharField(max_length=10)
class MemberManager(models.Manager):
    def get_queryset(self):
        # Always join the one-to-one 'details' row in the same query.
        return super(MemberManager, self).get_queryset().select_related("details")
class Member(models.Model):
    name = models.CharField(max_length=10)
    details = models.OneToOneField(Detail, primary_key=True)
    objects = MemberManager()
class Child(models.Model):
    # Two distinct relation paths from Child to Member (person / parent).
    person = models.OneToOneField(Member, primary_key=True)
    parent = models.ForeignKey(Member, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
    name = models.CharField(max_length=10, primary_key=True)
    extra = models.CharField(max_length=10)
    class Meta:
        ordering = ['name', 'extra']
class Related(models.Model):
    custom = models.ForeignKey(CustomPk)
class CustomPkTag(models.Model):
    id = models.CharField(max_length=20, primary_key=True)
    custom_pk = models.ManyToManyField(CustomPk)
    tag = models.CharField(max_length=20)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
@python_2_unicode_compatible
class Celebrity(models.Model):
    name = models.CharField("Name", max_length=20)
    greatest_fan = models.ForeignKey("Fan", null=True, unique=True)
    def __str__(self):
        return self.name
class TvChef(Celebrity):
    # Multi-table-inheritance child with no extra fields.
    pass
class Fan(models.Model):
    fan_of = models.ForeignKey(Celebrity)
# Multiple foreign keys
@python_2_unicode_compatible
class LeafA(models.Model):
    data = models.CharField(max_length=10)
    def __str__(self):
        return self.data
class LeafB(models.Model):
    data = models.CharField(max_length=10)
class Join(models.Model):
    a = models.ForeignKey(LeafA)
    b = models.ForeignKey(LeafB)
@python_2_unicode_compatible
class ReservedName(models.Model):
    # Field named 'order' deliberately collides with an SQL reserved word.
    name = models.CharField(max_length=20)
    order = models.IntegerField()
    def __str__(self):
        return self.name
# A simpler shared-foreign-key setup that can expose some problems.
@python_2_unicode_compatible
class SharedConnection(models.Model):
    data = models.CharField(max_length=10)
    def __str__(self):
        return self.data
class PointerA(models.Model):
    connection = models.ForeignKey(SharedConnection)
class PointerB(models.Model):
    connection = models.ForeignKey(SharedConnection)
# Multi-layer ordering
@python_2_unicode_compatible
class SingleObject(models.Model):
    name = models.CharField(max_length=10)
    class Meta:
        ordering = ['name']
    def __str__(self):
        return self.name
class RelatedObject(models.Model):
    single = models.ForeignKey(SingleObject, null=True)
    f = models.IntegerField(null=True)
    class Meta:
        # Orders by the related SingleObject (itself ordered by 'name').
        ordering = ['single']
@python_2_unicode_compatible
class Plaything(models.Model):
    name = models.CharField(max_length=10)
    others = models.ForeignKey(RelatedObject, null=True)
    class Meta:
        ordering = ['others']
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Article(models.Model):
    name = models.CharField(max_length=20)
    created = models.DateTimeField()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Food(models.Model):
    # 'name' is unique so Eaten can FK onto it via to_field.
    name = models.CharField(max_length=20, unique=True)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Eaten(models.Model):
    food = models.ForeignKey(Food, to_field="name", null=True)
    meal = models.CharField(max_length=20)
    def __str__(self):
        return "%s at %s" % (self.food, self.meal)
@python_2_unicode_compatible
class Node(models.Model):
    num = models.IntegerField(unique=True)
    # Self-referential FK keyed on the unique 'num' column rather than the pk.
    parent = models.ForeignKey("self", to_field="num", null=True)
    def __str__(self):
        return "%s" % self.num
# Bug #12252
@python_2_unicode_compatible
class ObjectA(models.Model):
    name = models.CharField(max_length=50)
    def __str__(self):
        return self.name
class ProxyObjectA(ObjectA):
    class Meta:
        proxy = True
class ChildObjectA(ObjectA):
    pass
@python_2_unicode_compatible
class ObjectB(models.Model):
    name = models.CharField(max_length=50)
    objecta = models.ForeignKey(ObjectA)
    num = models.PositiveSmallIntegerField()
    def __str__(self):
        return self.name
class ProxyObjectB(ObjectB):
    class Meta:
        proxy = True
@python_2_unicode_compatible
class ObjectC(models.Model):
    # Nullable links to both A and B, plus a link to A's inheritance child.
    name = models.CharField(max_length=50)
    objecta = models.ForeignKey(ObjectA, null=True)
    objectb = models.ForeignKey(ObjectB, null=True)
    childobjecta = models.ForeignKey(ChildObjectA, null=True, related_name='ca_pk')
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class SimpleCategory(models.Model):
    name = models.CharField(max_length=15)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class SpecialCategory(SimpleCategory):
    special_name = models.CharField(max_length=15)
    def __str__(self):
        return self.name + " " + self.special_name
@python_2_unicode_compatible
class CategoryItem(models.Model):
    category = models.ForeignKey(SimpleCategory)
    def __str__(self):
        return "category item: " + str(self.category)
@python_2_unicode_compatible
class OneToOneCategory(models.Model):
    new_name = models.CharField(max_length=15)
    category = models.OneToOneField(SimpleCategory)
    def __str__(self):
        return "one2one " + self.new_name
class CategoryRelationship(models.Model):
    # Two FKs to the same model require distinct related_names.
    first = models.ForeignKey(SimpleCategory, related_name='first_rel')
    second = models.ForeignKey(SimpleCategory, related_name='second_rel')
class NullableName(models.Model):
    name = models.CharField(max_length=20, null=True)
    class Meta:
        ordering = ['id']
class ModelD(models.Model):
    name = models.TextField()
class ModelC(models.Model):
    name = models.TextField()
class ModelB(models.Model):
    name = models.TextField()
    c = models.ForeignKey(ModelC)
class ModelA(models.Model):
    name = models.TextField()
    b = models.ForeignKey(ModelB, null=True)
    d = models.ForeignKey(ModelD)
@python_2_unicode_compatible
class Job(models.Model):
    name = models.CharField(max_length=20, unique=True)
    def __str__(self):
        return self.name
class JobResponsibilities(models.Model):
    # Explicit m2m "through" table; both FKs target unique non-pk columns.
    job = models.ForeignKey(Job, to_field='name')
    responsibility = models.ForeignKey('Responsibility', to_field='description')
@python_2_unicode_compatible
class Responsibility(models.Model):
    description = models.CharField(max_length=20, unique=True)
    jobs = models.ManyToManyField(Job, through=JobResponsibilities,
                                  related_name='responsibilities')
    def __str__(self):
        return self.description
# Models for disjunction join promotion low level testing.
class FK1(models.Model):
    f1 = models.TextField()
    f2 = models.TextField()
class FK2(models.Model):
    f1 = models.TextField()
    f2 = models.TextField()
class FK3(models.Model):
    f1 = models.TextField()
    f2 = models.TextField()
class BaseA(models.Model):
    # Three independent nullable FKs; OR-filters across them can promote joins.
    a = models.ForeignKey(FK1, null=True)
    b = models.ForeignKey(FK2, null=True)
    c = models.ForeignKey(FK3, null=True)
@python_2_unicode_compatible
class Identifier(models.Model):
    name = models.CharField(max_length=100)
    def __str__(self):
        return self.name
class Program(models.Model):
    identifier = models.OneToOneField(Identifier)
class Channel(models.Model):
    programs = models.ManyToManyField(Program)
    identifier = models.OneToOneField(Identifier)
# A four-level chain (Book -> Chapter -> Paragraph -> Page) for deep joins.
class Book(models.Model):
    title = models.TextField()
    chapter = models.ForeignKey('Chapter')
class Chapter(models.Model):
    title = models.TextField()
    paragraph = models.ForeignKey('Paragraph')
class Paragraph(models.Model):
    text = models.TextField()
    page = models.ManyToManyField('Page')
class Page(models.Model):
    text = models.TextField()
class MyObject(models.Model):
    # Self-referential tree node.
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
    data = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
@python_2_unicode_compatible
class Order(models.Model):
    # Manually-assigned integer primary key (no AutoField).
    id = models.IntegerField(primary_key=True)
    class Meta:
        ordering = ('pk', )
    def __str__(self):
        return '%s' % self.pk
@python_2_unicode_compatible
class OrderItem(models.Model):
    order = models.ForeignKey(Order, related_name='items')
    status = models.IntegerField()
    class Meta:
        ordering = ('pk', )
    def __str__(self):
        return '%s' % self.pk
class BaseUser(models.Model):
    pass
@python_2_unicode_compatible
class Task(models.Model):
    title = models.CharField(max_length=10)
    # Two FKs to the same model, disambiguated via related_name.
    owner = models.ForeignKey(BaseUser, related_name='owner')
    creator = models.ForeignKey(BaseUser, related_name='creator')
    def __str__(self):
        return self.title
@python_2_unicode_compatible
class Staff(models.Model):
    name = models.CharField(max_length=10)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class StaffUser(BaseUser):
    # Multi-table-inheritance child of BaseUser, one-to-one with a Staff row.
    staff = models.OneToOneField(Staff, related_name='user')
    def __str__(self):
        # BUG FIX: this previously returned self.staff (a Staff model instance),
        # which is not a valid text return value for __str__/__unicode__ and
        # would raise TypeError under str()/unicode(). Coerce to text; this
        # delegates to Staff.__str__ and yields the staff member's name.
        return six.text_type(self.staff)
class Ticket21203Parent(models.Model):
    parentid = models.AutoField(primary_key=True)
    parent_bool = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now=True)
class Ticket21203Child(models.Model):
    childid = models.AutoField(primary_key=True)
    parent = models.ForeignKey(Ticket21203Parent)
class Person(models.Model):
    name = models.CharField(max_length=128)
@python_2_unicode_compatible
class Company(models.Model):
    name = models.CharField(max_length=128)
    # M2M routed through the explicit Employment table below.
    employees = models.ManyToManyField(Person, related_name='employers', through='Employment')
    def __str__(self):
        return self.name
class Employment(models.Model):
    employer = models.ForeignKey(Company)
    employee = models.ForeignKey(Person)
    title = models.CharField(max_length=128)
# Bug #22429
class School(models.Model):
    pass
class Student(models.Model):
    school = models.ForeignKey(School)
class Classroom(models.Model):
    school = models.ForeignKey(School)
    students = models.ManyToManyField(Student, related_name='classroom')
| bsd-3-clause |
"""
Tests of the instructor dashboard spoc gradebook
"""
from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory, AdminFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from capa.tests.response_xml_factory import StringResponseXMLFactory
from courseware.tests.factories import StudentModuleFactory
from xmodule.modulestore.django import modulestore
USER_COUNT = 11
@attr('shard_1')
class TestGradebook(ModuleStoreTestCase):
    """
    Test functionality of the spoc gradebook. Sets up a course with assignments and
    students who've scored various scores on these assignments. Base class for further
    gradebook tests.
    """
    # Subclasses set this to a dict to exercise a non-default grading policy.
    grading_policy = None
    def setUp(self):
        """
        Create the course with one graded Homework section, enroll USER_COUNT
        students, and give them a triangular grade distribution (student j
        answers problem i correctly iff i < j), then fetch the gradebook page
        as an instructor and store it in self.response.
        """
        super(TestGradebook, self).setUp()
        instructor = AdminFactory.create()
        self.client.login(username=instructor.username, password='test')
        # remove the caches
        modulestore().request_cache = None
        modulestore().metadata_inheritance_cache_subsystem = None
        kwargs = {}
        if self.grading_policy is not None:
            kwargs['grading_policy'] = self.grading_policy
        self.course = CourseFactory.create(**kwargs)
        chapter = ItemFactory.create(
            parent_location=self.course.location,
            category="sequential",
        )
        section = ItemFactory.create(
            parent_location=chapter.location,
            category="sequential",
            metadata={'graded': True, 'format': 'Homework'}
        )
        self.users = [UserFactory.create() for _ in xrange(USER_COUNT)]
        for user in self.users:
            CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
        for i in xrange(USER_COUNT - 1):
            category = "problem"
            item = ItemFactory.create(
                parent_location=section.location,
                category=category,
                data=StringResponseXMLFactory().build_xml(answer='foo'),
                metadata={'rerandomize': 'always'}
            )
            for j, user in enumerate(self.users):
                StudentModuleFactory.create(
                    # Triangular distribution: student j gets problem i right iff i < j.
                    grade=1 if i < j else 0,
                    max_grade=1,
                    student=user,
                    course_id=self.course.id,
                    module_state_key=item.location
                )
        self.response = self.client.get(reverse(
            'spoc_gradebook',
            args=(self.course.id.to_deprecated_string(),)
        ))
    def test_response_code(self):
        # The gradebook page should render successfully for an instructor.
        self.assertEquals(self.response.status_code, 200)
@attr('shard_1')
class TestDefaultGradingPolicy(TestGradebook):
    """
    Tests that the grading policy is properly applied for all users in the course
    Uses the default policy (50% passing rate)
    """
    def test_all_users_listed(self):
        # Every enrolled student's username should appear in the gradebook HTML.
        for user in self.users:
            self.assertIn(user.username, unicode(self.response.content, 'utf-8'))
    def test_default_policy(self):
        # Default >= 50% passes, so Users 5-10 should be passing for Homework 1 [6]
        # One use at the top of the page [1]
        self.assertEquals(7, self.response.content.count('grade_Pass'))
        # Users 1-5 attempted Homework 1 (and get Fs) [4]
        # Users 1-10 attempted any homework (and get Fs) [10]
        # Users 4-10 scored enough to not get rounded to 0 for the class (and get Fs) [7]
        # One use at top of the page [1]
        self.assertEquals(22, self.response.content.count('grade_F'))
        # All other grades are None [29 categories * 11 users - 27 non-empty grades = 292]
        # One use at the top of the page [1]
        self.assertEquals(293, self.response.content.count('grade_None'))
@attr('shard_1')
class TestLetterCutoffPolicy(TestGradebook):
    """
    Tests advanced grading policy (with letter grade cutoffs). Includes tests of
    UX display (color, etc).
    """
    # One Homework bucket worth 100% of the grade, with A/B/C/D letter cutoffs.
    grading_policy = {
        "GRADER": [
            {
                "type": "Homework",
                "min_count": 1,
                "drop_count": 0,
                "short_label": "HW",
                "weight": 1
            },
        ],
        "GRADE_CUTOFFS": {
            'A': .9,
            'B': .8,
            'C': .7,
            'D': .6,
        }
    }
    def test_styles(self):
        # Each letter grade gets its own CSS rule on the gradebook page.
        self.assertIn("grade_A {color:green;}", self.response.content)
        self.assertIn("grade_B {color:Chocolate;}", self.response.content)
        self.assertIn("grade_C {color:DarkSlateGray;}", self.response.content)
        self.assertIn("grade_D {color:DarkSlateGray;}", self.response.content)
    def test_assigned_grades(self):
        """Each student lands in the letter-grade band matching their score."""
        # Users 9-10 have >= 90% on Homeworks [2]
        # Users 9-10 have >= 90% on the class [2]
        # One use at the top of the page [1]
        self.assertEquals(5, self.response.content.count('grade_A'))
        # User 8 has 80 <= Homeworks < 90 [1]
        # User 8 has 80 <= class < 90 [1]
        # One use at the top of the page [1]
        self.assertEquals(3, self.response.content.count('grade_B'))
        # User 7 has 70 <= Homeworks < 80 [1]
        # User 7 has 70 <= class < 80 [1]
        # One use at the top of the page [1]
        self.assertEquals(3, self.response.content.count('grade_C'))
        # User 6 has 60 <= Homeworks < 70 [1]
        # User 6 has 60 <= class < 70 [1]
        # One use at the top of the page [1]
        # BUG FIX: this assertion previously re-checked 'grade_C'; per the
        # comments above (user 6 falls in the D band) it must check 'grade_D'.
        self.assertEquals(3, self.response.content.count('grade_D'))
        # Users 1-5 have 60% > grades > 0 on Homeworks [5]
        # Users 1-5 have 60% > grades > 0 on the class [5]
        # One use at top of the page [1]
        self.assertEquals(11, self.response.content.count('grade_F'))
        # User 0 has 0 on Homeworks [1]
        # User 0 has 0 on the class [1]
        # One use at the top of the page [1]
        self.assertEquals(3, self.response.content.count('grade_None'))
| agpl-3.0 |
from django.core.exceptions import MiddlewareNotUsed
from django.utils.http import http_date
class ConditionalGetMiddleware(object):
    """
    Handles conditional GET operations. If the response has a ETag or
    Last-Modified header, and the request has If-None-Match or
    If-Modified-Since, the response is replaced by an HttpNotModified.
    Also sets the Date and Content-Length response-headers.
    """
    def process_response(self, request, response):
        # Always stamp the response with the current date.
        response['Date'] = http_date()
        if not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))
        # If the client already holds the current version (ETag match or
        # unchanged Last-Modified), answer 304 instead of resending the body.
        if response.has_header('ETag'):
            if_none_match = request.META.get('HTTP_IF_NONE_MATCH', None)
            if if_none_match == response['ETag']:
                # Setting the status is enough here. The response handling path
                # automatically removes content for this status code (in
                # http.conditional_content_removal()).
                response.status_code = 304
        if response.has_header('Last-Modified'):
            if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE', None)
            if if_modified_since == response['Last-Modified']:
                # Setting the status code is enough here (same reasons as
                # above).
                response.status_code = 304
        return response
class SetRemoteAddrFromForwardedFor(object):
    """
    This middleware has been removed; see the Django 1.1 release notes for
    details.
    It previously set REMOTE_ADDR based on HTTP_X_FORWARDED_FOR. However, after
    investigation, it turns out this is impossible to do in a general manner:
    different proxies treat the X-Forwarded-For header differently. Thus, a
    built-in middleware can lead to application-level security problems, and so
    this was removed in Django 1.1
    """
    def __init__(self):
        # Imported locally: the warning machinery is only needed if this
        # (removed) middleware is actually instantiated.
        import warnings
        warnings.warn("SetRemoteAddrFromForwardedFor has been removed. "
                      "See the Django 1.1 release notes for details.",
                      category=DeprecationWarning)
        # Tell Django's middleware loader to silently drop this entry.
        raise MiddlewareNotUsed()
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
class crm_opportunity2phonecall(osv.osv_memory):
    """Converts Opportunity to Phonecall"""
    _inherit = 'crm.phonecall2phonecall'
    _name = 'crm.opportunity2phonecall'
    _description = 'Opportunity to Phonecall'
    def default_get(self, cr, uid, fields, context=None):
        """Prefill the wizard fields from the opportunities selected in the
        context (active_ids): name, salesperson, sales team, category, partner
        and phone. The action defaults to logging a call dated now.
        NOTE(review): when several opportunities are selected, each loop pass
        overwrites `res`, so only the last opportunity's values survive --
        presumably the wizard is meant for a single record; confirm with callers.
        """
        opp_obj = self.pool.get('crm.lead')
        categ_id = False
        data_obj = self.pool.get('ir.model.data')
        try:
            # Resolve the standard "phone" call category; tolerate its absence.
            res_id = data_obj._get_id(cr, uid, 'crm', 'categ_phone2')
            categ_id = data_obj.browse(cr, uid, res_id, context=context).res_id
        except ValueError:
            pass
        record_ids = context and context.get('active_ids', []) or []
        res = {}
        res.update({'action': 'log', 'date': time.strftime('%Y-%m-%d %H:%M:%S')})
        for opp in opp_obj.browse(cr, uid, record_ids, context=context):
            if 'name' in fields:
                res.update({'name': opp.name})
            if 'user_id' in fields:
                res.update({'user_id': opp.user_id and opp.user_id.id or False})
            if 'section_id' in fields:
                res.update({'section_id': opp.section_id and opp.section_id.id or False})
            if 'categ_id' in fields:
                res.update({'categ_id': categ_id})
            if 'partner_id' in fields:
                res.update({'partner_id': opp.partner_id and opp.partner_id.id or False})
            if 'contact_name' in fields:
                res.update({'contact_name': opp.partner_id and opp.partner_id.name or False})
            if 'phone' in fields:
                # Prefer the opportunity's own phone, fall back to the partner's.
                res.update({'phone': opp.phone or (opp.partner_id and opp.partner_id.phone or False)})
        return res
    def action_schedule(self, cr, uid, ids, context=None):
        """Schedule (or log) a phone call for every selected opportunity using
        the wizard's data, then close the wizard window."""
        value = {}
        if context is None:
            context = {}
        phonecall = self.pool.get('crm.phonecall')
        opportunity_ids = context and context.get('active_ids') or []
        opportunity = self.pool.get('crm.lead')
        data = self.browse(cr, uid, ids, context=context)[0]
        call_ids = opportunity.schedule_phonecall(cr, uid, opportunity_ids, data.date, data.name, \
            data.note, data.phone, data.contact_name, data.user_id and data.user_id.id or False, \
            data.section_id and data.section_id.id or False, \
            data.categ_id and data.categ_id.id or False, \
            action=data.action, context=context)
        return {'type': 'ir.actions.act_window_close'}
crm_opportunity2phonecall()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
# -*- coding: utf-8 -*-
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
from ulakbus.models import BAPEtkinlikProje
from ulakbus.views.bap.bap_etkinlik_basvuru_inceleme import EtkinlikBasvuruInceleForm
from zengine.forms import fields
from zengine.views.crud import CrudView, list_query, obj_filter
from zengine.lib.translation import gettext as _
class OEEtkinlikBasvuruListeleme(CrudView):
    """
    Workflow step where a faculty member lists his/her event (etkinlik)
    applications, views individual applications, and can generate a dean's
    office permission petition for a selected application.
    """
    class Meta:
        allow_search = True
        model = 'BAPEtkinlikProje'
    def __init__(self, current=None):
        CrudView.__init__(self, current)
        # Hide the list view's default "add" button in this workflow.
        self.ListForm.add = None
    def listele(self):
        """
        Step where the faculty member reviews the event applications he/she has made.
        """
        self.list(list_fields=['bildiri_basligi', 'durum'])
    def goruntule(self):
        """
        Step where the faculty member views one of his/her event applications in detail.
        """
        key = self.input['object_id']
        self.show()
        form = EtkinlikBasvuruInceleForm(title=_(u"Etkinlik Başvuru Detayları"))
        form.listeye_don = fields.Button(_(u"Listeye Dön"))
        # Attach each budget line of the application to the read-only form.
        butceler = BAPEtkinlikProje.objects.get(key).EtkinlikButce
        for butce in butceler:
            form.Butce(talep_turu=butce.talep_turu, istenen_tutar=butce.istenen_tutar)
        self.form_out(form)
        self.current.output["meta"]["allow_actions"] = False
        self.current.output["meta"]["allow_add_listnode"] = False
    def dilekce_olustur(self):
        """
        Step where the faculty member generates the dean's office permission petition.
        """
        self.set_client_cmd('download')
        # todo: once the Document class is written, the URL it returns will be used here
        self.current.output['download_url'] = "FILE_URL_FROM_DOCUMENT_CLASS"
    @obj_filter
    def basvuru_islemleri(self, obj, result, **kwargs):
        """
        The default action buttons are arranged according to the faculty
        member's possible actions on an event application.
        """
        result['actions'] = [
            {'name': _(u'Görüntüle'), 'cmd': 'goruntule', 'mode': 'normal', 'show_as': 'button'},
            {'name': _(u'Dilekçe Oluştur'), 'cmd': 'dilekce', 'mode': 'normal', 'show_as': 'button'}
        ]
    @list_query
    def list_by_personel_id(self, queryset):
        """
        Filters the listing down to the faculty member's own events.
        """
        return queryset.filter(basvuru_yapan=self.current.user.personel.okutman)
| gpl-3.0 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import smtplib
import _socket
from frappe.utils import cint
from frappe import _
def send(email, append_to=None):
	"""send the message or add it to Outbox Email"""
	if frappe.flags.in_test:
		# Under tests, capture the rendered message instead of sending it.
		frappe.flags.sent_mail = email.as_string()
		return
	if frappe.are_emails_muted():
		frappe.msgprint(_("Emails are muted"))
		return
	try:
		smtpserver = SMTPServer(append_to=append_to)
		# Some providers require the From address to match the account login;
		# when so configured, keep the original sender reachable via Reply-To.
		if hasattr(smtpserver, "always_use_account_email_id_as_sender") and \
			cint(smtpserver.always_use_account_email_id_as_sender) and smtpserver.login:
			if not email.reply_to:
				email.reply_to = email.sender
			email.sender = smtpserver.login
		smtpserver.sess.sendmail(email.sender, email.recipients + (email.cc or []),
			email.as_string())
	except smtplib.SMTPSenderRefused:
		frappe.msgprint(_("Invalid login or password"))
		raise
	except smtplib.SMTPRecipientsRefused:
		frappe.msgprint(_("Invalid recipient address"))
		raise
def get_outgoing_email_account(raise_exception_not_set=True, append_to=None):
	"""Returns outgoing email account based on `append_to` or the default
	outgoing account. If default outgoing account is not found, it will
	try getting settings from `site_config.json`."""
	if not getattr(frappe.local, "outgoing_email_account", None):
		# Per-request cache keyed by append_to (or "default").
		frappe.local.outgoing_email_account = {}
	if not frappe.local.outgoing_email_account.get(append_to or "default"):
		email_account = None
		if append_to:
			# Prefer an account explicitly configured for this doctype.
			email_account = _get_email_account({"enable_outgoing": 1, "append_to": append_to})
		if not email_account:
			email_account = get_default_outgoing_email_account(raise_exception_not_set=raise_exception_not_set)
		if not email_account and raise_exception_not_set:
			frappe.throw(_("Please setup default Email Account from Setup > Email > Email Account"),
				frappe.OutgoingEmailError)
		frappe.local.outgoing_email_account[append_to or "default"] = email_account
	return frappe.local.outgoing_email_account[append_to or "default"]
def get_default_outgoing_email_account(raise_exception_not_set=True):
	"""Return the default outgoing Email Account, falling back to the SMTP
	settings in site_config.json; when emails are muted a stub account is
	returned instead."""
	email_account = _get_email_account({"enable_outgoing": 1, "default_outgoing": 1})
	if not email_account and frappe.conf.get("mail_server"):
		# from site_config.json
		email_account = frappe.new_doc("Email Account")
		email_account.update({
			"smtp_server": frappe.conf.get("mail_server"),
			"smtp_port": frappe.conf.get("mail_port"),
			"use_tls": cint(frappe.conf.get("use_ssl") or 0),
			"email_id": frappe.conf.get("mail_login"),
			"password": frappe.conf.get("mail_password"),
			"sender": frappe.conf.get("auto_email_id", "notifications@example.com")
		})
		# Mark that this account is in-memory only, not a saved document.
		email_account.from_site_config = True
	if not email_account and not raise_exception_not_set:
		return None
	if frappe.are_emails_muted():
		# create a stub
		email_account = frappe.new_doc("Email Account")
		email_account.update({
			"sender": "notifications@example.com"
		})
	return email_account
def _get_email_account(filters):
	"""Fetch the first Email Account document matching *filters*.

	Returns the loaded document, or None when no account matches."""
	account_name = frappe.db.get_value("Email Account", filters)
	if not account_name:
		return None
	return frappe.get_doc("Email Account", account_name)
class SMTPServer:
	"""Thin wrapper around smtplib.SMTP, configured either from explicit
	connection parameters or from the site's outgoing Email Account."""
	def __init__(self, login=None, password=None, server=None, port=None, use_ssl=None, append_to=None):
		# get defaults from mail settings
		self._sess = None
		self.email_account = None
		self.server = None
		if server:
			# Explicit connection parameters override any configured account.
			self.server = server
			self.port = port
			self.use_ssl = cint(use_ssl)
			self.login = login
			self.password = password
		else:
			self.setup_email_account(append_to)
	def setup_email_account(self, append_to=None):
		# Load connection settings from the matching Email Account, if any.
		self.email_account = get_outgoing_email_account(raise_exception_not_set=False, append_to=append_to)
		if self.email_account:
			self.server = self.email_account.smtp_server
			self.login = getattr(self.email_account, "login_id", None) \
				or self.email_account.email_id
			self.password = self.email_account.password
			self.port = self.email_account.smtp_port
			self.use_ssl = self.email_account.use_tls
			self.sender = self.email_account.email_id
			self.always_use_account_email_id_as_sender = self.email_account.get("always_use_account_email_id_as_sender")
	@property
	def sess(self):
		"""get session"""
		# Lazily connect and log in on first access; reuse afterwards.
		if self._sess:
			return self._sess
		# check if email server specified
		if not getattr(self, 'server'):
			err_msg = _('Email Account not setup. Please create a new Email Account from Setup > Email > Email Account')
			frappe.msgprint(err_msg)
			raise frappe.OutgoingEmailError, err_msg
		try:
			if self.use_ssl and not self.port:
				# Default to the submission port when TLS is requested.
				self.port = 587
			self._sess = smtplib.SMTP((self.server or "").encode('utf-8'),
				cint(self.port) or None)
			if not self._sess:
				err_msg = _('Could not connect to outgoing email server')
				frappe.msgprint(err_msg)
				raise frappe.OutgoingEmailError, err_msg
			if self.use_ssl:
				# STARTTLS upgrade: EHLO, negotiate TLS, then EHLO again.
				self._sess.ehlo()
				self._sess.starttls()
				self._sess.ehlo()
			if self.login and self.password:
				ret = self._sess.login((self.login or "").encode('utf-8'),
					(self.password or "").encode('utf-8'))
				# check if logged correctly
				if ret[0]!=235:
					frappe.msgprint(ret[1])
					raise frappe.OutgoingEmailError, ret[1]
			return self._sess
		except _socket.error:
			# Invalid mail server -- due to refusing connection
			frappe.throw(_('Invalid Outgoing Mail Server or Port'))
		except smtplib.SMTPAuthenticationError:
			frappe.throw(_("Invalid login or password"))
		except smtplib.SMTPException:
			frappe.msgprint(_('Unable to send emails at this time'))
			raise
| mit |
# -*- coding: utf-8 -*-
"""
Integration tests for submitting problem responses and getting grades.
"""
# text processing dependencies
import json
import os
from textwrap import dedent
from mock import patch
from django.conf import settings
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
# Need access to internal func to put users in the right group
from courseware import grades
from courseware.models import StudentModule
#import factories and parent testcase modules
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from capa.tests.response_xml_factory import (
OptionResponseXMLFactory, CustomResponseXMLFactory, SchematicResponseXMLFactory,
CodeResponseXMLFactory,
)
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from lms.lib.xblock.runtime import quote_slashes
from student.tests.factories import UserFactory
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Check that a course gets graded properly.
"""
# arbitrary constant
COURSE_SLUG = "100"
COURSE_NAME = "test_course"
    def setUp(self):
        """Create the test course and an activated, enrolled test student."""
        super(TestSubmittingProblems, self).setUp(create_user=False)
        # Create course
        self.course = CourseFactory.create(display_name=self.COURSE_NAME, number=self.COURSE_SLUG)
        assert self.course, "Couldn't load course %r" % self.COURSE_NAME
        # create a test student
        self.student = 'view@test.com'
        self.password = 'foo'
        self.create_account('u1', self.student, self.password)
        self.activate_user(self.student)
        self.enroll(self.course)
        self.student_user = User.objects.get(email=self.student)
        self.factory = RequestFactory()
    def refresh_course(self):
        """
        Re-fetch the course from the database so that the object being dealt with has everything added to it.
        """
        self.course = self.store.get_course(self.course.id)
    def problem_location(self, problem_url_name):
        """
        Return the usage key (location) of the problem with the given url name.
        """
        return self.course.id.make_usage_key('problem', problem_url_name)
    def modx_url(self, problem_location, dispatch):
        """
        Return the url needed for the desired action.

        problem_location: location of the problem on which we want some action
        dispatch: the action string that gets passed to the view as a kwarg
            example: 'check_problem' for having responses processed
        """
        return reverse(
            'xblock_handler',
            kwargs={
                'course_id': self.course.id.to_deprecated_string(),
                'usage_id': quote_slashes(problem_location.to_deprecated_string()),
                'handler': 'xmodule_handler',
                'suffix': dispatch,
            }
        )
def submit_question_answer(self, problem_url_name, responses):
"""
Submit answers to a question.
Responses is a dict mapping problem ids to answers:
{'2_1': 'Correct', '2_2': 'Incorrect'}
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_check')
answer_key_prefix = 'input_i4x-' + self.course.org + '-{}-problem-{}_'.format(self.COURSE_SLUG, problem_url_name)
# format the response dictionary to be sent in the post request by adding the above prefix to each key
response_dict = {(answer_key_prefix + k): v for k, v in responses.items()}
resp = self.client.post(modx_url, response_dict)
return resp
def reset_question_answer(self, problem_url_name):
"""
Reset specified problem for current user.
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_reset')
resp = self.client.post(modx_url)
return resp
def show_question_answer(self, problem_url_name):
"""
Shows the answer to the current student.
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_show')
resp = self.client.post(modx_url)
return resp
def add_dropdown_to_section(self, section_location, name, num_inputs=2):
"""
Create and return a dropdown problem.
section_location: location object of section in which to create the problem
(problems must live in a section to be graded properly)
name: string name of the problem
num_input: the number of input fields to create in the problem
"""
prob_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=num_inputs,
weight=num_inputs,
options=['Correct', 'Incorrect', u'ⓤⓝⓘⓒⓞⓓⓔ'],
correct_option='Correct'
)
problem = ItemFactory.create(
parent_location=section_location,
category='problem',
data=prob_xml,
metadata={'rerandomize': 'always'},
display_name=name
)
# re-fetch the course from the database so the object is up to date
self.refresh_course()
return problem
def add_graded_section_to_course(self, name, section_format='Homework', late=False, reset=False, showanswer=False):
"""
Creates a graded homework section within a chapter and returns the section.
"""
# if we don't already have a chapter create a new one
if not(hasattr(self, 'chapter')):
self.chapter = ItemFactory.create(
parent_location=self.course.location,
category='chapter'
)
if late:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
metadata={'graded': True, 'format': section_format, 'due': '2013-05-20T23:30'}
)
elif reset:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
rerandomize='always',
metadata={
'graded': True,
'format': section_format,
}
)
elif showanswer:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
showanswer='never',
metadata={
'graded': True,
'format': section_format,
}
)
else:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
metadata={'graded': True, 'format': section_format}
)
# now that we've added the problem and section to the course
# we fetch the course from the database so the object we are
# dealing with has these additions
self.refresh_course()
return section
class TestCourseGrader(TestSubmittingProblems):
    """
    Suite of tests for the course grader.
    """
    def add_grading_policy(self, grading_policy):
        """
        Add a grading policy to the course.
        """
        self.course.grading_policy = grading_policy
        self.update_course(self.course, self.student_user.id)
        self.refresh_course()
    def get_grade_summary(self):
        """
        Calls grades.grade for current user and course.

        The keywords for the returned object are
        - grade : A final letter grade.
        - percent : The final percent for the class (rounded up).
        - section_breakdown : A breakdown of each section that makes
          up the grade. (For display)
        - grade_breakdown : A breakdown of the major components that
          make up the final grade. (For display)
        """
        fake_request = self.factory.get(
            reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})
        )
        return grades.grade(self.student_user, fake_request, self.course)
    def get_progress_summary(self):
        """
        Return progress summary structure for current user and course.

        Returns
        - courseware_summary is a summary of all sections with problems in the course.
          It is organized as an array of chapters, each containing an array of sections,
          each containing an array of scores. This contains information for graded and
          ungraded problems, and is good for displaying a course summary with due dates,
          etc.
        """
        fake_request = self.factory.get(
            reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})
        )
        progress_summary = grades.progress_summary(
            self.student_user, fake_request, self.course
        )
        return progress_summary
    def check_grade_percent(self, percent):
        """
        Assert that percent grade is as expected.
        """
        grade_summary = self.get_grade_summary()
        self.assertEqual(grade_summary['percent'], percent)
    def earned_hw_scores(self):
        """
        Global scores, each Score is a Problem Set.

        Returns list of scores: [<points on hw_1>, <points on hw_2>, ..., <points on hw_n>]
        """
        return [s.earned for s in self.get_grade_summary()['totaled_scores']['Homework']]
    def score_for_hw(self, hw_url_name):
        """
        Returns list of scores for a given url.

        Returns list of scores for the given homework:
            [<points on problem_1>, <points on problem_2>, ..., <points on problem_n>]
        """
        # list of grade summaries for each section
        sections_list = []
        for chapter in self.get_progress_summary():
            sections_list.extend(chapter['sections'])
        # get the first section that matches the url (there should only be one)
        hw_section = next(section for section in sections_list if section.get('url_name') == hw_url_name)
        return [s.earned for s in hw_section['scores']]
    def basic_setup(self, late=False, reset=False, showanswer=False):
        """
        Set up a simple course for testing basic grading functionality.
        """
        grading_policy = {
            "GRADER": [{
                "type": "Homework",
                "min_count": 1,
                "drop_count": 0,
                "short_label": "HW",
                "weight": 1.0
            }],
            "GRADE_CUTOFFS": {
                'A': .9,
                'B': .33
            }
        }
        self.add_grading_policy(grading_policy)
        # set up a simple course with four problems
        self.homework = self.add_graded_section_to_course('homework', late=late, reset=reset, showanswer=showanswer)
        self.add_dropdown_to_section(self.homework.location, 'p1', 1)
        self.add_dropdown_to_section(self.homework.location, 'p2', 1)
        self.add_dropdown_to_section(self.homework.location, 'p3', 1)
        self.refresh_course()
    def weighted_setup(self):
        """
        Set up a simple course for testing weighted grading functionality.
        """
        grading_policy = {
            "GRADER": [{
                "type": "Homework",
                "min_count": 1,
                "drop_count": 0,
                "short_label": "HW",
                "weight": 0.25
            }, {
                "type": "Final",
                "name": "Final Section",
                "short_label": "Final",
                "weight": 0.75
            }]
        }
        self.add_grading_policy(grading_policy)
        # set up a structure of 1 homework and 1 final
        self.homework = self.add_graded_section_to_course('homework')
        self.problem = self.add_dropdown_to_section(self.homework.location, 'H1P1')
        self.final = self.add_graded_section_to_course('Final Section', 'Final')
        self.final_question = self.add_dropdown_to_section(self.final.location, 'FinalQuestion')
    def dropping_setup(self):
        """
        Set up a simple course for testing the dropping grading functionality.
        """
        grading_policy = {
            "GRADER": [
                {
                    "type": "Homework",
                    "min_count": 3,
                    "drop_count": 1,
                    "short_label": "HW",
                    "weight": 1
                }]
        }
        self.add_grading_policy(grading_policy)
        # Set up a course structure that just consists of 3 homeworks.
        # Since the grading policy drops 1 entire homework, each problem is worth 25%
        # names for the problem in the homeworks
        self.hw1_names = ['h1p1', 'h1p2']
        self.hw2_names = ['h2p1', 'h2p2']
        self.hw3_names = ['h3p1', 'h3p2']
        self.homework1 = self.add_graded_section_to_course('homework1')
        self.add_dropdown_to_section(self.homework1.location, self.hw1_names[0], 1)
        self.add_dropdown_to_section(self.homework1.location, self.hw1_names[1], 1)
        self.homework2 = self.add_graded_section_to_course('homework2')
        self.add_dropdown_to_section(self.homework2.location, self.hw2_names[0], 1)
        self.add_dropdown_to_section(self.homework2.location, self.hw2_names[1], 1)
        self.homework3 = self.add_graded_section_to_course('homework3')
        self.add_dropdown_to_section(self.homework3.location, self.hw3_names[0], 1)
        self.add_dropdown_to_section(self.homework3.location, self.hw3_names[1], 1)
    def test_submission_late(self):
        """Test problem for due date in the past"""
        self.basic_setup(late=True)
        resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.assertEqual(resp.status_code, 200)
        err_msg = (
            "The state of this problem has changed since you loaded this page. "
            "Please refresh your page."
        )
        self.assertEqual(json.loads(resp.content).get("success"), err_msg)
    def test_submission_reset(self):
        """Test problem ProcessingErrors due to resets"""
        self.basic_setup(reset=True)
        resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
        # submit a second time to draw NotFoundError
        resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.assertEqual(resp.status_code, 200)
        err_msg = (
            "The state of this problem has changed since you loaded this page. "
            "Please refresh your page."
        )
        self.assertEqual(json.loads(resp.content).get("success"), err_msg)
    def test_submission_show_answer(self):
        """Test problem for ProcessingErrors due to showing answer"""
        self.basic_setup(showanswer=True)
        resp = self.show_question_answer('p1')
        self.assertEqual(resp.status_code, 200)
        err_msg = (
            "The state of this problem has changed since you loaded this page. "
            "Please refresh your page."
        )
        self.assertEqual(json.loads(resp.content).get("success"), err_msg)
    def test_none_grade(self):
        """
        Check grade is 0 to begin with.
        """
        self.basic_setup()
        self.check_grade_percent(0)
        self.assertEqual(self.get_grade_summary()['grade'], None)
    def test_b_grade_exact(self):
        """
        Check that at exactly the cutoff, the grade is B.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.check_grade_percent(0.33)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
    def test_b_grade_above(self):
        """
        Check grade between cutoffs.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.check_grade_percent(0.67)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
    def test_a_grade(self):
        """
        Check that 100 percent completion gets an A
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Correct'})
        self.check_grade_percent(1.0)
        self.assertEqual(self.get_grade_summary()['grade'], 'A')
    def test_wrong_answers(self):
        """
        Check that answering incorrectly is graded properly.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
        self.check_grade_percent(0.67)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
    def test_submissions_api_overrides_scores(self):
        """
        Check that a score reported by the submissions API overrides the
        score read from StudentModule.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
        self.check_grade_percent(0.67)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
        # But now we mock out a get_scores call, and watch as it overrides the
        # score read from StudentModule and our student gets an A instead.
        with patch('submissions.api.get_scores') as mock_get_scores:
            mock_get_scores.return_value = {
                self.problem_location('p3').to_deprecated_string(): (1, 1)
            }
            self.check_grade_percent(1.0)
            self.assertEqual(self.get_grade_summary()['grade'], 'A')
    def test_submissions_api_anonymous_student_id(self):
        """
        Check that the submissions API is sent an anonymous student ID.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
        with patch('submissions.api.get_scores') as mock_get_scores:
            mock_get_scores.return_value = {
                self.problem_location('p3').to_deprecated_string(): (1, 1)
            }
            self.get_grade_summary()
            # Verify that the submissions API was sent an anonymized student ID
            mock_get_scores.assert_called_with(
                self.course.id.to_deprecated_string(), '99ac6730dc5f900d69fd735975243b31'
            )
    def test_weighted_homework(self):
        """
        Test that the homework section has proper weight.
        """
        self.weighted_setup()
        # Get both parts correct
        self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
        self.check_grade_percent(0.25)
        self.assertEqual(self.earned_hw_scores(), [2.0])  # Order matters
        self.assertEqual(self.score_for_hw('homework'), [2.0])
    def test_weighted_exam(self):
        """
        Test that the exam section has the proper weight.
        """
        self.weighted_setup()
        self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
        self.check_grade_percent(0.75)
    def test_weighted_total(self):
        """
        Test that the weighted total adds to 100.
        """
        self.weighted_setup()
        self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
        self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
        self.check_grade_percent(1.0)
    def dropping_homework_stage1(self):
        """
        Get half the first homework correct and all of the second
        """
        self.submit_question_answer(self.hw1_names[0], {'2_1': 'Correct'})
        self.submit_question_answer(self.hw1_names[1], {'2_1': 'Incorrect'})
        for name in self.hw2_names:
            self.submit_question_answer(name, {'2_1': 'Correct'})
    def test_dropping_grades_normally(self):
        """
        Test that the dropping policy does not change things before it should.
        """
        self.dropping_setup()
        self.dropping_homework_stage1()
        self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
        self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
        self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 0])  # Order matters
        self.check_grade_percent(0.75)
    def test_dropping_nochange(self):
        """
        Tests that grade does not change when making the global homework grade minimum not unique.
        """
        self.dropping_setup()
        self.dropping_homework_stage1()
        self.submit_question_answer(self.hw3_names[0], {'2_1': 'Correct'})
        self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
        self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
        self.assertEqual(self.score_for_hw('homework3'), [1.0, 0.0])
        self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 1.0])  # Order matters
        self.check_grade_percent(0.75)
    def test_dropping_all_correct(self):
        """
        Test that the lowest is dropped for a perfect score.
        """
        self.dropping_setup()
        self.dropping_homework_stage1()
        for name in self.hw3_names:
            self.submit_question_answer(name, {'2_1': 'Correct'})
        self.check_grade_percent(1.0)
        self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 2.0])  # Order matters
        self.assertEqual(self.score_for_hw('homework3'), [1.0, 1.0])
class ProblemWithUploadedFilesTest(TestSubmittingProblems):
    """Tests of problems whose answers are uploaded files (CodeResponse)."""
    def setUp(self):
        super(ProblemWithUploadedFilesTest, self).setUp()
        self.section = self.add_graded_section_to_course('section')
    def problem_setup(self, name, files):
        """
        Create a CodeResponse problem with files to upload.
        """
        problem_xml = CodeResponseXMLFactory().build_xml(
            allowed_files=files, required_files=files,
        )
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            display_name=name,
            data=problem_xml
        )
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def test_three_files(self):
        # Open the test fixture files, and register each one for closing
        # when the test finishes.
        filenames = "prog1.py prog2.py prog3.py"
        fileobjs = [
            open(os.path.join(settings.COMMON_TEST_DATA_ROOT, "capa", fname))
            for fname in filenames.split()
        ]
        for handle in fileobjs:
            self.addCleanup(handle.close)
        self.problem_setup("the_problem", filenames)
        # Intercept the xqueue HTTP session so no real request is made.
        with patch('courseware.module_render.XQUEUE_INTERFACE.session') as mock_session:
            resp = self.submit_question_answer("the_problem", {'2_1': fileobjs})
        self.assertEqual(resp.status_code, 200)
        json_resp = json.loads(resp.content)
        self.assertEqual(json_resp['success'], "incorrect")
        # Inspect how post() was invoked on the mocked session.
        call_name, call_args, call_kwargs = mock_session.mock_calls[0]
        self.assertEqual(call_name, "post")
        self.assertEqual(len(call_args), 1)
        self.assertTrue(call_args[0].endswith("/submit/"))
        self.assertItemsEqual(call_kwargs.keys(), ["files", "data"])
        self.assertItemsEqual(call_kwargs['files'].keys(), filenames.split())
class TestPythonGradedResponse(TestSubmittingProblems):
    """
    Check that we can submit a schematic and custom response, and it answers properly.
    """
    # NOTE: the three *_SCRIPT constants below are runtime string literals
    # (grader code executed by capa), not docstrings — do not edit their text.
    SCHEMATIC_SCRIPT = dedent("""
        # for a schematic response, submission[i] is the json representation
        # of the diagram and analysis results for the i-th schematic tag
        def get_tran(json,signal):
            for element in json:
                if element[0] == 'transient':
                    return element[1].get(signal,[])
            return []
        def get_value(at,output):
            for (t,v) in output:
                if at == t: return v
            return None
        output = get_tran(submission[0],'Z')
        okay = True
        # output should be 1, 1, 1, 1, 1, 0, 0, 0
        if get_value(0.0000004, output) < 2.7: okay = False;
        if get_value(0.0000009, output) < 2.7: okay = False;
        if get_value(0.0000014, output) < 2.7: okay = False;
        if get_value(0.0000019, output) < 2.7: okay = False;
        if get_value(0.0000024, output) < 2.7: okay = False;
        if get_value(0.0000029, output) > 0.25: okay = False;
        if get_value(0.0000034, output) > 0.25: okay = False;
        if get_value(0.0000039, output) > 0.25: okay = False;
        correct = ['correct' if okay else 'incorrect']""").strip()
    SCHEMATIC_CORRECT = json.dumps(
        [['transient', {'Z': [
            [0.0000004, 2.8],
            [0.0000009, 2.8],
            [0.0000014, 2.8],
            [0.0000019, 2.8],
            [0.0000024, 2.8],
            [0.0000029, 0.2],
            [0.0000034, 0.2],
            [0.0000039, 0.2]
        ]}]]
    )
    SCHEMATIC_INCORRECT = json.dumps(
        [['transient', {'Z': [
            [0.0000004, 2.8],
            [0.0000009, 0.0],  # wrong.
            [0.0000014, 2.8],
            [0.0000019, 2.8],
            [0.0000024, 2.8],
            [0.0000029, 0.2],
            [0.0000034, 0.2],
            [0.0000039, 0.2]
        ]}]]
    )
    CUSTOM_RESPONSE_SCRIPT = dedent("""
        def test_csv(expect, ans):
            # Take out all spaces in expected answer
            expect = [i.strip(' ') for i in str(expect).split(',')]
            # Take out all spaces in student solution
            ans = [i.strip(' ') for i in str(ans).split(',')]
            def strip_q(x):
                # Strip quotes around strings if students have entered them
                stripped_ans = []
                for item in x:
                    if item[0] == "'" and item[-1]=="'":
                        item = item.strip("'")
                    elif item[0] == '"' and item[-1] == '"':
                        item = item.strip('"')
                    stripped_ans.append(item)
                return stripped_ans
            return strip_q(expect) == strip_q(ans)""").strip()
    CUSTOM_RESPONSE_CORRECT = "0, 1, 2, 3, 4, 5, 'Outside of loop', 6"
    CUSTOM_RESPONSE_INCORRECT = "Reading my code I see. I hope you like it :)"
    COMPUTED_ANSWER_SCRIPT = dedent("""
        if submission[0] == "a shout in the street":
            correct = ['correct']
        else:
            correct = ['incorrect']""").strip()
    COMPUTED_ANSWER_CORRECT = "a shout in the street"
    COMPUTED_ANSWER_INCORRECT = "because we never let them in"
    def setUp(self):
        super(TestPythonGradedResponse, self).setUp()
        self.section = self.add_graded_section_to_course('section')
        # problem name -> known-correct / known-incorrect submission text
        self.correct_responses = {}
        self.incorrect_responses = {}
    def schematic_setup(self, name):
        """
        set up an example Circuit_Schematic_Builder problem
        """
        script = self.SCHEMATIC_SCRIPT
        xmldata = SchematicResponseXMLFactory().build_xml(answer=script)
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            boilerplate='circuitschematic.yaml',
            display_name=name,
            data=xmldata
        )
        # define the correct and incorrect responses to this problem
        self.correct_responses[name] = self.SCHEMATIC_CORRECT
        self.incorrect_responses[name] = self.SCHEMATIC_INCORRECT
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def custom_response_setup(self, name):
        """
        set up an example custom response problem using a check function
        """
        test_csv = self.CUSTOM_RESPONSE_SCRIPT
        expect = self.CUSTOM_RESPONSE_CORRECT
        cfn_problem_xml = CustomResponseXMLFactory().build_xml(script=test_csv, cfn='test_csv', expect=expect)
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            boilerplate='customgrader.yaml',
            data=cfn_problem_xml,
            display_name=name
        )
        # define the correct and incorrect responses to this problem
        self.correct_responses[name] = expect
        self.incorrect_responses[name] = self.CUSTOM_RESPONSE_INCORRECT
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def computed_answer_setup(self, name):
        """
        set up an example problem using an answer script
        """
        script = self.COMPUTED_ANSWER_SCRIPT
        computed_xml = CustomResponseXMLFactory().build_xml(answer=script)
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            boilerplate='customgrader.yaml',
            data=computed_xml,
            display_name=name
        )
        # define the correct and incorrect responses to this problem
        self.correct_responses[name] = self.COMPUTED_ANSWER_CORRECT
        self.incorrect_responses[name] = self.COMPUTED_ANSWER_INCORRECT
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def _check_correct(self, name):
        """
        check that the known-correct submission for problem "name" is graded correct
        """
        resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
        respdata = json.loads(resp.content)
        self.assertEqual(respdata['success'], 'correct')
    def _check_incorrect(self, name):
        """
        check that the known-incorrect submission for problem "name" is graded incorrect
        """
        resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
        respdata = json.loads(resp.content)
        self.assertEqual(respdata['success'], 'incorrect')
    def _check_ireset(self, name):
        """
        Check that the problem can be reset
        """
        # first, get the question wrong
        resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
        # reset the question
        self.reset_question_answer(name)
        # then get it right
        resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
        respdata = json.loads(resp.content)
        self.assertEqual(respdata['success'], 'correct')
    def test_schematic_correct(self):
        name = "schematic_problem"
        self.schematic_setup(name)
        self._check_correct(name)
    def test_schematic_incorrect(self):
        name = "schematic_problem"
        self.schematic_setup(name)
        self._check_incorrect(name)
    def test_schematic_reset(self):
        name = "schematic_problem"
        self.schematic_setup(name)
        self._check_ireset(name)
    def test_check_function_correct(self):
        name = 'cfn_problem'
        self.custom_response_setup(name)
        self._check_correct(name)
    def test_check_function_incorrect(self):
        name = 'cfn_problem'
        self.custom_response_setup(name)
        self._check_incorrect(name)
    def test_check_function_reset(self):
        name = 'cfn_problem'
        self.custom_response_setup(name)
        self._check_ireset(name)
    def test_computed_correct(self):
        name = 'computed_answer'
        self.computed_answer_setup(name)
        self._check_correct(name)
    def test_computed_incorrect(self):
        name = 'computed_answer'
        self.computed_answer_setup(name)
        self._check_incorrect(name)
    def test_computed_reset(self):
        name = 'computed_answer'
        self.computed_answer_setup(name)
        self._check_ireset(name)
class TestAnswerDistributions(TestSubmittingProblems):
    """Check that we can pull answer distributions for problems."""
    def setUp(self):
        """Set up a simple course with four problems."""
        super(TestAnswerDistributions, self).setUp()
        self.homework = self.add_graded_section_to_course('homework')
        self.add_dropdown_to_section(self.homework.location, 'p1', 1)
        self.add_dropdown_to_section(self.homework.location, 'p2', 1)
        self.add_dropdown_to_section(self.homework.location, 'p3', 1)
        self.refresh_course()
    def test_empty(self):
        # Just make sure we can process this without errors.
        empty_distribution = grades.answer_distributions(self.course.id)
        self.assertFalse(empty_distribution)  # should be empty
    def test_one_student(self):
        # Basic test to make sure we have simple behavior right for a student
        # Throw in a non-ASCII answer
        self.submit_question_answer('p1', {'2_1': u'ⓤⓝⓘⓒⓞⓓⓔ'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        distributions = grades.answer_distributions(self.course.id)
        self.assertEqual(
            distributions,
            {
                ('p1', 'p1', 'i4x-MITx-100-problem-p1_2_1'): {
                    u'ⓤⓝⓘⓒⓞⓓⓔ': 1
                },
                ('p2', 'p2', 'i4x-MITx-100-problem-p2_2_1'): {
                    'Correct': 1
                }
            }
        )
    def test_multiple_students(self):
        # Our test class is based around making requests for a particular user,
        # so we're going to cheat by creating another user and copying and
        # modifying StudentModule entries to make them from other users. It's
        # a little hacky, but it seemed the simpler way to do this.
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        self.submit_question_answer('p2', {'2_1': u'Incorrect'})
        self.submit_question_answer('p3', {'2_1': u'Correct'})
        # Make the above submissions owned by user2
        user2 = UserFactory.create()
        problems = StudentModule.objects.filter(
            course_id=self.course.id,
            student=self.student_user
        )
        for problem in problems:
            problem.student_id = user2.id
            problem.save()
        # Now make more submissions by our original user
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        self.submit_question_answer('p2', {'2_1': u'Correct'})
        self.assertEqual(
            grades.answer_distributions(self.course.id),
            {
                ('p1', 'p1', 'i4x-MITx-100-problem-p1_2_1'): {
                    'Correct': 2
                },
                ('p2', 'p2', 'i4x-MITx-100-problem-p2_2_1'): {
                    'Correct': 1,
                    'Incorrect': 1
                },
                ('p3', 'p3', 'i4x-MITx-100-problem-p3_2_1'): {
                    'Correct': 1
                }
            }
        )
    def test_other_data_types(self):
        # We'll submit one problem, and then muck with the student_answers
        # dict inside its state to try different data types (str, int, float,
        # none)
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        # Now fetch the state entry for that problem.
        student_module = StudentModule.objects.get(
            course_id=self.course.id,
            student=self.student_user
        )
        # Each value should come back stringified in the distribution.
        for val in ('Correct', True, False, 0, 0.0, 1, 1.0, None):
            state = json.loads(student_module.state)
            state["student_answers"]['i4x-MITx-100-problem-p1_2_1'] = val
            student_module.state = json.dumps(state)
            student_module.save()
            self.assertEqual(
                grades.answer_distributions(self.course.id),
                {
                    ('p1', 'p1', 'i4x-MITx-100-problem-p1_2_1'): {
                        str(val): 1
                    },
                }
            )
    def test_missing_content(self):
        # If there's a StudentModule entry for content that no longer exists,
        # we just quietly ignore it (because we can't display a meaningful url
        # or name for it).
        self.submit_question_answer('p1', {'2_1': 'Incorrect'})
        # Now fetch the state entry for that problem and alter it so it points
        # to a non-existent problem.
        student_module = StudentModule.objects.get(
            course_id=self.course.id,
            student=self.student_user
        )
        student_module.module_state_key = student_module.module_state_key.replace(
            name=student_module.module_state_key.name + "_fake"
        )
        student_module.save()
        # It should be empty (ignored)
        empty_distribution = grades.answer_distributions(self.course.id)
        self.assertFalse(empty_distribution)  # should be empty
    def test_broken_state(self):
        # Missing or broken state for a problem should be skipped without
        # causing the whole answer_distribution call to explode.
        # Submit p1
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        # Now fetch the StudentModule entry for p1 so we can corrupt its state
        prb1 = StudentModule.objects.get(
            course_id=self.course.id,
            student=self.student_user
        )
        # Submit p2
        self.submit_question_answer('p2', {'2_1': u'Incorrect'})
        for new_p1_state in ('{"student_answers": {}}', "invalid json!", None):
            prb1.state = new_p1_state
            prb1.save()
            # p1 won't show up, but p2 should still work
            self.assertEqual(
                grades.answer_distributions(self.course.id),
                {
                    ('p2', 'p2', 'i4x-MITx-100-problem-p2_2_1'): {
                        'Incorrect': 1
                    },
                }
            )
| agpl-3.0 |
dockerera/func | func/overlord/cmd_modules/dumpconfig.py | 3 | 1494 | """
Dump func-client/overlord config information
Copyright 2011, Red Hat, Inc
see AUTHORS
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import optparse
import os
from func.overlord import base_command
from certmaster import certs
class DumpConfig(base_command.BaseCommand):
    """
    Overlord sub-command that prints the resolved func client/overlord
    configuration: config file contents, certificate details, timeouts,
    loaded command modules, and the minion map.

    NOTE(review): this module is Python 2 (print statements); keep any
    edits py2-compatible.
    """
    name = "dump_config"
    usage = "dump func-client/overlord config"
    summary = usage
    def do(self, args):
        """
        Print the overlord's configuration to stdout.

        Uses the server spec from the parent command to build the
        overlord object before dumping its settings.
        """
        self.server_spec = self.parentCommand.server_spec
        self.getOverlord()
        print 'config:'
        for l in str(self.overlord_obj.config).split('\n'):
            print '\t' + l
        print ''
        print 'key file: %s' % self.overlord_obj.key
        # Parse the certificate once so we can show both its DN and hash.
        cert = certs.retrieve_cert_from_file(self.overlord_obj.cert)
        print 'cert file: %s' % self.overlord_obj.cert
        print 'ca file: %s' % self.overlord_obj.ca
        print 'cert dn: %s' % cert.get_subject().CN
        print 'certificate hash: %s' % cert.subject_name_hash()
        print 'timeout: %s' % self.overlord_obj.timeout
        print 'forks: %s' % self.overlord_obj.nforks
        print 'cmd modules loaded:'
        for mn in sorted(self.overlord_obj.methods.keys()):
            print '\t' + mn
        print 'minion map:'
        print self.overlord_obj.minionmap
| gpl-2.0 |
atmark-techno/atmark-dist | user/python/Lib/plat-sunos5/TERMIOS.py | 4 | 7177 | # Generated by h2py from /usr/include/termios.h
# Included from sys/termios.h
# Included from sys/feature_tests.h
_POSIX_C_SOURCE = 1
# Included from sys/ttydev.h
B0 = 0
B50 = 1
B75 = 2
B110 = 3
B134 = 4
B150 = 5
B200 = 6
B300 = 7
B600 = 8
B1200 = 9
B1800 = 10
B2400 = 11
B4800 = 12
B9600 = 13
B19200 = 14
B38400 = 15
EXTA = 14
EXTB = 15
# Included from sys/types.h
# Included from sys/isa_defs.h
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 4
_DOUBLE_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 4
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 4
_ALIGNMENT_REQUIRED = 0
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 16
_ALIGNMENT_REQUIRED = 1
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 8
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 8
_ALIGNMENT_REQUIRED = 1
# Included from sys/machtypes.h
SHRT_MIN = -32768
SHRT_MAX = 32767
INT_MIN = (-2147483647-1)
INT_MAX = 2147483647
LONG_MIN = (-2147483647-1)
LONG_MAX = 2147483647
P_MYID = (-1)
# Included from sys/select.h
# Included from sys/time.h
DST_NONE = 0
DST_USA = 1
DST_AUST = 2
DST_WET = 3
DST_MET = 4
DST_EET = 5
DST_CAN = 6
DST_GB = 7
DST_RUM = 8
DST_TUR = 9
DST_AUSTALT = 10
ITIMER_REAL = 0
ITIMER_VIRTUAL = 1
ITIMER_PROF = 2
ITIMER_REALPROF = 3
SEC = 1
MILLISEC = 1000
MICROSEC = 1000000
NANOSEC = 1000000000
# h2py emitted both #ifdef branches of the C header here; in Python the
# last assignment wins, so CLOCK_REALTIME ends up as __CLOCK_REALTIME0 (0).
__CLOCK_REALTIME0 = 0
CLOCK_VIRTUAL = 1
CLOCK_PROF = 2
__CLOCK_REALTIME3 = 3
CLOCK_REALTIME = __CLOCK_REALTIME3
CLOCK_REALTIME = __CLOCK_REALTIME0
TIMER_RELTIME = 0x0
TIMER_ABSTIME = 0x1
# Included from sys/mutex.h
# Included from sys/dki_lkinfo.h
# Included from sys/dl.h
NOSTATS = 1
LSB_NLKDS = 91
# Mechanical h2py translation of the C macro MUTEX_HELD; mutex_owned is not
# defined in this module, so calling this raises NameError — kept only for
# fidelity with the generated header. Do not hand-edit this generated file.
def MUTEX_HELD(x): return (mutex_owned(x))
# Included from time.h
NULL = 0
CLOCKS_PER_SEC = 1000000
# Included from sys/siginfo.h
SIGEV_NONE = 1
SIGEV_SIGNAL = 2
SIGEV_THREAD = 3
SI_NOINFO = 32767
SI_USER = 0
SI_LWP = (-1)
SI_QUEUE = (-2)
SI_TIMER = (-3)
SI_ASYNCIO = (-4)
SI_MESGQ = (-5)
# Included from sys/machsig.h
ILL_ILLOPC = 1
ILL_ILLOPN = 2
ILL_ILLADR = 3
ILL_ILLTRP = 4
ILL_PRVOPC = 5
ILL_PRVREG = 6
ILL_COPROC = 7
ILL_BADSTK = 8
NSIGILL = 8
EMT_TAGOVF = 1
NSIGEMT = 1
FPE_INTDIV = 1
FPE_INTOVF = 2
FPE_FLTDIV = 3
FPE_FLTOVF = 4
FPE_FLTUND = 5
FPE_FLTRES = 6
FPE_FLTINV = 7
FPE_FLTSUB = 8
NSIGFPE = 8
SEGV_MAPERR = 1
SEGV_ACCERR = 2
NSIGSEGV = 2
BUS_ADRALN = 1
BUS_ADRERR = 2
BUS_OBJERR = 3
NSIGBUS = 3
TRAP_BRKPT = 1
TRAP_TRACE = 2
NSIGTRAP = 2
CLD_EXITED = 1
CLD_KILLED = 2
CLD_DUMPED = 3
CLD_TRAPPED = 4
CLD_STOPPED = 5
CLD_CONTINUED = 6
NSIGCLD = 6
POLL_IN = 1
POLL_OUT = 2
POLL_MSG = 3
POLL_ERR = 4
POLL_PRI = 5
POLL_HUP = 6
NSIGPOLL = 6
PROF_SIG = 1
NSIGPROF = 1
SI_MAXSZ = 128
def SI_CANQUEUE(c): return ((c) <= SI_QUEUE)
# select() descriptor-set capacity, bits per byte, and the "disabled"
# terminal control character value.
FD_SETSIZE = 1024
NBBY = 8
_POSIX_VDISABLE = 0
def CTRL(c): return ((c)&037)
# termio/termios constants: control-character indices, default control
# characters, input/output/control/local mode flags, ioctl request numbers
# and baud-rate codes.  Values are h2py output from the Solaris headers and
# use Python 2 octal literals (e.g. 0177); do not hand-edit.
IBSHIFT = 16
NCC = 8
NCCS = 19
# c_cc[] subscript names.
VINTR = 0
VQUIT = 1
VERASE = 2
VKILL = 3
VEOF = 4
VEOL = 5
VEOL2 = 6
VMIN = 4
VTIME = 5
VSWTCH = 7
VSTART = 8
VSTOP = 9
VSUSP = 10
VDSUSP = 11
VREPRINT = 12
VDISCARD = 13
VWERASE = 14
VLNEXT = 15
VCEOF = NCC
VCEOL = (NCC + 1)
# Default control-character values (CTRL() is defined above).
CNUL = 0
CDEL = 0177
CESC = ord('\\')
CINTR = CTRL(ord('c'))
CQUIT = 034
CERASE = 0177
CKILL = CTRL(ord('u'))
CEOT = 04
CEOL = 0
CEOL2 = 0
CEOF = 04
CSTART = 021
CSTOP = 023
CSWTCH = 032
CNSWTCH = 0
CSUSP = CTRL(ord('z'))
CDSUSP = CTRL(ord('y'))
CRPRNT = CTRL(ord('r'))
CFLUSH = CTRL(ord('o'))
CWERASE = CTRL(ord('w'))
CLNEXT = CTRL(ord('v'))
# c_iflag (input mode) bits.
IGNBRK = 0000001
BRKINT = 0000002
IGNPAR = 0000004
PARMRK = 0000010
INPCK = 0000020
ISTRIP = 0000040
INLCR = 0000100
IGNCR = 0000200
ICRNL = 0000400
IUCLC = 0001000
IXON = 0002000
IXANY = 0004000
IXOFF = 0010000
IMAXBEL = 0020000
DOSMODE = 0100000
# c_oflag (output mode) bits and delay masks.
OPOST = 0000001
OLCUC = 0000002
ONLCR = 0000004
OCRNL = 0000010
ONOCR = 0000020
ONLRET = 0000040
OFILL = 0000100
OFDEL = 0000200
NLDLY = 0000400
NL0 = 0
NL1 = 0000400
CRDLY = 0003000
CR0 = 0
CR1 = 0001000
CR2 = 0002000
CR3 = 0003000
TABDLY = 0014000
TAB0 = 0
TAB1 = 0004000
TAB2 = 0010000
TAB3 = 0014000
XTABS = 0014000
BSDLY = 0020000
BS0 = 0
BS1 = 0020000
VTDLY = 0040000
VT0 = 0
VT1 = 0040000
FFDLY = 0100000
FF0 = 0
FF1 = 0100000
PAGEOUT = 0200000
WRAP = 0400000
# c_cflag (control mode) bits: baud mask, character size, parity, flow control.
CBAUD = 0000017
CSIZE = 0000060
CS5 = 0
CS6 = 0000020
CS7 = 0000040
CS8 = 0000060
CSTOPB = 0000100
CREAD = 0000200
PARENB = 0000400
PARODD = 0001000
HUPCL = 0002000
CLOCAL = 0004000
RCV1EN = 0010000
XMT1EN = 0020000
LOBLK = 0040000
XCLUDE = 0100000
CRTSXOFF = 010000000000
CRTSCTS = 020000000000
CIBAUD = 03600000
PAREXT = 04000000
CBAUDEXT = 010000000
CIBAUDEXT = 020000000
CRTS_IFLOW = 010000000000
CCTS_OFLOW = 020000000000
# c_lflag (local mode) bits.
ISIG = 0000001
ICANON = 0000002
XCASE = 0000004
ECHO = 0000010
ECHOE = 0000020
ECHOK = 0000040
ECHONL = 0000100
NOFLSH = 0000200
TOSTOP = 0000400
ECHOCTL = 0001000
ECHOPRT = 0002000
ECHOKE = 0004000
DEFECHO = 0010000
FLUSHO = 0020000
PENDIN = 0040000
IEXTEN = 0100000
# Terminal ioctl request numbers ('T' group).
_TIOC = (ord('T')<<8)
TIOC = _TIOC
TCGETA = (_TIOC|1)
TCSETA = (_TIOC|2)
TCSETAW = (_TIOC|3)
TCSETAF = (_TIOC|4)
TCSBRK = (_TIOC|5)
TCXONC = (_TIOC|6)
TCFLSH = (_TIOC|7)
TIOCKBON = (_TIOC|8)
TIOCKBOF = (_TIOC|9)
KBENABLED = (_TIOC|10)
IOCTYPE = 0xff00
TCDSET = (_TIOC|32)
RTS_TOG = (_TIOC|33)
TIOCGWINSZ = (_TIOC|104)
TIOCSWINSZ = (_TIOC|103)
TIOCGSOFTCAR = (_TIOC|105)
TIOCSSOFTCAR = (_TIOC|106)
TCGETS = (_TIOC|13)
TCSETS = (_TIOC|14)
TCSANOW = (_TIOC|14)
TCSETSW = (_TIOC|15)
TCSADRAIN = (_TIOC|15)
TCSETSF = (_TIOC|16)
TCSAFLUSH = (_TIOC|16)
# tcflush()/tcflow() argument values.
TCIFLUSH = 0
TCOFLUSH = 1
TCIOFLUSH = 2
TCOOFF = 0
TCOON = 1
TCIOFF = 2
TCION = 3
# BSD-compatibility ioctl request numbers ('t' group).
tIOC = (ord('t')<<8)
TIOCGETD = (tIOC|0)
TIOCSETD = (tIOC|1)
TIOCHPCL = (tIOC|2)
TIOCGETP = (tIOC|8)
TIOCSETP = (tIOC|9)
TIOCSETN = (tIOC|10)
TIOCEXCL = (tIOC|13)
TIOCNXCL = (tIOC|14)
TIOCFLUSH = (tIOC|16)
TIOCSETC = (tIOC|17)
TIOCGETC = (tIOC|18)
TIOCLBIS = (tIOC|127)
TIOCLBIC = (tIOC|126)
TIOCLSET = (tIOC|125)
TIOCLGET = (tIOC|124)
TIOCSBRK = (tIOC|123)
TIOCCBRK = (tIOC|122)
TIOCSDTR = (tIOC|121)
TIOCCDTR = (tIOC|120)
TIOCSLTC = (tIOC|117)
TIOCGLTC = (tIOC|116)
TIOCOUTQ = (tIOC|115)
TIOCNOTTY = (tIOC|113)
TIOCSTOP = (tIOC|111)
TIOCSTART = (tIOC|110)
TIOCGPGRP = (tIOC|20)
TIOCSPGRP = (tIOC|21)
TIOCGSID = (tIOC|22)
TIOCSSID = (tIOC|24)
TIOCSTI = (tIOC|23)
TIOCMSET = (tIOC|26)
TIOCMBIS = (tIOC|27)
TIOCMBIC = (tIOC|28)
TIOCMGET = (tIOC|29)
# Modem-line bits for the TIOCM* ioctls.
TIOCM_LE = 0001
TIOCM_DTR = 0002
TIOCM_RTS = 0004
TIOCM_ST = 0010
TIOCM_SR = 0020
TIOCM_CTS = 0040
TIOCM_CAR = 0100
TIOCM_CD = TIOCM_CAR
TIOCM_RNG = 0200
TIOCM_RI = TIOCM_RNG
TIOCM_DSR = 0400
TIOCREMOTE = (tIOC|30)
TIOCSIGNAL = (tIOC|31)
# Line-discipline ioctl request numbers ('D' group).
LDIOC = (ord('D')<<8)
LDOPEN = (LDIOC|0)
LDCLOSE = (LDIOC|1)
LDCHG = (LDIOC|2)
LDGETT = (LDIOC|8)
LDSETT = (LDIOC|9)
LDSMAP = (LDIOC|110)
LDGMAP = (LDIOC|111)
LDNMAP = (LDIOC|112)
LDEMAP = (LDIOC|113)
LDDMAP = (LDIOC|114)
DIOC = (ord('d')<<8)
DIOCGETP = (DIOC|8)
DIOCSETP = (DIOC|9)
FIORDCHK = ((ord('f')<<8)|3)
# Baud-rate codes for the CBAUD field.
B0 = 0
B50 = 1
B75 = 2
B110 = 3
B134 = 4
B150 = 5
B200 = 6
B300 = 7
B600 = 8
B1200 = 9
B1800 = 10
B2400 = 11
B4800 = 12
B9600 = 13
B19200 = 14
B38400 = 15
B57600 = 16
B76800 = 17
B115200 = 18
B153600 = 19
B230400 = 20
B307200 = 21
B460800 = 22
| gpl-2.0 |
christi3k/zulip | zerver/tests/test_realm.py | 1 | 10099 | from __future__ import absolute_import
from __future__ import print_function
import ujson
from django.http import HttpResponse
from mock import patch
from typing import Any, Dict, List, Text, Union
from zerver.lib.actions import (
do_change_is_admin,
do_set_realm_property,
do_deactivate_realm,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import tornado_redirected_to_list
from zerver.models import get_realm, get_user_profile_by_email, Realm
class RealmTest(ZulipTestCase):
    """Tests for realm property updates, the user-profile cache, realm
    deactivation, and the /json/realm endpoint's permission checks."""
    def assert_user_profile_cache_gets_new_name(self, email, new_realm_name):
        # type: (Text, Text) -> None
        # Helper: fetch the (cached) profile and check the realm name on it.
        user_profile = get_user_profile_by_email(email)
        self.assertEqual(user_profile.realm.name, new_realm_name)
    def test_do_set_realm_name_caching(self):
        # type: () -> None
        """The main complicated thing about setting realm names is fighting the
        cache, and we start by populating the cache for Hamlet, and we end
        by checking the cache to ensure that the new value is there."""
        # example_user() populates the user-profile cache as a side effect.
        self.example_user('hamlet')
        realm = get_realm('zulip')
        new_name = u'Zed You Elle Eye Pea'
        do_set_realm_property(realm, 'name', new_name)
        self.assertEqual(get_realm(realm.string_id).name, new_name)
        self.assert_user_profile_cache_gets_new_name('hamlet@zulip.com', new_name)
    def test_update_realm_name_events(self):
        # type: () -> None
        # Verify that renaming a realm publishes a realm/update event.
        realm = get_realm('zulip')
        new_name = u'Puliz'
        events = [] # type: List[Dict[str, Any]]
        with tornado_redirected_to_list(events):
            do_set_realm_property(realm, 'name', new_name)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='name',
            value=new_name,
        ))
    def test_update_realm_description_events(self):
        # type: () -> None
        # Same as above, but for the 'description' property.
        realm = get_realm('zulip')
        new_description = u'zulip dev group'
        events = [] # type: List[Dict[str, Any]]
        with tornado_redirected_to_list(events):
            do_set_realm_property(realm, 'description', new_description)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='description',
            value=new_description,
        ))
    def test_update_realm_description(self):
        # type: () -> None
        # End-to-end: PATCH /json/realm as an admin updates the description
        # in the database and publishes the corresponding event.
        email = 'iago@zulip.com'
        self.login(email)
        realm = get_realm('zulip')
        new_description = u'zulip dev group'
        data = dict(description=ujson.dumps(new_description))
        events = [] # type: List[Dict[str, Any]]
        with tornado_redirected_to_list(events):
            result = self.client_patch('/json/realm', data)
            self.assert_json_success(result)
            realm = get_realm('zulip')
            self.assertEqual(realm.description, new_description)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='description',
            value=new_description,
        ))
    def test_realm_description_length(self):
        # type: () -> None
        # Descriptions above the length limit are rejected and not persisted.
        new_description = u'A' * 1001
        data = dict(description=ujson.dumps(new_description))
        # create an admin user
        email = 'iago@zulip.com'
        self.login(email)
        result = self.client_patch('/json/realm', data)
        self.assert_json_error(result, 'Realm description is too long.')
        realm = get_realm('zulip')
        self.assertNotEqual(realm.description, new_description)
    def test_admin_restrictions_for_changing_realm_name(self):
        # type: () -> None
        # A non-admin must not be able to rename the realm.
        new_name = 'Mice will play while the cat is away'
        user_profile = self.example_user('othello')
        email = user_profile.email
        self.login(email)
        do_change_is_admin(user_profile, False)
        req = dict(name=ujson.dumps(new_name))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Must be a realm administrator')
    def test_unauthorized_name_change(self):
        # type: () -> None
        # With name_changes_disabled set, a user's full-name change request
        # is silently ignored (HTTP 200, empty success response).
        data = {'full_name': 'Sir Hamlet'}
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        self.login(email)
        do_set_realm_property(user_profile.realm, 'name_changes_disabled', True)
        url = '/json/settings/change'
        result = self.client_post(url, data)
        self.assertEqual(result.status_code, 200)
        # Since the setting fails silently, no message is returned
        self.assert_in_response("", result)
    def test_do_deactivate_realm_clears_user_realm_cache(self):
        # type: () -> None
        """The main complicated thing about deactivating realm names is
        updating the cache, and we start by populating the cache for
        Hamlet, and we end by checking the cache to ensure that his
        realm appears to be deactivated. You can make this test fail
        by disabling cache.flush_realm()."""
        self.example_user('hamlet')
        realm = get_realm('zulip')
        do_deactivate_realm(realm)
        user = self.example_user('hamlet')
        self.assertTrue(user.realm.deactivated)
    def test_do_deactivate_realm_on_deactived_realm(self):
        # type: () -> None
        """Ensure early exit is working in realm deactivation"""
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)
        # Deactivating twice must be a no-op, not an error.
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)
    def test_change_realm_default_language(self):
        # type: () -> None
        # Valid language codes are saved; invalid codes are rejected with a
        # validation error and the stored value is left unchanged.
        new_lang = "de"
        realm = get_realm('zulip')
        self.assertNotEqual(realm.default_language, new_lang)
        # we need an admin user.
        email = 'iago@zulip.com'
        self.login(email)
        req = dict(default_language=ujson.dumps(new_lang))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.default_language, new_lang)
        # Test to make sure that when invalid languages are passed
        # as the default realm language, correct validation error is
        # raised and the invalid language is not saved in db
        invalid_lang = "invalid_lang"
        req = dict(default_language=ujson.dumps(invalid_lang))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
        realm = get_realm('zulip')
        self.assertNotEqual(realm.default_language, invalid_lang)
class RealmAPITest(ZulipTestCase):
    """Round-trip tests for every realm property exposed through the
    PATCH /json/realm API."""
    def setUp(self):
        # type: () -> None
        # Log in as Cordelia and promote her to admin so the PATCH calls
        # below are authorized.
        user_profile = self.example_user('cordelia')
        email = user_profile.email
        self.login(email)
        do_change_is_admin(user_profile, True)
    def set_up_db(self, attr, value):
        # type: (str, Any) -> None
        # Seed the realm attribute directly in the database, bypassing the API.
        realm = get_realm('zulip')
        setattr(realm, attr, value)
        realm.save()
    def update_with_api(self, name, value):
        # type: (str, Union[Text, int, bool]) -> Realm
        # Update one property via the API and return a freshly loaded Realm.
        result = self.client_patch('/json/realm', {name: ujson.dumps(value)})
        self.assert_json_success(result)
        return get_realm('zulip') # refresh data
    def do_test_realm_update_api(self, name):
        # type: (str) -> None
        """Test updating realm properties.
        If new realm properties have been added to the Realm model but the
        test_values dict below has not been updated, this will raise an
        assertion error.
        """
        bool_tests = [False, True] # type: List[bool]
        # Two distinct values per property so we can flip back and forth and
        # observe both transitions.
        test_values = dict(
            add_emoji_by_admins_only=bool_tests,
            create_stream_by_admins_only=bool_tests,
            default_language=[u'de', u'en'],
            description=[u'Realm description', u'New description'],
            email_changes_disabled=bool_tests,
            invite_required=bool_tests,
            invite_by_admins_only=bool_tests,
            inline_image_preview=bool_tests,
            inline_url_embed_preview=bool_tests,
            message_retention_days=[10, 20],
            name=[u'Zulip', u'New Name'],
            name_changes_disabled=bool_tests,
            restricted_to_domain=bool_tests,
            waiting_period_threshold=[10, 20],
        ) # type: Dict[str, Any]
        vals = test_values.get(name)
        if vals is None:
            raise AssertionError('No test created for %s' % (name))
        # Seed with vals[0], flip to vals[1] via the API, then flip back.
        self.set_up_db(name, vals[0])
        realm = self.update_with_api(name, vals[1])
        self.assertEqual(getattr(realm, name), vals[1])
        realm = self.update_with_api(name, vals[0])
        self.assertEqual(getattr(realm, name), vals[0])
    def test_update_realm_properties(self):
        # type: () -> None
        # Exercise every property registered on the Realm model.
        for prop in Realm.property_types:
            self.do_test_realm_update_api(prop)
    def test_update_realm_allow_message_editing(self):
        # type: () -> None
        """Tests updating the realm property 'allow_message_editing'."""
        # The edit-limit seconds must persist independently of the boolean.
        self.set_up_db('allow_message_editing', False)
        self.set_up_db('message_content_edit_limit_seconds', 0)
        realm = self.update_with_api('allow_message_editing', True)
        realm = self.update_with_api('message_content_edit_limit_seconds', 100)
        self.assertEqual(realm.allow_message_editing, True)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        realm = self.update_with_api('allow_message_editing', False)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        realm = self.update_with_api('message_content_edit_limit_seconds', 200)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 200)
| apache-2.0 |
dkodnik/arp | addons/account/wizard/account_unreconcile.py | 385 | 2086 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_unreconcile(osv.osv_memory):
    """Wizard removing the reconciliation from the selected move lines."""
    _name = "account.unreconcile"
    _description = "Account Unreconcile"

    def trans_unrec(self, cr, uid, ids, context=None):
        """Unreconcile the account.move.line ids in context['active_ids'],
        then close the wizard window."""
        if context is None:
            context = {}
        active_ids = context.get('active_ids', False)
        if active_ids:
            move_line_obj = self.pool.get('account.move.line')
            move_line_obj._remove_move_reconcile(cr, uid, active_ids, context=context)
        return {'type': 'ir.actions.act_window_close'}
class account_unreconcile_reconcile(osv.osv_memory):
    """Wizard deleting the selected reconciliation records entirely."""
    _name = "account.unreconcile.reconcile"
    _description = "Account Unreconcile Reconcile"

    def trans_unrec_reconcile(self, cr, uid, ids, context=None):
        """Unlink the account.move.reconcile records in context['active_ids'],
        then close the wizard window.

        Bug fix: the original read ``context['active_ids']`` directly, which
        raised ``KeyError`` whenever ``active_ids`` was absent -- in
        particular, a ``None`` context had just been defaulted to ``{}`` two
        lines earlier, guaranteeing the crash.  Use ``.get`` like the sibling
        account_unreconcile wizard, so the wizard degrades to a no-op.
        """
        obj_move_reconcile = self.pool.get('account.move.reconcile')
        if context is None:
            context = {}
        rec_ids = context.get('active_ids', False)
        if rec_ids:
            obj_move_reconcile.unlink(cr, uid, rec_ids, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
blighj/django | tests/model_forms/test_uuid.py | 90 | 1139 | from django import forms
from django.core.exceptions import ValidationError
from django.test import TestCase
from .models import UUIDPK
class UUIDPKForm(forms.ModelForm):
    # ModelForm over the UUIDPK test model (imported from .models), exposing
    # every model field; used below to exercise save() failure messages.
    class Meta:
        model = UUIDPK
        fields = '__all__'
class ModelFormBaseTest(TestCase):
    """Tests for ModelForm behavior when the underlying model uses a UUID
    primary key."""
    def test_create_save_error(self):
        # Saving an invalid unbound-to-instance form must raise ValueError
        # with the "could not be created" message.
        form = UUIDPKForm({})
        self.assertFalse(form.is_valid())
        msg = "The UUIDPK could not be created because the data didn't validate."
        with self.assertRaisesMessage(ValueError, msg):
            form.save()
    def test_update_save_error(self):
        # Same as above, but editing an existing instance yields the
        # "could not be changed" message instead.
        obj = UUIDPK.objects.create(name='foo')
        form = UUIDPKForm({}, instance=obj)
        self.assertFalse(form.is_valid())
        msg = "The UUIDPK could not be changed because the data didn't validate."
        with self.assertRaisesMessage(ValueError, msg):
            form.save()
    def test_model_multiple_choice_field_uuid_pk(self):
        # An invalid UUID string in a ModelMultipleChoiceField must raise a
        # ValidationError mentioning the bad value.
        f = forms.ModelMultipleChoiceField(UUIDPK.objects.all())
        with self.assertRaisesMessage(ValidationError, "'invalid_uuid' is not a valid UUID."):
            f.clean(['invalid_uuid'])
| bsd-3-clause |
raccoongang/edx-platform | openedx/core/djangoapps/lang_pref/middleware.py | 2 | 3606 | """
Middleware for Language Preferences
"""
from django.conf import settings
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation.trans_real import parse_accept_lang_header
from openedx.core.djangoapps.lang_pref import COOKIE_DURATION, LANGUAGE_HEADER, LANGUAGE_KEY
from openedx.core.djangoapps.user_api.errors import UserAPIInternalError, UserAPIRequestError
from openedx.core.djangoapps.user_api.preferences.api import (
delete_user_preference,
get_user_preference,
set_user_preference
)
class LanguagePreferenceMiddleware(object):
    """
    Middleware for user preferences.
    Ensures that, once set, a user's preferences are reflected in the page
    whenever they are logged in.
    """
    def process_request(self, request):
        """
        If a user's UserPreference contains a language preference, use the user's preference.
        Save the current language preference cookie as the user's preferred language.
        """
        cookie_lang = request.COOKIES.get(settings.LANGUAGE_COOKIE, None)
        if cookie_lang:
            if request.user.is_authenticated():
                # Persist the cookie value as the logged-in user's preference.
                set_user_preference(request.user, LANGUAGE_KEY, cookie_lang)
            else:
                # Stash it on the request; process_response() persists it
                # after a login happens within this request cycle.
                request._anonymous_user_cookie_lang = cookie_lang
            accept_header = request.META.get(LANGUAGE_HEADER, None)
            if accept_header:
                current_langs = parse_accept_lang_header(accept_header)
                # Promote the cookie_lang over any language currently in the accept header
                current_langs = [(lang, qvalue) for (lang, qvalue) in current_langs if lang != cookie_lang]
                current_langs.insert(0, (cookie_lang, 1))
                accept_header = ",".join("{};q={}".format(lang, qvalue) for (lang, qvalue) in current_langs)
            else:
                accept_header = cookie_lang
            request.META[LANGUAGE_HEADER] = accept_header
            # Allow the new cookie setting to update the language in the user's session
            if LANGUAGE_SESSION_KEY in request.session and request.session[LANGUAGE_SESSION_KEY] != cookie_lang:
                del request.session[LANGUAGE_SESSION_KEY]
    def process_response(self, request, response):
        # If the user is logged in, check for their language preference
        if getattr(request, 'user', None) and request.user.is_authenticated():
            user_pref = None
            anonymous_cookie_lang = getattr(request, '_anonymous_user_cookie_lang', None)
            if anonymous_cookie_lang:
                # The user logged in during this request: adopt the cookie
                # language saved by process_request() as their preference.
                user_pref = anonymous_cookie_lang
                set_user_preference(request.user, LANGUAGE_KEY, anonymous_cookie_lang)
            else:
                # Get the user's language preference
                try:
                    user_pref = get_user_preference(request.user, LANGUAGE_KEY)
                except (UserAPIRequestError, UserAPIInternalError):
                    # If we can't find the user preferences, then don't modify the cookie
                    pass
            # If set, set the user_pref in the LANGUAGE_COOKIE
            if user_pref:
                response.set_cookie(
                    settings.LANGUAGE_COOKIE,
                    value=user_pref,
                    domain=settings.SESSION_COOKIE_DOMAIN,
                    max_age=COOKIE_DURATION,
                )
            else:
                # No stored preference: remove any stale language cookie.
                response.delete_cookie(
                    settings.LANGUAGE_COOKIE,
                    domain=settings.SESSION_COOKIE_DOMAIN
                )
        return response
| agpl-3.0 |
TheWardoctor/Wardoctors-repo | script.module.schism.common/lib/requests/packages/chardet/charsetprober.py | 3127 | 1902 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
    """Abstract base class for character-set probers.

    Concrete probers override feed()/get_charset_name()/get_confidence();
    this base class supplies the shared state machine plumbing plus byte
    filters used to strip content irrelevant to detection.
    """
    def __init__(self):
        pass

    def reset(self):
        # Return to the initial "still detecting" state.
        self._mState = constants.eDetecting

    def get_charset_name(self):
        # Base class has no charset to report.
        return None

    def feed(self, aBuf):
        pass

    def get_state(self):
        return self._mState

    def get_confidence(self):
        return 0.0

    def filter_high_bit_only(self, aBuf):
        # Collapse every run of ASCII bytes (0x00-0x7F) to a single space,
        # keeping only high-bit bytes for analysis.
        return re.sub(b'([\x00-\x7F])+', b' ', aBuf)

    def filter_without_english_letters(self, aBuf):
        # Collapse every run of Latin letters to a single space.
        return re.sub(b'([A-Za-z])+', b' ', aBuf)

    def filter_with_english_letters(self, aBuf):
        # TODO
        return aBuf
| apache-2.0 |
1d4Nf6/flocker | flocker/route/_logging.py | 15 | 1649 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
from eliot import Field, ActionType
from eliot._validation import ValidationError
from ipaddr import IPv4Address
def _system(name):
return u"flocker:route:" + name
def validate_ipv4_address(value):
    """Eliot ``extraValidator``: accept only ``ipaddr.IPv4Address`` values.

    :raises ValidationError: if ``value`` is not an ``IPv4Address``.
    """
    if not isinstance(value, IPv4Address):
        # NOTE(review): the message hardcodes the field name u"target_ip".
        # That is accurate today because this validator is only attached to
        # TARGET_IP below, but it would mislead if the validator were reused
        # on another field.
        raise ValidationError(
            value,
            u"Field %s requires type to be IPv4Address (not %s)" % (
                u"target_ip", type(value)))
def serialize_ipv4_address(address):
    """Eliot serializer: render an IPv4Address as a (Python 2) unicode
    string for logging."""
    return unicode(address)
# Eliot field definitions shared by the action types below.
TARGET_IP = Field(
    key=u"target_ip",
    serializer=serialize_ipv4_address,
    extraValidator=validate_ipv4_address,
    description=u"The IP address which is the target of a proxy.")

TARGET_PORT = Field.forTypes(
    u"target_port", [int],
    u"The port number which is the target of a proxy.")

ARGV = Field.forTypes(
    u"argv", [list],
    u"The argument list of a child process being executed.")

# Action types logged while Flocker manipulates routing/firewall state.
IPTABLES = ActionType(
    _system(u"iptables"),
    [ARGV],
    [],
    u"An iptables command which Flocker is executing against the system.")

CREATE_PROXY_TO = ActionType(
    _system(u"create_proxy_to"),
    [TARGET_IP, TARGET_PORT],
    [],
    U"Flocker is creating a new proxy.")

OPEN_PORT = ActionType(
    _system(u"open_port"),
    [TARGET_PORT],
    [],
    U"Flocker is opening a firewall port.")

DELETE_PROXY = ActionType(
    _system(u"delete_proxy"),
    [TARGET_IP, TARGET_PORT],
    [],
    u"Flocker is deleting an existing proxy.")

DELETE_OPEN_PORT = ActionType(
    _system(u"delete_open_port"),
    [TARGET_PORT],
    [],
    # Fix: original description read "Flocker is close a firewall port."
    u"Flocker is closing a firewall port.")
| apache-2.0 |
tanmaykm/edx-platform | common/lib/xmodule/xmodule/lti_2_util.py | 16 | 16182 | # pylint: disable=attribute-defined-outside-init
"""
A mixin class for LTI 2.0 functionality. This is really just done to refactor the code to
keep the LTIModule class from getting too big
"""
import json
import re
import mock
import urllib
import hashlib
import base64
import logging
from webob import Response
from xblock.core import XBlock
from oauthlib.oauth1 import Client
log = logging.getLogger(__name__)
# Matches the trailing "user/<anon_id>" component of the LTI 2.0 REST
# endpoint URL; the anon_id group is the anonymized student identifier.
LTI_2_0_REST_SUFFIX_PARSER = re.compile(r"^user/(?P<anon_id>\w+)", re.UNICODE)
# Content type mandated by the LTI 2.0 result service specification.
LTI_2_0_JSON_CONTENT_TYPE = 'application/vnd.ims.lis.v2.result+json'
class LTIError(Exception):
    """Raised by LTIModule and LTI20ModuleMixin when an LTI request is
    malformed or fails verification."""
class LTI20ModuleMixin(object):
"""
This class MUST be mixed into LTIModule. It does not do anything on its own. It's just factored
out for modularity.
"""
# LTI 2.0 Result Service Support
    @XBlock.handler
    def lti_2_0_result_rest_handler(self, request, suffix):
        """
        Handler function for LTI 2.0 JSON/REST result service.
        See http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
        An example JSON object:
        {
         "@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
         "@type" : "Result",
         "resultScore" : 0.83,
         "comment" : "This is exceptional work."
        }
        For PUTs, the content type must be "application/vnd.ims.lis.v2.result+json".
        We use the "suffix" parameter to parse out the user from the end of the URL.  An example endpoint url is
        http://localhost:8000/courses/org/num/run/xblock/i4x:;_;_org;_num;_lti;_GUID/handler_noauth/lti_2_0_result_rest_handler/user/<anon_id>
        so suffix is of the form "user/<anon_id>"
        Failures result in 401, 404, or 500s without any body.  Successes result in 200.  Again see
        http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
        (Note: this prevents good debug messages for the client, so we might want to change this, or the spec)
        Arguments:
            request (xblock.django.request.DjangoWebobRequest):  Request object for current HTTP request
            suffix (unicode):  request path after "lti_2_0_result_rest_handler/".  expected to be "user/<anon_id>"
        Returns:
            webob.response:  response to this request.  See above for details.
        """
        # In debug mode, log the Authorization header a correct client would send.
        if self.system.debug:
            self._log_correct_authorization_header(request)
        if not self.accept_grades_past_due and self.is_past_due():
            return Response(status=404)  # have to do 404 due to spec, but 400 is better, with error msg in body
        try:
            anon_id = self.parse_lti_2_0_handler_suffix(suffix)
        except LTIError:
            return Response(status=404)  # 404 because a part of the URL (denoting the anon user id) is invalid
        try:
            self.verify_lti_2_0_result_rest_headers(request, verify_content_type=True)
        except LTIError:
            return Response(status=401)  # Unauthorized in this case.  401 is right
        # Map the anonymized id back to a real user; fail if it is unknown.
        real_user = self.system.get_real_user(anon_id)
        if not real_user:  # that means we can't save to database, as we do not have real user id.
            msg = "[LTI]: Real user not found against anon_id: {}".format(anon_id)
            log.info(msg)
            return Response(status=404)  # have to do 404 due to spec, but 400 is better, with error msg in body
        # Dispatch on HTTP verb per the LTI 2.0 Result service contract.
        if request.method == "PUT":
            return self._lti_2_0_result_put_handler(request, real_user)
        elif request.method == "GET":
            return self._lti_2_0_result_get_handler(request, real_user)
        elif request.method == "DELETE":
            return self._lti_2_0_result_del_handler(request, real_user)
        else:
            return Response(status=404)  # have to do 404 due to spec, but 405 is better, with error msg in body
    def _log_correct_authorization_header(self, request):
        """
        Helper function that logs proper HTTP Authorization header for a given request
        Used only in debug situations, this logs the correct Authorization header based on
        the request header and body according to OAuth 1 Body signing
        Arguments:
            request (xblock.django.request.DjangoWebobRequest):  Request object to log Authorization header for
        Returns:
            nothing
        """
        # OAuth body signing: the body hash is the base64 of the SHA-1 of the body.
        sha1 = hashlib.sha1()
        sha1.update(request.body)
        oauth_body_hash = unicode(base64.b64encode(sha1.digest()))
        log.debug("[LTI] oauth_body_hash = {}".format(oauth_body_hash))
        client_key, client_secret = self.get_client_key_secret()
        client = Client(client_key, client_secret)
        # Build a mock request mirroring the real one (empty body, since the
        # body is represented via oauth_body_hash) for signature generation.
        mock_request = mock.Mock(
            uri=unicode(urllib.unquote(request.url)),
            headers=request.headers,
            body=u"",
            decoded_body=u"",
            http_method=unicode(request.method),
        )
        params = client.get_oauth_params(mock_request)
        mock_request.oauth_params = params
        mock_request.oauth_params.append((u'oauth_body_hash', oauth_body_hash))
        # Sign after the body hash is included so the hash is covered by the signature.
        sig = client.get_oauth_signature(mock_request)
        mock_request.oauth_params.append((u'oauth_signature', sig))
        # _render is private oauthlib API, used here only to format the header.
        _, headers, _ = client._render(mock_request)  # pylint: disable=protected-access
        log.debug("\n\n#### COPY AND PASTE AUTHORIZATION HEADER ####\n{}\n####################################\n\n"
                  .format(headers['Authorization']))
def parse_lti_2_0_handler_suffix(self, suffix):
"""
Parser function for HTTP request path suffixes
parses the suffix argument (the trailing parts of the URL) of the LTI2.0 REST handler.
must be of the form "user/<anon_id>". Returns anon_id if match found, otherwise raises LTIError
Arguments:
suffix (unicode): suffix to parse
Returns:
unicode: anon_id if match found
Raises:
LTIError if suffix cannot be parsed or is not in its expected form
"""
if suffix:
match_obj = LTI_2_0_REST_SUFFIX_PARSER.match(suffix)
if match_obj:
return match_obj.group('anon_id')
# fall-through handles all error cases
msg = "No valid user id found in endpoint URL"
log.info("[LTI]: {}".format(msg))
raise LTIError(msg)
def _lti_2_0_result_get_handler(self, request, real_user): # pylint: disable=unused-argument
"""
Helper request handler for GET requests to LTI 2.0 result endpoint
GET handler for lti_2_0_result. Assumes all authorization has been checked.
Arguments:
request (xblock.django.request.DjangoWebobRequest): Request object (unused)
real_user (django.contrib.auth.models.User): Actual user linked to anon_id in request path suffix
Returns:
webob.response: response to this request, in JSON format with status 200 if success
"""
base_json_obj = {
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type": "Result"
}
self.system.rebind_noauth_module_to_user(self, real_user)
if self.module_score is None: # In this case, no score has been ever set
return Response(json.dumps(base_json_obj), content_type=LTI_2_0_JSON_CONTENT_TYPE)
# Fall through to returning grade and comment
base_json_obj['resultScore'] = round(self.module_score, 2)
base_json_obj['comment'] = self.score_comment
return Response(json.dumps(base_json_obj), content_type=LTI_2_0_JSON_CONTENT_TYPE)
def _lti_2_0_result_del_handler(self, request, real_user): # pylint: disable=unused-argument
"""
Helper request handler for DELETE requests to LTI 2.0 result endpoint
DELETE handler for lti_2_0_result. Assumes all authorization has been checked.
Arguments:
request (xblock.django.request.DjangoWebobRequest): Request object (unused)
real_user (django.contrib.auth.models.User): Actual user linked to anon_id in request path suffix
Returns:
webob.response: response to this request. status 200 if success
"""
self.clear_user_module_score(real_user)
return Response(status=200)
def _lti_2_0_result_put_handler(self, request, real_user):
"""
Helper request handler for PUT requests to LTI 2.0 result endpoint
PUT handler for lti_2_0_result. Assumes all authorization has been checked.
Arguments:
request (xblock.django.request.DjangoWebobRequest): Request object
real_user (django.contrib.auth.models.User): Actual user linked to anon_id in request path suffix
Returns:
webob.response: response to this request. status 200 if success. 404 if body of PUT request is malformed
"""
try:
(score, comment) = self.parse_lti_2_0_result_json(request.body)
except LTIError:
return Response(status=404) # have to do 404 due to spec, but 400 is better, with error msg in body
# According to http://www.imsglobal.org/lti/ltiv2p0/ltiIMGv2p0.html#_Toc361225514
# PUTting a JSON object with no "resultScore" field is equivalent to a DELETE.
if score is None:
self.clear_user_module_score(real_user)
return Response(status=200)
# Fall-through record the score and the comment in the module
self.set_user_module_score(real_user, score, self.max_score(), comment)
return Response(status=200)
def clear_user_module_score(self, user):
"""
Clears the module user state, including grades and comments, and also scoring in db's courseware_studentmodule
Arguments:
user (django.contrib.auth.models.User): Actual user whose module state is to be cleared
Returns:
nothing
"""
self.set_user_module_score(user, None, None)
    def set_user_module_score(self, user, score, max_score, comment=u""):
        """
        Sets the module user state, including grades and comments, and also scoring in db's courseware_studentmodule
        Arguments:
            user (django.contrib.auth.models.User):  Actual user whose module state is to be set
            score (float):  user's numeric score to set.  Must be in the range [0.0, 1.0]
            max_score (float):  max score that could have been achieved on this module
            comment (unicode):  comments provided by the grader as feedback to the student
        Returns:
            nothing
        """
        # Scale the fractional score to the module's point value; a None in
        # either operand means "clear the grade" (see clear_user_module_score).
        if score is not None and max_score is not None:
            scaled_score = score * max_score
        else:
            scaled_score = None
        # Rebind first so the 'grade' event and the state below are recorded
        # against the given user, not whoever the module was bound to.
        self.system.rebind_noauth_module_to_user(self, user)
        # have to publish for the progress page...
        self.system.publish(
            self,
            'grade',
            {
                'value': scaled_score,
                'max_value': max_score,
                'user_id': user.id,
            },
        )
        # Persist on the (now user-bound) module fields.
        self.module_score = scaled_score
        self.score_comment = comment
def verify_lti_2_0_result_rest_headers(self, request, verify_content_type=True):
    """
    Helper method to validate LTI 2.0 REST result service HTTP headers.
    Returns (nothing) if correct, else raises LTIError.

    Arguments:
        request (xblock.django.request.DjangoWebobRequest): Request object
        verify_content_type (bool): If true, verifies the content type of the request is that spec'ed by LTI 2.0

    Returns:
        nothing, but will only return if verification succeeds

    Raises:
        LTIError if verification fails
    """
    content_type = request.headers.get('Content-Type')
    if verify_content_type and content_type != LTI_2_0_JSON_CONTENT_TYPE:
        log.info("[LTI]: v2.0 result service -- bad Content-Type: {}".format(content_type))
        raise LTIError(
            "For LTI 2.0 result service, Content-Type must be {}. Got {}".format(LTI_2_0_JSON_CONTENT_TYPE,
                                                                                 content_type))
    try:
        # OAuth signature check is done over the body regardless of the
        # (possibly skipped) Content-Type check above.
        self.verify_oauth_body_sign(request, content_type=LTI_2_0_JSON_CONTENT_TYPE)
    except (ValueError, LTIError) as err:
        # NOTE: err.message is Python-2 only; this module presumably targets
        # Python 2 (cf. the u"" literals elsewhere) -- confirm before porting.
        log.info("[LTI]: v2.0 result service -- OAuth body verification failed: {}".format(err.message))
        raise LTIError(err.message)
def parse_lti_2_0_result_json(self, json_str):
    """
    Helper method for verifying LTI 2.0 JSON object contained in the body of the request.

    The json_str must be loadable. It can either be a dict (object) or an array whose
    first element is a dict, in which case that first dict is considered.
    The dict must have the "@type" key with value equal to "Result",
    "resultScore" key with value equal to a number [0, 1].
    The "@context" key must be present, but we don't do anything with it. And the "comment"
    key may be present, in which case it must be a string.

    Arguments:
        json_str (unicode): The body of the LTI 2.0 results service request, which is a JSON string

    Returns:
        (float, str): (score, [optional]comment) if verification checks out

    Raises:
        LTIError (with message) if verification fails
    """
    try:
        json_obj = json.loads(json_str)
    except (ValueError, TypeError):
        msg = "Supplied JSON string in request body could not be decoded: {}".format(json_str)
        log.info("[LTI] {}".format(msg))
        raise LTIError(msg)
    # the standard supports a list of objects, who knows why. It must contain at least 1 element, and the
    # first element must be a dict
    if not isinstance(json_obj, dict):
        if isinstance(json_obj, list) and len(json_obj) >= 1 and isinstance(json_obj[0], dict):
            json_obj = json_obj[0]
        else:
            msg = ("Supplied JSON string is a list that does not contain an object as the first element. {}"
                   .format(json_str))
            log.info("[LTI] {}".format(msg))
            raise LTIError(msg)
    # '@type' must be "Result"
    result_type = json_obj.get("@type")
    if result_type != "Result":
        msg = "JSON object does not contain correct @type attribute (should be 'Result', is {})".format(result_type)
        log.info("[LTI] {}".format(msg))
        raise LTIError(msg)
    # '@context' must be present as a key
    REQUIRED_KEYS = ["@context"]  # pylint: disable=invalid-name
    for key in REQUIRED_KEYS:
        if key not in json_obj:
            msg = "JSON object does not contain required key {}".format(key)
            log.info("[LTI] {}".format(msg))
            raise LTIError(msg)
    # 'resultScore' is not present. If this was a PUT this means it's actually a DELETE according
    # to the LTI spec. We will indicate this by returning None as score, "" as comment.
    # The actual delete will be handled by the caller
    if "resultScore" not in json_obj:
        return None, json_obj.get('comment', "")
    # if present, 'resultScore' must be a number between 0 and 1 inclusive.
    # (The "unconvertable" default is unreachable here -- the key was checked
    # above -- it only guards the float() conversion style.)
    try:
        score = float(json_obj.get('resultScore', "unconvertable"))  # Check if float is present and the right type
        if not 0 <= score <= 1:
            # LTIError is not in the except tuple below, so this propagates.
            msg = 'score value outside the permitted range of 0-1.'
            log.info("[LTI] {}".format(msg))
            raise LTIError(msg)
    except (TypeError, ValueError) as err:
        msg = "Could not convert resultScore to float: {}".format(err.message)
        log.info("[LTI] {}".format(msg))
        raise LTIError(msg)
    return score, json_obj.get('comment', "")
| agpl-3.0 |
pprett/statsmodels | statsmodels/sandbox/distributions/copula.py | 38 | 8351 | '''
Which Archimedean is Best?
Extreme Value copulas formulas are based on Genest 2009
References
----------
Genest, C., 2009. Rank-based inference for bivariate extreme-value
copulas. The Annals of Statistics, 37(5), pp.2990-3022.
'''
import numpy as np
from scipy.special import expm1, log1p
def copula_bv_indep(u, v):
    '''independence (product) bivariate copula: C(u, v) = u * v
    '''
    return u * v
def copula_bv_min(u, v):
    '''comonotonic bivariate copula (Frechet upper bound): C(u, v) = min(u, v)
    '''
    return np.minimum(u, v)
def copula_bv_max(u, v):
    '''countermonotonic bivariate copula (Frechet lower bound):
    C(u, v) = max(u + v - 1, 0)
    '''
    return np.maximum(u + v - 1, 0)
def copula_bv_clayton(u, v, theta):
    '''Clayton or Cook, Johnson bivariate copula.

    C(u, v) = (u**-theta + v**-theta - 1)**(-1/theta), theta > 0.

    Parameters
    ----------
    u, v : float or ndarray
        marginal probabilities in [0, 1]
    theta : float
        dependence parameter, strictly positive

    Raises
    ------
    ValueError
        if theta is not strictly positive
    '''
    if not theta > 0:
        raise ValueError('theta needs to be strictly positive')
    # The outer exponent must be -1/theta (the generator inverse), not
    # -theta; the two only coincide at theta == 1.
    return np.power(np.power(u, -theta) + np.power(v, -theta) - 1, -1. / theta)
def copula_bv_frank(u, v, theta):
    '''Frank bivariate copula with dependence parameter theta > 0.
    '''
    if not theta > 0:
        raise ValueError('theta needs to be strictly positive')
    numerator = expm1(-theta * u) * expm1(-theta * v)
    cdfv = -np.log(1 + numerator / expm1(-theta)) / theta
    # clip tiny numerical overshoot above 1 (necessary for example if theta=100)
    return np.minimum(cdfv, 1)
def copula_bv_gauss(u, v, rho):
    # Gaussian (normal) bivariate copula -- placeholder, not implemented.
    raise NotImplementedError
def copula_bv_t(u, v, rho, df):
    # Student-t bivariate copula -- placeholder, not implemented.
    raise NotImplementedError
#not used yet
class Transforms(object):
    # Placeholder base class for the generator-transform classes below;
    # currently unused.
    def __init__(self):
        pass
class TransfFrank(object):
    '''Frank Archimedean generator phi and its inverse.'''

    def evaluate(self, t, theta):
        # phi(t) = -log(expm1(-theta*t) / expm1(-theta)), written as a
        # difference of logs of the (positive) negated expm1 terms
        return np.log(-expm1(-theta)) - np.log(-expm1(-theta * t))

    def inverse(self, phi, theta):
        scaled = np.exp(-phi) * expm1(-theta)
        return -np.log1p(scaled) / theta
class TransfClayton(object):
    '''Clayton Archimedean generator phi(t) = t**-theta - 1 and its inverse.

    requires theta > 0
    '''

    @staticmethod
    def _checkargs(theta):
        # staticmethod: the original signature was missing ``self``, so an
        # instance call bound theta to the self slot; this keeps
        # ``obj._checkargs(theta)`` working as callers expect.
        return theta > 0

    def evaluate(self, t, theta):
        return np.power(t, -theta) - 1.

    def inverse(self, phi, theta):
        # t = (1 + phi)**(-1/theta); the exponent must be -1/theta (not
        # -theta) so that inverse(evaluate(t)) == t.
        return np.power(1 + phi, -1. / theta)
class TransfGumbel(object):
    '''Gumbel Archimedean generator phi(t) = (-log t)**theta and its inverse.

    requires theta >= 1
    '''

    @staticmethod
    def _checkargs(theta):
        # staticmethod: the original signature was missing ``self``, so an
        # instance call bound theta to the self slot; this keeps
        # ``obj._checkargs(theta)`` working as callers expect.
        return theta >= 1

    def evaluate(self, t, theta):
        return np.power(-np.log(t), theta)

    def inverse(self, phi, theta):
        return np.exp(-np.power(phi, 1. / theta))
class TransfIndep(object):
    '''Independence generator phi(t) = -log(t), inverse exp(-phi).'''

    def evaluate(self, t):
        return -np.log(t)

    def inverse(self, phi):
        return np.exp(-phi)
def copula_bv_archimedean(u, v, transform, args=()):
    '''Generic bivariate Archimedean copula built from a generator.

    ``transform`` must provide ``evaluate`` (the generator phi) and
    ``inverse`` (phi**-1); extra ``args`` are forwarded to both.
    '''
    gen = transform.evaluate
    gen_inv = transform.inverse
    return gen_inv(gen(u, *args) + gen(v, *args), *args)
def copula_mv_archimedean(u, transform, args=(), axis=-1):
    '''generic multivariate Archimedean copula

    ``u`` is an array whose ``axis`` (last by default) indexes the margins;
    the generator values are summed along that axis before inversion.
    '''
    phi_of_u = transform.evaluate(u, *args)
    return transform.inverse(phi_of_u.sum(axis), *args)
def copula_bv_ev(u, v, transform, args=()):
    '''generic bivariate extreme value copula

    ``transform`` is the Pickands dependence function A, evaluated at
    log(v)/log(u*v); the copula is (u*v)**A.
    '''
    loguv = np.log(u * v)
    return np.exp(loguv * transform(np.log(v) / loguv, *args))
def transform_tawn(t, a1, a2, theta):
    '''asymmetric logistic model of Tawn 1988

    special case: a1 = a2 = 1 : Gumbel

    restrictions:
     - theta in (0,1]
     - a1, a2 in [0,1]
    '''
    valid = ((theta > 0) and (theta <= 1)
             and (0 <= a1 <= 1) and (0 <= a2 <= 1))
    if not np.all(valid):
        raise ValueError('invalid args')
    linear_part = (1 - a1) * (1 - t) + (1 - a2) * t
    power_part = ((a1 * t) ** (1. / theta)
                  + (a2 * (1 - t)) ** (1. / theta)) ** theta
    return linear_part + power_part
def transform_joe(t, a1, a2, theta):
    '''asymmetric negative logistic model of Joe 1990

    special case: a1 = a2 = 1 : symmetric negative logistic of Galambos 1978

    restrictions:
     - theta in (0,inf)
     - a1, a2 in (0,1]
    '''
    valid = (theta > 0) and (0 < a1 <= 1) and (0 < a2 <= 1)
    if not np.all(valid):
        raise ValueError('invalid args')
    inner = ((a1 * (1 - t)) ** (-1. / theta)
             + (a2 * t) ** (-1. / theta))
    return 1 - inner ** (-theta)
def transform_tawn2(t, theta, k):
    '''asymmetric mixed model of Tawn 1988

    special case: k = 0, theta in [0,1] : symmetric mixed model of
    Tiago de Oliveira 1980

    restrictions:
     - theta > 0
     - theta + 3*k > 0
     - theta + k <= 1
     - theta + 2*k <= 1
    '''
    valid = ((theta >= 0) and (theta + 3 * k > 0)
             and (theta + k <= 1) and (theta + 2 * k <= 1))
    if not np.all(valid):
        raise ValueError('invalid args')
    return 1 - (theta + k) * t + theta * t * t + k * t ** 3
def transform_bilogistic(t, beta, delta):
    '''bilogistic model of Coles and Tawn 1994, Joe, Smith and Weissman 1992

    restrictions:
     - (beta, delta) in (0,1)^2 or
     - (beta, delta) in (-inf,0)^2

    not vectorized because of numerical integration

    Returns
    -------
    float
        value of the Pickands dependence function at t (the integral of the
        pointwise maximum of the two bilogistic terms over [0, 1])
    '''
    def _check_args(beta, delta):
        cond1 = (beta > 0) and (beta <= 1) and (delta > 0) and (delta <= 1)
        cond2 = (beta < 0) and (delta < 0)
        return cond1 | cond2

    if not np.all(_check_args(beta, delta)):
        raise ValueError('invalid args')

    def _integrant(w):
        term1 = (1 - beta) * np.power(w, -beta) * (1 - t)
        term2 = (1 - delta) * np.power(1 - w, -delta) * t
        # the integrand is the pointwise maximum of the two terms; the
        # original code computed it but never returned it, so quad received
        # None and failed
        return np.maximum(term1, term2)

    from scipy.integrate import quad
    # quad returns (value, abserr); only the value is the transform
    transf, _ = quad(_integrant, 0, 1)
    return transf
def transform_hr(t, lamda):
    '''model of Huesler Reiss 1989

    special case: a1=a2=1 : symmetric negative logistic of Galambos 1978

    restrictions:
     - lambda in (0,inf)
    '''
    def _check_args(lamda):
        cond = (lamda > 0)
        return cond

    if not np.all(_check_args(lamda)):
        raise ValueError('invalid args')
    term = np.log((1. - t) / t) * 0.5 / lamda
    from scipy.stats import norm  # use special if I want to avoid stats import
    # use the public cdf instead of the private _cdf; identical for the
    # standard normal (no loc/scale/shape involved) and stable across
    # scipy versions
    transf = (1 - t) * norm.cdf(lamda + term) + t * norm.cdf(lamda - term)
    return transf
def transform_tev(t, rho, x):
    '''t-EV model of Demarta and McNeil 2005

    restrictions:
     - rho in (-1,1)
     - x > 0
    '''
    # NOTE(review): docstring allows rho in (-1,1) but the check below only
    # accepts rho in (0,1) -- confirm which range is intended.
    def _check_args(rho, x):
        cond1 = (x > 0)
        cond2 = (rho > 0) and (rho < 1)
        return cond1 and cond2

    if not np.all(_check_args(rho, x)):
        raise ValueError('invalid args')
    from scipy.stats import t as stats_t  # use special if I want to avoid stats import
    z = np.sqrt(1. + x) * (np.power(t/(1.-t), 1./x) - rho)
    z /= np.sqrt(1 - rho*rho)
    # NOTE(review): both terms use the same z, so the weighted sum collapses
    # to stats_t._cdf(z, x+1) regardless of t. In Demarta & McNeil the second
    # term presumably uses z evaluated with t and (1-t) swapped -- verify
    # against the paper before relying on this function. Also note the use of
    # the private stats_t._cdf.
    transf = (1 - t) * stats_t._cdf(z, x+1) + t * stats_t._cdf(z, x+1)
    return transf
#define dictionary of copulas by names and aliases
copulanames = {'indep' : copula_bv_indep,
               'i' : copula_bv_indep,
               'min' : copula_bv_min,
               'max' : copula_bv_max,
               'clayton' : copula_bv_clayton,
               'cookjohnson' : copula_bv_clayton,
               'cj' : copula_bv_clayton,
               'frank' : copula_bv_frank,
               'gauss' : copula_bv_gauss,
               'normal' : copula_bv_gauss,
               # NOTE(review): 't' maps to the Frank copula rather than
               # copula_bv_t (which is unimplemented) -- presumably a
               # placeholder; confirm before using the 't' alias.
               't' : copula_bv_frank}
class CopulaBivariate(object):
    '''bivariate copula class

    Instantiation needs the arguments, cop_args, that are required for the
    copula.
    '''

    def __init__(self, marginalcdfs, copula, copargs=()):
        """
        Parameters
        ----------
        marginalcdfs : sequence of two callables
            marginal cdf for each coordinate
        copula : str or callable
            a key of ``copulanames`` or a copula function C(u, v, *copargs)
        copargs : tuple
            extra arguments passed to the copula function

        Raises
        ------
        ValueError
            if ``copula`` is neither a known name nor callable with copargs
        """
        if copula in copulanames:
            self.copula = copulanames[copula]
        else:
            # see if we can call it as a copula function; probe at (0.5, 0.5)
            try:
                copula(0.5, 0.5, *copargs)
            except Exception:  # narrowed from bare except; we re-raise anyway
                raise ValueError('copula needs to be a copula name or callable')
            self.copula = copula

        # no checking done on marginals
        self.marginalcdfs = marginalcdfs
        self.copargs = copargs

    def cdf(self, xy, args=None):
        '''xy needs to be iterable, instead of x,y for extension to multivariate

        ``args`` overrides the instance copargs when given.
        '''
        x, y = xy
        if args is None:
            args = self.copargs
        return self.copula(self.marginalcdfs[0](x), self.marginalcdfs[1](y),
                           *args)
| bsd-3-clause |
pgmillon/ansible | lib/ansible/plugins/action/vyos.py | 10 | 4037 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.vyos.vyos import vyos_provider_spec
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
    """Action plugin for vyos_* modules: sets up the persistent network_cli
    connection (translating legacy ``provider`` dicts when running over the
    'local' connection) and ensures the device CLI is in operational mode."""

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        # vyos_config needs special handling in the shared network base class
        self._config_module = True if self._task.action == 'vyos_config' else False
        socket_path = None

        if self._play_context.connection == 'network_cli':
            # connection details come from the connection plugin itself;
            # a provider dict is redundant here
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning('provider is unnecessary when using network_cli and will be ignored')
                del self._task.args['provider']
        elif self._play_context.connection == 'local':
            # legacy 'local' connection: build a network_cli play context
            # from the provider dict, falling back to play context values
            provider = load_provider(vyos_provider_spec, self._task.args)
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'vyos'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = int(provider['port'] or self._play_context.port or 22)
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file

            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

            command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout')
            connection.set_options(direct={'persistent_command_timeout': command_timeout})

            # start (or attach to) the persistent connection daemon
            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            task_vars['ansible_socket'] = socket_path
        else:
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}

        # make sure we are in the right cli context which should be
        # enable mode and not config module
        if socket_path is None:
            socket_path = self._connection.socket_path

        conn = Connection(socket_path)
        out = conn.get_prompt()
        # a '#' prompt indicates configure mode on VyOS; drop back to
        # operational mode without committing anything
        if to_text(out, errors='surrogate_then_replace').strip().endswith('#'):
            display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
            conn.send_command('exit discard')

        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
| gpl-3.0 |
tmaiwald/OSIM | OSIM/Modeling/Components/Capacity.py | 1 | 1152 | import math
from OSIM.Modeling.AbstractComponents.SingleComponent import SingleComponent
from OSIM.Modeling.CircuitSystemEquations import CircuitSystemEquations
class Capacity(SingleComponent):
    """Linear capacitor component.

    ``self.value`` holds the capacitance; the admittance stamped into the
    system differs between transient and frequency-domain analysis.
    """

    def __init__(self, nodes, name, value, superComponent, **kwargs):
        super(Capacity, self).__init__(nodes, name, value, superComponent, **kwargs)

    def doStep(self, freq_or_tau):
        """Stamp this capacitor for one solver step.

        freq_or_tau: frequency (AC analysis) or time step (transient).
        """
        myIdx = self.sys.compDict.get(self.name)
        # insertAdmittanceintoSystem stamps the admittance and -- presumably --
        # returns the two node voltages from the previous step (TODO confirm
        # against SingleComponent).
        [x1v,x2v] = self.insertAdmittanceintoSystem(freq_or_tau)
        if self.sys.atype == CircuitSystemEquations.ATYPE_TRAN:
            # transient companion model: RHS current source from the
            # previous voltage difference across the capacitor
            adm = self.getAdmittance(self.nodes, freq_or_tau)
            self.sys.b[myIdx] = adm * (x1v - x2v)

    def getAdmittance(self, nodesFromTo, freq_or_tstep):
        """Return the capacitor admittance for the current analysis type.

        Transient: C / dt (backward-Euler style companion conductance).
        Frequency domain: j * 2*pi*f * C.
        """
        if self.sys.atype == CircuitSystemEquations.ATYPE_TRAN:
            return self.value/(self.sys.tnow-self.sys.told)
        else:
            return (1j * 2 * math.pi * freq_or_tstep * self.value)

    def setParameterOrVariableValue(self, name, value):
        # only the capacitance parameter "C" is settable
        if (name == "C"):
            self.value = value
            return
        else:
            print(self.name + " ERROR: " + name + " unknown!!")
| bsd-2-clause |
UofA-Robomasters/rm_vision_buff_activation_light | prompt_lights/prompt_lights_searching.py | 1 | 11616 | import cv2, sys, os, rospy, math
import numpy as np
from scipy.misc import imresize
file_dir = None
is_debug_mode = True
file_dir = os.path.dirname(os.path.abspath(__file__))
root = file_dir+'/..'#'/number_searching'
sys.path.insert(0, root)
# print(root)
# print(os.path.dirname(root))
from number_searching.grid_recognition import read_image_from_file,preprocessing_for_number_searching,filter_redundancy_boxes
from number_searching.preprocess_for_number_recognition import draw_box, region_of_interest
draw_prompt_lights_box_color = (255,255,255)
"""
Analysis and filter contours
"""
def analysis_and_filter_contours_for_prompt_lights_searching(contours):
ratio = 2.0 / 1.0
sudokuWidth = 50
sudokuHeight = 25
angleTolerance = 6
ratioToleranceRate = 0.2
dimensionsToleranceRate = 0.4
contours_filtered = list()
rects = list()
boxes = list()
for contour in contours:
tempRect = cv2.minAreaRect(contour)
# if is_debug_mode:
# print("[Debug] tempRect:", tempRect)
width = tempRect[1][0]
height = tempRect[1][1]
if not (width > height):
# tempRect = cv2.boxPoints((tempRect[0],(tempRect[1][0],tempRect[1][1]),tempRect[2] + 90.0))
tempRect = (tempRect[0],(tempRect[1][1],tempRect[1][0]),tempRect[2] + 90.0)
width = tempRect[1][0]
height = tempRect[1][1]
if(height==0):
height = -1
ratio_cur = width / height
if (ratio_cur > (1.0-ratioToleranceRate) * ratio and \
ratio_cur < (1.0+ratioToleranceRate) * ratio and \
width > (1.0-dimensionsToleranceRate) * sudokuWidth and \
width < (1.0+dimensionsToleranceRate) * sudokuWidth and \
height > (1.0-dimensionsToleranceRate) * sudokuHeight and \
height < (1.0+dimensionsToleranceRate) * sudokuHeight and \
((tempRect[2] > -angleTolerance and tempRect[2] < angleTolerance) or \
tempRect[2] < (-180+angleTolerance) or \
tempRect[2] > (180-angleTolerance))
):
contours_filtered.append(contour)
rects.append(tempRect)
if (is_debug_mode):
tempRect_points = cv2.boxPoints(tempRect)
boxes.append(tempRect_points)
return contours_filtered, rects, boxes
"""
Pre-processing image
"""
def preprocessing_for_prompt_lights_searching(src_img):
# convert source iamge to gray scale and resize
gray = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)
# gray = imresize(gray, [50, 80])
# blur
# gray = cv2.medianBlur(gray,13)
blur = cv2.GaussianBlur(gray,(5,5),0)
# threshold
# ret, gray = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \
cv2.THRESH_BINARY, 15, 3)
# ret, gray = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
# enhance outline
kernel = np.ones([3, 3], np.uint8)
gray = cv2.dilate(gray, kernel, iterations = 1)
return gray
"""
This function first classify points into groups by x distance.
Then, pick up the larget group.
"""
def prompt_light_filter_outlier_boxes_by_x_dist(contours, rects, number_boxes):
if (len(rects)==0): # avoid empty input
return [],[],[],[]
dist_list = [[rects[i]] for i in range(len(rects))]
boxes_list = [[number_boxes[i]] for i in range(len(rects))]
contours_list = [[contours[i]] for i in range(len(rects))]
x_bin_size = 10
# find near centre points for each centre point (by horizontal distance)
for rect_i in range(len(rects)):
for rect_j in range(rect_i+1,len(rects)):
rect_i_center_x = rects[rect_i][0][0]
rect_i_center_y = rects[rect_i][0][1]
rect_j_center_x = rects[rect_j][0][0]
rect_j_center_y = rects[rect_j][0][1]
dist_x = abs(rect_i_center_x - rect_j_center_x)
dist_y = abs(rect_i_center_y - rect_j_center_y)
dist_ij = dist_x**2 + dist_y**2
if dist_x < x_bin_size:
dist_list[rect_i].append(rects[rect_j])
dist_list[rect_j].append(rects[rect_i])
boxes_list[rect_i].append(number_boxes[rect_j])
boxes_list[rect_j].append(number_boxes[rect_i])
contours_list[rect_i].append(contours[rect_j])
contours_list[rect_j].append(contours[rect_i])
# get the size of each bin
dist_len_list = [0.0] * len(rects)
for i in range(len(dist_list)):
dist_len_list[i] = len(dist_list[i])
# largest bin (group) size
max_bin_size = max(dist_len_list)
good_list_index = dist_len_list.index(max(dist_len_list))
bad_box_indexs = list()
good_contours = contours_list.pop(good_list_index)
good_rects = dist_list.pop(good_list_index)
good_boxes = boxes_list.pop(good_list_index)
return good_contours, good_rects, good_boxes, bad_box_indexs
"""
This function get rid of outlier prompt light boxes
"""
def filter_outlier_boxes(contours, rects, number_boxes):
dist_list = [0.0] * len(rects)
for rect_i in range(len(rects)):
for rect_j in range(rect_i+1,len(rects)):
rect_i_center_x = rects[rect_i][0][0]
rect_i_center_y = rects[rect_i][0][1]
rect_j_center_x = rects[rect_j][0][0]
rect_j_center_y = rects[rect_j][0][1]
dist_x = abs(rect_i_center_x - rect_j_center_x)
dist_y = abs(rect_i_center_y - rect_j_center_y)
dist_ij = dist_x**2 + dist_y**2
dist_list[rect_i] += dist_ij
dist_list[rect_j] += dist_ij
bad_box_indexs = list()
good_contours = list()
good_rects = list()
good_boxes = list()
for i in range(min(5, len(rects))):
current_min_index = dist_list.index(min(dist_list))
bad_box_indexs.append(dist_list.pop(current_min_index))
good_contours.append(contours.pop(current_min_index))
good_rects.append(rects.pop(current_min_index))
good_boxes.append(number_boxes.pop(current_min_index))
return good_contours, good_rects, good_boxes, bad_box_indexs
"""
This function will extract roi after the prompt lights have been found
"""
def preprocess_for_prompt_light_identify(src_img, rects, number_boxes):
global draw_prompt_lights_box_color
number_boxes_regions_list = list()
box_index = 0
# src_img = cv2.GaussianBlur(src_img,(51,51),0)
for box in number_boxes:
# extract ROI to pick the most comment color in the box
blur = cv2.GaussianBlur(region_of_interest(src_img, box),(15,15),0)
blur = imresize(blur, [25, 50]) # resize
# simply get rid of rim
draw_prompt_lights_box_color = (int(blur[(12,25)][0]),int(blur[(12,25)][1]),int(blur[(12,25)][2]))
draw_box(src_img, box, draw_prompt_lights_box_color) # draw the rim with a most comment color in the box
# extract ROI for promt lights identify
blur = cv2.GaussianBlur(region_of_interest(src_img, box),(15,15),0)
extracted_result = imresize(blur, [25, 50]) # resize
# extracted result ready for return
number_boxes_regions_list.append(extracted_result)
# Debug
box_center = rects[box_index][0]
cv2.circle(src_img, (int(round(box_center[0])), int(round(box_center[1]))), 1, (0,0,255), 5)
# update loop variable
box_index += 1
return number_boxes_regions_list
"""
This function identify the prompt light status
"""
def prompt_light_identify(color):
b_channel = int(color[0])
g_channel = int(color[1])
r_channel = int(color[2])
is_on = False
# print(abs(r_channel - g_channel), abs(r_channel - b_channel))
dist = (abs(r_channel - g_channel))**2 + (abs(r_channel - b_channel))**2
if dist > 800:
is_on = True
return is_on
"""
Major process of prompt lights searching
"""
def prompt_lights_searching(src_img):
processed_img = preprocessing_for_prompt_lights_searching(src_img)
# processed_img = preprocessing_for_number_searching(src_img)
# src_img = np.copy(processed_img)
im2, contours, hierarchy = cv2.findContours(processed_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# cv2.drawContours(src_img, contours, -1, (255,0,0), 3)
# cv2.fillPoly(src_img, contours, (0, 255, 0))
# print(type(contours))
#Analysis to get boxes
contours, rects, number_boxes = analysis_and_filter_contours_for_prompt_lights_searching(contours)
# cv2.drawContours(src_img, contours, -1, (255,0,255), 3)
#Avoid redundancy boxes
contours, rects, number_boxes, _ = filter_redundancy_boxes(contours, rects, number_boxes)
#Find a largest bin in x direction
contours, rects, number_boxes, _ = prompt_light_filter_outlier_boxes_by_x_dist(contours, rects, number_boxes)
#Avoid outliers
_, rects, number_boxes, _ = filter_outlier_boxes(contours, rects, number_boxes)
#Extract info for prompt lights identify
number_boxes_regions_list = preprocess_for_prompt_light_identify(src_img, rects, number_boxes)
#Identify prompt lights and get statistic info
hitting_num = 0
if len(rects) == 5:
for i in range(len(rects)):
identify_info = number_boxes_regions_list[i][(12,25)]
label_color = (0,255,0)
if prompt_light_identify(identify_info):
label_color = (0,0,255)
hitting_num += 1
draw_box(src_img, number_boxes[i], label_color) # draw the rim
# cv2.putText(src_img, str(identify_info), (int(rects[i][0][0]),int(rects[i][0][1])), cv2.FONT_HERSHEY_SIMPLEX, 1, label_color, 2)
return src_img, hitting_num
"""
Main function (for testing)
"""
if __name__ == "__main__":
""" ================ Testing with image files (START) ================ """
"""
# import .grid_recognition
# from grid_recognition import read_image_from_file
#load src image
src_img = read_image_from_file()
# src_img, number_boxes_regions_list, _ = number_search(src_img)
src_img = prompt_lights_searching(src_img)
cv2.imshow('src_img', src_img)
key = cv2.waitKey(0)
"""
""" ================= Testing with image files (END) ================= """
""" ================ Testing with video files (START) ================ """
# """
# cam = cv2.VideoCapture('./../Buff2017.mp4')
cam = cv2.VideoCapture(file_dir+'/../../buff_test_video_01.mpeg')
# cam = cv2.VideoCapture(1)
# Define the codec and create VideoWriter object
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# fourcc = cv2.VideoWriter_fourcc(*'FMP4')
fourcc = cv2.VideoWriter_fourcc(*'H264')
out = None#cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
frame_num = 1
segment_num = 1
frame_rate = 24
recording = False
while True:
ret, frame = cam.read()
assert ret == True
# src_img, number_boxes_regions_list, _ = number_search(frame)
src_img, hitting_num = prompt_lights_searching(frame)
if is_debug_mode:
cv2.imshow('src_img', src_img)
print(hitting_num)
# for i in range(len(number_boxes_regions_list)):
# cv2.imshow(str(i),number_boxes_regions_list[i])
key = cv2.waitKey(1000/frame_rate) & 0xff
# key = cv2.waitKey(0) & 0xff
if key == ord('q'):
break
# """
""" ================= Testing with image files (END) ================= """
cv2.destroyAllWindows()
| apache-2.0 |
mattcaldwell/pip | pip/_vendor/cachecontrol/caches/file_cache.py | 6 | 2681 | import hashlib
import os
from pip._vendor.lockfile import FileLock
def _secure_open_write(filename, fmode):
# We only want to write to this file, so open it in write only mode
flags = os.O_WRONLY
# os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
# will open *new* files.
# We specify this because we want to ensure that the mode we pass is the
# mode of the file.
flags |= os.O_CREAT | os.O_EXCL
# Do not follow symlinks to prevent someone from making a symlink that
# we follow and insecurely open a cache file.
if hasattr(os, "O_NOFOLLOW"):
flags |= os.O_NOFOLLOW
# On Windows we'll mark this file as binary
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
# Before we open our file, we want to delete any existing file that is
# there
try:
os.remove(filename)
except (IOError, OSError):
# The file must not exist already, so we can just skip ahead to opening
pass
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
# race condition happens between the os.remove and this line, that an
# error will be raised. Because we utilize a lockfile this should only
# happen if someone is attempting to attack us.
fd = os.open(filename, flags, fmode)
try:
return os.fdopen(fd, "wb")
except:
# An error occurred wrapping our FD in a file object
os.close(fd)
raise
class FileCache(object):
    """Byte cache that stores each entry as a file in a sharded directory tree."""

    def __init__(self, directory, forever=False, filemode=0o0600,
                 dirmode=0o0700):
        self.directory = directory
        self.forever = forever      # when True, delete() becomes a no-op
        self.filemode = filemode    # mode for cache files
        self.dirmode = dirmode      # mode for shard directories

    @staticmethod
    def encode(x):
        """Hash a key into the stable hex digest used as its file name."""
        return hashlib.sha224(x.encode()).hexdigest()

    def _fn(self, name):
        """Map a key to its path: five single-character shard dirs + digest."""
        digest = self.encode(name)
        shards = list(digest[:5])
        return os.path.join(self.directory, *(shards + [digest]))

    def get(self, key):
        """Return the cached bytes for *key*, or None when absent."""
        path = self._fn(key)
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as fh:
            return fh.read()

    def set(self, key, value):
        """Write *value* under *key*, creating shard directories as needed."""
        path = self._fn(key)

        # Make sure the directory exists
        try:
            os.makedirs(os.path.dirname(path), self.dirmode)
        except (IOError, OSError):
            pass

        # Hold the lock while replacing the actual file
        with FileLock(path) as lock:
            with _secure_open_write(lock.path, self.filemode) as fh:
                fh.write(value)

    def delete(self, key):
        """Remove the entry for *key*, unless the cache is marked forever."""
        if not self.forever:
            os.remove(self._fn(key))
| mit |
queer1/bitcurator | python/bc_genrep_premis.py | 1 | 9762 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# BitCurator
#
# This code is distributed under the terms of the GNU General Public
# License, Version 3. See the text file "COPYING" for further details
# about the terms of this license.
#
# bc_premis_genxml.py
#
# Generate XML tree for premis events
#
import os, fiwalk, uuid, sys
import lxml.builder as lb
from lxml import etree
try:
from argparse import ArgumentParser
except ImportError:
raise ImportError("This script requires ArgumentParser which is in Python 2.7 or Python 3.0")
global root
root = "null"
class BcPremisFile:
# The first block of disk-image object
def bcPremisGenXmlObject(self, image_name):
    """Append a PREMIS <object> block for the disk image to the global tree.

    The objectIdentifierType is a freshly generated UUID and the
    objectIdentifierValue is the image file name.
    """
    global root
    obj = etree.SubElement(root, 'object')
    root.append(obj)
    identifier = etree.SubElement(obj, "objectIdentifier")
    obj.append(identifier)
    id_type = etree.Element('objectIdentifierType')
    id_type.text = str(uuid.uuid1())
    identifier.append(id_type)
    id_value = etree.Element('objectIdentifierValue')
    id_value.text = image_name
    identifier.append(id_value)
# Extract the image_name from the dfxml line "command_line"
def extractImageName(self, dfxml_command_line, dfxml_type):
    """Pull the image token out of a DFXML <command_line> string.

    For fiwalk DFXML ("fiwalk -f -X <pathToXml> <pathToImage>") the image
    is the fifth space-separated token; for bulk_extractor reports
    ("cmd/bulk_extractor <image> -o <path>") the third token is taken.

    Arguments:
        dfxml_command_line (str): text of the command_line element.
        dfxml_type (str): "fw" for fiwalk DFXML, "be" for bulk_extractor.
    Returns:
        str or None: the selected token, or None for an unknown type.
    """
    tokens = dfxml_command_line.split(" ")
    if dfxml_type == "fw":
        # print("D: command_line as list: ", tokens[4])
        return tokens[4]
    elif dfxml_type == "be":
        # print("D: command_line as list: ", tokens[2] )
        return tokens[2]
def bcGenPremisEvent(self, root, eIdType, eIdVal, eType, eDateTime, eOutcome, eoDetail, of_premis, write_to_file = False):
    """Append a PREMIS <event> element under *root*, optionally serializing.

    Arguments:
        root: lxml element the event is appended to.
        eIdType: event identifier type; 0 means "generate a UUID".
        eIdVal: event identifier value.
        eType: event type string (e.g. "Capture").
        eDateTime: event timestamp string.
        eOutcome: eventOutcome text.
        eoDetail: eventOutcomeDetail text.
        of_premis: open binary output file; only used when write_to_file.
        write_to_file (bool): when True, write the pretty-printed whole tree
            to of_premis (done for the last event of a report).
    """
    # Generate the Event:
    event = etree.SubElement(root, 'event')
    root.append(event)
    eventIdentifier = etree.SubElement(event, "eventIdentifier")
    event.append(eventIdentifier)
    eventIdentifierType = etree.SubElement(eventIdentifier, "eventIdentifierType")
    # Use UUID generation if eIDType is set to 0
    if (eIdType == 0):
        eventIdentifierType.text = str(uuid.uuid1())
    else:
        eventIdentifierType.text = str(eIdType)
    eventIdentifier.append(eventIdentifierType)
    eventIdentifierValue = etree.SubElement(eventIdentifier, "eventIdentifierValue")
    eventIdentifierValue.text = eIdVal
    eventIdentifier.append(eventIdentifierValue)
    eventType = etree.SubElement(event, "eventType")
    eventType.text = eType
    event.append(eventType)
    eventDateTime = etree.SubElement(event, "eventDateTime")
    eventDateTime.text = eDateTime
    event.append(eventDateTime)
    eventOutcomeInformation = etree.SubElement(event, "eventOutcomeInformation")
    event.append(eventOutcomeInformation)
    eventOutcome = etree.SubElement(eventOutcomeInformation, "eventOutcome")
    eventOutcome.text = eOutcome
    eventOutcomeInformation.append(eventOutcome)
    eventOutcomeDetail = etree.SubElement(eventOutcomeInformation, "eventOutcomeDetail")
    eventOutcomeDetail.text = eoDetail
    eventOutcomeInformation.append(eventOutcomeDetail)
    # pretty string: serialize the whole tree (not just this event)
    s = etree.tounicode(root, pretty_print=True)
    #print(s)
    if (write_to_file == True):
        of_premis.write(bytes(s, 'UTF-8'))
# Generate Object and event parts for the disk image
def bcGenPremisXmlDiskImage(self, image_name, premis_image_info, premis_file):
    """Build the PREMIS object plus "Capture" event for a disk image.

    Creates the (module-global) PREMIS root element, adds the object
    segment via self.bcPremisGenXmlObject() and, for .aff/.E01 images,
    a "Capture" event. Returns the root element, or the string " " for
    unsupported image types (in which case no event is generated).

    premis_image_info is expected to carry 'acq_date', 'version' and
    'imagesize' keys.
    """
    # We will not create the output file till the last event.
    of_premis = "null"
    # create XML; root is deliberately module-global so that the later
    # event generators (Fiwalk, bulk_extractor) append to the same tree.
    global root
    root = etree.Element("premis", xmlns="info:lc/xmlns/premis-v2", xsi="http://www.w3c.org/2001/XMLSchema-instance", version="2.0")
    # Generate the disk image Object segment
    self.bcPremisGenXmlObject(image_name)
    # Generate the disk image event segment; event id 0 requests a UUID.
    eventIdType = 0 # UUID
    if image_name.endswith(".aff"):
        eventIdVal = "affinfo "+image_name
    elif image_name.endswith(".E01"):
        eventIdVal = "E01"+image_name
    else:
        eventIdVal = image_name
    eventType = "Capture"
    eDateTime = premis_image_info['acq_date']
    eoDetail = 'Version: '+ str(premis_image_info['version']) + ', Image size: '+ str(premis_image_info['imagesize'])
    if image_name.endswith(".aff"):
        eOutcome = "AFF"
    elif image_name.endswith(".E01"):
        eOutcome = "E01"
    else:
        # Unsupported image type: report and bail out without an event.
        eOutcome = "Unknown image type"
        print(">> No Premis Events generated: ", eOutcome)
        return(" ")
    # write_to_file is False: serialization happens with the last event.
    self.bcGenPremisEvent(root, eventIdType, eventIdVal, eventType, eDateTime, eOutcome, eoDetail, of_premis, False)
    return root
# Generate premis XML code for Fiwalk event
def bcGenPremisXmlFiwalk(self, dfxmlfile, premis_file, outcome=True, fw_tab=False):
    """Append the PREMIS "File System Analysis" event for a Fiwalk run.

    Reads command line and start time from dfxmlfile, and (when fw_tab is
    True and a file could be opened) writes the XML declaration plus the
    serialized tree to premis_file. Returns the shared global root.
    """
    # If dfxmlfile doesn't exist, Fiwalk command probably failed.
    # If outcome is False, it is confirmed to have failed.
    # FIXME: Add premis event for failed case here.
    # We don't write to the file till the last event is done. If this
    # routine is invoked by a Fiwalk-tab, this is the last event.
    # For such a case, create a new file.
    if fw_tab == True:
        # NOTE(review): this opens premis_file for (truncating) write only
        # when it ALREADY exists, while bcGenPremisXmlBulkExtractor uses the
        # opposite test ("if not os.path.exists"). One of the two conditions
        # looks inverted -- confirm intended behavior before changing it.
        if os.path.exists(premis_file):
            of_premis = open(premis_file,"wb")
            line1 = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
            of_premis.write(bytes(line1, 'UTF-8'))
        else:
            of_premis = "null"
    else:
        of_premis = "null"
    # Get the image name from "command_line" part of dfxml file:
    dfxml_command_line = fiwalk.fiwalk_xml_command_line(dfxmlfile)
    # image_name is computed but not used below -- presumably kept for
    # symmetry with the bulk_extractor path; verify before removing.
    image_name = self.extractImageName(dfxml_command_line, "fw")
    # Generate the Fiwalk Event:
    eventIdType = 0 # UUID
    eventIdVal = dfxml_command_line
    eDateTime = fiwalk.fiwalk_xml_start_time(dfxmlfile)
    eoDetail = "DFXML File: " + dfxmlfile
    if (outcome == True):
        eOutcome = "Fiwalk Success"
    else:
        eOutcome = "Fiwalk Failure"
    # Only emit the event when an output file was actually opened; the
    # shared global root is returned either way.
    if of_premis != "null":
        self.bcGenPremisEvent(root, eventIdType, eventIdVal, "File System Analysis", eDateTime, eOutcome, eoDetail, of_premis, fw_tab)
    return root
def bcGenPremisXmlBulkExtractor(self, beReportFile, premis_file, isFirstEvent=False):
    """Append the PREMIS "Feature Stream Analysis" event for a bulk_extractor run.

    Reads the command line, version and start time from the bulk_extractor
    report XML (beReportFile). As this is the last event, premis_file is
    created and the whole PREMIS tree is serialized into it -- but only
    when the file does not already exist.
    """
    # Extract some values from the corresponding input XML file
    beReportXml_command_line = fiwalk.fiwalk_xml_command_line(beReportFile)
    # image_name is extracted for its side-effect-free bookkeeping only;
    # it is not referenced below.
    image_name = self.extractImageName(beReportXml_command_line, "be")
    be_version = fiwalk.fiwalk_xml_version(beReportFile)
    # BE is the last event. So open the outfile to write -- unless it
    # already exists, in which case the "null" sentinel is kept.
    if not os.path.exists(premis_file):
        of_premis = open(premis_file,"wb")
    else:
        of_premis = "null"
    print(">>> Generating Bulk Extractor Premis Events XML ")
    eventIdType = 0  # If this is 0, we will generate UUID
    eventIdVal = beReportXml_command_line
    eventType = "Feature Stream Analysis"
    eDateTime = fiwalk.fiwalk_xml_start_time(beReportFile)
    # FIXME: Need more input on what to extract for Details
    eoDetail = "version: "+be_version
    # We don't check the flag for eOutcome as we don't run the
    # bulk extractor on command line. We already have the feature files
    # from a previous run of the beViewer. We just use the information from
    # the report.xml file for generating premis events.
    eOutcome = "Bulk Extractor Output"
    # BUG FIX: the original unconditionally called of_premis.write() and
    # passed write_to_file=True, which raised AttributeError on the "null"
    # string sentinel whenever premis_file already existed. Only write when
    # a real file object was opened.
    write_to_file = of_premis != "null"
    if write_to_file:
        line1 = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
        of_premis.write(bytes(line1, 'UTF-8'))
    self.bcGenPremisEvent(root, eventIdType, eventIdVal, eventType, eDateTime, eOutcome, eoDetail, of_premis, write_to_file)
if __name__=="__main__":
import sys, time, re
parser = ArgumentParser(prog='bc_premis_genxml.py', description='Generate PREMIS XML file for BitCurator events')
parser.add_argument('--dfxmlfile', action='store', help="DFXML file ")
parser.add_argument('--bulk_extractor', action='store', help=" Bulk-extrator Report file ")
parser.add_argument('--Allreports', action='store', help=' All Reports')
parser.add_argument('--premis_file',action='store',help='Output Premis File; Concatinates if exists')
args = parser.parse_args()
print("D: dfxmlfile: ", args.dfxmlfile)
print("D: output premis file", args.premis_file)
premis = BcPremisFile()
'''
if (args.bulk_extractor):
bcGenPremisXmlBulkExtractor(self, reportsFile, premis_file, outcome=True)
'''
premis.bcGenPremisXmlFiwalk(args.dfxmlfile, args.premis_file)
| gpl-3.0 |
joomel1/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py | 157 | 5144 | # Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import sys
import time
LOG_HANDLER_NAME = 'MeteredStreamLogHandler'
class MeteredStream(object):
    """
    This class implements a stream wrapper that has 'meters' as well as
    regular output. A 'meter' is a single line of text that can be erased
    and rewritten repeatedly, without producing multiple lines of output. It
    can be used to produce effects like progress bars.

    Erasing only happens when the underlying stream is a tty and verbose
    mode is off; otherwise every update is written as a plain full line.
    """

    @staticmethod
    def _erasure(txt):
        """Return the backspace/space sequence that erases txt on a tty."""
        num_chars = len(txt)
        return '\b' * num_chars + ' ' * num_chars + '\b' * num_chars

    @staticmethod
    def _ensure_newline(txt):
        """Return txt guaranteed to end with a single trailing newline."""
        return txt if txt.endswith('\n') else txt + '\n'

    def __init__(self, stream=None, verbose=False, logger=None, time_fn=None, pid=None, number_of_columns=None):
        """Wrap stream (default sys.stderr); optionally tee log records into it.

        time_fn and pid exist for testability; logger, when given, gets a
        _LogHandler attached at DEBUG (verbose) or INFO level.
        """
        self._stream = stream or sys.stderr
        self._verbose = verbose
        self._time_fn = time_fn or time.time
        self._pid = pid or os.getpid()
        self._isatty = self._stream.isatty()
        self._erasing = self._isatty and not verbose
        self._last_partial_line = ''
        self._last_write_time = 0.0
        # Throttle fast when animating a meter, slowly otherwise.
        self._throttle_delay_in_secs = 0.066 if self._erasing else 10.0
        # sys.maxsize == "effectively unlimited"; unlike the original
        # sys.maxint it exists on Python 2.6+ AND Python 3.
        self._number_of_columns = sys.maxsize
        if self._isatty and number_of_columns:
            self._number_of_columns = number_of_columns

        self._logger = logger
        self._log_handler = None
        if self._logger:
            log_level = logging.DEBUG if verbose else logging.INFO
            self._log_handler = _LogHandler(self)
            self._log_handler.setLevel(log_level)
            self._logger.addHandler(self._log_handler)

    def __del__(self):
        self.cleanup()

    def cleanup(self):
        """Detach the log handler (idempotent)."""
        if self._logger:
            self._logger.removeHandler(self._log_handler)
            self._log_handler = None

    def write_throttled_update(self, txt):
        """Write txt only if enough time elapsed since the last write."""
        now = self._time_fn()
        if now - self._last_write_time >= self._throttle_delay_in_secs:
            self.write_update(txt, now)

    def write_update(self, txt, now=None):
        """Write txt and, on an erasing tty, remember its trailing partial line."""
        self.write(txt, now)
        if self._erasing:
            self._last_partial_line = txt[txt.rfind('\n') + 1:]

    def write(self, txt, now=None, pid=None):
        """Write txt, erasing any pending meter line first.

        In verbose mode the text is prefixed with a timestamp and pid and
        forced to end with a newline; on a plain (non-tty) stream it is
        only newline-terminated.
        """
        now = now or self._time_fn()
        pid = pid or self._pid
        self._last_write_time = now
        if self._last_partial_line:
            self._erase_last_partial_line()
        if self._verbose:
            now_tuple = time.localtime(now)
            msg = '%02d:%02d:%02d.%03d %d %s' % (now_tuple.tm_hour, now_tuple.tm_min, now_tuple.tm_sec, int((now * 1000) % 1000), pid, self._ensure_newline(txt))
        elif self._isatty:
            msg = txt
        else:
            msg = self._ensure_newline(txt)
        self._stream.write(msg)

    def writeln(self, txt, now=None, pid=None):
        """Write txt with a guaranteed trailing newline."""
        self.write(self._ensure_newline(txt), now, pid)

    def _erase_last_partial_line(self):
        # Unused local 'num_chars' from the original was removed.
        self._stream.write(self._erasure(self._last_partial_line))
        self._last_partial_line = ''

    def flush(self):
        """Terminate any pending meter line and flush the stream."""
        if self._last_partial_line:
            self._stream.write('\n')
            self._last_partial_line = ''
        self._stream.flush()

    def number_of_columns(self):
        return self._number_of_columns
class _LogHandler(logging.Handler):
    """logging.Handler that forwards each record to a MeteredStream.

    The record's creation time and originating pid are passed along so the
    stream can timestamp verbose output with the real values.
    """

    def __init__(self, meter):
        logging.Handler.__init__(self)
        self.name = LOG_HANDLER_NAME
        self._meter = meter

    def emit(self, record):
        message = record.getMessage()
        self._meter.writeln(message, record.created, record.process)
| bsd-3-clause |
atlefren/beerdatabase | alembic/versions/51712c35b178_create_country_geom_table.py | 1 | 1568 | """create country_geom table
Revision ID: 51712c35b178
Revises: 357280d45e18
Create Date: 2016-02-09 22:36:54.257145
"""
import os
import json
# revision identifiers, used by Alembic.
revision = '51712c35b178'
down_revision = '357280d45e18'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create and populate the country_geom table.

    Loads the bundled Natural Earth GeoJSON, inserts one row per country
    (skipping features without a usable ISO code), then converts the
    GeoJSON text into a PostGIS geography column.
    """
    countrygeom_table = op.create_table(
        'country_geom',
        sa.Column('iso_code', sa.Unicode(4), primary_key=True),
        sa.Column('continent', sa.Unicode(255)),
        sa.Column('geom_text', sa.Text()),
    )

    # The GeoJSON file lives in <repo_root>/data, three directory levels
    # up from this migration file.
    path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
    country_file = os.path.join(path, 'data', 'ne_50m_admin_0_countries.geojson')
    with open(country_file, 'r') as country_data:
        # iso_a2 == '-99' marks features without a valid ISO country code.
        data = [{
            'iso_code': feature['properties']['iso_a2'],
            'continent': feature['properties']['continent'],
            'geom_text': json.dumps(feature['geometry']),
        } for feature in json.loads(country_data.read())['features'] if feature['properties']['iso_a2'] != '-99']

    # Leftover debug prints ("print '!!'", "print len(data)") removed.
    op.bulk_insert(countrygeom_table, data)

    # Add a geography column, populate it from the GeoJSON text, then drop
    # the now-redundant text column.
    op.execute('''
        ALTER TABLE country_geom
        ADD COLUMN geog geography(GEOMETRY,4326)
    ''')
    op.execute('''
        UPDATE country_geom
        SET geog=ST_GeomFromGeoJSON(geom_text)::geography
    ''')
    op.execute('''
        ALTER TABLE country_geom
        DROP COLUMN geom_text
    ''')
def downgrade():
    """Revert the upgrade: drop the country_geom table (and its geog column)."""
    op.execute('DROP TABLE country_geom;')
| mit |
ouspg/ridac | open-simsim/setup_device.py | 1 | 14968 | from gnuradio import gr
from gnuradio import gru
from gnuradio import usrp
from gnuradio import alibaba
import sys
class configurator:
    """Plain value holder for every radio / decoder setting.

    All numeric settings default to 0, string settings to the empty string
    and boolean flags to False; callers fill in the fields they need before
    constructing one of the flow-graph classes below.
    """

    def __init__(self):
        # USRP channel counts and interpolation/decimation factors.
        self.rx_nchan = self.tx_nchan = 0
        self.rx_interp = self.tx_interp = 0
        # Analog gains and intermediate frequencies.
        self.rx_gain = self.tx_gain = 0
        self.rx_if_freq = self.tx_if_freq = 0
        self.rx_if_freq_2 = 0
        # Transmit carrier and FSK tone settings.
        self.transmit_freq = 0
        self.tx_amplitude = 0
        self.freq_one = self.freq_zero = 0
        self.cycles_per_symbol = 0
        # Input / output files.
        self.dst_file = ""
        self.source_file = ""
        self.dst_file_reader = ""
        # Brute-force key-buffer bookkeeping.
        self.buffer_file_suffix = ""
        self.buffer_file_count = 0
        self.repeats_per_key = 0
        self.keys_in_buffer = 0
        # 13.56 MHz block splitting / decoding parameters.
        self.block_sep_val = 0
        self.char_block_sep_val_miller = 0
        self.char_block_sep_val_manchester = 0
        self.data_rate = 0
        self.blocksplitter_offset_channel_one = 0
        self.blocksplitter_offset_channel_two = 0
        self.normalizer_value = 0
        self.check_crc = False
        self.check_parity = False
        self.manchester_offset_channel_one = 0
        self.miller_offset_channel_one = 0
        self.manchester_offset_channel_two = 0
        self.miller_offset_channel_two = 0
        self.output_channel_one = 0
        self.output_channel_two = 0
        # Per-channel lowpass filter design parameters.
        self.lowpass_cutoff_channel_one = 0
        self.lowpass_transition_width_channel_one = 0
        self.lowpass_cutoff_channel_two = 0
        self.lowpass_transition_width_channel_two = 0
class my_top_block(gr.top_block):
    """Base GNU Radio flow graph shared by the reader/sender graphs below.

    Provides USRP transmit/receive setup helpers and factory methods for
    the common source/sink blocks; subclasses wire the actual chains.
    """
    def __init__ (self):
        gr.top_block.__init__(self)
        # Effective sample rates; filled in by setup_sender()/setup_receiver().
        self.tx_sampling_rate=0
        self.rx_sampling_rate=0
        #generate a int*. It is used as a shared data object between two signal processing blocks.
        self.send_state=alibaba.new_intp()
        alibaba.intp_assign(self.send_state,0)
    def setup_sender(self, number_channels, tx_interp, tx_gain, tune_frequency):
        """Configure the USRP transmit path (interp rate, tune, gain, mux)."""
        #64 Mhz is the frequency of the ADC, reduced by the decimation of the DDC
        #the DAC in the transmit path is operated with 128MhZ. Samples are interpolated according to the interp- variable.
        self.tx_sampling_rate=128e6/tx_interp
        # get USRP transmitter
        self.tx = usrp.sink_c (0, tx_interp)
        self.tx_subdev = usrp.selected_subdev(self.tx, (0,0))
        self.tx.tune(0, self.tx_subdev, tune_frequency)
        self.tx.set_pga(0,tx_gain)
        self.tx.set_mux(gru.hexint(0x98))
    def setup_receiver(self, number_channels, rx_interp, rx_gain, tune_frequency_channel_one, tune_frequency_channel_two):
        """Configure the USRP receive path for one or two DDC channels."""
        self.rx_sampling_rate=64e6/rx_interp
        self.rx = usrp.source_c(decim_rate=rx_interp, nchan=number_channels)
        self.rx_subdev = usrp.selected_subdev(self.rx, (0,0))
        self.rx.tune(0, self.rx_subdev, tune_frequency_channel_one)
        # The second DDC channel is only tuned when two channels are requested.
        if number_channels == 2:
            self.rx.tune(1, self.rx_subdev, tune_frequency_channel_two)
        self.rx.set_pga(0,rx_gain)
        self.rx.set_mux(gru.hexint(0xf0f0f0f0))
    def generate_file_sink(self, size, dst_filename):
        """Return a gr.file_sink writing items of the given size to dst_filename."""
        #generate signal source and receive sink (head is used to reduce the number of samples)
        sink = gr.file_sink(size, dst_filename)
        return sink
    def generate_file_source(self,size, filename, repeat):
        """Return a gr.file_source reading items of the given size from filename."""
        src = gr.file_source(size, filename,repeat)
        return src
    def generate_signal_source(self, waveform, frequency, amplitude):
        """Return a complex signal source running at the current TX sample rate."""
        siggen = gr.sig_source_c (self.tx_sampling_rate,waveform,frequency,amplitude,0)
        return siggen
class my_top_block_125(my_top_block):
    """Flow graph for a 125 kHz FSK transponder reader.

    Transmits a continuous sine carrier while demodulating the tag's FSK
    response; validated IDs are written to config.dst_file.
    """
    def __init__ (self, config):
        my_top_block.__init__(self)
        self.tx_freqency= config.transmit_freq
        self.frequency_one=config.freq_one
        self.frequency_zero=config.freq_zero
        self.dst= self.generate_file_sink(gr.sizeof_char, config.dst_file)
        self.setup_sender(config.tx_nchan, config.tx_interp, config.tx_gain, config.tx_if_freq)
        self.setup_receiver(config.rx_nchan, config.rx_interp, config.rx_gain, config.rx_if_freq,0)
        self.siggen= self.generate_signal_source(gr.GR_SIN_WAVE,self.tx_freqency,config.tx_amplitude)
        self.configure_graph()
    def configure_graph(self):
        """Wire the receive chain (lowpass -> FSK demod -> ID check) and the carrier TX chain."""
        # The lowpass design tracks the higher of the two FSK tones.
        if self.frequency_one > self.frequency_zero:
            highest_frequency=self.frequency_one
        else:
            highest_frequency=self.frequency_zero
        lowpass=gr.firdes.low_pass(1,self.rx_sampling_rate, highest_frequency*1.1, highest_frequency*1.2, gr.firdes.WIN_HAMMING)
        fir_low= gr.fir_filter_fff (1,lowpass)
        demodulator=alibaba.fsk_demodulator_fb(self.rx_sampling_rate,self.frequency_one,self.frequency_zero)
        symbol_combiner=alibaba.combine_symbols_bb(3,7)
        #create a vector with the start sequence of the ID
        start_sequence = alibaba.CharVector(6)
        start_sequence[0]=0
        start_sequence[1]=0
        start_sequence[2]=0
        start_sequence[3]=1
        start_sequence[4]=1
        start_sequence[5]=1
        #this module will ensure, that an id is valid (id is found ten times)
        self.check=alibaba.checkid_bb(start_sequence, 1)
        #bring the vector in readable form...
        binarytocharconverter=alibaba.char_converter_bch()
        #convert the complex signal in a signal represented by float values
        floatconverter=gr.complex_to_float();
        #connect receive path
        self.connect(self.rx, floatconverter, fir_low,demodulator,symbol_combiner,self.check, binarytocharconverter, self.dst)
        #connect transmit path
        self.connect(self.siggen,self.tx)
class my_top_block_125_send(my_top_block):
    """Flow graph that endlessly replays tag data from a file as a 125 kHz FSK signal."""
    def __init__ (self,config):
        my_top_block.__init__(self)
        self.setup_sender(
            config.tx_nchan, config.tx_interp,
            config.tx_gain,
            config.tx_if_freq)
        # Symbol bytes drive a controlled two-tone (freq_one/freq_zero) source.
        self.sigsource=alibaba.controlled_signalsource_bc(
            self.tx_sampling_rate,
            config.freq_one,
            config.freq_zero,
            config.tx_amplitude,
            0,
            config.cycles_per_symbol)
        #instantiate and connect transmit path (repeat=True: loop the file forever)
        self.src = self.generate_file_source(gr.sizeof_char, config.source_file,True)
        self.connect(self.src, self.sigsource, self.tx)
class my_top_block_125_send_brute(my_top_block):
    """Brute-force sender: cycles through pre-generated key-buffer files,
    repeating each 96-symbol key config.repeats_per_key times."""
    def __init__ (self,config):
        my_top_block.__init__(self)
        self.setup_sender(
            config.tx_nchan, config.tx_interp,
            config.tx_gain,
            config.tx_if_freq)
        self.sigsource=alibaba.controlled_signalsource_bc(
            self.tx_sampling_rate,
            config.freq_one,
            config.freq_zero,
            config.tx_amplitude,
            0,
            config.cycles_per_symbol)
        self.repeater = alibaba.sequence_repeater_bb()
        self.repeater.set_repeat_count(config.repeats_per_key)
        # One key is 96 symbols long.
        self.repeater.set_sequence_length(96)
        self.bsuffix = config.buffer_file_suffix
        self.buffers = config.buffer_file_count
        self.keys_in_buffer = config.keys_in_buffer
        self.src = alibaba.file_ring_source(gr.sizeof_char)
        # Register each buffer file exactly once (repeat=False).
        for fnam in self.yieldTmpFiles(False):
            self.src.append_filename(fnam)
        self.connect(self.src, self.repeater, self.sigsource, self.tx)
    def yieldTmpFiles(self, repeat = True):
        """Yield buffer file names "0<suffix>", "1<suffix>", ...; endlessly when repeat is True."""
        for i in (range(self.buffers)):
            yield "%d%s"%(i,self.bsuffix)
        # With repeat=True this loops forever, re-yielding the same cycle.
        while repeat:
            for i in (range(self.buffers)):
                yield "%d%s"%(i,self.bsuffix)
class my_top_block_1356(my_top_block):
    """Receive-only flow graph for 13.56 MHz RFID traffic.

    Two receive channels are decoded in parallel: channel one carries the
    tag side, channel two the (much louder) reader side. Each chain is
    square -> lowpass -> block splitter -> normalizer -> Miller/Manchester
    decoder, with decoded bytes written as hex to separate output files.
    """
    def __init__ (self, config):
        my_top_block.__init__(self)
        # Copy all decoder parameters out of the configurator object.
        self.separation_value=config.block_sep_val
        self.separation_value_miller=config.char_block_sep_val_miller
        self.separation_value_manchester=config.char_block_sep_val_manchester
        self.data_rate=config.data_rate
        self.block_splitter_offset_channel_one=config.blocksplitter_offset_channel_one
        self.block_splitter_offset_channel_two=config.blocksplitter_offset_channel_two
        self.normalizer_value=config.normalizer_value
        self.check_crc=config.check_crc
        self.check_parity=config.check_parity
        self.manchester_offset_channel_one=config.manchester_offset_channel_one
        self.miller_offset_channel_one=config.miller_offset_channel_one
        self.manchester_offset_channel_two=config.manchester_offset_channel_two
        self.miller_offset_channel_two=config.miller_offset_channel_two
        self.output_channel_one=config.output_channel_one
        self.output_channel_two=config.output_channel_two
        self.lowpass_cutoff_channel_one=config.lowpass_cutoff_channel_one
        self.lowpass_transition_width_channel_one=config.lowpass_transition_width_channel_one
        self.lowpass_cutoff_channel_two=config.lowpass_cutoff_channel_two
        self.lowpass_transition_width_channel_two=config.lowpass_transition_width_channel_two
        self.setup_receiver(config.rx_nchan, config.rx_interp, config.rx_gain, config.rx_if_freq,config.rx_if_freq_2)
        #self.dst_data= self.generate_file_sink(gr.sizeof_float, config.dst_file)
        self.dst_data= self.generate_file_sink_hex(config.dst_file, self.separation_value_miller, self.separation_value_manchester, True)
        self.dst_reader=self.generate_file_sink_hex(config.dst_file_reader, self.separation_value_miller, self.separation_value_manchester, True)
        self.configure_graph()
    def generate_file_sink_hex(self, filename, separation_value_miller, separation_value_manchester, separate_by_space):
        """Return an alibaba hex-formatting file sink for decoded bytes."""
        data = alibaba.file_sink_hex(gr.sizeof_char, filename, separation_value_miller, separation_value_manchester, separate_by_space)
        return data
    def configure_graph (self):
        """Build the two parallel decode chains and connect them to the deinterleaved RX stream."""
        float_converter=gr.complex_to_float()
        float_converter_reader=gr.complex_to_float()
        # Eight symbol periods of samples count as "silence" between blocks.
        zero_count=8*int(round(self.rx_sampling_rate/self.data_rate))
        splitter_reader=alibaba.blocksplitter_ff(self.block_splitter_offset_channel_two, zero_count, self.separation_value) # for Iffreq=0! Since the reader speaks so much louder, it is possible to cut out the tag part by taking a offset value that is high enough!
        splitter=alibaba.blocksplitter_ff(self.block_splitter_offset_channel_one, zero_count, self.separation_value)
        square=alibaba.square_ff()
        square_reader=alibaba.square_ff()
        normalizer=alibaba.normalizer_ff(self.separation_value, self.normalizer_value) #1000 is the value we want to normalize to...
        normalizer_reader=alibaba.normalizer_ff(self.separation_value, self.normalizer_value) #1000 is the value we want to normalize to...
        self.tmp=alibaba.new_intp() #Note: just a dummy pointer passed to mimadecoder. not actually used...
        alibaba.intp_assign(self.tmp,0)
        # Tag-side Miller/Manchester decoder (gets the dummy state pointer).
        mimadecoder=alibaba.mimadecoder_fb(
            self.rx_sampling_rate,
            self.data_rate,
            self.miller_offset_channel_one,
            self.manchester_offset_channel_one,
            self.output_channel_one,
            self.separation_value,
            self.separation_value_miller,
            self.separation_value_manchester,
            self.check_parity,
            self.check_crc,
            self.tmp)
        # Reader-side decoder shares self.send_state with a possible TX path.
        mimadecoder_reader=alibaba.mimadecoder_fb(
            self.rx_sampling_rate,
            self.data_rate,
            self.miller_offset_channel_two,
            self.manchester_offset_channel_two,
            self.output_channel_two,
            self.separation_value,
            self.separation_value_miller,
            self.separation_value_manchester,
            self.check_parity,
            self.check_crc,
            self.send_state)
        lowpass=gr.firdes.low_pass(
            1,
            self.rx_sampling_rate,
            self.lowpass_cutoff_channel_one,
            self.lowpass_transition_width_channel_one,
            gr.firdes.WIN_HAMMING)
        fir_low= gr.fir_filter_fff (1,lowpass)
        lowpass_reader=gr.firdes.low_pass(
            1,
            self.rx_sampling_rate,
            self.lowpass_cutoff_channel_two,
            self.lowpass_transition_width_channel_two,
            gr.firdes.WIN_HAMMING)
        fir_low_reader= gr.fir_filter_fff (1,lowpass_reader)
        # Split the interleaved two-channel RX stream into its two chains.
        di = gr.deinterleave(gr.sizeof_gr_complex)
        self.connect(self.rx, di)
        self.connect((di,0),
            float_converter,
            square,
            fir_low,
            splitter,
            normalizer,
            mimadecoder,
            self.dst_data)
        self.connect((di,1),
            float_converter_reader,
            square_reader,
            fir_low_reader,
            splitter_reader,
            normalizer_reader,
            mimadecoder_reader,
            self.dst_reader)
        #self.connect((di,1), float_converter, splitter, self.dst_data)
class my_top_block_1356_transponder(my_top_block_1356):
    """Extends the 13.56 MHz receiver with a tag-emulation transmit path."""
    def __init__ (self, config):
        my_top_block_1356.__init__(self,config)
        self.data_rate=config.data_rate
        self.amplitude=config.tx_amplitude
        self.source_file=config.source_file
        self.setup_sender(config.tx_nchan, config.tx_interp, config.tx_gain, config.tx_if_freq)
        self.configure_graph_send()
    def configure_graph_send(self):
        """Connect the tag signal source (driven by self.send_state) to the USRP TX."""
        complexconverter=gr.float_to_complex()
        print self.source_file
        # Experimental alternatives kept for reference:
        #data= (2,2,0,0,1,1,0,0,1,0,0,2,2)
        #data= (2,2,1,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,1,2,2)
        #data= (0,0,0,0,0,0,0,0,0,0,0,0,0,0)
        #datasource = gr.vector_source_b (data,True)
        #self.siggen=alibaba.readersource_bf(self.tx_sampling_rate, self.data_rate, self.amplitude)
        self.tagsiggen=alibaba.tagsource_bf(self.tx_sampling_rate, self.data_rate, self.amplitude,self.send_state, self.source_file)
        self.connect(self.tagsiggen, complexconverter, self.tx)
        #self.connect(datasource, self.siggen, complexconverter, self.tx)
| gpl-3.0 |
lightcn/odoo | addons/l10n_fr/__openerp__.py | 260 | 3479 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# Odoo/OpenERP module manifest for the French accounting localisation.
{
    'name': 'France - Accounting',
    'version': '1.1',
    'author': 'OpenERP SA',
    'website': 'http://www.openerp.com',
    'category': 'Localization/Account Charts',
    'description': """
This is the module to manage the accounting chart for France in OpenERP.
========================================================================
This module applies to companies based in France mainland. It doesn't apply to
companies based in the DOM-TOMs (Guadeloupe, Martinique, Guyane, Réunion, Mayotte).
This localisation module creates the VAT taxes of type 'tax included' for purchases
(it is notably required when you use the module 'hr_expense'). Beware that these
'tax included' VAT taxes are not managed by the fiscal positions provided by this
module (because it is complex to manage both 'tax excluded' and 'tax included'
scenarios in fiscal positions).
This localisation module doesn't properly handle the scenario when a France-mainland
company sells services to a company based in the DOMs. We could manage it in the
fiscal positions, but it would require to differentiate between 'product' VAT taxes
and 'service' VAT taxes. We consider that it is too 'heavy' to have this by default
in l10n_fr; companies that sell services to DOM-based companies should update the
configuration of their taxes and fiscal positions manually.
**Credits:** Sistheo, Zeekom, CrysaLEAD, Akretion and Camptocamp.
""",
    # Modules that must be installed before this one.
    'depends': ['base_iban', 'account', 'account_chart', 'base_vat'],
    # Data files loaded on installation/update.
    'data': [
        'views/report_l10nfrbilan.xml',
        'views/report_l10nfrresultat.xml',
        'l10n_fr_reports.xml',
        'fr_report.xml',
        'plan_comptable_general.xml',
        'l10n_fr_view.xml',
        'l10n_fr_wizard.xml',
        'fr_pcg_taxes.xml',
        'fr_tax.xml',
        'fr_fiscal_templates.xml',
        'security/ir.model.access.csv',
        'wizard/fr_report_bilan_view.xml',
        'wizard/fr_report_compute_resultant_view.xml',
    ],
    'test': ['test/l10n_fr_report.yml'],
    'demo': [],
    'auto_install': False,
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
paaschpa/badcomputering | docutils/languages/sv.py | 141 | 2072 | # $Id: sv.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Adam Chodorowski <chodorowski@users.sourceforge.net>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Swedish language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
# Label strings shown in generated documents, keyed by Docutils node
# class name.
labels = {
      'author': u'F\u00f6rfattare',
      'authors': u'F\u00f6rfattare',
      'organization': u'Organisation',
      'address': u'Adress',
      'contact': u'Kontakt',
      'version': u'Version',
      'revision': u'Revision',
      'status': u'Status',
      'date': u'Datum',
      'copyright': u'Copyright',
      'dedication': u'Dedikation',
      'abstract': u'Sammanfattning',
      'attention': u'Observera!',
      'caution': u'Varning!',
      'danger': u'FARA!',
      'error': u'Fel',
      'hint': u'V\u00e4gledning',
      'important': u'Viktigt',
      'note': u'Notera',
      'tip': u'Tips',
      'warning': u'Varning',
      'contents': u'Inneh\u00e5ll' }
"""Mapping of node class name to label text."""

bibliographic_fields = {
    # 'Author' and 'Authors' identical in Swedish; assume the plural:
    u'f\u00f6rfattare': 'authors',
    # Apparent placeholder so the singular 'author' still has an entry;
    # presumably the leading space keeps it from matching real input.
    u' n/a': 'author',
    u'organisation': 'organization',
    u'adress': 'address',
    u'kontakt': 'contact',
    u'version': 'version',
    u'revision': 'revision',
    u'status': 'status',
    u'datum': 'date',
    u'copyright': 'copyright',
    u'dedikation': 'dedication',
    u'sammanfattning': 'abstract' }
"""Swedish (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| bsd-3-clause |
LarsFronius/ansible | lib/ansible/modules/cloud/openstack/os_router.py | 22 | 14110 | #!/usr/bin/python
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_router
short_description: Create or delete routers from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Create or Delete routers from OpenStack. Although Neutron allows
routers to share the same name, this module enforces name uniqueness
to be more user friendly.
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be give to the router
required: true
admin_state_up:
description:
- Desired admin state of the created or existing router.
required: false
default: true
enable_snat:
description:
- Enable Source NAT (SNAT) attribute.
required: false
default: true
network:
description:
- Unique name or ID of the external gateway network.
- Required when I(interfaces) or I(enable_snat) are provided.
required: false
default: None
project:
description:
- Unique name or ID of the project.
required: false
default: None
version_added: "2.2"
external_fixed_ips:
description:
- The IP address parameters for the external gateway network. Each
is a dictionary with the subnet name or ID (subnet) and the IP
address to assign on the subnet (ip). If no IP is specified,
one is automatically assigned from that subnet.
required: false
default: None
interfaces:
description:
- List of subnets to attach to the router internal interface.
required: false
default: None
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a simple router, not attached to a gateway or subnets.
- os_router:
cloud: mycloud
state: present
name: simple_router
# Create a simple router, not attached to a gateway or subnets for a given project.
- os_router:
cloud: mycloud
state: present
name: simple_router
project: myproj
# Creates a router attached to ext_network1 on an IPv4 subnet and one
# internal subnet interface.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
interfaces:
- private-subnet
# Update existing router1 external gateway to include the IPv6 subnet.
# Note that since 'interfaces' is not provided, any existing internal
# interfaces on an existing router will be left intact.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
- subnet: ipv6-public-subnet
ip: 2001:db8::3
# Delete router1
- os_router:
cloud: mycloud
state: absent
name: router1
'''
RETURN = '''
router:
description: Dictionary describing the router.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Router ID.
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
name:
description: Router name.
type: string
sample: "router1"
admin_state_up:
description: Administrative state of the router.
type: boolean
sample: true
status:
description: The router status.
type: string
sample: "ACTIVE"
tenant_id:
description: The tenant ID.
type: string
sample: "861174b82b43463c9edc5202aadc60ef"
external_gateway_info:
description: The external gateway parameters.
type: dictionary
sample: {
"enable_snat": true,
"external_fixed_ips": [
{
"ip_address": "10.6.6.99",
"subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
}
]
}
routes:
description: The extra routes configuration for L3 router.
type: list
'''
# Import shade lazily-tolerant: absence is reported cleanly from main()
# via fail_json instead of a traceback at import time.
try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False
# StrictVersion is used in main() to gate the 'project' parameter on shade > 1.9.0.
from distutils.version import StrictVersion
def _needs_update(cloud, module, router, network, internal_subnet_ids):
    """Decide if the given router needs an update.

    Compares the requested module parameters against the existing router's
    admin state, SNAT flag, gateway network, external fixed IPs and internal
    interface subnets.  Returns True at the first difference found,
    otherwise False.

    NOTE(review): the external_fixed_ips branch dereferences
    router['external_gateway_info'] directly; main() requires a network
    whenever external_fixed_ips is supplied, but a router whose gateway was
    cleared out-of-band could still make this raise -- confirm.
    """
    if router['admin_state_up'] != module.params['admin_state_up']:
        return True
    if router['external_gateway_info']:
        # enable_snat defaults to True on the OpenStack side when absent.
        if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']:
            return True
    if network:
        if not router['external_gateway_info']:
            return True
        elif router['external_gateway_info']['network_id'] != network['id']:
            return True
    # check external interfaces
    if module.params['external_fixed_ips']:
        for new_iface in module.params['external_fixed_ips']:
            subnet = cloud.get_subnet(new_iface['subnet'])
            exists = False
            # compare the requested interface with existing, looking for an existing match
            for existing_iface in router['external_gateway_info']['external_fixed_ips']:
                if existing_iface['subnet_id'] == subnet['id']:
                    if 'ip' in new_iface:
                        if existing_iface['ip_address'] == new_iface['ip']:
                            # both subnet id and ip address match
                            exists = True
                            break
                    else:
                        # only the subnet was given, so ip doesn't matter
                        exists = True
                        break
            # this interface isn't present on the existing router
            if not exists:
                return True
    # check internal interfaces
    if module.params['interfaces']:
        existing_subnet_ids = []
        for port in cloud.list_router_interfaces(router, 'internal'):
            if 'fixed_ips' in port:
                for fixed_ip in port['fixed_ips']:
                    existing_subnet_ids.append(fixed_ip['subnet_id'])
        # Any difference in the subnet membership (order-insensitive) means update.
        if set(internal_subnet_ids) != set(existing_subnet_ids):
            return True
    return False
def _system_state_change(cloud, module, router, network, internal_ids):
"""Check if the system state would be changed."""
state = module.params['state']
if state == 'absent' and router:
return True
if state == 'present':
if not router:
return True
return _needs_update(cloud, module, router, network, internal_ids)
return False
def _build_kwargs(cloud, module, router, network):
kwargs = {
'admin_state_up': module.params['admin_state_up'],
}
if router:
kwargs['name_or_id'] = router['id']
else:
kwargs['name'] = module.params['name']
if network:
kwargs['ext_gateway_net_id'] = network['id']
# can't send enable_snat unless we have a network
kwargs['enable_snat'] = module.params['enable_snat']
if module.params['external_fixed_ips']:
kwargs['ext_fixed_ips'] = []
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
d = {'subnet_id': subnet['id']}
if 'ip' in iface:
d['ip_address'] = iface['ip']
kwargs['ext_fixed_ips'].append(d)
return kwargs
def _validate_subnets(module, cloud):
external_subnet_ids = []
internal_subnet_ids = []
if module.params['external_fixed_ips']:
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
if not subnet:
module.fail_json(msg='subnet %s not found' % iface['subnet'])
external_subnet_ids.append(subnet['id'])
if module.params['interfaces']:
for iface in module.params['interfaces']:
subnet = cloud.get_subnet(iface)
if not subnet:
module.fail_json(msg='subnet %s not found' % iface)
internal_subnet_ids.append(subnet['id'])
return external_subnet_ids, internal_subnet_ids
def main():
    """Ansible entry point: converge an OpenStack router to the desired state.

    Reads the module parameters, looks up the existing router (optionally
    scoped to a project), and creates/updates/deletes it via shade.
    Exits through module.exit_json / module.fail_json.

    Fix: the shade-version error message was built from two adjacent string
    literals with no separating space ("...version ofthe shade...").
    """
    argument_spec = openstack_full_argument_spec(
        state=dict(default='present', choices=['absent', 'present']),
        name=dict(required=True),
        admin_state_up=dict(type='bool', default=True),
        enable_snat=dict(type='bool', default=True),
        network=dict(default=None),
        interfaces=dict(type='list', default=None),
        external_fixed_ips=dict(type='list', default=None),
        project=dict(default=None)
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    # Project scoping needs shade > 1.9.0 (get_project / tenant filters).
    if (module.params['project'] and
            StrictVersion(shade.__version__) <= StrictVersion('1.9.0')):
        module.fail_json(msg="To utilize project, the installed version of "
                             "the shade library MUST be > 1.9.0")
    state = module.params['state']
    name = module.params['name']
    network = module.params['network']
    project = module.params['project']
    if module.params['external_fixed_ips'] and not network:
        module.fail_json(msg='network is required when supplying external_fixed_ips')
    try:
        cloud = shade.openstack_cloud(**module.params)
        if project is not None:
            proj = cloud.get_project(project)
            if proj is None:
                module.fail_json(msg='Project %s could not be found' % project)
            project_id = proj['id']
            filters = {'tenant_id': project_id}
        else:
            project_id = None
            filters = None
        router = cloud.get_router(name, filters=filters)
        net = None
        if network:
            net = cloud.get_network(network)
            if not net:
                module.fail_json(msg='network %s not found' % network)
        # Validate and cache the subnet IDs so we can avoid duplicate checks
        # and expensive API calls.
        external_ids, internal_ids = _validate_subnets(module, cloud)
        if module.check_mode:
            module.exit_json(
                changed=_system_state_change(cloud, module, router, net, internal_ids)
            )
        if state == 'present':
            changed = False
            if not router:
                kwargs = _build_kwargs(cloud, module, router, net)
                if project_id:
                    kwargs['project_id'] = project_id
                router = cloud.create_router(**kwargs)
                for internal_subnet_id in internal_ids:
                    cloud.add_router_interface(router, subnet_id=internal_subnet_id)
                changed = True
            else:
                if _needs_update(cloud, module, router, net, internal_ids):
                    kwargs = _build_kwargs(cloud, module, router, net)
                    updated_router = cloud.update_router(**kwargs)
                    # Protect against update_router() not actually
                    # updating the router.
                    if not updated_router:
                        changed = False
                    # On a router update, if any internal interfaces were supplied,
                    # just detach all existing internal interfaces and attach the new.
                    elif internal_ids:
                        router = updated_router
                        ports = cloud.list_router_interfaces(router, 'internal')
                        for port in ports:
                            cloud.remove_router_interface(router, port_id=port['id'])
                        for internal_subnet_id in internal_ids:
                            cloud.add_router_interface(router, subnet_id=internal_subnet_id)
                        changed = True
            module.exit_json(changed=changed,
                             router=router,
                             id=router['id'])
        elif state == 'absent':
            if not router:
                module.exit_json(changed=False)
            else:
                # We need to detach all internal interfaces on a router before
                # we will be allowed to delete it.
                ports = cloud.list_router_interfaces(router, 'internal')
                router_id = router['id']
                for port in ports:
                    cloud.remove_router_interface(router, port_id=port['id'])
                cloud.delete_router(router_id)
                module.exit_json(changed=True)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
# The star imports inject AnsibleModule, openstack_full_argument_spec and
# openstack_module_kwargs used by main() above.
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
BhallaLab/moose-full | moose-examples/snippets/interpol2d.py | 2 | 1991 | # interpol2d.py ---
#
# Filename: interpol2d.py
# Description:
# Author:
# Maintainer:
# Created: Thu Jun 28 15:19:46 2012 (+0530)
# Version:
# Last-Updated: Thu Jun 28 17:11:42 2012 (+0530)
# By: subha
# Update #: 49
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import numpy as np
import sys
sys.path.append('../../python')
import moose
def interpolation_demo():
    """Demonstrate moose.Interpol2D lookups on a 10x5 table over [0,1]x[0,1].

    Python 2 script; requires the moose simulator package.
    """
    interpol = moose.Interpol2D('/interpol2D')
    interpol.xmin = 0.0
    interpol.xmax = 1.0
    interpol.ymin = 0.0
    interpol.ymax = 1.0
    # Make a 50 element array with entries at equal distance from
    # [0,1) and reshape it into a 10x5 matrix and assign to table.
    matrix = np.linspace(0, 1.0, 50).reshape(10, 5)
    print 'Setting table to'
    print matrix
    interpol.tableVector2D = matrix
    # Interpolate at an in-range point (0.8, 0.3); moose maps the
    # coordinates onto the table grid. (Original comment claimed this was
    # "beyond top left corner", which does not match the value used.)
    pos = (0.8, 0.3)
    print 'Interpolated value at', pos
    print interpol.z[pos[0], pos[1]]
    print 'Point going out of bound on both x and y', interpol.z[1.1, 1.1]
    # NOTE(review): the message below also says "both x and y" but only y
    # (1.1) is out of range here; it is a runtime string, left unchanged.
    print 'Point going out of bound on both x and y', interpol.z[0.5, 1.1]
if __name__ == '__main__':
    interpolation_demo()
#
# interpol2d.py ends here
| gpl-2.0 |
kelle/astropy | astropy/tests/test_logger.py | 1 | 15399 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import imp
import sys
import warnings
import pytest
from .helper import catch_warnings
from .. import log
from ..logger import LoggingError, conf
from ..utils.exceptions import AstropyWarning, AstropyUserWarning
# Save original values of hooks. These are not the system values, but the
# already overwritten values since the logger already gets imported before
# this file gets executed.
_excepthook = sys.__excepthook__
_showwarning = warnings.showwarning
# Detect whether we run inside IPython: several tests below skip/xfail when
# an interactive shell owns the exception hook.
try:
    ip = get_ipython()
except NameError:
    ip = None
def setup_function(function):
    """Reset warnings/sys hooks and the astropy logger before each test."""
    # Reset modules to default
    imp.reload(warnings)
    imp.reload(sys)
    # Reset internal original hooks
    log._showwarning_orig = None
    log._excepthook_orig = None
    # Set up the logger
    log._set_defaults()
    # Reset hooks
    if log.warnings_logging_enabled():
        log.disable_warnings_logging()
    if log.exception_logging_enabled():
        log.disable_exception_logging()
# Run the same reset once more when the module finishes.
teardown_module = setup_function
def test_warnings_logging_disable_no_enable():
    """Disabling without enabling first must raise LoggingError."""
    with pytest.raises(LoggingError) as e:
        log.disable_warnings_logging()
    assert e.value.args[0] == 'Warnings logging has not been enabled'
def test_warnings_logging_enable_twice():
    """Enabling twice in a row must raise LoggingError."""
    log.enable_warnings_logging()
    with pytest.raises(LoggingError) as e:
        log.enable_warnings_logging()
    assert e.value.args[0] == 'Warnings logging has already been enabled'
def test_warnings_logging_overridden():
    """If someone else replaced warnings.showwarning, disabling must fail."""
    log.enable_warnings_logging()
    warnings.showwarning = lambda: None
    with pytest.raises(LoggingError) as e:
        log.disable_warnings_logging()
    assert e.value.args[0] == 'Cannot disable warnings logging: warnings.showwarning was not set by this logger, or has been overridden'
def test_warnings_logging():
    """Warnings go to the log only while warnings logging is enabled,
    and only AstropyWarning subclasses are captured."""
    # Without warnings logging
    with catch_warnings() as warn_list:
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
    assert len(log_list) == 0
    assert len(warn_list) == 1
    assert warn_list[0].message.args[0] == "This is a warning"
    # With warnings logging
    with catch_warnings() as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 0
    assert log_list[0].levelname == 'WARNING'
    assert log_list[0].message.startswith('This is a warning')
    assert log_list[0].origin == 'astropy.tests.test_logger'
    # With warnings logging (differentiate between Astropy and non-Astropy)
    with catch_warnings() as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
            warnings.warn("This is another warning, not from Astropy")
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 1
    assert log_list[0].levelname == 'WARNING'
    assert log_list[0].message.startswith('This is a warning')
    assert log_list[0].origin == 'astropy.tests.test_logger'
    assert warn_list[0].message.args[0] == "This is another warning, not from Astropy"
    # Without warnings logging
    with catch_warnings() as warn_list:
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
    assert len(log_list) == 0
    assert len(warn_list) == 1
    assert warn_list[0].message.args[0] == "This is a warning"
def test_warnings_logging_with_custom_class():
    """AstropyWarning subclasses get their class name prefixed in the log."""
    class CustomAstropyWarningClass(AstropyWarning):
        pass
    # With warnings logging
    with catch_warnings() as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", CustomAstropyWarningClass)
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 0
    assert log_list[0].levelname == 'WARNING'
    assert log_list[0].message.startswith('CustomAstropyWarningClass: This is a warning')
    assert log_list[0].origin == 'astropy.tests.test_logger'
def test_warning_logging_with_io_votable_warning():
    """io.votable's vo_warn warnings are captured like regular warnings."""
    from ..io.votable.exceptions import W02, vo_warn
    with catch_warnings() as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            vo_warn(W02, ('a', 'b'))
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 0
    assert log_list[0].levelname == 'WARNING'
    x = log_list[0].message.startswith(("W02: ?:?:?: W02: a attribute 'b' is "
                                        "invalid. Must be a standard XML id"))
    assert x
    assert log_list[0].origin == 'astropy.tests.test_logger'
def test_import_error_in_warning_logging():
    """
    Regression test for https://github.com/astropy/astropy/issues/2671

    This test actually puts a goofy fake module into ``sys.modules`` to test
    this problem.  The fake module raises ImportError on any attribute
    access; _showwarning must not let that propagate.
    """
    class FakeModule(object):
        def __getattr__(self, attr):
            raise ImportError('_showwarning should ignore any exceptions '
                              'here')
    log.enable_warnings_logging()
    sys.modules['<test fake module>'] = FakeModule()
    try:
        warnings.showwarning(AstropyWarning('Regression test for #2671'),
                             AstropyWarning, '<this is only a test>', 1)
    finally:
        # Always remove the fake module so later tests are unaffected.
        del sys.modules['<test fake module>']
def test_exception_logging_disable_no_enable():
    """Disabling exception logging before enabling it raises LoggingError."""
    with pytest.raises(LoggingError) as e:
        log.disable_exception_logging()
    assert e.value.args[0] == 'Exception logging has not been enabled'
def test_exception_logging_enable_twice():
    """Enabling exception logging twice in a row raises LoggingError."""
    log.enable_exception_logging()
    with pytest.raises(LoggingError) as e:
        log.enable_exception_logging()
    assert e.value.args[0] == 'Exception logging has already been enabled'
# You can't really override the exception handler in IPython this way, so
# this test doesn't really make sense in the IPython context.
@pytest.mark.skipif(str("ip is not None"))
def test_exception_logging_overridden():
    """If someone else replaced sys.excepthook, disabling must fail."""
    log.enable_exception_logging()
    sys.excepthook = lambda etype, evalue, tb: None
    with pytest.raises(LoggingError) as e:
        log.disable_exception_logging()
    assert e.value.args[0] == 'Cannot disable exception logging: sys.excepthook was not set by this logger, or has been overridden'
@pytest.mark.xfail(str("ip is not None"))
def test_exception_logging():
    """Exceptions reach the log only while exception logging is enabled;
    sys.excepthook is invoked manually since pytest swallows the raise."""
    # Without exception logging
    try:
        with log.log_to_list() as log_list:
            raise Exception("This is an Exception")
    except Exception as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0] == "This is an Exception"
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 0
    # With exception logging
    try:
        log.enable_exception_logging()
        with log.log_to_list() as log_list:
            raise Exception("This is an Exception")
    except Exception as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0] == "This is an Exception"
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 1
    assert log_list[0].levelname == 'ERROR'
    assert log_list[0].message.startswith('Exception: This is an Exception')
    assert log_list[0].origin == 'astropy.tests.test_logger'
    # Without exception logging
    log.disable_exception_logging()
    try:
        with log.log_to_list() as log_list:
            raise Exception("This is an Exception")
    except Exception as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0] == "This is an Exception"
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 0
@pytest.mark.xfail(str("ip is not None"))
def test_exception_logging_origin():
    # The point here is to get an exception raised from another location
    # and make sure the error's origin is reported correctly
    from ..utils.collections import HomogeneousList
    l = HomogeneousList(int)
    try:
        log.enable_exception_logging()
        with log.log_to_list() as log_list:
            # Appending a str to an int-typed list raises TypeError inside
            # astropy.utils.collections, which becomes the logged origin.
            l.append('foo')
    except TypeError as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0].startswith(
            "homogeneous list must contain only objects of type ")
    else:
        assert False
    assert len(log_list) == 1
    assert log_list[0].levelname == 'ERROR'
    assert log_list[0].message.startswith(
        "TypeError: homogeneous list must contain only objects of type ")
    assert log_list[0].origin == 'astropy.utils.collections'
@pytest.mark.skipif("sys.version_info[:2] >= (3, 5)",
                    reason="Infinite recursion on Python 3.5")
@pytest.mark.xfail(str("ip is not None"))
def test_exception_logging_argless_exception():
    """
    Regression test for a crash that occurred on Python 3 when logging an
    exception that was instantiated with no arguments (no message, etc.)

    Regression test for https://github.com/astropy/astropy/pull/4056
    """
    try:
        log.enable_exception_logging()
        with log.log_to_list() as log_list:
            raise Exception()
    except Exception as exc:
        sys.excepthook(*sys.exc_info())
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 1
    assert log_list[0].levelname == 'ERROR'
    # An argless exception is logged as its class name plus the origin.
    assert log_list[0].message == 'Exception [astropy.tests.test_logger]'
    assert log_list[0].origin == 'astropy.tests.test_logger'
@pytest.mark.parametrize(('level'), [None, 'DEBUG', 'INFO', 'WARN', 'ERROR'])
def test_log_to_list(level):
    """log_to_list captures exactly the records at/above the active level,
    in emission order ERROR, WARNING, INFO, DEBUG."""
    orig_level = log.level
    try:
        if level is not None:
            log.setLevel(level)
        with log.log_to_list() as log_list:
            log.error("Error message")
            log.warning("Warning message")
            log.info("Information message")
            log.debug("Debug message")
    finally:
        # Restore the previous level even if an assertion above fails.
        log.setLevel(orig_level)
    if level is None:
        # The log level *should* be set to whatever it was in the config
        level = conf.log_level
    # Check list length
    if level == 'DEBUG':
        assert len(log_list) == 4
    elif level == 'INFO':
        assert len(log_list) == 3
    elif level == 'WARN':
        assert len(log_list) == 2
    elif level == 'ERROR':
        assert len(log_list) == 1
    # Check list content
    assert log_list[0].levelname == 'ERROR'
    assert log_list[0].message.startswith('Error message')
    assert log_list[0].origin == 'astropy.tests.test_logger'
    if len(log_list) >= 2:
        assert log_list[1].levelname == 'WARNING'
        assert log_list[1].message.startswith('Warning message')
        assert log_list[1].origin == 'astropy.tests.test_logger'
    if len(log_list) >= 3:
        assert log_list[2].levelname == 'INFO'
        assert log_list[2].message.startswith('Information message')
        assert log_list[2].origin == 'astropy.tests.test_logger'
    if len(log_list) >= 4:
        assert log_list[3].levelname == 'DEBUG'
        assert log_list[3].message.startswith('Debug message')
        assert log_list[3].origin == 'astropy.tests.test_logger'
def test_log_to_list_level():
    """filter_level keeps only records at or above the given level."""
    with log.log_to_list(filter_level='ERROR') as log_list:
        log.error("Error message")
        log.warning("Warning message")
    assert len(log_list) == 1 and log_list[0].levelname == 'ERROR'
def test_log_to_list_origin1():
    """filter_origin matching this module's package captures both records."""
    with log.log_to_list(filter_origin='astropy.tests') as log_list:
        log.error("Error message")
        log.warning("Warning message")
    assert len(log_list) == 2
def test_log_to_list_origin2():
    """filter_origin for an unrelated package captures nothing."""
    with log.log_to_list(filter_origin='astropy.wcs') as log_list:
        log.error("Error message")
        log.warning("Warning message")
    assert len(log_list) == 0
@pytest.mark.parametrize(('level'), [None, 'DEBUG', 'INFO', 'WARN', 'ERROR'])
def test_log_to_file(tmpdir, level):
    """log_to_file writes one repr-tuple line per record at/above the level;
    lines are read back and eval'd to compare the trailing fields."""
    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())
    orig_level = log.level
    try:
        if level is not None:
            log.setLevel(level)
        with log.log_to_file(log_path):
            log.error("Error message")
            log.warning("Warning message")
            log.info("Information message")
            log.debug("Debug message")
        log_file.close()
    finally:
        log.setLevel(orig_level)
    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()
    if level is None:
        # The log level *should* be set to whatever it was in the config
        level = conf.log_level
    # Check list length
    if level == 'DEBUG':
        assert len(log_entries) == 4
    elif level == 'INFO':
        assert len(log_entries) == 3
    elif level == 'WARN':
        assert len(log_entries) == 2
    elif level == 'ERROR':
        assert len(log_entries) == 1
    # Check list content
    assert eval(log_entries[0].strip())[-3:] == (
        'astropy.tests.test_logger', 'ERROR', 'Error message')
    if len(log_entries) >= 2:
        assert eval(log_entries[1].strip())[-3:] == (
            'astropy.tests.test_logger', 'WARNING', 'Warning message')
    if len(log_entries) >= 3:
        assert eval(log_entries[2].strip())[-3:] == (
            'astropy.tests.test_logger', 'INFO', 'Information message')
    if len(log_entries) >= 4:
        assert eval(log_entries[3].strip())[-3:] == (
            'astropy.tests.test_logger', 'DEBUG', 'Debug message')
def test_log_to_file_level(tmpdir):
    """filter_level on log_to_file writes only records at/above the level."""
    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())
    with log.log_to_file(log_path, filter_level='ERROR'):
        log.error("Error message")
        log.warning("Warning message")
    log_file.close()
    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()
    assert len(log_entries) == 1
    assert eval(log_entries[0].strip())[-2:] == (
        'ERROR', 'Error message')
def test_log_to_file_origin1(tmpdir):
    """filter_origin matching this module's package writes both records."""
    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())
    with log.log_to_file(log_path, filter_origin='astropy.tests'):
        log.error("Error message")
        log.warning("Warning message")
    log_file.close()
    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()
    assert len(log_entries) == 2
def test_log_to_file_origin2(tmpdir):
    """filter_origin for an unrelated package writes nothing."""
    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())
    with log.log_to_file(log_path, filter_origin='astropy.wcs'):
        log.error("Error message")
        log.warning("Warning message")
    log_file.close()
    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()
    assert len(log_entries) == 0
| bsd-3-clause |
serviceagility/boto | tests/unit/vpc/test_dhcpoptions.py | 114 | 8767 | from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, DhcpOptions
class TestDescribeDhcpOptions(AWSMockServiceTestCase):
    """DescribeDhcpOptions round-trip: request parameters and XML parsing.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``.
    """
    connection_class = VPCConnection

    def default_body(self):
        # Canned API response: one options set with a domain name and two
        # DNS server entries split across two duplicate keys.
        return b"""
            <DescribeDhcpOptionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
               <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
               <dhcpOptionsSet>
                  <item>
                     <dhcpOptionsId>dopt-7a8b9c2d</dhcpOptionsId>
                     <dhcpConfigurationSet>
                        <item>
                           <key>domain-name</key>
                           <valueSet>
                              <item>
                                 <value>example.com</value>
                              </item>
                           </valueSet>
                        </item>
                        <item>
                           <key>domain-name-servers</key>
                           <valueSet>
                              <item>
                                 <value>10.2.5.1</value>
                              </item>
                           </valueSet>
                        </item>
                        <item>
                           <key>domain-name-servers</key>
                           <valueSet>
                              <item>
                                 <value>10.2.5.2</value>
                              </item>
                           </valueSet>
                        </item>
                     </dhcpConfigurationSet>
                     <tagSet/>
                  </item>
               </dhcpOptionsSet>
            </DescribeDhcpOptionsResponse>
        """

    def test_get_all_dhcp_options(self):
        """IDs and filters are marshalled; duplicate keys merge into one list."""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.get_all_dhcp_options(['dopt-7a8b9c2d'],
                                                                    [('key', 'domain-name')])
        self.assert_request_parameters({
            'Action': 'DescribeDhcpOptions',
            'DhcpOptionsId.1': 'dopt-7a8b9c2d',
            'Filter.1.Name': 'key',
            'Filter.1.Value.1': 'domain-name'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertEqual(len(api_response), 1)
        self.assertIsInstance(api_response[0], DhcpOptions)
        self.assertEqual(api_response[0].id, 'dopt-7a8b9c2d')
        self.assertEqual(api_response[0].options['domain-name'], ['example.com'])
        self.assertEqual(api_response[0].options['domain-name-servers'], ['10.2.5.1', '10.2.5.2'])
class TestCreateDhcpOptions(AWSMockServiceTestCase):
    """CreateDhcpOptions round-trip: every option kind is marshalled and
    the parsed response exposes them as lists.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``.
    """
    connection_class = VPCConnection

    def default_body(self):
        return b"""
            <CreateDhcpOptionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
               <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
               <dhcpOptions>
                  <dhcpOptionsId>dopt-7a8b9c2d</dhcpOptionsId>
                  <dhcpConfigurationSet>
                     <item>
                        <key>domain-name</key>
                        <valueSet>
                           <item>
                              <value>example.com</value>
                           </item>
                        </valueSet>
                     </item>
                     <item>
                        <key>domain-name-servers</key>
                        <valueSet>
                           <item>
                              <value>10.2.5.1</value>
                           </item>
                           <item>
                              <value>10.2.5.2</value>
                           </item>
                        </valueSet>
                     </item>
                     <item>
                        <key>ntp-servers</key>
                        <valueSet>
                           <item>
                              <value>10.12.12.1</value>
                           </item>
                           <item>
                              <value>10.12.12.2</value>
                           </item>
                        </valueSet>
                     </item>
                     <item>
                        <key>netbios-name-servers</key>
                        <valueSet>
                           <item>
                              <value>10.20.20.1</value>
                           </item>
                        </valueSet>
                     </item>
                     <item>
                        <key>netbios-node-type</key>
                        <valueSet>
                           <item>
                              <value>2</value>
                           </item>
                        </valueSet>
                     </item>
                  </dhcpConfigurationSet>
                  <tagSet/>
               </dhcpOptions>
            </CreateDhcpOptionsResponse>
        """

    def test_create_dhcp_options(self):
        """Scalars, tuples and lists all serialize to numbered Value params."""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_dhcp_options(
            domain_name='example.com', domain_name_servers=['10.2.5.1', '10.2.5.2'],
            ntp_servers=('10.12.12.1', '10.12.12.2'),
            netbios_name_servers='10.20.20.1',
            netbios_node_type='2')
        self.assert_request_parameters({
            'Action': 'CreateDhcpOptions',
            'DhcpConfiguration.1.Key': 'domain-name',
            'DhcpConfiguration.1.Value.1': 'example.com',
            'DhcpConfiguration.2.Key': 'domain-name-servers',
            'DhcpConfiguration.2.Value.1': '10.2.5.1',
            'DhcpConfiguration.2.Value.2': '10.2.5.2',
            'DhcpConfiguration.3.Key': 'ntp-servers',
            'DhcpConfiguration.3.Value.1': '10.12.12.1',
            'DhcpConfiguration.3.Value.2': '10.12.12.2',
            'DhcpConfiguration.4.Key': 'netbios-name-servers',
            'DhcpConfiguration.4.Value.1': '10.20.20.1',
            'DhcpConfiguration.5.Key': 'netbios-node-type',
            'DhcpConfiguration.5.Value.1': '2'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertIsInstance(api_response, DhcpOptions)
        self.assertEqual(api_response.id, 'dopt-7a8b9c2d')
        self.assertEqual(api_response.options['domain-name'], ['example.com'])
        self.assertEqual(api_response.options['domain-name-servers'], ['10.2.5.1', '10.2.5.2'])
        self.assertEqual(api_response.options['ntp-servers'], ['10.12.12.1', '10.12.12.2'])
        self.assertEqual(api_response.options['netbios-name-servers'], ['10.20.20.1'])
        self.assertEqual(api_response.options['netbios-node-type'], ['2'])
class TestDeleteDhcpOptions(AWSMockServiceTestCase):
    """DeleteDhcpOptions round-trip.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``.
    """
    connection_class = VPCConnection

    def default_body(self):
        return b"""
            <DeleteDhcpOptionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
               <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
               <return>true</return>
            </DeleteDhcpOptionsResponse>
        """

    def test_delete_dhcp_options(self):
        """The options-set ID is sent and the boolean return is parsed."""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.delete_dhcp_options('dopt-7a8b9c2d')
        self.assert_request_parameters({
            'Action': 'DeleteDhcpOptions',
            'DhcpOptionsId': 'dopt-7a8b9c2d'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertEqual(api_response, True)
class TestAssociateDhcpOptions(AWSMockServiceTestCase):
    """AssociateDhcpOptions round-trip.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``.
    """
    connection_class = VPCConnection

    def default_body(self):
        return b"""
            <AssociateDhcpOptionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
               <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
               <return>true</return>
            </AssociateDhcpOptionsResponse>
        """

    def test_associate_dhcp_options(self):
        """Both the options-set ID and the VPC ID are marshalled."""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.associate_dhcp_options(
            'dopt-7a8b9c2d', 'vpc-1a2b3c4d')
        self.assert_request_parameters({
            'Action': 'AssociateDhcpOptions',
            'DhcpOptionsId': 'dopt-7a8b9c2d',
            'VpcId': 'vpc-1a2b3c4d'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertEqual(api_response, True)


if __name__ == '__main__':
    unittest.main()
| mit |
kjc88/sl4a | python/src/Lib/plat-mac/buildtools.py | 31 | 13962 | """tools for BuildApplet and BuildApplication"""
import warnings
warnings.warnpy3k("the buildtools module is deprecated and is removed in 3.0",
stacklevel=2)
import sys
import os
import string
import imp
import marshal
from Carbon import Res
import Carbon.Files
import Carbon.File
import MacOS
import macostools
import macresource
import EasyDialogs
import shutil
BuildError = "BuildError"
# .pyc file (and 'PYC ' resource magic number)
MAGIC = imp.get_magic()
# Template file (searched on sys.path)
TEMPLATE = "PythonInterpreter"
# Specification of our resource
RESTYPE = 'PYC '
RESNAME = '__main__'
# A resource with this name sets the "owner" (creator) of the destination
# It should also have ID=0. Either of these alone is not enough.
OWNERNAME = "owner resource"
# Default applet creator code
DEFAULT_APPLET_CREATOR="Pyta"
# OpenResFile mode parameters
READ = 1
WRITE = 2
# Parameter for FSOpenResourceFile
RESOURCE_FORK_NAME=Carbon.File.FSGetResourceForkName()
def findtemplate(template=None):
    """Locate the applet template along sys.path.

    Returns the resolved pathname of the template, or None on MachO where
    no template file is used.  Raises BuildError (a string exception --
    this is legacy Python 2 code) when the template cannot be found.
    """
    if MacOS.runtimemodel == 'macho':
        return None
    if not template:
        template=TEMPLATE
    for p in sys.path:
        file = os.path.join(p, template)
        try:
            # Resolve Finder aliases; stops at the first path entry that works.
            file, d1, d2 = Carbon.File.FSResolveAliasFile(file, 1)
            break
        except (Carbon.File.Error, ValueError):
            continue
    else:
        # for/else: no sys.path entry yielded a resolvable template.
        raise BuildError, "Template %r not found on sys.path" % (template,)
    file = file.as_pathname()
    return file
def process(template, filename, destname, copy_codefragment=0,
            rsrcname=None, others=[], raw=0, progress="default", destroot=""):
    """Compile a Python script and build an applet from it.

    Compiles ``filename``, derives default destination/resource names, and
    delegates the actual build to process_common().  ``progress="default"``
    creates an EasyDialogs progress bar; pass None to disable it.

    NOTE(review): ``others=[]`` is a mutable default argument; it is only
    read here, so this is harmless, but ``others=None`` would be safer.
    """
    if progress == "default":
        progress = EasyDialogs.ProgressBar("Processing %s..."%os.path.split(filename)[1], 120)
        progress.label("Compiling...")
        progress.inc(0)
    # check for the script name being longer than 32 chars. This may trigger a bug
    # on OSX that can destroy your sourcefile.
    if '#' in os.path.split(filename)[1]:
        raise BuildError, "BuildApplet could destroy your sourcefile on OSX, please rename: %s" % filename
    # Read the source and compile it
    # (there's no point overwriting the destination if it has a syntax error)
    fp = open(filename, 'rU')
    text = fp.read()
    fp.close()
    try:
        code = compile(text + '\n', filename, "exec")
    except SyntaxError, arg:
        raise BuildError, "Syntax error in script %s: %s" % (filename, arg)
    except EOFError:
        raise BuildError, "End-of-file in script %s" % (filename,)
    # Set the destination file name. Note that basename
    # does contain the whole filepath, only a .py is stripped.
    if string.lower(filename[-3:]) == ".py":
        basename = filename[:-3]
        if MacOS.runtimemodel != 'macho' and not destname:
            destname = basename
    else:
        basename = filename
    if not destname:
        if MacOS.runtimemodel == 'macho':
            destname = basename + '.app'
        else:
            destname = basename + '.applet'
    if not rsrcname:
        rsrcname = basename + '.rsrc'
    # Try removing the output file. This fails in MachO, but it shouldn't
    # do any harm.
    try:
        os.remove(destname)
    except os.error:
        pass
    process_common(template, progress, code, rsrcname, destname, 0,
                   copy_codefragment, raw, others, filename, destroot)
def update(template, filename, output):
if MacOS.runtimemodel == 'macho':
raise BuildError, "No updating yet for MachO applets"
if progress:
progress = EasyDialogs.ProgressBar("Updating %s..."%os.path.split(filename)[1], 120)
else:
progress = None
if not output:
output = filename + ' (updated)'
# Try removing the output file
try:
os.remove(output)
except os.error:
pass
process_common(template, progress, None, filename, output, 1, 1)
def process_common(template, progress, code, rsrcname, destname, is_update,
        copy_codefragment, raw=0, others=[], filename=None, destroot=""):
    """Build (or update, if `is_update`) a classic applet at `destname`.

    Copies the template's data fork (when `copy_codefragment`), merges
    resources from `rsrcname` and then from the template, optionally writes
    the compiled `code` object as a 'PYC '-type resource, and finally sets
    the Finder creator/type/bundle flags on the destination.

    On a MachO runtime this delegates entirely to process_common_macho().
    """
    if MacOS.runtimemodel == 'macho':
        return process_common_macho(template, progress, code, rsrcname, destname,
            is_update, raw, others, filename, destroot)
    if others:
        raise BuildError, "Extra files only allowed for MachoPython applets"
    # Create FSSpecs for the various files
    template_fsr, d1, d2 = Carbon.File.FSResolveAliasFile(template, 1)
    template = template_fsr.as_pathname()
    # Copy data (not resources, yet) from the template
    if progress:
        progress.label("Copy data fork...")
        progress.set(10)
    if copy_codefragment:
        tmpl = open(template, "rb")
        dest = open(destname, "wb")
        data = tmpl.read()
        if data:
            dest.write(data)
        dest.close()
        tmpl.close()
        del dest
        del tmpl
    # Open the output resource fork; create it first if it does not exist yet.
    if progress:
        progress.label("Copy resources...")
        progress.set(20)
    try:
        output = Res.FSOpenResourceFile(destname, RESOURCE_FORK_NAME, WRITE)
    except MacOS.Error:
        destdir, destfile = os.path.split(destname)
        Res.FSCreateResourceFile(destdir, unicode(destfile), RESOURCE_FORK_NAME)
        output = Res.FSOpenResourceFile(destname, RESOURCE_FORK_NAME, WRITE)
    # Copy the resources from the target specific resource template, if any.
    # If `rsrcname` cannot be opened we just advance the progress bar and
    # fall through with no types found.
    typesfound, ownertype = [], None
    try:
        input = Res.FSOpenResourceFile(rsrcname, RESOURCE_FORK_NAME, READ)
    except (MacOS.Error, ValueError):
        pass
        if progress:
            progress.inc(50)
    else:
        if is_update:
            # When updating, always take a fresh 'cfrg' from the template.
            skip_oldfile = ['cfrg']
        else:
            skip_oldfile = []
        typesfound, ownertype = copyres(input, output, skip_oldfile, 0, progress)
        Res.CloseResFile(input)
    # Check which resource-types we should not copy from the template:
    # anything the target-specific resource file already provided.
    skiptypes = []
    if 'vers' in typesfound: skiptypes.append('vers')
    if 'SIZE' in typesfound: skiptypes.append('SIZE')
    if 'BNDL' in typesfound: skiptypes = skiptypes + ['BNDL', 'FREF', 'icl4',
        'icl8', 'ics4', 'ics8', 'ICN#', 'ics#']
    if not copy_codefragment:
        skiptypes.append('cfrg')
##    skipowner = (ownertype <> None)
    # Copy the resources from the template
    input = Res.FSOpenResourceFile(template, RESOURCE_FORK_NAME, READ)
    dummy, tmplowner = copyres(input, output, skiptypes, 1, progress)
    Res.CloseResFile(input)
##    if ownertype is None:
##        raise BuildError, "No owner resource found in either resource file or template"
    # Make sure we're manipulating the output resource file now
    Res.UseResFile(output)
    if ownertype is None:
        # No owner resource in the template. We have skipped the
        # Python owner resource, so we have to add our own. The relevant
        # bundle stuff is already included in the interpret/applet template.
        newres = Res.Resource('\0')
        newres.AddResource(DEFAULT_APPLET_CREATOR, 0, "Owner resource")
        ownertype = DEFAULT_APPLET_CREATOR
    if code:
        # Delete any existing 'PYC ' resource named __main__
        try:
            res = Res.Get1NamedResource(RESTYPE, RESNAME)
            res.RemoveResource()
        except Res.Error:
            pass
        # Create the raw data for the resource from the code object
        if progress:
            progress.label("Write PYC resource...")
            progress.set(120)
        data = marshal.dumps(code)
        del code
        data = (MAGIC + '\0\0\0\0') + data
        # Create the resource and write it. Loop until Unique1ID returns an
        # id outside the reserved range (< 128).
        id = 0
        while id < 128:
            id = Res.Unique1ID(RESTYPE)
        res = Res.Resource(data)
        res.AddResource(RESTYPE, id, RESNAME)
        attrs = res.GetResAttrs()
        attrs = attrs | 0x04  # set preload
        res.SetResAttrs(attrs)
        res.WriteResource()
        res.ReleaseResource()
    # Close the output file
    Res.CloseResFile(output)
    # Now set the creator, type and bundle bit of the destination.
    # Done with FSSpec's, FSRef FInfo isn't good enough yet (2.3a1+)
    dest_fss = Carbon.File.FSSpec(destname)
    dest_finfo = dest_fss.FSpGetFInfo()
    dest_finfo.Creator = ownertype
    dest_finfo.Type = 'APPL'
    dest_finfo.Flags = dest_finfo.Flags | Carbon.Files.kHasBundle | Carbon.Files.kIsShared
    dest_finfo.Flags = dest_finfo.Flags & ~Carbon.Files.kHasBeenInited
    dest_fss.FSpSetFInfo(dest_finfo)
    macostools.touched(destname)
    if progress:
        progress.label("Done.")
        progress.inc(0)
def process_common_macho(template, progress, code, rsrcname, destname, is_update,
        raw=0, others=[], filename=None, destroot=""):
    """Build a MachO .app bundle for the script `filename` via bundlebuilder.

    Derives the bundle name from `destname`, and looks for a sibling .plist
    and .icns next to `rsrcname` (falling back to the stock PythonApplet
    icon). `others` may list extra resources (str) or (src, dst) file pairs.

    Raises BuildError when `filename` is not given.
    """
    # Check that we have a filename
    if filename is None:
        raise BuildError, "Need source filename on MacOSX"
    # First make sure the name ends in ".app"
    if destname[-4:] != '.app':
        destname = destname + '.app'
    # Now deduce the short name
    destdir, shortname = os.path.split(destname)
    if shortname[-4:] == '.app':
        # Strip the .app suffix
        shortname = shortname[:-4]
    # And deduce the .plist and .icns names
    plistname = None
    icnsname = None
    if rsrcname and rsrcname[-5:] == '.rsrc':
        tmp = rsrcname[:-5]
        plistname = tmp + '.plist'
        if os.path.exists(plistname):
            icnsname = tmp + '.icns'
            if not os.path.exists(icnsname):
                icnsname = None
        else:
            plistname = None
    if not icnsname:
        dft_icnsname = os.path.join(sys.prefix, 'Resources/Python.app/Contents/Resources/PythonApplet.icns')
        if os.path.exists(dft_icnsname):
            icnsname = dft_icnsname
    # NOTE(review): if `rsrcname` is None at this point, os.path.exists(None)
    # would raise TypeError rather than return False — confirm callers always
    # pass a string here.
    if not os.path.exists(rsrcname):
        rsrcname = None
    if progress:
        progress.label('Creating bundle...')
    import bundlebuilder
    builder = bundlebuilder.AppBuilder(verbosity=0)
    builder.mainprogram = filename
    builder.builddir = destdir
    builder.name = shortname
    builder.destroot = destroot
    if rsrcname:
        realrsrcname = macresource.resource_pathname(rsrcname)
        builder.files.append((realrsrcname,
            os.path.join('Contents/Resources', os.path.basename(rsrcname))))
    for o in others:
        if type(o) == str:
            builder.resources.append(o)
        else:
            builder.files.append(o)
    if plistname:
        import plistlib
        builder.plist = plistlib.Plist.fromFile(plistname)
    if icnsname:
        builder.iconfile = icnsname
    if not raw:
        # Emulate sys.argv for dropped documents unless raw mode is requested.
        builder.argv_emulation = 1
    builder.setup()
    builder.build()
    if progress:
        progress.label('Done.')
        progress.inc(0)
##    macostools.touched(dest_fss)
## macostools.touched(dest_fss)
# Copy resources between two resource file descriptors.
# skip a resource named '__main__' or (if skipowner is set) with ID zero.
# Also skip resources with a type listed in skiptypes.
#
def copyres(input, output, skiptypes, skipowner, progress=None):
    """Copy all resources from resource file `input` to `output`.

    Resources whose type appears in `skiptypes` are skipped entirely. The
    resource named OWNERNAME with id 0 is the "owner" resource: it is
    skipped when `skipowner` is true, otherwise its type is remembered.
    Existing resources in `output` with the same type/id are overwritten,
    and their attributes are OR-ed into the copy.

    Returns (alltypes, ctor): the list of resource types seen in `input`
    (excluding skipped types) and the owner type, or None if none was found.
    """
    ctor = None
    alltypes = []
    Res.UseResFile(input)
    ntypes = Res.Count1Types()
    # ROBUSTNESS FIX: the original divided by ntypes (and below by
    # nresources) unconditionally, raising ZeroDivisionError on an empty
    # resource file or an empty type. The guarded loops simply don't run
    # in those cases, so a 0 increment is behavior-preserving.
    if ntypes:
        progress_type_inc = 50/ntypes
    else:
        progress_type_inc = 0
    for itype in range(1, 1+ntypes):
        type = Res.Get1IndType(itype)
        if type in skiptypes:
            continue
        alltypes.append(type)
        nresources = Res.Count1Resources(type)
        if nresources:
            progress_cur_inc = progress_type_inc/nresources
        else:
            progress_cur_inc = 0
        for ires in range(1, 1+nresources):
            res = Res.Get1IndResource(type, ires)
            id, type, name = res.GetResInfo()
            lcname = string.lower(name)
            if lcname == OWNERNAME and id == 0:
                if skipowner:
                    continue  # Skip this one
                else:
                    ctor = type
            size = res.size
            attrs = res.GetResAttrs()
            if progress:
                progress.label("Copy %s %d %s"%(type, id, name))
                progress.inc(progress_cur_inc)
            res.LoadResource()
            res.DetachResource()
            Res.UseResFile(output)
            try:
                res2 = Res.Get1Resource(type, id)
            except MacOS.Error:
                res2 = None
            if res2:
                # A resource with this type/id already exists in the output:
                # remove it before adding the copy.
                if progress:
                    progress.label("Overwrite %s %d %s"%(type, id, name))
                    progress.inc(0)
                res2.RemoveResource()
            res.AddResource(type, id, name)
            res.WriteResource()
            attrs = attrs | res.GetResAttrs()
            res.SetResAttrs(attrs)
            Res.UseResFile(input)
    return alltypes, ctor
def copyapptree(srctree, dsttree, exceptlist=None, progress=None):
    """Recursively copy the application tree at `srctree` to `dsttree`.

    `dsttree` is removed first if it already exists. Any entry whose path
    relative to `srctree` appears in `exceptlist` is skipped (nested entries
    as e.g. 'sub/name'). Directories are recreated, symlinks are re-created
    as symlinks, and regular files are copied with shutil.copy2. If a
    `progress` object is given, its label is updated per copied file.
    """
    # IDIOM FIX: avoid the shared mutable default argument `exceptlist=[]`.
    # The original never mutated it, but the pattern is a known trap.
    if exceptlist is None:
        exceptlist = []
    names = []
    if os.path.exists(dsttree):
        shutil.rmtree(dsttree)
    os.mkdir(dsttree)
    # First pass: collect every relative path under srctree breadth-first,
    # descending into directories and honoring the exclusion list.
    todo = os.listdir(srctree)
    while todo:
        this = todo.pop(0)
        if this in exceptlist:
            continue
        thispath = os.path.join(srctree, this)
        if os.path.isdir(thispath):
            for entry in os.listdir(thispath):
                todo.append(os.path.join(this, entry))
        names.append(this)
    # Second pass: materialize directories, symlinks and files at dsttree.
    # Parents precede their children in `names`, so mkdir order is safe.
    for this in names:
        srcpath = os.path.join(srctree, this)
        dstpath = os.path.join(dsttree, this)
        if os.path.isdir(srcpath):
            os.mkdir(dstpath)
        elif os.path.islink(srcpath):
            # NOTE: isdir() follows symlinks, so a symlink to a directory is
            # handled by the branch above — behavior preserved from original.
            endpoint = os.readlink(srcpath)
            os.symlink(endpoint, dstpath)
        else:
            if progress:
                progress.label('Copy '+this)
                progress.inc(0)
            shutil.copy2(srcpath, dstpath)
def writepycfile(codeobject, cfile):
    """Write `codeobject` to the .pyc-format file `cfile`.

    The four-byte MAGIC number is written last, over a placeholder, so a
    partially written file is never mistaken for a valid .pyc.
    """
    import marshal
    fc = open(cfile, 'wb')
    fc.write('\0\0\0\0')  # MAGIC placeholder, overwritten below
    fc.write('\0\0\0\0')  # Timestamp placeholder, not needed
    marshal.dump(codeobject, fc)
    fc.flush()
    # Rewind and replace the placeholder with the real magic number.
    fc.seek(0, 0)
    fc.write(MAGIC)
    fc.close()
| apache-2.0 |
SciLifeLab/TACA | tests/test_analysis_nanopore.py | 1 | 14296 | #!/usr/bin/env python
import unittest
import logging
import filecmp
import mock
import os
from taca.analysis.analysis_nanopore import *
from taca.utils import config as conf
CONFIG = conf.load_yaml_config('data/taca_test_nanopore_cfg.yaml')
class TestNanoporeAnalysis(unittest.TestCase):
    """Tests for taca.analysis.analysis_nanopore.

    All external effects (subprocess launches, rsync, mail, file moves) are
    mocked; fixture runs live under data/nanopore_data and sample sheets
    under data/nanopore_samplesheets.
    """

    def test_find_runs_to_process(self):
        """Find all expected nanopore runs to process."""
        expected_dirs = ["data/nanopore_data/run1/still_sequencing/20200101_1412_MN19414_AAU641_68125dc2",
                         "data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2",
                         "data/nanopore_data/run2/done_sequencing/20200102_1412_MN19414_AAU642_68125dc2",
                         "data/nanopore_data/run3/demultiplexing/20200103_1412_MN19414_AAU643_68125dc2",
                         "data/nanopore_data/run7/done_no_sample_sheet/20200107_1412_MN19417_AAU645_68125dc2",
                         "data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2"]
        found_dirs = find_runs_to_process()
        # Sorted comparison: discovery order is not part of the contract.
        self.assertEqual(sorted(found_dirs), sorted(expected_dirs))

    @mock.patch('taca.analysis.analysis_nanopore.parse_samplesheet')
    def test_parse_lims_sample_sheet(self, mock_parser):
        """Find and parse lims sample sheet."""
        mock_parser.return_value = ('data/nanopore_data/run2/done_sequencing/20200102_1412_MN19414_AAU642_68125dc2/SQK-LSK109_sample_sheet.csv',
                                    'data/nanopore_data/run2/done_sequencing/20200102_1412_MN19414_AAU642_68125dc2/anglerfish_sample_sheet.csv')
        run_dir = 'data/nanopore_data/run2/done_sequencing/20200102_1412_MN19414_AAU642_68125dc2'
        nanoseq_sample_sheet, anglerfish_sample_sheet = parse_lims_sample_sheet(run_dir)
        self.assertEqual(nanoseq_sample_sheet, 'data/nanopore_data/run2/done_sequencing/20200102_1412_MN19414_AAU642_68125dc2/SQK-LSK109_sample_sheet.csv')
        self.assertEqual(anglerfish_sample_sheet, 'data/nanopore_data/run2/done_sequencing/20200102_1412_MN19414_AAU642_68125dc2/anglerfish_sample_sheet.csv')

    def test_get_original_samplesheet(self):
        """Get location of lims sample sheet."""
        run_id = '20200102_1412_MN19414_AAU642_68125dc2'
        found_sample_sheet = get_original_samplesheet(run_id)
        expected_sample_sheet = 'data/nanopore_samplesheets/2020/SQK-LSK109_AAU642_Samplesheet_22-594126.csv'
        self.assertEqual(found_sample_sheet, expected_sample_sheet)

    def test_parse_samplesheet(self):
        """Make nanoseq sample sheet from lims sample sheet."""
        run_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'
        lims_samplesheet = 'data/nanopore_samplesheets/2020/SQK-LSK109_AAU644_Samplesheet_24-594126.csv'
        anglerfish_sample_sheet = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/anglerfish_sample_sheet.csv'
        nanoseq_samplesheet = parse_samplesheet(run_dir, lims_samplesheet)
        # Byte-level comparison against pre-computed expected sheets.
        self.assertTrue(filecmp.cmp(nanoseq_samplesheet, 'data/nanopore_samplesheets/expected/SQK-LSK109_sample_sheet.csv'))
        self.assertTrue(filecmp.cmp(anglerfish_sample_sheet, 'data/nanopore_samplesheets/expected/anglerfish_sample_sheet.csv'))

    @mock.patch('taca.analysis.analysis_nanopore.get_flowcell_id')
    @mock.patch('taca.analysis.analysis_nanopore.is_multiplexed')
    @mock.patch('taca.analysis.analysis_nanopore.subprocess.Popen')
    def test_start_analysis_pipeline_multiplexed(self, mock_popen, mock_is_multiplexed, mock_get_id):
        """Submit detached nanoseq job for multiplexed data."""
        mock_get_id.return_value = 'FLO-FLG001'
        mock_is_multiplexed.return_value = True
        run_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'
        sample_sheet = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/SQK-LSK109_sample_sheet.csv'
        start_nanoseq(run_dir, sample_sheet)
        # Multiplexed runs must include the --barcode_kit option.
        expected_parameters = ('nextflow run nf-core/nanoseq --input ' + sample_sheet
                               + ' --input_path ' + os.path.join(run_dir, 'fast5')
                               + ' --outdir ' + os.path.join(run_dir, 'nanoseq_output')
                               + ' --flowcell FLO-FLG001'
                               + ' --guppy_gpu'
                               + ' --skip_alignment'
                               + ' --kit SQK-LSK109'
                               + ' --max_cpus 6'
                               + ' --max_memory 20.GB'
                               + ' --barcode_kit EXP-NBD104'
                               + ' -profile singularity; echo $? > .exitcode_for_nanoseq')
        mock_popen.assert_called_once_with(expected_parameters, stdout=subprocess.PIPE, shell=True, cwd=run_dir)

    @mock.patch('taca.analysis.analysis_nanopore.get_flowcell_id')
    @mock.patch('taca.analysis.analysis_nanopore.is_multiplexed')
    @mock.patch('taca.analysis.analysis_nanopore.subprocess.Popen')
    def test_start_analysis_pipeline_not_multiplexed(self, mock_popen, mock_is_multiplexed, mock_get_id):
        """Submit detached nanoseq job for non multiplexed data."""
        mock_get_id.return_value = 'FLO-FLG001'
        mock_is_multiplexed.return_value = False
        run_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'
        sample_sheet = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/SQK-LSK109_sample_sheet.csv'
        start_nanoseq(run_dir, sample_sheet)
        # Same command as the multiplexed case but without --barcode_kit.
        expected_parameters = ('nextflow run nf-core/nanoseq --input ' + sample_sheet
                               + ' --input_path ' + os.path.join(run_dir, 'fast5')
                               + ' --outdir ' + os.path.join(run_dir, 'nanoseq_output')
                               + ' --flowcell FLO-FLG001'
                               + ' --guppy_gpu'
                               + ' --skip_alignment'
                               + ' --kit SQK-LSK109'
                               + ' --max_cpus 6'
                               + ' --max_memory 20.GB'
                               + ' -profile singularity; echo $? > .exitcode_for_nanoseq')
        mock_popen.assert_called_once_with(expected_parameters, stdout=subprocess.PIPE, shell=True, cwd=run_dir)

    def test_get_flowcell_id(self):
        """Get flowcell ID from report.md."""
        run_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'
        got_id = get_flowcell_id(run_dir)
        expected_id = 'FLO-FLG001'
        self.assertEqual(got_id, expected_id)

    def test_is_multiplexed(self):
        """Return True if run is multiplexed, else False."""
        multiplexed_sample_sheet = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/SQK-LSK109_sample_sheet.csv'
        non_multiplexed_sample_sheet = 'data/nanopore_data/run3/demultiplexing/20200103_1412_MN19414_AAU643_68125dc2/SQK-LSK109_AAU643_sample_sheet.csv'
        self.assertTrue(is_multiplexed(multiplexed_sample_sheet))
        self.assertFalse(is_multiplexed(non_multiplexed_sample_sheet))

    def test_get_barcode_kit(self):
        """Return EXP-NBD104 or EXP-NBD114 barcode kit based on sample sheet."""
        sample_sheet_104 = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/SQK-LSK109_sample_sheet.csv'
        got_kit_104 = get_barcode_kit(sample_sheet_104)
        sample_sheet_114 = 'data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2/SQK-LSK109_sample_sheet.csv'
        got_kit_114 = get_barcode_kit(sample_sheet_114)
        self.assertEqual(got_kit_104, 'EXP-NBD104')
        self.assertEqual(got_kit_114, 'EXP-NBD114')

    def test_check_exit_status(self):
        """Check nanoseq exit status from file."""
        self.assertTrue(check_exit_status('data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/.exitcode_for_nanoseq'))
        self.assertFalse(check_exit_status('data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2/.exitcode_for_nanoseq'))

    @mock.patch('taca.analysis.analysis_nanopore.os.makedirs')
    @mock.patch('taca.analysis.analysis_nanopore.subprocess.Popen')
    def test_start_anglerfish(self, mock_popen, mock_mkdir):
        """Start Anglerfish."""
        run_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'
        af_sample_sheet = 'anglerfish_sample_sheet.csv'
        output_dir = 'anglerfish_output'
        start_anglerfish(run_dir, af_sample_sheet, output_dir)
        expected_parameters = ('anglerfish.py'
                               + ' --samplesheet anglerfish_sample_sheet.csv'
                               + ' --out_fastq anglerfish_output'
                               + ' --threads 2'
                               + ' --skip_demux'
                               + ' --skip_fastqc; echo $? > .exitcode_for_anglerfish')
        mock_popen.assert_called_once_with(expected_parameters, stdout=subprocess.PIPE, shell=True, cwd=run_dir)

    @mock.patch('taca.analysis.analysis_nanopore.find_anglerfish_results')
    @mock.patch('taca.analysis.analysis_nanopore.shutil.copyfile')
    def test_copy_results_for_lims(self, mock_copy, mock_results):
        """Copy Anglerfish results to lims."""
        run = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'
        anglerfish_results_path = 'anglerfish_output'
        anglerfish_results_file = os.path.join(run, anglerfish_results_path, 'anglerfish_2020_09_23_141922', 'anglerfish_stats.txt')
        # Destination dir comes from the test YAML config loaded above.
        lims_results_file = 'some/dir/2020/anglerfish_stats_AAU644.txt'
        mock_results.return_value = anglerfish_results_file
        copy_results_for_lims(run, anglerfish_results_path)
        mock_copy.assert_called_once_with(anglerfish_results_file, lims_results_file)

    def test_find_anglerfish_results(self):
        """Locate Anglerfish results file."""
        anglerfish_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/anglerfish_output'
        found_file = find_anglerfish_results(anglerfish_dir)
        expected_file = os.path.join(anglerfish_dir, 'anglerfish_2020_09_23_141922', 'anglerfish_stats.txt')
        self.assertEqual(expected_file, found_file)

    def test_is_not_transferred(self):
        """Check if nanopore run has been transferred."""
        self.assertTrue(is_not_transferred('20200104_1412_MN19414_AAU644_68125dc2', 'data/nanopore_data/transfer.tsv'))
        self.assertFalse(is_not_transferred('20200105_1412_MN19414_AAU645_68125dc2', 'data/nanopore_data/transfer.tsv'))

    @mock.patch('taca.analysis.analysis_nanopore.RsyncAgent')
    def test_transfer_run(self, mock_rsync):
        """Start rsync of finished run."""
        run_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'
        transfer_run(run_dir)
        rsync_opts = {'-Lav': None,
                      '--chown': ':ngi2016003',
                      '--chmod': 'Dg+s,g+rw',
                      '-r': None,
                      '--exclude': 'work'}
        mock_rsync.assert_called_with(run_dir,
                                      dest_path='some_dir',
                                      remote_host='some_host',
                                      remote_user='some_user',
                                      validate=False,
                                      opts=rsync_opts)

    @mock.patch('taca.analysis.analysis_nanopore.shutil.move')
    def test_archive_run(self, mock_move):
        """Move directory to archive."""
        run_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'
        archive_run(run_dir)
        # The whole runX parent dir is moved into nosync, not just the flowcell.
        mock_move.assert_called_once_with('data/nanopore_data/run4', 'data/nanopore_data/nosync')

    @mock.patch('taca.analysis.analysis_nanopore.parse_lims_sample_sheet')
    @mock.patch('taca.analysis.analysis_nanopore.os.path.isfile')
    @mock.patch('taca.analysis.analysis_nanopore.start_nanoseq')
    def test_process_run_start_analysis(self, mock_start, mock_isfile, mock_parse_ss):
        """Start nanoseq analysis."""
        nanoseq_sample_sheet = 'data/nanopore_data/run2/done_sequencing/20200102_1412_MN19414_AAU642_68125dc2/SQK-LSK109_sample_sheet.csv'
        anglerfish_sample_sheet = 'some/path'
        mock_parse_ss.return_value = nanoseq_sample_sheet
        mock_isfile.return_value = True
        run_dir = 'data/nanopore_data/run2/done_sequencing/20200102_1412_MN19414_AAU642_68125dc2'
        process_run(run_dir, None, None)
        mock_start.assert_called_once_with(run_dir, nanoseq_sample_sheet)

    @mock.patch('taca.analysis.analysis_nanopore.transfer_run')
    @mock.patch('taca.analysis.analysis_nanopore.update_transfer_log')
    @mock.patch('taca.analysis.analysis_nanopore.archive_run')
    @mock.patch('taca.analysis.analysis_nanopore.send_mail')
    def test_process_run_transfer(self, mock_mail, mock_archive, mock_update, mock_transfer):
        """Start transfer of run directory."""
        mock_transfer.return_value = True
        run_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'
        process_run(run_dir, 'dummy/path', None)
        email_subject = ('Run successfully processed: 20200104_1412_MN19414_AAU644_68125dc2')
        email_message = 'Run 20200104_1412_MN19414_AAU644_68125dc2 has been analysed, transferred and archived successfully.'
        email_recipients = 'test@test.com'
        mock_mail.assert_called_once_with(email_subject, email_message, email_recipients)

    @mock.patch('taca.analysis.analysis_nanopore.send_mail')
    def test_process_run_fail_analysis(self, mock_mail):
        """Send email to operator if nanoseq analysis failed."""
        run_dir = 'data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2'
        process_run(run_dir, None, None)
        email_subject = ('Analysis failed for run 20200108_1412_MN19414_AAU648_68125dc2')
        email_message = 'The nanoseq analysis failed for run {}.'.format(run_dir)
        email_recipients = 'test@test.com'
        mock_mail.assert_called_once_with(email_subject, email_message, email_recipients)
| mit |
Samuel789/MediPi | MedManagementWeb/env/lib/python3.5/site-packages/django/contrib/postgres/fields/ranges.py | 109 | 6308 | import json
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range
from django.contrib.postgres import forms, lookups
from django.db import models
from django.utils import six
from .utils import AttributeSetter
# Public API of this module: the concrete range fields plus the base class.
__all__ = [
    'RangeField', 'IntegerRangeField', 'BigIntegerRangeField',
    'FloatRangeField', 'DateTimeRangeField', 'DateRangeField',
]
class RangeField(models.Field):
    """Base class for PostgreSQL range model fields.

    Subclasses provide three class attributes: ``base_field`` (the model
    field class for the bounds), ``range_type`` (the psycopg2 range class),
    and ``form_field`` (the form field used by ``formfield()``).
    """
    empty_strings_allowed = False

    def __init__(self, *args, **kwargs):
        # Initializing base_field here ensures that its model matches the model for self.
        if hasattr(self, 'base_field'):
            self.base_field = self.base_field()
        super(RangeField, self).__init__(*args, **kwargs)

    @property
    def model(self):
        try:
            return self.__dict__['model']
        except KeyError:
            raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)

    @model.setter
    def model(self, model):
        # Keep the embedded base_field's model in sync with this field's.
        self.__dict__['model'] = model
        self.base_field.model = model

    def get_prep_value(self, value):
        """Accept a psycopg2 Range, a (lower, upper) pair, or None."""
        if value is None:
            return None
        elif isinstance(value, Range):
            return value
        elif isinstance(value, (list, tuple)):
            return self.range_type(value[0], value[1])
        return value

    def to_python(self, value):
        """Deserialize from the JSON produced by value_to_string(), a pair, or a Range."""
        if isinstance(value, six.string_types):
            # Assume we're deserializing
            vals = json.loads(value)
            for end in ('lower', 'upper'):
                if end in vals:
                    vals[end] = self.base_field.to_python(vals[end])
            value = self.range_type(**vals)
        elif isinstance(value, (list, tuple)):
            value = self.range_type(value[0], value[1])
        return value

    def set_attributes_from_name(self, name):
        super(RangeField, self).set_attributes_from_name(name)
        self.base_field.set_attributes_from_name(name)

    def value_to_string(self, obj):
        """Serialize the range to JSON: {"empty": true} or bounds + endpoints."""
        value = self.value_from_object(obj)
        if value is None:
            return None
        if value.isempty:
            return json.dumps({"empty": True})
        base_field = self.base_field
        result = {"bounds": value._bounds}
        for end in ('lower', 'upper'):
            val = getattr(value, end)
            if val is None:
                result[end] = None
            else:
                # Delegate endpoint serialization to the base field.
                obj = AttributeSetter(base_field.attname, val)
                result[end] = base_field.value_to_string(obj)
        return json.dumps(result)

    def formfield(self, **kwargs):
        kwargs.setdefault('form_class', self.form_field)
        return super(RangeField, self).formfield(**kwargs)
class IntegerRangeField(RangeField):
    """Range of integers, stored as a PostgreSQL ``int4range`` column."""
    base_field = models.IntegerField
    range_type = NumericRange
    form_field = forms.IntegerRangeField

    def db_type(self, connection):
        return 'int4range'
class BigIntegerRangeField(RangeField):
    """Range of big integers, stored as a PostgreSQL ``int8range`` column."""
    base_field = models.BigIntegerField
    range_type = NumericRange
    form_field = forms.IntegerRangeField

    def db_type(self, connection):
        return 'int8range'
class FloatRangeField(RangeField):
    """Range of floats, stored as a PostgreSQL ``numrange`` column."""
    base_field = models.FloatField
    range_type = NumericRange
    form_field = forms.FloatRangeField

    def db_type(self, connection):
        return 'numrange'
class DateTimeRangeField(RangeField):
    """Range of timestamps, stored as a PostgreSQL ``tstzrange`` column."""
    base_field = models.DateTimeField
    range_type = DateTimeTZRange
    form_field = forms.DateTimeRangeField

    def db_type(self, connection):
        return 'tstzrange'
class DateRangeField(RangeField):
    """Range of dates, stored as a PostgreSQL ``daterange`` column."""
    base_field = models.DateField
    range_type = DateRange
    form_field = forms.DateRangeField

    def db_type(self, connection):
        return 'daterange'
# Enable the __contains, __contained_by and __overlap lookups on every
# range field subclass.
RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)
class RangeContainedBy(models.Lookup):
    """``contained_by`` lookup on scalar fields: emits ``value <@ range``.

    Registered on the numeric, date and datetime fields so that e.g.
    ``IntegerField`` supports ``__contained_by`` against a range; the
    right-hand side is cast to the range type matching the column type.
    """
    lookup_name = 'contained_by'
    # Maps the column's db_type to the PostgreSQL range type to cast to.
    type_mapping = {
        'integer': 'int4range',
        'bigint': 'int8range',
        'double precision': 'numrange',
        'date': 'daterange',
        'timestamp with time zone': 'tstzrange',
    }

    def as_sql(self, qn, connection):
        field = self.lhs.output_field
        if isinstance(field, models.FloatField):
            # numrange holds numerics, so a float lhs needs an explicit cast.
            sql = '%s::numeric <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        else:
            sql = '%s <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        return sql % (lhs, rhs), params

    def get_prep_lookup(self):
        # Reuse RangeField's coercion so tuples/lists become Range objects.
        return RangeField().get_prep_value(self.rhs)
# Register __contained_by on every scalar field type that has a matching
# PostgreSQL range type (see RangeContainedBy.type_mapping).
models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.BigIntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)
@RangeField.register_lookup
class FullyLessThan(lookups.PostgresSimpleLookup):
    """``fully_lt``: PostgreSQL strictly-left-of range operator (<<)."""
    lookup_name = 'fully_lt'
    operator = '<<'
@RangeField.register_lookup
class FullGreaterThan(lookups.PostgresSimpleLookup):
    """``fully_gt``: PostgreSQL strictly-right-of range operator (>>)."""
    lookup_name = 'fully_gt'
    operator = '>>'
@RangeField.register_lookup
class NotLessThan(lookups.PostgresSimpleLookup):
    """``not_lt``: does-not-extend-to-the-left-of operator (&>)."""
    lookup_name = 'not_lt'
    operator = '&>'
@RangeField.register_lookup
class NotGreaterThan(lookups.PostgresSimpleLookup):
    """``not_gt``: does-not-extend-to-the-right-of operator (&<)."""
    lookup_name = 'not_gt'
    operator = '&<'
@RangeField.register_lookup
class AdjacentToLookup(lookups.PostgresSimpleLookup):
    """``adjacent_to``: is-adjacent-to range operator (-|-)."""
    lookup_name = 'adjacent_to'
    operator = '-|-'
@RangeField.register_lookup
class RangeStartsWith(models.Transform):
    """``startswith`` transform: extracts the lower bound via SQL lower()."""
    lookup_name = 'startswith'
    function = 'lower'

    @property
    def output_field(self):
        # A bound has the type of the range's base field.
        return self.lhs.output_field.base_field
@RangeField.register_lookup
class RangeEndsWith(models.Transform):
    """``endswith`` transform: extracts the upper bound via SQL upper()."""
    lookup_name = 'endswith'
    function = 'upper'

    @property
    def output_field(self):
        # A bound has the type of the range's base field.
        return self.lhs.output_field.base_field
@RangeField.register_lookup
class IsEmpty(models.Transform):
    """``isempty`` transform: True when the range contains no points."""
    lookup_name = 'isempty'
    function = 'isempty'
    output_field = models.BooleanField()
| apache-2.0 |
ampax/edx-platform-backup | common/lib/xmodule/xmodule/modulestore/tests/test_xml_importer.py | 12 | 7822 | """
Tests for XML importer.
"""
import mock
from xblock.core import XBlock
from xblock.fields import String, Scope, ScopeIds
from xblock.runtime import Runtime, KvsFieldData, DictKeyValueStore
from xmodule.x_module import XModuleMixin
from opaque_keys.edx.locations import Location
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore.xml_importer import _import_module_and_update_references
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.tests import DATA_DIR
from uuid import uuid4
import unittest
import importlib
class ModuleStoreNoSettings(unittest.TestCase):
    """
    A mixin to create a mongo modulestore that avoids settings.

    Holds the connection constants and modulestore configuration as class
    attributes; the module-level modulestore() factory below instantiates
    the engine lazily and caches it on the `modulestore` class attribute.
    """
    HOST = MONGO_HOST
    PORT = MONGO_PORT_NUM
    # Randomized db name so parallel test runs don't collide.
    DB = 'test_mongo_%s' % uuid4().hex[:5]
    COLLECTION = 'modulestore'
    FS_ROOT = DATA_DIR
    DEFAULT_CLASS = 'xmodule.modulestore.tests.test_xml_importer.StubXBlock'
    RENDER_TEMPLATE = lambda t_n, d, ctx = None, nsp = 'main': ''

    modulestore_options = {
        'default_class': DEFAULT_CLASS,
        'fs_root': DATA_DIR,
        'render_template': RENDER_TEMPLATE,
    }
    DOC_STORE_CONFIG = {
        'host': HOST,
        'port': PORT,
        'db': DB,
        'collection': COLLECTION,
    }
    MODULESTORE = {
        'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore',
        'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
        'OPTIONS': modulestore_options
    }

    # Cached engine instance, populated by the modulestore() factory below.
    modulestore = None

    def cleanup_modulestore(self):
        """
        Drop the test database if an engine was created.
        """
        if self.modulestore:
            self.modulestore._drop_database()  # pylint: disable=protected-access

    def setUp(self):
        """
        Add cleanups
        """
        self.addCleanup(self.cleanup_modulestore)
        super(ModuleStoreNoSettings, self).setUp()
#===========================================
def modulestore():
    """
    Mock the django dependent global modulestore function to disentangle
    tests from django. Instantiates the engine on first call and caches it
    on ModuleStoreNoSettings.modulestore.
    """
    def load_function(engine_path):
        """
        Load the given engine by dotted path, e.g. 'pkg.module.ClassName'.
        """
        module_path, _, name = engine_path.rpartition('.')
        return getattr(importlib.import_module(module_path), name)

    if ModuleStoreNoSettings.modulestore is None:
        class_ = load_function(ModuleStoreNoSettings.MODULESTORE['ENGINE'])
        options = {}
        options.update(ModuleStoreNoSettings.MODULESTORE['OPTIONS'])
        options['render_template'] = render_to_template_mock
        # pylint: disable=star-args
        ModuleStoreNoSettings.modulestore = class_(
            None,  # contentstore
            ModuleStoreNoSettings.MODULESTORE['DOC_STORE_CONFIG'],
            branch_setting_func=lambda: ModuleStoreEnum.Branch.draft_preferred,
            **options
        )

    return ModuleStoreNoSettings.modulestore
# pylint: disable=unused-argument
def render_to_template_mock(*args):
    """No-op stand-in for the template renderer; accepts and ignores any
    positional arguments and returns None."""
    return None
class StubXBlock(XBlock, XModuleMixin, InheritanceMixin):
    """
    Stub XBlock used for testing.

    Declares one String field per scope so tests can verify content-scope
    and settings-scope handling independently.
    """
    test_content_field = String(
        help="A content field that will be explicitly set",
        scope=Scope.content,
        default="default value"
    )

    test_settings_field = String(
        help="A settings field that will be explicitly set",
        scope=Scope.settings,
        default="default value"
    )
class RemapNamespaceTest(ModuleStoreNoSettings):
    """
    Test remapping the namespace from the import course key to the actual
    course location, for explicitly set, default, and inherited field values.
    """

    def setUp(self):
        """
        Create a stub XBlock backed by in-memory storage.
        """
        self.runtime = mock.MagicMock(Runtime)
        self.field_data = KvsFieldData(kvs=DictKeyValueStore())
        self.scope_ids = ScopeIds('Bob', 'stubxblock', '123', 'import')
        self.xblock = StubXBlock(self.runtime, self.field_data, self.scope_ids)
        super(RemapNamespaceTest, self).setUp()

    def test_remap_namespace_native_xblock(self):
        """Explicitly set fields survive the remap and stay explicitly set."""
        # Set the XBlock's location
        self.xblock.location = Location("org", "import", "run", "category", "stubxblock")

        # Explicitly set the content and settings fields
        self.xblock.test_content_field = "Explicitly set"
        self.xblock.test_settings_field = "Explicitly set"
        self.xblock.save()

        # Move to different runtime w/ different course id
        target_location_namespace = SlashSeparatedCourseKey("org", "course", "run")
        new_version = _import_module_and_update_references(
            self.xblock,
            modulestore(),
            999,
            self.xblock.location.course_key,
            target_location_namespace,
            do_import_static=False
        )

        # Check the XBlock's location
        self.assertEqual(new_version.location.course_key, target_location_namespace)

        # Check the values of the fields.
        # The content and settings fields should be preserved
        self.assertEqual(new_version.test_content_field, 'Explicitly set')
        self.assertEqual(new_version.test_settings_field, 'Explicitly set')

        # Expect that these fields are marked explicitly set
        self.assertIn(
            'test_content_field',
            new_version.get_explicitly_set_fields_by_scope(scope=Scope.content)
        )
        self.assertIn(
            'test_settings_field',
            new_version.get_explicitly_set_fields_by_scope(scope=Scope.settings)
        )

    def test_remap_namespace_native_xblock_default_values(self):
        """Untouched fields keep defaults and are not marked explicitly set."""
        # Set the XBlock's location
        self.xblock.location = Location("org", "import", "run", "category", "stubxblock")

        # Do NOT set any values, so the fields should use the defaults
        self.xblock.save()

        # Remap the namespace
        target_location_namespace = Location("org", "course", "run", "category", "stubxblock")
        new_version = _import_module_and_update_references(
            self.xblock,
            modulestore(),
            999,
            self.xblock.location.course_key,
            target_location_namespace.course_key,
            do_import_static=False
        )

        # Check the values of the fields.
        # The content and settings fields should be the default values
        self.assertEqual(new_version.test_content_field, 'default value')
        self.assertEqual(new_version.test_settings_field, 'default value')

        # The fields should NOT appear in the explicitly set fields
        self.assertNotIn(
            'test_content_field',
            new_version.get_explicitly_set_fields_by_scope(scope=Scope.content)
        )
        self.assertNotIn(
            'test_settings_field',
            new_version.get_explicitly_set_fields_by_scope(scope=Scope.settings)
        )

    def test_remap_namespace_native_xblock_inherited_values(self):
        """Inherited settings (start, graded) are not marked explicitly set."""
        # Set the XBlock's location
        self.xblock.location = Location("org", "import", "run", "category", "stubxblock")
        self.xblock.save()

        # Remap the namespace
        target_location_namespace = Location("org", "course", "run", "category", "stubxblock")
        new_version = _import_module_and_update_references(
            self.xblock,
            modulestore(),
            999,
            self.xblock.location.course_key,
            target_location_namespace.course_key,
            do_import_static=False
        )

        # Inherited fields should NOT be explicitly set
        self.assertNotIn(
            'start', new_version.get_explicitly_set_fields_by_scope(scope=Scope.settings)
        )
        self.assertNotIn(
            'graded', new_version.get_explicitly_set_fields_by_scope(scope=Scope.settings)
        )
| agpl-3.0 |
merenlab/anvio | anvio/tables/indels.py | 2 | 3120 | # -*- coding: utf-8
# pylint: disable=line-too-long
import anvio
import anvio.db as db
import anvio.tables as t
import anvio.utils as utils
import anvio.terminal as terminal
from anvio.tables.tableops import Table
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "Evan Kiefl"
__email__ = "kiefl.evan@gmail.com"
run = terminal.Run()
progress = terminal.Progress()
pp = terminal.pretty_print
class TableForIndels(Table):
    """Buffered writer for the indels table of an anvi'o profile database.

    Entries are accumulated in memory via `append`/`append_entry` and flushed
    to the database in batches by `store` to keep memory usage bounded.
    """

    def __init__(self, db_path, run=run, progress=progress):
        self.db_path = db_path
        self.run = run
        self.progress = progress

        Table.__init__(self, self.db_path, utils.get_required_version_for_db(db_path), run=self.run, progress=self.progress)

        # number of rows already present in the database table at init time
        self.num_entries = self.get_num_entries()
        # in-memory buffer of rows awaiting a flush to the database
        self.db_entries = []

        # after getting an instance, we don't want things to keep accumulating
        # in memory. the purpose of the following variable is to ensure whenever
        # the number of entries in `self.db_entries` variable exceeds a certain
        # value, it will be written to the database and the global variable
        # `self.db_entries` will be emptied, saving significant memory space:
        self.max_num_entries_in_storage_buffer = 50000


    def get_num_entries(self):
        """Return the current number of rows in the indels table of the database."""
        database = db.DB(self.db_path, utils.get_required_version_for_db(self.db_path))
        num_entries = database.get_row_counts_from_table(t.indels_table_name)
        database.disconnect()

        return num_entries


    def append_entry(self, entry):
        """FIXME This needs documentation to explain difference between append and append_entry"""
        # NOTE(review): unlike `append`, this does not increment self.num_entries
        # and flushes only when the buffer STRICTLY exceeds the limit (`>` vs
        # `>=` in `append`) — confirm whether this asymmetry is intentional.
        self.db_entries.append(entry)

        if len(self.db_entries) > self.max_num_entries_in_storage_buffer:
            # everytime we are here, the contents of self.db_entries will be stored in the
            # database
            self.store()


    def append(self, entry):
        """Append a single entry based on a sequence

        Parameters
        ==========
        entry : sequence
            values in order they are in the table, entry_id excluded (it will be appended in the
            body of this function)
        """
        self.db_entries.append(entry)
        self.num_entries += 1

        if len(self.db_entries) >= self.max_num_entries_in_storage_buffer:
            # everytime we are here, the contents of self.db_entries will be stored in the
            # database
            self.store()


    def store(self):
        """Flush the in-memory buffer to the indels table and empty the buffer."""
        if not len(self.db_entries):
            return

        database = db.DB(self.db_path, utils.get_required_version_for_db(self.db_path))
        # 17 placeholders must match the number of columns in the indels table
        database._exec_many('''INSERT INTO %s VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''' % t.indels_table_name, self.db_entries)
        database.disconnect()

        if anvio.DEBUG:
            run.info_single("INDELS: %d entries added to the indels table." % len(self.db_entries), mc="green")

        self.db_entries = []
| gpl-3.0 |
infphilo/hisat2 | hisat2_extract_snps_haplotypes_VCF.py | 2 | 33594 | #!/usr/bin/env python
#
# Copyright 2016, Daehwan Kim <infphilo@gmail.com>
#
# This file is part of HISAT 2.
#
# HISAT 2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HISAT 2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HISAT 2. If not, see <http://www.gnu.org/licenses/>.
#
import sys, os, subprocess
from argparse import ArgumentParser, FileType
digit2str = [str(i) for i in range(10)]  # precomputed '0'..'9' lookup used when matching VCF genotype allele codes
"""
"""
def read_genome(genome_file):
    """Parse a FASTA stream into a dict mapping chromosome name -> sequence.

    The chromosome name is the first whitespace-separated token of the header
    line (without the leading '>'). Records with an empty sequence are dropped.
    """
    genome = {}
    name = ""
    seq_parts = []
    for raw_line in genome_file:
        if raw_line.startswith(">"):
            # Finish the previous record before starting a new one.
            sequence = "".join(seq_parts)
            if name and sequence:
                genome[name] = sequence
            name = raw_line.strip().split()[0][1:]
            seq_parts = []
        else:
            seq_parts.append(raw_line.strip())
    # Store the trailing record, if any.
    sequence = "".join(seq_parts)
    if name and sequence:
        genome[name] = sequence
    return genome
"""
Compare two variants [chr, pos, type, data, dic]
"""
def compare_vars(a, b):
    """cmp-style comparator for variants [chr, pos, type, data, dic].

    Orders by position first; at equal positions insertions ('I') sort before
    SNPs ('S'), which sort before deletions ('D'); ties break on the data field.
    Both variants must be on the same chromosome.
    """
    chr_a, pos_a, type_a, data_a = a[:4]
    chr_b, pos_b, type_b, data_b = b[:4]
    assert chr_a == chr_b
    if pos_a != pos_b:
        return pos_a - pos_b
    if type_a != type_b:
        # Insertions come first; among 'S' and 'D', 'S' precedes 'D'.
        if type_a == 'I':
            return -1
        if type_b == 'I':
            return 1
        return -1 if type_a == 'S' else 1
    # Same position and type: compare the payload (alt base / length / sequence).
    if data_a < data_b:
        return -1
    if data_a > data_b:
        return 1
    return 0
"""
"""
def compatible_vars(a, b):
    """Return True if two position-ordered variants can coexist on a haplotype.

    Variants are incompatible when they occupy the same position, or when `b`
    falls inside (or immediately after) the span deleted by `a`.
    """
    chr_a, pos_a, type_a, data_a = a[:4]
    chr_b, pos_b = b[0], b[1]
    assert chr_a == chr_b
    assert pos_a <= pos_b
    if pos_a == pos_b:
        return False
    # A deletion of length data_a starting at pos_a conflicts with anything
    # starting at or before pos_a + data_a.
    if type_a == 'D' and pos_b <= pos_a + data_a:
        return False
    return True
"""
Given a VCF line, the function reports a list of variants [pos, type, data]
type: 'S' for single nucleotide polymorphism, 'D' for deletion, and 'I' for insertion
"""
def extract_vars(chr_dic, chr, pos, ref_allele, alt_alleles, varID):
    """Convert one VCF record into a list of HISAT2 variants.

    Parameters
    ----------
    chr_dic : dict mapping chromosome name -> sequence string
    chr : chromosome name of the record
    pos : 0-based position of the REF allele
    ref_allele : REF field (must not contain commas)
    alt_alleles : ALT field; comma-separated alternative alleles
    varID : VCF ID field

    Returns
    -------
    A list of [chr, pos, type, data, {"id": varID, "id2": per-allele ID}]
    entries, where type is 'S' (data = alt base), 'D' (data = deletion
    length), or 'I' (data = inserted sequence). Alleles containing 'N',
    insertions longer than 32 bases, and alleles whose anchor bases disagree
    with the reference sequence are silently skipped.
    """
    chr_seq = chr_dic[chr]
    vars = []
    assert ',' not in ref_allele
    alt_alleles = alt_alleles.split(',')
    for a in range(len(alt_alleles)):
        alt_allele = alt_alleles[a]
        if 'N' in alt_allele:
            continue
        # VCF anchors indels on a shared leading base; strip the common
        # prefix and advance the position accordingly.
        ref_allele2, pos2 = ref_allele, pos
        min_len = min(len(ref_allele2), len(alt_allele))
        assert min_len >= 1
        if min_len > 1:
            ref_allele2 = ref_allele2[min_len - 1:]
            alt_allele = alt_allele[min_len - 1:]
            pos2 += (min_len - 1)
        type, data = '', ''
        if len(ref_allele2) == 1 and len(alt_allele) == 1:
            type = 'S'
            data = alt_allele
            assert ref_allele2 != alt_allele
            if chr_seq[pos2] != ref_allele2:
                continue
        elif len(ref_allele2) == 1:
            assert len(alt_allele) > 1
            type = 'I'
            data = alt_allele[1:]
            if len(data) > 32:  # skip very long insertions
                continue
            # Bug fix: validate the anchor base at the trimmed position pos2,
            # not the original pos (they differ when min_len > 1); otherwise
            # insertions with a multi-base shared prefix were always dropped.
            if chr_seq[pos2] != ref_allele2:
                continue
        elif len(alt_allele) == 1:
            assert len(ref_allele2) > 1
            type = 'D'
            data = len(ref_allele2) - 1
            if chr_seq[pos2:pos2+data+1] != ref_allele2:
                continue
        else:
            assert False

        # Multi-allelic records get a ".<index>" suffix on the per-allele ID.
        varID2 = varID
        if len(alt_alleles) > 1:
            varID2 = "%s.%d" % (varID, a)
        vars.append([chr, pos2, type, data, {"id":varID, "id2":varID2}])

    return vars
"""
"""
def generate_haplotypes(snp_file,
                        haplotype_file,
                        vars,
                        inter_gap,
                        intra_gap,
                        num_genomes,
                        num_haplotypes):
    """Write `vars` to the .snp file and group them into haplotypes (.haplotype).

    `vars` is a list of [chr, pos, type, data, dic] entries on one chromosome.
    When num_genomes > 0, per-sample genotype strings drive the grouping;
    otherwise synthetic genotype numbers are assigned so that incompatible
    variants never share a haplotype. `inter_gap` controls when nearby
    haplotypes' start coordinates are merged; `intra_gap` splits haplotypes
    that contain large internal gaps. Returns the updated haplotype counter.
    """
    assert len(vars) > 0

    # Sort variants and remove redundant variants
    vars = sorted(vars, cmp=compare_vars)
    tmp_vars = []
    v = 0
    while v < len(vars):
        var = vars[v]
        for v2 in range(v + 1, len(vars)):
            var2 = vars[v2]
            if compare_vars(var, var2) == 0:
                # Duplicate variant: keep one copy but merge CLNSIG/genotype
                # annotations from the discarded copy.
                v += 1
                if "CLNSIG" not in var[4]:
                    if "CLNSIG" in var2[4]:
                        var[4]["CLNSIG"] = var2[4]["CLNSIG"]
                if "genotype" not in var[4]:
                    if "genotype" in var2[4]:
                        var[4]["genotype"] = var2[4]["genotype"]
            else:
                assert compare_vars(var, var2) < 0
                break
        tmp_vars.append(var)
        v += 1
    vars = tmp_vars

    # Write SNPs into a file (.snp)
    for var in vars:
        chr, pos, type, data, var_dic = var
        varID = var_dic["id2"]
        if type == 'S':
            type = "single"
        elif type == 'D':
            type = "deletion"
        else:
            assert type == 'I'
            type = "insertion"
        print >> snp_file, "%s\t%s\t%s\t%s\t%s" % \
            (varID, type, chr, pos, data)

    # variant compatibility
    # vars_cmpt[v2] = index of the first earlier variant whose span overlaps
    # variant v2 (-1 when v2 conflicts with nothing before it).
    vars_cmpt = [-1 for i in range(len(vars))]
    for v in range(len(vars)):
        var_chr, var_pos, var_type, var_data = vars[v][:4]
        if var_type == 'D':
            var_pos += (var_data - 1)
        for v2 in range(v + 1, len(vars)):
            if vars_cmpt[v2] >= 0:
                continue
            var2_chr, var2_pos, var2_type = vars[v2][:3]
            assert var_chr == var2_chr
            if var_type == 'D' and var2_type == 'D':
                if var_pos + 1 < var2_pos:
                    break
            else:
                if var_pos < var2_pos:
                    break
            vars_cmpt[v2] = v

    # Assign genotypes for those missing genotypes
    genotypes_list = []
    if num_genomes > 0:
        max_genotype_num = 1
        for v in range(len(vars)):
            var = vars[v]
            var_dic = var[4]
            if "genotype" not in var_dic:
                # Genotype numbers 0 and 1 are reserved; pick the lowest
                # number not used by any conflicting earlier variant.
                used = [True, True] + [False for i in range(8)]
                if vars_cmpt[v] >= 0:
                    v2 = v - 1
                    while v2 >= vars_cmpt[v]:
                        var2 = vars[v2]
                        if not compatible_vars(var2, var):
                            var2_dic = var2[4]
                            assert "genotype" in var2_dic
                            genotype_num = int(var2_dic["genotype"][0])
                            used[genotype_num] = True
                        v2 -= 1
                assert False in used
                for i in range(len(used)):
                    if not used[i]:
                        # One character per chromosome copy (2 per genome).
                        var_dic["genotype"] = ("%d" % i) * (num_genomes * 2)
                        if i > max_genotype_num:
                            max_genotype_num = i
                        break
            genotypes_list.append(var_dic["genotype"])
        num_chromosomes = len(genotypes_list[0])

        # daehwan - for debugging purposes
        """
        for v in range(len(vars)):
            var = vars[v]
            var_chr, var_pos, var_type, var_data, var_dic = var
            print v, var_chr, var_pos, var_type, var_data, var_dic["id"], var_dic["id2"],
            if "CLNSIG" in var_dic:
                print "CLNSIG:", var_dic["CLNSIG"],
            if "genotype" in var_dic:
                print var_dic["genotype"][:50],
            print
        """

        # genotypes_list looks like
        #    Var0: 000001000
        #    Var1: 010000000
        #    Var2: 001100000
        #    Var3: 222222222

        # Get haplotypes from genotypes_list
        # Transpose: one string per chromosome copy, one character per variant.
        haplotypes = set()
        cnv_genotypes = ["" for i in range(num_chromosomes)]
        for genotypes in genotypes_list:
            for i in range(len(genotypes)):
                genotype = genotypes[i]
                cnv_genotypes[i] += genotype
        cnv_genotypes = set(cnv_genotypes)
        for raw_haplotype in cnv_genotypes:
            for num in range(1, max_genotype_num + 1):
                num_str = str(num)
                if num_str not in raw_haplotype:
                    continue
                # Collect the variant indices carried by this chromosome copy
                # at this genotype number, joined with '#'.
                haplotype = ""
                for i in range(len(raw_haplotype)):
                    if raw_haplotype[i] == num_str:
                        if haplotype == "":
                            haplotype = str(i)
                        else:
                            haplotype += ("#%d" % i)
                assert haplotype != ""
                haplotypes.add(haplotype)
    else:
        # No sample genotypes available: assign synthetic genotype numbers so
        # that conflicting variants get distinct numbers.
        for v in range(len(vars)):
            var = vars[v]
            var_dic = var[4]
            used = [False for i in range(100)]
            if vars_cmpt[v] >= 0:
                v2 = v - 1
                while v2 >= vars_cmpt[v]:
                    var2 = vars[v2]
                    if not compatible_vars(var2, var):
                        var2_dic = var2[4]
                        assert "genotype" in var2_dic
                        genotype_num = var2_dic["genotype"]
                        used[genotype_num] = True
                    v2 -= 1
            assert False in used
            for i in range(len(used)):
                if not used[i]:
                    var_dic["genotype"] = i
                    break
            genotypes_list.append(var_dic["genotype"])

        # genotypes_list looks like
        #    Var0: 0
        #    Var1: 0
        #    Var2: 1
        #    Var3: 2

        # Get haplotypes from genotypes_list
        max_genotype_num = max(genotypes_list)
        haplotypes = ["" for i in range(max_genotype_num + 1)]
        for i in range(len(genotypes_list)):
            num = genotypes_list[i]
            if haplotypes[num] == "":
                haplotypes[num] = str(i)
            else:
                haplotypes[num] += ("#%d" % i)
        haplotypes = set(haplotypes)

    # haplotypes look like
    #    '8#10#12#23', '8#12#23', '5#8#12#23#30'

    # Split some haplotypes that include large gaps inside
    def split_haplotypes(haplotypes):
        # Break a '#'-joined index list wherever consecutive variants are
        # more than intra_gap bases apart on the reference.
        split_haplotypes = set()
        for haplotype in haplotypes:
            haplotype = haplotype.split('#')
            assert len(haplotype) > 0
            if len(haplotype) == 1:
                split_haplotypes.add(haplotype[0])
                continue
            prev_s, s = 0, 1
            while s < len(haplotype):
                _, prev_locus, prev_type, prev_data, _ = vars[int(haplotype[s-1])]
                _, locus, type, data, _ = vars[int(haplotype[s])]
                prev_locus, locus = int(prev_locus), int(locus)
                if prev_type == 'D':
                    prev_locus += (int(prev_data) - 1)
                if prev_locus + intra_gap < locus:
                    split_haplotypes.add('#'.join(haplotype[prev_s:s]))
                    prev_s = s
                s += 1
                if s == len(haplotype):
                    split_haplotypes.add('#'.join(haplotype[prev_s:s]))
        return split_haplotypes

    haplotypes2 = split_haplotypes(haplotypes)

    def cmp_haplotype(a, b):
        # cmp-style comparator: order haplotypes by start locus, then by
        # end locus (deletion spans extend the end).
        a = a.split('#')
        _, a1_locus, _, _, _ = vars[int(a[0])]
        _, a2_locus, a2_type, a2_data, _ = vars[int(a[-1])]
        a_begin, a_end = int(a1_locus), int(a2_locus)
        if a2_type == 'D':
            a_end += (int(a2_data) - 1)
        b = b.split('#')
        _, b1_locus, _, _, _ = vars[int(b[0])]
        _, b2_locus, b2_type, b2_data, _ = vars[int(b[-1])]
        b_begin, b_end = int(b1_locus), int(b2_locus)
        if b2_type == 'D':
            b_end += (int(b2_data) - 1)
        if a_begin != b_begin:
            return a_begin - b_begin
        return a_end - b_end

    haplotypes = sorted(list(haplotypes2), cmp=cmp_haplotype)

    # daehwan - for debugging purposes
    """
    dis = prev_locus - locus
    print "\n[%d, %d]: %d haplotypes" % (i, j, len(haplotypes)), dis
    if len(cur_vars) in range(0, 1000):
        # print "vars:", sorted(list(cur_vars), cmp=cmp_varKey
        print "num:", len(haplotypes)
        for haplotype in haplotypes:
            print haplotype.split('#')
        print "\nnum:", len(haplotypes2)
        for haplotype in haplotypes2:
            print haplotype.split('#')
    """

    # Write haplotypes
    for h_i in range(len(haplotypes)):
        h = haplotypes[h_i].split('#')
        chr, h1_locus, _, _, _ = vars[int(h[0])]
        _, h2_locus, h2_type, h2_data, _ = vars[int(h[-1])]
        h_begin, h_end = int(h1_locus), int(h2_locus)
        if h2_type == 'D':
            h_end += (int(h2_data) - 1)
        assert h_begin <= h_end
        # Pull the start left so haplotypes within inter_gap of an earlier
        # haplotype's end share an overlapping coordinate window.
        h_new_begin = h_begin
        for h_j in reversed(range(0, h_i)):
            hc = haplotypes[h_j].split('#')
            _, hc_begin, hc_type, hc_data, _ = vars[int(hc[-1])]
            hc_begin = int(hc_begin)
            hc_end = hc_begin
            if hc_type == 'D':
                hc_end += (int(hc_data) - 1)
            if hc_end + inter_gap < h_begin:
                break
            if h_new_begin > hc_end:
                h_new_begin = hc_end
        assert h_new_begin <= h_begin
        h_add = []
        for id in h:
            var_dic = vars[int(id)][4]
            h_add.append(var_dic["id2"])
        print >> haplotype_file, "ht%d\t%s\t%d\t%d\t%s" % \
            (num_haplotypes, chr, h_new_begin, h_end, ','.join(h_add))
        num_haplotypes += 1

    return num_haplotypes
"""
"""
def main(genome_file,
         VCF_fnames,
         base_fname,
         inter_gap,
         intra_gap,
         only_rs,
         reference_type,
         genotype_vcf,
         genotype_gene_list,
         extra_files,
         verbose):
    """Extract SNPs and haplotypes from VCF files into HISAT2 index inputs.

    Writes <base_fname>.snp and <base_fname>.haplotype, plus optional
    .clnsig/.ref/_backbone.fa files. When `genotype_vcf` is given, only
    pathogenic variants in the genes of `genotype_gene_list` are collected
    first and restrict which VCF regions are processed.
    """
    # Load genomic sequences
    chr_dic = read_genome(genome_file)

    # GRCh38 - ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/release/20130502/supporting/GRCh38_positions
    #   ALL.chr22.phase3_shapeit2_mvncall_integrated_v3plus_nounphased.rsID.genotypes.GRCh38_dbSNP_no_SVs.vcf.gz
    # GRCh37 - ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/release/20130502
    #   ALL.chr22.phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes.vcf.gz

    # List of variants (e.g. ClinVar database)
    genotype_var_list = {}
    # List of genomic regions to be processed
    genotype_ranges = {}
    if genotype_vcf != "":
        var_set = set()
        assert len(genotype_gene_list) > 0
        if genotype_vcf.endswith(".gz"):
            vcf_cmd = ["gzip", "-cd", genotype_vcf]
        else:
            vcf_cmd = ["cat", genotype_vcf]
        vcf_proc = subprocess.Popen(vcf_cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=open("/dev/null", 'w'))
        for line in vcf_proc.stdout:
            if line.startswith("#"):
                continue
            chr, pos, varID, ref_allele, alt_alleles, qual, filter, info = line.strip().split('\t')
            # VCF positions are 1-based; internally everything is 0-based.
            pos = int(pos) - 1
            if chr not in chr_dic:
                continue
            gene = None
            for g in genotype_gene_list:
                if info.find(g) != -1:
                    gene = g
                    break
            if not gene:
                continue
            # ClinVar clinical significance codes: 4 = likely pathogenic,
            # 5 = pathogenic; everything else is skipped.
            CLNSIG = -1
            for item in info.split(';'):
                if not item.startswith("CLNSIG"):
                    continue
                try:
                    key, value = item.split('=')
                    CLNSIG = int(value)
                except ValueError:
                    continue
            if CLNSIG not in [4, 5]:
                continue
            if CLNSIG == 4:
                CLNSIG = "Likely pathogenic"
            else:
                CLNSIG = "Pathogenic"
            vars = extract_vars(chr_dic, chr, pos, ref_allele, alt_alleles, varID)
            if len(vars) == 0:
                continue
            if chr not in genotype_var_list:
                genotype_var_list[chr] = []
                genotype_ranges[chr] = {}
            if gene not in genotype_ranges[chr]:
                genotype_ranges[chr][gene] = [len(chr_dic[chr]), -1]
            for var in vars:
                var_chr, var_pos, var_ref_allele, var_alt_allele = var[:4]
                var_str = "%s-%d-%s-%s" % (var_chr, var_pos, var_ref_allele, var_alt_allele)
                if var_str in var_set:
                    continue
                var[4]["CLNSIG"] = CLNSIG
                var[4]["gene"] = gene
                genotype_var_list[chr].append(var)
                # Track the min/max variant position per gene.
                if var_pos < genotype_ranges[chr][gene][0]:
                    genotype_ranges[chr][gene][0] = var_pos
                if var_pos > genotype_ranges[chr][gene][1]:
                    genotype_ranges[chr][gene][1] = var_pos
                var_set.add(var_str)

        print >> sys.stderr, "Number of variants in %s is:" % (genotype_vcf)
        for chr, vars in genotype_var_list.items():
            vars = sorted(vars, cmp=compare_vars)
            print >> sys.stderr, "\tChromosome %s: %d variants" % (chr, len(vars))

        # Pad each gene's region by 100 bp on both sides.
        for chr, gene_ranges in genotype_ranges.items():
            for gene, value in gene_ranges.items():
                gene_ranges[gene] = [value[0] - 100, value[1] + 100]
                value = genotype_ranges[chr][gene]
                if verbose:
                    print >> sys.stderr, "%s\t%s\t%d-%d" % (chr, gene, value[0], value[1])

        # NOTE(review): `extra_files or True` is always true, so the .clnsig
        # file is written unconditionally — presumably a debugging override.
        if extra_files or True:
            clnsig_file = open("%s.clnsig" % base_fname, 'w')
            for chr, vars in genotype_var_list.items():
                for var in vars:
                    varID = var[4]["id2"]
                    CLNSIG = var[4]["CLNSIG"]
                    gene = var[4]["gene"]
                    print >> clnsig_file, "%s\t%s\t%s" % (varID, gene, CLNSIG)
            clnsig_file.close()

    SNP_file = open("%s.snp" % base_fname, 'w')
    haplotype_file = open("%s.haplotype" % base_fname, 'w')

    # Write reference information and backbone sequences into files
    if extra_files:
        ref_file = open("%s.ref" % base_fname, 'w')
        for chr, gene_ranges in genotype_ranges.items():
            for gene, value in gene_ranges.items():
                left, right = value
                if reference_type == "gene":
                    # Gene-local coordinates start at 0.
                    left, right = 0, right - left
                print >> ref_file, "%s\t%s\t%d\t%d" % (gene, chr, left, right)
        ref_file.close()
        if reference_type == "gene":
            backbone_file = open("%s_backbone.fa" % base_fname, 'w')
            for chr, gene_ranges in genotype_ranges.items():
                for gene, value in gene_ranges.items():
                    left, right = value
                    left, right = 0, right - left
                    print >> backbone_file, ">%s" % (gene)
                    backbone_seq = chr_dic[chr][value[0]:value[1]+1]
                    # FASTA output wrapped at 60 characters per line.
                    for s in range(0, len(backbone_seq), 60):
                        print >> backbone_file, backbone_seq[s:s+60]
            backbone_file.close()
        elif reference_type == "chromosome":
            first = True
            for chr in genotype_ranges.keys():
                # NOTE(review): assumes a "genome.fa" with a samtools index
                # exists in the working directory — confirm against callers.
                if first:
                    os.system("samtools faidx genome.fa %s > %s_backbone.fa" % (chr, base_fname))
                    first = False
                else:
                    os.system("samtools faidx genome.fa %s >> %s_backbone.fa" % (chr, base_fname))
        else:
            assert reference_type == "genome"
            os.system("cp genome.fa %s_backbone.fa" % base_fname)

    num_haplotypes = 0
    num_unassigned = 0
    for VCF_fname in VCF_fnames:
        empty_VCF_file = False
        if VCF_fname == "/dev/null" or \
                not os.path.exists(VCF_fname):
            empty_VCF_file = True
            if reference_type != "genome" and \
                    len(genotype_gene_list) > 0:
                continue
        if not empty_VCF_file:
            if VCF_fname.endswith(".gz"):
                vcf_cmd = ["gzip", "-cd", VCF_fname]
            else:
                vcf_cmd = ["cat", VCF_fname]
            vcf_proc = subprocess.Popen(vcf_cmd,
                                        stdout=subprocess.PIPE,
                                        stderr=open("/dev/null", 'w'))

            genomeIDs = []
            vars, genotypes_list = [], []
            prev_varID, prev_chr, prev_pos = "", "", -1
            num_lines = 0
            for line in vcf_proc.stdout:
                num_lines += 1
                if line.startswith("##"):
                    continue
                fields = line.strip().split('\t')
                chr, pos, varID, ref_allele, alt_alleles, qual, filter, info = fields[:8]
                if prev_chr != chr:
                    curr_right = -1
                if len(fields) >= 9:
                    format = fields[8]
                genotypes = []
                if len(fields) >= 10:
                    genotypes = fields[9:]
                if line.startswith("#"):
                    # The "#CHROM ..." header line carries the sample IDs.
                    genomeIDs = genotypes
                    num_genomes = len(genomeIDs)
                    continue
                assert len(genotypes) == len(genomeIDs)
                if only_rs and not varID.startswith("rs"):
                    continue
                if ';' in varID:
                    continue
                if varID == prev_varID:
                    continue
                if chr not in chr_dic:
                    continue
                chr_seq = chr_dic[chr]
                chr_genotype_vars = []
                chr_genotype_ranges = {}
                if len(genotype_gene_list) > 0:
                    assert chr in genotype_var_list
                    chr_genotype_vars = genotype_var_list[chr]
                    assert chr in genotype_ranges
                    chr_genotype_ranges = genotype_ranges[chr]
                pos = int(pos) - 1
                offset = 0
                gene = None
                if num_lines % 10000 == 1:
                    # Progress indicator (carriage return keeps it on one line).
                    print >> sys.stderr, "\t%s:%d\r" % (chr, pos),
                if chr_genotype_ranges:
                    # Only process positions inside one of the gene regions.
                    skip = True
                    for gene_, range_ in chr_genotype_ranges.items():
                        if pos > range_[0] and pos < range_[1]:
                            skip = False
                            break
                    if skip:
                        continue
                    if len(vars) == 0:
                        # Seed the window with the known (e.g. ClinVar) variants
                        # that fall inside this gene's region.
                        for var in chr_genotype_vars:
                            var_chr, var_pos, var_type, var_data, var_dic = var
                            if var_pos < range_[0]:
                                continue
                            if var_pos > range_[1]:
                                break
                            if reference_type == "gene":
                                var_pos -= range_[0]
                            vars.append([gene_, var_pos, var_type, var_data, var_dic])
                        curr_right = range_[1]
                    if reference_type == "gene":
                        offset = range_[0]
                        gene = gene_
                if pos == prev_pos:
                    continue
                # Flush the current variant window when the gap to the next
                # variant exceeds inter_gap or the chromosome changes.
                if len(vars) > 0 and \
                        (curr_right + inter_gap < pos or prev_chr != chr):
                    num_haplotypes = generate_haplotypes(SNP_file,
                                                         haplotype_file,
                                                         vars,
                                                         inter_gap,
                                                         intra_gap,
                                                         num_genomes,
                                                         num_haplotypes)
                    vars = []

                def add_vars(pos,
                             offset,
                             gene,
                             varID,
                             ref_allele,
                             alt_alleles,
                             vars,
                             genotypes):
                    # Convert this record's alleles to variants, attach the
                    # per-chromosome 0/1 genotype string, and return the
                    # rightmost reference coordinate touched (-1 if none kept).
                    tmp_vars = extract_vars(chr_dic, chr, pos, ref_allele, alt_alleles, varID)
                    max_right = -1
                    for v in range(len(tmp_vars)):
                        var = tmp_vars[v]
                        _, pos2, type, data = var[:4]
                        cnv_genotypes = []
                        for genotype in genotypes:
                            # genotype looks like "0|1"; characters 0 and 2 are
                            # the two chromosome copies.
                            P1, P2 = genotype[0], genotype[2]
                            if P1 == digit2str[v + 1]:
                                cnv_genotypes.append('1')
                            else:
                                cnv_genotypes.append('0')
                            if P2 == digit2str[v + 1]:
                                cnv_genotypes.append('1')
                            else:
                                cnv_genotypes.append('0')
                        # Skip SNPs not present in a given population (e.g. 2,504 genomes in 1000 Genomes Project)
                        if cnv_genotypes != [] and \
                                '1' not in cnv_genotypes:
                            continue
                        tmp_varID = var[4]["id2"]
                        var_dic = {"id":varID, "id2":tmp_varID, "genotype":''.join(cnv_genotypes)}
                        if reference_type == "gene":
                            vars.append([gene, pos2 - offset, type, data, var_dic])
                        else:
                            vars.append([chr, pos2, type, data, var_dic])
                        right = pos2
                        if type == 'D':
                            right += (int(data) - 1)
                        if max_right < right:
                            max_right = right
                    return max_right

                right = add_vars(pos,
                                 offset,
                                 gene,
                                 varID,
                                 ref_allele,
                                 alt_alleles,
                                 vars,
                                 genotypes)
                if curr_right < right:
                    curr_right = right

                prev_varID = varID
                prev_chr = chr
                prev_pos = pos

            # Flush whatever remains after the last VCF line.
            if len(vars) > 0:
                num_haplotypes = generate_haplotypes(SNP_file,
                                                     haplotype_file,
                                                     vars,
                                                     inter_gap,
                                                     intra_gap,
                                                     num_genomes,
                                                     num_haplotypes)
                vars = []
        else:
            # No VCF available: emit haplotypes directly from the collected
            # genotype variant list (no sample genotypes, num_genomes = 0).
            for chr in genotype_var_list.keys():
                chr_seq = chr_dic[chr]
                chr_genotype_vars = genotype_var_list[chr]
                curr_right = -1
                vars = []
                for var in chr_genotype_vars:
                    var_chr, var_pos, var_type, var_data, var_dic = var
                    num_genomes = 0
                    if len(vars) > 0 and curr_right + inter_gap < var_pos:
                        num_haplotypes = generate_haplotypes(SNP_file,
                                                             haplotype_file,
                                                             vars,
                                                             inter_gap,
                                                             intra_gap,
                                                             num_genomes,
                                                             num_haplotypes)
                        vars = []
                    vars.append([var_chr, var_pos, var_type, var_data, var_dic])
                    curr_right = var_pos
                    if var_type == 'D':
                        curr_right += (var_data - 1)
                if len(vars) > 0:
                    num_haplotypes = generate_haplotypes(SNP_file,
                                                         haplotype_file,
                                                         vars,
                                                         inter_gap,
                                                         intra_gap,
                                                         num_genomes,
                                                         num_haplotypes)
                    vars = []

    SNP_file.close()
    haplotype_file.close()
    if genotype_vcf != "":
        clnsig_file.close()
if __name__ == '__main__':
    # Command-line entry point: parse arguments and dispatch to main().
    parser = ArgumentParser(
        description='Extract SNPs and haplotypes from VCF files')
    parser.add_argument('genome_file',
                        nargs='?',
                        type=FileType('r'),
                        help='input genome file (e.g. genome.fa)')
    parser.add_argument('VCF_fnames',
                        nargs='?',
                        type=str,
                        help='A comma-seperated VCF files (plain text or gzipped file is accepted: GRCh38_dbSNP_no_SVs.vcf or GRCh38_dbSNP_no_SVs.vcf.gz')
    parser.add_argument("base_fname",
                        nargs='?',
                        type=str,
                        help="base filename for SNPs and haplotypes")
    parser.add_argument("--reference-type",
                        dest="reference_type",
                        type=str,
                        default="genome",
                        help="Reference type: gene, chromosome, and genome (default: genome)")
    parser.add_argument("--inter-gap",
                        dest="inter_gap",
                        type=int,
                        default=30,
                        help="Maximum distance for variants to be in the same haplotype (default: 30)")
    parser.add_argument("--intra-gap",
                        dest="intra_gap",
                        type=int,
                        default=50,
                        help="Break a haplotype into several haplotypes (default: 50)")
    parser.add_argument('--non-rs',
                        dest='only_rs',
                        action='store_false',
                        help='Allow SNP IDs not beginning with rs')
    parser.add_argument('--genotype-vcf',
                        dest='genotype_vcf',
                        type=str,
                        default="",
                        help='VCF file name for genotyping (default: empty)')
    parser.add_argument('--genotype-gene-list',
                        dest='genotype_gene_list',
                        type=str,
                        default="",
                        help='A comma-separated list of genes to be genotyped (default: empty)')
    parser.add_argument('--extra-files',
                        dest='extra_files',
                        action='store_true',
                        help='Output extra files such as _backbone.fa and .ref')
    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        action='store_true',
                        help='also print some statistics to stderr')

    args = parser.parse_args()
    # All three positional arguments are required despite nargs='?'.
    if not args.genome_file or \
            not args.VCF_fnames or \
            not args.base_fname:
        parser.print_help()
        exit(1)
    args.VCF_fnames = args.VCF_fnames.split(',')
    if args.genotype_vcf != "":
        if args.genotype_gene_list == "":
            # No explicit gene list: harvest gene symbols from the GENEINFO
            # INFO tag of the genotyping VCF itself.
            genes = set()
            if args.genotype_vcf.endswith(".gz"):
                vcf_cmd = ["gzip", "-cd", args.genotype_vcf]
            else:
                vcf_cmd = ["cat", args.genotype_vcf]
            vcf_proc = subprocess.Popen(vcf_cmd,
                                        stdout=subprocess.PIPE,
                                        stderr=open("/dev/null", 'w'))
            for line in vcf_proc.stdout:
                if line.startswith("#"):
                    continue
                info = line.strip().split()[-1]
                if info.find("GENEINFO=") == -1:
                    continue
                # GENEINFO looks like "GENEINFO=SYMBOL:id;..."; keep the symbol.
                gene = info.split("GENEINFO=")[1]
                gene = gene.split(':')[0]
                genes.add(gene)
            args.genotype_gene_list = list(genes)
        else:
            args.genotype_gene_list = args.genotype_gene_list.split(',')
        if len(args.genotype_gene_list) == 0:
            print >> sys.stderr, "Error: please specify --genotype-gene-list."
            sys.exit(1)
    else:
        args.genotype_gene_list = []
    main(args.genome_file,
         args.VCF_fnames,
         args.base_fname,
         args.inter_gap,
         args.intra_gap,
         args.only_rs,
         args.reference_type,
         args.genotype_vcf,
         args.genotype_gene_list,
         args.extra_files,
         args.verbose)
| gpl-3.0 |
ubenu/Blits | src/blitspak/blits.py | 1 | 50733 | """
Blits:
Created on 23 May 2017
Original Blivion:
Created on Tue Oct 25 13:11:32 2016
@author: Maria Schilstra
"""
#from PyQt5.uic import loadUiType
from PyQt5 import QtCore as qt
from PyQt5 import QtWidgets as widgets
from PyQt5 import QtGui as gui
import pandas as pd, numpy as np, copy as cp
from blitspak.blits_mpl import MplCanvas, NavigationToolbar
from blitspak.blits_data import BlitsData
from blitspak.function_dialog import FunctionSelectionDialog
from blitspak.data_creation_dialog import DataCreationDialog
from functions.framework import FunctionsFramework
# from PyQt5.uic import loadUiType
# Ui_MainWindow, QMainWindow = loadUiType('..\\Resources\\UI\\blits.ui')
# Original:
# To avoid using .ui file (from QtDesigner) and loadUIType,
# created a python-version of the .ui file using pyuic5 from command line
# Here: pyuic5 blits.ui -o blits_ui.py
# Also: cannot (easily) use .qrc file, so need to create _rc.py file
# with icon definitions: pyrcc5 -o blits_rc.py blits.qrc
# Then import .py package, as below.
# (QMainWindow is a QtWidget; UI_MainWindow is generated by the converted .ui)
import blitspak.blits_ui as ui
class Main(widgets.QMainWindow, ui.Ui_MainWindow):
N_STATES = 5
ST_START, ST_DATA_ONLY, FUNCTION_ONLY, ST_READY, REJECT = range(N_STATES)
N_PS_SPECTYPES = 7
PS_VALUES, PS_LEDITS, PS_VALUE_FIXED, PS_FIX_CBOXES, PS_GROUPS, PS_COMBOS, PS_SIGMAS = range(N_PS_SPECTYPES)
N_P_SPECTYPES = 4
P_ALL_FIXED, P_FIX_CBOXES, P_ALL_LINKED, P_LINK_CBOXES = range(N_P_SPECTYPES)
N_S_SPECTYPES = 3
S_INCLUDED, S_INCLUDE_CBOXES, S_FTOL = range(N_S_SPECTYPES)
ps_types = ['param_values', 'param_line_edits', 'param_values_fixed', 'param_fix_cboxes', 'series_groups', 'series_combos', 'sigmas']
s_types = ['included', 'included_cboxes', 'ftol']
p_types = ['all_fixed', 'all_fixed_cboxes', 'all_linked', 'all_linked_cboxes']
    def __init__(self, ):
        """Build the main window: plot canvas, fit-control buttons, menu/signal
        wiring, and the empty data/fit-specification state."""
        super(Main, self).__init__()
        self.setupUi(self)

        # Modal dialogs are created lazily when their actions fire.
        self.scrutinize_dialog = None
        self.function_dialog = None
        self.create_data_set_dialog = None

        # Matplotlib canvas plus its navigation toolbar, embedded in the
        # designer-created mpl_window/mpl_layout.
        self.canvas = MplCanvas(self.mpl_window)
        self.plot_toolbar = NavigationToolbar(self.canvas, self.mpl_window)
        self.mpl_layout.addWidget(self.canvas)
        # Group box that will later hold the x-axis selector radio buttons.
        self.grp_show_axis = widgets.QGroupBox()
        self.axis_layout = widgets.QHBoxLayout()
        self.grp_show_axis.setLayout(self.axis_layout)
        self.grp_show_axis.setSizePolicy(widgets.QSizePolicy.Maximum, widgets.QSizePolicy.Maximum)
        self.axisgrp_layout = widgets.QHBoxLayout()
        self.axisgrp_layout.addWidget(self.grp_show_axis)
        self.mpl_layout.addLayout(self.axisgrp_layout)
        self.mpl_layout.addWidget(self.plot_toolbar)

        # Estimate / Calculate / Fit buttons added to the fit button box.
        ft = gui.QFont('Calibri', 14)
        self.btn_est = widgets.QPushButton("Estimate")
        self.btn_est.setFont(ft)
        self.btn_apply = widgets.QPushButton("Calculate")
        self.btn_apply.setFont(ft)
        self.btn_fit = widgets.QPushButton("Fit")
        self.btn_fit.setFont(ft)
        self.bbox_fit.addButton(self.btn_apply, widgets.QDialogButtonBox.ActionRole)
        self.bbox_fit.addButton(self.btn_est, widgets.QDialogButtonBox.ActionRole)
        self.bbox_fit.addButton(self.btn_fit, widgets.QDialogButtonBox.ActionRole)

        # Menu actions and button clicks -> handler slots.
        self.action_open.triggered.connect(self.on_open)
        self.action_create.triggered.connect(self.on_create)
        self.action_close.triggered.connect(self.on_close_data)
        self.action_save.triggered.connect(self.on_save)
        self.action_select_function.triggered.connect(self.on_select_function)
        self.action_analyze.triggered.connect(self.on_analyze)
        self.action_quit.triggered.connect(self.close)
        self.action_apply.triggered.connect(self.on_calculate)
        self.action_estimate.triggered.connect(self.on_estimate)
        self.btn_est.clicked.connect(self.on_estimate)
        self.btn_apply.clicked.connect(self.on_calculate)
        self.btn_fit.clicked.connect(self.on_analyze)
        self.chk_global.stateChanged.connect(self.on_global_changed)

        # Data containers: raw input, fitted curves, and residuals.
        self.blits_data = BlitsData()
        self.blits_fitted = BlitsData()
        self.blits_residuals = BlitsData()

        # Fit-specification tables; populated by init_fit_spec once data and
        # a function are both available.
        self.pn_fit_spec = None
        self.df_params_spec = None
        self.df_series_spec = None
        self.df_xlimits = None

        self.current_xaxis = None
        self.axis_selector_buttons = None
        self.current_function = None
        # Number of points per fitted curve, and cap on plotted points.
        self.nfitted_points = 100
        self.npoints_max = 1000

        self.current_state = self.ST_START
        self.update_controls()
    def init_fit_spec(self):
        """(Re)build the fit-specification structures and their editor widgets.

        Clears the spec containers, then — only when both data and a function
        are loaded (ST_READY) — creates, for every (parameter, series) pair,
        the value line-edit, fix checkbox, and linkage combo, plus per-series
        include checkboxes and per-parameter fix/link checkboxes. Widget
        signals are connected to the corresponding on_* handlers.
        """
        self.df_xlimits = None
        self.pn_fit_spec = None
        self.df_series_spec = None
        self.df_params_spec = None
        if self.current_state in (self.ST_READY, ):
            series_names = self.blits_data.get_series_names()
            param_names = self.current_function.get_parameter_names()
            axis_names = self.blits_data.get_axes_names()
            # Overall x-range across all series (last column excluded by the
            # extremes tables' iloc slicing).
            self.df_xlimits = pd.DataFrame(columns=['min', 'max'], index=axis_names)
            mins, maxs = self.blits_data.series_extremes()
            xmins, xmaxs = mins.iloc[:, :-1].min(axis=0), maxs.iloc[:, :-1].max(axis=0)
            self.df_xlimits.loc[:, 'min'] = xmins
            self.df_xlimits.loc[:, 'max'] = xmaxs
            # 3D spec: item = spec type, major = parameter, minor = series.
            self.pn_fit_spec = pd.Panel(major_axis=param_names, minor_axis=series_names, items=self.ps_types)
            self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES]] = 1.0
            self.pn_fit_spec.loc[self.ps_types[self.PS_VALUE_FIXED]] = qt.Qt.Unchecked
            self.df_series_spec = pd.DataFrame(index=series_names, columns=self.s_types)
            self.df_series_spec.loc[:, self.s_types[self.S_INCLUDED]] = qt.Qt.Checked
            self.df_params_spec = pd.DataFrame(index=param_names, columns=self.p_types)
            self.df_params_spec.loc[:, self.p_types[self.P_ALL_FIXED]] = qt.Qt.Unchecked
            self.df_params_spec.loc[:, self.p_types[self.P_ALL_LINKED]] = qt.Qt.Unchecked

            # Per-series "include in analysis" checkboxes.
            for sname in series_names:
                cbx = widgets.QCheckBox()
                cbx.setText("")
                cbx.setToolTip("Uncheck to exclude from analysis")
                cbx.setCheckState(int(self.df_series_spec.loc[sname, self.s_types[self.S_INCLUDED]]))
                # int() is necessary for the checkbox to recognise the type as valid (int64 isn't)
                self.df_series_spec.loc[sname, self.s_types[self.S_INCLUDE_CBOXES]] = cbx
                cbx.stateChanged.connect(self.on_series_selected_changed)

            # Per-parameter "link across series" and "fix for all series" boxes.
            for pname in param_names:
                cb_lnk = widgets.QCheckBox()
                cb_lnk.setCheckState(qt.Qt.Unchecked)
                cb_lnk.setText("")
                cb_lnk.setToolTip("Check to link " + pname + " across all series")
                cb_lnk.stateChanged.connect(self.on_all_linked_changed)
                cb_fix = widgets.QCheckBox()
                cb_fix.setCheckState(qt.Qt.Unchecked)
                cb_fix.setText("")
                cb_fix.setToolTip("Check to keep " + pname + " constant for all series")
                cb_fix.stateChanged.connect(self.on_all_fixed_changed)
                self.df_params_spec.loc[pname, self.p_types[self.P_ALL_LINKED]] = int(cb_lnk.checkState())
                self.df_params_spec.loc[pname, self.p_types[self.P_LINK_CBOXES]] = cb_lnk
                self.df_params_spec.loc[pname, self.p_types[self.P_ALL_FIXED]] = int(cb_fix.checkState())
                self.df_params_spec.loc[pname, self.p_types[self.P_FIX_CBOXES]] = cb_fix

            # Per (parameter, series) editors: value, fix checkbox, link combo.
            for pname in param_names:
                for sname in series_names:
                    edt = widgets.QLineEdit()
                    edt.setValidator(gui.QDoubleValidator())
                    edt.setText("{:.3g}".format(self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], pname, sname]))
                    edt.textChanged.connect(self.on_param_val_changed)
                    cbx = widgets.QCheckBox()
                    cbx.setToolTip("Check to keep " + pname + " constant for series " + sname)
                    cbx.setCheckState(qt.Qt.Unchecked)
                    cbx.stateChanged.connect(self.on_param_fix_changed)
                    combo = widgets.QComboBox()
                    combo.addItems(series_names)
                    combo.setEditable(False)
                    combo.setCurrentText(sname)
                    combo.currentIndexChanged.connect(self.on_linkage_changed)
                    try:
                        sp_vals = [float(edt.text()), edt, cbx.checkState(), cbx, combo.currentText(), combo]
                        for sp, val in zip(self.ps_types, sp_vals):
                            self.pn_fit_spec.loc[sp, pname, sname] = val
                    except Exception as e:
                        print(e)
    def init_ui(self):
        """
        Rebuild the parameter-value and series-linkage tables from the
        current fit specification.

        Both tables are cleared first.  When a function is selected its
        name and description are shown; in the READY state the tables are
        repopulated with the widgets previously stored in df_series_spec,
        df_params_spec and pn_fit_spec.
        """
        self.tbl_series_links.clear()
        self.tbl_series_links.setRowCount(0)
        self.tbl_series_links.setColumnCount(0)
        self.tbl_param_values.clear()
        self.tbl_param_values.setRowCount(0)
        self.tbl_param_values.setColumnCount(0)
        if self.current_state not in (self.ST_START, self.ST_DATA_ONLY,):  # there is a current function
            self.lbl_fn_name.setText("Selected function: " + self.current_function.name)
            self.txt_description.setText(self.current_function.long_description)
        else:
            self.lbl_fn_name.setText("Selected function: None")
            self.txt_description.setText("")
        if self.current_state in (self.ST_READY, ):
            if self.pn_fit_spec is not None:
                params = self.pn_fit_spec.major_axis.values
                series = self.pn_fit_spec.minor_axis.values
                colours = self.canvas.curve_colours
                # vertical header of the values table: 'All' row, then one row
                # per series, each carrying its curve-colour icon
                ptbl_vheader = [widgets.QTableWidgetItem("All")]
                for sname in series:
                    i = widgets.QTableWidgetItem(sname)
                    i.setIcon(self.line_icon(colours[sname]))
                    ptbl_vheader.extend([i])
                self.tbl_param_values.setRowCount(len(ptbl_vheader))
                for i in range(len(ptbl_vheader)):
                    self.tbl_param_values.setVerticalHeaderItem(i, ptbl_vheader[i])
                ptbl_hheader = ["Include"]
                ptbl_hheader.extend(params)
                self.tbl_param_values.setColumnCount(len(ptbl_hheader))
                self.tbl_param_values.setHorizontalHeaderLabels(ptbl_hheader)
                # same layout for the linkage table, but without an 'Include' column
                ltbl_vheader = [widgets.QTableWidgetItem("All")]
                for sname in series:
                    i = widgets.QTableWidgetItem(sname)
                    i.setIcon(self.line_icon(colours[sname]))
                    ltbl_vheader.extend([i])
                self.tbl_series_links.setRowCount(len(ltbl_vheader))
                for i in range(len(ltbl_vheader)):
                    self.tbl_series_links.setVerticalHeaderItem(i, ltbl_vheader[i])
                ltbl_hheader = []
                ltbl_hheader.extend(params)
                self.tbl_series_links.setColumnCount(len(ltbl_hheader))
                self.tbl_series_links.setHorizontalHeaderLabels(ltbl_hheader)
                # create the parameter values table
                vrange = range(len(ptbl_vheader)-len(series), len(ptbl_vheader))
                hrange = range((len(ptbl_hheader)-len(params)), len(ptbl_hheader))
                for sname, row in zip(series, vrange):
                    w = self.centred_tablewidget(self.df_series_spec.loc[sname, self.s_types[self.S_INCLUDE_CBOXES]])
                    self.tbl_param_values.setCellWidget(row, 0, w)
                for pname, col in zip(params, hrange):
                    w = self.centred_tablewidget(self.df_params_spec.loc[pname, self.p_types[self.P_FIX_CBOXES]])
                    self.tbl_param_values.setCellWidget(0, col, w)
                for sname, row in zip(series, vrange):
                    for pname, col in zip(params, hrange):
                        edt = self.pn_fit_spec.loc[self.ps_types[self.PS_LEDITS], pname, sname]
                        cbx = self.pn_fit_spec.loc[self.ps_types[self.PS_FIX_CBOXES], pname, sname]
                        w = self.checkable_edit_widget(cbx, edt)
                        self.tbl_param_values.setCellWidget(row, col, w)
                # create the linkage table
                vrange = range(len(ltbl_vheader)-len(series), len(ltbl_vheader))
                hrange = range((len(ltbl_hheader)-len(params)), len(ltbl_hheader))
                for pname, col in zip(params, hrange):
                    w = self.centred_tablewidget(self.df_params_spec.loc[pname, 'all_linked_cboxes'])
                    self.tbl_series_links.setCellWidget(0, col, w)
                for sname, row in zip(series, vrange):
                    for pname, col in zip(params, hrange):
                        self.tbl_series_links.setCellWidget(row, col, self.pn_fit_spec.loc['series_combos', pname, sname])
                self.tbl_param_values.resizeRowsToContents()
                self.tbl_series_links.resizeRowsToContents()
        self.on_global_changed()
    def on_all_fixed_changed(self):
        """
        Handle the 'All' fixed-checkbox of a parameter: mirror its check
        state into the logical spec and propagate it to every series of
        that parameter, then refresh the values table.
        """
        if self.current_state in (self.ST_READY, ):
            param, col = self.find_sender_index(self.df_params_spec)
            if param is not None:
                # int() because Qt check states stored as numpy int64 are not recognised
                checkstate = int(self.df_params_spec.loc[param, col].checkState())
                self.df_params_spec.loc[param, self.p_types[self.P_ALL_FIXED]] = checkstate  # synchronise with logical representation
                self.pn_fit_spec.loc[self.ps_types[self.PS_VALUE_FIXED], param] = checkstate
                self.update_param_vals_table()
    def on_all_linked_changed(self):
        """
        Handle the 'All' linked-checkbox of a parameter.

        When checked, every series links to the first series' link target;
        when unchecked, every series links back to itself.
        """
        if self.current_state in (self.ST_READY, ):
            param, col = self.find_sender_index(self.df_params_spec)
            if param is not None:
                checkstate = self.df_params_spec.loc[param, col].checkState()
                self.df_params_spec.loc[param, self.p_types[self.P_ALL_LINKED]] = checkstate  # synchronise with logical representation
                linkto = self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], param].iloc[0]
                for series in self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], param].index:
                    if checkstate == qt.Qt.Unchecked:
                        linkto = series  # unlinked: each series is its own group
                    self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], param, series] = linkto
                self.update_linkage_table()
    def on_analyze(self):
        """
        Run the curve fit on the selected series and write the fitted
        parameters, their standard errors and the per-series fit tolerances
        back into the fit specification, then refresh all result views.
        """
        if self.current_state in (self.ST_READY, ):
            try:
                params = self.current_function.parameters
                series = self.get_selected_series_names()
                fitted_params, sigmas, confidence_intervals, tol = self.perform_fit()
                # reshape the fit results into parameter-by-series frames
                df_pars = pd.DataFrame(fitted_params.transpose(), index=params, columns=series)
                df_sigm = pd.DataFrame(sigmas.transpose(), index=params, columns=series)
                sr_ftol = pd.Series(tol, index=series)
                for pname, row in df_pars.iterrows():
                    for sname, val in row.iteritems():
                        self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], pname, sname] = val
                for pname, row in df_sigm.iterrows():
                    for sname, val in row.iteritems():
                        self.pn_fit_spec.loc[self.ps_types[self.PS_SIGMAS], pname, sname] = val
                for sname, val in sr_ftol.iteritems():
                    self.df_series_spec.loc[sname, self.s_types[self.S_FTOL]] = val
                self.on_calculate()
                self.update_controls()
                self.update_param_vals_table()
                self.show_selected_data()
                self.show_smooth_line()
                self.show_fitted_params()
            except Exception as e:
                # NOTE(review): broad catch keeps the GUI alive on any fit
                # failure, but only prints to stdout -- consider surfacing
                # the error to the user
                print(e)
        pass
def on_calculate(self):
if self.current_state in (self.ST_READY, ):
self.set_calculated_curves()
self.set_residual_curves()
self.draw_current_data_set()
pass
    def on_close_data(self):
        """
        Discard the loaded data set: reset the x-axis, plots and data
        holders, and fall back to START (no function selected) or
        FUNCTION_ONLY (a function is still selected).
        """
        if self.current_state in (self.ST_DATA_ONLY, self.ST_READY, ):
            self.current_xaxis = None
            self.set_axis_selector()
            self.canvas.clear_plots()
            # replace all data containers with fresh, empty ones
            self.blits_data = BlitsData()
            self.blits_fitted = BlitsData()
            self.blits_residuals = BlitsData()
            if self.current_state == self.ST_DATA_ONLY:
                self.current_state = self.ST_START
            else:
                self.current_state = self.FUNCTION_ONLY
            self.init_fit_spec()
            self.init_ui()
            self.update_controls()
        pass
    def on_create(self):
        """
        Open the data-creation dialog for the current function and, when
        accepted, install the generated data set and its parameter values
        and move to the READY state.
        """
        if self.current_state in (self.FUNCTION_ONLY, ):
            self.create_data_set_dialog = DataCreationDialog(None, self.current_function)
            if self.create_data_set_dialog.exec() == widgets.QDialog.Accepted:
                self.blits_data = BlitsData()
                self.blits_data.series_names = self.create_data_set_dialog.get_series_names()
                self.blits_data.axis_names = self.create_data_set_dialog.get_axes()
                self.blits_data.series_dict = self.create_data_set_dialog.get_series_dict()
                df_pars = self.create_data_set_dialog.get_parameters()
                self.current_state = self.ST_READY
                self.current_xaxis = self.blits_data.get_axes_names()[0]
                try:
                    self.set_axis_selector()
                    self.draw_current_data_set()
                    self.init_fit_spec()
                    # seed the fit spec with the parameter values used to create the data
                    for pname, row in df_pars.iterrows():
                        for sname, val in row.iteritems():
                            self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], pname, sname] = val
                    self.init_ui()
                except Exception as e:
                    print(e)
                self.update_controls()
                self.on_select_function()
            pass
        pass
    def on_estimate(self):
        """
        Compute initial parameter estimates for the selected series using
        the current function's p0 routine and write them into the fit
        specification, then refresh the table and recalculate the curves.
        """
        if self.current_state in (self.ST_READY, ):
            fn_p0 = self.current_function.p0
            params = self.current_function.parameters
            series = self.get_selected_series_names()
            data = self.get_data_for_fitting(series)
            ffw = FunctionsFramework()
            values = ffw.get_initial_param_estimates(data, fn_p0, len(params)).transpose()
            df_pars = pd.DataFrame(values, index=params, columns=series)
            try:
                for pname, row in df_pars.iterrows():
                    for sname, val in row.iteritems():
                        self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], pname, sname] = val
            except Exception as e:
                print(e)
            self.update_param_vals_table()
            self.on_calculate()
        pass
def on_global_changed(self):
if self.chk_global.checkState() == qt.Qt.Checked:
self.tbl_series_links.setEnabled(True)
else:
self.tbl_series_links.setEnabled(False)
    def on_linkage_changed(self):
        """
        Handle a change in a per-series linkage combo box: record the new
        link target for the (parameter, series) cell, normalise the link
        groups for that parameter and refresh the linkage table.
        """
        if self.current_state in (self.ST_READY, ):
            df = self.pn_fit_spec.loc[self.ps_types[self.PS_COMBOS]]
            param, series = self.find_sender_index(df)
            if param is not None and series is not None:
                link = df.loc[param, series].currentText()
                self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], param, series] = link
                self.rationalise_groups(param)
                self.update_linkage_table()
            pass
        pass
    def on_open(self):
        """
        Let the user pick a CSV data file, import it and move to
        DATA_ONLY, or to READY when the already selected function is
        compatible with the number of axes in the file.
        """
        if self.current_state in (self.ST_START, self.FUNCTION_ONLY, ):
            file_path = widgets.QFileDialog.getOpenFileName(self,
                "Open Data File", "", "CSV data files (*.csv);;All files (*.*)")[0]
            if file_path:
                self.blits_data.import_data(file_path)
                axes = self.blits_data.get_axes_names()  # cp.deepcopy(self.blits_data.get_axes_names())
                self.current_xaxis = axes[0]  # self.blits_data.get_axes_names()[0]
                if self.current_state == self.ST_START:
                    self.current_state = self.ST_DATA_ONLY
                else:
                    # a function is already selected: keep it only if it needs
                    # no more independent axes than the data provides
                    if len(self.current_function.independents) <= len(axes):
                        self.current_state = self.ST_READY
                    else:
                        self.current_function = None
                        self.current_state = self.ST_DATA_ONLY
                self.set_axis_selector()
                self.init_fit_spec()
                self.init_ui()
                self.update_controls()
                self.on_select_function()
def on_param_fix_changed(self):
if self.current_state in (self.ST_READY, ):
param, series = None, None
df = self.pn_fit_spec.loc[self.ps_types[self.PS_FIX_CBOXES]]
param, series = self.find_sender_index(df)
if param is not None and series is not None:
param, series = self.find_sender_index(df)
try:
self.pn_fit_spec.loc[self.ps_types[self.PS_VALUE_FIXED], param, series] = int(self.sender().checkState())
except Exception as e:
print(e)
def on_param_val_changed(self):
if self.current_state in (self.ST_READY, ):
param, series = None, None
df = self.pn_fit_spec.loc[self.ps_types[self.PS_LEDITS]]
param, series = self.find_sender_index(df)
if param is not None and series is not None:
param, series = self.find_sender_index(df)
try:
self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], param, series] = float(self.sender().text())
except Exception as e:
print(e)
def on_series_selected_changed(self):
if self.current_state in (self.ST_READY, ):
series, col = None, None
series, col = self.find_sender_index(self.df_series_spec)
if series is not None:
try:
checkstate = self.df_series_spec.loc[series, col].checkState()
self.df_series_spec.loc[series, self.s_types[self.S_INCLUDED]] = int(checkstate)
# synchronise with logical representation; int is necessary to make sure Qt recognises it (won't recognise int64 (??))
except Exception as e:
print(e)
    def on_save(self):
        """
        Export the observed/fitted/residual data, the smooth fitted curves
        and the fitted parameter values to a user-chosen Excel workbook
        (sheets 'Data', 'Fit' and 'Parameters').
        """
        file_path = ""
        if self.current_state in (self.ST_READY, ):
            file_path = widgets.QFileDialog.getSaveFileName(self,
                "Save all", "", "Excel files (*.xlsx);;All files (*.*)")[0]
            if file_path:
                smooth_lines = self.get_xs_fitted_smooth_df()
                obs_fit_res = self.get_xs_obs_fit_res_df()
                # pd.concat((obs_fit_res, smooth_lines), axis=1)
                params = self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES]]
                try:
                    writer = pd.ExcelWriter(file_path)
                    obs_fit_res.to_excel(writer,'Data')
                    smooth_lines.to_excel(writer, 'Fit')
                    params.to_excel(writer,'Parameters')
                    writer.save()
                    writer.close()
                except Exception as e:
                    print(e)
    def on_select_function(self):
        """
        Open the function-selection dialog (available from every state)
        and, on acceptance, install the chosen function, invalidate any
        previous fit results and move to FUNCTION_ONLY or READY.
        """
        if self.current_state in range(self.N_STATES):  # should work from all states
            name, n_axes = "", 0
            if not self.current_state in (self.ST_START, self.ST_DATA_ONLY):  # a current function exists
                name = self.current_function.name
            if self.current_state in (self.ST_DATA_ONLY, self.ST_READY, ):
                n_axes = len(self.blits_data.get_axes_names())
            self.function_dialog = FunctionSelectionDialog(self, n_axes=n_axes, selected_fn_name=name)
            if self.function_dialog.exec() == widgets.QDialog.Accepted:
                self.current_function = self.function_dialog.get_selected_function()
                # previous fitted/residual curves no longer match the new function
                self.blits_fitted = BlitsData()
                self.blits_residuals = BlitsData()
                if self.current_state in (self.ST_START, self.FUNCTION_ONLY):
                    self.current_state = self.FUNCTION_ONLY
                else:
                    self.current_state = self.ST_READY
                self.init_fit_spec()
                self.init_ui()
                self.draw_current_data_set()
            self.update_controls()
def on_xaxis_changed(self, checked):
if self.current_state not in (self.ST_START, self.FUNCTION_ONLY, ):
btn = self.sender()
xaxis = btn.text()
if btn.isChecked():
self.preserve_xlimits()
self.current_xaxis = xaxis
self.draw_current_data_set()
def draw_current_data_set(self):
self.canvas.clear_plots()
if self.current_state not in (self.ST_START, self.FUNCTION_ONLY, ):
if self.blits_data.has_data():
self.canvas.set_colours(self.blits_data.series_names.tolist())
for key in self.blits_data.series_names:
series = self.blits_data.series_dict[key]
x = series[self.current_xaxis]
y = series[key]
self.canvas.draw_series(key, x, y, 'primary')
if self.blits_fitted.has_data():
for key in self.blits_fitted.series_names:
series = self.blits_fitted.series_dict[key]
x = series[self.current_xaxis]
y = series[key]
self.canvas.draw_series(key, x, y, 'calculated')
if self.blits_residuals.has_data():
for key in self.blits_residuals.series_names:
series = self.blits_residuals.series_dict[key]
x = series[self.current_xaxis]
y = series[key]
self.canvas.draw_series(key, x, y, 'residuals')
if self.df_xlimits is not None:
self.canvas.set_vlines(self.df_xlimits.loc[self.current_xaxis].as_matrix())
    def get_constant_params_for_fitting(self, series_names):
        """
        Returns an (n_curves, n_params)-shaped array of Boolean values
        (with rows and columns parallel to self.series_names and
        self.current_function.parameters, respectively); if True the
        parameter value is held constant, if False it is variable.
        """
        # True where the per-series 'fixed' state equals Qt.Checked
        selected = (self.pn_fit_spec.loc[self.ps_types[self.PS_VALUE_FIXED], :, series_names] == qt.Qt.Checked).transpose()
        return selected.as_matrix()
def get_data_for_fitting(self, series_names):
data = []
self.preserve_xlimits()
start, stop = self.df_xlimits.loc[self.current_xaxis].as_matrix() # self.canvas.get_vline_positions()
for s in series_names:
series = self.blits_data.series_dict[s] # the full data set
indmin = series[self.current_xaxis].searchsorted(start, side='left')[0]
indmax = series[self.current_xaxis].searchsorted(stop, side='right')[0]
selection = cp.deepcopy(series[indmin:indmax]).as_matrix().transpose()
if len(data) == 0:
data = [selection]
else:
data.append(selection)
return data
    def get_param_values_for_fitting(self, series_names):
        """
        Returns an (n_curves, n_params)-shaped array (with rows and columns
        parallel to self.series_names and self.current_function.parameters,
        respectively) with values for each parameter for each series.
        """
        selected = self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], :, series_names]
        # as_matrix(): plain ndarray (old pandas API); transpose to curves-by-params
        params = selected.as_matrix().transpose()
        return params
    def get_selected_series_names(self):
        """
        Returns a numpy array of the selected series names
        """
        # S_INCLUDED holds int check states (see on_series_selected_changed);
        # Qt.Checked marks a series as included in the analysis
        selected = self.df_series_spec.loc[:, self.s_types[self.S_INCLUDED]] == qt.Qt.Checked
        all_series = self.df_series_spec.index.values
        return all_series[selected]
    def get_series_linkage_for_fitting(self, series_names):
        """
        Returns an (n_curves, n_params)-shaped array (with rows and columns
        parallel to self.series_names and self.current_function.parameters,
        respectively) of group labels, in which linked parameters are
        grouped by sharing the same label.  Each label is the string
        '<param>_<link-target-series>'.  Example for 4 curves and 3
        parameters (labels abbreviated):
              p0    p1    p2
        c0    a     c     d
        c1    a     c     e
        c2    b     c     f
        c3    b     c     g
        indicates that parameter p0 is assumed to have the same value in
        curves c0 and c1, and in curves c2 and c3 (a different value),
        and that the value for p1 is the same in all curves, whereas
        the value of p2 is different for all curves.
        """
        selected = self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], :, series_names].transpose()
        links_array = cp.deepcopy(selected)
        for series, row in selected.iterrows():
            for param, txt in row.iteritems():
                # prefix with the parameter name so groups are unique per parameter
                links_array.loc[series, param] = param + "_" + txt
        return links_array.as_matrix()
    def perform_fit(self):
        """
        Fit the current function to all selected series and return the
        tuple (fitted_params, sigmas, confidence_intervals, tol).

        When the 'global' checkbox is checked a single global fit is run
        with the parameter linkage in force; otherwise each series is
        fitted independently (tol is then a list, one entry per series).
        """
        # Collect the required information
        func = self.current_function.func
        series_names = self.get_selected_series_names()
        data = self.get_data_for_fitting(series_names)
        param_values = self.get_param_values_for_fitting(series_names)
        const_params = self.get_constant_params_for_fitting(series_names)
        links = self.get_series_linkage_for_fitting(series_names)
        # set up for the fitting procedure
        fitted_params = cp.deepcopy(param_values)
        sigmas = np.empty_like(fitted_params)
        confidence_intervals = np.empty_like(fitted_params)
        tol = None
        results = None
        ffw = FunctionsFramework()
        # Do the fit
        if self.chk_global.checkState() == qt.Qt.Checked:  # Global
            results = ffw.perform_global_curve_fit(data, func, param_values, const_params, links)
            fitted_params = results[0]
            sigmas = results[1]
            confidence_intervals = results[2]
            tol = results[3]
        else:  # not global
            tol = []
            n = 0
            for d, p, c, l in zip(data, param_values, const_params, links):
                # wrap each series as a one-curve 'global' fit
                d = [d, ]
                p = np.reshape(p, (1, p.shape[0]))
                c = np.reshape(c, (1, c.shape[0]))
                l = np.reshape(l, (1, l.shape[0]))
                results = ffw.perform_global_curve_fit(d, func, p, c, l)
                fitted_params[n] = results[0]
                sigmas[n] = results[1]
                confidence_intervals[n] = results[2]
                tol.append(results[3])
                n += 1
        return fitted_params, sigmas, confidence_intervals, tol
    def preserve_xlimits(self):
        """
        Store the canvas' current x-range selection lines in df_xlimits
        for the active x-axis, so they survive an axis switch or redraw.
        """
        if self.current_state in (self.ST_READY, ):
            if self.df_xlimits is not None:  # it shouldn't be None here, but just to be sure
                self.df_xlimits.loc[self.current_xaxis] = self.canvas.get_vline_positions()
        else:
            self.df_xlimits = None  # probably superfluous as well
def set_axis_selector(self):
self.axis_selector_buttons = {}
self.clearLayout(self.axis_layout)
if self.blits_data.has_data():
self.axis_layout.addStretch()
for name in self.blits_data.get_axes_names():
btn = widgets.QRadioButton()
btn.setText(name)
btn.toggled.connect(self.on_xaxis_changed)
self.axis_layout.addWidget(btn)
self.axis_selector_buttons[btn.text()] = btn
self.axis_layout.addStretch()
if not self.current_xaxis is None:
if self.current_xaxis in self.axis_selector_buttons:
self.axis_selector_buttons[self.current_xaxis].setChecked(True)
def set_calculated_curves(self):
selected_series = self.get_selected_series_names()
params = self.get_param_values_for_fitting(selected_series)
data = self.get_data_for_fitting(selected_series)
axes = self.blits_data.get_axes_names()
series_dict = {}
for series_name, series_params, i in zip(selected_series, params, range(len(selected_series))):
x_all = data[i][:-1]
x = np.zeros((x_all.shape[0], self.nfitted_points))
for i in range(x_all.shape[0]):
start, stop = x_all[i][0], x_all[i][-1]
x[i] = np.linspace(start, stop, self.nfitted_points)
y_fit = np.atleast_2d(self.current_function.func(x, series_params))
# create the y values and put them in a DataFrame, transpose for easy concatenation
df_x = pd.DataFrame(x, index=axes)
df_y = pd.DataFrame(y_fit, index=[series_name])
df_data = pd.concat((df_x, df_y)).transpose()
series_dict[series_name] = df_data
self.blits_fitted = BlitsData()
self.blits_fitted.series_names= np.array(selected_series)
self.blits_fitted.axis_names = cp.deepcopy(axes)
self.blits_fitted.series_dict = series_dict
def set_residual_curves(self):
selected_series = self.get_selected_series_names()
params = self.get_param_values_for_fitting(selected_series)
data = self.get_data_for_fitting(selected_series)
axes = self.blits_data.get_axes_names()
series_dict = {}
for series_name, series_params, i in zip(selected_series, params, range(len(selected_series))):
x = data[i][:-1]
y_obs = data[i][-1]
y_fit = self.current_function.func(x, series_params)
y_res = np.atleast_2d(y_obs - y_fit)
# create the y values and put them in a DataFrame, transpose for easy concatenation
df_x = pd.DataFrame(x, index=axes)
df_y = pd.DataFrame(y_res, index=[series_name])
df_data = pd.concat((df_x, df_y)).transpose()
series_dict[series_name] = df_data
self.blits_residuals = BlitsData()
self.blits_residuals.series_names = np.array(selected_series)
self.blits_residuals.axis_names = cp.deepcopy(axes)
self.blits_residuals.series_dict = series_dict
    def get_xs_obs_fit_res_df(self):
        """
        Return a DataFrame with, per selected series, the x columns and
        the observed, fitted and residual y columns, for display/export.
        """
        selected_series = self.get_selected_series_names()
        params = self.get_param_values_for_fitting(selected_series)
        data = self.get_data_for_fitting(selected_series)
        daxes = self.blits_data.get_axes_names()
        faxes = self.current_function.independents
        # column headers combine the function's independent name and the data axis name
        axes = np.array([f + "\n(" + a + ")" for a, f in zip(daxes, faxes)])
        df_data = None
        for series_name, series_params, i in zip(selected_series, params, range(len(selected_series))):
            x = data[i][:-1]
            y_obs = np.atleast_2d(data[i][-1])
            y_fit = np.atleast_2d(self.current_function.func(x, series_params))
            y_res = np.atleast_2d(y_obs - y_fit)
            df_x = pd.DataFrame(x, index=axes)  # no series name, gets confusing
            df_y_obs = pd.DataFrame(y_obs, index=[' y-obs \n(' + series_name + ')'])
            df_y_fit = pd.DataFrame(y_fit, index=[' y-fit\n(' + series_name + ')'])
            df_y_res = pd.DataFrame(y_res, index=[' y-res\n(' + series_name + ')'])
            df_data = pd.concat((df_data, df_x, df_y_obs, df_y_fit, df_y_res))
        return df_data.transpose()
def get_xs_fitted_smooth_df(self):
selected_series = self.get_selected_series_names()
params = self.get_param_values_for_fitting(selected_series)
data = self.get_data_for_fitting(selected_series)
daxes = self.blits_data.get_axes_names()
faxes = self.current_function.independents
axes = np.array([f + "\n(" + a + ")" for a, f in zip(daxes, faxes)])
df_data = None
for series_name, series_params, i in zip(selected_series, params, range(len(selected_series))):
x0 = data[i][:-1, 0]
x1 = data[i][:-1, -1]
x = np.empty((len(axes), self.nfitted_points))
for i, i0, i1 in zip(range(len(axes)), x0, x1):
x[i] = np.linspace(i0, i1, self.nfitted_points, dtype=float)
y_fit = np.atleast_2d(self.current_function.func(x, series_params))
df_x = pd.DataFrame(x, index=axes) # no series name, get confusing
df_y_fit = pd.DataFrame(y_fit, index=[' y-fit\n(' + series_name + ')'])
df_data = pd.concat((df_data, df_x, df_y_fit))
return df_data.transpose()
    def show_selected_data(self):
        """
        Fill the fitted-data table with the observed/fitted/residual
        DataFrame; NaN entries are rendered as empty cells.
        """
        self.tbl_fitted_data.clear()
        self.tbl_fitted_data.setColumnCount(0)
        self.tbl_fitted_data.setRowCount(0)
        all_data = self.get_xs_obs_fit_res_df()
        self.tbl_fitted_data.setRowCount(all_data.shape[0])
        self.tbl_fitted_data.setColumnCount(all_data.shape[1])
        self.tbl_fitted_data.setHorizontalHeaderLabels(all_data.columns.values)
        for i in range(self.tbl_fitted_data.rowCount()):
            for j in range(self.tbl_fitted_data.columnCount()):
                w = widgets.QTableWidgetItem()
                txt = ""
                if not np.isnan(all_data.iloc[i, j]):
                    txt = "{:8.3g}".format(all_data.iloc[i, j])
                w.setText(txt)
                self.tbl_fitted_data.setItem(i, j, w)
        self.tbl_fitted_data.resizeColumnsToContents()
    def show_smooth_line(self):
        """
        Fill the smooth-line table with the smooth fitted-curve DataFrame;
        NaN entries are rendered as empty cells.
        """
        self.tbl_smooth_line.clear()
        self.tbl_smooth_line.setColumnCount(0)
        self.tbl_smooth_line.setRowCount(0)
        all_data = self.get_xs_fitted_smooth_df()
        self.tbl_smooth_line.setRowCount(all_data.shape[0])
        self.tbl_smooth_line.setColumnCount(all_data.shape[1])
        self.tbl_smooth_line.setHorizontalHeaderLabels(all_data.columns.values)
        for i in range(self.tbl_smooth_line.rowCount()):
            for j in range(self.tbl_smooth_line.columnCount()):
                w = widgets.QTableWidgetItem()
                txt = ""
                if not np.isnan(all_data.iloc[i, j]):
                    txt = "{:8.3g}".format(all_data.iloc[i, j])
                w.setText(txt)
                self.tbl_smooth_line.setItem(i, j, w)
        self.tbl_smooth_line.resizeColumnsToContents()
    def show_fitted_params(self):
        """
        Fill the fitted-parameters table: one row per series with, per
        parameter, the fitted value and its standard error, plus a final
        ftol column; NaN entries are rendered as empty cells.
        """
        self.tbl_fitted_params.clear()
        self.tbl_fitted_params.setColumnCount(0)
        self.tbl_fitted_params.setRowCount(0)
        pnames = self.pn_fit_spec.major_axis.values
        # interleave '<param>' and 'Stderr on <param>' headers, then append 'ftol'
        pheader = np.vstack((pnames, np.array(["Stderr\non " + pname for pname in pnames]))).transpose().ravel()
        pheader = np.hstack((pheader, np.array(["ftol"])))
        sheader = self.pn_fit_spec.minor_axis.values
        self.tbl_fitted_params.setColumnCount(len(pheader))
        self.tbl_fitted_params.setHorizontalHeaderLabels(pheader)
        self.tbl_fitted_params.setRowCount(len(sheader))
        self.tbl_fitted_params.setVerticalHeaderLabels(sheader)
        irow = -1
        for sname in self.pn_fit_spec.minor_axis.values:
            irow += 1
            icol = -1
            for pname in self.pn_fit_spec.major_axis.values:
                pval = self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], pname, sname]
                perr = self.pn_fit_spec.loc[self.ps_types[self.PS_SIGMAS], pname, sname]
                spval, sperr = "", ""
                if not np.isnan(pval):
                    spval = '{:8.3g}'.format(pval)
                if not np.isnan(perr):
                    sperr = '{:8.3g}'.format(perr)
                icol += 1
                wi = widgets.QTableWidgetItem(spval)
                self.tbl_fitted_params.setItem(irow, icol, wi)
                icol += 1
                wi = widgets.QTableWidgetItem(sperr)
                self.tbl_fitted_params.setItem(irow, icol, wi)
            # trailing ftol column for this series
            icol += 1
            ftol = self.df_series_spec.loc[sname, self.s_types[self.S_FTOL]]
            sftol = ""
            if not np.isnan(ftol):
                sftol = '{:8.3g}'.format(ftol)
            wi = widgets.QTableWidgetItem(sftol)
            self.tbl_fitted_params.setItem(irow, icol, wi)
        self.tbl_fitted_params.resizeColumnsToContents()
    def rationalise_groups(self, parameter):
        """
        Normalise the link groups of *parameter*: build the reflexive,
        symmetric, transitive closure of the pairwise links
        (Warshall-Floyd) and relabel every series with a canonical
        representative of its equivalence class.
        """
        if self.current_state in (self.ST_READY, ) and parameter != '':
            prow = self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], parameter]
            x = prow.index
            df_wf = pd.DataFrame(np.zeros((len(x), len(x))), index=x, columns=x, dtype=bool)  # set up the matrix
            for series, val in prow.iteritems():
                df_wf.loc[series, series] = True  # make the matrix reflexive
                if series != val:
                    df_wf.loc[series, val] = True
                    df_wf.loc[val, series] = True  # make the matrix symmetrical
            # make matrix transitive (Warshall-Floyd)
            for k in range(len(x)):
                for i in range(len(x)):
                    for j in range(len(x)):
                        df_wf.iloc[i, j] = df_wf.iloc[i, j] or (df_wf.iloc[i, k] == 1 and df_wf.iloc[k, j] == 1)
            # Find the equivalence classes for this parameter: the first series
            # of each class (in row order) becomes its representative
            seen = []
            sr_equiv_clss = pd.Series(index=x)
            for series0, row in df_wf.iterrows():
                for series1, val in row.iteritems():
                    if val:
                        if series1 not in seen:
                            sr_equiv_clss.loc[series1] = series0
                            seen.append(series1)
            for series in x:
                self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], parameter, series] = sr_equiv_clss.loc[series]
        pass
def update_controls(self):
"""
Enables and disables controls for each state
"""
if self.current_state == self.ST_START:
self.action_open.setEnabled(True)
self.action_create.setEnabled(False)
self.action_close.setEnabled(False)
self.action_save.setEnabled(False)
self.action_select_function.setEnabled(True)
self.action_analyze.setEnabled(False)
self.action_estimate.setEnabled(False)
self.action_apply.setEnabled(False)
self.btn_apply.setEnabled(False)
self.btn_fit.setEnabled(False)
self.btn_est.setEnabled(False)
self.action_quit.setEnabled(True)
elif self.current_state == self.ST_DATA_ONLY:
self.action_open.setEnabled(False)
self.action_create.setEnabled(False)
self.action_close.setEnabled(True)
self.action_save.setEnabled(True)
self.action_select_function.setEnabled(True)
self.action_analyze.setEnabled(False)
self.action_estimate.setEnabled(False)
self.action_apply.setEnabled(False)
self.btn_apply.setEnabled(False)
self.btn_fit.setEnabled(False)
self.btn_est.setEnabled(False)
self.action_quit.setEnabled(True)
elif self.current_state == self.FUNCTION_ONLY:
self.action_open.setEnabled(True)
self.action_create.setEnabled(True)
self.action_close.setEnabled(False)
self.action_save.setEnabled(False)
self.action_select_function.setEnabled(True)
self.action_analyze.setEnabled(False)
self.action_estimate.setEnabled(False)
self.action_apply.setEnabled(False)
self.btn_apply.setEnabled(False)
self.btn_fit.setEnabled(False)
self.btn_est.setEnabled(False)
self.action_quit.setEnabled(True)
elif self.current_state == self.ST_READY:
self.action_open.setEnabled(False)
self.action_create.setEnabled(False)
self.action_close.setEnabled(True)
self.action_save.setEnabled(True)
self.action_select_function.setEnabled(True)
self.action_analyze.setEnabled(True)
self.action_estimate.setEnabled(True)
self.action_apply.setEnabled(True)
self.btn_apply.setEnabled(True)
self.btn_fit.setEnabled(True)
self.btn_est.setEnabled(True)
self.action_quit.setEnabled(True)
else:
print('Illegal state')
    def update_linkage_table(self):
        """
        Sets combo-boxes in linkage_combos to the current values in linkage_groups
        """
        if self.current_state in (self.ST_READY, ):
            combos = self.pn_fit_spec.loc[self.ps_types[self.PS_COMBOS]]
            vals = self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS]]
            try:
                for i, row in vals.iterrows():
                    for j, val in row.iteritems():
                        box = combos.loc[i, j]
                        if box.currentText() != val:
                            # disconnect first so the programmatic change does not
                            # re-trigger on_linkage_changed
                            box.currentIndexChanged.disconnect()
                            box.setCurrentText(val)
                            box.currentIndexChanged.connect(self.on_linkage_changed)
            except Exception as e:
                print(e)
    def update_param_vals_table(self):
        """
        Sets text and checkstate of values table items to their corresponding
        logical values in pn_fit_spec
        """
        if self.current_state in (self.ST_READY, ):
            edts = self.pn_fit_spec.loc[self.ps_types[self.PS_LEDITS]]
            cbxs = self.pn_fit_spec.loc[self.ps_types[self.PS_FIX_CBOXES]]
            vals = self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES]]
            chks = self.pn_fit_spec.loc[self.ps_types[self.PS_VALUE_FIXED]]
            try:
                for i, row in vals.iterrows():
                    for j, val in row.iteritems():
                        edt = edts.loc[i, j]
                        cbx = cbxs.loc[i, j]
                        checkstate = chks.loc[i, j]
                        if float(edt.text()) != val:
                            # disconnect while updating so the handler is not re-entered
                            edt.textChanged.disconnect()
                            edt.setText('{:.3g}'.format(val))
                            edt.textChanged.connect(self.on_param_val_changed)
                        if cbx.checkState() != checkstate:
                            cbx.stateChanged.disconnect()
                            cbx.setCheckState(qt.Qt.Unchecked)
                            if checkstate == qt.Qt.Checked:
                                cbx.setCheckState(qt.Qt.Checked)
                            cbx.stateChanged.connect(self.on_param_fix_changed)
            except Exception as e:
                print(e)
    def write_param_values_to_table(self, param_values):
        """
        Placeholder: the model-based parameter table update is disabled;
        the widget-based tables are updated elsewhere instead.
        """
        pass
        # previous model-based implementation, kept for reference:
        #self.parameters_model.change_content(param_values.transpose())
        #self.parameters_model.df_data[:] = param_values.transpose()
        #self.tbl_params.resizeColumnsToContents() # This redraws the table (necessary)
### Convenience functions and procedures
def circle_icon(self, color):
pix = gui.QPixmap(30,30)
pix.fill(gui.QColor("transparent"))
paint = gui.QPainter()
paint.begin(pix)
paint.setBrush(gui.QColor(color))
paint.setPen(gui.QColor("transparent"))
paint.drawEllipse(0,0,30,30)
paint.end()
icon = gui.QIcon(pix)
return icon
def clearLayout(self, layout):
while layout.count():
child = layout.takeAt(0)
if child.widget() is not None:
child.widget().deleteLater()
elif child.layout() is not None:
self.clearLayout(child.layout())
def find_sender_index(self, dataframe):
sender_i, sender_j = None, None
for i, row, in dataframe.iterrows():
for j, item in row.iteritems():
if item is self.sender():
sender_i = i
sender_j = j
return sender_i, sender_j
def centred_tablewidget(self, qtwidget):
wid = widgets.QWidget()
hlo = widgets.QVBoxLayout()
hlo.setContentsMargins(12, 0, 12, 0)
hlo.setAlignment(qt.Qt.AlignCenter)
wid.setLayout(hlo)
hlo.addWidget(qtwidget)
return wid
def checkable_edit_widget(self, checkbox, textbox):
wid = widgets.QWidget()
hlo = widgets.QHBoxLayout()
hlo.setContentsMargins(12, 0, 12, 0)
wid.setLayout(hlo)
hlo.addWidget(textbox)
hlo.addStretch()
hlo.addWidget(checkbox)
return wid
def is_number(self, s):
try:
float(s)
return True
except ValueError:
return False
def line_icon(self, color):
pixmap = gui.QPixmap(50,10)
pixmap.fill(gui.QColor(color))
icon = gui.QIcon(pixmap)
return icon
# Standard Qt application entry point
if __name__ == '__main__':
    import sys

    application = widgets.QApplication(sys.argv)
    window = Main()
    window.show()
    sys.exit(application.exec_())
| gpl-3.0 |
ljwolf/pysal | pysal/spreg/tests/test_probit.py | 7 | 5150 | import unittest
import pysal
import numpy as np
from pysal.spreg import probit as PB
from pysal.common import RTOL
class TestBaseProbit(unittest.TestCase):
    """Regression tests for spreg.probit.BaseProbit on the columbus sample data."""

    def setUp(self):
        """Build the binary response, design matrix and rook weights from columbus."""
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("CRIME"))
        y = np.reshape(y, (49,1))
        self.y = (y>40).astype(float)  # binary response: high-crime indicator
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("HOVAL"))
        self.X = np.array(X).T
        self.X = np.hstack((np.ones(self.y.shape),self.X))  # prepend constant column
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'  # row-standardised weights

    def test_model(self):
        """Check all BaseProbit outputs against stored reference values."""
        reg = PB.BaseProbit(self.y, self.X, w=self.w)
        betas = np.array([[ 3.35381078], [-0.1996531 ], [-0.02951371]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        predy = np.array([ 0.00174739])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        n = 49
        np.testing.assert_allclose(reg.n,n,RTOL)
        k = 3
        np.testing.assert_allclose(reg.k,k,RTOL)
        y = np.array([ 0.])
        np.testing.assert_allclose(reg.y[0],y,RTOL)
        x = np.array([ 1. , 19.531 , 80.467003])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        vm = np.array([[ 8.52813879e-01, -4.36272459e-02, -8.05171472e-03], [ -4.36272459e-02, 4.11381444e-03, -1.92834842e-04], [ -8.05171472e-03, -1.92834842e-04, 3.09660240e-04]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        xmean = np.array([[ 1. ], [ 14.37493876], [ 38.43622447 ]])
        np.testing.assert_allclose(reg.xmean,xmean,RTOL)
        predpc = 85.714285714285708
        np.testing.assert_allclose(reg.predpc,predpc,RTOL)
        logl = -20.06009093055782
        np.testing.assert_allclose(reg.logl,logl,RTOL)
        scale = 0.23309310130643665
        np.testing.assert_allclose(reg.scale,scale,RTOL)
        slopes = np.array([[-0.04653776], [-0.00687944]])
        np.testing.assert_allclose(reg.slopes,slopes,RTOL)
        slopes_vm = np.array([[ 1.77101993e-04, -1.65021168e-05], [ -1.65021168e-05, 1.60575016e-05]])
        np.testing.assert_allclose(reg.slopes_vm,slopes_vm,RTOL)
        LR = 25.317683245671716
        np.testing.assert_allclose(reg.LR[0],LR,RTOL)
        Pinkse_error = 2.9632385352516728
        np.testing.assert_allclose(reg.Pinkse_error[0],Pinkse_error,RTOL)
        KP_error = 1.6509224700582124
        np.testing.assert_allclose(reg.KP_error[0],KP_error,RTOL)
        PS_error = 2.3732463777623511
        np.testing.assert_allclose(reg.PS_error[0],PS_error,RTOL)
class TestProbit(unittest.TestCase):
    """Regression tests for the user-facing ``spreg.probit.Probit``.

    Same data and reference values as TestBaseProbit; here X carries no
    constant column, and the fitted model still reports k == 3 and a
    3-row beta vector (note for reviewers: presumably Probit adds the
    constant itself -- confirm against the Probit implementation).
    """

    def setUp(self):
        """Build binary y, X = [INC, HOVAL] and rook weights from columbus."""
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("CRIME"))
        y = np.reshape(y, (49,1))
        self.y = (y>40).astype(float)
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("HOVAL"))
        self.X = np.array(X).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        """Compare every attribute of the fitted model to reference values."""
        reg = PB.Probit(self.y, self.X, w=self.w)
        betas = np.array([[ 3.35381078], [-0.1996531 ], [-0.02951371]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        predy = np.array([ 0.00174739])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        n = 49
        np.testing.assert_allclose(reg.n,n,RTOL)
        k = 3
        np.testing.assert_allclose(reg.k,k,RTOL)
        y = np.array([ 0.])
        np.testing.assert_allclose(reg.y[0],y,RTOL)
        x = np.array([ 1. , 19.531 , 80.467003])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        vm = np.array([[ 8.52813879e-01, -4.36272459e-02, -8.05171472e-03], [ -4.36272459e-02, 4.11381444e-03, -1.92834842e-04], [ -8.05171472e-03, -1.92834842e-04, 3.09660240e-04]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        xmean = np.array([[ 1. ], [ 14.37493876], [ 38.43622447 ]])
        np.testing.assert_allclose(reg.xmean,xmean,RTOL)
        predpc = 85.714285714285708
        np.testing.assert_allclose(reg.predpc,predpc,RTOL)
        logl = -20.06009093055782
        np.testing.assert_allclose(reg.logl,logl,RTOL)
        scale = 0.23309310130643665
        np.testing.assert_allclose(reg.scale,scale,RTOL)
        slopes = np.array([[-0.04653776], [-0.00687944]])
        np.testing.assert_allclose(reg.slopes,slopes,RTOL)
        slopes_vm = np.array([[ 1.77101993e-04, -1.65021168e-05], [ -1.65021168e-05, 1.60575016e-05]])
        np.testing.assert_allclose(reg.slopes_vm,slopes_vm,RTOL)
        LR = 25.317683245671716
        np.testing.assert_allclose(reg.LR[0],LR,RTOL)
        Pinkse_error = 2.9632385352516728
        np.testing.assert_allclose(reg.Pinkse_error[0],Pinkse_error,RTOL)
        KP_error = 1.6509224700582124
        np.testing.assert_allclose(reg.KP_error[0],KP_error,RTOL)
        PS_error = 2.3732463777623511
        np.testing.assert_allclose(reg.PS_error[0],PS_error,RTOL)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| bsd-3-clause |
k3nnyfr/s2a_fr-nsis | s2a/Python/Lib/ctypes/test/test_pep3118.py | 76 | 7479 | import unittest
from ctypes import *
import re, sys
# Map the host byte order to struct-style endian prefixes.
THIS_ENDIAN, OTHER_ENDIAN = ("<", ">") if sys.byteorder == "little" else (">", "<")


def normalize(format):
    """Canonicalise a memoryview format string for comparison.

    Folds the non-native endian marker into the native one and strips all
    whitespace; None (no format) becomes the empty string.
    """
    if format is None:
        return ""
    collapsed = format.replace(OTHER_ENDIAN, THIS_ENDIAN)
    return re.sub(r"\s", "", collapsed)
class Test(unittest.TestCase):
    """Check the PEP 3118 buffer interface exposed by ctypes objects.

    test_native_types and test_endian_types were near-duplicates; the
    shared assertions now live in _check_view so the two methods differ
    only where their tables genuinely differ.
    """

    def _check_view(self, ob, v, fmt, shape, itemtp, exact_format):
        """Assertions shared by the native and endian test tables.

        ob           -- the ctypes instance under test
        v            -- memoryview(ob)
        fmt          -- expected format string
        shape        -- expected shape tuple, or None for scalars
        itemtp       -- ctypes type of one item, used to compute sizes
        exact_format -- compare the format verbatim (endian table) instead
                        of after normalize() (native table)
        """
        if exact_format:
            self.assertEqual(v.format, fmt)
        else:
            self.assertEqual(normalize(v.format), normalize(fmt))
        if shape is not None:
            self.assertEqual(len(v), shape[0])
        else:
            self.assertEqual(len(v) * sizeof(itemtp), sizeof(ob))
        self.assertEqual(v.itemsize, sizeof(itemtp))
        self.assertEqual(v.shape, shape)
        # ctypes objects always have a non-strided memory block
        self.assertEqual(v.strides, None)
        # they are always read/write
        self.assertFalse(v.readonly)

    def _item_count(self, v):
        """Return the number of items implied by the view's shape."""
        n = 1
        for dim in v.shape:
            n = n * dim
        return n

    def test_native_types(self):
        for tp, fmt, shape, itemtp in native_types:
            ob = tp()
            v = memoryview(ob)
            try:
                self._check_view(ob, v, fmt, shape, itemtp, exact_format=False)
                if v.shape:
                    self.assertEqual(self._item_count(v) * v.itemsize,
                                     len(v.tobytes()))
            except:
                # so that we can see the failing type
                print(tp)
                raise

    def test_endian_types(self):
        for tp, fmt, shape, itemtp in endian_types:
            ob = tp()
            v = memoryview(ob)
            try:
                self._check_view(ob, v, fmt, shape, itemtp, exact_format=True)
                if v.shape:
                    self.assertEqual(self._item_count(v), len(v))
            except:
                # so that we can see the failing type
                print(tp)
                raise
# define some structure classes
class Point(Structure):
    # Plain two-field structure; expected format below is "T{<l:x:<l:y:}".
    _fields_ = [("x", c_long), ("y", c_long)]
class PackedPoint(Structure):
    # _pack_ forces 2-byte alignment; packed structs do not implement the
    # pep and fall back to the opaque "B" format (see native_types below).
    _pack_ = 2
    _fields_ = [("x", c_long), ("y", c_long)]
class Point2(Structure):
    # Fields assigned after the class statement, to exercise late
    # completion of a structure type.
    pass
Point2._fields_ = [("x", c_long), ("y", c_long)]
class EmptyStruct(Structure):
    # Zero-field structure; expected format is "T{}".
    _fields_ = []
class aUnion(Union):
    # The pep doesn't support unions, so this is exported as "B".
    _fields_ = [("a", c_int)]
class Incomplete(Structure):
    # Never completed; exported as an opaque byte block "B".
    pass
class Complete(Structure):
    # Starts incomplete; completed only *after* a pointer type to it has
    # been created, to exercise that code path.
    pass
PComplete = POINTER(Complete)
Complete._fields_ = [("a", c_long)]
################################################################
#
# This table contains format strings as they look on little endian
# machines. The test replaces '<' with '>' on big endian machines.
#
native_types = [
# type format shape calc itemsize
## simple types
(c_char, "<c", None, c_char),
(c_byte, "<b", None, c_byte),
(c_ubyte, "<B", None, c_ubyte),
(c_short, "<h", None, c_short),
(c_ushort, "<H", None, c_ushort),
# c_int and c_uint may be aliases to c_long
#(c_int, "<i", None, c_int),
#(c_uint, "<I", None, c_uint),
(c_long, "<l", None, c_long),
(c_ulong, "<L", None, c_ulong),
# c_longlong and c_ulonglong are aliases on 64-bit platforms
#(c_longlong, "<q", None, c_longlong),
#(c_ulonglong, "<Q", None, c_ulonglong),
(c_float, "<f", None, c_float),
(c_double, "<d", None, c_double),
# c_longdouble may be an alias to c_double
(c_bool, "<?", None, c_bool),
(py_object, "<O", None, py_object),
## pointers
(POINTER(c_byte), "&<b", None, POINTER(c_byte)),
(POINTER(POINTER(c_long)), "&&<l", None, POINTER(POINTER(c_long))),
## arrays and pointers
(c_double * 4, "(4)<d", (4,), c_double),
(c_float * 4 * 3 * 2, "(2,3,4)<f", (2,3,4), c_float),
(POINTER(c_short) * 2, "(2)&<h", (2,), POINTER(c_short)),
(POINTER(c_short) * 2 * 3, "(3,2)&<h", (3,2,), POINTER(c_short)),
(POINTER(c_short * 2), "&(2)<h", None, POINTER(c_short)),
## structures and unions
(Point, "T{<l:x:<l:y:}", None, Point),
# packed structures do not implement the pep
(PackedPoint, "B", None, PackedPoint),
(Point2, "T{<l:x:<l:y:}", None, Point2),
(EmptyStruct, "T{}", None, EmptyStruct),
# the pep does't support unions
(aUnion, "B", None, aUnion),
## pointer to incomplete structure
(Incomplete, "B", None, Incomplete),
(POINTER(Incomplete), "&B", None, POINTER(Incomplete)),
# 'Complete' is a structure that starts incomplete, but is completed after the
# pointer type to it has been created.
(Complete, "T{<l:a:}", None, Complete),
# Unfortunately the pointer format string is not fixed...
(POINTER(Complete), "&B", None, POINTER(Complete)),
## other
# function signatures are not implemented
(CFUNCTYPE(None), "X{}", None, CFUNCTYPE(None)),
]
class BEPoint(BigEndianStructure):
    # Big-endian layout regardless of the host byte order.
    _fields_ = [("x", c_long), ("y", c_long)]
class LEPoint(LittleEndianStructure):
    # Little-endian layout regardless of the host byte order.
    _fields_ = [("x", c_long), ("y", c_long)]
################################################################
#
# This table contains format strings as they really look, on both big
# and little endian machines.
#
endian_types = [
    # (type, exact expected format, shape, item type) -- compared verbatim,
    # not via normalize(), since the endian prefix is the point here.
    (BEPoint, "T{>l:x:>l:y:}", None, BEPoint),
    (LEPoint, "T{<l:x:<l:y:}", None, LEPoint),
    (POINTER(BEPoint), "&T{>l:x:>l:y:}", None, POINTER(BEPoint)),
    (POINTER(LEPoint), "&T{<l:x:<l:y:}", None, POINTER(LEPoint)),
]
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| gpl-3.0 |
InsomniaROM/platform_external_skia | tools/test_pdfs.py | 231 | 1801 | '''
Compares the renderings of serialized SkPictures to expected images.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import os
import optparse
import sys
import shutil
import tempfile
import test_rendering
USAGE_STRING = 'Usage: %s input... expectedDir'
HELP_STRING = '''
Takes input SkPicture files and renders them as PDF files, and then compares
those resulting PDF files against PDF files found in expectedDir.
Each instance of "input" can be either a file (name must end in .skp), or a
directory (in which case this script will process all .skp files within the
directory).
'''
def Main(args):
    """Allow other scripts to call this script with fake command-line args.

    @param args The command-line argument list: args[0] is the program
                name, args[1:-1] the inputs and args[-1] the directory of
                expected PDFs.
    """
    parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
    parser.add_option('--render_dir', dest='render_dir',
                      help=('specify the location to output the rendered '
                            'files. Default is a temp directory.'))
    parser.add_option('--diff_dir', dest='diff_dir',
                      help=('specify the location to output the diff files. '
                            'Default is a temp directory.'))
    options, arguments = parser.parse_args(args)

    # Need the program name plus at least one input and the expected dir.
    if len(arguments) < 3:
        # Fixed typo in the user-facing message ("ouput" -> "output").
        print("Expected at least one input and one output folder.")
        parser.print_help()
        sys.exit(-1)

    inputs = arguments[1:-1]
    expected_dir = arguments[-1]

    test_rendering.TestRenderSkps(inputs, expected_dir, options.render_dir,
                                  options.diff_dir, 'render_pdfs', '')
if __name__ == '__main__':
    # Pass the real argv when run as a script.
    Main(sys.argv)
| bsd-3-clause |
stanmoore1/lammps | python/examples/matplotlib_plot.py | 4 | 2246 | #!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# matplotlib_plot.py
# Purpose: plot Temp of running LAMMPS simulation via matplotlib
# Syntax: plot.py in.lammps Nfreq Nsteps compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point every this many steps
# Nsteps = run for this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
from __future__ import print_function
import sys
import matplotlib.pyplot as plt
# parse command line
argv = sys.argv
if len(argv) != 5:
    print("Syntax: plot.py in.lammps Nfreq Nsteps compute-ID")
    sys.exit()

infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
compute = sys.argv[4]

# rank of this process; stays 0 for a serial run
me = 0
# uncomment this if running in parallel via mpi4py
#from mpi4py import MPI
#me = MPI.COMM_WORLD.Get_rank()
#nprocs = MPI.COMM_WORLD.Get_size()

from lammps import lammps
lmp = lammps()

# run infile all at once
# assumed to have no run command in it
lmp.file(infile)
lmp.command("thermo %d" % nfreq)

# initial 0-step run to generate initial 1-point plot
lmp.command("run 0 pre yes post no")
# NOTE(review): the (0, 0) style/type arguments presumably select the
# compute's global scalar value -- confirm against the LAMMPS Python API.
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]

# create matplotlib plot
# just proc 0 handles plotting
if me == 0:
    fig = plt.figure()
    line, = plt.plot(xaxis, yaxis)
    plt.xlim([0, nsteps])
    plt.title(compute)
    plt.xlabel("Timestep")
    plt.ylabel("Temperature")
    plt.show(block=False)

# run nfreq steps at a time w/out pre/post, query compute, refresh plot
import time   # (not referenced below; kept as-is)
while ntimestep < nsteps:
    lmp.command("run %d pre no post no" % nfreq)
    ntimestep += nfreq
    value = lmp.extract_compute(compute,0,0)
    xaxis.append(ntimestep)
    yaxis.append(value)
    if me == 0:
        line.set_xdata(xaxis)
        line.set_ydata(yaxis)
        ax = plt.gca()
        ax.relim()
        ax.autoscale_view(True, True, True)
        # brief pause lets the GUI event loop redraw the figure
        plt.pause(0.001)

lmp.command("run 0 pre no post yes")

# uncomment if running in parallel via mpi4py
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)

# keep the window open until the user confirms
if me == 0:
    if sys.version_info[0] == 3:
        input("Press Enter to exit...")
    else:
        raw_input("Press Enter to exit...")
| gpl-2.0 |
rajpushkar83/cloudmesh | cloudmesh/shell/cm_list.py | 1 | 20175 | from __future__ import print_function
from cloudmesh.config.cm_config import cm_config
from cloudmesh.shell.cm_cloud import CloudManage
from cloudmesh_base.logger import LOGGER
from tabulate import tabulate
from cmd3.console import Console
from cloudmesh.shell.cm_cloud import shell_command_cloud
from docopt import docopt
from cloudmesh.shell.shellutil import shell_commands_dict_output, get_command_list_refresh_default_setting
from pprint import pprint
# Table style passed to tabulate() throughout this module
# ("simple" was the earlier choice).
# list_command_table_format = "simple"
list_command_table_format = "grid"

log = LOGGER(__file__)
def shell_command_list(arguments):
"""
::
List available flavors, images, vms, projects and clouds
Usage:
list flavor [CLOUD|--all]
[--refresh]
[--format=FORMAT]
[--column=COLUMN]
list image [CLOUD|--all]
[--refresh]
[--format=FORMAT]
[--column=COLUMN]
list vm [CLOUD|--all]
[--group=<group>]
[--refresh]
[--format=FORMAT]
[--column=COLUMN]
[--detail]
list project
list cloud [--column=COLUMN]
Arguments:
CLOUD the name of the cloud e.g. india
Options:
--all list information of all active clouds
--refresh refresh data before list
--group=<group> give the group name in list vm
--detail for table print format, a brief version
is used as default, use this flag to print
detailed table
--column=COLUMN specify what information to display in
the columns of the list command. For
example, --column=active,label prints
the columns active and label. Available
columns are active, label, host,
type/version, type, heading, user,
credentials, defaults (all to display
all, email to display all except
credentials and defaults)
--format=FORMAT output format: table, json, csv
Description:
List clouds and projects information, if the CLOUD argument is not specified, the
selected default cloud will be used. You can interactively set the default cloud with the command
'cloud select'.
list flavor
: list the flavors
list image
: list the images
list vm
: list the vms
list project
: list the projects
list cloud
: same as cloud list
See Also:
man cloud
"""
call = ListInfo(arguments)
call.execute()
class ListInfo(object):
def __init__(self, arguments):
self.cloudmanage = CloudManage()
try:
self.config = cm_config()
except:
Console.error("There is a problem with the configuration yaml files")
self.username = self.config['cloudmesh']['profile']['username']
self.arguments = arguments
# pprint(self.arguments)
self.cloudmanage = CloudManage()
try:
self.config = cm_config()
except:
Console.error("There is a problem with the configuration yaml files")
self.username = self.config['cloudmesh']['profile']['username']
self.refresh_default_setting = get_command_list_refresh_default_setting(self.username)
    def _list_flavor(self):
        """Print the flavors of the selected/active cloud(s).

        Honours --refresh (or the user's stored refresh default),
        --format and --column; does nothing when no usable cloud is
        found (get_working_cloud_name already printed why).
        """
        self.cloudmanage._connect_to_mongo()
        clouds = self.get_working_cloud_name()
        if clouds:
            # [column heading, mongo document key] pairs, in print order.
            itemkeys = [
                ['id', 'id'],
                ['name', 'name'],
                ['vcpus', 'vcpus'],
                ['ram', 'ram'],
                ['disk', 'disk'],
                ['refresh time', 'cm_refresh']
            ]
            if self.refresh_default_setting or self.arguments['--refresh']:
                self.cloudmanage.mongo.activate(
                    cm_user_id=self.username, names=clouds)
                self.cloudmanage.mongo.refresh(
                    cm_user_id=self.username, names=clouds, types=['flavors'])
            # --format
            p_format = self.arguments['--format']
            # --column
            # available columns are: id, name, vcpus, ram, disk, refresh time,
            # and all
            if self.arguments['--column']:
                if self.arguments['--column'] != "all":
                    s_column = [x.strip() for x in
                                self.arguments['--column'].split(',')]
                    new_itemkeys = []
                    for item in itemkeys:
                        if item[0] in s_column:
                            new_itemkeys.append(item)
                    itemkeys = new_itemkeys
            for cloud in clouds:
                self.cloudmanage.print_cloud_flavors(username=self.username,
                                                     cloudname=cloud.encode(
                                                         "ascii"),
                                                     itemkeys=itemkeys,
                                                     refresh=False,
                                                     output=False,
                                                     print_format=p_format)
        else:
            return
def _list_image(self):
self.cloudmanage._connect_to_mongo()
clouds = self.get_working_cloud_name()
if clouds:
itemkeys = {"openstack":
[
# [ "Metadata", "metadata"],
["name", "name"],
["status", "status"],
["id", "id"],
["type_id", "metadata", "instance_type_id"],
["iname", "metadata", "instance_type_name"],
["location", "metadata", "image_location"],
["state", "metadata", "image_state"],
["updated", "updated"],
# [ "minDisk" , "minDisk"],
["memory_mb", "metadata",
'instance_type_memory_mb'],
["fid", "metadata", "instance_type_flavorid"],
["vcpus", "metadata", "instance_type_vcpus"],
# [ "user_id" , "metadata", "user_id"],
# [ "owner_id" , "metadata", "owner_id"],
# [ "gb" , "metadata", "instance_type_root_gb"],
# [ "arch", ""]
],
"ec2":
[
# [ "Metadata", "metadata"],
["state", "extra", "state"],
["name", "name"],
["id", "id"],
["public", "extra", "is_public"],
["ownerid", "extra", "owner_id"],
["imagetype", "extra", "image_type"]
],
"azure":
[
["name", "label"],
["category", "category"],
["id", "id"],
["size", "logical_size_in_gb"],
["os", "os"]
],
"aws":
[
["state", "extra", "state"],
["name", "name"],
["id", "id"],
["public", "extra", "ispublic"],
["ownerid", "extra", "ownerid"],
["imagetype", "extra", "imagetype"]
]
}
if self.refresh_default_setting or self.arguments['--refresh']:
self.cloudmanage.mongo.activate(
cm_user_id=self.username, names=clouds)
self.cloudmanage.mongo.refresh(
cm_user_id=self.username, names=clouds, types=['images'])
p_format = self.arguments['--format']
# --column
# available columns are: id, name, vcpus, ram, disk, refresh time,
# and all
if self.arguments['--column']:
if self.arguments['--column'] != "all":
s_column = [x.strip() for x in
self.arguments['--column'].split(',')]
new_itemkeys = {x: [] for x in itemkeys.keys()}
for cloud, items in itemkeys.iteritems():
for item in items:
if item[0] in s_column:
new_itemkeys[cloud].append(item)
itemkeys = new_itemkeys
for cloud in clouds:
self.cloudmanage.print_cloud_images(username=self.username,
cloudname=cloud.encode(
"ascii"),
itemkeys=itemkeys,
refresh=False, output=False,
print_format=p_format)
else:
return
def _list_server(self):
self.cloudmanage._connect_to_mongo()
clouds = self.get_working_cloud_name()
if clouds:
itemkeys = {"openstack":
[
['name', 'name'],
['status', 'status'],
['addresses', 'addresses'],
['id', 'id'],
['flavor', 'flavor', 'id'],
['image', 'image', 'id'],
['user_id', 'cm_user_id'],
['metadata', 'metadata'],
['key_name', 'key_name'],
['created', 'created'],
['cloud', 'cm_cloud']
],
"ec2":
[
["name", "id"],
["status", "extra", "status"],
["addresses", "public_ips"],
["flavor", "extra", "instance_type"],
['id', 'id'],
['image', 'extra', 'imageId'],
["user_id", 'user_id'],
["metadata", "metadata"],
["key_name", "extra", "key_name"],
["created", "extra", "launch_time"]
],
"aws":
[
["name", "name"],
["status", "extra", "status"],
["addresses", "public_ips"],
["flavor", "extra", "instance_type"],
['id', 'id'],
['image', 'extra', 'image_id'],
["user_id", "user_id"],
["metadata", "metadata"],
["key_name", "extra", "key_name"],
["created", "extra", "launch_time"]
],
"azure":
[
['name', 'name'],
['status', 'status'],
['addresses', 'vip'],
['flavor', 'flavor', 'id'],
['id', 'id'],
['image', 'image', 'id'],
['user_id', 'user_id'],
['metadata', 'metadata'],
['key_name', 'key_name'],
['created', 'created'],
]
}
itemkeys_short = {"openstack":
[
['name', 'name'],
['status', 'status'],
['addresses', 'addresses'],
['flavor', 'flavor', 'id'],
['image', 'image', 'id']
],
"ec2":
[
["name", "id"],
["status", "extra", "status"],
["addresses", "public_ips"],
["flavor", "extra", "instance_type"],
['image', 'extra', 'imageId']
],
"aws":
[
["name", "name"],
["status", "extra", "status"],
["addresses", "public_ips"],
["flavor", "extra", "instance_type"],
['image', 'extra', 'image_id']
],
"azure":
[
['name', 'name'],
['status', 'status'],
['addresses', 'vip'],
['flavor', 'flavor', 'id'],
['image', 'image', 'id']
]
}
if self.refresh_default_setting or self.arguments['--refresh']:
self.cloudmanage.mongo.activate(
cm_user_id=self.username, names=clouds)
self.cloudmanage.mongo.refresh(
cm_user_id=self.username, names=clouds, types=['servers'])
p_format = self.arguments['--format']
# --column
# available columns are: id, name, vcpus, ram, disk, refresh time,
# and all
if self.arguments['--column']:
if self.arguments['--column'] != "all":
s_column = [x.strip() for x in
self.arguments['--column'].split(',')]
new_itemkeys = {x: [] for x in itemkeys.keys()}
for cloud, items in itemkeys.iteritems():
for item in items:
if item[0] in s_column:
new_itemkeys[cloud].append(item)
itemkeys = new_itemkeys
else:
if not self.arguments['--detail']:
itemkeys = itemkeys_short
for cloud in clouds:
self.cloudmanage.print_cloud_servers(username=self.username,
cloudname=cloud.encode(
"ascii"),
itemkeys=itemkeys,
refresh=False,
output=False,
print_format=p_format,
group=self.arguments['--group'])
else:
return
def _list_project(self):
self.cloudmanage._connect_to_mongo()
selected_project = None
try:
selected_project = self.cloudmanage.mongo.db_defaults.find_one(
{'cm_user_id': self.username + "OIO"})['project']
except Exception, NoneType:
Console.warning("could not find selected project in the database")
except Exception, e:
Console.error("could not connect to the database")
print(e)
print("\n")
print(tabulate([[selected_project]], ["selected project"], tablefmt=list_command_table_format))
#
# active projects
#
projects = {}
for state in ["active", "completed"]:
projects[state] = None
try:
projects[state] = self.cloudmanage.mongo.db_user.find_one(
{'cm_user_id': self.username})['projects'][state]
except:
Console.error(
"could not find objects or connect to the database containing the projects")
to_print = []
if projects[state] is None:
to_print = [[None]]
else:
to_print = [[str(p)] for p in projects[state]]
print("\n")
print(tabulate(to_print, ["{0} projects".format(state)], tablefmt=list_command_table_format))
def _list_cloud(self):
""" same as the shell_command_cloud list"""
arguments = dict(self.arguments)
arguments["list"] = True
shell_command_cloud(arguments)
"""
self.cloudmanage._connect_to_mongo()
active_clouds = []
other_clouds = []
activeclouds = self.cloudmanage.mongo.active_clouds(self.username)
clouds = self.cloudmanage.get_clouds(self.username)
clouds = clouds.sort([('cm_cloud', 1)])
for cloud in clouds:
name = cloud['cm_cloud']
if name in activeclouds:
active_clouds.append([str(name)])
else:
other_clouds.append([str(name)])
if active_clouds == []: active_clouds = [None]
if other_clouds == []: other_clouds = [None]
print tabulate(active_clouds, ["active clouds"], tablefmt=list_command_table_format)
print "\n"
print tabulate(other_clouds, ["other clouds"], tablefmt=list_command_table_format)
print "\n"
"""
# --------------------------------------------------------------------------
def get_working_cloud_name(self):
'''
get the name of a cloud to be work on, if CLOUD not given, will pick the
slected cloud, is --all, will return a list of active clouds
'''
self.cloudmanage._connect_to_mongo()
activeclouds = None
try:
activeclouds = self.cloudmanage.mongo.active_clouds(self.username)
except:
pass
if self.arguments['--all']:
if activeclouds is None:
print("no active cloud, please activate a cloud by 'cloud on [CLOUD]'")
return False
return activeclouds
else:
if self.arguments['CLOUD']:
name = self.arguments['CLOUD']
else:
name = self.cloudmanage.get_selected_cloud(self.username)
if self.cloudmanage.get_clouds(self.username, getone=True, cloudname=name) is None:
Console.error(
"no cloud information of '{0}' in database".format(name))
return False
if name not in activeclouds:
Console.warning(
"cloud '{0}' is not active, to activate a cloud: cloud on [CLOUD]".format(name))
return False
return [name]
def execute(self):
if self.arguments['vm']:
self._list_server()
elif self.arguments['flavor']:
self._list_flavor()
elif self.arguments['image']:
self._list_image()
elif self.arguments['project']:
self._list_project()
elif self.arguments['cloud']:
self._list_cloud()
def main():
    """Parse argv with docopt against the command's usage docstring and run it."""
    arguments = docopt(shell_command_list.__doc__)
    shell_command_list(arguments)
if __name__ == '__main__':
    # Allow running the command module directly.
    main()
| apache-2.0 |
scorphus/django | django/core/serializers/pyyaml.py | 439 | 2843 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
import collections
import decimal
import sys
from io import StringIO
import yaml
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.db import models
from django.utils import six
# Use the C (faster) implementation if possible
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
class DjangoSafeDumper(SafeDumper):
    """SafeDumper with representers for extra types Django serializes.

    The representers are registered just below the class definition.
    """
    def represent_decimal(self, data):
        # Emit Decimal as a plain string so the output stays within the
        # "safe" YAML tag set.
        return self.represent_scalar('tag:yaml.org,2002:str', str(data))

    def represent_ordered_dict(self, data):
        # Emit OrderedDict as an ordinary YAML map, preserving item order.
        return self.represent_mapping('tag:yaml.org,2002:map', data.items())
# Register the custom representers defined on DjangoSafeDumper above.
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
DjangoSafeDumper.add_representer(collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict)
class Serializer(PythonSerializer):
    """
    Convert a queryset to YAML.

    Builds on PythonSerializer: the parent accumulates plain Python
    objects and this class dumps them with DjangoSafeDumper at the end.
    """
    internal_use_only = False

    def handle_field(self, obj, field):
        # A nasty special case: base YAML doesn't support serialization of time
        # types (as opposed to dates or datetimes, which it does support). Since
        # we want to use the "safe" serializer for better interoperability, we
        # need to do something with those pesky times. Converting 'em to strings
        # isn't perfect, but it's better than a "!!python/time" type which would
        # halt deserialization under any other language.
        if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
            self._current[field.name] = str(getattr(obj, field.name))
        else:
            super(Serializer, self).handle_field(obj, field)

    def end_serialization(self):
        # Dump everything accumulated by the parent in one go.
        yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)

    def getvalue(self):
        # Grand-parent super: skip PythonSerializer's getvalue on purpose.
        return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of YAML data.

    Yields deserialized objects one at a time (this is a generator);
    any failure is re-raised as DeserializationError with the original
    traceback attached.
    """
    if isinstance(stream_or_string, bytes):
        stream_or_string = stream_or_string.decode('utf-8')
    if isinstance(stream_or_string, six.string_types):
        stream = StringIO(stream_or_string)
    else:
        stream = stream_or_string
    try:
        for obj in PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options):
            yield obj
    except GeneratorExit:
        # The caller closed the generator; must not be converted into a
        # DeserializationError.
        raise
    except Exception as e:
        # Map to deserializer error
        six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
| bsd-3-clause |
Protoneer/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/_collections.py | 309 | 2903 | # urllib3/_collections.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import MutableMapping
from threading import Lock
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
__all__ = ['RecentlyUsedContainer']


# Sentinel distinguishing "no value present" from a stored None.
_Null = object()


class RecentlyUsedContainer(MutableMapping):
    """
    Provides a thread-safe dict-like container which maintains up to
    ``maxsize`` keys while throwing away the least-recently-used keys beyond
    ``maxsize``.

    :param maxsize:
        Maximum number of recent elements to retain.

    :param dispose_func:
        Every time an item is evicted from the container,
        ``dispose_func(value)`` is called on the evicted value.
    """

    ContainerCls = OrderedDict

    def __init__(self, maxsize=10, dispose_func=None):
        self._maxsize = maxsize
        self.dispose_func = dispose_func
        self._container = self.ContainerCls()
        self._lock = Lock()

    def __getitem__(self, key):
        # Re-insert the item, moving it to the end of the eviction line.
        with self._lock:
            value = self._container.pop(key)
            self._container[key] = value
            return value

    def __setitem__(self, key, value):
        displaced = _Null
        with self._lock:
            # Either we displace an existing value for ``key``...
            displaced = self._container.get(key, _Null)
            self._container[key] = value
            # ... or, if the container grew past maxsize, we drop the
            # least-recently-used entry instead (the two are exclusive:
            # replacing a key never grows the container).
            if len(self._container) > self._maxsize:
                _evicted_key, displaced = self._container.popitem(last=False)

        # Dispose outside the lock so a slow callback doesn't block others.
        if self.dispose_func and displaced is not _Null:
            self.dispose_func(displaced)

    def __delitem__(self, key):
        with self._lock:
            value = self._container.pop(key)

        if self.dispose_func:
            self.dispose_func(value)

    def __len__(self):
        with self._lock:
            return len(self._container)

    def __iter__(self):
        raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')

    def clear(self):
        with self._lock:
            # Copy pointers to all values, then wipe the mapping
            values = list(self._container.values())
            self._container.clear()

        if self.dispose_func:
            for value in values:
                self.dispose_func(value)

    def keys(self):
        with self._lock:
            return self._container.keys()
veo-labs/python-socketio-client | socketio_client/parser.py | 1 | 4920 | """
Parser for the socket.io protocol.

This implementation is inspired by the package python-socketio
written by Miguel Grinberg and available under the MIT license at
https://github.com/miguelgrinberg/python-socketio
"""
from engineio_client.emitter import Emitter
import six
import re
import json
import functools
import logging
# Module-level logger (not referenced elsewhere in this module).
logger = logging.getLogger(__name__)
class Packet(object):
    """One socket.io packet: a numeric type code plus optional payload data,
    namespace and message id."""

    # Wire-protocol packet type codes.
    CONNECT = 0
    DISCONNECT = 1
    EVENT = 2
    ACK = 3
    ERROR = 4
    BINARY_EVENT = 5
    BINARY_ACK = 6

    def __init__(self, type=None, data=None, namespace=None, id=None):
        self.type = type
        self.data = data
        # An empty/missing namespace means the default namespace '/'.
        self.namespace = namespace if namespace else '/'
        self.id = id

    @property
    def type_string(self):
        """Human-readable name of this packet's type (KeyError if unknown)."""
        names = {
            self.CONNECT: 'connect',
            self.DISCONNECT: 'disconnect',
            self.EVENT: 'event',
            self.ACK: 'ack',
            self.ERROR: 'error',
            self.BINARY_EVENT: 'binary_event',
            self.BINARY_ACK: 'binary_ack',
        }
        return names[self.type]

    def __str__(self):
        # Join the truthy fields only, e.g. "event - 7 - / - ['msg']".
        fields = [self.type_string, self.id, self.namespace, self.data]
        return ' - '.join(str(field) for field in fields if field)
# Regex for one text frame of the socket.io packet encoding; the five
# capture groups feed Parser.decode_packet below.
PATTERN = '^'
PATTERN += '([0-6])'          # type
PATTERN += '(?:(\d+)-)?'      # number of attachments (optional)
PATTERN += '(?:(/[^,]+),?)?'  # namespace (optional)
PATTERN += '(\d*)'            # message id (optional)
PATTERN += '(.*)'             # data
PATTERN += '$'
class Parser(object):
    """Stateful decoder/encoder for socket.io packets.

    A binary packet arrives as one text frame followed by
    ``num_attachments`` binary frames; the parser buffers state across
    successive ``decode`` calls until the packet is complete.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Discard any partially decoded packet."""
        self.packet = None
        self.raw_data = None
        self.num_attachments = 0
        self.attachments = []

    def decode(self, bytes):
        """Feed one incoming frame; return a completed Packet or None.

        The first frame of a packet is text and parsed with decode_packet;
        subsequent frames are binary attachments.  None is returned while
        attachments are still outstanding.
        """
        if not self.packet:
            packet_type, num_attachments, namespace, packet_id, data = self.decode_packet(bytes)
            self.packet = Packet(type=packet_type, namespace=namespace, id=packet_id)
            self.raw_data = data
            self.num_attachments = num_attachments
        else:
            self.attachments.append(bytes)
        if self.num_attachments != len(self.attachments):
            return None
        packet = self.packet
        packet.data = self.construct_data(self.raw_data, self.attachments)
        self.reset()
        return packet

    def decode_packet(self, bytes):
        """Split a text frame into (type, num_attachments, namespace, id, data).

        Raises ParserException when the frame does not match PATTERN.
        """
        matches = re.findall(PATTERN, bytes)
        if not matches:
            raise ParserException("Decoded packet is invalid: %s" % repr(bytes))
        items = matches[0]
        packet_type = int(items[0])
        num_attachments = int(items[1]) if items[1] else 0
        namespace = items[2]
        packet_id = int(items[3]) if items[3] else None
        data = json.loads(items[4]) if items[4] else None
        return packet_type, num_attachments, namespace, packet_id, data

    def construct_data(self, data, attachments):
        """Recursively replace placeholder dicts with their binary attachments.

        A placeholder looks like {'_placeholder': True, 'num': i} and is
        swapped for bytearray(attachments[i]); lists and dicts are walked
        recursively, everything else is returned unchanged.
        """
        ret = data
        if isinstance(data, list):
            ret = [self.construct_data(item, attachments) for item in data]
        elif isinstance(data, dict):
            if data.get('_placeholder', False) and 0 <= data.get('num', -1) < len(attachments):
                ret = bytearray(attachments[data['num']])
            else:
                ret = {key: self.construct_data(value, attachments) for key, value in six.iteritems(data)}
        return ret

    def encode(self, packet):
        """Encode *packet* into a list of frames: the text frame first,
        followed by any binary attachments.

        Note: mutates packet.type (EVENT -> BINARY_EVENT, ACK -> BINARY_ACK)
        when binary attachments are present.
        """
        bytes = six.text_type()
        data, attachments = self.deconstruct_data(packet.data)
        if attachments:
            bytes += six.text_type(len(attachments)) + '-'
            if packet.type == Packet.EVENT:
                packet.type = Packet.BINARY_EVENT
            elif packet.type == Packet.ACK:
                packet.type = Packet.BINARY_ACK
        bytes = six.text_type(packet.type) + bytes
        if packet.namespace and packet.namespace != '/':
            bytes += packet.namespace
            # Comma only separates the namespace from a following id/data.
            if packet.id or data:
                bytes += ','
        if packet.id is not None:
            bytes += six.text_type(packet.id)
        if data is not None:
            bytes += json.dumps(data, separators=(',', ':'))
        return [bytes] + attachments

    def deconstruct_data(self, data, attachments=None):
        """Inverse of construct_data: pull bytearrays out of *data*.

        Returns (json-safe data with placeholder dicts, list of extracted
        bytearray attachments in placeholder order).
        """
        if attachments is None:
            attachments = []
        ret = data
        if isinstance(data, bytearray):
            attachments.append(data)
            ret = {'_placeholder': True, 'num': len(attachments) - 1}
        elif isinstance(data, (tuple, list)):
            ret = [self.deconstruct_data(item, attachments)[0]
                   for item in data]
        elif isinstance(data, dict):
            ret = {key: self.deconstruct_data(value, attachments)[0]
                   for key, value in six.iteritems(data)}
        return ret, attachments
class ParserException(Exception):
    """Raised by Parser.decode_packet for frames that do not match PATTERN."""
class PacketException(Exception):
    """Exception type for packet-level errors (part of the module's API)."""
| mit |
shinglyu/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_parseopt.py | 171 | 12294 | from __future__ import with_statement
import sys
import os
import py, pytest
from _pytest import config as parseopt
@pytest.fixture
def parser():
    """Return a fresh ``parseopt.Parser`` for each test that requests it."""
    return parseopt.Parser()
class TestParser:
    """Tests for the pytest option parser layer (``_pytest.config``):
    Argument/OptionGroup wrappers around argparse, positional-argument
    splitting, default handling and the "drop short option" help
    formatter."""
    def test_no_help_by_default(self, capsys):
        # A bare Parser has no -h option registered, so -h is rejected.
        parser = parseopt.Parser(usage="xyz")
        pytest.raises(SystemExit, lambda: parser.parse(["-h"]))
        out, err = capsys.readouterr()
        assert err.find("error: unrecognized arguments") != -1
    def test_argument(self):
        with pytest.raises(parseopt.ArgumentError):
            # need a short or long option
            argument = parseopt.Argument()
        argument = parseopt.Argument('-t')
        assert argument._short_opts == ['-t']
        assert argument._long_opts == []
        assert argument.dest == 't'
        argument = parseopt.Argument('-t', '--test')
        assert argument._short_opts == ['-t']
        assert argument._long_opts == ['--test']
        assert argument.dest == 'test'
        argument = parseopt.Argument('-t', '--test', dest='abc')
        assert argument.dest == 'abc'
    def test_argument_type(self):
        # String type names ('int', 'string', 'choice') are translated to
        # real Python types for argparse.
        argument = parseopt.Argument('-t', dest='abc', type='int')
        assert argument.type is int
        argument = parseopt.Argument('-t', dest='abc', type='string')
        assert argument.type is str
        argument = parseopt.Argument('-t', dest='abc', type=float)
        assert argument.type is float
        with pytest.raises(KeyError):
            argument = parseopt.Argument('-t', dest='abc', type='choice')
        argument = parseopt.Argument('-t', dest='abc', type='choice',
                                     choices=['red', 'blue'])
        assert argument.type is str
    def test_argument_processopt(self):
        argument = parseopt.Argument('-t', type=int)
        argument.default = 42
        argument.dest = 'abc'
        res = argument.attrs()
        assert res['default'] == 42
        assert res['dest'] == 'abc'
    def test_group_add_and_get(self, parser):
        group = parser.getgroup("hello", description="desc")
        assert group.name == "hello"
        assert group.description == "desc"
    def test_getgroup_simple(self, parser):
        # getgroup() on an existing name returns the same group object.
        group = parser.getgroup("hello", description="desc")
        assert group.name == "hello"
        assert group.description == "desc"
        group2 = parser.getgroup("hello")
        assert group2 is group
    def test_group_ordering(self, parser):
        # 'after' inserts the new group right behind the named one.
        parser.getgroup("1")
        parser.getgroup("2")
        parser.getgroup("3", after="1")
        groups = parser._groups
        groups_names = [x.name for x in groups]
        assert groups_names == list("132")
    def test_group_addoption(self):
        group = parseopt.OptionGroup("hello")
        group.addoption("--option1", action="store_true")
        assert len(group.options) == 1
        assert isinstance(group.options[0], parseopt.Argument)
    def test_group_shortopt_lowercase(self, parser):
        # Plugins may not grab lowercase short options via the public API;
        # the private _addoption escape hatch still allows it.
        group = parser.getgroup("hello")
        pytest.raises(ValueError, """
            group.addoption("-x", action="store_true")
        """)
        assert len(group.options) == 0
        group._addoption("-x", action="store_true")
        assert len(group.options) == 1
    def test_parser_addoption(self, parser):
        group = parser.getgroup("custom options")
        assert len(group.options) == 0
        group.addoption("--option1", action="store_true")
        assert len(group.options) == 1
    def test_parse(self, parser):
        parser.addoption("--hello", dest="hello", action="store")
        args = parser.parse(['--hello', 'world'])
        assert args.hello == "world"
        assert not getattr(args, parseopt.FILE_OR_DIR)
    def test_parse2(self, parser):
        # Bare positional arguments land in the FILE_OR_DIR bucket.
        args = parser.parse([py.path.local()])
        assert getattr(args, parseopt.FILE_OR_DIR)[0] == py.path.local()
    def test_parse_known_args(self, parser):
        parser.parse_known_args([py.path.local()])
        parser.addoption("--hello", action="store_true")
        ns = parser.parse_known_args(["x", "--y", "--hello", "this"])
        assert ns.hello
        assert ns.file_or_dir == ['x']
    def test_parse_known_and_unknown_args(self, parser):
        parser.addoption("--hello", action="store_true")
        ns, unknown = parser.parse_known_and_unknown_args(["x", "--y",
                                                           "--hello", "this"])
        assert ns.hello
        assert ns.file_or_dir == ['x']
        assert unknown == ['--y', 'this']
    def test_parse_will_set_default(self, parser):
        parser.addoption("--hello", dest="hello", default="x", action="store")
        option = parser.parse([])
        assert option.hello == "x"
        del option.hello
        parser.parse_setoption([], option)
        assert option.hello == "x"
    def test_parse_setoption(self, parser):
        # parse_setoption() copies parsed values onto an arbitrary object.
        parser.addoption("--hello", dest="hello", action="store")
        parser.addoption("--world", dest="world", default=42)
        class A: pass
        option = A()
        args = parser.parse_setoption(['--hello', 'world'], option)
        assert option.hello == "world"
        assert option.world == 42
        assert not args
    def test_parse_special_destination(self, parser):
        # Dashes in option names become underscores in the destination.
        parser.addoption("--ultimate-answer", type=int)
        args = parser.parse(['--ultimate-answer', '42'])
        assert args.ultimate_answer == 42
    def test_parse_split_positional_arguments(self, parser):
        parser.addoption("-R", action='store_true')
        parser.addoption("-S", action='store_false')
        args = parser.parse(['-R', '4', '2', '-S'])
        assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
        args = parser.parse(['-R', '-S', '4', '2', '-R'])
        assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
        assert args.R == True
        assert args.S == False
        args = parser.parse(['-R', '4', '-S', '2'])
        assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
        assert args.R == True
        assert args.S == False
    def test_parse_defaultgetter(self):
        # A processopt hook may rewrite option defaults before parsing.
        def defaultget(option):
            if not hasattr(option, 'type'):
                return
            if option.type is int:
                option.default = 42
            elif option.type is str:
                option.default = "world"
        parser = parseopt.Parser(processopt=defaultget)
        parser.addoption("--this", dest="this", type="int", action="store")
        parser.addoption("--hello", dest="hello", type="string", action="store")
        parser.addoption("--no", dest="no", action="store_true")
        option = parser.parse([])
        assert option.hello == "world"
        assert option.this == 42
        assert option.no is False
    def test_drop_short_helper(self):
        parser = py.std.argparse.ArgumentParser(formatter_class=parseopt.DropShorterLongHelpFormatter)
        parser.add_argument('-t', '--twoword', '--duo', '--two-word', '--two',
                            help='foo').map_long_option = {'two': 'two-word'}
        # throws error on --deux only!
        parser.add_argument('-d', '--deuxmots', '--deux-mots',
                            action='store_true', help='foo').map_long_option = {'deux': 'deux-mots'}
        parser.add_argument('-s', action='store_true', help='single short')
        parser.add_argument('--abc', '-a',
                            action='store_true', help='bar')
        parser.add_argument('--klm', '-k', '--kl-m',
                            action='store_true', help='bar')
        parser.add_argument('-P', '--pq-r', '-p', '--pqr',
                            action='store_true', help='bar')
        parser.add_argument('--zwei-wort', '--zweiwort', '--zweiwort',
                            action='store_true', help='bar')
        parser.add_argument('-x', '--exit-on-first', '--exitfirst',
                            action='store_true', help='spam').map_long_option = {'exitfirst': 'exit-on-first'}
        parser.add_argument('files_and_dirs', nargs='*')
        args = parser.parse_args(['-k', '--duo', 'hallo', '--exitfirst'])
        assert args.twoword == 'hallo'
        assert args.klm is True
        assert args.zwei_wort is False
        assert args.exit_on_first is True
        assert args.s is False
        args = parser.parse_args(['--deux-mots'])
        with pytest.raises(AttributeError):
            assert args.deux_mots is True
        assert args.deuxmots is True
        args = parser.parse_args(['file', 'dir'])
        assert '|'.join(args.files_and_dirs) == 'file|dir'
    def test_drop_short_0(self, parser):
        parser.addoption('--funcarg', '--func-arg', action='store_true')
        parser.addoption('--abc-def', '--abc-def', action='store_true')
        parser.addoption('--klm-hij', action='store_true')
        args = parser.parse(['--funcarg', '--k'])
        assert args.funcarg is True
        assert args.abc_def is False
        assert args.klm_hij is True
    def test_drop_short_2(self, parser):
        parser.addoption('--func-arg', '--doit', action='store_true')
        args = parser.parse(['--doit'])
        assert args.func_arg is True
    def test_drop_short_3(self, parser):
        parser.addoption('--func-arg', '--funcarg', '--doit', action='store_true')
        args = parser.parse(['abcd'])
        assert args.func_arg is False
        assert args.file_or_dir == ['abcd']
    def test_drop_short_help0(self, parser, capsys):
        parser.addoption('--func-args', '--doit', help = 'foo',
                         action='store_true')
        parser.parse([])
        help = parser.optparser.format_help()
        assert '--func-args, --doit  foo' in help
        # testing would be more helpful with all help generated
    def test_drop_short_help1(self, parser, capsys):
        group = parser.getgroup("general")
        group.addoption('--doit', '--func-args', action='store_true', help='foo')
        group._addoption("-h", "--help", action="store_true", dest="help",
                         help="show help message and configuration info")
        parser.parse(['-h'])
        help = parser.optparser.format_help()
        assert '-doit, --func-args  foo' in help
def test_argcomplete(testdir, monkeypatch):
    """End-to-end check of bash tab-completion via argcomplete.

    Skipped unless bash, a py.test executable and argcomplete are all
    available in the environment; completion candidates are requested by
    faking the COMP_* environment the shell would provide.
    """
    if not py.path.local.sysfind('bash'):
        pytest.skip("bash not available")
    script = str(testdir.tmpdir.join("test_argcomplete"))
    pytest_bin = sys.argv[0]
    if "py.test" not in os.path.basename(pytest_bin):
        pytest.skip("need to be run with py.test executable, not %s" %(pytest_bin,))
    with open(str(script), 'w') as fp:
        # redirect output from argcomplete to stdin and stderr is not trivial
        # http://stackoverflow.com/q/12589419/1307905
        # so we use bash
        fp.write('COMP_WORDBREAKS="$COMP_WORDBREAKS" %s 8>&1 9>&2' % pytest_bin)
    # alternative would be exteneded Testdir.{run(),_run(),popen()} to be able
    # to handle a keyword argument env that replaces os.environ in popen or
    # extends the copy, advantage: could not forget to restore
    monkeypatch.setenv('_ARGCOMPLETE', "1")
    monkeypatch.setenv('_ARGCOMPLETE_IFS',"\x0b")
    monkeypatch.setenv('COMP_WORDBREAKS', ' \\t\\n"\\\'><=;|&(:')
    arg = '--fu'
    monkeypatch.setenv('COMP_LINE', "py.test " + arg)
    monkeypatch.setenv('COMP_POINT', str(len("py.test " + arg)))
    result = testdir.run('bash', str(script), arg)
    if result.ret == 255:
        # argcomplete not found
        pytest.skip("argcomplete not available")
    elif not result.stdout.str():
        pytest.skip("bash provided no output, argcomplete not available?")
    else:
        # Pre-2.7 bash output arrives on a single IFS-separated line.
        if py.std.sys.version_info < (2,7):
            result.stdout.lines = result.stdout.lines[0].split('\x0b')
            result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"])
        else:
            result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"])
    if py.std.sys.version_info < (2,7):
        return
    # Second pass: completion of positional (file/dir) arguments.
    os.mkdir('test_argcomplete.d')
    arg = 'test_argc'
    monkeypatch.setenv('COMP_LINE', "py.test " + arg)
    monkeypatch.setenv('COMP_POINT', str(len('py.test ' + arg)))
    result = testdir.run('bash', str(script), arg)
    result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"])
| mpl-2.0 |
abhinavp13/IITBX-edx-platform-dev | lms/djangoapps/courseware/tests/test_views.py | 2 | 5363 | from mock import MagicMock
import datetime
from django.test import TestCase
from django.http import Http404
from django.test.utils import override_settings
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
import courseware.views as views
from xmodule.modulestore import Location
from pytz import UTC
from modulestore_config import TEST_DATA_XML_MODULESTORE
class Stub():
    """Featureless placeholder: any attribute lookup on an instance fails,
    which is exactly what test_get_current_child relies on."""
@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
class TestJumpTo(TestCase):
    """Check the jumpto link for a course"""
    def setUp(self):
        # Reset the per-test modulestore registry before loading courses.
        self._MODULESTORES = {}
        # Toy courses should be loaded
        self.course_name = 'edX/toy/2012_Fall'
        self.toy_course = modulestore().get_course('edX/toy/2012_Fall')
    def test_jumpto_invalid_location(self):
        # A location that does not exist in the course must return 404.
        location = Location('i4x', 'edX', 'toy', 'NoSuchPlace', None)
        jumpto_url = '%s/%s/jump_to/%s' % ('/courses', self.course_name, location)
        response = self.client.get(jumpto_url)
        self.assertEqual(response.status_code, 404)
    def test_jumpto_from_chapter(self):
        # jump_to redirects to the courseware URL, which itself redirects,
        # hence target_status_code=302 below.
        location = Location('i4x', 'edX', 'toy', 'chapter', 'Overview')
        jumpto_url = '%s/%s/jump_to/%s' % ('/courses', self.course_name, location)
        expected = 'courses/edX/toy/2012_Fall/courseware/Overview/'
        response = self.client.get(jumpto_url)
        self.assertRedirects(response, expected, status_code=302, target_status_code=302)
class ViewsTestCase(TestCase):
    """Unit tests for helper functions and views in ``courseware.views``."""
    def setUp(self):
        # A dummy enrolled user plus the XML 'toy' course fixture.
        self.user = User.objects.create(username='dummy', password='123456',
                                        email='test@mit.edu')
        self.date = datetime.datetime(2013, 1, 22, tzinfo=UTC)
        self.course_id = 'edX/toy/2012_Fall'
        self.enrollment = CourseEnrollment.objects.get_or_create(user=self.user,
                                                                 course_id=self.course_id,
                                                                 created=self.date)[0]
        self.location = ['tag', 'org', 'course', 'category', 'name']
        self._MODULESTORES = {}
        # This is a CourseDescriptor object
        self.toy_course = modulestore().get_course('edX/toy/2012_Fall')
        self.request_factory = RequestFactory()
        chapter = 'Overview'
        self.chapter_url = '%s/%s/%s' % ('/courses', self.course_id, chapter)
    def test_user_groups(self):
        # depreciated function
        mock_user = MagicMock()
        mock_user.is_authenticated.return_value = False
        self.assertEquals(views.user_groups(mock_user), [])
    def test_get_current_child(self):
        # An object without a 'position' attribute yields no child.
        self.assertIsNone(views.get_current_child(Stub()))
        # position is 1-indexed and clipped to the first item when invalid.
        mock_xmodule = MagicMock()
        mock_xmodule.position = -1
        mock_xmodule.get_display_items.return_value = ['one', 'two']
        self.assertEquals(views.get_current_child(mock_xmodule), 'one')
        mock_xmodule_2 = MagicMock()
        mock_xmodule_2.position = 3
        mock_xmodule_2.get_display_items.return_value = []
        self.assertIsNone(views.get_current_child(mock_xmodule_2))
    def test_redirect_to_course_position(self):
        # With no display items there is nowhere to redirect to -> 404.
        mock_module = MagicMock()
        mock_module.descriptor.id = 'Underwater Basketweaving'
        mock_module.position = 3
        mock_module.get_display_items.return_value = []
        self.assertRaises(Http404, views.redirect_to_course_position,
                          mock_module)
    def test_registered_for_course(self):
        # Non-course objects and anonymous users are never "registered".
        self.assertFalse(views.registered_for_course('Basketweaving', None))
        mock_user = MagicMock()
        mock_user.is_authenticated.return_value = False
        self.assertFalse(views.registered_for_course('dummy', mock_user))
        mock_course = MagicMock()
        mock_course.id = self.course_id
        self.assertTrue(views.registered_for_course(mock_course, self.user))
    def test_jump_to_invalid(self):
        request = self.request_factory.get(self.chapter_url)
        self.assertRaisesRegexp(Http404, 'Invalid location', views.jump_to,
                                request, 'bar', ())
        self.assertRaisesRegexp(Http404, 'No data*', views.jump_to, request,
                                'dummy', self.location)
    def test_no_end_on_about_page(self):
        # Toy course has no course end date or about/end_date blob
        self.verify_end_date(self.course_id)
    def test_no_end_about_blob(self):
        # test_end has a course end date, no end_date HTML blob
        self.verify_end_date("edX/test_end/2012_Fall", "Sep 17, 2015")
    def test_about_blob_end_date(self):
        # test_about_blob_end_date has both a course end date and an end_date HTML blob.
        # HTML blob wins
        self.verify_end_date("edX/test_about_blob_end_date/2012_Fall", "Learning never ends")
    def verify_end_date(self, course_id, expected_end_text=None):
        """Render the about page for *course_id* and assert whether the
        'Classes End' section (and optional text) is shown."""
        request = self.request_factory.get("foo")
        request.user = self.user
        result = views.course_about(request, course_id)
        if expected_end_text is not None:
            self.assertContains(result, "Classes End")
            self.assertContains(result, expected_end_text)
        else:
            self.assertNotContains(result, "Classes End")
| agpl-3.0 |
TRESCLOUD/odoo | addons/document_webdav/dav_fs.py | 54 | 36371 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import time
import errno
import re
import urlparse
import urllib
try:
from pywebdav.lib.constants import COLLECTION # , OBJECT
from pywebdav.lib.errors import DAV_Error, DAV_Forbidden, DAV_NotFound
from pywebdav.lib.iface import dav_interface
from pywebdav.lib.davcmd import copyone, copytree, moveone, movetree, delone, deltree
except ImportError:
from DAV.constants import COLLECTION #, OBJECT
from DAV.errors import DAV_Error, DAV_Forbidden, DAV_NotFound
from DAV.iface import dav_interface
from DAV.davcmd import copyone, copytree, moveone, movetree, delone, deltree
import openerp
from openerp import pooler, sql_db, netsvc
from openerp.tools import misc
from cache import memoize
from webdav import mk_lock_response
# Maximum number of entries kept by the @memoize caches on the DAV
# property helpers below.
CACHE_SIZE=20000
# hack for urlparse: register webdav(s) as "net" protocols so that
# urlparse/urlsplit recognise their netloc (host) component.
urlparse.uses_netloc.append('webdav')
urlparse.uses_netloc.append('webdavs')
# Locale-independent weekday/month abbreviations, used to build RFC 1123
# HTTP dates without depending on the process locale.
day_names = { 0: 'Mon', 1: 'Tue' , 2: 'Wed', 3: 'Thu', 4: 'Fri', 5: 'Sat', 6: 'Sun' }
month_names = { 1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
                7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec' }
def dict_merge2(*dicts):
    """ Return a dict with all values of dicts.
        If some key appears twice and contains iterable objects, the values
        are merged (instead of overwritten).

        List/tuple values are concatenated, dict values are merged key-wise
        (later dicts win on key collisions).  The input dicts are never
        modified.
    """
    res = {}
    for d in dicts:
        for k in d.keys():
            if k in res and isinstance(res[k], (list, tuple)):
                # Concatenation builds a new object, inputs stay intact.
                res[k] = res[k] + d[k]
            elif k in res and isinstance(res[k], dict):
                # Fix: merge into a fresh dict.  Updating res[k] in place
                # would corrupt the nested dict still owned by one of the
                # input arguments (res[k] aliases it).
                merged = dict(res[k])
                merged.update(d[k])
                res[k] = merged
            else:
                res[k] = d[k]
    return res
class DAV_NotFound2(DAV_NotFound):
    """404 exception, that accepts our list uris

    When the first argument is a list/tuple of path components, it is
    joined into the usual '/a/b/c' form before being passed on.
    """
    def __init__(self, *args):
        if args and isinstance(args[0], (tuple, list)):
            joined = ''.join('/' + component for component in args[0])
            args = (joined,)
        DAV_NotFound.__init__(self, *args)
def _str2time(cre):
""" Convert a string with time representation (from db) into time (float)
"""
if not cre:
return time.time()
frac = 0.0
if isinstance(cre, basestring) and '.' in cre:
fdot = cre.find('.')
frac = float(cre[fdot:])
cre = cre[:fdot]
return time.mktime(time.strptime(cre,'%Y-%m-%d %H:%M:%S')) + frac
class BoundStream2(object):
    """Wraps around a seekable buffer, reads a determined range of data

    Note that the supplied stream object MUST support a size() which
    should return its data length (in bytes).

    A variation of the class in websrv_lib.py

    Fixes over the original:
      * ``long`` raised NameError on Python 3 (merged into ``int``);
      * the ``length=None`` default was unusable: the fallback to
        ``stream.size()`` was computed but the asserts checked the raw
        argument, so it always failed;
      * ``seek(pos, SEEK_SET)`` seeked to ``pos - offset`` although
        ``tell()`` and the SEEK_END branch define
        physical = offset + logical.
    """

    # 'long' was merged into 'int' on Python 3.
    try:
        _integer_types = (int, long)
    except NameError:
        _integer_types = (int,)

    def __init__(self, stream, offset=None, length=None, chunk_size=None):
        """
        @param stream      underlying seekable stream with a size() method
        @param offset      absolute start position in the stream (default 0)
        @param length      bytes exposed; defaults to the whole stream
        @param chunk_size  optional cap on the size of a single read()
        """
        self._stream = stream
        self._offset = offset or 0
        if length is None:
            length = self._stream.size()
        assert isinstance(length, self._integer_types) and length >= 0, length
        self._length = length
        self._rem_length = length
        self._chunk_size = chunk_size
        if offset is not None:
            self._stream.seek(offset)

    def read(self, size=-1):
        """Read up to *size* bytes, never beyond the bounded range."""
        if not self._stream:
            raise IOError(errno.EBADF, "read() without stream.")
        if self._rem_length == 0:
            return ''
        elif self._rem_length < 0:
            raise EOFError()
        rsize = self._rem_length
        if size > 0 and size < rsize:
            rsize = size
        if self._chunk_size and self._chunk_size < rsize:
            rsize = self._chunk_size
        data = self._stream.read(rsize)
        self._rem_length -= len(data)
        return data

    def __len__(self):
        return self._length

    def tell(self):
        """Logical position within the bounded range (physical - offset)."""
        res = self._stream.tell()
        if self._offset:
            res -= self._offset
        return res

    def __iter__(self):
        return self

    def next(self):
        # Iterate in 64 KiB chunks (Python 2 iterator protocol).
        return self.read(65536)

    # Python 3 iterator protocol.
    __next__ = next

    def seek(self, pos, whence=os.SEEK_SET):
        """ Seek, computing our limited range

        *pos* is interpreted within the bounded range; the underlying
        stream position is offset + logical position.
        """
        if whence == os.SEEK_SET:
            if pos < 0 or pos > self._length:
                raise IOError(errno.EINVAL, "Cannot seek.")
            # Fix: was 'pos - self._offset', inconsistent with tell().
            self._stream.seek(pos + self._offset)
            self._rem_length = self._length - pos
        elif whence == os.SEEK_CUR:
            if pos > 0:
                if pos > self._rem_length:
                    raise IOError(errno.EINVAL, "Cannot seek past end.")
            elif pos < 0:
                oldpos = self.tell()
                if oldpos + pos < 0:
                    raise IOError(errno.EINVAL, "Cannot seek before start.")
            self._stream.seek(pos, os.SEEK_CUR)
            self._rem_length -= pos
        elif whence == os.SEEK_END:
            if pos > 0:
                raise IOError(errno.EINVAL, "Cannot seek past end.")
            else:
                if self._length + pos < 0:
                    raise IOError(errno.EINVAL, "Cannot seek before start.")
                newpos = self._offset + self._length + pos
                self._stream.seek(newpos, os.SEEK_SET)
                self._rem_length = 0 - pos
class openerp_dav_handler(dav_interface):
    """
    This class models a OpenERP interface for the DAV server
    """
    # Advertise only the standard 'DAV:' property set / namespace by
    # default; individual document nodes can add more via get_dav_props().
    PROPS={'DAV:': dav_interface.PROPS['DAV:'],}
    M_NS={ "DAV:" : dav_interface.M_NS['DAV:'],}
    def __init__(self, parent, verbose=False):
        # parent: the HTTP handler owning this interface; it provides
        # logging, the auth provider and the base URI.
        self.db_name_list=[]
        self.parent = parent
        self.baseuri = parent.baseuri
        self.verbose = verbose
    def get_propnames(self, uri):
        """Return the DAV property names available at *uri*.

        For the root level (no database in the path) only the static
        PROPS set is returned; otherwise the resolved node may contribute
        additional per-resource properties.
        """
        props = self.PROPS
        self.parent.log_message('get propnames: %s' % uri)
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not dbname:
            if cr: cr.close()
            # TODO: maybe limit props for databases..?
            return props
        node = self.uri2object(cr, uid, pool, uri2)
        if node:
            props = dict_merge2(props, node.get_dav_props(cr))
        cr.close()
        return props
    def _try_function(self, funct, args, opname='run function', cr=None,
                        default_exc=DAV_Forbidden):
        """ Try to run a function, and properly convert exceptions to DAV ones.

            @param funct callable to invoke
            @param args tuple of positional arguments for funct
            @param opname the name of the operation being performed (for logs)
            @param cr if given, the cursor to close at exceptions
            @param default_exc DAV exception class raised for generic failures
        """
        try:
            return funct(*args)
        except DAV_Error:
            # Already a proper WebDAV error: propagate untouched.
            if cr: cr.close()
            raise
        except NotImplementedError, e:
            if cr: cr.close()
            import traceback
            self.parent.log_error("Cannot %s: %s", opname, str(e))
            self.parent.log_message("Exc: %s",traceback.format_exc())
            # see par 9.3.1 of rfc
            raise DAV_Error(403, str(e) or 'Not supported at this path.')
        except EnvironmentError, err:
            if cr: cr.close()
            import traceback
            self.parent.log_error("Cannot %s: %s", opname, err.strerror)
            self.parent.log_message("Exc: %s",traceback.format_exc())
            raise default_exc(err.strerror)
        except Exception, e:
            import traceback
            if cr: cr.close()
            self.parent.log_error("Cannot %s: %s", opname, str(e))
            self.parent.log_message("Exc: %s",traceback.format_exc())
            raise default_exc("Operation failed.")
    def _get_dav_lockdiscovery(self, uri):
        """ We raise that so that the node API is used """
        # DAV_NotFound here means "no interface-level value"; lock
        # discovery is then answered by the node layer instead.
        raise DAV_NotFound
    def _get_dav_supportedlock(self, uri):
        """ We raise that so that the node API is used """
        # Same deferral as _get_dav_lockdiscovery: the node layer answers.
        raise DAV_NotFound
    def match_prop(self, uri, match, ns, propname):
        """Return True if property *ns*:*propname* at *uri* equals *match*.

        Known namespaces are answered at the interface level; everything
        else is delegated to the node's match_dav_eprop().
        """
        if self.M_NS.has_key(ns):
            return match == dav_interface.get_prop(self, uri, ns, propname)
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not dbname:
            if cr: cr.close()
            raise DAV_NotFound
        node = self.uri2object(cr, uid, pool, uri2)
        if not node:
            cr.close()
            raise DAV_NotFound
        res = node.match_dav_eprop(cr, match, ns, propname)
        cr.close()
        return res
    def prep_http_options(self, uri, opts):
        """see HttpOptions._prep_OPTIONS

        Merge the node's per-resource http_options (if any) into a copy
        of the server defaults; *opts* itself is never modified.
        """
        self.parent.log_message('get options: %s' % uri)
        cr, uid, pool, dbname, uri2 = self.get_cr(uri, allow_last=True)
        if not dbname:
            if cr: cr.close()
            return opts
        node = self.uri2object(cr, uid, pool, uri2[:])
        if not node:
            if cr: cr.close()
            return opts
        else:
            if hasattr(node, 'http_options'):
                ret = opts.copy()
                for key, val in node.http_options.items():
                    # Normalize scalar values to lists before extending.
                    if isinstance(val, basestring):
                        val = [val, ]
                    if key in ret:
                        ret[key] = ret[key][:]    # copy the orig. array
                    else:
                        ret[key] = []
                    ret[key].extend(val)
                self.parent.log_message('options: %s' % ret)
            else:
                ret = opts
        cr.close()
        return ret
def reduce_useragent(self):
ua = self.parent.headers.get('User-Agent', False)
ctx = {}
if ua:
if 'iPhone' in ua:
ctx['DAV-client'] = 'iPhone'
elif 'Konqueror' in ua:
ctx['DAV-client'] = 'GroupDAV'
return ctx
    def get_prop(self, uri, ns, propname):
        """ return the value of a given property

            uri -- uri of the object to get the property of
            ns -- namespace of the property
            propname -- name of the property
        """
        if self.M_NS.has_key(ns):
            try:
                # if it's not in the interface class, a "DAV:" property
                # may be at the node class. So shouldn't give up early.
                return dav_interface.get_prop(self, uri, ns, propname)
            except DAV_NotFound:
                pass
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not dbname:
            if cr: cr.close()
            raise DAV_NotFound
        try:
            node = self.uri2object(cr, uid, pool, uri2)
            if not node:
                raise DAV_NotFound
            res = node.get_dav_eprop(cr, ns, propname)
        finally:
            # The cursor is closed on both success and failure paths.
            cr.close()
        return res
    def get_db(self, uri, rest_ret=False, allow_last=False):
        """Parse the uri and get the dbname and the rest.

            Db name should be the first component in the unix-like
            path supplied in uri.

            @param rest_ret Instead of the db_name, return (db_name, rest),
                where rest is the remaining path
            @param allow_last If the dbname is the last component in the
                path, allow it to be resolved. The default False value means
                we will not attempt to use the db, unless there is more
                path.
            @return db_name or (dbname, rest) depending on rest_ret,
                will return dbname=False when component is not found.
        """
        uri2 = self.uri2local(uri)
        if uri2.startswith('/'):
            uri2 = uri2[1:]
        names=uri2.split('/',1)
        db_name=False
        rest = None
        # ll is the minimum number of components required past the db name.
        if allow_last:
            ll = 0
        else:
            ll = 1
        if len(names) > ll and names[0]:
            db_name = names[0]
            names = names[1:]
        if rest_ret:
            if len(names):
                rest = names[0]
            return db_name, rest
        return db_name
def urijoin(self,*ajoin):
""" Return the base URI of this request, or even join it with the
ajoin path elements
"""
return self.parent.get_baseuri(self) + '/'.join(ajoin)
    @memoize(4)
    def _all_db_list(self):
        """return all databases who have module document_webdav installed"""
        # Memoized (up to 4 entries) because this runs one SQL query per
        # database on every call.
        s = netsvc.ExportService.getService('db')
        result = s.exp_list()
        self.db_name_list=[]
        for db_name in result:
            cr = None
            try:
                db = sql_db.db_connect(db_name)
                cr = db.cursor()
                cr.execute("SELECT id FROM ir_module_module WHERE name = 'document_webdav' AND state='installed' ")
                res=cr.fetchone()
                if res and len(res):
                    self.db_name_list.append(db_name)
            except Exception, e:
                # Best-effort: a broken database must not hide the others.
                self.parent.log_error("Exception in db list: %s" % e)
            finally:
                if cr:
                    cr.close()
        return self.db_name_list
def db_list(self, uri):
# import pudb;pudb.set_trace()
u = urlparse.urlsplit(uri)
h = u.hostname
d = h.split('.')[0]
r = openerp.tools.config['dbfilter'].replace('%h', h).replace('%d',d)
dbs = [i for i in self._all_db_list() if re.match(r, i)]
return dbs
    def get_childs(self,uri, filters=None):
        """ return the child objects as self.baseuris for the given URI

        At the root (no database in the path) the children are the
        available databases.  When *filters* carries DAV: href elements
        (REPORT requests), only hrefs under the request path are echoed
        back; otherwise the node's children are listed.
        """
        self.parent.log_message('get children: %s' % uri)
        cr, uid, pool, dbname, uri2 = self.get_cr(uri, allow_last=True)
        if not dbname:
            if cr: cr.close()
            res = map(lambda x: self.urijoin(x), self.db_list(uri))
            return res
        result = []
        node = self.uri2object(cr, uid, pool, uri2[:])
        try:
            if not node:
                raise DAV_NotFound2(uri2)
            else:
                fp = node.full_path()
                if fp and len(fp):
                    fp = '/'.join(fp)
                    self.parent.log_message('children for: %s' % fp)
                else:
                    fp = None
                domain = None
                if filters:
                    domain = node.get_domain(cr, filters)
                    if hasattr(filters, 'getElementsByTagNameNS'):
                        hrefs = filters.getElementsByTagNameNS('DAV:', 'href')
                        if hrefs:
                            ul = self.parent.davpath + self.uri2local(uri)
                            for hr in hrefs:
                                # Collect the text content of the href node.
                                turi = ''
                                for tx in hr.childNodes:
                                    if tx.nodeType == hr.TEXT_NODE:
                                        turi += tx.data
                                if not turi.startswith('/'):
                                    # it may be an absolute URL, decode to the
                                    # relative part, because ul is relative, anyway
                                    uparts=urlparse.urlparse(turi)
                                    turi=uparts[2]
                                    if uparts[3]:
                                        turi += ';' + uparts[3]
                                if turi.startswith(ul):
                                    result.append( turi[len(self.parent.davpath):])
                                else:
                                    self.parent.log_error("ignore href %s because it is not under request path %s", turi, ul)
                            return result
                            # We don't want to continue with the children found below
                            # Note the exceptions and that 'finally' will close the
                            # cursor
                for d in node.children(cr, domain):
                    self.parent.log_message('child: %s' % d.path)
                    if fp:
                        result.append( self.urijoin(dbname,fp,d.path) )
                    else:
                        result.append( self.urijoin(dbname,d.path) )
        except DAV_Error:
            raise
        except Exception, e:
            self.parent.log_error("Cannot get_children: "+str(e)+".")
            raise
        finally:
            if cr: cr.close()
        return result
def uri2local(self, uri):
uparts=urlparse.urlparse(uri)
reluri=uparts[2]
if uparts[3]:
reluri += ';'+uparts[3]
if reluri and reluri[-1]=="/":
reluri=reluri[:-1]
return reluri
    #
    # (historical note: an earlier signature took a 'pos' argument,
    # where -1 meant the parent of the uri; get_cr() no longer does)
    #
    def get_cr(self, uri, allow_last=False):
        """ Split the uri, grab a cursor for that db

            @return (cr, uid, pool, dbname, uri_components); cr/uid/pool
                are None and dbname is False when the path carries no
                database component.
        """
        pdb = self.parent.auth_provider.last_auth
        dbname, uri2 = self.get_db(uri, rest_ret=True, allow_last=allow_last)
        uri2 = (uri2 and uri2.split('/')) or []
        if not dbname:
            return None, None, None, False, uri2
        # if dbname was in our uri, we should have authenticated
        # against that.
        assert pdb == dbname, " %s != %s" %(pdb, dbname)
        res = self.parent.auth_provider.auth_creds.get(dbname, False)
        if not res:
            # No cached credentials yet: trigger authentication now.
            self.parent.auth_provider.checkRequest(self.parent, uri, dbname)
            res = self.parent.auth_provider.auth_creds[dbname]
        user, passwd, dbn2, uid = res
        db,pool = pooler.get_db_and_pool(dbname)
        cr = db.cursor()
        return cr, uid, pool, dbname, uri2
def uri2object(self, cr, uid, pool, uri):
if not uid:
return None
context = self.reduce_useragent()
return pool.get('document.directory').get_object(cr, uid, uri, context=context)
    def get_data(self,uri, rrange=None):
        """Return the content stream of the node at *uri*.

        @param rrange optional (start, end) byte range, end inclusive;
            when given, the stream is wrapped in a BoundStream2.
        """
        self.parent.log_message('GET: %s' % uri)
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        try:
            if not dbname:
                raise DAV_Error, 409
            node = self.uri2object(cr, uid, pool, uri2)
            if not node:
                raise DAV_NotFound2(uri2)
            # TODO: if node is a collection, for some specific set of
            # clients ( web browsers; available in node context),
            # we may return a pseydo-html page with the directory listing.
            try:
                res = node.open_data(cr,'r')
                if rrange:
                    assert isinstance(rrange, (tuple,list))
                    start, end = map(long, rrange)
                    if not start:
                        start = 0
                    assert start >= 0
                    if end and end < start:
                        self.parent.log_error("Invalid range for data: %s-%s" %(start, end))
                        raise DAV_Error(416, "Invalid range for data.")
                    if end:
                        if end >= res.size():
                            raise DAV_Error(416, "Requested data exceeds available size.")
                        # HTTP ranges are inclusive, hence the +1.
                        length = (end + 1) - start
                    else:
                        length = res.size() - start
                    res = BoundStream2(res, offset=start, length=length)
            except TypeError,e:
                # for the collections that return this error, the DAV standard
                # says we'd better just return 200 OK with empty data
                return ''
            except IndexError,e :
                self.parent.log_error("GET IndexError: %s", str(e))
                raise DAV_NotFound2(uri2)
            except Exception,e:
                import traceback
                self.parent.log_error("GET exception: %s",str(e))
                self.parent.log_message("Exc: %s", traceback.format_exc())
                raise DAV_Error, 409
            return res
        finally:
            if cr: cr.close()
    @memoize(CACHE_SIZE)
    def _get_dav_resourcetype(self, uri):
        """ return type of object """
        self.parent.log_message('get RT: %s' % uri)
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        try:
            if not dbname:
                # Root and database level both behave as plain collections.
                return COLLECTION
            node = self.uri2object(cr, uid, pool, uri2)
            if not node:
                raise DAV_NotFound2(uri2)
            try:
                return node.get_dav_resourcetype(cr)
            except NotImplementedError:
                # Fall back on the node's generic 'type' attribute.
                if node.type in ('collection','database'):
                    return ('collection', 'DAV:')
                return ''
        finally:
            if cr: cr.close()
    def _get_dav_displayname(self,uri):
        """Return the human-readable display name of the object at *uri*.

        :raises DAV_NotFound2: when *uri* does not resolve to a node
        """
        self.parent.log_message('get DN: %s' % uri)
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not dbname:
            if cr: cr.close()
            # at root, dbname, just return the last component
            # of the path.
            if uri2 and len(uri2) < 2:
                return uri2[-1]
            return ''
        node = self.uri2object(cr, uid, pool, uri2)
        if not node:
            if cr: cr.close()
            raise DAV_NotFound2(uri2)
        cr.close()
        return node.displayname
    @memoize(CACHE_SIZE)
    def _get_dav_getcontentlength(self, uri):
        """ return the content length of an object, as a string.

        Returns "0" for the root (no database addressed) and for nodes
        without a content_length.
        """
        self.parent.log_message('get length: %s' % uri)
        result = 0
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not dbname:
            if cr: cr.close()
            return str(result)
        node = self.uri2object(cr, uid, pool, uri2)
        if not node:
            if cr: cr.close()
            raise DAV_NotFound2(uri2)
        result = node.content_length or 0
        cr.close()
        # DAV properties are exchanged as strings.
        return str(result)
    @memoize(CACHE_SIZE)
    def _get_dav_getetag(self,uri):
        """ return the ETag of an object, as a string.

        Returns '0' for the root (no database addressed).
        """
        self.parent.log_message('get etag: %s' % uri)
        result = 0
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not dbname:
            if cr: cr.close()
            return '0'
        node = self.uri2object(cr, uid, pool, uri2)
        if not node:
            cr.close()
            raise DAV_NotFound2(uri2)
        # _try_function translates node errors into DAV errors and takes
        # care of rolling back the cursor on failure.
        result = self._try_function(node.get_etag ,(cr,), "etag %s" %uri, cr=cr)
        cr.close()
        return str(result)
    @memoize(CACHE_SIZE)
    def get_lastmodified(self, uri):
        """ return the last modified date of the object, as epoch seconds.

        For the root (no database addressed) the current time is returned.
        """
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not dbname:
            return time.time()
        try:
            node = self.uri2object(cr, uid, pool, uri2)
            if not node:
                raise DAV_NotFound2(uri2)
            return _str2time(node.write_date)
        finally:
            if cr: cr.close()
    def _get_dav_getlastmodified(self,uri):
        """ return the last modified date of a resource, formatted as an
        RFC-1123 HTTP date string (e.g. "Mon, 01 Jan 2001 00:00:00 GMT").
        """
        d=self.get_lastmodified(uri)
        # format it. Note that we explicitly set the day, month names from
        # an array, so that strftime() doesn't use its own locale-aware
        # strings.
        gmt = time.gmtime(d)
        # The doubled %%s survive strftime() and are filled in afterwards.
        # NOTE(review): month_names is indexed with tm_mon (1-12), so it is
        # presumably a 13-element table with a dummy first entry -- confirm.
        return time.strftime("%%s, %d %%s %Y %H:%M:%S GMT", gmt ) % \
            (day_names[gmt.tm_wday], month_names[gmt.tm_mon])
    @memoize(CACHE_SIZE)
    def get_creationdate(self, uri):
        """ return the creation date of the object, as epoch seconds.

        :raises DAV_Error: 409 when no database is addressed
        :raises DAV_NotFound2: when *uri* does not resolve to a node
        """
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not dbname:
            raise DAV_Error, 409
        try:
            node = self.uri2object(cr, uid, pool, uri2)
            if not node:
                raise DAV_NotFound2(uri2)
            return _str2time(node.create_date)
        finally:
            if cr: cr.close()
    @memoize(CACHE_SIZE)
    def _get_dav_getcontenttype(self,uri):
        """Return the MIME content type of the resource at *uri*.

        The root (no database addressed) is reported as a directory.
        """
        self.parent.log_message('get contenttype: %s' % uri)
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not dbname:
            if cr: cr.close()
            return 'httpd/unix-directory'
        try:
            node = self.uri2object(cr, uid, pool, uri2)
            if not node:
                raise DAV_NotFound2(uri2)
            result = str(node.mimetype)
            return result
            #raise DAV_NotFound, 'Could not find %s' % path
        finally:
            if cr: cr.close()
    def mkcol(self,uri):
        """ create a new collection
            see par. 9.3 of rfc4918

        :return: True on success
        :raises DAV_Error: 409 for a nameless collection, missing database
            or missing parent; 405 if the path already exists
        """
        self.parent.log_message('MKCOL: %s' % uri)
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not uri2[-1]:
            # Trailing slash produced an empty last component.
            if cr: cr.close()
            raise DAV_Error(409, "Cannot create nameless collection.")
        if not dbname:
            if cr: cr.close()
            raise DAV_Error, 409
        # Resolve the parent of the collection to be created.
        node = self.uri2object(cr,uid,pool, uri2[:-1])
        if not node:
            cr.close()
            raise DAV_Error(409, "Parent path %s does not exist" % uri2[:-1])
        # RFC 4918: MKCOL must fail if the target already exists.
        nc = node.child(cr, uri2[-1])
        if nc:
            cr.close()
            raise DAV_Error(405, "Path already exists.")
        self._try_function(node.create_child_collection, (cr, uri2[-1]),
                "create col %s" % uri2[-1], cr=cr)
        cr.commit()
        cr.close()
        return True
    def put(self, uri, data, content_type=None):
        """ put the object into the filesystem

        Creates a new child resource under the parent of *uri* when the
        target does not exist yet, otherwise overwrites the existing
        node's data.

        :param uri: DAV path of the target resource
        :param data: raw content to store (may be empty)
        :param content_type: declared MIME type (currently unused here)
        :return: (location-url, etag) tuple for a newly created resource,
            or None when an existing resource was overwritten
        :raises DAV_Forbidden: when no database is addressed
        :raises DAV_NotFound: when the parent folder does not exist
        :raises DAV_Error: 400 when the resource could not be created
        """
        self.parent.log_message('Putting %s (%d), %s'%( misc.ustr(uri), data and len(data) or 0, content_type))
        cr, uid, pool,dbname, uri2 = self.get_cr(uri)
        if not dbname:
            if cr: cr.close()
            raise DAV_Forbidden
        try:
            node = self.uri2object(cr, uid, pool, uri2[:])
        except Exception:
            # Target missing: treat as a creation request below.
            node = False
        objname = misc.ustr(uri2[-1])
        ret = None
        if not node:
            dir_node = self.uri2object(cr, uid, pool, uri2[:-1])
            if not dir_node:
                cr.close()
                raise DAV_NotFound('Parent folder not found.')
            newchild = self._try_function(dir_node.create_child, (cr, objname, data),
                    "create %s" % objname, cr=cr)
            if not newchild:
                cr.commit()
                cr.close()
                raise DAV_Error(400, "Failed to create resource.")
            uparts=urlparse.urlparse(uri)
            fileloc = '/'.join(newchild.full_path())
            if isinstance(fileloc, unicode):
                fileloc = fileloc.encode('utf-8')
            # the uri we get is a mangled one, where the davpath has been removed
            davpath = self.parent.get_davpath()
            surl = '%s://%s' % (uparts[0], uparts[1])
            uloc = urllib.quote(fileloc)
            hurl = False
            # Only advertise a Location header when it differs from the
            # URI the client asked for.
            if uri != ('/'+uloc) and uri != (surl + '/' + uloc):
                hurl = '%s%s/%s/%s' %(surl, davpath, dbname, uloc)
            etag = False
            try:
                etag = str(newchild.get_etag(cr))
            except Exception, e:
                # Best effort: a missing etag should not fail the PUT.
                self.parent.log_error("Cannot get etag for node: %s" % e)
            ret = (str(hurl), etag)
        else:
            self._try_function(node.set_data, (cr, data), "save %s" % objname, cr=cr)
        cr.commit()
        cr.close()
        return ret
    def rmcol(self,uri):
        """ delete a collection

        :return: 204 (HTTP No Content) on success
        :raises DAV_Error: 409 when no database is addressed
        """
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not dbname:
            if cr: cr.close()
            raise DAV_Error, 409
        node = self.uri2object(cr, uid, pool, uri2)
        self._try_function(node.rmcol, (cr,), "rmcol %s" % uri, cr=cr)
        cr.commit()
        cr.close()
        return 204
    def rm(self,uri):
        """Delete the single (non-collection) resource at *uri*.

        :return: 204 (HTTP No Content) on success
        :raises DAV_Error: 409 when no database is addressed
        :raises OSError: when the node refuses the removal
        """
        cr, uid, pool,dbname, uri2 = self.get_cr(uri)
        if not dbname:
            if cr: cr.close()
            raise DAV_Error, 409
        node = self.uri2object(cr, uid, pool, uri2)
        res = self._try_function(node.rm, (cr,), "rm %s" % uri, cr=cr)
        if not res:
            if cr: cr.close()
            raise OSError(1, 'Invalid Action!')
        cr.commit()
        cr.close()
        return 204
### DELETE handlers (examples)
### (we use the predefined methods in davcmd instead of doing
### a rm directly
###
def delone(self, uri):
""" delete a single resource
You have to return a result dict of the form
uri:error_code
or None if everything's ok
"""
if uri[-1]=='/':uri=uri[:-1]
res=delone(self,uri)
# parent='/'.join(uri.split('/')[:-1])
return res
def deltree(self, uri):
""" delete a collection
You have to return a result dict of the form
uri:error_code
or None if everything's ok
"""
if uri[-1]=='/':uri=uri[:-1]
res=deltree(self, uri)
# parent='/'.join(uri.split('/')[:-1])
return res
###
### MOVE handlers (examples)
###
def moveone(self, src, dst, overwrite):
""" move one resource with Depth=0
an alternative implementation would be
result_code=201
if overwrite:
result_code=204
r=os.system("rm -f '%s'" %dst)
if r: return 412
r=os.system("mv '%s' '%s'" %(src,dst))
if r: return 412
return result_code
(untested!). This would not use the davcmd functions
and thus can only detect errors directly on the root node.
"""
res=moveone(self, src, dst, overwrite)
return res
def movetree(self, src, dst, overwrite):
""" move a collection with Depth=infinity
an alternative implementation would be
result_code=201
if overwrite:
result_code=204
r=os.system("rm -rf '%s'" %dst)
if r: return 412
r=os.system("mv '%s' '%s'" %(src,dst))
if r: return 412
return result_code
(untested!). This would not use the davcmd functions
and thus can only detect errors directly on the root node"""
res=movetree(self, src, dst, overwrite)
return res
###
### COPY handlers
###
def copyone(self, src, dst, overwrite):
""" copy one resource with Depth=0
an alternative implementation would be
result_code=201
if overwrite:
result_code=204
r=os.system("rm -f '%s'" %dst)
if r: return 412
r=os.system("cp '%s' '%s'" %(src,dst))
if r: return 412
return result_code
(untested!). This would not use the davcmd functions
and thus can only detect errors directly on the root node.
"""
res=copyone(self, src, dst, overwrite)
return res
def copytree(self, src, dst, overwrite):
""" copy a collection with Depth=infinity
an alternative implementation would be
result_code=201
if overwrite:
result_code=204
r=os.system("rm -rf '%s'" %dst)
if r: return 412
r=os.system("cp -r '%s' '%s'" %(src,dst))
if r: return 412
return result_code
(untested!). This would not use the davcmd functions
and thus can only detect errors directly on the root node"""
res=copytree(self, src, dst, overwrite)
return res
###
### copy methods.
### This methods actually copy something. low-level
### They are called by the davcmd utility functions
### copytree and copyone (not the above!)
### Look in davcmd.py for further details.
###
def copy(self, src, dst):
src=urllib.unquote(src)
dst=urllib.unquote(dst)
ct = self._get_dav_getcontenttype(src)
data = self.get_data(src)
self.put(dst, data, ct)
return 201
def copycol(self, src, dst):
""" copy a collection.
As this is not recursive (the davserver recurses itself)
we will only create a new directory here. For some more
advanced systems we might also have to copy properties from
the source to the destination.
"""
return self.mkcol(dst)
def exists(self, uri):
""" test if a resource exists """
result = False
cr, uid, pool,dbname, uri2 = self.get_cr(uri)
if not dbname:
if cr: cr.close()
return True
try:
node = self.uri2object(cr, uid, pool, uri2)
if node:
result = True
except Exception:
pass
cr.close()
return result
    def unlock(self, uri, token):
        """ Unlock a resource from that token

        @return True if unlocked, False if no lock existed, Exceptions

        :raises DAV_Error: 409 when no database is addressed, 400 when the
            node does not support locking
        """
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        if not dbname:
            if cr: cr.close()
            raise DAV_Error, 409
        node = self.uri2object(cr, uid, pool, uri2)
        try:
            node_fn = node.dav_unlock
        except AttributeError:
            # perhaps the node doesn't support locks
            cr.close()
            raise DAV_Error(400, 'No locks for this resource.')
        res = self._try_function(node_fn, (cr, token), "unlock %s" % uri, cr=cr)
        cr.commit()
        cr.close()
        return res
    def lock(self, uri, lock_data):
        """ Lock (may create) resource.
            Data is a dict, may contain:
                depth, token, refresh, lockscope, locktype, owner

        If the resource does not exist yet it is created empty first, as
        mandated by RFC 4918 par. 9.10.4.

        :return: (created, data, token) where *created* tells whether the
            resource was newly created, *data* is the XML lock response
            and *token* the lock token
        :raises DAV_Error: 409 (no database), 400 (creation failed or no
            lock support), 423 (already locked)
        """
        cr, uid, pool, dbname, uri2 = self.get_cr(uri)
        created = False
        if not dbname:
            if cr: cr.close()
            raise DAV_Error, 409
        try:
            node = self.uri2object(cr, uid, pool, uri2[:])
        except Exception:
            # Target missing: we will lock-and-create below.
            node = False
        objname = misc.ustr(uri2[-1])
        if not node:
            dir_node = self.uri2object(cr, uid, pool, uri2[:-1])
            if not dir_node:
                cr.close()
                raise DAV_NotFound('Parent folder not found.')
            # We create a new node (file) but with empty data=None,
            # as in RFC4918 p. 9.10.4
            node = self._try_function(dir_node.create_child, (cr, objname, None),
                    "create %s" % objname, cr=cr)
            if not node:
                cr.commit()
                cr.close()
                raise DAV_Error(400, "Failed to create resource.")
            created = True
        try:
            node_fn = node.dav_lock
        except AttributeError:
            # perhaps the node doesn't support locks
            cr.close()
            raise DAV_Error(400, 'No locks for this resource.')
        # Obtain the lock on the node
        lres, pid, token = self._try_function(node_fn, (cr, lock_data), "lock %s" % objname, cr=cr)
        if not lres:
            cr.commit()
            cr.close()
            raise DAV_Error(423, "Resource already locked.")
        assert isinstance(lres, list), 'lres: %s' % repr(lres)
        try:
            data = mk_lock_response(self, uri, lres)
            cr.commit()
        except Exception:
            cr.close()
            raise
        cr.close()
        return created, data, token
@memoize(CACHE_SIZE)
def is_collection(self, uri):
""" test if the given uri is a collection """
cr, uid, pool, dbname, uri2 = self.get_cr(uri)
try:
if not dbname:
return True
node = self.uri2object(cr,uid,pool, uri2)
if not node:
raise DAV_NotFound2(uri2)
if node.type in ('collection','database'):
return True
return False
finally:
if cr: cr.close()
#eof
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
LSST-nonproject/obs_omegacam | python/lsst/obs/decam/ingest.py | 1 | 1674 | #
# LSST Data Management System
# Copyright 2012 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
from lsst.pipe.tasks.ingest import ParseTask
def parseExtname(md):
    """Split the EXTNAME header (e.g. "N4", "S31") into (side, ccd).

    Returns ("X", 0) when no EXTNAME is present.  The leading character is
    used as the side only when it is 'N' or 'S'; the remainder is always
    parsed as the CCD number.
    """
    if not md.exists("EXTNAME"):
        return "X", 0
    extname = md.get("EXTNAME").strip()
    side = extname[0] if extname[0] in "NS" else "X"
    return side, int(extname[1:])
class DecamParseTask(ParseTask):
    """Translate DECam FITS metadata into ingest registry columns."""

    def translate_side(self, md):
        """Return the focal-plane side ('N', 'S' or 'X') from EXTNAME."""
        side, ccd = parseExtname(md)
        return side

    def translate_ccd(self, md):
        """Return the CCD number parsed from EXTNAME."""
        side, ccd = parseExtname(md)
        return ccd

    def translate_object(self, md):
        """Return the target name from OBJECT, falling back to OBSTYPE,
        or "UNKNOWN" when neither is usable.
        """
        obj = None
        if md.exists("OBJECT"):
            obj = md.get("OBJECT").strip()
        # Note the parentheses: the original condition
        #   obj is None or len(obj) == 0 and md.exists("OBSTYPE")
        # parsed as `obj is None or (...)`, so a missing OBJECT with no
        # OBSTYPE header fell through to md.get("OBSTYPE") and failed.
        if (obj is None or len(obj) == 0) and md.exists("OBSTYPE"):
            obj = md.get("OBSTYPE").strip()
        if obj is None or len(obj) == 0:
            return "UNKNOWN"
        return obj
| gpl-3.0 |
bigswitch/sample-scripts | bmf/out_of_band.py | 1 | 8587 | #!/usr/bin/env python
# Big Monitoring Fabric Out-of-band config script
import requests
import json
import sys
requests.packages.urllib3.disable_warnings()
class Controller(object):
    """
    controller version 5.x

    Thin REST client for a Big Monitoring Fabric controller.  Logs in on
    construction (obtaining a session cookie) and can be used as a context
    manager so the session is released on exit.
    """
    def __init__(self, controller_ip, username, password):
        # Session cookie is filled in by get_cookie() below.
        self.cookie = ""
        self.bigtap_path = '/api/v1/data/controller/applications/bigtap/'
        self.controller_path = '/api/v1/data/controller/core/'
        self.username = username
        self.password = password
        self.controller_ip = controller_ip
        # Authenticate immediately; failures surface here.
        self.get_cookie()
    def __enter__(self):
        return self
    def __exit__(self, exception_type, exception_value, traceback):
        # Always release the session on context exit, even on error.
        self.release_cookie()
    def controller_request(self, method, path, data="", dry_run=False):
        """Issue one HTTPS request against the controller.

        Returns parsed JSON when the response has a body, otherwise the
        HTTP status code.  With dry_run=True, only prints the request and
        returns None.
        """
        if not self.controller_ip:
            print 'You must set controller_ip to the IP address of your controller'
        controller_url = "https://%s:443" % self.controller_ip
        # append path to the controller url, e.g. "https://192.168.23.98:8443" + "/api/v1/auth/login"
        url = controller_url + path
        # if a cookie exists then use it in the header, otherwise create a header without a cookie
        if self.cookie:
            session_cookie = 'session_cookie=%s' % self.cookie
            headers = {"content-type": "application/json", 'Cookie': session_cookie}
        else:
            headers = {"content-type": "application/json"}
        if dry_run:
            print 'METHOD ', method, ' URL ', url, ' DATA ', data, ' HEADERS ', headers
            return None
        else:
            # submit the request
            response = requests.request(method, url, data=data, headers=headers, verify=False)
            # if content exists then return it, otherwise return the HTTP status code
            if response.content:
                return json.loads(response.content)
            else:
                return response.status_code
    def make_request(self, verb, path, data):
        """Issue a request and require an HTTP 200; exits the process on
        any other result (this is a one-shot CLI helper, not a library).
        """
        result = self.controller_request(verb, path, data=data)
        if result == 200:
            return True
        print result
        sys.exit(1)
    def get_cookie(self):
        """Log in and store the session cookie for subsequent requests."""
        method = 'POST'
        path = "/auth/login"
        data = '{"user":"%s", "password":"%s"}' % (self.username, self.password)
        json_content = self.controller_request(method, path, data)
        self.cookie = json_content['session_cookie']
        print 'Login to %s successful' % self.controller_ip
    def release_cookie(self):
        """Log out by deleting the session associated with our cookie."""
        method = "DELETE"
        path = '/api/v1/data/controller/core/aaa/session[auth-token="%s"]' % self.cookie
        status_code = self.controller_request(method, path)
        if status_code == 200:
            print 'Logout successful'
    def get_controller_version(self):
        """Fetch the controller version record.

        NOTE(review): dry_run=True makes controller_request print and
        return None, so this method always returns None -- looks like a
        leftover debugging flag; confirm before relying on it.
        """
        method = 'GET'
        path = '/rest/v1/system/version'
        data = '{}'
        json_content = self.controller_request(method, path, data=data, dry_run=True)
        return json_content[0] if type(json_content) == list else None
    def configure_bigtap_interface_role(self, switch_dpid, interface, name, role):
        """Assign a BigTap role (filter/delivery/service) to a switch port."""
        path = self.bigtap_path+ 'interface-config[interface="%s"][switch="%s"]' % (interface, switch_dpid)
        data = '{"interface": "%s", "switch": "%s", "role": "%s", "name": "%s"}' % (interface, switch_dpid, role, name)
        self.make_request('PUT', path, data)
    def add_policy(self, specs):
        """Create a BigTap policy in the admin-view from a specs dict.

        *specs* must provide: name, action, priority, duration,
        start_time, delivery_packet_count, interfaces (dict mapping
        interface-group name to role) and rules (list of rule dicts).
        """
        try:
            name = specs['name']
            action = specs['action']
            priority = specs['priority']
            duration = specs['duration']
            start_time = specs['start_time']
            delivery_packet_count = specs['delivery_packet_count']
            interfaces = specs['interfaces']
            rules = specs['rules']
        except KeyError, e:
            print "policy specs error %s" % str(e)
            sys.exit(1)
        path = self.bigtap_path+ 'view[name="admin-view"]/policy[name="%s"]' % name
        data = '{"name": "%s"}' % name
        self.make_request('PUT', path, data)
        path = self.bigtap_path+ 'view[name="admin-view"]/policy[name="%s"]' % name
        data = '{"action": "%s"}' % action
        self.make_request('PATCH', path, data)
        path = self.bigtap_path+ 'view[name="admin-view"]/policy[name="%s"]' % name
        data = '{"priority": %s}' % priority
        self.make_request('PATCH', path, data)
        # NOTE(review): the values read from specs above are clobbered
        # here, so duration/delivery_packet_count are always sent as 0 --
        # confirm whether this override is intentional.
        duration = 0
        delivery_packet_count = 0
        path = self.bigtap_path+ 'view[name="admin-view"]/policy[name="%s"]' % name
        data = '{"duration": %s, "start-time": %s, "delivery-packet-count": %s}' % (duration, start_time, delivery_packet_count)
        self.make_request('PATCH', path, data)
        index = 5
        for interface in interfaces:
            role = interfaces[interface]
            path = self.bigtap_path+ 'view[name="admin-view"]/policy[name="%s"]/%s-group[name="%s"]' % (name, role, interface)
            data = '{"name": "%s"}' % interface
            self.make_request('PUT', path, data)
            index += 1
        for rule in rules:
            path = self.bigtap_path+ 'view[name="admin-view"]/policy[name="%s"]/rule[sequence=%s]' % (name, rule['sequence'])
            data = json.dumps(rule)
            self.make_request('PUT', path, data)
            index += 1
    def create_tunnel(self, specs, dry_run=False):
        """Create a controller tunnel interface from a specs dict.

        loopback_interface is only required for 'bidirectional' or
        'transmit-only' tunnels.
        """
        try:
            switch_dpid = specs['switch_dpid']
            tunnel_name = specs['tunnel_name']
            destination_ip = specs['destination_ip']
            source_ip = specs['source_ip']
            mask = specs['mask']
            gateway_ip = specs['gateway_ip']
            tunnel_src_ip = specs['destination_ip']
            vpn_key = specs['vpn_key']
            encap_type = specs['encap_type']
            interface = specs['interface']
            direction = specs['direction']
            loopback_interface = ''
            if direction == 'bidirectional' or direction == 'transmit-only':
                loopback_interface = specs['loopback_interface']
        except KeyError, e:
            print "tunnel specs error %s" % str(e)
            sys.exit(1)
        path = self.controller_path+ 'switch[dpid="%s"]/interface[name="%s"]' % (switch_dpid, tunnel_name)
        data = '{"name": "%s"}' % tunnel_name
        self.make_request('PUT', path, data)
        path = self.controller_path+ 'switch[dpid="%s"]/interface[name="%s"]/ip-config' % (switch_dpid, tunnel_name)
        data = '{"destination-ip": "%s"}' % destination_ip
        self.make_request('PUT', path, data)
        path = self.controller_path+ 'switch[dpid="%s"]/interface[name="%s"]' % (switch_dpid, tunnel_name)
        data = '{"vpn-key": %s, "encap-type": "%s"}' % (vpn_key, encap_type)
        self.make_request('PATCH', path, data)
        path = self.controller_path+ 'switch[dpid="%s"]/interface[name="%s"]' % (switch_dpid, tunnel_name)
        data = '{"parent-interface": "%s"}' % interface
        self.make_request('PATCH', path, data)
        path = self.controller_path+ 'switch[dpid="%s"]/interface[name="%s"]' % (switch_dpid, tunnel_name)
        data = '{"direction": "%s", "loopback-interface": "%s", "type": "tunnel"}' % (direction, loopback_interface)
        self.make_request('PATCH', path, data)
        path = self.controller_path+ 'switch[dpid="%s"]/interface[name="%s"]/ip-config' % (switch_dpid, tunnel_name)
        data = '{"source-ip": "%s", "ip-mask": "%s", "gateway-ip": "%s"}' % (source_ip, mask, gateway_ip)
        self.make_request('PATCH', path, data)
    def create_ip_address_set(self, name, ip_type, dry_run=False):
        """Create a named (empty) IP address set of the given type."""
        path = self.bigtap_path+ 'ip-address-set[name="%s"]' % name
        data = '{"name": "%s"}' % name
        self.make_request('PUT', path, data)
        path = self.bigtap_path+ 'ip-address-set[name="%s"]' % name
        data = '{"ip-address-type": "%s"}' % ip_type
        self.make_request('PATCH', path, data)
    def add_ip_to_group(self, name, ip, mask, dry_run=False):
        """Add an ip/mask entry to the named IP address set."""
        path = self.bigtap_path+ 'ip-address-set[name="%s"]/address-mask-set[ip="%s"][ip-mask="%s"]' % (name, ip, mask)
        data = '{"ip": "%s", "ip-mask": "%s"}' % (ip, mask)
        self.make_request('PUT', path, data)
    def delete_ip_from_group(self, name, ip, mask, dry_run=False):
        """Remove an ip/mask entry from the named IP address set."""
        path = self.bigtap_path+ 'ip-address-set[name="%s"]/address-mask-set[ip="%s"][ip-mask="%s"]' % (name, ip, mask)
        data = '{}'
        self.make_request('DELETE', path, data)
| mit |
abdulhaq-e/django-rest-framework | tests/test_description.py | 79 | 3688 | # -- coding: utf-8 --
from __future__ import unicode_literals
from django.test import TestCase
from django.utils.encoding import python_2_unicode_compatible, smart_text
from rest_framework.compat import apply_markdown
from rest_framework.views import APIView
from .description import (
UTF8_TEST_DOCSTRING, ViewWithNonASCIICharactersInDocstring
)
# We check that docstrings get nicely un-indented.
DESCRIPTION = """an example docstring
====================
* list
* list
another header
--------------
code block
indented
# hash style header #"""
# If markdown is installed we also test it's working
# (and that our wrapped forces '=' to h2 and '-' to h3)
# We support markdown < 2.1 and markdown >= 2.1
MARKED_DOWN_lt_21 = """<h2>an example docstring</h2>
<ul>
<li>list</li>
<li>list</li>
</ul>
<h3>another header</h3>
<pre><code>code block
</code></pre>
<p>indented</p>
<h2 id="hash_style_header">hash style header</h2>"""
MARKED_DOWN_gte_21 = """<h2 id="an-example-docstring">an example docstring</h2>
<ul>
<li>list</li>
<li>list</li>
</ul>
<h3 id="another-header">another header</h3>
<pre><code>code block
</code></pre>
<p>indented</p>
<h2 id="hash-style-header">hash style header</h2>"""
class TestViewNamesAndDescriptions(TestCase):
    """Tests for APIView.get_view_name()/get_view_description().

    The MockView docstrings below are fixtures compared byte-for-byte
    against the module-level DESCRIPTION constant -- do not reformat them.
    """
    def test_view_name_uses_class_name(self):
        """
        Ensure view names are based on the class name.
        """
        class MockView(APIView):
            pass
        self.assertEqual(MockView().get_view_name(), 'Mock')
    def test_view_description_uses_docstring(self):
        """Ensure view descriptions are based on the docstring."""
        class MockView(APIView):
            """an example docstring
            ====================
            * list
            * list
            another header
            --------------
                code block
            indented
            # hash style header #"""
        self.assertEqual(MockView().get_view_description(), DESCRIPTION)
    def test_view_description_supports_unicode(self):
        """
        Unicode in docstrings should be respected.
        """
        self.assertEqual(
            ViewWithNonASCIICharactersInDocstring().get_view_description(),
            smart_text(UTF8_TEST_DOCSTRING)
        )
    def test_view_description_can_be_empty(self):
        """
        Ensure that if a view has no docstring,
        then it's description is the empty string.
        """
        class MockView(APIView):
            pass
        self.assertEqual(MockView().get_view_description(), '')
    def test_view_description_can_be_promise(self):
        """
        Ensure a view may have a docstring that is actually a lazily evaluated
        class that can be converted to a string.
        See: https://github.com/tomchristie/django-rest-framework/issues/1708
        """
        # use a mock object instead of gettext_lazy to ensure that we can't end
        # up with a test case string in our l10n catalog
        @python_2_unicode_compatible
        class MockLazyStr(object):
            def __init__(self, string):
                self.s = string
            def __str__(self):
                return self.s
        class MockView(APIView):
            __doc__ = MockLazyStr("a gettext string")
        self.assertEqual(MockView().get_view_description(), 'a gettext string')
    def test_markdown(self):
        """
        Ensure markdown to HTML works as expected.
        """
        # apply_markdown is None when the optional markdown package is
        # missing; accept the output of either supported markdown version.
        if apply_markdown:
            gte_21_match = apply_markdown(DESCRIPTION) == MARKED_DOWN_gte_21
            lt_21_match = apply_markdown(DESCRIPTION) == MARKED_DOWN_lt_21
            self.assertTrue(gte_21_match or lt_21_match)
| bsd-2-clause |
andrejb/leap_pycommon | src/leap/common/events/daemon.py | 3 | 6122 | # -*- coding: utf-8 -*-
# daemon.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A singleton daemon for running RPC services using protobuf.socketrpc.
"""
import logging
import threading
from protobuf.socketrpc.server import (
SocketRpcServer,
ThreadedTCPServer,
SocketHandler,
)
logger = logging.getLogger(__name__)
class ServiceAlreadyRunningException(Exception):
    """
    Signals that the singleton service is already active in this process
    while a caller attempted to start it on a different port.
    """
class EventsRpcServer(SocketRpcServer):
    """
    RPC server used in server and client interfaces to receive messages.
    """

    def __init__(self, port, host='localhost'):
        """
        Initialize a RPC server.

        :param port: the port in which to listen for incoming messages,
                     or 0 to let the OS pick a free port
        :type port: int
        :param host: the address to bind to
        :type host: str
        """
        SocketRpcServer.__init__(self, port, host)
        self._server = None

    def run(self):
        """
        Run the server, blocking until stop() is called.
        """
        logger.info('Running server on port %d.' % self.port)
        # parent implementation does not hold the server instance, so we do it
        # here.
        self._server = ThreadedTCPServer((self.host, self.port),
                                         SocketHandler, self)
        # if we chose to use a random port (0), fetch the port the OS
        # actually bound.  Fixed: was `self.port is 0` -- identity
        # comparison with an int literal only worked via CPython's
        # small-int caching; use equality.
        if self.port == 0:
            self.port = self._server.socket.getsockname()[1]
        self._server.serve_forever()

    def stop(self):
        """
        Stop the server (unblocks run()).
        """
        self._server.shutdown()
class EventsSingletonDaemon(threading.Thread):
    """
    Singleton class for launching and terminating a daemon.

    This class is used so every part of the mechanism that needs to listen for
    messages can launch its own daemon (thread) to do the job.
    """

    # Singleton instance (class-private via name mangling).
    __instance = None

    def __new__(cls, *args, **kwargs):
        """
        Return a singleton instance if it exists or create and initialize one.

        :raises TypeError: when not called with exactly (port, service)
        """
        # Fixed: was `len(args) is not 2` -- identity comparison with an
        # int literal only worked via CPython's small-int caching.
        if len(args) != 2:
            raise TypeError("__init__() takes exactly 2 arguments (%d given)"
                            % len(args))
        if cls.__instance is None:
            cls.__instance = object.__new__(
                EventsSingletonDaemon)
            cls.__initialize(cls.__instance, args[0], args[1])
        return cls.__instance

    @staticmethod
    def __initialize(self, port, service):
        """
        Initialize a singleton daemon.

        This is a static method disguised as instance method that actually
        does the initialization of the daemon instance.

        :param port: the port in which to listen for incoming messages
        :type port: int
        :param service: the service to provide in this daemon
        :type service: google.protobuf.service.Service
        """
        threading.Thread.__init__(self)
        self._port = port
        self._service = service
        self._server = EventsRpcServer(self._port)
        self._server.registerService(self._service)
        # Daemon thread: does not prevent interpreter shutdown.
        self.daemon = True

    def __init__(self):
        """
        Singleton placeholder initialization method.

        Initialization is made in __new__ so we can always return the same
        instance upon object creation.
        """
        pass

    @classmethod
    def ensure(cls, port):
        """
        Make sure the daemon instance is running.

        Each implementation of this method should call `cls.ensure_service`
        with the appropriate service from the `events.proto` definitions, and
        return the daemon instance.

        :param port: the port in which the daemon should be listening
        :type port: int

        :return: a daemon instance
        :rtype: EventsSingletonDaemon
        """
        # Fixed: referenced `self` inside a classmethod, which raised
        # NameError instead of the intended NotImplementedError.
        raise NotImplementedError(cls.ensure)

    @classmethod
    def ensure_service(cls, port, service):
        """
        Start the singleton instance if not already running.

        :param port: the port in which the daemon should be listening
        :type port: int

        :return: a daemon instance
        :rtype: EventsSingletonDaemon
        :raises ServiceAlreadyRunningException: when the service is running
            in this process but a different port was requested
        """
        daemon = cls(port, service)
        if not daemon.is_alive():
            daemon.start()
        elif port and port != cls.__instance._port:
            # service is running in this process but someone is trying to
            # start it in another port
            # Fixed: `self` was referenced inside this classmethod (NameError).
            raise ServiceAlreadyRunningException(
                "Service is already running in this process on port %d."
                % cls.__instance._port)
        return daemon

    @classmethod
    def get_instance(cls):
        """
        Retrieve singleton instance of this daemon.

        :return: a daemon instance, or None if none was created yet
        :rtype: EventsSingletonDaemon
        """
        return cls.__instance

    def run(self):
        """
        Run the server (thread body).
        """
        self._server.run()

    def stop(self):
        """
        Stop the daemon.
        """
        self._server.stop()

    def get_port(self):
        """
        Retrieve the value of the port to which the service running in this
        daemon is binded to.

        :return: the port to which the daemon is binded to
        :rtype: int
        """
        # Fixed: was `self._port is 0` (identity comparison with int literal).
        if self._port == 0:
            self._port = self._server.port
        return self._port
| gpl-3.0 |
hujiajie/pa-chromium | PRESUBMIT.py | 6 | 36931 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Chromium.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
import subprocess
import sys
# Paths skipped by all presubmit checks (third-party mirrors, generated
# files, etc.).
_EXCLUDED_PATHS = (
    r"^breakpad[\\\/].*",
    r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_rules.py",
    r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_simple.py",
    r"^native_client_sdk[\\\/]src[\\\/]tools[\\\/].*.mk",
    r"^net[\\\/]tools[\\\/]spdyshark[\\\/].*",
    r"^skia[\\\/].*",
    r"^v8[\\\/].*",
    r".*MakeFile$",
    r".+_autogen\.h$",
    r".+[\\\/]pnacl_shim\.c$",
)
# Fragment of a regular expression that matches C++ and Objective-C++
# implementation files.
_IMPLEMENTATION_EXTENSIONS = r'\.(cc|cpp|cxx|mm)$'
# Regular expression that matches code only used for test binaries
# (best effort).
_TEST_CODE_EXCLUDED_PATHS = (
    r'.*[/\\](fake_|test_|mock_).+%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.+_test_(base|support|util)%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.+_(api|browser|perf|pixel|unit|ui)?test(_[a-z]+)?%s' %
        _IMPLEMENTATION_EXTENSIONS,
    r'.+profile_sync_service_harness%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.*[/\\](test|tool(s)?)[/\\].*',
    # content_shell is used for running layout tests.
    r'content[/\\]shell[/\\].*',
    # At request of folks maintaining this folder.
    r'chrome[/\\]browser[/\\]automation[/\\].*',
)
# Warning text emitted by _CheckNoProductionCodeUsingTestOnlyFunctions.
_TEST_ONLY_WARNING = (
    'You might be calling functions intended only for testing from\n'
    'production code. It is OK to ignore this warning if you know what\n'
    'you are doing, as the heuristics used to detect the situation are\n'
    'not perfect. The commit queue will not block on this warning.\n'
    'Email joi@chromium.org if you have questions.')
# Warning text emitted by the #include-order check.
_INCLUDE_ORDER_WARNING = (
    'Your #include order seems to be broken. Send mail to\n'
    'marja@chromium.org if this is not the case.')
# Each entry: (needle, explanation lines, error_flag).  When error_flag is
# True a match is an error; otherwise only a warning.
_BANNED_OBJC_FUNCTIONS = (
    (
      'addTrackingRect:',
      (
       'The use of -[NSView addTrackingRect:owner:userData:assumeInside:] is'
       'prohibited. Please use CrTrackingArea instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      False,
    ),
    (
      'NSTrackingArea',
      (
       'The use of NSTrackingAreas is prohibited. Please use CrTrackingArea',
       'instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      False,
    ),
    (
      'convertPointFromBase:',
      (
       'The use of -[NSView convertPointFromBase:] is almost certainly wrong.',
       'Please use |convertPoint:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertPointToBase:',
      (
       'The use of -[NSView convertPointToBase:] is almost certainly wrong.',
       'Please use |convertPoint:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertRectFromBase:',
      (
       'The use of -[NSView convertRectFromBase:] is almost certainly wrong.',
       'Please use |convertRect:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertRectToBase:',
      (
       'The use of -[NSView convertRectToBase:] is almost certainly wrong.',
       'Please use |convertRect:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertSizeFromBase:',
      (
       'The use of -[NSView convertSizeFromBase:] is almost certainly wrong.',
       'Please use |convertSize:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertSizeToBase:',
      (
       'The use of -[NSView convertSizeToBase:] is almost certainly wrong.',
       'Please use |convertSize:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
)
# Each entry: (needle, explanation lines, error_flag, excluded path regexes).
_BANNED_CPP_FUNCTIONS = (
    # Make sure that gtest's FRIEND_TEST() macro is not used; the
    # FRIEND_TEST_ALL_PREFIXES() macro from base/gtest_prod_util.h should be
    # used instead since that allows for FLAKY_ and DISABLED_ prefixes.
    (
      'FRIEND_TEST(',
      (
       'Chromium code should not use gtest\'s FRIEND_TEST() macro. Include',
       'base/gtest_prod_util.h and use FRIEND_TEST_ALL_PREFIXES() instead.',
      ),
      False,
      (),
    ),
    (
      'ScopedAllowIO',
      (
       'New code should not use ScopedAllowIO. Post a task to the blocking',
       'pool or the FILE thread instead.',
      ),
      True,
      (
        r"^content[\\\/]shell[\\\/]shell_browser_main\.cc$",
        r"^net[\\\/]disk_cache[\\\/]cache_util\.cc$",
      ),
    ),
    (
      'SkRefPtr',
      (
        'The use of SkRefPtr is prohibited. ',
        'Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
    (
      'SkAutoRef',
      (
        'The indirect use of SkRefPtr via SkAutoRef is prohibited. ',
        'Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
    (
      'SkAutoTUnref',
      (
        'The use of SkAutoTUnref is dangerous because it implicitly ',
        'converts to a raw pointer. Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
    (
      'SkAutoUnref',
      (
        'The indirect use of SkAutoTUnref through SkAutoUnref is dangerous ',
        'because it implicitly converts to a raw pointer. ',
        'Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
)
# OS_* macros that may legitimately appear in #if defined(...) checks.
_VALID_OS_MACROS = (
    # Please keep sorted.
    'OS_ANDROID',
    'OS_BSD',
    'OS_CAT',       # For testing.
    'OS_CHROMEOS',
    'OS_FREEBSD',
    'OS_IOS',
    'OS_LINUX',
    'OS_MACOSX',
    'OS_NACL',
    'OS_OPENBSD',
    'OS_POSIX',
    'OS_SOLARIS',
    'OS_WIN',
)
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
  """Attempts to prevent use of functions intended only for testing in
  non-testing code. For now this is just a best-effort implementation
  that ignores header files and may have some false positives. A
  better implementation would probably need a proper C++ parser.
  """
  # We only scan .cc files and the like, as the declaration of
  # for-testing functions in header files are hard to distinguish from
  # calls to such functions without a proper C++ parser.
  file_inclusion_pattern = r'.+%s' % _IMPLEMENTATION_EXTENSIONS
  base_function_pattern = r'ForTest(ing)?|for_test(ing)?'
  inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
  comment_pattern = input_api.re.compile(r'//.*%s' % base_function_pattern)
  # A qualified reference (::FooForTest) or a definition (name followed by
  # '{' with no ';' in between) is not a call site; skip those.
  exclusion_pattern = input_api.re.compile(
    r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
      base_function_pattern, base_function_pattern))

  def FilterFile(affected_file):
    black_list = (_EXCLUDED_PATHS +
                  _TEST_CODE_EXCLUDED_PATHS +
                  input_api.DEFAULT_BLACK_LIST)
    return input_api.FilterSourceFile(
      affected_file,
      white_list=(file_inclusion_pattern, ),
      black_list=black_list)

  problems = []
  for f in input_api.AffectedSourceFiles(FilterFile):
    local_path = f.LocalPath()
    lines = input_api.ReadFile(f).splitlines()
    # Report 1-based line numbers, consistent with every other check here
    # (the previous manual counter started at 0, off by one).
    for line_number, line in enumerate(lines, 1):
      if (inclusion_pattern.search(line) and
          not comment_pattern.search(line) and
          not exclusion_pattern.search(line)):
        problems.append(
          '%s:%d\n %s' % (local_path, line_number, line.strip()))

  if problems:
    return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
  else:
    return []
def _CheckNoIOStreamInHeaders(input_api, output_api):
  """Checks to make sure no .h files include <iostream>."""
  # <iostream> drags a static initializer into every translation unit that
  # includes the header; <ostream> is the sanctioned substitute.
  iostream_re = input_api.re.compile(r'^#include\s*<iostream>',
                                     input_api.re.MULTILINE)
  offenders = [
      f for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile)
      if f.LocalPath().endswith('.h') and
         iostream_re.search(input_api.ReadFile(f))
  ]
  if not offenders:
    return []
  return [output_api.PresubmitError(
      'Do not #include <iostream> in header files, since it inserts static '
      'initialization into every file including the header. Instead, '
      '#include <ostream>. See http://crbug.com/94794',
      offenders)]
def _CheckNoUNIT_TESTInSourceFiles(input_api, output_api):
  """Checks to make sure no source files use UNIT_TEST"""
  # UNIT_TEST is a header-only convenience define; .cc/.mm files must not
  # reference it.
  offenders = []
  for source_file in input_api.AffectedFiles():
    if not source_file.LocalPath().endswith(('.cc', '.mm')):
      continue
    offenders.extend(
        ' %s:%d' % (source_file.LocalPath(), line_num)
        for line_num, line in source_file.ChangedContents()
        if 'UNIT_TEST' in line)
  if offenders:
    return [output_api.PresubmitPromptWarning('UNIT_TEST is only for headers.\n' +
        '\n'.join(offenders))]
  return []
def _CheckNoNewWStrings(input_api, output_api):
  """Checks to make sure we don't introduce use of wstrings."""
  problems = []
  for f in input_api.AffectedFiles():
    # Only C++ implementation and header files; test files are exempt.
    if (not f.LocalPath().endswith(('.cc', '.h')) or
        f.LocalPath().endswith('test.cc')):
      continue

    allowWString = False
    for line_num, line in f.ChangedContents():
      if 'presubmit: allow wstring' in line:
        # Opt-out marker: licenses a wstring on the *next* changed line only,
        # because every branch below resets the flag back to False.
        allowWString = True
      elif not allowWString and 'wstring' in line:
        problems.append(' %s:%d' % (f.LocalPath(), line_num))
        allowWString = False
      else:
        allowWString = False
  if not problems:
    return []
  return [output_api.PresubmitPromptWarning('New code should not use wstrings.'
      ' If you are calling a cross-platform API that accepts a wstring, '
      'fix the API.\n' +
      '\n'.join(problems))]
def _CheckNoDEPSGIT(input_api, output_api):
  """Make sure .DEPS.git is never modified manually."""
  # .DEPS.git is generated from DEPS by automation; hand edits get clobbered.
  touched = [f for f in input_api.AffectedFiles()
             if f.LocalPath().endswith('.DEPS.git')]
  if not touched:
    return []
  return [output_api.PresubmitError(
      'Never commit changes to .DEPS.git. This file is maintained by an\n'
      'automated system based on what\'s in DEPS and your changes will be\n'
      'overwritten.\n'
      'See http://code.google.com/p/chromium/wiki/UsingNewGit#Rolling_DEPS\n'
      'for more information')]
def _CheckNoBannedFunctions(input_api, output_api):
  """Make sure that banned functions are not used.

  Scans changed lines for the substrings listed in _BANNED_OBJC_FUNCTIONS
  (Objective-C sources/headers) and _BANNED_CPP_FUNCTIONS (C++ sources/
  headers). Entries flagged as errors become presubmit errors, the rest
  prompt warnings.
  """
  warnings = []
  errors = []

  def _Report(problems, f, line_num, message):
    # One location header line per hit, followed by the explanation lines.
    problems.append(' %s:%d:' % (f.LocalPath(), line_num))
    for message_line in message:
      problems.append(' %s' % message_line)

  def _IsBlacklisted(affected_file, blacklist):
    # True when the file's local path matches any excluded-path regex.
    local_path = affected_file.LocalPath()
    for item in blacklist:
      if input_api.re.match(item, local_path):
        return True
    return False

  file_filter = lambda f: f.LocalPath().endswith(('.mm', '.m', '.h'))
  for f in input_api.AffectedFiles(file_filter=file_filter):
    for line_num, line in f.ChangedContents():
      for func_name, message, error in _BANNED_OBJC_FUNCTIONS:
        if func_name in line:
          _Report(errors if error else warnings, f, line_num, message)

  file_filter = lambda f: f.LocalPath().endswith(('.cc', '.mm', '.h'))
  for f in input_api.AffectedFiles(file_filter=file_filter):
    for line_num, line in f.ChangedContents():
      for func_name, message, error, excluded_paths in _BANNED_CPP_FUNCTIONS:
        # C++ entries carry per-function path exemptions.
        if _IsBlacklisted(f, excluded_paths):
          continue
        if func_name in line:
          _Report(errors if error else warnings, f, line_num, message)

  result = []
  if warnings:
    result.append(output_api.PresubmitPromptWarning(
        'Banned functions were used.\n' + '\n'.join(warnings)))
  if errors:
    result.append(output_api.PresubmitError(
        'Banned functions were used.\n' + '\n'.join(errors)))
  return result
def _CheckNoPragmaOnce(input_api, output_api):
  """Checks that header files do not use #pragma once.

  Chromium headers use traditional include guards instead.
  """
  pragma_re = input_api.re.compile(r'^#pragma\s+once',
                                   input_api.re.MULTILINE)
  offenders = []
  for header in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
    if header.LocalPath().endswith('.h'):
      if pragma_re.search(input_api.ReadFile(header)):
        offenders.append(header)
  if offenders:
    return [output_api.PresubmitError(
        'Do not use #pragma once in header files.\n'
        'See http://www.chromium.org/developers/coding-style#TOC-File-headers',
        offenders)]
  return []
def _CheckNoTrinaryTrueFalse(input_api, output_api):
  """Checks to make sure we don't introduce use of foo ? true : false."""
  trinary_re = input_api.re.compile(r'\?\s*(true|false)\s*:\s*(true|false)')
  wanted_exts = ('.cc', '.h', '.inl', '.m', '.mm')
  hits = []
  for f in input_api.AffectedFiles():
    if not f.LocalPath().endswith(wanted_exts):
      continue
    # .match() anchors at line start, mirroring the historical behavior.
    hits.extend(' %s:%d' % (f.LocalPath(), line_num)
                for line_num, line in f.ChangedContents()
                if trinary_re.match(line))
  if not hits:
    return []
  return [output_api.PresubmitPromptWarning(
      'Please consider avoiding the "? true : false" pattern if possible.\n' +
      '\n'.join(hits))]
def _CheckUnwantedDependencies(input_api, output_api):
  """Runs checkdeps on #include statements added in this
  change. Breaking - rules is an error, breaking ! rules is a
  warning.
  """
  # We need to wait until we have an input_api object and use this
  # roundabout construct to import checkdeps because this file is
  # eval-ed and thus doesn't have __file__.
  original_sys_path = sys.path
  try:
    sys.path = sys.path + [input_api.os_path.join(
        input_api.PresubmitLocalPath(), 'tools', 'checkdeps')]
    import checkdeps
    from cpp_checker import CppChecker
    from rules import Rule
  finally:
    # Restore sys.path to what it was before.
    sys.path = original_sys_path

  # Build [local_path, changed_lines] pairs — the input format expected by
  # DepsChecker.CheckAddedCppIncludes.
  added_includes = []
  for f in input_api.AffectedFiles():
    if not CppChecker.IsCppFile(f.LocalPath()):
      continue
    changed_lines = [line for line_num, line in f.ChangedContents()]
    added_includes.append([f.LocalPath(), changed_lines])

  deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())

  # DISALLOW rule hits are errors; everything else (temp-allowed '!' rules)
  # is only a warning.
  error_descriptions = []
  warning_descriptions = []
  for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
      added_includes):
    description_with_path = '%s\n %s' % (path, rule_description)
    if rule_type == Rule.DISALLOW:
      error_descriptions.append(description_with_path)
    else:
      warning_descriptions.append(description_with_path)

  results = []
  if error_descriptions:
    results.append(output_api.PresubmitError(
        'You added one or more #includes that violate checkdeps rules.',
        error_descriptions))
  if warning_descriptions:
    results.append(output_api.PresubmitPromptOrNotify(
        'You added one or more #includes of files that are temporarily\n'
        'allowed but being removed. Can you avoid introducing the\n'
        '#include? See relevant DEPS file(s) for details and contacts.',
        warning_descriptions))
  return results
def _CheckFilePermissions(input_api, output_api):
  """Check that all files have their permissions properly set.

  Shells out to tools/checkperms/checkperms.py, which prints a report for
  any problem it finds; any captured output becomes a presubmit error.
  """
  args = [sys.executable, 'tools/checkperms/checkperms.py', '--root',
          input_api.change.RepositoryRoot()]
  for f in input_api.AffectedFiles():
    args += ['--file', f.LocalPath()]
  # stdout must be captured through a pipe: without stdout=PIPE,
  # communicate() returns (None, None) and checkperms failures were
  # silently discarded.
  checkperms = subprocess.Popen(args, stdout=subprocess.PIPE)
  errors, _ = checkperms.communicate()
  results = []
  if errors:
    results.append(output_api.PresubmitError('checkperms.py failed.',
                                             errors))
  return results
def _CheckNoAuraWindowPropertyHInHeaders(input_api, output_api):
  """Makes sure we don't include ui/aura/window_property.h
  in header files.
  """
  include_re = input_api.re.compile(
      r'^#include\s*"ui/aura/window_property.h"')
  bad_lines = []
  for f in input_api.AffectedFiles():
    if f.LocalPath().endswith('.h'):
      bad_lines.extend(' %s:%d' % (f.LocalPath(), line_num)
                       for line_num, line in f.ChangedContents()
                       if include_re.match(line))
  if not bad_lines:
    return []
  return [output_api.PresubmitError(
      'Header files should not include ui/aura/window_property.h', bad_lines)]
def _CheckIncludeOrderForScope(scope, input_api, file_path, changed_linenums):
  """Checks that the lines in scope occur in the right order.

  1. C system files in alphabetical order
  2. C++ system files in alphabetical order
  3. Project's .h files

  'scope' is a list of (line_num, line) include lines collected by
  _CheckIncludeOrderInFile; returns warning strings for out-of-order lines
  that touch 'changed_linenums'.
  """
  c_system_include_pattern = input_api.re.compile(r'\s*#include <.*\.h>')
  cpp_system_include_pattern = input_api.re.compile(r'\s*#include <.*>')
  custom_include_pattern = input_api.re.compile(r'\s*#include ".*')

  C_SYSTEM_INCLUDES, CPP_SYSTEM_INCLUDES, CUSTOM_INCLUDES = range(3)

  # Simple state machine: the three sections must appear in the order above,
  # and within a section lines must be alphabetically sorted.
  state = C_SYSTEM_INCLUDES

  previous_line = ''
  previous_line_num = 0
  problem_linenums = []
  for line_num, line in scope:
    if c_system_include_pattern.match(line):
      if state != C_SYSTEM_INCLUDES:
        problem_linenums.append((line_num, previous_line_num))
      elif previous_line and previous_line > line:
        problem_linenums.append((line_num, previous_line_num))
    elif cpp_system_include_pattern.match(line):
      if state == C_SYSTEM_INCLUDES:
        state = CPP_SYSTEM_INCLUDES
      elif state == CUSTOM_INCLUDES:
        problem_linenums.append((line_num, previous_line_num))
      elif previous_line and previous_line > line:
        problem_linenums.append((line_num, previous_line_num))
    elif custom_include_pattern.match(line):
      if state != CUSTOM_INCLUDES:
        state = CUSTOM_INCLUDES
      elif previous_line and previous_line > line:
        problem_linenums.append((line_num, previous_line_num))
    else:
      # Append the same (line, previous) tuple shape as every other branch.
      # The old code appended a bare line number here, which would raise
      # TypeError in the unpacking loop below if this branch were reached.
      problem_linenums.append((line_num, previous_line_num))
    previous_line = line
    previous_line_num = line_num

  warnings = []
  for (line_num, previous_line_num) in problem_linenums:
    if line_num in changed_linenums or previous_line_num in changed_linenums:
      warnings.append(' %s:%d' % (file_path, line_num))
  return warnings
def _CheckIncludeOrderInFile(input_api, f, changed_linenums):
  """Checks the #include order for the given file f."""
  system_include_pattern = input_api.re.compile(r'\s*#include \<.*')
  # Exclude #include <.../...> includes from the check; e.g., <sys/...> includes
  # often need to appear in a specific order.
  excluded_include_pattern = input_api.re.compile(r'\s*#include \<.*/.*')
  custom_include_pattern = input_api.re.compile(r'\s*#include "(?P<FILE>.*)"')
  if_pattern = input_api.re.compile(
      r'\s*#\s*(if|elif|else|endif|define|undef).*')
  # Some files need specialized order of includes; exclude such files from this
  # check.
  uncheckable_includes_pattern = input_api.re.compile(
      r'\s*#include '
      '("ipc/.*macros\.h"|<windows\.h>|".*gl.*autogen.h")\s*')

  contents = f.NewContents()
  warnings = []
  line_num = 0

  # Handle the special first include. If the first include file is
  # some/path/file.h, the corresponding including file can be some/path/file.cc,
  # some/other/path/file.cc, some/path/file_platform.cc, some/path/file-suffix.h
  # etc. It's also possible that no special first include exists.
  for line in contents:
    line_num += 1
    if system_include_pattern.match(line):
      # No special first include -> process the line again along with normal
      # includes.
      line_num -= 1
      break
    match = custom_include_pattern.match(line)
    if match:
      match_dict = match.groupdict()
      # A quoted include counts as the "corresponding header" only when its
      # basename (minus .h) is a substring of this file's basename.
      header_basename = input_api.os_path.basename(
          match_dict['FILE']).replace('.h', '')
      if header_basename not in input_api.os_path.basename(f.LocalPath()):
        # No special first include -> process the line again along with normal
        # includes.
        line_num -= 1
      break

  # Split into scopes: Each region between #if and #endif is its own scope.
  scopes = []
  current_scope = []
  for line in contents[line_num:]:
    line_num += 1
    if uncheckable_includes_pattern.match(line):
      # Files with specialized include order are skipped entirely.
      return []
    if if_pattern.match(line):
      scopes.append(current_scope)
      current_scope = []
    elif ((system_include_pattern.match(line) or
           custom_include_pattern.match(line)) and
          not excluded_include_pattern.match(line)):
      # Only include lines are fed to the per-scope order check.
      current_scope.append((line_num, line))
  scopes.append(current_scope)

  for scope in scopes:
    warnings.extend(_CheckIncludeOrderForScope(scope, input_api, f.LocalPath(),
                                               changed_linenums))
  return warnings
def _CheckIncludeOrder(input_api, output_api):
  """Checks that the #include order is correct.

  1. The corresponding header for source files.
  2. C system files in alphabetical order
  3. C++ system files in alphabetical order
  4. Project's .h files in alphabetical order

  Each region separated by #if, #elif, #else, #endif, #define and #undef follows
  these rules separately.
  """
  warnings = []
  for f in input_api.AffectedFiles():
    if f.LocalPath().endswith(('.cc', '.h')):
      changed = set(num for num, _ in f.ChangedContents())
      warnings += _CheckIncludeOrderInFile(input_api, f, changed)
  if not warnings:
    return []
  return [output_api.PresubmitPromptOrNotify(_INCLUDE_ORDER_WARNING, warnings)]
def _CheckForVersionControlConflictsInFile(input_api, f):
  """Returns one formatted error string per changed line that starts with a
  version-control conflict marker (<<<<<<<, >>>>>>> or a bare =======)."""
  marker_re = input_api.re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
  return [' %s:%d %s' % (f.LocalPath(), line_num, line)
          for line_num, line in f.ChangedContents()
          if marker_re.match(line)]
def _CheckForVersionControlConflicts(input_api, output_api):
  """Usually this is not intentional and will cause a compile failure."""
  conflicts = []
  for f in input_api.AffectedFiles():
    conflicts += _CheckForVersionControlConflictsInFile(input_api, f)
  if not conflicts:
    return []
  return [output_api.PresubmitError(
      'Version control conflict markers found, please resolve.', conflicts)]
def _CheckHardcodedGoogleHostsInLowerLayers(input_api, output_api):
  """Warns when non-test code in low-level layers hard-codes google.com URLs."""

  def FilterFile(affected_file):
    """Filter function for use with input_api.AffectedSourceFiles,
    below. This filters out everything except non-test files from
    top-level directories that generally speaking should not hard-code
    service URLs (e.g. src/android_webview/, src/content/ and others).
    """
    return input_api.FilterSourceFile(
        affected_file,
        white_list=(r'^(android_webview|base|content|net)[\\\/].*', ),
        black_list=(_EXCLUDED_PATHS +
                    _TEST_CODE_EXCLUDED_PATHS +
                    input_api.DEFAULT_BLACK_LIST))

  # A double-quoted string literal containing google.com; hits inside //
  # comments are excused.
  quoted_host = '"[^"]*google\.com[^"]*"'
  comment_re = input_api.re.compile('//.*%s' % quoted_host)
  url_re = input_api.re.compile(quoted_host)

  findings = []  # items are (filename, line_number, line)
  for f in input_api.AffectedSourceFiles(FilterFile):
    for line_num, line in f.ChangedContents():
      if url_re.search(line) and not comment_re.search(line):
        findings.append((f.LocalPath(), line_num, line))

  if not findings:
    return []
  return [output_api.PresubmitPromptOrNotify(
      'Most layers below src/chrome/ should not hardcode service URLs.\n'
      'Are you sure this is correct? (Contact: joi@chromium.org)',
      [' %s:%d: %s' % finding for finding in findings])]
def _CheckNoAbbreviationInPngFileName(input_api, output_api):
  """Makes sure there are no abbreviations in the name of PNG files.
  """
  # Flags names like foo_h.png or foo_c_bar.png (single-letter components).
  abbrev_re = input_api.re.compile(r'.*_[a-z]_.*\.png$|.*_[a-z]\.png$')
  offenders = [' %s' % f.LocalPath()
               for f in input_api.AffectedFiles(include_deletes=False)
               if abbrev_re.match(f.LocalPath())]
  if not offenders:
    return []
  return [output_api.PresubmitError(
      'The name of PNG files should not have abbreviations. \n'
      'Use _hover.png, _center.png, instead of _h.png, _c.png.\n'
      'Contact oshima@chromium.org if you have questions.', offenders)]
def _CheckAddedDepsHaveTargetApprovals(input_api, output_api):
  """When a dependency prefixed with + is added to a DEPS file, we
  want to make sure that the change is reviewed by an OWNER of the
  target file or directory, to avoid layering violations from being
  introduced. This check verifies that this happens.
  """
  # Gather every changed line from files literally named DEPS.
  changed_lines = set()
  for f in input_api.AffectedFiles():
    filename = input_api.os_path.basename(f.LocalPath())
    if filename == 'DEPS':
      changed_lines |= set(line.strip()
                           for line_num, line
                           in f.ChangedContents())
  if not changed_lines:
    return []

  # Map each added '+path[/header.h]' rule to the virtual 'path/DEPS' file
  # whose OWNERS must sign off.
  virtual_depended_on_files = set()
  # This pattern grabs the path without basename in the first
  # parentheses, and the basename (if present) in the second. It
  # relies on the simple heuristic that if there is a basename it will
  # be a header file ending in ".h".
  pattern = input_api.re.compile(
      r"""['"]\+([^'"]+?)(/[a-zA-Z0-9_]+\.h)?['"].*""")
  for changed_line in changed_lines:
    m = pattern.match(changed_line)
    if m:
      virtual_depended_on_files.add('%s/DEPS' % m.group(1))

  if not virtual_depended_on_files:
    return []

  if input_api.is_committing:
    if input_api.tbr:
      # --tbr bypasses the OWNERS lookup entirely.
      return [output_api.PresubmitNotifyResult(
          '--tbr was specified, skipping OWNERS check for DEPS additions')]
    if not input_api.change.issue:
      return [output_api.PresubmitError(
          "DEPS approval by OWNERS check failed: this change has "
          "no Rietveld issue number, so we can't check it for approvals.")]
    # Missing approval is fatal at commit time, only informative at upload.
    output = output_api.PresubmitError
  else:
    output = output_api.PresubmitNotifyResult

  owners_db = input_api.owners_db
  owner_email, reviewers = input_api.canned_checks._RietveldOwnerAndReviewers(
      input_api,
      owners_db.email_regexp,
      approval_needed=input_api.is_committing)

  # The author is allowed to approve their own additions.
  owner_email = owner_email or input_api.change.author_email

  reviewers_plus_owner = set(reviewers)
  if owner_email:
    reviewers_plus_owner.add(owner_email)
  missing_files = owners_db.files_not_covered_by(virtual_depended_on_files,
                                                 reviewers_plus_owner)

  # Convert the '<path>/DEPS' form back to the "'+<path>'," DEPS syntax for
  # the user-facing message.
  unapproved_dependencies = ["'+%s'," % path[:-len('/DEPS')]
                             for path in missing_files]

  if unapproved_dependencies:
    output_list = [
      output('Missing LGTM from OWNERS of directories added to DEPS:\n %s' %
             '\n '.join(sorted(unapproved_dependencies)))]
    if not input_api.is_committing:
      suggested_owners = owners_db.reviewers_for(missing_files, owner_email)
      output_list.append(output(
          'Suggested missing target path OWNERS:\n %s' %
          '\n '.join(suggested_owners or [])))
    return output_list

  return []
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  results = []
  # Generic project-wide canned checks from depot_tools run first.
  results.extend(input_api.canned_checks.PanProjectChecks(
      input_api, output_api, excluded_paths=_EXCLUDED_PATHS))
  results.extend(_CheckAuthorizedAuthor(input_api, output_api))
  results.extend(
      _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
  results.extend(_CheckNoIOStreamInHeaders(input_api, output_api))
  results.extend(_CheckNoUNIT_TESTInSourceFiles(input_api, output_api))
  results.extend(_CheckNoNewWStrings(input_api, output_api))
  results.extend(_CheckNoDEPSGIT(input_api, output_api))
  results.extend(_CheckNoBannedFunctions(input_api, output_api))
  results.extend(_CheckNoPragmaOnce(input_api, output_api))
  results.extend(_CheckNoTrinaryTrueFalse(input_api, output_api))
  results.extend(_CheckUnwantedDependencies(input_api, output_api))
  results.extend(_CheckFilePermissions(input_api, output_api))
  results.extend(_CheckNoAuraWindowPropertyHInHeaders(input_api, output_api))
  results.extend(_CheckIncludeOrder(input_api, output_api))
  results.extend(_CheckForVersionControlConflicts(input_api, output_api))
  results.extend(_CheckPatchFiles(input_api, output_api))
  results.extend(_CheckHardcodedGoogleHostsInLowerLayers(input_api, output_api))
  results.extend(_CheckNoAbbreviationInPngFileName(input_api, output_api))
  results.extend(_CheckForInvalidOSMacros(input_api, output_api))
  results.extend(_CheckAddedDepsHaveTargetApprovals(input_api, output_api))
  # If this PRESUBMIT.py itself was modified, run its own unit tests too.
  if any('PRESUBMIT.py' == f.LocalPath() for f in input_api.AffectedFiles()):
    results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
        input_api, output_api,
        input_api.PresubmitLocalPath(),
        whitelist=[r'^PRESUBMIT_test\.py$']))
  return results
def _CheckSubversionConfig(input_api, output_api):
  """Verifies the subversion config file is correctly setup.

  Checks that autoprops are enabled, returns an error otherwise.
  """
  join = input_api.os_path.join
  if input_api.platform == 'win32':
    appdata = input_api.environ.get('APPDATA', '')
    if not appdata:
      return [output_api.PresubmitError('%APPDATA% is not configured.')]
    path = join(appdata, 'Subversion', 'config')
  else:
    home = input_api.environ.get('HOME', '')
    if not home:
      return [output_api.PresubmitError('$HOME is not configured.')]
    path = join(home, '.subversion', 'config')

  error_msg = (
      'Please look at http://dev.chromium.org/developers/coding-style to\n'
      'configure your subversion configuration file. This enables automatic\n'
      'properties to simplify the project maintenance.\n'
      'Pro-tip: just download and install\n'
      'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')

  try:
    # Use a context manager so the config file handle is always closed
    # (the previous code leaked the file object returned by open()).
    with open(path, 'r') as config_file:
      lines = config_file.read().splitlines()
    # Make sure auto-props is enabled and check for 2 Chromium standard
    # auto-prop.
    if (not '*.cc = svn:eol-style=LF' in lines or
        not '*.pdf = svn:mime-type=application/pdf' in lines or
        not 'enable-auto-props = yes' in lines):
      return [
          output_api.PresubmitNotifyResult(
              'It looks like you have not configured your subversion config '
              'file or it is not up-to-date.\n' + error_msg)
      ]
  except (OSError, IOError):
    return [
        output_api.PresubmitNotifyResult(
            'Can\'t find your subversion config file.\n' + error_msg)
    ]
  return []
def _CheckAuthorizedAuthor(input_api, output_api):
  """For non-googler/chromites committers, verify the author's email address is
  in AUTHORS.
  """
  # TODO(maruel): Add it to input_api?
  import fnmatch

  author = input_api.change.author_email
  if not author:
    input_api.logging.info('No author, skipping AUTHOR check')
    return []
  authors_path = input_api.os_path.join(
      input_api.PresubmitLocalPath(), 'AUTHORS')
  # Read AUTHORS with a context manager so the file handle is closed
  # deterministically (the old generator expression left it open).
  # Each AUTHORS entry looks like "Name <email>"; collect lowercased emails.
  with open(authors_path) as authors_file:
    matches = [input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
               for line in authors_file]
  valid_authors = [match.group(1).lower() for match in matches if match]
  # AUTHORS entries may be fnmatch patterns (e.g. *@chromium.org).
  if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
    input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
    return [output_api.PresubmitPromptWarning(
        ('%s is not in AUTHORS file. If you are a new contributor, please visit'
        '\n'
        'http://www.chromium.org/developers/contributing-code and read the '
        '"Legal" section\n'
        'If you are a chromite, verify the contributor signed the CLA.') %
        author)]
  return []
def _CheckPatchFiles(input_api, output_api):
  """Flags stray patch-tool residue (.orig / .rej files) in the change."""
  leftovers = []
  for f in input_api.AffectedFiles():
    if f.LocalPath().endswith(('.orig', '.rej')):
      leftovers.append(f.LocalPath())
  if not leftovers:
    return []
  return [output_api.PresubmitError(
      "Don't commit .rej and .orig files.", leftovers)]
def _DidYouMeanOSMacro(bad_macro):
  """Best-effort suggestion for a misspelled OS_* macro.

  Maps the first letter after 'OS_' to the known macro starting with it,
  e.g. 'OS_WINDOWS' -> 'OS_WIN'. Returns '' when there is no suggestion.
  """
  try:
    return {'A': 'OS_ANDROID',
            'B': 'OS_BSD',
            'C': 'OS_CHROMEOS',
            'F': 'OS_FREEBSD',
            'L': 'OS_LINUX',
            'M': 'OS_MACOSX',
            'N': 'OS_NACL',
            'O': 'OS_OPENBSD',
            'P': 'OS_POSIX',
            'S': 'OS_SOLARIS',
            'W': 'OS_WIN'}[bad_macro[3].upper()]
  except (KeyError, IndexError):
    # KeyError: no macro starts with that letter. IndexError: input shorter
    # than 'OS_x' (e.g. a bare 'OS_'); previously that exception escaped.
    return ''
def _CheckForInvalidOSMacrosInFile(input_api, f):
  """Check for sensible looking, totally invalid OS macros."""
  preprocessor_re = input_api.re.compile(r'^\s*#')
  os_macro_re = input_api.re.compile(r'defined\((OS_[^)]+)\)')
  findings = []
  for lnum, line in f.ChangedContents():
    # Only preprocessor lines can legitimately contain defined(OS_*).
    if not preprocessor_re.search(line):
      continue
    for match in os_macro_re.finditer(line):
      macro = match.group(1)
      if macro in _VALID_OS_MACROS:
        continue
      suggestion = _DidYouMeanOSMacro(macro)
      hint = ' (did you mean %s?)' % suggestion if suggestion else ''
      findings.append(' %s:%d %s%s' % (f.LocalPath(), lnum, macro, hint))
  return findings
def _CheckForInvalidOSMacros(input_api, output_api):
  """Check all affected files for invalid OS macros."""
  bad_macros = []
  for f in input_api.AffectedFiles():
    # Skip file types that cannot contain C preprocessor directives.
    if f.LocalPath().endswith(('.py', '.js', '.html', '.css')):
      continue
    bad_macros.extend(_CheckForInvalidOSMacrosInFile(input_api, f))
  if not bad_macros:
    return []
  return [output_api.PresubmitError(
      'Possibly invalid OS macro[s] found. Please fix your code\n'
      'or add your macro to src/PRESUBMIT.py.', bad_macros)]
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point invoked at upload time."""
  return list(_CommonChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point invoked at commit time; runs the common checks
  plus tree-status, try-job, bug-field, description and svn-config checks."""
  results = []
  results.extend(_CommonChecks(input_api, output_api))
  # TODO(thestig) temporarily disabled, doesn't work in third_party/
  #results.extend(input_api.canned_checks.CheckSvnModifiedDirectories(
  # input_api, output_api, sources))
  # Make sure the tree is 'open'.
  results.extend(input_api.canned_checks.CheckTreeIsOpen(
      input_api,
      output_api,
      json_url='http://chromium-status.appspot.com/current?format=json'))
  results.extend(input_api.canned_checks.CheckRietveldTryJobExecution(input_api,
      output_api, 'http://codereview.chromium.org',
      # NOTE(review): 'mac_rel, win:compile' is one string containing a comma;
      # this looks like a typo for two entries 'mac_rel', 'win:compile' --
      # confirm against the try server's bot names before changing it.
      ('win_rel', 'linux_rel', 'mac_rel, win:compile'),
      'tryserver@chromium.org'))
  results.extend(input_api.canned_checks.CheckChangeHasBugField(
      input_api, output_api))
  results.extend(input_api.canned_checks.CheckChangeHasDescription(
      input_api, output_api))
  results.extend(_CheckSubversionConfig(input_api, output_api))
  return results
def GetPreferredTrySlaves(project, change):
  """Returns the list of try bots to use for this change.

  Changes judged (purely from touched file paths) to be platform-specific
  are routed only to that platform's bots; everything else gets the full
  default set, optionally extended with aura/chromeos bots.
  """
  files = change.LocalPaths()

  # OWNERS-only changes need no try run at all.
  if not files or all(re.search(r'[\\/]OWNERS$', f) for f in files):
    return []

  if all(re.search('\.(m|mm)$|(^|[/_])mac[/_.]', f) for f in files):
    return ['mac_rel', 'mac_asan', 'mac:compile']
  if all(re.search('(^|[/_])win[/_.]', f) for f in files):
    return ['win_rel', 'win7_aura', 'win:compile']
  if all(re.search('(^|[/_])android[/_.]', f) for f in files):
    return ['android_dbg', 'android_clang_dbg']
  if all(re.search('^native_client_sdk', f) for f in files):
    return ['linux_nacl_sdk', 'win_nacl_sdk', 'mac_nacl_sdk']
  if all(re.search('[/_]ios[/_.]', f) for f in files):
    return ['ios_rel_device', 'ios_dbg_simulator']

  # Default bot set for mixed or unclassified changes.
  trybots = [
      'android_clang_dbg',
      'android_dbg',
      'ios_dbg_simulator',
      'ios_rel_device',
      'linux_asan',
      'linux_aura',
      'linux_chromeos',
      'linux_clang:compile',
      'linux_rel',
      'mac_asan',
      'mac_rel',
      'mac:compile',
      'win7_aura',
      'win_rel',
      'win:compile',
  ]

  # Match things like path/aura/file.cc and path/file_aura.cc.
  # Same for chromeos.
  if any(re.search('[/_](aura|chromeos)', f) for f in files):
    trybots += ['linux_chromeos_clang:compile', 'linux_chromeos_asan']

  return trybots
| bsd-3-clause |
chyh1990/qemu-thumips | scripts/qapi-types.py | 47 | 6007 | #
# QAPI types generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_fwd_struct(name, members):
    """Emit the forward typedef for struct <name> plus the singly linked
    <name>List node type used for 'list of <name>' QAPI values.

    'members' is accepted for signature parity with generate_struct but is
    not used here.
    """
    return mcgen('''
typedef struct %(name)s %(name)s;

typedef struct %(name)sList
{
    %(name)s *value;
    struct %(name)sList *next;
} %(name)sList;
''',
                 name=name)
def generate_struct(structname, fieldname, members):
    """Emit the C struct body for a QAPI complex type.

    structname -- C name of the struct; empty string when emitting a
        nested anonymous struct.
    fieldname -- member name holding a nested struct; empty at top level.
    members -- mapping of QAPI member name -> type from the schema parser.

    Optional members get a preceding 'bool has_<name>;' flag; structured
    members recurse (push_indent/pop_indent keep the generated C aligned).
    """
    ret = mcgen('''
struct %(name)s
{
''',
          name=structname)

    for argname, argentry, optional, structured in parse_args(members):
        if optional:
            ret += mcgen('''
    bool has_%(c_name)s;
''',
                         c_name=c_var(argname))
        if structured:
            push_indent()
            ret += generate_struct("", argname, argentry)
            pop_indent()
        else:
            ret += mcgen('''
    %(c_type)s %(c_name)s;
''',
                      c_type=c_type(argentry), c_name=c_var(argname))

    # Named field (nested struct) closes as "} field;", top level as "};".
    if len(fieldname):
        fieldname = " " + fieldname
    ret += mcgen('''
}%(field)s;
''',
            field=fieldname)

    return ret
def generate_enum_lookup(name, values):
    """Emit the C definition of the <name>_lookup[] string table mapping
    enum values to their lowercased wire names, terminated by NULL.

    (The previous version carried an unused 'i = 0' counter left over from
    a loop rewrite; it has been removed.)
    """
    ret = mcgen('''
const char *%(name)s_lookup[] = {
''',
                name=name)
    for value in values:
        ret += mcgen('''
    "%(value)s",
''',
                     value=value.lower())

    ret += mcgen('''
    NULL,
};

''')
    return ret
def generate_enum(name, values):
    """Emit the C declarations for enum <name>: the extern lookup-table
    prototype followed by the typedef enum, with an automatically
    appended <ABBREV>_MAX member."""
    lookup_decl = mcgen('''
extern const char *%(name)s_lookup[];
''',
                name=name)

    enum_decl = mcgen('''
typedef enum %(name)s
{
''',
                name=name)

    # append automatically generated _MAX value
    enum_values = values + [ 'MAX' ]

    for i, value in enumerate(enum_values):
        enum_decl += mcgen('''
    %(abbrev)s_%(value)s = %(i)d,
''',
                     abbrev=de_camel_case(name).upper(),
                     value=c_var(value).upper(),
                     i=i)

    enum_decl += mcgen('''
} %(name)s;
''',
            name=name)

    return lookup_decl + enum_decl
def generate_union(name, typeinfo):
    """Emit the C struct for a QAPI union: a 'kind' discriminator of type
    <name>Kind plus an anonymous C union with one member per branch.

    typeinfo maps branch name -> QAPI type; iteration order determines
    member order, so an ordered mapping is expected here.
    """
    ret = mcgen('''
struct %(name)s
{
    %(name)sKind kind;
    union {
''',
                name=name)

    for key in typeinfo:
        ret += mcgen('''
        %(c_type)s %(c_name)s;
''',
                     c_type=c_type(typeinfo[key]),
                     c_name=c_var(key))

    ret += mcgen('''
    };
};
''')

    return ret
def generate_type_cleanup_decl(name):
    """Emit the prototype of the qapi_free_<name>() deallocator."""
    ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj);
''',
                c_type=c_type(name),type=name)
    return ret
def generate_type_cleanup(name):
    """Emit the body of qapi_free_<name>(): walks the object with the
    QAPI dealloc visitor, which frees it recursively; NULL is a no-op."""
    ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj)
{
    QapiDeallocVisitor *md;
    Visitor *v;

    if (!obj) {
        return;
    }

    md = qapi_dealloc_visitor_new();
    v = qapi_dealloc_get_visitor(md);
    visit_type_%(type)s(v, &obj, NULL, NULL);
    qapi_dealloc_visitor_cleanup(md);
}
''',
                c_type=c_type(name),type=name)
    return ret
# ---- command line handling (Python 2 script; -p/--prefix, -o/--output-dir) --
try:
    opts, args = getopt.gnu_getopt(sys.argv[1:], "p:o:", ["prefix=", "output-dir="])
except getopt.GetoptError, err:
    print str(err)
    sys.exit(1)

output_dir = ""
prefix = ""
c_file = 'qapi-types.c'
h_file = 'qapi-types.h'

for o, a in opts:
    if o in ("-p", "--prefix"):
        prefix = a
    elif o in ("-o", "--output-dir"):
        output_dir = a + "/"

c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file

# Create the output directory if needed; an already-existing one is fine.
try:
    os.makedirs(output_dir)
except os.error, e:
    if e.errno != errno.EEXIST:
        raise

fdef = open(c_file, 'w')
fdecl = open(h_file, 'w')

# Boilerplate header for the generated .c (dealloc functions).
fdef.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */

/*
 * deallocation functions for schema-defined QAPI types
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 * Anthony Liguori <aliguori@us.ibm.com>
 * Michael Roth <mdroth@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qapi/qapi-dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"

''', prefix=prefix))

# Boilerplate header plus include guard for the generated .h (type decls).
fdecl.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */

/*
 * schema-defined QAPI types
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#ifndef %(guard)s
#define %(guard)s

#include "qapi/qapi-types-core.h"
''',
                  guard=guardname(h_file)))

# The schema is read from stdin.
exprs = parse_schema(sys.stdin)

# First pass: forward declarations and enums, so later full definitions
# can reference any type regardless of declaration order.
for expr in exprs:
    ret = "\n"
    if expr.has_key('type'):
        ret += generate_fwd_struct(expr['type'], expr['data'])
    elif expr.has_key('enum'):
        ret += generate_enum(expr['enum'], expr['data'])
        fdef.write(generate_enum_lookup(expr['enum'], expr['data']))
    elif expr.has_key('union'):
        # A union gets a forward struct plus its <name>Kind discriminator enum.
        ret += generate_fwd_struct(expr['union'], expr['data']) + "\n"
        ret += generate_enum('%sKind' % expr['union'], expr['data'].keys())
    else:
        continue
    fdecl.write(ret)

# Second pass: full struct/union definitions, qapi_free_*() prototypes in
# the header and their bodies in the .c file.
for expr in exprs:
    ret = "\n"
    if expr.has_key('type'):
        ret += generate_struct(expr['type'], "", expr['data']) + "\n"
        ret += generate_type_cleanup_decl(expr['type'] + "List")
        fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n")
        ret += generate_type_cleanup_decl(expr['type'])
        fdef.write(generate_type_cleanup(expr['type']) + "\n")
    elif expr.has_key('union'):
        ret += generate_union(expr['union'], expr['data'])
    else:
        continue
    fdecl.write(ret)

# Close the include guard and flush both outputs.
fdecl.write('''
#endif
''')

fdecl.flush()
fdecl.close()

fdef.flush()
fdef.close()
| gpl-2.0 |
zstyblik/infernal-twin | build/pip/build/lib.linux-i686-2.7/pip/commands/freeze.py | 311 | 2330 | from __future__ import absolute_import
import sys
import pip
from pip.basecommand import Command
from pip.operations.freeze import freeze
from pip.wheel import WheelCache
class FreezeCommand(Command):
    """
    Output installed packages in requirements format.

    packages are listed in a case-insensitive sorted order.
    """
    # Command metadata consumed by pip's command registry / help output.
    name = 'freeze'
    usage = """
      %prog [options]"""
    summary = 'Output installed packages in requirements format.'
    log_streams = ("ext://sys.stderr", "ext://sys.stderr")

    def __init__(self, *args, **kw):
        super(FreezeCommand, self).__init__(*args, **kw)

        # Register the command-line options from a declarative table;
        # registration order is preserved so --help output is unchanged.
        option_specs = [
            (('-r', '--requirement'),
             dict(dest='requirement',
                  action='store',
                  default=None,
                  metavar='file',
                  help="Use the order in the given requirements file and its "
                       "comments when generating output.")),
            (('-f', '--find-links'),
             dict(dest='find_links',
                  action='append',
                  default=[],
                  metavar='URL',
                  help='URL for finding packages, which will be added to the '
                       'output.')),
            (('-l', '--local'),
             dict(dest='local',
                  action='store_true',
                  default=False,
                  help='If in a virtualenv that has global access, do not output '
                       'globally-installed packages.')),
            (('--user',),
             dict(dest='user',
                  action='store_true',
                  default=False,
                  help='Only output packages installed in user-site.')),
        ]
        for opt_names, opt_kwargs in option_specs:
            self.cmd_opts.add_option(*opt_names, **opt_kwargs)

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        """Write each installed package, in requirements format, to stdout."""
        # No format restrictions: allow both sdists and wheels in the output.
        fmt_control = pip.index.FormatControl(set(), set())
        cache = WheelCache(options.cache_dir, fmt_control)

        frozen_lines = freeze(
            requirement=options.requirement,
            find_links=options.find_links,
            local_only=options.local,
            user_only=options.user,
            skip_regex=options.skip_requirements_regex,
            isolated=options.isolated_mode,
            wheel_cache=cache,
        )
        for requirement_line in frozen_lines:
            sys.stdout.write(requirement_line + '\n')
| gpl-3.0 |
loveshell/volatility | volatility/plugins/linux/keyboard_notifiers.py | 11 | 2613 | # Volatility
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Joe Sylve
@license: GNU General Public License 2.0
@contact: joe.sylve@gmail.com
@organization: 504ENSICS Labs
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.common as linux_common
class linux_keyboard_notifiers(linux_common.AbstractLinuxCommand):
    """Parses the keyboard notifier call chain.

    Walks the kernel's ``keyboard_notifier_list`` and resolves each
    registered callback address to a kernel symbol; an address that does
    not resolve to a known kernel symbol is reported as HOOKED (a possible
    keylogger).
    """

    def calculate(self):
        """Yield (callback address, symbol name, hooked flag) tuples."""
        linux_common.set_plugin_members(self)

        knl_addr = self.addr_space.profile.get_symbol("keyboard_notifier_list")
        if not knl_addr:
            debug.error("Symbol keyboard_notifier_list not found in kernel")

        knl = obj.Object("atomic_notifier_head", offset = knl_addr, vm = self.addr_space)

        # Cache (name, hooked) per callback address: previously only the name
        # was cached, so a repeated hooked address was wrongly re-reported
        # with hooked == 0.
        symbol_cache = {}
        for call_back in linux_common.walk_internal_list("notifier_block", "next", knl.head):
            call_addr = call_back.notifier_call

            if call_addr in symbol_cache:
                sym_name, hooked = symbol_cache[call_addr]
            else:
                # Default to "not hooked"; previously `hooked` was left
                # unassigned when the symbol resolved, raising
                # UnboundLocalError at the yield below.
                hooked = 0
                sym_name = self.profile.get_symbol_by_address("kernel", call_addr)
                if not sym_name:
                    sym_name = "HOOKED"
                    # FIXME: this module offset is a hard-coded, image-specific
                    # address (looks like a debugging leftover); the module
                    # owning call_addr should be located dynamically instead.
                    module = obj.Object("module", offset = 0xffffffffa03a15d0, vm = self.addr_space)
                    sym = module.get_symbol_for_address(call_addr)
                    sym_name = "%s: %s/%s" % (sym_name, module.name, sym)
                    hooked = 1
                symbol_cache[call_addr] = (sym_name, hooked)

            yield call_addr, sym_name, hooked

    def render_text(self, outfd, data):
        """Render the calculate() results as an Address/Symbol table."""
        self.table_header(outfd, [("Address", "[addrpad]"), ("Symbol", "<30")])
        for call_addr, sym_name, _ in data:
            self.table_row(outfd, call_addr, sym_name)
| gpl-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django-1.3/django/contrib/gis/geos/prototypes/predicates.py | 623 | 1777 | """
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
## Binary & unary predicate functions ##
def binary_predicate(func, *args):
    "For GEOS binary predicate functions."
    # Every binary predicate takes two geometry pointers; callers may append
    # extra ctypes argument types (e.g. a tolerance double) via *args.
    func.argtypes = [GEOM_PTR, GEOM_PTR] + list(args)
    func.restype = c_char
    func.errcheck = check_predicate
    return func
def unary_predicate(func):
    "For GEOS unary predicate functions."
    # A unary predicate takes a single geometry pointer and returns a
    # c_char status checked by check_predicate.
    settings = (
        ('argtypes', [GEOM_PTR]),
        ('restype', c_char),
        ('errcheck', check_predicate),
    )
    for attr, value in settings:
        setattr(func, attr, value)
    return func
## Unary Predicates ##
# Each binding wraps the corresponding GEOS C API function so it can be
# called through ctypes with predicate error checking applied.
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))

## Binary Predicates ##
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
# GEOSEqualsExact takes an extra double argument (the comparison tolerance).
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
# GEOSRelatePattern takes an extra char* argument (the DE-9IM pattern string).
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
| bsd-3-clause |
DANCEcollaborative/forum-xblock | XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/django/contrib/sessions/backends/file.py | 2 | 5395 | import errno
import os
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
class SessionStore(SessionBase):
    """
    Implements a file based session store.

    Each session is persisted as a single file named
    <SESSION_COOKIE_NAME><session key> inside SESSION_FILE_PATH (or the
    system temp directory when that setting is absent).
    """
    def __init__(self, session_key=None):
        # Directory that holds the session files; fall back to the system
        # temp dir when SESSION_FILE_PATH is not configured.
        self.storage_path = getattr(settings, "SESSION_FILE_PATH", None)
        if not self.storage_path:
            self.storage_path = tempfile.gettempdir()

        # Make sure the storage path is valid.
        if not os.path.isdir(self.storage_path):
            raise ImproperlyConfigured(
                "The session storage path %r doesn't exist. Please set your"
                " SESSION_FILE_PATH setting to an existing directory in which"
                " Django can store session data." % self.storage_path)

        # Session files are prefixed with the cookie name so multiple sites
        # sharing one temp dir don't collide.
        self.file_prefix = settings.SESSION_COOKIE_NAME
        super(SessionStore, self).__init__(session_key)

    # The only characters a legitimate session key may contain (hex digits);
    # used below to reject directory-traversal attempts.
    VALID_KEY_CHARS = set("abcdef0123456789")

    def _key_to_file(self, session_key=None):
        """
        Get the file associated with this session key.
        """
        if session_key is None:
            session_key = self._get_or_create_session_key()

        # Make sure we're not vulnerable to directory traversal. Session keys
        # should always be md5s, so they should never contain directory
        # components.
        if not set(session_key).issubset(self.VALID_KEY_CHARS):
            raise SuspiciousOperation(
                "Invalid characters in session key")

        return os.path.join(self.storage_path, self.file_prefix + session_key)

    def load(self):
        """Read and decode this session's data from its file.

        Returns an empty dict (and clears the session key so a new one is
        generated) when the file is missing, empty, or cannot be decoded.
        """
        session_data = {}
        try:
            session_file = open(self._key_to_file(), "rb")
            try:
                file_data = session_file.read()
                # Don't fail if there is no data in the session file.
                # We may have opened the empty placeholder file.
                if file_data:
                    try:
                        session_data = self.decode(file_data)
                    except (EOFError, SuspiciousOperation):
                        self._session_key = None
            finally:
                session_file.close()
        except IOError:
            self._session_key = None
        return session_data

    def create(self):
        """Generate a fresh session key and create its (empty) file.

        Loops on CreateError so a key that races with another process is
        simply regenerated.
        """
        while True:
            self._session_key = self._get_new_session_key()
            try:
                self.save(must_create=True)
            except CreateError:
                continue
            self.modified = True
            return

    def save(self, must_create=False):
        """Atomically write the session data to its file.

        When must_create is True, fail with CreateError if a file for this
        key already exists (used by create() to guarantee key uniqueness).
        """
        if self.session_key is None:
            return self.create()
        # Get the session data now, before we start messing
        # with the file it is stored within.
        session_data = self._get_session(no_load=must_create)

        session_file_name = self._key_to_file()

        try:
            # Make sure the file exists.  If it does not already exist, an
            # empty placeholder file is created.
            flags = os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0)
            if must_create:
                flags |= os.O_EXCL
            fd = os.open(session_file_name, flags)
            os.close(fd)
        except OSError, e:
            if must_create and e.errno == errno.EEXIST:
                raise CreateError
            raise

        # Write the session file without interfering with other threads
        # or processes.  By writing to an atomically generated temporary
        # file and then using the atomic os.rename() to make the complete
        # file visible, we avoid having to lock the session file, while
        # still maintaining its integrity.
        #
        # Note: Locking the session file was explored, but rejected in part
        # because in order to be atomic and cross-platform, it required a
        # long-lived lock file for each session, doubling the number of
        # files in the session storage directory at any given time.  This
        # rename solution is cleaner and avoids any additional overhead
        # when reading the session data, which is the more common case
        # unless SESSION_SAVE_EVERY_REQUEST = True.
        #
        # See ticket #8616.
        dir, prefix = os.path.split(session_file_name)

        try:
            output_file_fd, output_file_name = tempfile.mkstemp(dir=dir,
                prefix=prefix + '_out_')
            renamed = False
            try:
                try:
                    os.write(output_file_fd, self.encode(session_data))
                finally:
                    os.close(output_file_fd)
                os.rename(output_file_name, session_file_name)
                renamed = True
            finally:
                if not renamed:
                    # Don't leave the temp file behind if the write or the
                    # rename failed.
                    os.unlink(output_file_name)
        except (OSError, IOError, EOFError):
            pass

    def exists(self, session_key):
        """Return True if a session file exists for the given key."""
        return os.path.exists(self._key_to_file(session_key))

    def delete(self, session_key=None):
        """Remove the session file for the given (or current) key.

        A missing file is silently ignored.
        """
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        try:
            os.unlink(self._key_to_file(session_key))
        except OSError:
            pass

    def clean(self):
        # Expired file sessions are not cleaned up here; see the
        # clearsessions management command / external cleanup instead.
        pass
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.