| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
#
# Copyright (c) 2013 Corey Goldberg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import os
import subprocess
import time
import unittest
import psutil
from procstats import ProcStats
class ProcStatsTestCase(unittest.TestCase):
def test_init(self):
pid = os.getpid()
ps = ProcStats(pid)
self.assertIsInstance(ps.pid, int)
self.assertGreaterEqual(ps.interval, 0.0)
def test_create_by_pid(self):
pid = os.getpid()
ps = ProcStats(pid)
self.assertIsInstance(ps.pid, int)
def test_create_by_bad_pid(self):
invalid_pid = 9999999
self.assertRaises(psutil.NoSuchProcess, ProcStats, invalid_pid)
def test_get_stat(self):
pid = os.getpid()
ps = ProcStats(pid)
stat = ps.get_stat()
self.assertIsInstance(stat['name'], str)
self.assertGreaterEqual(stat['cpu_percent'], 0.0)
self.assertLessEqual(stat['cpu_percent'], 100.0)
self.assertIsInstance(stat['cpu_percent'], float)
self.assertGreaterEqual(stat['memory_percent'], 0.0)
self.assertLessEqual(stat['memory_percent'], 100.0)
self.assertIsInstance(stat['memory_percent'], float)
self.assertIsInstance(stat['io_read_count'], int)
self.assertIsInstance(stat['io_write_count'], int)
self.assertIsInstance(stat['io_read_bytes'], int)
self.assertIsInstance(stat['io_write_bytes'], int)
self.assertIsInstance(stat['num_threads'], int)
self.assertIsInstance(stat['num_fds'], int)
def test_run_get_stats(self):
pid = os.getpid()
ps = ProcStats(pid, 1.0)
ps.start()
time.sleep(3)
ps.stop()
stats = ps.get_stats()
self.assertGreater(len(stats), 1)
dt, stat = stats[0]
self.assertIsInstance(dt, datetime.datetime)
self.assertGreater(len(stat), 1)
class ProcStatsSubprocessTestCase(unittest.TestCase):
def setUp(self):
self.p = subprocess.Popen(['sleep', '2'])
self.addCleanup(self.p.kill)
def test_with_subprocess(self):
ps = ProcStats(self.p.pid)
self.assertIsInstance(ps.pid, int)
stat = ps.get_stat()
self.assertEqual(stat['name'], 'sleep')
def test_dead_subprocess(self):
p = subprocess.Popen(['sleep', '0.5'])
ps = ProcStats(p.pid)
time.sleep(1.0)
self.assertRaises(psutil.AccessDenied, ps.get_stat)
if __name__ == '__main__':
unittest.main(verbosity=2)
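# --- Illustrative sketch (not from the procstats repo) ---
# The tests above pin down the ProcStats interface: construction from a pid
# (validated via psutil), a sampling interval, one-shot get_stat(), and
# start()/stop() polling collected by get_stats(). A minimal version could
# look like the following, assuming a reasonably modern psutil; every name
# other than the asserted dict keys is a guess, not the real implementation.
import threading


class ProcStatsSketch:
    def __init__(self, pid, interval=1.0):
        self.process = psutil.Process(pid)  # raises psutil.NoSuchProcess on a bad pid
        self.pid = self.process.pid
        self.interval = interval
        self._samples = []
        self._timer = None

    def get_stat(self):
        # One snapshot covering every field asserted in test_get_stat().
        io = self.process.io_counters()
        return {
            'name': self.process.name(),
            'cpu_percent': self.process.cpu_percent(interval=None),
            'memory_percent': self.process.memory_percent(),
            'io_read_count': io.read_count,
            'io_write_count': io.write_count,
            'io_read_bytes': io.read_bytes,
            'io_write_bytes': io.write_bytes,
            'num_threads': self.process.num_threads(),
            'num_fds': self.process.num_fds(),
        }

    def start(self):
        # Record a timestamped sample, then re-arm the timer.
        self._samples.append((datetime.datetime.now(), self.get_stat()))
        self._timer = threading.Timer(self.interval, self.start)
        self._timer.start()

    def stop(self):
        if self._timer is not None:
            self._timer.cancel()

    def get_stats(self):
        return list(self._samples)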
| cgoldberg/procstats | test_procstats.py | Python | gpl-3.0 | 3,147 |
default_app_config = 'exams.apps.ExamsConfig'
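# (Django convention, deprecated since Django 3.2: points at the AppConfig
# subclass to use when the app is listed bare in INSTALLED_APPS.)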
| Zahajamaan/Fudulbank | exams/__init__.py | Python | agpl-3.0 | 46 |
# Copyright 2018 Verizon Wireless
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from unittest import mock
from glance_store import exceptions as store_exceptions
from oslo_config import cfg
from oslo_utils import units
import taskflow
import glance.async_.flows.api_image_import as import_flow
from glance.common import exception
from glance.common.scripts.image_import import main as image_import
from glance import context
from glance import gateway
import glance.tests.utils as test_utils
from cursive import exception as cursive_exception
from glance.i18n import _
CONF = cfg.CONF
TASK_TYPE = 'api_image_import'
TASK_ID1 = 'dbbe7231-020f-4311-87e1-5aaa6da56c02'
IMAGE_ID1 = '41f5b3b0-f54c-4cef-bd45-ce3e376a142f'
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
class TestApiImageImportTask(test_utils.BaseTestCase):
def setUp(self):
super(TestApiImageImportTask, self).setUp()
self.wd_task_input = {
"import_req": {
"method": {
"name": "web-download",
"uri": "http://example.com/image.browncow"
}
}
}
self.gd_task_input = {
"import_req": {
"method": {
"name": "glance-direct"
}
}
}
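# For reference: 'web-download' asks Glance to fetch the image from the given
# URI itself, while 'glance-direct' imports data the client has already staged.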
self.mock_task_repo = mock.MagicMock()
self.mock_image_repo = mock.MagicMock()
self.mock_image = self.mock_image_repo.get.return_value
self.mock_image.extra_properties = {
'os_glance_import_task': TASK_ID1,
'os_glance_stage_host': 'http://glance2',
}
@mock.patch('glance.async_.flows.api_image_import._VerifyStaging.__init__')
@mock.patch('taskflow.patterns.linear_flow.Flow.add')
@mock.patch('taskflow.patterns.linear_flow.__init__')
def _pass_uri(self, mock_lf_init, mock_flow_add, mock_VS_init,
uri, file_uri, import_req):
flow_kwargs = {"task_id": TASK_ID1,
"task_type": TASK_TYPE,
"task_repo": self.mock_task_repo,
"image_repo": self.mock_image_repo,
"image_id": IMAGE_ID1,
"context": mock.MagicMock(),
"import_req": import_req}
mock_lf_init.return_value = None
mock_VS_init.return_value = None
self.config(node_staging_uri=uri)
import_flow.get_flow(**flow_kwargs)
mock_VS_init.assert_called_with(TASK_ID1, TASK_TYPE,
self.mock_task_repo,
file_uri)
def test_get_flow_handles_node_uri_with_ending_slash(self):
test_uri = 'file:///some/where/'
expected_uri = '{0}{1}'.format(test_uri, IMAGE_ID1)
self._pass_uri(uri=test_uri, file_uri=expected_uri,
import_req=self.gd_task_input['import_req'])
self._pass_uri(uri=test_uri, file_uri=expected_uri,
import_req=self.wd_task_input['import_req'])
def test_get_flow_handles_node_uri_without_ending_slash(self):
test_uri = 'file:///some/where'
expected_uri = '{0}/{1}'.format(test_uri, IMAGE_ID1)
self._pass_uri(uri=test_uri, file_uri=expected_uri,
import_req=self.wd_task_input['import_req'])
self._pass_uri(uri=test_uri, file_uri=expected_uri,
import_req=self.gd_task_input['import_req'])
def test_get_flow_pops_stage_host(self):
import_flow.get_flow(task_id=TASK_ID1, task_type=TASK_TYPE,
task_repo=self.mock_task_repo,
image_repo=self.mock_image_repo,
image_id=IMAGE_ID1,
context=mock.MagicMock(),
import_req=self.gd_task_input['import_req'])
self.assertNotIn('os_glance_stage_host',
self.mock_image.extra_properties)
self.assertIn('os_glance_import_task',
self.mock_image.extra_properties)
def test_assert_quota_no_task(self):
ignored = mock.MagicMock()
task_repo = mock.MagicMock()
task_repo.get.return_value = None
task_id = 'some-task'
enforce_fn = mock.MagicMock()
enforce_fn.side_effect = exception.LimitExceeded
with mock.patch.object(import_flow, 'LOG') as mock_log:
self.assertRaises(exception.LimitExceeded,
import_flow.assert_quota,
ignored, task_repo, task_id,
[], ignored, enforce_fn)
task_repo.get.assert_called_once_with('some-task')
# Make sure we logged instead of crashed if no task was found
mock_log.error.assert_called_once_with('Failed to find task %r to '
'update after quota failure',
'some-task')
task_repo.save.assert_not_called()
def test_assert_quota(self):
ignored = mock.MagicMock()
task_repo = mock.MagicMock()
task_id = 'some-task'
enforce_fn = mock.MagicMock()
enforce_fn.side_effect = exception.LimitExceeded
wrapper = mock.MagicMock()
action = wrapper.__enter__.return_value
action.image_status = 'importing'
self.assertRaises(exception.LimitExceeded,
import_flow.assert_quota,
ignored, task_repo, task_id,
['store1'], wrapper, enforce_fn)
action.remove_importing_stores.assert_called_once_with(['store1'])
action.set_image_attribute.assert_called_once_with(status='queued')
task_repo.get.assert_called_once_with('some-task')
task_repo.save.assert_called_once_with(task_repo.get.return_value)
def test_assert_quota_copy(self):
ignored = mock.MagicMock()
task_repo = mock.MagicMock()
task_id = 'some-task'
enforce_fn = mock.MagicMock()
enforce_fn.side_effect = exception.LimitExceeded
wrapper = mock.MagicMock()
action = wrapper.__enter__.return_value
action.image_status = 'active'
self.assertRaises(exception.LimitExceeded,
import_flow.assert_quota,
ignored, task_repo, task_id,
['store1'], wrapper, enforce_fn)
action.remove_importing_stores.assert_called_once_with(['store1'])
action.set_image_attribute.assert_not_called()
task_repo.get.assert_called_once_with('some-task')
task_repo.save.assert_called_once_with(task_repo.get.return_value)
class TestImageLock(test_utils.BaseTestCase):
def setUp(self):
super(TestImageLock, self).setUp()
self.img_repo = mock.MagicMock()
@mock.patch('glance.async_.flows.api_image_import.LOG')
def test_execute_confirms_lock(self, mock_log):
self.img_repo.get.return_value.extra_properties = {
'os_glance_import_task': TASK_ID1}
wrapper = import_flow.ImportActionWrapper(self.img_repo, IMAGE_ID1,
TASK_ID1)
imagelock = import_flow._ImageLock(TASK_ID1, TASK_TYPE, wrapper)
imagelock.execute()
mock_log.debug.assert_called_once_with('Image %(image)s import task '
'%(task)s lock confirmed',
{'image': IMAGE_ID1,
'task': TASK_ID1})
@mock.patch('glance.async_.flows.api_image_import.LOG')
def test_execute_confirms_lock_not_held(self, mock_log):
wrapper = import_flow.ImportActionWrapper(self.img_repo, IMAGE_ID1,
TASK_ID1)
imagelock = import_flow._ImageLock(TASK_ID1, TASK_TYPE, wrapper)
self.assertRaises(exception.TaskAbortedError,
imagelock.execute)
@mock.patch('glance.async_.flows.api_image_import.LOG')
def test_revert_drops_lock(self, mock_log):
wrapper = import_flow.ImportActionWrapper(self.img_repo, IMAGE_ID1,
TASK_ID1)
imagelock = import_flow._ImageLock(TASK_ID1, TASK_TYPE, wrapper)
with mock.patch.object(wrapper, 'drop_lock_for_task') as mock_drop:
imagelock.revert(None)
mock_drop.assert_called_once_with()
mock_log.debug.assert_called_once_with('Image %(image)s import task '
'%(task)s dropped its lock '
'after failure',
{'image': IMAGE_ID1,
'task': TASK_ID1})
@mock.patch('glance.async_.flows.api_image_import.LOG')
def test_revert_drops_lock_missing(self, mock_log):
wrapper = import_flow.ImportActionWrapper(self.img_repo, IMAGE_ID1,
TASK_ID1)
imagelock = import_flow._ImageLock(TASK_ID1, TASK_TYPE, wrapper)
with mock.patch.object(wrapper, 'drop_lock_for_task') as mock_drop:
mock_drop.side_effect = exception.NotFound()
imagelock.revert(None)
mock_log.warning.assert_called_once_with('Image %(image)s import task '
'%(task)s lost its lock '
'during execution!',
{'image': IMAGE_ID1,
'task': TASK_ID1})
class TestImportToStoreTask(test_utils.BaseTestCase):
def setUp(self):
super(TestImportToStoreTask, self).setUp()
self.gateway = gateway.Gateway()
self.context = context.RequestContext(user_id=TENANT1,
project_id=TENANT1,
overwrite=False)
self.img_factory = self.gateway.get_image_factory(self.context)
def test_execute(self):
wrapper = mock.MagicMock()
action = mock.MagicMock()
task_repo = mock.MagicMock()
wrapper.__enter__.return_value = action
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, wrapper,
"http://url",
"store1", False,
True)
# Assert file_path is honored
with mock.patch.object(image_import, '_execute') as mock_execute:
image_import.execute(mock.sentinel.path)
mock_execute.assert_called_once_with(action, mock.sentinel.path)
# Assert file_path is optional
with mock.patch.object(image_import, '_execute') as mock_execute:
image_import.execute()
mock_execute.assert_called_once_with(action, None)
def test_execute_body_with_store(self):
image = mock.MagicMock()
img_repo = mock.MagicMock()
img_repo.get.return_value = image
task_repo = mock.MagicMock()
wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1,
TASK_ID1)
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, wrapper,
"http://url",
"store1", False,
True)
action = mock.MagicMock()
image_import._execute(action, mock.sentinel.path)
action.set_image_data.assert_called_once_with(
mock.sentinel.path,
TASK_ID1, backend='store1',
set_active=True,
callback=image_import._status_callback)
action.remove_importing_stores.assert_called_once_with(['store1'])
def test_execute_body_with_store_no_path(self):
image = mock.MagicMock()
img_repo = mock.MagicMock()
img_repo.get.return_value = image
task_repo = mock.MagicMock()
wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1,
TASK_ID1)
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, wrapper,
"http://url",
"store1", False,
True)
action = mock.MagicMock()
image_import._execute(action, None)
action.set_image_data.assert_called_once_with(
'http://url',
TASK_ID1, backend='store1',
set_active=True,
callback=image_import._status_callback)
action.remove_importing_stores.assert_called_once_with(['store1'])
def test_execute_body_without_store(self):
image = mock.MagicMock()
img_repo = mock.MagicMock()
img_repo.get.return_value = image
task_repo = mock.MagicMock()
wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1,
TASK_ID1)
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, wrapper,
"http://url",
None, False,
True)
action = mock.MagicMock()
image_import._execute(action, mock.sentinel.path)
action.set_image_data.assert_called_once_with(
mock.sentinel.path,
TASK_ID1, backend=None,
set_active=True,
callback=image_import._status_callback)
action.remove_importing_stores.assert_not_called()
@mock.patch('glance.async_.flows.api_image_import.LOG.debug')
@mock.patch('oslo_utils.timeutils.now')
def test_status_callback_limits_rate(self, mock_now, mock_log):
img_repo = mock.MagicMock()
task_repo = mock.MagicMock()
task_repo.get.return_value.status = 'processing'
wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1,
TASK_ID1)
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, wrapper,
"http://url",
None, False,
True)
expected_calls = []
log_call = mock.call('Image import %(image_id)s copied %(copied)i MiB',
{'image_id': IMAGE_ID1,
'copied': 0})
action = mock.MagicMock(image_id=IMAGE_ID1)
mock_now.return_value = 1000
image_import._status_callback(action, 32, 32)
# First call will emit immediately because we only ran __init__
# which sets the last status to zero
expected_calls.append(log_call)
mock_log.assert_has_calls(expected_calls)
image_import._status_callback(action, 32, 64)
# Second call will not emit any other logs because no time
# has passed
mock_log.assert_has_calls(expected_calls)
mock_now.return_value += 190
image_import._status_callback(action, 32, 96)
# Third call will not emit any other logs because not enough
# time has passed
mock_log.assert_has_calls(expected_calls)
mock_now.return_value += 300
image_import._status_callback(action, 32, 128)
# Fourth call will emit because we crossed five minutes
expected_calls.append(log_call)
mock_log.assert_has_calls(expected_calls)
mock_now.return_value += 150
image_import._status_callback(action, 32, 128)
# Fifth call will not emit any other logs because not enough
# time has passed
mock_log.assert_has_calls(expected_calls)
mock_now.return_value += 3600
image_import._status_callback(action, 32, 128)
# Sixth call will emit because we crossed five minutes
expected_calls.append(log_call)
mock_log.assert_has_calls(expected_calls)
def test_raises_when_image_deleted(self):
img_repo = mock.MagicMock()
task_repo = mock.MagicMock()
wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1,
TASK_ID1)
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, wrapper,
"http://url",
"store1", False,
True)
image = self.img_factory.new_image(image_id=UUID1)
image.status = "deleted"
img_repo.get.return_value = image
self.assertRaises(exception.ImportTaskError, image_import.execute)
@mock.patch("glance.async_.flows.api_image_import.image_import")
def test_remove_store_from_property(self, mock_import):
img_repo = mock.MagicMock()
task_repo = mock.MagicMock()
wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1,
TASK_ID1)
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, wrapper,
"http://url",
"store1", True,
True)
extra_properties = {"os_glance_importing_to_stores": "store1,store2",
"os_glance_import_task": TASK_ID1}
image = self.img_factory.new_image(image_id=UUID1,
extra_properties=extra_properties)
img_repo.get.return_value = image
image_import.execute()
self.assertEqual(
image.extra_properties['os_glance_importing_to_stores'], "store2")
def test_revert_updates_status_keys(self):
img_repo = mock.MagicMock()
task_repo = mock.MagicMock()
wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1,
TASK_ID1)
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, wrapper,
"http://url",
"store1", True,
True)
extra_properties = {"os_glance_importing_to_stores": "store1,store2",
"os_glance_import_task": TASK_ID1}
image = self.img_factory.new_image(image_id=UUID1,
extra_properties=extra_properties)
img_repo.get.return_value = image
fail_key = 'os_glance_failed_import'
pend_key = 'os_glance_importing_to_stores'
image_import.revert(None)
self.assertEqual('store2', image.extra_properties[pend_key])
try:
raise Exception('foo')
except Exception:
fake_exc_info = sys.exc_info()
extra_properties = {"os_glance_importing_to_stores": "store1,store2"}
image_import.revert(taskflow.types.failure.Failure(fake_exc_info))
self.assertEqual('store2', image.extra_properties[pend_key])
self.assertEqual('store1', image.extra_properties[fail_key])
@mock.patch("glance.async_.flows.api_image_import.image_import")
def test_raises_when_all_stores_must_succeed(self, mock_import):
img_repo = mock.MagicMock()
task_repo = mock.MagicMock()
wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1,
TASK_ID1)
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, wrapper,
"http://url",
"store1", True,
True)
extra_properties = {'os_glance_import_task': TASK_ID1}
image = self.img_factory.new_image(image_id=UUID1,
extra_properties=extra_properties)
img_repo.get.return_value = image
mock_import.set_image_data.side_effect = \
cursive_exception.SignatureVerificationError(
"Signature verification failed")
self.assertRaises(cursive_exception.SignatureVerificationError,
image_import.execute)
@mock.patch("glance.async_.flows.api_image_import.image_import")
def test_doesnt_raise_when_not_all_stores_must_succeed(self, mock_import):
img_repo = mock.MagicMock()
task_repo = mock.MagicMock()
wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1,
TASK_ID1)
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, wrapper,
"http://url",
"store1", False,
True)
extra_properties = {'os_glance_import_task': TASK_ID1}
image = self.img_factory.new_image(image_id=UUID1,
extra_properties=extra_properties)
img_repo.get.return_value = image
mock_import.set_image_data.side_effect = \
cursive_exception.SignatureVerificationError(
"Signature verification failed")
try:
image_import.execute()
self.assertEqual(image.extra_properties['os_glance_failed_import'],
"store1")
except cursive_exception.SignatureVerificationError:
self.fail("Exception shouldn't be raised")
@mock.patch('glance.common.scripts.utils.get_task')
def test_status_callback_updates_task_message(self, mock_get):
task_repo = mock.MagicMock()
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, mock.MagicMock(),
"http://url",
"store1", False,
True)
task = mock.MagicMock()
task.status = 'processing'
mock_get.return_value = task
action = mock.MagicMock()
image_import._status_callback(action, 128, 256 * units.Mi)
mock_get.assert_called_once_with(task_repo, TASK_ID1)
task_repo.save.assert_called_once_with(task)
self.assertEqual(_('Copied %i MiB' % 256), task.message)
@mock.patch('glance.common.scripts.utils.get_task')
def test_status_aborts_missing_task(self, mock_get):
task_repo = mock.MagicMock()
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, mock.MagicMock(),
"http://url",
"store1", False,
True)
mock_get.return_value = None
action = mock.MagicMock()
self.assertRaises(exception.TaskNotFound,
image_import._status_callback,
action, 128, 256 * units.Mi)
mock_get.assert_called_once_with(task_repo, TASK_ID1)
task_repo.save.assert_not_called()
@mock.patch('glance.common.scripts.utils.get_task')
def test_status_aborts_invalid_task_state(self, mock_get):
task_repo = mock.MagicMock()
image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE,
task_repo, mock.MagicMock(),
"http://url",
"store1", False,
True)
task = mock.MagicMock()
task.status = 'failed'
mock_get.return_value = task
action = mock.MagicMock()
self.assertRaises(exception.TaskAbortedError,
image_import._status_callback,
action, 128, 256 * units.Mi)
mock_get.assert_called_once_with(task_repo, TASK_ID1)
task_repo.save.assert_not_called()
class TestDeleteFromFS(test_utils.BaseTestCase):
def test_delete_with_backends_deletes(self):
task = import_flow._DeleteFromFS(TASK_ID1, TASK_TYPE)
self.config(enabled_backends='file:foo')
with mock.patch.object(import_flow.store_api, 'delete') as mock_del:
task.execute(mock.sentinel.path)
mock_del.assert_called_once_with(
mock.sentinel.path,
'os_glance_staging_store')
def test_delete_with_backends_delete_fails(self):
self.config(enabled_backends='file:foo')
task = import_flow._DeleteFromFS(TASK_ID1, TASK_TYPE)
with mock.patch.object(import_flow.store_api, 'delete') as mock_del:
mock_del.side_effect = store_exceptions.NotFound(image=IMAGE_ID1,
message='Testing')
# If we didn't swallow this we would explode here
task.execute(mock.sentinel.path)
mock_del.assert_called_once_with(
mock.sentinel.path,
'os_glance_staging_store')
# Raise something unexpected and make sure it bubbles up
mock_del.side_effect = RuntimeError
self.assertRaises(RuntimeError,
task.execute, mock.sentinel.path)
@mock.patch('os.path.exists')
@mock.patch('os.unlink')
def test_delete_without_backends_exists(self, mock_unlink, mock_exists):
mock_exists.return_value = True
task = import_flow._DeleteFromFS(TASK_ID1, TASK_TYPE)
task.execute('1234567foo')
# FIXME(danms): I have no idea why the code arbitrarily snips
# the first seven characters from the path. Need a comment or
# *something*.
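# (Seven characters is exactly the length of a 'file://' URI scheme prefix,
# which is the most plausible reading; noted here as a guess, not a fix.)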
mock_unlink.assert_called_once_with('foo')
mock_unlink.reset_mock()
mock_unlink.side_effect = OSError(123, 'failed')
# Make sure we swallow the OSError and don't explode
task.execute('1234567foo')
@mock.patch('os.path.exists')
@mock.patch('os.unlink')
def test_delete_without_backends_missing(self, mock_unlink, mock_exists):
mock_exists.return_value = False
task = import_flow._DeleteFromFS(TASK_ID1, TASK_TYPE)
task.execute('foo')
mock_unlink.assert_not_called()
class TestImportCopyImageTask(test_utils.BaseTestCase):
def setUp(self):
super(TestImportCopyImageTask, self).setUp()
self.context = context.RequestContext(user_id=TENANT1,
project_id=TENANT1,
overwrite=False)
@mock.patch("glance.async_.flows.api_image_import.image_import")
@mock.patch('glance_store.get_store_from_store_identifier')
def test_init_copy_flow_as_non_owner(self, mock_gs, mock_import):
img_repo = mock.MagicMock()
admin_repo = mock.MagicMock()
fake_req = {"method": {"name": "copy-image"},
"backend": ['cheap']}
fake_img = mock.MagicMock()
fake_img.id = IMAGE_ID1
fake_img.status = 'active'
fake_img.extra_properties = {'os_glance_import_task': TASK_ID1}
admin_repo.get.return_value = fake_img
import_flow.get_flow(task_id=TASK_ID1,
task_type=TASK_TYPE,
task_repo=mock.MagicMock(),
image_repo=img_repo,
admin_repo=admin_repo,
image_id=IMAGE_ID1,
import_req=fake_req,
context=self.context,
backend=['cheap'])
# Assert that we saved the image with the admin repo instead of the
# user-context one at the end of get_flow() when we initialize the
# parameters.
admin_repo.save.assert_called_once_with(fake_img, 'active')
img_repo.save.assert_not_called()
class TestVerifyImageStateTask(test_utils.BaseTestCase):
def test_verify_active_status(self):
fake_img = mock.MagicMock(status='active',
extra_properties={
'os_glance_import_task': TASK_ID1})
mock_repo = mock.MagicMock()
mock_repo.get.return_value = fake_img
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
task = import_flow._VerifyImageState(TASK_ID1, TASK_TYPE,
wrapper, 'anything!')
task.execute()
fake_img.status = 'importing'
self.assertRaises(import_flow._NoStoresSucceeded,
task.execute)
def test_revert_copy_status_unchanged(self):
wrapper = mock.MagicMock()
task = import_flow._VerifyImageState(TASK_ID1, TASK_TYPE,
wrapper, 'copy-image')
task.revert(mock.sentinel.result)
# If we are doing copy-image, no state update should be made
wrapper.__enter__.return_value.set_image_attribute.assert_not_called()
def test_reverts_state_nocopy(self):
wrapper = mock.MagicMock()
task = import_flow._VerifyImageState(TASK_ID1, TASK_TYPE,
wrapper, 'glance-direct')
task.revert(mock.sentinel.result)
# Except for copy-image, image state should revert to queued
action = wrapper.__enter__.return_value
action.set_image_attribute.assert_called_once_with(status='queued')
class TestImportActionWrapper(test_utils.BaseTestCase):
def test_wrapper_success(self):
mock_repo = mock.MagicMock()
mock_repo.get.return_value.extra_properties = {
'os_glance_import_task': TASK_ID1}
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
with wrapper as action:
self.assertIsInstance(action, import_flow._ImportActions)
mock_repo.get.assert_has_calls([mock.call(IMAGE_ID1),
mock.call(IMAGE_ID1)])
mock_repo.save.assert_called_once_with(
mock_repo.get.return_value,
mock_repo.get.return_value.status)
def test_wrapper_failure(self):
mock_repo = mock.MagicMock()
mock_repo.get.return_value.extra_properties = {
'os_glance_import_task': TASK_ID1}
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
class SpecificError(Exception):
pass
try:
with wrapper:
raise SpecificError('some failure')
except SpecificError:
# NOTE(danms): Make sure we only caught the test exception
# and aren't hiding anything else
pass
mock_repo.get.assert_called_once_with(IMAGE_ID1)
mock_repo.save.assert_not_called()
@mock.patch.object(import_flow, 'LOG')
def test_wrapper_logs_status(self, mock_log):
mock_repo = mock.MagicMock()
mock_image = mock_repo.get.return_value
mock_image.extra_properties = {'os_glance_import_task': TASK_ID1}
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
mock_image.status = 'foo'
with wrapper as action:
action.set_image_attribute(status='bar')
mock_log.debug.assert_called_once_with(
'Image %(image_id)s status changing from '
'%(old_status)s to %(new_status)s',
{'image_id': IMAGE_ID1,
'old_status': 'foo',
'new_status': 'bar'})
self.assertEqual('bar', mock_image.status)
def test_image_id_property(self):
mock_repo = mock.MagicMock()
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
self.assertEqual(IMAGE_ID1, wrapper.image_id)
def test_set_image_attribute(self):
mock_repo = mock.MagicMock()
mock_image = mock_repo.get.return_value
mock_image.extra_properties = {'os_glance_import_task': TASK_ID1}
mock_image.status = 'bar'
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
with wrapper as action:
action.set_image_attribute(status='foo', virtual_size=123,
size=64)
mock_repo.save.assert_called_once_with(mock_image, 'bar')
self.assertEqual('foo', mock_image.status)
self.assertEqual(123, mock_image.virtual_size)
self.assertEqual(64, mock_image.size)
def test_set_image_attribute_disallowed(self):
mock_repo = mock.MagicMock()
mock_image = mock_repo.get.return_value
mock_image.extra_properties = {'os_glance_import_task': TASK_ID1}
mock_image.status = 'bar'
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
with wrapper as action:
self.assertRaises(AttributeError,
action.set_image_attribute, id='foo')
@mock.patch.object(import_flow, 'LOG')
def test_set_image_extra_properties(self, mock_log):
mock_repo = mock.MagicMock()
mock_image = mock_repo.get.return_value
mock_image.image_id = IMAGE_ID1
mock_image.extra_properties = {'os_glance_import_task': TASK_ID1}
mock_image.status = 'bar'
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
# One banned property
with wrapper as action:
action.set_image_extra_properties({'os_glance_foo': 'bar'})
self.assertEqual({'os_glance_import_task': TASK_ID1},
mock_image.extra_properties)
mock_log.warning.assert_called()
mock_log.warning.reset_mock()
# Two banned properties
with wrapper as action:
action.set_image_extra_properties({'os_glance_foo': 'bar',
'os_glance_baz': 'bat'})
self.assertEqual({'os_glance_import_task': TASK_ID1},
mock_image.extra_properties)
mock_log.warning.assert_called()
mock_log.warning.reset_mock()
# One banned and one allowed property
with wrapper as action:
action.set_image_extra_properties({'foo': 'bar',
'os_glance_foo': 'baz'})
self.assertEqual({'foo': 'bar',
'os_glance_import_task': TASK_ID1},
mock_image.extra_properties)
mock_log.warning.assert_called_once_with(
'Dropping %(key)s=%(val)s during metadata injection for %(image)s',
{'key': 'os_glance_foo', 'val': 'baz',
'image': IMAGE_ID1})
def test_image_size(self):
mock_repo = mock.MagicMock()
mock_image = mock_repo.get.return_value
mock_image.image_id = IMAGE_ID1
mock_image.extra_properties = {'os_glance_import_task': TASK_ID1}
mock_image.size = 123
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
with wrapper as action:
self.assertEqual(123, action.image_size)
def test_image_locations(self):
mock_repo = mock.MagicMock()
mock_image = mock_repo.get.return_value
mock_image.image_id = IMAGE_ID1
mock_image.extra_properties = {'os_glance_import_task': TASK_ID1}
mock_image.locations = {'some': {'complex': ['structure']}}
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
with wrapper as action:
self.assertEqual({'some': {'complex': ['structure']}},
action.image_locations)
# Mutate our copy
action.image_locations['foo'] = 'bar'
# Make sure we did not mutate the image itself
self.assertEqual({'some': {'complex': ['structure']}},
mock_image.locations)
def test_drop_lock_for_task(self):
mock_repo = mock.MagicMock()
mock_repo.get.return_value.extra_properties = {
'os_glance_import_task': TASK_ID1}
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
wrapper.drop_lock_for_task()
mock_repo.delete_property_atomic.assert_called_once_with(
mock_repo.get.return_value, 'os_glance_import_task', TASK_ID1)
def test_assert_task_lock(self):
mock_repo = mock.MagicMock()
mock_repo.get.return_value.extra_properties = {
'os_glance_import_task': TASK_ID1}
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
wrapper.assert_task_lock()
# Try again with a different task ID and it should fail
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
'foo')
self.assertRaises(exception.TaskAbortedError,
wrapper.assert_task_lock)
def _grab_image(self, wrapper):
with wrapper:
pass
@mock.patch.object(import_flow, 'LOG')
def test_check_task_lock(self, mock_log):
mock_repo = mock.MagicMock()
wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1,
TASK_ID1)
image = mock.MagicMock(image_id=IMAGE_ID1)
image.extra_properties = {'os_glance_import_task': TASK_ID1}
mock_repo.get.return_value = image
self._grab_image(wrapper)
mock_log.error.assert_not_called()
image.extra_properties['os_glance_import_task'] = 'somethingelse'
self.assertRaises(exception.TaskAbortedError,
self._grab_image, wrapper)
mock_log.error.assert_called_once_with(
'Image %(image)s import task %(task)s attempted to take action on '
'image, but other task %(other)s holds the lock; Aborting.',
{'image': image.image_id,
'task': TASK_ID1,
'other': 'somethingelse'})
class TestImportActions(test_utils.BaseTestCase):
def setUp(self):
super(TestImportActions, self).setUp()
self.image = mock.MagicMock()
self.image.image_id = IMAGE_ID1
self.image.status = 'active'
self.image.extra_properties = {'speed': '88mph'}
self.image.checksum = mock.sentinel.checksum
self.image.os_hash_algo = mock.sentinel.hash_algo
self.image.os_hash_value = mock.sentinel.hash_value
self.image.size = mock.sentinel.size
self.actions = import_flow._ImportActions(self.image)
def test_image_property_proxies(self):
self.assertEqual(IMAGE_ID1, self.actions.image_id)
self.assertEqual('active', self.actions.image_status)
def test_merge_store_list(self):
# Addition with no existing property works
self.actions.merge_store_list('stores', ['foo', 'bar'])
self.assertEqual({'speed': '88mph',
'stores': 'bar,foo'},
self.image.extra_properties)
# Addition adds to the list
self.actions.merge_store_list('stores', ['baz'])
self.assertEqual('bar,baz,foo', self.image.extra_properties['stores'])
# Removal preserves the rest
self.actions.merge_store_list('stores', ['foo'], subtract=True)
self.assertEqual('bar,baz', self.image.extra_properties['stores'])
# Duplicates aren't duplicated
self.actions.merge_store_list('stores', ['bar'])
self.assertEqual('bar,baz', self.image.extra_properties['stores'])
# Removing the last store leaves the key empty but present
self.actions.merge_store_list('stores', ['baz', 'bar'], subtract=True)
self.assertEqual('', self.image.extra_properties['stores'])
# Make sure we ignore falsey stores
self.actions.merge_store_list('stores', ['', None])
self.assertEqual('', self.image.extra_properties['stores'])
@mock.patch.object(import_flow, 'LOG')
def test_merge_store_logs_info(self, mock_log):
# Removal from non-present key logs debug, but does not fail
self.actions.merge_store_list('stores', ['foo,bar'], subtract=True)
mock_log.debug.assert_has_calls([
mock.call(
'Stores %(stores)s not in %(key)s for image %(image_id)s',
{'image_id': IMAGE_ID1,
'key': 'stores',
'stores': 'foo,bar'}),
mock.call(
'Image %(image_id)s %(key)s=%(stores)s',
{'image_id': IMAGE_ID1,
'key': 'stores',
'stores': ''}),
])
mock_log.debug.reset_mock()
self.actions.merge_store_list('stores', ['foo'])
self.assertEqual('foo', self.image.extra_properties['stores'])
mock_log.debug.reset_mock()
# Removal from a list where store is not present logs debug,
# but does not fail
self.actions.merge_store_list('stores', ['bar'], subtract=True)
self.assertEqual('foo', self.image.extra_properties['stores'])
mock_log.debug.assert_has_calls([
mock.call(
'Stores %(stores)s not in %(key)s for image %(image_id)s',
{'image_id': IMAGE_ID1,
'key': 'stores',
'stores': 'bar'}),
mock.call(
'Image %(image_id)s %(key)s=%(stores)s',
{'image_id': IMAGE_ID1,
'key': 'stores',
'stores': 'foo'}),
])
def test_store_list_helpers(self):
self.actions.add_importing_stores(['foo', 'bar', 'baz'])
self.actions.remove_importing_stores(['bar'])
self.actions.add_failed_stores(['foo', 'bar'])
self.actions.remove_failed_stores(['foo'])
self.assertEqual({'speed': '88mph',
'os_glance_importing_to_stores': 'baz,foo',
'os_glance_failed_import': 'bar'},
self.image.extra_properties)
@mock.patch.object(image_import, 'set_image_data')
def test_set_image_data(self, mock_sid):
self.assertEqual(mock_sid.return_value,
self.actions.set_image_data(
mock.sentinel.uri, mock.sentinel.task_id,
mock.sentinel.backend, mock.sentinel.set_active))
mock_sid.assert_called_once_with(
self.image, mock.sentinel.uri, mock.sentinel.task_id,
backend=mock.sentinel.backend, set_active=mock.sentinel.set_active,
callback=None)
@mock.patch.object(image_import, 'set_image_data')
def test_set_image_data_with_callback(self, mock_sid):
def fake_set_image_data(image, uri, task_id, backend=None,
set_active=False,
callback=None):
callback(mock.sentinel.chunk, mock.sentinel.total)
mock_sid.side_effect = fake_set_image_data
callback = mock.MagicMock()
self.actions.set_image_data(mock.sentinel.uri, mock.sentinel.task_id,
mock.sentinel.backend,
mock.sentinel.set_active,
callback=callback)
# Make sure our callback was triggered through the functools.partial
# to include the original params and the action wrapper
callback.assert_called_once_with(self.actions,
mock.sentinel.chunk,
mock.sentinel.total)
def test_remove_location_for_store(self):
self.image.locations = [
{},
{'metadata': {}},
{'metadata': {'store': 'foo'}},
{'metadata': {'store': 'bar'}},
]
self.actions.remove_location_for_store('foo')
self.assertEqual([{}, {'metadata': {}},
{'metadata': {'store': 'bar'}}],
self.image.locations)
# Add a second definition for bar and make sure only one is removed
self.image.locations.append({'metadata': {'store': 'bar'}})
self.actions.remove_location_for_store('bar')
self.assertEqual([{}, {'metadata': {}},
{'metadata': {'store': 'bar'}}],
self.image.locations)
def test_remove_location_for_store_last_location(self):
self.image.locations = [{'metadata': {'store': 'foo'}}]
self.actions.remove_location_for_store('foo')
self.assertEqual([], self.image.locations)
self.assertIsNone(self.image.checksum)
self.assertIsNone(self.image.os_hash_algo)
self.assertIsNone(self.image.os_hash_value)
self.assertIsNone(self.image.size)
@mock.patch.object(import_flow, 'LOG')
def test_remove_location_for_store_pop_failures(self, mock_log):
class TestList(list):
def pop(self):
pass
self.image.locations = TestList([{'metadata': {'store': 'foo'}}])
with mock.patch.object(self.image.locations, 'pop',
new_callable=mock.PropertyMock) as mock_pop:
mock_pop.side_effect = store_exceptions.NotFound(image='image')
self.actions.remove_location_for_store('foo')
mock_log.warning.assert_called_once_with(
_('Error deleting from store foo when reverting.'))
mock_log.warning.reset_mock()
mock_pop.side_effect = store_exceptions.Forbidden()
self.actions.remove_location_for_store('foo')
mock_log.warning.assert_called_once_with(
_('Error deleting from store foo when reverting.'))
mock_log.warning.reset_mock()
mock_pop.side_effect = Exception
self.actions.remove_location_for_store('foo')
mock_log.warning.assert_called_once_with(
_('Unexpected exception when deleting from store foo.'))
mock_log.warning.reset_mock()
def test_pop_extra_property(self):
self.image.extra_properties = {'foo': '1', 'bar': 2}
# Should remove, if present
self.actions.pop_extra_property('foo')
self.assertEqual({'bar': 2}, self.image.extra_properties)
# Should not raise if missing
self.actions.pop_extra_property('baz')
self.assertEqual({'bar': 2}, self.image.extra_properties)
@mock.patch('glance.common.scripts.utils.get_task')
class TestCompleteTask(test_utils.BaseTestCase):
def setUp(self):
super(TestCompleteTask, self).setUp()
self.task_repo = mock.MagicMock()
self.task = mock.MagicMock()
self.wrapper = mock.MagicMock(image_id=IMAGE_ID1)
def test_execute(self, mock_get_task):
complete = import_flow._CompleteTask(TASK_ID1, TASK_TYPE,
self.task_repo, self.wrapper)
mock_get_task.return_value = self.task
complete.execute()
mock_get_task.assert_called_once_with(self.task_repo,
TASK_ID1)
self.task.succeed.assert_called_once_with({'image_id': IMAGE_ID1})
self.task_repo.save.assert_called_once_with(self.task)
self.wrapper.drop_lock_for_task.assert_called_once_with()
def test_execute_no_task(self, mock_get_task):
mock_get_task.return_value = None
complete = import_flow._CompleteTask(TASK_ID1, TASK_TYPE,
self.task_repo, self.wrapper)
complete.execute()
self.task_repo.save.assert_not_called()
self.wrapper.drop_lock_for_task.assert_called_once_with()
def test_execute_succeed_fails(self, mock_get_task):
mock_get_task.return_value = self.task
self.task.succeed.side_effect = Exception('testing')
complete = import_flow._CompleteTask(TASK_ID1, TASK_TYPE,
self.task_repo, self.wrapper)
complete.execute()
self.task.fail.assert_called_once_with(
_('Error: <class \'Exception\'>: testing'))
self.task_repo.save.assert_called_once_with(self.task)
self.wrapper.drop_lock_for_task.assert_called_once_with()
def test_execute_drop_lock_fails(self, mock_get_task):
mock_get_task.return_value = self.task
self.wrapper.drop_lock_for_task.side_effect = exception.NotFound()
complete = import_flow._CompleteTask(TASK_ID1, TASK_TYPE,
self.task_repo, self.wrapper)
with mock.patch('glance.async_.flows.api_image_import.LOG') as m_log:
complete.execute()
m_log.error.assert_called_once_with('Image %(image)s import task '
'%(task)s did not hold the '
'lock upon completion!',
{'image': IMAGE_ID1,
'task': TASK_ID1})
self.task.succeed.assert_called_once_with({'image_id': IMAGE_ID1})
| openstack/glance | glance/tests/unit/async_/flows/test_api_image_import.py | Python | apache-2.0 | 51,706 |
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from cirq.experiments import purity_from_probabilities
def test_purity_from_probabilities():
probabilities = np.random.uniform(0, 1, size=4)
probabilities /= np.sum(probabilities)
purity = purity_from_probabilities(4, probabilities)
np.testing.assert_allclose(purity, np.var(probabilities) * 80 / 3)
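# Background on the 80/3 factor: as I understand Cirq's speckle-purity
# estimator, purity_from_probabilities scales the variance of the measured
# probabilities by D**2 * (D + 1) / (D - 1) for Hilbert-space dimension D;
# with D = 4 that prefactor is 16 * 5 / 3 = 80 / 3.
def test_purity_prefactor_dimension_4():
    # Sanity-check the assumed prefactor for D = 4.
    D = 4
    np.testing.assert_allclose(D**2 * (D + 1) / (D - 1), 80 / 3)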
| quantumlib/Cirq | cirq-core/cirq/experiments/purity_estimation_test.py | Python | apache-2.0 | 923 |
from typing import Optional
from functools import wraps
from flask import request
from flask import current_app as app
def blueprint_auth(resource: Optional[str] = None):
"""
This decorator is used to add authentication to a Flask Blueprint
"""
def fdec(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = app.auth
if not auth.authorized([], resource or "_blueprint", request.method):
return auth.authenticate()
return f(*args, **kwargs)
return decorated
return fdec
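# Usage sketch (hypothetical blueprint and view names, not part of superdesk):
# every request to the decorated view runs app.auth.authorized() for the given
# resource and falls back to app.auth.authenticate() when the check fails.
from flask import Blueprint

_example_bp = Blueprint('example', __name__)


@_example_bp.route('/example')
@blueprint_auth('example')
def _example_view():
    return 'OK'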
| petrjasek/superdesk-core | superdesk/auth/decorator.py | Python | agpl-3.0 | 569 |
"""
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
from django.template import TemplateDoesNotExist
from django.template.backends.django import copy_exception
from django.utils.encoding import force_bytes
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def __init__(self, engine, loaders):
self.template_cache = {}
self.get_template_cache = {}
self.loaders = engine.get_template_loaders(loaders)
super().__init__(engine)
def get_contents(self, origin):
return origin.loader.get_contents(origin)
def get_template(self, template_name, skip=None):
"""
Perform the caching that gives this loader its name. Often many of the
templates attempted will be missing, so memory use is of concern here.
To keep it in check, caching behavior is a little complicated when a
template is not found. See ticket #26306 for more details.
With template debugging disabled, cache the TemplateDoesNotExist class
for every missing template and raise a new instance of it after
fetching it from the cache.
With template debugging enabled, a unique TemplateDoesNotExist object
is cached for each missing template to preserve debug data. When
raising an exception, Python sets __traceback__, __context__, and
__cause__ attributes on it. Those attributes can contain references to
all sorts of objects up the call chain and caching them creates a
memory leak. Thus, unraised copies of the exceptions are cached and
copies of those copies are raised after they're fetched from the cache.
"""
key = self.cache_key(template_name, skip)
cached = self.get_template_cache.get(key)
if cached:
if isinstance(cached, type) and issubclass(cached, TemplateDoesNotExist):
raise cached(template_name)
elif isinstance(cached, TemplateDoesNotExist):
raise copy_exception(cached)
return cached
try:
template = super().get_template(template_name, skip)
except TemplateDoesNotExist as e:
self.get_template_cache[key] = copy_exception(e) if self.engine.debug else TemplateDoesNotExist
raise
else:
self.get_template_cache[key] = template
return template
def get_template_sources(self, template_name):
for loader in self.loaders:
yield from loader.get_template_sources(template_name)
def cache_key(self, template_name, skip=None):
"""
Generate a cache key for the template name, dirs, and skip.
If skip is provided, only origins that match template_name are included
in the cache key. This ensures each template is only parsed and cached
once if contained in different extend chains like:
x -> a -> a
y -> a -> a
z -> a -> a
"""
dirs_prefix = ''
skip_prefix = ''
if skip:
matching = [origin.name for origin in skip if origin.template_name == template_name]
if matching:
skip_prefix = self.generate_hash(matching)
return '-'.join(s for s in (str(template_name), skip_prefix, dirs_prefix) if s)
def generate_hash(self, values):
return hashlib.sha1(force_bytes('|'.join(values))).hexdigest()
def reset(self):
"Empty the template cache."
self.template_cache.clear()
self.get_template_cache.clear()
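# Configuration sketch: the cached loader is enabled by wrapping other loaders
# in the TEMPLATES setting (standard Django usage, shown here for context; the
# variable name is illustrative).
TEMPLATES_EXAMPLE = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'DIRS': [],
    'OPTIONS': {
        'loaders': [
            ('django.template.loaders.cached.Loader', [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ]),
        ],
    },
}]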
| edmorley/django | django/template/loaders/cached.py | Python | bsd-3-clause | 3,669 |
# vim: set fileencoding=utf-8 :
from __future__ import absolute_import
from __future__ import print_function
import sys
import re
import boto
import boto.ec2
import boto.vpc
from .. import errors
from .. import formatting
from .. import provider
# -- API request/response stuff --
http_messages = { 400: "Problem with info",
401: "Authorisation failed",
403: "Permission denied",
412: "Dry-run mode" }
EC2_STATE_PENDING = 0
EC2_STATE_RUNNING = 16
EC2_STATE_SHUTTING_DOWN = 32
EC2_STATE_TERMINATED = 48
EC2_STATE_STOPPING = 64
EC2_STATE_STOPPED = 80
EC2_STATE_NONE = -1
action_state_map = {'status': EC2_STATE_NONE,
'fullstatus': EC2_STATE_NONE,
'start': EC2_STATE_RUNNING,
'restart': EC2_STATE_RUNNING,
'stop': EC2_STATE_STOPPED,
'kill': EC2_STATE_TERMINATED}
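# Polling sketch: take_action() records action_state_map[action] as
# self.desired_state, and num_deviants() then counts instances whose
# state_code still differs; a 'stop' run, for example, is finished once
# every instance reports EC2_STATE_STOPPED (80).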
__all__ = []
# *** CLASSES ***
class PerRegionCohort(provider.Cohort):
instances = None
desired_state = EC2_STATE_NONE
def __init__(self, region, ids, host_map, params):
"""@param host_map is the mapping, for all specified hosts in this
region, of instance ID to Ansible inventory object."""
super(PerRegionCohort,self).__init__(region, ids, host_map, params)
try:
# When letting the boto library read the credentials, tell it which profile to use, if any.
# TO-DO: test with .aws/credentials and .boto
if params['use_boto']:
profile = params.get("profile")
else:
profile = None
self.ec2 = boto.vpc.connect_to_region(region, profile_name=profile)
except boto.exception.NoAuthHandlerFound as e:
raise errors.AuthError("No credentials")
if not self.ec2:
raise errors.ProviderError("Unknown cloud region " + region)
def take_action(self, action):
super(PerRegionCohort,self).take_action(action)
logger = self.global_params['logger']
self.desired_state = action_state_map[action]
try:
if action == 'status' or action == 'fullstatus':
self.instances = self.ec2.get_only_instances(instance_ids=self.instance_ids)
for instance in self.instances:
# List the instance, unless its state doesn't match a
# limitation that's in force
if (instance.state_code == EC2_STATE_RUNNING or not self.global_params['only_running']) and \
(instance.state_code == EC2_STATE_STOPPED or not self.global_params['only_stopped']):
# Look up the inventory object from the EC2 object's ID
host = self.host_map[instance.id]
if action == 'status':
# TO-DO: Return a tuple instead
# Used to be "%s \t%s\t%s"
formatting.print_host(host.name, instance.id, instance.state)
elif action == 'fullstatus':
# à la knife node show
# TO-DO: Ansible groups, EC2 tags
info = { 'az': instance.placement,
'image_id': instance.image_id,
'instance_type': instance.instance_type,
'launch_time': instance.launch_time,
'private_ip': instance.private_ip_address,
'public_ip': instance.ip_address }
if instance.public_dns_name:
info['fqdn'] = instance.public_dns_name
elif instance.ip_address:
import socket
addr_info = socket.gethostbyaddr(instance.ip_address)
info['fqdn'] = addr_info[0]
if instance.vpc_id:
## template += """
## VPC: {vpc_id} (<name>), {subnet_id} (<name>)""".format(subnet_id=instance.subnet_id,
## subnet_name="",
## vpc_id=instance.vpc_id,
## vpc_name="")
info['vpc_info'] = instance.vpc_id
vpcs = self.ec2.get_all_vpcs(vpc_ids=[instance.vpc_id])
i_vpc = vpcs[0]
if 'Name' in i_vpc.tags:
info['vpc_info'] += " (%s)" % i_vpc.tags['Name']
info['vpc_info'] += ", " + instance.subnet_id
subnets = self.ec2.get_all_subnets(subnet_ids=[instance.subnet_id])
i_subnet = subnets[0]
if 'Name' in i_subnet.tags:
info['vpc_info'] += " (%s)" % i_subnet.tags['Name']
# TO-DO: use Jinja2 or something instead
if 'fqdn' in info:
template = "FQDN: {fqdn}\n"
else:
template = ""
template += """\
Instance type: {instance_type}
Location: {az} (availability zone)
IP addrs: {public_ip} {private_ip}"""
if 'vpc_info' in info:
template += """
VPC: {vpc_info}"""
template += """
Launch time: {launch_time} from AMI: {image_id}"""
formatting.print_host(host.name, instance.id, instance.state,
template.format(**info))
elif action == 'start':
self.ec2.start_instances(instance_ids=self.instance_ids, dry_run=self.global_params['dry_run'])
elif action == 'stop':
self.ec2.stop_instances(instance_ids=self.instance_ids, dry_run=self.global_params['dry_run'])
elif action == 'restart':
self.ec2.reboot_instances(instance_ids=self.instance_ids, dry_run=self.global_params['dry_run'])
elif action == 'kill':
if self.global_params['confirm']:
self.ec2.terminate_instances(instance_ids=self.instance_ids, dry_run=self.global_params['dry_run'])
else:
logger.report_notice("Not killing instances because -y wasn't specified")
sys.exit(0)
else:
raise errors.ActionError("Unknown action '%s'" % (action,))
except boto.exception.EC2ResponseError as e:
# An error occurred when making the API request.
## print(str(e.args[2]))
# Unlike other exceptions, don't use str(e), because this
# outputs everything including the response body
http_code = e.args[0]
category_str = http_messages.get(http_code, "Unknown error (code %d)" % (http_code,))
if http_code == 412: # Precondition Failed: means dry run
logger.report_notice(category_str,
"-- further info:\n ",
e.message)
sys.exit(0)
elif http_code == 401: # Unauthorized
raise errors.AuthError(category_str + " -- further info:\n " + e.message)
elif http_code in (400, 403): # Bad Request, Forbidden
# Find IDs of missing instance(s) in the response body, e.g.
# The instance IDs 'i-1414202a, i-3d4e0607' do not exist
# or The instance ID 'i-234aa3a9' does not exist
# (Do all matching first because otherwise stacked if-else doesn't work.)
match_single = re.search(r"instance ID '(i-[0-9a-f]*)'", str(e.args[2]))
match_multiple = re.search(r"instance IDs '(i-[0-9a-f]*[^']*)'", str(e.args[2]))
if match_single:
host = self.host_map[match_single.group(1)]
logger.report_error(category_str,
"for host '%s':\n " % (host.name,),
e.message)
elif match_multiple:
id_list_string = match_multiple.group(1)
id_list = id_list_string.split(", ")
assert len(id_list) >= 2
# actual error message, then list each host on a separate line
logger.report_error(category_str,
"for hosts; instance IDs do not exist:")
for id in id_list:
host = self.host_map[id]
print("\t%s (%s)" % (host.name, id), file=sys.stderr)
else:
# Something went wrong when trying to see which instance ID was bad
logger.report_error(category_str,
"-- bad instance ID; further info:\n ",
e.message)
# This won't cause an additional message
raise errors.MissingInstanceError("Instance ID problem")
else:
raise errors.InstanceError("Unknown instance problem", http_code)
def num_deviants(self, first_run):
"""Used during polling. Returns the number of instances in a given
cohort that don't match the state indicated by the given action."""
logger = self.global_params['logger']
if not self.instances:
self.instances = self.ec2.get_only_instances(instance_ids=self.instance_ids)
undesired_count = 0
if self.global_params['debug']:
print('[%d:]' % self.desired_state, end=' ')
for instance in self.instances:
if not first_run:
try:
instance.update(validate=True)
except ValueError:
raise errors.InstanceError("instance no longer exists; instance ID = ", instance.id)
if instance.state_code != self.desired_state:
undesired_count += 1
if self.global_params['debug']:
print('[%d != %d]' % (instance.state_code, self.desired_state), end=' ')
else:
if self.global_params['debug']:
print('[%d]' % instance.state_code, end=' ')
# previous_state_code
if self.global_params['debug']:
print("%d deviants in AWS region %s" % (undesired_count, self.region))
return undesired_count
# *** FUNCTIONS ***
def init(params):
if not params['use_boto']:
## print "params['use_boto'] is False!"
# Use aws-cli config file for credentials
import boto_helper
c = boto_helper.Credentials(params.get("profile"))
| unixnut/cloud-support | shepherd/aws/__init__.py | Python | gpl-2.0 | 11,401 |
# util.py
# -------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import sys
import inspect
import heapq, random
import cStringIO
class FixedRandom:
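# A hard-coded Mersenne Twister state tuple (the trailing 'L' long literals
# mark this as Python 2), presumably fed to random.setstate() further down so
# that autograded runs are reproducible across machines.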
def __init__(self):
fixedState = (3, (2147483648L, 507801126L, 683453281L, 310439348L, 2597246090L, \
2209084787L, 2267831527L, 979920060L, 3098657677L, 37650879L, 807947081L, 3974896263L, \
881243242L, 3100634921L, 1334775171L, 3965168385L, 746264660L, 4074750168L, 500078808L, \
776561771L, 702988163L, 1636311725L, 2559226045L, 157578202L, 2498342920L, 2794591496L, \
4130598723L, 496985844L, 2944563015L, 3731321600L, 3514814613L, 3362575829L, 3038768745L, \
2206497038L, 1108748846L, 1317460727L, 3134077628L, 988312410L, 1674063516L, 746456451L, \
3958482413L, 1857117812L, 708750586L, 1583423339L, 3466495450L, 1536929345L, 1137240525L, \
3875025632L, 2466137587L, 1235845595L, 4214575620L, 3792516855L, 657994358L, 1241843248L, \
1695651859L, 3678946666L, 1929922113L, 2351044952L, 2317810202L, 2039319015L, 460787996L, \
3654096216L, 4068721415L, 1814163703L, 2904112444L, 1386111013L, 574629867L, 2654529343L, \
3833135042L, 2725328455L, 552431551L, 4006991378L, 1331562057L, 3710134542L, 303171486L, \
1203231078L, 2670768975L, 54570816L, 2679609001L, 578983064L, 1271454725L, 3230871056L, \
2496832891L, 2944938195L, 1608828728L, 367886575L, 2544708204L, 103775539L, 1912402393L, \
1098482180L, 2738577070L, 3091646463L, 1505274463L, 2079416566L, 659100352L, 839995305L, \
1696257633L, 274389836L, 3973303017L, 671127655L, 1061109122L, 517486945L, 1379749962L, \
3421383928L, 3116950429L, 2165882425L, 2346928266L, 2892678711L, 2936066049L, 1316407868L, \
2873411858L, 4279682888L, 2744351923L, 3290373816L, 1014377279L, 955200944L, 4220990860L, \
2386098930L, 1772997650L, 3757346974L, 1621616438L, 2877097197L, 442116595L, 2010480266L, \
2867861469L, 2955352695L, 605335967L, 2222936009L, 2067554933L, 4129906358L, 1519608541L, \
1195006590L, 1942991038L, 2736562236L, 279162408L, 1415982909L, 4099901426L, 1732201505L, \
2934657937L, 860563237L, 2479235483L, 3081651097L, 2244720867L, 3112631622L, 1636991639L, \
3860393305L, 2312061927L, 48780114L, 1149090394L, 2643246550L, 1764050647L, 3836789087L, \
3474859076L, 4237194338L, 1735191073L, 2150369208L, 92164394L, 756974036L, 2314453957L, \
323969533L, 4267621035L, 283649842L, 810004843L, 727855536L, 1757827251L, 3334960421L, \
3261035106L, 38417393L, 2660980472L, 1256633965L, 2184045390L, 811213141L, 2857482069L, \
2237770878L, 3891003138L, 2787806886L, 2435192790L, 2249324662L, 3507764896L, 995388363L, \
856944153L, 619213904L, 3233967826L, 3703465555L, 3286531781L, 3863193356L, 2992340714L, \
413696855L, 3865185632L, 1704163171L, 3043634452L, 2225424707L, 2199018022L, 3506117517L, \
3311559776L, 3374443561L, 1207829628L, 668793165L, 1822020716L, 2082656160L, 1160606415L, \
3034757648L, 741703672L, 3094328738L, 459332691L, 2702383376L, 1610239915L, 4162939394L, \
557861574L, 3805706338L, 3832520705L, 1248934879L, 3250424034L, 892335058L, 74323433L, \
3209751608L, 3213220797L, 3444035873L, 3743886725L, 1783837251L, 610968664L, 580745246L, \
4041979504L, 201684874L, 2673219253L, 1377283008L, 3497299167L, 2344209394L, 2304982920L, \
3081403782L, 2599256854L, 3184475235L, 3373055826L, 695186388L, 2423332338L, 222864327L, \
1258227992L, 3627871647L, 3487724980L, 4027953808L, 3053320360L, 533627073L, 3026232514L, \
2340271949L, 867277230L, 868513116L, 2158535651L, 2487822909L, 3428235761L, 3067196046L, \
3435119657L, 1908441839L, 788668797L, 3367703138L, 3317763187L, 908264443L, 2252100381L, \
764223334L, 4127108988L, 384641349L, 3377374722L, 1263833251L, 1958694944L, 3847832657L, \
1253909612L, 1096494446L, 555725445L, 2277045895L, 3340096504L, 1383318686L, 4234428127L, \
1072582179L, 94169494L, 1064509968L, 2681151917L, 2681864920L, 734708852L, 1338914021L, \
1270409500L, 1789469116L, 4191988204L, 1716329784L, 2213764829L, 3712538840L, 919910444L, \
1318414447L, 3383806712L, 3054941722L, 3378649942L, 1205735655L, 1268136494L, 2214009444L, \
2532395133L, 3232230447L, 230294038L, 342599089L, 772808141L, 4096882234L, 3146662953L, \
2784264306L, 1860954704L, 2675279609L, 2984212876L, 2466966981L, 2627986059L, 2985545332L, \
2578042598L, 1458940786L, 2944243755L, 3959506256L, 1509151382L, 325761900L, 942251521L, \
4184289782L, 2756231555L, 3297811774L, 1169708099L, 3280524138L, 3805245319L, 3227360276L, \
3199632491L, 2235795585L, 2865407118L, 36763651L, 2441503575L, 3314890374L, 1755526087L, \
17915536L, 1196948233L, 949343045L, 3815841867L, 489007833L, 2654997597L, 2834744136L, \
417688687L, 2843220846L, 85621843L, 747339336L, 2043645709L, 3520444394L, 1825470818L, \
647778910L, 275904777L, 1249389189L, 3640887431L, 4200779599L, 323384601L, 3446088641L, \
4049835786L, 1718989062L, 3563787136L, 44099190L, 3281263107L, 22910812L, 1826109246L, \
745118154L, 3392171319L, 1571490704L, 354891067L, 815955642L, 1453450421L, 940015623L, \
796817754L, 1260148619L, 3898237757L, 176670141L, 1870249326L, 3317738680L, 448918002L, \
4059166594L, 2003827551L, 987091377L, 224855998L, 3520570137L, 789522610L, 2604445123L, \
454472869L, 475688926L, 2990723466L, 523362238L, 3897608102L, 806637149L, 2642229586L, \
2928614432L, 1564415411L, 1691381054L, 3816907227L, 4082581003L, 1895544448L, 3728217394L, \
3214813157L, 4054301607L, 1882632454L, 2873728645L, 3694943071L, 1297991732L, 2101682438L, \
3952579552L, 678650400L, 1391722293L, 478833748L, 2976468591L, 158586606L, 2576499787L, \
662690848L, 3799889765L, 3328894692L, 2474578497L, 2383901391L, 1718193504L, 3003184595L, \
3630561213L, 1929441113L, 3848238627L, 1594310094L, 3040359840L, 3051803867L, 2462788790L, \
954409915L, 802581771L, 681703307L, 545982392L, 2738993819L, 8025358L, 2827719383L, \
770471093L, 3484895980L, 3111306320L, 3900000891L, 2116916652L, 397746721L, 2087689510L, \
721433935L, 1396088885L, 2751612384L, 1998988613L, 2135074843L, 2521131298L, 707009172L, \
2398321482L, 688041159L, 2264560137L, 482388305L, 207864885L, 3735036991L, 3490348331L, \
1963642811L, 3260224305L, 3493564223L, 1939428454L, 1128799656L, 1366012432L, 2858822447L, \
1428147157L, 2261125391L, 1611208390L, 1134826333L, 2374102525L, 3833625209L, 2266397263L, \
3189115077L, 770080230L, 2674657172L, 4280146640L, 3604531615L, 4235071805L, 3436987249L, \
509704467L, 2582695198L, 4256268040L, 3391197562L, 1460642842L, 1617931012L, 457825497L, \
1031452907L, 1330422862L, 4125947620L, 2280712485L, 431892090L, 2387410588L, 2061126784L, \
896457479L, 3480499461L, 2488196663L, 4021103792L, 1877063114L, 2744470201L, 1046140599L, \
2129952955L, 3583049218L, 4217723693L, 2720341743L, 820661843L, 1079873609L, 3360954200L, \
3652304997L, 3335838575L, 2178810636L, 1908053374L, 4026721976L, 1793145418L, 476541615L, \
973420250L, 515553040L, 919292001L, 2601786155L, 1685119450L, 3030170809L, 1590676150L, \
1665099167L, 651151584L, 2077190587L, 957892642L, 646336572L, 2743719258L, 866169074L, \
851118829L, 4225766285L, 963748226L, 799549420L, 1955032629L, 799460000L, 2425744063L, \
2441291571L, 1928963772L, 528930629L, 2591962884L, 3495142819L, 1896021824L, 901320159L, \
3181820243L, 843061941L, 3338628510L, 3782438992L, 9515330L, 1705797226L, 953535929L, \
764833876L, 3202464965L, 2970244591L, 519154982L, 3390617541L, 566616744L, 3438031503L, \
1853838297L, 170608755L, 1393728434L, 676900116L, 3184965776L, 1843100290L, 78995357L, \
2227939888L, 3460264600L, 1745705055L, 1474086965L, 572796246L, 4081303004L, 882828851L, \
1295445825L, 137639900L, 3304579600L, 2722437017L, 4093422709L, 273203373L, 2666507854L, \
3998836510L, 493829981L, 1623949669L, 3482036755L, 3390023939L, 833233937L, 1639668730L, \
1499455075L, 249728260L, 1210694006L, 3836497489L, 1551488720L, 3253074267L, 3388238003L, \
2372035079L, 3945715164L, 2029501215L, 3362012634L, 2007375355L, 4074709820L, 631485888L, \
3135015769L, 4273087084L, 3648076204L, 2739943601L, 1374020358L, 1760722448L, 3773939706L, \
1313027823L, 1895251226L, 4224465911L, 421382535L, 1141067370L, 3660034846L, 3393185650L, \
1850995280L, 1451917312L, 3841455409L, 3926840308L, 1397397252L, 2572864479L, 2500171350L, \
3119920613L, 531400869L, 1626487579L, 1099320497L, 407414753L, 2438623324L, 99073255L, \
3175491512L, 656431560L, 1153671785L, 236307875L, 2824738046L, 2320621382L, 892174056L, \
230984053L, 719791226L, 2718891946L, 624L), None)
self.random = random.Random()
self.random.setstate(fixedState)
"""
Data structures useful for implementing SearchAgents
"""
class Stack:
"A container with a last-in-first-out (LIFO) queuing policy."
def __init__(self):
self.list = []
def push(self,item):
"Push 'item' onto the stack"
self.list.append(item)
def pop(self):
"Pop the most recently pushed item from the stack"
return self.list.pop()
def isEmpty(self):
"Returns true if the stack is empty"
return len(self.list) == 0
class Queue:
"A container with a first-in-first-out (FIFO) queuing policy."
def __init__(self):
self.list = []
def push(self,item):
"Enqueue the 'item' into the queue"
self.list.insert(0,item)
def pop(self):
"""
Dequeue the earliest enqueued item still in the queue. This
operation removes the item from the queue.
"""
return self.list.pop()
def isEmpty(self):
"Returns true if the queue is empty"
return len(self.list) == 0
class PriorityQueue:
"""
Implements a priority queue data structure. Each inserted item
has a priority associated with it and the client is usually interested
    in quick retrieval of the lowest-priority item in the queue. This
    data structure supports O(log n) insertion and removal of the
    lowest-priority item via a binary heap.
Note that this PriorityQueue does not allow you to change the priority
of an item. However, you may insert the same item multiple times with
different priorities.
"""
def __init__(self):
self.heap = []
self.count = 0
def push(self, item, priority):
        # Each entry carries an insertion counter so that equal-priority
        # items pop in FIFO order and items never need to be comparable.
        entry = (priority, self.count, item)
heapq.heappush(self.heap, entry)
self.count += 1
    def pop(self):
        (_, _, item) = heapq.heappop(self.heap)
        return item
def isEmpty(self):
return len(self.heap) == 0
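# A minimal usage sketch (illustrative only): duplicate items with
# different priorities are allowed, and the insertion counter keeps
# equal-priority items in FIFO order.
#
#   pq = PriorityQueue()
#   pq.push('plan A', 2)
#   pq.push('plan B', 1)
#   pq.pop()    # -> 'plan B' (the lowest priority value pops first)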
class PriorityQueueWithFunction(PriorityQueue):
"""
Implements a priority queue with the same push/pop signature of the
Queue and the Stack classes. This is designed for drop-in replacement for
those two classes. The caller has to provide a priority function, which
extracts each item's priority.
"""
def __init__(self, priorityFunction):
"priorityFunction (item) -> priority"
self.priorityFunction = priorityFunction # store the priority function
PriorityQueue.__init__(self) # super-class initializer
def push(self, item):
"Adds an item to the queue with priority from the priority function"
PriorityQueue.push(self, item, self.priorityFunction(item))
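# A minimal usage sketch (illustrative only): prioritising strings by
# length, so that shorter strings pop first.
#
#   pq = PriorityQueueWithFunction(len)
#   pq.push('pacman')
#   pq.push('ghost')
#   pq.pop()    # -> 'ghost' (len 5 < len 6)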
def manhattanDistance( xy1, xy2 ):
"Returns the Manhattan distance between points xy1 and xy2"
return abs( xy1[0] - xy2[0] ) + abs( xy1[1] - xy2[1] )
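# For example, manhattanDistance((1, 2), (4, 6)) == abs(1 - 4) + abs(2 - 6) == 7.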
"""
Data structures and functions useful for various course projects
The search project should not need anything below this line.
"""
class Counter(dict):
"""
A counter keeps track of counts for a set of keys.
The counter class is an extension of the standard python
dictionary type. It is specialized to have number values
(integers or floats), and includes a handful of additional
functions to ease the task of counting data. In particular,
all keys are defaulted to have value 0. Using a dictionary:
a = {}
print a['test']
would give an error, while the Counter class analogue:
>>> a = Counter()
>>> print a['test']
0
returns the default 0 value. Note that to reference a key
that you know is contained in the counter,
you can still use the dictionary syntax:
>>> a = Counter()
>>> a['test'] = 2
>>> print a['test']
2
This is very useful for counting things without initializing their counts,
see for example:
>>> a['blah'] += 1
>>> print a['blah']
1
The counter also includes additional functionality useful in implementing
the classifiers for this assignment. Two counters can be added,
subtracted or multiplied together. See below for details. They can
also be normalized and their total count and arg max can be extracted.
"""
def __getitem__(self, idx):
self.setdefault(idx, 0)
return dict.__getitem__(self, idx)
def incrementAll(self, keys, count):
"""
Increments all elements of keys by the same count.
>>> a = Counter()
>>> a.incrementAll(['one','two', 'three'], 1)
>>> a['one']
1
>>> a['two']
1
"""
for key in keys:
self[key] += count
def argMax(self):
"""
Returns the key with the highest value.
"""
        if len(self.keys()) == 0: return None
        allItems = self.items()  # avoid shadowing the built-in 'all'
        values = [x[1] for x in allItems]
        maxIndex = values.index(max(values))
        return allItems[maxIndex][0]
def sortedKeys(self):
"""
Returns a list of keys sorted by their values. Keys
with the highest values will appear first.
>>> a = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> a['third'] = 1
>>> a.sortedKeys()
['second', 'third', 'first']
"""
sortedItems = self.items()
compare = lambda x, y: sign(y[1] - x[1])
sortedItems.sort(cmp=compare)
return [x[0] for x in sortedItems]
def totalCount(self):
"""
Returns the sum of counts for all keys.
"""
return sum(self.values())
def normalize(self):
"""
Edits the counter such that the total count of all
keys sums to 1. The ratio of counts for all keys
        will remain the same. Note that normalizing an empty
        Counter is a no-op rather than an error.
"""
total = float(self.totalCount())
if total == 0: return
for key in self.keys():
self[key] = self[key] / total
def divideAll(self, divisor):
"""
Divides all counts by divisor
"""
divisor = float(divisor)
for key in self:
self[key] /= divisor
def copy(self):
"""
Returns a copy of the counter
"""
return Counter(dict.copy(self))
def __mul__(self, y ):
"""
Multiplying two counters gives the dot product of their vectors where
each unique label is a vector element.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['second'] = 5
>>> a['third'] = 1.5
>>> a['fourth'] = 2.5
>>> a * b
14
"""
        total = 0  # avoid shadowing the built-in 'sum'
        x = self
        if len(x) > len(y):
            x,y = y,x
        for key in x:
            if key not in y:
                continue
            total += x[key] * y[key]
        return total
    def __iadd__(self, y):
        """
        Adding another counter to a counter increments the current counter
        by the values stored in the second counter.
        >>> a = Counter()
        >>> b = Counter()
        >>> a['first'] = -2
        >>> a['second'] = 4
        >>> b['first'] = 3
        >>> b['third'] = 1
        >>> a += b
        >>> a['first']
        1
        """
        for key, value in y.items():
            self[key] += value
        return self
def __add__( self, y ):
"""
Adding two counters gives a counter with the union of all keys and
counts of the second added to counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a + b)['first']
1
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] + y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = y[key]
return addend
def __sub__( self, y ):
"""
Subtracting a counter from another gives a counter with the union of all keys and
counts of the second subtracted from counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a - b)['first']
-5
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] - y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = -1 * y[key]
return addend
def raiseNotDefined():
fileName = inspect.stack()[1][1]
line = inspect.stack()[1][2]
method = inspect.stack()[1][3]
print "*** Method not implemented: %s at line %s of %s" % (method, line, fileName)
sys.exit(1)
def normalize(vectorOrCounter):
"""
normalize a vector or counter by dividing each value by the sum of all values
"""
normalizedCounter = Counter()
if type(vectorOrCounter) == type(normalizedCounter):
counter = vectorOrCounter
total = float(counter.totalCount())
if total == 0: return counter
for key in counter.keys():
value = counter[key]
normalizedCounter[key] = value / total
return normalizedCounter
else:
vector = vectorOrCounter
s = float(sum(vector))
if s == 0: return vector
return [el / s for el in vector]
def nSample(distribution, values, n):
    """
    Draws n samples from the discrete distribution given as a list of
    probabilities aligned with 'values', using inverse-CDF sampling.
    """
if sum(distribution) != 1:
distribution = normalize(distribution)
rand = [random.random() for i in range(n)]
rand.sort()
samples = []
samplePos, distPos, cdf = 0,0, distribution[0]
while samplePos < n:
if rand[samplePos] < cdf:
samplePos += 1
samples.append(values[distPos])
else:
distPos += 1
cdf += distribution[distPos]
return samples
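# A minimal usage sketch (illustrative only; the result depends on the
# state of the random module):
#
#   nSample([0.25, 0.75], ['heads', 'tails'], 3)  # e.g. ['tails', 'heads', 'tails']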
def sample(distribution, values = None):
if type(distribution) == Counter:
items = sorted(distribution.items())
distribution = [i[1] for i in items]
values = [i[0] for i in items]
if sum(distribution) != 1:
distribution = normalize(distribution)
choice = random.random()
i, total= 0, distribution[0]
while choice > total:
i += 1
total += distribution[i]
return values[i]
def sampleFromCounter(ctr):
items = sorted(ctr.items())
return sample([v for k,v in items], [k for k,v in items])
def getProbability(value, distribution, values):
"""
Gives the probability of a value under a discrete distribution
defined by (distributions, values).
"""
total = 0.0
for prob, val in zip(distribution, values):
if val == value:
total += prob
return total
def flipCoin( p ):
r = random.random()
return r < p
def chooseFromDistribution( distribution ):
"Takes either a counter or a list of (prob, key) pairs and samples"
if type(distribution) == dict or type(distribution) == Counter:
return sample(distribution)
r = random.random()
base = 0.0
for prob, element in distribution:
base += prob
if r <= base: return element
def nearestPoint( pos ):
"""
Finds the nearest grid point to a position (discretizes).
"""
( current_row, current_col ) = pos
grid_row = int( current_row + 0.5 )
grid_col = int( current_col + 0.5 )
return ( grid_row, grid_col )
def sign( x ):
"""
Returns 1 or -1 depending on the sign of x
"""
if( x >= 0 ):
return 1
else:
return -1
def arrayInvert(array):
"""
Inverts a matrix stored as a list of lists.
"""
result = [[] for i in array]
for outer in array:
for inner in range(len(outer)):
result[inner].append(outer[inner])
return result
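# For example, arrayInvert([[1, 2], [3, 4]]) == [[1, 3], [2, 4]].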
def matrixAsList( matrix, value = True ):
"""
Turns a matrix into a list of coordinates matching the specified value
"""
rows, cols = len( matrix ), len( matrix[0] )
cells = []
for row in range( rows ):
for col in range( cols ):
if matrix[row][col] == value:
cells.append( ( row, col ) )
return cells
def lookup(name, namespace):
"""
Get a method or class from any imported module from its name.
Usage: lookup(functionName, globals())
"""
dots = name.count('.')
if dots > 0:
moduleName, objName = '.'.join(name.split('.')[:-1]), name.split('.')[-1]
module = __import__(moduleName)
return getattr(module, objName)
else:
modules = [obj for obj in namespace.values() if str(type(obj)) == "<type 'module'>"]
options = [getattr(module, name) for module in modules if name in dir(module)]
options += [obj[1] for obj in namespace.items() if obj[0] == name ]
if len(options) == 1: return options[0]
        if len(options) > 1: raise Exception, 'Name conflict for %s' % name
raise Exception, '%s not found as a method or class' % name
def pause():
"""
Pauses the output stream awaiting user feedback.
"""
print "<Press enter/return to continue>"
raw_input()
# code to handle timeouts
#
# FIXME
# NOTE: TimeoutFunction is NOT reentrant. Later timeouts will silently
# disable earlier timeouts. Could be solved by maintaining a global list
# of active time outs. Currently, questions which have test cases calling
# this have all student code so wrapped.
#
import signal
import time
class TimeoutFunctionException(Exception):
"""Exception to raise on a timeout"""
pass
class TimeoutFunction:
def __init__(self, function, timeout):
self.timeout = timeout
self.function = function
def handle_timeout(self, signum, frame):
raise TimeoutFunctionException()
def __call__(self, *args, **keyArgs):
# If we have SIGALRM signal, use it to cause an exception if and
# when this function runs too long. Otherwise check the time taken
# after the method has returned, and throw an exception then.
if hasattr(signal, 'SIGALRM'):
old = signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.timeout)
try:
result = self.function(*args, **keyArgs)
finally:
signal.signal(signal.SIGALRM, old)
signal.alarm(0)
else:
startTime = time.time()
result = self.function(*args, **keyArgs)
timeElapsed = time.time() - startTime
if timeElapsed >= self.timeout:
self.handle_timeout(None, None)
return result
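# A minimal usage sketch (illustrative only; 'slowFunction' is a stand-in
# for any callable):
#
#   bounded = TimeoutFunction(slowFunction, 2)  # two-second budget
#   try:
#       result = bounded(someArgument)
#   except TimeoutFunctionException:
#       print 'call timed out'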
_ORIGINAL_STDOUT = None
_ORIGINAL_STDERR = None
_MUTED = False
class WritableNull:
def write(self, string):
pass
def mutePrint():
global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
if _MUTED:
return
_MUTED = True
_ORIGINAL_STDOUT = sys.stdout
#_ORIGINAL_STDERR = sys.stderr
sys.stdout = WritableNull()
#sys.stderr = WritableNull()
def unmutePrint():
global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
if not _MUTED:
return
_MUTED = False
sys.stdout = _ORIGINAL_STDOUT
#sys.stderr = _ORIGINAL_STDERR
|
omardroubi/Artificial-Intelligence
|
Projects/Project4/bayesNets/util.py
|
Python
|
apache-2.0
| 25,733
|
from flask_app.flask_app import db
from crypto.CryptoUtils import generate_api_key
from datetime import datetime, timedelta
class ApiUser(db.Model):
__tablename__ = "api_user"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
key = db.Column(db.String(126))
email = db.Column(db.String(100))
usage = db.Column(db.Integer)
confirmed = db.Column(db.Boolean)
expired_at = db.Column(db.DateTime)
# hash = db.Column(db.String(1000))
def __init__(self, email=None):
self.email = email
self.key = generate_api_key(email)
self.usage = 0
self.confirmed = False
self.expired_at = datetime.now() + timedelta(days=7)
# self.hash = CryptoUtils.hash_password(password)
    def __repr__(self):
        return "<ApiUser %r>" % self.email
|
MaximeGir/StarTrekCorpora
|
database/models/ApiUser.py
|
Python
|
mit
| 801
|
# -*- coding: utf-8 -*-
import codecs
import os
from django.conf import settings
from django.utils.html import strip_tags
#from haystack import site
from haystack import indexes
from haystack.fields import CharField
from celery_haystack import indexes as celery_indexes
from projects import constants
from projects.models import ImportedFile, Project
import logging
log = logging.getLogger(__name__)
class ProjectIndex(celery_indexes.CelerySearchIndex, indexes.Indexable):
text = CharField(document=True, use_template=True)
author = CharField()
title = CharField(model_attr='name')
description = CharField(model_attr='description')
repo_type = CharField(model_attr='repo_type')
absolute_url = CharField()
def prepare_author(self, obj):
return obj.users.all()[0]
def prepare_absolute_url(self, obj):
return obj.get_absolute_url()
def get_model(self):
return Project
def index_queryset(self):
"Used when the entire index for model is updated."
return self.get_model().objects.public()
#Should probably make a common subclass for this and FileIndex
class ImportedFileIndex(celery_indexes.CelerySearchIndex, indexes.Indexable):
text = CharField(document=True)
author = CharField()
project = CharField(model_attr='project__name', faceted=True)
version = CharField(model_attr='version__slug', faceted=True)
title = CharField(model_attr='name')
absolute_url = CharField()
def prepare_author(self, obj):
return obj.project.users.all()[0]
def prepare_title(self, obj):
return obj.name.replace('.html', '').replace('_', ' ').title()
def prepare_absolute_url(self, obj):
return obj.get_absolute_url()
def prepare_text(self, obj):
"""
Prepare the text of the html file.
This only works on machines that have the html
files for the projects checked out.
"""
#Import this here to hopefully fix tests for now.
from pyquery import PyQuery
full_path = obj.project.rtd_build_path()
file_path = os.path.join(full_path, obj.path.lstrip('/'))
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
            log.info('(Search Index) Unable to index file: %s, error: %s' % (file_path, e))
return
log.debug('(Search Index) Indexing %s:%s' % (obj.project, obj.path))
DOCUMENT_PYQUERY_PATH = getattr(settings, 'DOCUMENT_PYQUERY_PATH',
'div.document')
try:
to_index = strip_tags(PyQuery(content)(
DOCUMENT_PYQUERY_PATH).html()).replace(u'¶', '')
except ValueError:
#Pyquery returns ValueError if div.document doesn't exist.
return
if not to_index:
log.info('(Search Index) Unable to index file: %s:%s, empty file' % (obj.project,
file_path))
else:
log.debug('(Search Index) %s:%s length: %s' % (obj.project, file_path,
len(to_index)))
return to_index
def get_model(self):
return ImportedFile
def index_queryset(self):
"Used when the entire index for model is updated."
return (self.get_model().objects
.filter(project__privacy_level=constants.PUBLIC))
|
nyergler/pythonslides
|
readthedocs/projects/search_indexes.py
|
Python
|
mit
| 3,498
|
#!/usr/bin/env python
#
# This script demonstrates both the high-level AND low-level
# OT API, in the Python language.
#
# Before running this script, make sure the _opentxs.so and
# the opentxs.py are both in a folder on the path: $PYTHONPATH
# Personally I put them in /usr/local/lib/python2.7/site-packages
# and then I set $PYTHONPATH to contain that path.
#
# Also, make sure the server is running ('opentxs-server')
#
nb = raw_input('\n\nBefore running this script, make sure the _opentxs.so and\nthe opentxs.py are both in a folder on the path: $PYTHONPATH\nPersonally I put them in /usr/local/lib/python2.7/site-packages\nand then I set $PYTHONPATH to contain that path.\n\nAlso, make sure the server is running (opentxs-server)\n\nOkay, ready to test?\nPress enter TWICE to try out the OT API... ')
# ---------------------------------------------------------
import opentxs
# These functions are perfect examples of the 'Low-level API',
# which is useful for simple functions that don't require messaging
# any OT servers. See OTAPI_Wrap.hpp for the complete low-level API.
opentxs.OTAPI_Wrap_AppInit()
opentxs.OTAPI_Wrap_LoadWallet()
# ---------------------------------------------------------
# Use the low-level API to see how many server contracts
# are in the user's wallet.
count = opentxs.OTAPI_Wrap_GetServerCount()
print 'Server count: ', count
# ---------------------------------------------------------
# OT MADE EASY (high-level API)
#
# This object handles all the request/response going on with
# any servers, plus all the retries and synchronization. It's
# the 'High-level API'. See OT_ME.h for the complete set of
# high-level API functions.
objEasy = opentxs.OT_ME()
# ---------------------------------------------------------
#
# Use the High-level API to download a user's public key from
# the server. (Obviously this will fail if the server is not
# running, or if the test data is not installed.)
strCheck = objEasy.check_user('r1fUoHwJOWCuK3WBAAySjmKYqsG6G2TYIxdqY6YNuuG', 'DYEB6U7dcpbwdGrftPnslNKz76BDuBTFAjiAgKaiY2n', 'HpDoVBTix9GRLvZZoKBi2zv2f4IFVLmRrW2Q0nAA0OH')
# ---------------------------------------------------------
# objEasy.check_user corresponds to the command-line:
# 'opentxs checknym'
#
# It's a useful test because it shows whether a server message
# is successful, before going the extra step of trying a real
# financial transaction.
#
# Therefore we only need to verify the Message Success, and not
# any transaction success. (Remember, a successful message can
# still contain a failed transaction.)
nResult = objEasy.VerifyMessageSuccess(strCheck)
if nResult < 0:
print 'Error in check nym. Is the server running? Is the test data in ~/.ot ?'
elif nResult == 0:
print 'Failure in check nym. Is the test data in ~/.ot ?'
elif nResult == 1:
print 'Success in check nym.'
else:
print 'Unexpected return value in check nym.'
# ---------------------------------------------------------
#
# Use the high-level API to withdraw some cash (1 silver gram)
# from FT's Silver account. (Obviously this will not work if the
# localhost server is not running, or if the test data is not
# installed to ~/.ot )
# Make sure we have the proper mint...
strMint = objEasy.load_or_retrieve_mint("r1fUoHwJOWCuK3WBAAySjmKYqsG6G2TYIxdqY6YNuuG", \
                                        "DYEB6U7dcpbwdGrftPnslNKz76BDuBTFAjiAgKaiY2n", \
                                        "7f8nlUn795x8931JParRnmKAyw8cegRyBMcFg9FccaF")
if objEasy.VerifyMessageSuccess(strMint) < 0:
    print 'Failure: Unable to load or retrieve necessary mint file for withdrawal...'
# This is a "real" financial transaction:
#
strWithdraw = objEasy.withdraw_cash('r1fUoHwJOWCuK3WBAAySjmKYqsG6G2TYIxdqY6YNuuG', 'DYEB6U7dcpbwdGrftPnslNKz76BDuBTFAjiAgKaiY2n', 'yQGh0vgm9YiqYOh6bfLDxyAA7Nnh2NmturCQmOt4LTo', 1)
# ---------------------------------------------------------
# InterpretTransactionMsgReply
#
# This function first verifies whether the message itself was a success.
# (For example, what if the server was down, and never received it?)
#
# Once it verifies that the reply was successful as a message, then it
# peers deeper, to see whether the balance agreement was successful as well.
# (After all, any transaction is automatically rejected if the balance
# agreement is poorly-formed.)
#
# Then if the balance agreement was successful, then finally, this same
# function (InterpretTransactionMsgReply) checks to see whether the
# transaction ITSELF was successful. After all, maybe there was an error
# saving it back to disk on the server side and this caused the transaction
# to fail. Or maybe there wasn't enough money in the account. Etc. All of
# the above work is done in the below call:
nResult = objEasy.InterpretTransactionMsgReply('r1fUoHwJOWCuK3WBAAySjmKYqsG6G2TYIxdqY6YNuuG', 'DYEB6U7dcpbwdGrftPnslNKz76BDuBTFAjiAgKaiY2n', 'yQGh0vgm9YiqYOh6bfLDxyAA7Nnh2NmturCQmOt4LTo', 'withdraw_cash', strWithdraw)
if nResult < 0:
print '\nError in withdraw cash. Is the server running? Is the test data in ~/.ot ?'
elif nResult == 0:
print '\nFailure in withdraw cash. Is the test data installed in ~/.ot ?'
elif nResult == 1:
print '\nSuccess in withdraw cash! (Using high-level API in Python.)'
else:
print '\nUnexpected return value in withdraw cash.'
# ---------------------------------------------------------
# At this point we're done. We've downloaded a public key from
# the OT server, and we've also withdrawn a little cash from the
# server. We've demonstrated that both the high-level and low-level
# OT APIs are operational through Python.
# So... we're done. Let's shutdown OT and finish execution.
# (Using the low-level API...)
opentxs.OTAPI_Wrap_Output(0, "\nOne more thing: Successfully used OT_API_Output.\n")
opentxs.OTAPI_Wrap_AppCleanup()
# P.S. to see the complete OT high-level API: OT_ME.hpp
# and to see the complete OT low-level API: OTAPI_Wrap.hpp
#
# See the Open-Transactions/include/opentxs folder for all
# relevant headers.
#
# One more thing: If you want to see a lot of free sample code
# similar to the above code, which shows you how to use all the
# different OT API function calls, check out this file:
#
# Open-Transactions/scripts/ot/ot_commands.ot
#
# (It contains the complete implementation for a command-line
# Open-Transactions client.)
# --------------------------------------
|
Open-Transactions/opentxs-cli
|
scripts/demo/python/python_ot_test.py
|
Python
|
agpl-3.0
| 6,333
|
from setuptools import setup
setup(
name='tangouca', # jeez
version='0.0.1',
author='Matthias Vogelgesang',
author_email='matthias.vogelgesang@kit.edu',
url='http://ufo.kit.edu',
license='(?)',
description='TANGO server for libuca',
long_description='TANGO server for libuca',
scripts=['Uca'],
install_requires=['PyTango', 'tifffile']
)
|
ufo-kit/libuca
|
tango/setup.py
|
Python
|
lgpl-2.1
| 381
|
# -*- coding: utf-8 -*-
import psycopg2
import sqlalchemy
from sqlalchemy import Column, Float, Integer, String, Boolean, DateTime, and_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
from sqlalchemy.types import Enum
import contextlib
import enum
import os
import yaml
import db_old
db_old.connect(False)
# Defining database scheme here
with open("db_config_old.yaml", "r", encoding='utf-8') as stream:
config = yaml.load(stream, Loader=yaml.FullLoader)
engine = sqlalchemy.create_engine('postgresql+psycopg2://' + config["user"] + "@/" + config["db"], client_encoding='utf8')
with engine.connect() as conn:
conn.execute("SET search_path TO obstaravania")
Base = declarative_base()
class Firma(Base):
__tablename__ = 'firma'
id = Column(Integer, primary_key=True)
ico = Column(String, index=True)
name = Column(String)
address = Column(String)
email = Column(String)
class Candidate(Base):
__tablename__ = 'candidate'
id = Column(Integer, primary_key=True)
score = Column(Float)
company_id = Column(Integer, ForeignKey("firma.id"))
company = relationship("Firma")
obstaravanie_id = Column(Integer, ForeignKey("obstaravanie.id"))
obstaravanie = relationship("Obstaravanie", back_populates="candidates", foreign_keys=[obstaravanie_id])
reason_id = Column(Integer, ForeignKey("obstaravanie.id"))
reason = relationship("Obstaravanie", foreign_keys=[reason_id])
# Prediction for average price and stddev.
class Prediction(Base):
__tablename__ = 'prediction'
id = Column(Integer, primary_key=True)
obstaravanie_id = Column(Integer, ForeignKey("obstaravanie.id"))
obstaravanie = relationship("Obstaravanie", back_populates="predictions", foreign_keys=[obstaravanie_id])
mean = Column(Float)
stdev = Column(Float)
num = Column(Integer) # Sample size from which the estimates were generated
class Obstaravanie(Base):
__tablename__ = 'obstaravanie'
id = Column(Integer, primary_key=True)
official_id = Column(String, index=True)
description = Column(String)
title = Column(String)
json = Column(String)
bulletin_year = Column(Integer)
bulleting_number = Column(Integer)
bulletin_id = Column(Integer)
ekosystem_id = Column(Integer)
contract_id = Column(Integer, index=True)
finished = Column(Boolean)
draft_price = Column(Float)
final_price = Column(Float)
winner_id = Column(Integer, ForeignKey("firma.id"))
winner = relationship("Firma", back_populates="obstaravania", foreign_keys=[winner_id])
customer_id = Column(Integer, ForeignKey("firma.id"))
customer = relationship("Firma", back_populates="obstaraval", foreign_keys=[customer_id])
candidates = relationship("Candidate", back_populates="obstaravanie", foreign_keys=[Candidate.obstaravanie_id])
predictions = relationship("Prediction", back_populates="obstaravanie", foreign_keys=[Prediction.obstaravanie_id])
Firma.obstaravania = relationship("Obstaravanie", order_by=Obstaravanie.id, back_populates="winner", foreign_keys=[Obstaravanie.winner_id])
Firma.obstaraval = relationship("Obstaravanie", order_by=Obstaravanie.id, back_populates="customer", foreign_keys=[Obstaravanie.customer_id])
class RawNotice(Base):
__tablename__ = 'raw_notice'
id = Column(Integer, primary_key=True)
notice = Column(String)
# This table should always have only one row: the time of the last sync
# of slovensko.digital data
class LastSync(Base):
__tablename__ = 'last_sync'
last_sync = Column(String, primary_key=True)
# This table should always have only one row: the last id used for generating
# notifications
class LastNotificationUpdate(Base):
__tablename__ = 'last_notification_update'
last_id = Column(Integer, primary_key=True)
class NotificationStatus(enum.Enum):
GENERATED = 1
APPROVED = 2
DECLINED = 3
class Notification(Base):
__tablename__ = 'notification'
id = Column(Integer, primary_key=True)
candidate_id = Column(Integer, ForeignKey("candidate.id"))
candidate = relationship("Candidate", foreign_keys=[candidate_id])
status = Column(Enum(NotificationStatus))
date_generated = Column(DateTime)
date_modified = Column(DateTime)
Base.metadata.create_all(engine)
MakeSession = sessionmaker(bind=engine)
@contextlib.contextmanager
def Session():
sess = MakeSession()
try:
yield sess
except:
sess.rollback()
raise
finally:
sess.close()
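# A minimal usage sketch (illustrative only): the context manager rolls
# back on any exception and always closes the session.
#
#   with Session() as session:
#       firma = session.query(Firma).filter_by(ico='12345678').first()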
|
verejnedigital/verejne.digital
|
obstaravania/data_model.py
|
Python
|
apache-2.0
| 4,625
|
"""aiohttp plugin that reads app configuration and stores a
GitHubClient object on app['client'].
If using caching, the caching plugin MUST be set up before
setting up this plugin.
"""
from .client import GitHubClient
from api.cache import APP_KEY as CACHE_APP_KEY
CONFIG_KEY = 'GITHUB'
APP_CLIENT_KEY = 'github_client'
def setup(app):
config = app.get(CONFIG_KEY, {})
cache = app.get(CACHE_APP_KEY)
client = GitHubClient(
config.get('CLIENT_ID'),
config.get('GITHUB_CLIENT_SECRET'),
cache=cache
)
app[APP_CLIENT_KEY] = client
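# A minimal usage sketch (illustrative only, assuming 'from aiohttp import
# web' and that the caching plugin, if used, was set up first):
#
#   app = web.Application()
#   app[CONFIG_KEY] = {'CLIENT_ID': '...', 'GITHUB_CLIENT_SECRET': '...'}
#   setup(app)
#   client = app[APP_CLIENT_KEY]  # a ready-to-use GitHubClient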
|
sloria/sir
|
api/github/plugin.py
|
Python
|
mit
| 574
|
import os
from pymco.test import ctxt
from . import base
class RabbitMQTestCase(base.IntegrationTestCase):
'''RabbitMQ integration test case.'''
CTXT = {
'connector': 'rabbitmq',
'plugin.rabbitmq.vhost': '/mcollective',
'plugin.rabbitmq.pool.size': '1',
'plugin.rabbitmq.pool.1.host': 'localhost',
'plugin.rabbitmq.pool.1.port': '61613',
'plugin.rabbitmq.pool.1.user': 'mcollective',
'plugin.rabbitmq.pool.1.password': 'marionette',
}
class TestWithRabbitMQMCo22x(base.MCollective22x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQMCo23x(base.MCollective23x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQMCo24x(base.MCollective24x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQSSLMCo23x(base.MCollective23x, RabbitMQTestCase):
"""MCollective integration test case."""
CTXT = {
'connector': 'rabbitmq',
'plugin.rabbitmq.vhost': '/mcollective',
'plugin.rabbitmq.pool.size': '1',
'plugin.rabbitmq.pool.1.host': 'localhost',
        'plugin.rabbitmq.pool.1.port': '61612',
'plugin.rabbitmq.pool.1.user': 'mcollective',
'plugin.rabbitmq.pool.1.password': 'marionette',
'plugin.rabbitmq.pool.1.ssl': 'true',
'plugin.rabbitmq.pool.1.ssl.ca': os.path.join(ctxt.ROOT,
'fixtures/ca.pem'),
'plugin.rabbitmq.pool.1.ssl.key': os.path.join(
ctxt.ROOT,
'fixtures/activemq_private.pem'),
'plugin.rabbitmq.pool.1.ssl.cert': os.path.join(
ctxt.ROOT,
'fixtures/activemq_cert.pem',
),
}
|
rafaduran/python-mcollective
|
tests/integration/test_with_rabbitmq.py
|
Python
|
bsd-3-clause
| 1,771
|
from flask import Blueprint
main = Blueprint('main', __name__)
from . import views, errors
from ..models import Permission
@main.app_context_processor
def inject_permissions():
return dict(Permission=Permission)
|
Lirean/Elibrary
|
app/main/__init__.py
|
Python
|
gpl-3.0
| 219
|
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This module provide function to plot the speed control info from log csv file
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
import tkFileDialog
from process import get_start_index
from process import preprocess
from process import process
class Plotter(object):
"""
Plot the speed info
"""
def __init__(self):
"""
Init the speed info
"""
np.set_printoptions(precision=3)
self.file = open('temp_result.csv', 'a')
def process_data(self, filename):
"""
        Load the file and preprocess the data
"""
self.data = preprocess(filename)
self.tablecmd, self.tablespeed, self.tableacc, self.speedsection, self.accsection, self.timesection = process(
self.data)
def plot_result(self):
"""
Plot the desired data
"""
fig, axarr = plt.subplots(2, 1, sharex=True)
plt.tight_layout()
fig.subplots_adjust(hspace=0)
axarr[0].plot(
self.data['time'], self.data['ctlbrake'], label='Brake CMD')
axarr[0].plot(
self.data['time'],
self.data['brake_percentage'],
label='Brake Output')
axarr[0].plot(
self.data['time'], self.data['ctlthrottle'], label='Throttle CMD')
axarr[0].plot(
self.data['time'],
self.data['throttle_percentage'],
label='Throttle Output')
axarr[0].plot(
self.data['time'],
self.data['engine_rpm'] / 100,
label='Engine RPM')
axarr[0].legend(fontsize='medium')
axarr[0].grid(True)
axarr[0].set_title('Command')
axarr[1].plot(
self.data['time'],
self.data['vehicle_speed'],
label='Vehicle Speed')
for i in range(len(self.timesection)):
axarr[1].plot(
self.timesection[i],
self.speedsection[i],
label='Speed Segment')
axarr[1].plot(
self.timesection[i], self.accsection[i], label='IMU Segment')
axarr[1].legend(fontsize='medium')
axarr[1].grid(True)
axarr[1].set_title('Speed')
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
#plt.tight_layout(pad=0.20)
fig.canvas.mpl_connect('key_press_event', self.press)
plt.show()
def press(self, event):
"""
Keyboard events during plotting
"""
if event.key == 'q' or event.key == 'Q':
self.file.close()
plt.close()
if event.key == 'w' or event.key == 'W':
for i in range(len(self.tablecmd)):
for j in range(len(self.tablespeed[i])):
self.file.write("%s, %s, %s\n" %
(self.tablecmd[i], self.tablespeed[i][j],
self.tableacc[i][j]))
print("Finished writing results")
def main():
"""
demo
"""
if len(sys.argv) == 2:
        # Use the file path given on the command line
file_path = sys.argv[1]
else:
file_path = tkFileDialog.askopenfilename(
initialdir="/home/caros/.ros",
filetypes=(("csv files", ".csv"), ("all files", "*.*")))
print('File path: %s' % file_path)
plotter = Plotter()
plotter.process_data(file_path)
print('Finished reading the file.')
plotter.plot_result()
if __name__ == '__main__':
main()
|
wanglei828/apollo
|
modules/tools/vehicle_calibration/plot_data.py
|
Python
|
apache-2.0
| 4,296
|
# -*- coding: utf-8 -*-
import logging
from icebergsdk.resources.application import Application, ApplicationCommissionSettings, ApplicationMerchantPolicies,\
ApplicationTransaction, ApplicationPaymentSettings, ApplicationUrls, ApplicationPermission
from icebergsdk.resources.order import Order, MerchantOrder, OrderItem
from icebergsdk.resources.cart import Cart, CartItem
from icebergsdk.resources.product import Product, ProductOffer, ProductVariation, ProductOfferImage, Category, Brand,\
ProductFamily, ProductFamilySelector, Image
from icebergsdk.resources.store import Store, MerchantImage, MerchantAddress, StoreBankAccount,\
MerchantCommissionSettings, MerchantFeed, MerchantShippingPolicy,\
MerchantTransaction, Permission
from icebergsdk.resources.user import User, Profile, UserShoppingPreference
from icebergsdk.resources.address import Address, Country
from icebergsdk.resources.payment import Payment
from icebergsdk.resources.message import Message
from icebergsdk.resources.review import Review, MerchantReview
from icebergsdk.resources.webhooks import Webhook, WebhookTrigger, WebhookTriggerAttempt
from icebergsdk.resources.currency import Currency
from icebergsdk.resources.mp_admin import Transaction, MarketPlaceTransaction
from icebergsdk.resources.return_refund import Return, Refund
from icebergsdk.resources.channels import ProductChannel, ChannelPropagationPolicy, ProductChannelLogEvent
from icebergsdk.resources.service import ServiceOffer, ServiceOfferVariation, ServiceOption
from icebergsdk.resources.timeslots import (
AvailabilityCalendar, AvailabilityTimeSlot, Reservation)
from icebergsdk.resources.options import Option, OptionAnswer
logger = logging.getLogger('icebergsdk')
def get_class_from_resource_uri(resource_uri):
types = {
"application": Application,
"application_commission_settings": ApplicationCommissionSettings,
"app_payment_settings": ApplicationPaymentSettings,
"app_permission": ApplicationPermission,
"application_merchant_policies": ApplicationMerchantPolicies,
"application_urls": ApplicationUrls,
"app_transaction": ApplicationTransaction,
"mp_transaction": MarketPlaceTransaction,
"product": Product,
"brand": Brand,
"currency": Currency,
"productoffer": ProductOffer,
"offer_image": ProductOfferImage,
"product_variation": ProductVariation,
"user": User,
"address": Address,
"country": Country,
"profile": Profile,
"user_shopping_prefs": UserShoppingPreference,
"payment": Payment,
"image": Image,
"merchant": Store,
"store_bank_account": StoreBankAccount,
"commission_settings": MerchantCommissionSettings,
"merchant_address": MerchantAddress,
"merchant_image": MerchantImage,
"order": Order,
"merchant_order": MerchantOrder,
"message": Message,
"cart": Cart,
"cart_item": CartItem,
"order_item": OrderItem,
"review": Review,
"merchant_review": MerchantReview,
"category": Category,
"webhook": Webhook,
"webhook_trigger": WebhookTrigger,
"webhook_trigger_attempt": WebhookTriggerAttempt,
"merchant_catalog_feed": MerchantFeed,
"merchant_shipping_policy": MerchantShippingPolicy,
"store_transaction": MerchantTransaction,
"transaction": Transaction,
"return": Return,
"refund": Refund,
"permission": Permission,
"product_channel": ProductChannel,
"product_channel_propagation_policy": ChannelPropagationPolicy,
"product_channel_log_event": ProductChannelLogEvent,
"product_family": ProductFamily,
"product_family_selector": ProductFamilySelector,
"service_offer": ServiceOffer,
"service_option": ServiceOption,
"service_offer_variation": ServiceOfferVariation,
"availability_calendar": AvailabilityCalendar,
"availability_timeslot": AvailabilityTimeSlot,
"reservation": Reservation,
"option": Option,
"option_answer": OptionAnswer
}
# Hack for now... Will be changed
for resource, klass in types.iteritems():
if "/%s/" % resource in resource_uri:
return klass
    logger.error("can't find resource for %s", resource_uri)
raise NotImplementedError()
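# A minimal usage sketch (illustrative only; the URI shapes are assumed):
#
#   get_class_from_resource_uri('/v1/product/42/')   # -> Product
#   get_class_from_resource_uri('/v1/merchant/7/')   # -> Store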
|
Iceberg-Marketplace/Iceberg-API-PYTHON
|
icebergsdk/resources/__init__.py
|
Python
|
mit
| 4,440
|
#!/usr/bin/env python3
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.enums import Platform
from cerbero.utils import shell
from cerbero.errors import FatalError
def get_libtool_versions(version, soversion=0):
parts = version.split('.')
if not parts or len(parts) > 3:
raise FatalError('Version must contain three or fewer parts: {!r}'
''.format(version))
try:
major = int(parts[0])
minor = 0
micro = 0
if len(parts) > 1:
minor = int(parts[1])
if len(parts) > 2:
micro = int(parts[2])
except ValueError:
raise FatalError('Invalid version: {!r}'.format(version))
interface_age = 0
if (minor % 2) == 0:
interface_age = micro
binary_age = (100 * minor) + micro
return (soversion, binary_age - interface_age, interface_age)
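# For example, get_libtool_versions('1.12.3') returns (0, 1200, 3):
# minor (12) is even, so interface_age == micro == 3, and
# binary_age == 100 * 12 + 3 == 1203, giving 1203 - 3 == 1200.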
class LibtoolLibrary(object):
'''
Helper class to create libtool libraries files (.la)
'''
LIBTOOL_TPL = '''\
# %(libname)s - a libtool library file
# Generated by libtool (GNU libtool) 2.4.2 Debian-2.4.2-1ubuntu1
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# The name that we can dlopen(3).
dlname='%(dlname)s'
# Names of this library.
library_names='%(library_names)s'
# The name of the static archive.
old_library='%(old_library)s'
# Linker flags that can not go in dependency_libs.
inherited_linker_flags=''
# Libraries that this one depends upon.
dependency_libs='%(dependency_libs)s'
# Names of additional weak libraries provided by this library
weak_library_names=''
# Version information for %(libname)s.
current=%(current)s
age=%(age)s
revision=%(revision)s
# Is this an already installed library?
installed=yes
# Should we warn about portability when linking against -modules?
shouldnotlink=no
# Files to dlopen/dlpreopen
dlopen=''
dlpreopen=''
# Directory that this library needs to be installed in:
libdir='%(libdir)s'
'''
def __init__(self, libname, major, minor, micro, libdir, platform,
deps=None, static_only=False):
self.libtool_vars = {
'libname': '',
'dlname': '',
'library_names': '',
'old_library': '',
'dependency_libs': '',
'current': '',
'age': '',
'revision': '',
'libdir': ''}
if platform == Platform.WINDOWS:
shared_ext = 'dll.a'
elif platform in [Platform.DARWIN, Platform.IOS]:
shared_ext = 'dylib'
else:
shared_ext = 'so'
if not libname.startswith('lib'):
libname = 'lib%s' % libname
if deps is None:
deps = ''
self.libname = libname
self.libdir = libdir
self.laname = '%s.la' % libname
dlname_base = '%s.%s' % (libname, shared_ext)
dlname = dlname_base
dlname_all = dlname_base
major_str = ''
minor_str = ''
micro_str = ''
if major is not None:
dlname = '%s.%s' % (dlname_base, major)
major_str = major
if minor is not None:
dlname_all = '%s.%s' % (dlname, minor)
minor_str = minor
if micro is not None:
dlname_all = '%s.%s' % (dlname_all, micro)
micro_str = micro
old_library = '%s.a' % libname
self.change_value('libname', self.laname)
if not static_only:
self.change_value('dlname', dlname)
self.change_value('library_names', '%s %s %s' % (dlname_all, dlname,
dlname_base))
self.change_value('old_library', old_library)
self.change_value('current', minor_str)
self.change_value('age', minor_str)
self.change_value('revision', micro_str)
self.change_value('libdir', libdir)
self.change_value('dependency_libs', self._parse_deps(deps))
def save(self):
path = os.path.join(self.libdir, self.laname)
if shell.DRY_RUN:
print('Creating {}'.format(path))
return
with open(path, 'w') as f:
f.write(self.LIBTOOL_TPL % self.libtool_vars)
def change_value(self, key, val):
self.libtool_vars[key] = val
def _parse_deps(self, deps):
# FIXME: these deps need to be resolved recursively since the list of
# dependency_libs in .la files are exhaustive. For now, recipes are
# handling that.
deps_str = ''
libtool_deps = [x for x in deps if not x.startswith('-l')]
lib_deps = [x for x in deps if x.startswith('-l')]
for d in libtool_deps:
dep_dir, dep_name = os.path.split(d)
if dep_dir:
# we already have a prepended path
deps_str += ' ' + d + '.la '
else:
if not d.startswith('lib'):
d = 'lib' + d
deps_str += ' %s/%s.la ' % (self.libdir, d)
deps_str += ' '.join(lib_deps)
return deps_str
|
nirbheek/cerbero
|
cerbero/tools/libtool.py
|
Python
|
lgpl-2.1
| 5,912
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('release', '0007_add_release_variant_type_module'),
]
operations = [
migrations.AddField(
model_name='variant',
name='variant_release',
field=models.CharField(max_length=100, null=True, blank=True),
),
migrations.AddField(
model_name='variant',
name='variant_version',
field=models.CharField(max_length=100, null=True, blank=True),
),
]
|
release-engineering/product-definition-center
|
pdc/apps/release/migrations/0008_auto_20160719_1221.py
|
Python
|
mit
| 632
|
from mock import patch
import mock
from kiwi.filesystem.squashfs import FileSystemSquashFs
class TestFileSystemSquashfs:
@patch('os.path.exists')
def setup(self, mock_exists):
mock_exists.return_value = True
self.squashfs = FileSystemSquashFs(mock.Mock(), 'root_dir')
@patch('platform.machine')
@patch('kiwi.filesystem.squashfs.Command.run')
def test_create_on_file(self, mock_command, mock_machine):
mock_machine.return_value = 'x86_64'
self.squashfs.create_on_file('myimage', 'label')
mock_command.assert_called_once_with(
[
'mksquashfs', 'root_dir', 'myimage', '-noappend',
'-b', '1M', '-comp', 'xz', '-Xbcj', 'x86'
]
)
@patch('platform.machine')
@patch('kiwi.filesystem.squashfs.Command.run')
def test_create_on_file_exclude_data(self, mock_command, mock_machine):
mock_machine.return_value = 'ppc64le'
self.squashfs.create_on_file('myimage', 'label', ['foo'])
mock_command.assert_called_once_with(
[
'mksquashfs', 'root_dir', 'myimage', '-noappend', '-b', '1M',
'-comp', 'xz', '-Xbcj', 'powerpc', '-wildcards', '-e', 'foo'
]
)
@patch('platform.machine')
@patch('kiwi.filesystem.squashfs.Command.run')
    def test_create_on_file_unknown_arch(self, mock_command, mock_machine):
mock_machine.return_value = 'aarch64'
self.squashfs.create_on_file('myimage', 'label')
mock_command.assert_called_once_with(
[
'mksquashfs', 'root_dir', 'myimage',
'-noappend', '-b', '1M', '-comp', 'xz'
]
)
@patch('kiwi.filesystem.squashfs.Command.run')
def test_create_on_file_gzip(self, mock_command):
self.squashfs.custom_args = {
'compression': 'gzip', 'create_options': []
}
self.squashfs.create_on_file('myimage', 'label')
mock_command.assert_called_once_with(
[
'mksquashfs', 'root_dir', 'myimage',
'-noappend', '-b', '1M', '-comp', 'gzip'
]
)
@patch('kiwi.filesystem.squashfs.Command.run')
def test_create_on_file_no_compression(self, mock_command):
self.squashfs.custom_args = {
'compression': 'uncompressed', 'create_options': []
}
self.squashfs.create_on_file('myimage', 'label')
mock_command.assert_called_once_with(
[
'mksquashfs', 'root_dir', 'myimage', '-noappend',
'-b', '1M', '-noI', '-noD', '-noF', '-noX'
]
)
|
SUSE/kiwi
|
test/unit/filesystem/squashfs_test.py
|
Python
|
gpl-3.0
| 2,670
|
'''
>>> from qpage import *
>>> import random
>>> random.seed(1)
>>> list_randomize([1,2,3,5,6])
[2, 1, 5, 3, 6]
>>> email_at("example@yahoo.com")
'example at yahoo.com'
>>> convert_bytes(200)
'200.0 bytes'
>>> convert_bytes(6000)
'5.9 KB'
>>> convert_bytes(80000)
'78.1 KB'
>>> random.seed(1)
>>> random_badge_color()
'yellowgreen'
>>> create_badge()
'https://img.shields.io/badge/qpage-2.0-blue.svg'
>>> random.seed(1)
>>> create_badge(random=True)
'https://img.shields.io/badge/qpage-2.0-yellowgreen.svg'
>>> random.seed(1)
>>> read_lorem(5)
'ipsum \nLorem sit dolor amet,'
>>> print_line(4)
----
>>> print_line(5,"%")
%%%%%
>>> name_standard('test')
'Test'
>>> name_standard('TesT')
'Test'
>>> internet() # if there is a stable internet connection
True
>>> wait_func(4)
.
.
.
.
>>> wait_func()
.
.
'''
|
sepandhaghighi/qpage
|
source/test.py
|
Python
|
mit
| 805
|
from itertools import product
from axelrod import Actions, Player, init_args, random_choice
from axelrod.strategy_transformers import FinalTransformer
from .lookerup import LookerUp, create_lookup_table_keys
C, D = Actions.C, Actions.D
@FinalTransformer((D, D)) # End with two defections if tournament length is known
class Gambler(LookerUp):
"""
A LookerUp class player which will select randomly an action in some cases.
It will always defect the last 2 turns.
"""
name = 'Gambler'
classifier = {
'memory_depth': float('inf'),
'stochastic': True,
'makes_use_of': set(['length']),
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
@init_args
def __init__(self, lookup_table=None):
"""
If no lookup table is provided to the constructor, then use the TFT one.
"""
if not lookup_table:
lookup_table = {
('', 'C', 'D') : 0,
('', 'D', 'D') : 0,
('', 'C', 'C') : 1,
('', 'D', 'C') : 1,
}
LookerUp.__init__(self, lookup_table=lookup_table, value_length=None)
def strategy(self, opponent):
action = LookerUp.strategy(self, opponent)
# action could be 'C' or a float
if action in [C, D]:
return action
return random_choice(action)
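# A fractional lookup-table value is passed to random_choice(), which
# (assuming the usual axelrod semantics) cooperates with that probability;
# e.g. a table value of 0.93 means cooperate 93% of the time.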
class PSOGambler(Gambler):
"""
A LookerUp strategy that uses a lookup table with probability numbers
generated using a Particle Swarm Optimisation (PSO) algorithm.
A description of how this strategy was trained is given here:
https://gist.github.com/GDKO/60c3d0fd423598f3c4e4
"""
name = "PSO Gambler"
def __init__(self):
lookup_table_keys = create_lookup_table_keys(plays=2,
opponent_start_plays=2)
# GK: Pattern of values determined previously with a pso algorithm.
pattern_pso = [1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 0.0, 1.0, 0.93, 0.0, 1.0, 0.67, 0.42, 0.0,
0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.48, 0.0,
0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.19, 1.0, 1.0, 0.0,
0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.36, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
# Zip together the keys and the action pattern to get the lookup table.
lookup_table = dict(zip(lookup_table_keys, pattern_pso))
Gambler.__init__(self, lookup_table=lookup_table)
|
marcharper/Axelrod
|
axelrod/strategies/gambler.py
|
Python
|
mit
| 2,652
|
# coding=utf-8
"""Module used to generate context for aggregation result section."""
from safe.definitions.exposure import exposure_all, exposure_population
from safe.definitions.fields import (
affected_exposure_count_field,
aggregation_name_field,
total_affected_field,
exposure_type_field,
exposure_class_field)
from safe.gis.vector.tools import read_dynamic_inasafe_field
from safe.report.extractors.util import (
layer_definition_type,
resolve_from_dictionary,
retrieve_exposure_classes_lists)
from safe.utilities.rounding import format_number
from safe.utilities.i18n import tr
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
def aggregation_result_extractor(impact_report, component_metadata):
"""Extracting aggregation result of breakdown from the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
context = {}
"""Initializations"""
extra_args = component_metadata.extra_args
# Find out aggregation report type
exposure_layer = impact_report.exposure
analysis_layer = impact_report.analysis
provenance = impact_report.impact_function.provenance
exposure_summary_table = impact_report.exposure_summary_table
if exposure_summary_table:
exposure_summary_table_fields = exposure_summary_table.keywords[
'inasafe_fields']
aggregation_summary = impact_report.aggregation_summary
aggregation_summary_fields = aggregation_summary.keywords[
'inasafe_fields']
debug_mode = impact_report.impact_function.debug_mode
"""Filtering report sections"""
# Only process for applicable exposure types
# Get exposure type definition
exposure_type = layer_definition_type(exposure_layer)
# Only round the number when it is population exposure and it is not
# in debug mode
is_rounded = not debug_mode
is_population = exposure_type is exposure_population
# For now aggregation report only applicable for breakable exposure types:
itemizable_exposures_all = [
exposure for exposure in exposure_all
if exposure.get('classifications')]
if exposure_type not in itemizable_exposures_all:
return context
"""Generating type name for columns"""
type_fields = read_dynamic_inasafe_field(
aggregation_summary_fields, affected_exposure_count_field)
# do not include total, to preserve ordering and proper reference
type_fields.remove('total')
# we need to sort the column
# get the classes lists
# retrieve classes definitions
exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)
# sort columns based on class order
# create function to sort
def sort_classes(_type_field):
"""Sort method to retrieve exposure class key index."""
# class key is the type field name
# find index in class list
for i, _exposure_class in enumerate(exposure_classes_lists):
if _type_field == _exposure_class['key']:
index = i
break
else:
index = -1
return index
# sort
type_fields = sorted(type_fields, key=sort_classes)
# generate type_header_labels for column header
type_header_labels = []
for type_name in type_fields:
type_label = tr(type_name.capitalize())
type_header_labels.append(type_label)
"""Generating values for rows"""
# generate rows of values for values of each column
rows = []
aggregation_name_index = aggregation_summary.fieldNameIndex(
aggregation_name_field['field_name'])
total_field_index = aggregation_summary.fieldNameIndex(
total_affected_field['field_name'])
type_field_index = []
for type_name in type_fields:
field_name = affected_exposure_count_field['field_name'] % type_name
type_index = aggregation_summary.fieldNameIndex(field_name)
type_field_index.append(type_index)
for feat in aggregation_summary.getFeatures():
total_affected_value = format_number(
feat[total_field_index],
enable_rounding=is_rounded,
is_population=is_population)
if total_affected_value == '0':
# skip aggregation type if the total affected is zero
continue
item = {
# Name is the header for each row
'name': feat[aggregation_name_index],
# Total is the total for each row
'total': total_affected_value
}
# Type values is the values for each column in each row
type_values = []
for idx in type_field_index:
affected_value = format_number(
feat[idx],
enable_rounding=is_rounded)
type_values.append(affected_value)
item['type_values'] = type_values
rows.append(item)
"""Generate total for footers"""
# calculate total values for each type. Taken from exposure summary table
type_total_values = []
# Get affected field index
affected_field_index = exposure_summary_table.fieldNameIndex(
total_affected_field['field_name'])
# Get breakdown field
breakdown_field = None
    # Either exposure_type_field or exposure_class_field may be present in
    # the exposure summary table; use whichever one it actually provides.
breakdown_fields = [
exposure_type_field,
exposure_class_field
]
for field in breakdown_fields:
if field['key'] in exposure_summary_table_fields:
breakdown_field = field
break
breakdown_field_name = breakdown_field['field_name']
breakdown_field_index = exposure_summary_table.fieldNameIndex(
breakdown_field_name)
# Fetch total affected for each breakdown name
value_dict = {}
for feat in exposure_summary_table.getFeatures():
# exposure summary table is in csv format, so the field returned is
# always in text format
affected_value = int(float(feat[affected_field_index]))
affected_value = format_number(
affected_value,
enable_rounding=is_rounded,
is_population=is_population)
value_dict[feat[breakdown_field_index]] = affected_value
if value_dict:
for type_name in type_fields:
affected_value_string_formatted = value_dict[type_name]
if affected_value_string_formatted == '0':
                # the total for this breakdown type is zero, so remove the
                # whole column: its header and the matching value in each row
# current column index
column_index = len(type_total_values)
# cut column header
type_header_labels = (
type_header_labels[:column_index] +
type_header_labels[column_index + 1:])
# cut all row values for the column
for item in rows:
type_values = item['type_values']
item['type_values'] = (
type_values[:column_index] +
type_values[column_index + 1:])
continue
type_total_values.append(affected_value_string_formatted)
"""Get the super total affected"""
# total for affected (super total)
analysis_feature = analysis_layer.getFeatures().next()
field_index = analysis_layer.fieldNameIndex(
total_affected_field['field_name'])
total_all = format_number(
analysis_feature[field_index],
enable_rounding=is_rounded)
"""Generate and format the context"""
aggregation_area_default_header = resolve_from_dictionary(
extra_args, 'aggregation_area_default_header')
header_label = (
aggregation_summary.title() or aggregation_area_default_header)
table_header_format = resolve_from_dictionary(
extra_args, 'table_header_format')
# check unit
units = exposure_type['units']
if units:
unit = units[0]
abbreviation = unit['abbreviation']
if abbreviation:
unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
else:
unit_string = ''
else:
unit_string = ''
table_header = table_header_format.format(
title=provenance['map_legend_title'],
unit=unit_string)
table_header = ' '.join(table_header.split())
section_header = resolve_from_dictionary(extra_args, 'header')
notes = resolve_from_dictionary(extra_args, 'notes')
total_header = resolve_from_dictionary(extra_args, 'total_header')
total_in_aggregation_header = resolve_from_dictionary(
extra_args, 'total_in_aggregation_header')
context['header'] = section_header
context['notes'] = notes
context['aggregation_result'] = {
'table_header': table_header,
'header_label': header_label,
'type_header_labels': type_header_labels,
'total_label': total_header,
'total_in_aggregation_area_label': total_in_aggregation_header,
'rows': rows,
'type_total_values': type_total_values,
'total_all': total_all,
}
return context
|
Gustry/inasafe
|
safe/report/extractors/aggregate_result.py
|
Python
|
gpl-3.0
| 9,641
|
"""Test getattr and associated."""
# --- import --------------------------------------------------------------------------------------
import WrightTools as wt
from WrightTools import datasets
# --- test ----------------------------------------------------------------------------------------
def test_axis_variable_namespace_collision():
root = wt.Collection()
p = datasets.PyCMDS.wm_w2_w1_001
data = wt.data.from_PyCMDS(p, parent=root, name="data")
assert isinstance(data.wm, wt.data._axis.Axis)
assert isinstance(data.w2, wt.data._axis.Axis)
assert isinstance(data.w1, wt.data._axis.Axis)
assert isinstance(data.d1, wt.data._variable.Variable)
assert isinstance(data.d2, wt.data._variable.Variable)
data.close()
def test_transform():
root = wt.Collection()
p = datasets.PyCMDS.wm_w2_w1_001
data = wt.data.from_PyCMDS(p, parent=root, name="data")
data.transform("wm-w1", "w1", "w2")
assert hasattr(data, "wm__m__w1")
assert hasattr(data, "w1")
assert hasattr(data, "w2")
data.close()
# --- run -----------------------------------------------------------------------------------------
if __name__ == "__main__":
test_axis_variable_namespace_collision()
test_transform()
|
wright-group/WrightTools
|
tests/group/attributes.py
|
Python
|
mit
| 1,261
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common types used in DeepMind Robotics vision package.
This file is maintained for simplifying data creation and manipulation as well
as facilitating type hints.
"""
import dataclasses
from typing import Mapping, Optional, Sequence, Tuple
import numpy as np
MaskPoints = Sequence[Sequence[Tuple[int, int]]]
Centers = Mapping[str, Optional[np.ndarray]]
Detections = Mapping[str, Optional[np.ndarray]]
@dataclasses.dataclass(frozen=True)
class Intrinsics:
"""Camera intrinsics.
Attributes:
camera_matrix: intrinsic camera matrix for the raw (distorted) images: K =
[[fx 0 cx], [ 0 fy cy], [ 0 0 1]]. Projects 3D points in the camera
coordinate frame to 2D pixel coordinates using the focal lengths (fx, fy)
and principal point (cx, cy).
distortion_parameters: the distortion parameters, size depending on the
distortion model. For example, the "plumb_bob" model has 5 parameters (k1,
k2, t1, t2, k3).
"""
camera_matrix: np.ndarray
distortion_parameters: np.ndarray
@dataclasses.dataclass(frozen=True)
class Extrinsics:
"""Camera extrinsics.
Attributes:
pos_xyz: camera position in the world reference frame.
quat_xyzw: camera unit quaternion in the world reference frame.
"""
pos_xyz: Tuple[float, float, float]
quat_xyzw: Tuple[float, float, float, float]
@dataclasses.dataclass(frozen=True)
class Blob:
"""An image blob.
Attributes:
    center: (u, v) coordinates of the blob barycenter.
contour: Matrix of (u, v) coordinates of the blob contour.
"""
center: np.ndarray
contour: np.ndarray
@dataclasses.dataclass(frozen=True)
class Camera:
"""Camera parameters.
Attributes:
width: image width.
height: image height.
extrinsics: camera extrinsics.
intrinsics: camera intrinsics.
"""
width: int
height: int
extrinsics: Optional[Extrinsics] = None
intrinsics: Optional[Intrinsics] = None
@dataclasses.dataclass(frozen=True)
class ValueRange:
"""A generic N-dimensional range of values in terms of lower and upper bounds.
Attributes:
    lower: An N-D array with the lower values of the range.
    upper: An N-D array with the upper values of the range.
"""
lower: np.ndarray
upper: np.ndarray
@dataclasses.dataclass(frozen=True)
class ColorRange(ValueRange):
"""A range of colors in terms of lower and upper bounds.
Typical usage example:
# A YUV color range (cuboid)
    ColorRange(lower=np.array([0., 0.25, 0.25]),
               upper=np.array([1., 0.75, 0.75]))
Attributes:
lower: A 3D array with the lower values of the color range.
upper: A 3D array with the upper values of the color range.
"""
@dataclasses.dataclass(frozen=True)
class PositionLimit(ValueRange):
"""A range of Cartesian position in terms of lower and upper bounds.
Typical usage example:
# Define a position limit in Cartesian space (cuboid)
    PositionLimit(lower=np.array([-0.5, -0.5, 0.]),
                  upper=np.array([0.5, 0.5, 0.5]))
Attributes:
lower: An [x, y, z] array with the lower values of the position limit.
upper: An [x, y, z] array with the upper values of the position limit.
"""
@dataclasses.dataclass(frozen=True)
class Plane:
"""Parameterization of a 3d plane.
Attributes:
point: 3d point which lies in the plane.
normal: 3d vector normal to the plane.
"""
point: np.ndarray
normal: np.ndarray
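# A minimal usage sketch (not part of the original module); the numeric
# values below are illustrative only, not calibration data.
if __name__ == '__main__':
  _intrinsics = Intrinsics(
      camera_matrix=np.array([[600., 0., 320.],
                              [0., 600., 240.],
                              [0., 0., 1.]]),
      distortion_parameters=np.zeros(5))  # plumb_bob: k1, k2, t1, t2, k3
  _camera = Camera(width=640, height=480, intrinsics=_intrinsics)
  print(_camera)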
|
deepmind/dm_robotics
|
py/vision/types.py
|
Python
|
apache-2.0
| 3,974
|
from datetime import datetime, timedelta
from django.test import TestCase
from couchforms.models import XFormInstance, XFormArchived
from couchforms.signals import xform_archived, xform_unarchived
from couchforms import fetch_and_wrap_form
class TestFormArchiving(TestCase):
def testArchive(self):
form = XFormInstance(
form={'foo': 'bar'}
)
form.save()
form.put_attachment(name='form.xml', content='<data/>')
self.assertEqual("XFormInstance", form.doc_type)
self.assertEqual(0, len(form.history))
lower_bound = datetime.utcnow() - timedelta(seconds=1)
form.archive(user='mr. librarian')
upper_bound = datetime.utcnow() + timedelta(seconds=1)
form = fetch_and_wrap_form(form._id)
self.assertEqual('XFormArchived', form.doc_type)
self.assertTrue(isinstance(form, XFormArchived))
[archival] = form.history
self.assertTrue(lower_bound <= archival.date <= upper_bound)
self.assertEqual('archive', archival.operation)
self.assertEqual('mr. librarian', archival.user)
lower_bound = datetime.utcnow() - timedelta(seconds=1)
form.unarchive(user='mr. researcher')
upper_bound = datetime.utcnow() + timedelta(seconds=1)
form = fetch_and_wrap_form(form._id)
self.assertEqual('XFormInstance', form.doc_type)
self.assertTrue(isinstance(form, XFormInstance))
[archival, restoration] = form.history
self.assertTrue(lower_bound <= restoration.date <= upper_bound)
self.assertEqual('unarchive', restoration.operation)
self.assertEqual('mr. researcher', restoration.user)
def testSignal(self):
global archive_counter, restore_counter
archive_counter = 0
restore_counter = 0
def count_archive(**kwargs):
global archive_counter
archive_counter += 1
def count_unarchive(**kwargs):
global restore_counter
restore_counter += 1
xform_archived.connect(count_archive)
xform_unarchived.connect(count_unarchive)
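        # NB: these receivers stay connected for the lifetime of the process;
        # a larger suite would normally disconnect them in cleanup so the
        # counters do not leak into other tests.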
form = XFormInstance(form={'foo': 'bar'})
form.save()
form.put_attachment(name='form.xml', content='<data/>')
self.assertEqual(0, archive_counter)
self.assertEqual(0, restore_counter)
form.archive()
self.assertEqual(1, archive_counter)
self.assertEqual(0, restore_counter)
form.unarchive()
self.assertEqual(1, archive_counter)
self.assertEqual(1, restore_counter)
|
puttarajubr/commcare-hq
|
corehq/ex-submodules/couchforms/tests/test_archive.py
|
Python
|
bsd-3-clause
| 2,584
|
# coding: utf-8
ALLOWED_HOSTS = ['*']
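# In-memory SQLite keeps the test settings self-contained: the database is
# created from scratch on every run and needs no external services.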
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'tests.myapp',
'test_without_migrations',
)
SITE_ID = 1
SECRET_KEY = 'secret'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
|
henriquebastos/django-test-without-migrations
|
tests/settings.py
|
Python
|
mit
| 384
|
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _data_path_doc,
_get_version, _version_doc)
has_erp_core_data = partial(has_dataset, name='erp_core')
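# functools.partial pre-binds name='erp_core', exposing a dataset-specific
# checker without having to redefine has_dataset.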
@verbose
def data_path(path=None, force_update=False, update_path=True,
download=True, verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='erp_core',
download=download)
data_path.__doc__ = _data_path_doc.format(name='erp_core',
conf='MNE_DATASETS_ERP_CORE_PATH')
def get_version(): # noqa: D103
return _get_version('erp_core')
get_version.__doc__ = _version_doc.format(name='erp_core')
|
kambysese/mne-python
|
mne/datasets/erp_core/erp_core.py
|
Python
|
bsd-3-clause
| 797
|
# Generated by Django 2.2.1 on 2019-06-06 18:33
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
import django.utils.timezone
import gwells.db_comments.model_mixins
import wells.data_migrations
class Migration(migrations.Migration):
dependencies = [
('wells', '0097_auto_20190604_1828'),
]
operations = [
migrations.CreateModel(
name='WellOrientationCode',
fields=[
('create_user', models.CharField(max_length=60)),
('create_date', models.DateTimeField(default=django.utils.timezone.now)),
('update_user', models.CharField(max_length=60)),
('update_date', models.DateTimeField(default=django.utils.timezone.now)),
('display_order', models.PositiveIntegerField()),
('effective_date', models.DateTimeField(default=django.utils.timezone.now)),
('expiry_date', models.DateTimeField(default=datetime.datetime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=utc))),
('well_orientation_code', models.CharField(editable=False, max_length=100, primary_key=True, serialize=False)),
('description', models.CharField(max_length=100)),
],
options={
'db_table': 'well_orientation_code',
'ordering': ['display_order', 'description'],
},
bases=(models.Model, gwells.db_comments.model_mixins.DBComments),
),
migrations.RunPython(
code=wells.data_migrations.load_well_orientation_codes,
reverse_code=wells.data_migrations.unload_well_orientation_codes,
),
migrations.AddField(
model_name='activitysubmission',
name='well_orientation_status',
field=models.ForeignKey(blank=True, db_column='well_orientation_code', null=True, on_delete=django.db.models.deletion.CASCADE, to='wells.WellOrientationCode', verbose_name='Well Orientation Code'),
),
migrations.AddField(
model_name='well',
name='well_orientation_status',
field=models.ForeignKey(blank=True, db_column='well_orientation_code', null=True, on_delete=django.db.models.deletion.CASCADE, to='wells.WellOrientationCode', verbose_name='Well Orientation Code'),
),
migrations.AddField(
model_name='fieldsprovided',
name='well_orientation_status',
field=models.BooleanField(default=False),
)
]
|
bcgov/gwells
|
app/backend/wells/migrations/0098_auto_20190606_1833.py
|
Python
|
apache-2.0
| 2,587
|
#!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""The main script for starting training and evaluation.
How to run:
blaze run -c opt --config=dmtf_cuda \
learning/brain/research/dune/experimental/representation/release/train_and_eval -- \
--workdir /tmp/test \
--config /google/src/cloud/akolesnikov/release/release/config/supervised/imagenet.py \
--nouse_tpu
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
import functools
import math
import os
import absl.app as app
import absl.flags as flags
import absl.logging as logging
import tensorflow as tf
import tensorflow_hub as hub
import datasets
from self_supervision.self_supervision_lib import get_self_supervision_model
import utils
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
FLAGS = flags.FLAGS
# General run setup flags.
flags.DEFINE_string('workdir', None, 'Where to store files.')
flags.mark_flag_as_required('workdir')
flags.DEFINE_integer('num_gpus', 1, 'Number of GPUs to use.')
flags.DEFINE_bool('use_tpu', True, 'Whether running on TPU or not.')
flags.DEFINE_bool('run_eval', False, 'Run eval mode')
flags.DEFINE_string('tpu_worker_name', 'tpu_worker',
'Name of a TPU worker.')
# More detailed experiment flags
flags.DEFINE_string('dataset', None, 'Which dataset to use, typically '
'`imagenet`.')
flags.mark_flag_as_required('dataset')
flags.DEFINE_string('dataset_dir', None, 'Location of the dataset files.')
flags.mark_flag_as_required('dataset_dir')
flags.DEFINE_integer('eval_batch_size', None, 'Optional different batch-size'
' evaluation, defaults to the same as `batch_size`.')
flags.DEFINE_integer('keep_checkpoint_every_n_hours', None, 'Keep one '
'checkpoint every this many hours. Otherwise, only the '
'last few ones are kept. Defaults to 4h.')
flags.DEFINE_integer('random_seed', None, 'Seed to use. None is random.')
flags.DEFINE_integer('save_checkpoints_secs', None, 'Every how many seconds '
'to save a checkpoint. Defaults to 600 ie every 10mins.')
flags.DEFINE_string('serving_input_key', None, 'The name of the input tensor '
'in the generated hub module. Just leave it at default.')
flags.DEFINE_string('serving_input_shape', None, 'The shape of the input tensor'
' in the stored hub module. Can contain `None`.')
flags.DEFINE_string('signature', None, 'The name of the tensor to use as '
'representation for evaluation. Just leave to default.')
flags.DEFINE_string('task', None, 'Which pretext-task to learn from. Can be '
'one of `rotation`, `exemplar`, `jigsaw`, '
'`relative_patch_location`, `linear_eval`, `supervised`.')
flags.mark_flag_as_required('task')
flags.DEFINE_string('train_split', None, 'Which dataset split to train on. '
'Should only be `train` (default) or `trainval`.')
flags.DEFINE_string('val_split', None, 'Which dataset split to eval on. '
'Should only be `val` (default) or `test`.')
# Flags about the pretext tasks
flags.DEFINE_integer('embed_dim', None, 'For most pretext tasks, which '
'dimension the embedding/hidden vector should be. '
'Defaults to 1000.')
flags.DEFINE_float('margin', None, 'For the `exemplar` pretext task, '
'how large the triplet loss margin should be.')
flags.DEFINE_integer('num_of_inception_patches', None, 'For the Exemplar '
'pretext task, how many instances of an image to create.')
flags.DEFINE_integer('patch_jitter', None, 'For patch-based methods, by how '
'many pixels to jitter the patches. Defaults to 0.')
flags.DEFINE_integer('perm_subset_size', None, 'Subset of permutations to '
'sample per example in the `jigsaw` pretext task. '
'Defaults to 8.')
flags.DEFINE_integer('splits_per_side', None, 'For the `crop_patches` '
'preprocessor, how many times to split a side. '
'For example, 3 will result in 3x3=9 patches.')
# Flags for evaluation.
flags.DEFINE_string('eval_model', None, 'Whether to perform evaluation with a '
'`linear` (default) model, or with an `mlp` model.')
flags.DEFINE_string('hub_module', None, 'Folder where the hub module that '
'should be evaluated is stored.')
flags.DEFINE_string('pool_mode', None, 'When running evaluation on '
'intermediate layers (not logits) of the network, it is '
'commonplace to pool the features down to 9000. This '
'decides the pooling method to be used: `adaptive_max` '
'(default), `adaptive_avg`, `max`, or `avg`.')
flags.DEFINE_string('combine_patches', None, 'When running evaluation on '
'patch models, it is used to merge patch representations'
'to the full image representation. The value should be set'
'to `avg_pool`(default), or `concat`.')
# Flags about the model.
flags.DEFINE_string('architecture', None,
help='Which basic network architecture to use. '
'One of vgg19, resnet50, revnet50.')
# flags.mark_flag_as_required('architecture') # Not required in eval mode.
flags.DEFINE_integer('filters_factor', None, 'Widening factor for network '
'filters. For ResNet, default = 4 = vanilla ResNet.')
flags.DEFINE_bool('last_relu', None, 'Whether to include (default) the final '
'ReLU layer in ResNet/RevNet models or not.')
flags.DEFINE_string('mode', None, 'Which ResNet to use, `v1` or `v2`.')
# Flags about the optimization process.
flags.DEFINE_integer('batch_size', None, 'The global batch-size to use.')
flags.mark_flag_as_required('batch_size')
flags.DEFINE_string('decay_epochs', None, 'Optional list of epochs at which '
'learning-rate decay should happen, such as `15,25`.')
flags.DEFINE_integer('epochs', None, 'Number of epochs to run training.')
flags.mark_flag_as_required('epochs')
flags.DEFINE_float('lr_decay_factor', None, 'Factor by which to decay the '
'learning-rate at each decay step. Default 0.1.')
flags.DEFINE_float('lr', None, 'The base learning-rate to use for training.')
flags.mark_flag_as_required('lr')
flags.DEFINE_float('lr_scale_batch_size', None, 'The batch-size for which the '
'base learning-rate `lr` is defined. For batch-sizes '
'different from that, it is scaled linearly accordingly.'
'For example lr=0.1, batch_size=128, lr_scale_batch_size=32'
', then actual lr=0.025.')
flags.mark_flag_as_required('lr_scale_batch_size')
flags.DEFINE_string('optimizer', None, 'Which optimizer to use. '
'Only `sgd` (default) or `adam` are supported.')
flags.DEFINE_integer('warmup_epochs', None, 'Duration of the linear learning-'
'rate warm-up (from 0 to actual). Defaults to 0.')
flags.DEFINE_float('weight_decay', None, 'Strength of weight-decay. '
'Defaults to 1e-4, and may be set to 0.')
# Flags about pre-processing/data augmentation.
flags.DEFINE_string('crop_size', None, 'Size of the crop when using `crop` '
'or `central_crop` preprocessing. Either a single '
'integer like `32` or a pair like `32,24`.')
flags.DEFINE_float('grayscale_probability', None, 'When using `to_gray` '
'preprocessing, probability of actually doing it. Defaults '
'to 1.0, i.e. deterministically grayscaling the input.')
flags.DEFINE_string('preprocessing', None, 'A comma-separated list of '
'pre-processing steps to perform, see preprocess.py.')
flags.mark_flag_as_required('preprocessing')
flags.DEFINE_bool('randomize_resize_method', None, 'Whether or not (default) '
'to use a random interpolation method in the `resize` '
'preprocessor.')
flags.DEFINE_string('resize_size', None, 'For the `resize`, '
'`inception_preprocess`, and '
'`crop_inception_preprocess_patches` preprocessors, the '
'size in pixels to which to resize the input. Can be a '
'single number for square, or a pair as `128,64`.')
flags.DEFINE_integer('smaller_size', None, 'For the `resize_small` preprocessor'
', the desired size that the smaller side should have '
'after resizing the image (keeping aspect ratio).')
# Number of iterations (=training steps) per TPU training loop. Use >100 for
# good speed. This is the minimum number of steps between checkpoints.
TPU_ITERATIONS_PER_LOOP = 500
def train_and_eval():
"""Trains a network on (self) supervised data."""
checkpoint_dir = os.path.join(FLAGS.workdir)
if FLAGS.use_tpu:
master = TPUClusterResolver(
tpu=[os.environ['TPU_NAME']]).get_master()
else:
master = ''
config = tf.contrib.tpu.RunConfig(
model_dir=checkpoint_dir,
tf_random_seed=FLAGS.get_flag_value('random_seed', None),
master=master,
evaluation_master=master,
keep_checkpoint_every_n_hours=FLAGS.get_flag_value(
'keep_checkpoint_every_n_hours', 4),
save_checkpoints_secs=FLAGS.get_flag_value('save_checkpoints_secs', 600),
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=TPU_ITERATIONS_PER_LOOP,
tpu_job_name=FLAGS.tpu_worker_name))
# The global batch-sizes are passed to the TPU estimator, and it will pass
# along the local batch size in the model_fn's `params` argument dict.
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=get_self_supervision_model(FLAGS.task),
model_dir=checkpoint_dir,
config=config,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.batch_size,
eval_batch_size=FLAGS.get_flag_value('eval_batch_size', FLAGS.batch_size))
if FLAGS.run_eval:
data_fn = functools.partial(
datasets.get_data,
split_name=FLAGS.get_flag_value('val_split', 'val'),
is_training=False,
shuffle=False,
num_epochs=1,
drop_remainder=FLAGS.use_tpu)
# Contrary to what the documentation claims, the `train` and the
# `evaluate` functions NEED to have `max_steps` and/or `steps` set and
# cannot make use of the iterator's end-of-input exception, so we need
# to do some math for that here.
num_samples = datasets.get_count(FLAGS.get_flag_value('val_split', 'val'))
num_steps = num_samples // FLAGS.get_flag_value('eval_batch_size',
FLAGS.batch_size)
tf.logging.info('val_steps: %d', num_steps)
for checkpoint in tf.contrib.training.checkpoints_iterator(
estimator.model_dir, timeout=10 * 60):
estimator.evaluate(
checkpoint_path=checkpoint, input_fn=data_fn, steps=num_steps)
hub_exporter = hub.LatestModuleExporter('hub', serving_input_fn)
hub_exporter.export(
estimator,
os.path.join(checkpoint_dir, 'export/hub'),
checkpoint)
if tf.gfile.Exists(os.path.join(FLAGS.workdir, 'TRAINING_IS_DONE')):
break
# Evaluates the latest checkpoint on validation set.
result = estimator.evaluate(input_fn=data_fn, steps=num_steps)
return result
else:
train_data_fn = functools.partial(
datasets.get_data,
split_name=FLAGS.get_flag_value('train_split', 'train'),
is_training=True,
num_epochs=int(math.ceil(FLAGS.epochs)),
drop_remainder=True)
# We compute the number of steps and make use of Estimator's max_steps
# arguments instead of relying on the Dataset's iterator to run out after
# a number of epochs so that we can use 'fractional' epochs, which are
# used by regression tests. (And because TPUEstimator needs it anyways.)
num_samples = datasets.get_count(FLAGS.get_flag_value('train_split',
'train'))
# Depending on whether we drop the last batch each epoch or only at the
    # very end, this should be ordered differently for rounding.
updates_per_epoch = num_samples // FLAGS.batch_size
num_steps = int(math.ceil(FLAGS.epochs * updates_per_epoch))
tf.logging.info('train_steps: %d', num_steps)
estimator.train(train_data_fn, max_steps=num_steps)
def serving_input_fn():
"""A serving input fn."""
input_shape = utils.str2intlist(
FLAGS.get_flag_value('serving_input_shape', 'None,None,None,3'))
image_features = {
FLAGS.get_flag_value('serving_input_key', 'image'):
tf.placeholder(dtype=tf.float32, shape=input_shape)}
return tf.estimator.export.ServingInputReceiver(
features=image_features, receiver_tensors=image_features)
def main(unused_argv):
# logging.info('config: %s', FLAGS)
logging.info('workdir: %s', FLAGS.workdir)
train_and_eval()
logging.info('I\'m done with my work, ciao!')
if __name__ == '__main__':
app.run(main)
|
google/revisiting-self-supervised
|
train_and_eval.py
|
Python
|
apache-2.0
| 13,927
|
import threading
class Singleton(object):
_instance = None
def __new__(class_, *args, **kwargs):
if not isinstance(class_._instance, class_):
            # object.__new__ takes no extra arguments; forwarding *args and
            # **kwargs raises TypeError on modern Pythons
            class_._instance = object.__new__(class_)
return class_._instance
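# A thread-safe variant (minimal sketch, not part of the original module):
# double-checked locking built on the threading module imported above; the
# name ThreadSafeSingleton is illustrative.
class ThreadSafeSingleton(object):
    _instance = None
    _lock = threading.Lock()
    def __new__(class_, *args, **kwargs):
        if class_._instance is None:
            with class_._lock:
                # re-check under the lock so two racing threads cannot
                # both create an instance
                if class_._instance is None:
                    class_._instance = object.__new__(class_)
        return class_._instance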
if __name__ == "__main__":
s1 = Singleton()
s2 = Singleton()
s3 = Singleton()
print s1, id(s1)
print s2, id(s2)
print s3, id(s3)
|
kieslee/xlogging
|
xlogging/singleton.py
|
Python
|
apache-2.0
| 425
|
# -*- coding: utf-8 -*-
"""
sphinx.pycode
~~~~~~~~~~~~~
    Utilities for parsing and analyzing Python code.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from sphinx import package_dir
from sphinx.errors import PycodeError
from sphinx.pycode import nodes
from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals
from sphinx.util import get_module_source, detect_encoding
from sphinx.util.pycompat import next, StringIO, BytesIO, TextIOWrapper
from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
# load the Python grammar
_grammarfile = path.join(package_dir, 'pycode', 'Grammar.txt')
pygrammar = driver.load_grammar(_grammarfile)
pydriver = driver.Driver(pygrammar, convert=nodes.convert)
# an object with attributes corresponding to token and symbol names
class sym: pass
for k, v in pygrammar.symbol2number.iteritems():
setattr(sym, k, v)
for k, v in token.tok_name.iteritems():
setattr(sym, v, k)
# a dict mapping terminal and nonterminal numbers to their names
number2name = pygrammar.number2symbol.copy()
number2name.update(token.tok_name)
_eq = nodes.Leaf(token.EQUAL, '=')
class AttrDocVisitor(nodes.NodeVisitor):
"""
Visitor that collects docstrings for attribute assignments on toplevel and
in classes (class attributes and attributes set in __init__).
The docstrings can either be in special '#:' comments before the assignment
or in a docstring after it.
"""
def init(self, scope, encoding):
self.scope = scope
self.in_init = 0
self.encoding = encoding
self.namespace = []
self.collected = {}
self.tagnumber = 0
self.tagorder = {}
def add_tag(self, name):
name = '.'.join(self.namespace + [name])
self.tagorder[name] = self.tagnumber
self.tagnumber += 1
def visit_classdef(self, node):
"""Visit a class."""
self.add_tag(node[1].value)
self.namespace.append(node[1].value)
self.generic_visit(node)
self.namespace.pop()
def visit_funcdef(self, node):
"""Visit a function (or method)."""
# usually, don't descend into functions -- nothing interesting there
self.add_tag(node[1].value)
if node[1].value == '__init__':
# however, collect attributes set in __init__ methods
self.in_init += 1
self.generic_visit(node)
self.in_init -= 1
def visit_expr_stmt(self, node):
"""Visit an assignment which may have a special comment before (or
after) it.
"""
if _eq not in node.children:
# not an assignment (we don't care for augmented assignments)
return
# look *after* the node; there may be a comment prefixing the NEWLINE
# of the simple_stmt
parent = node.parent
idx = parent.children.index(node) + 1
while idx < len(parent):
if parent[idx].type == sym.SEMI:
idx += 1
continue # skip over semicolon
if parent[idx].type == sym.NEWLINE:
prefix = parent[idx].get_prefix()
if not isinstance(prefix, unicode):
prefix = prefix.decode(self.encoding)
docstring = prepare_commentdoc(prefix)
if docstring:
self.add_docstring(node, docstring)
return # don't allow docstrings both before and after
break
# now look *before* the node
pnode = node[0]
prefix = pnode.get_prefix()
# if the assignment is the first statement on a new indentation
# level, its preceding whitespace and comments are not assigned
# to that token, but the first INDENT or DEDENT token
while not prefix:
pnode = pnode.get_prev_leaf()
if not pnode or pnode.type not in (token.INDENT, token.DEDENT):
break
prefix = pnode.get_prefix()
if not isinstance(prefix, unicode):
prefix = prefix.decode(self.encoding)
docstring = prepare_commentdoc(prefix)
self.add_docstring(node, docstring)
def visit_simple_stmt(self, node):
"""Visit a docstring statement which may have an assignment before."""
if node[0].type != token.STRING:
# not a docstring; but still need to visit children
return self.generic_visit(node)
prev = node.get_prev_sibling()
if not prev:
return
if prev.type == sym.simple_stmt and \
prev[0].type == sym.expr_stmt and _eq in prev[0].children:
# need to "eval" the string because it's returned in its
# original form
docstring = literals.evalString(node[0].value, self.encoding)
docstring = prepare_docstring(docstring)
self.add_docstring(prev[0], docstring)
def add_docstring(self, node, docstring):
# add an item for each assignment target
for i in range(0, len(node) - 1, 2):
target = node[i]
if self.in_init and self.number2name[target.type] == 'power':
# maybe an attribute assignment -- check necessary conditions
if (# node must have two children
len(target) != 2 or
# first child must be "self"
target[0].type != token.NAME or target[0].value != 'self' or
# second child must be a "trailer" with two children
self.number2name[target[1].type] != 'trailer' or
len(target[1]) != 2 or
# first child must be a dot, second child a name
target[1][0].type != token.DOT or
target[1][1].type != token.NAME):
continue
name = target[1][1].value
elif target.type != token.NAME:
# don't care about other complex targets
continue
else:
name = target.value
self.add_tag(name)
if docstring:
namespace = '.'.join(self.namespace)
if namespace.startswith(self.scope):
self.collected[namespace, name] = docstring
class ModuleAnalyzer(object):
# cache for analyzer objects -- caches both by module and file name
cache = {}
@classmethod
def for_string(cls, string, modname, srcname='<string>'):
if isinstance(string, bytes):
return cls(BytesIO(string), modname, srcname)
return cls(StringIO(string), modname, srcname, decoded=True)
@classmethod
def for_file(cls, filename, modname):
if ('file', filename) in cls.cache:
return cls.cache['file', filename]
try:
fileobj = open(filename, 'rb')
except Exception, err:
raise PycodeError('error opening %r' % filename, err)
obj = cls(fileobj, modname, filename)
cls.cache['file', filename] = obj
return obj
@classmethod
def for_module(cls, modname):
if ('module', modname) in cls.cache:
entry = cls.cache['module', modname]
if isinstance(entry, PycodeError):
raise entry
return entry
try:
type, source = get_module_source(modname)
if type == 'string':
obj = cls.for_string(source, modname)
else:
obj = cls.for_file(source, modname)
except PycodeError, err:
cls.cache['module', modname] = err
raise
cls.cache['module', modname] = obj
return obj
def __init__(self, source, modname, srcname, decoded=False):
# name of the module
self.modname = modname
# name of the source file
self.srcname = srcname
# file-like object yielding source lines
self.source = source
# cache the source code as well
pos = self.source.tell()
if not decoded:
self.encoding = detect_encoding(self.source.readline)
self.source.seek(pos)
self.code = self.source.read().decode(self.encoding)
self.source.seek(pos)
self.source = TextIOWrapper(self.source, self.encoding)
else:
self.encoding = None
self.code = self.source.read()
self.source.seek(pos)
# will be filled by tokenize()
self.tokens = None
# will be filled by parse()
self.parsetree = None
# will be filled by find_attr_docs()
self.attr_docs = None
self.tagorder = None
# will be filled by find_tags()
self.tags = None
def tokenize(self):
"""Generate tokens from the source."""
if self.tokens is not None:
return
try:
self.tokens = list(tokenize.generate_tokens(self.source.readline))
except tokenize.TokenError, err:
raise PycodeError('tokenizing failed', err)
self.source.close()
def parse(self):
"""Parse the generated source tokens."""
if self.parsetree is not None:
return
self.tokenize()
try:
self.parsetree = pydriver.parse_tokens(self.tokens)
except parse.ParseError, err:
raise PycodeError('parsing failed', err)
def find_attr_docs(self, scope=''):
"""Find class and module-level attributes and their documentation."""
if self.attr_docs is not None:
return self.attr_docs
self.parse()
attr_visitor = AttrDocVisitor(number2name, scope, self.encoding)
attr_visitor.visit(self.parsetree)
self.attr_docs = attr_visitor.collected
self.tagorder = attr_visitor.tagorder
# now that we found everything we could in the tree, throw it away
# (it takes quite a bit of memory for large modules)
self.parsetree = None
return attr_visitor.collected
def find_tags(self):
"""Find class, function and method definitions and their location."""
if self.tags is not None:
return self.tags
self.tokenize()
result = {}
namespace = []
stack = []
indent = 0
defline = False
expect_indent = False
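        # stack entries are (def|class, dotted name, start line, indent level)
        # for definitions whose suite has not been closed yet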
def tokeniter(ignore = (token.COMMENT, token.NL)):
for tokentup in self.tokens:
if tokentup[0] not in ignore:
yield tokentup
tokeniter = tokeniter()
for type, tok, spos, epos, line in tokeniter:
if expect_indent:
if type != token.INDENT:
# no suite -- one-line definition
assert stack
dtype, fullname, startline, _ = stack.pop()
endline = epos[0]
namespace.pop()
result[fullname] = (dtype, startline, endline)
expect_indent = False
if tok in ('def', 'class'):
name = next(tokeniter)[1]
namespace.append(name)
fullname = '.'.join(namespace)
stack.append((tok, fullname, spos[0], indent))
defline = True
elif type == token.INDENT:
expect_indent = False
indent += 1
elif type == token.DEDENT:
indent -= 1
# if the stacklevel is the same as it was before the last
# def/class block, this dedent closes that block
if stack and indent == stack[-1][3]:
dtype, fullname, startline, _ = stack.pop()
endline = spos[0]
namespace.pop()
result[fullname] = (dtype, startline, endline)
elif type == token.NEWLINE:
# if this line contained a definition, expect an INDENT
# to start the suite; if there is no such INDENT
# it's a one-line definition
if defline:
defline = False
expect_indent = True
self.tags = result
return result
if __name__ == '__main__':
import time, pprint
x0 = time.time()
#ma = ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html')
ma = ModuleAnalyzer.for_file('sphinx/environment.py',
'sphinx.environment')
ma.tokenize()
x1 = time.time()
ma.parse()
x2 = time.time()
#for (ns, name), doc in ma.find_attr_docs().iteritems():
# print '>>', ns, name
# print '\n'.join(doc)
pprint.pprint(ma.find_tags())
x3 = time.time()
#print nodes.nice_repr(ma.parsetree, number2name)
print "tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2)
|
waseem18/oh-mainline
|
vendor/packages/sphinx/sphinx/pycode/__init__.py
|
Python
|
agpl-3.0
| 13,014
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a convenient wrapper for spawning a test lighttpd instance.
Usage:
lighttpd_server PATH_TO_DOC_ROOT
"""
import codecs
import contextlib
import httplib
import os
import pexpect
import random
import shutil
import socket
import sys
import tempfile
class LighttpdServer(object):
"""Wraps lighttpd server, providing robust startup.
Args:
document_root: Path to root of this server's hosted files.
port: TCP port on the _host_ machine that the server will listen on. If
      omitted it will attempt to use 9000, or if unavailable it will find
a free port from 8001 - 8999.
lighttpd_path, lighttpd_module_path: Optional paths to lighttpd binaries.
base_config_path: If supplied this file will replace the built-in default
lighttpd config file.
extra_config_contents: If specified, this string will be appended to the
base config (default built-in, or from base_config_path).
config_path, error_log, access_log: Optional paths where the class should
      place temporary files for this session.
"""
def __init__(self, document_root, port=None,
lighttpd_path=None, lighttpd_module_path=None,
base_config_path=None, extra_config_contents=None,
config_path=None, error_log=None, access_log=None):
self.temp_dir = tempfile.mkdtemp(prefix='lighttpd_for_chrome_android')
self.document_root = os.path.abspath(document_root)
self.fixed_port = port
self.port = port or 9000
self.server_tag = 'LightTPD ' + str(random.randint(111111, 999999))
self.lighttpd_path = lighttpd_path or '/usr/sbin/lighttpd'
self.lighttpd_module_path = lighttpd_module_path or '/usr/lib/lighttpd'
self.base_config_path = base_config_path
self.extra_config_contents = extra_config_contents
self.config_path = config_path or self._Mktmp('config')
self.error_log = error_log or self._Mktmp('error_log')
self.access_log = access_log or self._Mktmp('access_log')
self.pid_file = self._Mktmp('pid_file')
self.process = None
def _Mktmp(self, name):
return os.path.join(self.temp_dir, name)
def _GetRandomPort(self):
# Ports 8001-8004 are reserved for other test servers. Ensure we don't
# collide with them.
return random.randint(8005, 8999)
def StartupHttpServer(self):
"""Starts up a http server with specified document root and port."""
    # Currently we use lighttpd as the http server in tests.
while True:
if self.base_config_path:
# Read the config
with codecs.open(self.base_config_path, 'r', 'utf-8') as f:
config_contents = f.read()
else:
config_contents = self._GetDefaultBaseConfig()
if self.extra_config_contents:
config_contents += self.extra_config_contents
# Write out the config, filling in placeholders from the members of |self|
with codecs.open(self.config_path, 'w', 'utf-8') as f:
f.write(config_contents % self.__dict__)
if (not os.path.exists(self.lighttpd_path) or
not os.access(self.lighttpd_path, os.X_OK)):
raise EnvironmentError(
'Could not find lighttpd at %s.\n'
'It may need to be installed (e.g. sudo apt-get install lighttpd)'
% self.lighttpd_path)
self.process = pexpect.spawn(self.lighttpd_path,
['-D', '-f', self.config_path,
'-m', self.lighttpd_module_path],
cwd=self.temp_dir)
client_error, server_error = self._TestServerConnection()
if not client_error:
assert int(open(self.pid_file, 'r').read()) == self.process.pid
break
self.process.close()
      if self.fixed_port or 'in use' not in server_error:
print 'Client error:', client_error
print 'Server error:', server_error
return False
self.port = self._GetRandomPort()
return True
def ShutdownHttpServer(self):
"""Shuts down our lighttpd processes."""
if self.process:
self.process.terminate()
shutil.rmtree(self.temp_dir, ignore_errors=True)
def _TestServerConnection(self):
# Wait for server to start
server_msg = ''
for timeout in xrange(1, 5):
client_error = None
try:
with contextlib.closing(httplib.HTTPConnection(
'127.0.0.1', self.port, timeout=timeout)) as http:
http.set_debuglevel(timeout > 3)
http.request('HEAD', '/')
r = http.getresponse()
r.read()
if (r.status == 200 and r.reason == 'OK' and
r.getheader('Server') == self.server_tag):
return (None, server_msg)
client_error = ('Bad response: %s %s version %s\n ' %
(r.status, r.reason, r.version) +
'\n '.join([': '.join(h) for h in r.getheaders()]))
except (httplib.HTTPException, socket.error) as client_error:
pass # Probably too quick connecting: try again
# Check for server startup error messages
ix = self.process.expect([pexpect.TIMEOUT, pexpect.EOF, '.+'],
timeout=timeout)
if ix == 2: # stdout spew from the server
server_msg += self.process.match.group(0)
      elif ix == 1:  # EOF -- server has quit, so give up.
client_error = client_error or 'Server exited'
break
return (client_error or 'Timeout', server_msg)
def _GetDefaultBaseConfig(self):
return """server.tag = "%(server_tag)s"
server.modules = ( "mod_access",
"mod_accesslog",
"mod_alias",
"mod_cgi",
"mod_rewrite" )
# default document root required
#server.document-root = "."
# files to check for if .../ is requested
index-file.names = ( "index.php", "index.pl", "index.cgi",
"index.html", "index.htm", "default.htm" )
# mimetype mapping
mimetype.assign = (
".gif" => "image/gif",
".jpg" => "image/jpeg",
".jpeg" => "image/jpeg",
".png" => "image/png",
".svg" => "image/svg+xml",
".css" => "text/css",
".html" => "text/html",
".htm" => "text/html",
".xhtml" => "application/xhtml+xml",
".xhtmlmp" => "application/vnd.wap.xhtml+xml",
".js" => "application/x-javascript",
".log" => "text/plain",
".conf" => "text/plain",
".text" => "text/plain",
".txt" => "text/plain",
".dtd" => "text/xml",
".xml" => "text/xml",
".manifest" => "text/cache-manifest",
)
# Use the "Content-Type" extended attribute to obtain mime type if possible
mimetype.use-xattr = "enable"
##
# which extensions should not be handle via static-file transfer
#
# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi
static-file.exclude-extensions = ( ".php", ".pl", ".cgi" )
server.bind = "127.0.0.1"
server.port = %(port)s
## virtual directory listings
dir-listing.activate = "enable"
#dir-listing.encoding = "iso-8859-2"
#dir-listing.external-css = "style/oldstyle.css"
## enable debugging
#debug.log-request-header = "enable"
#debug.log-response-header = "enable"
#debug.log-request-handling = "enable"
#debug.log-file-not-found = "enable"
#### SSL engine
#ssl.engine = "enable"
#ssl.pemfile = "server.pem"
# Autogenerated test-specific config follows.
cgi.assign = ( ".cgi" => "/usr/bin/env",
".pl" => "/usr/bin/env",
".asis" => "/bin/cat",
".php" => "/usr/bin/php-cgi" )
server.errorlog = "%(error_log)s"
accesslog.filename = "%(access_log)s"
server.upload-dirs = ( "/tmp" )
server.pid-file = "%(pid_file)s"
server.document-root = "%(document_root)s"
"""
def main(argv):
server = LighttpdServer(*argv[1:])
try:
if server.StartupHttpServer():
raw_input('Server running at http://127.0.0.1:%s -'
' press Enter to exit it.' % server.port)
else:
print 'Server exit code:', server.process.exitstatus
finally:
server.ShutdownHttpServer()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
tierney/cryptagram-cc
|
src/build/android/lighttpd_server.py
|
Python
|
gpl-3.0
| 8,640
|
# -*- coding: utf-8 -*-
# @Time : 2017/6/29 10:17
# @Author : Hua
# @Site :
# @File : baseClient.py
# @Software: PyCharm
import shutil
from jinja2 import Environment, PackageLoader
import os, sqlite3
from config.Config import Config
class BaseClient(object):
ENV = Environment(loader=PackageLoader('templates', ''), trim_blocks=True,
keep_trailing_newline=True, lstrip_blocks=True)
DOWNLOAD_DIR = 'downloads'
OUTPUT_DIR = 'output'
def __init__(self, config):
"""
:type config: Config
"""
self.config = config
self.url = config.documentUrl if not config.documentUrl.endswith('/') else config.documentUrl[:-1]
        self.originDir = self.url.split('//')[-1].split('/')[0]  # folder name derived from the domain
self.docsetDir = config.name + '.docset'
self.resourcesPath = os.path.join(self.OUTPUT_DIR, self.docsetDir, 'Contents', 'Resources')
self.infoPath = os.path.join(self.OUTPUT_DIR, self.docsetDir, 'Contents', 'Info.plist')
self.documentsPath = os.path.join(self.resourcesPath, 'Documents')
def crawlTheSite(self):
nativePath = self.downloadSite()
self.copySiteToDocsets(nativePath, self.resourcesPath)
self.changeSomeText()
self.generateInfoPlist()
self.generateDB()
self.setupIcon()
def generateInfoPlist(self):
        # generate Info.plist from the template
print '4. generate info.plist'
template = self.ENV.get_template('info.plist')
infoTxt = template.render({'bundleIdentifier': self.config.name, 'homePage': self.config.homePage or self.config.indexPage})
with open(self.infoPath, 'w') as f:
f.write(infoTxt)
print '4.1. already write info.plist success'
def generateDB(self):
dbFilePath = os.path.join(self.resourcesPath, 'docSet.dsidx')
print '5. generate db path: {path}'.format(path=dbFilePath)
db = sqlite3.connect(dbFilePath)
cur = db.cursor()
try:
cur.execute('DROP TABLE searchIndex;')
except:
pass
cur.execute('CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);')
cur.execute('CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);')
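        # this is the standard Dash/Zeal docset index schema: each row maps a
        # symbol (name, type) to the documentation page that defines it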
self.writeDB(cur, db)
def writeDB(self, cur, db):
for root, dirs, files in os.walk(self.documentsPath):
for fileName in files:
fullPath = os.path.join(root, fileName)
if fileName.endswith(".html"):
if self.config.indexPage and fullPath == os.path.join(self.documentsPath, self.config.indexPage):
configList = self.config.indexPageConfigList
elif self.config.homePage and fullPath == os.path.join(self.documentsPath, self.config.homePage):
configList = self.config.homePageConfigList
else:
configList = self.config.otherPageConfigList
for config in configList:
if config.regular:
self.writeItemToDB(cur, fullPath, config.regular, config.typeName)
db.commit()
db.close()
def writeItemToDB(self, cur, fullPath, regex, typeName):
"""
        Search the page for index matches and write them into the database.
        :param cur: database cursor
        :param fullPath: full path of the current file
        :param regex: compiled regular expression object
        :param typeName: index entry type
"""
page = open(fullPath).read()
result = regex.findall(page)
for item in result:
            if isinstance(item, tuple):  # usually a table-of-contents entry
name = item[1]
if name.startswith('Version'):
break
path = item[0]
            elif isinstance(item, basestring):  # usually an in-page anchor
name = item
path = '#'.join([fullPath[len(self.documentsPath):], item])
if not name:
continue
path = self._changeRelativePath(self.documentsPath, fullPath, path)
cur.execute('REPLACE INTO searchIndex(name, type, path) VALUES (?,?,?)',
(name.decode('utf8'), typeName, path.decode('utf8')))
if result:
print 'write %d index item type = %s into DB' % (len(result), typeName)
def downloadSite(self):
"""
        Download the entire site.
"""
print '1. start download site from {url} ……'.format(url=self.url)
        newNativePath = os.path.join(self.DOWNLOAD_DIR, self.config.name)  # local path for the downloaded site
if os.path.exists(newNativePath):
            print '1.1. already have this path: {path}, skip this step'.format(path=newNativePath)
return newNativePath
        # create the downloads folder
if not os.path.exists(self.DOWNLOAD_DIR):
os.mkdir(self.DOWNLOAD_DIR)
os.system('cd {download_dir} && wget -r -p -np -q -k {url}'.format(download_dir=self.DOWNLOAD_DIR, url=self.config.documentUrl))
print '1.1. download: {path} success'.format(path = newNativePath)
        # rename the downloaded folder to the configured name
oldNativePath = os.path.join(self.DOWNLOAD_DIR, self.originDir)
os.rename(oldNativePath, newNativePath)
print '1.2. rename folder: {oldName} --> {newName} success'.format(oldName=oldNativePath, newName=newNativePath)
return newNativePath
def copySiteToDocsets(self, nativePath, resourcePath):
"""
        Copy the site contents into the target docset path.
"""
print '2. copy site to docsets ……'
docsetPath = os.path.join(self.OUTPUT_DIR, self.docsetDir)
if os.path.exists(docsetPath):
print '2.1. already have this path: {path}, remove this path and then copy it'.format(path=docsetPath)
shutil.rmtree(docsetPath)
        # create the local docset folder
os.makedirs(resourcePath)
        # check that the target folder exists
if os.path.exists(resourcePath):
            # copy the whole site into the target folder
shutil.copytree(nativePath, self.documentsPath)
print '2.2 copy folder: {oldPath} --> {newPath} success'.format(oldPath=nativePath, newPath=self.documentsPath)
def _changeRelativePath(self, baseRootPath, basePath, relativePath):
"""
        Resolve a path that is relative to the current file.
        :param baseRootPath: root path the result should be made relative to
        :param basePath: path of the current file
        :param relativePath: target path relative to the current path
        :rtype: str
"""
count = relativePath.count('../')
tmpPath = '/'.join(basePath.split('/')[:-(count + 1)])
p = os.path.join(tmpPath, '/'.join(relativePath.split('/')[count:]))
return p.replace(baseRootPath, '')
def changeSomeText(self):
print '3. change some text from html'
pass
def setupIcon(self):
print '6. setup icon'
pass
|
GeekerHua/develop_documentation_spider
|
client/BaseClient.py
|
Python
|
mit
| 7,057
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
APP_NAME = 'legit'
APP_SCRIPT = './legit_r'
VERSION = '0.2.0'
# Grab requirements.
with open('reqs.txt') as f:
required = f.readlines()
settings = dict()
# Publish Helper.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel upload')
sys.exit()
# Build Helper.
if sys.argv[-1] == 'build':
import py2exe
sys.argv.append('py2exe')
settings.update(
console=[{'script': APP_SCRIPT}],
zipfile = None,
options = {
'py2exe': {
'compressed': 1,
'optimize': 0,
'bundle_files': 1}})
settings.update(
name=APP_NAME,
version=VERSION,
description='Git for Humans.',
long_description=open('README.rst').read(),
author='Kenneth Reitz',
author_email='me@kennethreitz.com',
url='https://github.com/kennethreitz/legit',
packages= ['legit',],
install_requires=required,
license='BSD',
classifiers=(
# 'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
),
entry_points={
'console_scripts': [
'legit = legit.cli:main',
],
}
)
setup(**settings)
|
deshion/legit
|
setup.py
|
Python
|
bsd-3-clause
| 1,882
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0009_auto_20150324_2149'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='in_stock',
),
migrations.RemoveField(
model_name='product',
name='off_shelf',
),
migrations.AddField(
model_name='product',
name='status',
field=models.CharField(choices=[('N', 'In stock'), ('O', 'Out of stock'), ('F', 'Off shelf')], max_length=1, default='N'),
preserve_default=True,
),
]
|
sorz/isi
|
store/product/migrations/0010_auto_20150324_2315.py
|
Python
|
mit
| 733
|
# Copyright (c) 2009, Rotem Yaari <vmalloc@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from repository import RemoteRepository
from repository import LocalRepository
from repository import clone
from repository import find_repository
|
t-amerssonis/okami
|
src/Okami/third-parts/gitpy/__init__.py
|
Python
|
mit
| 1,673
|
#!/usr/bin/python
""" Build light controller """
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
import sys
sys.path.append(current_dir)
import signal
import getopt
import importlib
from lib import daemonize
from lib import logger
from lib import list_utils
from lib.constants import STATUS
from config import json_config
from lights import job2ledstrip
default_config_file = 'config.json'
dlogger = logger.Logger('light_controller')
light = None
class LightController:
def _write_pid(self, filename):
global dlogger
try:
pidfile = open(filename, "w")
pidfile.write("%d\n" % os.getpid())
pidfile.close()
except IOError, e:
dlogger.log("ERROR: unable to write pid file %s: %s" % (filename, str(e)))
def _unlink_pid(self, filename):
try:
os.unlink(filename)
except:
pass
def _handle_signals(self, signum, stack):
global light
if signum == signal.SIGTERM or signum == signal.SIGINT:
if light is not None:
light.stop()
def _print_usage(self, prog_cmd):
print "Usage: %s [options]" % prog_cmd
print "Options:"
print "-b --daemonize Run in the background."
print "-l --syslog Log output to syslog."
print "-c <file> --config <file> Config file, default \"%s\"." % default_config_file
print "-h --help Print this help page."
def __init__(self):
global light
global dlogger
background = False
forcesyslog = False
config_file = default_config_file
try:
            # long option names must be given to getopt without leading dashes
            (opts, args) = getopt.getopt(sys.argv[1:], "hblc:", ["help", "daemonize", "syslog", "config="])
except getopt.error, why:
dlogger.log("Error: getopt error: %s" % why)
self._print_usage(sys.argv[0])
sys.exit(-1)
try:
for opt in opts:
if opt[0] == "-h" or opt[0] == "--help":
self._print_usage(sys.argv[0])
sys.exit(1)
if opt[0] == "-b" or opt[0] == "--daemonize":
background = True
continue
if opt[0] == "-l" or opt[0] == "--syslog":
forcesyslog = True
continue
if opt[0] == "-c" or opt[0] == "--config":
config_file = opt[1]
continue
self._print_usage(sys.argv[0])
sys.exit(-1)
except ValueError, why:
dlogger.log("Error: bad parameter \"%s\" for option %s: %s" % (opt[1], opt[0], why))
self._print_usage(sys.argv[0])
sys.exit(-1)
if forcesyslog:
logger.Logger.use_syslog = True
if background:
logger.Logger.use_syslog = True
daemonize.createDaemon()
if not os.path.isfile(config_file):
dlogger.log("ERROR: config file %s not found." % config_file)
sys.exit(-1)
try:
self.conf = json_config.JsonConfig(config_file)
except Exception, e:
logger.print_trace(e)
sys.exit(-1)
ci_server_conf = self.conf['ci_server']
ci_server_type = ci_server_conf.pop('type')
self.poll_interval_seconds = ci_server_conf.pop('pollrate_s', None)
self.ci = importlib.import_module('ci_server.' + ci_server_type).Source(**ci_server_conf)
def list_projects(self):
return self.ci.list_projects()
def control_lights(self):
light_conf = self.conf['light']
light_type = light_conf.pop('type')
light = importlib.import_module('lights.' + light_type).Strand(**light_conf)
jobs = self.conf['jobs']
job_names = []
job_branches = []
for job in jobs:
job_names.append(job['name'])
job_branches.append(job['branch'])
if len(jobs) < 1:
dlogger.log("No jobs have been configured")
sys.exit(-1)
translator = job2ledstrip.Job2LedStrip(job_names, light)
dlogger.log("Starting light controller")
pidfilename = "/var/run/%s.pid" % os.path.basename(sys.argv[0])
self._write_pid(pidfilename)
try:
light.daemon = True
light.start()
for job in jobs:
translator.update(job['name'], STATUS.UNKNOWN)
while True:
for job in jobs:
job_name = job['name']
status = self.ci.project_status(job_name, job['branch'])
translator.update(job_name, status)
light.join(self.poll_interval_seconds)
if not light.isAlive():
break
except Exception, e:
logger.print_trace(e)
sys.exit(-1)
self._unlink_pid(pidfilename)
dlogger.log("Terminated light controller")
if __name__ == "__main__":
lc = LightController()
signal.signal(signal.SIGTERM, lc._handle_signals)
signal.signal(signal.SIGINT, lc._handle_signals)
# print lc.list_projects()
lc.control_lights()
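# Illustrative config.json sketch (keys inferred from the reads above; the
# "type" values are hypothetical module names resolved via importlib, and each
# module defines what extra keys its Source/Strand constructor accepts):
#
# {
#   "ci_server": {"type": "jenkins", "pollrate_s": 30, "url": "http://ci.example.com"},
#   "light": {"type": "ws2801", "num_leds": 25},
#   "jobs": [{"name": "build-master", "branch": "master"}]
# }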
|
DiUS/build-lights
|
light-controller/light_controller.py
|
Python
|
gpl-3.0
| 5,319
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (See LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Tests."""
|
spyder-ide/loghub
|
loghub/tests/__init__.py
|
Python
|
mit
| 321
|
#!/usr/bin/env python3.7
# coding=utf-8
"""Jerod Gawne, 2018.06.11
https://github.com/jerodg/hackerrank
"""
import sys
import traceback
if __name__ == '__main__':
try:
[print(i ** 2) for i in range(int(input()))]
except Exception:
        traceback.print_exception(*sys.exc_info())
|
jerodg/hackerrank-python
|
python/00.Introduction/4.Loops/solution1.py
|
Python
|
mit
| 307
|
import os.path as op
import logging
import shutil
from subprocess import check_output
from tempfile import mkdtemp
import click
from ob_pipelines.s3 import (
s3, download_file_or_folder, remove_file_or_folder, SCRATCH_DIR, path_to_bucket_and_key
)
logger = logging.getLogger('ob-pipelines')
@click.command()
@click.argument('fq1')
@click.argument('fq2')
@click.argument('out_dir')
@click.argument('name')
def fastqc(fq1, fq2, out_dir, name):
"""Run FastQC"""
out_dir = out_dir if out_dir.endswith('/') else out_dir + '/'
temp_dir = mkdtemp(dir=SCRATCH_DIR)
fq1_local = op.join(temp_dir, name + '_1.fastq.gz')
fq2_local = op.join(temp_dir, name + '_2.fastq.gz')
if fq1.startswith('s3://'):
# Assume that if fq1 is in S3, so is fq2
download_file_or_folder(fq1, fq1_local)
download_file_or_folder(fq2, fq2_local)
else:
shutil.copy(fq1, fq1_local)
shutil.copy(fq2, fq2_local)
cmd = ['fastqc', '-o', temp_dir, fq1_local, fq2_local]
# Run command and save output
logging.info('Running:\n{}'.format(' '.join(cmd)))
out = check_output(cmd)
logging.info(out.decode())
out_files = [
name + '_1_fastqc.html',
name + '_2_fastqc.html',
name + '_1_fastqc.zip',
name + '_2_fastqc.zip'
]
for fname in out_files:
# Upload temp out directory to S3 with prefix
if out_dir.startswith('s3://'):
bucket, key = path_to_bucket_and_key(out_dir)
local_fpath = op.join(temp_dir, fname)
print('uploading {} to s3://{}/{}{}'.format(local_fpath, bucket, key, fname))
s3.upload_file(local_fpath, bucket, key + fname)
remove_file_or_folder(local_fpath)
        else:
            # move each report individually; moving the whole temp_dir here
            # would fail after the first loop iteration
            shutil.move(op.join(temp_dir, fname), op.join(out_dir, fname))
if __name__ == '__main__':
fastqc()
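# Example invocation sketch (hypothetical paths; assumes the fastqc binary is
# on the PATH and AWS credentials are configured for the S3 helpers above):
#
#   python fastqc.py s3://my-bucket/reads_1.fastq.gz s3://my-bucket/reads_2.fastq.gz \
#       s3://my-bucket/qc/ sampleA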
|
outlierbio/ob-pipelines
|
ob_pipelines/apps/fastqc/fastqc.py
|
Python
|
apache-2.0
| 1,841
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler host filters
"""
import functools
from nova import filters
from nova import objects
class BaseHostFilter(filters.BaseFilter):
"""Base class for host filters."""
def _filter_one(self, obj, filter_properties):
"""Return True if the object passes the filter, otherwise False."""
return self.host_passes(obj, filter_properties)
def host_passes(self, host_state, filter_properties):
"""Return True if the HostState passes the filter, otherwise False.
Override this in a subclass.
"""
raise NotImplementedError()
class HostFilterHandler(filters.BaseFilterHandler):
def __init__(self):
super(HostFilterHandler, self).__init__(BaseHostFilter)
def all_filters():
"""Return a list of filter classes found in this directory.
This method is used as the default for available scheduler filters
and should return a list of all filter classes available.
"""
return HostFilterHandler().get_all_classes()
# TODO(sbauza): Remove that decorator once all filters are using RequestSpec
# object directly.
def compat_legacy_props(function):
"""Decorator for returning a legacy filter_properties dictionary.
This is used for keeping unchanged the existing filters without yet using
the RequestSpec fields by returning a legacy dictionary.
"""
@functools.wraps(function)
def decorated_host_passes(self, host_state, filter_properties):
if isinstance(filter_properties, objects.RequestSpec):
legacy_props = filter_properties.to_legacy_filter_properties_dict()
legacy_props.update({'request_spec': (
filter_properties.to_legacy_request_spec_dict()),
'instance_type': filter_properties.flavor})
# TODO(sbauza): Adding two keys not used in-tree but which will be
# provided as non-fields for the RequestSpec once we provide it to
# the filters
legacy_props.update(
{'context': filter_properties._context,
'config_options': filter_properties.config_options})
filter_properties = legacy_props
return function(self, host_state, filter_properties)
return decorated_host_passes
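# Illustrative sketch (not part of nova): a minimal filter built on the classes
# above; `host_state.free_ram_mb` is assumed to be supplied by HostState.
#
# class EnoughRamFilter(BaseHostFilter):
#     """Only pass hosts with at least 1 GB of free RAM."""
#
#     @compat_legacy_props
#     def host_passes(self, host_state, filter_properties):
#         return host_state.free_ram_mb >= 1024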
|
devendermishrajio/nova
|
nova/scheduler/filters/__init__.py
|
Python
|
apache-2.0
| 2,897
|
#!/usr/bin/env python
import image,numpy as np,auger,plot,dump
from scipy.ndimage.filters import gaussian_filter
np.random.seed(65983146)
f, ((ax1,ax2),(ax3,ax4)) = plot.subplots(nrows=2, ncols=2, sharex=False, sharey=False)
#spot 29 CT
filen = 'run.pMSA/output.2391740/ipnl-auger-tof-1.root'
auger.testfit(filen,1e9,ax1)
filen = 'run.EPjL/output.2391621/ipnl-auger-tof-1.root'
auger.testfit(filen,1e7,ax2)
filen = 'run.pMSA/output.2391740/iba-auger-notof-3.root'
auger.testfit(filen,1e9,ax3)
filen = 'run.EPjL/output.2391621/iba-auger-notof-3.root'
auger.testfit(filen,1e7,ax4)
#ax1.set_title(labels[0])
#ax2.set_title(labels[1])
#ax3.set_title(labels[2])
#ax4.set_title(labels[3])
#f.subplots_adjust(hspace=.5)
ax1.set_xlabel('')
ax2.set_xlabel('')
ax2.set_ylabel('')
ax4.set_ylabel('')
f.savefig('fopproc.pdf', bbox_inches='tight')
plot.close('all')
|
brenthuisman/phd_tools
|
graph.fopproc.py
|
Python
|
lgpl-3.0
| 863
|
#!/usr/bin/python
"""
Author: Jared R. Luellen
This repo is located at: https://github.com/jluellen/sms_ip
Sends current internal and external IP on boot
Add to cron by: sudo crontab -e
Append: @reboot python /home/pi/get_ip.py &
Save crontab
Reboot device and test
"""
from googlevoice import Voice
import re
import os
import subprocess
import mechanize
voice = Voice()
def main():
version = get_os()
internal_ip = get_internal_ip(version)
external_ip = get_external_ip()
text = 'Raspberry Pi: \r\r' + 'Internal: ' + internal_ip + '\n' + 'External: ' + external_ip
print text
send_text(text)
def get_os():
# Determine which OS distribution the system is running
version = os.uname()[0]
return version
def get_internal_ip(version):
    if version == 'Darwin':
        internal_ip = subprocess.check_output(['/sbin/ifconfig','en0'])
        print internal_ip
        internal_ip = re.search('inet ([\d\.]*)', internal_ip)
        internal_ip = internal_ip.group(1)
if version == 'Linux':
internal_ip = subprocess.check_output(['/sbin/ifconfig','eth0'])
internal_ip = re.search('inet addr:([\d\.]*)', internal_ip)
internal_ip = internal_ip.group(1)
return internal_ip
def get_external_ip():
# Generates a web browser instance
# Opens www.whatsmyip.com/ and gets external IP address
br = mechanize.Browser()
# Browser options
br.set_handle_equiv(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# User-Agent makes the destination website think it's from a real person
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
html = br.open('http://www.whatismyip.com/')
html = html.read()
html = unicode(html, errors='ignore')
# Searches through the raw html file and grabs the paragraph "the-ip", where the external IP is displayed
match = re.search('<div class="the-ip">(.*)</div>', html)
# Looks at "the-ip" section and finds html char, i.e. ':'
if match:
chars = re.findall('\&\#(\d*)', match.group(1))
external_ip = ''.join([chr(int(char)) for char in chars])
#debug.write(str(external_ip))
return external_ip
def send_text(text):
# Reads a file listed in git ignore, it contains username, password, and phone number(s) you wish to send to
# EXAMPLE user_info.txt file:
# email@gmail.com
# YOUR_PASSWORD
# 3333333333, 3333333333, 3333333333
user_info = open('/home/pi/sms_ip/user_info.txt', 'r')
    # strip trailing newlines so the login and SMS fields are clean
    user_name = user_info.readline().strip()
    user_pass = user_info.readline().strip()
    user_tele = user_info.readline().strip()
user_info.close()
voice.login(user_name, user_pass)
phoneNumber = user_tele
voice.send_sms(phoneNumber, text)
if __name__ == "__main__": main()
|
jluellen/sms_ip
|
get_ip.py
|
Python
|
gpl-2.0
| 2,802
|
import threading
import time
import queue
import paho.mqtt.client as mqtt
from python_homeautomation.devices.BaseDevice import BaseDevice
class MySensorHumidity(BaseDevice):
MODULE = 'MySensorHumidity'
UI_FIELDS = [{'name': 'humidity', 'text': 'Humidity', 'type': 'text', 'extra': None}]
def monitor(self, client, userdata, msg):
print('MySensorTemp [{}]: {}: {}'.format(self.name, msg.topic, msg.payload))
self.out_queue.put({'humidity': msg.payload.decode('utf-8')})
def event(self):
while self.running:
try:
data = self.in_queue.get(timeout=5)
print('Received data: {}'.format(data))
except queue.Empty:
pass
    def run(self):
        self.client.on_message = self.monitor
        self.client.connect(self.mqtt_address, self.mqtt_port)
        print("Subscribing to mygateway-out/{}/{}/#".format(self.node_id, self.sensor_id))
        self.client.subscribe('mygateway-out/{}/{}/#'.format(self.node_id, self.sensor_id))
        # Start the event consumer before entering the MQTT loop; starting it
        # after the loop meant it only ran once self.running was already False.
        self.event_thread = threading.Thread(target=self.event)
        self.event_thread.start()
        while self.running:
            self.client.loop()
            time.sleep(2)
        self.event_thread.join()
def stop(self):
self.running = False
def __init__(self, name, in_queue, out_queue, device_id):
BaseDevice.__init__(self, device_id)
self.name = name
self.in_queue = in_queue
self.out_queue = out_queue
self.monitor_thread = None
self.event_thread = None
self.node_id = self.args['node_id']
self.sensor_id = self.args['sensor_id']
self.mqtt_address = self.args['hostname']
self.mqtt_port = int(self.args['port'])
self.running = True
self.client = mqtt.Client()
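# Illustrative wiring sketch (assumes BaseDevice(device_id) populates self.args
# with node_id, sensor_id, hostname and port for this device):
#
# in_q, out_q = queue.Queue(), queue.Queue()
# dev = MySensorHumidity('greenhouse', in_q, out_q, device_id=42)
# threading.Thread(target=dev.run, daemon=True).start()
# print(out_q.get())  # e.g. {'humidity': '54.3'} once the sensor publishes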
|
sroehl/python_homeautomation
|
python_homeautomation/devices/MySensorHumidity.py
|
Python
|
mit
| 1,830
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('event_mapper', '0020_auto_20150511_1626'),
]
operations = [
migrations.RemoveField(
model_name='movement',
name='id',
),
migrations.RemoveField(
model_name='movement',
name='region',
),
migrations.AddField(
model_name='movement',
name='country',
field=models.OneToOneField(primary_key=True, default=1, serialize=False, to='event_mapper.Country', help_text=b'The country where the movement happens.', verbose_name=b'Country'),
preserve_default=False,
),
]
|
timlinux/watchkeeper
|
django_project/event_mapper/migrations/0021_auto_20150511_2130.py
|
Python
|
bsd-2-clause
| 791
|
#!/usr/bin/python3
from RaceGame import RaceGame
import pygame
def main():
pygame.init()
game = RaceGame()
game.run()
if __name__ == "__main__":
main()
|
gezichtshaar/PyRaceGame
|
program.py
|
Python
|
gpl-2.0
| 171
|
# this file is here to make Install.py and utils.py importable.
# keep these lines to make it non-zero size and have winzip cooperate.
|
collective/Products.Paypal2SalesforceLead
|
Products/Paypal2SalesforceLead/Extensions/__init__.py
|
Python
|
gpl-2.0
| 137
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-20 12:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hkm', '0028_order_discount_lines'),
]
operations = [
migrations.AddField(
model_name='productordercollection',
name='is_checkout_successful',
field=models.NullBooleanField(verbose_name='Checkout successful'),
),
migrations.AddField(
model_name='productordercollection',
name='is_payment_successful',
field=models.NullBooleanField(verbose_name='Payment successful'),
),
migrations.AddField(
model_name='productordercollection',
name='is_order_successful',
field=models.NullBooleanField(verbose_name='Order successful'),
),
]
|
andersinno/kuvaselaamo
|
hkm/migrations/0029_checkout_status_flags.py
|
Python
|
mit
| 921
|
from PIL import Image
import math, sys, json
import os.path
if __name__ == "__main__":
if not(len(sys.argv) == 3 and os.path.isfile(sys.argv[1])):
print "USAGE: python2 levelconverter.py inputfile outputfile"
sys.exit(-1)
im = Image.open(sys.argv[1])
rgb_im = im.convert('RGB')
(width, height) = im.size
result = []
for y in range(height):
currentArray = []
for x in range(width):
r, g, b = rgb_im.getpixel((x, y))
kleurtje = '%02x%02x%02x' % (r, g, b)
currentArray.append(kleurtje)
result.append(currentArray)
f = open(sys.argv[2], 'w+')
f.write(json.dumps(result))
    f.close()
|
nickschot/ludumdare28
|
levelconverter.py
|
Python
|
gpl-2.0
| 715
|
from typing import Any, Dict, Iterable, List, Tuple
import cirq
import stim
@cirq.value_equality
class DetAnnotation(cirq.Operation):
"""Annotates that a particular combination of measurements is deterministic.
Creates a DETECTOR operation when converting to a stim circuit.
"""
def __init__(
self,
*,
parity_keys: Iterable[str] = (),
relative_keys: Iterable[int] = (),
coordinate_metadata: Iterable[float] = (),
):
"""
Args:
parity_keys: The keys of some measurements with the property that their parity is always
the same under noiseless execution of the circuit.
relative_keys: Refers to measurements relative to this operation. For example,
relative key -1 is the previous measurement. All entries must be negative.
coordinate_metadata: An optional location for the detector. This has no effect on the
function of the circuit, but can be used by plotting tools.
"""
self.parity_keys = frozenset(parity_keys)
self.relative_keys = frozenset(relative_keys)
self.coordinate_metadata = tuple(coordinate_metadata)
@property
def qubits(self) -> Tuple[cirq.Qid, ...]:
return ()
def with_qubits(self, *new_qubits) -> 'DetAnnotation':
return self
    def _value_equality_values_(self) -> Any:
        return self.parity_keys, self.relative_keys, self.coordinate_metadata
def _circuit_diagram_info_(self, args: Any) -> str:
items: List[str] = [repr(e) for e in sorted(self.parity_keys)]
items += [f'rec[{e}]' for e in sorted(self.relative_keys)]
k = ",".join(str(e) for e in items)
return f"Det({k})"
def _json_dict_(self) -> Dict[str, Any]:
result = {
'cirq_type': self.__class__.__name__,
'parity_keys': sorted(self.parity_keys),
'coordinate_metadata': self.coordinate_metadata,
}
if self.relative_keys:
result['relative_keys'] = sorted(self.relative_keys)
return result
def __repr__(self) -> str:
return (
f'stimcirq.DetAnnotation('
f'parity_keys={sorted(self.parity_keys)}, '
f'relative_keys={sorted(self.relative_keys)}, '
f'coordinate_metadata={self.coordinate_metadata!r})'
)
def _decompose_(self):
return []
def _is_comment_(self) -> bool:
return True
def _stim_conversion_(
self,
edit_circuit: stim.Circuit,
edit_measurement_key_lengths: List[Tuple[str, int]],
have_seen_loop: bool = False,
**kwargs,
):
# Ideally these references would all be resolved ahead of time, to avoid the redundant
# linear search overhead and also to avoid the detectors and measurements being interleaved
# instead of grouped (grouping measurements is helpful for stabilizer simulation). But that
# didn't happen and this is the context we're called in and we're going to make it work.
if have_seen_loop and self.parity_keys:
raise NotImplementedError(
"Measurement key conversion is not reliable when loops are present."
)
# Find indices of measurement record targets.
remaining = set(self.parity_keys)
rec_targets = [stim.target_rec(k) for k in sorted(self.relative_keys, reverse=True)]
for offset in range(len(edit_measurement_key_lengths)):
m_key, m_len = edit_measurement_key_lengths[-1 - offset]
if m_len != 1:
raise NotImplementedError(f"multi-qubit measurement {m_key!r}")
if m_key in remaining:
remaining.discard(m_key)
rec_targets.append(stim.target_rec(-1 - offset))
if not remaining:
break
if remaining:
raise ValueError(
f"{self!r} was processed before measurements it referenced ({sorted(remaining)!r})."
f" Make sure the referenced measurements keys are actually in the circuit, and come"
f" in an earlier moment (or earlier in the same moment's operation order)."
)
edit_circuit.append_operation("DETECTOR", rec_targets, self.coordinate_metadata)
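# Illustrative usage sketch (follows the conventions shown above): annotate that
# the measurement keyed 'm' is deterministic in a noiseless single-qubit circuit.
#
# import cirq
# q = cirq.LineQubit(0)
# circuit = cirq.Circuit(
#     cirq.measure(q, key='m'),
#     DetAnnotation(parity_keys=['m'], coordinate_metadata=[0, 0]),
# )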
|
quantumlib/Stim
|
glue/cirq/stimcirq/_det_annotation.py
|
Python
|
apache-2.0
| 4,341
|
#! /usr/bin/python
import sys
import os
import json
import grpc
import time
import subprocess
from google.oauth2 import service_account
import google.oauth2.credentials
import google.auth.transport.requests
import google.auth.transport.grpc
from google.firestore.v1beta1 import firestore_pb2
from google.firestore.v1beta1 import firestore_pb2_grpc
from google.firestore.v1beta1 import document_pb2
from google.firestore.v1beta1 import document_pb2_grpc
from google.firestore.v1beta1 import common_pb2
from google.firestore.v1beta1 import common_pb2_grpc
from google.firestore.v1beta1 import write_pb2
from google.firestore.v1beta1 import write_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
from google.firestore.admin.v1beta1 import index_pb2
from google.firestore.admin.v1beta1 import index_pb2_grpc
from google.firestore.admin.v1beta1 import firestore_admin_pb2
from google.firestore.admin.v1beta1 import firestore_admin_pb2_grpc
def main():
subprocess.call('clear')
fl = os.path.dirname(os.path.abspath(__file__))
fn = os.path.join(fl, 'grpc.json')
with open(fn) as grpc_file:
item = json.load(grpc_file)
creds = item["grpc"]["Commit"]["credentials"]
credentials = service_account.Credentials.from_service_account_file("{}".format(creds))
scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/datastore'])
http_request = google.auth.transport.requests.Request()
channel = google.auth.transport.grpc.secure_authorized_channel(scoped_credentials, http_request, 'firestore.googleapis.com:443')
stub = firestore_pb2_grpc.FirestoreStub(channel)
now = time.time()
seconds = int(now)
timestamp = timestamp_pb2.Timestamp(seconds=seconds)
# database defined in the grpc.json file
database = item["grpc"]["Commit"]["database"]
options = common_pb2.TransactionOptions(read_write = common_pb2.TransactionOptions.ReadWrite())
begin_transaction_request = firestore_pb2.BeginTransactionRequest(database = database, options = options)
begin_transaction_response = stub.BeginTransaction(begin_transaction_request)
transaction = begin_transaction_response.transaction
    # document mask field_path is defined in the grpc.json file
    field_paths = item["grpc"]["Commit"]["field_paths"]
update_mask = common_pb2.DocumentMask(field_paths = [field_paths])
# document_fileds is defined in the grpc.json file
fields=item["grpc"]["Commit"]["fields"]
# document_name is defined in the grpc.json file
name =item["grpc"]["Commit"]["name"]
update = document_pb2.Document(name=name, fields=fields, create_time = timestamp, update_time = timestamp)
    writes = write_pb2.Write(update_mask = update_mask, update=update)
commit_request = firestore_pb2.CommitRequest(database = database, writes = [writes], transaction = transaction )
commit_response = stub.Commit(commit_request)
print(commit_response)
if __name__ == "__main__":
main()
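# Illustrative grpc.json sketch (keys inferred from the lookups above; every
# value is a placeholder, and "fields" must follow the Firestore Value schema):
#
# {
#   "grpc": {
#     "Commit": {
#       "credentials": "/path/to/service-account.json",
#       "database": "projects/<project-id>/databases/(default)",
#       "name": "projects/<project-id>/databases/(default)/documents/users/alice",
#       "field_paths": "foo",
#       "fields": {"foo": {"string_value": "bar"}}
#     }
#   }
# }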
|
GoogleCloudPlatform/grpc-gcp-python
|
firestore/examples/end2end/src/Commit.py
|
Python
|
apache-2.0
| 3,667
|
from __future__ import with_statement
from os import path, makedirs, environ
import shutil
from nose.plugins.skip import SkipTest
import _mssql
from .helpers import tmpdir
config_dump_path = path.join(tmpdir, 'freetds-config-dump.txt')
def setup_module():
if not path.isdir(tmpdir):
makedirs(tmpdir)
class TestConfig(object):
def connect(self, **kwargs):
environ['TDSDUMPCONFIG'] = config_dump_path
try:
_mssql.connect(**kwargs)
assert False
except _mssql.MSSQLDriverException, e:
# we get this when the name of the server is not valid
if 'Connection to the database failed' not in str(e):
raise
except _mssql.MSSQLDatabaseException, e:
# we get this when the name or IP can be obtained but the connection
# can not be made
if e.args[0][0] != 20009:
raise
with open(config_dump_path, 'rb') as fh:
return fh.read()
def test_config_values(self):
config_dump = self.connect(
server='dontnameyourserverthis',
user = 'bob',
database = 'tempdb',
)
assert 'user_name = bob' in config_dump
# it would be nice if this was the DB name, see test_dbsetldbname()
assert 'database = \n' in config_dump
# test default port
assert 'port = 1433' in config_dump
        # not sure why version 7.1 is used instead of 8.0, which is the
        # default
assert 'major_version = 7' in config_dump
assert 'minor_version = 1' in config_dump
def test_dbsetldbname(self):
# sybdb.h defines DBSETLDBNAME, we should try to use that to get
# the DB in the config dump for debugging purposes
raise SkipTest # test_dbsetldbname
    def test_tds_protocol_version_42(self):
config_dump = self.connect(tds_version='4.2')
assert 'major_version = 4' in config_dump
assert 'minor_version = 2' in config_dump
    def test_tds_protocol_version_70(self):
config_dump = self.connect(tds_version='7.0')
assert 'major_version = 7' in config_dump
assert 'minor_version = 0' in config_dump
    def test_tds_protocol_version_71(self):
config_dump = self.connect(tds_version='7.1')
assert 'major_version = 7' in config_dump
assert 'minor_version = 1' in config_dump
    def test_tds_protocol_version_80(self):
        # follow-up: turns out 8.0 was erroneous. MS named the new protocol
        # 7.1 instead of 8.0, so FreeTDS will accept 8.0 but shows it as 7.1.
        # Got that from the FreeTDS mailing list. New FreeTDS docs, built from
        # source, have a page that describes the protocol and that page lists
        # versions 7.0, 7.1, and 7.2 among others.
config_dump = self.connect(tds_version='8.0')
assert 'major_version = 7' in config_dump
assert 'minor_version = 1' in config_dump
    def test_tds_protocol_version_invalid(self):
try:
self.connect(tds_version='1.0')
assert False
except _mssql.MSSQLException, e:
assert 'unrecognized tds version: 1.0' == str(e)
def test_tds_nonstandard_port_int(self):
        # an int port should be converted to a string internally
config_dump = self.connect(port=1435)
assert 'port = 1435' in config_dump
|
blackbass1988/pymssql-macos-lion
|
tests/test_config.py
|
Python
|
lgpl-2.1
| 3,403
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.conf import settings
from django.template.defaultfilters import filesizeformat # noqa
from django.utils.text import normalize_newlines # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon.utils import fields
from horizon.utils import functions
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.images_and_snapshots import utils
LOG = logging.getLogger(__name__)
class SelectProjectUserAction(workflows.Action):
project_id = forms.ChoiceField(label=_("Project"))
user_id = forms.ChoiceField(label=_("User"))
def __init__(self, request, *args, **kwargs):
super(SelectProjectUserAction, self).__init__(request, *args, **kwargs)
# Set our project choices
projects = [(tenant.id, tenant.name)
for tenant in request.user.authorized_tenants]
self.fields['project_id'].choices = projects
# Set our user options
users = [(request.user.id, request.user.username)]
self.fields['user_id'].choices = users
class Meta:
name = _("Project & User")
# Unusable permission so this is always hidden. However, we
# keep this step in the workflow for validation/verification purposes.
permissions = ("!",)
class SelectProjectUser(workflows.Step):
action_class = SelectProjectUserAction
contributes = ("project_id", "user_id")
class SetInstanceDetailsAction(workflows.Action):
availability_zone = forms.ChoiceField(label=_("Availability Zone"),
required=False)
name = forms.CharField(max_length=80, label=_("Instance Name"))
flavor = forms.ChoiceField(label=_("Flavor"),
help_text=_("Size of image to launch."))
count = forms.IntegerField(label=_("Instance Count"),
min_value=1,
initial=1,
help_text=_("Number of instances to launch."))
source_type = forms.ChoiceField(label=_("Instance Boot Source"),
required=True,
help_text=_("Choose Your Boot Source "
"Type."))
instance_snapshot_id = forms.ChoiceField(label=_("Instance Snapshot"),
required=False)
volume_id = forms.ChoiceField(label=_("Volume"), required=False)
volume_snapshot_id = forms.ChoiceField(label=_("Volume Snapshot"),
required=False)
image_id = forms.ChoiceField(
label=_("Image Name"),
required=False,
widget=fields.SelectWidget(
data_attrs=('volume_size',),
transform=lambda x: ("%s (%s)" % (x.name,
filesizeformat(x.bytes)))))
volume_size = forms.CharField(label=_("Device size (GB)"),
required=False,
help_text=_("Volume size in gigabytes "
"(integer value)."))
device_name = forms.CharField(label=_("Device Name"),
required=False,
initial="vda",
help_text=_("Volume mount point (e.g. 'vda' "
"mounts at '/dev/vda')."))
delete_on_terminate = forms.BooleanField(label=_("Delete on Terminate"),
initial=False,
required=False,
help_text=_("Delete volume on "
"instance terminate"))
class Meta:
name = _("Details")
help_text_template = ("project/instances/"
"_launch_details_help.html")
def __init__(self, request, context, *args, **kwargs):
self._init_images_cache()
super(SetInstanceDetailsAction, self).__init__(
request, context, *args, **kwargs)
source_type_choices = [
('', _("--- Select source ---")),
("image_id", _("Boot from image")),
("instance_snapshot_id", _("Boot from snapshot")),
]
if base.is_service_enabled(request, 'volume'):
source_type_choices.append(("volume_id", _("Boot from volume")))
try:
if api.nova.extension_supported("BlockDeviceMappingV2Boot",
request):
source_type_choices.append(("volume_image_id",
_("Boot from image (creates a new volume)")))
except Exception:
exceptions.handle(request, _('Unable to retrieve extensions '
'information.'))
source_type_choices.append(("volume_snapshot_id",
_("Boot from volume snapshot (creates a new volume)")))
self.fields['source_type'].choices = source_type_choices
def clean(self):
cleaned_data = super(SetInstanceDetailsAction, self).clean()
count = cleaned_data.get('count', 1)
# Prevent launching more instances than the quota allows
usages = quotas.tenant_quota_usages(self.request)
available_count = usages['instances']['available']
if available_count < count:
error_message = ungettext_lazy('The requested instance '
'cannot be launched as you only '
'have %(avail)i of your quota '
'available. ',
'The requested %(req)i instances '
'cannot be launched as you only '
'have %(avail)i of your quota '
'available.',
count)
params = {'req': count,
'avail': available_count}
raise forms.ValidationError(error_message % params)
# Validate our instance source.
source_type = self.data.get('source_type', None)
if source_type == 'image_id':
if not cleaned_data.get('image_id'):
msg = _("You must select an image.")
self._errors['image_id'] = self.error_class([msg])
elif source_type == 'instance_snapshot_id':
if not cleaned_data['instance_snapshot_id']:
msg = _("You must select a snapshot.")
self._errors['instance_snapshot_id'] = self.error_class([msg])
elif source_type == 'volume_id':
if not cleaned_data.get('volume_id'):
msg = _("You must select a volume.")
self._errors['volume_id'] = self.error_class([msg])
# Prevent launching multiple instances with the same volume.
# TODO(gabriel): is it safe to launch multiple instances with
# a snapshot since it should be cloned to new volumes?
if count > 1:
msg = _('Launching multiple instances is only supported for '
'images and instance snapshots.')
raise forms.ValidationError(msg)
elif source_type == 'volume_image_id':
if not cleaned_data['image_id']:
msg = _("You must select an image.")
self._errors['image_id'] = self.error_class([msg])
if not self.data.get('volume_size', None):
msg = _("You must set volume size")
self._errors['volume_size'] = self.error_class([msg])
if not cleaned_data.get('device_name'):
msg = _("You must set device name")
self._errors['device_name'] = self.error_class([msg])
elif source_type == 'volume_snapshot_id':
if not cleaned_data.get('volume_snapshot_id'):
msg = _("You must select a snapshot.")
self._errors['volume_snapshot_id'] = self.error_class([msg])
if not cleaned_data.get('device_name'):
msg = _("You must set device name")
self._errors['device_name'] = self.error_class([msg])
return cleaned_data
def populate_flavor_choices(self, request, context):
"""By default, returns the available flavors, sorted by RAM
usage (ascending).
Override these behaviours with a CREATE_INSTANCE_FLAVOR_SORT dict
in local_settings.py.
"""
def get_key(flavor, sort_key):
try:
return getattr(flavor, sort_key)
except AttributeError:
LOG.warning('Could not find sort key "%s". Using the default '
'"ram" instead.', sort_key)
return getattr(flavor, 'ram')
try:
flavors = api.nova.flavor_list(request)
flavor_sort = getattr(settings, 'CREATE_INSTANCE_FLAVOR_SORT', {})
rev = flavor_sort.get('reverse', False)
sort_key = flavor_sort.get('key', 'ram')
if not callable(sort_key):
key = lambda flavor: get_key(flavor, sort_key)
else:
key = sort_key
flavor_list = [(flavor.id, "%s" % flavor.name)
for flavor in sorted(flavors, key=key, reverse=rev)]
except Exception:
flavor_list = []
exceptions.handle(request,
_('Unable to retrieve instance flavors.'))
return flavor_list
def populate_availability_zone_choices(self, request, context):
try:
zones = api.nova.availability_zone_list(request)
except Exception:
zones = []
exceptions.handle(request,
_('Unable to retrieve availability zones.'))
zone_list = [(zone.zoneName, zone.zoneName)
for zone in zones if zone.zoneState['available']]
zone_list.sort()
if not zone_list:
zone_list.insert(0, ("", _("No availability zones found")))
elif len(zone_list) > 1:
zone_list.insert(0, ("", _("Any Availability Zone")))
return zone_list
def get_help_text(self):
extra = {}
try:
extra['usages'] = api.nova.tenant_absolute_limits(self.request)
extra['usages_json'] = json.dumps(extra['usages'])
flavors = json.dumps([f._info for f in
api.nova.flavor_list(self.request)])
extra['flavors'] = flavors
images = utils.get_available_images(self.request,
self.initial['project_id'],
self._images_cache)
if images is not None:
attrs = [{'id': i.id,
'min_disk': getattr(i, 'min_disk', 0),
'min_ram': getattr(i, 'min_ram', 0)}
for i in images]
extra['images'] = json.dumps(attrs)
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve quota information."))
return super(SetInstanceDetailsAction, self).get_help_text(extra)
def _init_images_cache(self):
if not hasattr(self, '_images_cache'):
self._images_cache = {}
def _get_volume_display_name(self, volume):
if hasattr(volume, "volume_id"):
vol_type = "snap"
visible_label = _("Snapshot")
else:
vol_type = "vol"
visible_label = _("Volume")
return (("%s:%s" % (volume.id, vol_type)),
(_("%(name)s - %(size)s GB (%(label)s)") %
{'name': volume.display_name or volume.id,
'size': volume.size,
'label': visible_label}))
def populate_image_id_choices(self, request, context):
choices = []
images = utils.get_available_images(request,
context.get('project_id'),
self._images_cache)
for image in images:
image.bytes = image.size
image.volume_size = functions.bytes_to_gigabytes(image.bytes)
choices.append((image.id, image))
if choices:
choices.sort(key=lambda c: c[1].name)
choices.insert(0, ("", _("Select Image")))
else:
choices.insert(0, ("", _("No images available")))
return choices
def populate_instance_snapshot_id_choices(self, request, context):
images = utils.get_available_images(request,
context.get('project_id'),
self._images_cache)
choices = [(image.id, image.name)
for image in images
if image.properties.get("image_type", '') == "snapshot"]
if choices:
choices.insert(0, ("", _("Select Instance Snapshot")))
else:
choices.insert(0, ("", _("No snapshots available")))
return choices
def populate_volume_id_choices(self, request, context):
try:
volumes = [self._get_volume_display_name(v)
for v in cinder.volume_list(self.request)
if v.status == api.cinder.VOLUME_STATE_AVAILABLE]
except Exception:
volumes = []
exceptions.handle(self.request,
_('Unable to retrieve list of volumes.'))
if volumes:
volumes.insert(0, ("", _("Select Volume")))
else:
volumes.insert(0, ("", _("No volumes available")))
return volumes
def populate_volume_snapshot_id_choices(self, request, context):
try:
snapshots = cinder.volume_snapshot_list(self.request)
snapshots = [self._get_volume_display_name(s) for s in snapshots
if s.status == api.cinder.VOLUME_STATE_AVAILABLE]
except Exception:
snapshots = []
exceptions.handle(self.request,
_('Unable to retrieve list of volume '
'snapshots.'))
if snapshots:
snapshots.insert(0, ("", _("Select Volume Snapshot")))
else:
snapshots.insert(0, ("", _("No volume snapshots available")))
return snapshots
class SetInstanceDetails(workflows.Step):
action_class = SetInstanceDetailsAction
depends_on = ("project_id", "user_id")
contributes = ("source_type", "source_id",
"availability_zone", "name", "count", "flavor",
"device_name", # Can be None for an image.
"delete_on_terminate")
def prepare_action_context(self, request, context):
if 'source_type' in context and 'source_id' in context:
context[context['source_type']] = context['source_id']
return context
def contribute(self, data, context):
context = super(SetInstanceDetails, self).contribute(data, context)
# Allow setting the source dynamically.
if ("source_type" in context and "source_id" in context
and context["source_type"] not in context):
context[context["source_type"]] = context["source_id"]
# Translate form input to context for source values.
if "source_type" in data:
if data["source_type"] in ["image_id", "volume_image_id"]:
context["source_id"] = data.get("image_id", None)
else:
context["source_id"] = data.get(data["source_type"], None)
if "volume_size" in data:
context["volume_size"] = data["volume_size"]
return context
KEYPAIR_IMPORT_URL = "horizon:project:access_and_security:keypairs:import"
class SetAccessControlsAction(workflows.Action):
keypair = forms.DynamicChoiceField(label=_("Key Pair"),
required=False,
help_text=_("Which key pair to use for "
"authentication."),
add_item_link=KEYPAIR_IMPORT_URL)
admin_pass = forms.RegexField(
label=_("Admin Pass"),
required=False,
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
error_messages={'invalid': validators.password_validator_msg()})
confirm_admin_pass = forms.CharField(
label=_("Confirm Admin Pass"),
required=False,
widget=forms.PasswordInput(render_value=False))
groups = forms.MultipleChoiceField(label=_("Security Groups"),
required=True,
initial=["default"],
widget=forms.CheckboxSelectMultiple(),
help_text=_("Launch instance in these "
"security groups."))
class Meta:
name = _("Access & Security")
help_text = _("Control access to your instance via key pairs, "
"security groups, and other mechanisms.")
def __init__(self, request, *args, **kwargs):
super(SetAccessControlsAction, self).__init__(request, *args, **kwargs)
if not api.nova.can_set_server_password():
del self.fields['admin_pass']
del self.fields['confirm_admin_pass']
def populate_keypair_choices(self, request, context):
try:
keypairs = api.nova.keypair_list(request)
keypair_list = [(kp.name, kp.name) for kp in keypairs]
except Exception:
keypair_list = []
exceptions.handle(request,
_('Unable to retrieve key pairs.'))
if keypair_list:
if len(keypair_list) == 1:
self.fields['keypair'].initial = keypair_list[0][0]
keypair_list.insert(0, ("", _("Select a key pair")))
else:
keypair_list = (("", _("No key pairs available")),)
return keypair_list
def populate_groups_choices(self, request, context):
try:
groups = api.network.security_group_list(request)
security_group_list = [(sg.name, sg.name) for sg in groups]
except Exception:
exceptions.handle(request,
_('Unable to retrieve list of security groups'))
security_group_list = []
return security_group_list
def clean(self):
'''Check to make sure password fields match.'''
cleaned_data = super(SetAccessControlsAction, self).clean()
if 'admin_pass' in cleaned_data:
if cleaned_data['admin_pass'] != cleaned_data.get(
'confirm_admin_pass', None):
raise forms.ValidationError(_('Passwords do not match.'))
return cleaned_data
class SetAccessControls(workflows.Step):
action_class = SetAccessControlsAction
depends_on = ("project_id", "user_id")
contributes = ("keypair_id", "security_group_ids",
"admin_pass", "confirm_admin_pass")
def contribute(self, data, context):
if data:
post = self.workflow.request.POST
context['security_group_ids'] = post.getlist("groups")
context['keypair_id'] = data.get("keypair", "")
context['admin_pass'] = data.get("admin_pass", "")
context['confirm_admin_pass'] = data.get("confirm_admin_pass", "")
return context
class CustomizeAction(workflows.Action):
customization_script = forms.CharField(widget=forms.Textarea,
label=_("Customization Script"),
required=False,
help_text=_("A script or set of "
"commands to be "
"executed after the "
"instance has been "
"built (max 16kb)."))
class Meta:
name = _("Post-Creation")
help_text_template = ("project/instances/"
"_launch_customize_help.html")
class PostCreationStep(workflows.Step):
action_class = CustomizeAction
contributes = ("customization_script",)
class SetNetworkAction(workflows.Action):
network = forms.MultipleChoiceField(label=_("Networks"),
required=True,
widget=forms.CheckboxSelectMultiple(),
error_messages={
'required': _(
"At least one network must"
" be specified.")},
help_text=_("Launch instance with"
" these networks"))
if api.neutron.is_port_profiles_supported():
profile = forms.ChoiceField(label=_("Policy Profiles"),
required=False,
help_text=_("Launch instance with "
"this policy profile"))
class Meta:
name = _("Networking")
permissions = ('openstack.services.network',)
help_text = _("Select networks for your instance.")
def populate_network_choices(self, request, context):
try:
tenant_id = self.request.user.tenant_id
networks = api.neutron.network_list_for_tenant(request, tenant_id)
for n in networks:
n.set_id_as_name_if_empty()
network_list = [(network.id, network.name) for network in networks]
except Exception:
network_list = []
exceptions.handle(request,
_('Unable to retrieve networks.'))
return network_list
def populate_profile_choices(self, request, context):
try:
profiles = api.neutron.profile_list(request, 'policy')
profile_list = [(profile.id, profile.name) for profile in profiles]
except Exception:
profile_list = []
exceptions.handle(request, _("Unable to retrieve profiles."))
return profile_list
class SetNetwork(workflows.Step):
action_class = SetNetworkAction
# Disabling the template drag/drop only in the case port profiles
# are used till the issue with the drag/drop affecting the
# profile_id detection is fixed.
if api.neutron.is_port_profiles_supported():
contributes = ("network_id", "profile_id",)
else:
template_name = "project/instances/_update_networks.html"
contributes = ("network_id",)
def contribute(self, data, context):
if data:
networks = self.workflow.request.POST.getlist("network")
# If no networks are explicitly specified, network list
# contains an empty string, so remove it.
networks = [n for n in networks if n != '']
if networks:
context['network_id'] = networks
if api.neutron.is_port_profiles_supported():
context['profile_id'] = data.get('profile', None)
return context
class LaunchInstance(workflows.Workflow):
slug = "launch_instance"
name = _("Launch Instance")
finalize_button_name = _("Launch")
success_message = _('Launched %(count)s named "%(name)s".')
failure_message = _('Unable to launch %(count)s named "%(name)s".')
success_url = "horizon:project:instances:index"
default_steps = (SelectProjectUser,
SetInstanceDetails,
SetAccessControls,
SetNetwork,
PostCreationStep)
def format_status_message(self, message):
name = self.context.get('name', 'unknown instance')
count = self.context.get('count', 1)
if int(count) > 1:
return message % {"count": _("%s instances") % count,
"name": name}
else:
return message % {"count": _("instance"), "name": name}
@sensitive_variables('context')
def handle(self, request, context):
custom_script = context.get('customization_script', '')
dev_mapping_1 = None
dev_mapping_2 = None
image_id = ''
# Determine volume mapping options
source_type = context.get('source_type', None)
if source_type in ['image_id', 'instance_snapshot_id']:
image_id = context['source_id']
elif source_type in ['volume_id', 'volume_snapshot_id']:
dev_mapping_1 = {context['device_name']: '%s::%s' %
(context['source_id'],
int(bool(context['delete_on_terminate'])))}
elif source_type == 'volume_image_id':
dev_mapping_2 = [
{'device_name': str(context['device_name']),
'source_type': 'image',
'destination_type': 'volume',
'delete_on_termination':
int(bool(context['delete_on_terminate'])),
'uuid': context['source_id'],
'boot_index': '0',
'volume_size': context['volume_size']
}
]
netids = context.get('network_id', None)
if netids:
nics = [{"net-id": netid, "v4-fixed-ip": ""}
for netid in netids]
else:
nics = None
avail_zone = context.get('availability_zone', None)
# Create port with Network Name and Port Profile
# for the use with the plugin supporting port profiles.
# neutron port-create <Network name> --n1kv:profile <Port Profile ID>
# for net_id in context['network_id']:
## HACK for now use first network
if api.neutron.is_port_profiles_supported():
net_id = context['network_id'][0]
LOG.debug("Horizon->Create Port with %(netid)s %(profile_id)s",
{'netid': net_id, 'profile_id': context['profile_id']})
port = None
try:
port = api.neutron.port_create(request, net_id,
policy_profile_id=
context['profile_id'])
except Exception:
msg = (_('Port not created for profile-id (%s).') %
context['profile_id'])
exceptions.handle(request, msg)
if port and port.id:
nics = [{"port-id": port.id}]
try:
api.nova.server_create(request,
context['name'],
image_id,
context['flavor'],
context['keypair_id'],
normalize_newlines(custom_script),
context['security_group_ids'],
block_device_mapping=dev_mapping_1,
block_device_mapping_v2=dev_mapping_2,
nics=nics,
availability_zone=avail_zone,
instance_count=int(context['count']),
admin_pass=context['admin_pass'])
return True
except Exception:
exceptions.handle(request)
return False
|
neudesk/neucloud
|
openstack_dashboard/dashboards/project/instances/workflows/create_instance.py
|
Python
|
apache-2.0
| 29,514
|
#!/usr/bin/python
import argparse
import sys
#import math
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
from column import getColumn
#from operator import itemgetter, attrgetter
def createParser ():
parser = argparse.ArgumentParser()
parser.add_argument ('--abs', action='store_const', const=True)
parser.add_argument ('--fcord', action='store_const', const=True)
parser.add_argument ('--file', default="graph.csv", action="store")
parser.add_argument ('-t', '--title', default='title', action="store")
parser.add_argument ('-o', '--output', default='graph.png', action="store")
parser.add_argument ('--xlabel', default='x', action="store")
parser.add_argument ('--ylabel1', default='y', action="store")
parser.add_argument ('--ylabel2', default='y', action="store")
parser.add_argument ('--ylabel3', default='y', action="store")
#parser.add_argument ('--l1', default='fx', action="store")
#parser.add_argument ('--l2', default='fy', action="store")
#parser.add_argument ('--l3', default='fz', action="store")
return parser
if __name__ == '__main__':
parser = createParser()
namespace = parser.parse_args(sys.argv[1:])
file = '{}'.format(namespace.file)
title = '{}'.format(namespace.title)
output = '{}'.format(namespace.output)
xlabel = '{}'.format(namespace.xlabel)
ylabel1 = '{}'.format(namespace.ylabel1)
ylabel2 = '{}'.format(namespace.ylabel2)
ylabel3 = '{}'.format(namespace.ylabel3)
#l1 = '{}'.format(namespace.l1)
#l2 = '{}'.format(namespace.l2)
#l3 = '{}'.format(namespace.l3)
xx = np.array(getColumn(file,0)).astype(float)
yx = np.array(getColumn(file,1)).astype(float)
yy = np.array(getColumn(file,2)).astype(float)
yz = np.array(getColumn(file,3)).astype(float)
if namespace.abs:
        y = np.sqrt(yx**2+yy**2+yz**2)
plt.figure()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel1)
plt.vlines(xx, [0], y*1.604, 'b')
plt.grid(True)
#leg = plt.legend()
if namespace.fcord:
plt.figure()
p1 = plt.subplot(3, 1, 1)
plt.title(title)
p1.vlines(xx,[0], yx*1.604, 'b')
plt.ylabel(ylabel1)
plt.grid(True)
#plt.legend()
p2 =plt.subplot(3, 1, 2, sharex=p1)
p2.vlines(xx,[0],yy*1.604, 'r')
plt.ylabel(ylabel2)
plt.grid(True)
#plt.legend()
p3 =plt.subplot(3, 1, 3, sharex=p1)
p3.vlines(xx,[0],yz*1.604, 'g')
plt.ylabel(ylabel3)
plt.xlabel(xlabel)
plt.grid(True)
#plt.legend()
xticklabels = p1.get_xticklabels()+p2.get_xticklabels()
setp(xticklabels, visible=False)
plt.savefig(output, dpi=150)
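# Example run sketch (hypothetical file names; the CSV needs four numeric
# columns read as x, fx, fy, fz above):
#
#   ./forceatom.py --fcord --file forces.csv -t "Atomic forces" -o forces.png \
#       --xlabel "x" --ylabel1 "Fx" --ylabel2 "Fy" --ylabel3 "Fz"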
|
zhekan/MDrun
|
src/forceatom.py
|
Python
|
agpl-3.0
| 2,801
|
# -*- coding: utf-8 -*-
import urllib
import urllib2
import re
import os
import sys
import traceback
import base64
import xbmc
import xbmcplugin
import xbmcgui
import xbmcaddon
import xbmcvfs
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
viewmode=None
try:
from xml.sax.saxutils import escape
except: traceback.print_exc()
try:
import json
except:
import simplejson as json
import time
hlsretry=False
resolve_url=[]
g_ignoreSetResolved=[]
class NoRedirection(urllib2.HTTPErrorProcessor):
def http_response(self, request, response):
return response
https_response = http_response
REMOTE_DBG=False;
if REMOTE_DBG:
# Make pydev debugger works for auto reload.
# Note pydevd module need to be copied in XBMC\system\python\Lib\pysrc
try:
import pysrc.pydevd as pydevd
# stdoutToServer and stderrToServer redirect stdout and stderr to eclipse console
pydevd.settrace('localhost', stdoutToServer=True, stderrToServer=True)
except ImportError:
sys.stderr.write("Error: " +
"You must add org.python.pydev.debug.pysrc to your PYTHONPATH.")
sys.exit(1)
addon = xbmcaddon.Addon('plugin.video.rodasemotores')
addon_version = addon.getAddonInfo('version')
profile = xbmc.translatePath(addon.getAddonInfo('profile').decode('utf-8'))
home = xbmc.translatePath(addon.getAddonInfo('path').decode('utf-8'))
favorites = os.path.join(profile, 'favorites')
history = os.path.join(profile, 'history')
REV = os.path.join(profile, 'list_revision')
icon = os.path.join(home, 'icon.png')
FANART = os.path.join(home, 'fanart.gif')
motores_source = os.path.join(profile, 'motores_source')
functions_dir = profile
favoritesdb = os.path.join(profile, 'favorites.db')
debug = addon.getSetting('debug')
if os.path.exists(favorites)==True:
FAV = open(favorites).read()
else: FAV = []
if os.path.exists(motores_source)==True:
SOURCES = open(motores_source).read()
else: SOURCES = []
def addon_log(string):
if debug == 'true':
xbmc.log("[addon.rodasemotores-%s]: %s" %(addon_version, string))
def makeRequest(url, headers=None):
try:
if headers is None:
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 537.40'}
if '|' in url:
url,header_in_page=url.split('|')
header_in_page=header_in_page.split('&')
for h in header_in_page:
if len(h.split('='))==2:
n,v=h.split('=')
else:
vals=h.split('=')
n=vals[0]
v='='.join(vals[1:])
#n,v=h.split('=')
print n,v
headers[n]=v
req = urllib2.Request(url,None,headers)
response = urllib2.urlopen(req)
data = response.read()
response.close()
return data
except urllib2.URLError, e:
addon_log('URL: '+url)
if hasattr(e, 'code'):
addon_log('We failed with error code - %s.' % e.code)
xbmc.executebuiltin("XBMC.Notification(rodasemotores, code error - "+str(e.code)+",10000,"+icon+")")
elif hasattr(e, 'reason'):
addon_log('We failed to reach a server.')
addon_log('Reason: %s' %e.reason)
xbmc.executebuiltin("XBMC.Notification(rodasemotores, server error. - "+str(e.reason)+",10000,"+icon+")")
def SKindex():
addDir('[B][COLOR deepskyblue]| Favoritos[/COLOR][/B]','[B][COLOR deepskyblue]| Favoritos[/COLOR][/B]',4,'https://imgur.com/Ugy4OAE.png',FANART,'','','','')
getData(Base,FANART)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def getSources():
try:
        if os.path.exists(favorites) == True:
            addDir('[B][COLOR deepskyblue]| Favoritos[/COLOR][/B]','[B][COLOR deepskyblue]| Favoritos[/COLOR][/B]',4,'https://imgur.com/Ugy4OAE.png',FANART,'','','','')
        if os.path.exists(motores_source)==True:
            ba = json.loads(open(motores_source,"r").read())
            #print 'ba',ba
            if len(ba) > 1:
for i in ba:
try:
## for pre 1.0.8 ba
if isinstance(i, list):
addDir(i[0].encode('utf-8'),i[1].encode('utf-8'),1,icon,FANART,'','','','','source')
else:
thumb = icon
fanart = FANART
desc = ''
date = ''
credits = ''
genre = ''
if i.has_key('thumbnail'):
thumb = i['thumbnail']
                            if i.has_key('fanart'):
                                fanart = i['fanart']
                            # arg order mirrors the addDir call in the list branch above
                            addDir(i['name'].encode('utf-8'), i['url'].encode('utf-8'), 1, thumb, fanart, desc, genre, date, credits, 'source')
                    except: traceback.print_exc()
else:
if len(ba) == 1:
if isinstance(ba[0], list):
getData(ba[0][1].encode('utf-8'),FANART)
else:
getData(ba[0]['url'], ba[0]['fanart'])
except: traceback.print_exc()
def getSoup(url,data=None):
global viewmode,tsdownloader, hlsretry
tsdownloader=False
hlsretry=False
if url.startswith('http://') or url.startswith('https://'):
enckey=False
if '$$TSDOWNLOADER$$' in url:
tsdownloader=True
url=url.replace("$$TSDOWNLOADER$$","")
if '$$HLSRETRY$$' in url:
hlsretry=True
url=url.replace("$$HLSRETRY$$","")
if '$$LSProEncKey=' in url:
enckey=url.split('$$LSProEncKey=')[1].split('$$')[0]
rp='$$LSProEncKey=%s$$'%enckey
url=url.replace(rp,"")
data =makeRequest(url)
if enckey:
import pyaes
enckey=enckey.encode("ascii")
print enckey
missingbytes=16-len(enckey)
enckey=enckey+(chr(0)*(missingbytes))
print repr(enckey)
data=base64.b64decode(data)
decryptor = pyaes.new(enckey , pyaes.MODE_ECB, IV=None)
data=decryptor.decrypt(data).split('\0')[0]
#print repr(data)
if re.search("#EXTM3U",data) or 'm3u' in url:
# print 'found m3u data'
return data
elif data == None:
        if '/' not in url and '\\' not in url:
# print 'No directory found. Lets make the url to cache dir'
url = os.path.join(communityfiles,url)
if xbmcvfs.exists(url):
if url.startswith("smb://") or url.startswith("nfs://"):
copy = xbmcvfs.copy(url, os.path.join(profile, 'temp', 'sorce_temp.txt'))
if copy:
data = open(os.path.join(profile, 'temp', 'sorce_temp.txt'), "r").read()
xbmcvfs.delete(os.path.join(profile, 'temp', 'sorce_temp.txt'))
else:
addon_log("failed to copy from smb:")
else:
data = open(url, 'r').read()
            if re.match("#EXTM3U",data) or 'm3u' in url:
# print 'found m3u data'
return data
else:
addon_log("Soup Data not found!")
return
if '<SetViewMode>' in data:
try:
viewmode=re.findall('<SetViewMode>(.*?)<',data)[0]
xbmc.executebuiltin("Container.SetViewMode(%s)"%viewmode)
print 'done setview',viewmode
except: pass
return BeautifulSOAP(data, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
def processPyFunction(data):
try:
if data and len(data)>0 and data.startswith('$pyFunction:'):
data=doEval(data.split('$pyFunction:')[1],'',None,None )
except: pass
return data
def getData(url,fanart, data=None):
soup = getSoup(url,data)
#print type(soup)
if isinstance(soup,BeautifulSOAP):
#print 'xxxxxxxxxxsoup',soup
if len(soup('search')) > 0:
search = soup('search')
for sear in search:
linkedUrl = sear('externallink')[0].string
name = sear('name')[0].string
try:
name=processPyFunction(name)
except: pass
thumbnail = sear('thumbnail')[0].string
if thumbnail == None:
thumbnail = ''
thumbnail=processPyFunction(thumbnail)
try:
if not sear('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = sear('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
addDir(name.encode('utf-8'),linkedUrl.encode('utf-8'),12,thumbnail,fanArt,'','','',None,'source')
except:
addon_log('There was a problem adding directory from getData(): '+name.encode('utf-8', 'ignore'))
else:
addon_log('No Search: getItems')
if len(soup('channels')) > 0 and addon.getSetting('donotshowbychannels') == 'false':
channels = soup('channel')
for channel in channels:
# print channel
linkedUrl=''
lcount=0
try:
linkedUrl = channel('externallink')[0].string
lcount=len(channel('externallink'))
except: pass
#print 'linkedUrl',linkedUrl,lcount
if lcount>1: linkedUrl=''
name = channel('name')[0].string
try:
name=processPyFunction(name)
except: pass
            thumbnail = channel('thumbnail')[0].string
            if thumbnail == None:
                thumbnail = ''
            thumbnail=processPyFunction(thumbnail)
            # defaults for fields the addDir calls below reference but which
            # are never extracted from the channel element
            fanArt = fanart
            desc = ''
            genre = ''
            date = ''
            credits = ''
try:
if linkedUrl=='':
addDir(name.encode('utf-8', 'ignore'),url.encode('utf-8'),2,thumbnail,fanArt,desc,genre,date,credits,True)
else:
#print linkedUrl
addDir(name.encode('utf-8'),linkedUrl.encode('utf-8'),1,thumbnail,fanArt,desc,genre,date,None,'source')
except:
addon_log('There was a problem adding directory from getData(): '+name.encode('utf-8', 'ignore'))
else:
addon_log('No Channels: getItems')
getItems(soup('item'),fanart)
else:
parse_m3u(soup)
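# Illustrative source-XML sketch (element names inferred from the parsing in
# getData above; real sources may carry more tags such as <fanart> or <regex>):
#
# <channels>
#   <channel>
#     <name>Example channel</name>
#     <thumbnail>http://example.com/thumb.png</thumbnail>
#     <externallink>http://example.com/more.xml</externallink>
#   </channel>
# </channels>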
def getSearchData(url,icon, data=None):
keyboard = xbmc.Keyboard()
keyboard.setHeading("[COLOR white][B]Rodas e Motores - Pesquisar[/B][/COLOR]")
keyboard.setDefault('')
keyboard.doModal()
if keyboard.isConfirmed():
term = keyboard.getText()
term = term.replace(' ','').lower()
else:
xbmcgui.Dialog().ok('[COLOR white][B]Rodas e Motores[/B][/COLOR]', '[COLOR white][B]Pesquisas em branco não são permitidas.[/B][/COLOR]')
quit()
fanart=''
dontLink=False
soup = getSoup(url,data)
#print type(soup)
if isinstance(soup,BeautifulSOAP):
#print 'xxxxxxxxxxsoup',soup
if len(soup('link')) > 0:
main_item = soup('item')
for ite in main_item:
sear = ite('link')[0].string
soup = getSoup(sear,data)
items = soup('item')
total = len(items)
add_playlist = addon.getSetting('add_playlist')
ask_playlist_items =addon.getSetting('ask_playlist_items')
use_thumb = addon.getSetting('use_thumb')
parentalblock =addon.getSetting('parentalblocked')
parentalblock= parentalblock=="true"
for item in items:
try:
isXMLSource=False
isJsonrpc = False
applyblock='false'
try:
applyblock = item('parentalblock')[0].string
except:
addon_log('parentalblock Error')
applyblock = ''
if applyblock=='true' and parentalblock: continue
try:
name = item('title')[0].string
if name is None:
name = 'unknown?'
try:
name=processPyFunction(name)
except: pass
except:
addon_log('Name Error')
name = ''
check_name = re.sub('\[.+?\]','',name)
if term in check_name.replace(' ','').lower():
try:
if item('epg'):
if item.epg_url:
addon_log('Get EPG Regex')
epg_url = item.epg_url.string
epg_regex = item.epg_regex.string
epg_name = get_epg(epg_url, epg_regex)
if epg_name:
name += ' - ' + epg_name
elif item('epg')[0].string > 1:
name += getepg(item('epg')[0].string)
else:
pass
except:
addon_log('EPG Error')
url = []
if len(item('link')) >0:
#print 'item link', item('link')
for i in item('link'):
if not i.string == None:
url.append(i.string)
elif len(item('utube')) >0:
for i in item('utube'):
if not i.string == None:
if ' ' in i.string :
utube = 'plugin://plugin.video.youtube/search/?q='+ urllib.quote_plus(i.string)
isJsonrpc=utube
elif len(i.string) == 11:
utube = 'plugin://plugin.video.youtube/play/?video_id='+ i.string
elif (i.string.startswith('PL') and not '&order=' in i.string) or i.string.startswith('UU'):
utube = 'plugin://plugin.video.youtube/play/?&order=default&playlist_id=' + i.string
elif i.string.startswith('PL') or i.string.startswith('UU'):
utube = 'plugin://plugin.video.youtube/play/?playlist_id=' + i.string
elif i.string.startswith('UC') and len(i.string) > 12:
utube = 'plugin://plugin.video.youtube/channel/' + i.string + '/'
isJsonrpc=utube
elif not i.string.startswith('UC') and not (i.string.startswith('PL')) :
utube = 'plugin://plugin.video.youtube/user/' + i.string + '/'
isJsonrpc=utube
url.append(utube)
elif len(item('urlsolve')) >0:
for i in item('urlsolve'):
if not i.string == None:
resolver = i.string +'&mode=19'
url.append(resolver)
if len(url) < 1:
raise
try:
isXMLSource = item('externallink')[0].string
except: pass
if isXMLSource:
ext_url=[isXMLSource]
isXMLSource=True
else:
isXMLSource=False
try:
isJsonrpc = item('jsonrpc')[0].string
except: pass
if isJsonrpc:
ext_url=[isJsonrpc]
isJsonrpc=True
else:
isJsonrpc=False
try:
thumbnail = item('thumbnail')[0].string
if thumbnail == None:
raise
thumbnail=processPyFunction(thumbnail)
except:
thumbnail = ''
try:
if not item('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = item('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = item('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = item('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = item('date')[0].string
if date == None:
raise
except:
date = ''
regexs = None
if item('regex'):
try:
reg_item = item('regex')
regexs = parse_regex(reg_item)
except:
pass
if len(url) > 1:
alt = 0
playlist = []
ignorelistsetting=True if '$$LSPlayOnlyOne$$' in url[0] else False
for i in url:
if add_playlist == "false" and not ignorelistsetting:
alt += 1
addLink(i,'%s) %s' %(alt, name.encode('utf-8', 'ignore')),thumbnail,fanArt,desc,genre,date,True,playlist,regexs,total)
elif (add_playlist == "true" and ask_playlist_items == 'true') or ignorelistsetting:
if regexs:
                                            playlist.append(i+'&regexs='+regexs)
elif any(x in i for x in resolve_url) and i.startswith('http'):
playlist.append(i+'&mode=19')
else:
playlist.append(i)
else:
playlist.append(i)
if len(playlist) > 1:
addLink('', name.encode('utf-8'),thumbnail,fanArt,desc,genre,date,True,playlist,regexs,total)
else:
if dontLink:
return name,url[0],regexs
if isXMLSource:
if not regexs == None: #<externallink> and <regex>
addDir(name.encode('utf-8'),ext_url[0].encode('utf-8'),1,thumbnail,fanArt,desc,genre,date,None,'!!update',regexs,url[0].encode('utf-8'))
else:
addDir(name.encode('utf-8'),ext_url[0].encode('utf-8'),1,thumbnail,fanArt,desc,genre,date,None,'source',None,None)
elif isJsonrpc:
addDir(name.encode('utf-8'),ext_url[0],53,thumbnail,fanArt,desc,genre,date,None,'source')
else:
addLink(url[0],name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
except: pass
def getItems(items,fanart,dontLink=False):
total = len(items)
addon_log('Total Items: %s' %total)
add_playlist = addon.getSetting('add_playlist')
ask_playlist_items =addon.getSetting('ask_playlist_items')
use_thumb = addon.getSetting('use_thumb')
parentalblock =addon.getSetting('parentalblocked')
parentalblock= parentalblock=="true"
for item in items:
isXMLSource=False
isJsonrpc = False
applyblock='false'
try:
applyblock = item('parentalblock')[0].string
except:
addon_log('parentalblock Error')
applyblock = ''
if applyblock=='true' and parentalblock: continue
try:
name = item('title')[0].string
if name is None:
name = 'unknown?'
try:
name=processPyFunction(name)
except: pass
except:
addon_log('Name Error')
name = ''
try:
if item('epg'):
if item.epg_url:
addon_log('Get EPG Regex')
epg_url = item.epg_url.string
epg_regex = item.epg_regex.string
epg_name = get_epg(epg_url, epg_regex)
if epg_name:
name += ' - ' + epg_name
elif item('epg')[0].string > 1:
name += getepg(item('epg')[0].string)
else:
pass
except:
addon_log('EPG Error')
try:
url = []
if len(item('link')) >0:
#print 'item link', item('link')
for i in item('link'):
if not i.string == None:
url.append(i.string)
elif len(item('utube')) >0:
for i in item('utube'):
if not i.string == None:
if ' ' in i.string :
utube = 'plugin://plugin.video.youtube/search/?q='+ urllib.quote_plus(i.string)
isJsonrpc=utube
elif len(i.string) == 11:
utube = 'plugin://plugin.video.youtube/play/?video_id='+ i.string
elif (i.string.startswith('PL') and not '&order=' in i.string) or i.string.startswith('UU'):
utube = 'plugin://plugin.video.youtube/play/?&order=default&playlist_id=' + i.string
elif i.string.startswith('PL') or i.string.startswith('UU'):
utube = 'plugin://plugin.video.youtube/play/?playlist_id=' + i.string
elif i.string.startswith('UC') and len(i.string) > 7:
utube = 'plugin://plugin.video.youtube/channel/' + i.string + '/'
isJsonrpc=utube
elif not i.string.startswith('UC') and not (i.string.startswith('PL')) :
utube = 'plugin://plugin.video.youtube/user/' + i.string + '/'
isJsonrpc=utube
url.append(utube)
elif len(item('urlsolve')) >0:
for i in item('urlsolve'):
if not i.string == None:
resolver = i.string +'&mode=19'
url.append(resolver)
if len(url) < 1:
raise
except:
addon_log('Error <link> element, Passing:'+name.encode('utf-8', 'ignore'))
continue
try:
isXMLSource = item('externallink')[0].string
except: pass
if isXMLSource:
ext_url=[isXMLSource]
isXMLSource=True
else:
isXMLSource=False
try:
isJsonrpc = item('jsonrpc')[0].string
except: pass
if isJsonrpc:
ext_url=[isJsonrpc]
#print 'JSON-RPC ext_url',ext_url
isJsonrpc=True
else:
isJsonrpc=False
try:
thumbnail = item('thumbnail')[0].string
if thumbnail == None:
raise
thumbnail=processPyFunction(thumbnail)
except:
thumbnail = ''
try:
if not item('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = item('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = item('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = item('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = item('date')[0].string
if date == None:
raise
except:
date = ''
regexs = None
if item('regex'):
try:
reg_item = item('regex')
regexs = parse_regex(reg_item)
except:
pass
try:
if len(url) > 1:
alt = 0
playlist = []
ignorelistsetting=True if '$$LSPlayOnlyOne$$' in url[0] else False
for i in url:
if add_playlist == "false" and not ignorelistsetting:
alt += 1
addLink(i,'%s) %s' %(alt, name.encode('utf-8', 'ignore')),thumbnail,fanArt,desc,genre,date,True,playlist,regexs,total)
elif (add_playlist == "true" and ask_playlist_items == 'true') or ignorelistsetting:
if regexs:
                            playlist.append(i+'&regexs='+regexs)
elif any(x in i for x in resolve_url) and i.startswith('http'):
playlist.append(i+'&mode=19')
else:
playlist.append(i)
else:
playlist.append(i)
if len(playlist) > 1:
addLink('', name.encode('utf-8'),thumbnail,fanArt,desc,genre,date,True,playlist,regexs,total)
else:
if dontLink:
return name,url[0],regexs
if isXMLSource:
if not regexs == None: #<externallink> and <regex>
addDir(name.encode('utf-8'),ext_url[0].encode('utf-8'),1,thumbnail,fanArt,desc,genre,date,None,'!!update',regexs,url[0].encode('utf-8'))
#addLink(url[0],name.encode('utf-8', 'ignore')+ '[COLOR yellow]build XML[/COLOR]',thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
else:
addDir(name.encode('utf-8'),ext_url[0].encode('utf-8'),1,thumbnail,fanArt,desc,genre,date,None,'source',None,None)
#addDir(name.encode('utf-8'),url[0].encode('utf-8'),1,thumbnail,fanart,desc,genre,date,None,'source')
elif isJsonrpc:
addDir(name.encode('utf-8'),ext_url[0],11,thumbnail,fanArt,desc,genre,date,None,'source')
#xbmc.executebuiltin("Container.SetViewMode(500)")
else:
addLink(url[0],name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
#print 'success'
except:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
        if (params[len(params)-1]=='/'):
            params=params[0:len(params)-1]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
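# Rough usage sketch (illustrative values): when Kodi invokes the plugin with
# sys.argv[2] == '?url=http%3A%2F%2Fexample.com%2Ffeed.xml&mode=1',
# get_params() returns {'url': 'http%3A%2F%2Fexample.com%2Ffeed.xml',
# 'mode': '1'} -- values stay URL-quoted; the unquoting happens further down.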
def getFavorites():
if os.path.exists(favorites):
items = json.loads(open(favorites).read())
total = len(items)
for i in items:
name = i[0]
url = i[1]
iconimage = i[2]
try:
fanArt = i[3]
if fanArt == None:
raise
except:
if addon.getSetting('use_thumb') == "true":
fanArt = iconimage
else:
fanArt = fanart
try: playlist = i[5]
except: playlist = None
try: regexs = i[6]
except: regexs = None
if i[4] == 0:
addLink(url,name,iconimage,fanArt,'','','','fav',playlist,regexs,total)
else:
addDir(name,url,i[4],iconimage,fanart,'','','','','fav')
else:
addDir('[COLOR red][B][ No Favourites Added Yet ][/B][/COLOR]','','','','','','','','','fav')
def addFavorites(name,url,iconimage,fanart,mode,playlist=None,regexs=None):
favList = []
if not os.path.exists(favorites + 'txt'):
os.makedirs(favorites + 'txt')
if not os.path.exists(history):
os.makedirs(history)
try:
        # name may arrive as unicode; normalise it to utf-8 bytes
        name = name.encode('utf-8', 'ignore')
except:
pass
if os.path.exists(favorites)==False:
addon_log('Making Favorites File')
favList.append((name,url,iconimage,fanart,mode,playlist,regexs))
a = open(favorites, "w")
a.write(json.dumps(favList))
a.close()
else:
addon_log('Appending Favorites')
a = open(favorites).read()
data = json.loads(a)
        data.append((name,url,iconimage,fanart,mode,playlist,regexs))
b = open(favorites, "w")
b.write(json.dumps(data))
b.close()
def rmFavorites(name):
data = json.loads(open(favorites).read())
for index in range(len(data)):
if data[index][0]==name:
del data[index]
b = open(favorites, "w")
b.write(json.dumps(data))
b.close()
break
xbmc.executebuiltin("XBMC.Container.Refresh")
def addDir(name,url,mode,iconimage,fanart,description,genre,date,credits,showcontext=False,regexs=None,reg_url=None,allinfo={}):
if regexs and len(regexs)>0:
        u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&fanart="+urllib.quote_plus(fanart)+"&regexs="+regexs
else:
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&fanart="+urllib.quote_plus(fanart)
ok=True
if date == '':
date = None
else:
description += '\n\nDate: %s' %date
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
if len(allinfo) <1 :
liz.setInfo(type="Video", infoLabels={ "Title": name, "Plot": description, "Genre": genre, "dateadded": date, "credits": credits })
else:
liz.setInfo(type="Video", infoLabels= allinfo)
liz.setProperty("Fanart_Image", fanart)
if showcontext:
contextMenu = []
parentalblock =addon.getSetting('parentalblocked')
parentalblock= parentalblock=="true"
parentalblockedpin =addon.getSetting('parentalblockedpin')
# print 'parentalblockedpin',parentalblockedpin
if len(parentalblockedpin)>0:
if parentalblock:
contextMenu.append(('Disable Parental Block','XBMC.RunPlugin(%s?mode=55&name=%s)' %(sys.argv[0], urllib.quote_plus(name))))
else:
contextMenu.append(('Enable Parental Block','XBMC.RunPlugin(%s?mode=56&name=%s)' %(sys.argv[0], urllib.quote_plus(name))))
if showcontext == 'source':
if name in str(SOURCES):
contextMenu.append(('Remove from Sources','XBMC.RunPlugin(%s?mode=8&name=%s)' %(sys.argv[0], urllib.quote_plus(name))))
elif showcontext == 'download':
contextMenu.append(('Download','XBMC.RunPlugin(%s?url=%s&mode=9&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
elif showcontext == 'fav':
contextMenu.append(('[B][COLOR red]Remover favorito[/COLOR][/B]','XBMC.RunPlugin(%s?mode=6&name=%s)'
%(sys.argv[0], urllib.quote_plus(name))))
if not name in FAV:
contextMenu.append(('[B][COLOR Green]Adicionar aos favoritos[/COLOR][/B]','XBMC.RunPlugin(%s?mode=5&name=%s&url=%s&iconimage=%s&fanart=%s&fav_mode=%s)'
%(sys.argv[0], urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(iconimage), urllib.quote_plus(fanart), mode)))
liz.addContextMenuItems(contextMenu)
if showcontext == '!!update':
fav_params2 = (
            '%s?url=%s&mode=9&regexs=%s'
%(sys.argv[0], urllib.quote_plus(reg_url), regexs)
)
contextMenu.append(('[COLOR yellow]!!update[/COLOR]','XBMC.RunPlugin(%s)' %fav_params2))
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def ascii(string):
if isinstance(string, basestring):
if isinstance(string, unicode):
string = string.encode('ascii', 'ignore')
return string
def uni(string, encoding = 'utf-8'):
if isinstance(string, basestring):
if not isinstance(string, unicode):
string = unicode(string, encoding, 'ignore')
return string
def removeNonAscii(s): return "".join(filter(lambda x: ord(x)<128, s))
def sendJSON( command):
data = ''
try:
data = xbmc.executeJSONRPC(uni(command))
except UnicodeEncodeError:
data = xbmc.executeJSONRPC(ascii(command))
return uni(data)
def pluginquerybyJSON(url,give_me_result=None,playlist=False):
if 'audio' in url:
json_query = uni('{"jsonrpc":"2.0","method":"Files.GetDirectory","params": {"directory":"%s","media":"video", "properties": ["title", "album", "artist", "duration","thumbnail", "year"]}, "id": 1}') %url
else:
json_query = uni('{"jsonrpc":"2.0","method":"Files.GetDirectory","params":{"directory":"%s","media":"video","properties":[ "plot","playcount","director", "genre","votes","duration","trailer","premiered","thumbnail","title","year","dateadded","fanart","rating","season","episode","studio","mpaa"]},"id":1}') %url
json_folder_detail = json.loads(sendJSON(json_query))
#print json_folder_detail
if give_me_result:
return json_folder_detail
if json_folder_detail.has_key('error'):
return
else:
for i in json_folder_detail['result']['files'] :
meta ={}
url = i['file']
name = removeNonAscii(i['label'])
thumbnail = removeNonAscii(i['thumbnail'])
fanart = removeNonAscii(i['fanart'])
            meta = dict((k, v) for k, v in i.iteritems() if v not in ('0', -1, ''))
meta.pop("file", None)
if i['filetype'] == 'file':
if playlist:
play_playlist(name,url,queueVideo='1')
continue
else:
addLink(url,name,thumbnail,fanart,'','','','',None,'',total=len(json_folder_detail['result']['files']),allinfo=meta)
#xbmc.executebuiltin("Container.SetViewMode(500)")
if i['type'] and i['type'] == 'tvshow' :
xbmcplugin.setContent(int(sys.argv[1]), 'tvshows')
elif i['episode'] > 0 :
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
else:
addDir(name,url,11,thumbnail,fanart,'','','','',allinfo=meta)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def addLink(url,name,iconimage,fanart,description,genre,date,showcontext,playlist,regexs,total,setCookie="",allinfo={}):
#print 'url,name',url,name,iconimage
contextMenu =[]
parentalblock =addon.getSetting('parentalblocked')
parentalblock= parentalblock=="true"
parentalblockedpin =addon.getSetting('parentalblockedpin')
# print 'parentalblockedpin',parentalblockedpin
if len(parentalblockedpin)>0:
if parentalblock:
contextMenu.append(('Disable Parental Block','XBMC.RunPlugin(%s?mode=55&name=%s)' %(sys.argv[0], urllib.quote_plus(name))))
else:
contextMenu.append(('Enable Parental Block','XBMC.RunPlugin(%s?mode=56&name=%s)' %(sys.argv[0], urllib.quote_plus(name))))
try:
name = name.encode('utf-8')
except: pass
ok = True
isFolder=False
if regexs:
mode = '9'
if 'listrepeat' in regexs:
isFolder=True
# print 'setting as folder in link'
contextMenu.append(('','XBMC.RunPlugin(%s?url=%s&mode=21&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
elif (any(x in url for x in resolve_url) and url.startswith('http')) or url.endswith('&mode=19'):
url=url.replace('&mode=19','')
mode = '19'
contextMenu.append(('','XBMC.RunPlugin(%s?url=%s&mode=21&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
elif url.endswith('&mode=18'):
url=url.replace('&mode=18','')
mode = '18'
contextMenu.append(('','XBMC.RunPlugin(%s?url=%s&mode=23&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
if addon.getSetting('dlaudioonly') == 'true':
contextMenu.append(('','XBMC.RunPlugin(%s?url=%s&mode=24&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
else:
mode = '7'
if 'plugin://plugin.video.youtube/play/?video_id=' in url:
yt_audio_url = url.replace('plugin://plugin.video.youtube/play/?video_id=','https://www.youtube.com/watch?v=')
u=sys.argv[0]+"?"
play_list = False
if playlist:
if addon.getSetting('add_playlist') == "false" and '$$LSPlayOnlyOne$$' not in playlist[0] :
u += "url="+urllib.quote_plus(url)+"&mode="+mode
else:
u += "mode=8&name=%s&playlist=%s" %(urllib.quote_plus(name), urllib.quote_plus(str(playlist).replace(',','||')))
name = name + '[COLOR magenta] (' + str(len(playlist)) + ' items )[/COLOR]'
play_list = True
else:
u += "url="+urllib.quote_plus(url)+"&mode="+mode
if regexs:
        u += "&regexs="+regexs
if not setCookie == '':
u += "&setCookie="+urllib.quote_plus(setCookie)
if iconimage and not iconimage == '':
u += "&iconimage="+urllib.quote_plus(iconimage)
if date == '':
date = None
else:
description += '\n\nDate: %s' %date
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
#if isFolder:
if allinfo==None or len(allinfo) <1:
liz.setInfo(type="Video", infoLabels={ "Title": name, "Plot": description, "Genre": genre, "dateadded": date })
else:
liz.setInfo(type="Video", infoLabels=allinfo)
liz.setProperty("Fanart_Image", fanart)
if (not play_list) and not any(x in url for x in g_ignoreSetResolved) and not '$PLAYERPROXY$=' in url:# (not url.startswith('plugin://plugin.video.f4mTester')):
if regexs:
#print urllib.unquote_plus(regexs)
if '$pyFunction:playmedia(' not in urllib.unquote_plus(regexs) and 'notplayable' not in urllib.unquote_plus(regexs) and 'listrepeat' not in urllib.unquote_plus(regexs) :
#print 'setting isplayable',url, urllib.unquote_plus(regexs),url
liz.setProperty('IsPlayable', 'true')
else:
liz.setProperty('IsPlayable', 'true')
else:
addon_log( 'NOT setting isplayable'+url)
if showcontext:
#contextMenu = []
if showcontext == 'fav':
contextMenu.append(
('[B][COLOR red]Remover favorito[/COLOR][/B]','XBMC.RunPlugin(%s?mode=6&name=%s)'
%(sys.argv[0], urllib.quote_plus(name)))
)
elif not name in FAV:
try:
fav_params = (
'%s?mode=5&name=%s&url=%s&iconimage=%s&fanart=%s&fav_mode=0'
%(sys.argv[0], urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(iconimage), urllib.quote_plus(fanart))
)
except:
fav_params = (
'%s?mode=5&name=%s&url=%s&iconimage=%s&fanart=%s&fav_mode=0'
%(sys.argv[0], urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(iconimage.encode("utf-8")), urllib.quote_plus(fanart.encode("utf-8")))
)
            if playlist:
                fav_params += '&playlist='+urllib.quote_plus(str(playlist).replace(',','||'))
            if regexs:
                fav_params += "&regexs="+regexs
contextMenu.append(('[B][COLOR green]Adicionar aos favoritos[/COLOR][/B]','XBMC.RunPlugin(%s)' %fav_params))
liz.addContextMenuItems(contextMenu)
try:
if not playlist is None:
if addon.getSetting('add_playlist') == "false":
playlist_name = name.split(') ')[1]
contextMenu_ = [
('Play '+playlist_name+' PlayList','XBMC.RunPlugin(%s?mode=8&name=%s&playlist=%s)'
%(sys.argv[0], urllib.quote_plus(playlist_name), urllib.quote_plus(str(playlist).replace(',','||'))))
]
liz.addContextMenuItems(contextMenu_)
except: pass
#print 'adding',name
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,totalItems=total,isFolder=isFolder)
#print 'added',name
return ok
def d2x(d, root="root",nested=0):
op = lambda tag: '<' + tag + '>'
cl = lambda tag: '</' + tag + '>\n'
ml = lambda v,xml: xml + op(key) + str(v) + cl(key)
xml = op(root) + '\n' if root else ""
for key,vl in d.iteritems():
vtype = type(vl)
if nested==0: key='regex' #enforcing all top level tags to be named as regex
if vtype is list:
for v in vl:
v=escape(v)
xml = ml(v,xml)
if vtype is dict:
xml = ml('\n' + d2x(vl,None,nested+1),xml)
if vtype is not list and vtype is not dict:
if not vl is None: vl=escape(vl)
#print repr(vl)
if vl is None:
xml = ml(vl,xml)
else:
#xml = ml(escape(vl.encode("utf-8")),xml)
xml = ml(vl.encode("utf-8"),xml)
xml += cl(root) if root else ""
return xml
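# Hypothetical worked example of d2x() (not from the original source): every
# top-level key is forced to the tag name 'regex', values are XML-escaped,
# and nested dicts recurse, so
#   d2x({'myregex': {'name': 'hd', 'expres': 'file=(.*?)&'}})
# produces roughly:
#   <root>
#   <regex>
#   <name>hd</name>
#   <expres>file=(.*?)&amp;</expres>
#   </regex>
#   </root>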
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
params=get_params()
Base = 'http://psychomc.xyz/AddonPTdocs/rodasemotores.xml'
url=None
name=None
mode=None
playlist=None
iconimage=None
fanart=FANART
playlist=None
fav_mode=None
regexs=None
try:
url=urllib.unquote_plus(params["url"]).decode('utf-8')
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
iconimage=urllib.unquote_plus(params["iconimage"])
except:
pass
try:
fanart=urllib.unquote_plus(params["fanart"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
try:
playlist=eval(urllib.unquote_plus(params["playlist"]).replace('||',','))
except:
pass
try:
fav_mode=int(params["fav_mode"])
except:
pass
try:
regexs=params["regexs"]
except:
pass
playitem=''
try:
playitem=urllib.unquote_plus(params["playitem"])
except:
pass
addon_log("Mode: "+str(mode))
if not url is None:
addon_log("URL: "+str(url.encode('utf-8')))
addon_log("Name: "+str(name))
if not playitem =='':
s=getSoup('',data=playitem)
name,url,regexs=getItems(s,None,dontLink=True)
mode=10
if mode==None:
addon_log("Index")
SKindex()
#addon_log("getSources")
#getSources()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==1:
addon_log("getData")
data=None
if regexs and len(regexs)>0:
data,setresolved=getRegexParsed(regexs, url)
#print data
#url=''
if data.startswith('http') or data.startswith('smb') or data.startswith('nfs') or data.startswith('/'):
url=data
data=None
#create xml here
getData(url,fanart,data)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==2:
addon_log("getChannelItems")
getChannelItems(name,url,fanart)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==3:
addon_log("getSubChannelItems")
getSubChannelItems(name,url,fanart)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==4:
addon_log("getFavorites")
getFavorites()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==5:
addon_log("addFavorites")
try:
name = name.split('\\ ')[1]
except:
pass
try:
name = name.split(' - ')[0]
except:
pass
addFavorites(name,url,iconimage,fanart,fav_mode)
elif mode==6:
addon_log("rmFavorites")
try:
name = name.split('\\ ')[1]
except:
pass
try:
name = name.split(' - ')[0]
except:
pass
rmFavorites(name)
elif mode==7:
addon_log("setResolvedUrl")
if not url.startswith("plugin://plugin") or not any(x in url for x in g_ignoreSetResolved):
setres=True
if '$$LSDirect$$' in url:
url=url.replace('$$LSDirect$$','')
setres=False
item = xbmcgui.ListItem(path=url)
if not setres:
xbmc.Player().play(url)
else:
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
else:
# print 'Not setting setResolvedUrl'
xbmc.executebuiltin('XBMC.RunPlugin('+url+')')
elif mode==8:
addon_log("play_playlist")
play_playlist(name, playlist)
elif mode==9 or mode==10:
addon_log("getRegexParsed")
elif mode==11:
addon_log("Requesting JSON-RPC Items")
pluginquerybyJSON(url)
#xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==12:
addon_log("getSearchData")
data=None
if regexs and len(regexs)>0:
data,setresolved=getRegexParsed(regexs, url)
#print data
#url=''
if data.startswith('http') or data.startswith('smb') or data.startswith('nfs') or data.startswith('/'):
url=data
data=None
#create xml here
getSearchData(url,fanart,data)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
if not viewmode==None:
print 'setting view mode'
xbmc.executebuiltin("Container.SetViewMode(%s)"%viewmode)
|
cmvac/demagorgon.repository
|
plugin.video.rodasemotores/default.py
|
Python
|
gpl-2.0
| 51,862
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from solution import Solution
num = 14
sol = Solution()
res = sol.isUgly(num)
print(res)
|
zhlinh/leetcode
|
0263.Ugly Number/test.py
|
Python
|
apache-2.0
| 137
|
#!/usr/bin/env python
# Copyright (c) 2017,2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from copy import deepcopy
from f5_cccl.resource.ltm.internal_data_group import InternalDataGroup
from mock import Mock
import pytest
cfg_test = {
'name': 'test_dg',
'partition': 'my_partition',
'type': 'string',
'records': [
{
"name": "test_record_name",
"data": "test record data"
}
]
}
class FakeObj: pass
@pytest.fixture
def bigip():
bigip = Mock()
return bigip
def test_create_internal_data_group():
"""Test InternalDataGroup creation."""
idg = InternalDataGroup(
**cfg_test
)
assert idg
# verify all cfg items
for k,v in cfg_test.items():
assert idg.data[k] == v
def test_hash():
"""Test InternalDataGroup hash."""
idg1 = InternalDataGroup(
**cfg_test
)
idg2 = InternalDataGroup(
**cfg_test
)
cfg_changed = deepcopy(cfg_test)
cfg_changed['name'] = 'test'
idg3 = InternalDataGroup(
**cfg_changed
)
cfg_changed = deepcopy(cfg_test)
cfg_changed['partition'] = 'other'
idg4 = InternalDataGroup(
**cfg_changed
)
assert idg1
assert idg2
assert idg3
assert idg4
assert hash(idg1) == hash(idg2)
assert hash(idg1) != hash(idg3)
assert hash(idg1) != hash(idg4)
def test_eq():
"""Test InternalDataGroup equality."""
partition = 'Common'
name = 'idg_1'
idg1 = InternalDataGroup(
**cfg_test
)
idg2 = InternalDataGroup(
**cfg_test
)
assert idg1
assert idg2
assert idg1 == idg2
# name not equal
cfg_changed = deepcopy(cfg_test)
cfg_changed['name'] = 'idg_2'
idg2 = InternalDataGroup(**cfg_changed)
assert idg1 != idg2
# partition not equal
cfg_changed = deepcopy(cfg_test)
cfg_changed['partition'] = 'test'
idg2 = InternalDataGroup(**cfg_changed)
assert idg1 != idg2
# the records in the group not equal
cfg_changed = deepcopy(cfg_test)
cfg_changed['records'][0]['data'] = 'changed data'
idg2 = InternalDataGroup(**cfg_changed)
assert idg1 != idg2
# different objects
fake = FakeObj
assert idg1 != fake
# should be equal after assignment
idg2 = idg1
assert idg1 == idg2
def test_uri_path(bigip):
"""Test InternalDataGroup URI."""
idg = InternalDataGroup(
**cfg_test
)
assert idg
assert idg._uri_path(bigip) == bigip.tm.ltm.data_group.internals.internal
|
ryan-talley/f5-cccl
|
f5_cccl/resource/ltm/test/test_internal_data_group.py
|
Python
|
apache-2.0
| 3,068
|
"""
config/default.py
------------------
Default configuration settings for Flask app.
"""
import os
DEBUG = os.environ.get('FLASK_DEBUG', True)
SECRET_KEY = os.environ.get('FLASK_SECRET_KEY', 'default-secret-key')
USERNAME = os.environ.get('FLASK_USERNAME', 'admin')
SALT = os.environ.get('FLASK_SALT', 'default-password-salt')
PASSWORD = os.environ.get('FLASK_PASSWORD',
'pbkdf2:sha1:1000$PyroY8oH$f750609556f5da1bf3a0bb051b82e75fd5c57579')
APP_NAME = os.environ.get('OPENSHIFT_APP_NAME', 'homepage')
HOST_NAME = os.environ.get('OPENSHIFT_APP_DNS', 'localhost')
IP = os.environ.get('OPENSHIFT_PYTHON_IP', '127.0.0.1')
PORT = int(os.environ.get('OPENSHIFT_PYTHON_PORT', 8080))
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATABASE = os.path.join(os.environ.get('OPENSHIFT_DATA_DIR', PROJECT_DIR),
'{}.db'.format(APP_NAME))
SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE)
|
kennyng/kennyng.org
|
config/default.py
|
Python
|
mit
| 994
|
from operator import itemgetter
from mediawords.db import connect_to_db
from mediawords.dbi.downloads.store import store_content
from mediawords.test.db.create import create_test_topic, create_test_medium, create_test_feed, create_test_story
from topics_base.stories import merge_foreign_rss_stories
def test_merge_foreign_rss_stories():
"""Test merge_foreign_rss_stories()."""
db = connect_to_db()
topic = create_test_topic(db, 'foo')
medium = create_test_medium(db, 'norss')
feed = create_test_feed(db=db, label='norss', medium=medium)
num_stories = 10
stories = [
create_test_story(db=db, label=str(i), feed=feed)
for i in range(num_stories)
]
rss_medium = create_test_medium(db, 'rss')
rss_medium = db.query(
"update media set foreign_rss_links = 't' where media_id = %(a)s returning *",
{'a': rss_medium['media_id']}).hash()
rss_feed = create_test_feed(db=db, label='rss', medium=rss_medium)
num_rss_stories = 10
rss_stories = []
for i in range(num_rss_stories):
story = create_test_story(db=db, label=str(i), feed=rss_feed)
download = db.create('downloads', {
'stories_id': story['stories_id'],
'feeds_id': rss_feed['feeds_id'],
'url': story['url'],
'host': 'foo',
'type': 'content',
'state': 'success',
'priority': 0,
'sequence': 0,
'path': 'postgresql'})
store_content(db, download, story['title'])
rss_stories.append(story)
# noinspection SqlInsertValues
db.query(
f"""
insert into topic_stories (stories_id, topics_id)
select s.stories_id, {int(topic['topics_id'])}
from stories s
"""
)
assert db.query("select count(*) from topic_stories").flat()[0] == num_stories + num_rss_stories
merge_foreign_rss_stories(db, topic)
assert db.query("select count(*) from topic_stories").flat()[0] == num_stories
assert db.query("select count(*) from topic_seed_urls").flat()[0] == num_rss_stories
got_topic_stories_ids = db.query("select stories_id from topic_stories").flat()
expected_topic_stories_ids = [s['stories_id'] for s in stories]
assert sorted(got_topic_stories_ids) == sorted(expected_topic_stories_ids)
got_seed_urls = db.query(
"select topics_id, url, content from topic_seed_urls where topics_id = %(a)s",
{'a': topic['topics_id']}).hashes()
expected_seed_urls = \
[{'url': s['url'], 'topics_id': topic['topics_id'], 'content': s['title']} for s in rss_stories]
assert sorted(got_seed_urls, key=itemgetter('url')) == sorted(expected_seed_urls, key=itemgetter('url'))
|
berkmancenter/mediacloud
|
apps/topics-base/tests/python/topics_base/stories/test_merge_foreign_rss_stories.py
|
Python
|
agpl-3.0
| 2,758
|
#!/usr/bin/env python
#############################################################################
# montyhall.py - Monty Hall Problem solver
# v0.5
# Copyright (C) 2013 Ian Havelock
#
# //http://en.wikipedia.org/wiki/Monty_Hall_problem
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# IMPORTS ###################################################################
import sqlite3
import random
import argparse
# THIRD PARTY IMPORTS #######################################################
# PARTICULAR IMPORTS ########################################################
from datetime import datetime
# CONSTANTS #################################################################
# the name of our SQLite database
CONN = sqlite3.connect('montyhall.sqlite3')
# VARIABLES #################################################################
# FUNCTIONS #################################################################
def begin():
print "\n\n###########################"
print "Monty Hall Problem Solver"
print "Morrolan 2013"
print "###########################"
arg_data = get_args()
check_exists()
sim_id = generate_simulation_id()
print "\nRunning simulation ID: " + str(sim_id)
random.seed()
# create a loop between 1 and number of runs
for i in range(0, int(arg_data['runs'])):
calculate_1_run(sim_id, arg_data)
print "\nFinished!"
produce_results(sim_id)
# now lets analyze the results here
CONN.close()
def get_args():
"""Get the commandline arguments and parameters if defaults overridden."""
# create a dictionary containing the argument data so it is easier to pass
# to other functions.
    arg_data = {
        'doors' : None,
        'runs' : None,
        'switch' : None,   # keyed 'switch' to match how it is read back below
    }
# Create the optional arguments
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--doors", help="Optional parameter to override number of doors.", default=3)
parser.add_argument("-r", "--runs", help="Optional parameter to override the number of runs.", default=100)
parser.add_argument("-s", "--switch", help="The player switches or not - enter 'y' or 'n'.")
argu = parser.parse_args()
# assign the commandline arguments to the dictionary
arg_data['doors'] = argu.doors
arg_data['runs'] = argu.runs
arg_data['switch'] = argu.switch
print "\nNumber of Doors: " + str(format(int(argu.doors), ',d'))
print "Number of Runs: " + str(format(int(argu.runs), ',d'))
if argu.switch is None:
print "Player will randomly choose to switch."
elif argu.switch in 'yY':
print "Player will ALWAYS switch."
elif argu.switch in 'nN':
print "Player will NEVER switch."
elif argu.switch not in 'yYnN':
print "Invalid option specified."
        exit(1)
return arg_data
def check_exists():
cursor = CONN.cursor()
test_string = """
SELECT name FROM sqlite_master WHERE type='table' AND name='results'
"""
cursor.execute(test_string)
a = cursor.fetchall()
if a == []:
create_table()
def create_table():
cursor = CONN.cursor()
creation_string = """
CREATE TABLE [results] (
[result_id] INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
[simulation_id] INTEGER NOT NULL,
[no_of_doors] INTEGER NOT NULL,
[player_door] INTEGER NOT NULL,
[car_door] INTEGER NOT NULL,
[closed_door] INTEGER NOT NULL,
[switched] BOOLEAN NOT NULL)
"""
cursor.execute(creation_string)
def generate_simulation_id():
cursor = CONN.cursor()
cursor.execute("SELECT simulation_id FROM results ORDER BY simulation_id DESC LIMIT 1")
last_sim_id = cursor.fetchone()
if last_sim_id is not None:
new_sim_id = int(last_sim_id[0]) + 1
else:
new_sim_id = 1
return new_sim_id
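# e.g. with an empty results table this returns 1; afterwards it returns the
# highest stored simulation_id + 1, so each run gets a sequential ID.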
def calculate_1_run(sim_id, arg_data):
# Initialise all variables to be zero
no_of_doors = int(arg_data['doors'])
car_door = None
player_door = None
closed_door = None
switch_decision = None
# pick a door for the car
car_door = random.randint(1, no_of_doors)
# pick a door for the player
player_door = random.randint(1, no_of_doors)
# pick a door for the host to NOT open (cannot be player door)
closed_door = random.randint(1, no_of_doors)
if player_door != car_door:
closed_door = car_door
else:
while closed_door == player_door:
closed_door = random.randint(1, no_of_doors)
# decide whether we switch or stick
if arg_data['switch'] is None:
switch_decision = random.randint(0, 1)
elif arg_data['switch'] in 'yY':
switch_decision = 1
elif arg_data['switch'] in 'nN':
switch_decision = 0
# now lets save all of this to a database
store_result(sim_id, no_of_doors, car_door,player_door, closed_door, switch_decision)
def store_result(simulation_id, no_of_doors, car_door,player_door, closed_door, switch_decision):
cursor = CONN.cursor()
cursor.execute("""INSERT INTO 'results'
(simulation_id, no_of_doors, car_door, player_door, closed_door, switched)
values (?, ?, ?, ?, ?, ?)""", (simulation_id, no_of_doors, car_door, player_door, closed_door, switch_decision))
CONN.commit()
def produce_results(simulation_id):
print "\n\n###################################\n"
print "RESULTS:"
cursor = CONN.cursor()
cursor.execute("SELECT count(*) from results where player_door = car_door and simulation_id = {0}".format(simulation_id))
_res = cursor.fetchone()
_res = _res[0]
print "\nThe player chose the car {0} times.".format(format(int(_res), 'd'))
cursor.execute("SELECT count(*) from results where switched = 1 and simulation_id = {0}".format(simulation_id))
_res = cursor.fetchone()
_res = _res[0]
print "The player switched doors {0} times.".format(format(int(_res), 'd'))
cursor.execute("select count(*) from results where car_door != player_door and switched = 1 and simulation_id = {0}".format(simulation_id))
_res = cursor.fetchone()
_result1 = _res[0]
cursor.execute("select count(*) from results where car_door == player_door and switched = 0 and simulation_id = {0}".format(simulation_id))
_res = cursor.fetchone()
_result2 = _res[0]
_res = _result1 + _result2
print "The player won the car {0} times.".format(format(int(_res), 'd'))
def main():
begin()
######################################################
if __name__ == "__main__":
main()
|
Morrolan/montyhall
|
montyhall.py
|
Python
|
gpl-3.0
| 6,973
|
import datetime
import re
import string
from flask import request
from server import app
from server.base import cached_route
from server.penndata import depts, reg
def is_dept(keyword):
return keyword.upper() in depts.keys()
def get_serializable_course(course):
return {
"_id": str(course.get("_id", "")),
"dept": course.get("dept", ""),
"title": course.get("title", ""),
"courseNumber": course.get("courseNumber", ""),
"credits": course.get("credits"),
"sectionNumber": course.get("sectionNumber", ""),
"type": course.get("type", ""),
"times": course.get("times", ""),
"days": course.get("days", ""),
"hours": course.get("hours", ""),
"building": course.get("building"),
"roomNumber": course.get("roomNumber"),
"prof": course.get("prof"),
}
def search_course(course):
params = dict()
if len(course.get("dept", "")) > 0:
id_param = ""
id_param += course.get("dept").lower()
if len(course.get("courseNumber", "")) > 0:
id_param += "-" + course.get("courseNumber").lower()
if len(course.get("sectionNumber", "")) > 0:
id_param += course.get("sectionNumber").lower()
params["course_id"] = id_param
if len(course["desc_search"]) > 0:
params["description"] = course["desc_search"]
if len(params) == 0:
return None
final_courses = reg.search(params)
return {"courses": list(final_courses)}
def get_type_search(search_query):
course = {"courseNumber": "", "sectionNumber": "", "dept": "", "desc_search": ""}
search_punc = re.sub("[%s]" % re.escape(string.punctuation), " ", search_query)
def repl(matchobj):
return matchobj.group(0)[0] + " " + matchobj.group(0)[1]
    search_presplit = re.sub("(\\d[a-zA-Z]|[a-zA-Z]\\d)", repl, search_punc)
split = search_presplit.split()
found_desc = False
in_desc = False
for s in split:
s = s.strip()
if s.isalpha() and is_dept(s.upper()):
in_desc = False
course["dept"] = s.upper()
elif s.isdigit():
in_desc = False
if len(s) == 3:
course["courseNumber"] = s
if len(s) == 6:
course["courseNumber"] = s[:3]
course["sectionNumber"] = s[-3:]
else:
if not found_desc or in_desc:
found_desc = True
in_desc = True
if len(course["desc_search"]) == 0:
course["desc_search"] = s
else:
course["desc_search"] += " " + s
return course
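# Worked example (hypothetical input; assumes 'CIS' is a key in depts):
# get_type_search("cis120 intro to programming") first splits 'cis120' into
# 'cis 120' via the digit/letter boundary regex, then returns
#   {'dept': 'CIS', 'courseNumber': '120', 'sectionNumber': '',
#    'desc_search': 'intro to programming'}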
@app.route("/registrar/search", methods=["GET"])
def search():
search_query = request.args["q"]
def get_data():
query_results = search_course(get_type_search(search_query))
if query_results is None:
return {"error": "The search query could not be processed."}
else:
return query_results
return cached_route("registrar_query:%s" % search_query, datetime.timedelta(days=1), get_data)
@app.route("/registrar/search/instructor", methods=["GET"])
def search_instructor():
query = request.args["q"]
def get_data():
results = reg.search({"instructor": query})
if results is None:
return {"error": "The search query could not be processed."}
else:
return {"courses": list(results)}
return cached_route(
"registrar_query_instructor:%s" % query, datetime.timedelta(days=1), get_data
)
|
pennlabs/penn-mobile-server
|
server/registrar.py
|
Python
|
mit
| 3,609
|
import pip
pip_version = tuple(int(v) if v.isdigit() else v
for v in pip.__version__.split('.'))
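# e.g. pip.__version__ == '20.0.2' parses to (20, 0, 2); a dev release such
# as '19.0.dev0' parses to (19, 0, 'dev0'), and either compares > (10,).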
if pip_version > (10,):
from pip._internal.req import req_file
else:
from pip.req import req_file
def req_file_build_parser(line=None):
if pip_version > (10,):
return req_file.build_parser(line=line)
else:
return req_file.build_parser()
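# Hypothetical usage sketch: callers build the requirements-file parser the
# same way regardless of pip version, e.g.
#   parser = req_file_build_parser(line='-r requirements.txt')
# pip>=10 expects the `line` argument while older releases accept none;
# that difference is the whole point of this wrapper.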
__all__ = [
'pip_version',
'req_file',
'req_file_build_parser',
]
|
5monkeys/blues
|
blues/compat.py
|
Python
|
mit
| 471
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='market',
version='0.0.1',
description='Market microservice',
long_description=long_description,
url='https://github.com/pap/simplebank',
author='Simplebank Engineering',
author_email='engineering@simplebank.book',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
"Programming Language :: Python",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Programming Language :: Python :: 3",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers",
],
keywords='microservices market',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=[
'nameko==2.12.0',
'logstash_formatter==0.5.17',
'circuitbreaker==1.3.0',
'gutter==0.5.0',
'request-id==1.0',
'statsd==3.3.0',
'nameko-sentry==1.0.0',
'jaeger-client == 4.3.0',
'pyopenssl==19.1.0',
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
)
|
morganjbruce/microservices-in-action
|
chapter-11/market/setup.py
|
Python
|
mit
| 1,856
|
# PiTimer - Python Hardware Programming Education Project For Raspberry Pi
# Copyright (C) 2015 Jason Birch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#/****************************************************************************/
#/* PiTimer - Step 9 - Maintaining the system time. */
#/* ------------------------------------------------------------------------ */
#/* V1.00 - 2015-07-04 - Jason Birch */
#/* ------------------------------------------------------------------------ */
#/* Class to store a single schedule event in. */
#/****************************************************************************/
import string
import operator
import datetime
import SystemTime
# Constants to define relay state.
RELAY_TOGGLE = -1
RELAY_OFF = 0
RELAY_ON = 1
class ScheduleItem:
def __init__(self, NewRelayNumber, NewScheduleDate, NewRelayState, NewRepeat):
        # Unique ID referring specifically to this instance of the class.
self.ItemID = id(self)
# The ID of the relay this instance referrs to.
self.RelayNumber = NewRelayNumber
# The time and date this schedule is to activate at.
self.ScheduleDate = NewScheduleDate
# The state the relay is to be set to.
self.RelayState = NewRelayState
# The period after activation for the schedule to activate again.
self.Repeat = NewRepeat
#/****************************************************************/
    #/* Return the unique ID of this specific instance of the class. */
#/****************************************************************/
def GetItemID(self):
return self.ItemID
#/*******************************************************************/
    #/* Return the relay number of this specific instance of the class. */
#/*******************************************************************/
def GetRelayNumber(self):
return self.RelayNumber
#/*******************************************************************/
    #/* Return the relay action of this specific instance of the class. */
#/*******************************************************************/
def GetRelayState(self):
return self.RelayState
#/********************************************************************/
    #/* Return the schedule date of this specific instance of the class. */
#/********************************************************************/
def GetScheduleDate(self):
return self.ScheduleDate
#/*******************************************************/
#/* Update the schedule date to the next schedule date. */
#/*******************************************************/
def SetNextScheduleDate(self):
Now = datetime.datetime.now()
if self.Repeat:
while self.ScheduleDate < Now:
self.ScheduleDate += self.Repeat
return True
else:
return False
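    # Illustrative behaviour note (not in the original file): with Repeat set
    # to datetime.timedelta(days=1) and a ScheduleDate of yesterday, the loop
    # above rolls the date forward one day at a time until it lies in the
    # future, then reports True; a zero/empty Repeat reports False.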
#/**********************************************************************/
#/* When tabulating the results, display a specific row for the table. */
#/**********************************************************************/
def DisplayItem(self, SelectLeftChar):
Period = int(self.Repeat.total_seconds())
Seconds = operator.mod(Period, 60)
Period = operator.div(Period, 60)
Minutes = operator.mod(Period, 60)
Period = operator.div(Period, 60)
Hours = operator.mod(Period, 24)
Period = operator.div(Period, 24)
Days = Period
print("{}{:^19}".format(SelectLeftChar, self.ScheduleDate.strftime("%Y-%m-%d %H:%M:%S")) + "\r")
print("{}{:>2}={:<2} {:>3} {:0>2}:{:0>2}:{:0>2}".format(SelectLeftChar, self.RelayNumber, self.RelayState, str(Days), str(Hours), str(Minutes), str(Seconds)) + "\r")
#/********************************************************/
#/* Save this schedule item to the provided file handle. */
#/********************************************************/
def Save(self, File):
File.write(str(self.ItemID) + "," + str(self.RelayNumber) + "," + self.ScheduleDate.strftime("%Y-%m-%d %H:%M:%S") + "," + str(self.RelayState) + "," + str(int(self.Repeat.total_seconds())) + "\n")
#/**********************************************************************/
#/* Load the next schedule item details from the provided file handle. */
#/**********************************************************************/
def Load(self, File):
ThisLine = File.readline()
if ThisLine:
Fields = ThisLine.split(",")
self.ItemID = string.atoi(Fields[0])
self.RelayNumber = string.atoi(Fields[1])
self.RelayState = string.atoi(Fields[3])
Period = string.atoi(Fields[4])
Seconds = operator.mod(Period, 24 * 60 * 60)
Days = operator.div(Period, 24 * 60 * 60)
self.Repeat = datetime.timedelta(Days, Seconds)
DateTime = Fields[2].split(" ")
ThisDate = DateTime[0].split("-")
ThisTime = DateTime[1].split(":")
self.ScheduleDate = datetime.datetime(string.atoi(ThisDate[0]), string.atoi(ThisDate[1]), string.atoi(ThisDate[2]), string.atoi(ThisTime[0]), string.atoi(ThisTime[1]), string.atoi(ThisTime[2]))
return ThisLine != ""
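    # On-disk record format written by Save() and parsed by Load(), with
    # illustrative values: "139911,2,2015-07-04 12:00:00,1,86400"
    # i.e. item ID, relay number, schedule date, relay state, repeat seconds.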
|
BirchJD/RPiTimer
|
PiTimer_Step-9/ScheduleItem.py
|
Python
|
gpl-3.0
| 5,820
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import pytest
from pootle.core.state import ItemState, State
from pootle_fs.models import StoreFS
from pootle_fs.response import (
FS_RESPONSE, ProjectFSItemResponse, ProjectFSResponse)
from pootle_project.models import Project
from pootle_store.models import Store
class DummyContext(object):
def __str__(self):
return "<DummyContext object>"
class DummyFSItemState(ItemState):
@property
def pootle_path(self):
if "pootle_path" in self.kwargs:
return self.kwargs["pootle_path"]
elif self.store_fs:
return self.store_fs.pootle_path
elif self.store:
return self.store.pootle_path
@property
def fs_path(self):
if "fs_path" in self.kwargs:
return self.kwargs["fs_path"]
elif self.store_fs:
return self.store_fs.path
@property
def store(self):
if "store" in self.kwargs:
return self.kwargs["store"]
elif self.store_fs:
return self.store_fs.store
@property
def store_fs(self):
return self.kwargs.get("store_fs")
class DummyFSState(State):
"""The pootle_fs State can create ItemStates with
- a store_fs (that has a store)
- a store_fs (that has no store)
- a store and an fs_path
- a pootle_path and an fs_path
"""
item_state_class = DummyFSItemState
def state_fs_staged(self, **kwargs):
for store_fs in kwargs.get("fs_staged", []):
yield dict(store_fs=store_fs)
def state_fs_ahead(self, **kwargs):
for store_fs in kwargs.get("fs_ahead", []):
yield dict(store_fs=store_fs)
def state_fs_untracked(self, **kwargs):
for fs_path, pootle_path in kwargs.get("fs_untracked", []):
yield dict(fs_path=fs_path, pootle_path=pootle_path)
def state_pootle_untracked(self, **kwargs):
for fs_path, store in kwargs.get("pootle_untracked", []):
yield dict(fs_path=fs_path, store=store)
@pytest.mark.django_db
def test_fs_response_instance():
context = DummyContext()
resp = ProjectFSResponse(context)
assert resp.context == context
assert resp.response_types == FS_RESPONSE.keys()
assert resp.has_failed is False
assert resp.made_changes is False
assert list(resp.failed()) == []
assert list(resp.completed()) == []
assert str(resp) == (
"<ProjectFSResponse(<DummyContext object>): No changes made>")
assert list(resp) == []
with pytest.raises(KeyError):
resp["DOES_NOT_EXIST"]
def _test_item(item, item_state):
assert isinstance(item, ProjectFSItemResponse)
assert item.kwargs["fs_state"] == item_state
assert item.fs_state == item_state
assert item.failed is False
assert item.fs_path == item.fs_state.fs_path
assert item.pootle_path == item.fs_state.pootle_path
assert item.store_fs == item.fs_state.store_fs
assert item.store == item.fs_state.store
assert (
str(item)
== ("<ProjectFSItemResponse(<DummyContext object>): %s "
"%s::%s>" % (item.action_type, item.pootle_path, item.fs_path)))
def _test_fs_response(expected=2, **kwargs):
action_type = kwargs.pop("action_type")
state_type = kwargs.pop("state_type")
resp = ProjectFSResponse(DummyContext())
state = DummyFSState(DummyContext(), **kwargs)
for fs_state in state[state_type]:
resp.add(action_type, fs_state=fs_state)
assert resp.has_failed is False
assert resp.made_changes is True
assert resp.response_types == FS_RESPONSE.keys()
assert len(list(resp.completed())) == 2
assert list(resp.failed()) == []
assert action_type in resp
assert str(resp) == (
"<ProjectFSResponse(<DummyContext object>): %s: %s>"
% (action_type, expected))
for i, item in enumerate(resp[action_type]):
_test_item(item, state[state_type][i])
@pytest.mark.django_db
def test_fs_response_path_items(settings, tmpdir):
pootle_fs_path = os.path.join(str(tmpdir), "fs_response_test")
settings.POOTLE_FS_PATH = pootle_fs_path
project = Project.objects.get(code="project0")
fs_untracked = []
for i in range(0, 2):
fs_untracked.append(
("/some/fs/fs_untracked_%s.po" % i,
"/language0/%s/fs_untracked_%s.po" % (project.code, i)))
_test_fs_response(
fs_untracked=fs_untracked,
action_type="fetched_from_fs",
state_type="fs_untracked")
@pytest.mark.django_db
def test_fs_response_store_items(settings, tmpdir):
pootle_fs_path = os.path.join(str(tmpdir), "fs_response_test")
settings.POOTLE_FS_PATH = pootle_fs_path
project = Project.objects.get(code="project0")
pootle_untracked = []
for i in range(0, 2):
pootle_untracked.append(
("/some/fs/pootle_untracked_%s.po" % i,
Store.objects.create_by_path(
"/language0/%s/pootle_untracked_%s.po" % (project.code, i))))
_test_fs_response(
pootle_untracked=pootle_untracked,
action_type="added_from_pootle",
state_type="pootle_untracked")
@pytest.mark.django_db
def test_fs_response_store_fs_items(settings, tmpdir):
pootle_fs_path = os.path.join(str(tmpdir), "fs_response_test")
settings.POOTLE_FS_PATH = pootle_fs_path
project = Project.objects.get(code="project0")
fs_ahead = []
for i in range(0, 2):
pootle_path = "/language0/%s/fs_ahead_%s.po" % (project.code, i)
fs_path = "/some/fs/fs_ahead_%s.po" % i
fs_ahead.append(
StoreFS.objects.create(
store=Store.objects.create_by_path(pootle_path),
path=fs_path))
_test_fs_response(
fs_ahead=fs_ahead,
action_type="pulled_to_pootle",
state_type="fs_ahead")
@pytest.mark.django_db
def test_fs_response_store_fs_no_store_items(settings, tmpdir):
pootle_fs_path = os.path.join(str(tmpdir), "fs_response_test")
settings.POOTLE_FS_PATH = pootle_fs_path
project = Project.objects.get(code="project0")
fs_staged = []
for i in range(0, 2):
pootle_path = "/language0/%s/fs_staged_%s.po" % (project.code, i)
fs_path = "/some/fs/fs_staged_%s.po" % i
fs_staged.append(
StoreFS.objects.create(
pootle_path=pootle_path,
path=fs_path))
_test_fs_response(
fs_staged=fs_staged,
action_type="pulled_to_pootle",
state_type="fs_staged")
|
Finntack/pootle
|
tests/pootle_fs/fs_response.py
|
Python
|
gpl-3.0
| 6,794
|
#!/usr/bin/python
import pygeoip
import json
from logsparser.lognormalizer import LogNormalizer as LN
import gzip
import glob
import socket
import urllib2
IP = 'IP.Of.Your.Server'
normalizer = LN('/usr/local/share/logsparser/normalizers')
gi = pygeoip.GeoIP('../GeoLiteCity.dat')
def complete(text, state):
    return (glob.glob(text+'*')+[None])[state]
def sshcheck():
attacks = {}
users = {}
try:
import readline, rlcompleter
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(complete)
except ImportError:
print 'No Tab Completion'
LOGs = raw_input('Enter the path to the log file: ')
for LOG in LOGs.split(' '):
if LOG.endswith('.gz'):
auth_logs = gzip.GzipFile(LOG, 'r')
else:
auth_logs = open(LOG, 'r')
        if len(LOGs.split(' ')) == 1:
print "Parsing log file"
else:
print "Parsing log files"
for log in auth_logs:
l = {"raw": log }
normalizer.normalize(l)
if l.get('action') == 'fail' and l.get('program') == 'sshd':
u = l['user']
p = l['source_ip']
o1, o2, o3, o4 = [int(i) for i in p.split('.')]
if o1 == 192 and o2 == 168 or o1 == 172 and o2 in range(16, 32) or o1 == 10:
                    print "Private IP %s - no geolocation data" % str(p)
attacks[p] = attacks.get(p, 0) + 1
getip()
dojson(attacks, IP)
def getip():
global IP
if IP is 0:
try:
i = urllib2.Request("http://icanhazip.com")
p = urllib2.urlopen(i)
IP = p.read()
except:
print "can't seem to grab your IP please set IP variable so We can better map attacks"
def dojson(attacks, IP):
data = {}
for i,(a,p) in enumerate(attacks.iteritems()):
datalist = [{ 'ip': a, 'attacks': p, 'local_ip': IP }]
data[i] = datalist
newdata = data
newjson = json.dumps(newdata)
print json.loads(newjson)
send(newjson)
def send(data):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('Ip.Of.Your.Server', 9999))
s.sendall(data)
s.close()
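# Shape of the JSON payload that dojson() hands to send() (values
# illustrative): {"0": [{"ip": "203.0.113.7", "attacks": 12,
# "local_ip": "198.51.100.2"}], "1": [...]} -- one numbered entry per
# attacking source address.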
try:
sshcheck()
except KeyboardInterrupt:
print '\nCtrl+C Exiting...'
exit(0)
|
radman404/Who-s-attacking-me-now--
|
wamnclient.py
|
Python
|
gpl-2.0
| 2,126
|
#!/usr/bin/env python
# encoding: utf-8
from flask import jsonify
def make_error_resp(code, msg):
return jsonify(**{
'data': {},
'msg': msg,
'code': code,
'extras': {}
})
def normal_resp(data):
return jsonify(**{
'data': data,
'msg': 'success',
'code': 2000,
'extras': {}
})
def page_resp(data, total, page, page_size):
resp = {
'data': data,
'msg': 'success',
'code': 2000,
'extras': {
'total': total,
'page': page,
'page_size': page_size,
'has_prev': page > 1,
'has_next': (page_size) * page < total
},
}
return jsonify(**resp)
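# --- Hedged usage sketch (added for illustration; not part of the original
# module). jsonify() needs an application context, so a throwaway Flask app
# is created just for the demo; resp.get_json() assumes Flask >= 1.0.
if __name__ == '__main__':
    from flask import Flask
    _app = Flask(__name__)
    with _app.app_context():
        resp = page_resp(data=[{'id': i} for i in range(21, 26)],
                         total=25, page=3, page_size=10)
        # 25 items at 10 per page, viewing page 3 (the last page):
        # extras -> has_prev=True (3 > 1), has_next=False (10 * 3 >= 25)
        print(resp.get_json())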
|
luke0922/MarkdownEditor
|
application/utils/response.py
|
Python
|
gpl-2.0
| 728
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.orchestration import base
from tempest.lib import exceptions as lib_exc
from tempest import test
class TemplateYAMLNegativeTestJSON(base.BaseOrchestrationTest):
template = """
HeatTemplateFormatVersion: '2012-12-12'
Description: |
Template which creates only a new user
Resources:
CfnUser:
Type: AWS::IAM::User
"""
invalid_template_url = 'http://www.example.com/template.yaml'
@classmethod
def resource_setup(cls):
super(TemplateYAMLNegativeTestJSON, cls).resource_setup()
cls.parameters = {}
@test.attr(type=['negative'])
@test.idempotent_id('5586cbca-ddc4-4152-9db8-fa1ce5fc1876')
def test_validate_template_url(self):
"""Validating template passing url to it."""
self.assertRaises(lib_exc.BadRequest,
self.client.validate_template_url,
template_url=self.invalid_template_url,
parameters=self.parameters)
class TemplateAWSNegativeTestJSON(TemplateYAMLNegativeTestJSON):
template = """
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template which creates only a new user",
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User"
}
}
}
"""
invalid_template_url = 'http://www.example.com/template.template'
|
HybridF5/tempest_debug
|
tempest/api/orchestration/stacks/test_templates_negative.py
|
Python
|
apache-2.0
| 1,954
|
from .csv_collect import *
from .noise_test import *
from .streamer_lsl import *
from .streamer_osc import *
from .streamer_tcp_server import *
from .udp_server import *
__version__ = "1.0.0"
|
OpenBCI/OpenBCI_Python
|
openbci/plugins/__init__.py
|
Python
|
mit
| 193
|
from direct.showbase.DirectObject import DirectObject
from direct.showbase.PythonUtil import makeList
from otp.avatar.DistributedPlayer import DistributedPlayer
from direct.task.Task import Task
class TelemetryLimiter(DirectObject):
TaskName = 'TelemetryLimiterEnforce'
LeakDetectEventName = 'telemetryLimiter'
def __init__(self):
self._objs = {}
self._task = taskMgr.add(self._enforceLimits, self.TaskName, priority=40)
def destroy(self):
taskMgr.remove(self._task)
del self._objs
def getNumObjs(self):
return len(self._objs)
def addObj(self, obj):
id = obj.getTelemetryLimiterId()
self._objs[id] = obj
self.accept(self._getDummyEventName(obj), self._dummyEventHandler)
def hasObj(self, obj):
id = obj.getTelemetryLimiterId()
return id in self._objs
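    # The dummy event is never actually fired; accepting it just registers a
    # named no-op listener per tracked object (see LeakDetectEventName), which
    # event-leak-detection tooling can observe.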
def _getDummyEventName(self, obj):
return '%s-%s-%s-%s' % (self.LeakDetectEventName,
obj.getTelemetryLimiterId(),
id(obj),
obj.__class__.__name__)
def _dummyEventHandler(self, *args, **kargs):
pass
def removeObj(self, obj):
id = obj.getTelemetryLimiterId()
self._objs.pop(id)
self.ignore(self._getDummyEventName(obj))
def _enforceLimits(self, task = None):
for obj in self._objs.itervalues():
obj.enforceTelemetryLimits()
return Task.cont
class TelemetryLimit:
def __call__(self, obj):
pass
class RotationLimitToH(TelemetryLimit):
def __init__(self, pConst = 0.0, rConst = 0.0):
self._pConst = pConst
self._rConst = rConst
def __call__(self, obj):
if obj.isEmpty():
return
obj.setHpr(obj.getH(), self._pConst, self._rConst)
class TLNull:
def __init__(self, *limits):
pass
def destroy(self):
pass
class TLGatherAllAvs(DirectObject):
def __init__(self, name, *limits):
self._name = name
self._avs = {}
self._limits = makeList(limits)
self._avId2limits = {}
avs = base.cr.doFindAllInstances(DistributedPlayer)
for av in avs:
self._handlePlayerArrive(av)
self.accept(DistributedPlayer.GetPlayerGenerateEvent(), self._handlePlayerArrive)
self.accept(DistributedPlayer.GetPlayerDeleteEvent(), self._handlePlayerLeave)
def _handlePlayerArrive(self, av):
if av is not localAvatar:
self._avs[av.doId] = av
limitList = []
for limit in self._limits:
l = limit()
limitList.append(l)
av.addTelemetryLimit(l)
self._avId2limits[av.doId] = limitList
base.cr.telemetryLimiter.addObj(av)
def _handlePlayerLeave(self, av):
if av is not localAvatar and base.cr.telemetryLimiter.hasObj(av) and av.doId in self._avId2limits:
base.cr.telemetryLimiter.removeObj(av)
for limit in self._avId2limits[av.doId]:
av.removeTelemetryLimit(limit)
del self._avId2limits[av.doId]
if av.doId in self._avs:
del self._avs[av.doId]
def destroy(self):
self.ignoreAll()
while len(self._avs):
self._handlePlayerLeave(self._avs.values()[0])
del self._avs
del self._limits
del self._avId2limits
|
ToonTownInfiniteRepo/ToontownInfinite
|
otp/distributed/TelemetryLimiter.py
|
Python
|
mit
| 3,341
|
{% if created %}from rest_framework import viewsets, routers, serializers
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from models import {{model_name}}
{% if with_filter %}import django_filters{% endif %}
{% endif %}
class {{model_name}}Serializer(serializers.ModelSerializer):
class Meta:
model = {{model_name}}
{% if with_filter %}
class {{model_name}}Filter(django_filters.FilterSet):
## some examples to get you started
## more at: http://www.django-rest-framework.org/api-guide/filtering/
#date_range = django_filters.DateRangeFilter(name="day")
#date_from = django_filters.DateFilter(name="day", lookup_type="gte")
#date_to = django_filters.DateFilter(name="day", lookup_type="lte")
#customer = django_filters.CharFilter(name="project__customer")
#month = django_filters.DateFilter(name="day__month")
class Meta:
model = {{model_name}}
#fields = ['status', 'day', 'date_range', 'date_from', 'date_to', 'month']
{% endif %}
class {{model_name}}ViewSet(viewsets.ModelViewSet):
"""
api/v1/{{model_name|lower}}/
----
"""
model = {{model_name}}
serializer_class={{model_name}}Serializer
{% if with_filter %}filter_class = {{model_name}}Filter
filter_fields = ()
{% endif %}
def get_queryset(self):
'''
* Limit queries to only those owned by the current user
* Admin can see all items
* Not logged in is empty list
'''
user = self.request.user
if not user.is_authenticated():
return {{model_name}}.objects.none()
if user.is_superuser:
return {{model_name}}.objects.all()
else:
return {{model_name}}.objects.filter(user=user.id)
## endpoints should be plural:
router = routers.DefaultRouter()
router.register('{{model_name|lower()}}s', {{model_name}}ViewSet)
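## Illustrative note (added; not part of the scaffold): rendering this with
## model_name=Book and with_filter=True yields a BookSerializer, BookFilter
## and BookViewSet, registered on the DefaultRouter at /books/.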
|
TangentMicroServices/DRF-Project-Scaffolder
|
templates/api.py
|
Python
|
mit
| 1,972
|
"""Modifies a password for a volume's access"""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
@click.command()
@click.argument('access_id')
@click.option('--password', '-p', multiple=False,
help='Password you want to set, this command will fail if the password is not strong')
@environment.pass_env
def cli(env, access_id, password):
"""Changes a password for a volume's access.
access id is the allowed_host_id from slcli block access-list
"""
block_manager = SoftLayer.BlockStorageManager(env.client)
result = block_manager.set_credential_password(access_id=access_id, password=password)
if result:
click.echo('Password updated for %s' % access_id)
else:
click.echo('FAILED updating password for %s' % access_id)
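# Hedged usage note (added for illustration): outside the slcli framework,
# the same operation is just the manager call made above, e.g.
#   SoftLayer.BlockStorageManager(client).set_credential_password(
#       access_id=1234, password='...')
# where 1234 stands in for an allowed_host_id from `slcli block access-list`.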
|
softlayer/softlayer-python
|
SoftLayer/CLI/block/access/password.py
|
Python
|
mit
| 849
|
#!/bin/python
# -*- coding: utf-8 -*-
# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributors.
from fenrirscreenreader.core import debug
from fenrirscreenreader.utils import char_utils
class command():
def __init__(self):
pass
def initialize(self, environment):
self.env = environment
def shutdown(self):
pass
def getDescription(self):
        return _('sets review and phonetically presents the current character')
def run(self):
self.env['runtime']['cursorManager'].enterReviewModeCurrTextCursor()
self.env['screen']['newCursorReview']['x'], self.env['screen']['newCursorReview']['y'], currChar = \
char_utils.getCurrentChar(self.env['screen']['newCursorReview']['x'], self.env['screen']['newCursorReview']['y'], self.env['screen']['newContentText'])
if currChar.isspace():
self.env['runtime']['outputManager'].presentText(_("blank"), interrupt=True, flush=False)
else:
currChar = char_utils.getPhonetic(currChar)
            self.env['runtime']['outputManager'].presentText(currChar, interrupt=True, announceCapital=True, flush=False)
def setCallback(self, callback):
pass
|
chrys87/fenrir
|
src/fenrirscreenreader/commands/commands/review_curr_char_phonetic.py
|
Python
|
lgpl-3.0
| 1,237
|
from collections import Iterable
from django.conf import settings
from django.contrib.auth.decorators import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ImproperlyConfigured
from django.core.exceptions import PermissionDenied
from guardian.utils import get_403_or_None
class LoginRequiredMixin(object):
"""
A login required mixin for use with class based views. This Class is a
light wrapper around the `login_required` decorator and hence function
parameters are just attributes defined on the class.
Due to parent class order traversal this mixin must be added as the left
most mixin of a view.
    The mixin has exactly the same flow as the `login_required` decorator:
If the user isn't logged in, redirect to ``settings.LOGIN_URL``, passing
the current absolute path in the query string. Example:
``/accounts/login/?next=/polls/3/``.
If the user is logged in, execute the view normally. The view code is
free to assume the user is logged in.
**Class Settings**
``LoginRequiredMixin.redirect_field_name``
*Default*: ``'next'``
``LoginRequiredMixin.login_url``
*Default*: ``settings.LOGIN_URL``
"""
redirect_field_name = REDIRECT_FIELD_NAME
login_url = settings.LOGIN_URL
def dispatch(self, request, *args, **kwargs):
return login_required(redirect_field_name=self.redirect_field_name,
login_url=self.login_url)(
super(LoginRequiredMixin, self).dispatch
)(request, *args, **kwargs)
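# Hedged usage sketch (added; not part of this module): the mixin must be the
# left-most base so its dispatch() runs first. TemplateView is stock Django;
# the view name and template path are hypothetical.
#
# from django.views.generic import TemplateView
#
# class SecretPageView(LoginRequiredMixin, TemplateView):
#     template_name = 'secret.html'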
class PermissionRequiredMixin(object):
"""
A view mixin that verifies if the current logged in user has the specified
permission by wrapping the ``request.user.has_perm(..)`` method.
If a `get_object()` method is defined either manually or by including
another mixin (for example ``SingleObjectMixin``) or ``self.object`` is
    defined, then the permission will be tested against that specific instance.
.. note:
Testing of a permission against a specific object instance requires an
        authentication backend that supports it. Please see ``django-guardian`` to
add object level permissions to your project.
The mixin does the following:
If the user isn't logged in, redirect to settings.LOGIN_URL, passing
the current absolute path in the query string. Example:
/accounts/login/?next=/polls/3/.
    If `raise_exception` is set to True then, rather than redirecting to the
    login page, a `PermissionDenied` (403) is raised.
    If the user is logged in and passes the permission check, then the view
    is executed normally.
**Example Usage**::
class SecureView(PermissionRequiredMixin, View):
...
permission_required = 'auth.change_user'
...
**Class Settings**
``PermissionRequiredMixin.permission_required``
*Default*: ``None``, must be set to either a string or list of strings
in format: *<app_label>.<permission_codename>*.
``PermissionRequiredMixin.login_url``
*Default*: ``settings.LOGIN_URL``
``PermissionRequiredMixin.redirect_field_name``
*Default*: ``'next'``
``PermissionRequiredMixin.return_403``
*Default*: ``False``. Returns 403 error page instead of redirecting
user.
``PermissionRequiredMixin.raise_exception``
*Default*: ``False``
`permission_required` - the permission to check of form "<app_label>.<permission codename>"
i.e. 'polls.can_vote' for a permission on a model in the polls application.
"""
### default class view settings
login_url = settings.LOGIN_URL
permission_required = None
redirect_field_name = REDIRECT_FIELD_NAME
return_403 = False
raise_exception = False
def get_required_permissions(self, request=None):
"""
Returns list of permissions in format *<app_label>.<codename>* that
should be checked against *request.user* and *object*. By default, it
returns list from ``permission_required`` attribute.
:param request: Original request.
"""
if isinstance(self.permission_required, basestring):
perms = [self.permission_required]
elif isinstance(self.permission_required, Iterable):
perms = [p for p in self.permission_required]
else:
raise ImproperlyConfigured("'PermissionRequiredMixin' requires "
"'permission_required' attribute to be set to "
"'<app_label>.<permission codename>' but is set to '%s' instead"
% self.permission_required)
return perms
def check_permissions(self, request):
"""
Checks if *request.user* has all permissions returned by
*get_required_permissions* method.
:param request: Original request.
"""
obj = hasattr(self, 'get_object') and self.get_object() \
or getattr(self, 'object', None)
forbidden = get_403_or_None(request,
perms=self.get_required_permissions(request),
obj=obj,
login_url=self.login_url,
redirect_field_name=self.redirect_field_name,
return_403=self.return_403,
)
if forbidden:
self.on_permission_check_fail(request, forbidden, obj=obj)
if forbidden and self.raise_exception:
raise PermissionDenied()
return forbidden
def on_permission_check_fail(self, request, response, obj=None):
"""
Method called upon permission check fail. By default it does nothing and
should be overridden, if needed.
:param request: Original request
:param response: 403 response returned by *check_permissions* method.
:param obj: Object that was fetched from the view (using ``get_object``
method or ``object`` attribute, in that order).
"""
def dispatch(self, request, *args, **kwargs):
response = self.check_permissions(request)
if response:
return response
return super(PermissionRequiredMixin, self).dispatch(request, *args,
**kwargs)
|
hfeeki/django-guardian
|
guardian/mixins.py
|
Python
|
bsd-2-clause
| 6,473
|
# TODO separate into different package modules
|
dibaunaumh/fcs-skateboard
|
fcs_aux/mies/__init__.py
|
Python
|
agpl-3.0
| 47
|
import os
import unittest
import re
from mock import MagicMock
from PyAnalysisTools.base.ProcessConfig import ProcessConfig, parse_and_build_process_config, find_process_config, \
Process
from PyAnalysisTools.base.YAMLHandle import YAMLLoader as yl
cwd = os.path.dirname(__file__)
class TestProcess(unittest.TestCase):
@classmethod
def setUpClass(self):
self.data_set_info = yl.read_yaml(os.path.join(cwd, 'fixtures/dataset_info_pmg.yml'))
def setUp(self):
pass
def tearDown(self):
pass
def test_str(self):
process = Process('tmp/ntuple-311570_0.MC16a.root', self.data_set_info)
self.assertEqual("TBbLQmumu1300l1 parsed from file name tmp/ntuple-311570_0.MC16a.root", process.__str__())
def test_unicode(self):
process = Process('tmp/ntuple-311570_0.MC16a.root', self.data_set_info)
self.assertEqual("TBbLQmumu1300l1 parsed from file name tmp/ntuple-311570_0.MC16a.root", process.__unicode__())
def test_format(self):
process = Process('tmp/ntuple-311570_0.MC16a.root', self.data_set_info)
self.assertEqual("TBbLQmumu1300l1 parsed from file name tmp/ntuple-311570_0.MC16a.root", "{:s}".format(process))
def test_hash(self):
process = Process('tmp/ntuple-311570_0.MC16a.root', self.data_set_info)
self.assertEqual(hash("TBbLQmumu1300l1"), hash(process))
def test_process_file_name_ntuple(self):
process = Process('tmp/ntuple-311570_0.MC16a.root', self.data_set_info)
self.assertTrue(process.is_mc)
self.assertFalse(process.is_data)
self.assertEqual('TBbLQmumu1300l1', process.process_name)
self.assertEqual('311570', process.dsid)
self.assertEqual('mc16a', process.mc_campaign)
def test_process_file_name_hist(self):
process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
self.assertTrue(process.is_mc)
self.assertFalse(process.is_data)
self.assertEqual('TBbLQmumu1300l1', process.process_name)
self.assertEqual('311570', process.dsid)
self.assertEqual('mc16d', process.mc_campaign)
def test_process_file_name_arbitrary_tag(self):
process = Process('tmp/foo-311570_0.MC16e.root', self.data_set_info, tags=['foo'])
self.assertTrue(process.is_mc)
self.assertFalse(process.is_data)
self.assertEqual('TBbLQmumu1300l1', process.process_name)
self.assertEqual('311570', process.dsid)
self.assertEqual('mc16e', process.mc_campaign)
def test_process_file_name_data(self):
process = Process('v21/ntuple-data18_13TeV_periodO_0.root', self.data_set_info, tags=['foo'])
self.assertFalse(process.is_mc)
self.assertTrue(process.is_data)
self.assertEqual('data18.periodO', process.process_name)
self.assertIsNone(process.dsid)
self.assertIsNone(process.mc_campaign)
def test_process_no_file_name(self):
process = Process(None, self.data_set_info, tags=['foo'])
self.assertFalse(process.is_mc)
self.assertFalse(process.is_data)
self.assertIsNone(process.process_name)
self.assertIsNone(process.dsid)
def test_process_unconvential_file_name(self):
process = Process('tmp/hist-333311570_0.MC16e.root', self.data_set_info)
self.assertTrue(process.is_mc)
self.assertFalse(process.is_data)
self.assertEqual(None, process.process_name)
self.assertEqual('333311570', process.dsid)
def test_str_operator(self):
process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
self.assertEqual('TBbLQmumu1300l1 parsed from file name tmp/hist-311570_0.MC16d.root', process.__str__())
def test_equality(self):
process1 = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
process2 = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
self.assertEqual(process1, process2)
def test_equality_different_files(self):
process1 = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
process2 = Process('tmp/ntuple-311570_0.MC16d.root', self.data_set_info)
self.assertEqual(process1, process2)
def test_inequality(self):
process1 = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
process2 = Process('tmp/hist-311570_0.MC16e.root', self.data_set_info)
self.assertNotEqual(process1, process2)
def test_inequality_type(self):
process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
self.assertNotEqual(process, None)
def test_match_true(self):
process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
self.assertTrue(process.match('TBbLQmumu1300l1'))
def test_match_false(self):
process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
self.assertFalse(process.match('TBbLQmumu1400l1'))
    def test_match_false_no_process(self):
process = Process('tmp/hist-333311570_0.MC16e.root', self.data_set_info)
self.assertFalse(process.match('TBbLQmumu1400l1'))
def test_match_any_true(self):
process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
self.assertTrue(process.matches_any(['TBbLQmumu1300l1']))
def test_match_any_false(self):
process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
self.assertFalse(process.matches_any(['TBbLQmumu1400l1']))
def test_match_any_false_invalid_input(self):
process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info)
self.assertFalse(process.matches_any('TBbLQmumu1400l1'))
def test_with_cut(self):
process1 = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info, cut='foo')
process2 = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info, cut='bar')
self.assertNotEqual(process1, process2)
self.assertNotEqual(process1.process_name, process2.process_name)
self.assertEqual(process1.dsid, process2.dsid)
def test_process_file_name_mc(self):
process = Process('v21/ntuple-mc16_311570_MC16e.root', self.data_set_info, tags=['foo'])
self.assertTrue(process.is_mc)
self.assertFalse(process.is_data)
self.assertEqual('TBbLQmumu1300l1', process.process_name)
self.assertEqual('311570', process.dsid)
self.assertEqual('mc16e', process.mc_campaign)
def test_process_file_name_hist_full_path(self):
process = Process('/Users/foo/tmp/test/hists_20200206_18-04-21/hist-364106_1.MC16d.root',
self.data_set_info, tags=['foo'])
self.assertTrue(process.is_mc)
self.assertFalse(process.is_data)
self.assertEqual('ZmumuHT140280CVetoBVeto', process.process_name)
self.assertEqual('364106', process.dsid)
self.assertEqual('mc16d', process.mc_campaign)
def test_process_file_name_ntuple_data_full_path(self):
process = Process('/storage/hepgrp/morgens/LQ/ntuples/v29_merged/ntuple-data17_13TeV_periodK_0.root',
self.data_set_info, tags=['foo'])
self.assertFalse(process.is_mc)
self.assertTrue(process.is_data)
self.assertEqual('data17.periodK', process.process_name)
def test_process_file_name_data_user(self):
fname = '~/user.foo.data18_13TeV.periodAllYear.physics_Late.pro24_v01.v8_hist/user.foo.2._001.hist-output.root'
process = Process(fname, self.data_set_info, tags=['foo'])
self.assertFalse(process.is_mc)
self.assertTrue(process.is_data)
self.assertTrue(re.match(r'.*data.*', process.process_name))
def test_process_file_name_data_run(self):
fname = '~/v8/ntuple-data16_cos_306147_physics_Main_cosmicsReco.root'
process = Process(fname, self.data_set_info, tags=['foo'])
self.assertFalse(process.is_mc)
self.assertTrue(process.is_data)
self.assertTrue(re.match(r'.*data16.*306147.*', process.process_name))
def test_process_file_name_data_cos(self):
process = Process('hist-data16_cos.00306147.physics_Main.cosmicsStandardOFCs.root', None)
self.assertTrue(process.is_data)
self.assertFalse(process.is_mc)
self.assertEqual('data16_00306147', process.process_name)
self.assertIsNone(process.period)
self.assertIsNone(process.weight)
def test_process_weight(self):
process = Process('v21/ntuple-mc16_311570_MC16e.root', self.data_set_info, weight='foo')
self.assertEqual('foo', process.weight)
class TestProcessConfig(unittest.TestCase):
def setUp(self):
self.pc = ProcessConfig(name='foo', type='data')
self.cfg_file = os.path.join(os.path.dirname(__file__), 'fixtures/process_merge_config.yml')
def test_ctor(self):
self.assertIsNone(self.pc.parent_process)
self.assertIsNone(self.pc.scale_factor)
self.assertIsNone(self.pc.regions_only)
self.assertIsNone(self.pc.weight)
self.assertIsNone(self.pc.assoc_process)
self.assertTrue(self.pc.is_data)
self.assertFalse(self.pc.is_syst_process)
self.assertFalse(self.pc.is_mc)
def test_str(self):
process_cfg_str = self.pc.__str__()
self.assertTrue("Process config: foo \n" in process_cfg_str)
self.assertTrue("name=foo \n" in process_cfg_str)
self.assertTrue("type=data \n" in process_cfg_str)
self.assertTrue("is_syst_process=False \n" in process_cfg_str)
self.assertTrue("parent_process=None \n" in process_cfg_str)
self.assertTrue("scale_factor=None \n" in process_cfg_str)
self.assertTrue("is_mc=False \n" in process_cfg_str)
self.assertTrue("weight=None \n" in process_cfg_str)
self.assertTrue("is_data=True \n" in process_cfg_str)
self.assertTrue("regions_only=None \n" in process_cfg_str)
self.assertTrue("assoc_process=None \n" in process_cfg_str)
self.assertTrue("type=data \n" in process_cfg_str)
def test_unicode(self):
process_cfg_str = self.pc.__unicode__()
self.assertTrue("Process config: foo \n" in process_cfg_str)
self.assertTrue("name=foo \n" in process_cfg_str)
self.assertTrue("type=data \n" in process_cfg_str)
self.assertTrue("is_syst_process=False \n" in process_cfg_str)
self.assertTrue("parent_process=None \n" in process_cfg_str)
self.assertTrue("scale_factor=None \n" in process_cfg_str)
self.assertTrue("is_mc=False \n" in process_cfg_str)
self.assertTrue("weight=None \n" in process_cfg_str)
self.assertTrue("is_data=True \n" in process_cfg_str)
self.assertTrue("regions_only=None \n" in process_cfg_str)
self.assertTrue("assoc_process=None \n" in process_cfg_str)
self.assertTrue("type=data \n" in process_cfg_str)
def test_repr(self):
process_cfg_str = self.pc.__repr__()
self.assertTrue("Process config: foo \n" in process_cfg_str)
self.assertTrue("name=foo \n" in process_cfg_str)
self.assertTrue("type=data \n" in process_cfg_str)
self.assertTrue("is_syst_process=False \n" in process_cfg_str)
self.assertTrue("parent_process=None \n" in process_cfg_str)
self.assertTrue("scale_factor=None \n" in process_cfg_str)
self.assertTrue("is_mc=False \n" in process_cfg_str)
self.assertTrue("weight=None \n" in process_cfg_str)
self.assertTrue("is_data=True \n" in process_cfg_str)
self.assertTrue("regions_only=None \n" in process_cfg_str)
self.assertTrue("assoc_process=None \n" in process_cfg_str)
self.assertTrue("type=data \n" in process_cfg_str)
def test_parse_and_build_process_config(self):
cfgs = parse_and_build_process_config(self.cfg_file)
self.assertTrue('Data' in cfgs)
    def test_parse_and_build_process_config_list(self):
cfgs = parse_and_build_process_config([self.cfg_file])
self.assertTrue('Data' in cfgs)
def test_parse_and_build_process_config_no_file(self):
self.assertIsNone(parse_and_build_process_config(None))
def test_parse_and_build_process_config_non_existing_file_exception(self):
try:
self.assertRaises(FileNotFoundError, parse_and_build_process_config, 'foo')
except NameError:
self.assertRaises(IOError, parse_and_build_process_config, 'foo')
def test_find_process_config_missing_input(self):
self.assertIsNone(find_process_config(None, MagicMock()))
self.assertIsNone(find_process_config(MagicMock(), None))
def test_find_process_config(self):
cfgs = parse_and_build_process_config(self.cfg_file)
self.assertEqual(cfgs['Data'], find_process_config('data18_13TeV_periodB', cfgs))
def test_find_process_config_direct_cfg_match(self):
cfgs = parse_and_build_process_config(self.cfg_file)
self.assertEqual(cfgs['Data'], find_process_config('Data', cfgs))
def test_find_process_config_no_regex(self):
cfgs = parse_and_build_process_config(self.cfg_file)
cfgs['Data'].subprocesses = ['data18_13TeV_periodB']
self.assertEqual(cfgs['Data'], find_process_config('data18_13TeV_periodB', cfgs))
def test_find_process_config_no_subprocess(self):
cfgs = parse_and_build_process_config(self.cfg_file)
delattr(cfgs['Data'], 'subprocesses')
self.assertIsNone(find_process_config('data18_13TeV_periodB', cfgs))
def test_find_process_config_multiple_matches(self):
cfgs = parse_and_build_process_config(self.cfg_file)
cfgs['tmp'] = cfgs['Data']
self.assertIsNone(find_process_config('data18_13TeV_periodB', cfgs))
def test_find_process_config_process(self):
cfgs = parse_and_build_process_config(self.cfg_file)
self.assertEqual(cfgs['Data'], find_process_config(Process('data18_13TeV_periodB', None), cfgs))
def test_find_process_config_direct_cfg_match_process(self):
cfgs = parse_and_build_process_config(self.cfg_file)
self.assertEqual(cfgs['Data'], find_process_config(Process('Data', None), cfgs))
def test_find_process_config_no_regex_process(self):
cfgs = parse_and_build_process_config(self.cfg_file)
cfgs['Data'].subprocesses = ['data18.periodB']
self.assertEqual(cfgs['Data'], find_process_config(Process('data18_13TeV_periodB', None), cfgs))
def test_find_process_config_no_subprocess_process(self):
cfgs = parse_and_build_process_config(self.cfg_file)
delattr(cfgs['Data'], 'subprocesses')
self.assertIsNone(find_process_config(Process('data18_13TeV_periodB', None), cfgs))
def test_find_process_config_multiple_matches_process(self):
cfgs = parse_and_build_process_config(self.cfg_file)
cfgs['tmp'] = cfgs['Data']
self.assertIsNone(find_process_config(Process('data18_13TeV_periodB', None), cfgs))
|
morgenst/PyAnalysisTools
|
tests/unit/TestProcessConfig.py
|
Python
|
mit
| 15,015
|
"""
This page is in the table of contents.
Fillet rounds the corners slightly in a variety of ways. This is to reduce corner blobbing and sudden extruder acceleration.
The fillet manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Fillet
==Operation==
The default 'Activate Fillet' checkbox is off. When it is on, the functions described below will work, when it is off, nothing will be done.
==Settings==
===Fillet Procedure Choice===
Default is 'Bevel'.
====Arc Point====
When selected, the corners will be filleted with an arc using the gcode point form.
====Arc Radius====
When selected, the corners will be filleted with an arc using the gcode radius form.
====Arc Segment====
When selected, the corners will be filleted with an arc composed of several segments.
====Bevel====
When selected, the corners will be beveled.
===Corner Feed Rate Multiplier===
Default: 1.0
Defines the ratio of the feed rate in corners over the original feed rate. With a high value the extruder will move quickly in corners, accelerating quickly and leaving a thin extrusion. With a low value, the extruder will move slowly in corners, accelerating gently and leaving a thick extrusion.
===Fillet Radius over Perimeter Width===
Default is 0.35.
Defines the width of the fillet.
===Reversal Slowdown over Perimeter Width===
Default is 0.5.
Defines how far before a path reversal the extruder will slow down. Some tools, like nozzle wipe, double back the path of the extruder and this option will add a slowdown point in that path so there won't be a sudden jerk at the end of the path. If the value is less than 0.1 a slowdown will not be added.
===Use Intermediate Feed Rate in Corners===
Default is on.
When selected, the feed rate entering the corner will be the average of the old feed rate and the new feed rate.
==Examples==
The following examples fillet the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and fillet.py.
> python fillet.py
This brings up the fillet dialog.
> python fillet.py Screw Holder Bottom.stl
The fillet tool is parsing the file:
Screw Holder Bottom.stl
..
The fillet tool has created the file:
.. Screw Holder Bottom_fillet.gcode
"""
from __future__ import absolute_import
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText( fileName, gcodeText, repository = None ):
"Fillet a gcode linear move file or text."
return getCraftedTextFromText( archive.getTextIfEmpty( fileName, gcodeText ), repository )
def getCraftedTextFromText( gcodeText, repository = None ):
"Fillet a gcode linear move text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'fillet'):
return gcodeText
if repository == None:
repository = settings.getReadRepository( FilletRepository() )
if not repository.activateFillet.value:
return gcodeText
if repository.arcPoint.value:
return ArcPointSkein().getCraftedGcode( repository, gcodeText )
elif repository.arcRadius.value:
return ArcRadiusSkein().getCraftedGcode( repository, gcodeText )
elif repository.arcSegment.value:
return ArcSegmentSkein().getCraftedGcode( repository, gcodeText )
elif repository.bevel.value:
return BevelSkein().getCraftedGcode( repository, gcodeText )
return gcodeText
def getNewRepository():
'Get new repository.'
return FilletRepository()
def writeOutput(fileName, shouldAnalyze=True):
"Fillet a gcode linear move file. Depending on the settings, either arcPoint, arcRadius, arcSegment, bevel or do nothing."
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'fillet', shouldAnalyze)
class BevelSkein(object):
"A class to bevel a skein of extrusions."
def __init__(self):
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.extruderActive = False
self.feedRateMinute = 960.0
self.filletRadius = 0.2
self.lineIndex = 0
self.lines = None
self.oldFeedRateMinute = None
self.oldLocation = None
self.shouldAddLine = True
def addLinearMovePoint( self, feedRateMinute, point ):
"Add a gcode linear move, feedRate and newline to the output."
self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( feedRateMinute, point.dropAxis(), point.z ) )
def getCornerFeedRate(self):
"Get the corner feed rate, which may be based on the intermediate feed rate."
feedRateMinute = self.feedRateMinute
if self.repository.useIntermediateFeedRateInCorners.value:
if self.oldFeedRateMinute != None:
feedRateMinute = 0.5 * ( self.oldFeedRateMinute + self.feedRateMinute )
return feedRateMinute * self.cornerFeedRateMultiplier
def getCraftedGcode( self, repository, gcodeText ):
"Parse gcode text and store the bevel gcode."
self.cornerFeedRateMultiplier = repository.cornerFeedRateMultiplier.value
self.lines = archive.getTextLines(gcodeText)
self.repository = repository
self.parseInitialization( repository )
for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[self.lineIndex]
self.parseLine(line)
return self.distanceFeedRate.output.getvalue()
def getExtruderOffReversalPoint( self, afterSegment, afterSegmentComplex, beforeSegment, beforeSegmentComplex, location ):
"If the extruder is off and the path is reversing, add intermediate slow points."
if self.repository.reversalSlowdownDistanceOverEdgeWidth.value < 0.1:
return None
if self.extruderActive:
return None
reversalBufferSlowdownDistance = self.reversalSlowdownDistance * 2.0
afterSegmentComplexLength = abs( afterSegmentComplex )
if afterSegmentComplexLength < reversalBufferSlowdownDistance:
return None
beforeSegmentComplexLength = abs( beforeSegmentComplex )
if beforeSegmentComplexLength < reversalBufferSlowdownDistance:
return None
afterSegmentComplexNormalized = afterSegmentComplex / afterSegmentComplexLength
beforeSegmentComplexNormalized = beforeSegmentComplex / beforeSegmentComplexLength
if euclidean.getDotProduct( afterSegmentComplexNormalized, beforeSegmentComplexNormalized ) < 0.95:
return None
slowdownFeedRate = self.feedRateMinute * 0.5
self.shouldAddLine = False
beforePoint = euclidean.getPointPlusSegmentWithLength( self.reversalSlowdownDistance * abs( beforeSegment ) / beforeSegmentComplexLength, location, beforeSegment )
self.addLinearMovePoint( self.feedRateMinute, beforePoint )
self.addLinearMovePoint( slowdownFeedRate, location )
afterPoint = euclidean.getPointPlusSegmentWithLength( self.reversalSlowdownDistance * abs( afterSegment ) / afterSegmentComplexLength, location, afterSegment )
self.addLinearMovePoint( slowdownFeedRate, afterPoint )
return afterPoint
def getNextLocation(self):
"Get the next linear move. Return none is none is found."
for afterIndex in xrange( self.lineIndex + 1, len(self.lines) ):
line = self.lines[ afterIndex ]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if gcodec.getFirstWord(splitLine) == 'G1':
nextLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
return nextLocation
return None
def linearMove( self, splitLine ):
"Bevel a linear move."
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.feedRateMinute = gcodec.getFeedRateMinute( self.feedRateMinute, splitLine )
if self.oldLocation != None:
nextLocation = self.getNextLocation()
if nextLocation != None:
location = self.splitPointGetAfter( location, nextLocation )
self.oldLocation = location
self.oldFeedRateMinute = self.feedRateMinute
def parseInitialization( self, repository ):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('fillet')
return
elif firstWord == '(<edgeWidth>':
edgeWidth = abs(float(splitLine[1]))
self.curveSection = 0.7 * edgeWidth
self.filletRadius = edgeWidth * repository.filletRadiusOverEdgeWidth.value
self.minimumRadius = 0.1 * edgeWidth
self.reversalSlowdownDistance = edgeWidth * repository.reversalSlowdownDistanceOverEdgeWidth.value
self.distanceFeedRate.addLine(line)
def parseLine(self, line):
"Parse a gcode line and add it to the bevel gcode."
self.shouldAddLine = True
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == 'G1':
self.linearMove(splitLine)
elif firstWord == 'M101':
self.extruderActive = True
elif firstWord == 'M103':
self.extruderActive = False
if self.shouldAddLine:
self.distanceFeedRate.addLine(line)
def splitPointGetAfter( self, location, nextLocation ):
"Bevel a point and return the end of the bevel. should get complex for radius"
if self.filletRadius < 2.0 * self.minimumRadius:
return location
afterSegment = nextLocation - location
afterSegmentComplex = afterSegment.dropAxis()
afterSegmentComplexLength = abs( afterSegmentComplex )
thirdAfterSegmentLength = 0.333 * afterSegmentComplexLength
if thirdAfterSegmentLength < self.minimumRadius:
return location
beforeSegment = self.oldLocation - location
beforeSegmentComplex = beforeSegment.dropAxis()
beforeSegmentComplexLength = abs( beforeSegmentComplex )
thirdBeforeSegmentLength = 0.333 * beforeSegmentComplexLength
if thirdBeforeSegmentLength < self.minimumRadius:
return location
extruderOffReversalPoint = self.getExtruderOffReversalPoint( afterSegment, afterSegmentComplex, beforeSegment, beforeSegmentComplex, location )
if extruderOffReversalPoint != None:
return extruderOffReversalPoint
bevelRadius = min( thirdAfterSegmentLength, self.filletRadius )
bevelRadius = min( thirdBeforeSegmentLength, bevelRadius )
self.shouldAddLine = False
beforePoint = euclidean.getPointPlusSegmentWithLength( bevelRadius * abs( beforeSegment ) / beforeSegmentComplexLength, location, beforeSegment )
self.addLinearMovePoint( self.feedRateMinute, beforePoint )
afterPoint = euclidean.getPointPlusSegmentWithLength( bevelRadius * abs( afterSegment ) / afterSegmentComplexLength, location, afterSegment )
self.addLinearMovePoint( self.getCornerFeedRate(), afterPoint )
return afterPoint
class ArcSegmentSkein( BevelSkein ):
"A class to arc segment a skein of extrusions."
def addArc( self, afterCenterDifferenceAngle, afterPoint, beforeCenterSegment, beforePoint, center ):
"Add arc segments to the filleted skein."
absoluteDifferenceAngle = abs( afterCenterDifferenceAngle )
# steps = int( math.ceil( absoluteDifferenceAngle * 1.5 ) )
steps = int( math.ceil( min( absoluteDifferenceAngle * 1.5, absoluteDifferenceAngle * abs( beforeCenterSegment ) / self.curveSection ) ) )
stepPlaneAngle = euclidean.getWiddershinsUnitPolar( afterCenterDifferenceAngle / steps )
for step in xrange( 1, steps ):
beforeCenterSegment = euclidean.getRoundZAxisByPlaneAngle( stepPlaneAngle, beforeCenterSegment )
arcPoint = center + beforeCenterSegment
self.addLinearMovePoint( self.getCornerFeedRate(), arcPoint )
self.addLinearMovePoint( self.getCornerFeedRate(), afterPoint )
def splitPointGetAfter( self, location, nextLocation ):
"Fillet a point into arc segments and return the end of the last segment."
if self.filletRadius < 2.0 * self.minimumRadius:
return location
afterSegment = nextLocation - location
afterSegmentComplex = afterSegment.dropAxis()
thirdAfterSegmentLength = 0.333 * abs( afterSegmentComplex )
if thirdAfterSegmentLength < self.minimumRadius:
return location
beforeSegment = self.oldLocation - location
beforeSegmentComplex = beforeSegment.dropAxis()
thirdBeforeSegmentLength = 0.333 * abs( beforeSegmentComplex )
if thirdBeforeSegmentLength < self.minimumRadius:
return location
extruderOffReversalPoint = self.getExtruderOffReversalPoint( afterSegment, afterSegmentComplex, beforeSegment, beforeSegmentComplex, location )
if extruderOffReversalPoint != None:
return extruderOffReversalPoint
bevelRadius = min( thirdAfterSegmentLength, self.filletRadius )
bevelRadius = min( thirdBeforeSegmentLength, bevelRadius )
self.shouldAddLine = False
beforePoint = euclidean.getPointPlusSegmentWithLength( bevelRadius * abs( beforeSegment ) / abs( beforeSegmentComplex ), location, beforeSegment )
self.addLinearMovePoint( self.feedRateMinute, beforePoint )
afterPoint = euclidean.getPointPlusSegmentWithLength( bevelRadius * abs( afterSegment ) / abs( afterSegmentComplex ), location, afterSegment )
afterPointComplex = afterPoint.dropAxis()
beforePointComplex = beforePoint.dropAxis()
locationComplex = location.dropAxis()
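		# Geometry note (added comment): the arc center lies on the ray from the
		# corner (location) through the chord midpoint, and the center distance
		# and radius below follow from similar right triangles over the half-chord.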
midpoint = 0.5 * ( afterPoint + beforePoint )
midpointComplex = midpoint.dropAxis()
midpointMinusLocationComplex = midpointComplex - locationComplex
midpointLocationLength = abs( midpointMinusLocationComplex )
if midpointLocationLength < 0.01 * self.filletRadius:
self.addLinearMovePoint( self.getCornerFeedRate(), afterPoint )
return afterPoint
midpointAfterPointLength = abs( midpointComplex - afterPointComplex )
midpointCenterLength = midpointAfterPointLength * midpointAfterPointLength / midpointLocationLength
radius = math.sqrt( midpointCenterLength * midpointCenterLength + midpointAfterPointLength * midpointAfterPointLength )
centerComplex = midpointComplex + midpointMinusLocationComplex * midpointCenterLength / midpointLocationLength
center = Vector3( centerComplex.real, centerComplex.imag, midpoint.z )
afterCenterComplex = afterPointComplex - centerComplex
beforeCenter = beforePoint - center
angleDifference = euclidean.getAngleDifferenceByComplex( afterCenterComplex, beforeCenter.dropAxis() )
self.addArc( angleDifference, afterPoint, beforeCenter, beforePoint, center )
return afterPoint
class ArcPointSkein( ArcSegmentSkein ):
"A class to arc point a skein of extrusions."
def addArc( self, afterCenterDifferenceAngle, afterPoint, beforeCenterSegment, beforePoint, center ):
"Add an arc point to the filleted skein."
if afterCenterDifferenceAngle == 0.0:
return
afterPointMinusBefore = afterPoint - beforePoint
centerMinusBefore = center - beforePoint
firstWord = 'G3'
if afterCenterDifferenceAngle < 0.0:
firstWord = 'G2'
centerMinusBeforeComplex = centerMinusBefore.dropAxis()
if abs( centerMinusBeforeComplex ) <= 0.0:
return
radius = abs( centerMinusBefore )
arcDistanceZ = complex( abs( afterCenterDifferenceAngle ) * radius, afterPointMinusBefore.z )
distance = abs( arcDistanceZ )
if distance <= 0.0:
return
line = self.distanceFeedRate.getFirstWordMovement( firstWord, afterPoint ) + self.getRelativeCenter( centerMinusBeforeComplex )
cornerFeedRate = self.getCornerFeedRate()
if cornerFeedRate != None:
line += ' F' + self.distanceFeedRate.getRounded(cornerFeedRate)
self.distanceFeedRate.addLine(line)
def getRelativeCenter( self, centerMinusBeforeComplex ):
"Get the relative center."
return ' I%s J%s' % ( self.distanceFeedRate.getRounded( centerMinusBeforeComplex.real ), self.distanceFeedRate.getRounded( centerMinusBeforeComplex.imag ) )
class ArcRadiusSkein( ArcPointSkein ):
"A class to arc radius a skein of extrusions."
def getRelativeCenter( self, centerMinusBeforeComplex ):
"Get the relative center."
radius = abs( centerMinusBeforeComplex )
return ' R' + ( self.distanceFeedRate.getRounded(radius) )
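# Illustrative note (added): the arc skeins emit standard gcode arc moves.
# ArcPointSkein writes the center-offset form, e.g.
#   G3 X12.4 Y8.1 Z0.4 I0.35 J-0.20 F960.0
# while ArcRadiusSkein writes the radius form ending in "R0.40"; G2 is used
# when the corner turns clockwise. The coordinates shown are hypothetical.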
class FilletRepository(object):
"A class to handle the fillet settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.fillet.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File to be Filleted', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Fillet')
self.activateFillet = settings.BooleanSetting().getFromValue('Activate Fillet', self, False )
self.filletProcedureChoiceLabel = settings.LabelDisplay().getFromName('Fillet Procedure Choice: ', self )
filletLatentStringVar = settings.LatentStringVar()
self.arcPoint = settings.Radio().getFromRadio( filletLatentStringVar, 'Arc Point', self, False )
self.arcRadius = settings.Radio().getFromRadio( filletLatentStringVar, 'Arc Radius', self, False )
self.arcSegment = settings.Radio().getFromRadio( filletLatentStringVar, 'Arc Segment', self, False )
self.bevel = settings.Radio().getFromRadio( filletLatentStringVar, 'Bevel', self, True )
self.cornerFeedRateMultiplier = settings.FloatSpin().getFromValue(0.8, 'Corner Feed Rate Multiplier (ratio):', self, 1.2, 1.0)
self.filletRadiusOverEdgeWidth = settings.FloatSpin().getFromValue( 0.25, 'Fillet Radius over Perimeter Width (ratio):', self, 0.65, 0.35 )
self.reversalSlowdownDistanceOverEdgeWidth = settings.FloatSpin().getFromValue( 0.3, 'Reversal Slowdown Distance over Perimeter Width (ratio):', self, 0.7, 0.5 )
self.useIntermediateFeedRateInCorners = settings.BooleanSetting().getFromValue('Use Intermediate Feed Rate in Corners', self, True )
self.executeTitle = 'Fillet'
def execute(self):
"Fillet button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
def main():
"Display the fillet dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
|
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/Cura/cura_sf/skeinforge_application/skeinforge_plugins/craft_plugins/fillet.py
|
Python
|
agpl-3.0
| 18,646
|
from __future__ import unicode_literals
from builtins import object
from rest_framework import viewsets, serializers, mixins, permissions
from isisdata.models import Citation, Authority, ACRelation, PartDetails
from zotero.models import *
class InstanceResolutionEventSerializer(serializers.ModelSerializer):
class Meta(object):
model = InstanceResolutionEvent
        exclude = []
class AuthoritySerializer(serializers.ModelSerializer):
resolutions = InstanceResolutionEventSerializer(many=True, read_only=True)
class Meta(object):
model = Authority
exclude = ('modified_on_fm',
'modified_by_fm',
'created_on_fm',
'created_by_fm',
'redirect_to')
read_only_fields = ('id',)
class ACRelationSerializer(serializers.ModelSerializer):
resolutions = InstanceResolutionEventSerializer(many=True, read_only=True)
class Meta(object):
model = ACRelation
exclude = ('modified_on_fm',
'modified_by_fm',
'created_on_fm',
'created_by_fm')
read_only_fields = ('id',)
def update(self, instance, validated_data):
"""
Overwritten to allow related fields.
"""
for attr, value in list(validated_data.items()):
if attr == 'authority':
instance.authority = value
else:
setattr(instance, attr, value)
instance.save()
return instance
class PartDetailsSerializer(serializers.ModelSerializer):
class Meta(object):
model = PartDetails
exclude = []
class CitationSerializer(serializers.ModelSerializer):
resolutions = InstanceResolutionEventSerializer(many=True, read_only=True)
acrelation_set = ACRelationSerializer(many=True, read_only=True)
part_details = PartDetailsSerializer()
class Meta(object):
model = Citation
exclude = ('modified_on_fm',
'modified_by_fm',
'created_on_fm',
'created_by_fm',
'related_citations')
def update(self, instance, validated_data):
"""
Overwritten to allow related fields.
"""
for attr, value in list(validated_data.items()):
if attr == 'part_details':
for a, v in list(value.items()):
                    setattr(instance.part_details, a, v)
else:
setattr(instance, attr, value)
instance.save()
return instance
class DraftAuthoritySerializer(serializers.ModelSerializer):
resolutions = InstanceResolutionEventSerializer(many=True)
resolved = serializers.BooleanField()
class Meta(object):
model = DraftAuthority
exclude = []
class DraftACRelationSerializer(serializers.ModelSerializer):
authority = DraftAuthoritySerializer()
class Meta(object):
model = DraftACRelation
exclude = []
class DraftCitationSerializer(serializers.ModelSerializer):
authority_relations = DraftACRelationSerializer(many=True)
class Meta(object):
model = DraftCitation
exclude = []
|
upconsulting/IsisCB
|
isiscb/zotero/serializers.py
|
Python
|
mit
| 3,223
|
#!/usr/bin/python
from distutils.core import setup
setup(name = 'py_mpgedit',
version='0.3beta',
author='Bryan Weingarten',
author_email='bryan.weingarten@mpgedit.org',
url='http://www.mpgedit.org/',
description='Python bindings to the mpgedit SDK.',
py_modules = ["pympgedit"])
|
gusrc/mpgedit
|
contrib/python/py_mpgedit/setup_pympgedit.py
|
Python
|
lgpl-2.1
| 316
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, VMware, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_maintenancemode
short_description: Place a host into maintenance mode
description:
    - Place an ESXi host into maintenance mode
- Support for VSAN compliant maintenance mode when selected
author: "Jay Jahns <jjahns@vmware.com>"
version_added: "2.1"
notes:
- Tested on vSphere 5.5 and 6.0
requirements:
- "python >= 2.6"
- PyVmomi
options:
esxi_hostname:
description:
- Name of the host as defined in vCenter
required: True
    vsan:
description:
- Specify which VSAN compliant mode to enter
choices:
- 'ensureObjectAccessibility'
- 'evacuateAllData'
- 'noAction'
required: False
evacuate:
description:
- If True, evacuate all powered off VMs
choices:
- True
- False
default: False
required: False
timeout:
description:
- Specify a timeout for the operation
required: False
default: 0
state:
description:
- Enter or exit maintenance mode
choices:
- present
- absent
default: present
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Enter VSAN-Compliant Maintenance Mode
local_action:
module: vmware_maintenancemode
hostname: vc_host
username: vc_user
password: vc_pass
esxi_hostname: esxi.host.example
vsan: ensureObjectAccessibility
evacuate: yes
timeout: 3600
state: present
'''
RETURN = '''
hostsystem:
description: Name of vim reference
returned: always
type: string
sample: "'vim.HostSystem:host-236'"
hostname:
description: Name of host in vCenter
returned: always
type: string
sample: "esxi.local.domain"
status:
description: Action taken
returned: always
type: string
sample: "ENTER"
'''
try:
    from pyVmomi import vim
except ImportError:
    # pyVmomi availability is reported by HAS_PYVMOMI, imported from
    # ansible.module_utils.vmware below.
    pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (HAS_PYVMOMI, TaskError, connect_to_api, find_hostsystem_by_name,
vmware_argument_spec, wait_for_task)
def EnterMaintenanceMode(module, host):
if host.runtime.inMaintenanceMode:
module.exit_json(
changed=False,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='NO_ACTION',
msg='Host already in maintenance mode')
spec = vim.host.MaintenanceSpec()
if module.params['vsan']:
spec.vsanMode = vim.vsan.host.DecommissionMode()
spec.vsanMode.objectAction = module.params['vsan']
try:
task = host.EnterMaintenanceMode_Task(
module.params['timeout'],
module.params['evacuate'],
spec)
success, result = wait_for_task(task)
return dict(changed=success,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='ENTER',
msg='Host entered maintenance mode')
except TaskError:
module.fail_json(
msg='Host failed to enter maintenance mode')
def ExitMaintenanceMode(module, host):
if not host.runtime.inMaintenanceMode:
module.exit_json(
changed=False,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='NO_ACTION',
msg='Host not in maintenance mode')
try:
task = host.ExitMaintenanceMode_Task(
module.params['timeout'])
success, result = wait_for_task(task)
return dict(changed=success,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='EXIT',
msg='Host exited maintenance mode')
except TaskError:
module.fail_json(
msg='Host failed to exit maintenance mode')
def main():
spec = vmware_argument_spec()
spec.update(dict(
esxi_hostname=dict(required=True),
vsan=dict(required=False, choices=['ensureObjectAccessibility',
'evacuateAllData',
'noAction']),
evacuate=dict(required=False, type='bool', default=False),
timeout=dict(required=False, default=0, type='int'),
state=dict(required=False,
default='present',
choices=['present', 'absent'])))
module = AnsibleModule(argument_spec=spec)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
content = connect_to_api(module)
host = find_hostsystem_by_name(content, module.params['esxi_hostname'])
if not host:
module.fail_json(
msg='Host not found in vCenter')
if module.params['state'] == 'present':
result = EnterMaintenanceMode(module, host)
elif module.params['state'] == 'absent':
result = ExitMaintenanceMode(module, host)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
bearstech/ansible
|
lib/ansible/modules/cloud/vmware/vmware_maintenancemode.py
|
Python
|
gpl-3.0
| 5,690
|
#!/usr/bin/env python
# $Id: CheetahWrapper.py,v 1.26 2007/10/02 01:22:04 tavis_rudd Exp $
"""Cheetah command-line interface.
2002-09-03 MSO: Total rewrite.
2002-09-04 MSO: Bugfix, compile command was using wrong output ext.
2002-11-08 MSO: Another rewrite.
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com> and Mike Orr <sluggoster@gmail.com>
Version: $Revision: 1.26 $
Start Date: 2001/03/30
Last Revision Date: $Date: 2007/10/02 01:22:04 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com> and Mike Orr <sluggoster@gmail.com>"
__revision__ = "$Revision: 1.26 $"[11:-2]
import getopt, glob, os, pprint, re, shutil, sys
import cPickle as pickle
from Cheetah.Version import Version
from Cheetah.Template import Template, DEFAULT_COMPILER_SETTINGS
from Cheetah.Utils.Misc import mkdirsWithPyInitFiles
from Cheetah.Utils.optik import OptionParser
optionDashesRE = re.compile( R"^-{1,2}" )
moduleNameRE = re.compile( R"^[a-zA-Z_][a-zA-Z_0-9]*$" )
def fprintfMessage(stream, format, *args):
if format[-1:] == '^':
format = format[:-1]
else:
format += '\n'
if args:
message = format % args
else:
message = format
stream.write(message)
class Error(Exception):
pass
class Bundle:
"""Wrap the source, destination and backup paths in one neat little class.
Used by CheetahWrapper.getBundles().
"""
def __init__(self, **kw):
self.__dict__.update(kw)
def __repr__(self):
return "<Bundle %r>" % self.__dict__
class MyOptionParser(OptionParser):
standard_option_list = [] # We use commands for Optik's standard options.
def error(self, msg):
"""Print our usage+error page."""
usage(HELP_PAGE2, msg)
def print_usage(self, file=None):
"""Our usage+error page already has this."""
pass
##################################################
## USAGE FUNCTION & MESSAGES
def usage(usageMessage, errorMessage="", out=sys.stderr):
"""Write help text, an optional error message, and abort the program.
"""
out.write(WRAPPER_TOP)
out.write(usageMessage)
exitStatus = 0
if errorMessage:
out.write('\n')
out.write("*** USAGE ERROR ***: %s\n" % errorMessage)
exitStatus = 1
sys.exit(exitStatus)
WRAPPER_TOP = """\
__ ____________ __
\ \/ \/ /
\/ * * \/ CHEETAH %(Version)s Command-Line Tool
\ | /
\ ==----== / by Tavis Rudd <tavis@damnsimple.com>
\__________/ and Mike Orr <sluggoster@gmail.com>
""" % globals()
HELP_PAGE1 = """\
USAGE:
------
cheetah compile [options] [FILES ...] : Compile template definitions
cheetah fill [options] [FILES ...] : Fill template definitions
cheetah help : Print this help message
cheetah options : Print options help message
cheetah test [options] : Run Cheetah's regression tests
: (same as for unittest)
cheetah version : Print Cheetah version number
You may abbreviate the command to the first letter; e.g., 'h' == 'help'.
If FILES is a single "-", read standard input and write standard output.
Run "cheetah options" for the list of valid options.
"""
HELP_PAGE2 = """\
OPTIONS FOR "compile" AND "fill":
---------------------------------
--idir DIR, --odir DIR : input/output directories (default: current dir)
--iext EXT, --oext EXT : input/output filename extensions
(default for compile: tmpl/py, fill: tmpl/html)
-R : recurse subdirectories looking for input files
--debug : print lots of diagnostic output to standard error
--env : put the environment in the searchList
--flat : no destination subdirectories
--nobackup : don't make backups
--pickle FILE : unpickle FILE and put that object in the searchList
--stdout, -p : output to standard output (pipe)
--settings : a string representing the compiler settings to use
e.g. --settings='useNameMapper=False,useFilters=False'
This string is eval'd in Python so it should contain
valid Python syntax.
--templateAPIClass : a string representing a subclass of
Cheetah.Template:Template to use for compilation
Run "cheetah help" for the main help screen.
"""
##################################################
## CheetahWrapper CLASS
class CheetahWrapper:
MAKE_BACKUPS = True
BACKUP_SUFFIX = ".bak"
_templateClass = None
_compilerSettings = None
def __init__(self):
self.progName = None
self.command = None
self.opts = None
self.pathArgs = None
self.sourceFiles = []
self.searchList = []
##################################################
## MAIN ROUTINE
def main(self, argv=None):
"""The main program controller."""
if argv is None:
argv = sys.argv
# Step 1: Determine the command and arguments.
try:
self.progName = progName = os.path.basename(argv[0])
self.command = command = optionDashesRE.sub("", argv[1])
if command == 'test':
self.testOpts = argv[2:]
else:
self.parseOpts(argv[2:])
except IndexError:
usage(HELP_PAGE1, "not enough command-line arguments")
# Step 2: Call the command
meths = (self.compile, self.fill, self.help, self.options,
self.test, self.version)
for meth in meths:
methName = meth.__name__
# Or meth.im_func.func_name
# Or meth.func_name (Python >= 2.1 only, sometimes works on 2.0)
methInitial = methName[0]
if command in (methName, methInitial):
sys.argv[0] += (" " + methName)
# @@MO: I don't necessarily agree sys.argv[0] should be
# modified.
meth()
return
# If none of the commands matched.
usage(HELP_PAGE1, "unknown command '%s'" % command)
def parseOpts(self, args):
C, D, W = self.chatter, self.debug, self.warn
self.isCompile = isCompile = self.command[0] == 'c'
defaultOext = isCompile and ".py" or ".html"
parser = MyOptionParser()
pao = parser.add_option
pao("--idir", action="store", dest="idir", default="")
pao("--odir", action="store", dest="odir", default="")
pao("--iext", action="store", dest="iext", default=".tmpl")
pao("--oext", action="store", dest="oext", default=defaultOext)
pao("-R", action="store_true", dest="recurse", default=False)
pao("--stdout", "-p", action="store_true", dest="stdout", default=False)
pao("--debug", action="store_true", dest="debug", default=False)
pao("--env", action="store_true", dest="env", default=False)
pao("--pickle", action="store", dest="pickle", default="")
pao("--flat", action="store_true", dest="flat", default=False)
pao("--nobackup", action="store_true", dest="nobackup", default=False)
pao("--settings", action="store", dest="compilerSettingsString", default=None)
pao("--templateAPIClass", action="store", dest="templateClassName", default=None)
self.opts, self.pathArgs = opts, files = parser.parse_args(args)
D("""\
cheetah compile %s
Options are
%s
Files are %s""", args, pprint.pformat(vars(opts)), files)
#cleanup trailing path separators
seps = [sep for sep in [os.sep, os.altsep] if sep]
for attr in ['idir', 'odir']:
for sep in seps:
path = getattr(opts, attr, None)
if path and path.endswith(sep):
path = path[:-len(sep)]
setattr(opts, attr, path)
break
self._fixExts()
if opts.env:
self.searchList.insert(0, os.environ)
if opts.pickle:
f = open(opts.pickle, 'rb')
unpickled = pickle.load(f)
f.close()
self.searchList.insert(0, unpickled)
opts.verbose = not opts.stdout
##################################################
## COMMAND METHODS
def compile(self):
self._compileOrFill()
def fill(self):
from Cheetah.ImportHooks import install
install()
self._compileOrFill()
def help(self):
usage(HELP_PAGE1, "", sys.stdout)
def options(self):
usage(HELP_PAGE2, "", sys.stdout)
def test(self):
# @@MO: Ugly kludge.
TEST_WRITE_FILENAME = 'cheetah_test_file_creation_ability.tmp'
try:
f = open(TEST_WRITE_FILENAME, 'w')
except:
sys.exit("""\
Cannot run the tests because you don't have write permission in the current
directory. The tests need to create temporary files. Change to a directory
you do have write permission to and re-run the tests.""")
else:
f.close()
os.remove(TEST_WRITE_FILENAME)
# @@MO: End ugly kludge.
from Cheetah.Tests import Test
import Cheetah.Tests.unittest_local_copy as unittest
del sys.argv[1:] # Prevent unittest from misinterpreting options.
sys.argv.extend(self.testOpts)
        #unittest.main(testSuite=Test.testSuite)
unittest.main(module=Test)
def version(self):
print Version
# If you add a command, also add it to the 'meths' variable in main().
##################################################
## LOGGING METHODS
def chatter(self, format, *args):
"""Print a verbose message to stdout. But don't if .opts.stdout is
true or .opts.verbose is false.
"""
if self.opts.stdout or not self.opts.verbose:
return
fprintfMessage(sys.stdout, format, *args)
def debug(self, format, *args):
"""Print a debugging message to stderr, but don't if .debug is
false.
"""
if self.opts.debug:
fprintfMessage(sys.stderr, format, *args)
def warn(self, format, *args):
"""Always print a warning message to stderr.
"""
fprintfMessage(sys.stderr, format, *args)
def error(self, format, *args):
"""Always print a warning message to stderr and exit with an error code.
"""
fprintfMessage(sys.stderr, format, *args)
sys.exit(1)
##################################################
## HELPER METHODS
def _fixExts(self):
assert self.opts.oext, "oext is empty!"
iext, oext = self.opts.iext, self.opts.oext
if iext and not iext.startswith("."):
self.opts.iext = "." + iext
if oext and not oext.startswith("."):
self.opts.oext = "." + oext
def _compileOrFill(self):
C, D, W = self.chatter, self.debug, self.warn
opts, files = self.opts, self.pathArgs
if files == ["-"]:
self._compileOrFillStdin()
return
elif not files and opts.recurse:
which = opts.idir and "idir" or "current"
C("Drilling down recursively from %s directory.", which)
sourceFiles = []
dir = os.path.join(self.opts.idir, os.curdir)
os.path.walk(dir, self._expandSourceFilesWalk, sourceFiles)
elif not files:
usage(HELP_PAGE1, "Neither files nor -R specified!")
else:
sourceFiles = self._expandSourceFiles(files, opts.recurse, True)
sourceFiles = [os.path.normpath(x) for x in sourceFiles]
D("All source files found: %s", sourceFiles)
bundles = self._getBundles(sourceFiles)
D("All bundles: %s", pprint.pformat(bundles))
if self.opts.flat:
self._checkForCollisions(bundles)
for b in bundles:
self._compileOrFillBundle(b)
def _checkForCollisions(self, bundles):
"""Check for multiple source paths writing to the same destination
path.
"""
C, D, W = self.chatter, self.debug, self.warn
isError = False
dstSources = {}
for b in bundles:
if dstSources.has_key(b.dst):
dstSources[b.dst].append(b.src)
else:
dstSources[b.dst] = [b.src]
keys = dstSources.keys()
keys.sort()
for dst in keys:
sources = dstSources[dst]
if len(sources) > 1:
isError = True
sources.sort()
fmt = "Collision: multiple source files %s map to one destination file %s"
W(fmt, sources, dst)
if isError:
what = self.isCompile and "Compilation" or "Filling"
sys.exit("%s aborted due to collisions" % what)
def _expandSourceFilesWalk(self, arg, dir, files):
"""Recursion extension for .expandSourceFiles().
This method is a callback for os.path.walk().
'arg' is a list to which successful paths will be appended.
"""
iext = self.opts.iext
for f in files:
path = os.path.join(dir, f)
if path.endswith(iext) and os.path.isfile(path):
arg.append(path)
elif os.path.islink(path) and os.path.isdir(path):
os.path.walk(path, self._expandSourceFilesWalk, arg)
# If is directory, do nothing; 'walk' will eventually get it.
def _expandSourceFiles(self, files, recurse, addIextIfMissing):
"""Calculate source paths from 'files' by applying the
command-line options.
"""
C, D, W = self.chatter, self.debug, self.warn
idir = self.opts.idir
iext = self.opts.iext
files = []
for f in self.pathArgs:
oldFilesLen = len(files)
D("Expanding %s", f)
path = os.path.join(idir, f)
pathWithExt = path + iext # May or may not be valid.
if os.path.isdir(path):
if recurse:
os.path.walk(path, self._expandSourceFilesWalk, files)
else:
raise Error("source file '%s' is a directory" % path)
elif os.path.isfile(path):
files.append(path)
elif (addIextIfMissing and not path.endswith(iext) and
os.path.isfile(pathWithExt)):
files.append(pathWithExt)
# Do not recurse directories discovered by iext appending.
elif os.path.exists(path):
W("Skipping source file '%s', not a plain file.", path)
else:
W("Skipping source file '%s', not found.", path)
if len(files) > oldFilesLen:
D(" ... found %s", files[oldFilesLen:])
return files
def _getBundles(self, sourceFiles):
flat = self.opts.flat
idir = self.opts.idir
iext = self.opts.iext
nobackup = self.opts.nobackup
odir = self.opts.odir
oext = self.opts.oext
idirSlash = idir + os.sep
bundles = []
for src in sourceFiles:
# 'base' is the subdirectory plus basename.
base = src
if idir and src.startswith(idirSlash):
base = src[len(idirSlash):]
if iext and base.endswith(iext):
base = base[:-len(iext)]
basename = os.path.basename(base)
if flat:
dst = os.path.join(odir, basename + oext)
else:
dbn = basename
if odir and base.startswith(os.sep):
odd = odir
while odd != '':
idx = base.find(odd)
if idx == 0:
dbn = base[len(odd):]
if dbn[0] == '/':
dbn = dbn[1:]
break
odd = os.path.dirname(odd)
if odd == '/':
break
dst = os.path.join(odir, dbn + oext)
else:
dst = os.path.join(odir, base + oext)
bak = dst + self.BACKUP_SUFFIX
b = Bundle(src=src, dst=dst, bak=bak, base=base, basename=basename)
bundles.append(b)
return bundles
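    # --- Illustrative sketch (not part of the original Cheetah source) ---
    # A standalone rendition of the source -> destination mapping that
    # _getBundles() performs for the common non-flat, relative-path case;
    # the directory and extension defaults are arbitrary example values.
    @staticmethod
    def _demoDestinationPath(src, idir='templates', iext='.tmpl',
                             odir='build', oext='.py'):
        base = src
        if idir and base.startswith(idir + os.sep):
            base = base[len(idir + os.sep):]
        if iext and base.endswith(iext):
            base = base[:-len(iext)]
        return os.path.join(odir, base + oext)
    # _demoDestinationPath('templates/page.tmpl') -> 'build/page.py'
    # (with POSIX path separators)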
def _getTemplateClass(self):
C, D, W = self.chatter, self.debug, self.warn
modname = None
if self._templateClass:
return self._templateClass
modname = self.opts.templateClassName
if not modname:
return Template
p = modname.rfind('.')
if ':' not in modname:
self.error('The value of option --templateAPIClass is invalid\n'
'It must be in the form "module:class", '
'e.g. "Cheetah.Template:Template"')
modname, classname = modname.split(':')
C('using --templateAPIClass=%s:%s'%(modname, classname))
if p >= 0:
mod = getattr(__import__(modname[:p], {}, {}, [modname[p+1:]]), modname[p+1:])
else:
mod = __import__(modname, {}, {}, [])
klass = getattr(mod, classname, None)
if klass:
self._templateClass = klass
return klass
else:
self.error('**Template class specified in option --templateAPIClass not found\n'
'**Falling back on Cheetah.Template:Template')
def _getCompilerSettings(self):
if self._compilerSettings:
return self._compilerSettings
def getkws(**kws):
return kws
if self.opts.compilerSettingsString:
try:
exec 'settings = getkws(%s)'%self.opts.compilerSettingsString
except:
self.error("There's an error in your --settings option."
"It must be valid Python syntax.\n"
+" --settings='%s'\n"%self.opts.compilerSettingsString
+" %s: %s"%sys.exc_info()[:2]
)
validKeys = DEFAULT_COMPILER_SETTINGS.keys()
            invalidKeys = [k for k in settings.keys() if k not in validKeys]
            if invalidKeys:
                self.error(
                    'The --setting "%s" is not a valid compiler setting name.'
                    % invalidKeys[0])
self._compilerSettings = settings
return settings
else:
return {}
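    # --- Illustrative sketch (not part of the original Cheetah source) ---
    # Shows how a --settings string such as "useNameMapper=False" becomes a
    # keyword dict; it mirrors the exec-based parsing above in a standalone,
    # side-effect-free form. The setting name is just an example.
    @staticmethod
    def _demoParseSettings(settingsString):
        def getkws(**kws):
            return kws
        # eval(), like the exec above, trusts the input -- which is why the
        # option help insists the string must be valid Python syntax.
        return eval('getkws(%s)' % settingsString)
    # CheetahWrapper._demoParseSettings("useNameMapper=False")
    #   -> {'useNameMapper': False}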
def _compileOrFillStdin(self):
TemplateClass = self._getTemplateClass()
compilerSettings = self._getCompilerSettings()
if self.isCompile:
pysrc = TemplateClass.compile(file=sys.stdin,
compilerSettings=compilerSettings,
returnAClass=False)
output = pysrc
else:
output = str(TemplateClass(file=sys.stdin, compilerSettings=compilerSettings))
sys.stdout.write(output)
def _compileOrFillBundle(self, b):
C, D, W = self.chatter, self.debug, self.warn
TemplateClass = self._getTemplateClass()
compilerSettings = self._getCompilerSettings()
src = b.src
dst = b.dst
base = b.base
basename = b.basename
dstDir = os.path.dirname(dst)
what = self.isCompile and "Compiling" or "Filling"
C("%s %s -> %s^", what, src, dst) # No trailing newline.
if os.path.exists(dst) and not self.opts.nobackup:
bak = b.bak
C(" (backup %s)", bak) # On same line as previous message.
else:
bak = None
C("")
if self.isCompile:
if not moduleNameRE.match(basename):
                tup = src, basename
raise Error("""\
%s: base name %s contains invalid characters. It must
be named according to the same rules as Python modules.""" % tup)
pysrc = TemplateClass.compile(file=src, returnAClass=False,
moduleName=basename,
className=basename,
compilerSettings=compilerSettings)
output = pysrc
else:
#output = str(TemplateClass(file=src, searchList=self.searchList))
tclass = TemplateClass.compile(file=src, compilerSettings=compilerSettings)
output = str(tclass(searchList=self.searchList))
if bak:
shutil.copyfile(dst, bak)
if dstDir and not os.path.exists(dstDir):
if self.isCompile:
mkdirsWithPyInitFiles(dstDir)
else:
os.makedirs(dstDir)
if self.opts.stdout:
sys.stdout.write(output)
else:
f = open(dst, 'w')
f.write(output)
f.close()
##################################################
## if run from the command line
if __name__ == '__main__': CheetahWrapper().main()
# vim: shiftwidth=4 tabstop=4 expandtab
|
carvalhomb/tsmells
|
lib/Cheetah/src/CheetahWrapper.py
|
Python
|
gpl-2.0
| 21,599
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterternary.hoverlabel"
_path_str = "scatterternary.hoverlabel.font"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
family .
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
family .
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterternary.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.hoverlabel.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
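# --- Illustrative usage sketch (not part of the generated plotly source) ---
# Constructs the hover-label font directly; the family/size/color values are
# arbitrary samples within the constraints documented on each property.
def _demo_font():
    f = Font(family="Arial", size=12, color="#444444")
    # to_plotly_json() comes from the plotly base type and returns the
    # plain-dict representation of the object.
    return f.to_plotly_json()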
|
plotly/python-api
|
packages/python/plotly/plotly/graph_objs/scatterternary/hoverlabel/_font.py
|
Python
|
mit
| 11,245
|
import os.path
def get_path(type):
path = []
tmp = type
while tmp.parent is not None:
path.insert(0, tmp)
tmp = tmp.parent
if type.type_name == 'Namespace':
path = [part.identifier.lower_case for part in path] + ['__init__.py']
else:
path = [part.identifier.lower_case for part in path[:-1]]\
+ [path[-1].identifier.lower_case + '.py']
    return os.path.join(*(['.'] + path))
def name_to_string(type):
    if type.type_name == 'Namespace':
return type.identifier.lower_camel_case
else:
return type.identifier.upper_camel_case
def get_fqn(type):
path = []
tmp = type
while tmp.parent is not None:
path.insert(0, tmp)
tmp = tmp.parent
return '.'.join(name_to_string(part) for part in path)
def get_import_path(type, relative_to=None):
relativity = []
if relative_to is None:
path = []
tmp = type
while tmp.parent is not None:
path.insert(0, tmp)
tmp = tmp.parent
else:
tmp2 = relative_to
while tmp2.parent is not None:
relativity.append('.')
path = []
tmp = type
while tmp.parent is not None:
path.insert(0, tmp)
if tmp.parent is tmp2.parent:
break
tmp = tmp.parent
else:
tmp2 = tmp2.parent
continue
break
return ''.join(relativity) + '.'.join(part.identifier.lower_case for part in path)
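# --- Illustrative sketch (not part of the original library template) ---
# A minimal fake metamodel, just enough to exercise the helpers above; the
# _Ident/_Type classes are hypothetical stand-ins for the real objects.
class _Ident(object):
    def __init__(self, name):
        self.lower_case = name.lower()
        self.lower_camel_case = name[0].lower() + name[1:]
        self.upper_camel_case = name[0].upper() + name[1:]

class _Type(object):
    def __init__(self, name, type_name, parent=None):
        self.identifier = _Ident(name)
        self.type_name = type_name
        self.parent = parent

def _demo_paths():
    # The root has parent None, so it never contributes to paths or names.
    root = _Type('lib', 'Namespace')
    ns = _Type('widgets', 'Namespace', root)
    cls = _Type('Button', 'Class', ns)
    # get_path(cls) -> './widgets/button.py'  (POSIX separators)
    # get_fqn(cls)  -> 'widgets.Button'
    return get_path(cls), get_fqn(cls)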
|
umlfri/umlfri2
|
addons/python_starter/libraryTemplate/lib/paths.py
|
Python
|
gpl-3.0
| 1,650
|
import pandas as pd
import numpy as np
import sys
from suds.client import Client as sudsclient
import ssl
from .plots import *
david_categories = [
'GOTERM_BP_FAT', 'GOTERM_CC_FAT', 'GOTERM_MF_FAT', 'KEGG_PATHWAY',
'BIOCARTA', 'PFAM', 'PROSITE' ]
david_fields = [
'categoryName', 'termName', 'listHits', 'percent',
'ease', 'geneIds', 'listTotals', 'popHits', 'popTotals',
'foldEnrichment', 'bonferroni', 'benjamini', 'afdr']
# include:
# 'fisher'
# 'termName' to 'term' and 'term_name'
def DAVIDenrich(database, categories, user, ids, ids_bg = None, name = '', name_bg = '', verbose = False, p = 0.1, n = 2):
# Modified from https://david.ncifcrf.gov/content.jsp?file=WS.html
# by courtesy of HuangYi @ 20110424
"""
Queries the DAVID database for an enrichment analysis
    Check https://david.ncifcrf.gov/content.jsp?file=DAVID_API.html for the valid values of database (the "type" tag) and categories (the "annot" tag).
:param database: A string for the database to query, e.g. 'WORMBASE_GENE_ID'
:param categories: A comma separated string with databases
:param user: A user ID registered at DAVID for querying
:param ids: A list with identifiers
:param name: A string with the name for the query set
:param ids_bg: A list with the background identifiers to enrich against,
'None' for whole set
:param name_bg: A string with the name for the background set
:param p: Maximum p value for enrichment of a term
:param n: Minimum number of genes within a term
:returns: None if no ids match the queried database, or a pandas data frame with results
"""
ids = ','.join([str(i) for i in ids])
use_bg = 0
if ids_bg is not None:
ids_bg = ','.join([str(i) for i in ids_bg])
ssl._create_default_https_context = ssl._create_unverified_context
url = 'https://david.ncifcrf.gov/webservice/services/DAVIDWebService?wsdl'
client = sudsclient(url)
client.wsdl.services[0].setlocation('https://david.ncifcrf.gov/webservice/services/DAVIDWebService.DAVIDWebServiceHttpSoap11Endpoint/')
client_auth = client.service.authenticate(user)
if verbose:
print('User Authentication:', client_auth)
sys.stdout.flush()
size = client.service.addList(ids, database, name, 0) #| inputListIds,idType,listName,listType)
if verbose:
print('Mapping rate of ids: ', str(size))
sys.stdout.flush()
if not float(size) > float(0):
return None
if ids_bg is not None:
size_bg = client.service.addList(ids_bg, database, name_bg, 1)
if verbose:
print('Mapping rate of background ids: ', str(size_bg))
sys.stdout.flush()
client_categories = client.service.setCategories(categories)
if verbose:
print('Categories used: ', client_categories)
sys.stdout.flush()
client_report = client.service.getChartReport(p, n)
size_report = len(client_report)
if verbose:
print('Records reported: ', str(size_report))
sys.stdout.flush()
if size_report > 0:
df = []
for r in client_report:
d = dict(r)
line = []
for f in david_fields:
line.append(str(d[f]).encode('ascii','ignore'))
df.append(line)
df = pd.DataFrame(df)
df.columns=david_fields
for col in david_fields:
df[col] = df[col].apply(lambda x: x.decode())
else:
df=None
return df
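# --- Illustrative usage sketch (not part of the original AGEpy source) ---
# A real query needs an account registered at DAVID; the e-mail address
# placeholder and the two Ensembl gene ids below are only examples.
def _demo_DAVIDenrich(user):
    return DAVIDenrich(
        database='ENSEMBL_GENE_ID',
        categories='GOTERM_BP_FAT,KEGG_PATHWAY',
        user=user,  # e.g. 'you@example.com', registered with DAVID
        ids=['ENSG00000141510', 'ENSG00000012048'],
        name='demo',
        verbose=True)
# Returns None when no id maps to the database, otherwise a DataFrame with
# one row per enriched term and the columns listed in david_fields.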
def id_nameDAVID(df,GTF=None,name_id=None):
"""
    Given a DAVIDenrich output it converts ensembl gene ids to gene names and adds this column to the output
:param df: a dataframe output from DAVIDenrich
:param GTF: a GTF dataframe from readGTF()
:param name_id: instead of a gtf dataframe a dataframe with the columns 'gene_name' and 'gene_id' can be given as input
:returns: a pandas dataframe with a gene name column added to it.
"""
if name_id is None:
gene_name=retrieve_GTF_field('gene_name',GTF)
gene_id=retrieve_GTF_field('gene_id', GTF)
GTF=pd.concat([gene_name,gene_id],axis=1)
else:
GTF=name_id.copy()
df['Gene_names']="genes"
terms=df['termName'].tolist()
enrichN=pd.DataFrame()
for term in terms:
tmp=df[df['termName']==term]
tmp=tmp.reset_index(drop=True)
ids=tmp.xs(0)['geneIds']
ids=pd.DataFrame(data=ids.split(", "))
ids.columns=['geneIds']
ids['geneIds']=ids['geneIds'].map(str.lower)
GTF['gene_id']=GTF['gene_id'].astype(str)
GTF['gene_id']=GTF['gene_id'].map(str.lower)
ids=pd.merge(ids, GTF, how='left', left_on='geneIds', right_on='gene_id')
names=ids['gene_name'].tolist()
names= ', '.join(names)
tmp["Gene_names"]=names
#tmp=tmp.replace(to_replace=tmp.xs(0)['Gene_names'], value=names)
enrichN=pd.concat([enrichN, tmp])
enrichN=enrichN.reset_index(drop=True)
gene_names=enrichN[['Gene_names']]
gpos=enrichN.columns.get_loc("geneIds")
enrichN=enrichN.drop(['Gene_names'],axis=1)
cols=enrichN.columns.tolist()
enrichN=pd.concat([enrichN[cols[:gpos+1]],gene_names,enrichN[cols[gpos+1:]]],axis=1)
return enrichN
def DAVIDgetGeneAttribute(x,df,refCol="ensembl_gene_id",fieldTOretrieve="gene_name"):
"""
Returns a list of gene names for given gene ids.
:param x: a string with the list of IDs separated by ', '
:param df: a dataframe with the reference column and a the column to retrieve
:param refCol: the header of the column containing the identifiers
    :param fieldTOretrieve: the field to retrieve from parsedGTF, e.g. 'gene_name'
    :returns: list of fieldTOretrieve values separated by ', ', in the same order as given in x
"""
l=x.split(", ")
l=[ s.upper() for s in l ]
tmpdf=pd.DataFrame({refCol:l},index=range(len(l)))
df_fix=df[[refCol,fieldTOretrieve]].drop_duplicates()
sys.stdout.flush()
df_fix[refCol]=df_fix[refCol].apply(lambda x: x.upper())
ids=pd.merge(tmpdf,df_fix,how="left",on=[refCol])
ids=ids[fieldTOretrieve].tolist()
ids=[ str(s) for s in ids ]
ids=", ".join(ids)
return ids
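# --- Illustrative sketch (not part of the original AGEpy source) ---
# Exercises DAVIDgetGeneAttribute with a tiny hypothetical reference table;
# the two gene ids and names are made up for the example.
def _demo_DAVIDgetGeneAttribute():
    ref = pd.DataFrame({"ensembl_gene_id": ["ENSG1", "ENSG2"],
                        "gene_name": ["TP53", "BRCA1"]})
    # Ids arrive as a single ', '-separated string, as in DAVID's geneIds
    # column; matching is case-insensitive since both sides are upper-cased.
    return DAVIDgetGeneAttribute("ensg1, ensg2", ref)  # -> 'TP53, BRCA1'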
def DAVIDplot(database, categories, user, df_ids, output, df_ids_bg = None, name = '', \
name_bg = '', verbose = False, p = 0.1, n = 2):
"""
Queries the DAVID database for an enrichment analysis and plots CellPlots as
well as SymPlots (see plots).
    Check https://david.ncifcrf.gov/content.jsp?file=DAVID_API.html for the valid values of database (the "type" tag) and categories (the "annot" tag).
:param database: a string for the database to query, e.g. 'WORMBASE_GENE_ID'
:param categories: a comma separated string with databases
:param user: a user ID registered at DAVID for querying
:param df_ids: a dataframe where the first column contains the identifiers
to be queried and the second column the respective log2fc for each identifier.
:param output: /path/to/output/prefix
:param df_ids_bg: a dataframe where the first column contains the identifiers
to be used as background. None for whole set.
:param name: a string with the name for the query set
:param name_bg: a string with the name for the background set
:param p: Maximum p value for enrichment of a term
:param n: Minimum number of genes within a term
:returns: Nothing
"""
idsc1=df_ids.columns.tolist()[0]
idsc2=df_ids.columns.tolist()[1]
ids=df_ids[idsc1].tolist()
if type(df_ids_bg)==type(pd.DataFrame()):
ids_bg=df_ids_bg[df_ids_bg.columns.tolist()[0]]
else:
ids_bg=None
print(categories)
david=DAVIDenrich(database, categories, user, ids, ids_bg = ids_bg, \
name = name, name_bg = name_bg, verbose = verbose, p = p, n = n)
if type(david)!=type(pd.DataFrame()):
print("For this dataset no enrichments could be returned.")
sys.stdout.flush()
else:
david[idsc2]=david["geneIds"].apply(lambda x: \
DAVIDgetGeneAttribute(x,\
df_ids,\
refCol=idsc1,\
fieldTOretrieve=idsc2))
david[idsc2]=david[idsc2].apply(lambda x: x.replace(", ", ","))
EXC=pd.ExcelWriter(output+".xlsx")
for category in list(set(david["categoryName"].tolist())):
david_=david[david["categoryName"]==category]
print(category)
david_.to_excel(EXC,category)
tmp=david_[:20]
tmp["-log10(p)"]=np.log10(tmp["ease"].astype(float)) * -1
#tmp["Term"]=tmp['termName']
#tmp["Annotated"]=tmp["listHits"]
cellplot=CellPlot(tmp, output_file=output+"."+category, gene_expression_col=idsc2, gene_expression=idsc2, \
figure_title=category+"\n"+output.split("/")[-1], pvalCol="ease", \
lowerLimit=None, upperLimit=None, colorBarType='bwr', xaxis_label = "GO Term -log10(p-value)")
symplot=SymPlot(tmp, output_file=output+"."+category, \
gene_expression_col=idsc2,\
figure_title=category+"\n"+output.split("/")[-1], \
pvalCol="ease", xaxis_label = "GO Term -log10(p-value)")
EXC.save()
|
mpg-age-bioinformatics/AGEpy
|
AGEpy/david.py
|
Python
|
mit
| 9,205
|
# (C) British Crown Copyright 2011 - 2019, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Provide shapely geometry <-> matplotlib path support.
See also `Shapely Geometric Objects <see_also_shapely>`_
and `Matplotlib Path API <https://matplotlib.org/api/path_api.html>`_.
.. see_also_shapely:
https://shapely.readthedocs.io/en/latest/manual.html#geometric-objects
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import matplotlib
from matplotlib.path import Path
import shapely.geometry as sgeom
def geos_to_path(shape):
"""
Create a list of :class:`matplotlib.path.Path` objects that describe
a shape.
Parameters
----------
shape
A list, tuple or single instance of any of the following
types: :class:`shapely.geometry.point.Point`,
:class:`shapely.geometry.linestring.LineString`,
:class:`shapely.geometry.linestring.LinearRing`,
:class:`shapely.geometry.polygon.Polygon`,
:class:`shapely.geometry.multipoint.MultiPoint`,
:class:`shapely.geometry.multipolygon.MultiPolygon`,
:class:`shapely.geometry.multilinestring.MultiLineString`,
:class:`shapely.geometry.collection.GeometryCollection`,
or any type with a _as_mpl_path() method.
Returns
-------
paths
A list of :class:`matplotlib.path.Path` objects.
"""
if isinstance(shape, (list, tuple)):
paths = []
for shp in shape:
paths.extend(geos_to_path(shp))
return paths
if isinstance(shape, sgeom.LinearRing):
return [Path(np.column_stack(shape.xy), closed=True)]
elif isinstance(shape, (sgeom.LineString, sgeom.Point)):
return [Path(np.column_stack(shape.xy))]
elif isinstance(shape, sgeom.Polygon):
def poly_codes(poly):
codes = np.ones(len(poly.xy[0])) * Path.LINETO
codes[0] = Path.MOVETO
codes[-1] = Path.CLOSEPOLY
return codes
if shape.is_empty:
return []
vertices = np.concatenate([np.array(shape.exterior.xy)] +
[np.array(ring.xy) for ring in
shape.interiors], 1).T
codes = np.concatenate([poly_codes(shape.exterior)] +
[poly_codes(ring) for ring in shape.interiors])
return [Path(vertices, codes)]
elif isinstance(shape, (sgeom.MultiPolygon, sgeom.GeometryCollection,
sgeom.MultiLineString, sgeom.MultiPoint)):
paths = []
for geom in shape.geoms:
paths.extend(geos_to_path(geom))
return paths
elif hasattr(shape, '_as_mpl_path'):
vertices, codes = shape._as_mpl_path()
return [Path(vertices, codes)]
else:
raise ValueError('Unsupported shape type {}.'.format(type(shape)))
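# --- Illustrative sketch (not part of the original cartopy source) ---
# Converts a unit square to a single matplotlib Path; purely a usage
# demonstration of geos_to_path() on a simple polygon.
def _demo_square_to_path():
    square = sgeom.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    path, = geos_to_path(square)
    # The exterior ring becomes a MOVETO followed by LINETOs and a final
    # CLOSEPOLY, matching poly_codes() above.
    return path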
def path_segments(path, **kwargs):
"""
Create an array of vertices and a corresponding array of codes from a
:class:`matplotlib.path.Path`.
Parameters
----------
path
A :class:`matplotlib.path.Path` instance.
Other Parameters
----------------
kwargs
See :func:`matplotlib.path.iter_segments` for details of the keyword
arguments.
Returns
-------
vertices, codes
A (vertices, codes) tuple, where vertices is a numpy array of
coordinates, and codes is a numpy array of matplotlib path codes.
See :class:`matplotlib.path.Path` for information on the types of
codes and their meanings.
"""
pth = path.cleaned(**kwargs)
return pth.vertices[:-1, :], pth.codes[:-1]
def path_to_geos(path, force_ccw=False):
"""
Create a list of Shapely geometric objects from a
:class:`matplotlib.path.Path`.
Parameters
----------
path
A :class:`matplotlib.path.Path` instance.
Other Parameters
----------------
force_ccw
Boolean flag determining whether the path can be inverted to enforce
ccw. Defaults to False.
Returns
-------
A list of instances of the following type(s):
:class:`shapely.geometry.polygon.Polygon`,
:class:`shapely.geometry.linestring.LineString` and/or
:class:`shapely.geometry.multilinestring.MultiLineString`.
"""
# Convert path into numpy array of vertices (and associated codes)
path_verts, path_codes = path_segments(path, curves=False)
# Split into subarrays such that each subarray consists of connected
# line segments based on the start of each one being marked by a
# matplotlib MOVETO code.
verts_split_inds = np.where(path_codes == Path.MOVETO)[0]
verts_split = np.split(path_verts, verts_split_inds)
codes_split = np.split(path_codes, verts_split_inds)
# Iterate through the vertices generating a list of
# (external_geom, [internal_polygons]) tuples.
other_result_geoms = []
collection = []
for path_verts, path_codes in zip(verts_split, codes_split):
if len(path_verts) == 0:
continue
verts_same_as_first = np.all(path_verts[0, :] == path_verts[1:, :],
axis=1)
if all(verts_same_as_first):
geom = sgeom.Point(path_verts[0, :])
elif path_verts.shape[0] > 4 and path_codes[-1] == Path.CLOSEPOLY:
geom = sgeom.Polygon(path_verts[:-1, :])
elif (matplotlib.__version__ < '2.2.0' and
# XXX A path can be given which does not end with close poly,
# in that situation, we have to guess?
path_verts.shape[0] > 3 and verts_same_as_first[-1]):
geom = sgeom.Polygon(path_verts)
else:
geom = sgeom.LineString(path_verts)
# If geom is a Polygon and is contained within the last geom in
# collection, add it to its list of internal polygons, otherwise
# simply append it as a new external geom.
if geom.is_empty:
pass
elif (len(collection) > 0 and
isinstance(collection[-1][0], sgeom.Polygon) and
isinstance(geom, sgeom.Polygon) and
collection[-1][0].contains(geom.exterior)):
collection[-1][1].append(geom.exterior)
elif isinstance(geom, sgeom.Point):
other_result_geoms.append(geom)
else:
collection.append((geom, []))
# Convert each (external_geom, [internal_polygons]) pair into a
# a shapely Polygon that encapsulates the internal polygons, if the
# external geom is a LineString leave it alone.
geom_collection = []
for external_geom, internal_polys in collection:
if internal_polys:
# XXX worry about islands within lakes
geom = sgeom.Polygon(external_geom.exterior, internal_polys)
else:
geom = external_geom
# Correctly orientate the polygon (ccw)
if isinstance(geom, sgeom.Polygon):
if force_ccw and not geom.exterior.is_ccw:
geom = sgeom.polygon.orient(geom)
geom_collection.append(geom)
# If the geom_collection only contains LineStrings combine them
# into a single MultiLinestring.
if geom_collection and all(isinstance(geom, sgeom.LineString) for
geom in geom_collection):
geom_collection = [sgeom.MultiLineString(geom_collection)]
# Remove any zero area Polygons
def not_zero_poly(geom):
return ((isinstance(geom, sgeom.Polygon) and not geom._is_empty and
geom.area != 0) or
not isinstance(geom, sgeom.Polygon))
result = list(filter(not_zero_poly, geom_collection))
return result + other_result_geoms
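# --- Illustrative sketch (not part of the original cartopy source) ---
# Round-trips a unit square through both conversions; for such a simple
# ring the recovered geometry should compare equal to the original.
def _demo_roundtrip():
    square = sgeom.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    geom, = path_to_geos(geos_to_path(square)[0])
    return geom.equals(square)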
|
ocefpaf/cartopy
|
lib/cartopy/mpl/patch.py
|
Python
|
lgpl-3.0
| 8,414
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re
import sys
from collections import defaultdict
from paddle.fluid.framework import Program, Variable, name_scope, default_main_program
from . import framework
from . import layers
from .backward import append_backward
from .framework import program_guard
from . import unique_name
from .initializer import Constant
from .layer_helper import LayerHelper
from .regularizer import append_regularization_ops
from .clip import append_gradient_clip_ops, error_clip_callback
from contextlib import contextmanager
from .layers import ops
__all__ = [
'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl',
'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer',
'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'RMSPropOptimizer',
'FtrlOptimizer', 'Adadelta', 'ModelAverage', 'LarsMomentum',
'LarsMomentumOptimizer'
]
class Optimizer(object):
"""Optimizer Base class.
Define the common interface of an optimizer.
User should not use this class directly,
but need to use one of it's implementation.
"""
def __init__(self, learning_rate, regularization=None, name=None):
if not isinstance(learning_rate, float) and \
not isinstance(learning_rate, framework.Variable):
raise TypeError("learning rate should be float or Variable")
self._name = name
self.regularization = regularization
self._learning_rate = learning_rate
        # the learning rate type should be inferred from loss
self._dtype = None
        # each program should have an independent learning rate
# program -> Variable(learning_rate)
self._learning_rate_map = dict()
if isinstance(self._learning_rate, framework.Variable):
self._learning_rate_map[framework.default_main_program(
)] = self._learning_rate
# Dictionary of accumulators. Some optimizer subclasses need to
# allocate and manage extra variables associated with the parameters
# to train. These variables are called accumulators.
        # {accum_name : { parameter_name : accumulator_for_parameter, ...}, ...}
self._accumulators = defaultdict(lambda: dict())
self.helper = None
def _create_global_learning_rate(self):
lr = self._global_learning_rate()
if isinstance(lr, framework.Variable):
return
else:
if not isinstance(self._learning_rate, float):
raise TypeError(
"learning rate variable is create outside optimizer,"
"can not create new learning rate variable for new program")
# create learning rate in the current main program
self._learning_rate_map[framework.default_main_program(
)] = layers.create_global_var(
name=unique_name.generate("learning_rate"),
shape=[1],
value=float(self._learning_rate),
                dtype='float32' if self._dtype is None else self._dtype,
persistable=True)
def _global_learning_rate(self, program=None):
"""
get global decayed learning rate
:return:
"""
if program is None:
program = framework.default_main_program()
return self._learning_rate_map.get(program, None)
def _append_optimize_op(self, block, param_and_grad):
""" append optimize operator to block and return all the added optimize_op
"""
raise NotImplementedError()
def _create_param_lr(self, param_and_grad):
# create learning rate variable for every parameter
param = param_and_grad[0]
param_lr = param.optimize_attr['learning_rate']
if type(param_lr) == Variable:
return param_lr
else:
if param_lr == 1.0:
return self._global_learning_rate()
else:
with default_main_program()._lr_schedule_guard(
is_with_opt=True), framework.name_scope(
'scale_with_param_lr'):
return self._global_learning_rate() * param_lr
def _create_accumulators(self, block, parameters):
"""Create all accumulators needed by the parameters
Args:
block: the block in which the loss variable is present
parameters: list of parameter variables for the optimizer
"""
pass
def _finish_update(self, block, parameters_and_grads):
"""Finish any custom updates needed
before completing an optimization step
Args:
block: the block in which the loss variable is present
parameters: list of parameter variables for the optimizer
Returns:
None
"""
pass
def _add_accumulator(self,
name,
param,
dtype=None,
fill_value=0.0,
shape=None):
"""Utility function to add an accumulator for a parameter
Args:
block: the block in which the loss variable is present
name: name of the accumulator
param: parameter variable for which accumulator is to be added
dtype: data type of the accumulator variable
fill_value: value to initialize the accumulator variable
"""
if self._name is not None:
name = self._name + "_" + name
if (name in self._accumulators and
param.name in self._accumulators[name]):
raise Exception("Accumulator {} already exists for parameter {}".
format(name, param.name))
        if shape is None:
shape = param.shape
assert isinstance(self.helper, LayerHelper)
var = self.helper.create_global_variable(
name=unique_name.generate(name),
persistable=True,
dtype=dtype or param.dtype,
type=param.type,
shape=shape)
self.helper.set_variable_initializer(
var, initializer=Constant(value=float(fill_value)))
self._accumulators[name][param.name] = var
return var
def _get_accumulator(self, name, param):
"""Utility function to fetch an accumulator for a parameter
Args:
name: name of the accumulator
param: parameter variable for which accumulator is to be fetched
Returns:
accumulator variable for the parameter
"""
if self._name is not None:
name = self._name + "_" + name
if (name not in self._accumulators or
param.name not in self._accumulators[name]):
raise Exception("Accumulator {} does not exist for parameter {}".
format(name, param.name))
return self._accumulators[name][param.name]
def _create_optimization_pass(self,
parameters_and_grads,
loss,
startup_program=None):
"""Add optimization operators to update gradients to variables.
Args:
loss(Variable): the target that this optimization is for.
parameters_and_grads(list(tuple(Variable, Variable))):
a list of (variable, gradient) pair to update.
Returns:
return_op_list: a list of operators that will complete one step of
optimization. This will include parameter update ops, global step
update ops and any other custom ops required by subclasses to manage
their internal state.
"""
# This is a default implementation of create_optimization_pass that
# can be shared by most optimizers. This implementation assumes that
# the subclass will implement the _append_optimize_op method and the
# _initialize_tensors method. The subclass can extend the
# _create_accumulators method if it needs to create accumulators
# for parameters and extend _finish_update method to add custom ops.
# Create any accumulators
program = loss.block.program
self._dtype = loss.dtype
with program_guard(program, startup_program):
global_block = framework.default_main_program().global_block()
start = len(global_block.ops)
self.helper = LayerHelper(self.__class__.__name__)
self._create_accumulators(loss.block,
[p[0] for p in parameters_and_grads])
self._create_global_learning_rate()
optimize_ops = []
for param_and_grad in parameters_and_grads:
if param_and_grad[1] is None:
continue
with param_and_grad[0].block.program._optimized_guard(
param_and_grad), name_scope("optimizer"):
if param_and_grad[0].trainable is True:
optimize_op = self._append_optimize_op(loss.block,
param_and_grad)
optimize_ops.append(optimize_op)
# Get custom finish ops for subclasses
# FIXME: Need to fix this once we figure out how to handle dependencies
self._finish_update(loss.block, parameters_and_grads)
end = len(global_block.ops)
return global_block._slice_ops(start, end)
def minimize(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None):
"""Add operations to minimize `loss` by updating `parameter_list`.
This method combines interface `append_backward()` and
`create_optimization_pass()` into one.
"""
params_grads = append_backward(loss, parameter_list, no_grad_set,
[error_clip_callback])
params_grads = sorted(params_grads, key=lambda x: x[0].name)
params_grads = append_gradient_clip_ops(params_grads)
# Add regularization if any
params_grads = append_regularization_ops(params_grads,
self.regularization)
optimize_ops = self._create_optimization_pass(params_grads, loss,
startup_program)
return optimize_ops, params_grads
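# --- Illustrative usage sketch (not part of the original Paddle source) ---
# The typical pattern for every concrete optimizer below; `cost` is assumed
# to be a scalar loss Variable built in the default main program.
def _demo_minimize(cost):
    sgd = SGDOptimizer(learning_rate=0.01)
    # Returns the optimize ops plus the (parameter, gradient) pairs that
    # were appended by the backward pass.
    return sgd.minimize(cost)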
class SGDOptimizer(Optimizer):
"""
Optimizer of the stochastic gradient descent algorithm.
.. math::
param\_out = param - learning\_rate * grad
Args:
learning_rate (float|Variable): the learning rate used to update parameters. \
Can be a float value or a Variable with one float value as data element.
regularization: A Regularizer, such as
fluid.regularizer.L2DecayRegularizer.
        name: An optional name prefix.
Examples:
.. code-block:: python
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.2)
sgd_optimizer.minimize(cost)
"""
def __init__(self, learning_rate, regularization=None, name=None):
assert learning_rate is not None
super(SGDOptimizer, self).__init__(
learning_rate=learning_rate,
regularization=regularization,
name=name)
self.type = "sgd"
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
# create the optimize op
sgd_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"LearningRate": self._create_param_lr(param_and_grad)
},
outputs={"ParamOut": param_and_grad[0]})
return sgd_op
class MomentumOptimizer(Optimizer):
"""
Simple Momentum optimizer with velocity state
    This optimizer has a flag for Nesterov Momentum.
The update equations are as follows:
.. math::
& velocity = mu * velocity + gradient
& if (use\_nesterov):
&\quad param = param - (gradient + mu * velocity) * learning\_rate
& else:
&\quad param = param - learning\_rate * velocity
Args:
learning_rate (float|Variable): the learning rate used to update parameters. \
Can be a float value or a Variable with one float value as data element.
momentum (float): momentum factor
use_nesterov (bool): enables Nesterov momentum
regularization: A Regularizer, such as
fluid.regularizer.L2DecayRegularizer.
        name: An optional name prefix.
Examples:
.. code-block:: python
optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
optimizer.minimize(cost)
"""
_velocity_acc_str = "velocity"
def __init__(self,
learning_rate,
momentum,
use_nesterov=False,
regularization=None,
name=None):
assert learning_rate is not None
assert momentum is not None
super(MomentumOptimizer, self).__init__(
learning_rate=learning_rate,
regularization=regularization,
name=name)
self.type = "momentum"
self._momentum = momentum
self._use_nesterov = bool(use_nesterov)
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
for p in parameters:
self._add_accumulator(self._velocity_acc_str, p)
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
velocity_acc = self._get_accumulator(self._velocity_acc_str,
param_and_grad[0])
# create the momentum optimize op
momentum_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"Velocity": velocity_acc,
"LearningRate": self._create_param_lr(param_and_grad)
},
outputs={
"ParamOut": param_and_grad[0],
"VelocityOut": velocity_acc
},
attrs={"mu": self._momentum,
"use_nesterov": self._use_nesterov})
return momentum_op
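# --- Illustrative sketch (not part of the original Paddle source) ---
# A plain-Python transcription of the momentum update documented above,
# to make the velocity math concrete outside of a fluid Program; works on
# floats or NumPy arrays alike.
def _demo_momentum_step(param, grad, velocity, lr=0.2, mu=0.1,
                        use_nesterov=False):
    velocity = mu * velocity + grad
    if use_nesterov:
        param = param - (grad + mu * velocity) * lr
    else:
        param = param - lr * velocity
    return param, velocity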
class LarsMomentumOptimizer(Optimizer):
"""
Momentum optimizer with LARS support
The update equations are as follows:
.. math::
& local\_learning\_rate = learning\_rate * lars\_coeff * \\
\\frac{||param||}{||gradient|| + lars\_weight\_decay * ||param||}
& velocity = mu * velocity + local\_learning\_rate * (gradient + lars\_weight\_decay * param)
& param = param - velocity
Args:
learning_rate (float|Variable): the learning rate used to update parameters. \
Can be a float value or a Variable with one float value as data element.
momentum (float): momentum factor
lars_coeff (float): defines how much we trust the layer to change its weights.
lars_weight_decay (float): weight decay coefficient for decaying using LARS.
regularization: A Regularizer, such as
fluid.regularizer.L2DecayRegularizer.
        name: An optional name prefix.
Examples:
.. code-block:: python
optimizer = fluid.optimizer.LarsMomentum(learning_rate=0.2, momentum=0.1, lars_weight_decay=0.001)
optimizer.minimize(cost)
"""
_velocity_acc_str = "velocity"
def __init__(self,
learning_rate,
momentum,
lars_coeff=0.001,
lars_weight_decay=0.0005,
regularization=None,
name=None):
assert learning_rate is not None
assert momentum is not None
super(LarsMomentumOptimizer, self).__init__(
learning_rate=learning_rate,
regularization=regularization,
name=name)
self.type = "lars_momentum"
self._momentum = momentum
self._lars_coeff = float(lars_coeff)
self._lars_weight_decay = float(lars_weight_decay)
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
for p in parameters:
self._add_accumulator(self._velocity_acc_str, p)
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
velocity_acc = self._get_accumulator(self._velocity_acc_str,
param_and_grad[0])
# create the momentum optimize op
momentum_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"Velocity": velocity_acc,
"LearningRate": self._create_param_lr(param_and_grad)
},
outputs={
"ParamOut": param_and_grad[0],
"VelocityOut": velocity_acc
},
attrs={
"mu": self._momentum,
"lars_coeff": self._lars_coeff,
"lars_weight_decay": self._lars_weight_decay
})
return momentum_op
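# --- Illustrative sketch (not part of the original Paddle source) ---
# A NumPy transcription of the LARS update documented above; the local
# learning rate rescales the step per layer by the weight/gradient norms.
def _demo_lars_momentum_step(param, grad, velocity, lr=0.2, mu=0.1,
                             lars_coeff=0.001, lars_weight_decay=0.0005):
    import numpy as np
    p_norm = np.linalg.norm(param)
    g_norm = np.linalg.norm(grad)
    local_lr = lr * lars_coeff * p_norm / (g_norm + lars_weight_decay * p_norm)
    velocity = mu * velocity + local_lr * (grad + lars_weight_decay * param)
    param = param - velocity
    return param, velocity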
class AdagradOptimizer(Optimizer):
"""
**Adaptive Gradient Algorithm (Adagrad)**
The update is done as follows:
.. math::
moment\_out &= moment + grad * grad
param\_out &= param - \\frac{learning\_rate * grad}{\sqrt{moment\_out} + \epsilon}
The original paper(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
does not have the epsilon attribute. It is added here in our implementation
as also proposed here: http://cs231n.github.io/neural-networks-3/#ada
for numerical stability to avoid the division by zero error.
Args:
learning_rate (float|Variable): the learning rate used to update parameters. \
Can be a float value or a Variable with one float value as data element.
epsilon (float): a small float value for numerical stability.
regularization: A Regularizer, such as
fluid.regularizer.L2DecayRegularizer.
        name: An optional name prefix.
Examples:
.. code-block:: python
optimizer = fluid.optimizer.Adagrad(learning_rate=0.2)
optimizer.minimize(cost)
"""
_moment_acc_str = "moment"
def __init__(self,
learning_rate,
epsilon=1.0e-6,
regularization=None,
name=None):
assert learning_rate is not None
assert epsilon is not None
super(AdagradOptimizer, self).__init__(
learning_rate=learning_rate,
regularization=regularization,
name=name)
self.type = "adagrad"
self._epsilon = epsilon
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
for p in parameters:
self._add_accumulator(self._moment_acc_str, p)
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
moment_acc = self._get_accumulator(self._moment_acc_str,
param_and_grad[0])
# Create the adagrad optimizer op
adagrad_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"Moment": moment_acc,
"LearningRate": self._create_param_lr(param_and_grad)
},
outputs={"ParamOut": param_and_grad[0],
"MomentOut": moment_acc},
attrs={"epsilon": self._epsilon})
return adagrad_op
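# --- Illustrative sketch (not part of the original Paddle source) ---
# A NumPy transcription of the Adagrad update documented above; the epsilon
# default matches the optimizer's.
def _demo_adagrad_step(param, grad, moment, lr=0.2, epsilon=1.0e-6):
    import numpy as np
    moment = moment + grad * grad
    param = param - lr * grad / (np.sqrt(moment) + epsilon)
    return param, moment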
class AdamOptimizer(Optimizer):
"""
This implements the Adam optimizer from Section 2 of the Adam
paper : https://arxiv.org/abs/1412.6980.
Adam is a first-order gradient-based optimization method based on
adaptive estimates of lower-order moments.
Adam updates:
.. math::
t & = t + 1
moment\_1\_out & = {\\beta}_1 * moment\_1 + (1 - {\\beta}_1) * grad
moment\_2\_out & = {\\beta}_2 * moment\_2 + (1 - {\\beta}_2) * grad * grad
learning\_rate & = learning\_rate * \\
\\frac{\sqrt{1 - {\\beta}_2^t}}{1 - {\\beta}_1^t}
param\_out & = param - learning\_rate * \\frac{moment\_1}{\sqrt{moment\_2} + \epsilon}
Args:
learning_rate (float|Variable): the learning rate used to update parameters. \
Can be a float value or a Variable with one float value as data element.
beta1 (float): The exponential decay rate for the 1st moment estimates.
beta2 (float): The exponential decay rate for the 2nd moment estimates.
epsilon (float): a small float value for numerical stability.
regularization: A Regularizer, such as
fluid.regularizer.L2DecayRegularizer.
        name: An optional name prefix.
Examples:
.. code-block:: python
optimizer = fluid.optimizer.Adam(learning_rate=0.2)
optimizer.minimize(cost)
"""
_moment1_acc_str = "moment1"
_moment2_acc_str = "moment2"
_beta1_pow_acc_str = "beta1_pow_acc"
_beta2_pow_acc_str = "beta2_pow_acc"
def __init__(self,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
regularization=None,
name=None):
assert learning_rate is not None
assert beta1 is not None
assert beta2 is not None
assert epsilon is not None
super(AdamOptimizer, self).__init__(
learning_rate=learning_rate,
regularization=regularization,
name=name)
self.type = "adam"
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
# Create accumulator tensors for first and second moments
for p in parameters:
self._add_accumulator(self._moment1_acc_str, p)
self._add_accumulator(self._moment2_acc_str, p)
self._add_accumulator(
name=self._beta1_pow_acc_str,
param=p,
dtype='float32',
fill_value=self._beta1,
shape=[1])
self._add_accumulator(
name=self._beta2_pow_acc_str,
param=p,
dtype='float32',
fill_value=self._beta2,
shape=[1])
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
moment1 = self._get_accumulator(self._moment1_acc_str,
param_and_grad[0])
moment2 = self._get_accumulator(self._moment2_acc_str,
param_and_grad[0])
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param_and_grad[0])
beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
param_and_grad[0])
# create the adam optimize op
adam_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"LearningRate": self._create_param_lr(param_and_grad),
"Moment1": moment1,
"Moment2": moment2,
"Beta1Pow": beta1_pow_acc,
"Beta2Pow": beta2_pow_acc
},
outputs={
"ParamOut": param_and_grad[0],
"Moment1Out": moment1,
"Moment2Out": moment2
},
attrs={
"beta1": self._beta1,
"beta2": self._beta2,
"epsilon": self._epsilon
})
return adam_op
def _finish_update(self, block, param_and_grads):
"""Update Beta1 and Beta2 Power accumulators
"""
assert isinstance(block, framework.Block)
main_block = block.program.global_block()
for param, grad in param_and_grads:
if grad is None:
continue
with param.block.program._optimized_guard(
[param, grad]), name_scope("optimizer"):
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param)
beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
param)
main_block.append_op(
type="scale",
inputs={"X": beta1_pow_acc},
outputs={"Out": beta1_pow_acc},
attrs={"scale": self._beta1})
main_block.append_op(
type="scale",
inputs={"X": beta2_pow_acc},
outputs={"Out": beta2_pow_acc},
attrs={"scale": self._beta2})
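# Editor's note: a minimal NumPy sketch (the name `adam_step` is hypothetical,
# not part of the fluid API) of one Adam update as implemented by the op and
# accumulators above. `beta1_pow`/`beta2_pow` hold beta^t, initialized to beta
# and rescaled once per step, mirroring _finish_update.
import numpy as np

def adam_step(param, grad, moment1, moment2, beta1_pow, beta2_pow,
              lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
    # exponential moving averages of the gradient and its square
    moment1 = beta1 * moment1 + (1 - beta1) * grad
    moment2 = beta2 * moment2 + (1 - beta2) * grad * grad
    # bias-corrected step size, matching the docstring math
    lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
    param = param - lr_t * moment1 / (np.sqrt(moment2) + epsilon)
    # the power accumulators are scaled after every step (cf. _finish_update)
    return param, moment1, moment2, beta1_pow * beta1, beta2_pow * beta2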
class AdamaxOptimizer(Optimizer):
"""
We implement the Adamax optimizer from Section 7 of the Adam
paper: https://arxiv.org/abs/1412.6980. Adamax is a variant of the
Adam algorithm based on the infinity norm.
Adamax updates:
.. math::
t & = t + 1
moment\_out & = {\\beta}_1 * moment + (1 - {\\beta}_1) * grad
inf\_norm\_out & = max({\\beta}_2 * inf\_norm + \epsilon, |grad|)
learning\_rate & = \\frac{learning\_rate}{1 - {\\beta}_1^t}
param\_out & = param - learning\_rate * \\frac{moment\_out}{inf\_norm\_out}
The original paper does not have an epsilon attribute.
However, it is added here for numerical stability to prevent the
division by 0 error.
Args:
learning_rate (float|Variable): the learning rate used to update parameters. \
Can be a float value or a Variable with one float value as data element.
beta1 (float): The exponential decay rate for the 1st moment estimates.
beta2 (float): The exponential decay rate for the 2nd moment estimates.
epsilon (float): a small float value for numerical stability.
regularization: A Regularizer, such as
fluid.regularizer.L2DecayRegularizer.
        name: An optional name prefix.
Examples:
.. code-block:: python
optimizer = fluid.optimizer.Adamax(learning_rate=0.2)
optimizer.minimize(cost)
Notes:
Currently, AdamaxOptimizer doesn't support sparse parameter optimization.
"""
_moment_acc_str = "moment"
_inf_norm_acc_str = "inf_norm"
_beta1_pow_acc_str = "beta1_pow_acc"
def __init__(self,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
regularization=None,
name=None):
assert learning_rate is not None
assert beta1 is not None
assert beta2 is not None
assert epsilon is not None
super(AdamaxOptimizer, self).__init__(
learning_rate=learning_rate,
regularization=regularization,
name=name)
self.type = "adamax"
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
def _create_accumulators(self, block, parameters):
# Create accumulator tensors for first moment and infinity norm
for p in parameters:
self._add_accumulator(self._moment_acc_str, p)
self._add_accumulator(self._inf_norm_acc_str, p)
self._add_accumulator(
name=self._beta1_pow_acc_str,
param=p,
dtype='float32',
fill_value=self._beta1,
shape=[1])
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
moment = self._get_accumulator(self._moment_acc_str, param_and_grad[0])
inf_norm = self._get_accumulator(self._inf_norm_acc_str,
param_and_grad[0])
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param_and_grad[0])
# create the adamax optimize op
adamax_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"LearningRate": self._create_param_lr(param_and_grad),
"Moment": moment,
"InfNorm": inf_norm,
"Beta1Pow": beta1_pow_acc
},
outputs={
"ParamOut": param_and_grad[0],
"MomentOut": moment,
"InfNormOut": inf_norm
},
attrs={
"beta1": self._beta1,
"beta2": self._beta2,
"epsilon": self._epsilon
})
return adamax_op
def _finish_update(self, block, parameters_and_grads):
"""Update Beta1 Power accumulator
"""
assert isinstance(block, framework.Block)
main_block = block.program.global_block()
for param, grad in parameters_and_grads:
if grad is None:
continue
with param.block.program._optimized_guard(
                [param, grad]), name_scope('adamax'):
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param)
main_block.append_op(
type="scale",
inputs={"X": beta1_pow_acc},
outputs={"Out": beta1_pow_acc},
attrs={"scale": self._beta1})
class DecayedAdagradOptimizer(Optimizer):
"""
**Decayed Adagrad Optimizer**
    This optimizer is based on the original paper
    http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf .
The update is done as follows:
.. math::
moment\_out & = decay * moment + (1 - decay) * grad * grad
param\_out & = param - \\frac{learning\_rate * grad}{\sqrt{moment\_out} + \epsilon}
    The original paper does not have an epsilon attribute; it is added here
    for numerical stability to avoid division by zero.
Args:
learning_rate (float|Variable): the learning rate used to update parameters. \
Can be a float value or a Variable with one float value as data element.
decay (float): decay rate.
epsilon (float): a small float value for numerical stability.
regularization: A Regularizer, such as
fluid.regularizer.L2DecayRegularizer.
        name: An optional name prefix.
Examples:
.. code-block:: python
optimizer = fluid.optimizer.DecayedAdagrad(learning_rate=0.2)
optimizer.minimize(cost)
Notes:
Currently, DecayedAdagradOptimizer doesn't support sparse parameter optimization.
"""
_moment_acc_str = "moment"
def __init__(self,
learning_rate,
decay=0.95,
epsilon=1.0e-6,
regularization=None,
name=None):
assert learning_rate is not None
assert decay is not None
assert epsilon is not None
super(DecayedAdagradOptimizer, self).__init__(
learning_rate=learning_rate,
regularization=regularization,
name=name)
self.type = "decayed_adagrad"
self._decay = decay
self._epsilon = epsilon
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
for p in parameters:
self._add_accumulator(self._moment_acc_str, p)
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
moment_acc = self._get_accumulator(self._moment_acc_str,
param_and_grad[0])
# Create the decayed adagrad optimizer op
decayed_adagrad_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"Moment": moment_acc,
"LearningRate": self._create_param_lr(param_and_grad)
},
outputs={"ParamOut": param_and_grad[0],
"MomentOut": moment_acc},
attrs={"epsilon": self._epsilon})
return decayed_adagrad_op
class AdadeltaOptimizer(Optimizer):
"""
**Adadelta Optimizer**
Simple Adadelta optimizer with average squared grad state and
average squared update state.
    For the details of Adadelta, please refer to
`ADADELTA: AN ADAPTIVE LEARNING RATE METHOD
<http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf>`_.
.. math::
E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\
learning\\_rate &= sqrt( ( E(dx_{t-1}^2) + \\epsilon ) / ( \\
E(g_t^2) + \\epsilon ) ) \\\\
E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\\_rate)^2
Args:
learning_rate(float): global learning rate
rho(float): rho in equation
epsilon(float): epsilon in equation
regularization: A Regularizer, such as
fluid.regularizer.L2DecayRegularizer.
        name: An optional name prefix.
Examples:
.. code-block:: python
optimizer = fluid.optimizer.Adadelta(
learning_rate=0.0003, epsilon=1.0e-6, rho=0.95)
_, params_grads = optimizer.minimize(cost)
Notes:
Currently, AdadeltaOptimizer doesn't support sparse parameter optimization.
"""
_avg_squared_grad_acc_str = "_avg_squared_grad"
_avg_squared_update_acc_str = "_avg_squared_update"
def __init__(self,
learning_rate,
epsilon=1.0e-6,
rho=0.95,
regularization=None,
name=None):
if learning_rate is None:
raise ValueError("learning_rate is not set.")
if epsilon is None:
raise ValueError("epsilon is not set.")
if rho is None:
raise ValueError("rho is not set.")
super(AdadeltaOptimizer, self).__init__(
learning_rate=learning_rate,
regularization=regularization,
name=name)
self.type = "adadelta"
self._epsilon = epsilon
self._rho = rho
def _create_accumulators(self, block, parameters):
if not isinstance(block, framework.Block):
raise TypeError("block is not instance of framework.Block.")
for p in parameters:
self._add_accumulator(self._avg_squared_grad_acc_str, p)
self._add_accumulator(self._avg_squared_update_acc_str, p)
def _append_optimize_op(self, block, param_and_grad):
if not isinstance(block, framework.Block):
raise TypeError("block is not instance of framework.Block.")
avg_squared_grad_acc = self._get_accumulator(
self._avg_squared_grad_acc_str, param_and_grad[0])
avg_squared_update_acc = self._get_accumulator(
self._avg_squared_update_acc_str, param_and_grad[0])
# Create the adadelta optimizer op
adadelta_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"AvgSquaredGrad": avg_squared_grad_acc,
"AvgSquaredUpdate": avg_squared_update_acc
},
outputs={
"ParamOut": param_and_grad[0],
"AvgSquaredGradOut": avg_squared_grad_acc,
"AvgSquaredUpdateOut": avg_squared_update_acc
},
attrs={"epsilon": self._epsilon,
"rho": self._rho})
return adadelta_op
class RMSPropOptimizer(Optimizer):
"""
    Root Mean Squared Propagation (RMSProp) is an unpublished adaptive learning
    rate method, proposed on slide 29 of
    http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf .
The original equation is as follows:
.. math::
r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2
w & = w - \\frac{\\eta} {\\sqrt{r(w,t) + \\epsilon}} \\nabla Q_{i}(w)
    The first equation calculates a moving average of the squared gradient for
    each weight; the gradient is then divided by :math:`\\sqrt{r(w,t)}`.
    In some cases, adding a momentum term :math:`\\beta` is beneficial.
    In our implementation, Nesterov momentum is used:
.. math::
r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2
v(w, t) & = \\beta v(w, t-1) + \\frac{\\eta} {\\sqrt{r(w,t) +
\\epsilon}} \\nabla Q_{i}(w)
w & = w - v(w, t)
if centered is True:
.. math::
r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2
g(w, t) & = \\rho g(w, t-1) + (1 - \\rho)\\nabla Q_{i}(w)
v(w, t) & = \\beta v(w, t-1) + \\frac{\\eta} {\\sqrt{r(w,t) - (g(w, t))^2 +
\\epsilon}} \\nabla Q_{i}(w)
w & = w - v(w, t)
    where :math:`\\rho` is a hyperparameter with typical values such as 0.9 or
    0.95, :math:`\\beta` is the momentum term, and :math:`\\epsilon` is a
    smoothing term to avoid division by zero, usually set somewhere in the
    range from 1e-4 to 1e-8.
Args:
learning_rate(float): global learning rate.
        rho(float): rho is :math:`\\rho` in the equation, set to 0.95 by default.
        epsilon(float): :math:`\\epsilon` in the equation is a smoothing term to
            avoid division by zero, set to 1e-6 by default.
        momentum(float): :math:`\\beta` in the equation is the momentum term,
            set to 0.0 by default.
centered(bool): If True, gradients are normalized by the estimated variance of
the gradient; if False, by the uncentered second moment. Setting this to
True may help with training, but is slightly more expensive in terms of
computation and memory. Defaults to False.
regularization: A Regularizer, such as
fluid.regularizer.L2DecayRegularizer.
        name: An optional name prefix.
Raises:
ValueError: If learning_rate, rho, epsilon, momentum are None.
Examples:
.. code-block:: python
optimizer = fluid.optimizer.RMSProp(0.0001)
_, params_grads = optimizer.minimize(cost)
"""
_momentum_acc_str = "momentum"
_mean_square_acc_str = "mean_square"
_mean_grad_acc_str = "mean_grad"
def __init__(self,
learning_rate,
rho=0.95,
epsilon=1.0e-6,
momentum=0.0,
centered=False,
regularization=None,
name=None):
super(RMSPropOptimizer, self).__init__(
learning_rate=learning_rate,
regularization=regularization,
name=name)
if learning_rate is None:
raise ValueError("learning_rate is not set.")
if rho is None:
raise ValueError("rho is not set.")
if epsilon is None:
raise ValueError("epsilon is not set.")
if momentum is None:
raise ValueError("momentum is not set.")
self.type = "rmsprop"
self._rho = rho
self._epsilon = epsilon
self._momentum = momentum
self._centered = centered
def _create_accumulators(self, block, parameters):
if not isinstance(block, framework.Block):
raise TypeError("block is not instance of framework.Block.")
for p in parameters:
self._add_accumulator(self._momentum_acc_str, p)
self._add_accumulator(self._mean_square_acc_str, p)
self._add_accumulator(self._mean_grad_acc_str, p)
def _append_optimize_op(self, block, param_and_grad):
if not isinstance(block, framework.Block):
raise TypeError("block is not instance of framework.Block.")
momentum_acc = self._get_accumulator(self._momentum_acc_str,
param_and_grad[0])
mean_square_acc = self._get_accumulator(self._mean_square_acc_str,
param_and_grad[0])
mean_grad_acc = self._get_accumulator(self._mean_grad_acc_str,
param_and_grad[0])
rmsprop_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"Moment": momentum_acc,
"MeanSquare": mean_square_acc,
"MeanGrad": mean_grad_acc,
"LearningRate": self._create_param_lr(param_and_grad),
},
outputs={
"ParamOut": param_and_grad[0],
"MomentOut": momentum_acc,
"MeanSquareOut": mean_square_acc,
"MeanGradOut": mean_grad_acc
},
attrs={
"epsilon": self._epsilon,
"decay": self._rho,
"momentum": self._momentum,
"centered": self._centered
})
return rmsprop_op
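# Editor's note: a minimal NumPy sketch (the helper name is hypothetical, not
# fluid API) of the centered/uncentered RMSProp update described in the
# docstring above, making the roles of the Moment, MeanSquare and MeanGrad
# accumulators concrete.
import numpy as np

def rmsprop_step(w, grad, mean_square, mean_grad, velocity,
                 lr=1e-4, rho=0.95, epsilon=1e-6, momentum=0.0,
                 centered=False):
    # moving average of the squared gradient: r(w, t)
    mean_square = rho * mean_square + (1 - rho) * grad * grad
    if centered:
        # moving average of the gradient: g(w, t), used to estimate variance
        mean_grad = rho * mean_grad + (1 - rho) * grad
        denom = np.sqrt(mean_square - mean_grad ** 2 + epsilon)
    else:
        denom = np.sqrt(mean_square + epsilon)
    # momentum-smoothed update: v(w, t)
    velocity = momentum * velocity + lr * grad / denom
    return w - velocity, mean_square, mean_grad, velocity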
class FtrlOptimizer(Optimizer):
"""
FTRL (Follow The Regularized Leader) Optimizer.
The paper that proposed Follow The Regularized Leader (FTRL):
(https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf)
.. math::
&new\_accum = squared\_accum + grad^2
&if (lr\_power == -0.5):
&\quad linear\_accum += grad - \\frac{\\sqrt{new\_accum} - \\sqrt{squared\_accum}}{learning\_rate * param}
&else:
        &\quad linear\_accum += grad - \\frac{new\_accum^{-lr\_power} - squared\_accum^{-lr\_power}}{learning\_rate * param}
&x = l1 * sign(linear\_accum) - linear\_accum
&if (lr\_power == -0.5):
&\quad y = \\frac{\\sqrt{new\_accum}}{learning\_rate} + (2 * l2)
&\quad pre\_shrink = \\frac{x}{y}
&\quad param = (abs(linear\_accum) > l1).select(pre\_shrink, 0.0)
&else:
&\quad y = \\frac{new\_accum^{-lr\_power}}{learning\_rate} + (2 * l2)
&\quad pre\_shrink = \\frac{x}{y}
&\quad param = (abs(linear\_accum) > l1).select(pre\_shrink, 0.0)
&squared\_accum += grad^2
Args:
learning_rate (float|Variable): global learning rate.
        l1 (float): L1 regularization strength, 0.0 by default.
        l2 (float): L2 regularization strength, 0.0 by default.
        lr_power (float): learning rate power, -0.5 by default.
regularization: A Regularizer, such as
fluid.regularizer.L2DecayRegularizer.
        name: An optional name prefix.
Raises:
        ValueError: If learning_rate is None.
Examples:
.. code-block:: python
optimizer = fluid.optimizer.Ftrl(0.0001)
_, params_grads = optimizer.minimize(cost)
Notes:
Currently, FtrlOptimizer doesn't support sparse parameter optimization.
"""
_squared_acc_str = "squared"
_linear_acc_str = "linear"
def __init__(self,
learning_rate,
l1=0.0,
l2=0.0,
lr_power=-0.5,
regularization=None,
name=None):
super(FtrlOptimizer, self).__init__(
learning_rate=learning_rate,
regularization=regularization,
name=name)
if learning_rate is None:
raise ValueError("learning_rate is not set.")
self.type = "ftrl"
self._l1 = l1
self._l2 = l2
self._lr_power = lr_power
def _create_accumulators(self, block, parameters):
if not isinstance(block, framework.Block):
raise TypeError("block is not instance of framework.Block.")
for p in parameters:
self._add_accumulator(self._squared_acc_str, p)
self._add_accumulator(self._linear_acc_str, p)
def _append_optimize_op(self, block, param_and_grad):
if not isinstance(block, framework.Block):
raise TypeError("block is not instance of framework.Block.")
squared_acc = self._get_accumulator(self._squared_acc_str,
param_and_grad[0])
linear_acc = self._get_accumulator(self._linear_acc_str,
param_and_grad[0])
ftrl_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"SquaredAccumulator": squared_acc,
"LinearAccumulator": linear_acc,
"LearningRate": self._create_param_lr(param_and_grad),
},
outputs={
"ParamOut": param_and_grad[0],
"SquaredAccumOut": squared_acc,
"LinearAccumOut": linear_acc
},
attrs={"l1": self._l1,
"l2": self._l1,
"lr_power": self._lr_power})
return ftrl_op
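# Editor's note: a NumPy sketch (hypothetical helper, not fluid API) that
# transcribes the FTRL-Proximal pseudocode from the docstring above. The sigma
# term follows the usual FTRL formulation, i.e. ((n_new^p - n_old^p) / lr) * param.
import numpy as np

def ftrl_step(param, grad, squared_accum, linear_accum,
              lr=0.0001, l1=0.0, l2=0.0, lr_power=-0.5):
    new_accum = squared_accum + grad * grad
    if lr_power == -0.5:
        sigma = (np.sqrt(new_accum) - np.sqrt(squared_accum)) / lr
        y = np.sqrt(new_accum) / lr + 2.0 * l2
    else:
        sigma = (new_accum ** -lr_power - squared_accum ** -lr_power) / lr
        y = new_accum ** -lr_power / lr + 2.0 * l2
    linear_accum = linear_accum + grad - sigma * param
    x = l1 * np.sign(linear_accum) - linear_accum
    # shrink toward zero; weights with |z| <= l1 are clamped to exactly 0
    param = np.where(np.abs(linear_accum) > l1, x / y, 0.0)
    return param, new_accum, linear_accum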
# We shorten the class names, since users will use the optimizer with the package
# name. The sample code:
#
# import paddle.fluid as fluid
#
# sgd = fluid.optimizer.SGD(...)
#
# There is no need to add `Optimizer` as the class suffix
SGD = SGDOptimizer
Momentum = MomentumOptimizer
Adagrad = AdagradOptimizer
Adam = AdamOptimizer
Adamax = AdamaxOptimizer
DecayedAdagrad = DecayedAdagradOptimizer
Adadelta = AdadeltaOptimizer
RMSProp = RMSPropOptimizer
Ftrl = FtrlOptimizer
LarsMomentum = LarsMomentumOptimizer
class ModelAverage(Optimizer):
"""Accumulate the average of parameters whtin sliding window. The average
result will be saved in temporary variables which can be applied to
parameter variables of current model by calling 'apply()' method. And the
'restore()' method is used to restored the parameter values of current model.
The size of average window is determined by average_window_rate,
min_average_window, max_average_window and current update times.
Args:
        average_window_rate: The rate of the average window.
min_average_window: The minimum size of average window.
max_average_window: The maximum size of average window.
regularization: A Regularizer, such as
fluid.regularizer.L2DecayRegularizer.
        name: An optional name prefix.
Examples:
.. code-block:: python
optimizer = fluid.optimizer.Momentum()
optimizer.minimize(cost)
model_average = fluid.optimizer.ModelAverage(0.15,
min_average_window=10000,
max_average_window=20000)
for pass_id in range(args.pass_num):
for data in train_reader():
exe.run(fluid.default_main_program()...)
with model_average.apply(exe):
for data in test_reader():
exe.run(inference_program...)
"""
def __init__(self,
average_window_rate,
min_average_window=10000,
max_average_window=10000,
regularization=None,
name=None):
super(ModelAverage, self).__init__(
0.0, regularization=regularization, name=name)
self.average_window = average_window_rate
self.min_average_window = min_average_window
self.max_average_window = max_average_window
self.params_grads = []
for param in framework.default_main_program().global_block(
).all_parameters():
if param.do_model_average != False:
grad = param.block.create_var(
name=unique_name.generate(".".join([param.name, 'tmp'])),
dtype=param.dtype,
persistable=False,
stop_gradient=True)
self.params_grads.append((param, grad))
for param, grad in self.params_grads:
if grad is None:
continue
with param.block.program._optimized_guard(
[param, grad]), name_scope('move_average'):
self._append_average_accumulate_op(param)
self.apply_program = Program()
block = self.apply_program.global_block()
with program_guard(main_program=self.apply_program):
for param_grad in self.params_grads:
self._add_average_apply_op(block, param_grad)
self.restore_program = Program()
block = self.restore_program.global_block()
with program_guard(main_program=self.restore_program):
for param_grad in self.params_grads:
self._add_average_restore_op(block, param_grad)
def _add_average_apply_op(self, block, param_grad):
param = block._clone_variable(param_grad[0])
grad = block._clone_variable(param_grad[1])
sum_1 = block._clone_variable(self._get_accumulator('sum_1', param))
sum_2 = block._clone_variable(self._get_accumulator('sum_2', param))
sum_3 = block._clone_variable(self._get_accumulator('sum_3', param))
num_accumulates = block._clone_variable(
self._get_accumulator('num_accumulates', param))
old_num_accumulates = block._clone_variable(
self._get_accumulator('old_num_accumulates', param))
num_updates = block._clone_variable(
self._get_accumulator('num_updates', param))
# backup param value to grad
layers.assign(input=param, output=grad)
# param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
sum = layers.sum(x=[sum_1, sum_2, sum_3])
tmp = layers.cast(
            x=tmp, dtype='float32' if self._dtype is None else self._dtype)
sum = layers.cast(
            x=sum, dtype='float32' if self._dtype is None else self._dtype)
ops._elementwise_div(x=sum, y=tmp, out=param)
def _add_average_restore_op(self, block, param_grad):
param = block._clone_variable(param_grad[0])
grad = block._clone_variable(param_grad[1])
layers.assign(input=grad, output=param)
def _append_average_accumulate_op(self, param):
self.helper = LayerHelper("average_accumulate")
sum_1 = self._add_accumulator('sum_1', param)
sum_2 = self._add_accumulator('sum_2', param)
sum_3 = self._add_accumulator('sum_3', param)
num_accumulates = self._add_accumulator(
'num_accumulates', param, dtype='int64', shape=[1])
old_num_accumulates = self._add_accumulator(
'old_num_accumulates', param, dtype='int64', shape=[1])
num_updates = self._add_accumulator(
'num_updates', param, dtype='int64', shape=[1])
self.helper.append_op(
type='average_accumulates',
inputs={
"param": param,
"in_sum_1": sum_1,
"in_sum_2": sum_2,
"in_sum_3": sum_3,
"in_num_accumulates": num_accumulates,
"in_old_num_accumulates": old_num_accumulates,
"in_num_updates": num_updates
},
outputs={
"out_sum_1": sum_1,
"out_sum_2": sum_2,
"out_sum_3": sum_3,
"out_num_accumulates": num_accumulates,
"out_old_num_accumulates": old_num_accumulates,
"out_num_updates": num_updates,
},
attrs={
"average_window": self.average_window,
"min_average_window": self.min_average_window,
"max_average_window": self.max_average_window,
})
@contextmanager
def apply(self, executor, need_restore=True):
"""Apply average values to parameters of current model.
"""
executor.run(self.apply_program)
try:
yield
finally:
if need_restore:
self.restore(executor)
def restore(self, executor):
"""Restore parameter values of current model.
"""
executor.run(self.restore_program)
|
reyoung/Paddle
|
python/paddle/fluid/optimizer.py
|
Python
|
apache-2.0
| 53,808
|
# general class containing info about a transcribed region. Currently supports input from UCSC knownGenes (BED) and Ensembl GTF files.
# Stephen N. Floor
# Fall 2014
class Transcript:
def __init__(self):
#properties defined in UCSC knowngenes
self.name = ''
self.chrom = ''
self.strand = ''
self.txStart = 0
self.txEnd = 0
self.cdsStart = 0
self.cdsEnd = 0
self.exonCt = 0
self.exonStarts = []
self.exonEnds = []
self.exonLengths = []
#meta properties computed during construction. These are lists of (chrom, start, end, name) tuples (the first four BED fields), except the *Len terms, which hold the total length of each region for the gene
self.utr5 = []
self.utr5Len = 0
self.utr5start = []
self.utr5startLen = 0
self.cds = []
self.cdsLen = 0
self.utr3 = []
self.utr3Len = 0
self.exons = []
self.exonsLen = 0
self.introns = []
self.intronsLen = 0
self.coding = False
def __str__(self): #currently roughly knownGenes format with a second line containing metadata
return "%s\t%s\t%s\t%d\t%d\t%d\t%d\t%d\t%s\t%s\n%s\t%d\t%s\t%d\t%s\t%d\t%s\t%d\t%s\t%d\t%s" % (self.name, self.chrom, self.strand, self.txStart, self.txEnd, self.cdsStart, self.cdsEnd, self.exonCt, self.exonStarts, self.exonEnds, self.utr5, self.utr5Len, self.cds, self.cdsLen, self.utr3, self.utr3Len, self.exons, self.exonsLen, self.introns, self.intronsLen, self.coding)
#BED format output is goal. Fields are optional after featureEnd
# chrom featureStart featureEnd nameOfLine score(0-1000) strand thickStart thickEnd itemRGBtuple blockCount blockSizes blockStarts
#this function returns a list of BED-formatted strings for the feature passed as region with multiple entries per region possible, one for each primitive (exon/intron)
def bedFormat(self, region="exons"):
if (not self.coding and (region == "5utr" or region == "cds" or region == "3utr")):
print "Transcript.py bedFormat error: noncoding transcripts do not have 5utr/cds/3utr"
return []
returnVal = []
if (region == "5utr"):
for chunk in self.utr5:
returnVal.append("%s\t%d\t%d\t%s\t0\t%c\t%d\t%d\t0" % (chunk[0], chunk[1], chunk[2], chunk[3]+"_5utr",self.strand, chunk[1],chunk[2]))
elif (region == "5utr_start"):
for chunk in self.utr5start:
returnVal.append("%s\t%d\t%d\t%s\t0\t%c\t%d\t%d\t0" % (chunk[0], chunk[1], chunk[2], chunk[3]+"_5utr_start",self.strand, chunk[1],chunk[2]))
elif (region == "cds"):
for chunk in self.cds:
returnVal.append("%s\t%d\t%d\t%s\t0\t%c\t%d\t%d\t0" % (chunk[0], chunk[1], chunk[2], chunk[3]+"_cds",self.strand, chunk[1],chunk[2]))
elif (region == "3utr"):
for chunk in self.utr3:
returnVal.append("%s\t%d\t%d\t%s\t0\t%c\t%d\t%d\t0" % (chunk[0], chunk[1], chunk[2], chunk[3]+"_3utr",self.strand, chunk[1],chunk[2]))
elif (region == "exons"):
for chunk in self.exons:
returnVal.append("%s\t%d\t%d\t%s\t0\t%c\t%d\t%d\t0" % (chunk[0], chunk[1], chunk[2], chunk[3]+"_exon",self.strand, chunk[1],chunk[2]))
elif (region == "introns"):
for chunk in self.introns:
returnVal.append("%s\t%d\t%d\t%s\t0\t%c\t%d\t%d\t0" % (chunk[0], chunk[1], chunk[2], chunk[3]+"_intron",self.strand, chunk[1],chunk[2]))
else:
print "Transcript.py bedFormat error: currently only regions 5utr/cds/3utr/exons/introns are supported"
return returnVal
#BED format output is goal. Fields are optional after featureEnd
# chrom featureStart featureEnd nameOfLine score(0-1000) strand thickStart thickEnd itemRGBtuple blockCount blockSizes blockStarts
#blockCount - The number of blocks (exons) in the BED line.
#blockSizes - A comma-separated list of the block sizes. The number of items in this list should correspond to blockCount.
#blockStarts - A comma-separated list of block starts. All of the blockStart positions should be calculated relative to chromStart. The number of items in this list should correspond to blockCount.
#this function returns a BED-formatted string for the feature passed as region with blocks defining the exons as per the BED file format
def blockBedFormat(self, region="exons"):
if (not self.coding and (region == "5utr" or region == "cds" or region == "3utr")):
print "Transcript.py blockBedFormat error: noncoding transcripts do not have 5utr/cds/3utr"
return ""
returnVal = ""
score = 0
rgb = 0
if (region == "5utr"):
chromStart = self.utr5[0][1] # start of feature is start of first block
chromEnd = self.utr5[-1][2] # end of feature is end of last block
regionName = self.name + "_5utr"
blockCount = len(self.utr5)
blockSizes = ''.join(["%d," % (chunk[2]-chunk[1]) for chunk in self.utr5])
blockStarts = ''.join(["%d," % (chunk[1]-chromStart) for chunk in self.utr5])
#print "blockCount %d blockSizes %s blockStarts %s" % (blockCount, blockSizes, blockStarts)
elif (region == "5utr_start"):
chromStart = self.utr5start[0][1] # start of feature is start of first block
chromEnd = self.utr5start[-1][2] # end of feature is end of last block
regionName = self.name + "_5utr_start"
blockCount = len(self.utr5start)
blockSizes = ''.join(["%d," % (chunk[2]-chunk[1]) for chunk in self.utr5start])
blockStarts = ''.join(["%d," % (chunk[1]-chromStart) for chunk in self.utr5start])
#print "blockCount %d blockSizes %s blockStarts %s" % (blockCount, blockSizes, blockStarts)
elif (region == "cds"):
chromStart = self.cds[0][1] # start of feature is start of first block
chromEnd = self.cds[-1][2] # end of feature is end of last block
regionName = self.name + "_cds"
blockCount = len(self.cds)
blockSizes = ''.join(["%d," % (chunk[2]-chunk[1]) for chunk in self.cds])
blockStarts = ''.join(["%d," % (chunk[1]-chromStart) for chunk in self.cds])
#print "blockCount %d blockSizes %s blockStarts %s" % (blockCount, blockSizes, blockStarts)
elif (region == "3utr"):
chromStart = self.utr3[0][1] # start of feature is start of first block
chromEnd = self.utr3[-1][2] # end of feature is end of last block
regionName = self.name + "_3utr"
blockCount = len(self.utr3)
blockSizes = ''.join(["%d," % (chunk[2]-chunk[1]) for chunk in self.utr3])
blockStarts = ''.join(["%d," % (chunk[1]-chromStart) for chunk in self.utr3])
#print "blockCount %d blockSizes %s blockStarts %s" % (blockCount, blockSizes, blockStarts)
elif (region == "exons"):
chromStart = self.exons[0][1] # start of feature is start of first block
chromEnd = self.exons[-1][2] # end of feature is end of last block
regionName = self.name + "_exon"
blockCount = len(self.exons)
blockSizes = ''.join(["%d," % (chunk[2]-chunk[1]) for chunk in self.exons])
blockStarts = ''.join(["%d," % (chunk[1]-chromStart) for chunk in self.exons])
#print "blockCount %d blockSizes %s blockStarts %s" % (blockCount, blockSizes, blockStarts)
elif (region == "introns"):
chromStart = self.introns[0][1] # start of feature is start of first block
chromEnd = self.introns[-1][2] # end of feature is end of last block
regionName = self.name + "_intron"
blockCount = len(self.introns)
blockSizes = ''.join(["%d," % (chunk[2]-chunk[1]) for chunk in self.introns])
blockStarts = ''.join(["%d," % (chunk[1]-chromStart) for chunk in self.introns])
#print "blockCount %d blockSizes %s blockStarts %s" % (blockCount, blockSizes, blockStarts)
else:
print "UCSCKnownGene blockBedFormat error: currently only regions 5utr/cds/3utr/exons/introns are supported"
returnVal = "%s\t%d\t%d\t%s\t%d\t%c\t%d\t%d\t%s\t%d\t%s\t%s" % (self.chrom, chromStart, chromEnd, regionName, score, self.strand, chromStart, chromEnd, rgb, blockCount, blockSizes, blockStarts)
return returnVal
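# Editor's note (illustration only, hypothetical coordinates): a transcript on
# chr1/+ named NAME with exons (100,200) and (300,400) yields this single
# BED12 line from blockBedFormat(region="exons"), fields tab-separated:
#   chr1  100  400  NAME_exon  0  +  100  400  0  2  100,100,  0,200,
# blockSizes are per-exon lengths; blockStarts are offsets from chromStart.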
def computeMetadata(self):
# -- begin computing metadata --
# -- note: chose clarity of code and conditionals here over most efficient computation (i.e. some clauses may be redundant)
if (self.strand == "+"):
#print ("DBUG - exonCt %d i %d exonEnds[i] %d cdsStart %d exonStarts[i] %d cdsEnd %d") % \
# (self.exonCt, i, self.exonEnds[i], self.cdsStart, self.exonStarts[i], self.cdsEnd)
for i in range (self.exonCt):
if (self.cdsStart != self.cdsEnd): # if this is a coding transcript
self.coding = True
# -- first compute 5'utr, CDS, 3'utr regions --
#case 1 - exon spans 5' UTR/CDS/3' UTR
if (self.exonStarts[i] < self.cdsStart and self.exonEnds[i] > self.cdsEnd):
self.utr5.append((self.chrom, self.exonStarts[i], self.cdsStart, self.name))
self.utr5Len += self.cdsStart - self.exonStarts[i]
self.utr5start.append((self.chrom, self.exonStarts[i], self.cdsStart, self.name)) # for now just append the 5' utr exons to the utr5start
self.utr5startLen += self.cdsStart - self.exonStarts[i]
self.cds.append((self.chrom, self.cdsStart, self.cdsEnd, self.name))
self.cdsLen += self.cdsEnd - self.cdsStart
self.utr3.append((self.chrom, self.cdsEnd, self.exonEnds[i], self.name))
self.utr3Len += self.exonEnds[i] - self.cdsEnd
#case 2 - exon spans 5' UTR/CDS junction
elif (self.exonStarts[i] < self.cdsStart and self.exonEnds[i] >= self.cdsStart):
self.utr5.append((self.chrom, self.exonStarts[i], self.cdsStart, self.name))
self.utr5Len += self.cdsStart - self.exonStarts[i]
self.utr5start.append((self.chrom, self.exonStarts[i], self.cdsStart, self.name))
self.utr5startLen += self.cdsStart - self.exonStarts[i]
self.cds.append((self.chrom, self.cdsStart, self.exonEnds[i], self.name))
self.cdsLen += self.exonEnds[i]- self.cdsStart
#case 3 - exon spans CDS/3'UTR junction
elif (self.exonStarts[i] >= self.cdsStart and self.exonStarts[i] <= self.cdsEnd and self.exonEnds[i] > self.cdsEnd):
self.cds.append((self.chrom, self.exonStarts[i], self.cdsEnd, self.name))
self.cdsLen += self.cdsEnd - self.exonStarts[i]
self.utr3.append((self.chrom, self.cdsEnd, self.exonEnds[i], self.name))
self.utr3Len += self.exonEnds[i] - self.cdsEnd
#case 4 - exon is 5' UTR only
elif (self.exonStarts[i] < self.cdsStart and self.exonEnds[i] < self.cdsStart):
self.utr5.append((self.chrom, self.exonStarts[i], self.exonEnds[i], self.name))
self.utr5Len += self.exonEnds[i] - self.exonStarts[i]
self.utr5start.append((self.chrom, self.exonStarts[i], self.exonEnds[i], self.name))
self.utr5startLen += self.exonEnds[i] - self.exonStarts[i]
#case 5 - exon is CDS only
elif (self.exonStarts[i] >= self.cdsStart and self.exonEnds[i] <= self.cdsEnd):
self.cds.append((self.chrom, self.exonStarts[i], self.exonEnds[i], self.name))
self.cdsLen += self.exonEnds[i] - self.exonStarts[i]
#case 6 - exon is 3' UTR only
elif (self.exonStarts[i] > self.cdsEnd and self.exonEnds[i] > self.cdsEnd):
self.utr3.append((self.chrom, self.exonStarts[i], self.exonEnds[i], self.name))
self.utr3Len += self.exonEnds[i] - self.exonStarts[i]
else:
print "Thar be dragons - Transcript computeMetadata + stranded gene region parsing"
# -- generate combined exonic and intronic regions --
#exons are easy
self.exons.append((self.chrom, self.exonStarts[i], self.exonEnds[i], self.name))
self.exonsLen += self.exonEnds[i] - self.exonStarts[i]
#print "DBUG2: i %d self.exonCt-1 %d self.exonEnds %s self.exonStarts %s" % (i, self.exonCt-1, self.exonEnds, self.exonStarts)
if (i < self.exonCt - 1): # only compute introns for nonterminal exons
# an intron is the region between the end of the current exon and start of the next
self.introns.append((self.chrom, self.exonEnds[i], self.exonStarts[i+1], self.name))
self.intronsLen += self.exonStarts[i+1] - self.exonEnds[i]
# append 27 nt of the coding sequence onto the utr5start regions by creating another block, only if there is a 5' utr already
if (self.coding and self.utr5Len > 0):
added = 0
index = 0
while(added < 27 and index < len(self.cds)):
if (self.cds[index][2] - self.cds[index][1]) > 27 - added:
self.utr5start.append((self.chrom, self.cds[index][1], self.cds[index][1] + 27 - added, self.name))
self.utr5startLen += 27 - added
added += 27 - added
else:
self.utr5start.append((self.chrom, self.cds[index][1], self.cds[index][2], self.name))
self.utr5startLen += self.cds[index][2] - self.cds[index][1]
added += self.cds[index][2] - self.cds[index][1]
index += 1
if (added < 27):
#print "Transcript.py: aborting 5' UTR start for transcript %s because %d nts added (CDS length %d)" % (self.name, added, self.cdsLen)
self.utr5startLen = 0
self.utr5start = []
elif (self.strand == "-"):
#uc001ach.2 chr1 - 910578 917473 911551 916546 5 910578,911878,914260,916516,917444, 911649,912004,916037,916553,917473, Q5SV97 uc001ach.2
# name chrom strand txStart txEnd cdsStart self.cdsEnd exonCt exonStarts exonEnds proteinID alignID
# for the minus strand everything is the same except the order of encountering regions is reversed
# i.e. 3' UTR -> CDS -> 5' UTR
for i in range (self.exonCt):
#print ("DBUG - exonCt %d i %d self.exonEnds[i] %d self.cdsStart %d exonStarts[i] %d self.cdsEnd %d") % \
# (self.exonCt, i, self.exonEnds[i], self.cdsStart, self.exonStarts[i], self.cdsEnd)
if (self.cdsStart != self.cdsEnd):
self.coding = True
# -- first compute 5'utr, CDS, 3'utr regions --
# -- this is the same as for + sense except 5' UTR and 3' UTR are swapped throughout
#case 1 - exon spans 3' UTR/CDS/5' UTR
if (self.exonStarts[i] < self.cdsStart and self.exonEnds[i] > self.cdsEnd):
self.utr3.append((self.chrom, self.exonStarts[i], self.cdsStart, self.name))
self.utr3Len += self.cdsStart - self.exonStarts[i]
self.cds.append((self.chrom, self.cdsStart, self.cdsEnd, self.name))
self.cdsLen += self.cdsEnd - self.cdsStart
self.utr5.append((self.chrom, self.cdsEnd, self.exonEnds[i], self.name))
self.utr5Len += self.exonEnds[i] - self.cdsEnd
self.utr5start.append((self.chrom, self.cdsEnd, self.exonEnds[i], self.name))
self.utr5startLen += self.exonEnds[i] - (self.cdsEnd)
#case 2 - exon spans 3' UTR/CDS junction
elif (self.exonStarts[i] < self.cdsStart and self.exonEnds[i] >= self.cdsStart):
self.utr3.append((self.chrom, self.exonStarts[i], self.cdsStart, self.name))
self.utr3Len += self.cdsStart - self.exonStarts[i]
self.cds.append((self.chrom, self.cdsStart, self.exonEnds[i], self.name))
self.cdsLen += self.exonEnds[i]- self.cdsStart
#case 3 - exon spans CDS/5'UTR junction
elif (self.exonStarts[i] >= self.cdsStart and self.exonStarts[i] <= self.cdsEnd and self.exonEnds[i] > self.cdsEnd):
self.cds.append((self.chrom, self.exonStarts[i], self.cdsEnd, self.name))
self.cdsLen += self.cdsEnd - self.exonStarts[i]
self.utr5.append((self.chrom, self.cdsEnd, self.exonEnds[i], self.name))
self.utr5Len += self.exonEnds[i] - self.cdsEnd
self.utr5start.append((self.chrom, self.cdsEnd, self.exonEnds[i], self.name))
self.utr5startLen += self.exonEnds[i] - (self.cdsEnd)
#case 4 - exon is 3' UTR only
elif (self.exonStarts[i] < self.cdsStart and self.exonEnds[i] < self.cdsStart):
self.utr3.append((self.chrom, self.exonStarts[i], self.exonEnds[i], self.name))
self.utr3Len += self.exonEnds[i] - self.exonStarts[i]
#case 5 - exon is CDS only
elif (self.exonStarts[i] >= self.cdsStart and self.exonEnds[i] <= self.cdsEnd):
self.cds.append((self.chrom, self.exonStarts[i], self.exonEnds[i], self.name))
self.cdsLen += self.exonEnds[i] - self.exonStarts[i]
#case 6 - exon is 5' UTR only
elif (self.exonStarts[i] > self.cdsEnd and self.exonEnds[i] > self.cdsEnd):
self.utr5.append((self.chrom, self.exonStarts[i], self.exonEnds[i], self.name))
self.utr5Len += self.exonEnds[i] - self.exonStarts[i]
self.utr5start.append((self.chrom, self.exonStarts[i] , self.exonEnds[i], self.name))
self.utr5startLen += self.exonEnds[i] - self.exonStarts[i]
else:
print "Thar be dragons - Transcript computeMetadata - stranded gene region parsing"
#else:
# print "- strand noncoding transcript"
# -- generate combined exonic and intronic regions --
#exons are easy
self.exons.append((self.chrom, self.exonStarts[i], self.exonEnds[i], self.name))
self.exonsLen += self.exonEnds[i] - self.exonStarts[i]
if (i < self.exonCt - 1): # only compute introns for nonterminal exons
# an intron is the region between the end of the current exon and start of the next
self.introns.append((self.chrom, self.exonEnds[i], self.exonStarts[i+1], self.name))
self.intronsLen += self.exonStarts[i+1] - self.exonEnds[i]
# append 27 nt of the coding sequence onto the utr5start regions by creating another block, only if there is a 5' utr already
if (self.coding and self.utr5Len > 0):
added = 0
index = -1
while(added < 27 and index >= len(self.cds)*-1): #cdsEnd is the start, and the last exon is the first exon of the cds
#print self.cds
#print self.cds[index]
# need to insert at the beginning here and not append
if (self.cds[index][2] - self.cds[index][1]) > 27 - added:
self.utr5start.insert(0,(self.chrom, self.cds[index][2] - (27 - added), self.cds[index][2], self.name))
self.utr5startLen += 27 - added
added += 27 - added
else:
self.utr5start.insert(0,(self.chrom, self.cds[index][1], self.cds[index][2], self.name))
self.utr5startLen += self.cds[index][2] - self.cds[index][1]
added += self.cds[index][2] - self.cds[index][1]
index -= 1
if (added < 27):
#print "Transcript.py: aborting 5' UTR start for transcript %s because %d nts added (CDS length %d)" % (self.name, added, self.cdsLen)
self.utr5startLen = 0
self.utr5start = []
else:
print "Thar be dragons - Transcript computeMetadata strand does not match + or -"
#example line format from knownGenes file (from UCSC)
# uc010nxq.1 chr1 + 11873 14409 12189 13639 3 11873,12594,13402, 12227,12721,14409, B7ZGX9 uc010nxq.1
# line format
# name chrom strand txStart txEnd cdsStart cdsEnd exonCt exonStarts exonEnds proteinID alignID
def createUCSCTranscript(knownGeneLine):
foo = Transcript()
line = knownGeneLine.split()
# -- read in knownGene fields --
foo.name = line[0]
foo.chrom = line[1]
foo.strand = line[2]
foo.txStart = int(line[3])
foo.txEnd = int(line[4])
foo.cdsStart = int(line[5])
foo.cdsEnd = int(line[6])
foo.exonCt = int(line[7])
starts = line[8].split(",")
ends = line[9].split(",")
for i in range(foo.exonCt):
foo.exonStarts.append(int(starts[i]))
foo.exonEnds.append(int(ends[i]))
foo.computeMetadata()
return foo
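# Editor's note: minimal usage sketch (the file name "knownGene.txt" is
# hypothetical); each line of a UCSC knownGenes dump becomes one Transcript:
#
# with open("knownGene.txt") as kg:
#     for kgLine in kg:
#         tx = createUCSCTranscript(kgLine)
#         for bedLine in tx.bedFormat(region="exons"):
#             print bedLine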
# input to createGTFTranscript below must be a list of dictionaries for each line of the input GTF file
# these are created inside knowngenes_to_transcript_regions.py
# example input:
#[{'gene_name': 'DDX11L1', 'seqname': '1', 'end': '12227', 'start': '11869', 'frame': None, 'transcript_source': 'havana', 'feature': 'exon', 'exon_number': '1', 'exon_id': 'ENSE00002234944', 'tss_id': 'TSS15145', 'source': 'processed_transcript', 'gene_source': 'ensembl_havana', 'score': None, 'gene_biotype': 'pseudogene', 'gene_id': 'ENSG00000223972', 'transcript_id': 'ENST00000456328', 'transcript_name': 'DDX11L1-002', 'strand': '+'}, {'seqname': '1', 'end': '14409', 'start': '11869', 'frame': None, 'transcript_source': 'havana', 'feature': 'transcript', 'gene_id': 'ENSG00000223972', 'tss_id': 'TSS15145', 'source': 'processed_transcript', 'gene_source': 'ensembl_havana', 'score': None, 'gene_biotype': 'pseudogene', 'gene_name': 'DDX11L1', 'transcript_id': 'ENST00000456328', 'transcript_name': 'DDX11L1-002', 'strand': '+'}]
# keys for each dict:
# gene_name
# seqname
# start
# end
# frame
# transcript_source
# feature
# exon_number
# exon_id
# tss_id
# source
# gene_source
# score
# gene_biotype
# gene_id
# transcript_id
# transcript_name
# strand
def createGTFTranscript(gtfLines):
foo = Transcript()
# these properties (better be) all identical for each entry in the list of dicts
first = gtfLines[0]
foo.name = first["transcript_id"]
foo.chrom = first["seqname"]
foo.strand = first["strand"]
# now process all lines for this transcript ID
for entry in gtfLines:
# ensembl GTFs have special lines where feature = "transcript" and feature = "CDS" that define the transcript and CDS start/ends, respectively
# GTF files use 1-based, fully closed intervals, while BED uses 0-based, half-open (left-closed, right-open) intervals,
# so one is subtracted from all start coordinates below to convert them to BED-style starts
if (entry["feature"] == "transcript"):
if (foo.txStart == 0 or int(entry["start"]) < foo.txStart):
foo.txStart = int(entry["start"]) - 1
if (foo.txEnd == 0 or int(entry["end"]) > foo.txEnd):
foo.txEnd = int(entry["end"])
if (entry["feature"] == "CDS"):
if (foo.cdsStart == 0 or int(entry["start"]) < foo.cdsStart):
foo.cdsStart = int(entry["start"]) - 1
if (foo.cdsEnd == 0 or int(entry["end"]) > foo.cdsEnd):
foo.cdsEnd = int(entry["end"])
if (entry["feature"] == "exon"):
foo.exonCt += 1
foo.exonStarts.append(int(entry["start"]) - 1)
foo.exonEnds.append(int(entry["end"]))
foo.exonStarts = sorted(foo.exonStarts)
foo.exonEnds = sorted(foo.exonEnds)
foo.computeMetadata()
return foo
|
stephenfloor/extract-transcript-regions
|
Transcript.py
|
Python
|
gpl-2.0
| 25,460
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import itertools
from frappe import _
from frappe.utils import cstr, flt
import json
from frappe import ValidationError
class ItemVariantExistsError(frappe.ValidationError): pass
class InvalidItemAttributeValueError(frappe.ValidationError): pass
class ItemTemplateCannotHaveStock(frappe.ValidationError): pass
@frappe.whitelist()
def get_variant(template, args=None, variant=None, manufacturer=None,
manufacturer_part_no=None):
"""Validates Attributes and their Values, then looks for an exactly
matching Item Variant
:param template: Template Item name
:param args: A dictionary with "Attribute" as key and "Attribute Value" as value
"""
item_template = frappe.get_doc('Item', template)
if item_template.variant_based_on=='Manufacturer' and manufacturer:
return make_variant_based_on_manufacturer(item_template, manufacturer,
manufacturer_part_no)
else:
if isinstance(args, basestring):
args = json.loads(args)
if not args:
frappe.throw(_("Please specify at least one attribute in the Attributes table"))
return find_variant(template, args, variant)
def make_variant_based_on_manufacturer(template, manufacturer, manufacturer_part_no):
'''Make and return a new variant based on manufacturer and
manufacturer part no'''
from frappe.model.naming import append_number_if_name_exists
variant = frappe.new_doc('Item')
copy_attributes_to_variant(template, variant)
variant.manufacturer = manufacturer
variant.manufacturer_part_no = manufacturer_part_no
variant.item_code = append_number_if_name_exists('Item', template.name)
return variant
def validate_item_variant_attributes(item, args=None):
if isinstance(item, basestring):
item = frappe.get_doc('Item', item)
if not args:
args = {d.attribute.lower():d.attribute_value for d in item.attributes}
attribute_values, numeric_values = get_attribute_values()
for attribute, value in args.items():
if not value:
continue
if attribute.lower() in numeric_values:
numeric_attribute = numeric_values[attribute.lower()]
validate_is_incremental(numeric_attribute, attribute, value, item.name)
else:
attributes_list = attribute_values.get(attribute.lower(), [])
validate_item_attribute_value(attributes_list, attribute, value, item.name)
def validate_is_incremental(numeric_attribute, attribute, value, item):
from_range = numeric_attribute.from_range
to_range = numeric_attribute.to_range
increment = numeric_attribute.increment
if increment == 0:
# defensive validation to prevent ZeroDivisionError
frappe.throw(_("Increment for Attribute {0} cannot be 0").format(attribute))
is_in_range = from_range <= flt(value) <= to_range
precision = max(len(cstr(v).split(".")[-1].rstrip("0")) for v in (value, increment))
#avoid precision error by rounding the remainder
remainder = flt((flt(value) - from_range) % increment, precision)
is_incremental = remainder==0 or remainder==increment
if not (is_in_range and is_incremental):
frappe.throw(_("Value for Attribute {0} must be within the range of {1} to {2} in the increments of {3} for Item {4}")\
.format(attribute, from_range, to_range, increment, item),
InvalidItemAttributeValueError, title=_('Invalid Attribute'))
def validate_item_attribute_value(attributes_list, attribute, attribute_value, item):
if attribute_value not in attributes_list:
frappe.throw(_("Value {0} for Attribute {1} does not exist in the list of valid Item Attribute Values for Item {2}").format(
attribute_value, attribute, item), InvalidItemAttributeValueError, title=_('Invalid Attribute'))
def get_attribute_values():
if not frappe.flags.attribute_values:
attribute_values = {}
numeric_values = {}
for t in frappe.get_all("Item Attribute Value", fields=["parent", "attribute_value"]):
attribute_values.setdefault(t.parent.lower(), []).append(t.attribute_value)
for t in frappe.get_all('Item Attribute',
fields=["name", "from_range", "to_range", "increment"], filters={'numeric_values': 1}):
numeric_values[t.name.lower()] = t
frappe.flags.attribute_values = attribute_values
frappe.flags.numeric_values = numeric_values
return frappe.flags.attribute_values, frappe.flags.numeric_values
def find_variant(template, args, variant_item_code=None):
conditions = ["""(iv_attribute.attribute="{0}" and iv_attribute.attribute_value="{1}")"""\
.format(frappe.db.escape(key), frappe.db.escape(cstr(value))) for key, value in args.items()]
conditions = " or ".join(conditions)
# use approximate match and shortlist possible variant matches
# it is approximate because we are matching using OR condition
# and it need not be exact match at this stage
# this uses a simpler query instead of using multiple exists conditions
possible_variants = frappe.db.sql_list("""select name from `tabItem` item
where variant_of=%s and exists (
select name from `tabItem Variant Attribute` iv_attribute
where iv_attribute.parent=item.name
and ({conditions})
)""".format(conditions=conditions), template)
for variant in possible_variants:
variant = frappe.get_doc("Item", variant)
if len(args.keys()) == len(variant.get("attributes")):
# has the same number of attributes and values
# assuming no duplication as per the validation in Item
match_count = 0
for attribute, value in args.items():
for row in variant.attributes:
if row.attribute == attribute and row.attribute_value == cstr(value):
# this row matches
match_count += 1
break
if match_count == len(args.keys()):
return variant.name
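# Editor's note (hypothetical values): find_variant("T-Shirt",
# {"Colour": "Red", "Size": "L"}) first shortlists variants having *any*
# matching attribute row via the OR query above, then returns the one whose
# attribute rows match the args exactly (same count, same values).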
@frappe.whitelist()
def create_variant(item, args):
if isinstance(args, basestring):
args = json.loads(args)
total_projected_qty = frappe.db.sql("""select ifnull(sum(projected_qty),0) as total_projected_qty from tabBin
where item_code=%s""", item)[0][0]
if total_projected_qty != 0:
frappe.throw(_("Item stock must be zero to generate variants"))
template = frappe.get_doc("Item", item)
variant = frappe.new_doc("Item")
variant.variant_based_on = 'Item Attribute'
variant_attributes = []
for d in template.attributes:
variant_attributes.append({
"attribute": d.attribute,
"attribute_value": args.get(d.attribute)
})
variant.set("attributes", variant_attributes)
copy_attributes_to_variant(template, variant)
make_variant_item_code(template.item_code, template.item_name, variant)
variant.set("main_title", '{} - {}'.format(variant.item_code, variant.item_name))
return variant
@frappe.whitelist()
def create_all_variants(item, attributes):
"""creates variant for each combination of attributes of a template"""
template = frappe.get_doc("Item", item)
# calculates total projected qty of template to know if item was previously used in a transaction
total_projected_qty = frappe.db.sql("""select ifnull(sum(projected_qty),0) as total_projected_qty from tabBin
where item_code=%s""", item)[0][0]
if total_projected_qty != 0:
frappe.throw(_("Item stock must be zero to generate variants"))
attributes_values = json.loads(attributes)
variants = []
for combination in itertools.combinations(itertools.chain(*attributes_values), len(attributes_values)):
if is_valid_combination(combination, attributes_values):
variants.append(combination)
for variant_attributes in make_variant_dict(template, variants):
variant = frappe.new_doc("Item")
variant.variant_based_on = 'Item Attribute'
variant.set("attributes", variant_attributes)
copy_attributes_to_variant(template, variant)
make_variant_item_code(template.item_code, template.item_name, variant)
variant.set("main_title", '{} - {}'.format(variant.item_code, variant.item_name))
try:
variant.save()
except ItemVariantExistsError:
# the goal is to continue generating variants
pass
def is_valid_combination(combination, attributes_values):
for i in range(len(combination)):
if not combination[i] in attributes_values[i]:
return False
return True
def make_variant_dict(template, variants):
variants_dicts = []
for variant in variants:
variant_dict = []
for i in range(len(template.attributes)):
variant_dict.append({"attribute": template.attributes[i].attribute, "attribute_value": variant[i]})
variants_dicts.append(variant_dict)
return variants_dicts
def copy_attributes_to_variant(item, variant):
from frappe.model import no_value_fields
# copy non no-copy fields
exclude_fields = ["item_code", "item_name", "show_in_website", "main_title", "total_projected_qty"]
if item.variant_based_on=='Manufacturer':
# don't copy manufacturer values if based on part no
exclude_fields += ['manufacturer', 'manufacturer_part_no']
for field in item.meta.fields:
# "Table" is part of `no_value_field` but we shouldn't ignore tables
if (field.fieldtype == 'Table' or field.fieldtype not in no_value_fields) \
and (not field.no_copy) and field.fieldname not in exclude_fields:
if variant.get(field.fieldname) != item.get(field.fieldname):
variant.set(field.fieldname, item.get(field.fieldname))
variant.variant_of = item.name
variant.has_variants = 0
if not variant.description:
variant.description = ''
if item.variant_based_on=='Item Attribute':
if variant.attributes:
variant.description += "\n"
for d in variant.attributes:
variant.description += "<p>" + d.attribute + ": " + cstr(d.attribute_value) + "</p>"
def make_variant_item_code(template_item_code, template_item_name, variant):
"""Uses template's item code and abbreviations to make variant's item code"""
if variant.item_code:
return
abbreviations = []
for attr in variant.attributes:
item_attribute = frappe.db.sql("""select i.numeric_values, v.abbr
from `tabItem Attribute` i left join `tabItem Attribute Value` v
on (i.name=v.parent)
where i.name=%(attribute)s and (v.attribute_value=%(attribute_value)s or i.numeric_values = 1)""", {
"attribute": attr.attribute,
"attribute_value": attr.attribute_value
}, as_dict=True)
if not item_attribute:
return
# frappe.throw(_('Invalid attribute {0} {1}').format(frappe.bold(attr.attribute),
# frappe.bold(attr.attribute_value)), title=_('Invalid Attribute'),
# exc=InvalidItemAttributeValueError)
abbr_or_value = cstr(attr.attribute_value) if item_attribute[0].numeric_values else item_attribute[0].abbr
abbreviations.append(abbr_or_value)
if abbreviations:
variant.item_code = "{0}-{1}".format(template_item_code, "-".join(abbreviations))
variant.item_name = "{0}-{1}".format(template_item_name, "-".join(abbreviations))
|
bazz-erp/erpnext
|
erpnext/controllers/item_variant.py
|
Python
|
gpl-3.0
| 11,820
|
if 'imported' in locals():
import imp
import bpy
imp.reload(blendergltf)
else:
imported = True
import blendergltf
import json
import math
import bpy
def togl(matrix):
return [i for col in matrix.col for i in col]
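# Editor's note: togl flattens a Blender Matrix in column-major order, as
# OpenGL/glTF expect. For a (hypothetical) Matrix(((1, 2), (3, 4))) the
# columns are (1, 3) and (2, 4), so togl(m) == [1, 3, 2, 4].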
class BTFConverter:
def __init__(self, gltf_settings=None):
if gltf_settings is None:
available_extensions = blendergltf.extension_exporters
gltf_settings = {
'images_data_storage': 'REFERENCE',
'nodes_export_hidden': True,
'images_allow_srgb': True,
'asset_profile': 'DESKTOP',
'asset_version': '1.0',
'hacks_streaming': True,
'meshes_apply_modifiers': False, # Cannot be done in a thread
'extension_exporters': [
available_extensions.khr_materials_common.KhrMaterialsCommon(),
available_extensions.blender_physics.BlenderPhysics(),
],
}
self.gltf_settings = gltf_settings
def convert(self, add_delta, update_delta, remove_delta, view_delta):
for key, value in update_delta.items():
if value:
add_delta[key] = value
data = blendergltf.export_gltf(add_delta, self.gltf_settings)
if view_delta:
self.export_view(view_delta, data)
return data
def export_view(self, view_delta, gltf):
if 'extras' not in gltf:
gltf['extras'] = {}
gltf['extras']['view'] = {}
if 'viewport' in view_delta:
gltf['extras']['view'] = {
'width' : view_delta['viewport'].width,
'height' : view_delta['viewport'].height,
}
if 'projection_matrix' in view_delta:
gltf['extras']['view']['projection_matrix'] = togl(view_delta['projection_matrix'])
if 'view_matrix' in view_delta:
gltf['extras']['view']['view_matrix'] = togl(view_delta['view_matrix'])
|
Kupoman/BlenderRealtimeEngineAddon
|
brte/converters/btf.py
|
Python
|
mit
| 2,024
|
"""
WSGI config for jsonform_example project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jsonform_example.settings")
application = get_wsgi_application()
|
carlmjohnson/django-json-form
|
jsonform_example/wsgi.py
|
Python
|
mit
| 409
|
from django import forms
from members.models import Member
class Registration(forms.Form):
given_name = forms.CharField()
class ChoiceLeader(forms.Form):
choice = forms.ModelChoiceField(queryset=Member.objects.none(), widget=forms.RadioSelect)
def __init__(self, *args, **kwargs):
user = kwargs.pop('user', None)
super(ChoiceLeader, self).__init__(*args, **kwargs)
if user:
m = Member.objects.get(id=user.id)
team_members = Member.objects.filter(team=m.team).filter(is_participant=True)
print "team_membersteam_membersteam_members", team_members
self.fields['choice'] = forms.ModelChoiceField(queryset=team_members,
widget=forms.RadioSelect,
empty_label=None)
|
venetay/Photo-Competition
|
members/forms.py
|
Python
|
mit
| 868
|
"""Test fixtures for Tasmota component."""
from unittest.mock import patch
from hatasmota.discovery import get_status_sensor_entities
import pytest
from homeassistant import config_entries
from homeassistant.components.tasmota.const import (
CONF_DISCOVERY_PREFIX,
DEFAULT_PREFIX,
DOMAIN,
)
from tests.common import (
MockConfigEntry,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.light.conftest import mock_light_profiles # noqa
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def disable_debounce():
"""Set MQTT debounce timer to zero."""
with patch("hatasmota.mqtt.DEBOUNCE_TIMEOUT", 0):
yield
@pytest.fixture
def status_sensor_disabled():
"""Fixture to allow overriding MQTT config."""
return True
@pytest.fixture(autouse=True)
def disable_status_sensor(status_sensor_disabled):
"""Disable Tasmota status sensor."""
wraps = None if status_sensor_disabled else get_status_sensor_entities
with patch("hatasmota.discovery.get_status_sensor_entities", wraps=wraps):
yield
async def setup_tasmota_helper(hass):
"""Set up Tasmota."""
hass.config.components.add("tasmota")
entry = MockConfigEntry(
connection_class=config_entries.CONN_CLASS_LOCAL_PUSH,
data={CONF_DISCOVERY_PREFIX: DEFAULT_PREFIX},
domain=DOMAIN,
title="Tasmota",
)
entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert "tasmota" in hass.config.components
@pytest.fixture
async def setup_tasmota(hass):
"""Set up Tasmota."""
await setup_tasmota_helper(hass)
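# Illustrative example, not part of the original fixtures: a test module
# would consume the ``setup_tasmota`` fixture like this (kept as a comment
# because test functions do not belong in conftest.py):
#     async def test_tasmota_loaded(hass, setup_tasmota):
#         assert "tasmota" in hass.config.components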
|
turbokongen/home-assistant
|
tests/components/tasmota/conftest.py
|
Python
|
apache-2.0
| 2,047
|
"""
IO for NURBS and Bezier extraction data.
"""
from __future__ import absolute_import
import numpy as nm
import six
from six.moves import range
from sfepy.base.ioutils import HDF5ContextManager, enc, dec
def write_iga_data(filename, group, knots, degrees, control_points, weights,
cs, conn, bezier_control_points, bezier_weights, bezier_conn,
regions, name=None):
"""
Write IGA-related data into a HDF5 file using pytables.
filename: str or tables.File
File to read the hdf5 mesh to.
group: tables.group.Group, optional
HDF5 file group to read the data from.
If None, the root of file is used.
Returns
-------
tuple
Data for restoring IGA domain.
"""
    with HDF5ContextManager(filename, mode='w',
title='SfePy IGA data file') as fd:
if group is None:
group = fd.root
if isinstance(degrees, int): degrees = [degrees]
degrees = nm.asarray(degrees)
nurbs = fd.create_group(group, 'nurbs', 'nurbs')
fd.create_array(nurbs, 'dim', control_points.shape[1], 'dim')
fd.create_array(nurbs, 'tdim', len(degrees), 'tdim')
for ii, kv in enumerate(knots):
key = 'knots_%d' % ii
fd.create_array(nurbs, key, kv, key)
fd.create_array(nurbs, 'degrees', degrees, 'degrees')
fd.create_array(nurbs, 'control_points', control_points,
'control_points')
fd.create_array(nurbs, 'weights', weights, 'weights')
bezier = fd.create_group(group, 'bezier', 'bezier')
fd.create_array(bezier, 'bezier_control_points', bezier_control_points,
'bezier_control_points')
fd.create_array(bezier, 'bezier_weights', bezier_weights,
'bezier_weights')
for ii, op in enumerate(cs):
key = 'extraction_%d' % ii
fd.create_array(bezier, key, op, key)
fd.create_array(bezier, 'global_connectivity', conn,
'global_connectivity')
fd.create_array(bezier, 'bezier_connectivity', bezier_conn,
'bezier_connectivity')
regs = fd.create_group(group, 'regions', 'regions')
for key, val in six.iteritems(regions):
fd.create_array(regs, key, val, key)
if name is not None:
            fd.create_array(group, 'name', nm.array(enc(name)))
def read_iga_data(filename, group=None):
"""
Read IGA-related data from a HDF5 file using pytables.
filename: str or tables.File
File to read the hdf5 mesh to.
group: tables.group.Group or None
HDF5 file group to read the mesh from.
If it's None, the root of file is used.
Returns
-------
tuple
Data for restoring IGA domain.
"""
with HDF5ContextManager(filename, 'r') as fd:
if group is None:
group = fd.root
nurbs = group.nurbs
tdim = nurbs.tdim.read()
knots = []
for ii in range(tdim):
name = 'knots_%d' % ii
knots.append(nurbs._f_get_child(name).read())
knots = tuple(knots)
degrees = nurbs.degrees.read()
control_points = nurbs.control_points.read()
weights = nurbs.weights.read()
bezier = group.bezier
cs = []
for ii in range(tdim):
name = 'extraction_%d' % ii
cs.append(bezier._f_get_child(name).read())
conn = bezier.global_connectivity.read()
bezier_control_points = bezier.bezier_control_points.read()
bezier_weights = bezier.bezier_weights.read()
bezier_conn = bezier.bezier_connectivity.read()
regions = {}
for region in group.regions:
regions[region.name] = region.read()
out = (knots, degrees, control_points, weights, cs, conn,
bezier_control_points, bezier_weights, bezier_conn, regions)
if hasattr(group, 'name'):
            out = out + (dec(group.name.read().item()),)
return out
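def _example_roundtrip(filename, domain_data):
    """Illustrative sketch, not part of the original module: write IGA data
    and read it back. ``domain_data`` is assumed to be the 10 positional
    arguments of write_iga_data() after ``group``, in the same order."""
    write_iga_data(filename, None, *domain_data, name='domain')
    return read_iga_data(filename)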
|
lokik/sfepy
|
sfepy/discrete/iga/io.py
|
Python
|
bsd-3-clause
| 4,107
|
from txpostgres import txpostgres
from twisted.internet import reactor
from twisted.python import log, util
# define the libpq connection string and the query to use
connstr = 'dbname=postgres'
query = 'select tablename from pg_tables order by tablename'
# connect to the database
conn = txpostgres.Connection()
d = conn.connect(connstr)
def useCursor(cur):
# execute a query
d = cur.execute(query)
# fetch the first row from the result
d.addCallback(lambda _: cur.fetchone())
# output it
d.addCallback(lambda result: util.println('First table name:', result[0]))
# and close the cursor
return d.addCallback(lambda _: cur.close())
# create a cursor and use it
d.addCallback(lambda _: conn.cursor())
d.addCallback(useCursor)
# log any errors and stop the reactor
d.addErrback(log.err)
d.addBoth(lambda _: reactor.stop())
# start the reactor to kick off connection establishing
reactor.run()
|
wulczer/txpostgres
|
doc/cursor_example.py
|
Python
|
mit
| 941
|
from ddt import data, ddt
from rest_framework import status, test
from waldur_core.structure.tests import factories, fixtures
@ddt
class DivisionListTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.UserFixture()
self.division_1 = factories.DivisionFactory()
self.division_2 = factories.DivisionFactory()
self.url = factories.DivisionFactory.get_list_url()
@data('staff', 'user', None)
def test_user_can_list_divisions(self, user):
if user:
self.client.force_authenticate(user=getattr(self.fixture, user))
response = self.client.get(self.url)
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(len(response.data), 2)
def test_list_filters(self):
"""Test of divisions' list filter by name, type and parent UUID."""
division_parent = factories.DivisionFactory()
self.division_1.parent = division_parent
self.division_1.save()
rows = [
{'name': 'name', 'valid': self.division_1.name[2:], 'invalid': 'AAA'},
{
'name': 'name_exact',
'valid': self.division_1.name,
'invalid': self.division_1.name[2:],
},
{
'name': 'type',
'valid': self.division_1.type.name,
'invalid': self.division_1.type.name[2:],
},
{
'name': 'parent',
'valid': division_parent.uuid.hex,
'invalid': division_parent.uuid.hex[2:],
},
]
self.client.force_authenticate(user=self.fixture.staff)
for row in rows:
response = self.client.get(self.url, data={row['name']: row['valid']})
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(len(response.data), 1)
response = self.client.get(self.url, data={row['name']: row['invalid']})
if row['name'] == 'parent':
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
else:
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(len(response.data), 0)
@ddt
class DivisionChangeTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.CustomerFixture()
self.division_1 = factories.DivisionFactory()
self.division_2 = factories.DivisionFactory()
self.fixture.customer.division = self.division_1
self.fixture.customer.save()
self.url = factories.CustomerFactory.get_url(self.fixture.customer)
@data('staff',)
def test_staff_can_change_customer_division(self, user):
self.client.force_authenticate(user=getattr(self.fixture, user))
new_division_url = factories.DivisionFactory.get_url(self.division_2)
response = self.client.patch(self.url, {'division': new_division_url})
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.fixture.customer.refresh_from_db()
self.assertEqual(self.fixture.customer.division, self.division_2)
@data('owner',)
def test_other_can_not_change_customer_division(self, user):
self.client.force_authenticate(user=getattr(self.fixture, user))
new_division_url = factories.DivisionFactory.get_url(self.division_2)
response = self.client.patch(self.url, {'division': new_division_url})
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
@ddt
class DivisionTypeListTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.UserFixture()
self.type_1 = factories.DivisionTypeFactory()
self.type_2 = factories.DivisionTypeFactory()
self.url = factories.DivisionTypeFactory.get_list_url()
@data('staff', 'user', None)
def test_user_can_list_division_types(self, user):
if user:
self.client.force_authenticate(user=getattr(self.fixture, user))
response = self.client.get(self.url)
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(len(response.data), 2)
def test_list_filters(self):
rows = [
{'name': 'name', 'valid': self.type_1.name[2:], 'invalid': 'AAA'},
{
'name': 'name_exact',
'valid': self.type_1.name,
'invalid': self.type_1.name[2:],
},
]
self.client.force_authenticate(user=self.fixture.staff)
for row in rows:
response = self.client.get(self.url, data={row['name']: row['valid']})
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(len(response.data), 1)
response = self.client.get(self.url, data={row['name']: row['invalid']})
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(len(response.data), 0)
|
opennode/nodeconductor-assembly-waldur
|
src/waldur_core/structure/tests/test_division.py
|
Python
|
mit
| 5,033
|
"""Template tags for dealing with podcast channels."""
from django import template
from uryplayer.models import PodcastChannel
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter
@stringfilter
def channel_singular(channel_name):
"""Replaces an internal channel name with the channel's
singular noun metadatum.
"""
return PodcastChannel.get(channel_name).singular
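# Usage sketch, not part of the original module, in a Django template;
# the ``{% load %}`` name is assumed to match this module's file name:
#     {% load podcast_channel_tags %}
#     {{ "speech"|channel_singular }}
# where "speech" stands in for a real internal channel name.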
|
CaptainHayashi/lass
|
website/templatetags/podcast_channel_tags.py
|
Python
|
gpl-2.0
| 436
|
import re
import sys
import imp
import time
import logging
import datetime
import multiprocessing
from django.conf import settings
from django.core.management import call_command
from . import app_settings
from .task import task
from .utils import set_process_title, get_backend, configure_logging
class CronScheduler(multiprocessing.Process):
def __init__(self, running, log_level, log_filename):
self.running = running
self.log_level = log_level
self.log_filename = log_filename
# Logfiles must be opened in child process
self.log = None
self.config = get_config()
super(CronScheduler, self).__init__()
def run(self):
set_process_title("Cron scheduler process")
self.log = logging.getLogger()
for x in self.log.handlers:
self.log.removeHandler(x)
configure_logging(
level=self.log_level,
format='%(asctime)-15s %(process)d cron_scheduler %(levelname).1s: '
'%(message)s',
filename=self.log_filename,
)
self.log.debug("Starting")
backend = get_backend()
self.log.info("Loaded backend %s", backend)
while self.running.value:
try:
self.tick(backend)
# Sleep until the next second boundary. This corrects for skew
# caused by the accumulation of tick() runtime.
time.sleep((1 - time.time() % 1))
except KeyboardInterrupt:
sys.exit(1)
self.log.info("Exiting")
def tick(self, backend):
self.log.debug("tick()")
t = datetime.datetime.utcnow()
# Run once per minute
if t.second != 0:
return
for row in self.config:
if not (
row['hour_matcher'](t.hour) and
row['min_matcher'](t.minute) and
row['day_matcher'](t.isoweekday())
):
continue
self.log.info("Enqueueing %s", row['command'])
execute(
row['command'],
django_lightweight_queue_queue=row['queue'],
django_lightweight_queue_timeout=row['timeout'],
django_lightweight_queue_sigkill_on_stop=row['sigkill_on_stop'],
*row.get('command_args', []),
**row.get('command_kwargs', {})
)
self.log.debug("Enqueued %s", row)
def get_config():
config = []
def get_matcher(minval, maxval, t):
if t == '*':
return lambda x: True
parts = re.split(r'\s*,\s*', t)
if not parts:
return
t_parts = [int(x) for x in parts]
for num in t_parts:
assert num >= minval and num <= maxval, \
"Invalid time specified in cron config. " \
"Specified: %s, minval: %s, maxval: %s" % (
num,
minval,
maxval,
)
return lambda x: x in t_parts
for app in settings.INSTALLED_APPS:
try:
app_path = __import__(app, {}, {}, [app.split('.')[-1]]).__path__
except AttributeError:
continue
try:
imp.find_module('cron', app_path)
except ImportError:
continue
mod = __import__('%s.cron' % app, fromlist=(app,))
for row in mod.CONFIG:
row['min_matcher'] = get_matcher(0, 59, row.get('minutes'))
row['hour_matcher'] = get_matcher(0, 23, row.get('hours'))
row['day_matcher'] = get_matcher(1, 7, row.get('days', '*'))
row['queue'] = row.get('queue', 'cron')
row['timeout'] = row.get('timeout', None)
row['sigkill_on_stop'] = row.get('sigkill_on_stop', False)
config.append(row)
# We must ensure we have at least one worker for this queue.
app_settings.WORKERS.setdefault(row['queue'], 1)
return config
@task()
def execute(name, *args, **kwargs):
call_command(name, *args, **kwargs)
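# Illustrative example, not part of the original module: the shape of a
# per-app ``cron.py`` CONFIG that get_config() above consumes. Every value
# below is made up; only keys read by get_config() are shown.
EXAMPLE_CONFIG = [
    {
        'command': 'clearsessions',  # hypothetical management command name
        'hours': '*',                # any hour
        'minutes': '0,30',           # at :00 and :30
        'days': '*',                 # any ISO weekday (1-7); defaults to '*'
        'queue': 'cron',             # defaulted to 'cron' if omitted
        'timeout': 60,               # seconds; defaults to None
        'sigkill_on_stop': False,    # defaults to False
    },
]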
|
lamby/django-lightweight-queue
|
django_lightweight_queue/cron_scheduler.py
|
Python
|
bsd-3-clause
| 4,123
|
#!/usr/bin/env python
#
# test_ldp_oc_acl_topo1.py
# Part of NetDEF Topology Tests
#
# Copyright (c) 2020 by Volta Networks
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
test_ldp_oc_acl_topo1.py: Simple FRR LDP Test
+---------+
| r1 |
| 1.1.1.1 |
+----+----+
| .1 r1-eth0
|
~~~~~~~~~~~~~
~~ sw0 ~~
~~ 10.0.1.0/24 ~~
~~~~~~~~~~~~~
|10.0.1.0/24
|
| .2 r2-eth0
+----+----+
| r2 |
| 2.2.2.2 |
+--+---+--+
r2-eth2 .2 | | .2 r2-eth1
______/ \______
/ \
~~~~~~~~~~~~~ ~~~~~~~~~~~~~
~~ sw2 ~~ ~~ sw1 ~~
~~ 10.0.3.0/24 ~~ ~~ 10.0.2.0/24 ~~
~~~~~~~~~~~~~ ~~~~~~~~~~~~~
| / |
\ _________/ |
\ / \
r3-eth1 .3 | | .3 r3-eth0 | .4 r4-eth0
+----+--+---+ +----+----+
| r3 | | r4 |
| 3.3.3.3 | | 4.4.4.4 |
+-----------+ +---------+
"""
import os
import sys
import pytest
import json
from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
# Required to instantiate the topology builder class.
pytestmark = [pytest.mark.ldpd, pytest.mark.ospfd]
def build_topo(tgen):
"Build function"
#
# Define FRR Routers
#
for router in ["r1", "r2", "r3", "r4"]:
tgen.add_router(router)
#
# Define connections
#
switch = tgen.add_switch("s0")
switch.add_link(tgen.gears["r1"])
switch.add_link(tgen.gears["r2"])
switch = tgen.add_switch("s1")
switch.add_link(tgen.gears["r2"])
switch.add_link(tgen.gears["r3"])
switch.add_link(tgen.gears["r4"])
switch = tgen.add_switch("s2")
switch.add_link(tgen.gears["r2"])
switch.add_link(tgen.gears["r3"])
def setup_module(mod):
"Sets up the pytest environment"
tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
# For all registered routers, load the zebra configuration file
for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
        # All routers in this topology run ospfd and ldpd (there are no CE nodes)
if router.name[0] == "r":
router.load_config(
TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
)
router.load_config(
TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname))
)
tgen.start_router()
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
# This function tears down the whole topology.
tgen.stop_topology()
def router_compare_json_output(rname, command, reference):
"Compare router JSON output"
logger.info('Comparing router "%s" "%s" output', rname, command)
tgen = get_topogen()
filename = "{}/{}/{}".format(CWD, rname, reference)
expected = json.loads(open(filename).read())
    # Run test function until we get a result. Wait at most 80 seconds.
test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
_, diff = topotest.run_and_expect(test_func, None, count=160, wait=0.5)
assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
assert diff is None, assertmsg
def test_ospf_convergence():
logger.info("Test: check OSPF adjacencies")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3", "r4"]:
router_compare_json_output(
rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json"
)
def test_rib():
logger.info("Test: verify RIB")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3", "r4"]:
router_compare_json_output(rname, "show ip route json", "show_ip_route.ref")
def test_ldp_adjacencies():
logger.info("Test: verify LDP adjacencies")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3", "r4"]:
router_compare_json_output(
rname, "show mpls ldp discovery json", "show_ldp_discovery.ref"
)
def test_ldp_neighbors():
logger.info("Test: verify LDP neighbors")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3", "r4"]:
router_compare_json_output(
rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref"
)
def test_ldp_bindings():
logger.info("Test: verify LDP bindings")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3", "r4"]:
router_compare_json_output(
rname, "show mpls ldp binding json", "show_ldp_binding.ref"
)
def test_ldp_bindings_all_routes():
logger.info("Test: verify LDP bindings after host filter removed")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
    # Remove the ACL that blocks advertising everything but host routes
cmd = 'vtysh -c "configure terminal" -c "mpls ldp" -c "address-family ipv4" -c "no label local allocate host-routes"'
tgen.net["r1"].cmd(cmd)
sleep(2)
for rname in ["r1", "r2", "r3", "r4"]:
router_compare_json_output(
rname, "show mpls ldp binding json", "show_ldp_all_binding.ref"
)
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
|
freerangerouting/frr
|
tests/topotests/ldp_oc_acl_topo1/test_ldp_oc_acl_topo1.py
|
Python
|
gpl-2.0
| 7,507
|