repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
0k/OpenUpgrade | addons/portal_gamification/__openerp__.py | 381 | 1571 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Portal Gamification',
    'version': '1',
    # NOTE: the original manifest listed 'category' twice ('Tools' then
    # 'Hidden'); the later duplicate key won, so 'Hidden' is the effective
    # value and is the one kept here.
    'category': 'Hidden',
    'complexity': 'easy',
    'description': """
This module adds security rules for gamification to allow portal users to participate to challenges
===================================================================================================
""",
    'author': 'OpenERP SA',
    'depends': ['gamification', 'portal'],
    # data files loaded on module installation (access rights + record rules)
    'data': [
        'security/ir.model.access.csv',
        'security/portal_security.xml',
    ],
    'installable': True,
    # auto-install as soon as both dependencies are installed
    'auto_install': True,
}
| agpl-3.0 |
archen/django | django/conf/locale/tr/formats.py | 82 | 1147 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'd F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'd F'
SHORT_DATE_FORMAT = 'd M Y'
SHORT_DATETIME_FORMAT = 'd M Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday ("Pazartesi")

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y',  # '25/10/2006', '25/10/06'
    '%y-%m-%d',              # '06-10-25'
    # '%d %B %Y', '%d %b. %Y',  # '25 Ekim 2006', '25 Eki. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
)
# Turkish number formatting: comma as decimal mark, dot for thousands,
# digits grouped in threes.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
jonycgn/scipy | scipy/linalg/tests/test_decomp_polar.py | 126 | 2797 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_, assert_allclose, assert_equal,
run_module_suite)
from scipy.linalg import polar, eigh
# Small fixed matrices reused by the precomputed cases below.
diag2 = np.array([[2, 0], [0, 3]])
a13 = np.array([[1, 2, 2]])

# Each entry is [a, side, expected_u, expected_p] such that
# a == u.dot(p) for side='right' and a == p.dot(u) for side='left'.
precomputed_cases = [
    [[[0]], 'right', [[1]], [[0]]],
    [[[0]], 'left', [[1]], [[0]]],
    [[[9]], 'right', [[1]], [[9]]],
    [[[9]], 'left', [[1]], [[9]]],
    [diag2, 'right', np.eye(2), diag2],
    [diag2, 'left', np.eye(2), diag2],
    [a13, 'right', a13/norm(a13[0]), a13.T.dot(a13)/norm(a13[0])],
]

# Matrices (square, wide, tall; real and complex) whose polar decomposition
# is verified only through its defining properties, not against fixed values.
verify_cases = [
    [[1, 2], [3, 4]],
    [[1, 2, 3]],
    [[1], [2], [3]],
    [[1, 2, 3], [3, 4, 0]],
    [[1, 2], [3, 4], [5, 5]],
    [[1, 2], [3, 4+5j]],
    [[1, 2, 3j]],
    [[1], [2], [3j]],
    [[1, 2, 3+2j], [3, 4-1j, -4j]],
    [[1, 2], [3-2j, 4+0.5j], [5, 5]],
    [[10000, 10, 1], [-1, 2, 3j], [0, 1, 2]],
]
def check_precomputed_polar(a, side, expected_u, expected_p):
    """Compare polar(a, side) against a precomputed decomposition."""
    u_actual, p_actual = polar(a, side=side)
    for actual, expected in ((u_actual, expected_u), (p_actual, expected_p)):
        assert_allclose(actual, expected, atol=1e-15)
def _check_isometry(u, m, n):
    # u must have orthonormal columns when tall (m >= n), orthonormal
    # rows when wide (m < n).
    if m >= n:
        assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
    else:
        assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)


def _check_herm_psd(p):
    # p must be Hermitian positive semidefinite.
    assert_allclose(p.conj().T, p)
    evals = eigh(p, eigvals_only=True)
    nonzero_evals = evals[abs(evals) > 1e-14]
    assert_((nonzero_evals >= 0).all())


def verify_polar(a):
    """Compute both polar decompositions of *a* and verify that each
    result has all the expected properties (shapes, reconstruction,
    isometry of u, Hermitian PSD p).

    The original right/left verification code was duplicated verbatim;
    the shared checks now live in _check_isometry / _check_herm_psd.
    """
    product_atol = np.sqrt(np.finfo(float).eps)

    aa = np.asarray(a)
    m, n = aa.shape

    u, p = polar(a, side='right')
    assert_equal(u.shape, (m, n))
    assert_equal(p.shape, (n, n))
    # a = up
    assert_allclose(u.dot(p), a, atol=product_atol)
    _check_isometry(u, m, n)
    _check_herm_psd(p)

    u, p = polar(a, side='left')
    assert_equal(u.shape, (m, n))
    assert_equal(p.shape, (m, m))
    # a = pu
    assert_allclose(p.dot(u), a, atol=product_atol)
    _check_isometry(u, m, n)
    _check_herm_psd(p)
def test_precomputed_cases():
    # nose-style generator test: yields one comparison per precomputed case.
    for a, side, expected_u, expected_p in precomputed_cases:
        yield check_precomputed_polar, a, side, expected_u, expected_p


def test_verify_cases():
    # nose-style generator test: property-check the decomposition of each
    # matrix in verify_cases.
    for a in verify_cases:
        yield verify_polar, a


if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
erikdejonge/youtube-dl | test/test_youtube_lists.py | 19 | 2532 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.extractor import (
YoutubePlaylistIE,
YoutubeIE,
)
class TestYoutubeLists(unittest.TestCase):
    """Integration tests for the YouTube playlist extractor.

    NOTE(review): these tests hit the live YouTube service, so they depend
    on the referenced videos/playlists still existing and keeping their
    current content.
    """

    def assertIsPlaylist(self, info):
        """Make sure the info has '_type' set to 'playlist'"""
        self.assertEqual(info['_type'], 'playlist')

    def test_youtube_playlist_noplaylist(self):
        # With 'noplaylist' set, a watch URL that also carries a playlist id
        # must resolve to the single video, not the whole playlist.
        dl = FakeYDL()
        dl.params['noplaylist'] = True
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
        self.assertEqual(result['_type'], 'url')
        self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')

    def test_youtube_course(self):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        # TODO find a > 100 (paginating?) videos course
        result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
        entries = list(result['entries'])
        # first entry, total count and last entry pin down the course content
        self.assertEqual(YoutubeIE().extract_id(entries[0]['url']), 'j9WZyLZCBzs')
        self.assertEqual(len(entries), 25)
        self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')

    def test_youtube_mix(self):
        # Auto-generated mixes vary in length, so only a lower bound is checked.
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=W01L70IGBgE&index=2&list=RDOQpdSVF_k_w')
        entries = result['entries']
        self.assertTrue(len(entries) >= 50)
        original_video = entries[0]
        self.assertEqual(original_video['id'], 'OQpdSVF_k_w')

    def test_youtube_toptracks(self):
        # Disabled: the service currently rejects this playlist page; the
        # early return deliberately skips the rest of the test.
        print('Skipping: The playlist page gives error 500')
        return
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=MCUS')
        entries = result['entries']
        self.assertEqual(len(entries), 100)

    def test_youtube_flat_playlist_titles(self):
        # In flat-extraction mode every entry must still carry a title.
        dl = FakeYDL()
        dl.params['extract_flat'] = True
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=PL-KKIb8rvtMSrAO9YFbeM6UQrAqoFTUWv')
        self.assertIsPlaylist(result)
        for entry in result['entries']:
            self.assertTrue(entry.get('title'))


if __name__ == '__main__':
    unittest.main()
| unlicense |
cronuspaas/cronusagent | agent/agent/controllers/agentaction.py | 1 | 10321 | #pylint: disable=W0703,W0141,W0105
""" agent actions """
import json
import logging
import os
from pylons import request, response, config
import pylons
import signal
import traceback
from agent.lib import manifestutil, configutil, utils
from agent.lib.agent_globals import stopAgentGlobals
from agent.lib.agent_thread.agent_update import AgentUpdate
from agent.lib.agent_thread.safe_shutdown import SafeShutdown
from agent.lib.base import BaseController
from agent.lib.errors import Errors, AgentException
from agent.lib.result import statusResult, errorResult, doneResult
from agent.lib.security.agentauth import authorize
from agent.lib.utils import trackable
import gc
import cPickle
import sys
from pympler.asizeof import asizeof
from agent.lib.packagemgr import PackageMgr
LOG = logging.getLogger(__name__)
class AgentactionController(BaseController):
    """ Action Controller class. Responsible for all actions of a service/agent """

    @authorize()
    @trackable()
    def shutdown(self):
        """ This controller shutdown the agent.
        This should not be exposed to external usage.
        This should only be used for testing and also for self update.
        """
        LOG.info('[AGENT_SUICIDE] shutdown called. exiting the agent. This is an expected behavior when rest api shutdown is called. ')
        stopAgentGlobals()
        pid = os.getpid()
        # SIGKILL does not exist on Windows; fall back to SIGTERM there
        if (hasattr(signal, 'SIGKILL')):
            os.kill(pid, signal.SIGKILL)
        else:
            os.kill(pid, signal.SIGTERM)

    @authorize()
    @trackable()
    def safeshutdown(self):
        """ Shut the agent down gracefully via a SafeShutdown thread so
        in-flight work managed by the thread manager can drain first.
        This should not be exposed to external usage.
        This should only be used for testing and also for self update.
        """
        LOG.info('[AGENT_SUICIDE] safe shutdown called. exiting the agent. This is an expected behavior when rest api shutdown is called. ')
        try:
            appGlobal = config['pylons.app_globals']
            shutdownThread = SafeShutdown(appGlobal.threadMgr)
            self.injectJobCtx(shutdownThread)
            shutdownThread.start()
            # wait until the thread has registered with the thread manager
            shutdownThread.threadMgrEvent.wait()
            return statusResult(request, response, shutdownThread, controller = self)
        except Exception as excep:
            msg = 'Unknown error for safeshutdown %s - %s' % (str(excep), traceback.format_exc(2))
            return errorResult(request, response, error = Errors.UNKNOWN_ERROR,
                               errorMsg = msg, controller = self)

    @authorize()
    @trackable()
    def reloadmonitors(self):
        """ reload all monitors.
        """
        appGlobal = pylons.config['pylons.app_globals']
        appGlobal.agentMonitor.reloadMonitors()
        return doneResult(request, response, controller = self)

    @authorize()
    @trackable()
    def selfupdate(self):
        """ agent selfupdate through api

        Request body (JSON): {"version": <required>, "wisbSource": <optional,
        defaults to the 'selfupdate_source' config value>}.
        """
        LOG.info('selfupdate agent with body: %s', request.body)
        try:
            appGlobal = config['pylons.app_globals']
            wisbVersion = None
            wisbSource = None
            if request.body:
                requestjson = json.loads(request.body)
                if 'version' not in requestjson:
                    raise AgentException(Errors.INVALID_REQUEST, 'version is required')
                wisbVersion = requestjson['version']
                wisbSource = requestjson['wisbSource'] if 'wisbSource' in requestjson else configutil.getConfig('selfupdate_source')
            updateThread = AgentUpdate(appGlobal.threadMgr, wisbVersion, wisbSource)
            self.injectJobCtx(updateThread)
            updateThread.start()
            updateThread.threadMgrEvent.wait()
            return statusResult(request, response, updateThread, controller = self)
        except AgentException as aexcep:
            return errorResult(request, response, error = aexcep.getCode(),
                               errorMsg = aexcep.getMsg(), controller = self)
        except Exception as excep:
            msg = 'Unknown error for agent update(%s) - %s - %s' % (wisbVersion, str(excep), traceback.format_exc(2))
            return errorResult(request, response, error = Errors.UNKNOWN_ERROR,
                               errorMsg = msg, controller = self)

    @authorize()
    @trackable()
    def cancelSelfUpdate(self):
        """ cancel an in-flight agent selfupdate
        """
        appGlobal = config['pylons.app_globals']
        catThreads = appGlobal.threadMgr.getThreadByCat(AgentUpdate.THREAD_NAME, fastbreak = False)
        result = {}
        # NOTE(review): if several update threads exist, only the uuid of the
        # last one stopped ends up in the response.
        for catThread in catThreads:
            catThread.stop()
            result['uuid'] = catThread.getUuid()
        return doneResult(request, response, result = result, controller = self)

    @trackable()
    def getConfig(self):
        """ get config overrides """
        LOG.info('get config overrides for agent')
        result = {}
        try:
            result = manifestutil.readJsonServiceMeta('agent', ['configs'])
            return doneResult(request, response, result = result, controller = self)
        except Exception as excep:
            return errorResult(request, response, error = Errors.UNKNOWN_ERROR,
                               errorMsg = 'Unknown error when clean agent configs %s - %s' %
                               (str(excep), traceback.format_exc(2)), controller = self)

    @authorize()
    @trackable()
    def pokeConfig(self):
        """ config poke: persist config overrides sent as
        {"configs": {...}} in the request body, then reload them.
        """
        LOG.info('config poke agent with body: %s', request.body)
        configs = None
        result = {}
        try:
            if request.body:
                body = json.loads(request.body)
                if 'configs' in body:
                    configs = body['configs']
                    result = manifestutil.updateServiceCatMeta('agent', manifestutil.CFG_META, configs)
                    return doneResult(request, response, result = result, controller = self)
            # no body or no 'configs' key: reject the request (caught below
            # and reported as UNKNOWN_ERROR)
            raise AgentException(Errors.INVALID_REQUEST, 'Invalid request, expect configs in request body')
        except Exception as excep:
            return errorResult(request, response, error = Errors.UNKNOWN_ERROR,
                               errorMsg = 'Unknown error when update agent configs %s - %s' %
                               (str(excep), traceback.format_exc(2)), controller = self)
        finally:
            # reload config overrides
            configutil.loadConfigOverrides()

    @authorize()
    @trackable()
    def cleanConfig(self):
        """ clean all configs """
        LOG.info('clean agent config overrides')
        result = {}
        try:
            # passing None removes the stored config override metadata
            result = manifestutil.updateServiceCatMeta('agent', manifestutil.CFG_META, None)
            return doneResult(request, response, result = result, controller = self)
        except Exception as excep:
            return errorResult(request, response, error = Errors.UNKNOWN_ERROR,
                               errorMsg = 'Unknown error when clean agent configs %s - %s' %
                               (str(excep), traceback.format_exc(2)), controller = self)
        finally:
            # reload config overrides
            configutil.loadConfigOverrides()

    @trackable()
    def getTaskSlaReport(self, task, threshold=0, starttime=0, fmt='raw'):
        """ get task SLA report by running the packaged 'perfmetric' script
        and returning its JSON output.
        """
        LOG.info('generating task SLA report')
        try:
            script = os.path.join(manifestutil.manifestPath('agent'), 'agent', 'cronus', 'scripts', 'perfmetric')
            LOG.info('task sla report script %s' % script)
            if not task or task == '':
                raise AgentException(Errors.INVALID_REQUEST, 'task name cannot be empty')
            # subprocess arguments must be plain ascii byte strings
            tmp = [script, task, str(threshold), str(starttime), fmt]
            cmds = []
            for cmd in tmp:
                cmds.append(cmd.encode('ascii', 'ignore'))
            cmdout = utils.runsyscmdwstdout(cmds)
            result = json.loads(cmdout)
            return doneResult(request, response, result=result, controller = self)
        except Exception as excep:
            return errorResult(request, response, error = Errors.UNKNOWN_ERROR,
                               errorMsg = 'Error when getting task sla report %s - %s' %
                               (str(excep), traceback.format_exc(2)), controller = self)

    @trackable()
    def getRoutes(self):
        """ print all available routes """
        mapper = config['routes.map']
        apistr = mapper.__str__()
        apis = apistr.splitlines()
        result = map(lambda x: x.strip(), apis)
        return doneResult(request, response, result=result, controller = self)

    @authorize()
    @trackable()
    def dumpMemory(self):
        """ dump what's in the class in memory: pickle one record per
        tracked object (id, class, shallow size, referent ids) for
        offline leak analysis.
        """
        dumpFile = os.path.join(manifestutil.manifestPath('agent'), 'agent', 'logs', 'memory.pickle')
        with open(dumpFile, 'w') as dump:
            for obj in gc.get_objects():
                i = id(obj)
                size = sys.getsizeof(obj, 0)
                # ids of referenced objects that are class instances
                referents = [id(o) for o in gc.get_referents(obj) if hasattr(o, '__class__')]
                if hasattr(obj, '__class__'):
                    cls = str(obj.__class__)
                    cPickle.dump({'id': i, 'class': cls, 'size': size, 'referents': referents}, dump)
        return doneResult(request, response, controller = self)

    @trackable()
    def getSizeOfMgrs(self):
        """ get size of object """
        appGlobal = config['pylons.app_globals']
        result = {}
        result['threadmgr'] = asizeof(appGlobal.threadMgr)
        result['packagemgr'] = asizeof(appGlobal.packageMgr)
        # NOTE(review): 'montior' looks like a typo for 'monitor', but the key
        # is part of the response payload, so it is kept unchanged here.
        result['montior'] = asizeof(appGlobal.agentMonitor)
        result['all'] = asizeof(appGlobal)
        return doneResult(request, response, result = result, controller = self)

    @authorize()
    @trackable()
    def runAgentGc(self):
        """ force agent GC """
        LOG.info('Received runAgentGc request')
        PackageMgr.runAgentGc()
        return doneResult(request, response, controller = self)
| apache-2.0 |
# Transliteration table for Unicode block U+2800..U+28FF (Braille Patterns).
# The first 64 cells map the 6-dot patterns to their North American ASCII
# Braille equivalents; the remaining cells (U+2840 and up, which involve
# dots 7/8) have no ASCII counterpart and are rendered as an explicit dot
# list such as '[d1237]'.  The original table spelled out all 192 derivable
# entries by hand; they are generated here instead.


def _braille_dots(cell):
    """Return the '[d...]' rendering of a Braille cell offset.

    Dot i (1..8) is raised exactly when bit i-1 of *cell* is set, so the
    digits come out in ascending order.
    """
    digits = ''.join(str(bit + 1) for bit in range(8) if cell & (1 << bit))
    return '[d' + digits + ']'


data = (
    ' ', 'a', '1', 'b', '\'', 'k', '2', 'l',   # 0x00-0x07
    '@', 'c', 'i', 'f', '/', 'm', 's', 'p',    # 0x08-0x0f
    '"', 'e', '3', 'h', '9', 'o', '6', 'r',    # 0x10-0x17
    '^', 'd', 'j', 'g', '>', 'n', 't', 'q',    # 0x18-0x1f
    ',', '*', '5', '<', '-', 'u', '8', 'v',    # 0x20-0x27
    '.', '%', '[', '$', '+', 'x', '!', '&',    # 0x28-0x2f
    ';', ':', '4', '\\', '0', 'z', '7', '(',   # 0x30-0x37
    '_', '?', 'w', ']', '#', 'y', ')', '=',    # 0x38-0x3f
) + tuple(_braille_dots(cell) for cell in range(0x40, 0x100))
| gpl-3.0 |
# Transliteration table for Unicode block U+F900..U+F9FF (CJK Compatibility
# Ideographs): the romanized Korean reading of each ideograph, one entry per
# code point.  Every entry carries a single trailing space, as the unidecode
# tables require; the readings are kept in a compact word table and padded
# when the tuple is built.
_READINGS = """
Kay Kayng Ke Ko Kol Koc Kwi Kwi Kyun Kul Kum Na Na Na La Na
Na Na Na Na Nak Nak Nak Nak Nak Nak Nak Nan Nan Nan Nan Nan
Nan Nam Nam Nam Nam Nap Nap Nap Nang Nang Nang Nang Nang Nay Nayng No
No No No No No No No No No No No Nok Nok Nok Nok Nok
Nok Non Nong Nong Nong Nong Noy Noy Noy Noy Nwu Nwu Nwu Nwu Nwu Nwu
Nwu Nwu Nuk Nuk Num Nung Nung Nung Nung Nung Twu La Lak Lak Lan Lyeng
Lo Lyul Li Pey Pen Pyen Pwu Pwul Pi Sak Sak Sam Sayk Sayng Sep Sey
Sway Sin Sim Sip Ya Yak Yak Yang Yang Yang Yang Yang Yang Yang Yang Ye
Ye Ye Ye Ye Ye Ye Ye Ye Ye Ye Yek Yek Yek Yek Yen Yen
Yen Yen Yen Yen Yen Yen Yen Yen Yen Yen Yen Yen Yel Yel Yel Yel
Yel Yel Yem Yem Yem Yem Yem Yep Yeng Yeng Yeng Yeng Yeng Yeng Yeng Yeng
Yeng Yeng Yeng Yeng Yeng Yey Yey Yey Yey O Yo Yo Yo Yo Yo Yo
Yo Yo Yo Yo Yong Wun Wen Yu Yu Yu Yu Yu Yu Yu Yu Yu
Yu Yuk Yuk Yuk Yun Yun Yun Yun Yul Yul Yul Yul Yung I I I
I I I I I I I I I I I Ik Ik In In In
In In In In Im Im Im Ip Ip Ip Cang Cek Ci Cip Cha Chek
""".split()

# 256 entries, one per code point in the block.
data = tuple(reading + ' ' for reading in _READINGS)
| gpl-2.0 |
ravindrapanda/tensorflow | tensorflow/contrib/metrics/python/metrics/classification.py | 111 | 2647 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification metrics library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# TODO(nsilberman): move into metrics/python/ops/
def accuracy(predictions, labels, weights=None, name=None):
  """Computes the percentage of times that predictions matches labels.

  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
      matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
      bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.
    name: A name for the operation (optional).

  Returns:
    Accuracy `Tensor`.

  Raises:
    ValueError: if dtypes don't match or
      if dtype is not bool, integer, or string.
  """
  # Accuracy is only well defined for exact-match comparisons, so reject
  # float (and other inexact) label dtypes up front.
  if not (labels.dtype.is_integer or
          labels.dtype in (dtypes.bool, dtypes.string)):
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope(name, 'accuracy', values=[predictions, labels]):
    is_correct = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is not None:
      # Weighted accuracy: sum(weights * correct) / sum(weights).
      is_correct = math_ops.multiply(is_correct, weights)
      num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
      return math_ops.div(math_ops.reduce_sum(is_correct),
                          math_ops.reduce_sum(num_values))
    # Unweighted: plain fraction of matching elements.
    return math_ops.reduce_mean(is_correct)
| apache-2.0 |
hzlf/openbroadcast | website/tools/suit/config.py | 2 | 2069 | from django.contrib.admin import ModelAdmin
from django.conf import settings
from . import VERSION
def default_config():
    """Return the built-in Suit configuration, used whenever the Django
    settings do not provide a SUIT_CONFIG (or leave keys unset)."""
    defaults = {
        'VERSION': VERSION,

        # configurable
        'ADMIN_NAME': 'Django Suit',
        'HEADER_DATE_FORMAT': 'l, jS F Y',
        'HEADER_TIME_FORMAT': 'H:i',

        # form
        'SHOW_REQUIRED_ASTERISK': True,
        'CONFIRM_UNSAVED_CHANGES': True,

        # menu (MENU / MENU_EXCLUDE may also be supplied via SUIT_CONFIG)
        'SEARCH_URL': '/admin/auth/user/',
        'MENU_OPEN_FIRST_CHILD': True,
        'MENU_ICONS': {
            'auth': 'icon-lock',
            'sites': 'icon-leaf',
        },

        # misc
        'LIST_PER_PAGE': 20,
    }
    return defaults
def get_config(param=None):
    """Return the active Suit configuration dict, or a single value.

    The dict comes from settings.SUIT_CONFIG when defined, otherwise from
    default_config().  When *param* is given, its value is looked up in the
    active config first and falls back to the defaults when missing.
    """
    if hasattr(settings, 'SUIT_CONFIG'):
        active = getattr(settings, 'SUIT_CONFIG', {})
    else:
        active = default_config()
    if not param:
        return active
    value = active.get(param)
    if value is None:
        value = default_config().get(param)
    return value
# Module-import side effect: these assignments monkey-patch Django's
# ModelAdmin class globally for every registered admin.

# Reverse default actions position
ModelAdmin.actions_on_top = False
ModelAdmin.actions_on_bottom = True

# Set global list_per_page
ModelAdmin.list_per_page = get_config('LIST_PER_PAGE')
def setup_filer():
    """Attach Suit's autosized textarea widget to django-filer's admin
    forms (imports are local so filer stays an optional dependency)."""
    from suit.widgets import AutosizedTextarea
    from filer.admin.imageadmin import ImageAdminForm
    from filer.admin.fileadmin import FileAdminChangeFrom

    def ensure_meta_widgets(meta_cls):
        # a form's Meta class may not define 'widgets' at all
        if not hasattr(meta_cls, 'widgets'):
            meta_cls.widgets = {}
        meta_cls.widgets['description'] = AutosizedTextarea

    ensure_meta_widgets(ImageAdminForm.Meta)
    ensure_meta_widgets(FileAdminChangeFrom.Meta)


# Apply the filer integration only when filer is actually installed.
if 'filer' in settings.INSTALLED_APPS:
    setup_filer()
| gpl-3.0 |
ActiveState/code | recipes/Python/580623_New_VersiGUI_PDF_Table_Contents_Editor_using/recipe-580623.py | 1 | 36018 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Sun May 03 16:15:08 2015
@author: Jorj McKie
Copyright (c) 2015 Jorj X. McKie
The license of this program is governed by the GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007. See the "COPYING" file of this repository.
Example program for the Python binding PyMuPDF of MuPDF.
Changes in version 1.9.1
-------------------------
- removed depedency on PyPDF2 by using PyMuPDF's new methods setMetadata() and
setToC().
- using incremental saves if output file equals input.
Dependencies:
--------------
PyMuPDF 1.9.1 or later
wxPython 3.0 or later
This is a program for editing a PDF file's table of contents (ToC).
After choosing a file in a file selection dialog, its ToC is displayed
in a grid, together with an image of the currently displayed PDF page.
Entries in the grid can be edited, added, duplicated, deleted and moved.
Permanent changes to the underlying file are made only when the SAVE button is
pressed.
The overall screen layout is as follows:
+--------------------+--------------------+
| | |
| le_szr | ri_szr |
| | |
+--------------------+--------------------+
Layout of left sizer "le_szr"
+-----------------------------------------+
| szr10: Button "New Row", expl. text |
+-----------------------------------------+
| szr20: MyGrid (table of contents) |
+-----------------------------------------+
| szr30: PDF metadata |
+-----------------------------------------+
| szr31: check data fields |
+-----------------------------------------+
| szr40: OK / Cancel buttons |
+-----------------------------------------+
Layout of right sizer "ri_szr"
+-----------------------------------------+
| re_szr20: forw / backw / pages |
+-----------------------------------------+
| PDFBild: Bitmap image of pdf page |
+-----------------------------------------+
'''
import os, sys
import wx
import wx.grid as gridlib
import wx.lib.gridmovers as gridmovers
import fitz # = PyMuPDF
ENCODING = "latin-1" # used for item title only
def getint(v):
    """Coerce *v* to an int, best effort.

    Anything int() accepts is converted directly; for other strings the
    embedded digits are concatenated (e.g. "12a3" -> 123, "abc" -> 0).
    Any other input yields 0.
    """
    try:
        return int(v)
    except (TypeError, ValueError):
        # bug fix: int(None), int([]) etc. raise TypeError, which the
        # original code did not catch - treat them as 0 instead of crashing
        pass
    if not isinstance(v, str):
        return 0
    digits = "".join(ch for ch in v if ch in "0123456789")
    return int(digits) if digits else 0
#==============================================================================
# define scale factor for displaying page images (20% larger)
#==============================================================================
scaling = fitz.Matrix(1.2, 1.2)

#==============================================================================
# just abbreviations
#==============================================================================
defPos = wx.DefaultPosition
defSiz = wx.DefaultSize
khaki = wx.Colour(240, 230, 140)  # highlight colour used in the GUI
#==============================================================================
# convenience class for storing information across functions
#==============================================================================
class PDFconfig():
    """Plain container for state shared across the GUI callbacks."""
    def __init__(self):
        self.doc = None       # fitz.Document of the open PDF
        self.meta = {}        # PDF meta information
        self.seiten = 0       # max pages
        self.inhalt = []      # table of contents storage
        self.file = None      # pdf filename
        self.oldpage = 0      # stores displayed page number
#==============================================================================
# render a PDF page and return wx.Bitmap
#==============================================================================
def pdf_show(seite):
    """Render page *seite* (1-based page number, int or string) of the open
    document and return it as a wx.Bitmap, scaled by the global matrix."""
    page_idx = getint(seite) - 1                # fitz page numbers are 0-based
    pix = PDFcfg.doc.getPagePixmap(page_idx, matrix = scaling)
    # the following method returns just RGB data - no alpha bytes
    # this seems to be required in Windows versions of wx.
    # on other platforms try instead:
    #bmp = wx.BitmapfromBufferRGBA(pix.w, pix.h, pix.samples)
    a = pix.samplesRGB() # samples without alpha bytes
    bmp = wx.BitmapFromBuffer(pix.w, pix.h, a)
    pix = None                                  # release pixel buffers early
    a = None
    return bmp
#==============================================================================
# PDFTable = a tabular grid class in wx
#==============================================================================
class PDFTable(gridlib.PyGridTableBase):
    """Virtual grid table backing the outline editor.

    One data row is [level, title, page]. Initially loaded from
    PDFcfg.inhalt; rows can be moved, inserted, duplicated and deleted.
    The attached grid is kept in sync with GridTableMessage batches.
    """
    def __init__(self):
        gridlib.PyGridTableBase.__init__(self)
        self.colLabels = ['Level','Title','Page']
        self.dataTypes = [gridlib.GRID_VALUE_NUMBER,
                          gridlib.GRID_VALUE_STRING,
                          gridlib.GRID_VALUE_NUMBER,
                         ]
        # initial load of table with outline data
        # each line consists of [lvl, title, page]
        # for display, we "indent" the title with spaces
        self.data = [[PDFcfg.inhalt[i][0], # indentation level
                    " "*(PDFcfg.inhalt[i][0] -1) + \
                    PDFcfg.inhalt[i][1].decode("utf-8","ignore"), # title
                    PDFcfg.inhalt[i][2]] \
                    for i in range(len(PDFcfg.inhalt))]
        if not PDFcfg.inhalt:
            self.data = [[0, "*** no outline ***", 0]]
        # used for correctly placing new lines. insert at end = -1
        self.cur_row = -1
#==============================================================================
# Methods required by wxPyGridTableBase interface.
# Will be called by the grid.
#==============================================================================
    def GetNumberRows(self): # row count in my data table
        return len(self.data)
    def GetNumberCols(self): # column count in my data table
        return len(self.colLabels)
    def IsEmptyCell(self, row, col): # is-cell-empty checker
        try:
            return not self.data[row][col]
        except IndexError:
            return True
    def GetValue(self, row, col): # get value (to be put into a cell)
        if col == 1: # simulate indentation if title column
            lvl = int(self.data[row][0]) - 1
            value = " " * lvl + self.data[row][1].strip()
        else:
            value = self.data[row][col]
        return value
    def SetValue(self, row, col, value): # put value from cell to data table
        if col == 1:
            x_val = value.strip() # strip off simulated indentations
        else:
            x_val = value
        self.data[row][col] = x_val
#==============================================================================
# set col names
#==============================================================================
    def GetColLabelValue(self, col):
        return self.colLabels[col]
#==============================================================================
# set row names (just row counters in our case). Only needed, because we have
# row-based operations (dragging, duplicating), and these require some label.
#==============================================================================
    def GetRowLabelValue(self,row):
        return str(row +1)
#==============================================================================
# determine cell content type, controls the grid behaviour for the cells
#==============================================================================
    def GetTypeName(self, row, col):
        return self.dataTypes[col]
#==============================================================================
# move a row, called when user drags rows with the mouse.
# called with row numbers from -> to
#==============================================================================
    def MoveRow(self, frm, to):
        """Move data row *frm* so it ends up before row *to*; notify the grid."""
        grid = self.GetView()
        if grid and frm != to: # actually moving something?
            self.cur_row = to
            # Move the data rows
            oldData = self.data[frm] # list of row values
            del self.data[frm] # delete it from the data
            # determine place for the moving row, and insert it
            # (when moving down, indexes above frm shifted down by 1 after del)
            if to > frm:
                self.data.insert(to-1,oldData)
            else:
                self.data.insert(to,oldData)
#==============================================================================
# inform the Grid about this by special "message batches"
#==============================================================================
            grid.BeginBatch()
            msg = gridlib.GridTableMessage(
                    self, gridlib.GRIDTABLE_NOTIFY_ROWS_DELETED, frm, 1)
            grid.ProcessTableMessage(msg)
            msg = gridlib.GridTableMessage(
                    self, gridlib.GRIDTABLE_NOTIFY_ROWS_INSERTED, to, 1)
            grid.ProcessTableMessage(msg)
            grid.EndBatch()
#==============================================================================
# insert a new row, called with the new cell value list (zeile).
# we use self.cur_row to determine where to put it.
#==============================================================================
    def NewRow(self, zeile):
        """Insert *zeile* before self.cur_row, or append it when cur_row == -1."""
        grid = self.GetView()
        if grid:
            if self.cur_row in range(len(self.data)): # insert in the middle?
                self.data.insert(self.cur_row, zeile)
                grid.BeginBatch() # inform the grid
                msg = gridlib.GridTableMessage(self,
                        gridlib.GRIDTABLE_NOTIFY_ROWS_INSERTED, self.cur_row, 1)
                grid.ProcessTableMessage(msg)
                grid.EndBatch()
            else: # insert at end (append)
                self.data.append(zeile)
                grid.BeginBatch() # inform grid
                msg = gridlib.GridTableMessage(self,
                        gridlib.GRIDTABLE_NOTIFY_ROWS_APPENDED, 1)
                grid.ProcessTableMessage(msg)
                grid.EndBatch()
#==============================================================================
# Duplicate a row, called with row number
#==============================================================================
    def DuplicateRow(self, row):
        """Insert a copy of row *row* directly before it; notify the grid."""
        grid = self.GetView()
        if grid:
            zeile = [self.data[row][0], self.data[row][1],
                     self.data[row][2]]
            self.data.insert(row, zeile)
            grid.BeginBatch()
            msg = gridlib.GridTableMessage(self,
                    gridlib.GRIDTABLE_NOTIFY_ROWS_INSERTED, row, 1)
            grid.ProcessTableMessage(msg)
            grid.EndBatch()
            self.cur_row = row
#==============================================================================
# Delete a row. called with row number.
#==============================================================================
    def DeleteRow(self, row):
        """Delete data row *row*; notify the grid and fix the insert indicator."""
        grid = self.GetView()
        if grid:
            del self.data[row]
            grid.BeginBatch() # inform the grid
            msg = gridlib.GridTableMessage(self,
                    gridlib.GRIDTABLE_NOTIFY_ROWS_DELETED, row, 1)
            grid.ProcessTableMessage(msg)
            grid.EndBatch()
            if self.cur_row not in range(len(self.data)): # update indicator
                self.cur_row = -1
#==============================================================================
# define Grid
#==============================================================================
class MyGrid(gridlib.Grid):
    """Grid widget showing the outline table; wires mouse events to the
    table operations (move / duplicate / delete / edit rows)."""
    def __init__(self, parent):
        gridlib.Grid.__init__(self, parent, -1)
        table = PDFTable() # initialize table
#==============================================================================
# announce table to Grid
# 'True' = enable Grid to manage the table (destroy, etc.)
#==============================================================================
        self.SetTable(table, True)
#==============================================================================
# set font, width, alignment in the grid
#==============================================================================
        self.SetDefaultCellFont(wx.Font(wx.NORMAL_FONT.GetPointSize(),
                70, 90, 90, False, "DejaVu Sans Mono"))
        # center columns (indent level, delete check box)
        ct_al1 = gridlib.GridCellAttr()
        ct_al1.SetAlignment(wx.ALIGN_CENTER, wx.ALIGN_CENTER)
        self.SetColAttr(0, ct_al1)
        # NOTE(review): column 3 does not exist in PDFTable.colLabels -
        # presumably a leftover from a former delete-checkbox column; confirm
        self.SetColAttr(3, ct_al1)
        # page number right aligned
        re_al1 = gridlib.GridCellAttr()
        re_al1.SetAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTER)
        self.SetColAttr(2, re_al1)
#==============================================================================
# Enable Row moving
#==============================================================================
        gridmovers.GridRowMover(self)
#==============================================================================
# Bind: move row
#==============================================================================
        self.Bind(gridmovers.EVT_GRID_ROW_MOVE, self.OnRowMove, self)
#==============================================================================
# Bind: duplicate a row
#==============================================================================
        self.Bind(gridlib.EVT_GRID_LABEL_LEFT_DCLICK, self.OnRowDup, self)
#==============================================================================
# Bind: delete a row
#==============================================================================
        self.Bind(gridlib.EVT_GRID_LABEL_RIGHT_DCLICK, self.OnRowDel, self)
#==============================================================================
# Bind: (double) click a cell
#==============================================================================
        self.Bind(gridlib.EVT_GRID_CELL_LEFT_CLICK, self.OnCellClick, self)
        self.Bind(gridlib.EVT_GRID_CELL_LEFT_DCLICK, self.OnCellDClick, self)
#==============================================================================
# Bind: cell is changing
#==============================================================================
        self.Bind(gridlib.EVT_GRID_CELL_CHANGING, self.OnCellChanging, self)
#==============================================================================
# Event Method: cell is changing
#==============================================================================
    def OnCellChanging(self, evt):
        """Refresh the page image when a page number cell is being edited."""
        if evt.GetCol() == 2: # page number is changing
            value = evt.GetString() # new cell value
            PicRefresh(value) # we show corresponding image
        self.AutoSizeColumn(1) # as always: title width adjust
        DisableOK() # check data before save is possible
#==============================================================================
# Event Method: cell click
#==============================================================================
    def OnCellClick(self, evt):
        """Select the clicked cell and remember its row as insert position."""
        row = evt.GetRow() # row
        col = evt.GetCol() # col
        table = self.GetTable()
        grid = table.GetView()
        grid.GoToCell(row, col) # force "select" for the cell
        self.cur_row = row # memorize current row
        self.AutoSizeColumn(1) # adjust title col width to content
#==============================================================================
# Event Method: cell double click
#==============================================================================
    def OnCellDClick(self, evt):
        """Like a click, but also display the page image for title/page cells."""
        row = evt.GetRow() # row
        col = evt.GetCol() # col
        table = self.GetTable()
        if col == 1 or col == 2: # refresh picture if title or page col
            seite = table.GetValue(row, 2)
            PicRefresh(seite)
        grid = table.GetView()
        grid.GoToCell(row, col) # force "select" of that cell
        self.cur_row = row # memorize current row
        self.AutoSizeColumn(1)
#==============================================================================
# Event Method: move row
#==============================================================================
    def OnRowMove(self,evt):
        frm = evt.GetMoveRow() # row being moved
        to = evt.GetBeforeRow() # before which row to insert
        self.GetTable().MoveRow(frm,to)
        DisableOK()
#==============================================================================
# Event Method: delete row
#==============================================================================
    def OnRowDel(self, evt):
        row = evt.GetRow()
        self.GetTable().DeleteRow(row)
        DisableOK()
#==============================================================================
# Event Method: duplicate row
#==============================================================================
    def OnRowDup(self, evt):
        row = evt.GetRow()
        col = evt.GetCol()
        if col < 0 and row >= 0: # else this is not a row duplication!
            self.GetTable().DuplicateRow(row) # duplicate the row and ...
            self.GetParent().Layout() # possibly enlarge the grid
        DisableOK()
#==============================================================================
#
# define dialog
#
#==============================================================================
class PDFDialog (wx.Dialog):
    """Main dialog: outline grid and metadata on the left, page image with
    paging controls on the right. SAVE stays disabled until DataOK passes."""
    def __init__(self, parent):
        wx.Dialog.__init__ (self, parent, id = wx.ID_ANY,
            title = "Maintain the Table of Contents",
            pos = defPos, size = defSiz,
            style = wx.CAPTION|wx.CLOSE_BOX|
                    wx.DEFAULT_DIALOG_STYLE|
                    wx.MAXIMIZE_BOX|wx.MINIMIZE_BOX|
                    wx.RESIZE_BORDER)
        self.SetBackgroundColour(khaki)
        # maximize the screen
        #self.Maximize()
        # alternatively, try more scrutiny:
        width = wx.GetDisplaySize()[0]-500 # define maximum width
        height = wx.GetDisplaySize()[1]-35 # define maximum height
        self.SetSize(wx.Size(width, height))
#==============================================================================
# Sizer 10: Button 'new row' and an explaining text
#==============================================================================
        self.szr10 = wx.BoxSizer(wx.HORIZONTAL)
        self.btn_neu = wx.Button(self, wx.ID_ANY, "New Row",
                           defPos, defSiz, 0)
        self.szr10.Add(self.btn_neu, 0, wx.ALIGN_CENTER|wx.ALL, 5)
        msg_txt = """NEW rows will be inserted at the end, or before the row with a right-clicked field.\nDUPLICATE row: double-click its number. DELETE row: right-double-click its number.\nDouble-click titles or page numbers to display the page image."""
        explain = wx.StaticText(self, wx.ID_ANY, msg_txt,
                      defPos, wx.Size(-1, 50), 0)
        self.szr10.Add(explain, 0, wx.ALIGN_CENTER, 5)
#==============================================================================
# Sizer 20: define outline grid and do some layout adjustments
#==============================================================================
        self.szr20 = MyGrid(self)
        self.szr20.AutoSizeColumn(0)
        self.szr20.AutoSizeColumn(1)
        self.szr20.SetColSize(2, 45)
        self.szr20.SetRowLabelSize(30)
#==============================================================================
# Sizer 30: PDF meta information
#==============================================================================
        self.szr30 = wx.FlexGridSizer(6, 2, 0, 0)
        self.szr30.SetFlexibleDirection(wx.BOTH)
        self.szr30.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
        self.tx_input = wx.StaticText(self, wx.ID_ANY, "Input:",
                            defPos, defSiz, 0)
        self.tx_input.Wrap(-1)
        self.szr30.Add(self.tx_input, 0, wx.ALIGN_CENTER, 5)
        self.tx_eindat = wx.StaticText(self, wx.ID_ANY,
                             " %s (%s pages)" % (PDFcfg.file, str(PDFcfg.seiten)),
                             defPos, defSiz, 0)
        self.tx_eindat.Wrap(-1)
        self.szr30.Add(self.tx_eindat, 0, wx.ALL, 5)
        self.tx_ausdat = wx.StaticText(self, wx.ID_ANY, "Output:",
                             defPos, defSiz, 0)
        self.tx_ausdat.Wrap(-1)
        self.szr30.Add(self.tx_ausdat, 0, wx.ALIGN_CENTER, 5)
        self.btn_aus = wx.FilePickerCtrl(self, wx.ID_ANY,
                           PDFcfg.file, "set output file", "*.pdf",
                           defPos, wx.Size(480,-1),
                           wx.FLP_OVERWRITE_PROMPT|wx.FLP_SAVE|
                           wx.FLP_USE_TEXTCTRL)
        self.szr30.Add(self.btn_aus, 0, wx.ALL, 5)
        self.tx_autor = wx.StaticText(self, wx.ID_ANY, "Author:",
                            defPos, defSiz, 0)
        self.tx_autor.Wrap(-1)
        self.szr30.Add(self.tx_autor, 0, wx.ALIGN_CENTER, 5)
        self.ausaut = wx.TextCtrl(self, wx.ID_ANY,
                          PDFcfg.meta["author"],
                          defPos, wx.Size(480, -1), 0)
        self.szr30.Add(self.ausaut, 0, wx.ALL, 5)
        self.pdf_titel = wx.StaticText(self, wx.ID_ANY, "Title:",
                             defPos, defSiz, 0)
        self.pdf_titel.Wrap(-1)
        self.szr30.Add(self.pdf_titel, 0, wx.ALIGN_CENTER, 5)
        self.austit = wx.TextCtrl(self, wx.ID_ANY,
                          PDFcfg.meta["title"],
                          defPos, wx.Size(480, -1), 0)
        self.szr30.Add(self.austit, 0, wx.ALL, 5)
        self.tx_subject = wx.StaticText(self, wx.ID_ANY, "Subject:",
                              defPos, defSiz, 0)
        self.tx_subject.Wrap(-1)
        self.szr30.Add(self.tx_subject, 0, wx.ALIGN_CENTER, 5)
        self.aussub = wx.TextCtrl(self, wx.ID_ANY,
                          PDFcfg.meta["subject"],
                          defPos, wx.Size(480, -1), 0)
        self.szr30.Add(self.aussub, 0, wx.ALL, 5)
#==============================================================================
# Sizer 31: check data
#==============================================================================
        self.szr31 = wx.FlexGridSizer(1, 2, 0, 0)
        self.btn_chk = wx.Button(self, wx.ID_ANY, "Check Data",
                           defPos, defSiz, 0)
        self.szr31.Add(self.btn_chk, 0, wx.ALIGN_TOP|wx.ALL, 5)
        self.msg = wx.StaticText(self, wx.ID_ANY, "Before data can be saved, "\
                       "they must be checked with this button.\n"\
                       "Warning: Any original 'Output' file will be overwritten, "\
                       "once you press SAVE!",
                       defPos, defSiz, 0)
        self.msg.Wrap(-1)
        self.szr31.Add(self.msg, 0, wx.ALL, 5)
#==============================================================================
# Sizer 40: OK / Cancel
#==============================================================================
        self.szr40 = wx.StdDialogButtonSizer()
        self.szr40OK = wx.Button(self, wx.ID_OK, label="SAVE")
        self.szr40OK.Disable()
        self.szr40.AddButton(self.szr40OK)
        self.szr40Cancel = wx.Button(self, wx.ID_CANCEL)
        self.szr40.AddButton(self.szr40Cancel)
        self.szr40.Realize()
#==============================================================================
# define lines (decoration only)
#==============================================================================
        linie1 = wx.StaticLine(self, wx.ID_ANY,
                     defPos, defSiz, wx.LI_HORIZONTAL)
        linie2 = wx.StaticLine(self, wx.ID_ANY,
                     defPos, defSiz, wx.LI_HORIZONTAL)
        linie3 = wx.StaticLine(self, wx.ID_ANY,
                     defPos, defSiz, wx.LI_HORIZONTAL)
#==============================================================================
# Left Sizer: Outline and other PDF information
#==============================================================================
        le_szr = wx.BoxSizer(wx.VERTICAL)
        le_szr.Add(self.szr10, 0, wx.EXPAND, 5)
        le_szr.Add(linie1, 0, wx.EXPAND|wx.ALL, 5)
        le_szr.Add(self.szr20, 1, wx.EXPAND, 5)
        le_szr.Add(self.szr31, 0, wx.EXPAND, 5)
        le_szr.Add(linie2, 0, wx.EXPAND|wx.ALL, 5)
        le_szr.Add(self.szr30, 0, wx.EXPAND, 5)
        le_szr.Add(linie3, 0, wx.EXPAND|wx.ALL, 5)
        le_szr.Add(self.szr40, 0, wx.ALIGN_TOP|wx.ALIGN_CENTER_HORIZONTAL, 5)
#==============================================================================
# Right Sizer: display a PDF page image
#==============================================================================
        ri_szr = wx.BoxSizer(wx.VERTICAL) # a control line and the picture
        ri_szr20 = wx.BoxSizer(wx.HORIZONTAL) # defines the control line
        self.btn_vor = wx.Button(self, wx.ID_ANY, "forward",
                           defPos, defSiz, 0)
        ri_szr20.Add(self.btn_vor, 0, wx.ALL, 5)
        self.btn_zur = wx.Button(self, wx.ID_ANY, "backward",
                           defPos, defSiz, 0)
        ri_szr20.Add(self.btn_zur, 0, wx.ALL, 5)
        self.zuSeite = wx.TextCtrl(self, wx.ID_ANY, "1",
                           defPos, wx.Size(40, -1),
                           wx.TE_PROCESS_ENTER|wx.TE_RIGHT)
        ri_szr20.Add(self.zuSeite, 0, wx.ALL, 5)
        max_pages = wx.StaticText(self, wx.ID_ANY,
                        "of %s pages" % (str(PDFcfg.seiten),),
                        defPos, defSiz, 0)
        ri_szr20.Add(max_pages, 0, wx.ALIGN_CENTER, 5)
        # control line sizer composed, now add it to the vertical sizer
        ri_szr.Add(ri_szr20, 0, wx.EXPAND, 5)
        # define the bitmap for the pdf image ...
        bmp = pdf_show(1)
        self.PDFbild = wx.StaticBitmap(self, wx.ID_ANY, bmp,
                           defPos, defSiz, wx.BORDER_NONE)
        # ... and add it to the vertical sizer
        ri_szr.Add(self.PDFbild, 0, wx.ALL, 0)
#==============================================================================
# Main Sizer composition
#==============================================================================
        mainszr= wx.BoxSizer(wx.HORIZONTAL)
        mainszr.Add(le_szr, 1, wx.ALL, 5)
        mainszr.Add(ri_szr, 0, wx.ALL, 5)
        self.SetSizer(mainszr)
        self.Layout()
        self.Centre(wx.BOTH)
#==============================================================================
# bind buttons
#==============================================================================
        self.btn_neu.Bind(wx.EVT_BUTTON, self.insertRow) # "new row"
        self.btn_chk.Bind(wx.EVT_BUTTON, self.DataOK) # "check data"
        self.btn_vor.Bind(wx.EVT_BUTTON, self.forwPage) # "forward"
        self.btn_zur.Bind(wx.EVT_BUTTON, self.backPage) # "backward"
        self.zuSeite.Bind(wx.EVT_TEXT_ENTER, self.gotoPage) # "page number"
        self.PDFbild.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel) # mouse scroll
    def __del__(self):
        pass
    def OnMouseWheel(self, event):
        """Translate mouse wheel movement into forward / backward paging."""
        # process wheel as paging operations
        d = event.GetWheelRotation() # int indicating direction
        if d < 0:
            self.forwPage(event)
        elif d > 0:
            self.backPage(event)
        return
    def forwPage(self, event):
        """Show the next page (PicRefresh clamps at the last page)."""
        seite = getint(self.zuSeite.Value) + 1
        PicRefresh(seite)
        event.Skip()
    def backPage(self, event):
        """Show the previous page (PicRefresh clamps at page 1)."""
        seite = getint(self.zuSeite.Value) - 1
        PicRefresh(seite)
        event.Skip()
    def gotoPage(self, event):
        """Show the page number typed into the page number field."""
        seite = self.zuSeite.Value
        PicRefresh(seite)
        event.Skip()
#==============================================================================
# "insertRow" - Event Handler for new rows: insert a model row
#==============================================================================
    def insertRow(self, event):
        """Insert a template outline row and invalidate the previous check."""
        zeile = [1, "*** new row ***", 1, ""]
        self.szr20.Table.NewRow(zeile)
        DisableOK()
        self.Layout()
#==============================================================================
# Check Data: enable / disable OK button
#==============================================================================
    def DataOK(self, event):
        """Validate the outline rows; enable SAVE only if everything checks out.

        Rules: row 1 must be level 1, levels >= 1, page numbers within the
        document, level increases of at most 1, non-empty titles, and an
        incremental (in-place) save is refused for repaired/encrypted input.
        """
        valide = True
        self.msg.Label = "Data OK!"
        d = self.szr20.GetTable()
        for i in range(self.szr20.Table.GetNumberRows()):
            if i == 0 and int(d.GetValue(0, 0)) != 1:
                valide = False
                self.msg.Label = "row 1 must have level 1"
                break
            if int(d.GetValue(i, 0)) < 1:
                valide = False
                self.msg.Label = "row %s: level < 1" % (str(i+1),)
                break
            if int(d.GetValue(i, 2)) > PDFcfg.seiten or \
               int(d.GetValue(i, 2)) < 1:
                valide = False
                self.msg.Label = "row %s: page# out of range" \
                                 % (str(i+1),)
                break
            if i > 0 and (int(d.GetValue(i, 0)) - int(d.GetValue(i-1, 0))) > 1:
                valide = False
                self.msg.Label = "row %s: level stepping > 1" % (str(i+1),)
                break
            if not d.GetValue(i, 1):
                valide = False
                self.msg.Label = "row %s: missing title" % (str(i+1),)
                break
        if valide and (self.btn_aus.GetPath() == PDFcfg.file):
            if PDFcfg.doc.openErrCode > 0 or PDFcfg.doc.needsPass == 1:
                valide = False
                self.msg.Label = "repaired or encrypted document - choose a different Output"
        if not valide:
            self.szr40OK.Disable()
        else:
            self.szr40OK.Enable()
        self.Layout()
#==============================================================================
# display a PDF page
#==============================================================================
def PicRefresh(seite):
    """Display the image of page *seite*, clamped to the valid page range.

    Also writes the effective page number back into the dialog's page field.
    Does nothing if that page is already on display.
    """
    pno = min(PDFcfg.seiten, max(1, getint(seite)))  # clamp to 1..seiten
    dlg.zuSeite.Value = str(pno)                     # reflect page# in dialog
    if PDFcfg.oldpage == pno:                        # already displayed?
        return
    PDFcfg.oldpage = pno
    bmp = pdf_show(pno)                              # render the page
    dlg.PDFbild.SetSize(bmp.Size)
    dlg.PDFbild.SetBitmap(bmp)
    dlg.PDFbild.Refresh(True)
    bmp = None                                       # release the bitmap ref
    dlg.Layout()
#==============================================================================
# Disable OK button
#==============================================================================
def DisableOK():
    """Grey out the SAVE button and tell the user to re-run the data check."""
    dlg.msg.Label = ("Data have changed.\nPress Check Data (again) "
                     "before saving.")
    dlg.szr40OK.Disable()
#==============================================================================
# Read PDF document information
#==============================================================================
def getPDFinfo():
    """Open PDFcfg.file and populate the PDFcfg scratchpad.

    Prompts for a password when needed. Returns True if the document is
    still encrypted afterwards (failure), False on success.
    """
    PDFcfg.doc = fitz.open(PDFcfg.file)
    if PDFcfg.doc.needsPass:          # ask the user for the password
        decrypt_doc()
    if PDFcfg.doc.isEncrypted:        # still locked: give up
        return True
    PDFcfg.inhalt = PDFcfg.doc.getToC()
    PDFcfg.seiten = PDFcfg.doc.pageCount
    # guarantee the three keys the dialog reads, then merge the real metadata
    meta = {"author": "", "title": "", "subject": ""}
    for k, v in PDFcfg.doc.metadata.items():
        meta[k] = v.decode("utf-8", "ignore") if v else ""
    PDFcfg.meta = meta
    return False
def decrypt_doc():
    """Ask the user for the document password until authentication succeeds
    or the dialog is cancelled. On cancel the document stays encrypted -
    the caller (getPDFinfo) checks doc.isEncrypted afterwards."""
    # let user enter document password
    pw = None
    # NOTE: this local 'dlg' deliberately shadows the module-level 'dlg'
    dlg = wx.TextEntryDialog(None, 'Please enter password below:',
             'Document is password protected', '',
             style = wx.TextEntryDialogStyle|wx.TE_PASSWORD)
    while pw is None:                       # loop until a password is accepted
        rc = dlg.ShowModal()
        if rc == wx.ID_OK:
            pw = str(dlg.GetValue().encode("utf-8"))
            PDFcfg.doc.authenticate(pw)
        else:                               # user cancelled
            return
        if PDFcfg.doc.isEncrypted:          # wrong password: ask again
            pw = None
            dlg.SetTitle("Wrong password. Enter correct password or cancel.")
    return
#==============================================================================
# Write the changed PDF file
#============================================================================
def make_pdf(dlg):
    """Write the edited PDF: set metadata and outline, then save to the file
    chosen in the dialog's Output picker."""
    # NOTE(review): timezone offset is hard-coded to -04'00' - confirm
    cdate = wx.DateTime.Now().Format("D:%Y%m%d%H%M%S-04'00'")
    PDFmeta = {"creator":"PDFoutline.py",
               "producer":"PyMuPDF",
               "creationDate": cdate,
               "modDate": cdate,
               "title":dlg.austit.Value,
               "author":dlg.ausaut.Value,
               "subject":dlg.aussub.Value}
    PDFcfg.doc.setMetadata(PDFmeta)     # set new metadata
    newtoc = []
#==============================================================================
# store our outline entries as bookmarks
#==============================================================================
    for z in dlg.szr20.Table.data:
        lvl = int(z[0])                          # outline level
        pno = int(z[2])                          # page number
        tit = z[1].strip()                       # title without indent spaces
        tit = tit.encode(ENCODING, "ignore")     # re-encode for the PDF
        newtoc.append([lvl, tit, pno])
    PDFcfg.doc.setToC(newtoc)
    outfile = dlg.btn_aus.GetPath() # get dir & name of file in screen
    if outfile == PDFcfg.file: # equal: update the input file incrementally
        PDFcfg.doc.save(outfile, incremental=True)
    else: # different file: full save with garbage collection
        PDFcfg.doc.save(outfile, garbage=3)
    return
#==============================================================================
#
# Main Program
#
#==============================================================================
if wx.VERSION[0] >= 3:      # require wxPython 3.x or later
    pass
else:
    # NOTE: Python 2 print statement - this script targets Python 2
    print "need wxPython version 3.0 or higher"
    sys.exit(1)
app = None
app = wx.App()              # wx application object must exist before dialogs
#==============================================================================
# Check if we have been invoked with a PDF to edit
#==============================================================================
if len(sys.argv) == 2:
    infile = sys.argv[1]
    if not infile.endswith(".pdf"):     # only accept *.pdf file names
        infile = None
else:
    infile = None
#==============================================================================
# let user select the file. Can only allow true PDFs.
#==============================================================================
if not infile:
    # NOTE(review): wx.OPEN / wx.CHANGE_DIR are wxPython Classic constants -
    # confirm they exist in the wx version in use
    dlg = wx.FileDialog(None, message = "Choose a PDF file to edit",
                        defaultDir = os.path.expanduser('~'),
                        defaultFile = wx.EmptyString,
                        wildcard = "PDF files (*.pdf)|*.pdf",
                        style=wx.OPEN | wx.CHANGE_DIR)
    # We got a file only when one was selected and OK pressed
    if dlg.ShowModal() == wx.ID_OK:
        # This returns a Python list of selected files.
        infile = dlg.GetPaths()[0]
    else:
        infile = None
    # destroy this dialog
    dlg.Destroy()
if infile: # if we have a filename ...
    PDFcfg = PDFconfig() # create our PDF scratchpad
    PDFcfg.file = infile
    if getPDFinfo() == 0: # input is not encrypted
        dlg = PDFDialog(None) # create dialog
        rc = dlg.ShowModal() # show dialog
        if rc == wx.ID_OK: # output PDF if SAVE pressed
            make_pdf(dlg)
        dlg.Destroy()
app = None
| mit |
larks/mbed | workspace_tools/settings.py | 33 | 3909 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os.path import join, abspath, dirname
import logging
ROOT = abspath(join(dirname(__file__), ".."))   # repository root (one level up)
# These default settings have two purposes:
# 1) Give a template for writing local "private_settings.py"
# 2) Give default initialization fields for the "toolchains.py" constructors
##############################################################################
# Build System Settings
##############################################################################
BUILD_DIR = abspath(join(ROOT, "build"))
# ARM
armcc = "standalone" # "keil", or "standalone", or "ds-5"
if armcc == "keil":
    ARM_PATH = "C:/Keil_4_54/ARM"
    ARM_BIN = join(ARM_PATH, "BIN40")
    ARM_INC = join(ARM_PATH, "RV31", "INC")
    ARM_LIB = join(ARM_PATH, "RV31", "LIB")
elif armcc == "standalone":
    ARM_PATH = "C:/Program Files/ARM/armcc_4.1_791"
    ARM_BIN = join(ARM_PATH, "bin")
    ARM_INC = join(ARM_PATH, "include")
    ARM_LIB = join(ARM_PATH, "lib")
elif armcc == "ds-5":
    ARM_PATH = "C:/Program Files (x86)/DS-5"
    ARM_BIN = join(ARM_PATH, "bin")
    ARM_INC = join(ARM_PATH, "include")
    ARM_LIB = join(ARM_PATH, "lib")
ARM_CPPLIB = join(ARM_LIB, "cpplib")
MY_ARM_CLIB = join(ARM_PATH, "lib", "microlib")
# GCC ARM
GCC_ARM_PATH = ""
# GCC CodeSourcery
GCC_CS_PATH = "C:/Program Files (x86)/CodeSourcery/Sourcery_CodeBench_Lite_for_ARM_EABI/bin"
# GCC CodeRed
GCC_CR_PATH = "C:/code_red/RedSuite_4.2.0_349/redsuite/Tools/bin"
# IAR
IAR_PATH = "C:/Program Files (x86)/IAR Systems/Embedded Workbench 7.0/arm"
# GCC Code Warrior
CW_GCC_PATH = "C:/Freescale/CW MCU v10.3/Cross_Tools/arm-none-eabi-gcc-4_6_2/bin"
CW_EWL_PATH = "C:/Freescale/CW MCU v10.3/MCU/ARM_GCC_Support/ewl/lib"
# Goanna static analyser. Please overload it in private_settings.py
GOANNA_PATH = "c:/Program Files (x86)/RedLizards/Goanna Central 3.2.3/bin"
# cppcheck path (command) and output message format
CPPCHECK_CMD = ["cppcheck", "--enable=all"]
CPPCHECK_MSG_FORMAT = ["--template=[{severity}] {file}@{line}: {id}:{message}"]
BUILD_OPTIONS = []
# mbed.org username
MBED_ORG_USER = ""
##############################################################################
# Test System Settings
##############################################################################
SERVER_PORT = 59432
SERVER_ADDRESS = "10.2.200.94"
LOCALHOST = "10.2.200.94"
# Map of attached test devices: logical id -> mcu type, serial port,
# mount point and connected peripherals.
# NOTE(review): presumably MUT = "mbed under test" - confirm
MUTs = {
    "1" : {"mcu": "LPC1768",
        "port":"COM41", "disk":'E:\\',
        "peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
        },
    "2": {"mcu": "LPC11U24",
        "port":"COM42", "disk":'F:\\',
        "peripherals": ["TMP102", "digital_loop", "port_loop", "SD"]
        },
    "3" : {"mcu": "KL25Z",
        "port":"COM43", "disk":'G:\\',
        "peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
        },
}
##############################################################################
# Private Settings
##############################################################################
try:
    # Allow to overwrite the default settings without the need to edit the
    # settings file stored in the repository
    from workspace_tools.private_settings import *
except ImportError:
    print '[WARNING] Using default settings. Define your settings in the file "workspace_tools/private_settings.py" or in "./mbed_settings.py"'
bbc/kamaelia | Sketches/MPS/BugReports/FixTests/Kamaelia/Tools/DocGen/renderHTML.py | 9 | 8546 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
========================
Doctree to HTML Renderer
========================
Renderer for converting docutils document trees to HTML output with Kamaelia
website specific directives, and automatic links for certain text patterns.
"""
import textwrap
import inspect
import pprint
import time
from docutils import core
from docutils import nodes
import docutils
import re
class RenderHTML(object):
    """\
    RenderHTML([debug][,titlePrefix][,urlPrefix][,rawFooter]) -> new RenderHTML object

    Renders docutils document trees to html with Kamaelia website specific
    directives.

    Also contains helper functions for determining filenames and URIs for
    documents.

    Keyword arguments::

    - debug        -- Optional. True for debugging mode - currently does nothing (default=False)
    - titlePrefix  -- Optional. Prefix for the HTML <head><title> (default="")
    - urlPrefix    -- Optional. Prefix for all URLs. Should include a trailing slash if needed (default="")
    - rawFooter    -- Optional. Footer text that will be appended just before the </body></html> tags (default="")
    """
    def __init__(self, debug=False, titlePrefix="", urlPrefix="", rawFooter=""):
        super(RenderHTML, self).__init__()
        self.titlePrefix = titlePrefix
        self.debug = debug
        self.urlPrefix = urlPrefix
        self.rawFooter = rawFooter
        # maps compiled regex pattern -> destination URI (see setAutoCrossLinks)
        self.mappings = {}

    def makeFilename(self, docName):
        """\
        Returns the file name for a given document name.

        Eg. "Kamaelia.Chassis" will be mapped to something like "Kamaelia.Chassis.html"
        """
        return docName + ".html"

    def makeURI(self, docName, internalRef=None):
        """\
        Returns the URI for a given document name. Takes into account the url prefix.
        An optional internalRef becomes a "#fragment" suffix.

        Eg. "Kamaelia.Chassis" will be mapped to something like "/mydocs/Kamaelia.Chassis.html"
        """
        if internalRef is not None:
            suffix = "#" + internalRef
        else:
            suffix = ""
        return self.urlPrefix + self.makeFilename(docName) + suffix

    def _makeLinkPattern(self, key):
        """\
        Compile a regex that detects `key` only when it stands alone - ie. when
        nothing identifier-like precedes it, and it is not part of a larger
        dotted pattern (eg. A.B as part of A.B.C).
        """
        return re.compile(r"(?<![a-zA-Z0-9._])" + re.escape(key) + r"(?!\.?[a-zA-Z0-9_])")

    def setAutoCrossLinks(self, mappings):
        """\
        Set mapping for the automagic generation of hyperlinks between content.

        Supply a dict of mappings mapping patterns (strings) to the fully qualified
        entity name to be linked to.
        """
        self.mappings = {}
        for (key, ref) in mappings.items():
            # convert the destination (an entity name) to a URI
            self.mappings[self._makeLinkPattern(key)] = self.makeURI(ref)

    def addAutoLinksToURI(self, mappings):
        """Add pattern -> URI auto-link mappings; URIs are used verbatim (no prefixing)."""
        for (key, uri) in mappings.items():
            self.mappings[self._makeLinkPattern(key)] = uri

    def render(self, docName, docTree):
        """\
        Render the named document tree as HTML with Kamaelia website specific directives.

        Returns string containing the entire HTML document.
        """
        # wrap a bare fragment in a proper docutils document node if needed
        if not isinstance(docTree, nodes.document):
            root = core.publish_doctree('')
            root.append(docTree)
            docTree = root
        docTree.attributes['title'] = docName
        # do this first, before we turn the boxright nodes into "[ [boxright] ... ]"
        docTree.transformer.add_transform(squareBracketReplace_transform)
        docTree.transformer.apply_transforms()
        docTree.transformer.add_transform(boxright_transform)
        docTree.transformer.add_transform(crosslink_transform, priority=None, mappings=self.mappings)
        docTree.transformer.apply_transforms()
        reader = docutils.readers.doctree.Reader(parser_name='null')
        pub = core.Publisher(reader, None, None, source=docutils.io.DocTreeInput(docTree),
                             destination_class=docutils.io.StringOutput)
        pub.set_writer("html")
        # publish() populates pub.writer.parts; its return value is not needed
        pub.publish(enable_exit_status=None)
        parts = pub.writer.parts
        doc = parts["html_title"] \
              + parts["html_subtitle"] \
              + parts["docinfo"] \
              + parts["fragment"]
        return self.headers(docTree) + doc + self.footers(docTree)

    def headers(self, doc):
        """Return the HTML preamble (through the opening <body> tag) for `doc`."""
        title = self.titlePrefix + doc.attributes['title']
        # NB: 'type="text/css"' - the original emitted the typo "test/css",
        # which made browsers ignore the embedded stylesheet.
        return """\
<html>
<head>
<title>""" + title + """</title>
<style type="text/css">
pre.literal-block, pre.doctest-block {
margin-left: 2em ;
margin-right: 2em ;
background-color: #eeeeee }
</style>
</head>
<body>
"""

    def footers(self, doc):
        """Return the closing HTML, including any raw footer text."""
        return self.rawFooter + "</body></html>\n"
from Nodes import boxright
class boxright_transform(docutils.transforms.Transform):
    """\
    Transform that rewrites boxright nodes as the Kamaelia website's
    "[[boxright] <child node content> ]" directive, keeping their children.
    """
    default_priority=100
    def apply(self):
        # Materialise the matches first: replace_self() while traversing
        # would otherwise mutate the tree mid-walk.
        matched = [node for node in self.document.traverse(boxright)]
        for node in matched:
            node.insert(0, nodes.Text("[[boxright] "))
            node.append(nodes.Text("]"))
            node.replace_self(nodes.container('', *node.children))
class crosslink_transform(docutils.transforms.Transform):
    """\
    Transform that searches text in the document for any of the patterns in the
    supplied set of mappings. If a pattern is found it is converted to a
    hyperlink
    """
    default_priority=100
    def apply(self, mappings):
        # `mappings` maps compiled regex patterns -> URI strings.
        self.mappings = mappings
        self.recurse(self.document)
    def recurse(self, parent):
        # Walk children by index (not by iterator) because crosslink() mutates
        # the child list in place. When a link is inserted, the same index is
        # revisited so the newly split-off head text gets rescanned for
        # further pattern matches.
        i=0
        while i<len(parent.children):
            thisNode = parent[i]
            if isinstance(thisNode, nodes.Text):
                changeMade = self.crosslink(parent, i)
                if not changeMade:
                    i=i+1
            else:
                # never descend into an existing hyperlink (avoids nested links)
                if isinstance(thisNode, (nodes.reference,)): # nodes.literal_block)):
                    pass
                elif thisNode.children:
                    self.recurse(thisNode)
                i=i+1
    def crosslink(self, parent, i):
        # Replace the first pattern match inside text node parent[i] with
        # head-text + <reference> + tail-text. Returns True if a replacement
        # was made (only the first matching pattern is applied per pass; the
        # head/tail pieces are rescanned by recurse()).
        text = parent[i].astext()
        for pattern in self.mappings.keys():
            match = pattern.search(text)
            if match:
                head = text[:match.start()]
                tail = text[match.end():]
                middle = text[match.start():match.end()]
                URI = self.mappings[pattern]
                # insert in reverse order so the final order is head, link, tail
                parent.remove(parent[i])
                if tail:
                    parent.insert(i, nodes.Text(tail))
                if middle:
                    parent.insert(i, nodes.reference('', nodes.Text(middle), refuri=URI))
                if head:
                    parent.insert(i, nodes.Text(head))
                return True
        return False
class squareBracketReplace_transform(docutils.transforms.Transform):
    """\
    Transform that substitutes escape codes for square brackets in all text
    nodes, stopping the Kamaelia website interpreting them as directives.
    """
    default_priority=100
    def apply(self):
        for textnode in self.document.traverse(nodes.Text):
            escaped = textnode.replace("[", "%91%").replace("]", "%93%")
            textnode.parent.replace(textnode, escaped)
| apache-2.0 |
tersmitten/ansible | lib/ansible/modules/network/vyos/vyos_ping.py | 13 | 7091 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: vyos_ping
short_description: Tests reachability using ping from VyOS network devices
description:
- Tests reachability using ping from a VyOS device to a remote destination.
- Tested against VyOS 1.1.8 (helium)
- For a general purpose network module, see the M(net_ping) module.
- For Windows targets, use the M(win_ping) module instead.
- For targets running Python, use the M(ping) module instead.
author:
- Nilashish Chakraborty (@NilashishC)
version_added: '2.8'
options:
dest:
description:
- The IP Address or hostname (resolvable by the device) of the remote node.
required: true
count:
description:
- Number of packets to send to check reachability.
type: int
default: 5
source:
description:
- The source interface or IP Address to use while sending the ping packet(s).
ttl:
description:
- The time-to-live value for the ICMP packet(s).
type: int
size:
description:
- Determines the size (in bytes) of the ping packet(s).
type: int
interval:
description:
- Determines the interval (in seconds) between consecutive pings.
type: int
state:
description:
- Determines if the expected result is success or fail.
choices: [ absent, present ]
default: present
notes:
- For a general purpose network module, see the M(net_ping) module.
- For Windows targets, use the M(win_ping) module instead.
- For targets running Python, use the M(ping) module instead.
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: Test reachability to 10.10.10.10
vyos_ping:
dest: 10.10.10.10
- name: Test reachability to 10.20.20.20 using source and ttl set
vyos_ping:
dest: 10.20.20.20
source: eth0
ttl: 128
- name: Test unreachability to 10.30.30.30 using interval
vyos_ping:
dest: 10.30.30.30
interval: 3
state: absent
- name: Test reachability to 10.40.40.40 setting count and source
vyos_ping:
dest: 10.40.40.40
source: eth1
count: 20
size: 512
"""
RETURN = """
commands:
description: List of commands sent.
returned: always
type: list
sample: ["ping 10.8.38.44 count 10 interface eth0 ttl 128"]
packet_loss:
description: Percentage of packets lost.
returned: always
type: str
sample: "0%"
packets_rx:
description: Packets successfully received.
returned: always
type: int
sample: 20
packets_tx:
description: Packets successfully transmitted.
returned: always
type: int
sample: 20
rtt:
description: The round trip time (RTT) stats.
returned: when ping succeeds
type: dict
sample: {"avg": 2, "max": 8, "min": 1, "mdev": 24}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.vyos.vyos import run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
import re
def main():
    """ main entry point for module execution
    """
    # Module-specific options, merged with the shared VyOS connection spec.
    argument_spec = dict(
        count=dict(type="int", default=5),
        dest=dict(type="str", required=True),
        source=dict(type="str"),
        ttl=dict(type='int'),
        size=dict(type='int'),
        interval=dict(type='int'),
        state=dict(type="str", choices=["absent", "present"], default="present"),
    )
    argument_spec.update(vyos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec)
    count = module.params["count"]
    dest = module.params["dest"]
    source = module.params["source"]
    size = module.params["size"]
    ttl = module.params["ttl"]
    interval = module.params["interval"]
    warnings = list()
    results = {}
    # NOTE(review): nothing ever appends to `warnings`, so this branch is
    # currently dead code -- confirm whether warning collection was intended.
    if warnings:
        results["warnings"] = warnings
    # Build the CLI command, run it on the device, and parse its output lines.
    results["commands"] = [build_ping(dest, count, size, interval, source, ttl)]
    ping_results = run_commands(module, commands=results["commands"])
    ping_results_list = ping_results[0].split("\n")
    rtt_info, rate_info = None, None
    for line in ping_results_list:
        # "rtt min/avg/max/mdev = ..." summary line
        if line.startswith('rtt'):
            rtt_info = line
        # "<count> packets transmitted, ..." statistics line
        if line.startswith('%s packets transmitted' % count):
            rate_info = line
    if rtt_info:
        rtt = parse_rtt(rtt_info)
        # convert the captured digit strings to ints for the RETURN payload
        for k, v in rtt.items():
            if rtt[k] is not None:
                rtt[k] = int(v)
        results["rtt"] = rtt
    # NOTE(review): if the statistics line was never found, rate_info is None
    # and parse_rate() will fail -- confirm supported VyOS versions always
    # emit it.
    pkt_loss, rx, tx = parse_rate(rate_info)
    results["packet_loss"] = str(pkt_loss) + "%"
    results["packets_rx"] = int(rx)
    results["packets_tx"] = int(tx)
    validate_results(module, pkt_loss, results)
    module.exit_json(**results)
def build_ping(dest, count, size=None, interval=None, source=None, ttl=None):
    """Assemble the VyOS ``ping`` command string from the module parameters.

    Optional parts (interface, ttl, size, interval) are appended, in that
    order, only when their value is truthy.
    """
    pieces = ["ping", dest, "count", str(count)]
    if source:
        pieces.extend(["interface", source])
    if ttl:
        pieces.extend(["ttl", str(ttl)])
    if size:
        pieces.extend(["size", str(size)])
    if interval:
        pieces.extend(["interval", str(interval)])
    return " ".join(pieces)
def parse_rate(rate_info):
    """Extract (packet_loss, received, transmitted) from ping's statistics line.

    `rate_info` is the "N packets transmitted, ..." line of ping output; the
    variant that includes an error count ("+N errors,") is also understood.
    All three values are returned as strings of digits.

    Raises ValueError when the line matches neither known format (the
    original code crashed with an UnboundLocalError in that case).
    """
    rate_re = re.compile(
        r"(?P<tx>\d+) (?:\w+) (?:\w+), (?P<rx>\d+) (?:\w+), (?P<pkt_loss>\d+)% (?:\w+) (?:\w+), (?:\w+) (?P<time>\d+)")
    rate_err_re = re.compile(
        r"(?P<tx>\d+) (?:\w+) (?:\w+), (?P<rx>\d+) (?:\w+), (?:[+-])(?P<err>\d+) (?:\w+), (?P<pkt_loss>\d+)% (?:\w+) (?:\w+), (?:\w+) (?P<time>\d+)")
    # Try the plain summary first, then the "+N errors" variant; each regex
    # is now matched at most once instead of twice.
    rate = rate_re.match(rate_info) or rate_err_re.match(rate_info)
    if rate is None:
        raise ValueError("unable to parse ping statistics line: %r" % rate_info)
    return rate.group("pkt_loss"), rate.group("rx"), rate.group("tx")
def parse_rtt(rtt_info):
    """Parse the "rtt min/avg/max/mdev = ..." line into a dict of digit strings.

    Only the integer portion of each figure is captured; keys are "min",
    "avg", "max" and "mdev". Raises AttributeError if the line does not match.
    """
    rtt_pattern = re.compile(
        r"rtt (?:.*)=(?:\s*)(?P<min>\d*).(?:\d*)/(?P<avg>\d*).(?:\d*)/(?P<max>\d+).(?:\d*)/(?P<mdev>\d*)")
    return rtt_pattern.match(rtt_info).groupdict()
def validate_results(module, loss, results):
    """Fail the module when the ping outcome contradicts the requested state.

    state "present" expects reachability (loss below 100%); state "absent"
    expects unreachability (100% loss).
    """
    pkt_loss = int(loss)
    state = module.params["state"]
    if state == "present" and pkt_loss == 100:
        module.fail_json(msg="Ping failed unexpectedly", **results)
    elif state == "absent" and pkt_loss < 100:
        module.fail_json(msg="Ping succeeded unexpectedly", **results)
# Invoke the module entry point only when executed directly (eg. by Ansible),
# never on import (eg. by documentation tooling).
if __name__ == "__main__":
    main()
| gpl-3.0 |
CourseTalk/edx-platform | common/lib/xmodule/xmodule/modulestore/inheritance.py | 6 | 13812 | """
Support for inheritance of fields down an XBlock hierarchy.
"""
from __future__ import absolute_import
from datetime import datetime
from django.conf import settings
from pytz import UTC
from xmodule.partitions.partitions import UserPartition
from xblock.fields import Scope, Boolean, String, Float, XBlockMixin, Dict, Integer, List
from xblock.runtime import KeyValueStore, KvsFieldData
from xmodule.fields import Date, Timedelta
from ..course_metadata_utils import DEFAULT_START_DATE
# Make '_' a no-op so we can scrape strings
# Using lambda instead of `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class UserPartitionList(List):
    """Special List field that (de)serializes UserPartition objects."""
    def from_json(self, values):
        return [UserPartition.from_json(item) for item in values]
    def to_json(self, values):
        return [item.to_json() for item in values]
class InheritanceMixin(XBlockMixin):
    """Field definitions for inheritable fields.

    Every field declared here (almost all Scope.settings) can be set on an
    ancestor block and inherited by descendants via the inheritance machinery
    below (see compute_inherited_metadata / InheritingFieldData).
    """
    graded = Boolean(
        help="Whether this module contributes to the final course grade",
        scope=Scope.settings,
        default=False,
    )
    start = Date(
        help="Start time when this module is visible",
        default=DEFAULT_START_DATE,
        scope=Scope.settings
    )
    due = Date(
        display_name=_("Due Date"),
        help=_("Enter the default date by which problems are due."),
        scope=Scope.settings,
    )
    visible_to_staff_only = Boolean(
        help=_("If true, can be seen only by course staff, regardless of start date."),
        default=False,
        scope=Scope.settings,
    )
    course_edit_method = String(
        display_name=_("Course Editor"),
        help=_("Enter the method by which this course is edited (\"XML\" or \"Studio\")."),
        default="Studio",
        scope=Scope.settings,
        deprecated=True # Deprecated because user would not change away from Studio within Studio.
    )
    giturl = String(
        display_name=_("GIT URL"),
        help=_("Enter the URL for the course data GIT repository."),
        scope=Scope.settings
    )
    xqa_key = String(
        display_name=_("XQA Key"),
        help=_("This setting is not currently supported."), scope=Scope.settings,
        deprecated=True
    )
    annotation_storage_url = String(
        help=_("Enter the location of the annotation storage server. The textannotation, videoannotation, and imageannotation advanced modules require this setting."),
        scope=Scope.settings,
        default="http://your_annotation_storage.com",
        display_name=_("URL for Annotation Storage")
    )
    annotation_token_secret = String(
        help=_("Enter the secret string for annotation storage. The textannotation, videoannotation, and imageannotation advanced modules require this string."),
        scope=Scope.settings,
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        display_name=_("Secret Token String for Annotation")
    )
    graceperiod = Timedelta(
        help="Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings,
    )
    group_access = Dict(
        help=_("Enter the ids for the content groups this problem belongs to."),
        scope=Scope.settings,
    )
    showanswer = String(
        display_name=_("Show Answer"),
        help=_(
            # Translators: DO NOT translate the words in quotes here, they are
            # specific words for the acceptable values.
            'Specify when the Show Answer button appears for each problem. '
            'Valid values are "always", "answered", "attempted", "closed", '
            '"finished", "past_due", "correct_or_past_due", and "never".'
        ),
        scope=Scope.settings,
        default="finished",
    )
    rerandomize = String(
        display_name=_("Randomization"),
        help=_(
            # Translators: DO NOT translate the words in quotes here, they are
            # specific words for the acceptable values.
            'Specify the default for how often variable values in a problem are randomized. '
            'This setting should be set to "never" unless you plan to provide a Python '
            'script to identify and randomize values in most of the problems in your course. '
            'Valid values are "always", "onreset", "never", and "per_student".'
        ),
        scope=Scope.settings,
        default="never",
    )
    days_early_for_beta = Float(
        display_name=_("Days Early for Beta Users"),
        help=_("Enter the number of days before the start date that beta users can access the course."),
        scope=Scope.settings,
        default=None,
    )
    static_asset_path = String(
        display_name=_("Static Asset Path"),
        help=_("Enter the path to use for files on the Files & Uploads page. This value overrides the Studio default, c4x://."),
        scope=Scope.settings,
        default='',
    )
    text_customization = Dict(
        display_name=_("Text Customization"),
        help=_("Enter string customization substitutions for particular locations."),
        scope=Scope.settings,
    )
    use_latex_compiler = Boolean(
        display_name=_("Enable LaTeX Compiler"),
        help=_("Enter true or false. If true, you can use the LaTeX templates for HTML components and advanced Problem components."),
        default=False,
        scope=Scope.settings
    )
    max_attempts = Integer(
        display_name=_("Maximum Attempts"),
        help=_("Enter the maximum number of times a student can try to answer problems. By default, Maximum Attempts is set to null, meaning that students have an unlimited number of attempts for problems. You can override this course-wide setting for individual problems. However, if the course-wide setting is a specific number, you cannot set the Maximum Attempts for individual problems to unlimited."),
        values={"min": 0}, scope=Scope.settings
    )
    matlab_api_key = String(
        display_name=_("Matlab API key"),
        help=_("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
               "This key is granted for exclusive use in this course for the specified duration. "
               "Do not share the API key with other courses. Notify MathWorks immediately "
               "if you believe the key is exposed or compromised. To obtain a key for your course, "
               "or to report an issue, please contact moocsupport@mathworks.com"),
        scope=Scope.settings
    )
    # This is should be scoped to content, but since it's defined in the policy
    # file, it is currently scoped to settings.
    user_partitions = UserPartitionList(
        display_name=_("Group Configurations"),
        help=_("Enter the configurations that govern how students are grouped together."),
        default=[],
        scope=Scope.settings
    )
    video_speed_optimizations = Boolean(
        display_name=_("Enable video caching system"),
        help=_("Enter true or false. If true, video caching will be used for HTML5 videos."),
        default=True,
        scope=Scope.settings
    )
    video_bumper = Dict(
        display_name=_("Video Pre-Roll"),
        help=_(
            "Identify a video, 5-10 seconds in length, to play before course videos. Enter the video ID from "
            "the Video Uploads page and one or more transcript files in the following format: {format}. "
            "For example, an entry for a video with two transcripts looks like this: {example}"
        ).format(
            format='{"video_id": "ID", "transcripts": {"language": "/static/filename.srt"}}',
            example=(
                '{'
                '"video_id": "77cef264-d6f5-4cf2-ad9d-0178ab8c77be", '
                '"transcripts": {"en": "/static/DemoX-D01_1.srt", "uk": "/static/DemoX-D01_1_uk.srt"}'
                '}'
            ),
        ),
        scope=Scope.settings
    )
    # Site-wide default for show_reset_button, read at class-definition time.
    # NOTE(review): the hasattr/getattr pair (rather than getattr with a
    # default) appears deliberate -- on Python 2, hasattr() swallows any error
    # raised by unconfigured Django settings. Confirm before simplifying.
    reset_key = "DEFAULT_SHOW_RESET_BUTTON"
    default_reset_button = getattr(settings, reset_key) if hasattr(settings, reset_key) else False
    show_reset_button = Boolean(
        display_name=_("Show Reset Button for Problems"),
        help=_(
            "Enter true or false. If true, problems in the course default to always displaying a 'Reset' button. "
            "You can override this in each problem's settings. All existing problems are affected when "
            "this course-wide setting is changed."
        ),
        scope=Scope.settings,
        default=default_reset_button
    )
    edxnotes = Boolean(
        display_name=_("Enable Student Notes"),
        help=_("Enter true or false. If true, students can use the Student Notes feature."),
        default=False,
        scope=Scope.settings
    )
    edxnotes_visibility = Boolean(
        display_name="Student Notes Visibility",
        help=_("Indicates whether Student Notes are visible in the course. "
               "Students can also show or hide their notes in the courseware."),
        default=True,
        scope=Scope.user_info
    )
    in_entrance_exam = Boolean(
        display_name=_("Tag this module as part of an Entrance Exam section"),
        help=_("Enter true or false. If true, answer submissions for problem modules will be "
               "considered in the Entrance Exam scoring/gating algorithm."),
        scope=Scope.settings,
        default=False
    )
def compute_inherited_metadata(descriptor):
    """
    Recursively push inheritable metadata from `descriptor` down to every
    descendant. Call this on a CourseDescriptor once a course has been
    imported.

    NOTE: there is no lazy loading here -- every child is accessed.
    """
    if not descriptor.has_children:
        return
    # Start from what this node itself inherited, then overlay any fields
    # explicitly set on it (inherited_settings stores json representations).
    inherited = descriptor.xblock_kvs.inherited_settings.copy()
    for field in InheritanceMixin.fields.values():
        if field.is_set_on(descriptor):
            inherited[field.name] = field.read_json(descriptor)
    for child in descriptor.get_children():
        inherit_metadata(child, inherited)
        compute_inherited_metadata(child)
def inherit_metadata(descriptor, inherited_data):
    """
    Updates this module with metadata inherited from a containing module.
    Only metadata specified in self.inheritable_metadata will
    be inherited
    `inherited_data`: A dictionary mapping field names to the values that
    they should inherit
    """
    # NOTE(review): the except clause also swallows the case where the
    # descriptor has no xblock_kvs attribute at all -- apparently intentional
    # (error modules), but confirm before tightening.
    try:
        descriptor.xblock_kvs.inherited_settings = inherited_data
    except AttributeError: # the kvs doesn't have inherited_settings probably b/c it's an error module
        pass
def own_metadata(module):
    """
    Return a JSON-friendly dictionary that contains only non-inherited field
    keys, mapped to their serialized values
    """
    # Only explicitly-set Scope.settings fields are included, so values that
    # merely arrived via inheritance are excluded.
    return module.get_explicitly_set_fields_by_scope(Scope.settings)
class InheritingFieldData(KvsFieldData):
    """A `FieldData` implementation that looks up unset field values on ancestors."""
    def __init__(self, inheritable_names, **kwargs):
        """
        `inheritable_names`: iterable of field names that children may
        inherit from their parents.
        """
        super(InheritingFieldData, self).__init__(**kwargs)
        self.inheritable_names = set(inheritable_names)
    def default(self, block, name):
        """
        The default for an inheritable name comes from the nearest ancestor
        that has the field explicitly set; otherwise fall back to the normal
        default.
        """
        if name not in self.inheritable_names:
            return super(InheritingFieldData, self).default(block, name)
        # Use the field object from the current block, so that the block's own
        # default applies when no ancestor sets the field (it may differ from
        # the root node's default).
        field = block.fields[name]
        ancestor = block.get_parent()
        while ancestor is not None:
            if field.is_set_on(ancestor):
                return field.read_json(ancestor)
            ancestor = ancestor.get_parent()
        return super(InheritingFieldData, self).default(block, name)
def inheriting_field_data(kvs):
    """Build an InheritingFieldData configured with every InheritanceMixin field name."""
    return InheritingFieldData(inheritable_names=InheritanceMixin.fields.keys(), kvs=kvs)
class InheritanceKeyValueStore(KeyValueStore):
    """
    Common superclass for kvs's which know about inheritance of settings. Offers simple
    dict-based storage of fields and lookup of inherited values.
    Note: inherited_settings is a dict of key to json values (internal xblock field repr)
    """
    def __init__(self, initial_values=None, inherited_settings=None):
        super(InheritanceKeyValueStore, self).__init__()
        # NOTE(review): both dicts are aliased, not copied -- mutation by the
        # caller remains visible here. Confirm intent before changing.
        self.inherited_settings = inherited_settings or {}
        self._fields = initial_values or {}
    def get(self, key):
        # Raises KeyError for unset fields; callers rely on that to fall
        # through to defaults.
        return self._fields[key.field_name]
    def set(self, key, value):
        # xml backed courses are read-only, but they do have some computed fields
        self._fields[key.field_name] = value
    def delete(self, key):
        del self._fields[key.field_name]
    def has(self, key):
        return key.field_name in self._fields
    def default(self, key):
        """
        Check to see if the default should be from inheritance. If not
        inheriting, this will raise KeyError which will cause the caller to use
        the field's global default.
        """
        return self.inherited_settings[key.field_name]
| agpl-3.0 |
drawquest/drawquest-web | website/canvas/migrations/0030_really_add_content_referenced_only_by_stash.py | 2 | 8779 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    def forwards(self, orm):
        # Backfill Content rows (with fresh url mappings) for stash entries
        # whose content_id is set, using a fixed historical timestamp and a
        # placeholder IP.
        # NOTE(review): assumes none of these Content rows already exist;
        # duplicates would raise on the primary key. Confirm against the
        # preceding migrations in this app.
        for stash in orm.StashContent.objects.all():
            if stash.content_id:
                content = orm.Content(
                    id=stash.content_id,
                    timestamp=1288483232, # 30 days ago from date of writing
                    ip='0.0.0.0'
                )
                content.url_mapping = orm.ContentUrlMapping()
                content.url_mapping.save()
                content.save()
    def backwards(self, orm):
        # Intentionally a no-op: the backfilled rows are not removed.
        "Write your backwards methods here."
    # Frozen ORM model definitions generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'canvas.comment': {
            'Meta': {'object_name': 'Comment'},
            'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['canvas.Content']"}),
            'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.FloatField', [], {}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.content': {
            'Meta': {'object_name': 'Content'},
            'id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'timestamp': ('django.db.models.fields.FloatField', [], {}),
            'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.contentsticker': {
            'Meta': {'object_name': 'ContentSticker'},
            'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Content']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'timestamp': ('django.db.models.fields.FloatField', [], {}),
            'type_id': ('django.db.models.fields.IntegerField', [], {})
        },
        'canvas.contenturlmapping': {
            'Meta': {'object_name': 'ContentUrlMapping'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'canvas.hashtag': {
            'Meta': {'object_name': 'Hashtag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
        },
        'canvas.post': {
            'Meta': {'object_name': 'Post'},
            'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'blacklisted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'content_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'post_id': ('django.db.models.fields.IntegerField', [], {}),
            'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['canvas.Thread']"}),
            'thumb_down': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'thumb_up': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'timestamp': ('django.db.models.fields.FloatField', [], {})
        },
        'canvas.stashcontent': {
            'Meta': {'object_name': 'StashContent'},
            'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.thread': {
            'Meta': {'object_name': 'Thread'},
            'hashtags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['canvas.Hashtag']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['canvas']
| bsd-3-clause |
rooshilp/CMPUT410W15-project | testenv/lib/python2.7/site-packages/django/contrib/gis/gdal/libgdal.py | 48 | 3621 | from __future__ import unicode_literals
import logging
import os
import re
from ctypes import c_char_p, c_int, CDLL, CFUNCTYPE
from ctypes.util import find_library
from django.contrib.gis.gdal.error import OGRException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('django.contrib.gis')
# Custom library path set?
# GDAL_LIBRARY_PATH in Django settings overrides the automatic search below;
# any settings-related failure simply falls through to auto-detection.
try:
    from django.conf import settings
    lib_path = settings.GDAL_LIBRARY_PATH
except (AttributeError, EnvironmentError,
        ImportError, ImproperlyConfigured):
    lib_path = None
if lib_path:
    lib_names = None
elif os.name == 'nt':
    # Windows NT shared libraries
    lib_names = ['gdal111', 'gdal110', 'gdal19', 'gdal18', 'gdal17', 'gdal16']
elif os.name == 'posix':
    # *NIX library names.
    lib_names = ['gdal', 'GDAL', 'gdal1.11.0', 'gdal1.10.0', 'gdal1.9.0',
                 'gdal1.8.0', 'gdal1.7.0', 'gdal1.6.0']
else:
    raise OGRException('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the
# path to the GDAL library from the list of library names.
if lib_names:
    for lib_name in lib_names:
        lib_path = find_library(lib_name)
        if lib_path is not None:
            break
if lib_path is None:
    raise OGRException('Could not find the GDAL library (tried "%s"). '
                       'Try setting GDAL_LIBRARY_PATH in your settings.' %
                       '", "'.join(lib_names))
# This loads the GDAL/OGR C library
lgdal = CDLL(lib_path)
# On Windows, the GDAL binaries have some OSR routines exported with
# STDCALL, while others are not. Thus, the library will also need to
# be loaded up as WinDLL for said OSR functions that require the
# different calling convention.
if os.name == 'nt':
    from ctypes import WinDLL
    lwingdal = WinDLL(lib_path)
def std_call(func):
    """
    Return the named GDAL routine, honoring the STDCALL calling
    convention required by certain OSR functions on Win32 platforms.
    """
    library = lwingdal if os.name == 'nt' else lgdal
    return library[func]
#### Version-information functions. ####
# Returns GDAL library version information with the given key.
# C prototype: const char *GDALVersionInfo(const char *pszRequest);
_version_info = std_call('GDALVersionInfo')
_version_info.argtypes = [c_char_p]
_version_info.restype = c_char_p
def gdal_version():
    "Returns only the GDAL version number information."
    # Key must be bytes because _version_info.argtypes is [c_char_p].
    return _version_info(b'RELEASE_NAME')
def gdal_full_version():
    """
    Returns the full GDAL version information.

    An empty key makes GDALVersionInfo return its complete version string.
    The key must be *bytes* (as the sibling ``gdal_version()`` already
    passes) because ``_version_info.argtypes`` is ``[c_char_p]``; with
    ``unicode_literals`` in effect, ``''`` is a ``str`` and ctypes rejects
    it under Python 3 with an ArgumentError.
    """
    return _version_info(b'')
# Matches 'major.minor' with an optional '.subminor' component.
version_regex = re.compile(r'^(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<subminor>\d+))?')
def gdal_version_info():
    """Return a dict with 'major', 'minor' and 'subminor' version strings."""
    # GDALVersionInfo returns bytes; decode before regex matching.
    ver = gdal_version().decode()
    m = version_regex.match(ver)
    if not m:
        raise OGRException('Could not parse GDAL version string "%s"' % ver)
    return dict((key, m.group(key)) for key in ('major', 'minor', 'subminor'))
_verinfo = gdal_version_info()
GDAL_MAJOR_VERSION = int(_verinfo['major'])
GDAL_MINOR_VERSION = int(_verinfo['minor'])
# 'subminor' may be None (component absent); the `and` keeps it falsy then.
GDAL_SUBMINOR_VERSION = _verinfo['subminor'] and int(_verinfo['subminor'])
GDAL_VERSION = (GDAL_MAJOR_VERSION, GDAL_MINOR_VERSION, GDAL_SUBMINOR_VERSION)
del _verinfo
# Set library error handling so as errors are logged
CPLErrorHandler = CFUNCTYPE(None, c_int, c_int, c_char_p)
def err_handler(error_class, error_number, message):
    """Log GDAL errors through the Django GIS logger."""
    logger.error('GDAL_ERROR %d: %s' % (error_number, message))
# Wrap in the ctypes callback type; keeping this module-level reference
# prevents the callback from being garbage-collected while registered.
err_handler = CPLErrorHandler(err_handler)
def function(name, args, restype):
    """Return the GDAL routine `name` with its ctypes signature set."""
    func = std_call(name)
    func.argtypes = args
    func.restype = restype
    return func
set_error_handler = function('CPLSetErrorHandler', [CPLErrorHandler], CPLErrorHandler)
set_error_handler(err_handler)
| gpl-2.0 |
iamjstates/django-formsfive | formsfive/models.py | 1 | 6471 | #!/usr/bin/env python
from formsfive.widgets import Select, SelectMultiple, Textarea
from django.forms.forms import get_declared_fields
from django.utils.datastructures import SortedDict
from django.forms.widgets import media_property
from django.core.exceptions import FieldError
from django.forms import models as original
from formsfive import fields as five
from django.db import models
__all__ = ('ModelChoiceField', 'ModelMultipleChoiceField', 'HTML5ModelForm')
class ModelChoiceField(original.ModelChoiceField):
    # Same behavior as Django's ModelChoiceField, but rendered with the
    # HTML5-aware Select widget from formsfive.
    widget = Select
class ModelMultipleChoiceField(original.ModelMultipleChoiceField):
    # Same behavior as Django's ModelMultipleChoiceField, but rendered
    # with the HTML5-aware SelectMultiple widget from formsfive.
    widget = SelectMultiple
# Maps Django model field classes to the HTML5-aware form field class (and,
# optionally, widget) used when building a form from a model.
# NullBooleanField and TextField intentionally fall back to CharField.
# NOTE: the original dict listed PositiveIntegerField and
# PositiveSmallIntegerField twice; the duplicates (identical values that
# silently overwrote the earlier entries) have been removed.
HTML5FIELD_FOR_DBFIELD = {
    models.BigIntegerField: {'form_class': five.IntegerField},
    models.BooleanField: {'form_class': five.BooleanField},
    models.CharField: {'form_class': five.CharField},
    models.DateField: {'form_class': five.DateField},
    models.DateTimeField: {'form_class': five.DateTimeField},
    models.DecimalField: {'form_class': five.DecimalField},
    models.EmailField: {'form_class': five.EmailField},
    models.FileField: {'form_class': five.FileField},
    models.FilePathField: {'form_class': five.FilePathField},
    models.FloatField: {'form_class': five.FloatField},
    models.ForeignKey: {'form_class': ModelChoiceField},
    models.ImageField: {'form_class': five.ImageField},
    models.IntegerField: {'form_class': five.IntegerField},
    models.IPAddressField: {'form_class': five.IPAddressField},
    models.ManyToManyField: {'form_class': ModelMultipleChoiceField},
    models.NullBooleanField: {'form_class': five.CharField},
    models.PositiveIntegerField: {'form_class': five.IntegerField},
    models.PositiveSmallIntegerField: {'form_class': five.IntegerField},
    models.SlugField: {'form_class': five.SlugField},
    models.SmallIntegerField: {'form_class': five.IntegerField},
    models.TimeField: {'form_class': five.TimeField},
    models.TextField: {'form_class': five.CharField, 'widget': Textarea},
    models.URLField: {'form_class': five.URLField},
}
class _BaseForm(object):
    """Mixin that strips surrounding whitespace from every string value."""

    def clean(self):
        cleaned = self.cleaned_data
        for name in cleaned:
            value = cleaned[name]
            if isinstance(value, basestring):
                cleaned[name] = value.strip()
        return cleaned
def fields_for_model(model, fields=None, exclude=None, widgets=None, formfield_callback=None):
    """
    Returns a ``SortedDict`` containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.

    Model fields present in HTML5FIELD_FOR_DBFIELD are mapped to their
    HTML5-aware form field classes (and widgets).
    """
    field_list = []
    ignored = []
    opts = model._meta
    for f in sorted(opts.fields + opts.many_to_many):
        if not f.editable:
            continue
        if fields is not None and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        if widgets and f.name in widgets:
            kwargs = {'widget': widgets[f.name]}
        else:
            kwargs = {}
        if formfield_callback is None:
            try:
                # Swap in the HTML5 form field class for this db field.
                kwargs = dict(HTML5FIELD_FOR_DBFIELD[f.__class__], **kwargs)
            except KeyError:
                # No HTML5 mapping for this field class: fall back to the
                # field's own default form field, but keep any widget
                # override gathered above.  (The original bare ``except``
                # also swallowed unrelated errors and discarded the widget.)
                pass
            formfield = f.formfield(**kwargs)
        elif not callable(formfield_callback):
            raise TypeError('formfield_callback must be a function or callable')
        else:
            formfield = formfield_callback(f, **kwargs)
        if formfield:
            field_list.append((f.name, formfield))
        else:
            # Fields whose formfield() returned None (e.g. AutoField).
            ignored.append(f.name)
    field_dict = SortedDict(field_list)
    if fields:
        # Re-order to match the explicit ``fields`` list.
        field_dict = SortedDict(
            [(f, field_dict.get(f)) for f in fields
             if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
        )
    return field_dict
class HTML5ModelFormMetaclass(type):
    """Metaclass that assembles HTML5 form fields from the Meta model."""

    def __new__(cls, name, bases, attrs):
        formfield_callback = attrs.pop('formfield_callback', None)
        try:
            parents = [b for b in bases if issubclass(b, HTML5ModelForm)]
        except NameError:
            # We are defining ModelForm itself.
            parents = None
        declared_fields = get_declared_fields(bases, attrs, False)
        new_class = super(HTML5ModelFormMetaclass, cls).__new__(cls, name, bases,
                attrs)
        if not parents:
            # Base class itself: nothing further to assemble.
            return new_class
        if 'media' not in attrs:
            new_class.media = media_property(new_class)
        opts = new_class._meta = original.ModelFormOptions(getattr(new_class, 'Meta', None))
        if opts.model:
            # If a model is defined, extract form fields from it.
            fields = fields_for_model(opts.model, opts.fields,
                                      opts.exclude, opts.widgets, formfield_callback)
            # make sure opts.fields doesn't specify an invalid field
            none_model_fields = [k for k, v in fields.iteritems() if not v]
            missing_fields = set(none_model_fields) - \
                             set(declared_fields.keys())
            if missing_fields:
                message = 'Unknown field(s) (%s) specified for %s'
                message = message % (', '.join(missing_fields),
                                     opts.model.__name__)
                raise FieldError(message)
            # Declared fields always win over model-derived ones.
            fields.update(declared_fields)
        else:
            fields = declared_fields
        new_class.declared_fields = declared_fields
        new_class.base_fields = fields
        return new_class
class HTML5ModelForm(_BaseForm, original.BaseModelForm):
    # Python 2 metaclass hook: form fields are built from the Meta model
    # using the HTML5 field mapping above.
    __metaclass__ = HTML5ModelFormMetaclass
| bsd-3-clause |
simomarsili/ndd | ndd/utils.py | 1 | 1994 | # -*- coding: utf-8 -*-
"""Utils functions."""
import sys
from functools import wraps
import numpy
def unexpected_value(x):
    """Return True for suspicious values: None, zero, NaN, or > 1e4."""
    if x is None:
        return True
    return x > 1.e4 or x == 0 or numpy.isnan(x)
def dump_on_fail(fp=sys.stdout):
    """Decorator factory: log calls whose result looks suspicious.

    Wraps a function so that whenever its return value is "unexpected"
    (see `unexpected_value`), the function name and call arguments are
    written to `fp`.  The wrapped function's result is returned unchanged.

    Parameters
    ----------
    fp : path or file-like
        Destination for the dump; a path is opened for writing.
    """
    # pylint:disable=unused-variable
    if not hasattr(fp, 'write'):
        fp = open(fp, 'w')

    def unexpected(result):
        result = numpy.array(result, copy=False, ndmin=1)
        return numpy.any(unexpected_value(result))

    def decorate(func):
        @wraps(func)
        def dump_args_to_file(*args, **kwargs):
            result = func(*args, **kwargs)
            if unexpected(result):
                # BUG FIX: the original passed **kwargs straight into
                # print(), which raises TypeError for any non-print keyword.
                print(func.__name__, args, kwargs, file=fp)
            # BUG FIX: the original dropped the result, never returned the
            # wrapper from decorate(), and never returned the decorator
            # from dump_on_fail(), making the decorator unusable.
            return result
        return dump_args_to_file

    return decorate
def delimited_to_camelcase(string, d='_', remove=None):
    """Turn a delimiter_separated name into CamelCase.

    If `remove` is given, its title-cased form is stripped from the
    result before the delimiters are dropped.
    """
    if d not in string:
        # Single word: title-case it unless it already starts uppercase.
        return string if string[0].isupper() else string.title()
    titled = string.title()
    if remove:
        titled = titled.replace(remove.lower().title(), '')
    return titled.replace(d, '')
def camelcase_to_delimited(string, d='_', remove=None):
"""Convert string from CamelCase to delimiter_separated."""
result = []
for i, c in enumerate(string):
if c.isupper():
if i > 0:
result.append(d)
result.append(c.lower())
result = ''.join(result)
if remove and remove in result:
remove = remove.lower()
result = d.join([x for x in result.split(d) if x != remove])
return result
def as_class_name(*args, **kwargs):
    """Convert string into a CamelCase class name.

    Thin alias for `delimited_to_camelcase`; accepts the same
    (string, d='_', remove=None) arguments.
    """
    return delimited_to_camelcase(*args, **kwargs)
def register_class(cls, register):
    """Store `cls` in the `register` mapping, keyed by its class name."""
    name = cls.__name__
    register[name] = cls
    return register
| bsd-3-clause |
ejucovy/reportlab | docs/userguide/graph_widgets.py | 5 | 13578 | #Copyright ReportLab Europe Ltd. 2000-2008
#see license.txt for license details
__version__=''' $Id: graph_widgets.py 3375 2009-01-16 18:23:19Z jonas $ '''
from tools.docco.rl_doc_utils import *
from reportlab.graphics.shapes import *
from reportlab.graphics.widgets import signsandsymbols
# Doc-generation script: each heading*/disc/eg/bullet call below emits a
# chunk of the "Widgets" chapter of the ReportLab user guide; draw() embeds
# a live Drawing into the generated document.
heading2("Widgets")
disc("""
We now describe widgets and how they relate to shapes.
Using many examples it is shown how widgets make reusable
graphics components.
""")
heading3("Shapes vs. Widgets")
disc("""Up until now, Drawings have been 'pure data'. There is no code in them
to actually do anything, except assist the programmer in checking and
inspecting the drawing. In fact, that's the cornerstone of the whole
concept and is what lets us achieve portability - a renderer only
needs to implement the primitive shapes.""")
disc("""We want to build reusable graphic objects, including a powerful chart
library. To do this we need to reuse more tangible things than
rectangles and circles. We should be able to write objects for other
to reuse - arrows, gears, text boxes, UML diagram nodes, even fully
fledged charts.""")
disc("""
The Widget standard is a standard built on top of the shapes module.
Anyone can write new widgets, and we can build up libraries of them.
Widgets support the $getProperties()$ and $setProperties()$ methods,
so you can inspect and modify as well as document them in a uniform
way.
""")
bullet("A widget is a reusable shape ")
bullet("""it can be initialized with no arguments
when its $draw()$ method is called it creates a primitive Shape or a
Group to represent itself""")
bullet("""It can have any parameters you want, and they can drive the way it is
drawn""")
bullet("""it has a $demo()$ method which should return an attractively drawn
example of itself in a 200x100 rectangle. This is the cornerstone of
the automatic documentation tools. The $demo()$ method should also have
a well written docstring, since that is printed too!""")
disc("""Widgets run contrary to the idea that a drawing is just a bundle of
shapes; surely they have their own code? The way they work is that a
widget can convert itself to a group of primitive shapes. If some of
its components are themselves widgets, they will get converted too.
This happens automatically during rendering; the renderer will not see
your chart widget, but just a collection of rectangles, lines and
strings. You can also explicitly 'flatten out' a drawing, causing all
widgets to be converted to primitives.""")
heading3("Using a Widget")
disc("""
Let's imagine a simple new widget.
We will use a widget to draw a face, then show how it was implemented.""")
eg("""
>>> from reportlab.lib import colors
>>> from reportlab.graphics import shapes
>>> from reportlab.graphics import widgetbase
>>> from reportlab.graphics import renderPDF
>>> d = shapes.Drawing(200, 100)
>>> f = widgetbase.Face()
>>> f.skinColor = colors.yellow
>>> f.mood = "sad"
>>> d.add(f)
>>> renderPDF.drawToFile(d, 'face.pdf', 'A Face')
""")
# Live example: actually build the Face drawing that is embedded in the guide.
from reportlab.graphics import widgetbase
d = Drawing(200, 120)
f = widgetbase.Face()
f.x = 50
f.y = 10
f.skinColor = colors.yellow
f.mood = "sad"
d.add(f)
draw(d, 'A sample widget')
disc("""
Let's see what properties it has available, using the $setProperties()$
method we have seen earlier:
""")
eg("""
>>> f.dumpProperties()
eyeColor = Color(0.00,0.00,1.00)
mood = sad
size = 80
skinColor = Color(1.00,1.00,0.00)
x = 10
y = 10
>>>
""")
disc("""
One thing which seems strange about the above code is that we did not
set the size or position when we made the face.
This is a necessary trade-off to allow a uniform interface for
constructing widgets and documenting them - they cannot require
arguments in their $__init__()$ method.
Instead, they are generally designed to fit in a 200 x 100
window, and you move or resize them by setting properties such as
x, y, width and so on after creation.
""")
disc("""
In addition, a widget always provides a $demo()$ method.
Simple ones like this always do something sensible before setting
properties, but more complex ones like a chart would not have any
data to plot.
The documentation tool calls $demo()$ so that your fancy new chart
class can create a drawing showing what it can do.
""")
disc("""
Here are a handful of simple widgets available in the module
<i>signsandsymbols.py</i>:
""")
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.widgets import signsandsymbols
d = Drawing(230, 230)
ne = signsandsymbols.NoEntry()
ds = signsandsymbols.DangerSign()
fd = signsandsymbols.FloppyDisk()
ns = signsandsymbols.NoSmoking()
ne.x, ne.y = 10, 10
ds.x, ds.y = 120, 10
fd.x, fd.y = 10, 120
ns.x, ns.y = 120, 120
d.add(ne)
d.add(ds)
d.add(fd)
d.add(ns)
draw(d, 'A few samples from signsandsymbols.py')
disc("""
And this is the code needed to generate them as seen in the drawing above:
""")
eg("""
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.widgets import signsandsymbols
d = Drawing(230, 230)
ne = signsandsymbols.NoEntry()
ds = signsandsymbols.DangerSign()
fd = signsandsymbols.FloppyDisk()
ns = signsandsymbols.NoSmoking()
ne.x, ne.y = 10, 10
ds.x, ds.y = 120, 10
fd.x, fd.y = 10, 120
ns.x, ns.y = 120, 120
d.add(ne)
d.add(ds)
d.add(fd)
d.add(ns)
""")
heading3("Compound Widgets")
disc("""Let's imagine a compound widget which draws two faces side by side.
This is easy to build when you have the Face widget.""")
eg("""
>>> tf = widgetbase.TwoFaces()
>>> tf.faceOne.mood
'happy'
>>> tf.faceTwo.mood
'sad'
>>> tf.dumpProperties()
faceOne.eyeColor = Color(0.00,0.00,1.00)
faceOne.mood = happy
faceOne.size = 80
faceOne.skinColor = None
faceOne.x = 10
faceOne.y = 10
faceTwo.eyeColor = Color(0.00,0.00,1.00)
faceTwo.mood = sad
faceTwo.size = 80
faceTwo.skinColor = None
faceTwo.x = 100
faceTwo.y = 10
>>>
""")
disc("""The attributes 'faceOne' and 'faceTwo' are deliberately exposed so you
can get at them directly. There could also be top-level attributes,
but there aren't in this case.""")
heading3("Verifying Widgets")
disc("""The widget designer decides the policy on verification, but by default
they work like shapes - checking every assignment - if the designer
has provided the checking information.""")
heading3("Implementing Widgets")
disc("""We tried to make it as easy to implement widgets as possible. Here's
the code for a Face widget which does not do any type checking:""")
eg("""
class Face(Widget):
\"\"\"This draws a face with two eyes, mouth and nose.\"\"\"
def __init__(self):
self.x = 10
self.y = 10
self.size = 80
self.skinColor = None
self.eyeColor = colors.blue
self.mood = 'happy'
def draw(self):
s = self.size # abbreviate as we will use this a lot
g = shapes.Group()
g.transform = [1,0,0,1,self.x, self.y]
# background
g.add(shapes.Circle(s * 0.5, s * 0.5, s * 0.5,
fillColor=self.skinColor))
# CODE OMITTED TO MAKE MORE SHAPES
return g
""")
disc("""We left out all the code to draw the shapes in this document, but you
can find it in the distribution in $widgetbase.py$.""")
disc("""By default, any attribute without a leading underscore is returned by
setProperties. This is a deliberate policy to encourage consistent
coding conventions.""")
disc("""Once your widget works, you probably want to add support for
verification. This involves adding a dictionary to the class called
$_verifyMap$, which map from attribute names to 'checking functions'.
The $widgetbase.py$ module defines a bunch of checking functions with names
like $isNumber$, $isListOfShapes$ and so on. You can also simply use $None$,
which means that the attribute must be present but can have any type.
And you can and should write your own checking functions. We want to
restrict the "mood" custom attribute to the values "happy", "sad" or
"ok". So we do this:""")
eg("""
class Face(Widget):
\"\"\"This draws a face with two eyes. It exposes a
couple of properties to configure itself and hides
all other details\"\"\"
def checkMood(moodName):
return (moodName in ('happy','sad','ok'))
_verifyMap = {
'x': shapes.isNumber,
'y': shapes.isNumber,
'size': shapes.isNumber,
'skinColor':shapes.isColorOrNone,
'eyeColor': shapes.isColorOrNone,
'mood': checkMood
}
""")
disc("""This checking will be performed on every attribute assignment; or, if
$config.shapeChecking$ is off, whenever you call $myFace.verify()$.""")
heading3("Documenting Widgets")
disc("""
We are working on a generic tool to document any Python package or
module; this is already checked into ReportLab and will be used to
generate a reference for the ReportLab package.
When it encounters widgets, it adds extra sections to the
manual including:""")
bullet("the doc string for your widget class ")
bullet("the code snippet from your <i>demo()</i> method, so people can see how to use it")
bullet("the drawing produced by the <i>demo()</i> method ")
bullet("the property dump for the widget in the drawing. ")
disc("""
This tool will mean that we can have guaranteed up-to-date
documentation on our widgets and charts, both on the web site
and in print; and that you can do the same for your own widgets,
too!
""")
heading3("Widget Design Strategies")
disc("""We could not come up with a consistent architecture for designing
widgets, so we are leaving that problem to the authors! If you do not
like the default verification strategy, or the way
$setProperties/getProperties$ works, you can override them yourself.""")
disc("""For simple widgets it is recommended that you do what we did above:
select non-overlapping properties, initialize every property on
$__init__$ and construct everything when $draw()$ is called. You can
instead have $__setattr__$ hooks and have things updated when certain
attributes are set. Consider a pie chart. If you want to expose the
individual wedges, you might write code like this:""")
eg("""
from reportlab.graphics.charts import piecharts
pc = piecharts.Pie()
pc.defaultColors = [navy, blue, skyblue] #used in rotation
pc.data = [10,30,50,25]
pc.slices[7].strokeWidth = 5
""")
#removed 'pc.backColor = yellow' from above code example
disc("""The last line is problematic as we have only created four wedges - in
fact we might not have created them yet. Does $pc.wedges[7]$ raise an
error? Is it a prescription for what should happen if a seventh wedge
is defined, used to override the default settings? We dump this
problem squarely on the widget author for now, and recommend that you
get a simple one working before exposing 'child objects' whose
existence depends on other properties' values :-)""")
disc("""We also discussed rules by which parent widgets could pass properties
to their children. There seems to be a general desire for a global way
to say that 'all wedges get their lineWidth from the lineWidth of
their parent' without a lot of repetitive coding. We do not have a
universal solution, so again leave that to widget authors. We hope
people will experiment with push-down, pull-down and pattern-matching
approaches and come up with something nice. In the meantime, we
certainly can write monolithic chart widgets which work like the ones
in, say, Visual Basic and Delphi.""")
disc("""For now have a look at the following sample code using an early
version of a pie chart widget and the output it generates:""")
eg("""
from reportlab.lib.colors import *
from reportlab.graphics import shapes,renderPDF
from reportlab.graphics.charts.piecharts import Pie
d = Drawing(400,200)
d.add(String(100,175,"Without labels", textAnchor="middle"))
d.add(String(300,175,"With labels", textAnchor="middle"))
pc = Pie()
pc.x = 25
pc.y = 50
pc.data = [10,20,30,40,50,60]
pc.slices[0].popout = 5
d.add(pc, 'pie1')
pc2 = Pie()
pc2.x = 150
pc2.y = 50
pc2.data = [10,20,30,40,50,60]
pc2.labels = ['a','b','c','d','e','f']
d.add(pc2, 'pie2')
pc3 = Pie()
pc3.x = 275
pc3.y = 50
pc3.data = [10,20,30,40,50,60]
pc3.labels = ['a','b','c','d','e','f']
pc3.wedges.labelRadius = 0.65
pc3.wedges.fontName = "Helvetica-Bold"
pc3.wedges.fontSize = 16
pc3.wedges.fontColor = colors.yellow
d.add(pc3, 'pie3')
""")
# Hack to force a new paragraph before the todo() :-(
disc("")
# Live version of the snippet above; note it uses pc3.slices (the current
# API) where the printed snippet still shows the older pc3.wedges spelling.
from reportlab.lib.colors import *
from reportlab.graphics import shapes,renderPDF
from reportlab.graphics.charts.piecharts import Pie
d = Drawing(400,200)
d.add(String(100,175,"Without labels", textAnchor="middle"))
d.add(String(300,175,"With labels", textAnchor="middle"))
pc = Pie()
pc.x = 25
pc.y = 50
pc.data = [10,20,30,40,50,60]
pc.slices[0].popout = 5
d.add(pc, 'pie1')
pc2 = Pie()
pc2.x = 150
pc2.y = 50
pc2.data = [10,20,30,40,50,60]
pc2.labels = ['a','b','c','d','e','f']
d.add(pc2, 'pie2')
pc3 = Pie()
pc3.x = 275
pc3.y = 50
pc3.data = [10,20,30,40,50,60]
pc3.labels = ['a','b','c','d','e','f']
pc3.slices.labelRadius = 0.65
pc3.slices.fontName = "Helvetica-Bold"
pc3.slices.fontSize = 16
pc3.slices.fontColor = colors.yellow
d.add(pc3, 'pie3')
draw(d, 'Some sample Pies')
| bsd-3-clause |
kashyap32/scrapy | scrapy/settings/__init__.py | 124 | 6388 | import six
import json
import copy
import warnings
from collections import MutableMapping
from importlib import import_module
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.exceptions import ScrapyDeprecationWarning
from . import default_settings
# Priority associated with each settings source; a higher number wins when
# the same setting is assigned from several sources.
SETTINGS_PRIORITIES = {
    'default': 0,
    'command': 10,
    'project': 20,
    'spider': 30,
    'cmdline': 40,
}
class SettingsAttribute(object):
    """Value/priority pair backing a single setting.

    Internal helper: use the Settings class for settings configuration,
    not this one.
    """

    def __init__(self, value, priority):
        self.value = value
        self.priority = priority

    def set(self, value, priority):
        """Overwrite the stored value unless `priority` is lower."""
        if priority < self.priority:
            return
        self.value = value
        self.priority = priority

    def __str__(self):
        template = "<SettingsAttribute value={self.value!r} priority={self.priority}>"
        return template.format(self=self)

    __repr__ = __str__
class Settings(object):
    """Priority-aware settings container.

    Each value is stored as a SettingsAttribute; an assignment only takes
    effect when its priority is >= the priority of the stored value.
    """

    def __init__(self, values=None, priority='project'):
        self.frozen = False
        self.attributes = {}
        # Defaults are loaded first at the lowest priority so any explicit
        # values can override them.
        self.setmodule(default_settings, priority='default')
        if values is not None:
            self.setdict(values, priority)

    def __getitem__(self, opt_name):
        # Missing settings yield None rather than raising KeyError.
        value = None
        if opt_name in self.attributes:
            value = self.attributes[opt_name].value
        return value

    def get(self, name, default=None):
        return self[name] if self[name] is not None else default

    def getbool(self, name, default=False):
        """
        True is: 1, '1', True
        False is: 0, '0', False, None
        """
        return bool(int(self.get(name, default)))

    def getint(self, name, default=0):
        return int(self.get(name, default))

    def getfloat(self, name, default=0.0):
        return float(self.get(name, default))

    def getlist(self, name, default=None):
        # Comma-separated strings are split into a list.
        value = self.get(name, default or [])
        if isinstance(value, six.string_types):
            value = value.split(',')
        return list(value)

    def getdict(self, name, default=None):
        # String values are parsed as JSON objects.
        value = self.get(name, default or {})
        if isinstance(value, six.string_types):
            value = json.loads(value)
        return dict(value)

    def set(self, name, value, priority='project'):
        # No-op when an equal-or-higher priority value is already stored
        # (delegated to SettingsAttribute.set).
        self._assert_mutability()
        if isinstance(priority, six.string_types):
            priority = SETTINGS_PRIORITIES[priority]
        if name not in self.attributes:
            self.attributes[name] = SettingsAttribute(value, priority)
        else:
            self.attributes[name].set(value, priority)

    def setdict(self, values, priority='project'):
        self._assert_mutability()
        for name, value in six.iteritems(values):
            self.set(name, value, priority)

    def setmodule(self, module, priority='project'):
        # Accepts a module object or its dotted import path; only UPPERCASE
        # attributes are treated as settings.
        self._assert_mutability()
        if isinstance(module, six.string_types):
            module = import_module(module)
        for key in dir(module):
            if key.isupper():
                self.set(key, getattr(module, key), priority)

    def _assert_mutability(self):
        if self.frozen:
            raise TypeError("Trying to modify an immutable Settings object")

    def copy(self):
        return copy.deepcopy(self)

    def freeze(self):
        # After this, every set* call raises TypeError.
        self.frozen = True

    def frozencopy(self):
        # NOTE: the local name shadows the `copy` module inside this method.
        copy = self.copy()
        copy.freeze()
        return copy

    @property
    def overrides(self):
        # Deprecated dict-like view whose writes go in at 'cmdline' priority.
        warnings.warn("`Settings.overrides` attribute is deprecated and won't "
                      "be supported in Scrapy 0.26, use "
                      "`Settings.set(name, value, priority='cmdline')` instead",
                      category=ScrapyDeprecationWarning, stacklevel=2)
        try:
            o = self._overrides
        except AttributeError:
            self._overrides = o = _DictProxy(self, 'cmdline')
        return o

    @property
    def defaults(self):
        # Deprecated dict-like view whose writes go in at 'default' priority.
        warnings.warn("`Settings.defaults` attribute is deprecated and won't "
                      "be supported in Scrapy 0.26, use "
                      "`Settings.set(name, value, priority='default')` instead",
                      category=ScrapyDeprecationWarning, stacklevel=2)
        try:
            o = self._defaults
        except AttributeError:
            self._defaults = o = _DictProxy(self, 'default')
        return o
class _DictProxy(MutableMapping):
def __init__(self, settings, priority):
self.o = {}
self.settings = settings
self.priority = priority
def __len__(self):
return len(self.o)
def __getitem__(self, k):
return self.o[k]
def __setitem__(self, k, v):
self.settings.set(k, v, priority=self.priority)
self.o[k] = v
def __delitem__(self, k):
del self.o[k]
def __iter__(self, k, v):
return iter(self.o)
class CrawlerSettings(Settings):
    """Deprecated Settings subclass layering a project settings module.

    Lookup order: overrides > settings_module attribute > defaults >
    the base Settings store.
    """

    def __init__(self, settings_module=None, **kw):
        Settings.__init__(self, **kw)
        self.settings_module = settings_module

    def __getitem__(self, opt_name):
        if opt_name in self.overrides:
            return self.overrides[opt_name]
        if self.settings_module and hasattr(self.settings_module, opt_name):
            return getattr(self.settings_module, opt_name)
        if opt_name in self.defaults:
            return self.defaults[opt_name]
        return Settings.__getitem__(self, opt_name)

    def __str__(self):
        return "<CrawlerSettings module=%r>" % self.settings_module
# Replace the concrete class with a deprecation wrapper so that using
# CrawlerSettings warns and points callers at scrapy.settings.Settings.
CrawlerSettings = create_deprecated_class(
    'CrawlerSettings', CrawlerSettings,
    new_class_path='scrapy.settings.Settings')
def iter_default_settings():
    """Yield (name, value) pairs for every default setting."""
    for attr in dir(default_settings):
        if not attr.isupper():
            continue
        yield attr, getattr(default_settings, attr)
def overridden_settings(settings):
    """Yield (name, value) for settings that differ from their defaults.

    Defaults whose value is a dict are never reported.
    """
    for name, default in iter_default_settings():
        current = settings[name]
        if isinstance(default, dict):
            continue
        if current != default:
            yield name, current
| bsd-3-clause |
TheBootloader/android_kernel_shooter | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0                # suppress progress() output (set via -q)
test = 0                 # syntax-check only; do not touch sysfs (set via -t)
comments = 0             # echo test-file comments (set via -c)
# sysfs layout of the rt-mutex tester: one directory per test thread,
# each exposing a status and a command attribute.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
# Textual command names from the test file, mapped to the numeric opcode
# strings understood by the kernel tester.
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }
# Test opcodes: test name -> [status-field letter, comparator, fixed
# argument].  A fixed argument of None means the value to compare against
# comes from the test file line instead.
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    """Print the command line help text (Python 2 print statements)."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c    display comments after first command"
    print " -h    help"
    print " -q    quiet mode"
    print " -t    test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print "          otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    """Echo a progress line unless -q was given."""
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare status value `val` against test spec `top` with argument `arg`.

    `top` is a test_opcodes entry: [field letter, comparator, fixed arg].
    Returns 1 when the comparison holds, 0 otherwise.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state: the status packs one decimal digit per lock; pick
        # the digit selected by `arg` (the lock index).
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode comparisons accept symbolic command names as well.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
#    progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)
# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)
# Select the input source
# Either read the test specification from a file argument or from stdin.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin
linenr = 0
# Read the test patterns
# Each line has the form cmd:opcode:threadid:data; '#' lines are comments.
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comments are echoed only after the first command when -c is set.
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        # "t" checks once; "w" polls the status file until the condition holds.
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress("   " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
jwass/mplexporter | mplexporter/renderers/vincent_renderer.py | 64 | 1922 | import warnings
from .base import Renderer
from ..exporter import Exporter
class VincentRenderer(Renderer):
    """Renderer that converts matplotlib figure elements to Vincent charts.

    Limitations (warned about at runtime): only one plot element per figure
    is kept, and only elements expressed in data coordinates are handled.
    """

    def open_figure(self, fig, props):
        # Reset per-figure state and remember the figure's pixel dimensions.
        self.chart = None
        self.figwidth = int(props['figwidth'] * props['dpi'])
        self.figheight = int(props['figheight'] * props['dpi'])

    def draw_line(self, data, coordinates, style, label, mplobj=None):
        import vincent  # only import if VincentRenderer is used
        if coordinates != 'data':
            warnings.warn("Only data coordinates supported. Skipping this")
            # BUG FIX: actually skip, as the warning promises; previously the
            # element was drawn anyway with misinterpreted coordinates.
            return
        linedata = {'x': data[:, 0],
                    'y': data[:, 1]}
        line = vincent.Line(linedata, iter_idx='x',
                            width=self.figwidth, height=self.figheight)
        # TODO: respect the other style settings
        line.scales['color'].range = [style['color']]
        if self.chart is None:
            self.chart = line
        else:
            warnings.warn("Multiple plot elements not yet supported")

    def draw_markers(self, data, coordinates, style, label, mplobj=None):
        import vincent  # only import if VincentRenderer is used
        if coordinates != 'data':
            warnings.warn("Only data coordinates supported. Skipping this")
            # BUG FIX: actually skip (see draw_line above).
            return
        markerdata = {'x': data[:, 0],
                      'y': data[:, 1]}
        markers = vincent.Scatter(markerdata, iter_idx='x',
                                  width=self.figwidth, height=self.figheight)
        # TODO: respect the other style settings
        markers.scales['color'].range = [style['facecolor']]
        if self.chart is None:
            self.chart = markers
        else:
            warnings.warn("Multiple plot elements not yet supported")
def fig_to_vincent(fig):
    """Render *fig* (a matplotlib figure) and return the vincent chart."""
    vincent_renderer = VincentRenderer()
    Exporter(vincent_renderer).run(fig)
    return vincent_renderer.chart
| bsd-3-clause |
WillianPaiva/1flow | oneflow/core/migrations/0108_auto__add_field_chainedprocessor_is_active.py | 2 | 67573 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ChainedProcessor.is_active'
db.add_column(u'core_chainedprocessor', 'is_active',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop the 'is_active' column again."""
        # Deleting field 'ChainedProcessor.is_active'
        db.delete_column(u'core_chainedprocessor', 'is_active')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'address_book': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'avatar_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': 'f5d5e4a6552f4c0b9e11c6345ea6f44a'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.article': {
'Meta': {'object_name': 'Article', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'comments_feed_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'publications'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version_description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.author': {
'Meta': {'unique_together': "(('origin_name', 'website'),)", 'object_name': 'Author'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Author']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identities': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'identities_rel_+'", 'null': 'True', 'to': "orm['core.Author']"}),
'is_unsure': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'null': 'True', 'blank': 'True'}),
'origin_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'origin_id_str': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'origin_name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'website_data': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'})
},
'core.baseaccount': {
'Meta': {'object_name': 'BaseAccount'},
'conn_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_conn': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_usable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseaccount_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'accounts'", 'to': u"orm['base.User']"})
},
'core.basefeed': {
'Meta': {'object_name': 'BaseFeed'},
'closed_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_fetch': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'errors': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'fetch_interval': ('django.db.models.fields.IntegerField', [], {'default': '43200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.basefeed_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authored_items'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseItem']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseitem_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sources_rel_+'", 'null': 'True', 'to': "orm['core.BaseItem']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'})
},
'core.chainedprocessor': {
'Meta': {'object_name': 'ChainedProcessor'},
'chain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'processors'", 'to': "orm['core.ProcessorChain']"}),
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notes_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'processor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'chains'", 'to': "orm['core.Processor']"})
},
'core.combinedfeed': {
'Meta': {'object_name': 'CombinedFeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.combinedfeedrule': {
'Meta': {'ordering': "('position',)", 'object_name': 'CombinedFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'combinedfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.CombinedFeed']"}),
'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.BaseFeed']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.folder': {
'Meta': {'unique_together': "(('name', 'user', 'parent'),)", 'object_name': 'Folder'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'folders'", 'to': u"orm['base.User']"})
},
'core.helpcontent': {
'Meta': {'ordering': "['ordering', 'id']", 'object_name': 'HelpContent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_en': ('django.db.models.fields.TextField', [], {}),
'content_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'core.helpwizards': {
'Meta': {'object_name': 'HelpWizards'},
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'wizards'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_all': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'welcome_beta_shown': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'core.historicalarticle': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalArticle'},
u'baseitem_ptr_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'comments_feed_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'db_index': 'True'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'version_description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.historyentry': {
'Meta': {'object_name': 'HistoryEntry'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.historyentry_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.homepreferences': {
'Meta': {'object_name': 'HomePreferences'},
'experimental_features': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'home'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_shows': ('django.db.models.fields.IntegerField', [], {'default': '2', 'blank': 'True'}),
'show_advanced_preferences': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'style': ('django.db.models.fields.CharField', [], {'default': "u'RL'", 'max_length': '2', 'blank': 'True'})
},
'core.language': {
'Meta': {'object_name': 'Language'},
'dj_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso639_1': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_2': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_3': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Language']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.mailaccount': {
'Meta': {'object_name': 'MailAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'password': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'use_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'core.mailfeed': {
'Meta': {'object_name': 'MailFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mail_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.MailAccount']"}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'core.mailfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'MailFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'header_field': ('django.db.models.fields.IntegerField', [], {'default': '4', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mailfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.MailFeed']"}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_header': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.nodepermissions': {
'Meta': {'object_name': 'NodePermissions'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SyncNode']", 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'17c1275a37f74e49a648527570da5208'", 'max_length': '32', 'blank': 'True'})
},
'core.originaldata': {
'Meta': {'object_name': 'OriginalData'},
'feedparser': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feedparser_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'google_reader': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'google_reader_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'original_data'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.BaseItem']"}),
'raw_email': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'raw_email_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'twitter': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'twitter_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.preferences': {
'Meta': {'object_name': 'Preferences'},
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['base.User']", 'unique': 'True', 'primary_key': 'True'})
},
'core.processingerror': {
'Meta': {'object_name': 'ProcessingError'},
'data': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'instance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'issue_ref': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'processor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'processors'", 'to': "orm['core.ChainedProcessor']"})
},
'core.processor': {
'Meta': {'object_name': 'Processor'},
'accept_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Processor']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'needs_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Processor']"}),
'process_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'processor_type': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'requirements': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'source_address': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processors'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.processorchain': {
'Meta': {'object_name': 'ProcessorChain'},
'applies_on': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ProcessorChain']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.ProcessorChain']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processor_chains'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.read': {
'Meta': {'unique_together': "(('user', 'item'),)", 'object_name': 'Read'},
'bookmark_type': ('django.db.models.fields.CharField', [], {'default': "u'U'", 'max_length': '2'}),
'check_set_subscriptions_131004_done': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_analysis': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_archived': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_auto_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_bookmarked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_fact': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_fun': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowhow': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowledge': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_number': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_prospective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_quote': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_rules': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_starred': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_analysis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_auto_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_fact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_knowhow': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_number': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_prospective': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_quote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_rules': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_starred': ('django.db.models.fields.NullBooleanField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reads'", 'to': "orm['core.BaseItem']"}),
'knowledge_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'senders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reads_sent'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_reads'", 'to': u"orm['base.User']"})
},
'core.readpreferences': {
'Meta': {'object_name': 'ReadPreferences'},
'auto_mark_read_delay': ('django.db.models.fields.IntegerField', [], {'default': '4500', 'blank': 'True'}),
'bookmarked_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bookmarked_marks_unread': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'read'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_switches_to_fullscreen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reading_speed': ('django.db.models.fields.IntegerField', [], {'default': '200', 'blank': 'True'}),
'show_bottom_navbar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'starred_removes_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'watch_attributes_mark_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.rssatomfeed': {
'Meta': {'object_name': 'RssAtomFeed', '_ormbases': ['core.BaseFeed']},
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'last_etag': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': "orm['core.WebSite']"})
},
'core.selectorpreferences': {
'Meta': {'object_name': 'SelectorPreferences'},
'extended_folders_depth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'folders_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lists_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'selector'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_closed_streams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscriptions_in_multiple_folders': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'titles_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.sharepreferences': {
'Meta': {'object_name': 'SharePreferences'},
'default_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'share'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"})
},
'core.simpletag': {
'Meta': {'unique_together': "(('name', 'language'),)", 'object_name': 'SimpleTag'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'origin_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'origin_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.SimpleTag']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.snappreferences': {
'Meta': {'object_name': 'SnapPreferences'},
'default_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'snap'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'select_paragraph': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.staffpreferences': {
'Meta': {'object_name': 'StaffPreferences'},
'no_home_redirect': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'staff'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'reading_lists_show_bad_articles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'selector_shows_admin_links': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'super_powers_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'core.subscription': {
'Meta': {'unique_together': "(('feed', 'user'),)", 'object_name': 'Subscription'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'blank': 'True', 'to': "orm['core.BaseFeed']"}),
'folders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Folder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reads': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Read']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_subscriptions'", 'blank': 'True', 'to': u"orm['base.User']"})
},
'core.syncnode': {
'Meta': {'object_name': 'SyncNode'},
'broadcast': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_seen': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_local_instance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'local_token': ('django.db.models.fields.CharField', [], {'default': "'e25c6c4d2f914b8ea62520a8a608011b'", 'max_length': '32', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'remote_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'blank': 'True'}),
'strategy': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'sync_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '384', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'core.tweet': {
'Meta': {'object_name': 'Tweet', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'entities': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tweets'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'entities_fetched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mentions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mentions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'unique': 'True', 'blank': 'True'})
},
'core.twitteraccount': {
'Meta': {'object_name': 'TwitterAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'fetch_owned_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fetch_subscribed_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'social_auth': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'twitter_account'", 'unique': 'True', 'to': u"orm['default.UserSocialAuth']"}),
'timeline': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'twitter_account'", 'unique': 'True', 'null': 'True', 'to': "orm['core.TwitterFeed']"})
},
'core.twitterfeed': {
'Meta': {'object_name': 'TwitterFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'twitter_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.TwitterAccount']"}),
'backfill_completed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'is_backfilled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'track_locations': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'track_terms': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'core.twitterfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'TwitterFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.TwitterFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_field': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'twitterfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.TwitterFeed']"})
},
'core.usercounters': {
'Meta': {'object_name': 'UserCounters'},
'placeholder': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_counters'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"})
},
'core.userfeeds': {
'Meta': {'object_name': 'UserFeeds'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_feeds'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"})
},
'core.userimport': {
'Meta': {'object_name': 'UserImport', '_ormbases': ['core.HistoryEntry']},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'historyentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.HistoryEntry']", 'unique': 'True', 'primary_key': 'True'}),
'lines': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'results': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'urls': ('django.db.models.fields.TextField', [], {})
},
'core.usersubscriptions': {
'Meta': {'object_name': 'UserSubscriptions'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blogs'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Subscription']"}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_subscriptions'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"})
},
'core.website': {
'Meta': {'object_name': 'WebSite'},
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'fetch_limit_nr': ('django.db.models.fields.IntegerField', [], {'default': '16', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'mail_warned': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.WebSite']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'})
},
u'default.usersocialauth': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'UserSocialAuth', 'db_table': "'social_auth_usersocialauth'"},
'extra_data': ('social.apps.django_app.default.fields.JSONField', [], {'default': "'{}'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_auth'", 'to': u"orm['base.User']"})
}
}
    # South migration option: restrict this migration's frozen-model
    # bookkeeping to the 'core' app.
    complete_apps = ['core']
nachandr/cfme_tests | cfme/tests/control/test_smoke_control.py | 2 | 1956 | """This test contains necessary smoke tests for the Control."""
import pytest
from cfme import control
from cfme import test_requirements
from cfme.utils.appliance.implementations.ui import navigate_to
# Marks applied to every test in this module: the Control test
# requirement, smoke-suite membership, and tier-2 priority.
pytestmark = [
    test_requirements.control,
    pytest.mark.smoke,
    pytest.mark.tier(2)
]
# Class names of the Control tab destinations; used to parametrize
# test_control_navigation below.
destinations = [
    control.explorer.ControlExplorer.__name__,
    control.simulation.ControlSimulation.__name__,
    control.import_export.ControlImportExport.__name__,
    control.log.ControlLog.__name__
]
# Accordion titles shown under Control / Explorer; each one's tree is
# exercised by test_control_explorer_tree below.
control_explorer_accordions = [
    "Policy Profiles",
    "Policies",
    "Events",
    "Conditions",
    "Actions",
    "Alert Profiles",
    "Alerts"
]
@pytest.fixture(scope="module")
def control_explorer_view(appliance):
    """Module-scoped fixture yielding the Control / Explorer view."""
    explorer_view = navigate_to(appliance.server, "ControlExplorer")
    return explorer_view
@pytest.mark.parametrize("destination", destinations)
def test_control_navigation(destination, appliance):
    """Verify that each destination of the Control tab can be opened.

    Steps:
        * Navigate to every destination of the Control tab.

    Polarion:
        assignee: dgaikwad
        casecomponent: WebUI
        initialEstimate: 1/60h
    """
    # The first visit to some views (e.g. Control -> Log) can be extremely
    # slow, so allow a generous wait before asserting.
    destination_view = navigate_to(appliance.server, destination, wait_for_view=60)
    assert destination_view.is_displayed
@pytest.mark.parametrize("destination", control_explorer_accordions)
def test_control_explorer_tree(control_explorer_view, destination, appliance):
    """Check every accordion of Control / Explorer.

    Steps:
        * Open each accordion tab and click the top node of its tree.

    Polarion:
        assignee: dgaikwad
        casecomponent: WebUI
        initialEstimate: 1/60h
    """
    navigate_to(appliance.server, 'ControlExplorer', wait_for_view=30)
    # Accordion widget attributes are the snake_cased accordion titles.
    attr_name = destination.lower().replace(" ", "_")
    accordion_widget = getattr(control_explorer_view, attr_name)
    accordion_widget.tree.click_path(f"All {destination}")
| gpl-2.0 |
raghavrv/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
# Hint shown when the import looks like an un-built local source checkout.
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
# Generic hint shown for an installed-but-broken build.
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
    """Re-raise *e* as a comprehensible ImportError about a broken build.

    The message embeds a listing of this package's directory contents to
    help debugging on the mailing list.
    """
    local_dir = os.path.split(__file__)[0]
    # A source-tree checkout gets the inplace-build hint; anything else
    # gets the generic installer hint.
    if local_dir == "sklearn/__check_build":
        msg = INPLACE_MSG
    else:
        msg = STANDARD_MSG
    dir_content = []
    for index, entry in enumerate(os.listdir(local_dir), start=1):
        # Lay the directory listing out three columns per line.
        if index % 3:
            dir_content.append(entry.ljust(26))
        else:
            dir_content.append(entry + '\n')
    raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
# Importing the compiled extension is itself the build check: if the C
# extensions were never built, the ImportError is converted into a
# human-readable explanation.
try:
    from ._check_build import check_build
except ImportError as e:
    raise_build_error(e)
| bsd-3-clause |
facetoe/i3pystatus | i3pystatus/core/__init__.py | 3 | 5457 | import logging
import os
import sys
from threading import Thread
from i3pystatus.core import io, util
from i3pystatus.core.exceptions import ConfigError
from i3pystatus.core.imputil import ClassFinder
from i3pystatus.core.modules import Module
DEFAULT_LOG_FORMAT = '%(asctime)s [%(levelname)-8s][%(name)s %(lineno)d] %(message)s'
log = logging.getLogger(__name__)
class CommandEndpoint:
    """
    Endpoint for i3bar click events: http://i3wm.org/docs/i3bar-protocol.html#_click_events
    :param modules: dict-like object with item access semantics via .get()
    :param io_handler_factory: function creating a file-like object returning a JSON generator on .read()
    """
    def __init__(self, modules, io_handler_factory, io):
        self.modules = modules
        self.io_handler_factory = io_handler_factory
        self.io = io
        # Daemon thread so a stuck stdin read never blocks interpreter exit.
        self.thread = Thread(target=self._command_endpoint)
        self.thread.daemon = True

    def start(self):
        """Starts the background thread"""
        self.thread.start()

    def _command_endpoint(self):
        # Consume click-event dicts from i3bar and dispatch them to modules.
        for command in self.io_handler_factory().read():
            clicked_module = self.modules.get(command["instance"])
            pressed_button = command["button"]
            click_kwargs = {"button_id": pressed_button}
            try:
                click_kwargs["pos_x"] = command["x"]
                click_kwargs["pos_y"] = command["y"]
            except Exception:
                # Events without coordinates are skipped entirely.
                continue
            if clicked_module:
                clicked_module.on_click(pressed_button, **click_kwargs)
                clicked_module.run()
                self.io.async_refresh()
class Status:
    """
    The main class used for registering modules and managing I/O
    :param bool standalone: Whether i3pystatus should read i3status-compatible input from `input_stream`.
    :param int interval: Update interval in seconds.
    :param input_stream: A file-like object that provides the input stream, if `standalone` is False.
    :param bool click_events: Enable click events, if `standalone` is True.
    :param str logfile: Path to log file that will be used by i3pystatus.
    :param tuple internet_check: Address of server that will be used to check for internet connection by :py:class:`.internet`.
    :param keep_alive: If True, modules that define the keep_alive flag will not be put to sleep when the status bar is hidden.
    :param dictionary default_hints: Dictionary of default hints to apply to all modules. Can be overridden at a module level.
    """
    def __init__(self, standalone=True, click_events=True, interval=1,
                 input_stream=None, logfile=None, internet_check=None,
                 keep_alive=False, logformat=DEFAULT_LOG_FORMAT,
                 default_hints=None):
        self.standalone = standalone
        self.default_hints = default_hints
        # Click events only make sense when talking to i3bar directly.
        self.click_events = standalone and click_events
        input_stream = input_stream or sys.stdin
        logger = logging.getLogger("i3pystatus")
        if logfile:
            # Replace any previously configured handlers with a file handler.
            for handler in logger.handlers:
                logger.removeHandler(handler)
            logfile = os.path.expandvars(logfile)
            handler = logging.FileHandler(logfile, delay=True)
            logger.addHandler(handler)
            logger.setLevel(logging.CRITICAL)
        if logformat:
            # Apply the format string to every handler on the logger.
            for index in range(len(logger.handlers)):
                logger.handlers[index].setFormatter(logging.Formatter(logformat))
        if internet_check:
            util.internet.address = internet_check
        self.modules = util.ModuleList(self, ClassFinder(Module))
        if self.standalone:
            self.io = io.StandaloneIO(self.click_events, self.modules, keep_alive, interval)
            if self.click_events:
                # Click events arrive as a JSON stream on stdin (first line skipped).
                self.command_endpoint = CommandEndpoint(
                    self.modules,
                    lambda: io.JSONIO(io=io.IOHandler(sys.stdin, open(os.devnull, "w")), skiplines=1),
                    self.io)
        else:
            self.io = io.IOHandler(input_stream)
    def register(self, module, *args, **kwargs):
        """
        Register a new module.
        :param module: Either a string module name, or a module class,
            or a module instance (in which case args and kwargs are
            invalid).
        :param kwargs: Settings for the module.
        :returns: module instance
        """
        from i3pystatus.text import Text
        if not module:
            return
        # Merge the module's hints with the default hints
        # and overwrite any duplicates with the hint from the module
        hints = self.default_hints.copy() if self.default_hints else {}
        hints.update(kwargs.get('hints', {}))
        if hints:
            kwargs['hints'] = hints
        try:
            return self.modules.append(module, *args, **kwargs)
        except Exception as e:
            # A failing module is replaced by a red error Text module so the
            # rest of the bar keeps working.
            log.exception(e)
            return self.modules.append(Text(
                color="#FF0000",
                text="{i3py_mod}: Fatal Error - {ex}({msg})".format(
                    i3py_mod=module,
                    ex=e.__class__.__name__,
                    msg=e
                )
            ))
    def run(self):
        """
        Run main loop.
        """
        if self.click_events:
            self.command_endpoint.start()
        # Forward each JSON item from the input stream to every module.
        for j in io.JSONIO(self.io).read():
            for module in self.modules:
                module.inject(j)
| mit |
marcwebbie/youtube-dl | youtube_dl/extractor/plays.py | 23 | 1805 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class PlaysTVIE(InfoExtractor):
    """Extractor for plays.tv video pages (DASH manifest plus direct HTTP sources)."""
    _VALID_URL = r'https?://(?:www\.)?plays\.tv/video/(?P<id>[0-9a-f]{18})'
    _TEST = {
        'url': 'http://plays.tv/video/56af17f56c95335490/when-you-outplay-the-azir-wall',
        'md5': 'dfeac1198506652b5257a62762cec7bc',
        'info_dict': {
            'id': '56af17f56c95335490',
            'ext': 'mp4',
            'title': 'When you outplay the Azir wall',
            'description': 'Posted by Bjergsen',
        }
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._og_search_title(webpage)
        # The player markup is embedded on the page as R.bindContent({...});
        content = self._parse_json(
            self._search_regex(
                r'R\.bindContent\(({.+?})\);', webpage,
                'content'), video_id)['content']
        # The <video> tag carries the DASH manifest URL (data-mpd) plus
        # per-resolution <source> fallbacks.
        mpd_url, sources = re.search(
            r'(?s)<video[^>]+data-mpd="([^"]+)"[^>]*>(.+?)</video>',
            content).groups()
        formats = self._extract_mpd_formats(
            self._proto_relative_url(mpd_url), video_id, mpd_id='DASH')
        for format_id, height, format_url in re.findall(r'<source\s+res="((\d+)h?)"\s+src="([^"]+)"', sources):
            formats.append({
                'url': self._proto_relative_url(format_url),
                'format_id': 'http-' + format_id,
                'height': int_or_none(height),
            })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': formats,
        }
| unlicense |
jackyh/qt210_ics_external_chromium | net/tools/testserver/backoff_server.py | 64 | 1382 | #!/usr/bin/python2.4
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a simple HTTP server for manually testing exponential
back-off functionality in Chrome.
"""
import BaseHTTPServer
import sys
import urlparse
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handler that answers with the HTTP status given in the ?code= query.
    GET /quitquitquit stops the serving loop in main(); any other request
    returns 200 OK unless a non-200 ?code= parameter is supplied, in which
    case that status is sent as an error.
    """
    # Class-level flag polled by the serve loop in main(); cleared by /quitquitquit.
    keep_running = True
    def do_GET(self):
        if self.path == '/quitquitquit':
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            self.wfile.write('QUITTING')
            RequestHandler.keep_running = False
            return
        params = urlparse.parse_qs(urlparse.urlparse(self.path).query)
        # Default (no params or code=200) is a plain 200 OK.
        if not params or not 'code' in params or params['code'][0] == '200':
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            self.wfile.write('OK')
        else:
            self.send_error(int(params['code'][0]))
def main():
    """Serve on the port given in argv[1] until /quitquitquit is requested."""
    # NOTE(review): this file is Python 2 only (print statements, python2.4
    # shebang); it will not run under Python 3 without porting.
    if len(sys.argv) != 2:
        print "Usage: %s PORT" % sys.argv[0]
        sys.exit(1)
    port = int(sys.argv[1])
    print "To stop the server, go to http://localhost:%d/quitquitquit" % port
    httpd = BaseHTTPServer.HTTPServer(('', port), RequestHandler)
    # Handle one request at a time until the handler clears keep_running.
    while RequestHandler.keep_running:
        httpd.handle_request()
if __name__ == '__main__':
main()
| bsd-3-clause |
Great-Li-Xin/PythonDev | Games/Chap4/Bomb Catcher.py | 1 | 2545 | import sys, random, time, pygame
from pygame.locals import *
def print_text(font, x, y, text, color=(255, 255, 255), shadow=True):
    """Blit *text* onto the global `screen` at (x, y).

    When *shadow* is true, a black copy is drawn first, offset by (-2, -2),
    to give a simple drop-shadow effect.
    """
    if shadow:
        shadow_surface = font.render(text, True, (0, 0, 0))
        screen.blit(shadow_surface, (x - 2, y - 2))
    text_surface = font.render(text, True, color)
    screen.blit(text_surface, (x, y))
if __name__ == '__main__':
    # Game setup: window, font, hidden system cursor.
    pygame.init()
    screen = pygame.display.set_mode((600, 500))
    pygame.display.set_caption("Bomb Catching Game")
    font1 = pygame.font.Font(None, 24)
    pygame.mouse.set_visible(False)
    white = 255, 255, 255
    red = 220, 50, 50
    yellow = 230, 230, 50
    black = 0, 0, 0
    lives = 3
    score = 0
    game_over = True
    mouse_x = mouse_y = 0
    pos_x = 300
    pos_y = 460
    # Bomb starts above the window at a random x; vel_y is the fall speed.
    bomb_x = random.randint(0, 500)
    bomb_y = -50
    vel_y = 0
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                sys.exit()
            elif event.type == MOUSEMOTION:
                mouse_x, mouse_y = event.pos
                move_x, move_y = event.rel
            elif event.type == MOUSEBUTTONUP:
                # Any click restarts the game when it is over.
                if game_over:
                    game_over = False
                    lives = 3
                    score = 0
                    vel_y = 5
        keys = pygame.key.get_pressed()
        if keys[K_ESCAPE]:
            sys.exit()
        screen.fill((0, 0, 100))
        if game_over:
            print_text(font1, 100, 200, "<CLICK TO PLAY>")
        else:
            bomb_y += vel_y
            # Bomb fell past the bottom: lose a life and respawn it.
            if bomb_y > 500:
                bomb_x = random.randint(0, 500)
                bomb_y = -50
                lives -= 1
                if lives == 0:
                    game_over = True
            # Bomb reached paddle height: catch it if it overlaps the paddle.
            elif bomb_y > pos_y:
                if pos_x < bomb_x < pos_x + 120:
                    score += 10
                    bomb_x = random.randint(0, 500)
                    bomb_y = -50
                    # Each catch makes the next bomb fall 10% faster.
                    vel_y *= 1.1
            pygame.draw.circle(screen, black, (bomb_x - 4, int(bomb_y) - 4), 30, 0)
            pygame.draw.circle(screen, yellow, (bomb_x, int(bomb_y)), 30, 0)
        # Paddle follows the mouse, clamped to the window width.
        pos_x = mouse_x
        if pos_x < 0:
            pos_x = 0
        elif pos_x > 500:
            pos_x = 500
        pygame.draw.rect(screen, black, (pos_x - 4, pos_y - 4, 120, 40), 0)
        pygame.draw.rect(screen, red, (pos_x, pos_y, 120, 40), 0)
        print_text(font1, 0, 0, "LIVES: " + str(lives))
        print_text(font1, 500, 0, "SCORE: " + str(score))
        pygame.display.update()
| mit |
XCage15/privacyidea | privacyidea/api/validate.py | 2 | 9957 | # -*- coding: utf-8 -*-
#
# http://www.privacyidea.org
# (c) cornelius kölbel, privacyidea.org
#
# 2015-06-17 Cornelius Kölbel <cornelius@privacyidea.org>
# Add policy decorator for API key requirement
# 2014-12-08 Cornelius Kölbel, <cornelius@privacyidea.org>
# Complete rewrite during flask migration
# Try to provide REST API
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """This module contains the REST API for doing authentication.
The methods are tested in the file tests/test_api_validate.py
Authentication is either done by providing a username and a password or a
serial number and a password.
**Authentication workflow**
Authentication workflow is like this:
In case of authenticating a user:
* lib/token/check_user_pass (user, passw, options)
* lib/token/check_token_list(list, passw, user, options)
* lib/tokenclass/authenticate(pass, user, options)
* lib/tokenclass/check_pin(pin, user, options)
* lib/tokenclass/check_otp(otpval, options)
IN case if authenitcating a serial number:
* lib/token/check_serial_pass(serial, passw, options)
* lib/token/check_token_list(list, passw, user, options)
* lib/tokenclass/authenticate(pass, user, options)
* lib/tokenclass/check_pin(pin, user, options)
* lib/tokenclass/check_otp(otpval, options)
"""
from flask import (Blueprint, request, g, current_app)
from privacyidea.lib.user import get_user_from_param
from lib.utils import send_result, getParam
from ..lib.decorators import (check_user_or_serial_in_request)
from lib.utils import required
from privacyidea.lib.token import (check_user_pass, check_serial_pass)
from privacyidea.api.lib.utils import get_all_params
from privacyidea.lib.audit import getAudit
from privacyidea.api.lib.prepolicy import (prepolicy, set_realm,
api_key_required, mangle)
from privacyidea.api.lib.postpolicy import (postpolicy,
check_tokentype, check_serial,
no_detail_on_fail,
no_detail_on_success, autoassign,
offline_info)
from privacyidea.lib.policy import PolicyClass
import logging
log = logging.getLogger(__name__)
validate_blueprint = Blueprint('validate_blueprint', __name__)
@validate_blueprint.before_request
def before_request():
    """
    This is executed before the request
    """
    # Collect GET and POST parameters into a single dict for the handlers.
    request.all_data = get_all_params(request.values, request.data)
    # Create a policy_object, that reads the database audit settings
    # and contains the complete policy definition during the request.
    # This audit_object can be used in the postpolicy and prepolicy and it
    # can be passed to the innerpolicies.
    g.policy_object = PolicyClass()
    g.audit_object = getAudit(current_app.config)
    # Pre-fill the audit entry; handlers overwrite success/info later.
    g.audit_object.log({"success": False,
                        "action_detail": "",
                        "client": request.remote_addr,
                        "client_user_agent": request.user_agent.browser,
                        "privacyidea_server": request.host,
                        "action": "%s %s" % (request.method, request.url_rule),
                        "info": ""})
@validate_blueprint.after_request
def after_request(response):
    """
    Post-request hook: flush the audit log entry and disable caching.
    :param response: the outgoing flask response
    :return: the response
    """
    # Some error paths abort before_request early, so the audit object may
    # never have been created for this request.
    if "audit_object" in g:
        g.audit_object.finalize_log()
    # Authentication responses must never be cached.
    response.headers["Cache-Control"] = "no-cache"
    return response
@validate_blueprint.route('/check', methods=['POST', 'GET'])
@postpolicy(no_detail_on_fail, request=request)
@postpolicy(no_detail_on_success, request=request)
@postpolicy(offline_info, request=request)
@postpolicy(check_tokentype, request=request)
@postpolicy(check_serial, request=request)
@postpolicy(autoassign, request=request)
@prepolicy(set_realm, request=request)
@prepolicy(mangle, request=request)
@check_user_or_serial_in_request
@prepolicy(api_key_required, request=request)
def check():
    """
    check the authentication for a user or a serial number.
    Either a ``serial`` or a ``user`` is required to authenticate.
    The PIN and OTP value is sent in the parameter ``pass``.
    :param serial: The serial number of the token, that tries to authenticate.
    :param user: The loginname/username of the user, who tries to authenticate.
    :param realm: The realm of the user, who tries to authenticate. If the
        realm is omitted, the user is looked up in the default realm.
    :param pass: The password, that consists of the OTP PIN and the OTP value.
    :param transaction_id: The transaction ID for a response to a challenge
        request
    :param state: The state ID for a response to a challenge request
    :return: a json result with a boolean "result": true
    **Example response** for a successful authentication:
    .. sourcecode:: http
       HTTP/1.1 200 OK
       Content-Type: application/json
       {
         "detail": {
           "message": "matching 1 tokens",
           "serial": "PISP0000AB00",
           "type": "spass"
         },
         "id": 1,
         "jsonrpc": "2.0",
         "result": {
           "status": true,
           "value": true
         },
         "version": "privacyIDEA unknown"
       }
    """
    user = get_user_from_param(request.all_data)
    serial = getParam(request.all_data, "serial")
    password = getParam(request.all_data, "pass", required)
    options = {"g": g,
               "clientip": request.remote_addr}
    # Challenge-response parameters are only added when present.
    transaction_id = getParam(request.all_data, "transaction_id")
    state = getParam(request.all_data, "state")
    if transaction_id:
        options["transaction_id"] = transaction_id
    if state:
        options["state"] = state
    g.audit_object.log({"user": user.login,
                        "realm": user.realm})
    # A given serial takes precedence over a username lookup.
    if serial:
        result, details = check_serial_pass(serial, password, options=options)
    else:
        result, details = check_user_pass(user, password, options=options)
    g.audit_object.log({"info": details.get("message"),
                        "success": result,
                        "serial": serial or details.get("serial"),
                        "tokentype": details.get("type")})
    return send_result(result, details=details)
@validate_blueprint.route('/samlcheck', methods=['POST', 'GET'])
@postpolicy(no_detail_on_fail, request=request)
@postpolicy(no_detail_on_success, request=request)
@postpolicy(check_tokentype, request=request)
@postpolicy(check_serial, request=request)
@postpolicy(autoassign, request=request)
@prepolicy(set_realm, request=request)
@prepolicy(mangle, request=request)
@check_user_or_serial_in_request
@prepolicy(api_key_required, request=request)
def samlcheck():
    """
    Authenticate the user and return the SAML user information.
    :param user: The loginname/username of the user, who tries to authenticate.
    :param realm: The realm of the user, who tries to authenticate. If the
        realm is omitted, the user is looked up in the default realm.
    :param pass: The password, that consists of the OTP PIN and the OTP value.
    :return: a json result with a boolean "result": true
    **Example response** for a successful authentication:
    .. sourcecode:: http
       HTTP/1.1 200 OK
       Content-Type: application/json
       {
         "detail": {
           "message": "matching 1 tokens",
           "serial": "PISP0000AB00",
           "type": "spass"
         },
         "id": 1,
         "jsonrpc": "2.0",
         "result": {
           "status": true,
           "value": {"auth": true,
                     "username: <loginname>,
                     "realm": ....,
                     "surname": ....,
                     "givenname": .....,
                     "mobile": ....,
                     "phone": ....,
                     "email": ....
                     }
         },
         "version": "privacyIDEA unknown"
       }
    """
    user = get_user_from_param(request.all_data)
    password = getParam(request.all_data, "pass", required)
    options = {"g": g,
               "clientip": request.remote_addr}
    auth, details = check_user_pass(user, password, options=options)
    # Resolve the user attributes that go into the SAML assertion.
    ui = user.get_user_info()
    result_obj = {"auth": auth,
                  "username": user.login,
                  "realm": user.realm,
                  "email": ui.get("email"),
                  "surname": ui.get("surname"),
                  "givenname": ui.get("givenname"),
                  "mobile": ui.get("mobile"),
                  "phone": ui.get("phone")}
    g.audit_object.log({"info": details.get("message"),
                        "success": auth,
                        "serial": details.get("serial"),
                        "tokentype": details.get("type"),
                        "user": user.login,
                        "realm": user.realm})
    return send_result(result_obj, details=details)
| agpl-3.0 |
jhutar/spacewalk | backend/satellite_exporter/satexport.py | 10 | 9704 | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# Server-side uploading code
import time
import sys
try:
# python 2
import xmlrpclib
except ImportError:
# python3
import xmlrpc.client as xmlrpclib # pylint: disable=F0401
from rhn.connections import idn_puny_to_unicode
from spacewalk.common import apache
from spacewalk.common.usix import raise_with_tb
from spacewalk.common import rhnFlags
from spacewalk.common.rhnLog import log_debug, log_error, log_setreq, initLOG
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.common.rhnTranslate import _
from spacewalk.common.rhnTB import Traceback
from spacewalk.common.rhnException import rhnException, rhnFault
from spacewalk.server import rhnSQL, rhnImport
from spacewalk.satellite_tools.disk_dumper.dumper import ClosedConnectionError
from spacewalk.satellite_tools import constants
class BaseApacheServer:
    """Base mod_python-style handler: logging setup, request wrapping and
    XML-RPC fault replies. Subclasses override the _headerParserHandler /
    _handler / _cleanupHandler hooks."""
    def __init__(self):
        # Init log to stderr
        initLOG()
        self.start_time = 0
        self._cleanup()
    def headerParserHandler(self, req):
        # pylint: disable=W0201
        log_setreq(req)
        self.start_time = time.time()
        # init configuration options with proper component
        options = req.get_options()
        # if we are initializing out of a <Location> handler don't
        # freak out
        if "RHNComponentType" not in options:
            # clearly nothing to do
            return apache.OK
        initCFG(options["RHNComponentType"])
        initLOG(CFG.LOG_FILE, CFG.DEBUG)
        # short-circuit everything if sending a system-wide message.
        if CFG.SEND_MESSAGE_TO_ALL:
            # Drop the database connection
            # pylint: disable=W0702
            try:
                rhnSQL.closeDB()
            except:
                pass
            # Fetch global message being sent to clients if applicable.
            msg = open(CFG.MESSAGE_TO_ALL).read()
            log_debug(3, "Sending message to all clients: %s" % msg)
            return self._send_xmlrpc(req, rhnFault(-1,
                                                   _("IMPORTANT MESSAGE FOLLOWS:\n%s") % msg, explain=0))
        rhnSQL.initDB()
        self.server = options['SERVER']
        self.server_classes = rhnImport.load("satellite_exporter/handlers")
        if self.server not in self.server_classes:
            # XXX do something interesting here
            log_error("Missing server", self.server)
            return apache.HTTP_NOT_FOUND
        return self._wrapper(req, self._headerParserHandler)
    def handler(self, req):
        return self._wrapper(req, self._handler)
    def cleanupHandler(self, req):
        self._timer()
        retval = self._wrapper(req, self._cleanupHandler)
        self._cleanup()
        # Reset the logger to stderr
        initLOG()
        return retval
    def _cleanup(self):
        # pylint: disable=W0201
        # Reset per-request state so nothing leaks between requests.
        self.server = None
        self.server_classes = None
        self.server_instance = {}
    # Virtual functions
    # pylint: disable=R0201
    def _headerParserHandler(self, _req):
        return apache.OK
    def _handler(self, _req):
        return apache.OK
    def _cleanupHandler(self, _req):
        return apache.OK
    def _wrapper(self, req, function):
        # Run *function*, translating known failures into HTTP/XML-RPC replies.
        try:
            ret = function(req)
        except rhnFault:
            e = sys.exc_info()[1]
            return self._send_xmlrpc(req, e)
        except ClosedConnectionError:
            # The error code most likely doesn't matter, the client won't see
            # it anyway
            return apache.HTTP_NOT_ACCEPTABLE
        except Exception:  # pylint: disable=E0012, W0703
            Traceback("satexport._wrapper", req=req)
            return apache.HTTP_INTERNAL_SERVER_ERROR
        return ret
    def _send_xmlrpc(self, req, data):
        # Serialize *data* (a fault or a single return value) as an XML-RPC
        # method response and write it to the client.
        log_debug(1)
        req.content_type = "text/xml"
        if isinstance(data, rhnFault):
            data = data.getxml()
        else:
            data = (data, )
        ret = xmlrpclib.dumps(data, methodresponse=1)
        req.headers_out['Content-Length'] = str(len(ret))
        req.send_http_header()
        req.write(ret)
        return apache.OK
    def _timer(self):
        # Log elapsed request time; no-op if the start time was never set.
        if not self.start_time:
            return 0
        log_debug(2, "%.2f sec" % (time.time() - self.start_time))
        return 0
class ApacheServer(BaseApacheServer):
    """Concrete handler: validates the dump-protocol version, decodes the
    XML-RPC request, authenticates the ISS slave and dispatches to the
    satellite_exporter handler modules."""
    def __init__(self):
        BaseApacheServer.__init__(self)
    def _headerParserHandler(self, req):
        log_debug(3, "Method", req.method)
        self._validate_version(req)
        return apache.OK
    def _handler(self, req):
        log_debug(3, "Method", req.method)
        # Read all the request
        data = req.read()
        log_debug(7, "Received", data)
        # Decode the data
        try:
            params, methodname = xmlrpclib.loads(data)
        except:
            raise
        log_debug(5, params, methodname)
        try:
            f = self.get_function(methodname, req)
        except FunctionRetrievalError:
            e = sys.exc_info()[1]
            Traceback(methodname, req)
            return self._send_xmlrpc(req, rhnFault(3008, str(e), explain=0))
        # The first XML-RPC parameter is dropped before calling the handler.
        if len(params) < 2:
            params = []
        else:
            params = params[1:]
        result = f(*params)
        if result:
            # Error of some sort
            return self._send_xmlrpc(req, rhnFault(3009))
        # Presumably the function did all the sending
        log_debug(4, "Exiting OK")
        return apache.OK
    # pylint: disable=R0201
    def get_function(self, method_name, req):
        """Resolve 'module.function' to a bound handler callable, after
        authenticating the requesting ISS slave."""
        iss_slave_condition = self.auth_system(req)
        # Get the module name
        idx = method_name.rfind('.')
        module_name, function_name = method_name[:idx], method_name[idx + 1:]
        log_debug(5, module_name, function_name)
        handler_classes = self.server_classes[self.server]
        if module_name not in handler_classes:
            raise FunctionRetrievalError("Module %s not found" % module_name)
        mod = handler_classes[module_name](req)
        mod.set_exportable_orgs(iss_slave_condition)
        f = mod.get_function(function_name)
        if f is None:
            raise FunctionRetrievalError(
                "Module %s: function %s not found" %
                (module_name, function_name))
        return f
    def auth_system(self, req):
        """Check the remote host is an enabled ISS slave and return the SQL
        condition selecting the org ids it may export."""
        if CFG.DISABLE_ISS:
            raise rhnFault(2005, _('ISS is disabled on this satellite.'))
        remote_hostname = req.get_remote_host(apache.REMOTE_DOUBLE_REV)
        row = rhnSQL.fetchone_dict("""
            select id, allow_all_orgs
              from rhnISSSlave
             where slave = :hostname
               and enabled = 'Y'
        """, hostname=idn_puny_to_unicode(remote_hostname))
        if not row:
            raise rhnFault(2004,
                           _('Server "%s" is not enabled for ISS.')
                           % remote_hostname)
        iss_slave_condition = "select id from web_customer"
        if not(row['allow_all_orgs'] == 'Y'):
            iss_slave_condition = "select rhnISSSlaveOrgs.org_id from rhnISSSlaveOrgs where slave_id = %d" % row['id']
        return iss_slave_condition
    @staticmethod
    def _validate_version(req):
        """Fault unless the client's dump protocol major version matches ours."""
        server_version = constants.PROTOCOL_VERSION
        vstr = 'X-RHN-Satellite-XML-Dump-Version'
        if vstr not in req.headers_in:
            raise rhnFault(3010, "Missing version string")
        client_version = req.headers_in[vstr]
        # set the client version through rhnFlags to access later
        rhnFlags.set('X-RHN-Satellite-XML-Dump-Version', client_version)
        log_debug(1, "Server version", server_version, "Client version",
                  client_version)
        client_ver_arr = str(client_version).split(".")
        server_ver_arr = str(server_version).split(".")
        client_major = client_ver_arr[0]
        server_major = server_ver_arr[0]
        if len(client_ver_arr) >= 2:
            client_minor = client_ver_arr[1]
        else:
            client_minor = 0
        server_minor = server_ver_arr[1]
        try:
            client_major = int(client_major)
            client_minor = int(client_minor)
        except ValueError:
            raise_with_tb(rhnFault(3011, "Invalid version string %s" % client_version), sys.exc_info()[2])
        try:
            server_major = int(server_major)
            server_minor = int(server_minor)
        except ValueError:
            raise_with_tb(rhnException("Invalid server version string %s"
                                       % server_version), sys.exc_info()[2])
        # NOTE(review): only the major versions are compared; the minor
        # versions are parsed but never used — confirm this is intended.
        if client_major != server_major:
            raise rhnFault(3012, "Client version %s does not match"
                           " server version %s" % (client_version, server_version),
                           explain=0)
class FunctionRetrievalError(Exception):
    """Raised when an XML-RPC method name cannot be resolved to a handler function."""
    pass
apache_server = ApacheServer()
HeaderParserHandler = apache_server.headerParserHandler
Handler = apache_server.handler
CleanupHandler = apache_server.cleanupHandler
| gpl-2.0 |
puckipedia/youtube-dl | youtube_dl/extractor/kontrtube.py | 60 | 2999 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class KontrTubeIE(InfoExtractor):
    """Extractor for KontrTube.ru video pages.

    Scrapes the direct video URL and metadata (title, description, duration,
    view and comment counters) out of the page markup.
    """
    IE_NAME = 'kontrtube'
    IE_DESC = 'KontrTube.ru - Труба зовёт'
    _VALID_URL = r'http://(?:www\.)?kontrtube\.ru/videos/(?P<id>\d+)/(?P<display_id>[^/]+)/'
    _TEST = {
        'url': 'http://www.kontrtube.ru/videos/2678/nad-olimpiyskoy-derevney-v-sochi-podnyat-rossiyskiy-flag/',
        'md5': '975a991a4926c9a85f383a736a2e6b80',
        'info_dict': {
            'id': '2678',
            'display_id': 'nad-olimpiyskoy-derevney-v-sochi-podnyat-rossiyskiy-flag',
            'ext': 'mp4',
            'title': 'Над олимпийской деревней в Сочи поднят российский флаг',
            'description': 'md5:80edc4c613d5887ae8ccf1d59432be41',
            'thumbnail': 'http://www.kontrtube.ru/contents/videos_screenshots/2000/2678/preview.mp4.jpg',
            'duration': 270,
        }
    }
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')
        webpage = self._download_webpage(
            url, display_id, 'Downloading page')
        video_url = self._html_search_regex(
            r"video_url\s*:\s*'(.+?)/?',", webpage, 'video URL')
        thumbnail = self._html_search_regex(
            r"preview_url\s*:\s*'(.+?)/?',", webpage, 'video thumbnail', fatal=False)
        title = self._html_search_regex(
            r'<title>(.+?)</title>', webpage, 'video title')
        description = self._html_search_meta(
            'description', webpage, 'video description')
        mobj = re.search(
            r'<div class="col_2">Длительность: <span>(?P<minutes>\d+)м:(?P<seconds>\d+)с</span></div>',
            webpage)
        duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None
        view_count = self._html_search_regex(
            r'<div class="col_2">Просмотров: <span>(\d+)</span></div>',
            webpage, 'view count', fatal=False)
        comment_count = None
        comment_str = self._html_search_regex(
            r'Комментарии: <span>([^<]+)</span>', webpage, 'comment count', fatal=False)
        # _html_search_regex(..., fatal=False) returns None when the page has
        # no comment block; without this guard the .startswith() call below
        # raised AttributeError on such pages.
        if comment_str:
            if comment_str.startswith('комментариев нет'):
                comment_count = 0
            else:
                mobj = re.search(r'\d+ из (?P<total>\d+) комментариев', comment_str)
                if mobj:
                    comment_count = mobj.group('total')
        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'thumbnail': thumbnail,
            'title': title,
            'description': description,
            'duration': duration,
            'view_count': int_or_none(view_count),
            'comment_count': int_or_none(comment_count),
        }
| unlicense |
kvar/ansible | lib/ansible/modules/cloud/google/gcp_cloudtasks_queue_info.py | 5 | 8749 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_cloudtasks_queue_info
description:
- Gather info for GCP Queue
short_description: Gather info for GCP Queue
version_added: 2.9
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
location:
description:
- The location of the queue.
required: true
type: str
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: get info on a queue
gcp_cloudtasks_queue_info:
location: us-central1
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
name:
description:
- The queue name.
returned: success
type: str
appEngineRoutingOverride:
description:
- Overrides for task-level appEngineRouting. These settings apply only to App
Engine tasks in this queue .
returned: success
type: complex
contains:
service:
description:
- App service.
- By default, the task is sent to the service which is the default service
when the task is attempted.
returned: success
type: str
version:
description:
- App version.
- By default, the task is sent to the version which is the default version
when the task is attempted.
returned: success
type: str
instance:
description:
- App instance.
- By default, the task is sent to an instance which is available when the
task is attempted.
returned: success
type: str
host:
description:
- The host that the task is sent to.
returned: success
type: str
rateLimits:
description:
- 'Rate limits for task dispatches. The queue''s actual dispatch rate is the
result of: * Number of tasks in the queue * User-specified throttling: rateLimits,
retryConfig, and the queue''s state.'
- "* System throttling due to 429 (Too Many Requests) or 503 (Service Unavailable)
responses from the worker, high error rates, or to smooth sudden large traffic
spikes."
returned: success
type: complex
contains:
maxDispatchesPerSecond:
description:
- The maximum rate at which tasks are dispatched from this queue.
- If unspecified when the queue is created, Cloud Tasks will pick the default.
returned: success
type: int
maxConcurrentDispatches:
description:
- The maximum number of concurrent tasks that Cloud Tasks allows to be dispatched
for this queue. After this threshold has been reached, Cloud Tasks stops
dispatching tasks until the number of concurrent requests decreases.
returned: success
type: int
maxBurstSize:
description:
- The max burst size.
- Max burst size limits how fast tasks in queue are processed when many
tasks are in the queue and the rate is high. This field allows the queue
to have a high rate so processing starts shortly after a task is enqueued,
but still limits resource usage when many tasks are enqueued in a short
period of time.
returned: success
type: int
retryConfig:
description:
- Settings that determine the retry behavior.
returned: success
type: complex
contains:
maxAttempts:
description:
- Number of attempts per task.
- Cloud Tasks will attempt the task maxAttempts times (that is, if the first
attempt fails, then there will be maxAttempts - 1 retries). Must be >=
-1.
- If unspecified when the queue is created, Cloud Tasks will pick the default.
- "-1 indicates unlimited attempts."
returned: success
type: int
maxRetryDuration:
description:
- If positive, maxRetryDuration specifies the time limit for retrying a
failed task, measured from when the task was first attempted. Once maxRetryDuration
time has passed and the task has been attempted maxAttempts times, no
further attempts will be made and the task will be deleted.
- If zero, then the task age is unlimited.
returned: success
type: str
minBackoff:
description:
- A task will be scheduled for retry between minBackoff and maxBackoff duration
after it fails, if the queue's RetryConfig specifies that the task should
be retried.
returned: success
type: str
maxBackoff:
description:
- A task will be scheduled for retry between minBackoff and maxBackoff duration
after it fails, if the queue's RetryConfig specifies that the task should
be retried.
returned: success
type: str
maxDoublings:
description:
- The time between retries will double maxDoublings times.
- A task's retry interval starts at minBackoff, then doubles maxDoublings
times, then increases linearly, and finally retries retries at intervals
of maxBackoff up to maxAttempts times.
returned: success
type: int
purgeTime:
description:
- The last time this queue was purged.
returned: success
type: str
status:
description:
- The current state of the queue.
returned: success
type: str
location:
description:
- The location of the queue.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Module entry point: list Cloud Tasks queues for the requested location."""
    module = GcpModule(argument_spec=dict(location=dict(required=True, type='str')))
    # Default to the broad cloud-platform scope when the user supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
    queues = fetch_list(module, collection(module))
    module.exit_json(resources=queues)
def collection(module):
    """Return the Cloud Tasks queues collection URL for the module's project/location."""
    url_template = "https://cloudtasks.googleapis.com/v2/projects/{project}/locations/{location}/queues"
    return url_template.format(**module.params)
def fetch_list(module, link):
    """GET *link* with an authenticated session and return the decoded queue list."""
    session = GcpSession(module, 'cloudtasks')
    return session.list(link, return_if_object, array_name='queues')
def return_if_object(module, response):
    """
    Decode *response* into a dict, or return None for 404/204.

    Fails the module on invalid JSON or when the decoded payload carries
    an 'error.errors' entry.
    """
    # Not-found and no-content responses carry nothing to decode.
    if response.status_code in (404, 204):
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as err:
        module.fail_json(msg="Invalid JSON response with error: %s" % err)
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
    return result
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
| gpl-3.0 |
ltilve/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/boto/boto/mws/exception.py | 153 | 2396 | # Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.exception import BotoServerError
from boto.mws.response import ResponseFactory
class ResponseErrorFactory(ResponseFactory):
    """
    Factory that converts an MWS error response into the most specific
    ResponseError subclass available.
    """

    def __call__(self, status, reason, body=None):
        """
        Build a ResponseError (or subclass) for the given HTTP failure.

        BotoServerError parses *body* to extract the MWS error code,
        which is then used to look up a matching exception class;
        ResponseError is the fallback when no subclass is defined.
        """
        server = BotoServerError(status, reason, body=body)
        supplied = self.find_element(server.error_code, '', ResponseError)
        # Removed a leftover debug print of supplied.__name__ that wrote
        # to stdout every time an error response was constructed.
        return supplied(status, reason, body=body)
class ResponseError(BotoServerError):
    """
    Undefined response error.
    """
    # Subclasses flip this to signal that the request may be retried.
    retry = False

    def __repr__(self):
        template = '{0.__name__}({1.reason}: "{1.message}")'
        return template.format(self.__class__, self)

    def __str__(self):
        # The class docstring is rendered to the user, so subclasses keep
        # their human-readable explanation there.
        if self.__doc__:
            doc = self.__doc__.strip() + "\n"
        else:
            doc = ''
        retriable = '(Retriable)' if self.retry else ''
        return '{1.__name__}: {0.reason} {2}\n{3}' \
               '{0.message}'.format(self, self.__class__, retriable, doc)
class RetriableResponseError(ResponseError):
    # Base class for MWS errors that are safe to retry.
    # (Deliberately no docstring: ResponseError.__str__ renders __doc__.)
    retry = True
class InvalidParameterValue(ResponseError):
    # The docstring text below is user-visible via ResponseError.__str__.
    """
    One or more parameter values in the request is invalid.
    """
class InvalidParameter(ResponseError):
    # The docstring text below is user-visible via ResponseError.__str__.
    """
    One or more parameters in the request is invalid.
    """
class InvalidAddress(ResponseError):
    # The docstring text below is user-visible via ResponseError.__str__.
    """
    Invalid address.
    """
| bsd-3-clause |
anisfeld/MachineLearning | Pipeline/util.py | 1 | 2394 | import pandas as pd
import re
from random import sample
# Helper Functions
def check_nulls(df, col):
    '''
    Return the rows of *df* where the specified column(s) contain NaN.

    :param df: pandas DataFrame to filter.
    :param col: column label to test for nulls.
    :return: DataFrame restricted to the rows with nulls in *col*.
    '''
    # DataFrame.ix was removed in pandas 1.0; .loc is the label-based
    # replacement with the same semantics for label indexing.
    return df.loc[df.loc[:, col].isnull()]
def get_notnulls(df, col):
    '''
    Return the rows of *df* without NaN in the specified column(s).

    :param df: pandas DataFrame to filter.
    :param col: column label to test.
    :return: DataFrame restricted to the rows with non-null values in *col*.
    '''
    # DataFrame.ix was removed in pandas 1.0; use the label-based .loc.
    return df.loc[df.loc[:, col].notnull()]
def clean_data(df, cleaning_tuples):
    '''
    Replace a string in a column (pat) with a clean string (repl), in place:
        e.g. cleaning_tuples = [(col, pat, repl)]

    :param df: DataFrame mutated in place.
    :param cleaning_tuples: iterable of (column, pattern, replacement).
    '''
    for col, pat, repl in cleaning_tuples:
        # .ix was removed in pandas 1.0; regex=True preserves the original
        # (pre-1.0) default behavior of Series.str.replace.
        df.loc[:, col] = df.loc[:, col].str.replace(pat, repl, regex=True)
def clean_grouped_data(grouped_df, col=0):
    '''
    Return a DataFrame of counts resulting from a two-key groupby.

    :param grouped_df: pandas GroupBy object (grouped on two keys).
    :param col: positional index of the counted column (default 0).
    :return: unstacked count table with missing cells filled with 0
             and axis names cleared.
    '''
    # .ix was removed in pandas 1.0; the default col=0 is positional,
    # so .iloc is the faithful replacement.
    counts = pd.DataFrame(grouped_df.count().iloc[:, col])
    # Pivot the inner group key into columns.
    counts = counts.unstack()
    # unstack leaves a (value, key) column MultiIndex; drop the value level.
    counts.columns = counts.columns.droplevel()
    counts.columns.name = None
    counts.index.name = None
    counts.fillna(0, inplace=True)
    return counts
def combine_cols(df, col, extra):
    '''
    Inputs:
        df (pd.DataFrame)
        col,extra (string) column names

    Combines columns with similar information into a single column
    (values from *extra* fill the nulls in *col*) and drops *extra*.
    Mutates *df* in place.
    '''
    # .ix was removed in pandas 1.0; .loc keeps the label-based semantics.
    df.loc[:, col] = df.loc[:, col].where(df.loc[:, col].notnull(), df.loc[:, extra])
    df.drop(extra, axis=1, inplace=True)
def get_subsample(x, n, method="sample"):
    '''
    Return a subsample of *x* of size *n*.

    Input:
        x (array-like) pandas object exposing .sample()/.head()
        n (numeric) sample size
        method (str) "sample" for a random subsample, "head" for the first n rows

    Returns the string "ERROR: n > len(x)" when n exceeds len(x)
    (kept for backward compatibility with existing callers).
    Raises ValueError for an unsupported *method* (previously this
    silently returned None).
    '''
    if n > len(x):
        return "ERROR: n > len(x)"
    if method == "sample":
        return x.sample(n)
    if method == "head":
        return x.head(n)
    raise ValueError("unknown subsample method: {!r}".format(method))
def camel_to_snake(column_name):
    """
    Convert a camelCase string into snake_case.

    Example:
        camel_to_snake("javaLovesCamelCase") -> "java_loves_camel_case"

    See Also:
        http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-camel-case
    """
    # First pass: split before an uppercase run followed by lowercase
    # (handles acronyms like "HTTPResponse" -> "HTTP_Response").
    first_pass = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', column_name)
    # Second pass: split between a lowercase/digit and an uppercase letter.
    second_pass = re.sub('([a-z0-9])([A-Z])', r'\1_\2', first_pass)
    return second_pass.lower()
def map_camel_to_snake(s):
    '''
    Apply camel_to_snake to every element of the Series *s*,
    returning a new Series of snake_case strings.
    '''
    return s.map(camel_to_snake)
| mit |
smathot/PyGaze | opensesame_plugins/pygaze_start_recording/pygaze_start_recording.py | 4 | 1538 | #-*- coding:utf-8 -*-
"""
This file is part of PyGaze.
PyGaze is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyGaze is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyGaze. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.item import item
from libqtopensesame.items.qtautoplugin import qtautoplugin
from pygaze.display import Display
class pygaze_start_recording(item):

    """Plug-in runtime definition."""

    description = u'Puts the eye tracker into recording mode'

    def reset(self):
        """
        desc:
            Resets plug-in settings.
        """
        # Message logged on the tracker when recording starts.
        self.var.status_msg = u'start_trial'

    def run(self):
        """The run phase of the plug-in goes here."""
        # Record the onset timestamp before touching the tracker so the
        # item onset reflects when recording was requested.
        self.set_item_onset()
        self.experiment.pygaze_eyetracker.start_recording()
        # Send and log the status message after recording has started.
        self.experiment.pygaze_eyetracker.status_msg(self.var.status_msg)
        self.experiment.pygaze_eyetracker.log(self.var.status_msg)
class qtpygaze_start_recording(pygaze_start_recording, qtautoplugin):

    # GUI wrapper: combines the runtime item with the auto-generated
    # qtautoplugin controls for this plug-in.

    def __init__(self, name, experiment, script=None):
        # Initialize the runtime item first, then the GUI layer, which
        # locates the plug-in information via __file__.
        pygaze_start_recording.__init__(self, name, experiment, script)
        qtautoplugin.__init__(self, __file__)
pwns4cash/vivisect | vtrace/platforms/linux.py | 3 | 28154 | """
Linux Platform Module
"""
# Copyright (C) 2007 Invisigoth - See LICENSE file for details
import os
import time
import struct
import signal
import traceback
import platform
import envi.cli as e_cli
import envi.memory as e_mem
import envi.registers as e_reg
import vtrace
import vtrace.archs.arm as v_arm
import vtrace.archs.i386 as v_i386
import vtrace.archs.amd64 as v_amd64
import vtrace.platforms.base as v_base
import vtrace.platforms.posix as v_posix
from ctypes import *
import ctypes.util as cutil
# Android ships libc at a fixed location; elsewhere ask the dynamic linker.
if os.getenv('ANDROID_ROOT'):
    libc = CDLL('/system/lib/libc.so')
else:
    libc = CDLL(cutil.find_library("c"))
# Declare prototypes so ctypes marshals 64-bit offsets and pointers correctly.
libc.lseek64.restype = c_ulonglong
libc.lseek64.argtypes = [c_uint, c_ulonglong, c_uint]
libc.read.restype = c_long
libc.read.argtypes = [c_uint, c_void_p, c_long]
libc.write.restype = c_long
libc.write.argtypes = [c_uint, c_void_p, c_long]
# open(2) flags used for the /proc/<pid>/mem handle.
O_RDWR = 2
O_LARGEFILE = 0x8000
# mmap(2) flags used when injecting an mmap syscall into the target.
MAP_ANONYMOUS = 0x20
MAP_PRIVATE = 0x02

# Linux specific ptrace extensions
PT_GETREGS = 12
PT_SETREGS = 13
PT_GETFPREGS = 14
PT_SETFPREGS = 15
PT_ATTACH = 16
PT_DETACH = 17
PT_GETFPXREGS = 18
PT_SETFPXREGS = 19
PT_SYSCALL = 24
PT_SETOPTIONS = 0x4200
PT_GETEVENTMSG = 0x4201
PT_GETSIGINFO = 0x4202
PT_SETSIGINFO = 0x4203
# PT set options stuff.  ONLY TRACESYSGOOD may be used in 2.4...
PT_O_TRACESYSGOOD = 0x00000001 # add 0x80 to TRAP when generated by syscall
# For each of the options below, the stop signal is (TRAP | PT_EVENT_FOO << 8)
PT_O_TRACEFORK = 0x00000002 # Cause a trap at fork
PT_O_TRACEVFORK = 0x00000004 # Cause a trap at vfork
PT_O_TRACECLONE = 0x00000008 # Cause a trap at clone
PT_O_TRACEEXEC = 0x00000010 # Cause a trap at exec
PT_O_TRACEVFORKDONE = 0x00000020 # Cause a trap when vfork done
PT_O_TRACEEXIT = 0x00000040 # Cause a trap on exit
PT_O_MASK = 0x0000007f
# Ptrace event types (TRAP | PT_EVENT_FOO << 8) means that type
# when using GETEVENTMSG for most of these, the new pid is the data
PT_EVENT_FORK = 1
PT_EVENT_VFORK = 2
PT_EVENT_CLONE = 3
PT_EVENT_EXEC = 4
PT_EVENT_VFORK_DONE = 5
PT_EVENT_EXIT = 6
# Used to tell some of the additional events apart
SIG_LINUX_SYSCALL = signal.SIGTRAP | 0x80
SIG_LINUX_CLONE = signal.SIGTRAP | (PT_EVENT_CLONE << 8)
SIG_LINUX_EXIT = signal.SIGTRAP | (PT_EVENT_EXIT << 8)
#following from Pandaboard ES (OMAP4460) Armv7a (cortex-a9)
class user_regs_arm(Structure):
    """General purpose register layout for 32-bit ARM (struct user_regs)."""
    _fields_ = (
        ("r0", c_ulong),
        ("r1", c_ulong),
        ("r2", c_ulong),
        ("r3", c_ulong),
        ("r4", c_ulong),
        ("r5", c_ulong),
        ("r6", c_ulong),
        ("r7", c_ulong),
        ("r8", c_ulong),
        ("r9", c_ulong),
        ("r10", c_ulong), #aka 'sl' ?
        ("r11", c_ulong),
        ("r12", c_ulong),
        ("sp", c_ulong),
        ("lr", c_ulong),
        ("pc", c_ulong),
        ("cpsr", c_ulong),
        ("orig_r0", c_ulong),
    )
class fp_reg_arm(Structure):
    """One FPA floating point register (bit-field layout) on 32-bit ARM."""
    _fields_ = (
        ("sign1", c_long, 1),
        ("unused", c_long, 15),
        ("sign2", c_long, 1),
        ("exponent", c_long, 14),
        ("j", c_long, 1),
        ("mantissa1", c_long, 31),
        ("mantissa0", c_long, 32),
    )
class user_fpregs_arm(Structure):
    """Floating point register state (struct user_fp) on 32-bit ARM."""
    _fields_ = (
        ("fpregs", fp_reg_arm*8),
        ("fpsr", c_ulong, 32),
        ("fpcr", c_ulong, 32),
        ("ftype", c_ubyte*8),
        ("init_flag", c_ulong),
    )
class USER_arm(Structure):
    """struct user layout for 32-bit ARM, used with PT_READ_U/PT_WRITE_U."""
    _fields_ = (
        ("regs", user_regs_arm),
        ("u_fpvalid", c_long),
        ("u_tsize", c_ulong),
        ("u_dsize", c_ulong),
        ("u_ssize", c_ulong),
        ("start_code", c_ulong),
        ("start_stack",c_ulong),
        ("signal", c_long),
        ("reserved", c_long),
        ("u_ar0", c_void_p),
        ("magic", c_ulong),
        ("u_comm", c_char*32),
        ("u_debugreg", c_long*8),
        ("fpregs", user_fpregs_arm),
        ("u_fp0", c_void_p)
    )
class user_regs_i386(Structure):
    """General purpose register layout used with PT_GETREGS on 32-bit x86."""
    _fields_ = (
        ("ebx", c_ulong),
        ("ecx", c_ulong),
        ("edx", c_ulong),
        ("esi", c_ulong),
        ("edi", c_ulong),
        ("ebp", c_ulong),
        ("eax", c_ulong),
        ("ds", c_ushort),
        ("__ds", c_ushort),
        ("es", c_ushort),
        ("__es", c_ushort),
        ("fs", c_ushort),
        ("__fs", c_ushort),
        ("gs", c_ushort),
        ("__gs", c_ushort),
        ("orig_eax", c_ulong),
        ("eip", c_ulong),
        ("cs", c_ushort),
        ("__cs", c_ushort),
        ("eflags", c_ulong),
        ("esp", c_ulong),
        ("ss", c_ushort),
        ("__ss", c_ushort),
    )
class USER_i386(Structure):
    """struct user layout for 32-bit x86 (debug regs reachable via PT_*_U)."""
    _fields_ = (
        # NOTE: Expand out the user regs struct so
        #       we can make one call to _rctx_Import
        ("regs", user_regs_i386),
        ("u_fpvalid", c_ulong),
        ("u_tsize", c_ulong),
        ("u_dsize", c_ulong),
        ("u_ssize", c_ulong),
        ("start_code", c_ulong),
        ("start_stack",c_ulong),
        ("signal", c_ulong),
        ("reserved", c_ulong),
        ("u_ar0", c_void_p),
        ("u_fpstate", c_void_p),
        ("magic", c_ulong),
        ("u_comm", c_char*32),
        ("debug0", c_ulong),
        ("debug1", c_ulong),
        ("debug2", c_ulong),
        ("debug3", c_ulong),
        ("debug4", c_ulong),
        ("debug5", c_ulong),
        ("debug6", c_ulong),
        ("debug7", c_ulong),
    )
class user_regs_amd64(Structure):
    """General purpose register layout used with PT_GETREGS on x86_64."""
    _fields_ = [
        ('r15', c_uint64),
        ('r14', c_uint64),
        ('r13', c_uint64),
        ('r12', c_uint64),
        ('rbp', c_uint64),
        ('rbx', c_uint64),
        ('r11', c_uint64),
        ('r10', c_uint64),
        ('r9', c_uint64),
        ('r8', c_uint64),
        ('rax', c_uint64),
        ('rcx', c_uint64),
        ('rdx', c_uint64),
        ('rsi', c_uint64),
        ('rdi', c_uint64),
        ('orig_rax', c_uint64),
        ('rip', c_uint64),
        ('cs', c_uint64),
        ('eflags', c_uint64),
        ('rsp', c_uint64),
        ('ss', c_uint64),
        ('fs_base', c_uint64),
        ('gs_base', c_uint64),
        ('ds', c_uint64),
        ('es', c_uint64),
        ('fs', c_uint64),
        ('gs', c_uint64),
    ]
# x86 debug register numbers that exist (dr4/dr5 are reserved aliases).
intel_dbgregs = (0,1,2,3,6,7)
class LinuxMixin(v_posix.PtraceMixin, v_posix.PosixMixin):
    """
    The mixin to take care of linux specific platform traits.
    (mostly proc)
    """

    def __init__(self):
        # Wrap reads from proc in our worker thread
        v_posix.PtraceMixin.__init__(self)
        v_posix.PosixMixin.__init__(self)
        self.memfd = None               # lazily opened fd for /proc/<pid>/mem
        self._stopped_cache = {}        # tids whose SIGSTOP arrived before their clone event
        self._stopped_hack = False      # set once the initial attach SIGSTOP was handled
        self.fireTracerThread()
        self.initMode("Syscall", False, "Break On Syscalls")

    def setupMemFile(self, offset):
        """
        A utility to open (if necessary) and seek the memfile
        """
        if self.memfd == None:
            self.memfd = libc.open("/proc/%d/mem" % self.pid, O_RDWR | O_LARGEFILE, 0755)
        x = libc.lseek64(self.memfd, offset, 0)

    @v_base.threadwrap
    def platformReadMemory(self, address, size):
        """
        A *much* faster way of reading memory than the 4 bytes
        per syscall allowed by ptrace (reads through /proc/<pid>/mem).
        """
        self.setupMemFile(address)
        # Read straight into a C buffer via ctypes.
        buf = create_string_buffer(size)
        x = libc.read(self.memfd, addressof(buf), size)
        if x != size:
            #libc.perror('libc.read %d (size: %d)' % (x,size))
            raise Exception("reading from invalid memory %s (%d returned)" % (hex(address), x))
        # We have to slice cause ctypes "helps" us by adding a null byte...
        return buf.raw

    @v_base.threadwrap
    def whynot_platformWriteMemory(self, address, data):
        """
        A *much* faster way of writing memory than the 4 bytes
        per syscall allowed by ptrace.
        NOTE: the 'whynot_' prefix keeps this implementation disabled;
        the ptrace based write from PtraceMixin is used instead.
        """
        self.setupMemFile(address)
        buf = create_string_buffer(data)
        size = len(data)
        x = libc.write(self.memfd, addressof(buf), size)
        if x != size:
            libc.perror('write mem failed: 0x%.8x (%d)' % (address, size))
            raise Exception("write memory failed: %d" % x)
        return x

    def _findExe(self, pid):
        # Resolve the binary path for pid, stripping the markers the kernel
        # appends when the backing file was deleted or prelinked.
        exe = os.readlink("/proc/%d/exe" % pid)
        if "(deleted)" in exe:
            if "#prelink#" in exe:
                exe = exe.split(".#prelink#")[0]
            elif ";" in exe:
                exe = exe.split(";")[0]
            else:
                exe = exe.split("(deleted)")[0].strip()
        return exe

    @v_base.threadwrap
    def platformExec(self, cmdline):
        """Fork/exec *cmdline* under PT_TRACE_ME and attach to the child."""
        # Very similar to posix, but not
        # quite close enough...
        self.execing = True
        cmdlist = e_cli.splitargs(cmdline)
        os.stat(cmdlist[0])
        pid = os.fork()
        if pid == 0:
            v_posix.ptrace(v_posix.PT_TRACE_ME, 0, 0, 0)
            # Make sure our parent gets some cycles
            time.sleep(0.1)
            os.execv(cmdlist[0], cmdlist)
            # NOTE(review): 'sys' is not imported in this module, so this
            # line would NameError if execv() ever failed.
            sys.exit(-1)
        if v_posix.ptrace(PT_ATTACH, pid, 0, 0) != 0:
            raise Exception("PT_ATTACH failed! linux platformExec")
        self.pthreads = [pid,]
        self.setMeta("ExeName", self._findExe(pid))
        return pid

    @v_base.threadwrap
    def platformAttach(self, pid):
        """Attach to an already running process with PT_ATTACH."""
        self.pthreads = [pid,]
        self.setMeta("ThreadId", pid)
        if v_posix.ptrace(PT_ATTACH, pid, 0, 0) != 0:
            raise Exception("PT_ATTACH failed!")
        self.setMeta("ExeName", self._findExe(pid))

    def platformPs(self):
        """Return a list of (pid, cmdline) tuples scraped from /proc."""
        pslist = []
        for dname in self.platformListDir('/proc'):
            try:
                if not dname.isdigit():
                    continue
                cmdline = self.platformReadFile('/proc/%s/cmdline' % dname)
                cmdline = cmdline.replace("\x00"," ")
                if len(cmdline) > 0:
                    pslist.append((int(dname),cmdline))
            except:
                pass # Permissions... quick process... whatev.
        return pslist

    def _simpleCreateThreads(self):
        # Attach to any pre-existing threads other than the main one.
        for tid in self.threadsForPid( self.pid ):
            if tid == self.pid:
                continue
            self.attachThread( tid )

    def attachThread(self, tid, attached=False):
        # Attach and announce the new thread to notifiers.
        self.doAttachThread(tid,attached=attached)
        self.setMeta("ThreadId", tid)
        self.fireNotifiers(vtrace.NOTIFY_CREATE_THREAD)

    @v_base.threadwrap
    def detachThread(self, tid, ecode):
        # Fire the exit-thread event, then detach from the dying thread.
        self.setMeta('ThreadId', tid)
        self._fireExitThread(tid, ecode)
        if v_posix.ptrace(PT_DETACH, tid, 0, 0) != 0:
            raise Exception("ERROR ptrace detach failed for thread %d" % tid)
        self.pthreads.remove(tid)

    @v_base.threadwrap
    def platformWait(self):
        """Block until any traced thread stops, then stop the rest."""
        # Blocking wait once...
        # 0x40000002 == __WALL | WUNTRACED (wait for clone children too).
        pid, status = os.waitpid(-1, 0x40000002)
        self.setMeta("ThreadId", pid)
        # Stop the rest of the threads so the whole process is halted...
        if not self.stepping: # If we're stepping, only do the one
            for tid in self.pthreads:
                if tid == pid:
                    continue
                try:
                    # We use SIGSTOP here because they can't mask it.
                    os.kill(tid, signal.SIGSTOP)
                    os.waitpid(tid, 0x40000002)
                except Exception, e:
                    print "WARNING TID is invalid %d %s" % (tid,e)
        return pid,status

    @v_base.threadwrap
    def platformContinue(self):
        """
        Resume all threads, stopping at syscalls when "Syscall" mode is
        on. Any pending signal is delivered to the main thread only.
        """
        cmd = v_posix.PT_CONTINUE
        if self.getMode("Syscall", False):
            cmd = PT_SYSCALL
        pid = self.getPid()
        sig = self.getCurrentSignal()
        if sig == None:
            sig = 0
        # Only deliver signals to the main thread
        if v_posix.ptrace(cmd, pid, 0, sig) != 0:
            libc.perror('ptrace PT_CONTINUE failed for pid %d' % pid)
            raise Exception("ERROR ptrace failed for pid %d" % pid)
        for tid in self.pthreads:
            if tid == pid:
                continue
            # Failure to continue a secondary thread is ignored on purpose
            # (it may already be gone).
            if v_posix.ptrace(cmd, tid, 0, 0) != 0:
                pass

    @v_base.threadwrap
    def platformStepi(self):
        """Single step one instruction on the current thread."""
        self.stepping = True
        tid = self.getMeta("ThreadId", 0)
        if v_posix.ptrace(v_posix.PT_STEP, tid, 0, 0) != 0:
            raise Exception("ERROR ptrace failed!")

    @v_base.threadwrap
    def platformDetach(self):
        """Close the /proc mem handle and detach from every thread."""
        libc.close(self.memfd)
        for tid in self.pthreads:
            v_posix.ptrace(PT_DETACH, tid, 0, 0)

    @v_base.threadwrap
    def doAttachThread(self, tid, attached=False):
        """
        Do the work for attaching a thread.  This must be *under*
        attachThread() so callers in notifiers may call it (because
        it's also gotta be thread wrapped).
        """
        if not attached:
            if v_posix.ptrace(PT_ATTACH, tid, 0, 0) != 0:
                raise Exception("ERROR ptrace attach failed for thread %d" % tid)
        # We may have already received the stop signal
        if not self._stopped_cache.pop( tid, None ):
            os.waitpid(tid, 0x40000002)
        self.setupPtraceOptions(tid)
        self.pthreads.append(tid)

    @v_base.threadwrap
    def setupPtraceOptions(self, tid):
        """
        Called per pid/tid to setup proper options
        for ptrace.
        """
        opts = PT_O_TRACESYSGOOD
        # NOTE(review): this prefix check only matches a few kernel series;
        # newer kernels (3.10+, 4.x, ...) will not get CLONE/EXIT tracing.
        if platform.release()[:3] in ('2.6','3.0','3.1','3.2'):
            opts |= PT_O_TRACECLONE | PT_O_TRACEEXIT
        x = v_posix.ptrace(PT_SETOPTIONS, tid, 0, opts)
        if x != 0:
            libc.perror('ptrace PT_SETOPTION failed for thread %d' % tid)

    def threadsForPid(self, pid):
        """Return the thread ids listed under /proc/<pid>/task."""
        ret = []
        tpath = "/proc/%s/task" % pid
        if os.path.exists(tpath):
            for pidstr in os.listdir(tpath):
                ret.append(int(pidstr))
        return ret

    def platformProcessEvent(self, event):
        """Decode linux specific stop events before deferring to posix."""
        # Skim some linux specific events before passing to posix
        tid, status = event
        if os.WIFSTOPPED(status):
            sig = status >> 8 # Cant use os.WSTOPSIG() here...
            #print('STOPPED: %d %d %.8x %d' % (self.pid, tid, status, sig))

            # Ok... this is a crazy little state engine that tries
            # to account for the discrepancies in how linux posts
            # signals to the debugger...

            # Thread Creation:
            # In each case below, the kernel may deliver
            # any of the 3 signals in any order...  ALSO
            # (and more importantly) *if* the kernel sends
            # SIGSTOP to the thread first, the debugger
            # will get a SIGSTOP *instead* of SIG_LINUX_CLONE
            # ( this will go back and forth on *ONE BOX* with
            # the same kernel version... Finally squished it
            # because it presents more frequently ( 1 in 10 )
            # on my new ARM linux dev board. WTF?!1?!one?!? )
            #
            # Case 1 (SIG_LINUX_CLONE):
            #     debugger gets SIG_LINUX CLONE as expected
            #     and can then use ptrace(PT_GETEVENTMSG)
            #     to get new TID and attach as normal
            # Case 2 (SIGSTOP delivered to thread)
            #     Thread is already stoped and attached but
            #     parent debugger doesn't know yet. We add
            #     the tid to the stopped_cache so when the
            #     kernel gets around to telling the debugger
            #     we don't wait on him again.
            # Case 3 (SIGSTOP delivered to debugger)
            #     In both case 2 and case 3, this will cause
            #     the SIG_LINUX_CLONE to be skipped.  Either
            #     way, we should head down into thread attach.
            #     ( The thread may be already stopped )
            if sig == SIG_LINUX_SYSCALL:
                self.fireNotifiers(vtrace.NOTIFY_SYSCALL)

            elif sig == SIG_LINUX_EXIT:
                # Thread (or process) exit event: exit code via GETEVENTMSG.
                ecode = self.getPtraceEvent() >> 8
                if tid == self.getPid():
                    self._fireExit( ecode )
                    self.platformDetach()
                else:
                    self.detachThread(tid, ecode)

            elif sig == SIG_LINUX_CLONE:
                # Handle a new thread here!
                newtid = self.getPtraceEvent()
                #print('CLONE (new tid: %d)' % newtid)
                self.attachThread(newtid, attached=True)

            elif sig == signal.SIGSTOP and tid != self.pid:
                #print('OMG IM THE NEW THREAD! %d' % tid)
                # We're not even a real event right now...
                self.runAgain()
                self._stopped_cache[tid] = True

            elif sig == signal.SIGSTOP:
                # If we are still 'exec()'ing, we havent hit the SIGTRAP
                # yet ( so our process info is still python, lets skip it )
                if self.execing:
                    self._stopped_hack = True
                    self.setupPtraceOptions(tid)
                    self.runAgain()
                elif self._stopped_hack:
                    # A SIGSTOP after the first one means a clone event
                    # was swallowed; recover the new tid via GETEVENTMSG.
                    newtid = self.getPtraceEvent(tid)
                    #print("WHY DID WE GET *ANOTHER* STOP?: %d" % tid)
                    #print('PTRACE EVENT: %d' % newtid)
                    self.attachThread(newtid, attached=True)
                else: # on first attach...
                    self._stopped_hack = True
                    self.setupPtraceOptions(tid)
                    self.handlePosixSignal(sig)

            #FIXME eventually implement child catching!
            else:
                self.handlePosixSignal(sig)

            return

        v_posix.PosixMixin.platformProcessEvent(self, event)

    @v_base.threadwrap
    def getPtraceEvent(self, tid=None):
        """
        This *thread wrapped* function will get any pending GETEVENTMSG
        msgs.
        """
        p = c_ulong(0)
        if tid == None:
            tid = self.getMeta("ThreadId", -1)
        if v_posix.ptrace(PT_GETEVENTMSG, tid, 0, addressof(p)) != 0:
            raise Exception('ptrace PT_GETEVENTMSG failed!')
        return p.value

    def platformGetThreads(self):
        """Return a dict of tid -> thread info for all attached threads."""
        ret = {}
        for tid in self.pthreads:
            ret[tid] = tid #FIXME make this pthread struct or stackbase soon
        return ret

    def platformGetMaps(self):
        """Parse /proc/<pid>/maps into (base, len, perms, filename) tuples."""
        maps = []
        mapfile = file("/proc/%d/maps" % self.pid)
        for line in mapfile:
            perms = 0
            sline = line.split(" ")
            addrs = sline[0]
            permstr = sline[1]
            fname = sline[-1].strip()
            addrs = addrs.split("-")
            base = long(addrs[0],16)
            max = long(addrs[1],16)
            mlen = max-base
            if "r" in permstr:
                perms |= e_mem.MM_READ
            if "w" in permstr:
                perms |= e_mem.MM_WRITE
            if "x" in permstr:
                perms |= e_mem.MM_EXEC
            #if "p" in permstr:
                #pass
            maps.append((base,mlen,perms,fname))
        return maps

    def platformGetFds(self):
        """Classify /proc/<pid>/fd entries as socket/pipe/file tuples."""
        fds = []
        for name in os.listdir("/proc/%d/fd/" % self.pid):
            try:
                fdnum = int(name)
                fdtype = vtrace.FD_UNKNOWN
                link = os.readlink("/proc/%d/fd/%s" % (self.pid,name))
                if "socket:" in link:
                    fdtype = vtrace.FD_SOCKET
                elif "pipe:" in link:
                    fdtype = vtrace.FD_PIPE
                elif "/" in link:
                    fdtype = vtrace.FD_FILE
                fds.append((fdnum,fdtype,link))
            except:
                traceback.print_exc()
        return fds

    ############################################################################
    #
    # NOTE: Both of these use class locals set by the i386/amd64 variants
    #
    @v_base.threadwrap
    def platformGetRegCtx(self, tid):
        """Fetch the general register context for *tid* via PT_GETREGS."""
        ctx = self.archGetRegCtx()
        u = self.user_reg_struct()
        if v_posix.ptrace(PT_GETREGS, tid, 0, addressof(u)) == -1:
            raise Exception("Error: ptrace(PT_GETREGS...) failed!")
        ctx._rctx_Import(u)
        return ctx

    @v_base.threadwrap
    def platformSetRegCtx(self, tid, ctx):
        """Write the register context back to *tid* via PT_SETREGS."""
        u = self.user_reg_struct()
        # Populate the reg struct with the current values (to allow for
        # any regs in that struct that we don't track... *fs_base*ahem*
        if v_posix.ptrace(PT_GETREGS, tid, 0, addressof(u)) == -1:
            raise Exception("Error: ptrace(PT_GETREGS...) failed!")
        ctx._rctx_Export(u)
        if v_posix.ptrace(PT_SETREGS, tid, 0, addressof(u)) == -1:
            raise Exception("Error: ptrace(PT_SETREGS...) failed!")

        """
        for i in intel_dbgregs:
            val = ctx.getRegister(self.dbgidx + i)
            offset = self.user_dbg_offset + (self.psize * i)
            if v_posix.ptrace(v_posix.PT_WRITE_U, tid, offset, val) != 0:
                libc.perror('PT_WRITE_U failed for debug%d' % i)
                #raise Exception("PT_WRITE_U for debug%d failed!" % i)
        """
class Linuxi386Trace(
    vtrace.Trace,
    LinuxMixin,
    v_i386.i386Mixin,
    v_posix.ElfMixin,
    v_base.TracerBase):
    """Linux tracer for 32-bit x86 targets."""

    # ctypes struct used with PT_GETREGS/PT_SETREGS
    user_reg_struct = user_regs_i386
    # byte offset of the debug registers within struct user (32-bit layout)
    user_dbg_offset = 252
    reg_val_mask = 0xffffffff

    def __init__(self):
        vtrace.Trace.__init__(self)
        v_base.TracerBase.__init__(self)
        v_posix.ElfMixin.__init__(self)
        v_i386.i386Mixin.__init__(self)
        LinuxMixin.__init__(self)

        # Pre-calc the index of the debug regs
        self.dbgidx = self.archGetRegCtx().getRegisterIndex("debug0")

    @v_base.threadwrap
    def platformGetRegCtx(self, tid):
        # Extend the generic context with the x86 debug registers, which
        # are only reachable via PT_READ_U peeks into struct user.
        ctx = LinuxMixin.platformGetRegCtx( self, tid )
        for i in intel_dbgregs:
            offset = self.user_dbg_offset + (self.psize * i)
            r = v_posix.ptrace(v_posix.PT_READ_U, tid, offset, 0)
            ctx.setRegister(self.dbgidx+i, r & self.reg_val_mask)
        return ctx

    @v_base.threadwrap
    def platformSetRegCtx(self, tid, ctx):
        # Write back the general registers, then poke each debug register.
        LinuxMixin.platformSetRegCtx( self, tid, ctx )
        for i in intel_dbgregs:
            val = ctx.getRegister(self.dbgidx + i)
            offset = self.user_dbg_offset + (self.psize * i)
            if v_posix.ptrace(v_posix.PT_WRITE_U, tid, offset, val) != 0:
                libc.perror('PT_WRITE_U failed for debug%d' % i)

    @v_base.threadwrap
    def platformAllocateMemory(self, size, perms=e_mem.MM_RWX, suggestaddr=0):
        """
        Allocate memory in the target by injecting an int 0x80 mmap
        syscall at the current program counter, then restoring the
        clobbered registers, stack and code bytes.
        """
        sp = self.getStackCounter()
        pc = self.getProgramCounter()

        # Xlate perms (mmap is backward)
        realperm = 0
        if perms & e_mem.MM_READ:
            realperm |= 1
        if perms & e_mem.MM_WRITE:
            realperm |= 2
        if perms & e_mem.MM_EXEC:
            realperm |= 4

        #mma is struct of mmap args for linux syscall
        mma = struct.pack("<6L", suggestaddr, size, realperm, MAP_ANONYMOUS|MAP_PRIVATE, 0, 0)

        regsave = self.getRegisters()

        stacksave = self.readMemory(sp, len(mma))
        ipsave = self.readMemory(pc, 2)

        SYS_mmap = 90

        self.writeMemory(sp, mma)
        self.writeMemory(pc, "\xcd\x80")
        self.setRegisterByName("eax", SYS_mmap)
        self.setRegisterByName("ebx", sp)
        self._syncRegs()

        try:
            # Step over our syscall instruction
            tid = self.getMeta("ThreadId", 0)
            self.platformStepi()
            os.waitpid(tid, 0)
            eax = self.getRegisterByName("eax")
            # Negative return values (top bit set) are -errno from the kernel.
            if eax & 0x80000000:
                raise Exception("Linux mmap syscall error: %d" % eax)
            return eax

        finally:
            # Restore the registers, stack and code bytes we clobbered.
            self.writeMemory(sp, stacksave)
            self.writeMemory(pc, ipsave)
            self.setRegisters(regsave)
class LinuxAmd64Trace(
    vtrace.Trace,
    LinuxMixin,
    v_amd64.Amd64Mixin,
    v_posix.ElfMixin,
    v_base.TracerBase):
    """Linux tracer for 64-bit x86 targets."""

    # ctypes struct used with PT_GETREGS/PT_SETREGS
    user_reg_struct = user_regs_amd64
    # byte offset of the debug registers within struct user (64-bit layout)
    user_dbg_offset = 848
    reg_val_mask = 0xffffffffffffffff

    def __init__(self):
        vtrace.Trace.__init__(self)
        v_base.TracerBase.__init__(self)
        v_posix.ElfMixin.__init__(self)
        v_amd64.Amd64Mixin.__init__(self)
        LinuxMixin.__init__(self)

        # Pre-calc the index of the debug regs
        self.dbgidx = self.archGetRegCtx().getRegisterIndex("debug0")

    @v_base.threadwrap
    def platformGetRegCtx(self, tid):
        # Extend the generic context with the debug registers (PT_READ_U).
        ctx = LinuxMixin.platformGetRegCtx( self, tid )
        for i in intel_dbgregs:
            offset = self.user_dbg_offset + (self.psize * i)
            r = v_posix.ptrace(v_posix.PT_READ_U, tid, offset, 0)
            ctx.setRegister(self.dbgidx+i, r & self.reg_val_mask)
        return ctx

    @v_base.threadwrap
    def platformSetRegCtx(self, tid, ctx):
        # Write back the general registers, then poke each debug register.
        LinuxMixin.platformSetRegCtx( self, tid, ctx )
        for i in intel_dbgregs:
            val = ctx.getRegister(self.dbgidx + i)
            offset = self.user_dbg_offset + (self.psize * i)
            if v_posix.ptrace(v_posix.PT_WRITE_U, tid, offset, val) != 0:
                libc.perror('PT_WRITE_U failed for debug%d' % i)
# ARM undefined-instruction encoding used as a breakpoint, both byte orders.
arm_break_be = 'e7f001f0'.decode('hex')
arm_break_le = 'f001f0e7'.decode('hex')
class LinuxArmTrace(
    vtrace.Trace,
    LinuxMixin,
    v_arm.ArmMixin,
    v_posix.ElfMixin,
    v_base.TracerBase):
    """Linux tracer for ARM targets (software single step via breakpoints)."""

    user_reg_struct = user_regs_arm
    reg_val_mask = 0xffffffff

    def __init__(self):
        vtrace.Trace.__init__(self)
        v_base.TracerBase.__init__(self)
        v_posix.ElfMixin.__init__(self)
        v_arm.ArmMixin.__init__(self)
        LinuxMixin.__init__(self)

        self._break_after_bp = False
        # (addr, savedbytes) pairs to restore after a software single step.
        self._step_cleanup = []

    def _fireStep(self):
        # See notes below about insanity...
        # Restore the original bytes we overwrote with break instructions.
        if self._step_cleanup != None:
            [ self.writeMemory( bva, bytes ) for (bva,bytes) in self._step_cleanup ]
            self._step_cleanup = None
        return v_base.TracerBase._fireStep( self )

    def archGetBreakInstr(self):
        # Little endian ARM break/undefined instruction bytes.
        return arm_break_le

    @v_base.threadwrap
    def platformStepi(self):
        # This is a ridiculous hack to account for the fact that the arm
        # platform has no hardware single stepping: we plant break
        # instructions at every possible branch target and continue.
        self.stepping = True

        pc = self.getProgramCounter()
        op = self.parseOpcode( pc )

        branches = op.getBranches( self )
        if not branches:
            raise Exception('''
The branches for the instruction %r were not decoded correctly.  This means that
we cant properly predict the possible next instruction executions in a way that allows us
to account for the STUPID INSANE FACT THAT THERE IS NO HARDWARE SINGLE STEP CAPABILITY ON
ARM (non-realtime or JTAG anyway).  We *would* have written invalid instructions to each
of those locations and cleaned them up before you ever knew anything was amiss... which is
how we pretend arm can single step... even though IT CANT. (please tell visi...)
''' % op)

        # Save the memory at the branches for later
        # restoration in the _fireStep callback.
        # NOTE(review): op.getBranches(self) is called a second time here;
        # the 'branches' list above could be reused.
        self._step_cleanup = []

        for bva,bflags in op.getBranches( self ):
            self._step_cleanup.append( (bva, self.readMemory( bva, 4 )) )
            self.writeMemory( bva, arm_break_le )

        tid = self.getMeta('ThreadId')

        if v_posix.ptrace(v_posix.PT_CONTINUE, tid, 0, 0) != 0:
            raise Exception("ERROR ptrace failed for tid %d" % tid)
| apache-2.0 |
jthi3rry/dps-pxpy | dps/transactions/decorators.py | 1 | 1048 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import functools
def accept_txn(*types):
    """
    Decorator factory: restrict a method to the given transaction types.

    The wrapped method accepts either a single transaction instance of one
    of *types* (validated, then expanded into keyword arguments) or raw
    keyword arguments that form a valid transaction of at least one type.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(self, transaction=None, **kwargs):
            if transaction:
                # Guard: reject instances that are not of an accepted type.
                if not isinstance(transaction, types):
                    raise ValueError("Invalid transaction type. (got: {}, expects: {})".format(transaction.__class__.__name__, ", ".join((cls.__name__ for cls in types))))
                transaction.validate()
                return f(self, **dict(transaction))
            if kwargs:
                # Guard: at least one accepted type must validate the kwargs.
                if not any(txn_class(**kwargs).is_valid() for txn_class in types):
                    raise ValueError("Invalid kwargs for transaction types: {}".format(", ".join((cls.__name__ for cls in types))))
                return f(self, **kwargs)
            raise ValueError("Expects either a transaction or kwargs")
        return wrapper
    return decorator
| mit |
auduny/home-assistant | homeassistant/components/neato/__init__.py | 6 | 8802 | """Support for Neato botvac connected vacuum cleaners."""
import logging
from datetime import timedelta
from urllib.error import HTTPError
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import discovery
from homeassistant.util import Throttle
# Module-level logger, named after this package.
_LOGGER = logging.getLogger(__name__)
# Integration domain and the hass.data keys used to share state with the
# camera/vacuum/switch platforms loaded in setup().
DOMAIN = 'neato'
NEATO_ROBOTS = 'neato_robots'
NEATO_LOGIN = 'neato_login'
NEATO_MAP_DATA = 'neato_map_data'
NEATO_PERSISTENT_MAPS = 'neato_persistent_maps'
# configuration.yaml schema: a Neato account username and password are
# required under the 'neato' key; unknown top-level keys are allowed.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
    })
}, extra=vol.ALLOW_EXTRA)
# Human-readable labels keyed by numeric cleaning-mode code.
MODE = {
    1: 'Eco',
    2: 'Turbo'
}
# Human-readable labels keyed by numeric robot action code.
ACTION = {
    0: 'Invalid',
    1: 'House Cleaning',
    2: 'Spot Cleaning',
    3: 'Manual Cleaning',
    4: 'Docking',
    5: 'User Menu Active',
    6: 'Suspended Cleaning',
    7: 'Updating',
    8: 'Copying logs',
    9: 'Recovering Location',
    10: 'IEC test',
    11: 'Map cleaning',
    12: 'Exploring map (creating a persistent map)',
    13: 'Acquiring Persistent Map IDs',
    14: 'Creating & Uploading Map',
    15: 'Suspended Exploration'
}
# Error identifiers mapped to short human-readable messages shown to the
# user (keys presumably come from the Neato API — TODO confirm against
# pybotvac).
ERRORS = {
    'ui_error_battery_battundervoltlithiumsafety': 'Replace battery',
    'ui_error_battery_critical': 'Replace battery',
    'ui_error_battery_invalidsensor': 'Replace battery',
    'ui_error_battery_lithiumadapterfailure': 'Replace battery',
    'ui_error_battery_mismatch': 'Replace battery',
    'ui_error_battery_nothermistor': 'Replace battery',
    'ui_error_battery_overtemp': 'Replace battery',
    'ui_error_battery_overvolt': 'Replace battery',
    'ui_error_battery_undercurrent': 'Replace battery',
    'ui_error_battery_undertemp': 'Replace battery',
    'ui_error_battery_undervolt': 'Replace battery',
    'ui_error_battery_unplugged': 'Replace battery',
    'ui_error_brush_stuck': 'Brush stuck',
    'ui_error_brush_overloaded': 'Brush overloaded',
    'ui_error_bumper_stuck': 'Bumper stuck',
    'ui_error_check_battery_switch': 'Check battery',
    'ui_error_corrupt_scb': 'Call customer service corrupt board',
    'ui_error_deck_debris': 'Deck debris',
    'ui_error_dflt_app': 'Check Neato app',
    'ui_error_disconnect_chrg_cable': 'Disconnected charge cable',
    'ui_error_disconnect_usb_cable': 'Disconnected USB cable',
    'ui_error_dust_bin_missing': 'Dust bin missing',
    'ui_error_dust_bin_full': 'Dust bin full',
    'ui_error_dust_bin_emptied': 'Dust bin emptied',
    'ui_error_hardware_failure': 'Hardware failure',
    'ui_error_ldrop_stuck': 'Clear my path',
    'ui_error_lds_jammed': 'Clear my path',
    'ui_error_lds_bad_packets': 'Check Neato app',
    'ui_error_lds_disconnected': 'Check Neato app',
    'ui_error_lds_missed_packets': 'Check Neato app',
    'ui_error_lwheel_stuck': 'Clear my path',
    'ui_error_navigation_backdrop_frontbump': 'Clear my path',
    'ui_error_navigation_backdrop_leftbump': 'Clear my path',
    'ui_error_navigation_backdrop_wheelextended': 'Clear my path',
    'ui_error_navigation_noprogress': 'Clear my path',
    'ui_error_navigation_origin_unclean': 'Clear my path',
    'ui_error_navigation_pathproblems': 'Cannot return to base',
    'ui_error_navigation_pinkycommsfail': 'Clear my path',
    'ui_error_navigation_falling': 'Clear my path',
    'ui_error_navigation_noexitstogo': 'Clear my path',
    'ui_error_navigation_nomotioncommands': 'Clear my path',
    'ui_error_navigation_rightdrop_leftbump': 'Clear my path',
    'ui_error_navigation_undockingfailed': 'Clear my path',
    'ui_error_picked_up': 'Picked up',
    'ui_error_qa_fail': 'Check Neato app',
    'ui_error_rdrop_stuck': 'Clear my path',
    'ui_error_reconnect_failed': 'Reconnect failed',
    'ui_error_rwheel_stuck': 'Clear my path',
    'ui_error_stuck': 'Stuck!',
    'ui_error_unable_to_return_to_base': 'Unable to return to base',
    'ui_error_unable_to_see': 'Clean vacuum sensors',
    'ui_error_vacuum_slip': 'Clear my path',
    'ui_error_vacuum_stuck': 'Clear my path',
    'ui_error_warning': 'Error check app',
    'batt_base_connect_fail': 'Battery failed to connect to base',
    'batt_base_no_power': 'Battery base has no power',
    'batt_low': 'Battery low',
    'batt_on_base': 'Battery on base',
    'clean_tilt_on_start': 'Clean the tilt on start',
    'dustbin_full': 'Dust bin full',
    'dustbin_missing': 'Dust bin missing',
    'gen_picked_up': 'Picked up',
    'hw_fail': 'Hardware failure',
    'hw_tof_sensor_sensor': 'Hardware sensor disconnected',
    'lds_bad_packets': 'Bad packets',
    'lds_deck_debris': 'Debris on deck',
    'lds_disconnected': 'Disconnected',
    'lds_jammed': 'Jammed',
    'lds_missed_packets': 'Missed packets',
    'maint_brush_stuck': 'Brush stuck',
    'maint_brush_overload': 'Brush overloaded',
    'maint_bumper_stuck': 'Bumper stuck',
    'maint_customer_support_qa': 'Contact customer support',
    'maint_vacuum_stuck': 'Vacuum is stuck',
    'maint_vacuum_slip': 'Vacuum is stuck',
    'maint_left_drop_stuck': 'Vacuum is stuck',
    'maint_left_wheel_stuck': 'Vacuum is stuck',
    'maint_right_drop_stuck': 'Vacuum is stuck',
    'maint_right_wheel_stuck': 'Vacuum is stuck',
    'not_on_charge_base': 'Not on the charge base',
    'nav_robot_falling': 'Clear my path',
    'nav_no_path': 'Clear my path',
    'nav_path_problem': 'Clear my path',
    'nav_backdrop_frontbump': 'Clear my path',
    'nav_backdrop_leftbump': 'Clear my path',
    'nav_backdrop_wheelextended': 'Clear my path',
    'nav_mag_sensor': 'Clear my path',
    'nav_no_exit': 'Clear my path',
    'nav_no_movement': 'Clear my path',
    'nav_rightdrop_leftbump': 'Clear my path',
    'nav_undocking_failed': 'Clear my path'
}
# Non-fatal alert identifiers mapped to short human-readable messages.
ALERTS = {
    'ui_alert_dust_bin_full': 'Please empty dust bin',
    'ui_alert_recovering_location': 'Returning to start',
    'ui_alert_battery_chargebasecommerr': 'Battery error',
    'ui_alert_busy_charging': 'Busy charging',
    'ui_alert_charging_base': 'Base charging',
    'ui_alert_charging_power': 'Charging power',
    'ui_alert_connect_chrg_cable': 'Connect charge cable',
    'ui_alert_info_thank_you': 'Thank you',
    'ui_alert_invalid': 'Invalid check app',
    'ui_alert_old_error': 'Old error',
    'ui_alert_swupdate_fail': 'Update failed',
    'dustbin_full': 'Please empty dust bin',
    'maint_brush_change': 'Change the brush',
    'maint_filter_change': 'Change the filter',
    'clean_completed_to_start': 'Cleaning completed',
    'nav_floorplan_not_created': 'No floorplan found',
    'nav_floorplan_load_fail': 'Failed to load floorplan',
    'nav_floorplan_localization_fail': 'Failed to load floorplan',
    'clean_incomplete_to_start': 'Cleaning incomplete',
    'log_upload_failed': 'Logs failed to upload'
}
def setup(hass, config):
    """Set up the Neato component.

    Creates the NeatoHub wrapper, logs in, primes the robot state and
    loads the dependent platforms. Returns False when login fails.
    """
    from pybotvac import Account

    # Build the hub first, then publish it for the platforms to pick up.
    hub = NeatoHub(hass, config[DOMAIN], Account)
    hass.data[NEATO_LOGIN] = hub
    if not hub.login():
        _LOGGER.debug("Failed to login to Neato API")
        return False

    # Seed hass.data with the current robot/map state before the
    # platforms are discovered.
    hub.update_robots()
    for platform in ('camera', 'vacuum', 'switch'):
        discovery.load_platform(hass, platform, DOMAIN, {}, config)

    return True
class NeatoHub:
    """A My Neato hub wrapper class.

    Thin wrapper around a pybotvac account object that caches the robot
    list and map data in hass.data for the dependent platforms.
    """
    def __init__(self, hass, domain_config, neato):
        """Initialize the Neato hub.

        :param hass: Home Assistant core object.
        :param domain_config: validated 'neato' config section (username/password).
        :param neato: account class (pybotvac Account); kept so login()
            can re-instantiate it later.
        """
        self.config = domain_config
        self._neato = neato
        self._hass = hass
        # NOTE(review): constructing the account here appears to contact
        # the Neato cloud already — confirm against pybotvac.
        self.my_neato = neato(
            domain_config[CONF_USERNAME],
            domain_config[CONF_PASSWORD])
        self._hass.data[NEATO_ROBOTS] = self.my_neato.robots
        self._hass.data[NEATO_PERSISTENT_MAPS] = self.my_neato.persistent_maps
        self._hass.data[NEATO_MAP_DATA] = self.my_neato.maps
    def login(self):
        """Login to My Neato.

        Re-creates the account object from the stored credentials.
        Returns True on success, False when the API raises HTTPError.
        """
        try:
            _LOGGER.debug("Trying to connect to Neato API")
            self.my_neato = self._neato(
                self.config[CONF_USERNAME], self.config[CONF_PASSWORD])
            return True
        except HTTPError:
            _LOGGER.error("Unable to connect to Neato API")
            return False
    # Throttled to at most one real refresh per 300 s; calls inside the
    # window are dropped by the Throttle decorator.
    @Throttle(timedelta(seconds=300))
    def update_robots(self):
        """Update the robot states."""
        _LOGGER.debug("Running HUB.update_robots %s",
                      self._hass.data[NEATO_ROBOTS])
        self._hass.data[NEATO_ROBOTS] = self.my_neato.robots
        self._hass.data[NEATO_PERSISTENT_MAPS] = self.my_neato.persistent_maps
        self._hass.data[NEATO_MAP_DATA] = self.my_neato.maps
    def download_map(self, url):
        """Download a new map image.

        :param url: map image URL (as provided by the Neato API).
        :return: raw image data returned by pybotvac.
        """
        map_image_data = self.my_neato.get_map_image(url)
        return map_image_data
| apache-2.0 |
kasioumis/invenio | invenio/modules/formatter/format_elements/bfe_comments.py | 13 | 2351 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints comments posted for the record
"""
__revision__ = "$Id$"
from ...comments.api import get_first_comments_or_remarks
def format_element(bfo, nbReviews='all', nbComments='all', show_reviews='true'):
    """
    Prints comments posted for the record.
    @param nbReviews: The max number of reviews to print
    @param nbComments: The max number of comments to print
    @param show_reviews: Shows or hides the complete review block
    """
    # Numeric strings become int limits; anything else (e.g. 'all') is
    # passed through to the comments API unchanged.
    nb_reviews = int(nbReviews) if nbReviews.isdigit() else nbReviews
    nb_comments = int(nbComments) if nbComments.isdigit() else nbComments
    # Only the exact strings 'true'/'True' enable the review block.
    show_reviews = show_reviews in ('true', 'True')
    comments, reviews = get_first_comments_or_remarks(
        recID=bfo.recID,
        ln=bfo.lang,
        nb_comments=nb_comments,
        nb_reviews=nb_reviews,
        voted=-1,
        reported=-1,
        user_info=bfo.user_info,
        show_reviews=show_reviews)
    return comments + reviews
def escape_values(bfo):
    """
    Tell BibFormat whether this element's output needs HTML-escaping.

    Returns 0 because the element emits ready-made HTML that must be
    rendered verbatim.
    """
    return 0
| gpl-2.0 |
0k/odoo | addons/l10n_it/__openerp__.py | 165 | 2072 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010
# OpenERP Italian Community (<http://www.openerp-italia.org>)
# Servabit srl
# Agile Business Group sagl
# Domsense srl
# Albatos srl
#
# Copyright (C) 2011-2012
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Human-readable module name shown in the OpenERP module list.
    'name': 'Italy - Accounting',
    'version': '0.2',
    # base_vat / base_iban provide VAT and IBAN handling; account_chart
    # supplies the generic chart-of-accounts framework this plugs into.
    'depends': ['base_vat','account_chart','base_iban'],
    'author': 'OpenERP Italian Community',
    'description': """
Piano dei conti italiano di un'impresa generica.
================================================
Italian accounting chart and localization.
""",
    'license': 'AGPL-3',
    'category': 'Localization/Account Charts',
    'website': 'http://www.openerp-italia.org/',
    # Data files loaded on install, in dependency order: account templates,
    # tax codes, chart wiring, then taxes and fiscal positions.
    'data': [
        'data/account.account.template.csv',
        'data/account.tax.code.template.csv',
        'account_chart.xml',
        'data/account.tax.template.csv',
        'data/account.fiscal.position.template.csv',
        'l10n_chart_it_generic.xml',
    ],
    'demo': [],
    'installable': True,
    # Not auto-installed: the localization must be selected explicitly.
    'auto_install': False,
    'images': ['images/config_chart_l10n_it.jpeg','images/l10n_it_chart.jpeg'],
}
| agpl-3.0 |
feuerchop/increOCSVM | results/profile_mnist/line_profiler_mnist_10000_0.2_10.py | 1 | 44650 | ['../evaluation_tmp.py', '10000']
mnist classes = 2
size: 10000
(2609,)
(7391,)
data size: 10000, nu: 0.2, gamma: 1
============ 1. Fold of CV ============
1) Incremental OCSVM
0 data points processed
1000 data points processed
2000 data points processed
3000 data points processed
4000 data points processed
5000 data points processed
6000 data points processed
None
Confusion matrix:
Prediction -1 1
Target
-1 2085 524
1 5915 1476
precision: 0.738, recall: 0.199702340685, f1-score: 0.314343520392
Number of support vectors: 8000
-----------
2) cvxopt-OCSVM
Confusion matrix:
Prediction 1
Target
-1 2609
1 7391
precision: 0.7391, recall: 1.0, f1-score: 0.849979874648
Number of support vectors: 8000
---------
3) sklearn-OCSVM
Confusion matrix:
Prediction -1 1
Target
-1 1677 932
1 4723 2668
Number of support vectors: 8000
precision: 0.741111111111, recall: 0.360979569747, f1-score: 0.485488126649
Wrote profile results to evaluation_tmp.py.lprof
Timer unit: 1e-06 s
Total time: 4446.27 s
File: ../ocsvm.py
Function: increment at line 97
Line # Hits Time Per Hit % Time Line Contents
==============================================================
97 @profile
98 def increment(self, Xc, init_ac=0, break_count=-1):
99
100 # epsilon
101 1 6 6.0 0.0 e = self._data._e
102 1 3 3.0 0.0 mu = 0
103 1 3 3.0 0.0 imin = None
104
105 # initialize existing X, coefficients a, C
106 1 7 7.0 0.0 X_origin = self._data.X()
107 1 6 6.0 0.0 K_X_origin = self._data.K_X()
108 1 6 6.0 0.0 n_data = X_origin.shape[0]
109 1 3 3.0 0.0 n_feature = X_origin.shape[1]
110
111 1 6 6.0 0.0 C = self._data.C()
112 1 6 6.0 0.0 a_origin = self._data.alpha()
113
114 # number of new incremental points
115 1 3 3.0 0.0 n_new = Xc.shape[0]
116
117 # number of all (new and existing) points
118 1 4 4.0 0.0 n_all = n_data + n_new
119
120 # concatenate all new points with all existing points
121 1 21 21.0 0.0 X = empty((n_new + n_data, n_feature))
122 1 31954 31954.0 0.0 X[0:n_new, :] = Xc
123 1 8589 8589.0 0.0 X[n_new:, :] = X_origin
124
125 # create kernel matrix for all new and existing points
126
127 # create of all data points
128 1 8 8.0 0.0 if K_X_origin == None:
129 1 10559662 10559662.0 0.2 K_X = self.gram(X)
130 else:
131 K_X = empty((n_all, n_all))
132 K_X[n_new:, n_new:] = K_X_origin
133 K_X_new = self.gram(Xc, X_origin)
134 K_X[0:n_new, :] = K_X_new
135 K_X[:, 0:n_new] = K_X_new.T
136
137 # creating coefficient vector alpha for all data points
138 1 28 28.0 0.0 a = empty(n_all)
139 1 19 19.0 0.0 a[n_new:] = a_origin
140 1 25 25.0 0.0 a[:n_new] = init_ac
141
142 # creating gradient vector
143 1 26 26.0 0.0 g = zeros(n_all)
144
145 # create sensitivity vector
146 1 7 7.0 0.0 gamma = empty(n_all)
147 1 4 4.0 0.0 check_gradient = False
148 # loop through all new points to increment
149 6316 34329 5.4 0.0 for x_count in range(n_new):
150 6315 34922 5.5 0.0 if x_count % 1000 == 0:
151 7 128 18.3 0.0 print "%s data points processed" % x_count
152 #print "--------- START %s ---------" % x_count
153
154 6315 25174 4.0 0.0 if x_count == break_count:
155 self._data.set_X(X)
156 self._data.set_alpha(a)
157 self._data.set_C(C)
158 self._data.set_K_X(K_X)
159 self.rho()
160 return False
161
162 # initialize X, a, C, g, indices, kernel values
163 6315 28553 4.5 0.0 start_origin = n_new - x_count
164 6315 23484 3.7 0.0 start_new = start_origin - 1
165
166 6315 22499 3.6 0.0 if x_count == 0:
167 1 4 4.0 0.0 inds = []
168 1 4 4.0 0.0 indr = []
169 1 3 3.0 0.0 inde = []
170 1 4 4.0 0.0 indo = []
171 1686 6578 3.9 0.0 for i in range(n_new, n_all):
172 1685 9710 5.8 0.0 if e < a[i] < C - e:
173 1685 7113 4.2 0.0 inds.append(i)
174 else:
175 indr.append(i)
176 if a[i] <= e:
177 indo.append(i)
178 else:
179 inde.append(i)
180
181 1 6 6.0 0.0 ls = len(inds) # support vectors length
182 1 4 4.0 0.0 lr = len(indr) # error and non-support vectors length
183 1 4 4.0 0.0 le = len(inde) # error vectors lenght
184 1 4 4.0 0.0 lo = len(indo)
185 #mu_old = mu
186 1 170 170.0 0.0 mu = - K_X[inds[0], :][start_origin:].dot(a[start_origin:])
187 1 4 4.0 0.0 if lr > 0:
188 g[indr] = K_X[indr, :][:, start_origin:].dot(a[start_origin:]) + mu
189 # calculate mu according to KKT-conditions
190
191
192 6315 930130 147.3 0.0 c_inds = [start_new] + inds
193
194 # kernel of support vectors
195 #Kss = K_X[:, inds][inds, :]
196 #print "difference indo: %s" % unique(round(K_X[indo, :][:, start_origin:].dot(a[start_origin:]) + mu - g[indo],6))
197 #check_gradient = True
198 #if check_gradient:
199 #g[indr] = K_X[indr, :][:, start_origin:].dot(a[start_origin:]) + mu
200 #g[indo] += K_X[indo[0], :][start_origin:].dot(a[start_origin:]) + mu - g[indo[0]]
201 #check_gradient = False
202 #print "difference indo: %s" % unique(round(K_X[indo, :][:, start_origin:].dot(a[start_origin:]) + mu - g[indo],6))
203 6315 28325 4.5 0.0 if ls > 0:
204 6315 2500668 396.0 0.1 gc = K_X[start_new, start_origin:].dot(a[start_origin:]) + mu
205
206 6315 31463 5.0 0.0 ac = a[start_new]
207
208 6315 23865 3.8 0.0 if x_count == 0:
209 1 5438 5438.0 0.0 Q = ones((ls+1, ls+1))
210 1 10 10.0 0.0 Q[0, 0] = 0
211 #Kss = self.gram(X[inds])
212 1686 7977 4.7 0.0 inds_row = [[i] for i in inds]
213 1 371349 371349.0 0.0 Q[1:, 1:] = K_X[inds_row, inds]
214 1 6 6.0 0.0 try:
215 1 1887830 1887830.0 0.0 R = inv(Q)
216 except np.linalg.linalg.LinAlgError:
217 x = 1e-11
218 found = False
219 print "singular matrix"
220 while not found:
221 try:
222 R = inv(Q + diag(ones(ls+1) * x))
223 found = True
224 except np.linalg.linalg.LinAlgError:
225 x = x*10
226 6315 24024 3.8 0.0 loop_count = 1
227 #print "gc: %s, ac: %s" % (gc, ac)
228 6315 96477 15.3 0.0 while gc < e and ac < C - e:
229 6315 24173 3.8 0.0 if ls == 0: check_gradient = True
230 #print "-------------------- incremental %s-%s ---------" % (x_count, loop_count)
231
232 6315 24031 3.8 0.0 if ls > 0:
233 6315 7278480 1152.6 0.2 n = K_X[start_new, :][c_inds]
234 6315 276420874 43772.1 6.2 beta = - R.dot(n)
235 6315 70854 11.2 0.0 betas = beta[1:]
236
237 # calculate gamma
238 6315 30666 4.9 0.0 if lr > 0 and ls > 0:
239 gamma_tmp = K_X[:, c_inds][start_new:]
240 gamma_tmp[:, 0] = 1
241 gamma[start_new:] = gamma_tmp.dot(beta) + K_X[start_new, :][start_new:]
242 gammac = gamma[start_new]
243
244 6315 26280 4.2 0.0 elif ls > 0:
245 # empty R set
246 6315 11314559 1791.7 0.3 gammac = K_X[start_new, :][c_inds].dot(beta) + 1
247
248 else:
249 # empty S set
250 gammac = 1
251 gamma[indr] = 1
252 #gamma[indo] = -1
253
254 # accounting
255 #case 1: Some alpha_i in S reaches a bound
256 6315 28753 4.6 0.0 if ls > 0:
257 6315 171869 27.2 0.0 IS_plus = betas > e
258 6315 115733 18.3 0.0 IS_minus = betas < - e
259 6315 297828 47.2 0.0 gsmax = ones(ls)*inf
260 #if np.isnan(np.min(gsmax)):
261 # gsmax = ones(ls)*inf
262 6315 6462461 1023.4 0.1 gsmax[IS_plus] = -a[inds][IS_plus] + C
263 6315 6554085 1037.9 0.1 gsmax[IS_minus] = - a[inds][IS_minus]
264 #gsmax[IS_plus] = -a[inds][IS_plus]
265 #gsmax[IS_plus] += C
266 #gsmax[IS_minus] = - a[inds][IS_minus]
267 6315 284358 45.0 0.0 gsmax = divide(gsmax, betas)
268 6315 4342393 687.6 0.1 gsmin = min(absolute(gsmax))
269 #print where(absolute(gsmax) == gsmin)
270 6315 459013 72.7 0.0 ismin = where(absolute(gsmax) == gsmin)[0][0]
271
272 else: gsmin = inf
273
274 #case 2: Some g_i in E reaches zero
275 6315 27912 4.4 0.0 if le > 0:
276
277 gamma_inde = gamma[inde]
278 g_inde = g[inde]
279 Ie_plus = gamma_inde > e
280
281 if len(g_inde[Ie_plus]) > 0:
282 gec = divide(-g_inde[Ie_plus], gamma_inde[Ie_plus])
283 gec[gec <= 0] = inf
284 gemin = min(gec)
285 if gemin < inf:
286 iemin = where(gec == gemin)[0][0]
287 else: gemin = inf
288 6315 27717 4.4 0.0 else: gemin = inf
289 #case 2: Some g_i in O reaches zero
290 6315 26844 4.3 0.0 if lo > 0 and ls > 0:
291 gamma_indo = gamma[indo]
292 g_indo = g[indo]
293 Io_minus = gamma_indo < - e
294 if len(g_indo[Io_minus]) > 0:
295 goc = divide(-g_indo[Io_minus], gamma_indo[Io_minus])
296 goc[goc <= 0] = inf
297 goc[g_indo[Io_minus] < 0] = inf
298 gomin = min(goc)
299 if gomin < inf:
300 iomin = where(goc == gomin)[0][0]
301 else: gomin = inf
302 6315 26472 4.2 0.0 else: gomin = inf
303
304 # case 3: gc becomes zero
305 6315 52912 8.4 0.0 if gammac > e: gcmin = - gc/gammac
306 else: gcmin = inf
307
308 # case 4
309 6315 39095 6.2 0.0 if ls > 0: gacmin = C - ac
310 else: gacmin = inf
311
312 # determine minimum largest increment
313 6315 37991 6.0 0.0 all_deltas = [gsmin, gemin, gomin, gcmin, gacmin]
314 6315 51044 8.1 0.0 gmin = min(all_deltas)
315 6315 151241 23.9 0.0 imin = where(all_deltas == gmin)[0][0]
316 # update a, g
317 6315 28142 4.5 0.0 if ls > 0:
318 6315 40268 6.4 0.0 mu += beta[0]*gmin
319 6315 29448 4.7 0.0 ac += gmin
320 6315 11957014 1893.4 0.3 a[inds] += betas*gmin
321 else:
322 mu += gmin
323 6315 31456 5.0 0.0 if lr > 0:
324 g[indr] += gamma[indr] * gmin
325 6315 35200 5.6 0.0 gc += gammac * gmin
326 6315 44916 7.1 0.0 if imin == 0: # min = gsmin => move k from s to r
327 # if there are more than 1 minimum, just take 1
328 ak = a[inds][ismin]
329
330 # delete the elements from X,a and g
331 # => add it to the end of X,a,g
332 ind_del = inds[ismin]
333 inds.remove(ind_del)
334 c_inds = [start_new] + inds
335 indr.append(ind_del)
336 if ak < e:
337 indo.append(ind_del)
338 lo += 1
339 else:
340 inde.append(ind_del)
341 le += 1
342
343 lr += 1
344 #decrement R, delete row ismin and column ismin
345
346 if ls > 2:
347 ismin += 1
348 R_new = zeros((ls,ls))
349 R_new[0:ismin, 0:ismin] = R[0:ismin, 0:ismin]
350 R_new[ismin:, 0:ismin] = R[ismin+1:,0:ismin]
351 R_new[0:ismin, ismin:] = R[0:ismin, ismin+1:]
352 R_new[ismin:, ismin:] = R[ismin+1:, ismin+1:]
353 betak = zeros(ls)
354 betak[:ismin] = R[ismin, :ismin]
355 betak[ismin:] = R[ismin, ismin+1:]
356 R_new -= outer(betak, betak)/R[ismin,ismin]
357 R = R_new
358 elif ls == 2:
359 R = ones((2, 2))
360 R[1,1] = 0
361 R[0,0] = -1
362 else:
363 R = inf
364 ls -= 1
365
366 6315 33341 5.3 0.0 elif imin == 1:
367 # delete the elements from X,a and g => add it to the end of X,a,g
368 ### old version find index to delete
369 #Ieplus_l = [i for i,b in enumerate(Ie_plus) if b]
370 #ind_del = inde[Ieplus_l[iemin]]
371 ### old version find index to delete
372 ind_del = np.asarray(inde)[Ie_plus][iemin]
373 if ls > 0:
374 nk = K_X[ind_del, :][[ind_del] + inds]
375 betak = - R.dot(nk)
376 betak1 = ones(ls + 2)
377 betak1[:-1] = betak
378 R_old = R
379 R = 1/k * outer(betak1, betak1)
380 R[:-1,:-1] += R_old
381 else:
382 R = ones((2, 2))
383 R[1,1] = 0
384 R[0,0] = -1
385 inds.append(ind_del)
386 c_inds = [start_new] + inds
387 indr.remove(ind_del)
388 inde.remove(ind_del)
389 ls += 1
390 lr -= 1
391 le -= 1
392
393 6315 32065 5.1 0.0 elif imin == 2: # min = gemin | gomin => move k from r to s
394
395 # delete the elements from X,a and g => add it to the end of X,a,g
396
397 ### old version find index to delete
398 #Io_minus_l = [i for i,b in enumerate(Io_minus) if b]
399 #ind_del = indo[Io_minus_l[iomin]]
400 ### old version find index to delete
401 ind_del = np.asarray(indo)[Io_minus][iomin]
402 if ls > 0:
403 nk = ones(ls+1)
404 nk[1:] = K_X[ind_del,:][inds]
405 betak = - R.dot(nk)
406 k = 1 - nk.dot(R).dot(nk)
407 betak1 = ones(ls+2)
408 betak1[:-1] = betak
409 R_old = R
410 R = 1/k * outer(betak1, betak1)
411 R[:-1,:-1] += R_old
412 else:
413 R = ones((2, 2))
414 R[1,1] = 0
415 R[0,0] = -1
416
417 indo.remove(ind_del)
418 indr.remove(ind_del)
419 inds.append(ind_del)
420 c_inds = [start_new] + inds
421 lo -= 1
422 lr -= 1
423 ls += 1
424 6315 32323 5.1 0.0 elif imin == 3:
425 '''
426 if ls > 0:
427 nk = ones(ls+1)
428 nk[1:] = K_X[start_new, :][inds]
429 betak = - R.dot(nk)
430 k = 1 - nk.dot(R).dot(nk)
431 betak1 = ones(ls + 2)
432 betak1[:-1] = betak
433 R_old = R
434 R = zeros((ls +2, ls +2))
435 R[:-1,:-1] = R_old
436 R += 1/k * outer(betak1, betak1)
437 else:
438 R = ones((2, 2))
439 R[1,1] = 0
440 R[0,0] = -1
441 '''
442 6315 28391 4.5 0.0 break
443 else:
444 break
445 loop_count += 1
446
447 6315 31180 4.9 0.0 a[start_new] = ac
448 6315 31385 5.0 0.0 g[start_new] = gc
449 6315 34513 5.5 0.0 if ac < e:
450 indr.append(start_new)
451 indo.append(start_new)
452 lr += 1
453 lo += 1
454 6315 39033 6.2 0.0 elif ac > C - e:
455 indr.append(start_new)
456 inde.append(start_new)
457 lr += 1
458 le += 1
459 else:
460 6315 45526 7.2 0.0 inds.append(start_new)
461 6315 29069 4.6 0.0 g[start_new] = 0
462 6315 37538 5.9 0.0 if len(inds) == 1:
463 R = ones((2, 2))
464 R[1,1] = 0
465 R[0,0] = -1
466 else:
467 6315 43113 6.8 0.0 if R.shape[0] != len(inds) + 1:
468 6315 127707 20.2 0.0 nk = ones(ls+1)
469 6315 7318330 1158.9 0.2 nk[1:] = K_X[start_new, :][inds[:-1]]
470 6315 276033663 43710.8 6.2 betak = - R.dot(nk)
471 6315 949917 150.4 0.0 k = 1 - nk.dot(R).dot(nk)
472 6315 221603 35.1 0.0 betak1 = ones(ls + 2)
473 6315 96065 15.2 0.0 betak1[:-1] = betak
474 6315 82876318 13123.7 1.9 R_old = R
475 6315 2616448189 414322.8 58.8 R = 1/k * outer(betak1, betak1)
476 6315 1114393414 176467.7 25.1 R[:-1,:-1] += R_old
477
478 6315 56172 8.9 0.0 ls += 1
479 # update X, a
480 1 27 27.0 0.0 self._data.set_X(X)
481 1 9 9.0 0.0 self._data.set_alpha(a)
482 1 9 9.0 0.0 self._data.set_C(C)
483 1 10 10.0 0.0 self._data.set_K_X(K_X)
484 1 4118987 4118987.0 0.1 print self.rho()
*** PROFILER RESULTS ***
incremental_ocsvm (../evaluation_tmp.py:185)
function called 1 times
186226 function calls in 4458.908 seconds
Ordered by: cumulative time, internal time, call count
List reduced from 149 to 40 due to restriction <40>
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 4458.908 4458.908 evaluation_tmp.py:185(incremental_ocsvm)
1 0.062 0.062 4448.712 4448.712 line_profiler.py:95(wrapper)
1 2499.493 2499.493 4448.650 4448.650 ocsvm.py:97(increment)
6315 1372.956 0.217 1373.067 0.217 numeric.py:740(outer)
37892 554.850 0.015 554.850 0.015 {method 'dot' of 'numpy.ndarray' objects}
2 0.000 0.000 11.064 5.532 ocsvm.py:58(gram)
2 0.000 0.000 11.064 5.532 pairwise.py:1164(pairwise_kernels)
2 0.000 0.000 11.064 5.532 pairwise.py:949(_parallel_pairwise)
2 2.008 1.004 11.064 5.532 pairwise.py:740(rbf_kernel)
1 0.013 0.013 10.196 10.196 ocsvm.py:35(fit)
1 0.386 0.386 10.183 10.183 ocsvm.py:62(alpha)
1 0.003 0.003 9.108 9.108 coneprog.py:4159(qp)
1 0.005 0.005 9.104 9.104 coneprog.py:1441(coneqp)
2 0.890 0.445 9.029 4.515 pairwise.py:136(euclidean_distances)
5 0.000 0.000 8.759 1.752 coneprog.py:1984(kktsolver)
5 0.120 0.024 8.759 1.752 misc.py:1389(factor)
2 0.000 0.000 8.112 4.056 extmath.py:171(safe_sparse_dot)
2 0.000 0.000 8.112 4.056 extmath.py:129(fast_dot)
2 7.718 3.859 8.112 4.056 extmath.py:97(_fast_dot)
5 6.097 1.219 6.097 1.219 {cvxopt.base.syrk}
12647 4.239 0.000 4.239 0.000 {min}
1 3.809 3.809 4.074 4.074 ocsvm.py:45(rho)
1 0.000 0.000 1.888 1.888 linalg.py:404(inv)
1 0.000 0.000 1.883 1.883 linalg.py:244(solve)
1 1.740 1.740 1.740 1.740 {numpy.linalg.lapack_lite.dgesv}
5 1.316 0.263 1.316 0.263 {cvxopt.base.gemm}
10 1.191 0.119 1.191 0.119 {cvxopt.lapack.potrf}
8 0.000 0.000 0.438 0.055 validation.py:268(check_array)
8 0.000 0.000 0.429 0.054 validation.py:43(_assert_all_finite)
8 0.428 0.054 0.428 0.054 {method 'sum' of 'numpy.ndarray' objects}
4 0.000 0.000 0.394 0.099 extmath.py:87(_impose_f_order)
18950 0.115 0.000 0.369 0.000 numeric.py:1791(ones)
56 0.255 0.005 0.255 0.005 {cvxopt.base.gemv}
9 0.000 0.000 0.222 0.025 misc.py:1489(solve)
12630 0.210 0.000 0.210 0.000 {numpy.core.multiarray.where}
8 0.000 0.000 0.199 0.025 coneprog.py:2333(f4)
8 0.000 0.000 0.198 0.025 coneprog.py:2291(f4_no_ir)
2 0.000 0.000 0.160 0.080 shape_base.py:177(vstack)
2 0.158 0.079 0.158 0.079 {numpy.core.multiarray.concatenate}
1 0.157 0.157 0.158 0.158 data.py:29(Xs)
*** PROFILER RESULTS ***
cvxopt_ocsvm (../evaluation_tmp.py:181)
function called 1 times
1399 function calls in 851.843 seconds
Ordered by: cumulative time, internal time, call count
List reduced from 123 to 40 due to restriction <40>
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 851.843 851.843 evaluation_tmp.py:181(cvxopt_ocsvm)
1 0.215 0.215 851.843 851.843 ocsvm.py:35(fit)
1 13.610 13.610 836.090 836.090 ocsvm.py:62(alpha)
1 0.085 0.085 805.091 805.091 coneprog.py:4159(qp)
1 0.009 0.009 805.006 805.006 coneprog.py:1441(coneqp)
5 0.000 0.000 797.632 159.526 coneprog.py:1984(kktsolver)
5 2.340 0.468 797.632 159.526 misc.py:1389(factor)
5 630.443 126.089 630.443 126.089 {cvxopt.base.syrk}
10 110.158 11.016 110.158 11.016 {cvxopt.lapack.potrf}
5 53.899 10.780 53.899 10.780 {cvxopt.base.gemm}
2 0.000 0.000 25.810 12.905 ocsvm.py:58(gram)
2 0.000 0.000 25.810 12.905 pairwise.py:1164(pairwise_kernels)
2 0.012 0.006 25.810 12.905 pairwise.py:949(_parallel_pairwise)
2 3.824 1.912 25.798 12.899 pairwise.py:740(rbf_kernel)
2 1.760 0.880 21.800 10.900 pairwise.py:136(euclidean_distances)
2 0.000 0.000 19.970 9.985 extmath.py:171(safe_sparse_dot)
2 0.000 0.000 19.970 9.985 extmath.py:129(fast_dot)
2 19.296 9.648 19.970 9.985 extmath.py:97(_fast_dot)
1 0.000 0.000 15.538 15.538 ocsvm.py:45(rho)
2 0.000 0.000 5.915 2.957 shape_base.py:177(vstack)
2 5.914 2.957 5.914 2.957 {numpy.core.multiarray.concatenate}
56 5.881 0.105 5.881 0.105 {cvxopt.base.gemv}
9 0.001 0.000 4.780 0.531 misc.py:1489(solve)
8 0.000 0.000 4.241 0.530 coneprog.py:2333(f4)
8 0.000 0.000 4.241 0.530 coneprog.py:2291(f4_no_ir)
10 0.000 0.000 2.122 0.212 coneprog.py:1900(fG)
10 0.000 0.000 2.122 0.212 misc.py:801(sgemv)
18 1.019 0.057 1.019 0.057 {cvxopt.blas.trsv}
10 0.000 0.000 0.893 0.089 validation.py:268(check_array)
2 0.001 0.001 0.841 0.420 twodim_base.py:220(diag)
4 0.840 0.210 0.840 0.210 {numpy.core.multiarray.zeros}
5 0.780 0.156 0.780 0.156 {cvxopt.blas.trsm}
10 0.000 0.000 0.763 0.076 validation.py:43(_assert_all_finite)
10 0.762 0.076 0.762 0.076 {method 'sum' of 'numpy.ndarray' objects}
4 0.000 0.000 0.674 0.168 extmath.py:87(_impose_f_order)
5 0.000 0.000 0.432 0.086 coneprog.py:1847(fP)
5 0.432 0.086 0.432 0.086 {cvxopt.base.symv}
2 0.256 0.128 0.257 0.129 data.py:29(Xs)
4 0.000 0.000 0.219 0.055 pairwise.py:57(check_pairwise_arrays)
39 0.130 0.003 0.130 0.003 {numpy.core.multiarray.array}
*** PROFILER RESULTS ***
sklearn_ocsvm (../evaluation_tmp.py:177)
function called 1 times
61 function calls in 437.500 seconds
Ordered by: cumulative time, internal time, call count
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 437.500 437.500 evaluation_tmp.py:177(sklearn_ocsvm)
1 0.004 0.004 437.500 437.500 classes.py:941(fit)
1 0.000 0.000 437.496 437.496 base.py:99(fit)
1 0.000 0.000 437.436 437.436 base.py:211(_dense_fit)
1 437.436 437.436 437.436 437.436 {sklearn.svm.libsvm.fit}
1 0.000 0.000 0.059 0.059 validation.py:268(check_array)
5 0.044 0.009 0.044 0.009 {numpy.core.multiarray.array}
1 0.000 0.000 0.015 0.015 validation.py:43(_assert_all_finite)
1 0.015 0.015 0.015 0.015 {method 'sum' of 'numpy.ndarray' objects}
1 0.000 0.000 0.000 0.000 base.py:193(_validate_targets)
1 0.000 0.000 0.000 0.000 validation.py:126(_shape_repr)
2 0.000 0.000 0.000 0.000 numeric.py:167(asarray)
1 0.000 0.000 0.000 0.000 numeric.py:1791(ones)
1 0.000 0.000 0.000 0.000 {method 'join' of 'str' objects}
2 0.000 0.000 0.000 0.000 base.py:553(isspmatrix)
1 0.000 0.000 0.000 0.000 {method 'fill' of 'numpy.ndarray' objects}
2 0.000 0.000 0.000 0.000 sputils.py:116(_isinstance)
2 0.000 0.000 0.000 0.000 numeric.py:237(asanyarray)
3 0.000 0.000 0.000 0.000 validation.py:153(<genexpr>)
1 0.000 0.000 0.000 0.000 getlimits.py:234(__init__)
2 0.000 0.000 0.000 0.000 {numpy.core.multiarray.empty}
1 0.000 0.000 0.000 0.000 validation.py:105(_num_samples)
1 0.000 0.000 0.000 0.000 {sklearn.svm.libsvm.set_verbosity_wrap}
1 0.000 0.000 0.000 0.000 shape_base.py:58(atleast_2d)
1 0.000 0.000 0.000 0.000 {method 'copy' of 'numpy.ndarray' objects}
1 0.000 0.000 0.000 0.000 validation.py:503(check_random_state)
3 0.000 0.000 0.000 0.000 {hasattr}
1 0.000 0.000 0.000 0.000 getlimits.py:259(max)
1 0.000 0.000 0.000 0.000 base.py:203(_warn_from_fit_status)
1 0.000 0.000 0.000 0.000 {method 'randint' of 'mtrand.RandomState' objects}
1 0.000 0.000 0.000 0.000 {method 'index' of 'list' objects}
6 0.000 0.000 0.000 0.000 {len}
4 0.000 0.000 0.000 0.000 {method 'split' of 'str' objects}
3 0.000 0.000 0.000 0.000 {isinstance}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
2 0.000 0.000 0.000 0.000 {callable}
1 0.000 0.000 0.000 0.000 {method 'append' of 'list' objects}
0 0.000 0.000 profile:0(profiler)
| gpl-2.0 |
hradec/gaffer | python/GafferAppleseedUI/AppleseedRenderUI.py | 11 | 2447 | ##########################################################################
#
# Copyright (c) 2014, Esteban Tovagliari. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferAppleseed
# Register UI metadata for the AppleseedRender node: a node-level
# description plus per-plug presentation hints (widget type, path
# bookmarks, and file-extension filtering for the generated project).
Gaffer.Metadata.registerNode(
GafferAppleseed.AppleseedRender,
"description",
"""
Performs offline batch rendering using the
appleseed renderer, or optionally generates
appleseed projects for later rendering using a SystemCommand
node.
""",
plugs = {
"fileName" : [
"description",
"""
The name of the appleseed project file to be generated.
""",
"nodule:type", "",
"plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
"path:leaf", True,
"path:bookmarks", "appleseed",
"fileSystemPath:extensions", "appleseed",
],
}
)
| bsd-3-clause |
joshpfosi/gbn | src/dsdv/bindings/callbacks_list.py | 151 | 1222 | callback_classes = [
['void', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::Socket::SocketErrno', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Ipv4Route>', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| gpl-2.0 |
stevehof/CouchPotatoServer | libs/caper/parsers/anime.py | 81 | 2347 | # Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from caper.parsers.base import Parser
# Matches a release-group tag wrapped in brackets or parentheses,
# e.g. "[HorribleSubs]" or "(Group)"; the tag text is captured as 'group'.
REGEX_GROUP = re.compile(r'(\(|\[)(?P<group>.*?)(\)|\])', re.IGNORECASE)
# Pattern table handed to the base Parser: a list of
# (capture-name, pattern list) pairs.  Patterns may be plain regex
# strings, (prefix, suffix) tuples, or (template, choices) pairs where
# the template is filled with each choice.
PATTERN_GROUPS = [
    ('identifier', [
        r'S(?P<season>\d+)E(?P<episode>\d+)',
        r'(S(?P<season>\d+))|(E(?P<episode>\d+))',
        r'Ep(?P<episode>\d+)',
        r'$(?P<absolute>\d+)^',
        (r'Episode', r'(?P<episode>\d+)'),
    ]),
    ('video', [
        (r'(?P<h264_profile>%s)', [
            'Hi10P'
        ]),
        (r'.(?P<resolution>%s)', [
            '720p',
            '1080p',
            '960x720',
            '1920x1080'
        ]),
        (r'(?P<source>%s)', [
            'BD'
        ]),
    ]),
    ('audio', [
        (r'(?P<codec>%s)', [
            'FLAC'
        ]),
    ])
]
class AnimeParser(Parser):
    """Caper parser specialised for anime release names: captures the
    release-group tag, show name, episode identifier, and video/audio
    attributes defined in PATTERN_GROUPS."""
    def __init__(self, debug=False):
        super(AnimeParser, self).__init__(PATTERN_GROUPS, debug)
    def capture_group(self, fragment):
        """Return the release-group name from a bracketed fragment
        (e.g. '[Group] Show ...'), or None if the fragment has no tag."""
        match = REGEX_GROUP.match(fragment.value)
        if not match:
            return None
        return match.group('group')
    def run(self, closures):
        """
        :type closures: list of CaperClosure
        """
        self.setup(closures)
        # Capture order matters: group tag first (once), then the show
        # name up to the first identifier/video fragment, then the
        # remaining attribute fragments.
        self.capture_closure('group', func=self.capture_group)\
            .execute(once=True)
        self.capture_fragment('show_name', single=False)\
            .until_fragment(value__re='identifier')\
            .until_fragment(value__re='video')\
            .execute()
        self.capture_fragment('identifier', regex='identifier') \
            .capture_fragment('video', regex='video', single=False) \
            .capture_fragment('audio', regex='audio', single=False) \
            .execute()
        self.result.build()
        return self.result
| gpl-3.0 |
xuru/pyvisdk | pyvisdk/do/file_info.py | 1 | 1308 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def FileInfo(vim, *args, **kwargs):
    '''This data object type contains rudimentary information about a file in a
    datastore. The information here is not meant to cover all information in
    traditional file systems, but rather to provide sufficient information for
    files that are associated with virtual machines. Derived types describe the
    known file types for a datastore.

    :param vim: connection whose ``client.factory`` builds the SOAP object
    :param args: positional property values, filled in declaration order
                 (required properties first, then optional ones)
    :param kwargs: property values by name
    :raises IndexError: if no property value is supplied at all
    :raises InvalidArgumentError: if a keyword names an unknown property
    '''
    obj = vim.client.factory.create('ns0:FileInfo')

    # do some validation checking...
    # Only one property ('path') is required, so at least one value must be
    # supplied overall.  The original message claimed "at least 2 arguments"
    # and only counted positional args; both were misleading.
    if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument, got: %d' %
                         (len(args) + len(kwargs)))

    required = ['path']
    optional = ['fileSize', 'modification', 'owner', 'dynamicProperty',
                'dynamicType']

    # Positional arguments map onto properties in declaration order.
    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)

    # Keyword arguments must name a known property.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| mit |
apapillon/django-twitter-stream | twitter_stream/fields.py | 2 | 3423 | from django.db import models
from django import forms
from django.core import exceptions
import math
class PositiveBigIntegerField(models.BigIntegerField):
    """Unsigned 64-bit integer model field.

    Uses MySQL's native ``bigint UNSIGNED`` column when available and
    falls back to a plain (signed) BIGINT on other back-ends.  Form
    validation enforces the unsigned range 0 .. 2**64 - 1.
    """
    description = "Positive Big integer"

    def formfield(self, **kwargs):
        # The maximum of an unsigned 64-bit integer is 2**64 - 1, i.e.
        # 2 * MAX_BIGINT + 1 (MAX_BIGINT is the signed max 2**63 - 1).
        # The previous "* 2 - 1" undershot the true maximum by 2.
        defaults = {'min_value': 0,
                    'max_value': models.BigIntegerField.MAX_BIGINT * 2 + 1}
        defaults.update(kwargs)
        return super(PositiveBigIntegerField, self).formfield(**defaults)

    def db_type(self, connection):
        # Only the MySQL back-end has an unsigned BIGINT column type.
        if 'mysql' in connection.__class__.__module__:
            return 'bigint UNSIGNED'
        return super(PositiveBigIntegerField, self).db_type(connection)
class PositiveBigAutoField(models.AutoField):
    """Auto-incrementing unsigned 64-bit primary-key field.

    Emits ``bigint UNSIGNED AUTO_INCREMENT`` on MySQL and falls back to
    the regular AutoField column type elsewhere.
    """
    description = "Unsigned Big Integer"
    empty_strings_allowed = False
    # Maximum of a *signed* 64-bit integer (2**63 - 1); the unsigned
    # maximum used for form validation is 2 * MAX_BIGINT + 1.
    MAX_BIGINT = 9223372036854775807

    def db_type(self, connection):
        if 'mysql' in connection.__class__.__module__:
            return 'bigint UNSIGNED AUTO_INCREMENT'
        return super(PositiveBigAutoField, self).db_type(connection)

    default_error_messages = {
        'invalid': "'%(value)s' value must be an integer.",
    }

    def get_prep_value(self, value):
        """Coerce a Python value to an int for the database (None passes through)."""
        if value is None:
            return None
        return int(value)

    def get_prep_lookup(self, lookup_type, value):
        # Round float bounds up for gte/lt lookups so the integer
        # comparison is equivalent to the fractional one.
        if ((lookup_type == 'gte' or lookup_type == 'lt')
            and isinstance(value, float)):
            value = math.ceil(value)
        return super(PositiveBigAutoField, self).get_prep_lookup(lookup_type, value)

    def to_python(self, value):
        """Convert a database/serialized value to int, or raise ValidationError."""
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def formfield(self, **kwargs):
        # Unsigned 64-bit max is 2 * MAX_BIGINT + 1 (2**64 - 1); the
        # previous "* 2 - 1" undershot the true maximum by 2.
        defaults = {'min_value': 0,
                    'max_value': PositiveBigAutoField.MAX_BIGINT * 2 + 1,
                    'form_class': forms.IntegerField }
        defaults.update(kwargs)
        return super(PositiveBigAutoField, self).formfield(**defaults)
class PositiveBigAutoForeignKey(models.ForeignKey):
    """A special foriegn key field for positive big auto fields"""
    def db_type(self, connection):
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is making AutoField an IntegerField.
        rel_field = self.related_field
        # When pointing at our custom auto field, mirror the unsigned
        # BIGINT column type so the FK column matches the PK column.
        if isinstance(rel_field, PositiveBigAutoField):
            return PositiveBigIntegerField().db_type(connection=connection)
        return rel_field.db_type(connection=connection)
# South (legacy Django migrations) must be taught how to freeze custom
# fields; register no-op introspection rules for ours.  Skip silently
# when South is not installed.
try:
    # If we are using south, we need some rules to use these fields
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], ["^twitter_stream\.fields\.PositiveBigAutoField"])
    add_introspection_rules([], ["^twitter_stream\.fields\.PositiveBigIntegerField"])
    add_introspection_rules([], ["^twitter_stream\.fields\.PositiveBigAutoForeignKey"])
except ImportError:
    pass
| mit |
maciekcc/tensorflow | tensorflow/contrib/learn/python/learn/datasets/produce_small_datasets.py | 124 | 1332 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Produce DBpedia datasets of a smaller size."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.datasets import text_datasets
from tensorflow.python.platform import app
def main(unused_argv):
    """Download the DBpedia dataset and write 1/1000-sized train/test CSVs."""
    text_datasets.maybe_download_dbpedia('dbpedia_data')
    # Reduce the size of original data by a factor of 1000.
    base.shrink_csv('dbpedia_data/dbpedia_csv/train.csv', 1000)
    base.shrink_csv('dbpedia_data/dbpedia_csv/test.csv', 1000)

if __name__ == '__main__':
    app.run()
| apache-2.0 |
undoware/neutron-drive | google_appengine/lib/django_1_2/django/core/management/commands/dbshell.py | 313 | 1261 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(BaseCommand):
    """``manage.py dbshell``: launch the database's interactive CLI client."""
    help = ("Runs the command-line client for specified database, or the "
        "default database if none is provided.")
    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database onto which to '
                'open a shell. Defaults to the "default" database.'),
    )
    # No model validation needed just to spawn a shell.
    requires_model_validation = False
    def handle(self, **options):
        # Resolve the connection for the requested alias and delegate to
        # the backend's client wrapper (psql, mysql, sqlite3, ...).
        connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
        try:
            connection.client.runshell()
        except OSError:
            # Note that we're assuming OSError means that the client program
            # isn't installed. There's a possibility OSError would be raised
            # for some other reason, in which case this error message would be
            # inaccurate. Still, this message catches the common case.
            raise CommandError('You appear not to have the %r program installed or on your path.' % \
                connection.client.executable_name)
| bsd-3-clause |
starwels/starwels | test/functional/feature_dersig.py | 2 | 6357 | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Starwels developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).
Test that the DERSIG soft-fork activates at (regtest) height 1251.
"""
from test_framework.test_framework import StarwelsTestFramework
from test_framework.util import *
from test_framework.mininode import *
from test_framework.blocktools import create_coinbase, create_block
from test_framework.script import CScript
from io import BytesIO
# Regtest height at which the BIP66 strict-DER rule activates.
DERSIG_HEIGHT = 1251
# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
    """
    Make the signature in vin 0 of a tx non-DER-compliant,
    by adding padding after the S-value.
    """
    original = CScript(tx.vin[0].scriptSig)
    padded = []
    for index, element in enumerate(original):
        if index == 0:
            # Splice a zero byte in just before the trailing sighash byte.
            padded.append(element[0:-1] + b'\0' + element[-1:])
        else:
            padded.append(element)
    tx.vin[0].scriptSig = CScript(padded)
def create_transaction(node, coinbase, to_address, amount):
    """Build, sign and deserialize a tx spending output 0 of the given
    coinbase block's coinbase transaction.

    NOTE(review): assumes `coinbase` is a block hash whose coinbase output
    is spendable by `node`'s wallet -- confirm against callers.
    """
    from_txid = node.getblock(coinbase)['tx'][0]
    inputs = [{ "txid" : from_txid, "vout" : 0}]
    outputs = { to_address : amount }
    rawtx = node.createrawtransaction(inputs, outputs)
    signresult = node.signrawtransaction(rawtx)
    tx = CTransaction()
    tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex'])))
    return tx
class BIP66Test(StarwelsTestFramework):
    """Drive a single node up to DERSIG_HEIGHT and verify the BIP66
    acceptance rules before and after activation."""
    def set_test_params(self):
        self.num_nodes = 1
        self.extra_args = [['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']]
        self.setup_clean_chain = True
    def run_test(self):
        self.nodes[0].add_p2p_connection(P2PInterface())
        network_thread_start()
        # wait_for_verack ensures that the P2P connection is fully up.
        self.nodes[0].p2p.wait_for_verack()
        # Mine to one block short of the activation height.
        self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
        self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2)
        self.nodeaddress = self.nodes[0].getnewaddress()
        self.log.info("Test that a transaction with non-DER signature can still appear in a block")
        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                self.nodeaddress, 1.0)
        unDERify(spendtx)
        spendtx.rehash()
        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time)
        block.nVersion = 2
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        # Pre-activation: the non-DER spend is accepted in a block.
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)
        self.log.info("Test that blocks must now be at least version 3")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
        block.nVersion = 2
        block.rehash()
        block.solve()
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        # The version-2 block at activation height must be rejected.
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
        wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
        with mininode_lock:
            assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)')
            assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
            del self.nodes[0].p2p.last_message["reject"]
        self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
        block.nVersion = 3
        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                self.nodeaddress, 1.0)
        unDERify(spendtx)
        spendtx.rehash()
        # First we show that this tx is valid except for DERSIG by getting it
        # accepted to the mempool (which we can achieve with
        # -promiscuousmempoolflags).
        self.nodes[0].p2p.send_and_ping(msg_tx(spendtx))
        assert spendtx.hash in self.nodes[0].getrawmempool()
        # Now we verify that a block with this transaction is invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
        wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
        with mininode_lock:
            # We can receive different reject messages depending on whether
            # starwelsd is running with multiple script check threads. If script
            # check threads are not in use, then transaction script validation
            # happens sequentially, and starwelsd produces more specific reject
            # reasons.
            assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
            assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
            if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
            else:
                assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason
        self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
        block.vtx[1] = create_transaction(self.nodes[0],
                self.coinbase_blocks[1], self.nodeaddress, 1.0)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
# Standard functional-test entry point.
if __name__ == '__main__':
    BIP66Test().main()
| mit |
isandlaTech/cohorte-demos | led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-1.0.0-20141201.234602-19-python-distribution/repo/sleekxmpp/thirdparty/socks.py | 10 | 16154 | """SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import socket
import struct
# Proxy protocol identifiers accepted by setproxy()/setdefaultproxy().
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
# Module-wide default proxy tuple (set via setdefaultproxy) and the
# original socket class, kept so wrapmodule() replacements stay reversible.
_defaultproxy = None
_orgsocket = socket.socket
# Exception hierarchy: everything derives from ProxyError so callers can
# catch broadly, with one subclass per protocol/failure category.
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass
# Human-readable messages, indexed by the numeric codes carried in the
# exception tuples / returned by SOCKS servers.
_generalerrors = ("success",
    "invalid data",
    "not connected",
    "not available",
    "bad proxy type",
    "bad input")
_socks5errors = ("succeeded",
    "general SOCKS server failure",
    "connection not allowed by ruleset",
    "Network unreachable",
    "Host unreachable",
    "Connection refused",
    "TTL expired",
    "Command not supported",
    "Address type not supported",
    "Unknown error")
_socks5autherrors = ("succeeded",
    "authentication is required",
    "all offered authentication methods were rejected",
    "unknown username or invalid password",
    "unknown error")
_socks4errors = ("request granted",
    "request rejected or failed",
    "request rejected because SOCKS server cannot connect to identd on the client",
    "request rejected because the client program and identd report different user-ids",
    "unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed.
    """
    # Stored as a 6-tuple; socksocket.__init__ copies it at construction
    # time, so changing the default later does not affect existing sockets.
    global _defaultproxy
    _defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
    """wrapmodule(module)
    Attempts to replace a module's socket library with a SOCKS socket. Must set
    a default proxy using setdefaultproxy(...) first.
    This will only work on modules that import socket directly into the namespace;
    most of the Python Standard Library falls into this category.

    :raises GeneralProxyError: if no default proxy has been configured.
    """
    # Idiomatic identity check with None (was `!= None`).
    if _defaultproxy is not None:
        module.socket.socket = socksocket
    else:
        raise GeneralProxyError((4, "no proxy specified"))
class socksocket(socket.socket):
    """socksocket([family[, type[, proto]]]) -> socket object
    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
    """
    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
        _orgsocket.__init__(self, family, type, proto, _sock)
        # Snapshot the module default (if any); later setdefaultproxy()
        # calls do not affect this socket.
        if _defaultproxy != None:
            self.__proxy = _defaultproxy
        else:
            self.__proxy = (None, None, None, None, None, None)
        self.__proxysockname = None
        self.__proxypeername = None
    def __recvall(self, count):
        """__recvall(count) -> data
        Receive EXACTLY the number of bytes requested from the socket.
        Blocks until the required number of bytes have been received.
        """
        data = self.recv(count)
        while len(data) < count:
            d = self.recv(count-len(data))
            if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
            data = data + d
        return data
    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.
        proxytype - The type of the proxy to be used. Three types
            are supported: PROXY_TYPE_SOCKS4 (including socks4a),
            PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr - The address of the server (IP or DNS).
        port - The port of the server. Defaults to 1080 for SOCKS
            servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be preformed on the remote side
            (rather than the local side). The default is True.
            Note: This has no effect with SOCKS4 servers.
        username - Username to authenticate with to the server.
            The default is no authentication.
        password - Password to authenticate with to the server.
            Only relevant when username is also provided.
        """
        self.__proxy = (proxytype, addr, port, rdns, username, password)
    def __negotiatesocks5(self, destaddr, destport):
        """__negotiatesocks5(self,destaddr,destport)
        Negotiates a connection through a SOCKS5 server.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
        # We'll receive the server's response to determine which
        # method was selected
        chosenauth = self.__recvall(2)
        if chosenauth[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # Check the chosen authentication method
        if chosenauth[1:2] == chr(0x00).encode():
            # No authentication is required
            pass
        elif chosenauth[1:2] == chr(0x02).encode():
            # Okay, we need to perform a basic username/password
            # authentication.
            # NOTE(review): chr(len(...)) yields a str on Python 3, so this
            # str+bytes concatenation assumes Python 2 -- verify before
            # relying on authenticated SOCKS5 under Python 3.
            self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0:1] != chr(0x01).encode():
                # Bad response
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1:2] != chr(0x00).encode():
                # Authentication failed
                self.close()
                raise Socks5AuthError((3, _socks5autherrors[3]))
            # Authentication succeeded
        else:
            # Reaching here is always bad
            self.close()
            # NOTE(review): on Python 3, chosenauth[1] is an int while the
            # right-hand side is bytes, so this comparison is always False
            # and the generic error path below is taken instead.
            if chosenauth[1] == chr(0xFF).encode():
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Now we can request the actual connection
        req = struct.pack('BBB', 0x05, 0x01, 0x00)
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + chr(0x01).encode() + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3]:
                # Resolve remotely
                ipaddr = None
                req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
            else:
                # Resolve locally
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + chr(0x01).encode() + ipaddr
        req = req + struct.pack(">H", destport)
        self.sendall(req)
        # Get the response
        resp = self.__recvall(4)
        if resp[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1:2] != chr(0x00).encode():
            # Connection failed
            self.close()
            if ord(resp[1:2])<=8:
                raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        # Get the bound address/port
        elif resp[3:4] == chr(0x01).encode():
            boundaddr = self.__recvall(4)
        elif resp[3:4] == chr(0x03).encode():
            resp = resp + self.recv(1)
            boundaddr = self.__recvall(ord(resp[4:5]))
        else:
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        boundport = struct.unpack(">H", self.__recvall(2))[0]
        self.__proxysockname = (boundaddr, boundport)
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)
    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        return self.__proxysockname
    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        return _orgsocket.getpeername(self)
    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy)
        """
        return self.__proxypeername
    def __negotiatesocks4(self,destaddr,destport):
        """__negotiatesocks4(self,destaddr,destport)
        Negotiates a connection through a SOCKS4 server.
        """
        # Check if the destination address provided is an IP address
        rmtrslv = False
        try:
            ipaddr = socket.inet_aton(destaddr)
        except socket.error:
            # It's a DNS name. Check where it should be resolved.
            if self.__proxy[3]:
                # SOCKS4A marker address 0.0.0.1: tells the server the
                # real hostname follows the userid field.
                ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
                rmtrslv = True
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
        # Construct the request packet
        req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
        # The username parameter is considered userid for SOCKS4
        if self.__proxy[4] != None:
            req = req + self.__proxy[4]
        req = req + chr(0x00).encode()
        # DNS name if remote resolving is required
        # NOTE: This is actually an extension to the SOCKS4 protocol
        # called SOCKS4A and may not be supported in all cases.
        if rmtrslv:
            req = req + destaddr + chr(0x00).encode()
        self.sendall(req)
        # Get the response from the server
        resp = self.__recvall(8)
        if resp[0:1] != chr(0x00).encode():
            # Bad data
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        if resp[1:2] != chr(0x5A).encode():
            # Server returned an error
            self.close()
            if ord(resp[1:2]) in (91, 92, 93):
                self.close()
                raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
            else:
                raise Socks4Error((94, _socks4errors[4]))
        # Get the bound address/port
        self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
        # NOTE(review): rmtrslv is a bool, never None, so this condition is
        # always True and the else branch below is dead code.
        if rmtrslv != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)
    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server.
        """
        # If we need to resolve locally, we do this now
        if not self.__proxy[3]:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        self.sendall(("CONNECT " + addr + ":" + str(destport) + " HTTP/1.1\r\n" + "Host: " + destaddr + "\r\n\r\n").encode())
        # We read the response until we get the string "\r\n\r\n"
        # (one byte at a time -- slow, but header sizes are tiny).
        resp = self.recv(1)
        while resp.find("\r\n\r\n".encode()) == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ".encode(), 2)
        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)
    def connect(self, destpair):
        """connect(self, despair)
        Connects to the specified destination through a proxy.
        destpar - A tuple of the IP/DNS address and the port number.
        (identical to socket's connect).
        To select the proxy server use setproxy().
        """
        # Do a minimal input check first
        if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
            raise GeneralProxyError((5, _generalerrors[5]))
        # Connect to the proxy itself, then run the per-protocol
        # negotiation; with no proxy configured, connect directly.
        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks5(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self,(self.__proxy[1], portnum))
            self.__negotiatesocks4(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            _orgsocket.connect(self,(self.__proxy[1], portnum))
            self.__negotiatehttp(destpair[0], destpair[1])
        elif self.__proxy[0] == None:
            _orgsocket.connect(self, (destpair[0], destpair[1]))
        else:
            raise GeneralProxyError((4, _generalerrors[4]))
| apache-2.0 |
odoousers2014/odoo | addons/hr_expense/__openerp__.py | 7 | 2750 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest for the Expense Tracker module.
{
    'name': 'Expense Tracker',
    'version': '1.0',
    'category': 'Human Resources',
    'sequence': 29,
    'summary': 'Expenses Validation, Invoicing',
    'description': """
Manage expenses by Employees
============================
This application allows you to manage your employees' daily expenses. It gives you access to your employees’ fee notes and give you the right to complete and validate or refuse the notes. After validation it creates an invoice for the employee.
Employee can encode their own expenses and the validation flow puts it automatically in the accounting after validation by managers.
The whole flow is implemented as:
---------------------------------
* Draft expense
* Confirmation of the sheet by the employee
* Validation by his manager
* Validation by the accountant and accounting entries creation
This module also uses analytic accounting and is compatible with the invoice on timesheet module so that you are able to automatically re-invoice your customers' expenses if your work by project.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/expenses',
    'depends': ['hr', 'account_accountant', 'report'],
    # Data files loaded on install/update, in order.
    'data': [
        'security/ir.model.access.csv',
        'hr_expense_data.xml',
        'hr_expense_sequence.xml',
        'hr_expense_workflow.xml',
        'hr_expense_view.xml',
        'hr_expense_report.xml',
        'security/ir_rule.xml',
        'report/hr_expense_report_view.xml',
        'hr_expense_installer_view.xml',
        'views/report_expense.xml',
    ],
    'demo': ['hr_expense_demo.xml'],
    'test': [
        'test/expense_demo.yml',
        'test/expense_process.yml',
    ],
    'installable': True,
    'auto_install': False,
    'application': True,
}
| agpl-3.0 |
flavour/ifrc_qa | modules/tests/inv/create_catalog.py | 1 | 2196 | # -*- coding: utf-8 -*-
""" Sahana Eden Automated Tests - INV006 Create Catalog
@copyright: 2011-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class CreateCatalog(SeleniumUnitTest):
def test_inv006_create_catalog(self):
"""
@case: INV006
@description: Create a Catalog
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
print "\n"
# Login, if not-already done so
self.login(account="admin", nexturl="supply/catalog/create")
self.create("supply_catalog",
[( "name",
"IFRC Food Catalogue" ),
( "organisation_id",
"International Federation of Red Cross and Red Crescent Societies"),
( "comments",
"This is a test Catalogue")
]
)
| mit |
zhiweix-dong/linux-yocto-micro-3.19 | tools/perf/scripts/python/syscall-counts-by-pid.py | 1996 | 2105 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Usage text printed (via sys.exit) when too many arguments are given.
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

# Optional filter: restrict the report to one comm (process name) or pid.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        # A numeric argument selects a pid ...
        for_pid = int(sys.argv[1])
    except:
        # ... anything non-numeric is treated as a comm name.
        for_comm = sys.argv[1]

# syscalls[comm][pid][syscall_id] -> invocation count (autovivifying dict).
syscalls = autodict()
def trace_begin():
    # Called by perf once before event processing starts.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf after the last event; emit the summary table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    common_callchain, id, args):
    # Handler for the raw_syscalls:sys_enter tracepoint: count one syscall
    # invocation per (comm, pid, syscall id), honoring the optional
    # comm/pid filter set from argv.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # First hit for this key: the autodict leaf is a fresh (empty)
        # autodict, so '+=' raises TypeError; seed the counter instead.
        syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # Fallback handler for perf builds that dispatch per-syscall events
    # without a callchain argument.
    #
    # Bug fix: the previous body forwarded **locals(), but
    # raw_syscalls__sys_enter() also requires a 'common_callchain'
    # parameter that is not among this function's locals, so every call
    # raised TypeError.  Forward the arguments explicitly with an empty
    # callchain instead.
    raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        [], id, args)
def print_syscall_totals():
    # Emit the per-comm/per-pid syscall counts accumulated in 'syscalls',
    # sorted within each pid by descending count.  Python-2 print
    # statements with trailing commas suppress the automatic newline.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",

    # Table header.
    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort by (count, id) descending so the hottest syscalls
            # come first.
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
maellak/invenio | modules/oairepository/lib/oai_repository_unit_tests.py | 16 | 5712 | # -*- coding: utf-8 -*-
## Invenio OAI repository unit tests.
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the oai repository."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
import re
from cStringIO import StringIO
from invenio import oai_repository_server
from invenio.testutils import make_test_suite, run_test_suite
class TestVerbs(InvenioTestCase):
    """Test for OAI verb functionality."""

    def test_verbs(self):
        """oairepository - testing verbs"""
        def found(pattern, text):
            # Match object when 'pattern' occurs in 'text', else None.
            return re.search(pattern, text)

        # Identify
        self.assertNotEqual(None, found("Identify",
            oai_repository_server.oai_identify({'verb': 'Identify'})))

        # ListIdentifiers (streamed into a buffer)
        buf = StringIO()
        oai_repository_server.oai_list_records_or_identifiers(
            buf, {'verb': 'ListIdentifiers', 'metadataPrefix': 'marcxml'})
        self.assertNotEqual(None, found("ListIdentifiers", buf.getvalue()))

        # ListRecords (streamed into a buffer)
        buf = StringIO()
        oai_repository_server.oai_list_records_or_identifiers(
            buf, {'verb': 'ListRecords', 'metadataPrefix': 'marcxml'})
        self.assertNotEqual(None, found("ListRecords", buf.getvalue()))

        # ListMetadataFormats, ListSets, GetRecord
        self.assertNotEqual(None, found("ListMetadataFormats",
            oai_repository_server.oai_list_metadata_formats({'verb': 'ListMetadataFormats'})))
        self.assertNotEqual(None, found("ListSets",
            oai_repository_server.oai_list_sets({'verb': 'ListSets'})))
        self.assertNotEqual(None, found("GetRecord",
            oai_repository_server.oai_get_record({'identifier': 'oai:atlantis.cern.ch:1',
                                                  'verb': 'GetRecord'})))
class TestErrorCodes(InvenioTestCase):
    """Test for handling OAI error codes."""

    def test_issue_error_identify(self):
        """oairepository - testing error codes"""
        # Each check runs check_argd() and asserts that the expected OAI-PMH
        # error code appears among the (code, text) pairs it returns.

        # Unknown verb -> 'badVerb'.
        self.assertNotEqual([], [code for (code, dummy_text) in oai_repository_server.check_argd({'verb':"IllegalVerb"}) if code == 'badVerb'])
        # Identify accepts no extra arguments -> 'badArgument'.
        self.assertNotEqual([], [code for (code, dummy_text) in oai_repository_server.check_argd({'verb':"Identify",
                                                                                                  'test':"test"}) if code == 'badArgument'])
        # Unparseable from/until dates -> 'badArgument'.
        self.assertNotEqual([], [code for (code, dummy_text) in oai_repository_server.check_argd({'verb':"ListIdentifiers",
                                                                                                  'metadataPrefix':"oai_dc",
                                                                                                  'from':"some_random_date",
                                                                                                  'until':"some_random_date"}) if code == 'badArgument'])
        # Mixed date granularities (date vs. full timestamp) -> 'badArgument'.
        self.assertNotEqual([], [code for (code, dummy_text) in oai_repository_server.check_argd({'verb':"ListIdentifiers",
                                                                                                  'metadataPrefix':"oai_dc",
                                                                                                  'from':"2001-01-01",
                                                                                                  'until':"2002-01-01T00:00:00Z"}) if code == 'badArgument'])
        # Missing required metadataPrefix -> 'badArgument'.
        self.assertNotEqual([], [code for (code, dummy_text) in oai_repository_server.check_argd({'verb':"ListIdentifiers"}) if code == 'badArgument'])
        # Unsupported metadata format -> 'cannotDisseminateFormat'.
        self.assertNotEqual([], [code for (code, dummy_text) in oai_repository_server.check_argd({'verb':"ListIdentifiers",
                                                                                                  'metadataPrefix':"illegal_mdp"}) if code == 'cannotDisseminateFormat'])
        # NOTE(review): the duplicated 'metadataPrefix' key below collapses to
        # a single entry in the dict literal, so this probably does not
        # exercise the "repeated argument" error it appears intended to test
        # -- confirm against check_argd()'s expected input shape.
        self.assertNotEqual([], [code for (code, dummy_text) in oai_repository_server.check_argd({'verb':"ListIdentifiers",
                                                                                                  'metadataPrefix':"oai_dc",
                                                                                                  'metadataPrefix':"oai_dc"}) if code == 'badArgument'])
        # Bad set plus bad dates -> 'badArgument'.
        self.assertNotEqual([], [code for (code, dummy_text) in oai_repository_server.check_argd({'verb':"ListRecords",
                                                                                                  'metadataPrefix':"oai_dc",
                                                                                                  'set':"really_wrong_set",
                                                                                                  'from':"some_random_date",
                                                                                                  'until':"some_random_date"}) if code == 'badArgument'])
        # ListRecords without metadataPrefix -> 'badArgument'.
        self.assertNotEqual([], [code for (code, dummy_text) in oai_repository_server.check_argd({'verb':"ListRecords"}) if code == 'badArgument'])
        # Empty resumption token -> 'badResumptionToken'.
        self.assertNotEqual([], [code for (code, dummy_text) in oai_repository_server.check_argd({'verb': 'ListRecords', 'resumptionToken': ''}) if code == 'badResumptionToken'])
# Register the test cases with Invenio's suite helper so this module can run
# both standalone and as part of the full test suite.
TEST_SUITE = make_test_suite(TestVerbs,
                             TestErrorCodes)

if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
ralphbean/ansible | v2/ansible/plugins/action/raw.py | 23 | 1754 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action handler for the ``raw`` module.

    Runs the raw command on the remote host (no module/file transfer) and
    strips the sudo/su success marker that can leak into stdout.
    """

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=dict()):
        # Bug fix: this module used re.sub() below without ever importing
        # 're', so any output starting with the SUDO-SUCCESS marker raised
        # NameError.  Imported locally to keep the fix self-contained.
        import re

        # FIXME: need to rework the noop stuff still
        #if self.runner.noop_on_check(inject):
        #    # in --check mode, always skip this module execution
        #    return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True))

        executable = self._task.args.get('executable')
        result = self._low_level_execute_command(self._task.args.get('_raw_params'), tmp=tmp, executable=executable)

        # for some modules (script, raw), the sudo success key
        # may leak into the stdout due to the way the sudo/su
        # command is constructed, so we filter that out here
        if result.get('stdout','').strip().startswith('SUDO-SUCCESS-'):
            result['stdout'] = re.sub(r'^((\r)?\n)?SUDO-SUCCESS.*(\r)?\n', '', result['stdout'])

        return result
| gpl-3.0 |
dennybaa/st2 | st2actions/tests/unit/test_runner_container_service.py | 10 | 4537 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo_config import cfg
import unittest2
from st2common.constants.action import LIBS_DIR as ACTION_LIBS_DIR
from st2actions.container.service import RunnerContainerService
from st2tests import config as tests_config
class RunnerContainerServiceTest(unittest2.TestCase):
    """Unit tests for path-resolution helpers on RunnerContainerService."""

    @classmethod
    def setUpClass(cls):
        tests_config.parse_args()

    def test_get_pack_base_path(self):
        saved = cfg.CONF.content.system_packs_base_path
        cfg.CONF.content.system_packs_base_path = '/tests/packs'

        for pack_name in ('test_pack_1', 'test_pack_2', 'ma_pack'):
            resolved = RunnerContainerService().get_pack_base_path(pack_name=pack_name)
            self.assertEqual(resolved,
                             os.path.join(cfg.CONF.content.system_packs_base_path,
                                          pack_name))

        cfg.CONF.content.system_packs_base_path = saved

    def test_get_entry_point_absolute_path(self):
        svc = RunnerContainerService()
        saved = cfg.CONF.content.system_packs_base_path
        cfg.CONF.content.system_packs_base_path = '/tests/packs'

        # An already-absolute entry point is returned unchanged.
        resolved = svc.get_entry_point_abs_path(
            pack='foo', entry_point='/tests/packs/foo/bar.py')
        self.assertEqual(resolved, '/tests/packs/foo/bar.py',
                         'Entry point path doesn\'t match.')

        cfg.CONF.content.system_packs_base_path = saved

    def test_get_entry_point_absolute_path_empty(self):
        svc = RunnerContainerService()
        saved = cfg.CONF.content.system_packs_base_path
        cfg.CONF.content.system_packs_base_path = '/tests/packs'

        # None and empty string both resolve to None.
        for empty_value in (None, ''):
            resolved = svc.get_entry_point_abs_path(pack='foo',
                                                    entry_point=empty_value)
            self.assertEqual(resolved, None, 'Entry point path doesn\'t match.')

        cfg.CONF.content.system_packs_base_path = saved

    def test_get_entry_point_relative_path(self):
        svc = RunnerContainerService()
        saved = cfg.CONF.content.system_packs_base_path
        cfg.CONF.content.system_packs_base_path = '/tests/packs'

        # A relative entry point is rooted under <pack>/actions.
        resolved = svc.get_entry_point_abs_path(pack='foo', entry_point='foo/bar.py')
        self.assertEqual(resolved,
                         os.path.join(cfg.CONF.content.system_packs_base_path,
                                      'foo', 'actions', 'foo/bar.py'),
                         'Entry point path doesn\'t match.')

        cfg.CONF.content.system_packs_base_path = saved

    def test_get_action_libs_abs_path(self):
        svc = RunnerContainerService()
        saved = cfg.CONF.content.system_packs_base_path
        cfg.CONF.content.system_packs_base_path = '/tests/packs'

        # Relative entry point: libs dir lives under <pack>/actions.
        resolved = svc.get_action_libs_abs_path(pack='foo', entry_point='foo/bar.py')
        self.assertEqual(resolved,
                         os.path.join(cfg.CONF.content.system_packs_base_path,
                                      'foo', 'actions',
                                      os.path.join('foo', ACTION_LIBS_DIR)),
                         'Action libs path doesn\'t match.')

        # Absolute entry point: libs dir lives next to the entry point.
        resolved = svc.get_action_libs_abs_path(
            pack='foo', entry_point='/tests/packs/foo/tmp/foo.py')
        self.assertEqual(resolved,
                         os.path.join('/tests/packs/foo/tmp', ACTION_LIBS_DIR),
                         'Action libs path doesn\'t match.')

        cfg.CONF.content.system_packs_base_path = saved
| apache-2.0 |
2ndQuadrant/ansible | test/units/modules/network/f5/test_bigip_smtp.py | 16 | 5069 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_smtp import ApiParameters
from library.modules.bigip_smtp import ModuleParameters
from library.modules.bigip_smtp import ModuleManager
from library.modules.bigip_smtp import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_smtp import ApiParameters
from ansible.modules.network.f5.bigip_smtp import ModuleParameters
from ansible.modules.network.f5.bigip_smtp import ModuleManager
from ansible.modules.network.f5.bigip_smtp import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# Directory holding JSON/text fixtures, plus a per-process memoization cache.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Load (and memoize) a test fixture by file name.

    The file is read from the ``fixtures`` directory next to this module.
    If its contents parse as JSON, the decoded object is returned;
    otherwise the raw text is returned unchanged.  Results are cached in
    ``fixture_data`` so each fixture is read from disk at most once.
    """
    path = os.path.join(fixture_path, name)

    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except ValueError:
        # Not JSON -- return the raw file contents.  ValueError is the base
        # of json.JSONDecodeError, so this no longer swallows unrelated
        # errors the way the previous 'except Exception: pass' did.
        pass

    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Checks translation of module arguments and API payloads into Parameters."""

    def test_module_parameters(self):
        raw = {
            'name': 'foo',
            'smtp_server': '1.1.1.1',
            'smtp_server_port': '25',
            'smtp_server_username': 'admin',
            'smtp_server_password': 'password',
            'local_host_name': 'smtp.mydomain.com',
            'encryption': 'tls',
            'update_password': 'always',
            'from_address': 'no-reply@mydomain.com',
            'authentication': True,
        }
        params = ModuleParameters(params=raw)

        assert params.name == 'foo'
        assert params.smtp_server == '1.1.1.1'
        # Port is coerced from string to int.
        assert params.smtp_server_port == 25
        assert params.smtp_server_username == 'admin'
        assert params.smtp_server_password == 'password'
        assert params.local_host_name == 'smtp.mydomain.com'
        assert params.encryption == 'tls'
        assert params.update_password == 'always'
        assert params.from_address == 'no-reply@mydomain.com'
        assert params.authentication_disabled is None
        assert params.authentication_enabled is True

    def test_api_parameters(self):
        params = ApiParameters(params=load_fixture('load_sys_smtp_server.json'))

        assert params.name == 'foo'
        assert params.smtp_server == 'mail.foo.bar'
        assert params.smtp_server_port == 465
        assert params.smtp_server_username == 'admin'
        assert params.smtp_server_password == '$M$Ch$this-is-encrypted=='
        assert params.local_host_name == 'mail-host.foo.bar'
        assert params.encryption == 'ssl'
        assert params.from_address == 'no-reply@foo.bar'
        assert params.authentication_disabled is None
        assert params.authentication_enabled is True
class TestManager(unittest.TestCase):
    """Exercises ModuleManager.exec_module on the create path."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_monitor(self, *args):
        task_args = {
            'name': 'foo',
            'smtp_server': '1.1.1.1',
            'smtp_server_port': '25',
            'smtp_server_username': 'admin',
            'smtp_server_password': 'password',
            'local_host_name': 'smtp.mydomain.com',
            'encryption': 'tls',
            'update_password': 'always',
            'from_address': 'no-reply@mydomain.com',
            'authentication': True,
            'partition': 'Common',
            'provider': {
                'server': 'localhost',
                'password': 'password',
                'user': 'admin',
            },
        }
        set_module_args(task_args)

        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Stub out device access: the resource is absent before the call,
        # present afterwards, and creation always succeeds.
        manager = ModuleManager(module=ansible_module)
        manager.exists = Mock(side_effect=[False, True])
        manager.create_on_device = Mock(return_value=True)

        results = manager.exec_module()

        assert results['changed'] is True
        assert results['encryption'] == 'tls'
        assert results['smtp_server'] == '1.1.1.1'
        assert results['smtp_server_port'] == 25
        assert results['local_host_name'] == 'smtp.mydomain.com'
        assert results['authentication'] is True
        assert results['from_address'] == 'no-reply@mydomain.com'
        # Credentials must never be echoed back in the results.
        assert 'smtp_server_username' not in results
        assert 'smtp_server_password' not in results
| gpl-3.0 |
Qalthos/ansible | test/units/modules/network/f5/test_bigip_sys_global.py | 16 | 4201 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_sys_global import ApiParameters
from library.modules.bigip_sys_global import ModuleParameters
from library.modules.bigip_sys_global import ModuleManager
from library.modules.bigip_sys_global import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_sys_global import ApiParameters
from ansible.modules.network.f5.bigip_sys_global import ModuleParameters
from ansible.modules.network.f5.bigip_sys_global import ModuleManager
from ansible.modules.network.f5.bigip_sys_global import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# Directory holding JSON/text fixtures, plus a per-process memoization cache.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Load (and memoize) a test fixture by file name.

    The file is read from the ``fixtures`` directory next to this module.
    If its contents parse as JSON, the decoded object is returned;
    otherwise the raw text is returned unchanged.  Results are cached in
    ``fixture_data`` so each fixture is read from disk at most once.
    """
    path = os.path.join(fixture_path, name)

    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except ValueError:
        # Not JSON -- return the raw file contents.  ValueError is the base
        # of json.JSONDecodeError, so this no longer swallows unrelated
        # errors the way the previous 'except Exception: pass' did.
        pass

    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Checks translation of module arguments and API payloads into Parameters."""

    def test_module_parameters(self):
        raw = {
            'banner_text': 'this is a banner',
            'console_timeout': 100,
            'gui_setup': 'yes',
            'lcd_display': 'yes',
            'mgmt_dhcp': 'yes',
            'net_reboot': 'yes',
            'quiet_boot': 'yes',
            'security_banner': 'yes',
        }
        params = ModuleParameters(params=raw)

        assert params.banner_text == 'this is a banner'
        assert params.console_timeout == 100
        assert params.gui_setup == 'yes'
        assert params.lcd_display == 'yes'
        assert params.mgmt_dhcp == 'yes'
        assert params.net_reboot == 'yes'
        assert params.quiet_boot == 'yes'
        assert params.security_banner == 'yes'

    def test_api_parameters(self):
        payload = load_fixture('load_sys_global_settings.json')
        params = ApiParameters(params=payload)

        assert 'Welcome to the BIG-IP Configuration Utility' in params.banner_text
        assert params.console_timeout == 0
        assert params.gui_setup == 'no'
        assert params.lcd_display == 'yes'
        assert params.mgmt_dhcp == 'yes'
        assert params.net_reboot == 'no'
        assert params.quiet_boot == 'yes'
        assert params.security_banner == 'yes'
class TestManager(unittest.TestCase):
    """Exercises ModuleManager.exec_module on the update path."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_update(self, *args):
        task_args = {
            'banner_text': 'this is a banner',
            'console_timeout': 100,
            'state': 'present',
            'provider': {
                'server': 'localhost',
                'password': 'password',
                'user': 'admin',
            },
        }
        set_module_args(task_args)

        # What a query of the remote device would currently report.
        current = ApiParameters(params=load_fixture('load_sys_global_settings.json'))

        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        manager = ModuleManager(module=ansible_module)

        # Force the update code path and avoid any real device access.
        manager.exists = Mock(return_value=False)
        manager.read_current_from_device = Mock(return_value=current)
        manager.update_on_device = Mock(return_value=True)

        results = manager.exec_module()

        assert results['changed'] is True
| gpl-3.0 |
tectronics/open-ihm | docs/ERDs/generate_modules.py | 3 | 75875 | <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<title>
etienned / sphinx-autopackage-script / source — bitbucket.org
</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="description" content="Mercurial hosting - we're here to serve." />
<meta name="keywords" content="mercurial,hg,hosting,bitbucket,etienned,This,script,parse,a,directory,tree,looking,for,python,modules,and,packages,and,create,ReST,files,appropriately,to,create,code,documentation,with,Sphinx.,It,also,create,a,modules,index.,source,sourcecode,generate_modules.py@8275c6a0431a" />
<link rel="stylesheet" type="text/css" href="http://bitbucket-assets.s3.amazonaws.com/css/layout.css" />
<link rel="stylesheet" type="text/css" href="http://bitbucket-assets.s3.amazonaws.com/css/screen.css" />
<link rel="stylesheet" type="text/css" href="http://bitbucket-assets.s3.amazonaws.com/css/print.css" media="print" />
<link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="Bitbucket" />
<link rel="icon" href="http://bitbucket-assets.s3.amazonaws.com/img/logo_new.png" type="image/png"/>
<script type="text/javascript">var MEDIA_URL = "http://bitbucket-assets.s3.amazonaws.com/"</script>
<script type="text/javascript" src="http://bitbucket-assets.s3.amazonaws.com/js/lib/bundle.020510May.js"></script>
<script type="text/javascript">
$(document).ready(function() {
Dropdown.init();
$(".tooltip").tipsy({gravity:'s'});
});
</script>
<noscript>
<style type="text/css">
.dropdown-container-text .dropdown {
position: static !important;
}
</style>
</noscript>
<!--[if lt IE 7]>
<style type="text/css">
body {
behavior: url(http://bitbucket-assets.s3.amazonaws.com/css/csshover.htc);
}
#issues-issue pre {
white-space: normal !important;
}
.changeset-description {
white-space: normal !important;
}
</style>
<script type="text/javascript">
$(document).ready(function(){
$('#header-wrapper').pngFix();
$('#sourcelist').pngFix();
$('.promo-signup-screenshot').pngFix();
});
</script>
<![endif]-->
<link rel="stylesheet" href="http://bitbucket-assets.s3.amazonaws.com/css/highlight/trac.css" type="text/css" />
<style type="text/css">
/*body { background: #fff url('/m/img/layout/bg_header_new.png') repeat-x left 24px !important; }*/
</style>
</head>
<body class="">
<div id="main-wrapper">
<div id="header-wrapper">
<div id="header">
<a href="/"><img src="http://bitbucket-assets.s3.amazonaws.com/img/logo_myriad.png" alt="Bitbucket" id="header-wrapper-logo" /></a>
<div id="header-nav">
<ul class="right">
<li><a href="/">Home</a></li>
<li><a href="/plans"><b>Plans & Signup</b></a></li>
<li><a href="/repo/all">Repositories</a></li>
<li><a href="/news">News</a></li>
<li><a href="/help">Help</a></li>
<li><a href="/account/signin/">Sign in</a></li>
</ul>
</div>
</div>
</div>
<div id="content-wrapper">
<script type="text/javascript" src="http://bitbucket-assets.s3.amazonaws.com/js/lib/jquery.cookie.js"></script> <!--REMOVE WHEN NEWER BUNDLE THAN 030309Mar -->
<script type="text/javascript">
var date = new Date();
date.setTime(date.getTime() + (365 * 24 * 60 * 60 * 1000));
var cookieoptions = { path: '/', expires: date };
window._shard = 'bitbucket01 (ID 8)';
$(document).ready(function(){
$('#toggle-repo-content').click(function(){
$('#repo-desc-cloneinfo').toggle('fast');
$('#repo-menu').toggle();
$('#repo-menu-links-mini').toggle(100);
$('.repo-desc-description').toggle('fast');
var avatar_new_width = ($('.repo-avatar').width() == 35) ? 16 : 35;
$('.repo-avatar').animate({ width: avatar_new_width }, 250);
if ($.cookie('toggle_status') == 'hide') {
$.cookie('toggle_status', 'show', cookieoptions);
$(this).css('background-image','url(http://bitbucket-assets.s3.amazonaws.com/img/repo-toggle-up.png)');
} else {
$.cookie('toggle_status', 'hide', cookieoptions);
$(this).css('background-image','url(http://bitbucket-assets.s3.amazonaws.com/img/repo-toggle-down.png)');
}
});
if ($.cookie('toggle_status') == 'hide') {
$('#toggle-repo-content').css('background-image','url(http://bitbucket-assets.s3.amazonaws.com/img/repo-toggle-down.png)');
$('#repo-desc-cloneinfo').hide();
$('#repo-menu').hide();
$('#repo-menu-links-mini').show();
$('.repo-desc-description').hide();
$('.repo-avatar').css({ width: '16px' });
} else {
$('#toggle-repo-content').css('background-image','url(http://bitbucket-assets.s3.amazonaws.com/img/repo-toggle-up.png)');
$('#repo-desc-cloneinfo').show();
$('#repo-menu').show();
$('#repo-menu-links-mini').hide();
$('.repo-desc-description').show();
$('.repo-avatar').css({ width: '35px' });
}
});
</script>
<div id="tabs">
<ul class="ui-tabs-nav">
<li>
<a href="/etienned/sphinx-autopackage-script/overview"><span>Overview</span></a>
</li>
<li>
<a href="/etienned/sphinx-autopackage-script/downloads"><span>Downloads (0)</span></a>
</li>
<li class="ui-tabs-selected">
<a href="/etienned/sphinx-autopackage-script/src/8275c6a0431a"><span>Source</span></a>
</li>
<li>
<a href="/etienned/sphinx-autopackage-script/changesets"><span>Changesets</span></a>
</li>
<li class="ui-tabs-nav-issues">
<a href="/etienned/sphinx-autopackage-script/wiki"><span>Wiki</span></a>
</li>
<li class="ui-tabs-nav-issues">
<a href="/etienned/sphinx-autopackage-script/issues?status=new&status=open"><span>Issues (1) »</span></a>
<ul>
<li><a href="/etienned/sphinx-autopackage-script/issues?status=new">New issues</a></li>
<li><a href="/etienned/sphinx-autopackage-script/issues?status=new&status=open">Open issues</a></li>
<li><a href="/etienned/sphinx-autopackage-script/issues?status=resolved&status=invalid&status=duplicate">Closed issues</a></li>
<li><a href="/etienned/sphinx-autopackage-script/issues">All issues</a></li>
<li><a href="/etienned/sphinx-autopackage-script/issues/query">Advanced query</a></li>
<li><a href="/etienned/sphinx-autopackage-script/issues/new">Create new issue</a></li>
</ul>
</li>
<li class="tabs-right tabs-far-right">
<a href="/etienned/sphinx-autopackage-script/descendants"><span>Forks/Queues (0)</span></a>
</li>
<li class="tabs-right">
<a href="/etienned/sphinx-autopackage-script/zealots"><span>Followers (3)</span></a>
</li>
</ul>
</div>
<div id="repo-menu">
<div id="repo-menu-links">
<ul>
<li>
<a href="/etienned/sphinx-autopackage-script/rss" class="noborder repo-menu-rss" title="RSS Feed for sphinx-autopackage-script">RSS</a>
</li>
<li>
<a href="/etienned/sphinx-autopackage-script/atom" class="noborder repo-menu-atom" title="Atom Feed for sphinx-autopackage-script">Atom</a>
</li>
<li>
<a href="/etienned/sphinx-autopackage-script/pull" class="link-request-pull">
pull request
</a>
</li>
<li><a href="/etienned/sphinx-autopackage-script/fork" class="link-fork">fork</a></li>
<li><a href="/etienned/sphinx-autopackage-script/hack" class="link-hack">patch queue</a></li>
<li>
<a rel="nofollow" href="/etienned/sphinx-autopackage-script/follow" class="link-follow">follow</a>
</li>
<li><a class="link-download">get source »</a>
<ul>
<li><a rel="nofollow" href="/etienned/sphinx-autopackage-script/get/8275c6a0431a.zip" class="zip">zip</a></li>
<li><a rel="nofollow" href="/etienned/sphinx-autopackage-script/get/8275c6a0431a.gz" class="compressed">gz</a></li>
<li><a rel="nofollow" href="/etienned/sphinx-autopackage-script/get/8275c6a0431a.bz2" class="compressed">bz2</a></li>
</ul>
</li>
</ul>
</div>
<div id="repo-menu-branches-tags">
<ul>
<li class="icon-branches">
branches »
<ul>
<li><a href="/etienned/sphinx-autopackage-script/src/8275c6a0431a">default</a></li>
</ul>
</li>
<li class="icon-tags">
tags »
<ul>
<li><a href="/etienned/sphinx-autopackage-script/src/8275c6a0431a">tip</a></li>
</ul>
</li>
</ul>
</div>
<div class="cb"></div>
</div>
<div id="repo-desc" class="layout-box">
<div id="repo-menu-links-mini" class="right">
<ul>
<li>
<a href="/etienned/sphinx-autopackage-script/rss" class="noborder repo-menu-rss" title="RSS Feed for sphinx-autopackage-script"></a>
</li>
<li>
<a href="/etienned/sphinx-autopackage-script/atom" class="noborder repo-menu-atom" title="Atom Feed for sphinx-autopackage-script"></a>
</li>
<li>
<a href="/etienned/sphinx-autopackage-script/pull" class="tooltip noborder link-request-pull" title="Pull request"></a>
</li>
<li><a href="/etienned/sphinx-autopackage-script/fork" class="tooltip noborder link-fork" title="Fork"></a></li>
<li><a href="/etienned/sphinx-autopackage-script/hack" class="tooltip noborder link-hack" title="Patch queue"></a></li>
<li><a class="tooltip noborder link-download" title="Get source"></a>
<ul>
<li><a rel="nofollow" href="/etienned/sphinx-autopackage-script/get/8275c6a0431a.zip" class="zip">zip</a></li>
<li><a rel="nofollow" href="/etienned/sphinx-autopackage-script/get/8275c6a0431a.gz" class="compressed">gz</a></li>
<li><a rel="nofollow" href="/etienned/sphinx-autopackage-script/get/8275c6a0431a.bz2" class="compressed">bz2</a></li>
</ul>
</li>
</ul>
</div>
<h3>
<a href="/etienned">etienned</a> /
<a href="/etienned/sphinx-autopackage-script">sphinx-autopackage-script</a>
</h3>
<p class="repo-desc-description">This script parse a directory tree looking for python modules and packages and
create ReST files appropriately to create code documentation with Sphinx.
It also create a modules index. </p>
<div id="repo-desc-cloneinfo">Clone this repository (size: 6.7 KB): <a href="http://bitbucket.org/etienned/sphinx-autopackage-script" onclick="$('#clone-url-ssh').hide();$('#clone-url-https').toggle();return(false);"><small>HTTPS</small></a> / <a href="ssh://hg@bitbucket.org/etienned/sphinx-autopackage-script" onclick="$('#clone-url-https').hide();$('#clone-url-ssh').toggle();return(false);"><small>SSH</small></a><br/>
<pre id="clone-url-https">$ hg clone <a href="http://bitbucket.org/etienned/sphinx-autopackage-script">http://bitbucket.org/etienned/sphinx-autopackage-script</a></pre>
<pre id="clone-url-ssh" style="display:none;">$ hg clone <a href="ssh://hg@bitbucket.org/etienned/sphinx-autopackage-script">ssh://hg@bitbucket.org/etienned/sphinx-autopackage-script</a></pre></div>
<div class="cb"></div>
<a href="#" id="toggle-repo-content"></a>
</div>
<div id="source-summary" class="layout-box">
<div class="right">
<table>
<tr>
<td>commit 1:</td>
<td>8275c6a0431a</td>
</tr>
<tr>
<td>parent 0:</td>
<td>
<a href="/etienned/sphinx-autopackage-script/changeset/d20aab0a12b8" title="<b>Author:</b> etienne<br/><b>Age:</b> 5 months ago<br/>Initial commit" class="tooltip tooltip-ul">d20aab0a12b8</a>
</td>
</tr>
<tr>
<td>branch: </td>
<td>default</td>
</tr>
<tr>
<td>tags:</td>
<td>tip</td>
</tr>
</table>
</div>
<div class="changeset-description">Added README</div>
<div>
<div class="dropdown-container">
<img src="http://www.gravatar.com/avatar/faffc204d966e588cfa4cd8544c18865?d=identicon&s=32" class="avatar dropdown" />
<ul class="dropdown-list">
<li>
<a href="/etienned">View etienned's profile</a>
</li>
<li>
<a href="">etienned's public repos »</a>
<ul>
<li><a href="/etienned/sphinx-autopackage-script/overview">sphinx-autopackage-script</a></li>
</ul>
</li>
<li>
<a href="/account/notifications/send/?receiver=etienned">Send message</a>
</li>
</ul>
</div>
<span class="dropdown-right">
<a href="/etienned">etienned</a>
<br/>
<small class="dropdown-right">3 months ago</small>
</span>
</div>
<div class="cb"></div>
</div>
<div id="source-path" class="layout-box">
<a href="/etienned/sphinx-autopackage-script/src">sphinx-autopackage-script</a> /
generate_modules.py
</div>
<div id="source-view" class="scroll-x">
<table class="info-table">
<tr>
<th>r1:8275c6a0431a</th>
<th>287 loc</th>
<th>10.7 KB</th>
<th class="source-view-links">
<a id="embed-link" href="#" onclick="makeEmbed('#embed-link', 'http://bitbucket.org/etienned/sphinx-autopackage-script/src/8275c6a0431a/generate_modules.py?embed=t');">embed</a> /
<a href='/etienned/sphinx-autopackage-script/history/generate_modules.py'>history</a> /
<a href='/etienned/sphinx-autopackage-script/annotate/8275c6a0431a/generate_modules.py'>annotate</a> /
<a href='/etienned/sphinx-autopackage-script/raw/8275c6a0431a/generate_modules.py'>raw</a> /
<form action="/etienned/sphinx-autopackage-script/diff/generate_modules.py" method="get" class="source-view-form">
<select name='nothing' class="smaller" disabled="disabled">
<option>No previous changes</option>
</select>
</form>
</th>
</tr>
</table>
<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><a href="#cl-1"> 1</a>
<a href="#cl-2"> 2</a>
<a href="#cl-3"> 3</a>
<a href="#cl-4"> 4</a>
<a href="#cl-5"> 5</a>
<a href="#cl-6"> 6</a>
<a href="#cl-7"> 7</a>
<a href="#cl-8"> 8</a>
<a href="#cl-9"> 9</a>
<a href="#cl-10"> 10</a>
<a href="#cl-11"> 11</a>
<a href="#cl-12"> 12</a>
<a href="#cl-13"> 13</a>
<a href="#cl-14"> 14</a>
<a href="#cl-15"> 15</a>
<a href="#cl-16"> 16</a>
<a href="#cl-17"> 17</a>
<a href="#cl-18"> 18</a>
<a href="#cl-19"> 19</a>
<a href="#cl-20"> 20</a>
<a href="#cl-21"> 21</a>
<a href="#cl-22"> 22</a>
<a href="#cl-23"> 23</a>
<a href="#cl-24"> 24</a>
<a href="#cl-25"> 25</a>
<a href="#cl-26"> 26</a>
<a href="#cl-27"> 27</a>
<a href="#cl-28"> 28</a>
<a href="#cl-29"> 29</a>
<a href="#cl-30"> 30</a>
<a href="#cl-31"> 31</a>
<a href="#cl-32"> 32</a>
<a href="#cl-33"> 33</a>
<a href="#cl-34"> 34</a>
<a href="#cl-35"> 35</a>
<a href="#cl-36"> 36</a>
<a href="#cl-37"> 37</a>
<a href="#cl-38"> 38</a>
<a href="#cl-39"> 39</a>
<a href="#cl-40"> 40</a>
<a href="#cl-41"> 41</a>
<a href="#cl-42"> 42</a>
<a href="#cl-43"> 43</a>
<a href="#cl-44"> 44</a>
<a href="#cl-45"> 45</a>
<a href="#cl-46"> 46</a>
<a href="#cl-47"> 47</a>
<a href="#cl-48"> 48</a>
<a href="#cl-49"> 49</a>
<a href="#cl-50"> 50</a>
<a href="#cl-51"> 51</a>
<a href="#cl-52"> 52</a>
<a href="#cl-53"> 53</a>
<a href="#cl-54"> 54</a>
<a href="#cl-55"> 55</a>
<a href="#cl-56"> 56</a>
<a href="#cl-57"> 57</a>
<a href="#cl-58"> 58</a>
<a href="#cl-59"> 59</a>
<a href="#cl-60"> 60</a>
<a href="#cl-61"> 61</a>
<a href="#cl-62"> 62</a>
<a href="#cl-63"> 63</a>
<a href="#cl-64"> 64</a>
<a href="#cl-65"> 65</a>
<a href="#cl-66"> 66</a>
<a href="#cl-67"> 67</a>
<a href="#cl-68"> 68</a>
<a href="#cl-69"> 69</a>
<a href="#cl-70"> 70</a>
<a href="#cl-71"> 71</a>
<a href="#cl-72"> 72</a>
<a href="#cl-73"> 73</a>
<a href="#cl-74"> 74</a>
<a href="#cl-75"> 75</a>
<a href="#cl-76"> 76</a>
<a href="#cl-77"> 77</a>
<a href="#cl-78"> 78</a>
<a href="#cl-79"> 79</a>
<a href="#cl-80"> 80</a>
<a href="#cl-81"> 81</a>
<a href="#cl-82"> 82</a>
<a href="#cl-83"> 83</a>
<a href="#cl-84"> 84</a>
<a href="#cl-85"> 85</a>
<a href="#cl-86"> 86</a>
<a href="#cl-87"> 87</a>
<a href="#cl-88"> 88</a>
<a href="#cl-89"> 89</a>
<a href="#cl-90"> 90</a>
<a href="#cl-91"> 91</a>
<a href="#cl-92"> 92</a>
<a href="#cl-93"> 93</a>
<a href="#cl-94"> 94</a>
<a href="#cl-95"> 95</a>
<a href="#cl-96"> 96</a>
<a href="#cl-97"> 97</a>
<a href="#cl-98"> 98</a>
<a href="#cl-99"> 99</a>
<a href="#cl-100">100</a>
<a href="#cl-101">101</a>
<a href="#cl-102">102</a>
<a href="#cl-103">103</a>
<a href="#cl-104">104</a>
<a href="#cl-105">105</a>
<a href="#cl-106">106</a>
<a href="#cl-107">107</a>
<a href="#cl-108">108</a>
<a href="#cl-109">109</a>
<a href="#cl-110">110</a>
<a href="#cl-111">111</a>
<a href="#cl-112">112</a>
<a href="#cl-113">113</a>
<a href="#cl-114">114</a>
<a href="#cl-115">115</a>
<a href="#cl-116">116</a>
<a href="#cl-117">117</a>
<a href="#cl-118">118</a>
<a href="#cl-119">119</a>
<a href="#cl-120">120</a>
<a href="#cl-121">121</a>
<a href="#cl-122">122</a>
<a href="#cl-123">123</a>
<a href="#cl-124">124</a>
<a href="#cl-125">125</a>
<a href="#cl-126">126</a>
<a href="#cl-127">127</a>
<a href="#cl-128">128</a>
<a href="#cl-129">129</a>
<a href="#cl-130">130</a>
<a href="#cl-131">131</a>
<a href="#cl-132">132</a>
<a href="#cl-133">133</a>
<a href="#cl-134">134</a>
<a href="#cl-135">135</a>
<a href="#cl-136">136</a>
<a href="#cl-137">137</a>
<a href="#cl-138">138</a>
<a href="#cl-139">139</a>
<a href="#cl-140">140</a>
<a href="#cl-141">141</a>
<a href="#cl-142">142</a>
<a href="#cl-143">143</a>
<a href="#cl-144">144</a>
<a href="#cl-145">145</a>
<a href="#cl-146">146</a>
<a href="#cl-147">147</a>
<a href="#cl-148">148</a>
<a href="#cl-149">149</a>
<a href="#cl-150">150</a>
<a href="#cl-151">151</a>
<a href="#cl-152">152</a>
<a href="#cl-153">153</a>
<a href="#cl-154">154</a>
<a href="#cl-155">155</a>
<a href="#cl-156">156</a>
<a href="#cl-157">157</a>
<a href="#cl-158">158</a>
<a href="#cl-159">159</a>
<a href="#cl-160">160</a>
<a href="#cl-161">161</a>
<a href="#cl-162">162</a>
<a href="#cl-163">163</a>
<a href="#cl-164">164</a>
<a href="#cl-165">165</a>
<a href="#cl-166">166</a>
<a href="#cl-167">167</a>
<a href="#cl-168">168</a>
<a href="#cl-169">169</a>
<a href="#cl-170">170</a>
<a href="#cl-171">171</a>
<a href="#cl-172">172</a>
<a href="#cl-173">173</a>
<a href="#cl-174">174</a>
<a href="#cl-175">175</a>
<a href="#cl-176">176</a>
<a href="#cl-177">177</a>
<a href="#cl-178">178</a>
<a href="#cl-179">179</a>
<a href="#cl-180">180</a>
<a href="#cl-181">181</a>
<a href="#cl-182">182</a>
<a href="#cl-183">183</a>
<a href="#cl-184">184</a>
<a href="#cl-185">185</a>
<a href="#cl-186">186</a>
<a href="#cl-187">187</a>
<a href="#cl-188">188</a>
<a href="#cl-189">189</a>
<a href="#cl-190">190</a>
<a href="#cl-191">191</a>
<a href="#cl-192">192</a>
<a href="#cl-193">193</a>
<a href="#cl-194">194</a>
<a href="#cl-195">195</a>
<a href="#cl-196">196</a>
<a href="#cl-197">197</a>
<a href="#cl-198">198</a>
<a href="#cl-199">199</a>
<a href="#cl-200">200</a>
<a href="#cl-201">201</a>
<a href="#cl-202">202</a>
<a href="#cl-203">203</a>
<a href="#cl-204">204</a>
<a href="#cl-205">205</a>
<a href="#cl-206">206</a>
<a href="#cl-207">207</a>
<a href="#cl-208">208</a>
<a href="#cl-209">209</a>
<a href="#cl-210">210</a>
<a href="#cl-211">211</a>
<a href="#cl-212">212</a>
<a href="#cl-213">213</a>
<a href="#cl-214">214</a>
<a href="#cl-215">215</a>
<a href="#cl-216">216</a>
<a href="#cl-217">217</a>
<a href="#cl-218">218</a>
<a href="#cl-219">219</a>
<a href="#cl-220">220</a>
<a href="#cl-221">221</a>
<a href="#cl-222">222</a>
<a href="#cl-223">223</a>
<a href="#cl-224">224</a>
<a href="#cl-225">225</a>
<a href="#cl-226">226</a>
<a href="#cl-227">227</a>
<a href="#cl-228">228</a>
<a href="#cl-229">229</a>
<a href="#cl-230">230</a>
<a href="#cl-231">231</a>
<a href="#cl-232">232</a>
<a href="#cl-233">233</a>
<a href="#cl-234">234</a>
<a href="#cl-235">235</a>
<a href="#cl-236">236</a>
<a href="#cl-237">237</a>
<a href="#cl-238">238</a>
<a href="#cl-239">239</a>
<a href="#cl-240">240</a>
<a href="#cl-241">241</a>
<a href="#cl-242">242</a>
<a href="#cl-243">243</a>
<a href="#cl-244">244</a>
<a href="#cl-245">245</a>
<a href="#cl-246">246</a>
<a href="#cl-247">247</a>
<a href="#cl-248">248</a>
<a href="#cl-249">249</a>
<a href="#cl-250">250</a>
<a href="#cl-251">251</a>
<a href="#cl-252">252</a>
<a href="#cl-253">253</a>
<a href="#cl-254">254</a>
<a href="#cl-255">255</a>
<a href="#cl-256">256</a>
<a href="#cl-257">257</a>
<a href="#cl-258">258</a>
<a href="#cl-259">259</a>
<a href="#cl-260">260</a>
<a href="#cl-261">261</a>
<a href="#cl-262">262</a>
<a href="#cl-263">263</a>
<a href="#cl-264">264</a>
<a href="#cl-265">265</a>
<a href="#cl-266">266</a>
<a href="#cl-267">267</a>
<a href="#cl-268">268</a>
<a href="#cl-269">269</a>
<a href="#cl-270">270</a>
<a href="#cl-271">271</a>
<a href="#cl-272">272</a>
<a href="#cl-273">273</a>
<a href="#cl-274">274</a>
<a href="#cl-275">275</a>
<a href="#cl-276">276</a>
<a href="#cl-277">277</a>
<a href="#cl-278">278</a>
<a href="#cl-279">279</a>
<a href="#cl-280">280</a>
<a href="#cl-281">281</a>
<a href="#cl-282">282</a>
<a href="#cl-283">283</a>
<a href="#cl-284">284</a>
<a href="#cl-285">285</a>
<a href="#cl-286">286</a>
<a href="#cl-287">287</a>
<a href="#cl-288">288</a>
</pre></div></td><td class="code"><div class="highlight"><pre><a name="cl-1"></a><span class="c">#!/usr/bin/env python</span>
<a name="cl-2"></a><span class="c"># -*- coding: utf-8 -*-</span>
<a name="cl-3"></a>
<a name="cl-4"></a><span class="c"># Miville</span>
<a name="cl-5"></a><span class="c"># Copyright (C) 2008 Société des arts technologiques (SAT)</span>
<a name="cl-6"></a><span class="c"># http://www.sat.qc.ca</span>
<a name="cl-7"></a><span class="c"># All rights reserved.</span>
<a name="cl-8"></a><span class="c">#</span>
<a name="cl-9"></a><span class="c"># This file is free software: you can redistribute it and/or modify</span>
<a name="cl-10"></a><span class="c"># it under the terms of the GNU General Public License as published by</span>
<a name="cl-11"></a><span class="c"># the Free Software Foundation, either version 2 of the License, or</span>
<a name="cl-12"></a><span class="c"># (at your option) any later version.</span>
<a name="cl-13"></a><span class="c">#</span>
<a name="cl-14"></a><span class="c"># Miville is distributed in the hope that it will be useful,</span>
<a name="cl-15"></a><span class="c"># but WITHOUT ANY WARRANTY; without even the implied warranty of</span>
<a name="cl-16"></a><span class="c"># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the</span>
<a name="cl-17"></a><span class="c"># GNU General Public License for more details.</span>
<a name="cl-18"></a><span class="c">#</span>
<a name="cl-19"></a><span class="c"># You should have received a copy of the GNU General Public License</span>
<a name="cl-20"></a><span class="c"># along with Miville. If not, see <http://www.gnu.org/licenses/>.</span>
<a name="cl-21"></a>
<a name="cl-22"></a><span class="sd">"""</span>
<a name="cl-23"></a><span class="sd">This script parse a directory tree looking for python modules and packages and</span>
<a name="cl-24"></a><span class="sd">create ReST files appropriately to create code documentation with Sphinx.</span>
<a name="cl-25"></a><span class="sd">It also create a modules index. </span>
<a name="cl-26"></a><span class="sd">"""</span>
<a name="cl-27"></a>
<a name="cl-28"></a><span class="kn">import</span> <span class="nn">os</span>
<a name="cl-29"></a><span class="kn">import</span> <span class="nn">optparse</span>
<a name="cl-30"></a>
<a name="cl-31"></a>
<a name="cl-32"></a><span class="c"># automodule options</span>
<a name="cl-33"></a><span class="n">OPTIONS</span> <span class="o">=</span> <span class="p">[</span><span class="s">'members'</span><span class="p">,</span>
<a name="cl-34"></a> <span class="s">'undoc-members'</span><span class="p">,</span>
<a name="cl-35"></a><span class="c"># 'inherited-members', # disable because there's a bug in sphinx</span>
<a name="cl-36"></a> <span class="s">'show-inheritance'</span><span class="p">]</span>
<a name="cl-37"></a>
<a name="cl-38"></a>
<a name="cl-39"></a><span class="k">def</span> <span class="nf">create_file_name</span><span class="p">(</span><span class="n">base</span><span class="p">,</span> <span class="n">opts</span><span class="p">):</span>
<a name="cl-40"></a> <span class="sd">"""Create file name from base name, path and suffix"""</span>
<a name="cl-41"></a> <span class="k">return</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">opts</span><span class="o">.</span><span class="n">destdir</span><span class="p">,</span> <span class="s">"</span><span class="si">%s</span><span class="s">.</span><span class="si">%s</span><span class="s">"</span> <span class="o">%</span> <span class="p">(</span><span class="n">base</span><span class="p">,</span> <span class="n">opts</span><span class="o">.</span><span class="n">suffix</span><span class="p">))</span>
<a name="cl-42"></a>
<a name="cl-43"></a><span class="k">def</span> <span class="nf">write_directive</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">package</span><span class="o">=</span><span class="bp">None</span><span class="p">):</span>
<a name="cl-44"></a> <span class="sd">"""Create the automodule directive and add the options"""</span>
<a name="cl-45"></a> <span class="k">if</span> <span class="n">package</span><span class="p">:</span>
<a name="cl-46"></a> <span class="n">directive</span> <span class="o">=</span> <span class="s">'.. automodule:: </span><span class="si">%s</span><span class="s">.</span><span class="si">%s</span><span class="se">\n</span><span class="s">'</span> <span class="o">%</span> <span class="p">(</span><span class="n">package</span><span class="p">,</span> <span class="n">module</span><span class="p">)</span>
<a name="cl-47"></a> <span class="k">else</span><span class="p">:</span>
<a name="cl-48"></a> <span class="n">directive</span> <span class="o">=</span> <span class="s">'.. automodule:: </span><span class="si">%s</span><span class="se">\n</span><span class="s">'</span> <span class="o">%</span> <span class="n">module</span>
<a name="cl-49"></a> <span class="k">for</span> <span class="n">option</span> <span class="ow">in</span> <span class="n">OPTIONS</span><span class="p">:</span>
<a name="cl-50"></a> <span class="n">directive</span> <span class="o">+=</span> <span class="s">' :</span><span class="si">%s</span><span class="s">:</span><span class="se">\n</span><span class="s">'</span> <span class="o">%</span> <span class="n">option</span>
<a name="cl-51"></a> <span class="k">return</span> <span class="n">directive</span>
<a name="cl-52"></a>
<a name="cl-53"></a><span class="k">def</span> <span class="nf">write_heading</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">kind</span><span class="o">=</span><span class="s">'Module'</span><span class="p">):</span>
<a name="cl-54"></a> <span class="sd">"""Create the page heading."""</span>
<a name="cl-55"></a> <span class="n">module</span> <span class="o">=</span> <span class="n">module</span><span class="o">.</span><span class="n">title</span><span class="p">()</span>
<a name="cl-56"></a> <span class="n">heading</span> <span class="o">=</span> <span class="n">title_line</span><span class="p">(</span><span class="n">module</span> <span class="o">+</span> <span class="s">' Documentation'</span><span class="p">,</span> <span class="s">'='</span><span class="p">)</span>
<a name="cl-57"></a> <span class="n">heading</span> <span class="o">+=</span> <span class="s">'This page contains the </span><span class="si">%s</span><span class="s"> </span><span class="si">%s</span><span class="s"> documentation.</span><span class="se">\n\n</span><span class="s">'</span> <span class="o">%</span> <span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">kind</span><span class="p">)</span>
<a name="cl-58"></a> <span class="k">return</span> <span class="n">heading</span>
<a name="cl-59"></a>
<a name="cl-60"></a><span class="k">def</span> <span class="nf">write_sub</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">kind</span><span class="o">=</span><span class="s">'Module'</span><span class="p">):</span>
<a name="cl-61"></a> <span class="sd">"""Create the module subtitle"""</span>
<a name="cl-62"></a> <span class="n">sub</span> <span class="o">=</span> <span class="n">title_line</span><span class="p">(</span><span class="s">'The :mod:`</span><span class="si">%s</span><span class="s">` </span><span class="si">%s</span><span class="s">'</span> <span class="o">%</span> <span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">kind</span><span class="p">),</span> <span class="s">'-'</span><span class="p">)</span>
<a name="cl-63"></a> <span class="k">return</span> <span class="n">sub</span>
<a name="cl-64"></a>
<a name="cl-65"></a><span class="k">def</span> <span class="nf">title_line</span><span class="p">(</span><span class="n">title</span><span class="p">,</span> <span class="n">char</span><span class="p">):</span>
<a name="cl-66"></a> <span class="sd">""" Underline the title with the character pass, with the right length."""</span>
<a name="cl-67"></a> <span class="k">return</span> <span class="s">'</span><span class="si">%s</span><span class="se">\n</span><span class="si">%s</span><span class="se">\n\n</span><span class="s">'</span> <span class="o">%</span> <span class="p">(</span><span class="n">title</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">title</span><span class="p">)</span> <span class="o">*</span> <span class="n">char</span><span class="p">)</span>
<a name="cl-68"></a>
<a name="cl-69"></a><span class="k">def</span> <span class="nf">create_module_file</span><span class="p">(</span><span class="n">package</span><span class="p">,</span> <span class="n">module</span><span class="p">,</span> <span class="n">opts</span><span class="p">):</span>
<a name="cl-70"></a> <span class="sd">"""Build the text of the file and write the file."""</span>
<a name="cl-71"></a> <span class="n">name</span> <span class="o">=</span> <span class="n">create_file_name</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">opts</span><span class="p">)</span>
<a name="cl-72"></a> <span class="k">if</span> <span class="ow">not</span> <span class="n">opts</span><span class="o">.</span><span class="n">force</span> <span class="ow">and</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">isfile</span><span class="p">(</span><span class="n">name</span><span class="p">):</span>
<a name="cl-73"></a> <span class="k">print</span> <span class="s">'File </span><span class="si">%s</span><span class="s"> already exists.'</span> <span class="o">%</span> <span class="n">name</span>
<a name="cl-74"></a> <span class="k">else</span><span class="p">:</span>
<a name="cl-75"></a> <span class="k">print</span> <span class="s">'Creating file </span><span class="si">%s</span><span class="s"> (module).'</span> <span class="o">%</span> <span class="n">name</span>
<a name="cl-76"></a> <span class="n">text</span> <span class="o">=</span> <span class="n">write_heading</span><span class="p">(</span><span class="n">module</span><span class="p">)</span>
<a name="cl-77"></a> <span class="n">text</span> <span class="o">+=</span> <span class="n">write_sub</span><span class="p">(</span><span class="n">module</span><span class="p">)</span>
<a name="cl-78"></a> <span class="n">text</span> <span class="o">+=</span> <span class="n">write_directive</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">package</span><span class="p">)</span>
<a name="cl-79"></a>
<a name="cl-80"></a> <span class="c"># write the file</span>
<a name="cl-81"></a> <span class="k">if</span> <span class="ow">not</span> <span class="n">opts</span><span class="o">.</span><span class="n">dryrun</span><span class="p">:</span>
<a name="cl-82"></a> <span class="n">fd</span> <span class="o">=</span> <span class="nb">open</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="s">'w'</span><span class="p">)</span>
<a name="cl-83"></a> <span class="n">fd</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="n">text</span><span class="p">)</span>
<a name="cl-84"></a> <span class="n">fd</span><span class="o">.</span><span class="n">close</span><span class="p">()</span>
<a name="cl-85"></a>
<a name="cl-86"></a><span class="k">def</span> <span class="nf">create_package_file</span><span class="p">(</span><span class="n">root</span><span class="p">,</span> <span class="n">master_package</span><span class="p">,</span> <span class="n">subroot</span><span class="p">,</span> <span class="n">py_files</span><span class="p">,</span> <span class="n">opts</span><span class="p">,</span> <span class="n">subs</span><span class="o">=</span><span class="bp">None</span><span class="p">):</span>
<a name="cl-87"></a> <span class="sd">"""Build the text of the file and write the file."""</span>
<a name="cl-88"></a> <span class="n">package</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="n">root</span><span class="p">)[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span><span class="o">.</span><span class="n">lower</span><span class="p">()</span>
<a name="cl-89"></a> <span class="n">name</span> <span class="o">=</span> <span class="n">create_file_name</span><span class="p">(</span><span class="n">subroot</span><span class="p">,</span> <span class="n">opts</span><span class="p">)</span>
<a name="cl-90"></a> <span class="k">if</span> <span class="ow">not</span> <span class="n">opts</span><span class="o">.</span><span class="n">force</span> <span class="ow">and</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">isfile</span><span class="p">(</span><span class="n">name</span><span class="p">):</span>
<a name="cl-91"></a> <span class="k">print</span> <span class="s">'File </span><span class="si">%s</span><span class="s"> already exists.'</span> <span class="o">%</span> <span class="n">name</span>
<a name="cl-92"></a> <span class="k">else</span><span class="p">:</span>
<a name="cl-93"></a> <span class="k">print</span> <span class="s">'Creating file </span><span class="si">%s</span><span class="s"> (package).'</span> <span class="o">%</span> <span class="n">name</span>
<a name="cl-94"></a> <span class="n">text</span> <span class="o">=</span> <span class="n">write_heading</span><span class="p">(</span><span class="n">package</span><span class="p">,</span> <span class="s">'Package'</span><span class="p">)</span>
<a name="cl-95"></a> <span class="k">if</span> <span class="n">subs</span> <span class="o">==</span> <span class="bp">None</span><span class="p">:</span>
<a name="cl-96"></a> <span class="n">subs</span> <span class="o">=</span> <span class="p">[]</span>
<a name="cl-97"></a> <span class="k">else</span><span class="p">:</span>
<a name="cl-98"></a> <span class="c"># build a list of directories that are package (they contain an __init_.py file)</span>
<a name="cl-99"></a> <span class="n">subs</span> <span class="o">=</span> <span class="p">[</span><span class="n">sub</span> <span class="k">for</span> <span class="n">sub</span> <span class="ow">in</span> <span class="n">subs</span> <span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">isfile</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">root</span><span class="p">,</span> <span class="n">sub</span><span class="p">,</span> <span class="s">'__init__.py'</span><span class="p">))]</span>
<a name="cl-100"></a> <span class="c"># if there's some package directories, add a TOC for theses subpackages</span>
<a name="cl-101"></a> <span class="k">if</span> <span class="n">subs</span><span class="p">:</span>
<a name="cl-102"></a> <span class="n">text</span> <span class="o">+=</span> <span class="n">title_line</span><span class="p">(</span><span class="s">'Subpackages'</span><span class="p">,</span> <span class="s">'-'</span><span class="p">)</span>
<a name="cl-103"></a> <span class="n">text</span> <span class="o">+=</span> <span class="s">'.. toctree::</span><span class="se">\n\n</span><span class="s">'</span>
<a name="cl-104"></a> <span class="k">for</span> <span class="n">sub</span> <span class="ow">in</span> <span class="n">subs</span><span class="p">:</span>
<a name="cl-105"></a> <span class="n">text</span> <span class="o">+=</span> <span class="s">' </span><span class="si">%s</span><span class="s">.</span><span class="si">%s</span><span class="se">\n</span><span class="s">'</span> <span class="o">%</span> <span class="p">(</span><span class="n">subroot</span><span class="p">,</span> <span class="n">sub</span><span class="p">)</span>
<a name="cl-106"></a> <span class="n">text</span> <span class="o">+=</span> <span class="s">'</span><span class="se">\n</span><span class="s">'</span>
<a name="cl-107"></a>
<a name="cl-108"></a> <span class="c"># add each package's module</span>
<a name="cl-109"></a> <span class="k">for</span> <span class="n">py_file</span> <span class="ow">in</span> <span class="n">py_files</span><span class="p">:</span>
<a name="cl-110"></a> <span class="k">if</span> <span class="ow">not</span> <span class="n">check_for_code</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">root</span><span class="p">,</span> <span class="n">py_file</span><span class="p">)):</span>
<a name="cl-111"></a> <span class="c"># don't build the file if there's no code in it</span>
<a name="cl-112"></a> <span class="k">continue</span>
<a name="cl-113"></a> <span class="n">py_file</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">splitext</span><span class="p">(</span><span class="n">py_file</span><span class="p">)[</span><span class="mi">0</span><span class="p">]</span>
<a name="cl-114"></a> <span class="n">py_path</span> <span class="o">=</span> <span class="s">'</span><span class="si">%s</span><span class="s">.</span><span class="si">%s</span><span class="s">'</span> <span class="o">%</span> <span class="p">(</span><span class="n">subroot</span><span class="p">,</span> <span class="n">py_file</span><span class="p">)</span>
<a name="cl-115"></a> <span class="n">kind</span> <span class="o">=</span> <span class="s">"Module"</span>
<a name="cl-116"></a> <span class="k">if</span> <span class="n">py_file</span> <span class="o">==</span> <span class="s">'__init__'</span><span class="p">:</span>
<a name="cl-117"></a> <span class="n">kind</span> <span class="o">=</span> <span class="s">"Package"</span>
<a name="cl-118"></a> <span class="n">text</span> <span class="o">+=</span> <span class="n">write_sub</span><span class="p">(</span><span class="n">kind</span> <span class="o">==</span> <span class="s">'Package'</span> <span class="ow">and</span> <span class="n">package</span> <span class="ow">or</span> <span class="n">py_file</span><span class="p">,</span> <span class="n">kind</span><span class="p">)</span>
<a name="cl-119"></a> <span class="n">text</span> <span class="o">+=</span> <span class="n">write_directive</span><span class="p">(</span><span class="n">kind</span> <span class="o">==</span> <span class="s">"Package"</span> <span class="ow">and</span> <span class="n">subroot</span> <span class="ow">or</span> <span class="n">py_path</span><span class="p">,</span> <span class="n">master_package</span><span class="p">)</span>
<a name="cl-120"></a> <span class="n">text</span> <span class="o">+=</span> <span class="s">'</span><span class="se">\n</span><span class="s">'</span>
<a name="cl-121"></a>
<a name="cl-122"></a> <span class="c"># write the file</span>
<a name="cl-123"></a> <span class="k">if</span> <span class="ow">not</span> <span class="n">opts</span><span class="o">.</span><span class="n">dryrun</span><span class="p">:</span>
<a name="cl-124"></a> <span class="n">fd</span> <span class="o">=</span> <span class="nb">open</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="s">'w'</span><span class="p">)</span>
<a name="cl-125"></a> <span class="n">fd</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="n">text</span><span class="p">)</span>
<a name="cl-126"></a> <span class="n">fd</span><span class="o">.</span><span class="n">close</span><span class="p">()</span>
<a name="cl-127"></a>
<a name="cl-128"></a><span class="k">def</span> <span class="nf">check_for_code</span><span class="p">(</span><span class="n">module</span><span class="p">):</span>
<a name="cl-129"></a> <span class="sd">"""</span>
<a name="cl-130"></a><span class="sd"> Check if there's at least one class or one function in the module.</span>
<a name="cl-131"></a><span class="sd"> """</span>
<a name="cl-132"></a> <span class="n">fd</span> <span class="o">=</span> <span class="nb">open</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="s">'r'</span><span class="p">)</span>
<a name="cl-133"></a> <span class="k">for</span> <span class="n">line</span> <span class="ow">in</span> <span class="n">fd</span><span class="p">:</span>
<a name="cl-134"></a> <span class="k">if</span> <span class="n">line</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s">'def '</span><span class="p">)</span> <span class="ow">or</span> <span class="n">line</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s">'class '</span><span class="p">):</span>
<a name="cl-135"></a> <span class="n">fd</span><span class="o">.</span><span class="n">close</span><span class="p">()</span>
<a name="cl-136"></a> <span class="k">return</span> <span class="bp">True</span>
<a name="cl-137"></a> <span class="n">fd</span><span class="o">.</span><span class="n">close</span><span class="p">()</span>
<a name="cl-138"></a> <span class="k">return</span> <span class="bp">False</span>
<a name="cl-139"></a>
<a name="cl-140"></a><span class="k">def</span> <span class="nf">recurse_tree</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="n">excludes</span><span class="p">,</span> <span class="n">opts</span><span class="p">):</span>
<a name="cl-141"></a> <span class="sd">"""</span>
<a name="cl-142"></a><span class="sd"> Look for every file in the directory tree and create the corresponding</span>
<a name="cl-143"></a><span class="sd"> ReST files.</span>
<a name="cl-144"></a><span class="sd"> """</span>
<a name="cl-145"></a> <span class="n">package_name</span> <span class="o">=</span> <span class="bp">None</span>
<a name="cl-146"></a> <span class="c"># check if the base directory is a package and get its name</span>
<a name="cl-147"></a> <span class="k">if</span> <span class="s">'__init__.py'</span> <span class="ow">in</span> <span class="n">os</span><span class="o">.</span><span class="n">listdir</span><span class="p">(</span><span class="n">path</span><span class="p">):</span>
<a name="cl-148"></a> <span class="n">package_name</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">abspath</span><span class="p">(</span><span class="n">path</span><span class="p">)</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">sep</span><span class="p">)[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<a name="cl-149"></a>
<a name="cl-150"></a> <span class="n">toc</span> <span class="o">=</span> <span class="p">[]</span>
<a name="cl-151"></a> <span class="n">excludes</span> <span class="o">=</span> <span class="n">format_excludes</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="n">excludes</span><span class="p">)</span>
<a name="cl-152"></a> <span class="n">tree</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">walk</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="bp">False</span><span class="p">)</span>
<a name="cl-153"></a> <span class="k">for</span> <span class="n">root</span><span class="p">,</span> <span class="n">subs</span><span class="p">,</span> <span class="n">files</span> <span class="ow">in</span> <span class="n">tree</span><span class="p">:</span>
<a name="cl-154"></a> <span class="c"># keep only the Python script files</span>
<a name="cl-155"></a> <span class="n">py_files</span> <span class="o">=</span> <span class="n">check_py_file</span><span class="p">(</span><span class="n">files</span><span class="p">)</span>
<a name="cl-156"></a> <span class="c"># remove hidden ('.') and private ('_') directories</span>
<a name="cl-157"></a> <span class="n">subs</span> <span class="o">=</span> <span class="p">[</span><span class="n">sub</span> <span class="k">for</span> <span class="n">sub</span> <span class="ow">in</span> <span class="n">subs</span> <span class="k">if</span> <span class="n">sub</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="ow">not</span> <span class="ow">in</span> <span class="p">[</span><span class="s">'.'</span><span class="p">,</span> <span class="s">'_'</span><span class="p">]]</span>
<a name="cl-158"></a> <span class="c"># check if there's valid files to process</span>
<a name="cl-159"></a> <span class="c"># TODO: could add check for windows hidden files</span>
<a name="cl-160"></a> <span class="k">if</span> <span class="s">"/."</span> <span class="ow">in</span> <span class="n">root</span> <span class="ow">or</span> <span class="s">"/_"</span> <span class="ow">in</span> <span class="n">root</span> \
<a name="cl-161"></a> <span class="ow">or</span> <span class="ow">not</span> <span class="n">py_files</span> \
<a name="cl-162"></a> <span class="ow">or</span> <span class="n">check_excludes</span><span class="p">(</span><span class="n">root</span><span class="p">,</span> <span class="n">excludes</span><span class="p">):</span>
<a name="cl-163"></a> <span class="k">continue</span>
<a name="cl-164"></a> <span class="n">subroot</span> <span class="o">=</span> <span class="n">root</span><span class="p">[</span><span class="nb">len</span><span class="p">(</span><span class="n">path</span><span class="p">):]</span><span class="o">.</span><span class="n">lstrip</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">sep</span><span class="p">)</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">sep</span><span class="p">,</span> <span class="s">'.'</span><span class="p">)</span>
<a name="cl-165"></a> <span class="k">if</span> <span class="n">root</span> <span class="o">==</span> <span class="n">path</span><span class="p">:</span>
<a name="cl-166"></a> <span class="c"># we are at the root level so we create only modules</span>
<a name="cl-167"></a> <span class="k">for</span> <span class="n">py_file</span> <span class="ow">in</span> <span class="n">py_files</span><span class="p">:</span>
<a name="cl-168"></a> <span class="n">module</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">splitext</span><span class="p">(</span><span class="n">py_file</span><span class="p">)[</span><span class="mi">0</span><span class="p">]</span>
<a name="cl-169"></a> <span class="c"># add the module if it contains code</span>
<a name="cl-170"></a> <span class="k">if</span> <span class="n">check_for_code</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="s">'</span><span class="si">%s</span><span class="s">.py'</span> <span class="o">%</span> <span class="n">module</span><span class="p">)):</span>
<a name="cl-171"></a> <span class="n">create_module_file</span><span class="p">(</span><span class="n">package_name</span><span class="p">,</span> <span class="n">module</span><span class="p">,</span> <span class="n">opts</span><span class="p">)</span>
<a name="cl-172"></a> <span class="n">toc</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">module</span><span class="p">)</span>
<a name="cl-173"></a> <span class="k">elif</span> <span class="ow">not</span> <span class="n">subs</span> <span class="ow">and</span> <span class="s">"__init__.py"</span> <span class="ow">in</span> <span class="n">py_files</span><span class="p">:</span>
<a name="cl-174"></a> <span class="c"># we are in a package without sub package</span>
<a name="cl-175"></a> <span class="c"># check if there's only an __init__.py file</span>
<a name="cl-176"></a> <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">py_files</span><span class="p">)</span> <span class="o">==</span> <span class="mi">1</span><span class="p">:</span>
<a name="cl-177"></a> <span class="c"># check if there's code in the __init__.py file</span>
<a name="cl-178"></a> <span class="k">if</span> <span class="n">check_for_code</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">root</span><span class="p">,</span> <span class="s">'__init__.py'</span><span class="p">)):</span>
<a name="cl-179"></a> <span class="n">create_package_file</span><span class="p">(</span><span class="n">root</span><span class="p">,</span> <span class="n">package_name</span><span class="p">,</span> <span class="n">subroot</span><span class="p">,</span> <span class="n">py_files</span><span class="p">,</span> <span class="n">opts</span><span class="o">=</span><span class="n">opts</span><span class="p">)</span>
<a name="cl-180"></a> <span class="n">toc</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">subroot</span><span class="p">)</span>
<a name="cl-181"></a> <span class="k">else</span><span class="p">:</span>
<a name="cl-182"></a> <span class="n">create_package_file</span><span class="p">(</span><span class="n">root</span><span class="p">,</span> <span class="n">package_name</span><span class="p">,</span> <span class="n">subroot</span><span class="p">,</span> <span class="n">py_files</span><span class="p">,</span> <span class="n">opts</span><span class="o">=</span><span class="n">opts</span><span class="p">)</span>
<a name="cl-183"></a> <span class="n">toc</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">subroot</span><span class="p">)</span>
<a name="cl-184"></a> <span class="k">elif</span> <span class="s">"__init__.py"</span> <span class="ow">in</span> <span class="n">py_files</span><span class="p">:</span>
<a name="cl-185"></a> <span class="c"># we are in package with subpackage(s)</span>
<a name="cl-186"></a> <span class="n">create_package_file</span><span class="p">(</span><span class="n">root</span><span class="p">,</span> <span class="n">package_name</span><span class="p">,</span> <span class="n">subroot</span><span class="p">,</span> <span class="n">py_files</span><span class="p">,</span> <span class="n">opts</span><span class="p">,</span> <span class="n">subs</span><span class="p">)</span>
<a name="cl-187"></a> <span class="n">toc</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">subroot</span><span class="p">)</span>
<a name="cl-188"></a>
<a name="cl-189"></a> <span class="c"># create the module's index</span>
<a name="cl-190"></a> <span class="k">if</span> <span class="ow">not</span> <span class="n">opts</span><span class="o">.</span><span class="n">notoc</span><span class="p">:</span>
<a name="cl-191"></a> <span class="n">modules_toc</span><span class="p">(</span><span class="n">toc</span><span class="p">,</span> <span class="n">opts</span><span class="p">)</span>
<a name="cl-192"></a>
<a name="cl-193"></a><span class="k">def</span> <span class="nf">modules_toc</span><span class="p">(</span><span class="n">modules</span><span class="p">,</span> <span class="n">opts</span><span class="p">,</span> <span class="n">name</span><span class="o">=</span><span class="s">'modules'</span><span class="p">):</span>
<a name="cl-194"></a> <span class="sd">"""</span>
<a name="cl-195"></a><span class="sd"> Create the module's index.</span>
<a name="cl-196"></a><span class="sd"> """</span>
<a name="cl-197"></a> <span class="n">fname</span> <span class="o">=</span> <span class="n">create_file_name</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">opts</span><span class="p">)</span>
<a name="cl-198"></a> <span class="k">if</span> <span class="ow">not</span> <span class="n">opts</span><span class="o">.</span><span class="n">force</span> <span class="ow">and</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span><span class="n">fname</span><span class="p">):</span>
<a name="cl-199"></a> <span class="k">print</span> <span class="s">"File </span><span class="si">%s</span><span class="s"> already exists."</span> <span class="o">%</span> <span class="n">name</span>
<a name="cl-200"></a> <span class="k">return</span>
<a name="cl-201"></a>
<a name="cl-202"></a> <span class="k">print</span> <span class="s">"Creating module's index modules.txt."</span>
<a name="cl-203"></a> <span class="n">text</span> <span class="o">=</span> <span class="n">write_heading</span><span class="p">(</span><span class="n">opts</span><span class="o">.</span><span class="n">header</span><span class="p">,</span> <span class="s">'Modules'</span><span class="p">)</span>
<a name="cl-204"></a> <span class="n">text</span> <span class="o">+=</span> <span class="n">title_line</span><span class="p">(</span><span class="s">'Modules:'</span><span class="p">,</span> <span class="s">'-'</span><span class="p">)</span>
<a name="cl-205"></a> <span class="n">text</span> <span class="o">+=</span> <span class="s">'.. toctree::</span><span class="se">\n</span><span class="s">'</span>
<a name="cl-206"></a> <span class="n">text</span> <span class="o">+=</span> <span class="s">' :maxdepth: </span><span class="si">%s</span><span class="se">\n\n</span><span class="s">'</span> <span class="o">%</span> <span class="n">opts</span><span class="o">.</span><span class="n">maxdepth</span>
<a name="cl-207"></a>
<a name="cl-208"></a> <span class="n">modules</span><span class="o">.</span><span class="n">sort</span><span class="p">()</span>
<a name="cl-209"></a> <span class="n">prev_module</span> <span class="o">=</span> <span class="s">''</span>
<a name="cl-210"></a> <span class="k">for</span> <span class="n">module</span> <span class="ow">in</span> <span class="n">modules</span><span class="p">:</span>
<a name="cl-211"></a> <span class="c"># look if the module is a subpackage and, if yes, ignore it</span>
<a name="cl-212"></a> <span class="k">if</span> <span class="n">module</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="n">prev_module</span> <span class="o">+</span> <span class="s">'.'</span><span class="p">):</span>
<a name="cl-213"></a> <span class="k">continue</span>
<a name="cl-214"></a> <span class="n">prev_module</span> <span class="o">=</span> <span class="n">module</span>
<a name="cl-215"></a> <span class="n">text</span> <span class="o">+=</span> <span class="s">' </span><span class="si">%s</span><span class="se">\n</span><span class="s">'</span> <span class="o">%</span> <span class="n">module</span>
<a name="cl-216"></a>
<a name="cl-217"></a> <span class="c"># write the file</span>
<a name="cl-218"></a> <span class="k">if</span> <span class="ow">not</span> <span class="n">opts</span><span class="o">.</span><span class="n">dryrun</span><span class="p">:</span>
<a name="cl-219"></a> <span class="n">fd</span> <span class="o">=</span> <span class="nb">open</span><span class="p">(</span><span class="n">fname</span><span class="p">,</span> <span class="s">'w'</span><span class="p">)</span>
<a name="cl-220"></a> <span class="n">fd</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="n">text</span><span class="p">)</span>
<a name="cl-221"></a> <span class="n">fd</span><span class="o">.</span><span class="n">close</span><span class="p">()</span>
<a name="cl-222"></a>
<a name="cl-223"></a><span class="k">def</span> <span class="nf">format_excludes</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="n">excludes</span><span class="p">):</span>
<a name="cl-224"></a> <span class="sd">"""</span>
<a name="cl-225"></a><span class="sd"> Format the excluded directory list.</span>
<a name="cl-226"></a><span class="sd"> (verify that the path is not from the root of the volume or the root of the</span>
<a name="cl-227"></a><span class="sd"> package)</span>
<a name="cl-228"></a><span class="sd"> """</span>
<a name="cl-229"></a> <span class="n">f_excludes</span> <span class="o">=</span> <span class="p">[]</span>
<a name="cl-230"></a> <span class="k">for</span> <span class="n">exclude</span> <span class="ow">in</span> <span class="n">excludes</span><span class="p">:</span>
<a name="cl-231"></a> <span class="k">if</span> <span class="ow">not</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">isabs</span><span class="p">(</span><span class="n">exclude</span><span class="p">)</span> <span class="ow">and</span> <span class="n">exclude</span><span class="p">[:</span><span class="nb">len</span><span class="p">(</span><span class="n">path</span><span class="p">)]</span> <span class="o">!=</span> <span class="n">path</span><span class="p">:</span>
<a name="cl-232"></a> <span class="n">exclude</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="n">exclude</span><span class="p">)</span>
<a name="cl-233"></a> <span class="c"># remove trailing slash</span>
<a name="cl-234"></a> <span class="n">f_excludes</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">exclude</span><span class="o">.</span><span class="n">rstrip</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">sep</span><span class="p">))</span>
<a name="cl-235"></a> <span class="k">return</span> <span class="n">f_excludes</span>
<a name="cl-236"></a>
<a name="cl-237"></a><span class="k">def</span> <span class="nf">check_excludes</span><span class="p">(</span><span class="n">root</span><span class="p">,</span> <span class="n">excludes</span><span class="p">):</span>
<a name="cl-238"></a> <span class="sd">"""</span>
<a name="cl-239"></a><span class="sd"> Check if the directory is in the exclude list.</span>
<a name="cl-240"></a><span class="sd"> """</span>
<a name="cl-241"></a> <span class="k">for</span> <span class="n">exclude</span> <span class="ow">in</span> <span class="n">excludes</span><span class="p">:</span>
<a name="cl-242"></a> <span class="k">if</span> <span class="n">root</span><span class="p">[:</span><span class="nb">len</span><span class="p">(</span><span class="n">exclude</span><span class="p">)]</span> <span class="o">==</span> <span class="n">exclude</span><span class="p">:</span>
<a name="cl-243"></a> <span class="k">return</span> <span class="bp">True</span>
<a name="cl-244"></a> <span class="k">return</span> <span class="bp">False</span>
<a name="cl-245"></a>
<a name="cl-246"></a><span class="k">def</span> <span class="nf">check_py_file</span><span class="p">(</span><span class="n">files</span><span class="p">):</span>
<a name="cl-247"></a> <span class="sd">"""</span>
<a name="cl-248"></a><span class="sd"> Return a list with only the python scripts (remove all other files). </span>
<a name="cl-249"></a><span class="sd"> """</span>
<a name="cl-250"></a> <span class="n">py_files</span> <span class="o">=</span> <span class="p">[</span><span class="n">fich</span> <span class="k">for</span> <span class="n">fich</span> <span class="ow">in</span> <span class="n">files</span> <span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">splitext</span><span class="p">(</span><span class="n">fich</span><span class="p">)[</span><span class="mi">1</span><span class="p">]</span> <span class="o">==</span> <span class="s">'.py'</span><span class="p">]</span>
<a name="cl-251"></a> <span class="k">return</span> <span class="n">py_files</span>
<a name="cl-252"></a>
<a name="cl-253"></a>
<a name="cl-254"></a><span class="k">def</span> <span class="nf">main</span><span class="p">():</span>
<a name="cl-255"></a> <span class="sd">"""</span>
<a name="cl-256"></a><span class="sd"> Parse and check the command line arguments</span>
<a name="cl-257"></a><span class="sd"> """</span>
<a name="cl-258"></a> <span class="n">parser</span> <span class="o">=</span> <span class="n">optparse</span><span class="o">.</span><span class="n">OptionParser</span><span class="p">(</span><span class="n">usage</span><span class="o">=</span><span class="s">"""usage: %prog [options] <package path> [exclude paths, ...]</span>
<a name="cl-259"></a><span class="s"> </span>
<a name="cl-260"></a><span class="s">Note: By default this script will not overwrite already created files."""</span><span class="p">)</span>
<a name="cl-261"></a> <span class="n">parser</span><span class="o">.</span><span class="n">add_option</span><span class="p">(</span><span class="s">"-n"</span><span class="p">,</span> <span class="s">"--doc-header"</span><span class="p">,</span> <span class="n">action</span><span class="o">=</span><span class="s">"store"</span><span class="p">,</span> <span class="n">dest</span><span class="o">=</span><span class="s">"header"</span><span class="p">,</span> <span class="n">help</span><span class="o">=</span><span class="s">"Documentation Header (default=Project)"</span><span class="p">,</span> <span class="n">default</span><span class="o">=</span><span class="s">"Project"</span><span class="p">)</span>
<a name="cl-262"></a> <span class="n">parser</span><span class="o">.</span><span class="n">add_option</span><span class="p">(</span><span class="s">"-d"</span><span class="p">,</span> <span class="s">"--dest-dir"</span><span class="p">,</span> <span class="n">action</span><span class="o">=</span><span class="s">"store"</span><span class="p">,</span> <span class="n">dest</span><span class="o">=</span><span class="s">"destdir"</span><span class="p">,</span> <span class="n">help</span><span class="o">=</span><span class="s">"Output destination directory"</span><span class="p">,</span> <span class="n">default</span><span class="o">=</span><span class="s">""</span><span class="p">)</span>
<a name="cl-263"></a> <span class="n">parser</span><span class="o">.</span><span class="n">add_option</span><span class="p">(</span><span class="s">"-s"</span><span class="p">,</span> <span class="s">"--suffix"</span><span class="p">,</span> <span class="n">action</span><span class="o">=</span><span class="s">"store"</span><span class="p">,</span> <span class="n">dest</span><span class="o">=</span><span class="s">"suffix"</span><span class="p">,</span> <span class="n">help</span><span class="o">=</span><span class="s">"module suffix (default=txt)"</span><span class="p">,</span> <span class="n">default</span><span class="o">=</span><span class="s">"txt"</span><span class="p">)</span>
<a name="cl-264"></a> <span class="n">parser</span><span class="o">.</span><span class="n">add_option</span><span class="p">(</span><span class="s">"-m"</span><span class="p">,</span> <span class="s">"--maxdepth"</span><span class="p">,</span> <span class="n">action</span><span class="o">=</span><span class="s">"store"</span><span class="p">,</span> <span class="n">dest</span><span class="o">=</span><span class="s">"maxdepth"</span><span class="p">,</span> <span class="n">help</span><span class="o">=</span><span class="s">"Maximum depth of submodules to show in the TOC (default=4)"</span><span class="p">,</span> <span class="nb">type</span><span class="o">=</span><span class="s">"int"</span><span class="p">,</span> <span class="n">default</span><span class="o">=</span><span class="mi">4</span><span class="p">)</span>
<a name="cl-265"></a> <span class="n">parser</span><span class="o">.</span><span class="n">add_option</span><span class="p">(</span><span class="s">"-r"</span><span class="p">,</span> <span class="s">"--dry-run"</span><span class="p">,</span> <span class="n">action</span><span class="o">=</span><span class="s">"store_true"</span><span class="p">,</span> <span class="n">dest</span><span class="o">=</span><span class="s">"dryrun"</span><span class="p">,</span> <span class="n">help</span><span class="o">=</span><span class="s">"Run the script without creating the files"</span><span class="p">)</span>
<a name="cl-266"></a> <span class="n">parser</span><span class="o">.</span><span class="n">add_option</span><span class="p">(</span><span class="s">"-f"</span><span class="p">,</span> <span class="s">"--force"</span><span class="p">,</span> <span class="n">action</span><span class="o">=</span><span class="s">"store_true"</span><span class="p">,</span> <span class="n">dest</span><span class="o">=</span><span class="s">"force"</span><span class="p">,</span> <span class="n">help</span><span class="o">=</span><span class="s">"Overwrite all the files"</span><span class="p">)</span>
<a name="cl-267"></a> <span class="n">parser</span><span class="o">.</span><span class="n">add_option</span><span class="p">(</span><span class="s">"-t"</span><span class="p">,</span> <span class="s">"--no-toc"</span><span class="p">,</span> <span class="n">action</span><span class="o">=</span><span class="s">"store_true"</span><span class="p">,</span> <span class="n">dest</span><span class="o">=</span><span class="s">"notoc"</span><span class="p">,</span> <span class="n">help</span><span class="o">=</span><span class="s">"Don't create the table of content file"</span><span class="p">)</span>
<a name="cl-268"></a> <span class="p">(</span><span class="n">opts</span><span class="p">,</span> <span class="n">args</span><span class="p">)</span> <span class="o">=</span> <span class="n">parser</span><span class="o">.</span><span class="n">parse_args</span><span class="p">()</span>
<a name="cl-269"></a> <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">args</span><span class="p">)</span> <span class="o"><</span> <span class="mi">1</span><span class="p">:</span>
<a name="cl-270"></a> <span class="n">parser</span><span class="o">.</span><span class="n">error</span><span class="p">(</span><span class="s">"package path is required."</span><span class="p">)</span>
<a name="cl-271"></a> <span class="k">else</span><span class="p">:</span>
<a name="cl-272"></a> <span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">isdir</span><span class="p">(</span><span class="n">args</span><span class="p">[</span><span class="mi">0</span><span class="p">]):</span>
<a name="cl-273"></a> <span class="c"># check if the output destination is a valid directory</span>
<a name="cl-274"></a> <span class="k">if</span> <span class="n">opts</span><span class="o">.</span><span class="n">destdir</span> <span class="ow">and</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">isdir</span><span class="p">(</span><span class="n">opts</span><span class="o">.</span><span class="n">destdir</span><span class="p">):</span>
<a name="cl-275"></a> <span class="c"># if there's some exclude arguments, build the list of excludes</span>
<a name="cl-276"></a> <span class="n">excludes</span> <span class="o">=</span> <span class="n">args</span><span class="p">[</span><span class="mi">1</span><span class="p">:]</span>
<a name="cl-277"></a> <span class="n">recurse_tree</span><span class="p">(</span><span class="n">args</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">excludes</span><span class="p">,</span> <span class="n">opts</span><span class="p">)</span>
<a name="cl-278"></a> <span class="k">else</span><span class="p">:</span>
<a name="cl-279"></a> <span class="k">print</span> <span class="s">'</span><span class="si">%s</span><span class="s"> is not a valid output destination directory.'</span> <span class="o">%</span> <span class="n">opts</span><span class="o">.</span><span class="n">destdir</span>
<a name="cl-280"></a> <span class="k">else</span><span class="p">:</span>
<a name="cl-281"></a> <span class="k">print</span> <span class="s">'</span><span class="si">%s</span><span class="s"> is not a valid directory.'</span> <span class="o">%</span> <span class="n">args</span>
<a name="cl-282"></a>
<a name="cl-283"></a>
<a name="cl-284"></a>
<a name="cl-285"></a>
<a name="cl-286"></a><span class="k">if</span> <span class="n">__name__</span> <span class="o">==</span> <span class="s">'__main__'</span><span class="p">:</span>
<a name="cl-287"></a> <span class="n">main</span><span class="p">()</span>
<a name="cl-288"></a>
</pre></div>
</td></tr></table>
</div>
<div class="cb"></div>
</div>
<div class="cb footer-placeholder"></div>
</div>
<div id="footer-wrapper">
<div id="footer">
<a href="/site/terms/">TOS</a> | <a href="/site/privacy/">Privacy Policy</a> | <a href="http://blog.bitbucket.org/">Blog</a> | <a href="http://bitbucket.org/jespern/bitbucket/issues/new/">Report Bug</a> | <a href="http://groups.google.com/group/bitbucket-users">Discuss</a> | <a href="http://avantlumiere.com/">© 2008-2010</a>
| We run <small><b>
<a href="http://www.djangoproject.com/">Django 1.2.3</a> /
<a href="http://bitbucket.org/jespern/django-piston/">Piston 0.2.3rc1</a> /
<a href="http://www.selenic.com/mercurial/">Hg 1.6</a> /
<a href="http://www.python.org">Python 2.7.0</a> /
r3172| bitbucket01
</b></small>
</div>
</div>
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-2456069-3'], ['_trackPageview']);
var _gaq = _gaq || [];
_gaq.push(['atl._setAccount', 'UA-6032469-33'], ['atl._trackPageview']);
(function() {
var ga = document.createElement('script');
ga.src = ('https:' == document.location.protocol ? 'https://ssl' :
'http://www') + '.google-analytics.com/ga.js';
ga.setAttribute('async', 'true');
document.documentElement.firstChild.appendChild(ga);
})();
</script>
</body>
</html>
| lgpl-3.0 |
risicle/Axelrod | axelrod/tests/unit/test_qlearner.py | 3 | 9415 | """Test for the qlearner strategy."""
import random
import axelrod
from axelrod import simulate_play, Game
from .test_player import TestPlayer, test_responses
# Shorthand aliases for the two possible plays: 'C' (cooperate) and 'D' (defect).
C, D = 'C', 'D'
class TestRiskyQLearner(TestPlayer):
    """Tests for the Risky QLearner strategy."""

    name = 'Risky QLearner'
    player = axelrod.RiskyQLearner
    expected_classifier = {
        'memory_depth': float('inf'),
        'stochastic': True,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_payoff_matrix(self):
        """The learner's payoff matrix mirrors the game's R/P/S/T values."""
        (R, P, S, T) = Game().RPST()
        expected_matrix = {'C': {'C': R, 'D': S}, 'D': {'C': T, 'D': P}}
        learner = self.player()
        self.assertEqual(learner.payoff_matrix, expected_matrix)

    def test_qs_update(self):
        """The Q table gains an entry and updates after each play."""
        random.seed(5)
        learner = axelrod.RiskyQLearner()
        opponent = axelrod.Cooperator()
        simulate_play(learner, opponent)
        self.assertEqual(learner.Qs, {'': {C: 0, D: 0.9}, '0.0': {C: 0, D: 0}})
        simulate_play(learner, opponent)
        self.assertEqual(
            learner.Qs,
            {'': {C: 0, D: 0.9}, '0.0': {C: 2.7, D: 0}, 'C1.0': {C: 0, D: 0}})

    def test_vs_update(self):
        """The V table gains an entry and updates after each play."""
        random.seed(5)
        learner = axelrod.RiskyQLearner()
        opponent = axelrod.Cooperator()
        simulate_play(learner, opponent)
        self.assertEqual(learner.Vs, {'': 0.9, '0.0': 0})
        simulate_play(learner, opponent)
        self.assertEqual(learner.Vs, {'': 0.9, '0.0': 2.7, 'C1.0': 0})

    def test_prev_state_updates(self):
        """The remembered previous state advances after each play."""
        random.seed(5)
        learner = axelrod.RiskyQLearner()
        opponent = axelrod.Cooperator()
        simulate_play(learner, opponent)
        self.assertEqual(learner.prev_state, '0.0')
        simulate_play(learner, opponent)
        self.assertEqual(learner.prev_state, 'C1.0')

    def test_strategy(self):
        """The learner picks actions according to its learned Q values."""
        random.seed(5)
        learner = axelrod.RiskyQLearner()
        learner.state = 'CCDC'
        learner.Qs = {'': {C: 0, D: 0}, 'CCDC': {C: 2, D: 6}}
        opponent = axelrod.Cooperator()
        test_responses(self, learner, opponent, [], [], [C, D, C, C, D, C, C])

    def test_reset_method(self):
        """reset() clears the history and restores the initial Q/V tables."""
        learner = axelrod.RiskyQLearner()
        learner.Qs = {'': {C: 0, D: -0.9}, '0.0': {C: 0, D: 0}}
        learner.Vs = {'': 0, '0.0': 0}
        learner.history = [C, D, D, D]
        learner.prev_state = C
        learner.reset()
        self.assertEqual(learner.prev_state, '')
        self.assertEqual(learner.history, [])
        self.assertEqual(learner.Vs, {'': 0})
        self.assertEqual(learner.Qs, {'': {C: 0, D: 0}})
class TestArrogantQLearner(TestPlayer):
    """Tests for the Arrogant QLearner strategy."""

    name = 'Arrogant QLearner'
    player = axelrod.ArrogantQLearner
    expected_classifier = {
        'memory_depth': float('inf'),  # Long memory
        'stochastic': True,
        'inspects_source': False,
        # Added for consistency with TestRiskyQLearner's expected_classifier.
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_qs_update(self):
        """Test that the Q values update after each play."""
        random.seed(5)
        p1 = axelrod.ArrogantQLearner()
        p2 = axelrod.Cooperator()
        # The return value of simulate_play is not needed here.
        simulate_play(p1, p2)
        self.assertEqual(p1.Qs, {'': {C: 0, D: 0.9}, '0.0': {C: 0, D: 0}})
        simulate_play(p1, p2)
        self.assertEqual(p1.Qs, {'': {C: 0, D: 0.9}, '0.0': {C: 2.7, D: 0}, 'C1.0': {C: 0, D: 0}})

    def test_vs_update(self):
        """Test that the V values update after each play."""
        random.seed(5)
        p1 = axelrod.ArrogantQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {'': 0.9, '0.0': 0})
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {'': 0.9, '0.0': 2.7, 'C1.0': 0})

    def test_prev_state_updates(self):
        """Test that the remembered previous state updates after each play."""
        random.seed(5)
        p1 = axelrod.ArrogantQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, '0.0')
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, 'C1.0')

    def test_strategy(self):
        """Tests that it chooses the best strategy."""
        random.seed(9)
        p1 = axelrod.ArrogantQLearner()
        p1.state = 'CCDC'
        p1.Qs = {'': {C: 0, D: 0}, 'CCDC': {C: 2, D: 6}}
        p2 = axelrod.Cooperator()
        test_responses(self, p1, p2, [], [], [C, C, C, C, C, C, C])

    def test_reset_method(self):
        """Tests that reset() restores the player's initial learning state."""
        P1 = axelrod.ArrogantQLearner()
        P1.Qs = {'': {C: 0, D: -0.9}, '0.0': {C: 0, D: 0}}
        P1.Vs = {'': 0, '0.0': 0}
        P1.history = [C, D, D, D]
        P1.prev_state = C
        P1.reset()
        self.assertEqual(P1.prev_state, '')
        self.assertEqual(P1.history, [])
        self.assertEqual(P1.Vs, {'': 0})
        self.assertEqual(P1.Qs, {'': {C: 0, D: 0}})
class TestHesitantQLearner(TestPlayer):
    """Tests for the Hesitant QLearner strategy."""

    name = 'Hesitant QLearner'
    player = axelrod.HesitantQLearner
    expected_classifier = {
        'memory_depth': float('inf'),  # Long memory
        'stochastic': True,
        'inspects_source': False,
        # Added for consistency with TestRiskyQLearner's expected_classifier.
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_qs_update(self):
        """Test that the Q values update after each play."""
        random.seed(5)
        p1 = axelrod.HesitantQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Qs, {'': {C: 0, D: 0.1}, '0.0': {C: 0, D: 0}})
        simulate_play(p1, p2)
        self.assertEqual(p1.Qs, {'': {C: 0, D: 0.1}, '0.0': {C: 0.30000000000000004, D: 0}, 'C1.0': {C: 0, D: 0}})

    def test_vs_update(self):
        """Test that the V values update after each play."""
        random.seed(5)
        p1 = axelrod.HesitantQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {'': 0.1, '0.0': 0})
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {'': 0.1, '0.0': 0.30000000000000004, 'C1.0': 0})

    def test_prev_state_updates(self):
        """Test that the remembered previous state updates after each play."""
        random.seed(5)
        p1 = axelrod.HesitantQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, '0.0')
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, 'C1.0')

    def test_strategy(self):
        """Tests that it chooses the best strategy."""
        random.seed(9)
        p1 = axelrod.HesitantQLearner()
        p1.state = 'CCDC'
        p1.Qs = {'': {C: 0, D: 0}, 'CCDC': {C: 2, D: 6}}
        p2 = axelrod.Cooperator()
        test_responses(self, p1, p2, [], [], [C, C, C, C, C, C, C])

    def test_reset_method(self):
        """Tests that reset() restores the player's initial learning state."""
        P1 = axelrod.HesitantQLearner()
        P1.Qs = {'': {C: 0, D: -0.9}, '0.0': {C: 0, D: 0}}
        P1.Vs = {'': 0, '0.0': 0}
        P1.history = [C, D, D, D]
        P1.prev_state = C
        P1.reset()
        self.assertEqual(P1.prev_state, '')
        self.assertEqual(P1.history, [])
        self.assertEqual(P1.Vs, {'': 0})
        self.assertEqual(P1.Qs, {'': {C: 0, D: 0}})
class TestCautiousQLearner(TestPlayer):
    """Tests for the Cautious QLearner strategy."""

    name = 'Cautious QLearner'
    player = axelrod.CautiousQLearner
    expected_classifier = {
        'memory_depth': float('inf'),  # Long memory
        'stochastic': True,
        'inspects_source': False,
        # Added for consistency with TestRiskyQLearner's expected_classifier.
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_qs_update(self):
        """Test that the Q values update after each play."""
        random.seed(5)
        p1 = axelrod.CautiousQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Qs, {'': {C: 0, D: 0.1}, '0.0': {C: 0, D: 0}})
        simulate_play(p1, p2)
        self.assertEqual(p1.Qs, {'': {C: 0, D: 0.1}, '0.0': {C: 0.30000000000000004, D: 0}, 'C1.0': {C: 0, D: 0.0}})

    def test_vs_update(self):
        """Test that the V values update after each play."""
        random.seed(5)
        p1 = axelrod.CautiousQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {'': 0.1, '0.0': 0})
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {'': 0.1, '0.0': 0.30000000000000004, 'C1.0': 0})

    def test_prev_state_updates(self):
        """Test that the remembered previous state updates after each play."""
        random.seed(5)
        p1 = axelrod.CautiousQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, '0.0')
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, 'C1.0')

    def test_strategy(self):
        """Tests that it chooses the best strategy."""
        random.seed(9)
        p1 = axelrod.CautiousQLearner()
        p1.state = 'CCDC'
        p1.Qs = {'': {C: 0, D: 0}, 'CCDC': {C: 2, D: 6}}
        p2 = axelrod.Cooperator()
        test_responses(self, p1, p2, [], [], [C, C, C, C, C, C, C])

    def test_reset_method(self):
        """Tests that reset() restores the player's initial learning state."""
        P1 = axelrod.CautiousQLearner()
        P1.Qs = {'': {C: 0, D: -0.9}, '0.0': {C: 0, D: 0}}
        P1.Vs = {'': 0, '0.0': 0}
        P1.history = [C, D, D, D]
        P1.prev_state = C
        P1.reset()
        self.assertEqual(P1.prev_state, '')
        self.assertEqual(P1.history, [])
        self.assertEqual(P1.Vs, {'': 0})
        self.assertEqual(P1.Qs, {'': {C: 0, D: 0}})
| mit |
AkA84/edx-platform | lms/djangoapps/instructor_task/tasks_helper.py | 14 | 62298 | """
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
from collections import OrderedDict
from datetime import datetime
from django.conf import settings
from eventtracking import tracker
from itertools import chain
from time import time
import unicodecsv
import logging
from celery import Task, current_task
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.files.storage import DefaultStorage
from django.db import transaction, reset_queries
from django.db.models import Q
import dogstats_wrapper as dog_stats_api
from pytz import UTC
from StringIO import StringIO
from edxmako.shortcuts import render_to_string
from instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
from shoppingcart.models import (
PaidCourseRegistration, CourseRegCodeItem, InvoiceTransaction,
Invoice, CouponRedemption, RegistrationCodeRedemption, CourseRegistrationCode
)
from track.views import task_track
from util.file import course_filename_prefix_generator, UniversalNewlineIterator
from xblock.runtime import KvsFieldData
from xmodule.modulestore.django import modulestore
from xmodule.split_test_module import get_split_user_partitions
from django.utils.translation import ugettext as _
from certificates.models import (
CertificateWhitelist,
certificate_info_for_user,
CertificateStatuses
)
from certificates.api import generate_user_certificates
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.grades import iterate_grades_for
from courseware.models import StudentModule
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_analytics.basic import enrolled_students_features, list_may_enroll
from instructor_analytics.csvs import format_dictlist
from instructor_task.models import ReportStore, InstructorTask, PROGRESS
from lms.djangoapps.lms_xblock.runtime import LmsPartitionService
from openedx.core.djangoapps.course_groups.cohorts import get_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted
from student.models import CourseEnrollment, CourseAccessRole
from verify_student.models import SoftwareSecurePhotoVerification
# Separate logger for code running inside celery tasks (distinct from client-side logging).
TASK_LOG = logging.getLogger('edx.celery.task')

# Fallback value used when no task_id is provided in xmodule_instance_args:
UNKNOWN_TASK_ID = 'unknown-task_id'

# Course-access roles excluded from report output.
# NOTE(review): usage is not visible in this chunk -- confirm against the report-generation code.
FILTERED_OUT_ROLES = ['staff', 'instructor', 'finance_admin', 'sales_admin']

# Status values that update functions return to perform_module_state_update:
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'

# Event name emitted (via eventtracking) whenever an instructor report is requested.
REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'
class BaseInstructorTask(Task):
    """
    Base task class for use with InstructorTask models.

    Permits updating information about task in corresponding InstructorTask for monitoring purposes.

    Assumes that the entry_id of the InstructorTask model is the first argument to the task.

    The `entry_id` is the primary key for the InstructorTask entry representing the task.  This class
    updates the entry on success and failure of the task it wraps, setting the entry's task_state
    to what Celery would set it to once the task returns to Celery: FAILURE if an exception is
    encountered, and SUCCESS if it returns normally.
    Other arguments are pass-throughs to perform_module_state_update, and documented there.
    """
    abstract = True

    def on_success(self, task_progress, task_id, args, kwargs):
        """
        Record a successful result on the corresponding InstructorTask entry.

        `task_progress` is a dict holding the task's result, with keys 'attempted',
        'succeeded', 'skipped', 'failed', 'total', 'action_name' (past-tense verb for
        status messages) and 'duration_ms'.  It is JSON-serialized and stored in the
        entry's task_output column.  When the task only created subtasks to do the
        actual work, nothing is written here: the subtasks update task_state themselves.
        """
        TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
        # By convention, the InstructorTask primary key is the first task argument, so
        # the entry can be found without digging into the rest of the original args.
        # It is assumed to exist -- otherwise the task would already have failed.
        entry = InstructorTask.objects.get(pk=args[0])
        if len(entry.subtasks) > 0:
            # Subtasks were spawned; they are responsible for updating task_state.
            return
        entry.task_output = InstructorTask.create_output_for_success(task_progress)
        entry.task_state = SUCCESS
        entry.save_now()

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """
        Record failure information on the corresponding InstructorTask entry.

        Stores a JSON-serialized dict in the entry's task_output column with keys
        'exception' (type of exception object), 'message' (error message) and
        'traceback' (truncated if necessary).  Progress made before the failure
        (attempted, succeeded, etc.) cannot be recorded here.
        """
        TASK_LOG.debug(u'Task %s: failure returned', task_id)
        entry_id = args[0]
        try:
            entry = InstructorTask.objects.get(pk=entry_id)
        except InstructorTask.DoesNotExist:
            # Without an InstructorTask row there is nothing to update.
            TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
        else:
            TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
            entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
            entry.task_state = FAILURE
            entry.save_now()
class UpdateProblemModuleStateError(Exception):
    """
    Raised when updating a problem module hits a fatal condition.

    Signals that the current module could not be processed and that no
    further modules should be attempted.
    """
def _get_current_task():
    """
    Return Celery's `current_task` (the top of Celery's task stack).

    This indirection exists so tests can run without actually running
    Celery: mocking `celery.current_task` directly does not seem to work,
    so this wrapper provides a hook that can be mocked instead, while
    still returning the real `current_task` in production.
    """
    return current_task
class TaskProgress(object):
    """
    Encapsulates the current task's progress: the counters 'attempted',
    'succeeded', 'skipped', 'failed' and 'total', plus 'action_name'
    and the derived 'duration_ms'.
    """
    def __init__(self, action_name, total, start_time):
        self.action_name = action_name
        self.total = total
        self.start_time = start_time
        # All progress counters start at zero.
        self.attempted = self.succeeded = self.skipped = self.failed = 0

    def update_task_state(self, extra_meta=None):
        """
        Push this object's progress state to the current celery task.

        Returns the progress dictionary for use by `run_main_task` and
        `BaseInstructorTask.on_success`.

        Arguments:
            extra_meta (dict): Extra metadata to pass to `update_state`

        Returns:
            dict: The current task's progress dict
        """
        current_progress = {
            'action_name': self.action_name,
            'attempted': self.attempted,
            'succeeded': self.succeeded,
            'skipped': self.skipped,
            'failed': self.failed,
            'total': self.total,
            # Elapsed wall-clock time since construction, in milliseconds.
            'duration_ms': int((time() - self.start_time) * 1000),
        }
        if extra_meta is not None:
            current_progress.update(extra_meta)
        _get_current_task().update_state(state=PROGRESS, meta=current_progress)
        return current_progress
def run_main_task(entry_id, task_fcn, action_name):
    """
    Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.

    Arguments passed to `task_fcn` are:

     `entry_id` : the primary key for the InstructorTask entry representing the task.
     `course_id` : the id for the course.
     `task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
     `action_name` : past-tense verb to use for constructing status messages.

    If no exceptions are raised, the `task_fcn` should return a dict containing
    the task's result with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.
              Should be past-tense.  Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

    Raises:
        ValueError: if the task_id stored in the InstructorTask entry does not
            match the id of the currently-running celery task.
    """
    # Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
    # There's no point in catching it here.
    entry = InstructorTask.objects.get(pk=entry_id)
    entry.task_state = PROGRESS
    entry.save_now()

    # Get inputs to use in this task from the entry
    task_id = entry.task_id
    course_id = entry.course_id
    task_input = json.loads(entry.task_input)

    # Construct log message
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
    TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)

    # Check that the task_id submitted in the InstructorTask matches the current task
    # that is running.  A mismatch means this task is not the one the entry was created for.
    request_task_id = _get_current_task().request.id
    if task_id != request_task_id:
        fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
        message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
        TASK_LOG.error(message)
        raise ValueError(message)

    # Now do the work, timing the whole update for monitoring purposes.
    with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
        task_progress = task_fcn(entry_id, course_id, task_input, action_name)

    # Release any queries that the connection has been hanging onto
    reset_queries()

    # Log and exit, returning task_progress info as task result
    TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
    return task_progress
def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
    """
    Performs generic update by visiting StudentModule instances with the update_fcn provided.

    StudentModule instances are those that match the specified `course_id` and `module_state_key`.
    If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
    to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.

    If a `filter_fcn` is not None, it is applied to the query that has been constructed.  It takes one
    argument, which is the query being filtered, and returns the filtered version of the query.

    The `update_fcn` is called on each StudentModule that passes the resulting filtering.  It is
    passed two arguments: the module_descriptor for the module pointed to by the module_state_key,
    and the particular StudentModule to update.  It must return one of the UPDATE_STATUS_* values;
    any other return value raises UpdateProblemModuleStateError.  A raised exception indicates a
    fatal condition -- that no other student modules should be considered.

    The return value is a dict containing the task's results, with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible updates to attempt
          'action_name': user-visible verb to use in status messages.  Should be past-tense.
              Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

    Because this is run internal to a task, it does not catch exceptions.  These are allowed to pass up to the
    next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
    result object.
    """
    start_time = time()
    usage_keys = []
    problem_url = task_input.get('problem_url')
    entrance_exam_url = task_input.get('entrance_exam_url')
    student_identifier = task_input.get('student')
    problems = {}

    # if problem_url is present make a usage key from it and find its descriptor
    if problem_url:
        usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
        usage_keys.append(usage_key)
        problem_descriptor = modulestore().get_item(usage_key)
        problems[unicode(usage_key)] = problem_descriptor

    # if entrance_exam is present grab all problems in it
    if entrance_exam_url:
        problems = get_problems_in_section(entrance_exam_url)
        usage_keys = [UsageKey.from_string(location) for location in problems.keys()]

    # find the modules in question
    modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)

    # give the option of updating an individual student. If not specified,
    # then updates all students who have responded to a problem so far
    student = None
    if student_identifier is not None:
        # if an identifier is supplied, then look for the student
        # (by email when the identifier contains '@', by username otherwise),
        # and let it throw an exception if none is found.
        # (Fix: the second branch was previously `elif student_identifier is not None`,
        # which is always true inside this block, so it is now a plain `else`.)
        if "@" in student_identifier:
            student = User.objects.get(email=student_identifier)
        else:
            student = User.objects.get(username=student_identifier)
    if student is not None:
        modules_to_update = modules_to_update.filter(student_id=student.id)

    if filter_fcn is not None:
        modules_to_update = filter_fcn(modules_to_update)

    task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
    task_progress.update_task_state()

    for module_to_update in modules_to_update:
        task_progress.attempted += 1
        module_descriptor = problems[unicode(module_to_update.module_state_key)]
        # There is no try here: if there's an error, we let it throw, and the task will
        # be marked as FAILED, with a stack trace.
        with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
            update_status = update_fcn(module_descriptor, module_to_update)
            if update_status == UPDATE_STATUS_SUCCEEDED:
                # If the update_fcn returns true, then it performed some kind of work.
                # Logging of failures is left to the update_fcn itself.
                task_progress.succeeded += 1
            elif update_status == UPDATE_STATUS_FAILED:
                task_progress.failed += 1
            elif update_status == UPDATE_STATUS_SKIPPED:
                task_progress.skipped += 1
            else:
                raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))

    return task_progress.update_task_state()
def _get_task_id_from_xmodule_args(xmodule_instance_args):
    """Extract 'task_id' from `xmodule_instance_args`, falling back to UNKNOWN_TASK_ID."""
    if xmodule_instance_args is None:
        return UNKNOWN_TASK_ID
    return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID)
def _get_xqueue_callback_url_prefix(xmodule_instance_args):
"""Gets prefix to use when constructing xqueue_callback_url."""
return xmodule_instance_args.get('xqueue_callback_url_prefix', '') if xmodule_instance_args is not None else ''
def _get_track_function_for_task(student, xmodule_instance_args=None, source_page='x_module_task'):
    """
    Build a tracking function that logs what happened.

    The returned callable is inserted into ModuleSystem and used by CapaModule,
    which will provide the event_type (as string) and event (as dict) as
    arguments.  The request_info, task_info and page are bound here.
    """
    # Request-related tracking information comes from the args passthrough;
    # it is supplemented with task-specific information.
    if xmodule_instance_args is not None:
        request_info = xmodule_instance_args.get('request_info', {})
    else:
        request_info = {}
    task_info = {
        'student': student.username,
        'task_id': _get_task_id_from_xmodule_args(xmodule_instance_args),
    }

    def track_function(event_type, event):
        """Forward the event to task_track with the bound request/task info."""
        return task_track(request_info, task_info, event_type, event, page=source_page)

    return track_function
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
                                  grade_bucket_type=None, course=None):
    """
    Fetches an XModule instance for a given `course_id`, `student` object, and `module_descriptor`.

    `xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
    These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
    the need for a Request object when instantiating an xmodule instance.
    """
    # reconstitute the problem's corresponding XModule:
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
    student_data = KvsFieldData(DjangoKeyValueStore(field_data_cache))

    return get_module_for_descriptor_internal(
        user=student,
        descriptor=module_descriptor,
        student_data=student_data,
        course_id=course_id,
        # Reuse the shared helpers instead of duplicating their logic inline;
        # _get_track_function_for_task defaults to page='x_module_task', matching
        # the behavior previously implemented here.
        track_function=_get_track_function_for_task(student, xmodule_instance_args),
        xqueue_callback_url_prefix=_get_xqueue_callback_url_prefix(xmodule_instance_args),
        grade_bucket_type=grade_bucket_type,
        # This module isn't being used for front-end rendering
        request_token=None,
        # pass in a loaded course for override enabling
        course=course
    )
@transaction.autocommit
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module):
    '''
    Takes an XModule descriptor and a corresponding StudentModule object, and
    performs rescoring on the student's problem submission.

    Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
    In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
    or if the module doesn't support rescoring.

    Returns UPDATE_STATUS_SUCCEEDED if the problem was successfully rescored for the
    given student, and UPDATE_STATUS_FAILED if the problem encountered some kind of
    error in rescoring.
    '''
    # unpack the StudentModule:
    course_id = student_module.course_id
    student = student_module.student
    usage_key = student_module.module_state_key

    with modulestore().bulk_operations(course_id):
        course = get_course_by_id(course_id)
        # TODO: Here is a call site where we could pass in a loaded course.  I
        # think we certainly need it since grading is happening here, and field
        # overrides would be important in handling that correctly
        instance = _get_module_instance_for_task(
            course_id,
            student,
            module_descriptor,
            xmodule_instance_args,
            grade_bucket_type='rescore',
            course=course
        )

        if instance is None:
            # Either permissions just changed, or someone is trying to be clever
            # and load something they shouldn't have access to.
            msg = "No module {loc} for student {student}--access denied?".format(
                loc=usage_key,
                student=student
            )
            TASK_LOG.debug(msg)
            raise UpdateProblemModuleStateError(msg)

        if not hasattr(instance, 'rescore_problem'):
            # This should also not happen, since it should be already checked in the caller,
            # but check here to be sure.
            msg = "Specified problem does not support rescoring."
            raise UpdateProblemModuleStateError(msg)

        result = instance.rescore_problem()
        instance.save()
        if 'success' not in result:
            # don't consider these fatal, but false means that the individual call didn't complete:
            TASK_LOG.warning(
                u"error processing rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: unexpected response %(msg)s",
                dict(
                    msg=result,
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_FAILED
        elif result['success'] not in ['correct', 'incorrect']:
            # 'success' values other than correct/incorrect indicate an error message
            # from the capa module rather than a completed rescore.
            TASK_LOG.warning(
                u"error processing rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: %(msg)s",
                dict(
                    msg=result['success'],
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_FAILED
        else:
            TASK_LOG.debug(
                u"successfully processed rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: %(msg)s",
                dict(
                    msg=result['success'],
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_SUCCEEDED
@transaction.autocommit
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module):
    """
    Resets problem attempts to zero for specified `student_module`.

    Returns UPDATE_STATUS_SUCCEEDED when a positive attempt count was reset,
    and UPDATE_STATUS_SKIPPED otherwise (no state, no 'attempts' key, or a
    count that is not positive).
    """
    problem_state = json.loads(student_module.state) if student_module.state else {}
    old_number_of_attempts = problem_state.get('attempts', 0)
    if old_number_of_attempts <= 0:
        return UPDATE_STATUS_SKIPPED

    problem_state['attempts'] = 0
    # convert back to json and save
    student_module.state = json.dumps(problem_state)
    student_module.save()

    # get request-related tracking information from args passthrough,
    # and supplement with task-specific information:
    track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
    track_function('problem_reset_attempts', {"old_attempts": old_number_of_attempts, "new_attempts": 0})
    return UPDATE_STATUS_SUCCEEDED
@transaction.autocommit
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module):
    """
    Delete the given StudentModule entry and emit a tracking event.

    Always returns UPDATE_STATUS_SUCCEEDED, indicating success, unless a
    database error raises an exception.
    """
    student_module.delete()
    # get request-related tracking information from args passthrough,
    # and supplement with task-specific information:
    emit_event = _get_track_function_for_task(student_module.student, xmodule_instance_args)
    emit_event('problem_delete_state', {})
    return UPDATE_STATUS_SUCCEEDED
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
    """
    Upload data as a CSV using ReportStore.

    Arguments:
        rows: CSV data, a list of rows where each row is a list of column
            values (the first row may be a header).
        csv_name: Name of the resulting CSV
        course_id: ID of the course
        timestamp: datetime stamped into the stored file's name
        config_name: name of the ReportStore configuration to use
    """
    # Build the stored file name: <course prefix>_<report name>_<timestamp>.csv
    stored_filename = u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
        course_prefix=course_filename_prefix_generator(course_id),
        csv_name=csv_name,
        timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
    )
    ReportStore.from_config(config_name).store_rows(course_id, stored_filename, rows)
    # Announce that the report was requested, for analytics purposes.
    tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": csv_name, })
def upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
    """
    Render the executive-summary template with `data_dict` and persist the
    resulting HTML file through the ReportStore configured under `config_name`.

    Arguments:
        data_dict: values fed into the executive summary template.
        report_name: base name of the stored HTML file.
        course_id: ID of the course the report belongs to.
        generated_at: datetime stamped into the stored filename.
        config_name: which ReportStore configuration to use.
    """
    store = ReportStore.from_config(config_name)
    # Render the template into an in-memory buffer the store can consume.
    html_buffer = StringIO(render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict))
    filename = u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
        course_prefix=course_filename_prefix_generator(course_id),
        report_name=report_name,
        timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
    )
    store.store(
        course_id,
        filename,
        html_buffer,
        config={
            'content_type': 'text/html',
            'content_encoding': None,
        }
    )
    tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": report_name})
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.
    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100  # flush task state to the cache once per N students
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
    # Compose a prefix identifying this task/course for every log line below.
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []
    # One extra column per "split test" (content experiment) partition.
    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]
    certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
    certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
    # Loop over all our students and build our CSV lists in memory
    header = None  # dynamic section columns; derived from the first graded student
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}
    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students
    )
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1
        # Now add a log entry after each student is graded to get a sense
        # of the task's progress
        student_counter += 1
        TASK_LOG.info(
            u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
            task_info_string,
            action_name,
            current_step,
            student_counter,
            total_enrolled_students
        )
        if gradeset:
            # We were able to successfully grade this student for this course.
            task_progress.succeeded += 1
            if not header:
                # First graded student: derive the dynamic section columns
                # and emit the full header row.
                header = [section['label'] for section in gradeset[u'section_breakdown']]
                rows.append(
                    ["id", "email", "username", "grade"] + header + cohorts_header +
                    group_configs_header + ['Enrollment Track', 'Verification Status'] + certificate_info_header
                )
            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }
            cohorts_group_name = []
            if course_is_cohorted:
                # assign=False: report current membership without creating one.
                group = get_cohort(student, course_id, assign=False)
                cohorts_group_name.append(group.name if group else '')
            group_configs_group_names = []
            for partition in experiment_partitions:
                group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
                group_configs_group_names.append(group.name if group else '')
            enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
            verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
                student,
                course_id,
                enrollment_mode
            )
            certificate_info = certificate_info_for_user(
                student,
                course_id,
                gradeset['grade'],
                student.id in whitelisted_user_ids
            )
            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append(
                [student.id, student.email, student.username, gradeset['percent']] +
                row_percents + cohorts_group_name + group_configs_group_names +
                [enrollment_mode] + [verification_status] + certificate_info
            )
        else:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students
    )
    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
    # Perform the actual upload
    upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)
    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)
    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def _order_problems(blocks):
"""
Sort the problems by the assignment type and assignment that it belongs to.
Args:
blocks (OrderedDict) - A course structure containing blocks that have been ordered
(i.e. when we iterate over them, we will see them in the order
that they appear in the course).
Returns:
an OrderedDict that maps a problem id to its headers in the final report.
"""
problems = OrderedDict()
assignments = dict()
# First, sort out all the blocks into their correct assignments and all the
# assignments into their correct types.
for block in blocks:
# Put the assignments in order into the assignments list.
if blocks[block]['block_type'] == 'sequential':
block_format = blocks[block]['format']
if block_format not in assignments:
assignments[block_format] = OrderedDict()
assignments[block_format][block] = list()
# Put the problems into the correct order within their assignment.
if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True:
current = blocks[block]['parent']
# crawl up the tree for the sequential block
while blocks[current]['block_type'] != 'sequential':
current = blocks[current]['parent']
current_format = blocks[current]['format']
assignments[current_format][current].append(block)
# Now that we have a sorting and an order for the assignments and problems,
# iterate through them in order to generate the header row.
for assignment_type in assignments:
for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1):
for problem in assignments[assignment_type][assignment]:
header_name = u"{assignment_type} {assignment_index}: {assignment_name} - {block}".format(
block=blocks[problem]['display_name'],
assignment_type=assignment_type,
assignment_index=assignment_index,
assignment_name=blocks[assignment]['display_name']
)
problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"]
return problems
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    Generate a CSV containing all students' problem grades within a given
    `course_id`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100  # flush task state to the cache once per N students
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
    # This struct encapsulates both the display names of each static item in the
    # header row as values as well as the django User field names of those items
    # as the keys. It is structured in this way to keep the values related.
    header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
    try:
        course_structure = CourseStructure.objects.get(course_id=course_id)
        blocks = course_structure.ordered_blocks
        problems = _order_problems(blocks)
    except CourseStructure.DoesNotExist:
        # No cached structure yet -- surface a retry message via task state.
        return task_progress.update_task_state(
            extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
        )
    # Just generate the static fields for now.
    rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))]
    error_rows = [list(header_row.values()) + ['error_msg']]
    current_step = {'step': 'Calculating Grades'}
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True):
        # Static columns in header_row order (id, email, username).
        student_fields = [getattr(student, field_name) for field_name in header_row]
        task_progress.attempted += 1
        if 'percent' not in gradeset or 'raw_scores' not in gradeset:
            # There was an error grading this student.
            # Generally there will be a non-empty err_msg, but that is not always the case.
            if not err_msg:
                err_msg = u"Unknown error"
            error_rows.append(student_fields + [err_msg])
            task_progress.failed += 1
            continue
        final_grade = gradeset['percent']
        # Only consider graded problems
        problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
        earned_possible_values = list()
        for problem_id in problems:
            try:
                problem_score = problem_scores[problem_id]
                earned_possible_values.append([problem_score.earned, problem_score.possible])
            except KeyError:
                # The student has not been graded on this problem. For example,
                # iterate_grades_for skips problems that students have never
                # seen in order to speed up report generation. It could also be
                # the case that the student does not have access to it (e.g. A/B
                # test or cohorted courseware).
                earned_possible_values.append(['N/A', 'N/A'])
        rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values)))
        task_progress.succeeded += 1
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
    # Perform the upload if any students have been successfully graded
    if len(rows) > 1:
        upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
    # If there are any error rows, write them out as well
    if len(error_rows) > 1:
        upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)
    return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    Build a CSV of profile information for every student enrolled in
    `course_id` and persist it through a `ReportStore`.
    """
    started = time()
    generated_at = datetime.now(UTC)
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    progress = TaskProgress(action_name, enrolled_students.count(), started)
    step = {'step': 'Calculating Profile Info'}
    progress.update_task_state(extra_meta=step)
    # Pull the requested profile features for every enrollee and shape them
    # into a header row plus one data row per student.
    requested_features = task_input.get('features')
    profile_data = enrolled_students_features(course_id, requested_features)
    header, rows = format_dictlist(profile_data, requested_features)
    progress.attempted = progress.succeeded = len(rows)
    progress.skipped = progress.total - progress.attempted
    rows.insert(0, header)
    step = {'step': 'Uploading CSV'}
    progress.update_task_state(extra_meta=step)
    # Hand the assembled rows off to the report store.
    upload_csv_to_report_store(rows, 'student_profile_info', course_id, generated_at)
    return progress.update_task_state(extra_meta=step)
def upload_enrollment_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing profile
    information for all students that are enrolled, and store using a
    `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100  # flush task state to the cache once per N students
    students_in_course = CourseEnrollment.objects.enrolled_and_dropped_out_users(course_id)
    task_progress = TaskProgress(action_name, students_in_course.count(), start_time)
    # Compose a prefix identifying this task/course for every log line below.
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    # Loop over all our students and build our CSV lists in memory
    rows = []
    header = None  # derived from the first student's data keys
    current_step = {'step': 'Gathering Profile Information'}
    enrollment_report_provider = PaidCourseEnrollmentReportProvider()
    total_students = students_in_course.count()
    student_counter = 0
    # Display name map for the column headers. Built ONCE before the loop:
    # previously this 25-entry dict (and every gettext lookup in it) was
    # rebuilt for each student, which was pure overhead -- the active
    # translation language cannot change in the middle of a task.
    enrollment_report_headers = {
        'User ID': _('User ID'),
        'Username': _('Username'),
        'Full Name': _('Full Name'),
        'First Name': _('First Name'),
        'Last Name': _('Last Name'),
        'Company Name': _('Company Name'),
        'Title': _('Title'),
        'Language': _('Language'),
        'Year of Birth': _('Year of Birth'),
        'Gender': _('Gender'),
        'Level of Education': _('Level of Education'),
        'Mailing Address': _('Mailing Address'),
        'Goals': _('Goals'),
        'City': _('City'),
        'Country': _('Country'),
        'Enrollment Date': _('Enrollment Date'),
        'Currently Enrolled': _('Currently Enrolled'),
        'Enrollment Source': _('Enrollment Source'),
        'Enrollment Role': _('Enrollment Role'),
        'List Price': _('List Price'),
        'Payment Amount': _('Payment Amount'),
        'Coupon Codes Used': _('Coupon Codes Used'),
        'Registration Code Used': _('Registration Code Used'),
        'Payment Status': _('Payment Status'),
        'Transaction Reference Number': _('Transaction Reference Number')
    }
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating detailed enrollment report for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_students
    )
    for student in students_in_course:
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1
        # Now add a log entry after certain intervals to get a hint that task is in progress
        student_counter += 1
        if student_counter % 100 == 0:
            TASK_LOG.info(
                u'%s, Task type: %s, Current step: %s, gathering enrollment profile for students in progress: %s/%s',
                task_info_string,
                action_name,
                current_step,
                student_counter,
                total_students
            )
        user_data = enrollment_report_provider.get_user_profile(student.id)
        course_enrollment_data = enrollment_report_provider.get_enrollment_info(student, course_id)
        payment_data = enrollment_report_provider.get_payment_info(student, course_id)
        if not header:
            # First student: take the column order from the data keys and
            # translate each into its localized display string.
            header = user_data.keys() + course_enrollment_data.keys() + payment_data.keys()
            display_headers = []
            for header_element in header:
                # translate header into a localizable display string
                display_headers.append(enrollment_report_headers.get(header_element, header_element))
            rows.append(display_headers)
        rows.append(user_data.values() + course_enrollment_data.values() + payment_data.values())
        task_progress.succeeded += 1
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Detailed enrollment report generated for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_students
    )
    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
    # Perform the actual upload
    upload_csv_to_report_store(rows, 'enrollment_report', course_id, start_date, config_name='FINANCIAL_REPORTS')
    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing detailed enrollment task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    Build a CSV describing students who may enroll in `course_id` but have
    not yet done so, and persist it through a `ReportStore`.
    """
    started = time()
    generated_at = datetime.now(UTC)
    # This task produces a single report, so total work is 1.
    progress = TaskProgress(action_name, 1, started)
    step = {'step': 'Calculating info about students who may enroll'}
    progress.update_task_state(extra_meta=step)
    # Gather the requested features for each not-yet-enrolled candidate and
    # shape them into a header row plus one data row per candidate.
    requested_features = task_input.get('features')
    candidate_data = list_may_enroll(course_id, requested_features)
    header, rows = format_dictlist(candidate_data, requested_features)
    progress.attempted = progress.succeeded = len(rows)
    progress.skipped = progress.total - progress.attempted
    rows.insert(0, header)
    step = {'step': 'Uploading CSV'}
    progress.update_task_state(extra_meta=step)
    # Hand the assembled rows off to the report store.
    upload_csv_to_report_store(rows, 'may_enroll_info', course_id, generated_at)
    return progress.update_task_state(extra_meta=step)
def get_executive_report(course_id):
    """
    Returns dict containing information about the course executive summary.
    """
    # Paid revenue: individual seat purchases + bulk reg-code purchases +
    # invoices that have actually been paid.
    single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_id)
    bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id)
    paid_invoices_total = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_id)
    gross_paid_revenue = single_purchase_total + bulk_purchase_total + paid_invoices_total
    all_invoices_total = Invoice.get_invoice_total_amount_for_course(course_id)
    # Pending revenue is whatever has been invoiced but not yet paid.
    gross_pending_revenue = all_invoices_total - float(paid_invoices_total)
    gross_revenue = float(gross_paid_revenue) + float(gross_pending_revenue)
    refunded_self_purchased_seats = PaidCourseRegistration.get_self_purchased_seat_count(
        course_id, status='refunded'
    )
    refunded_bulk_purchased_seats = CourseRegCodeItem.get_bulk_purchased_seat_count(
        course_id, status='refunded'
    )
    total_seats_refunded = refunded_self_purchased_seats + refunded_bulk_purchased_seats
    self_purchased_refunds = PaidCourseRegistration.get_total_amount_of_purchased_item(
        course_id,
        status='refunded'
    )
    bulk_purchase_refunds = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id, status='refunded')
    total_amount_refunded = self_purchased_refunds + bulk_purchase_refunds
    top_discounted_codes = CouponRedemption.get_top_discount_codes_used(course_id)
    total_coupon_codes_purchases = CouponRedemption.get_total_coupon_code_purchases(course_id)
    # Count bulk-purchased registration codes that were never redeemed.
    bulk_purchased_codes = CourseRegistrationCode.order_generated_registration_codes(course_id)
    unused_registration_codes = 0
    for registration_code in bulk_purchased_codes:
        if not RegistrationCodeRedemption.is_registration_code_redeemed(registration_code.code):
            unused_registration_codes += 1
    self_purchased_seat_count = PaidCourseRegistration.get_self_purchased_seat_count(course_id)
    bulk_purchased_seat_count = CourseRegCodeItem.get_bulk_purchased_seat_count(course_id)
    total_invoiced_seats = CourseRegistrationCode.invoice_generated_registration_codes(course_id).count()
    total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats
    # Guard against division by zero for courses with no purchases yet.
    self_purchases_percentage = 0.0
    bulk_purchases_percentage = 0.0
    invoice_purchases_percentage = 0.0
    avg_price_paid = 0.0
    if total_seats != 0:
        self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
        bulk_purchases_percentage = (float(bulk_purchased_seat_count) / float(total_seats)) * 100
        invoice_purchases_percentage = (float(total_invoiced_seats) / float(total_seats)) * 100
        avg_price_paid = gross_revenue / total_seats
    course = get_course_by_id(course_id, depth=0)
    # NOTE(review): index [1] looks like the currency display symbol of the
    # (code, symbol) setting pair -- confirm against settings definition.
    currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
    return {
        'display_name': course.display_name,
        'start_date': course.start.strftime("%Y-%m-%d") if course.start is not None else 'N/A',
        'end_date': course.end.strftime("%Y-%m-%d") if course.end is not None else 'N/A',
        'total_seats': total_seats,
        'currency': currency,
        'gross_revenue': float(gross_revenue),
        'gross_paid_revenue': float(gross_paid_revenue),
        'gross_pending_revenue': gross_pending_revenue,
        'total_seats_refunded': total_seats_refunded,
        'total_amount_refunded': float(total_amount_refunded),
        'average_paid_price': float(avg_price_paid),
        'discount_codes_data': top_discounted_codes,
        'total_seats_using_discount_codes': total_coupon_codes_purchases,
        'total_self_purchase_seats': self_purchased_seat_count,
        'total_bulk_purchase_seats': bulk_purchased_seat_count,
        'total_invoiced_seats': total_invoiced_seats,
        'unused_bulk_purchase_code_count': unused_registration_codes,
        'self_purchases_percentage': self_purchases_percentage,
        'bulk_purchases_percentage': bulk_purchases_percentage,
        'invoice_purchases_percentage': invoice_purchases_percentage,
    }
def upload_exec_summary_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, generate a html report containing information,
    which provides a snapshot of how the course is doing.
    """
    start_time = time()
    report_generation_date = datetime.now(UTC)
    status_interval = 100
    enrolled_users = CourseEnrollment.objects.users_enrolled_in(course_id)
    # Exclude staff and users holding filtered-out course roles from the
    # headline enrollment count.
    true_enrollment_count = 0
    for user in enrolled_users:
        if not user.is_staff and not CourseAccessRole.objects.filter(
                user=user, course_id=course_id, role__in=FILTERED_OUT_ROLES
        ).exists():
            true_enrollment_count += 1
    task_progress = TaskProgress(action_name, true_enrollment_count, start_time)
    # Compose a prefix identifying this task/course for every log line below.
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    current_step = {'step': 'Gathering executive summary report information'}
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating executive summary report',
        task_info_string,
        action_name,
        current_step
    )
    # NOTE(review): attempted is always 0 here, so this condition is always
    # true; it appears copied from the per-student loops elsewhere in this
    # file. Left as-is to preserve the extra state write.
    if task_progress.attempted % status_interval == 0:
        task_progress.update_task_state(extra_meta=current_step)
    task_progress.attempted += 1
    # get the course executive summary report information.
    data_dict = get_executive_report(course_id)
    data_dict.update(
        {
            'total_enrollments': true_enrollment_count,
            'report_generation_date': report_generation_date.strftime("%Y-%m-%d"),
        }
    )
    # By this point, we've got the data that we need to generate html report.
    current_step = {'step': 'Uploading executive summary report HTML file'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
    # Perform the actual upload
    upload_exec_summary_to_store(data_dict, 'executive_report', course_id, report_generation_date)
    task_progress.succeeded += 1
    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing executive summary report task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def generate_students_certificates(
        _xmodule_instance_args, _entry_id, course_id, task_input, action_name):  # pylint: disable=unused-argument
    """
    Generate a certificate for every enrolled student in `course_id` who
    does not already have one.
    """
    started = time()
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    progress = TaskProgress(action_name, enrolled_students.count(), started)
    step = {'step': 'Calculating students already have certificates'}
    progress.update_task_state(extra_meta=step)
    # Students still missing a certificate; everyone else counts as skipped.
    pending_students = students_require_certificate(course_id, enrolled_students)
    progress.skipped = progress.total - len(pending_students)
    step = {'step': 'Generating Certificates'}
    progress.update_task_state(extra_meta=step)
    course = modulestore().get_course(course_id, depth=0)
    # Generate a certificate for each remaining student, counting outcomes.
    for student in pending_students:
        progress.attempted += 1
        status = generate_user_certificates(
            student,
            course_id,
            course=course
        )
        if status in [CertificateStatuses.generating, CertificateStatuses.downloadable]:
            progress.succeeded += 1
        else:
            progress.failed += 1
    return progress.update_task_state(extra_meta=step)
def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    Within a given course, cohort students in bulk, then upload the results
    using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    # Iterate through rows to get total assignments for task progress
    with DefaultStorage().open(task_input['file_name']) as f:
        total_assignments = 0
        for _line in unicodecsv.DictReader(UniversalNewlineIterator(f)):
            total_assignments += 1
    task_progress = TaskProgress(action_name, total_assignments, start_time)
    current_step = {'step': 'Cohorting Students'}
    task_progress.update_task_state(extra_meta=current_step)
    # cohorts_status is a mapping from cohort_name to metadata about
    # that cohort. The metadata will include information about users
    # successfully added to the cohort, users not found, and a cached
    # reference to the corresponding cohort object to prevent
    # redundant cohort queries.
    cohorts_status = {}
    # Second pass over the same file: actually perform the assignments.
    with DefaultStorage().open(task_input['file_name']) as f:
        for row in unicodecsv.DictReader(UniversalNewlineIterator(f), encoding='utf-8'):
            # Try to use the 'email' field to identify the user. If it's not present, use 'username'.
            username_or_email = row.get('email') or row.get('username')
            cohort_name = row.get('cohort') or ''
            task_progress.attempted += 1
            if not cohorts_status.get(cohort_name):
                # First time this cohort name is seen: look it up once and
                # cache both the object and whether it exists.
                cohorts_status[cohort_name] = {
                    'Cohort Name': cohort_name,
                    'Students Added': 0,
                    'Students Not Found': set()
                }
                try:
                    cohorts_status[cohort_name]['cohort'] = CourseUserGroup.objects.get(
                        course_id=course_id,
                        group_type=CourseUserGroup.COHORT,
                        name=cohort_name
                    )
                    cohorts_status[cohort_name]["Exists"] = True
                except CourseUserGroup.DoesNotExist:
                    cohorts_status[cohort_name]["Exists"] = False
            if not cohorts_status[cohort_name]['Exists']:
                # Unknown cohort: count the row as failed and move on.
                task_progress.failed += 1
                continue
            try:
                # Commit each successful addition individually so one bad row
                # cannot roll back earlier ones.
                with transaction.commit_on_success():
                    add_user_to_cohort(cohorts_status[cohort_name]['cohort'], username_or_email)
                cohorts_status[cohort_name]['Students Added'] += 1
                task_progress.succeeded += 1
            except User.DoesNotExist:
                cohorts_status[cohort_name]['Students Not Found'].add(username_or_email)
                task_progress.failed += 1
            except ValueError:
                # Raised when the user is already in the given cohort
                task_progress.skipped += 1
            task_progress.update_task_state(extra_meta=current_step)
    current_step['step'] = 'Uploading CSV'
    task_progress.update_task_state(extra_meta=current_step)
    # Filter the output of `add_users_to_cohorts` in order to upload the result.
    output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
    output_rows = [
        [
            ','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
            else status_dict[column_name]
            for column_name in output_header
        ]
        for _cohort_name, status_dict in cohorts_status.iteritems()
    ]
    output_rows.insert(0, output_header)
    upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)
    return task_progress.update_task_state(extra_meta=current_step)
def students_require_certificate(course_id, enrolled_students):
    """Return the enrolled students who still need a certificate generated.

    Students who already hold a certificate for this course in any status
    other than `unavailable` are filtered out of the result.

    :param course_id:
    :param enrolled_students:
    """
    # Users holding a certificate for this course in any "real" status.
    already_certified = User.objects.filter(
        ~Q(generatedcertificate__status=CertificateStatuses.unavailable),
        generatedcertificate__course_id=course_id)
    return list(set(enrolled_students).difference(already_certified))
| agpl-3.0 |
kristi29091988/namebench | tools/split_instance_in_csv.py | 174 | 1077 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Split instance from provider"""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import csv
import re
# Stream servers.csv, splitting a "Name (Instance)" value in column 2 into
# separate name and instance columns, and write the widened rows to output.csv.
#
# Fix: the original opened both files without ever closing them, so the
# output writer could be left unflushed on exit; `with` guarantees
# close-and-flush on both handles.
NAME_WITH_INSTANCE = re.compile(r'(.*?) \((.*)\)')

with open('../config/servers.csv') as input_file:
    with open('output.csv', 'w') as output_file:
        writer = csv.writer(output_file)
        for row in csv.reader(input_file):
            name = row[2]
            matches = NAME_WITH_INSTANCE.match(name)
            if matches:
                # e.g. "Provider DNS (backup)" -> name="Provider DNS", instance="backup"
                name = matches.group(1)
                instance = matches.group(2)
            else:
                instance = ''
            row[2] = name
            row.insert(3, instance)
            writer.writerow(row)
| apache-2.0 |
pombredanne/invenio | modules/bibedit/lib/refextract_config.py | 1 | 3336 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""RefExtract configuration."""
__revision__ = "$Id$"
from invenio.config import CFG_VERSION, CFG_ETCDIR
# pylint: disable=C0301
# version number:
CFG_REFEXTRACT_VERSION = "Invenio/%s refextract/%s" % (CFG_VERSION, CFG_VERSION)
# periodicals knowledge base:
CFG_REFEXTRACT_KB_JOURNAL_TITLES = "%s/bibedit/refextract-journal-titles.kb" % CFG_ETCDIR
# report numbers knowledge base:
CFG_REFEXTRACT_KB_REPORT_NUMBERS = "%s/bibedit/refextract-report-numbers.kb" % CFG_ETCDIR
## MARC Fields and subfields used by refextract:
## reference fields:
CFG_REFEXTRACT_CTRL_FIELD_RECID = "001" ## control-field recid
CFG_REFEXTRACT_TAG_ID_REFERENCE = "999" ## ref field tag
CFG_REFEXTRACT_IND1_REFERENCE = "C" ## ref field ind1
CFG_REFEXTRACT_IND2_REFERENCE = "5" ## ref field ind2
CFG_REFEXTRACT_SUBFIELD_MARKER = "o" ## ref marker subfield
CFG_REFEXTRACT_SUBFIELD_MISC = "m" ## ref misc subfield
CFG_REFEXTRACT_SUBFIELD_DOI = "a" ## ref DOI subfield (NEW)
CFG_REFEXTRACT_SUBFIELD_REPORT_NUM = "r" ## ref reportnum subfield
CFG_REFEXTRACT_SUBFIELD_TITLE = "s" ## ref title subfield
CFG_REFEXTRACT_SUBFIELD_URL = "u" ## ref url subfield
CFG_REFEXTRACT_SUBFIELD_URL_DESCR = "z" ## ref url-text subfield
## refextract statisticts fields:
CFG_REFEXTRACT_TAG_ID_EXTRACTION_STATS = "999" ## ref-stats tag
CFG_REFEXTRACT_IND1_EXTRACTION_STATS = "C" ## ref-stats ind1
CFG_REFEXTRACT_IND2_EXTRACTION_STATS = "6" ## ref-stats ind2
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_STATS = "a" ## ref-stats subfield
## Internal tags are used by refextract to mark-up recognised citation
## information. These are the "closing tags:
CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM = r"</cds.REPORTNUMBER>"
CFG_REFEXTRACT_MARKER_CLOSING_TITLE = r"</cds.TITLE>"
CFG_REFEXTRACT_MARKER_CLOSING_SERIES = r"</cds.SER>"
CFG_REFEXTRACT_MARKER_CLOSING_VOLUME = r"</cds.VOL>"
CFG_REFEXTRACT_MARKER_CLOSING_YEAR = r"</cds.YR>"
CFG_REFEXTRACT_MARKER_CLOSING_PAGE = r"</cds.PG>"
## XML Record and collection opening/closing tags:
CFG_REFEXTRACT_XML_VERSION = u"""<?xml version="1.0" encoding="UTF-8"?>"""
CFG_REFEXTRACT_XML_COLLECTION_OPEN = u"""<collection xmlns="http://www.loc.gov/MARC21/slim">"""
CFG_REFEXTRACT_XML_COLLECTION_CLOSE = u"""</collection>\n"""
CFG_REFEXTRACT_XML_RECORD_OPEN = u"<record>"
CFG_REFEXTRACT_XML_RECORD_CLOSE = u"</record>"
| gpl-2.0 |
# -*- coding: utf-8 -*-
# Copyright 2016-2017 LasLabs Inc.
# License GPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from odoo.tests.common import TransactionCase
from odoo.exceptions import ValidationError
class TestMedicalAppointment(TransactionCase):
    """Exercise appointment scheduling, clash detection and booking guards."""

    def setUp(self):
        super(TestMedicalAppointment, self).setUp()
        # Patient fixture - required by every appointment.
        vals = {
            'name': 'Test Patient',
            'email': 'testpatient@example.com',
        }
        self.patient_id = self.env['medical.patient'].create(vals)
        # A physician needs a specialty first.
        vals = {
            'name': 'Test Specialty',
            'code': 'TS',
        }
        specialty_id = self.env['medical.specialty'].create(vals)
        vals = {
            'name': 'Test Physician',
            'specialty_id': specialty_id.id,
        }
        self.physician_id = self.env['medical.physician'].create(vals)
        # Stages: one default plus two non-default stages used by the
        # clash/review tests. (The "Draft" / "Pending Review" stages the
        # assertions expect presumably come from module data - confirm.)
        vals = {
            'name': 'default',
            'is_default': True,
        }
        self.env['medical.appointment.stage'].create(vals)
        vals = {
            'name': 'review',
            'is_default': False,
        }
        self.env['medical.appointment.stage'].create(vals)
        vals = {
            'name': 'cancelled',
            'is_default': False,
        }
        self.env['medical.appointment.stage'].create(vals)
        vals = {
            'name': 'Test Medical Center',
            'type': 'medical.center',
        }
        self.institution_id = self.env['res.partner'].create(vals)
        # Baseline appointment: 2016-01-01 11:00-12:00 (60 minutes).
        self.appointment_id = self._new_appointment()

    def _new_appointment(self, time='11:00:00', force=False):
        """Create a one-hour outpatient appointment on 2016-01-01.

        :param time: appointment start time ('HH:MM:SS')
        :param force: value for ``force_schedule`` (allows clashes)
        :return: the created ``medical.appointment`` record
        """
        vals = {
            'name': 'Test Appointment %s' % time,
            'patient_id': self.patient_id.id,
            'physician_id': self.physician_id.id,
            'appointment_type': 'outpatient',
            'appointment_date': '2016-01-01 %s' % time,
            'institution_id': self.institution_id.id,
            'duration': 60,
            'force_schedule': force,
        }
        return self.env['medical.appointment'].create(vals)

    def test_default_stage_id(self):
        """The computed default stage should be named "Draft"."""
        default_stage = self.appointment_id._default_stage_id()
        self.assertEqual('Draft', default_stage.name)

    def test_compute_appointment_end_date(self):
        """End date is the start date plus the 60 minute duration."""
        expect = '2016-01-01 12:00:00'
        self.assertEqual(
            expect, self.appointment_id.appointment_end_date,
            'Did not correctly compute end date. Expect %s, Got %s' % (
                expect, self.appointment_id.appointment_end_date,
            )
        )

    def test_get_appointments_gets_correct_appointments(self):
        """_get_appointments returns only appointments in the window."""
        # 15:00 appointment is outside the queried 11:00-12:00 window.
        self._new_appointment('15:00:00')
        got = self.env['medical.appointment']._get_appointments(
            self.physician_id,
            self.institution_id,
            '2016-01-01 11:00:00',
            '2016-01-01 12:00:00',
        )
        self.assertIn(
            self.appointment_id, got,
            'Did not get correct appt. Expect %s, Got %s' % (
                self.appointment_id, got
            )
        )
        self.assertEqual(
            1, len(got),
            'Did not get correct amount of appointments. Expect %d Got %d' % (
                1, len(got)
            )
        )

    def test_clashes_state_to_review(self):
        """Clashing appointments are moved to the "Pending Review" stage."""
        self._new_appointment('11:30:00', True)
        self.env['medical.appointment']._set_clashes_state_to_review(
            self.physician_id,
            self.institution_id,
            '2016-01-01 11:00:00',
            '2016-01-01 12:00:00',
        )
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual('Pending Review', self.appointment_id.stage_id.name)

    def test_check_not_double_booking_raises_error_when_in_appt(self):
        """ Appt created while another appt in progress should be rejected """
        with self.assertRaises(ValidationError):
            self._new_appointment('11:30:00')

    def test_check_not_double_booking_raises_error_when_clash_with_apt(self):
        """ Appt that will be in progress during already created rejected """
        with self.assertRaises(ValidationError):
            self._new_appointment('10:30:00')

    def test_check_not_double_booking_no_error_when_not_booked(self):
        """ Should not raise ValidationError """
        self._new_appointment('15:00:00')
| agpl-3.0 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Version 1 of the Ironic API
NOTE: IN PROGRESS AND NOT FULLY IMPLEMENTED.
Should maintain feature parity with Nova Baremetal Extension.
Specification can be found at ironic/doc/api/v1.rst
"""
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import chassis
from ironic.api.controllers.v1 import driver
from ironic.api.controllers.v1 import node
from ironic.api.controllers.v1 import port
class MediaType(base.APIBase):
    """A media type representation."""
    # NOTE: ``type`` shadows the builtin of the same name, but the attribute
    # name is part of the serialized API payload and cannot be renamed.
    base = wtypes.text
    type = wtypes.text
    def __init__(self, base, type):
        """Store the base media type and the vendor-specific variant."""
        self.base = base
        self.type = type
class V1(base.APIBase):
    """The representation of the version 1 of the API."""

    id = wtypes.text
    "The ID of the version, also acts as the release number"

    media_types = [MediaType]
    "An array of supported media types for this version"

    links = [link.Link]
    "Links that point to a specific URL for this version and documentation"

    chassis = [link.Link]
    "Links to the chassis resource"

    nodes = [link.Link]
    "Links to the nodes resource"

    ports = [link.Link]
    "Links to the ports resource"

    drivers = [link.Link]
    "Links to the drivers resource"

    @classmethod
    def convert(cls):
        """Build a fully-populated V1 document for the current request.

        The host URL is taken from ``pecan.request``, so this runs per
        request rather than once at import time.
        """
        # NOTE: the first parameter was previously named ``self``; on a
        # classmethod it receives the class, so it is renamed to ``cls``.
        v1 = V1()
        v1.id = "v1"
        v1.links = [link.Link.make_link('self', pecan.request.host_url,
                                        'v1', '', bookmark=True),
                    link.Link.make_link('describedby',
                                        'http://docs.openstack.org',
                                        'developer/ironic/dev',
                                        'api-spec-v1.html',
                                        bookmark=True, type='text/html')
                    ]
        v1.media_types = [MediaType('application/json',
                          'application/vnd.openstack.ironic.v1+json')]
        v1.chassis = [link.Link.make_link('self', pecan.request.host_url,
                                          'chassis', ''),
                      link.Link.make_link('bookmark',
                                          pecan.request.host_url,
                                          'chassis', '',
                                          bookmark=True)
                      ]
        v1.nodes = [link.Link.make_link('self', pecan.request.host_url,
                                        'nodes', ''),
                    link.Link.make_link('bookmark',
                                        pecan.request.host_url,
                                        'nodes', '',
                                        bookmark=True)
                    ]
        v1.ports = [link.Link.make_link('self', pecan.request.host_url,
                                        'ports', ''),
                    link.Link.make_link('bookmark',
                                        pecan.request.host_url,
                                        'ports', '',
                                        bookmark=True)
                    ]
        v1.drivers = [link.Link.make_link('self', pecan.request.host_url,
                                          'drivers', ''),
                      link.Link.make_link('bookmark',
                                          pecan.request.host_url,
                                          'drivers', '',
                                          bookmark=True)
                      ]
        return v1
class Controller(rest.RestController):
    """Version 1 API controller root."""
    # Sub-controllers mounted as attributes; pecan dispatches the matching
    # sub-paths (/nodes, /ports, /chassis, /drivers) to them.
    nodes = node.NodesController()
    ports = port.PortsController()
    chassis = chassis.ChassisController()
    drivers = driver.DriversController()
    @wsme_pecan.wsexpose(V1)
    def get(self):
        """Return the v1 API version document with resource links."""
        # NOTE: The reason why convert() it's being called for every
        # request is because we need to get the host url from
        # the request object to make the links.
        return V1.convert()
# ``(Controller)`` is just the class object, not a tuple, and ``__all__``
# must contain *names* (strings); the original broke ``from ... import *``.
__all__ = ('Controller',)
| apache-2.0 |
import pkgutil
from importlib import import_module
from pathlib import Path
from asgiref.local import Local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
class Error(Exception):
    """Root of Django's PEP 249-style database exception hierarchy."""
    pass
class InterfaceError(Error):
    """Error related to the database interface rather than the database."""
    pass
class DatabaseError(Error):
    """Error related to the database itself."""
    pass
class DataError(DatabaseError):
    """Problem with the processed data, e.g. value out of range."""
    pass
class OperationalError(DatabaseError):
    """Error in the database's operation, not under programmer control."""
    pass
class IntegrityError(DatabaseError):
    """The relational integrity of the database was affected."""
    pass
class InternalError(DatabaseError):
    """The database encountered an internal error."""
    pass
class ProgrammingError(DatabaseError):
    """Programming error, e.g. table not found or syntax error in SQL."""
    pass
class NotSupportedError(DatabaseError):
    """A method or database API not supported by the database was used."""
    pass
class DatabaseErrorWrapper:
    """
    Context manager and decorator that reraises backend-specific database
    exceptions using Django's common wrappers.
    """
    def __init__(self, wrapper):
        """
        wrapper is a database wrapper.
        It must have a Database attribute defining PEP-249 exceptions.
        """
        self.wrapper = wrapper
    def __enter__(self):
        pass
    def __exit__(self, exc_type, exc_value, traceback):
        # Nothing to translate on a clean exit.
        if exc_type is None:
            return
        # Ordered most-specific first: the first issubclass() match wins,
        # so DatabaseError/Error must come after their subclasses.
        for dj_exc_type in (
            DataError,
            OperationalError,
            IntegrityError,
            InternalError,
            ProgrammingError,
            NotSupportedError,
            DatabaseError,
            InterfaceError,
            Error,
        ):
            # Look up the backend's exception class of the same name.
            db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
            if issubclass(exc_type, db_exc_type):
                dj_exc_value = dj_exc_type(*exc_value.args)
                # Only set the 'errors_occurred' flag for errors that may make
                # the connection unusable.
                if dj_exc_type not in (DataError, IntegrityError):
                    self.wrapper.errors_occurred = True
                raise dj_exc_value.with_traceback(traceback) from exc_value
    def __call__(self, func):
        # Note that we are intentionally not using @wraps here for performance
        # reasons. Refs #21109.
        def inner(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return inner
def load_backend(backend_name):
    """
    Return a database backend's "base" module given a fully qualified database
    backend name, or raise an error if it doesn't exist.
    """
    # 'postgresql_psycopg2' is the pre-1.9 name of the PostgreSQL backend.
    if backend_name == 'django.db.backends.postgresql_psycopg2':
        backend_name = 'django.db.backends.postgresql'
    try:
        return import_module('%s.base' % backend_name)
    except ImportError as e_user:
        # The import failed. Enumerate the built-in backends so the error
        # message can point out a likely misspelling.
        backend_dir = str(Path(__file__).parent / 'backends')
        builtin_backends = []
        for _, name, ispkg in pkgutil.iter_modules([backend_dir]):
            if ispkg and name not in {'base', 'dummy', 'postgresql_psycopg2'}:
                builtin_backends.append(name)
        known_names = ['django.db.backends.%s' % name for name in builtin_backends]
        if backend_name in known_names:
            # A built-in backend failed to import: that's a problem inside
            # Django itself, so propagate the original ImportError.
            raise
        backend_reprs = map(repr, sorted(builtin_backends))
        raise ImproperlyConfigured(
            "%r isn't an available database backend.\n"
            "Try using 'django.db.backends.XXX', where XXX is one of:\n"
            "    %s" % (backend_name, ", ".join(backend_reprs))
        ) from e_user
class ConnectionDoesNotExist(Exception):
    """Raised when a requested database alias has no configuration."""
    pass
class ConnectionHandler:
    """Lazy, per-thread registry of database connections keyed by alias."""
    def __init__(self, databases=None):
        """
        databases is an optional dictionary of database definitions (structured
        like settings.DATABASES).
        """
        self._databases = databases
        # Connections needs to still be an actual thread local, as it's truly
        # thread-critical. Database backends should use @async_unsafe to protect
        # their code from async contexts, but this will give those contexts
        # separate connections in case it's needed as well. There's no cleanup
        # after async contexts, though, so we don't allow that if we can help it.
        self._connections = Local(thread_critical=True)
    @cached_property
    def databases(self):
        """Return the DATABASES definitions, substituting a dummy default."""
        if self._databases is None:
            self._databases = settings.DATABASES
        if self._databases == {}:
            # No databases configured at all: provide a dummy default so the
            # machinery still works until a real connection is needed.
            self._databases = {
                DEFAULT_DB_ALIAS: {
                    'ENGINE': 'django.db.backends.dummy',
                },
            }
        if DEFAULT_DB_ALIAS not in self._databases:
            raise ImproperlyConfigured("You must define a '%s' database." % DEFAULT_DB_ALIAS)
        if self._databases[DEFAULT_DB_ALIAS] == {}:
            self._databases[DEFAULT_DB_ALIAS]['ENGINE'] = 'django.db.backends.dummy'
        return self._databases
    def ensure_defaults(self, alias):
        """
        Put the defaults into the settings dictionary for a given connection
        where no settings is provided.
        """
        try:
            conn = self.databases[alias]
        except KeyError:
            raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
        conn.setdefault('ATOMIC_REQUESTS', False)
        conn.setdefault('AUTOCOMMIT', True)
        conn.setdefault('ENGINE', 'django.db.backends.dummy')
        # An empty or truncated ENGINE value also falls back to the dummy.
        if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
            conn['ENGINE'] = 'django.db.backends.dummy'
        conn.setdefault('CONN_MAX_AGE', 0)
        conn.setdefault('OPTIONS', {})
        conn.setdefault('TIME_ZONE', None)
        for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
            conn.setdefault(setting, '')
    def prepare_test_settings(self, alias):
        """
        Make sure the test settings are available in the 'TEST' sub-dictionary.
        """
        try:
            conn = self.databases[alias]
        except KeyError:
            raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
        test_settings = conn.setdefault('TEST', {})
        default_test_settings = [
            ('CHARSET', None),
            ('COLLATION', None),
            ('MIGRATE', True),
            ('MIRROR', None),
            ('NAME', None),
        ]
        for key, value in default_test_settings:
            test_settings.setdefault(key, value)
    def __getitem__(self, alias):
        """Return the connection for ``alias``, creating it on first use."""
        # Reuse a connection already opened on this thread, if any.
        if hasattr(self._connections, alias):
            return getattr(self._connections, alias)
        self.ensure_defaults(alias)
        self.prepare_test_settings(alias)
        db = self.databases[alias]
        backend = load_backend(db['ENGINE'])
        conn = backend.DatabaseWrapper(db, alias)
        setattr(self._connections, alias, conn)
        return conn
    def __setitem__(self, key, value):
        setattr(self._connections, key, value)
    def __delitem__(self, key):
        delattr(self._connections, key)
    def __iter__(self):
        """Iterate over configured aliases (not open connections)."""
        return iter(self.databases)
    def all(self):
        """Return connection objects for every configured alias."""
        return [self[alias] for alias in self]
    def close_all(self):
        """Close every connection that was opened on the current thread."""
        for alias in self:
            try:
                connection = getattr(self._connections, alias)
            except AttributeError:
                # Never opened on this thread; nothing to close.
                continue
            connection.close()
class ConnectionRouter:
    """Delegate database-routing decisions to the configured router chain."""

    def __init__(self, routers=None):
        """
        If routers is not specified, default to settings.DATABASE_ROUTERS.
        """
        self._routers = routers

    @cached_property
    def routers(self):
        """Resolve the configured routers, instantiating dotted paths."""
        if self._routers is None:
            self._routers = settings.DATABASE_ROUTERS
        routers = []
        for r in self._routers:
            if isinstance(r, str):
                # Dotted-path string: import the class and instantiate it.
                router = import_string(r)()
            else:
                router = r
            routers.append(router)
        return routers

    def _router_func(action):
        # Factory building db_for_read/db_for_write: ask each router in
        # turn, then fall back to the instance's db, then the default.
        def _route_db(self, model, **hints):
            chosen_db = None
            for router in self.routers:
                try:
                    method = getattr(router, action)
                except AttributeError:
                    # If the router doesn't have a method, skip to the next one.
                    pass
                else:
                    chosen_db = method(model, **hints)
                    if chosen_db:
                        return chosen_db
            instance = hints.get('instance')
            if instance is not None and instance._state.db:
                return instance._state.db
            return DEFAULT_DB_ALIAS
        return _route_db

    db_for_read = _router_func('db_for_read')
    db_for_write = _router_func('db_for_write')

    def allow_relation(self, obj1, obj2, **hints):
        """Return whether a relation between obj1 and obj2 is allowed."""
        for router in self.routers:
            try:
                method = router.allow_relation
            except AttributeError:
                # If the router doesn't have a method, skip to the next one.
                pass
            else:
                allow = method(obj1, obj2, **hints)
                if allow is not None:
                    return allow
        # Default: only objects on the same database may be related.
        return obj1._state.db == obj2._state.db

    def allow_migrate(self, db, app_label, **hints):
        """Return whether migrations for app_label may run on db."""
        for router in self.routers:
            try:
                method = router.allow_migrate
            except AttributeError:
                # If the router doesn't have a method, skip to the next one.
                continue
            allow = method(db, app_label, **hints)
            if allow is not None:
                return allow
        return True

    def allow_migrate_model(self, db, model):
        """allow_migrate() convenience wrapper passing model identity hints."""
        return self.allow_migrate(
            db,
            model._meta.app_label,
            model_name=model._meta.model_name,
            model=model,
        )

    def get_migratable_models(self, app_config, db, include_auto_created=False):
        """Return app models allowed to be migrated on provided db."""
        models = app_config.get_models(include_auto_created=include_auto_created)
        return [model for model in models if self.allow_migrate_model(db, model)]
from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
    """Token-stream filter that injects or rewrites a meta charset tag."""
    def __init__(self, source, encoding):
        _base.Filter.__init__(self, source)
        # Target encoding; None disables all rewriting.
        self.encoding = encoding
    def __iter__(self):
        # Three-phase pass over the stream: before <head>, inside <head>,
        # after </head>. Head content is buffered in ``pending`` so a meta
        # tag can be inserted if none was found by </head>.
        state = "pre_head"
        # With no encoding requested, behave as if a meta tag already exists.
        meta_found = (self.encoding is None)
        pending = []
        for token in _base.Filter.__iter__(self):
            type = token["type"]
            if type == "StartTag":
                if token["name"].lower() == "head":
                    state = "in_head"
            elif type == "EmptyTag":
                if token["name"].lower() == "meta":
                    # replace charset with actual encoding
                    has_http_equiv_content_type = False
                    for (namespace, name), value in token["data"].items():
                        if namespace is not None:
                            continue
                        elif name.lower() == 'charset':
                            token["data"][(namespace, name)] = self.encoding
                            meta_found = True
                            break
                        elif name == 'http-equiv' and value.lower() == 'content-type':
                            has_http_equiv_content_type = True
                    else:
                        # for/else: no charset attribute was found, so rewrite
                        # the http-equiv content-type declaration instead.
                        if has_http_equiv_content_type and (None, "content") in token["data"]:
                            token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding
                            meta_found = True
                elif token["name"].lower() == "head" and not meta_found:
                    # insert meta into empty head
                    yield {"type": "StartTag", "name": "head",
                           "data": token["data"]}
                    yield {"type": "EmptyTag", "name": "meta",
                           "data": {(None, "charset"): self.encoding}}
                    yield {"type": "EndTag", "name": "head"}
                    meta_found = True
                    continue
            elif type == "EndTag":
                if token["name"].lower() == "head" and pending:
                    # insert meta into head (if necessary) and flush pending queue
                    yield pending.pop(0)
                    if not meta_found:
                        yield {"type": "EmptyTag", "name": "meta",
                               "data": {(None, "charset"): self.encoding}}
                    while pending:
                        yield pending.pop(0)
                    meta_found = True
                    state = "post_head"
            if state == "in_head":
                # Buffer instead of emitting while inside <head>.
                pending.append(token)
            else:
                yield token
| mpl-2.0 |
"""
ConfigurationModel for the mobile_api djangoapp.
"""
from config_models.models import ConfigurationModel
from django.db import models
from . import utils
from .mobile_platform import PLATFORM_CLASSES
class MobileApiConfig(ConfigurationModel):
    """
    Configuration of the video profiles served by the mobile API.
    The order in which the comma-separated list of names of profiles are given
    is in priority order.
    """
    video_profiles = models.TextField(
        blank=True,
        help_text="A comma-separated list of names of profiles to include for videos returned from the mobile API."
    )
    class Meta(object):
        app_label = "mobile_api"
    @classmethod
    def get_video_profiles(cls):
        """
        Get the list of profiles in priority order when requesting from VAL
        """
        # Empty entries (e.g. trailing commas) are filtered out.
        return [profile.strip() for profile in cls.current().video_profiles.split(",") if profile]
class AppVersionConfig(models.Model):
    """
    Configuration for mobile app versions available.
    """
    PLATFORM_CHOICES = tuple([
        (platform, platform)
        for platform in PLATFORM_CLASSES.keys()
    ])
    platform = models.CharField(max_length=50, choices=PLATFORM_CHOICES, blank=False)
    version = models.CharField(
        max_length=50,
        blank=False,
        help_text="Version should be in the format X.X.X.Y where X is a number and Y is alphanumeric"
    )
    # major/minor/patch are derived from ``version`` in save() below.
    major_version = models.IntegerField()
    minor_version = models.IntegerField()
    patch_version = models.IntegerField()
    expire_at = models.DateTimeField(null=True, blank=True, verbose_name="Expiry date for platform version")
    enabled = models.BooleanField(default=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        app_label = "mobile_api"
        unique_together = ('platform', 'version',)
        # Newest version first; latest_version() relies on this ordering.
        ordering = ['-major_version', '-minor_version', '-patch_version']
    def __unicode__(self):
        # NOTE(review): Python 2 style method; str() will not call this on
        # Python 3 - confirm before relying on it there.
        return "{}_{}".format(self.platform, self.version)
    @classmethod
    def latest_version(cls, platform):
        """ Returns latest supported app version for a platform. """
        # first() on the default descending ordering yields the newest row.
        latest_version_config = cls.objects.filter(platform=platform, enabled=True).first()
        if latest_version_config:
            return latest_version_config.version
    @classmethod
    def last_supported_date(cls, platform, version):
        """ Returns date when app version will get expired for a platform """
        parsed_version = utils.parsed_version(version)
        # reverse() flips the default ordering to oldest-first, so the first
        # config at or above ``version`` provides the earliest expiry date.
        active_configs = cls.objects.filter(platform=platform, enabled=True, expire_at__isnull=False).reverse()
        for config in active_configs:
            if utils.parsed_version(config.version) >= parsed_version:
                return config.expire_at
    def save(self, *args, **kwargs):
        """ parses version into major, minor and patch versions before saving """
        self.major_version, self.minor_version, self.patch_version = utils.parsed_version(self.version)
        super(AppVersionConfig, self).save(*args, **kwargs)
class IgnoreMobileAvailableFlagConfig(ConfigurationModel):  # pylint: disable=W5101
    """
    Configuration for the mobile_available flag. Default is false.
    Enabling this configuration will cause the mobile_available flag check in
    access.py._is_descriptor_mobile_available to ignore the mobile_available
    flag.
    """
    # No fields of its own; presumably ConfigurationModel supplies the
    # 'enabled' toggle used to switch this behavior on - confirm.
    class Meta(object):
        app_label = "mobile_api"
| agpl-3.0 |
import decimal
import json
import unittest
import uuid
from django import forms
from django.core import exceptions, serializers, validators
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TransactionTestCase, override_settings
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import (
ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel,
NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel,
PostgreSQLModel,
)
try:
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import SimpleArrayField, SplitArrayField
except ImportError:
pass
class TestSaveLoad(PostgreSQLTestCase):
    """Round-trip ArrayField values through save() and a fresh get()."""
    def test_integer(self):
        instance = IntegerArrayModel(field=[1, 2, 3])
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)
    def test_char(self):
        instance = CharArrayModel(field=['hello', 'goodbye'])
        instance.save()
        loaded = CharArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)
    def test_dates(self):
        instance = DateTimeArrayModel(
            datetimes=[timezone.now()],
            dates=[timezone.now().date()],
            times=[timezone.now().time()],
        )
        instance.save()
        loaded = DateTimeArrayModel.objects.get()
        self.assertEqual(instance.datetimes, loaded.datetimes)
        self.assertEqual(instance.dates, loaded.dates)
        self.assertEqual(instance.times, loaded.times)
    def test_tuples(self):
        # assertSequenceEqual: the loaded value need not preserve tuple type.
        instance = IntegerArrayModel(field=(1,))
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertSequenceEqual(instance.field, loaded.field)
    def test_integers_passed_as_strings(self):
        # This checks that get_prep_value is deferred properly
        instance = IntegerArrayModel(field=['1'])
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertEqual(loaded.field, [1])
    def test_default_null(self):
        instance = NullableIntegerArrayModel()
        instance.save()
        loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
        self.assertEqual(loaded.field, None)
        self.assertEqual(instance.field, loaded.field)
    def test_null_handling(self):
        instance = NullableIntegerArrayModel(field=None)
        instance.save()
        loaded = NullableIntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)
        # A non-nullable array field must reject None at the database level.
        instance = IntegerArrayModel(field=None)
        with self.assertRaises(IntegrityError):
            instance.save()
    def test_nested(self):
        instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
        instance.save()
        loaded = NestedIntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)
    def test_other_array_types(self):
        instance = OtherTypesArrayModel(
            ips=['192.168.0.1', '::1'],
            uuids=[uuid.uuid4()],
            decimals=[decimal.Decimal(1.25), 1.75],
        )
        instance.save()
        loaded = OtherTypesArrayModel.objects.get()
        self.assertEqual(instance.ips, loaded.ips)
        self.assertEqual(instance.uuids, loaded.uuids)
        self.assertEqual(instance.decimals, loaded.decimals)
    def test_model_set_on_base_field(self):
        instance = IntegerArrayModel()
        field = instance._meta.get_field('field')
        self.assertEqual(field.model, IntegerArrayModel)
        self.assertEqual(field.base_field.model, IntegerArrayModel)
class TestQuerying(PostgreSQLTestCase):
    """ArrayField lookups: exact, containment, overlap, index, slice, len."""
    def setUp(self):
        # Fixture order matters: assertions slice self.objs by position.
        # objs[0]=[1], objs[1]=[2], objs[2]=[2, 3], objs[3]=[20, 30, 40],
        # objs[4]=NULL.
        self.objs = [
            NullableIntegerArrayModel.objects.create(field=[1]),
            NullableIntegerArrayModel.objects.create(field=[2]),
            NullableIntegerArrayModel.objects.create(field=[2, 3]),
            NullableIntegerArrayModel.objects.create(field=[20, 30, 40]),
            NullableIntegerArrayModel.objects.create(field=None),
        ]
    def test_exact(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__exact=[1]),
            self.objs[:1]
        )
    def test_isnull(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__isnull=True),
            self.objs[-1:]
        )
    def test_gt(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__gt=[0]),
            self.objs[:4]
        )
    def test_lt(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__lt=[2]),
            self.objs[:1]
        )
    def test_in(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
            self.objs[:2]
        )
    def test_contained_by(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
            self.objs[:2]
        )
    def test_contains(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__contains=[2]),
            self.objs[1:3]
        )
    def test_contains_charfield(self):
        # Regression for #22907
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__contains=['text']),
            []
        )
    def test_contained_by_charfield(self):
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__contained_by=['text']),
            []
        )
    def test_overlap_charfield(self):
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__overlap=['text']),
            []
        )
    def test_index(self):
        # __0 addresses the first array element.
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0=2),
            self.objs[1:3]
        )
    def test_index_chained(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0__lt=3),
            self.objs[0:3]
        )
    def test_index_nested(self):
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0__0=1),
            [instance]
        )
    @unittest.expectedFailure
    def test_index_used_on_nested_data(self):
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0=[1, 2]),
            [instance]
        )
    def test_overlap(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
            self.objs[0:3]
        )
    def test_len(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__len__lte=2),
            self.objs[0:3]
        )
    def test_slice(self):
        # __M_N takes the sub-array from index M up to N.
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0_1=[2]),
            self.objs[1:3]
        )
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]),
            self.objs[2:3]
        )
    @unittest.expectedFailure
    def test_slice_nested(self):
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]),
            [instance]
        )
    def test_usage_in_subquery(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(
                id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
            ),
            [self.objs[3]]
        )
class TestChecks(PostgreSQLTestCase):
    """System-check errors reported for invalid ArrayField definitions."""
    def test_field_checks(self):
        class MyModel(PostgreSQLModel):
            # CharField() without max_length makes the base field invalid.
            field = ArrayField(models.CharField())
        model = MyModel()
        errors = model.check()
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0].id, 'postgres.E001')
    def test_invalid_base_fields(self):
        class MyModel(PostgreSQLModel):
            # Relational fields are not valid array base fields.
            field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel'))
        model = MyModel()
        errors = model.check()
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0].id, 'postgres.E002')
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
    """deconstruct() round-trips and migration behaviour of ArrayField."""
    available_apps = ['postgres_tests']
    def test_deconstruct(self):
        field = ArrayField(models.IntegerField())
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(type(new.base_field), type(field.base_field))
    def test_deconstruct_with_size(self):
        field = ArrayField(models.IntegerField(), size=3)
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(new.size, field.size)
    def test_deconstruct_args(self):
        field = ArrayField(models.CharField(max_length=20))
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(new.base_field.max_length, field.base_field.max_length)
    def test_subclass_deconstruct(self):
        # The base class deconstructs to its public path; subclasses to
        # their own module path.
        field = ArrayField(models.IntegerField())
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField')
        field = ArrayFieldSubclass()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass')
    @override_settings(MIGRATION_MODULES={
        "postgres_tests": "postgres_tests.array_default_migrations",
    })
    def test_adding_field_with_default(self):
        # See #22962
        table_name = 'postgres_tests_integerarraydefaultmodel'
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
        call_command('migrate', 'postgres_tests', verbosity=0)
        with connection.cursor() as cursor:
            self.assertIn(table_name, connection.introspection.table_names(cursor))
        call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
    @override_settings(MIGRATION_MODULES={
        "postgres_tests": "postgres_tests.array_index_migrations",
    })
    def test_adding_arrayfield_with_index(self):
        """
        ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes.
        """
        table_name = 'postgres_tests_chartextarrayindexmodel'
        call_command('migrate', 'postgres_tests', verbosity=0)
        with connection.cursor() as cursor:
            like_constraint_field_names = [
                c.rsplit('_', 2)[0][len(table_name) + 1:]
                for c in connection.introspection.get_constraints(cursor, table_name)
                if c.endswith('_like')
            ]
        # Only the CharField should have a LIKE index.
        self.assertEqual(like_constraint_field_names, ['char2'])
        with connection.cursor() as cursor:
            indexes = connection.introspection.get_indexes(cursor, table_name)
        # All fields should have regular indexes.
        self.assertIn('char', indexes)
        self.assertIn('char2', indexes)
        self.assertIn('text', indexes)
        call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLTestCase):
    """Round-trip an ArrayField value through the JSON serializer."""

    test_data = (
        '[{"fields": {"field": "[\\"1\\", \\"2\\"]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]'
    )

    def test_dumping(self):
        # Serializing an instance must produce the canonical JSON payload.
        serialized = serializers.serialize('json', [IntegerArrayModel(field=[1, 2])])
        self.assertEqual(json.loads(serialized), json.loads(self.test_data))

    def test_loading(self):
        # Deserializing the canonical payload must restore the list value.
        deserialized = next(iter(serializers.deserialize('json', self.test_data)))
        self.assertEqual(deserialized.object.field, [1, 2])
class TestValidation(PostgreSQLTestCase):
    """Model-level validation behaviour of ArrayField.clean()."""

    def test_unbounded(self):
        # A null item is rejected when the base field does not allow null.
        field = ArrayField(models.IntegerField())
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([1, None], None)
        self.assertEqual(cm.exception.code, 'item_invalid')
        self.assertEqual(
            cm.exception.message % cm.exception.params,
            'Item 1 in the array did not validate: This field cannot be null.'
        )

    def test_blank_true(self):
        field = ArrayField(models.IntegerField(blank=True, null=True))
        # This should not raise a validation error
        field.clean([1, None], None)

    def test_with_size(self):
        # size=3 caps the number of items.
        field = ArrayField(models.IntegerField(), size=3)
        field.clean([1, 2, 3], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([1, 2, 3, 4], None)
        self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.')

    def test_nested_array_mismatch(self):
        # Nested arrays must be rectangular.
        field = ArrayField(ArrayField(models.IntegerField()))
        field.clean([[1, 2], [3, 4]], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([[1, 2], [3, 4, 5]], None)
        self.assertEqual(cm.exception.code, 'nested_array_mismatch')
        self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.')

    def test_with_validators(self):
        # Validators declared on the base field run against every item.
        field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)]))
        field.clean([1, 2], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([0], None)
        self.assertEqual(cm.exception.code, 'item_invalid')
        self.assertEqual(
            cm.exception.messages[0],
            'Item 0 in the array did not validate: Ensure this value is greater than or equal to 1.'
        )
class TestSimpleFormField(PostgreSQLTestCase):
    """Behaviour of the delimiter-separated SimpleArrayField form field."""

    def test_valid(self):
        field = SimpleArrayField(forms.CharField())
        value = field.clean('a,b,c')
        self.assertEqual(value, ['a', 'b', 'c'])

    def test_to_python_fail(self):
        # Items that the base field cannot coerce are reported by position.
        field = SimpleArrayField(forms.IntegerField())
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,9')
        self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.')

    def test_validate_fail(self):
        # A trailing delimiter yields an empty (thus invalid) third item.
        field = SimpleArrayField(forms.CharField(required=True))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,')
        self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.')

    def test_validators_fail(self):
        field = SimpleArrayField(forms.RegexField('[a-e]{2}'))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,bc,de')
        self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.')

    def test_delimiter(self):
        # The delimiter is configurable.
        field = SimpleArrayField(forms.CharField(), delimiter='|')
        value = field.clean('a|b|c')
        self.assertEqual(value, ['a', 'b', 'c'])

    def test_delimiter_with_nesting(self):
        # Outer field splits on '|', inner field on the default ','.
        field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|')
        value = field.clean('a,b|c,d')
        self.assertEqual(value, [['a', 'b'], ['c', 'd']])

    def test_prepare_value(self):
        # prepare_value is the inverse of clean: list -> delimited string.
        field = SimpleArrayField(forms.CharField())
        value = field.prepare_value(['a', 'b', 'c'])
        self.assertEqual(value, 'a,b,c')

    def test_max_length(self):
        field = SimpleArrayField(forms.CharField(), max_length=2)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,c')
        self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.')

    def test_min_length(self):
        field = SimpleArrayField(forms.CharField(), min_length=4)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,c')
        self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.')

    def test_required(self):
        field = SimpleArrayField(forms.CharField(), required=True)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('')
        self.assertEqual(cm.exception.messages[0], 'This field is required.')

    def test_model_field_formfield(self):
        # ArrayField.formfield() maps to SimpleArrayField and forwards the
        # base field's options (here max_length).
        model_field = ArrayField(models.CharField(max_length=27))
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, SimpleArrayField)
        self.assertIsInstance(form_field.base_field, forms.CharField)
        self.assertEqual(form_field.base_field.max_length, 27)

    def test_model_field_formfield_size(self):
        # The model field's size becomes the form field's max_length.
        model_field = ArrayField(models.CharField(max_length=27), size=4)
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, SimpleArrayField)
        self.assertEqual(form_field.max_length, 4)
class TestSplitFormField(PostgreSQLTestCase):
    """Behaviour of SplitArrayField, which renders one subwidget per item."""

    def test_valid(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        # Each item arrives as its own POST key: array_0, array_1, ...
        data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'}
        form = SplitForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']})

    def test_required(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), required=True, size=3)
        data = {'array_0': '', 'array_1': '', 'array_2': ''}
        form = SplitForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'array': ['This field is required.']})

    def test_remove_trailing_nulls(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True)
        # Empty items at the end are stripped; interior empties are kept.
        data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''}
        form = SplitForm(data)
        self.assertTrue(form.is_valid(), form.errors)
        self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']})

    def test_required_field(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        # A missing item is reported with its position in the array.
        data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''}
        form = SplitForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']})

    def test_rendering(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        # assertHTMLEqual normalises whitespace, so layout is irrelevant.
        self.assertHTMLEqual(str(SplitForm()), '''
            <tr>
                <th><label for="id_array_0">Array:</label></th>
                <td>
                    <input id="id_array_0" name="array_0" type="text" />
                    <input id="id_array_1" name="array_1" type="text" />
                    <input id="id_array_2" name="array_2" type="text" />
                </td>
            </tr>
        ''')
| bsd-3-clause |
yatinkumbhare/openstack-nova | nova/tests/unit/api/openstack/compute/contrib/test_evacuate.py | 25 | 12249 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_config import cfg
import webob
from nova.api.openstack.compute.contrib import evacuate as evacuate_v2
from nova.api.openstack.compute.plugins.v3 import evacuate as evacuate_v21
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import vm_states
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
def fake_compute_api(*args, **kwargs):
    """Stand-in for compute API mutators (resize/evacuate): ignore all
    arguments and report unconditional success."""
    return True
def fake_compute_api_get(self, context, instance_id, want_objects=False,
                         **kwargs):
    """Fake compute_api.API.get.

    The sentinel 'BAD_UUID' simulates a missing instance; any other id
    yields an ACTIVE instance pinned to host1 with no task in progress.
    """
    if instance_id == 'BAD_UUID':
        # BAD_UUID is something that does not exist
        raise exception.InstanceNotFound(instance_id=instance_id)
    return fake_instance.fake_instance_obj(
        context, id=1, uuid=instance_id, task_state=None, host='host1',
        vm_state=vm_states.ACTIVE)
def fake_service_get_by_compute_host(self, context, host):
    """Fake HostAPI.service_get_by_compute_host.

    The sentinel 'bad-host' simulates an unknown hypervisor; any other
    name maps to a static nova compute-service record.
    """
    if host == 'bad-host':
        raise exception.ComputeHostNotFound(host=host)
    return {'host_name': host,
            'service': 'compute',
            'zone': 'nova'}
class EvacuateTestV21(test.NoDBTestCase):
validation_error = exception.ValidationError
_methods = ('resize', 'evacuate')
    def setUp(self):
        """Stub the compute API and host lookup with the module-level fakes,
        build the controller(s), and prepare admin/non-admin requests."""
        super(EvacuateTestV21, self).setUp()
        self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
        self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
                       fake_service_get_by_compute_host)
        self.UUID = uuid.uuid4()
        # Every mutator in _methods just returns True (fake_compute_api).
        for _method in self._methods:
            self.stubs.Set(compute_api.API, _method, fake_compute_api)
        self._set_up_controller()
        self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
        self.req = fakes.HTTPRequest.blank('')

    def _set_up_controller(self):
        # Overridden in EvacuateTestV2 to build extension-aware controllers.
        self.controller = evacuate_v21.EvacuateController()
        self.controller_no_ext = self.controller

    def _get_evacuate_response(self, json_load, uuid=None):
        """Issue an evacuate action as admin; return the response body."""
        base_json_load = {'evacuate': json_load}
        response = self.controller._evacuate(self.admin_req, uuid or self.UUID,
                                             body=base_json_load)
        return response

    def _check_evacuate_failure(self, exception, body, uuid=None,
                                controller=None):
        """Assert that an evacuate action with `body` raises `exception`."""
        controller = controller or self.controller
        body = {'evacuate': body}
        self.assertRaises(exception,
                          controller._evacuate,
                          self.admin_req, uuid or self.UUID, body=body)
    def test_evacuate_with_valid_instance(self):
        admin_pass = 'MyNewPass'
        res = self._get_evacuate_response({'host': 'my-host',
                                           'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    def test_evacuate_with_invalid_instance(self):
        # BAD_UUID makes the stubbed compute_api.get raise InstanceNotFound.
        self._check_evacuate_failure(webob.exc.HTTPNotFound,
                                     {'host': 'my-host',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'},
                                     uuid='BAD_UUID')

    def test_evacuate_with_active_service(self):
        # Evacuation must be refused while the source compute is still up.
        def fake_evacuate(*args, **kwargs):
            raise exception.ComputeServiceInUse("Service still in use")
        self.stubs.Set(compute_api.API, 'evacuate', fake_evacuate)
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'host': 'my-host',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_no_target(self):
        # Target host is optional; the scheduler picks one.
        admin_pass = 'MyNewPass'
        res = self._get_evacuate_response({'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    def test_evacuate_instance_without_on_shared_storage(self):
        # onSharedStorage is mandatory in the request schema.
        self._check_evacuate_failure(self.validation_error,
                                     {'host': 'my-host',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_invalid_characters_host(self):
        host = 'abc!#'
        self._check_evacuate_failure(self.validation_error,
                                     {'host': host,
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_too_long_host(self):
        host = 'a' * 256
        self._check_evacuate_failure(self.validation_error,
                                     {'host': host,
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_invalid_on_shared_storage(self):
        self._check_evacuate_failure(self.validation_error,
                                     {'host': 'my-host',
                                      'onSharedStorage': 'foo',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_bad_target(self):
        # 'bad-host' makes the stubbed host lookup raise ComputeHostNotFound.
        self._check_evacuate_failure(webob.exc.HTTPNotFound,
                                     {'host': 'bad-host',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_target(self):
        admin_pass = 'MyNewPass'
        res = self._get_evacuate_response({'host': 'my-host',
                                           'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    @mock.patch('nova.objects.Instance.save')
    def test_evacuate_shared_and_pass(self, mock_save):
        # Supplying adminPass together with shared storage is rejected: the
        # password cannot be (re)set when the disk is not rebuilt.
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'host': 'bad-host',
                                      'onSharedStorage': 'True',
                                      'adminPass': 'MyNewPass'})

    @mock.patch('nova.objects.Instance.save')
    def test_evacuate_not_shared_pass_generated(self, mock_save):
        # Without an explicit adminPass a random one of configured length
        # is generated.
        res = self._get_evacuate_response({'host': 'my-host',
                                           'onSharedStorage': 'False'})
        self.assertEqual(CONF.password_length, len(res['adminPass']))

    @mock.patch('nova.objects.Instance.save')
    def test_evacuate_shared(self, mock_save):
        self._get_evacuate_response({'host': 'my-host',
                                     'onSharedStorage': 'True'})

    def test_not_admin(self):
        # self.req carries a non-admin context; policy must reject it.
        body = {'evacuate': {'host': 'my-host',
                             'onSharedStorage': 'False'}}
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller._evacuate,
                          self.req, self.UUID, body=body)

    def test_evacuate_to_same_host(self):
        # The stubbed instance lives on host1; evacuating onto host1 is
        # meaningless and must be rejected.
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'host': 'host1',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_empty_host(self):
        self._check_evacuate_failure(self.validation_error,
                                     {'host': '',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'},
                                     controller=self.controller_no_ext)

    @mock.patch('nova.objects.Instance.save')
    def test_evacuate_instance_with_underscore_in_hostname(self, mock_save):
        admin_pass = 'MyNewPass'
        # NOTE: The hostname grammar in RFC952 does not allow for
        # underscores in hostnames. However, we should test that it
        # is supported because it sometimes occurs in real systems.
        res = self._get_evacuate_response({'host': 'underscore_hostname',
                                           'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    def test_evacuate_disable_password_return(self):
        self._test_evacuate_enable_instance_password_conf(False)

    def test_evacuate_enable_password_return(self):
        self._test_evacuate_enable_instance_password_conf(True)
@mock.patch('nova.objects.Instance.save')
def _test_evacuate_enable_instance_password_conf(self, mock_save,
enable_pass):
self.flags(enable_instance_password=enable_pass)
res = self._get_evacuate_response({'host': 'underscore_hostname',
'onSharedStorage': 'False'})
if enable_pass:
self.assertIn('adminPass', res)
else:
self.assertIsNone(res.get('adminPass'))
class EvacuateTestV2(EvacuateTestV21):
    """Re-run the v2.1 evacuate suite against the legacy v2 controller."""

    # v2 reports schema problems as plain HTTP 400s, not ValidationError.
    validation_error = webob.exc.HTTPBadRequest

    def _set_up_controller(self):
        # v2 behaviour depends on loaded extensions: build one controller
        # with os-extended-evacuate-find-host and one without.
        ext_mgr = extensions.ExtensionManager()
        ext_mgr.extensions = {'os-extended-evacuate-find-host': 'fake'}
        self.controller = evacuate_v2.Controller(ext_mgr)
        ext_mgr_no_ext = extensions.ExtensionManager()
        ext_mgr_no_ext.extensions = {}
        self.controller_no_ext = evacuate_v2.Controller(ext_mgr_no_ext)

    def test_no_target_fails_if_extension_not_loaded(self):
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'},
                                     controller=self.controller_no_ext)

    # The following v2.1 validations do not apply to v2; the inherited
    # tests are disabled by overriding them with no-ops.
    def test_evacuate_instance_with_too_long_host(self):
        pass

    def test_evacuate_instance_with_invalid_characters_host(self):
        pass

    def test_evacuate_instance_with_invalid_on_shared_storage(self):
        pass

    def test_evacuate_disable_password_return(self):
        pass

    def test_evacuate_enable_password_return(self):
        pass

    def tet_evacuate_with_non_admin(self):
        # NOTE(review): the misspelled name ("tet_" instead of "test_")
        # means the test runner never executes this method. It also calls
        # `self.controller.evacuate`, which may not match the controller's
        # actual entry point (`_evacuate` elsewhere in this file) -- verify
        # the body before renaming it to re-enable the test.
        self.assertRaises(exception.AdminRequired, self.controller.evacuate,
                          self.req, fakes.FAKE_UUID, {})
class EvacuatePolicyEnforcementv21(test.NoDBTestCase):
    """Verify that the v2.1 evacuate action honours policy rules."""

    def setUp(self):
        super(EvacuatePolicyEnforcementv21, self).setUp()
        self.controller = evacuate_v21.EvacuateController()

    def test_evacuate_policy_failed(self):
        rule_name = "os_compute_api:os-evacuate"
        # Restrict the rule so the request's (fake) project is denied.
        self.policy.set_rules({rule_name: "project:non_fake"})
        request = fakes.HTTPRequest.blank('')
        request_body = {'evacuate': {'host': 'my-host',
                                     'onSharedStorage': 'False',
                                     'adminPass': 'MyNewPass'
                                     }}
        raised = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller._evacuate, request, fakes.FAKE_UUID,
            body=request_body)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            raised.format_message())
| apache-2.0 |
vprime/puuuu | env/lib/python2.7/site-packages/django/core/files/images.py | 117 | 2181 | """
Utility functions for handling images.
Requires Pillow (or PIL), as you might imagine.
"""
import zlib
from django.core.files import File
class ImageFile(File):
    """
    A mixin for use alongside django.core.files.base.File, which provides
    additional features for dealing with images.
    """
    def _get_width(self):
        # Width is the first element of the (width, height) pair.
        return self._get_image_dimensions()[0]
    width = property(_get_width)

    def _get_height(self):
        # Height is the second element of the (width, height) pair.
        return self._get_image_dimensions()[1]
    height = property(_get_height)

    def _get_image_dimensions(self):
        # Parse the file at most once per instance; reopen it if needed and
        # let get_image_dimensions() restore the original open/closed state.
        if not hasattr(self, '_dimensions_cache'):
            close = self.closed
            self.open()
            self._dimensions_cache = get_image_dimensions(self, close=close)
        return self._dimensions_cache
def get_image_dimensions(file_or_path, close=False):
    """
    Returns the (width, height) of an image, given an open file or a path. Set
    'close' to True to close the file at the end if it is initially in an open
    state.
    """
    from django.utils.image import ImageFile as PILImageFile
    parser = PILImageFile.Parser()
    if hasattr(file_or_path, 'read'):
        # Already-open file object: remember the position so it can be
        # restored afterwards, and parse from the beginning.
        fh = file_or_path
        saved_pos = fh.tell()
        fh.seek(0)
    else:
        fh = open(file_or_path, 'rb')
        close = True
    try:
        # PIL usually needs only a small prefix to determine the dimensions,
        # but some TIFF files require the whole file, so keep doubling the
        # amount read until the parser has an image or the data runs out.
        chunk_size = 1024
        while True:
            data = fh.read(chunk_size)
            if not data:
                break
            try:
                parser.feed(data)
            except zlib.error as e:
                # zlib reports a truncated stream as "Error -5"; ignore it
                # and just feed more data to the parser (ticket #19457).
                if not e.args[0].startswith("Error -5"):
                    raise
            if parser.image:
                return parser.image.size
            chunk_size *= 2
        return None
    finally:
        if close:
            fh.close()
        else:
            fh.seek(saved_pos)
| mit |
vansh21k/SMADES | FeatureExtraction/flow.py | 2 | 9716 | '''Generates 5 tuple flows from sorted pcap files ##(srcip,destip,proto,timestamp,srcport,destport)'''
import sys
from operator import itemgetter
import time
from flow_util import *
global sample
global label
global n_max
global n_input_pcaps
UDPTIMEOUT = 2000
TCPTIMEOUT = 2000
n_max = 0
clean_names = ['p2pbox']
mal_names = ['zeus', 'waledac, gtisc-winobot']
print sys.argv[1]
#inp_file = open('flow_input.csv', 'r')
inp_file = open(sys.argv[1], 'r')
#out_file = open('_output_flows','w')
out_file = open(sys.argv[1] + '_output_'+ str(UDPTIMEOUT)+'_flows','w')
out_file.close()
label = ''
for line in clean_names:
if line in sys.argv[1]:
label = line
for line in mal_names:
if line in sys.argv[1]:
label = line
if label == '':
label = 'botnet'
n_input_pcaps = []
#out_file_udp = open('_output_flows_udp','w')
#out_file_tcp = open('_output_flows_tcp','w')
out_file_udp = open(sys.argv[1] + '_output_' + str(UDPTIMEOUT) +'_flows_udp','w')
out_file_tcp = open(sys.argv[1] + '_output_' + str(UDPTIMEOUT) +'_flows_tcp','w')
out_file_udp.close()
out_file_tcp.close()
def generateFlowAttributes(flow):
    '''Generates attributes for flow(SourceIp,DestIP,TimeStamp,Proto,packet_len,SourcePort, DestPort,reconnects)'''  # number of reconnects omitted
    try:
        if len(flow) < 2:  # Ignoring flows with less than 2 packets
            return
        # Delegate feature extraction to flow_util; `label` is the
        # module-level clean/malicious tag derived from the input filename.
        getPacketLevelInfo(flow, label)
    except Exception:
        # NOTE(review): best-effort -- any failure during feature extraction
        # is silently dropped; consider logging before widening use.
        return
def getDistinctFlags():
global n_input_pcaps
n_set = set()
for line in n_input_pcaps:
if line[2] == '6':
if line[7] == 1:
print "Syn found"
n_set.add(tuple(line[6:12]))
print n_set
def getDestPackets(i, dest_ip, source_ip, dest_port, source_port, proto):
    '''Collect the reverse-direction (dest->source) packets that follow
    position ``i`` in the global packet list, so the caller can make its
    flow bidirectional.  Matched rows are tagged 'marked' so later passes
    skip them.  Returns the list of matching rows.'''
    global n_input_pcaps
    index = i + 1
    prev = []
    # Find the first unmarked packet of the same protocol flowing in the
    # opposite direction (dest_ip -> source_ip on the mirrored ports).
    for index in range(i + 1, len(n_input_pcaps)):
        prev = n_input_pcaps[index]
        if prev[2] != proto:
            continue
        if prev[0].strip() == dest_ip and prev[1].strip() == source_ip and prev[-3].strip() == dest_port and prev[-2].strip() == source_port and prev[-1] == 'unmarked':
            break  # found first index
    curr = []
    data = []
    data.append(prev)
    # Accumulate the consecutive reverse-direction packets.
    # NOTE(review): this loop starts at `index` (the row already appended
    # above) and tests `prev` rather than `curr`, so the first row can be
    # appended twice and the per-row direction check never inspects the row
    # being visited -- looks like a latent bug; confirm against expected
    # output before relying on it.
    for k in range(index, len(n_input_pcaps)):
        curr = n_input_pcaps[k]
        if curr[2] != proto or curr[-1] != 'unmarked':
            continue
        if prev[0].strip() == dest_ip and prev[1].strip() == source_ip and prev[-3].strip() == dest_port and prev[-2].strip() == source_port:
            pass
        else:
            return data
        prev[-1] = 'marked'
        data.append(prev)
        prev = curr
    return data
def generateUDPFlowsBi():
    '''To generate bi-directional udp flows'''
    global sample
    global n_max
    # Track the largest flow (in packets) seen so far, for final reporting.
    if n_max < len(sample):
        n_max = len(sample)
    if sample == []:
        return []
    curr_flow = []
    curr_flow.append(sample[0])
    i = 1
    # Split `sample` on inter-packet gaps larger than UDPTIMEOUT; each run
    # of closely spaced packets forms one flow.
    for i in range(1, len(sample)):
        prev = sample[i - 1]
        curr = sample[i]
        if ((float(curr[3]) - float(prev[3])) <= UDPTIMEOUT):
            curr_flow.append(curr)
        else:
            if len(curr_flow) > 0:
                pass
            # NOTE(review): the flow that just timed out is discarded here
            # without being passed to generateFlowAttributes; only the final
            # run (below) is extracted -- verify this is intended.
            curr_flow = []
            curr_flow.append(curr)
        prev = curr
    # Drop the trailing 'marked'/'unmarked' bookkeeping column before
    # feature extraction.
    flow_to_send = []
    for item in curr_flow:
        flow_to_send.append(item[0:-1])
    generateFlowAttributes(flow_to_send)
def generateUDPFlowsUni():
    '''for unidirectional udp flows'''
    # NOTE(review): stub -- this only updates the n_max statistic and
    # returns; unidirectional UDP flows are never actually generated.
    global sample
    global n_max
    if n_max < len(sample):
        n_max = len(sample)
    if sample == []:
        return []
def generateTCPFlowsUtil(index, sourceIp, destIp, sourcePort, destPort):
    '''Walk the global `sample` list from `index`, validate the TCP
    three-way handshake (SYN -> SYN/ACK -> ACK) and collect the packets of
    the resulting connection into one flow, which is handed to
    generateFlowAttributes().  Returns the number of RST-triggered
    reconnects observed (no longer used by the caller).

    BUG FIX: the flag columns parsed from the capture are *strings*, but
    the state-2 branch compared them with the integer 1, so
    `curr[syn_flag] != 1` was always true and the final `== 1` break was
    unreachable: a fresh SYN never terminated the flow.  All comparisons
    now use '1', consistent with the rest of this function.
    '''
    global sample
    number_reconnects = 0
    valid_flow = []
    valid_flowAtoB = []
    valid_FlowBtoA = []
    # Column indices; layout depends on the tshark command used upstream.
    reset_flag = 6
    syn_flag = 7
    fin_flag = 8  # not really reqd
    ack_flag = 9
    curr = []
    # state 0: initial SYN seen, 1: SYN/ACK seen, 2: handshake complete.
    state = 0
    valid_flow.append(sample[index])
    valid_flowAtoB.append(sample[index])
    for i in range(index + 1, len(sample)):
        curr = sample[i]
        if curr[-1] == 'used':
            continue
        if state == 0 and curr[0] == destIp and curr[1] == sourceIp and curr[-3] == destPort and curr[-2] == sourcePort and curr[syn_flag] == '1' and curr[ack_flag] == '1':
            # SYN/ACK from the responder.
            state = 1
            valid_flow.append(curr)
            valid_FlowBtoA.append(curr)
        elif curr[reset_flag] == '1':
            # RST: count a reconnect but keep the packet in the flow.
            number_reconnects = number_reconnects + 1
            if curr[0] == sourceIp:
                valid_flow.append(curr)
                valid_flowAtoB.append(curr)
            elif curr[0] == destIp:
                valid_flow.append(curr)
                valid_FlowBtoA.append(curr)
        elif state == 1 and curr[0] == sourceIp and curr[1] == destIp and curr[-3] == sourcePort and curr[-2] == destPort and curr[ack_flag] == '1':
            # Final ACK of the handshake -- connection established.
            state = 2
            valid_flow.append(curr)
            valid_flowAtoB.append(curr)
        elif state == 2 and curr[syn_flag] != '1':
            # Established-connection traffic in either direction.
            if curr[0] == sourceIp:
                valid_flow.append(curr)
                valid_flowAtoB.append(curr)
            elif curr[0] == destIp:
                valid_flow.append(curr)
                valid_FlowBtoA.append(curr)
        elif curr[syn_flag] == '1':
            # A new SYN marks the start of another connection; stop here.
            # Ungraceful exit, but flag info is carried on the flow itself,
            # so the exit stage does not need separate monitoring.
            break
        curr[-1] = 'used'
    # Keep only (srcip, dstip, proto, ts, len) plus the two port columns.
    flow_to_send = []
    for item in valid_flow:
        curr = []
        curr = item[0:5]
        curr.append(item[-3])
        curr.append(item[-2])
        flow_to_send.append(curr)
    generateFlowAttributes(flow_to_send)
    return number_reconnects
def generateTCPFlows():
    '''Validates TCP flows and Extracts attributes needed for further stages'''
    global sample
    global n_max
    reconnects = 0
    # Track the largest flow (in packets) seen so far, for final reporting.
    if n_max < len(sample):
        n_max = len(sample)
    if sample == []:
        return []
    # Column indices in a packet row (depends on the tshark command).
    syn_flag = 7
    ack_flag = 9
    # Every unused packet with SYN set and ACK clear opens a handshake;
    # hand it to generateTCPFlowsUtil to validate and extract the flow.
    for i in range(len(sample)):
        if sample[i][-1] == 'used':
            continue
        if sample[i][syn_flag] == '1' and sample[i][ack_flag] != '1':
            sample[i][-1] = 'used'
            reconnects = reconnects + generateTCPFlowsUtil(i, sample[i][0], sample[i][1], sample[i][-3], sample[i][-2])
# ---- Load the sorted packet rows; append a per-row bookkeeping flag ----
for line in inp_file:
    n_input_pcaps.append(line.strip().split(','))
    n_input_pcaps[-1].append('unmarked')
inp_file.close()

'''Generate UDP Flows'''
prev = []
sample = []
index = 0
# Skip ahead to the first UDP (protocol 17) row.
for index in range(0, len(n_input_pcaps)):
    if n_input_pcaps[index][2] == '17':
        break
prev = n_input_pcaps[index]
for i in range(index + 1, len(n_input_pcaps)):
    curr = n_input_pcaps[i]
    if curr[2] != '17' or curr[-1] != 'unmarked':
        continue
    sample.append(prev)
    prev[-1] = 'marked'
    if prev[0].strip() == curr[0].strip() and prev[1].strip() == curr[1].strip() and prev[-3].strip() == curr[-3].strip() and prev[-2].strip() == curr[-2].strip():
        pass
    else:
        # 5-tuple changed: close the current flow.  Pull in the reverse
        # direction's packets to make the flow bidirectional, sort by
        # timestamp, then extract and emit.
        ext_sample = getDestPackets(i, prev[1].strip(), prev[0].strip(), prev[-2].strip(), prev[-3].strip(), prev[2].strip())
        if len(ext_sample[0]) > 0:
            sample.extend(ext_sample)
        sample = sorted(sample, key=itemgetter(3))
        generateUDPFlowsBi()
        sample = []
    prev = curr
sample.append(prev)
sample = sorted(sample, key=itemgetter(3))
generateUDPFlowsBi()  # for last flow

'''Generate TCP Flows'''
prev = []
sample = []
index = 0
# Skip ahead to the first TCP (protocol 6) row.
for index in range(0, len(n_input_pcaps)):
    if n_input_pcaps[index][2] == '6':
        break
prev = n_input_pcaps[index]
curr = []
for i in range(index + 1, len(n_input_pcaps)):
    curr = n_input_pcaps[i]
    if curr[2] != '6' or curr[-1] != 'unmarked':
        continue
    prev[-1] = 'marked'
    sample.append(prev)
    if prev[0].strip() == curr[0].strip() and prev[1].strip() == curr[1].strip() and prev[-3].strip() == curr[-3].strip() and prev[-2].strip() == curr[-2].strip():
        pass
    else:
        # 5-tuple changed: make the flow bidirectional, sort by timestamp
        # and validate/extract the TCP flow(s) it contains.
        ext_sample = getDestPackets(i, prev[1].strip(), prev[0].strip(), prev[-2].strip(), prev[-3].strip(), prev[2].strip())
        if len(ext_sample[0]) > 0:
            sample.extend(ext_sample)
        sample = sorted(sample, key=itemgetter(3))
        generateTCPFlows()  # sort by timestamp to get flows
        sample = []
    prev = curr
sample.append(prev)
sample = sorted(sample, key=itemgetter(3))
generateTCPFlows()  # for last flow
# Report the largest per-flow packet count seen across the whole run.
print n_max
| mit |
labordoc/labordoc-next | modules/bibedit/lib/bibedit_webinterface.py | 2 | 12640 | ## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0103
"""Invenio BibEdit Administrator Interface."""
__revision__ = "$Id"
__lastupdated__ = """$Date: 2008/08/12 09:26:46 $"""
from flask.ext.login import current_user
from invenio.jsonutils import json, json_unicode_to_utf8, CFG_JSON_AVAILABLE
from invenio.access_control_engine import acc_authorize_action
from invenio.bibedit_engine import perform_request_ajax, perform_request_init, \
perform_request_newticket, perform_request_compare, \
perform_request_init_template_interface, \
perform_request_ajax_template_interface
from invenio.bibedit_utils import user_can_edit_record_collection
from invenio.config import CFG_SITE_LANG, CFG_SITE_SECURE_URL, CFG_SITE_RECORD
from invenio.messages import gettext_set_language
from invenio.urlutils import redirect_to_url
from invenio.webinterface_handler import WebInterfaceDirectory, wash_urlargd
from invenio.webpage import page
from invenio.webuser import page_not_authorized
navtrail = (' <a class="navtrail" href=\"%s/help/admin\">Admin Area</a> '
) % CFG_SITE_SECURE_URL
navtrail_bibedit = (' <a class="navtrail" href=\"%s/help/admin\">Admin Area</a> ' + \
' > <a class="navtrail" href=\"%s/%s/edit\">Record Editor</a>'
) % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, CFG_SITE_RECORD)
class WebInterfaceEditPages(WebInterfaceDirectory):
"""Defines the set of /edit pages."""
_exports = ['', 'new_ticket', 'compare_revisions', 'templates']
    def __init__(self, recid=None):
        """Initialize, optionally binding this interface to one record id
        (set when the page is reached via /record/<recid>/edit)."""
        self.recid = recid
    def index(self, req, form):
        """Handle all BibEdit requests.
        The responsibilities of this functions is:
        * JSON decoding and encoding.
        * Redirection, if necessary.
        * Authorization.
        * Calling the appropriate function from the engine.
        """
        uid = current_user.get_id()
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
        # Abort if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            title = 'Record Editor'
            body = '''Sorry, the record editor cannot operate when the
                `simplejson' module is not installed. Please see the INSTALL
                file.'''
            return page(title = title,
                        body = body,
                        errors = [],
                        warnings = [],
                        uid = uid,
                        language = argd['ln'],
                        navtrail = navtrail,
                        lastupdated = __lastupdated__,
                        req = req,
                        body_css_classes = ['bibedit'])
        # If it is an Ajax request, extract any JSON data.
        ajax_request, recid = False, None
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            if json_data.has_key('recID'):
                recid = json_data['recID']
            json_response = {'resultCode': 0, 'ID': json_data['ID']}
        # Authorization.
        if current_user.is_guest:
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                dummy_auth_code, auth_message = acc_authorize_action(req,
                                                                     'runbibedit')
                referer = '/edit/'
                if self.recid:
                    referer = '/%s/%s/edit/' % (CFG_SITE_RECORD, self.recid)
                return page_not_authorized(req=req, referer=referer,
                                           text=auth_message, navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({'resultCode': 100})
                return json.dumps(json_response)
        elif self.recid:
            # Handle RESTful calls from logged in users by redirecting to
            # generic URL.
            redirect_to_url(req, '%s/%s/edit/#state=edit&recid=%s&recrev=%s' % (
                CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, ""))
        elif recid is not None:
            # recid is only set for AJAX requests, so json_response exists.
            json_response.update({'recID': recid})
            if json_data['requestType'] == "getRecord":
                # Authorize access to record.
                if not user_can_edit_record_collection(req, recid):
                    json_response.update({'resultCode': 101})
                    return json.dumps(json_response)
        # Handle request.
        if not ajax_request:
            # Show BibEdit start page.
            body, errors, warnings = perform_request_init(uid, argd['ln'], req, __lastupdated__)
            title = 'Record Editor'
            return page(title = title,
                        body = body,
                        errors = errors,
                        warnings = warnings,
                        uid = uid,
                        language = argd['ln'],
                        navtrail = navtrail,
                        lastupdated = __lastupdated__,
                        req = req,
                        body_css_classes = ['bibedit'])
        else:
            # Handle AJAX request.
            json_response.update(perform_request_ajax(req, recid, uid,
                                                      json_data))
            return json.dumps(json_response)
    def compare_revisions(self, req, form):
        """Handle the compare revisions request"""
        argd = wash_urlargd(form, { \
            'ln': (str, CFG_SITE_LANG), \
            'rev1' : (str, ''), \
            'rev2' : (str, ''), \
            'recid': (int, 0)})
        ln = argd['ln']
        uid = current_user.get_id()
        _ = gettext_set_language(ln)
        # Checking if currently logged user has permission to perform this request
        auth_code, auth_message = acc_authorize_action(req, 'runbibedit')
        if auth_code != 0:
            return page_not_authorized(req=req, referer="/edit",
                                       text=auth_message, navtrail=navtrail)
        recid = argd['recid']
        rev1 = argd['rev1']
        rev2 = argd['rev2']
        ln = argd['ln']
        # Render the side-by-side diff of the two record revisions.
        body, errors, warnings = perform_request_compare(ln, recid, rev1, rev2)
        return page(title = _("Comparing two record revisions"),
                    body = body,
                    errors = errors,
                    warnings = warnings,
                    uid = uid,
                    language = ln,
                    navtrail = navtrail,
                    lastupdated = __lastupdated__,
                    req = req,
                    body_css_classes = ['bibedit'])
def new_ticket(self, req, form):
"""handle a edit/new_ticket request"""
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG), 'recid': (int, 0)})
ln = argd['ln']
_ = gettext_set_language(ln)
auth_code, auth_message = acc_authorize_action(req, 'runbibedit')
if auth_code != 0:
return page_not_authorized(req=req, referer="/edit",
text=auth_message, navtrail=navtrail)
uid = current_user.get_id()
if argd['recid']:
(errmsg, url) = perform_request_newticket(argd['recid'], uid)
if errmsg:
return page(title = _("Failed to create a ticket"),
body = _("Error")+": "+errmsg,
errors = [],
warnings = [],
uid = uid,
language = ln,
navtrail = navtrail,
lastupdated = __lastupdated__,
req = req,
body_css_classes = ['bibedit'])
else:
#redirect..
redirect_to_url(req, url)
    def templates(self, req, form):
        """Handle an edit/templates request.

        Serves the BibEdit template management interface.  Plain requests
        render the start page; requests carrying a ``jsondata`` field are
        treated as AJAX calls and answered with JSON.  Guests get a
        "not authorized" page, or result code 100 for AJAX calls (which
        the client interprets as a timed-out session).
        """
        uid = current_user.get_id()
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
        # Abort if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            title = 'Record Editor Template Manager'
            body = '''Sorry, the record editor cannot operate when the
                `simplejson' module is not installed. Please see the INSTALL
                file.'''
            return page(title = title,
                        body = body,
                        errors = [],
                        warnings = [],
                        uid = uid,
                        language = argd['ln'],
                        navtrail = navtrail_bibedit,
                        lastupdated = __lastupdated__,
                        req = req,
                        body_css_classes = ['bibedit'])
        # If it is an Ajax request, extract any JSON data.
        ajax_request = False
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
        json_response = {'resultCode': 0}
        # Authorization.
        if current_user.is_guest:
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                dummy_auth_code, auth_message = acc_authorize_action(req,
                                                                    'runbibedit')
                referer = '/edit'
                return page_not_authorized(req=req, referer=referer,
                                           text=auth_message, navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({'resultCode': 100})
                return json.dumps(json_response)
        # Handle request.
        if not ajax_request:
            # Show BibEdit template management start page.
            body, errors, warnings = perform_request_init_template_interface()
            title = 'Record Editor Template Manager'
            return page(title = title,
                        body = body,
                        errors = errors,
                        warnings = warnings,
                        uid = uid,
                        language = argd['ln'],
                        navtrail = navtrail_bibedit,
                        lastupdated = __lastupdated__,
                        req = req,
                        body_css_classes = ['bibedit'])
        else:
            # Handle AJAX request.
            json_response.update(perform_request_ajax_template_interface(json_data))
            return json.dumps(json_response)
def __call__(self, req, form):
"""Redirect calls without final slash."""
if self.recid:
redirect_to_url(req, '%s/%s/%s/edit/' % (CFG_SITE_SECURE_URL,
CFG_SITE_RECORD,
self.recid))
else:
redirect_to_url(req, '%s/%s/edit/' % (CFG_SITE_SECURE_URL, CFG_SITE_RECORD))
| gpl-2.0 |
ycl2045/nova-master | nova/openstack/common/gettextutils.py | 8 | 18772 | # Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from nova.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
import re
from babel import localedata
import six
# Locale directory can be overridden via the NOVA_LOCALEDIR env variable.
_localedir = os.environ.get('nova'.upper() + '_LOCALEDIR')
# Default (non-lazy) translator for the main 'nova' domain.
_t = gettext.translation('nova', localedir=_localedir, fallback=True)
# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
    (level, gettext.translation('nova' + '-log-' + level,
                                localedir=_localedir,
                                fallback=True))
    for level in ['info', 'warning', 'error', 'critical']
)
# Cache for get_available_languages(), keyed by translation domain.
_AVAILABLE_LANGUAGES = {}
# Module-level switch flipped by enable_lazy(); controls whether _()
# returns lazily-translated Message objects or plain translated strings.
USE_LAZY = False
def enable_lazy():
    """Switch the module-level _() function into lazy-translation mode.

    After this call, _() returns Message objects whose actual translation
    is deferred until Message.translate() is invoked.  Useful for projects
    that import _ directly instead of going through gettextutils.install().
    Call once, at the start of execution.
    """
    global USE_LAZY
    USE_LAZY = True
def _(msg):
    """Translate *msg*, returning a lazy Message when lazy mode is enabled."""
    if USE_LAZY:
        return Message(msg, domain='nova')
    # Python 3 gettext has no ugettext; everything is unicode already.
    translator = _t.gettext if six.PY3 else _t.ugettext
    return translator(msg)
def _log_translation(msg, level):
    """Translate *msg* using the catalog dedicated to the given log level."""
    if not USE_LAZY:
        translator = _t_log_levels[level]
        if six.PY3:
            return translator.gettext(msg)
        return translator.ugettext(msg)
    # Lazy mode: defer translation via a Message in the per-level domain.
    return Message(msg, domain='nova' + '-log-' + level)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.  Each one binds _log_translation to its own gettext
# catalog (see _t_log_levels above).
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
def install(domain, lazy=False):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    :param domain: the translation domain
    :param lazy: indicates whether or not to install the lazy _() function.
                 The lazy _() introduces a way to do deferred translation
                 of messages by installing a _ that builds Message objects,
                 instead of strings, which can then be lazily translated into
                 any available locale.
    """
    if lazy:
        # NOTE(mrodden): Lazy gettext functionality.
        #
        # The following introduces a deferred way to do translations on
        # messages in OpenStack. We override the standard _() function
        # and % (format string) operation to build Message objects that can
        # later be translated when we have more information.
        def _lazy_gettext(msg):
            """Create and return a Message object.

            Lazy gettext function for a given domain, it is a factory method
            for a project/module to get a lazy gettext function for its own
            translation domain (i.e. nova, glance, cinder, etc.)

            Message encapsulates a string so that we can translate
            it later when needed.
            """
            return Message(msg, domain=domain)
        from six import moves
        # Install into the builtins namespace so every module sees this _().
        moves.builtins.__dict__['_'] = _lazy_gettext
    else:
        localedir = '%s_LOCALEDIR' % domain.upper()
        if six.PY3:
            gettext.install(domain,
                            localedir=os.environ.get(localedir))
        else:
            # Python 2 gettext needs unicode=True to return unicode strings.
            gettext.install(domain,
                            localedir=os.environ.get(localedir),
                            unicode=True)
class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """
    def __new__(cls, msgid, msgtext=None, params=None,
                domain='nova', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        # msgid: the original (English) format string; domain: the gettext
        # domain; params: substitution args captured by __mod__, if any.
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg
    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.

        :returns: the translated message in unicode
        """
        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message
        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)
        translated_message = translated_message % translated_params
        return translated_message
    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        """Return msgid translated into desired_locale (or the system locale)."""
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]
        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext
        translated_message = translator(msgid)
        return translated_message
    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing  we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        # Return a new Message carrying the formatted text plus everything
        # needed to redo the formatting after a later translation.
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded
    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
          those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
          translated, it will be used as it was when the Message was created
        """
        if other is None:
            params = (other,)
        elif isinstance(other, dict):
            params = self._trim_dictionary_parameters(other)
        else:
            params = self._copy_param(other)
        return params
    def _trim_dictionary_parameters(self, dict_param):
        """Return a dict that only has matching entries in the msgid."""
        # NOTE(luisg): Here we trim down the dictionary passed as parameters
        # to avoid carrying a lot of unnecessary weight around in the message
        # object, for example if someone passes in Message() % locals() but
        # only some params are used, and additionally we prevent errors for
        # non-deepcopyable objects by unicoding() them.

        # Look for %(param) keys in msgid;
        # Skip %% and deal with the case where % is first character on the line
        keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
        # If we don't find any %(param) keys but have a %s
        if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
            # Apparently the full dictionary is the parameter
            params = self._copy_param(dict_param)
        else:
            params = {}
            # Save our existing parameters as defaults to protect
            # ourselves from losing values if we are called through an
            # (erroneous) chain that builds a valid Message with
            # arguments, and then does something like "msg % kwds"
            # where kwds is an empty dictionary.
            src = {}
            if isinstance(self.params, dict):
                src.update(self.params)
            src.update(dict_param)
            for key in keys:
                params[key] = self._copy_param(src[key])
        return params
    def _copy_param(self, param):
        """Deep-copy a substitution parameter, degrading to unicode on failure."""
        try:
            return copy.deepcopy(param)
        except TypeError:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)
    def __add__(self, other):
        # Concatenation would lose the msgid needed for later translation.
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)
    def __radd__(self, other):
        return self.__add__(other)
    def __str__(self):
        # NOTE(luisg): Logging in python 2.6 tries to str() log records,
        # and it expects specifically a UnicodeError in order to proceed.
        msg = _('Message objects do not support str() because they may '
                'contain non-ascii characters. '
                'Please use unicode() or translate() instead.')
        raise UnicodeError(msg)
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    :returns: list of locale identifiers, always starting with 'en_US';
              a fresh copy is returned each call (results are cached
              internally per domain)
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()

    # PEP 8: use a def rather than assigning a lambda to a name.
    def find(x):
        return gettext.find(domain,
                            localedir=os.environ.get(localedir),
                            languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()
    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    #     https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    # FIX: the loop variable was named 'locale', shadowing the imported
    # 'locale' module; renamed to avoid confusion and future breakage.
    for (locale_id, alias) in six.iteritems(aliases):
        if locale_id in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.

    Objects that are not translatable are returned unchanged.  When
    *desired_locale* is None the system default locale is used.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
                           default system locale will be used
    :returns: the translated object in unicode, or the original object if
              it could not be translated
    """
    # Force a unicode representation first; an object's __unicode__ hook may
    # itself yield a Message, so the type is re-checked afterwards.
    message = obj if isinstance(obj, Message) else six.text_type(obj)
    if isinstance(message, Message):
        return message.translate(desired_locale)
    return obj
def _translate_args(args, desired_locale=None):
    """Translates all the translatable elements of the given arguments object.

    Handles the shapes that logging/format arguments come in: a tuple, a
    dictionary, or a single bare value.  Non-translatable elements pass
    through untouched.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
                           default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(v, desired_locale) for v in args)
    if isinstance(args, dict):
        return dict((k, translate(v, desired_locale))
                    for k, v in six.iteritems(args))
    return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.

    The TranslationHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating them. This handler
    depends on Message objects being logged, instead of regular strings.

    The handler can be configured declaratively in the logging.conf as follows:

        [handlers]
        keys = translatedlog, translator

        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context

        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)

    If the specified locale is not available in the system, the handler will
    log in the default locale.
    """
    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing is implemented such that it accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale
    def setFormatter(self, fmt):
        """Delegate formatting to the wrapped target handler."""
        self.target.setFormatter(fmt)
    def emit(self, record):
        """Translate the record, forward it, then restore the original text."""
        # We save the message from the original record to restore it
        # after translation, so other handlers are not affected by this
        original_msg = record.msg
        original_args = record.args
        try:
            self._translate_and_log_record(record)
        finally:
            # Restore even if translation or the target handler raised.
            record.msg = original_msg
            record.args = original_args
    def _translate_and_log_record(self, record):
        # Translate the message itself, then its substitution arguments.
        record.msg = translate(record.msg, self.locale)
        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one))
        record.args = _translate_args(record.args, self.locale)
        self.target.emit(record)
| apache-2.0 |
M4573R/BerkeleyX-CS188.1x-Artificial-Intelligence | reinforcement/graphicsDisplay.py | 7 | 28071 | # graphicsDisplay.py
# ------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from graphicsUtils import *
import math, time
from game import Directions
###########################
# GRAPHICS DISPLAY CODE #
###########################
# Most code by Dan Klein and John Denero written or rewritten for cs188, UC Berkeley.
# Some code from a Pacman implementation by LiveWires, and used / modified with permission.
# Geometry of the display: pixel size of one maze cell and of the
# score strip drawn beneath the maze.
DEFAULT_GRID_SIZE = 30.0
INFO_PANE_HEIGHT = 35
BACKGROUND_COLOR = formatColor(0,0,0)
WALL_COLOR = formatColor(0.0/255.0, 51.0/255.0, 255.0/255.0)
INFO_PANE_COLOR = formatColor(.4,.4,0)
SCORE_COLOR = formatColor(.9, .9, .9)
PACMAN_OUTLINE_WIDTH = 2
PACMAN_CAPTURE_OUTLINE_WIDTH = 4
# One colour per ghost index; also used for agent colours in capture games.
GHOST_COLORS = []
GHOST_COLORS.append(formatColor(.9,0,0)) # Red
GHOST_COLORS.append(formatColor(0,.3,.9)) # Blue
GHOST_COLORS.append(formatColor(.98,.41,.07)) # Orange
GHOST_COLORS.append(formatColor(.1,.75,.7)) # Green
GHOST_COLORS.append(formatColor(1.0,0.6,0.0)) # Yellow
GHOST_COLORS.append(formatColor(.4,0.13,0.91)) # Purple
TEAM_COLORS = GHOST_COLORS[:2]
# Polygon outline of a ghost, in cell-relative coordinates.
GHOST_SHAPE = [
    ( 0,    0.3 ),
    ( 0.25, 0.75 ),
    ( 0.5,  0.3 ),
    ( 0.75, 0.75 ),
    ( 0.75, -0.5 ),
    ( 0.5,  -0.75 ),
    (-0.5,  -0.75 ),
    (-0.75, -0.5 ),
    (-0.75, 0.75 ),
    (-0.5,  0.3 ),
    (-0.25, 0.75 )
  ]
GHOST_SIZE = 0.65
SCARED_COLOR = formatColor(1,1,1)
# NOTE: map() returns a list here — this file is Python 2 code (see the
# print statement in animatePacman below).
GHOST_VEC_COLORS = map(colorToVector, GHOST_COLORS)
PACMAN_COLOR = formatColor(255.0/255.0,255.0/255.0,61.0/255)
PACMAN_SCALE = 0.5
#pacman_speed = 0.25
# Food
FOOD_COLOR = formatColor(1,1,1)
FOOD_SIZE = 0.1
# Laser
LASER_COLOR = formatColor(1,0,0)
LASER_SIZE = 0.02
# Capsule graphics
CAPSULE_COLOR = formatColor(1,1,1)
CAPSULE_SIZE = 0.25
# Drawing walls
WALL_RADIUS = 0.15
class InfoPane:
    """The status strip below the maze: score, team label, ghost distances."""

    def __init__(self, layout, gridSize):
        self.gridSize = gridSize
        self.width = (layout.width) * gridSize
        # Pixel y of the pane's top edge (one cell below the maze).
        self.base = (layout.height + 1) * gridSize
        self.height = INFO_PANE_HEIGHT
        self.fontSize = 24
        self.textColor = PACMAN_COLOR
        self.drawPane()

    def toScreen(self, pos, y = None):
        """
        Translates a point relative from the bottom left of the info pane.
        """
        if y is None:  # idiom fix: identity comparison instead of '== None'
            x, y = pos
        else:
            x = pos
        x = self.gridSize + x # Margin
        y = self.base + y
        return x, y

    def drawPane(self):
        # Initial score readout; updated in place by updateScore().
        self.scoreText = text(self.toScreen(0, 0), self.textColor, "SCORE: 0", "Times", self.fontSize, "bold")

    def initializeGhostDistances(self, distances):
        """Create one distance readout per ghost, shrinking on narrow panes."""
        self.ghostDistanceText = []
        size = 20
        if self.width < 240:
            size = 12
        if self.width < 160:
            size = 10
        for i, d in enumerate(distances):
            t = text(self.toScreen(self.width/2 + self.width/8 * i, 0), GHOST_COLORS[i+1], d, "Times", size, "bold")
            self.ghostDistanceText.append(t)

    def updateScore(self, score):
        changeText(self.scoreText, "SCORE: % 4d" % score)

    def setTeam(self, isBlue):
        # BUGFIX: the original bound a local *string* named 'text', shadowing
        # the graphicsUtils text() drawing function and making the call below
        # raise "'str' object is not callable".  Use a distinct local name.
        label = "RED TEAM"
        if isBlue: label = "BLUE TEAM"
        self.teamText = text(self.toScreen(300, 0), self.textColor, label, "Times", self.fontSize, "bold")

    def updateGhostDistances(self, distances):
        """Refresh the per-ghost readouts, creating them on first call."""
        if len(distances) == 0: return
        if 'ghostDistanceText' not in dir(self): self.initializeGhostDistances(distances)
        else:
            for i, d in enumerate(distances):
                changeText(self.ghostDistanceText[i], d)

    # The remaining hooks are intentionally no-ops for this pane.
    def drawGhost(self):
        pass

    def drawPacman(self):
        pass

    def drawWarning(self):
        pass

    def clearIcon(self):
        pass

    def updateMessage(self, message):
        pass

    def clearMessage(self):
        pass
class PacmanGraphics:
def __init__(self, zoom=1.0, frameTime=0.0, capture=False):
self.have_window = 0
self.currentGhostImages = {}
self.pacmanImage = None
self.zoom = zoom
self.gridSize = DEFAULT_GRID_SIZE * zoom
self.capture = capture
self.frameTime = frameTime
    def checkNullDisplay(self):
        # This is a real (graphical) display, so never a null display.
        return False
    def initialize(self, state, isBlue = False):
        """Open the window and draw the full initial game state."""
        self.isBlue = isBlue
        self.startGraphics(state)

        # self.drawDistributions(state)
        self.distributionImages = None  # Initialized lazily
        self.drawStaticObjects(state)
        self.drawAgentObjects(state)

        # Information: kept so update() can animate relative to the last state.
        self.previousState = state
def startGraphics(self, state):
self.layout = state.layout
layout = self.layout
self.width = layout.width
self.height = layout.height
self.make_window(self.width, self.height)
self.infoPane = InfoPane(layout, self.gridSize)
self.currentState = layout
def drawDistributions(self, state):
walls = state.layout.walls
dist = []
for x in range(walls.width):
distx = []
dist.append(distx)
for y in range(walls.height):
( screen_x, screen_y ) = self.to_screen( (x, y) )
block = square( (screen_x, screen_y),
0.5 * self.gridSize,
color = BACKGROUND_COLOR,
filled = 1, behind=2)
distx.append(block)
self.distributionImages = dist
def drawStaticObjects(self, state):
layout = self.layout
self.drawWalls(layout.walls)
self.food = self.drawFood(layout.food)
self.capsules = self.drawCapsules(layout.capsules)
refresh()
def drawAgentObjects(self, state):
self.agentImages = [] # (agentState, image)
for index, agent in enumerate(state.agentStates):
if agent.isPacman:
image = self.drawPacman(agent, index)
self.agentImages.append( (agent, image) )
else:
image = self.drawGhost(agent, index)
self.agentImages.append( (agent, image) )
refresh()
def swapImages(self, agentIndex, newState):
"""
Changes an image from a ghost to a pacman or vis versa (for capture)
"""
prevState, prevImage = self.agentImages[agentIndex]
for item in prevImage: remove_from_screen(item)
if newState.isPacman:
image = self.drawPacman(newState, agentIndex)
self.agentImages[agentIndex] = (newState, image )
else:
image = self.drawGhost(newState, agentIndex)
self.agentImages[agentIndex] = (newState, image )
refresh()
def update(self, newState):
agentIndex = newState._agentMoved
agentState = newState.agentStates[agentIndex]
if self.agentImages[agentIndex][0].isPacman != agentState.isPacman: self.swapImages(agentIndex, agentState)
prevState, prevImage = self.agentImages[agentIndex]
if agentState.isPacman:
self.animatePacman(agentState, prevState, prevImage)
else:
self.moveGhost(agentState, agentIndex, prevState, prevImage)
self.agentImages[agentIndex] = (agentState, prevImage)
if newState._foodEaten != None:
self.removeFood(newState._foodEaten, self.food)
if newState._capsuleEaten != None:
self.removeCapsule(newState._capsuleEaten, self.capsules)
self.infoPane.updateScore(newState.score)
if 'ghostDistances' in dir(newState):
self.infoPane.updateGhostDistances(newState.ghostDistances)
def make_window(self, width, height):
grid_width = (width-1) * self.gridSize
grid_height = (height-1) * self.gridSize
screen_width = 2*self.gridSize + grid_width
screen_height = 2*self.gridSize + grid_height + INFO_PANE_HEIGHT
begin_graphics(screen_width,
screen_height,
BACKGROUND_COLOR,
"CS188 Pacman")
    def drawPacman(self, pacman, index):
        """Draw pacman as a partial circle (the missing arc is the mouth)."""
        position = self.getPosition(pacman)
        screen_point = self.to_screen(position)
        endpoints = self.getEndpoints(self.getDirection(pacman))

        width = PACMAN_OUTLINE_WIDTH
        outlineColor = PACMAN_COLOR
        fillColor = PACMAN_COLOR

        if self.capture:
            # Capture mode: body takes the agent's colour, outline the team's.
            outlineColor = TEAM_COLORS[index % 2]
            fillColor = GHOST_COLORS[index]
            width = PACMAN_CAPTURE_OUTLINE_WIDTH

        return [circle(screen_point, PACMAN_SCALE * self.gridSize,
                       fillColor = fillColor, outlineColor = outlineColor,
                       endpoints = endpoints,
                       width = width)]
def getEndpoints(self, direction, position=(0,0)):
x, y = position
pos = x - int(x) + y - int(y)
width = 30 + 80 * math.sin(math.pi* pos)
delta = width / 2
if (direction == 'West'):
endpoints = (180+delta, 180-delta)
elif (direction == 'North'):
endpoints = (90+delta, 90-delta)
elif (direction == 'South'):
endpoints = (270+delta, 270-delta)
else:
endpoints = (0+delta, 0-delta)
return endpoints
def movePacman(self, position, direction, image):
screenPosition = self.to_screen(position)
endpoints = self.getEndpoints( direction, position )
r = PACMAN_SCALE * self.gridSize
moveCircle(image[0], screenPosition, r, endpoints)
refresh()
    def animatePacman(self, pacman, prevPacman, image):
        """Slide pacman from its previous position to the new one.

        A negative frameTime single-steps the display, waiting for a
        keypress between moves ('q' switches back to continuous play).
        """
        if self.frameTime < 0:
            print 'Press any key to step forward, "q" to play'
            keys = wait_for_keys()
            if 'q' in keys:
                self.frameTime = 0.1
        if self.frameTime > 0.01 or self.frameTime < 0:
            start = time.time()  # NOTE(review): 'start' appears unused here
            fx, fy = self.getPosition(prevPacman)
            px, py = self.getPosition(pacman)
            frames = 4.0
            # Linear interpolation over `frames` intermediate positions.
            for i in range(1,int(frames) + 1):
                pos = px*i/frames + fx*(frames-i)/frames, py*i/frames + fy*(frames-i)/frames
                self.movePacman(pos, self.getDirection(pacman), image)
                refresh()
                sleep(abs(self.frameTime) / frames)
        else:
            # Animation disabled: jump straight to the final position.
            self.movePacman(self.getPosition(pacman), self.getDirection(pacman), image)
        refresh()
def getGhostColor(self, ghost, ghostIndex):
if ghost.scaredTimer > 0:
return SCARED_COLOR
else:
return GHOST_COLORS[ghostIndex]
    def drawGhost(self, ghost, agentIndex):
        """Draw a ghost: body polygon plus two eyes, each with a pupil.

        Returns the list of canvas parts in a fixed order — body first,
        then eyes/pupils — which moveGhost/moveEyes rely on.
        """
        pos = self.getPosition(ghost)
        dir = self.getDirection(ghost)
        (screen_x, screen_y) = (self.to_screen(pos) )
        # Scale the unit GHOST_SHAPE polygon to the cell size.
        coords = []
        for (x, y) in GHOST_SHAPE:
            coords.append((x*self.gridSize*GHOST_SIZE + screen_x, y*self.gridSize*GHOST_SIZE + screen_y))

        colour = self.getGhostColor(ghost, agentIndex)
        body = polygon(coords, colour, filled = 1)
        WHITE = formatColor(1.0, 1.0, 1.0)
        BLACK = formatColor(0.0, 0.0, 0.0)

        # Offset the eyes/pupils toward the direction of travel.
        dx = 0
        dy = 0
        if dir == 'North':
            dy = -0.2
        if dir == 'South':
            dy = 0.2
        if dir == 'East':
            dx = 0.2
        if dir == 'West':
            dx = -0.2
        leftEye = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
        rightEye = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
        leftPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
        rightPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
        ghostImageParts = []
        ghostImageParts.append(body)
        ghostImageParts.append(leftEye)
        ghostImageParts.append(rightEye)
        ghostImageParts.append(leftPupil)
        ghostImageParts.append(rightPupil)

        return ghostImageParts
    def moveEyes(self, pos, dir, eyes):
        """Re-aim a ghost's eyes/pupils toward its direction of travel.

        `eyes` is the four-element slice [leftEye, rightEye, leftPupil,
        rightPupil] produced by drawGhost, in that order.
        """
        (screen_x, screen_y) = (self.to_screen(pos) )
        # Same directional offsets as in drawGhost.
        dx = 0
        dy = 0
        if dir == 'North':
            dy = -0.2
        if dir == 'South':
            dy = 0.2
        if dir == 'East':
            dx = 0.2
        if dir == 'West':
            dx = -0.2
        moveCircle(eyes[0],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
        moveCircle(eyes[1],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
        moveCircle(eyes[2],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
        moveCircle(eyes[3],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
def moveGhost(self, ghost, ghostIndex, prevGhost, ghostImageParts):
old_x, old_y = self.to_screen(self.getPosition(prevGhost))
new_x, new_y = self.to_screen(self.getPosition(ghost))
delta = new_x - old_x, new_y - old_y
for ghostImagePart in ghostImageParts:
move_by(ghostImagePart, delta)
refresh()
if ghost.scaredTimer > 0:
color = SCARED_COLOR
else:
color = GHOST_COLORS[ghostIndex]
edit(ghostImageParts[0], ('fill', color), ('outline', color))
self.moveEyes(self.getPosition(ghost), self.getDirection(ghost), ghostImageParts[-4:])
refresh()
def getPosition(self, agentState):
if agentState.configuration == None: return (-1000, -1000)
return agentState.getPosition()
def getDirection(self, agentState):
if agentState.configuration == None: return Directions.STOP
return agentState.configuration.getDirection()
    def finish(self):
        """Close the graphics window."""
        end_graphics()
def to_screen(self, point):
( x, y ) = point
#y = self.height - y
x = (x + 1)*self.gridSize
y = (self.height - y)*self.gridSize
return ( x, y )
    # Fixes some TK issue with off-center circles
    def to_screen2(self, point):
        """Same mapping as to_screen().

        NOTE(review): the body is currently byte-identical to to_screen();
        the comment above suggests it once differed — confirm before merging.
        """
        ( x, y ) = point
        #y = self.height - y
        x = (x + 1)*self.gridSize
        y = (self.height - y)*self.gridSize
        return ( x, y )
    def drawWalls(self, wallMatrix):
        """Draw the maze walls as smooth, rounded segments.

        Each wall cell is rendered quadrant by quadrant (NE, NW, SE, SW),
        choosing among three shapes per quadrant based on which neighboring
        cells are also walls:
          * neither neighbor is a wall  -> small inner arc (convex corner)
          * exactly one neighbor        -> straight edge line
          * both neighbors but not the diagonal -> outer arc plus two
            connecting lines (concave corner)
        In capture mode, walls on each half of the board are drawn in that
        team's color.
        """
        wallColor = WALL_COLOR
        for xNum, x in enumerate(wallMatrix):
            # Capture mode: color by board half (left half team 0, right half team 1).
            if self.capture and (xNum * 2) < wallMatrix.width: wallColor = TEAM_COLORS[0]
            if self.capture and (xNum * 2) >= wallMatrix.width: wallColor = TEAM_COLORS[1]
            for yNum, cell in enumerate(x):
                if cell: # There's a wall here
                    pos = (xNum, yNum)
                    screen = self.to_screen(pos)
                    screen2 = self.to_screen2(pos)
                    # draw each quadrant of the square based on adjacent walls
                    wIsWall = self.isWall(xNum-1, yNum, wallMatrix)
                    eIsWall = self.isWall(xNum+1, yNum, wallMatrix)
                    nIsWall = self.isWall(xNum, yNum+1, wallMatrix)
                    sIsWall = self.isWall(xNum, yNum-1, wallMatrix)
                    nwIsWall = self.isWall(xNum-1, yNum+1, wallMatrix)
                    swIsWall = self.isWall(xNum-1, yNum-1, wallMatrix)
                    neIsWall = self.isWall(xNum+1, yNum+1, wallMatrix)
                    seIsWall = self.isWall(xNum+1, yNum-1, wallMatrix)
                    # NE quadrant
                    if (not nIsWall) and (not eIsWall):
                        # inner circle
                        circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (0,91), 'arc')
                    if (nIsWall) and (not eIsWall):
                        # vertical line
                        line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
                    if (not nIsWall) and (eIsWall):
                        # horizontal line
                        line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
                    if (nIsWall) and (eIsWall) and (not neIsWall):
                        # outer circle
                        circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (180,271), 'arc')
                        line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
                        line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
                    # NW quadrant
                    if (not nIsWall) and (not wIsWall):
                        # inner circle
                        circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (90,181), 'arc')
                    if (nIsWall) and (not wIsWall):
                        # vertical line
                        line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
                    if (not nIsWall) and (wIsWall):
                        # horizontal line
                        line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
                    if (nIsWall) and (wIsWall) and (not nwIsWall):
                        # outer circle
                        circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (270,361), 'arc')
                        line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(-1)*WALL_RADIUS)), wallColor)
                        line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
                    # SE quadrant
                    if (not sIsWall) and (not eIsWall):
                        # inner circle
                        circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (270,361), 'arc')
                    if (sIsWall) and (not eIsWall):
                        # vertical line
                        line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
                    if (not sIsWall) and (eIsWall):
                        # horizontal line
                        line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
                    if (sIsWall) and (eIsWall) and (not seIsWall):
                        # outer circle
                        circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (90,181), 'arc')
                        line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5, self.gridSize*(1)*WALL_RADIUS)), wallColor)
                        line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
                    # SW quadrant
                    if (not sIsWall) and (not wIsWall):
                        # inner circle
                        circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (180,271), 'arc')
                    if (sIsWall) and (not wIsWall):
                        # vertical line
                        line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
                    if (not sIsWall) and (wIsWall):
                        # horizontal line
                        line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
                    if (sIsWall) and (wIsWall) and (not swIsWall):
                        # outer circle
                        circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (0,91), 'arc')
                        line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(1)*WALL_RADIUS)), wallColor)
                        line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
def isWall(self, x, y, walls):
if x < 0 or y < 0:
return False
if x >= walls.width or y >= walls.height:
return False
return walls[x][y]
def drawFood(self, foodMatrix ):
foodImages = []
color = FOOD_COLOR
for xNum, x in enumerate(foodMatrix):
if self.capture and (xNum * 2) <= foodMatrix.width: color = TEAM_COLORS[0]
if self.capture and (xNum * 2) > foodMatrix.width: color = TEAM_COLORS[1]
imageRow = []
foodImages.append(imageRow)
for yNum, cell in enumerate(x):
if cell: # There's food here
screen = self.to_screen((xNum, yNum ))
dot = circle( screen,
FOOD_SIZE * self.gridSize,
outlineColor = color, fillColor = color,
width = 1)
imageRow.append(dot)
else:
imageRow.append(None)
return foodImages
def drawCapsules(self, capsules ):
capsuleImages = {}
for capsule in capsules:
( screen_x, screen_y ) = self.to_screen(capsule)
dot = circle( (screen_x, screen_y),
CAPSULE_SIZE * self.gridSize,
outlineColor = CAPSULE_COLOR,
fillColor = CAPSULE_COLOR,
width = 1)
capsuleImages[capsule] = dot
return capsuleImages
def removeFood(self, cell, foodImages ):
x, y = cell
remove_from_screen(foodImages[x][y])
def removeCapsule(self, cell, capsuleImages ):
x, y = cell
remove_from_screen(capsuleImages[(x, y)])
def drawExpandedCells(self, cells):
"""
Draws an overlay of expanded grid positions for search agents
"""
n = float(len(cells))
baseColor = [1.0, 0.0, 0.0]
self.clearExpandedCells()
self.expandedCells = []
for k, cell in enumerate(cells):
screenPos = self.to_screen( cell)
cellColor = formatColor(*[(n-k) * c * .5 / n + .25 for c in baseColor])
block = square(screenPos,
0.5 * self.gridSize,
color = cellColor,
filled = 1, behind=2)
self.expandedCells.append(block)
if self.frameTime < 0:
refresh()
def clearExpandedCells(self):
if 'expandedCells' in dir(self) and len(self.expandedCells) > 0:
for cell in self.expandedCells:
remove_from_screen(cell)
def updateDistributions(self, distributions):
"Draws an agent's belief distributions"
# copy all distributions so we don't change their state
distributions = map(lambda x: x.copy(), distributions)
if self.distributionImages == None:
self.drawDistributions(self.previousState)
for x in range(len(self.distributionImages)):
for y in range(len(self.distributionImages[0])):
image = self.distributionImages[x][y]
weights = [dist[ (x,y) ] for dist in distributions]
if sum(weights) != 0:
pass
# Fog of war
color = [0.0,0.0,0.0]
colors = GHOST_VEC_COLORS[1:] # With Pacman
if self.capture: colors = GHOST_VEC_COLORS
for weight, gcolor in zip(weights, colors):
color = [min(1.0, c + 0.95 * g * weight ** .3) for c,g in zip(color, gcolor)]
changeColor(image, formatColor(*color))
refresh()
class FirstPersonPacmanGraphics(PacmanGraphics):
    """Display variant that can hide ghosts from the viewer, for
    first-person / partially-observable play."""
    def __init__(self, zoom = 1.0, showGhosts = True, capture = False, frameTime=0):
        PacmanGraphics.__init__(self, zoom, frameTime=frameTime)
        self.showGhosts = showGhosts
        self.capture = capture

    def initialize(self, state, isBlue = False):
        """Set up the board for `state` and remember it for later redraws."""
        self.isBlue = isBlue
        PacmanGraphics.startGraphics(self, state)
        self.layout = state.layout
        # Distribution images are created lazily by updateDistributions.
        # (Unused locals `walls` and `dist` from the original were removed.)
        self.distributionImages = None
        self.drawStaticObjects(state)
        self.drawAgentObjects(state)
        # Information
        self.previousState = state

    def lookAhead(self, config, state):
        """Redraw the ghosts that are currently visible and blank out the
        rest. Does nothing while the agent's direction is 'Stop'."""
        if config.getDirection() == 'Stop':
            return
        # Draw relevant ghosts
        allGhosts = state.getGhostStates()
        visibleGhosts = state.getVisibleGhosts()
        for i, ghost in enumerate(allGhosts):
            if ghost in visibleGhosts:
                self.drawGhost(ghost, i)
            else:
                self.currentGhostImages[i] = None

    def getGhostColor(self, ghost, ghostIndex):
        """Ghosts keep their per-index color even in first-person mode."""
        return GHOST_COLORS[ghostIndex]

    def getPosition(self, ghostState):
        """Report an off-screen position for ghosts that should be hidden.

        Hidden means: ghost display disabled, the agent is a ghost, and its
        y coordinate exceeds 1 (presumably "away from the bottom row /
        on its own side" -- confirm against the game rules).
        """
        if not self.showGhosts and not ghostState.isPacman and ghostState.getPosition()[1] > 1:
            return (-1000, -1000)
        else:
            return PacmanGraphics.getPosition(self, ghostState)
def add(x, y):
    """Element-wise sum of two coordinate tuples.

    Generalized from the original fixed 2-tuple implementation to any pair
    of equal-length sequences; for the (x, y) pairs used throughout this
    file the result is byte-for-byte identical.
    """
    return tuple(a + b for a, b in zip(x, y))
# Saving graphical output
# -----------------------
# Note: to make an animated gif from this postscript output, try the command:
# convert -delay 7 -loop 1 -compress lzw -layers optimize frame* out.gif
# convert is part of imagemagick (freeware)

SAVE_POSTSCRIPT = False
POSTSCRIPT_OUTPUT_DIR = 'frames'
FRAME_NUMBER = 0
import os

def saveFrame():
    "Saves the current graphical output as a postscript file"
    global SAVE_POSTSCRIPT, FRAME_NUMBER, POSTSCRIPT_OUTPUT_DIR
    # Frame capture is off by default; flip SAVE_POSTSCRIPT to enable it.
    if not SAVE_POSTSCRIPT:
        return
    if not os.path.exists(POSTSCRIPT_OUTPUT_DIR):
        os.mkdir(POSTSCRIPT_OUTPUT_DIR)
    outfile = os.path.join(POSTSCRIPT_OUTPUT_DIR, 'frame_%08d.ps' % FRAME_NUMBER)
    FRAME_NUMBER += 1
    writePostscript(outfile)  # writes the current canvas
| mit |
yqm/sl4a | python/src/Lib/test/test_re.py | 55 | 37255 | import sys
sys.path = ['.'] + sys.path
from test.test_support import verbose, run_unittest
import re
from re import Scanner
import sys, os, traceback
from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefuly modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
def test_weakref(self):
s = 'QabbbcR'
x = re.compile('ab+c')
y = proxy(x)
self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
def test_basic_re_sub(self):
self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
'9.3 -3 24x100y')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
'9.3 -3 23x99y')
self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
'\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_bug_1140(self):
# re.sub(x, y, u'') should return u'', not '', and
# re.sub(x, y, '') should return '', not u''.
# Also:
# re.sub(x, y, unicode(x)) should return unicode(y), and
# re.sub(x, y, str(x)) should return
# str(y) if isinstance(y, str) else unicode(y).
for x in 'x', u'x':
for y in 'y', u'y':
z = re.sub(x, y, u'')
self.assertEqual(z, u'')
self.assertEqual(type(z), unicode)
#
z = re.sub(x, y, '')
self.assertEqual(z, '')
self.assertEqual(type(z), str)
#
z = re.sub(x, y, unicode(x))
self.assertEqual(z, y)
self.assertEqual(type(z), unicode)
#
z = re.sub(x, y, str(x))
self.assertEqual(z, y)
self.assertEqual(type(z), type(y))
def test_bug_1661(self):
# Verify that flags do not get silently ignored with compiled patterns
pattern = re.compile('.')
self.assertRaises(ValueError, re.match, pattern, 'A', re.I)
self.assertRaises(ValueError, re.search, pattern, 'A', re.I)
self.assertRaises(ValueError, re.findall, pattern, 'A', re.I)
self.assertRaises(ValueError, re.compile, pattern, re.I)
def test_bug_3629(self):
# A regex that triggered a bug in the sre-code validator
re.compile("(?P<quote>)(?(quote))")
def test_sub_template_numeric_escape(self):
# bug 776311 and friends
self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
# in python2.3 (etc), these loop endlessly in sre_parser.py
self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
'xz8')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
def test_bug_462270(self):
# Test for empty sub() behaviour, see SF bug #462270
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx')
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0))
self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
def test_re_split(self):
self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:*)", ":a:b::c"),
['', ':', 'a', ':', 'b', '::', 'c'])
self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:)*", ":a:b::c"),
['', ':', 'a', ':', 'b', ':', 'c'])
self.assertEqual(re.split("([b:]+)", ":a:b::c"),
['', ':', 'a', ':b::', 'c'])
self.assertEqual(re.split("(b)|(:+)", ":a:b::c"),
['', None, ':', 'a', None, ':', '', 'b', None, '',
None, '::', 'c'])
self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
['', 'a', '', '', 'c'])
def test_qualified_re_split(self):
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
self.assertEqual(re.split("(:)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
self.assertEqual(re.split("(:*)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
def test_re_findall(self):
self.assertEqual(re.findall(":+", "abc"), [])
self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""),
(":", ":"),
(":", "::")])
def test_bug_117612(self):
self.assertEqual(re.findall(r"(a|(b))", "aba"),
[("a", ""),("b", "b"),("a", "")])
def test_re_match(self):
self.assertEqual(re.match('a', 'a').groups(), ())
self.assertEqual(re.match('(a)', 'a').groups(), ('a',))
self.assertEqual(re.match(r'(a)', 'a').group(0), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
pat = re.compile('((a)|(b))(c)?')
self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None))
self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None))
self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c'))
self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c'))
self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c'))
# A single group
m = re.match('(a)', 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(1), 'a')
self.assertEqual(m.group(1, 1), ('a', 'a'))
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'),
(None, 'b', None))
self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
def test_re_groupref_exists(self):
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
('(', 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None)
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None)
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
('a', 'b'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
('a', ''))
# Tests for bug #1177831: exercise groups other than the first group
p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
self.assertEqual(p.match('abc').groups(),
('a', 'b', 'c'))
self.assertEqual(p.match('ad').groups(),
('a', None, 'd'))
self.assertEqual(p.match('abd'), None)
self.assertEqual(p.match('ac'), None)
def test_re_groupref(self):
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(),
('|', 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None)
self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(),
('a', 'a'))
self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(),
(None, None))
def test_groupdict(self):
self.assertEqual(re.match('(?P<first>first) (?P<second>second)',
'first second').groupdict(),
{'first':'first', 'second':'second'})
def test_expand(self):
self.assertEqual(re.match("(?P<first>first) (?P<second>second)",
"first second")
.expand(r"\2 \1 \g<second> \g<first>"),
"second first second first")
def test_repeat_minmax(self):
self.assertEqual(re.match("^(\w){1}$", "abc"), None)
self.assertEqual(re.match("^(\w){1}?$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None)
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^x{1}$", "xxx"), None)
self.assertEqual(re.match("^x{1}?$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertEqual(re.match("^x{}$", "xxx"), None)
self.assertNotEqual(re.match("^x{}$", "x{}"), None)
def test_getattr(self):
self.assertEqual(re.match("(a)", "a").pos, 0)
self.assertEqual(re.match("(a)", "a").endpos, 1)
self.assertEqual(re.match("(a)", "a").string, "a")
self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1)))
self.assertNotEqual(re.match("(a)", "a").re, None)
def test_special_escapes(self):
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\b(b.)\b",
u"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
u"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"^abc$", u"\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", u"abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", u"\nabc\n", re.M), None)
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a").group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.LOCALE).group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
def test_bigcharset(self):
self.assertEqual(re.match(u"([\u2222\u2223])",
u"\u2222").group(1), u"\u2222")
self.assertEqual(re.match(u"([\u2222\u2223])",
u"\u2222", re.UNICODE).group(1), u"\u2222")
def test_anyall(self):
self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
"a\nb")
self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0),
"a\n\nb")
def test_non_consuming(self):
self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a")
def test_ignore_case(self):
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", u"ABC", re.I).group(0), "ABC")
self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa")
self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
def test_category(self):
self.assertEqual(re.match(r"(\s)", " ").group(1), " ")
def test_getlower(self):
import _sre
self.assertEqual(_sre.getlower(ord('A'), 0), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", u"ABC", re.I).group(0), "ABC")
def test_not_literal(self):
self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
def test_search_coverage(self):
self.assertEqual(re.search("\s(b)", " b").group(1), "b")
self.assertEqual(re.search("a\s", "a ").group(0), "a ")
def test_re_escape(self):
p=""
for i in range(0, 256):
p = p + chr(i)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)) is not None,
True)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)).span(), (0,1))
pat=re.compile(re.escape(p))
self.assertEqual(pat.match(p) is not None, True)
self.assertEqual(pat.match(p).span(), (0,256))
def test_pickling(self):
import pickle
self.pickle_test(pickle)
import cPickle
self.pickle_test(cPickle)
# old pickles expect the _compile() reconstructor in sre module
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "The sre module is deprecated",
DeprecationWarning)
from sre import _compile
def pickle_test(self, pickle):
oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(oldpat)
newpat = pickle.loads(s)
self.assertEqual(oldpat, newpat)
def test_constants(self):
self.assertEqual(re.I, re.IGNORECASE)
self.assertEqual(re.L, re.LOCALE)
self.assertEqual(re.M, re.MULTILINE)
self.assertEqual(re.S, re.DOTALL)
self.assertEqual(re.X, re.VERBOSE)
def test_flags(self):
for flag in [re.I, re.M, re.X, re.S, re.L]:
self.assertNotEqual(re.compile('^pattern$', flag), None)
def test_sre_character_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"\%03o" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\%03o0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\%03o8" % i, chr(i)+"8"), None)
self.assertNotEqual(re.match(r"\x%02x" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\x%02x0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\x%02xz" % i, chr(i)+"z"), None)
self.assertRaises(re.error, re.match, "\911", "")
def test_sre_character_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"[\%03o]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o8]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02xz]" % i, chr(i)), None)
self.assertRaises(re.error, re.match, "[\911]", "")
def test_bug_113254(self):
self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1))
def test_bug_527371(self):
# bug described in patches 527371/672491
self.assertEqual(re.match(r'(a)?a','a').lastindex, None)
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
self.assertEqual(re.match("((a))", "a").lastindex, 1)
def test_bug_545855(self):
# bug 545855 -- This pattern failed to cause a compile error as it
# should, instead provoking a TypeError.
self.assertRaises(re.error, re.compile, 'foo[a-')
def test_bug_418626(self):
# bugs 418626 at al. -- Testing Greg Chapman's addition of op code
# SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
# pattern '*?' on a long string.
self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001)
self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0),
20003)
self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001)
# non-simple '*?' still used to hit the recursion limit, before the
# non-recursive scheme was implemented.
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
def test_bug_612074(self):
pat=u"["+re.escape(u"\u2039")+u"]"
self.assertEqual(re.compile(pat) and 1, 1)
def test_stack_overflow(self):
# nasty cases that used to overflow the straightforward recursive
# implementation of repeated groups.
self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x')
self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
def test_scanner(self):
def s_ident(scanner, token): return token
def s_operator(scanner, token): return "op%s" % token
def s_float(scanner, token): return float(token)
def s_int(scanner, token): return int(token)
scanner = Scanner([
(r"[a-zA-Z_]\w*", s_ident),
(r"\d+\.\d*", s_float),
(r"\d+", s_int),
(r"=|\+|-|\*|/", s_operator),
(r"\s+", None),
])
self.assertNotEqual(scanner.scanner.scanner("").pattern, None)
self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"),
(['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5,
'op+', 'bar'], ''))
def test_bug_448951(self):
# bug 448951 (similar to 429357, but with single char match)
# (Also test greedy matches.)
for op in '','?','*':
self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(),
(None, None))
self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(),
('a:', 'a'))
def test_bug_725106(self):
    # capturing groups in alternatives in repeats
    # After a repeat, each group must report the text from its *last*
    # participating iteration (or None if it never participated).
    self.assertEqual(re.match('^((a)|b)*', 'abc').groups(),
                     ('b', 'a'))
    self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(),
                     ('c', 'b'))
    self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(),
                     ('b', None))
    self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(),
                     ('b', None))
    # same expectations for the non-greedy variants
    self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(),
                     ('b', 'a'))
    self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(),
                     ('c', 'b'))
    self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(),
                     ('b', None))
    self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(),
                     ('b', None))
def test_bug_725149(self):
    # mark_stack_base restoring before restoring marks
    # Lookahead/negative-lookahead inside a repeat must not leak group
    # marks from failed speculative matches.
    self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(),
                     ('a', None))
    self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(),
                     ('a', None, None))
def test_bug_764548(self):
    # bug 764548, re.compile() barfs on str/unicode subclasses
    try:
        unicode
    except NameError:
        return  # no problem if we have no unicode
    # A unicode *subclass* must be accepted as a pattern source.
    class my_unicode(unicode): pass
    pat = re.compile(my_unicode("abc"))
    self.assertEqual(pat.match("xyz"), None)
def test_finditer(self):
    # finditer must yield one match object per non-overlapping match.
    # (The local was previously named `iter`, shadowing the builtin.)
    matches = re.finditer(r":+", "a:b::c:::d")
    self.assertEqual([item.group(0) for item in matches],
                     [":", "::", ":::"])
def test_bug_926075(self):
    # bug 926075 -- a str pattern and its unicode twin must compile to
    # *distinct* cached pattern objects.
    try:
        unicode
    except NameError:
        return # no problem if we have no unicode
    self.assert_(re.compile('bug_926075') is not
                 re.compile(eval("u'bug_926075'")))
def test_bug_931848(self):
    # bug 931848 -- splitting on a class of fullwidth/ideographic dots.
    try:
        unicode
    except NameError:
        # No unicode support: skip, as the sibling unicode tests do.
        # (The original fell through with `pass`, which would then hit a
        # NameError/SyntaxError below on unicode-less interpreters.)
        return
    pattern = eval('u"[\u002E\u3002\uFF0E\uFF61]"')
    self.assertEqual(re.compile(pattern).split("a.b.c"),
                     ['a','b','c'])
def test_bug_581080(self):
    # finditer and scanner must not produce spurious zero-width matches
    # around a real match.  (Local renamed from `iter`, which shadowed
    # the builtin.)
    it = re.finditer(r"\s", "a b")
    self.assertEqual(it.next().span(), (1,2))
    self.assertRaises(StopIteration, it.next)

    scanner = re.compile(r"\s").scanner("a b")
    self.assertEqual(scanner.search().span(), (1, 2))
    self.assertEqual(scanner.search(), None)
def test_bug_817234(self):
    # '.*' on "asdf" yields the full match and then one empty match at
    # the end -- and must then stop.  (Local renamed from `iter`, which
    # shadowed the builtin.)
    it = re.finditer(r".*", "asdf")
    self.assertEqual(it.next().span(), (0, 4))
    self.assertEqual(it.next().span(), (4, 4))
    self.assertRaises(StopIteration, it.next)
def test_empty_array(self):
    # SF bug 1647541 -- matching against an empty array.array of any
    # typecode must not crash: a non-empty pattern finds no match, and
    # the empty pattern matches with no groups.
    import array
    for typecode in 'cbBuhHiIlLfd':
        a = array.array(typecode)
        self.assertEqual(re.compile("bla").match(a), None)
        self.assertEqual(re.compile("").match(a).groups(), ())
def test_inline_flags(self):
    # Bug #1700: case-insensitive matching of non-ASCII letters must work
    # whether the I/U flags are passed to compile() or given inline as
    # (?i)/(?iu) in the pattern itself.
    upper_char = unichr(0x1ea0)  # Latin Capital Letter A with Dot Below
    lower_char = unichr(0x1ea1)  # Latin Small Letter A with Dot Below

    # (pattern, compile-time flags, text that must match)
    cases = [
        (upper_char, re.I | re.U, lower_char),
        (lower_char, re.I | re.U, upper_char),
        ('(?i)' + upper_char, re.U, lower_char),
        ('(?i)' + lower_char, re.U, upper_char),
        ('(?iu)' + upper_char, 0, lower_char),
        ('(?iu)' + lower_char, 0, upper_char),
    ]
    for pattern, flags, text in cases:
        p = re.compile(pattern, flags)
        q = p.match(text)
        self.assertNotEqual(q, None)
def test_dollar_matches_twice(self):
    r"""'$' matches at the very end of the string AND just before a
    terminating \n, so sub() fires at both positions."""
    pattern = re.compile('$')
    self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
    self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
    self.assertEqual(pattern.sub('#', '\n'), '#\n#')

    # With MULTILINE, '$' additionally fires before every embedded \n.
    pattern = re.compile('$', re.MULTILINE)
    self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' )
    self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#')
    self.assertEqual(pattern.sub('#', '\n'), '#\n#')
def run_re_tests():
    """Run the data-driven suite from test.re_tests.

    Each test tuple is (pattern, string, outcome[, repl, expected]).
    Failures are reported by printing '===' lines rather than raising, so
    one bad pattern does not abort the whole sweep.  Relies on module
    globals: re, sys, traceback, verbose.
    """
    # NOTE(review): `benchmarks` is imported but never used here.
    from test.re_tests import benchmarks, tests, SUCCEED, FAIL, SYNTAX_ERROR
    if verbose:
        print 'Running re_tests test suite'
    else:
        # To save time, only run the first and last 10 tests
        #tests = tests[:10] + tests[-10:]
        pass
    for t in tests:
        sys.stdout.flush()
        pattern = s = outcome = repl = expected = None
        if len(t) == 5:
            pattern, s, outcome, repl, expected = t
        elif len(t) == 3:
            pattern, s, outcome = t
        else:
            raise ValueError, ('Test tuples should have 3 or 5 fields', t)

        try:
            obj = re.compile(pattern)
        except re.error:
            if outcome == SYNTAX_ERROR: pass  # Expected a syntax error
            else:
                print '=== Syntax error:', t
        except KeyboardInterrupt: raise KeyboardInterrupt
        except:
            print '*** Unexpected error ***', t
            if verbose:
                traceback.print_exc(file=sys.stdout)
        else:
            # Compilation succeeded; now run the match itself.
            try:
                result = obj.search(s)
            except re.error, msg:
                # NOTE(review): if search() raises, `result` stays unbound
                # and the branches below would NameError -- latent issue.
                print '=== Unexpected exception', t, repr(msg)
            if outcome == SYNTAX_ERROR:
                # This should have been a syntax error; forget it.
                pass
            elif outcome == FAIL:
                if result is None: pass   # No match, as expected
                else: print '=== Succeeded incorrectly', t
            elif outcome == SUCCEED:
                if result is not None:
                    # Matched, as expected, so now we compute the
                    # result string and compare it to our expected result.
                    start, end = result.span(0)
                    vardict={'found': result.group(0),
                             'groups': result.group(),
                             'flags': result.re.flags}
                    # Expose numbered groups g1..g99 to the eval'd repl
                    # expression; missing groups become the string "Error",
                    # unmatched ones the string "None".
                    for i in range(1, 100):
                        try:
                            gi = result.group(i)
                            # Special hack because else the string concat fails:
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict['g%d' % i] = gi
                    # Named groups are exposed under their own names.
                    for i in result.re.groupindex.keys():
                        try:
                            gi = result.group(i)
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict[i] = gi
                    # repl is a Python expression from the test table,
                    # evaluated against the captured groups.
                    repl = eval(repl, vardict)
                    if repl != expected:
                        print '=== grouping error', t,
                        print repr(repl) + ' should be ' + repr(expected)
                else:
                    print '=== Failed incorrectly', t

                # Try the match on a unicode string, and check that it
                # still succeeds.
                try:
                    result = obj.search(unicode(s, "latin-1"))
                    if result is None:
                        print '=== Fails on unicode match', t
                except NameError:
                    continue # 1.5.2
                except TypeError:
                    continue # unicode test case

                # Try the match on a unicode pattern, and check that it
                # still succeeds.
                obj=re.compile(unicode(pattern, "latin-1"))
                result = obj.search(s)
                if result is None:
                    print '=== Fails on unicode pattern match', t

                # Try the match with the search area limited to the extent
                # of the match and see if it still succeeds. \B will
                # break (because it won't match at the end or start of a
                # string), so we'll ignore patterns that feature it.
                if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
                        and result is not None:
                    obj = re.compile(pattern)
                    result = obj.search(s, result.start(0), result.end(0) + 1)
                    if result is None:
                        print '=== Failed on range-limited match', t

                # Try the match with IGNORECASE enabled, and check that it
                # still succeeds.
                obj = re.compile(pattern, re.IGNORECASE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on case-insensitive match', t

                # Try the match with LOCALE enabled, and check that it
                # still succeeds.
                obj = re.compile(pattern, re.LOCALE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on locale-sensitive match', t

                # Try the match with UNICODE locale enabled, and check
                # that it still succeeds.
                obj = re.compile(pattern, re.UNICODE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on unicode-sensitive match', t
def test_main():
    # Entry point: run the unittest-based ReTests class, then the
    # data-driven re_tests sweep.
    run_unittest(ReTests)
    run_re_tests()

if __name__ == "__main__":
    test_main()
| apache-2.0 |
Impactstory/biomed | refset.py | 2 | 8347 | import pubmed
from biblio import Biblio
import db
from app import refset_queue
from app import my_redis
from scopus import enqueue_scopus
from collections import defaultdict
import arrow
import os
from journals_histogram import make_journals_histogram
def enqueue_for_refset(medline_citation, core_journals):
    """Queue a make_refset job for one Medline citation.

    Serializes only the few Biblio fields make_refset needs, so the rq
    payload stays small.  Side effect only; returns None.
    """
    biblio = Biblio(medline_citation)
    show_keys = [
        "pmid",
        "doi",
        "best_pub_date",
        "title",
        "is_old_enough_for_percentile"
    ]
    biblio_dict_for_queue = biblio.to_dict(show_keys=show_keys)

    job = refset_queue.enqueue_call(
        func=make_refset,
        args=(biblio_dict_for_queue, core_journals),
        result_ttl=120  # number of seconds the job result is kept in redis
    )
    # Tag the job with its pmid so it can be identified in the queue.
    job.meta["pmid"] = medline_citation["PMID"]
    job.save()
def get_closest_biblios(possible_biblios, center_date, refset_size):
    """Return up to refset_size biblios, nearest-first in publication time.

    Distance is the absolute number of (pseudo) days between each biblio's
    publication date and center_date.
    """
    def days_from_center(biblio):
        return abs(biblio.pseudo_published_days_since(center_date))

    ranked = sorted(possible_biblios, key=days_from_center)
    return ranked[0:min(refset_size, len(ranked))]
def tabulate_non_epub_biblios_by_pub_date(biblios):
    """Group the biblios that lack an epub date by their print pub_date.

    Biblios with an epub date are excluded entirely (their dates are
    already precise, so they never get pseudo dates).  Returns a
    defaultdict(list) keyed by pub_date.
    """
    by_date = defaultdict(list)
    no_epub = (b for b in biblios if not b.has_epub_date)
    for biblio in no_epub:
        by_date[biblio.pub_date].append(biblio)
    return by_date
def timedelta_between(date1, date2):
    """Return date1 minus date2 as a timedelta.

    Both arguments are parsed with arrow.get(), so anything arrow can
    parse (ISO strings, datetimes, ...) is accepted.
    """
    return arrow.get(date1) - arrow.get(date2)
def set_pseudo_dates(biblios):
    """Spread same-day publications evenly across the gap since the
    previous publication date, by mutating each biblio's pseudo_date.

    Biblios that have an epub date keep pseudo_date == best_pub_date;
    only print-dated biblios are smeared.  Returns the full list of
    (mutated) biblios.
    """
    # initialize: every biblio starts at its real best date
    for biblio in biblios:
        biblio.pseudo_date = biblio.best_pub_date
    response_biblios = dict((biblio.pmid, biblio) for biblio in biblios)

    biblios_by_pub_date = tabulate_non_epub_biblios_by_pub_date(biblios)

    # if there are some publications without epub dates
    if biblios_by_pub_date:
        sorted_pub_dates = sorted(biblios_by_pub_date.keys())
        # each date's "previous" date; the earliest date maps to itself,
        # giving it a zero-length gap (so its biblios keep their date).
        previous_pub_dates = [sorted_pub_dates[0]] + sorted_pub_dates[:-1]
        previous_pub_date_lookup = dict(zip(sorted_pub_dates, previous_pub_dates))

        for (real_pub_date, biblio_list) in biblios_by_pub_date.iteritems():
            num_pubs_on_this_date = len(biblio_list)
            previous_pub_date = previous_pub_date_lookup[real_pub_date]
            timedelta_since_last_pub_date = timedelta_between(real_pub_date, previous_pub_date)
            # divide the gap evenly among the pubs sharing this date
            pseudo_timedelta_step_size = timedelta_since_last_pub_date / num_pubs_on_this_date

            for (i, biblio) in enumerate(biblio_list):
                # the first biblio (i=0) keeps the real date; later ones
                # are pushed forward by i steps
                timedelta_to_add = i * pseudo_timedelta_step_size
                biblio.add_to_pseudo_date(timedelta_to_add)
                response_biblios[biblio.pmid] = biblio

    return response_biblios.values()
def get_pmids_for_refset(refset_center_date, core_journals, refset_size=None):
    """Pick the pmids of the articles published closest in time to
    refset_center_date in the given core journals.

    refset_size defaults to the REFSET_LENGTH env var (falling back
    to 50).  Returns a list of pmid strings.
    """
    if not refset_size:
        refset_size = int(os.getenv("REFSET_LENGTH", 50))

    # candidate articles from pubmed within the date window
    possible_pmids = pubmed.get_pmids_in_date_window(refset_center_date, core_journals)
    possible_records = pubmed.get_medline_records(possible_pmids)
    possible_biblios = [Biblio(record) for record in possible_records]
    # smear same-day pubs so the "closest" ranking can break ties
    pseudo_date_biblios = set_pseudo_dates(possible_biblios)

    refset_biblios = get_closest_biblios(
        pseudo_date_biblios,
        refset_center_date,
        refset_size)

    refset_pmids = [biblio.pmid for biblio in refset_biblios]
    return refset_pmids
def make_refset(biblio_dict, core_journals):
    """Worker job: build and persist the refset for one article, then
    queue scopus citation lookups for every member.

    biblio_dict is the trimmed dict enqueue_for_refset serialized
    (pmid, doi, best_pub_date, title, is_old_enough_for_percentile).
    """
    refset_owner_pmid = biblio_dict["pmid"]
    refset_owner_doi = biblio_dict["doi"]
    refset_center_date = biblio_dict["best_pub_date"]
    is_old_enough_for_percentile = biblio_dict["is_old_enough_for_percentile"]

    print "making a refset for {pmid}".format(pmid=refset_owner_pmid)

    # too-recent articles get an empty comparison set: a percentile
    # would be meaningless for them
    if bool(is_old_enough_for_percentile):
        refset_pmids = get_pmids_for_refset(refset_center_date, core_journals)
    else:
        refset_pmids = []

    # put our article of interest in its own refset
    refset_pmids.append(refset_owner_pmid)
    print refset_pmids

    # store the newly-minted refset. scopus will save citations
    # to it as it finds them. Do this before putting on scopus queue.
    save_new_refset(refset_pmids, refset_owner_pmid)

    # now let's get scopus looking for citations on this refset's members
    for pmid_in_refset in refset_pmids:
        enqueue_scopus(pmid_in_refset, refset_owner_pmid, refset_owner_doi)
def save_new_refset(refset_pmids, pmid_we_are_making_refset_for):
key = db.make_refset_key(pmid_we_are_making_refset_for)
refset_dict = {}
for pmid in refset_pmids:
refset_dict[pmid] = None
print "saving this refset", key, refset_dict
my_redis.hmset(key, refset_dict)
def get_refsets(pmid_list):
    """Fetch the stored refset hash for each pmid in one pipelined
    redis round trip.  Returns a list of dicts, aligned with pmid_list."""
    pipe = my_redis.pipeline()
    for pmid in pmid_list:
        pipe.hgetall(db.make_refset_key(pmid))
    return pipe.execute()
def build_refset(raw_refset_dict):
    """Wrap a stored {pmid: citation_count} dict in a Refset, fetching
    the members' Medline records from pubmed (network call)."""
    refset = Refset(raw_refset_dict)
    refset.biblios = refset.get_biblios_from_medline()
    return refset
def build_refset_from_records(records):
    """Build a Refset directly from already-fetched Medline records,
    avoiding the pubmed network call that build_refset makes.

    Citation counts are initialized to None for every member.
    """
    # Medline records key the PubMed id under "PMID" (see
    # enqueue_for_refset).  The original referenced an undefined local
    # `pmid` here, which raised NameError on every call.
    raw_refset_dict = dict((record["PMID"], None) for record in records)
    refset = Refset(raw_refset_dict)
    # was: refset.get_biblios_from_medline(records) -- that method takes
    # no records argument (and re-fetches from pubmed); use the records
    # we already have.
    refset.biblios = refset.get_biblios_from_medline_records(records)
    return refset
class Refset(object):
    """One article's comparison set: a {pmid: citation_count} mapping
    (as stored in redis) plus, once loaded, a {pmid: Biblio} mapping."""

    def __init__(self, raw_refset_dict):
        # pmid -> citation count; None until scopus has reported one.
        # NOTE(review): values coming back from redis are strings --
        # see the "None" string check in citation_summary.
        self.raw_refset_dict = raw_refset_dict
        self.biblios = {}

    @property
    def pmids(self):
        return self.raw_refset_dict.keys()

    # not a property, because it does a network call
    def get_biblios_from_medline(self):
        records = pubmed.get_medline_records(self.pmids)
        biblios = self.get_biblios_from_medline_records(records)
        return biblios

    def get_biblios_from_medline_records(self, medline_records):
        # Pure transformation: Medline record -> Biblio, keyed by pmid.
        biblios = {}
        for record in medline_records:
            biblio = Biblio(record)
            biblios[biblio.pmid] = biblio
        return biblios

    @property
    def refset_length(self):
        return len(self.pmids)

    @property
    def scopus_max(self):
        # Highest integer citation count in the refset; None when no
        # member has an int count yet (max() on empty raises ValueError).
        scopus_values = self.raw_refset_dict.values()
        scopus_values_int = [s for s in scopus_values if isinstance(s, int)]
        try:
            response = max(scopus_values_int)
        except ValueError:
            response = None
        return response

    @property
    def article_details(self):
        """pmid -> {"scopus": count, "biblio": trimmed biblio dict}."""
        response = {}
        for pmid in self.pmids:
            my_scopus = self.raw_refset_dict[pmid]
            try:
                scopus_scaling_factor = float(my_scopus) / float(self.scopus_max)
            except (ValueError, TypeError, ZeroDivisionError):
                # there's no scopus value
                scopus_scaling_factor = None
            # NOTE(review): scopus_scaling_factor is computed but never
            # used or returned -- presumably meant to be part of the
            # response dict; confirm before removing.
            response[pmid] = {
                "scopus": my_scopus,
                "biblio": self.biblios[pmid].to_dict(hide_keys=["abstract", "mesh_terms"])
            }
        return response

    def _make_scopus_histogram(self, articles):
        # Bucket article dicts by their scopus count.
        histogram_dict = defaultdict(list)
        for article in articles:
            my_scopus = article["scopus"]
            histogram_dict[my_scopus].append(article)
        return histogram_dict.values()

    @property
    def journal_histograms(self):
        ret = make_journals_histogram(self.article_details.values())
        return ret

    @property
    def citation_summary(self):
        """citation_count -> number of members with that count, or None
        if any member is still awaiting its scopus lookup."""
        citation_list = self.raw_refset_dict.values()
        # redis stores the placeholder None as the string "None";
        # any such value means the refset is not fully populated yet.
        if "None" in citation_list:
            return None
        summary = defaultdict(int)
        for citation_count in citation_list:
            summary[citation_count] += 1
        return summary

    @property
    def mesh_summary(self):
        # mesh term -> how many member biblios carry it.
        summary = defaultdict(int)
        for (pmid, biblio) in self.biblios.iteritems():
            for mesh in biblio.mesh_terms:
                summary[mesh] += 1
        return summary

    def to_dict(self):
        return {
            "articles": self.article_details,
            "journals": self.journal_histograms.to_dict(),
            # "mesh_summary": self.mesh_summary,
            "refset_length": self.refset_length,
            "citation_summary": self.citation_summary
        }
| mit |
rec/echomesh | code/python/external/requests/__init__.py | 184 | 1678 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('http://python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print r.text
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2013 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
# Package metadata; __build__ encodes the version as 0xMMmmpp hex.
__title__ = 'requests'
__version__ = '1.1.0'
__build__ = 0x010100
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Kenneth Reitz'
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Pre-2.7 stdlib has no NullHandler; define a minimal do-nothing
    # handler with the same name so the line below works everywhere.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

# Attach the no-op handler so importing applications that don't
# configure logging don't see "No handler found" warnings.
logging.getLogger(__name__).addHandler(NullHandler())
| mit |
liupfskygre/qiime | scripts/make_otu_heatmap.py | 15 | 11322 | #!/usr/bin/env python
from __future__ import division
__author__ = "Dan Knights"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = [
"Dan Knights",
"Jose Carlos Clemente Litran",
"Yoshiki Vazquez Baeza",
"Greg Caporaso",
"Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Dan Knights"
__email__ = "daniel.knights@colorado.edu"
import numpy as np
from biom import load_table
from qiime.make_otu_heatmap import (
plot_heatmap, get_clusters, make_otu_labels, extract_metadata_column,
get_order_from_categories, get_order_from_tree, names_to_indices,
get_log_transform, get_overlapping_samples)
from qiime.util import (parse_command_line_parameters, get_options_lookup,
make_option, MissingFileError)
from qiime.parse import parse_mapping_file
# Shared option definitions used across QIIME scripts (supplies the
# standard otu-table-input and output-fp options referenced below).
options_lookup = get_options_lookup()

# script_info drives parse_command_line_parameters() in main(): usage
# text, example invocations, required/optional CLI options, and version.
script_info = {}
script_info['brief_description'] = """Plot heatmap of OTU table"""
script_info['script_description'] = (
    "This script visualizes an OTU table as a heatmap where each row "
    "corresponds to an OTU and each column corresponds to a sample. The "
    "higher the relative abundance of an OTU in a sample, the more intense "
    "the color at the corresponsing position in the heatmap. By default, the "
    "OTUs (rows) will be clustered by UPGMA hierarchical clustering, and the "
    "samples (columns) will be presented in the order in which they appear in "
    "the OTU table. Alternatively, the user may supply a tree to sort the "
    "OTUs (rows) or samples (columns), or both. The user may also pass in a "
    "mapping file for sorting samples. If the user passes in a mapping file "
    "and a metadata category, samples (columns) will be grouped by category "
    "value and subsequently clustered within each group.")
# Worked examples shown by --help / script usage tests.
script_info['script_usage'] = []
script_info['script_usage'].append(
    ("",
     "Generate a heatmap as a PDF using all default values:",
     "%prog -i otu_table.biom -o heatmap.pdf"))
script_info['script_usage'].append(
    ("",
     "Generate a heatmap as a PNG:",
     "%prog -i otu_table.biom -o heatmap.png -g png"))
script_info['script_usage'].append(
    ("",
     "Sort the heatmap columns (samples) by the order of samples in the "
     "mapping file",
     "%prog -i otu_table.biom -o heatmap_sorted_samples.pdf -m "
     "mapping_file.txt"))
script_info['script_usage'].append(
    ("",
     "Sort the heatmap columns (samples) by the order of samples in the "
     "mapping file, and sort the heatmap rows by the order of tips in the "
     "tree:",
     "%prog -i otu_table.biom -o heatmap_sorted.pdf -m mapping_file.txt -t "
     "rep_set.tre"))
script_info['script_usage'].append(
    ("",
     "Group the heatmap columns (samples) by metadata category (e.g., "
     "Treatment), then cluster within each group:""",
     "%prog -i otu_table.biom -o heatmap_grouped_by_Treatment.pdf -m "
     "mapping_file.txt -c Treatment"))
script_info['output_description'] = (
    "A single output file is created containing the heatmap of the OTU "
    "table (a PDF file by default).")
script_info['required_options'] = [
    options_lookup['otu_table_as_primary_input'],
    options_lookup['output_fp']
]
script_info['optional_options'] = [
    make_option('-t', '--otu_tree', type='existing_filepath', help='Tree file '
                'to be used for sorting OTUs in the heatmap', default=None),
    make_option('-m', '--map_fname', dest='map_fname',
                type='existing_filepath', help='Metadata mapping file to be '
                'used for sorting Samples in the heatmap.', default=None),
    make_option('-c', '--category', dest='category', type="string",
                help='Metadata category for sorting samples. Samples will be '
                'clustered within each category level using euclidean UPGMA.',
                default=None),
    make_option('-s', '--sample_tree', dest='sample_tree',
                type='existing_filepath', help='Tree file to be used for '
                'sorting samples (e.g, output from upgma_cluster.py). If both '
                'this and the sample mapping file are provided, the mapping '
                'file is ignored.', default=None),
    make_option('-g', '--imagetype',
                help='type of image to produce (i.e. png, svg, pdf) '
                '[default: %default]', default='pdf', type="choice",
                choices=['pdf', 'png', 'svg']),
    make_option('--no_log_transform', action="store_true",
                help='Data will not be log-transformed. Without this option, '
                'all zeros will be set to a small value (default is 1/2 the '
                'smallest non-zero entry). Data will be translated to be '
                'non-negative after log transform, and num_otu_hits will be '
                'set to 0.', default=False),
    make_option('--suppress_row_clustering', action="store_true",
                help='No UPGMA clustering of OTUs (rows) is performed. If '
                '--otu_tree is provided, this flag is ignored.',
                default=False),
    make_option('--suppress_column_clustering', action="store_true",
                help='No UPGMA clustering of Samples (columns) is performed. '
                'If --map_fname is provided, this flag is ignored.',
                default=False),
    make_option('--absolute_abundance', action="store_true",
                help='Do not normalize samples to sum to 1 [default: %default]',
                default=False),
    make_option('--color_scheme', default="YlGn",
                help="color scheme for figure. see "
                "http://matplotlib.org/examples/color/"
                "colormaps_reference.html for choices "
                "[default: %default]"),
    make_option('--width',
                help='width of the figure in inches [default: %default]',
                default=5, type='float'),
    make_option('--height',
                help='height of the figure in inches [default: %default]',
                default=5, type='float'),
    make_option('--dpi',
                help='resolution of the figure in dots per inch '
                '[default: value of savefig.dpi in matplotlibrc file]',
                type='int', default=None),
    make_option('--obs_md_category', default="taxonomy",
                help="observation metadata category to plot "
                "[default: %default]"),
    make_option('--obs_md_level', default=None, type="int",
                help="the level of observation metadata to plot for "
                "hierarchical metadata [default: lowest level]")
]
script_info['version'] = __version__
def main():
    """Parse CLI options, order the OTU table's rows/columns, and render
    the heatmap image.

    Row (OTU) order comes from --otu_tree, else UPGMA clustering, else
    table order.  Column (sample) order comes from --sample_tree, else
    the mapping file (optionally grouped by --category), else UPGMA,
    else table order.
    """
    option_parser, opts, args = parse_command_line_parameters(**script_info)

    otu_table = load_table(opts.otu_table_fp)

    obs_md_category = opts.obs_md_category
    obs_md_level = opts.obs_md_level

    if obs_md_level is None:
        # grab the last level if the user didn't specify a level
        obs_md_level = -1
    else:
        # convert to 0-based indexing
        obs_md_level -= 1

    # Build one label per OTU from the requested observation metadata
    # (e.g. a taxonomy level); empty labels when metadata is absent.
    obs_md = otu_table.metadata(axis='observation')
    obs_md_labels = []
    if (obs_md is None or obs_md_category not in obs_md[0]):
        obs_md_labels = [['']] * len(otu_table.ids(axis='observation'))
    else:
        for _, _, md in otu_table.iter(axis='observation'):
            current_md = md[obs_md_category]
            if obs_md_level < len(current_md):
                current_md_at_level = current_md[obs_md_level]
            else:
                current_md_at_level = ''
            obs_md_labels.append([current_md_at_level])
    otu_labels = make_otu_labels(otu_table.ids(axis='observation'),
                                 obs_md_labels)

    # Convert to relative abundance if requested
    if not opts.absolute_abundance:
        otu_table = otu_table.norm(axis='observation')

    # Get log transform if requested
    if not opts.no_log_transform:
        otu_table = get_log_transform(otu_table)

    # Re-order samples by tree if provided
    if opts.sample_tree is not None:
        sample_order = get_order_from_tree(otu_table.ids(),
                                           open(opts.sample_tree, 'U'))
    # if there's no sample tree, sort samples by mapping file
    elif opts.map_fname is not None:
        lines = open(opts.map_fname, 'U').readlines()
        metadata = list(parse_mapping_file(lines))
        # drop samples not present in both the mapping file and the table
        new_map, otu_table = get_overlapping_samples(metadata[0], otu_table)
        metadata[0] = new_map
        # NOTE(review): zip(...)[0] relies on Python 2 zip returning a
        # list -- this indexing fails on Python 3.
        map_sample_ids = zip(*metadata[0])[0]
        # if there's a category, do clustering within each category
        if opts.category is not None:
            category_labels = extract_metadata_column(otu_table.ids(),
                                                      metadata, opts.category)
            sample_order = get_order_from_categories(otu_table,
                                                     category_labels)
        # else: just use the mapping file order
        else:
            ordered_sample_ids = []
            for sample_id in map_sample_ids:
                if otu_table.exists(sample_id):
                    ordered_sample_ids.append(sample_id)
            sample_order = names_to_indices(
                otu_table.ids(),
                ordered_sample_ids)
    # if no tree or mapping file, perform upgma euclidean
    elif not opts.suppress_column_clustering:
        data = np.asarray([i for i in otu_table.iter_data(axis='observation')])
        sample_order = get_clusters(data, axis='column')
    # else just use OTU table ordering
    else:
        sample_order = np.arange(len(otu_table.ids()))

    # re-order OTUs by tree (if provided), or clustering
    if opts.otu_tree is not None:
        # open tree file
        try:
            f = open(opts.otu_tree, 'U')
        except (TypeError, IOError):
            raise MissingFileError("Couldn't read tree file at path: %s" %
                                   opts.otu_tree)
        otu_order = get_order_from_tree(otu_table.ids(axis='observation'), f)
        f.close()
    # if no tree or mapping file, perform upgma euclidean
    elif not opts.suppress_row_clustering:
        data = np.asarray([i for i in otu_table.iter_data(axis='observation')])
        otu_order = get_clusters(data, axis='row')
    # else just use OTU table ordering
    else:
        otu_order = np.arange(len(otu_table.ids(axis='observation')))

    # otu_order and sample_order should be ids, rather than indices
    # to use in sortObservationOrder/sortSampleOrder
    otu_id_order = [otu_table.ids(axis='observation')[i] for i in otu_order]
    sample_id_order = [otu_table.ids()[i] for i in sample_order]

    # Re-order otu table, sampleids, etc. as necessary
    otu_table = otu_table.sort_order(otu_id_order, axis='observation')
    otu_labels = np.array(otu_labels)[otu_order]
    otu_table = otu_table.sort_order(sample_id_order)
    sample_labels = otu_table.ids()

    plot_heatmap(otu_table, otu_labels, sample_labels, opts.output_fp,
                 imagetype=opts.imagetype, width=opts.width,
                 height=opts.height, dpi=opts.dpi,
                 color_scheme=opts.color_scheme)


if __name__ == "__main__":
    main()
| gpl-2.0 |
pwmarcz/django | django/contrib/gis/gdal/error.py | 104 | 1430 | """
This module houses the OGR & SRS Exception objects, and the
check_err() routine which checks the status code returned by
OGR methods.
"""
#### OGR & SRS Exceptions ####
class GDALException(Exception):
    """Base exception for errors raised by the GDAL library bindings."""
    pass
class OGRException(Exception):
    """Exception for errors reported by the OGR vector API."""
    pass
class SRSException(Exception):
    """Exception for spatial-reference-system (SRS) errors."""
    pass
class OGRIndexError(OGRException, KeyError):
    """
    This exception is raised when an invalid index is encountered, and has
    the 'silent_variable_failure' attribute set to true. This ensures that
    django's templates proceed to use the next lookup type gracefully when
    an Exception is raised. Fixes ticket #4740.
    """
    silent_variable_failure = True
#### OGR error checking codes and routine ####

# OGR Error Codes: numeric OGRERR value -> (exception class, message)
# used by check_err() below.
OGRERR_DICT = {
    1: (OGRException, 'Not enough data.'),
    2: (OGRException, 'Not enough memory.'),
    3: (OGRException, 'Unsupported geometry type.'),
    4: (OGRException, 'Unsupported operation.'),
    5: (OGRException, 'Corrupt data.'),
    6: (OGRException, 'OGR failure.'),
    7: (SRSException, 'Unsupported SRS.'),
    8: (OGRException, 'Invalid handle.'),
}
# OGRERR_NONE (0) signals success.
OGRERR_NONE = 0
def check_err(code):
    """Raise the mapped exception for a nonzero OGR error code.

    Success (OGRERR_NONE) returns silently; known codes raise their
    configured exception class; anything else raises OGRException.
    """
    if code == OGRERR_NONE:
        return
    try:
        exc_class, msg = OGRERR_DICT[code]
    except KeyError:
        raise OGRException('Unknown error code: "%s"' % code)
    raise exc_class(msg)
| bsd-3-clause |
asimshankar/tensorflow | tensorflow/python/ops/gradient_checker_v2_test.py | 13 | 10669 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for compute_gradient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import \
gradient_checker_v2 as gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
# needs this to register gradient for SoftmaxCrossEntropyWithLogits:
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _random_complex(shape, dtype):
data = np.random.random_sample(shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data.imag = np.random.random_sample(shape)
return data
@test_util.run_all_in_graph_and_eager_modes
class GradientCheckerTest(test.TestCase):
def testAddSimple(self):
size = (2, 3)
x1 = constant_op.constant(2.0, shape=size, name="x1")
x2 = constant_op.constant(3.0, shape=size, name="x2")
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
lambda x1: math_ops.add(x1, x2), [x1]))
tf_logging.info("x1 error = %f", error)
assert error < 1e-4
def testAddCustomized(self):
size = (2, 3)
x1 = constant_op.constant(
2.0, shape=size, dtype=dtypes.float64, name="x1")
x2 = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3))
# checkint gradients for x2 using a special delta
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
lambda x2: math_ops.add(x1, x2),
[x2], delta=1e-2))
tf_logging.info("x2 error = %f", error)
assert error < 1e-10
def testGather(self):
def f(params):
index_values = [1, 3]
indices = constant_op.constant(index_values, name="i")
return array_ops.gather(params, indices, name="y")
p_shape = (4, 2)
p_size = 8
params = constant_op.constant(
np.arange(p_size).astype(np.float), shape=p_shape, name="p")
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
f, [params]))
tf_logging.info("gather error = %f", error)
assert error < 1e-4
def testNestedGather(self):
def f(params):
index_values = [1, 3, 5, 6]
indices = constant_op.constant(index_values, name="i")
y = array_ops.gather(params, indices, name="y")
index_values2 = [0, 2]
indices2 = constant_op.constant(index_values2, name="i2")
return array_ops.gather(y, indices2, name="y2")
p_shape = (8, 2)
p_size = 16
params = constant_op.constant(
np.arange(p_size).astype(np.float), shape=p_shape, name="p")
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
f, [params]))
tf_logging.info("nested gather error = %f", error)
assert error < 1e-4
def testComplexMul(self):
c = constant_op.constant(5 + 7j, dtype=dtypes.complex64)
def f(x):
return c * x
x_shape = c.shape
x_dtype = c.dtype
x = constant_op.constant(_random_complex(x_shape, x_dtype))
analytical, numerical = gradient_checker.compute_gradient(
f, [x])
correct = np.array([[5, 7], [-7, 5]])
self.assertAllEqual(correct, analytical[0])
self.assertAllClose(correct, numerical[0], rtol=1e-4)
x = constant_op.constant(_random_complex(x_shape, x_dtype))
self.assertLess(
gradient_checker.max_error(*gradient_checker.compute_gradient(
f, [x])), 3e-4)
def testComplexConj(self):
def f(x):
return math_ops.conj(x)
x_shape = ()
x_dtype = dtypes.complex64
x = constant_op.constant(_random_complex(x_shape, x_dtype))
analytical, numerical = gradient_checker.compute_gradient(
f, [x])
correct = np.array([[1, 0], [0, -1]])
self.assertAllEqual(correct, analytical[0])
self.assertAllClose(correct, numerical[0], rtol=2e-5)
x = constant_op.constant(_random_complex(x_shape, x_dtype))
self.assertLess(
gradient_checker.max_error(*gradient_checker.compute_gradient(
f, [x])), 2e-5)
def testEmptySucceeds(self):
def f(x):
return array_ops.identity(x)
x = constant_op.constant(np.random.random_sample((0, 3)),
dtype=dtypes.float32)
for grad in gradient_checker.compute_gradient(f, [x]):
self.assertEqual(grad[0].shape, (0, 0))
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
f, [x]))
self.assertEqual(error, 0)
def testEmptyFails(self):
@custom_gradient.custom_gradient
def id_bad_grad(x):
y = array_ops.identity(x)
def grad_fn(dy):
# dx = constant_op.constant(np.zeros((1, 4)), dtype=dtypes.float32)
dx = array_ops.transpose(dy)
return dx
return y, grad_fn
def f(x):
return id_bad_grad(x)
x = constant_op.constant(np.random.random_sample((0, 3)),
dtype=dtypes.float32)
bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)"
with self.assertRaisesRegexp(ValueError, bad):
gradient_checker.compute_gradient(f, [x])
  def testNaNGradFails(self):
    """A NaN gradient must not slip through an 'error < threshold' check."""

    @custom_gradient.custom_gradient
    def id_nan_grad(x):
      y = array_ops.identity(x)

      def grad_fn(dy):
        # Poison the gradient with NaN; the correct gradient of identity
        # would simply be dx = dy.
        dx = np.nan * dy
        return dx

      return y, grad_fn

    def f(x):
      return id_nan_grad(x)

    x = constant_op.constant(np.random.random_sample((1, 1)),
                             dtype=dtypes.float32)
    error = gradient_checker.max_error(*gradient_checker.compute_gradient(
        f, [x]))
    # Typical test would assert error < max_err, so assert this test would
    # raise AssertionError, since NaN is not < 1.0.
    with self.assertRaisesRegexp(AssertionError, "False is not true"):
      self.assertTrue(error < 1.0)
  def testGradGrad(self):
    """Checks a second-order gradient: d/dx [d((x^2)^2)/dx] = 12*x^2 = 48 at x=2."""

    def f(x):
      with backprop.GradientTape() as tape:
        tape.watch(x)
        y = math_ops.square(x)
        z = math_ops.square(y)
      # f returns the first-order gradient, so checking f's gradient
      # exercises the second-order gradient of x**4.
      return tape.gradient(z, x)

    analytical, numerical = gradient_checker.compute_gradient(f, [2.0])
    self.assertAllEqual([[[48.]]], analytical)
    self.assertAllClose([[[48.]]], numerical, rtol=1e-4)
@test_util.run_all_in_graph_and_eager_modes
class MiniMNISTTest(test.TestCase):
  """Gradient checker for a miniature MNIST-style two-layer network."""

  def _BuildAndTestMiniMNIST(self, param_index, tag):
    """Builds the network and returns the max gradient error for one parameter.

    Args:
      param_index: Index into [input, hidden weight, hidden bias,
        softmax weight, softmax bias] selecting which tensor the gradient
        is checked against.
      tag: Human-readable name, used only for logging.

    Returns:
      The maximum difference between analytic and numeric gradients.
    """
    # Fix seed to avoid occasional flakiness
    np.random.seed(6)

    # Hyperparameters
    batch = 3
    inputs = 16
    features = 32
    classes = 10

    # Define the parameters
    inp_data = np.random.random_sample(inputs * batch)
    hidden_weight_data = np.random.randn(inputs * features) / np.sqrt(inputs)
    hidden_bias_data = np.random.random_sample(features)
    sm_weight_data = np.random.randn(features * classes) / np.sqrt(features)
    sm_bias_data = np.random.random_sample(classes)

    # special care for labels since they need to be normalized per batch
    label_data = np.random.random(batch * classes).reshape((batch, classes))
    s = label_data.sum(axis=1)
    label_data /= s[:, None]

    # We treat the inputs as "parameters" here
    inp = constant_op.constant(
        inp_data.tolist(),
        shape=[batch, inputs],
        dtype=dtypes.float64,
        name="inp")
    hidden_weight = constant_op.constant(
        hidden_weight_data.tolist(),
        shape=[inputs, features],
        dtype=dtypes.float64,
        name="hidden_weight")
    hidden_bias = constant_op.constant(
        hidden_bias_data.tolist(),
        shape=[features],
        dtype=dtypes.float64,
        name="hidden_bias")
    softmax_weight = constant_op.constant(
        sm_weight_data.tolist(),
        shape=[features, classes],
        dtype=dtypes.float64,
        name="softmax_weight")
    softmax_bias = constant_op.constant(
        sm_bias_data.tolist(),
        shape=[classes],
        dtype=dtypes.float64,
        name="softmax_bias")

    # List all the parameter so that we can test them one at a time
    all_params = [
        inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias
    ]

    # Now, Building MNIST: relu(x.W1 + b1).W2 + b2 -> softmax cross entropy.
    def f(inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias):
      features = nn_ops.relu(
          nn_ops.xw_plus_b(inp, hidden_weight, hidden_bias), name="features")
      logits = nn_ops.xw_plus_b(
          features, softmax_weight, softmax_bias, name="logits")
      labels = constant_op.constant(
          label_data.tolist(),
          shape=[batch, classes],
          dtype=dtypes.float64,
          name="labels")
      cost = nn_ops.softmax_cross_entropy_with_logits(
          labels=labels, logits=logits, name="cost")
      return cost

    def f_restricted(x):
      # Evaluate f with every parameter fixed except the param_index-th,
      # which is replaced by x, so gradients flow through only one tensor.
      xs = all_params
      i = param_index
      # use x for the i-th parameter
      xs = xs[0:i] + [x] + xs[i + 1:]
      return f(*xs)

    # Test the gradients.
    err = gradient_checker.max_error(*gradient_checker.compute_gradient(
        f_restricted, [all_params[param_index]], delta=1e-5))

    tf_logging.info("Mini MNIST: %s gradient error = %g", tag, err)
    return err

  def testInputGradient(self):
    self.assertLess(self._BuildAndTestMiniMNIST(0, "input"), 1e-8)

  def testHiddenWeightGradient(self):
    self.assertLess(self._BuildAndTestMiniMNIST(1, "hidden_weight"), 1e-8)

  def testHiddenBiasGradient(self):
    self.assertLess(self._BuildAndTestMiniMNIST(2, "hidden_bias"), 1e-8)

  def testSoftmaxWeightGradient(self):
    self.assertLess(self._BuildAndTestMiniMNIST(3, "softmax_weight"), 1e-8)

  def testSoftmaxBiasGradient(self):
    self.assertLess(self._BuildAndTestMiniMNIST(4, "softmax_bias"), 1e-8)
if __name__ == "__main__":
test.main()
| apache-2.0 |
onecloud/neutron | neutron/extensions/providernet.py | 8 | 3416 | # Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import exceptions as n_exc
NETWORK_TYPE = 'provider:network_type'
PHYSICAL_NETWORK = 'provider:physical_network'
SEGMENTATION_ID = 'provider:segmentation_id'
EXTENDED_ATTRIBUTES_2_0 = {
'networks': {
NETWORK_TYPE: {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': attributes.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'is_visible': True},
PHYSICAL_NETWORK: {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': attributes.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'is_visible': True},
SEGMENTATION_ID: {'allow_post': True, 'allow_put': True,
'convert_to': int,
'enforce_policy': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True},
}
}
def _raise_if_updates_provider_attributes(attrs):
    """Reject update requests that carry provider attributes.

    Used by plugins that do not support updating provider networks:
    raises InvalidInput as soon as any provider attribute is actually
    set in *attrs*.
    """
    for attr in (NETWORK_TYPE, PHYSICAL_NETWORK, SEGMENTATION_ID):
        if attributes.is_attr_set(attrs.get(attr)):
            raise n_exc.InvalidInput(
                error_message=_("Plugin does not support "
                                "updating provider attributes"))
class Providernet(extensions.ExtensionDescriptor):
    """Extension class supporting provider networks.

    Used by neutron's extension framework to expose metadata about the
    provider network extension.  No new resources are defined; instead the
    existing network resource's request/response messages gain attributes
    in the provider namespace.  With admin rights, returned network
    dictionaries also include the provider attributes.
    """

    @classmethod
    def get_name(cls):
        return "Provider Network"

    @classmethod
    def get_alias(cls):
        return "provider"

    @classmethod
    def get_description(cls):
        return "Expose mapping of virtual networks to physical networks"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/provider/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2012-09-07T10:00:00-00:00"

    def get_extended_resources(self, version):
        # Only API v2.0 carries the provider attribute extensions.
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
| apache-2.0 |
credativUK/account-financial-tools | account_default_draft_move/account_bank_statement.py | 19 | 1289 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author Vincent Renaville. Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from openerp import models, api
class AccountBankStatement(models.Model):
    """Keep journal entries from bank statement confirmation in draft state.

    Overrides the standard confirmation so the generated account moves are
    reset to 'draft' instead of being posted immediately.
    """
    _inherit = "account.bank.statement"

    @api.multi
    def button_confirm_bank(self):
        """Confirm the statement, then force the generated moves back to draft."""
        res = super(AccountBankStatement, self).button_confirm_bank()
        # Collect every journal entry created for the statement lines and
        # reset it to draft in one write.
        entries = self.mapped('line_ids.journal_entry_id')
        entries.write({'state': 'draft'})
        return res
| agpl-3.0 |
Tuckie/max31855 | test_max31855.py | 4 | 1867 | #!/usr/bin/python
from max31855 import MAX31855, MAX31855Error
import unittest
class TestSequenceFunctions(unittest.TestCase):
    """Unit tests for the MAX31855 raw-data-to-temperature conversions."""

    def setUp(self):
        # Pin numbers here are placeholders; the conversion methods under
        # test never touch the GPIO hardware -- TODO confirm against driver.
        self.thermocouple = MAX31855(1, 2, 3, 4)

    def test_convert_tc_data(self):
        '''Verify thermocouple conversion to signed two's complement int and conversion factor from datasheet is working as expected.'''
        # Pairs of (14-bit raw reading as a spaced bit string, expected deg C)
        # taken from the MAX31855 datasheet conversion table.
        tc_tests = [['0110 0100 0000 00',1600.00],
                    ['0011 1110 1000 00',1000.00],
                    ['0000 0110 0100 11',100.75],
                    ['0000 0001 1001 00', 25.00],
                    ['0000 0000 0000 00', 0.00],
                    ['1111 1111 1111 11', -0.25],
                    ['1111 1111 1111 00', -1.00],
                    ['1111 0000 0110 00',-250.00]]
        for test in tc_tests:
            value = self.thermocouple.convert_tc_data(int(test[0].replace(" ", ""), 2))
            self.assertEqual(value, test[1])

    def test_convert_rj_data(self):
        '''Verify reference junction conversion to signed two's complement int and conversion factor from datasheet is working as expected.'''
        # Pairs of (12-bit raw reading as a spaced bit string, expected deg C).
        rj_tests = [['0111 1111 0000',127.0000],
                    ['0110 0100 1001',100.5625],
                    ['0001 1001 0000', 25.0000],
                    ['0000 0000 0000', 0.0000],
                    ['1111 1111 1111',-0.0625],
                    ['1111 1111 0000',-1.0000],
                    ['1110 1100 0000',-20.0000],
                    ['1100 1001 0000',-55.0000]]
        for test in rj_tests:
            value = self.thermocouple.convert_rj_data(int(test[0].replace(" ", ""), 2))
            self.assertEqual(value, test[1])

    def tearDown(self):
        # Let the driver release whatever it allocated in __init__.
        self.thermocouple.cleanup()
unittest.main()
#suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
#unittest.TextTestRunner(verbosity=2).run(suite) | mit |
openrightsgroup/OrgProbe | orgprobe/middleware_api.py | 1 | 1995 | import logging
import requests
class MiddlewareAPI(object):
    """Thin HTTP client for the ORG middleware API."""

    def __init__(self, config, signer):
        """Build the API base URL.

        ``config`` is a ConfigParser-style object; ``signer`` supplies
        request timestamps and signatures.
        """
        self.signer = signer
        if config.has_option('api', 'url'):
            self.url_base = config.get('api', 'url')
        else:
            # backwards compatibility with old config keys
            https = config.getboolean('api', 'https', fallback=True)
            host = config.get('api', 'host')
            port = config.getint('api', 'port', fallback=443)
            version = config.get('api', 'version', fallback='1.2')
            self.url_base = "{}://{}:{}/{}".format(
                'https' if https else 'http',
                host,
                port,
                version)

    def status_ip(self, public_ip, probe_uuid):
        """Report probe status, appending the public IP to the path when known."""
        url = 'status/ip'
        if public_ip:
            url = '{}/{}'.format(url, public_ip)
        return self._execute(url,
                             args={"probe_uuid": probe_uuid},
                             send_timestamp=True,
                             sig_keys=['date'])

    def config(self, version):
        """Fetch the probe configuration for the given config version."""
        return self._execute('config/{}'.format(version))

    def _execute(self,
                 path,
                 args=None,
                 send_timestamp=False,
                 sig_keys=None):
        """Perform a (optionally signed) GET and return the decoded JSON body.

        Raises an HTTP error on non-2xx responses and re-raises ValueError
        when the body is not valid JSON.
        """
        if not args:
            args = {}
        if send_timestamp:
            args['date'] = self.signer.timestamp()
        if sig_keys:
            # Sign only the selected argument values (e.g. the timestamp);
            # the signature itself is sent as a regular query parameter.
            args['signature'] = self.signer.get_signature(args, sig_keys)
        url = "{}/{}".format(self.url_base, path)
        logging.debug("Opening ORG Api connection to: %s with args: %s", url, args)
        response = requests.get(url, params=args)
        logging.debug("ORG Api Request Complete: %s", response.status_code)
        response.raise_for_status()
        try:
            return response.json()
        except ValueError:
            logging.error("Middleware response contained invalid JSON: %s", response.content)
            raise
| gpl-3.0 |
TeutoNet-Netzdienste/ansible | v2/ansible/plugins/action/script.py | 7 | 4130 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
from ansible import constants as C
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action handler for the ``script`` module: copy a local script to the
    remote host, make it executable, run it, and clean up."""

    TRANSFERS_FILES = True

    def run(self, tmp=None, task_vars=None):
        ''' handler for file transfer operations '''

        # FIXME: noop stuff still needs to be sorted out
        #if self.runner.noop_on_check(inject):
        #    # in check mode, always skip this module
        #    return ReturnData(conn=conn, comm_ok=True,
        #                      result=dict(skipped=True, msg='check mode not supported for this module'))

        if not tmp:
            tmp = self._make_tmp_path()

        creates = self._task.args.get('creates')
        if creates:
            # do not run the command if the line contains creates=filename
            # and the filename already exists. This allows idempotence
            # of command executions.
            result = self._execute_module(module_name='stat', module_args=dict(path=creates), tmp=tmp, persist_files=True)
            stat = result.get('stat', None)
            if stat and stat.get('exists', False):
                return dict(skipped=True, msg=("skipped, since %s exists" % creates))

        removes = self._task.args.get('removes')
        if removes:
            # do not run the command if the line contains removes=filename
            # and the filename does not exist. This allows idempotence
            # of command executions.
            result = self._execute_module(module_name='stat', module_args=dict(path=removes), tmp=tmp, persist_files=True)
            stat = result.get('stat', None)
            if stat and not stat.get('exists', False):
                return dict(skipped=True, msg=("skipped, since %s does not exist" % removes))

        # the script name is the first item in the raw params, so we split it
        # out now so we know the file name we need to transfer to the remote,
        # and everything else is an argument to the script which we need later
        # to append to the remote command
        parts = self._task.args.get('_raw_params', '').strip().split()
        source = parts[0]
        args = ' '.join(parts[1:])

        # resolve the script path relative to the role's files/ dir when
        # running inside a role, otherwise relative to the playbook
        if self._task._role is not None:
            source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
        else:
            source = self._loader.path_dwim(source)

        # transfer the file to a remote tmp location
        tmp_src = self._shell.join_path(tmp, os.path.basename(source))
        self._connection.put_file(source, tmp_src)

        sudoable = True
        # set file permissions, more permissive when the copy is done as a different user
        if self._connection_info.become and self._connection_info.become_user != 'root':
            chmod_mode = 'a+rx'
            sudoable = False
        else:
            chmod_mode = '+rx'
        self._remote_chmod(tmp, chmod_mode, tmp_src, sudoable=sudoable)

        # add preparation steps to one ssh roundtrip executing the script
        env_string = self._compute_environment_string()
        script_cmd = ' '.join([env_string, tmp_src, args])

        result = self._low_level_execute_command(cmd=script_cmd, tmp=None, sudoable=sudoable)

        # clean up after ourselves (the "tmp" in tmp guard avoids removing
        # an unexpected path)
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES:
            self._remove_tmp_path(tmp)

        # scripts are always assumed to have changed the remote host
        result['changed'] = True

        return result
| gpl-3.0 |
Stan1989/volatility | volatility/plugins/linux/check_creds.py | 45 | 2250 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.pslist as linux_pslist
class linux_check_creds(linux_pslist.linux_pslist):
    """Checks if any processes are sharing credential structures"""

    def calculate(self):
        """Yield one dict mapping cred-struct address -> list of PIDs using it."""
        linux_common.set_plugin_members(self)

        # This plugin relies on task_struct having a 'cred' member.
        if not self.profile.obj_has_member("task_struct", "cred"):
            debug.error("This command is not supported in this profile.")

        creds = {}
        for task in linux_pslist.linux_pslist.calculate(self):
            # Group PIDs by the address of the cred structure they point to;
            # setdefault replaces the manual "if not in dict" initialization.
            creds.setdefault(task.cred.v(), []).append(task.pid)

        yield creds

    def render_text(self, outfd, data):
        """Print only the cred structures shared by more than one process."""
        self.table_header(outfd, [("PIDs", "8")])

        # print out processes that are sharing cred structures
        for htable in data:
            for pids in htable.values():
                if len(pids) > 1:
                    # str.join instead of manual concatenation plus a
                    # trailing-separator strip.
                    pid_str = ", ".join("{0:d}".format(pid) for pid in pids)
                    self.table_row(outfd, pid_str)
| gpl-2.0 |
chrismeyersfsu/ansible | lib/ansible/modules/network/cumulus/cl_bridge.py | 21 | 13367 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cl_bridge
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bridge port on Cumulus Linux
description:
- Configures a bridge interface on Cumulus Linux To configure a bond port
use the cl_bond module. To configure any other type of interface use the
cl_interface module. Follow the guidelines for bridging found in the
Cumulus User Guide at U(http://docs.cumulusnetworks.com)
options:
name:
description:
- Name of the interface.
required: true
alias_name:
description:
- Description of the port.
ipv4:
description:
- List of IPv4 addresses to configure on the interface.
In the form I(X.X.X.X/YY).
ipv6:
description:
- List of IPv6 addresses to configure on the interface.
In the form I(X:X:X::X/YYY).
addr_method:
description:
- Configures the port to use DHCP.
To enable this feature use the option I(dhcp).
choices: ['dhcp']
mtu:
description:
- Set MTU. Configure Jumbo Frame by setting MTU to I(9000).
virtual_ip:
description:
- Define IPv4 virtual IP used by the Cumulus Linux VRR feature.
virtual_mac:
description:
- Define Ethernet mac associated with Cumulus Linux VRR feature.
vids:
description:
- In vlan-aware mode, lists VLANs defined under the interface.
pvid:
description:
- In vlan-aware mode, defines vlan that is the untagged vlan.
stp:
description:
- Enables spanning tree Protocol. As of Cumulus Linux 2.5 the default
bridging mode, only per vlan RSTP or 802.1d is supported. For the
vlan aware mode, only common instance STP is supported
default: 'yes'
choices: ['yes', 'no']
ports:
description:
- List of bridge members.
required: True
vlan_aware:
description:
- Enables vlan-aware mode.
choices: ['yes', 'no']
mstpctl_treeprio:
description:
- Set spanning tree root priority. Must be a multiple of 4096.
location:
description:
- Interface directory location.
default:
- '/etc/network/interfaces.d'
requirements: [ Alternate Debian network interface manager
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
- As this module writes the interface directory location, ensure that
``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or
whatever path is mentioned in the ``location`` attribute.
- For the config to be activated, i.e installed in the kernel,
"service networking reload" needs be be executed. See EXAMPLES section.
'''
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bridge vlan aware bridge.
- cl_bridge:
name: br0
ports: 'swp1-12'
vlan_aware: 'yes'
notify: reload networking
# configure bridge interface to define a default set of vlans
- cl_bridge:
name: bridge
ports: 'swp1-12'
vlan_aware: 'yes'
vids: '1-100'
notify: reload networking
# define cl_bridge once in tasks file
# then write interface config in variables file
# with just the options you want.
- cl_bridge:
name: "{{ item.key }}"
ports: "{{ item.value.ports }}"
vlan_aware: "{{ item.value.vlan_aware|default(omit) }}"
ipv4: "{{ item.value.ipv4|default(omit) }}"
ipv6: "{{ item.value.ipv6|default(omit) }}"
alias_name: "{{ item.value.alias_name|default(omit) }}"
addr_method: "{{ item.value.addr_method|default(omit) }}"
mtu: "{{ item.value.mtu|default(omit) }}"
vids: "{{ item.value.vids|default(omit) }}"
virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
mstpctl_treeprio: "{{ item.value.mstpctl_treeprio|default(omit) }}"
with_dict: "{{ cl_bridges }}"
notify: reload networking
# In vars file
# ============
cl_bridge:
br0:
alias_name: 'vlan aware bridge'
ports: ['swp1', 'swp3']
vlan_aware: true
vids: ['1-100']
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
def run_cmd(module, exec_path):
    """Run a system command via AnsibleModule.run_command and return stdout.

    exec_path is the full command line, e.g. "/sbin/ip -o link show".
    A "cannot find interface" failure is translated into the JSON text of
    an empty config ('[{}]'); any other failure aborts the module with a
    descriptive message.
    """
    (_rc, out, _err) = module.run_command(exec_path)
    if _rc <= 0:
        return out
    if re.search('cannot find interface', _err):
        return '[{}]'
    module.fail_json(msg="Failed; %s Error: %s" % (exec_path, _err))
def current_iface_config(module):
    """Load the currently-installed config for the interface, if any.

    Populates ``module.custom_current_config`` with the first entry of the
    ifquery JSON output, or leaves it as an empty dict when no interface
    file exists yet.
    """
    # due to a bug in ifquery, have to check for presence of interface file
    # and not rely solely on ifquery. when bug is fixed, this check can be
    # removed
    _ifacename = module.params.get('name')
    _int_dir = module.params.get('location')
    module.custom_current_config = {}
    if os.path.exists(_int_dir + '/' + _ifacename):
        _cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
        module.custom_current_config = module.from_json(
            run_cmd(module, _cmd))[0]
def build_address(module):
    """Store the configured IPv4/IPv6 addresses in the desired config.

    Skipped entirely when the port uses DHCP, since a static address would
    conflict with the dynamically assigned one.
    """
    # if addr_method == 'dhcp', dont add IP address
    if module.params.get('addr_method') == 'dhcp':
        return

    # Concatenate whichever address families were provided; a falsy value
    # (None or an empty list) contributes nothing.
    _addresslist = (module.params.get('ipv4') or []) + \
        (module.params.get('ipv6') or [])
    if _addresslist:
        module.custom_desired_config['config']['address'] = ' '.join(
            _addresslist)
def build_vids(module):
    """Record the allowed VLANs ('bridge-vids') when any were supplied."""
    vids = module.params.get('vids')
    if vids:
        module.custom_desired_config['config']['bridge-vids'] = ' '.join(vids)
def build_pvid(module):
    """Record the untagged (native) VLAN id ('bridge-pvid') when supplied."""
    pvid = module.params.get('pvid')
    if pvid:
        module.custom_desired_config['config']['bridge-pvid'] = str(pvid)
def conv_bool_to_str(_value):
    """Map True/False to the ifupdown2 strings 'yes'/'no'; pass anything else through."""
    if isinstance(_value, bool):
        return 'yes' if _value else 'no'
    return _value
def build_generic_attr(module, _attr):
    """Copy one module parameter into the desired config.

    Booleans become 'yes'/'no' and underscores in the attribute name are
    rewritten to dashes, matching ifupdown2 keyword style.
    """
    value = module.params.get(_attr)
    # Inline boolean-to-string conversion ('yes'/'no').
    if isinstance(value, bool):
        value = 'yes' if value else 'no'
    if value:
        key = re.sub('_', '-', _attr)
        module.custom_desired_config['config'][key] = str(value)
def build_alias_name(module):
    """Record the port description under the ifupdown2 'alias' keyword."""
    description = module.params.get('alias_name')
    if description:
        module.custom_desired_config['config']['alias'] = description
def build_addr_method(module):
    """Set the address family and method (e.g. 'inet dhcp') when requested."""
    method = module.params.get('addr_method')
    if method:
        module.custom_desired_config['addr_family'] = 'inet'
        module.custom_desired_config['addr_method'] = method
def build_vrr(module):
    """Add the VRR 'address-virtual' entry: virtual MAC followed by the IP."""
    virtual_ip = module.params.get('virtual_ip')
    virtual_mac = module.params.get('virtual_mac')
    if virtual_ip:
        # The argument spec declares virtual_ip/virtual_mac required
        # together, so the MAC is guaranteed to accompany the IP.
        module.custom_desired_config.get('config')['address-virtual'] = \
            '%s %s' % (virtual_mac, virtual_ip)
def add_glob_to_array(_bridgemems):
    """Join bridge members into one string, prefixing ranges with 'glob'.

    Entries containing a dash (e.g. 'swp1-12') are port ranges and need
    the ifupdown2 'glob' keyword.  Non-list input is returned unchanged.
    """
    if not isinstance(_bridgemems, list):
        return _bridgemems
    return ' '.join(
        'glob ' + entry if re.search('-', entry) else entry
        for entry in _bridgemems)
def build_bridge_attr(module, _attr):
    """Copy a bridge-specific parameter into the desired config.

    Booleans are converted to 'yes'/'no', lists of port ranges get the
    'glob' keyword, and the key is written as 'bridge-<attr>' with
    underscores turned into dashes (ifupdown2 keyword style).
    """
    _value = module.params.get(_attr)
    _value = conv_bool_to_str(_value)
    _value = add_glob_to_array(_value)
    if _value:
        module.custom_desired_config['config'][
            'bridge-' + re.sub('_', '-', _attr)] = str(_value)
def build_desired_iface_config(module):
    """
    take parameters defined and build ifupdown2 compatible hash
    """
    # Start from a skeleton: name and auto-start, no addresses yet.
    module.custom_desired_config = {
        'addr_family': None,
        'auto': True,
        'config': {},
        'name': module.params.get('name')
    }

    # Attributes that get the 'bridge-' keyword prefix.
    for _attr in ['vlan_aware', 'pvid', 'ports', 'stp']:
        build_bridge_attr(module, _attr)

    build_addr_method(module)
    build_address(module)
    build_vids(module)
    build_alias_name(module)
    build_vrr(module)
    # Plain (non bridge-prefixed) keywords.
    for _attr in ['mtu', 'mstpctl_treeprio']:
        build_generic_attr(module, _attr)
def config_dict_changed(module):
    """Return True when the desired 'config' dict differs from the current one."""
    return (module.custom_current_config.get('config') !=
            module.custom_desired_config.get('config'))
def config_changed(module):
    """
    returns true if config has changed
    """
    # A difference in the keyword dict alone is enough.
    if config_dict_changed(module):
        return True
    # check if addr_method is changed
    return module.custom_desired_config.get('addr_method') != \
        module.custom_current_config.get('addr_method')
def replace_config(module):
    """Render the desired config through ifquery and write the iface file.

    The desired config hash is serialized to a temp file, converted to
    native interfaces(5) syntax by ifquery, and written out to
    <location>/<name>.
    """
    temp = tempfile.NamedTemporaryFile()
    desired_config = module.custom_desired_config
    # by default it will be something like /etc/network/interfaces.d/swp1
    final_location = module.params.get('location') + '/' + \
        module.params.get('name')
    final_text = ''
    _fh = open(final_location, 'w')
    # make sure to put hash in array or else ifquery will fail
    # write to temp file
    try:
        temp.write(module.jsonify([desired_config]))
        # need to seek to 0 so that data is written to tempfile.
        temp.seek(0)
        _cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
        final_text = run_cmd(module, _cmd)
    finally:
        # Closing the NamedTemporaryFile also deletes it.
        temp.close()

    try:
        _fh.write(final_text)
    finally:
        _fh.close()
def main():
    """Entry point: declare the argument spec, build the desired bridge
    config, and rewrite the interface file only when something changed."""
    module = AnsibleModule(
        argument_spec=dict(
            ports=dict(required=True, type='list'),
            name=dict(required=True, type='str'),
            ipv4=dict(type='list'),
            ipv6=dict(type='list'),
            alias_name=dict(type='str'),
            addr_method=dict(type='str',
                             choices=['', 'dhcp']),
            mtu=dict(type='str'),
            virtual_ip=dict(type='str'),
            virtual_mac=dict(type='str'),
            vids=dict(type='list'),
            pvid=dict(type='str'),
            mstpctl_treeprio=dict(type='str'),
            vlan_aware=dict(type='bool', choices=BOOLEANS),
            stp=dict(type='bool', default='yes', choices=BOOLEANS),
            location=dict(type='str',
                          default='/etc/network/interfaces.d')
        ),
        required_together=[
            ['virtual_ip', 'virtual_mac']
        ]
    )

    # if using the jinja default filter, this resolves to
    # create an list with an empty string ['']. The following
    # checks all lists and removes it, so that functions expecting
    # an empty list, get this result. May upstream this fix into
    # the AnsibleModule code to have it check for this.
    for k, _param in module.params.items():
        if isinstance(_param, list):
            module.params[k] = [x for x in _param if x]

    _location = module.params.get('location')
    if not os.path.exists(_location):
        _msg = "%s does not exist." % (_location)
        module.fail_json(msg=_msg)
        # fail_json normally exits; the bare return keeps unit tests that
        # mock fail_json from falling through.
        return  # for testing purposes only

    ifacename = module.params.get('name')
    _changed = False
    _msg = "interface %s config not changed" % (ifacename)
    current_iface_config(module)
    build_desired_iface_config(module)
    if config_changed(module):
        replace_config(module)
        _msg = "interface %s config updated" % (ifacename)
        _changed = True
    module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
import tempfile
import os
import re
if __name__ == '__main__':
main()
| gpl-3.0 |
xxxrac/git-repo | subcmds/checkout.py | 48 | 1747 | #
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from command import Command
from progress import Progress
class Checkout(Command):
  """Implements `repo checkout <branchname> [<project>...]`."""

  common = True
  helpSummary = "Checkout a branch for development"
  helpUsage = """
%prog <branchname> [<project>...]
"""
  helpDescription = """
The '%prog' command checks out an existing branch that was previously
created by 'repo start'.

The command is equivalent to:

  repo forall [<project>...] -c git checkout <branchname>
"""

  def Execute(self, opt, args):
    # The branch name is mandatory; Usage() exits with the help text.
    if not args:
      self.Usage()

    nb = args[0]
    err = []
    success = []
    all_projects = self.GetProjects(args[1:])

    pm = Progress('Checkout %s' % nb, len(all_projects))
    for project in all_projects:
      pm.update()

      # CheckoutBranch returns True on success, False on failure, and
      # None when the project has no such branch at all.
      status = project.CheckoutBranch(nb)
      if status is not None:
        if status:
          success.append(project)
        else:
          err.append(project)

    pm.end()

    # Any hard failure is fatal; having the branch in no project at all
    # is also an error.
    if err:
      for p in err:
        print >>sys.stderr,\
          "error: %s/: cannot checkout %s" \
          % (p.relpath, nb)
      sys.exit(1)
    elif not success:
      print >>sys.stderr, 'error: no project has branch %s' % nb
      sys.exit(1)
kiranvizru/psutil | examples/meminfo.py | 44 | 1522 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Print system memory information.
$ python examples/meminfo.py
MEMORY
------
Total : 9.7G
Available : 4.9G
Percent : 49.0
Used : 8.2G
Free : 1.4G
Active : 5.6G
Inactive : 2.1G
Buffers : 341.2M
Cached : 3.2G
SWAP
----
Total : 0B
Used : 0B
Free : 0B
Percent : 0.0
Sin : 0B
Sout : 0B
"""
import psutil
from psutil._compat import print_
def bytes2human(n):
    """Convert a byte count into a human readable string.

    Based on http://code.activestate.com/recipes/578019

    >>> bytes2human(10000)
    '9.8K'
    >>> bytes2human(100001221)
    '95.4M'
    """
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Threshold for each symbol: K=2**10, M=2**20, and so on.
    prefix = dict((s, 1 << (i + 1) * 10) for i, s in enumerate(symbols))
    for s in reversed(symbols):
        if n >= prefix[s]:
            return '%.1f%s' % (n / float(prefix[s]), s)
    return "%sB" % n
def pprint_ntuple(nt):
    """Print each field of a psutil namedtuple, humanizing byte counts."""
    for name in nt._fields:
        value = getattr(nt, name)
        # 'percent' is a ratio, not a byte count -- leave it as-is.
        if name != 'percent':
            value = bytes2human(value)
        print_('%-10s : %7s' % (name.capitalize(), value))
def main():
    """Print virtual memory and swap statistics in human-readable form."""
    print_('MEMORY\n------')
    pprint_ntuple(psutil.virtual_memory())
    print_('\nSWAP\n----')
    pprint_ntuple(psutil.swap_memory())
main()
| bsd-3-clause |
pyrrho314/recipesystem | trunk/setup.py | 1 | 10072 | #!/usr/bin/env python
"""
Setup script for gemini_python.
The tools and modules in this package are developed by the Gemini Data Processing Software Group.
In this package:
astrodata : AstroData, The Recipe System
astrodata_Gemini : Recipes, Primitives and astrodata configurations for Gemini
gempy : Gemini toolkits
fitsstore : Calibration Manager
iqtool : IQ assessment. Used internally by the SOS-DAs
Usage:
python setup.py install --prefix=/astro/iraf/rhux-x86_64-glibc2.5/gempylocal
python setup.py sdist
"""
import os.path
import os
import re
import glob
import sys
from distutils.core import setup
# Pre-compiled regex used throughout to skip Subversion metadata
# directories while walking the source tree.
svndir = re.compile('.svn')
PACKAGENAME = 'gemini_python'
# PACKAGES and PACKAGE_DIRS
# Note: KL not sure what to do about astrodata.adutils.reduceutils.pyjamaface
ASTRODATA_MODULES = ['astrodata',
                     'astrodata.adutils',
                     'astrodata.adutils.future',
                     'astrodata.adutils.reduceutils',
                     'astrodata.eti']
#FITSSTORE_MODULES = ['fitsstore']
GEMPY_MODULES = ['gempy',
                 'gempy.adlibrary',
                 'gempy.gemini',
                 'gempy.gemini.eti',
                 'gempy.library']
# IQTOOL NOT FOR PUBLIC RELEASE, YET
IQTOOL_MODULES = ['iqtool',
                  'iqtool.iq',
                  'iqtool.gemplotlib']
ADLIB_PACKAGES = ['FITS','Gemini'] # This is be used to form 'astrodata_Gemini' and 'astrodata_FITS'
RECIPE_MODULES=[]
PIF_MODULES=[]
slash = re.compile('/')
# For each astrodata library, register its RECIPES_* package (recipes plus
# primitives) and its PIF_* package tree, when those directories exist in
# the checkout.  The os.walk below turns every non-.svn subdirectory of the
# pif tree from a filesystem path into a dotted module name.
for p in ADLIB_PACKAGES:
    if os.path.isdir(os.path.join('astrodata_'+p,'RECIPES_'+p)):
        RECIPE_MODULES.append('astrodata_'+p+'.RECIPES_'+p)
        RECIPE_MODULES.append('astrodata_'+p+'.RECIPES_'+p+'.primitives')
    if os.path.isdir(os.path.join('astrodata_'+p,'PIF_'+p)):
        PIF_MODULES.append('astrodata_'+p+'.PIF_'+p)
        PIF_MODULES.append('astrodata_'+p+'.PIF_'+p+'.pif'+p.lower())
        PIFROOT = os.path.join('astrodata_'+p,'PIF_'+p,'pif'+p.lower())
        for root, dirs, files in os.walk(PIFROOT):
            if not svndir.search(root) and len(files) > 0:
                pifmodules = map((lambda d: slash.sub('.','/'.join([root,d]))),\
                    filter((lambda d: not svndir.search(d)), dirs))
                PIF_MODULES.extend( pifmodules )
# Flatten everything into the package lists handed to setup() below.
SUBMODULES = []
SUBMODULES.extend(ASTRODATA_MODULES)
SUBMODULES.extend(GEMPY_MODULES)
SUBMODULES.extend(RECIPE_MODULES)
SUBMODULES.extend(PIF_MODULES)
SUBMODULES.extend(IQTOOL_MODULES)
PACKAGES = []
PACKAGES.extend(SUBMODULES)
for p in ADLIB_PACKAGES:
    PACKAGES.append('astrodata_'+p)
# All packages live directly under the repository root.
PACKAGE_DIRS = {}
PACKAGE_DIRS[''] = '.'
# PACKAGE_DATA
# Non-Python files installed alongside each package.  Every submodule gets
# the standard legal/release docs; each astrodata_* package additionally
# carries its ADCONFIG trees (structures, lookups, descriptors,
# classifications) and, when present, its recipe files.
PACKAGE_DATA = {}
for s in SUBMODULES:
    PACKAGE_DATA[s] = ['Copyright',
                       'ReleaseNote',
                       'README',
                       'INSTALL'
                       ]
for p in ADLIB_PACKAGES:
    PACKAGE_DATA['astrodata_'+p] = \
        ['Copyright',
         'ReleaseNote',
         'README',
         'INSTALL',
         os.path.join('ADCONFIG_'+p,'structures','*.py'),
         ]
    # The three walks below are identical except for the leaf directory.
    # root.split('/',1)[1] drops the leading package directory so the data
    # entries are relative to the package root (as setup() expects).
    for root, dirs, files in os.walk(os.path.join('astrodata_'+p,'ADCONFIG_'+p,'lookups')):
        if not svndir.search(root) and len(files) > 0:
            dest = root.split('/',1)[1] if len(root.split('/',1)) > 1 else ""
            PACKAGE_DATA['astrodata_'+p].extend( map((lambda f: os.path.join(dest, f)), files) )
    for root, dirs, files in os.walk(os.path.join('astrodata_'+p,'ADCONFIG_'+p,'descriptors')):
        if not svndir.search(root) and len(files) > 0:
            dest = root.split('/',1)[1] if len(root.split('/',1)) > 1 else ""
            PACKAGE_DATA['astrodata_'+p].extend( map((lambda f: os.path.join(dest, f)), files) )
    for root, dirs, files in os.walk(os.path.join('astrodata_'+p,'ADCONFIG_'+p,'classifications')):
        if not svndir.search(root) and len(files) > 0:
            dest = root.split('/',1)[1] if len(root.split('/',1)) > 1 else ""
            PACKAGE_DATA['astrodata_'+p].extend( map((lambda f: os.path.join(dest, f)), files) )
    if os.path.isdir(os.path.join('astrodata_'+p,'RECIPES_'+p)):
        PACKAGE_DATA['astrodata_'+p+'.RECIPES_'+p].append('recipe.*')
        PACKAGE_DATA['astrodata_'+p+'.RECIPES_'+p].append(os.path.join('subrecipes','recipe.*'))
        PACKAGE_DATA['astrodata_'+p+'.RECIPES_'+p+'.primitives'].append('primitives_List.txt')
# The adcc web faceplate assets ship inside the astrodata package itself;
# strip the leading 'astrodata/' so paths are package-relative.
astrodatadir = re.compile('astrodata/')
for root, dirs, files in os.walk(os.path.join('astrodata','scripts','adcc_faceplate')):
    if not svndir.search(root) and len(files) > 0:
        PACKAGE_DATA['astrodata'].extend( map((lambda f: os.path.join(astrodatadir.sub('',root), f)), files) )
# DATA_DIRS and DATA_FILES
# Documentation installed under share/: astrodata docs, the Gemini recipe
# docs, and gempy's doc-local tree.  Each os.walk mirrors the source layout
# under the matching share/ destination; dest is the path relative to the
# doc root being walked.
DATA_FILES = []
ASTRODATADOC_DIR = os.path.join('share','astrodata')
for root, dirs, files in os.walk(os.path.join('astrodata','doc')):
    if not svndir.search(root) and len(files) > 0:
        dest = root.split('/',2)[2] if len(root.split('/',2)) > 2 else ""
        DOC_FILES = map((lambda f: os.path.join(root,f)), files)
        DATA_FILES.append( (os.path.join(ASTRODATADOC_DIR,dest), DOC_FILES) )
RECIPESDOC_DIR = os.path.join('share','astrodata_Gemini','RECIPES_Gemini')
for root, dirs, files in os.walk(os.path.join('astrodata_Gemini','RECIPES_Gemini','doc')):
    if not svndir.search(root) and len(files) > 0:
        dest = root.split('/',3)[3] if len(root.split('/',3)) > 3 else ""
        DOC_FILES = map((lambda f: os.path.join(root,f)), files)
        DATA_FILES.append( (os.path.join(RECIPESDOC_DIR,dest), DOC_FILES) )
GEMPYDOC_DIR = os.path.join('share','gempy')
# doc completely out of date, do not install or distribute until that's fixed
#for root, dirs, files in os.walk(os.path.join('gempy','doc')):
#    if not svndir.search(root) and len(files) > 0:
#        dest = root.split('/',2)[2] if len(root.split('/',2)) > 2 else ""
#        DOC_FILES = map((lambda f: os.path.join(root,f)), files)
#        DATA_FILES.append( (os.path.join(GEMPYDOC_DIR,dest), DOC_FILES) )
for root, dirs, files in os.walk(os.path.join('gempy','doc-local')):
    if not svndir.search(root) and len(files) > 0:
        dest = root.split('/',2)[2] if len(root.split('/',2)) > 2 else ""
        DOC_FILES = map((lambda f: os.path.join(root,f)), files)
        DATA_FILES.append( (os.path.join(GEMPYDOC_DIR,dest), DOC_FILES) )
# SCRIPTS
# Command-line entry points to install.  In the source tree these are
# symlinks to matching *.py files; see the sdist handling below.
ASTRODATA_SCRIPTS = [ os.path.join('astrodata','scripts','adcc'),
                      os.path.join('astrodata','scripts','listPrimitives'),
                      os.path.join('astrodata','scripts','mkCalculatorInterface'),
                      os.path.join('astrodata','scripts','reduce'),
                      os.path.join('astrodata','scripts','superclean'),
                      os.path.join('astrodata','scripts','typelib'),
                      os.path.join('astrodata','scripts','typewalk'),
                      os.path.join('astrodata','scripts','rsifaces','pif2prim','mkPIF'),
                      #os.path.join('astrodata','scripts','header_cal_rq.py')
                      #os.path.join('astrodata','scripts','packgempy'),
                      #os.path.join('astrodata','scripts','prsproxy'),
                      #os.path.join('astrodata','scripts','recipe'),
                      #os.path.join('astrodata','scripts','recipecontrolcenter'),
                      #os.path.join('astrodata','scripts','runNEWdemo'),
                      #os.path.join('astrodata','scripts','rundemo'),
                      #os.path.join('astrodata','scripts','rundemo2'),
                      #os.path.join('astrodata','scripts','tempConfig.py'),
                      #os.path.join('astrodata','scripts','uploaddataset.py'),
                      #os.path.join('astrodata','scripts','wget_uploaddataset.sh'),
                     ]
GEMPY_SCRIPTS = [ os.path.join('gempy','scripts','autoredux'),
                  #os.path.join('gempy','scripts','cleanir.py')
                  os.path.join('gempy','scripts','fwhm_histogram'),
                  os.path.join('gempy','scripts','profile_all_obj'),
                  os.path.join('gempy','scripts','psf_plot'),
                  os.path.join('gempy','scripts','redux'),
                  os.path.join('gempy','scripts','zp_histogram')
                 ]
#IQTOOL_SCRIPTS = [ os.path.join('iqtool','iqtool.py')]
if "sdist" in sys.argv:
    # The script entries above are symlinks that may not be dereferenced
    # while building a source distribution, so also package the .py files
    # the links point to.  'install' always dereferences the links, so the
    # .py twins are unnecessary there.
    #
    # BUG FIX (two defects in the original):
    #   * dotpy was re.compile(".py$") used with .match(script); match()
    #     anchors at the START of the path so it could never match, and the
    #     unescaped '.' matched any character.  Use search() and r"\.py$".
    #   * a single shared PYFILES list was extended into BOTH script lists,
    #     so ASTRODATA_SCRIPTS also received every gempy .py entry.  Build
    #     a fresh list per script list instead.
    dotpy = re.compile(r"\.py$")
    for _script_list in (GEMPY_SCRIPTS, ASTRODATA_SCRIPTS):
        _pyfiles = [_s + '.py' for _s in _script_list if not dotpy.search(_s)]
        _script_list.extend(_pyfiles)
SCRIPTS = []
SCRIPTS.extend(ASTRODATA_SCRIPTS)
SCRIPTS.extend(GEMPY_SCRIPTS)
#SCRIPTS.extend(IQTOOL_SCRIPTS)
# No compiled C extensions are built for this package.
EXTENSIONS = None
setup ( name='gemini_python',
        version='0.9.0',
        description='Gemini Data Processing Python Package',
        author='Gemini Data Processing Software Group',
        author_email='klabrie@gemini.edu',
        url='http://www.gemini.edu',
        maintainer='Gemini Data Processing Software Group',
        packages=PACKAGES,
        package_dir=PACKAGE_DIRS,
        package_data=PACKAGE_DATA,
        data_files=DATA_FILES,
        scripts=SCRIPTS,
        ext_modules=EXTENSIONS,
        # NOTE(review): these classifier strings are free-form and do not
        # come from the official PyPI trove classifier list -- harmless for
        # distutils, but they would not be recognized on PyPI.
        classifiers=[
            'Development Status :: Beta',
            'Intended Audience :: Beta Testers',
            'Operating System :: Linux :: RHEL',
            'Programming Language :: Python',
            'Topic :: Gemini',
            'Topic :: Data Reduction',
            'Topic :: Astronomy',
            ],
       )
| mpl-2.0 |
AOSP-S4-KK/platform_external_chromium_org | build/android/pylib/utils/command_option_parser.py | 160 | 2419 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An option parser which handles the first arg as a command.
Add other nice functionality such as printing a list of commands
and an example in usage.
"""
import optparse
import sys
class CommandOptionParser(optparse.OptionParser):
    """OptionParser subclass that knows how to list its commands.

    Extra keyword arguments consumed by the constructor (everything else is
    forwarded to optparse.OptionParser):
      commands_dict: maps command names to objects providing
          add_options_func (adds options to the parser) and
          run_command_func (runs the command itself).
      example: an example invocation shown in the usage text.
    """

    def __init__(self, *args, **kwargs):
        self.commands_dict = kwargs.pop('commands_dict', {})
        self.example = kwargs.pop('example', '')
        kwargs.setdefault('usage', 'Usage: %prog <command> [options]')
        optparse.OptionParser.__init__(self, *args, **kwargs)

    #override
    def get_usage(self):
        base_usage = optparse.OptionParser.get_usage(self)
        return self.expand_prog_name(
            base_usage + self.get_example() + self.get_command_list())

    #override
    def get_command_list(self):
        commands = sorted(self.commands_dict.keys())
        if not commands:
            return ''
        return '\nCommands:\n  %s\n' % '\n  '.join(commands)

    def get_example(self):
        if not self.example:
            return ''
        return '\nExample:\n  %s\n' % self.example
def ParseAndExecute(option_parser, argv=None):
    """Parses options/args from argv and runs the specified command.

    Args:
      option_parser: A CommandOptionParser object.
      argv: Command line arguments.  If falsy, sys.argv is used instead.

    Returns:
      An exit code.
    """
    argv = argv or sys.argv
    if len(argv) < 2 or argv[1] not in option_parser.commands_dict:
        # Parse args first: if this is '--help', optparse prints help and
        # exits before we report the invalid command.
        option_parser.parse_args(argv)
        option_parser.error('Invalid command.')
    command = option_parser.commands_dict[argv[1]]
    command.add_options_func(option_parser)
    options, args = option_parser.parse_args(argv)
    return command.run_command_func(argv[1], options, args, option_parser)
| bsd-3-clause |
gencer/python-phonenumbers | python/phonenumbers/data/region_UY.py | 2 | 1439 | """Auto-generated file, do not edit by hand. UY metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Phone numbering metadata for Uruguay (region 'UY', country calling code
# +598), generated by libphonenumber's build tools from the upstream XML
# definitions.  Regenerate rather than hand-editing (see module docstring).
PHONE_METADATA_UY = PhoneMetadata(id='UY', country_code=598, international_prefix='0(?:1[3-9]\\d|0)',
    general_desc=PhoneNumberDesc(national_number_pattern='[2489]\\d{6,7}', possible_length=(7, 8), possible_length_local_only=(7,)),
    fixed_line=PhoneNumberDesc(national_number_pattern='2\\d{7}|4[2-7]\\d{6}', example_number='21231234', possible_length=(8,), possible_length_local_only=(7,)),
    mobile=PhoneNumberDesc(national_number_pattern='9[1-9]\\d{6}', example_number='94231234', possible_length=(8,)),
    toll_free=PhoneNumberDesc(national_number_pattern='80[05]\\d{4}', example_number='8001234', possible_length=(7,)),
    premium_rate=PhoneNumberDesc(national_number_pattern='90[0-8]\\d{4}', example_number='9001234', possible_length=(7,)),
    preferred_international_prefix='00',
    national_prefix='0',
    preferred_extn_prefix=' int. ',
    national_prefix_for_parsing='0',
    number_format=[NumberFormat(pattern='(\\d{4})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[24]']),
        NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['9[1-9]'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[89]0'], national_prefix_formatting_rule='0\\1')])
| apache-2.0 |
billyhunt/osf.io | admin/pre_reg/views.py | 4 | 5652 | import functools
import httplib as http
import json
import operator
from copy import deepcopy
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.paginator import Paginator, EmptyPage
from django.core.urlresolvers import reverse
from django.http import HttpResponseBadRequest
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from admin.pre_reg import serializers
from admin.pre_reg.forms import DraftRegistrationForm
from framework.exceptions import HTTPError
from framework.mongo.utils import get_or_http_error
from modularodm import Q
from website.exceptions import NodeStateError
from website.files.models import FileNode
from website.project.model import MetaSchema, DraftRegistration
# Shorthand: load a DraftRegistration by primary key, or raise an HTTP
# error via get_or_http_error when it does not exist.
get_draft_or_error = functools.partial(get_or_http_error, DraftRegistration)
def get_prereg_drafts(user=None, filters=tuple()):
    """Return all Prereg Challenge drafts that have an approval attached,
    sorted oldest-first by approval initiation date.

    `user` and `filters` are accepted but currently unused (see TODO).
    """
    prereg_schema = MetaSchema.find_one(
        Q('name', 'eq', 'Prereg Challenge') &
        Q('schema_version', 'eq', 2)
    )
    draft_query = (
        Q('registration_schema', 'eq', prereg_schema) &
        Q('approval', 'ne', None)
    )
    if user:
        pass
        # TODO: filter by assignee; this requires multiple levels of Prereg admins-
        # one level that can see all drafts, and another than can see only the ones they're assigned.
        # As a followup to this, we need to make sure this applies to approval/rejection/commenting endpoints
        # draft_query = draft_query & Q('_metaschema_flags.assignee', 'eq', user._id)
    by_initiation_date = operator.attrgetter('approval.initiation_date')
    return sorted(DraftRegistration.find(draft_query), key=by_initiation_date)
def is_in_prereg_group(user):
    """Determine whether a user belongs to the prereg admin group.

    :param user: User wanting access to prereg material
    :return: True if the user is in 'prereg_group', False otherwise
    """
    return user.is_in_group('prereg_group')
@login_required
@user_passes_test(is_in_prereg_group)
def prereg(request):
    """Render the prereg admin landing page with a paginated draft list.

    :param request: current request; ?page= selects the paginator page
    :return: rendered 'pre_reg/prereg.html' with drafts, page and IMMEDIATE
    """
    paginator = Paginator(get_prereg_drafts(user=request.user), 5)
    try:
        page_number = int(request.GET.get('page'))
    except (TypeError, ValueError):
        page_number = 1
    try:
        page = paginator.page(page_number)
        drafts = [serializers.serialize_draft_registration(d, json_safe=False)
                  for d in page]
    except EmptyPage:
        # BUG FIX: paginator.page() is what raises EmptyPage (iterating a
        # Page object never does), so the original try/except around the
        # serialization comprehension could not catch it and an
        # out-of-range ?page= value crashed the view.  Fall back to the
        # last valid page instead.
        page = paginator.page(paginator.num_pages)
        drafts = [serializers.serialize_draft_registration(d, json_safe=False)
                  for d in page]
    for draft in drafts:
        draft['form'] = DraftRegistrationForm(draft)
    context = {
        'drafts': drafts,
        'page': page,
        'IMMEDIATE': serializers.IMMEDIATE,
    }
    return render(request, 'pre_reg/prereg.html', context)
@login_required
@user_passes_test(is_in_prereg_group)
def view_draft(request, draft_pk):
    """Render the review/edit page for a single draft registration.

    :param draft_pk: Unique id for the selected draft
    :return: rendered 'pre_reg/edit_draft_registration.html'
    """
    draft = get_draft_or_error(draft_pk)
    serialized_draft = serializers.serialize_draft_registration(draft)
    return render(request, 'pre_reg/edit_draft_registration.html',
                  {'draft': serialized_draft})
@login_required
@user_passes_test(is_in_prereg_group)
def view_file(request, node_id, provider, file_id):
    """Redirect to the WaterButler URL for the requested file."""
    # Renamed local from `file` to avoid shadowing the builtin.
    file_node = FileNode.load(file_id)
    return redirect(file_node.generate_waterbutler_url())
@csrf_exempt
@login_required
@user_passes_test(is_in_prereg_group)
def approve_draft(request, draft_pk):
    """Approve the given draft, then bounce back to the admin list page.

    :param request: mostly for user (and the 'page' to return to)
    :param draft_pk: Unique id for current draft
    """
    draft = get_draft_or_error(draft_pk)
    draft.approve(request.user.osf_user)
    back_url = reverse('pre_reg:prereg') + "?page={0}".format(request.POST.get('page', 1))
    return redirect(back_url, permanent=True)
@csrf_exempt
@login_required
@user_passes_test(is_in_prereg_group)
def reject_draft(request, draft_pk):
    """Reject the given draft, then bounce back to the admin list page.

    :param request: mostly for user (and the 'page' to return to)
    :param draft_pk: Unique id for current draft
    """
    draft = get_draft_or_error(draft_pk)
    draft.reject(request.user.osf_user)
    back_url = reverse('pre_reg:prereg') + "?page={0}".format(request.POST.get('page', 1))
    return redirect(back_url, permanent=True)
@csrf_exempt
@login_required
def update_draft(request, draft_pk):
    """Persist admin edits to a draft registration.

    Two payload shapes are handled:
      * {'admin_settings': {...}} -- validated through DraftRegistrationForm;
        'notes' is stored on the draft and the remaining cleaned fields
        become draft.flags.
      * anything else -- comments from payload['schema_data'] are merged
        into the draft's registration metadata, key by key.

    :param draft_pk: Unique id for current draft
    :return: JsonResponse containing the re-serialized draft
    """
    # NOTE(review): unlike the sibling views, this endpoint has no
    # user_passes_test(is_in_prereg_group) guard -- confirm that is intended.
    data = json.loads(request.body)
    draft = get_draft_or_error(draft_pk)
    if 'admin_settings' in data:
        form = DraftRegistrationForm(data=data['admin_settings'])
        if not form.is_valid():
            return HttpResponseBadRequest("Invalid form data")
        admin_settings = form.cleaned_data
        draft.notes = admin_settings.get('notes', draft.notes)
        # 'notes' lives on the draft itself; the remaining cleaned fields
        # become the flags dict.  Presumably the form always declares a
        # 'notes' field, otherwise this del raises KeyError -- TODO confirm.
        del admin_settings['notes']
        draft.flags = admin_settings
        draft.save()
    else:
        schema_data = data.get('schema_data', {})
        # Rebind `data` to a copy of the stored metadata, then graft the
        # submitted comments onto each question key (missing keys -> []).
        data = deepcopy(draft.registration_metadata)
        for key, value in data.items():
            data[key]['comments'] = schema_data.get(key, {}).get('comments', [])
        try:
            draft.update_metadata(data)
            draft.save()
        except (NodeStateError):
            raise HTTPError(http.BAD_REQUEST)
    return JsonResponse(serializers.serialize_draft_registration(draft))
| apache-2.0 |
tahahn/FollowTheMoney | FollowTheMoneyETL.py | 1 | 41550 | #Title: Follow The Money ETL
#Author: Travis Hahn
#Version: 0.02
#Date: 06/30/2016
'''
The Follow The Money ETL extracts data concerning contributions to lawmakers
and candidates at the state level using the Follow The Money API. Additionally
this module inserts the extracted data into a database allowing for front-end
access.
'''
import MySQLdb,json,datetime,time,re,urllib,pickle
def data_extract(Entity,page,state):
    '''
    Extract one page of contribution data from the Follow The Money API
    and cache it to disk as JSON (e.g. 'ALCp3.json').

    Parameters:
        Entity: (String) The Entity the data is about ('Lawmaker' or
            'Candidate').  NOTE(review): although api_dict is built for
            both entity types, the request below always uses api_url2 (the
            Candidate query) -- confirm whether api_dict[Entity] was
            intended here.
        page: (int) the page number to be extracted
        state: (String) the state currently being inserted/updated
    '''
    with open('api.txt') as f:
        content=f.readlines()
    # NOTE(review): readlines() keeps the trailing newline, so api_key may
    # end in '\n'; verify api.txt has no newline or strip() here.
    api_key=content[0]
    p_number=str(page)
    api_dict = {
        'Lawmaker':'meh',       # placeholder, replaced below
        'Candidate':'meh2'      # placeholder, replaced below
    }
    api_url="http://api.followthemoney.org/?f-core=1&mode=json&y=2015,2016&APIKey="+str(api_key)+"&s="+state+"&gro=y,c-t-eid,law-oc,law-p&so=law-eid&sod=0&p="+p_number
    api_url2='http://api.followthemoney.org/?s='+state+'&f-core=1&c-exi=1&y=2015,2016&gro=c-t-id&APIKey='+api_key+'&mode=json&so=c-t-id&p='+p_number
    api_dict['Lawmaker']=api_url
    api_dict['Candidate']=api_url2
    #api_url="http://api.followthemoney.org/entity.php?eid=6688494&APIKey=eebe0a5bc8970cbab68104c1759e6cb6&mode=json"
    #print api_dict[Entity]
    # Fetch the Candidate page and write it for the downstream
    # Update()/database2() steps.  (Python 2: urllib.urlopen.)
    response=urllib.urlopen(api_url2)
    data=json.loads(response.read())
    file_ad=state+'Cp'+p_number+".json"
    with open(file_ad,'w') as outfile:
        json.dump(data,outfile)
def NumToDIM(x):
    """Map a loop index to the name of a database dimension.

    0 -> 'cycle', 1 -> 'geo', 2 -> 'office', 3 -> 'party';
    any other value -> None.
    """
    dimensions = ('cycle', 'geo', 'office', 'party')
    return dict(enumerate(dimensions)).get(x)
def NumToState(x):
    """Map an index 0..49 to a US state abbreviation (ETL canonical order).

    Any value outside the mapping returns None.
    """
    states = ('AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA',
              'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD',
              'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ',
              'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
              'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY')
    return dict(enumerate(states)).get(x)
def statetonum(x):
    """Inverse of NumToState: map a state abbreviation to its index.

    Unknown abbreviations return None.
    """
    states = ('AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA',
              'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD',
              'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ',
              'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
              'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY')
    return {abbr: idx for idx, abbr in enumerate(states)}.get(x)
def Update(Type,addr,host,user,passwd,db):
    """
    Incrementally update the incubator table (ftm_inc) from a cached JSON
    page: for each record, skip it if an identical row already exists,
    otherwise insert it, then append a count of inserted rows to log.txt.

    Parameters:
        Type: (String) 'Lawmaker' or 'Candidate'
        addr: (String) File address in Lawmaker format (e.g. ALp0.json) or
            Candidate format (e.g. ALCp0.json)
        host: (String) MySQL server location
        user: (String) MySQL username
        passwd: (String) MySQL password
        db: (String) MySQL database name

    SECURITY NOTE(review): every SQL statement below is assembled by string
    concatenation from record values; the quote-stripping below is the only
    mitigation.  Parameterized queries (cursor.execute(sql, params)) would
    be the safe fix.

    NOTE(review): the duplicate-check/insert pair is racy and slow (one
    connection per record); a unique index plus INSERT ... ON DUPLICATE KEY
    (upsert) would be both correct and faster, as the inline comment below
    already suggests.
    """
    row_updates=0
    Time = time.time()
    with open(addr) as f:
        data=json.load(f)
    for item in data['records']:
        conn = MySQLdb.connect(host,user,passwd,db)
        cursor = conn.cursor()
        if Type=='Candidate':
            #Create Variables for Candidate Specific Elements
            Name=item['Candidate']['Candidate'].split(',')
            if len(Name)>1:
                Cand_Frname=Name[1].strip().upper()
                Mname='n/a'
                if " " in Cand_Frname:
                    Namez=Cand_Frname.split(" ")
                    Cand_Frname= Namez[0]
                    Mname=Namez[1]
                Cand_Lname=Name[0].upper()
                Cand_Fname=(Name[0]+","+Name[1]).upper()
            else:
                # NOTE(review): this branch leaves Mname unbound; the INSERT
                # below would then raise NameError -- confirm whether
                # comma-less names ever occur in the feed.
                Cand_Fname=Name[0]
                Cand_Frname=""
                Cand_Lname=""
            year=item['Election_Year']['Election_Year']
            State=item['Election_Jurisdiction']['Election_Jurisdiction']
            # NOTE(review): normalizeOffice is not defined in this module;
            # presumably imported/defined elsewhere -- verify.
            office=normalizeOffice(item['Office_Sought']['Office_Sought'])
            Election_Type=item['Election_Type']['Election_Type']
            Election_Status=item['Election_Status']['Election_Status']
            Incumbency_Status=item['Incumbency_Status']['Incumbency_Status']
            money=item['Total_$']['Total_$']
            party=item['General_Party']['General_Party']
            if "'" in Cand_Fname:
                Cand_Frname=Cand_Frname.replace("'"," ")
                Cand_Fname=Cand_Fname.replace("'"," ")
                Cand_Lname=Cand_Lname.replace("'"," ")
            if '"' in Cand_Fname or '"' in Cand_Lname:
                Cand_Frname=Cand_Frname.replace('"',' ')
                Cand_Fname=Cand_Fname.replace('"',' ')
                Cand_Lname=Cand_Lname.replace('"',' ')
            #If statements to remove apostrophers and quotation marks from Candidates names
            #This ensures that the SQL Statement will execute properly.
            SQL='SELECT COUNT(*) FROM ftm_inc WHERE First_Name="'+Cand_Frname+'"'+' AND Last_Name="'+Cand_Lname+'"'+" AND State='"+State+"'"+" AND Cycle="+str(year)+" AND Office='"+office+"'"+" AND General_Party='"+party+"'"+"AND Cycle_Type='"+Election_Type+"'"+" AND contribution="+str(money)+";"
            #SQL statement to check if the Lawmaker from the file is already in the Incubator Table
        if Type=='Lawmaker':
            table='lm_dim_lawmaker'
            Time = time.time()
            Fname=item['Lawmaker']['Lawmaker']
            Lname=Fname.split(',')[0]
            Frname1=Fname.split(',')[1]
            Frname=Frname1.split()[0]
            ID= item['Lawmaker']['id']
            Election_Type='Standard'
            Election_Status=None
            Incumbency_Status=None
            office = item['General_Office']['General_Office']
            party=item['General_Party']['General_Party']
            #party2=item['Specific_Party']['Specific_Party']
            party2='NA'
            money = item['Total_$']['Total_$']
            money=int(round(float(money)))
            year = item['Election_Year']['Election_Year']
            State="AL"
            #State=item['Election_Jurisdiction']['Election_Jurisdiction']
            #Change to this once API Access is regained
            #add gro=s to request parameters
            Mname=''
            SQL="SELECT COUNT(*) FROM ftm_inc WHERE First_Name='"+Frname+"'"+" AND Last_Name='"+Lname+"'"+"AND State='"+State+"'"+" AND Office='"+office+"'"+" AND General_Party='"+party+"'"+" AND Cycle="+str(year)+" AND Cycle_Type='"+Election_Type+"'"+" AND contribution="+str(money)
            #SQL statement to check if the Candidate from the file is already in the Incubator Table
        cursor.execute(SQL)
        results= cursor.fetchall()
        conn.commit()
        conn.close()
        #print Cand_Frname+" "+str(results[0][0])
        #results[0][0]=0
        #results=1
        # NOTE(review): the prints and the INSERT below reference Cand_Fname/
        # Cand_Frname/Cand_Lname, which are only bound in the 'Candidate'
        # branch -- a Lawmaker-only run would raise NameError.  Confirm.
        if results[0][0]>0:
            print Cand_Fname+"checked"
        if results[0][0]==0:
            conn = MySQLdb.connect(host,user,passwd,db)
            cursor = conn.cursor()
            Incubator="""INSERT INTO ftm_inc(State,Cycle,Cycle_Type,Election_Status,Incumbency_Status,Full_Name,First_Name,Middle_Name,Last_Name,contribution,Office,General_Party,timestamp) VALUES('%s',%s,'%s','%s','%s','%s','%s','%s','%s',%s,'%s','%s','%s')"""
            #print Incubator%(State,year,Election_Type,Election_Status,Incumbency_Status,Cand_Fname,Cand_Frname,Cand_Mname,Cand_Lname,money,office,party,datetime.datetime.fromtimestamp(Time).strftime('%Y-%m-%d'))
            cursor.execute(Incubator%(State,year,Election_Type,Election_Status,Incumbency_Status,Cand_Fname,Cand_Frname,Mname,Cand_Lname,money,office,party,datetime.datetime.fromtimestamp(Time).strftime('%Y-%m-%d')))
            print Cand_Fname+"inserted"
            #If the Candidate or Lawmaker is not already in the Incubator Table they are then inserted
            #This can be done more efficiently with a unique index and/or an upsert SQL Function(Return to this if time allows)
            conn.commit()
            conn.close()
            row_updates+=1
    with open('log.txt','a') as f:
        f.write(str(row_updates)+ " Rows Updated on "+datetime.datetime.fromtimestamp(Time).strftime('%Y-%m-%d %H:%M:%S')+"\n")
    #Logs the number of rows updated
    print 'updated'
def database2(host,user,passwd,db,type1):
    '''
    Populate the fact and dimension tables from the incubator table
    (ftm_inc).

    For each of the four shared dimensions (cycle, geo, office, party) and
    the entity dimension (lawmaker or candidate, handled at i==4), this:
      1. inserts any new dimension rows found in ftm_inc,
      2. writes the generated dimension IDs back onto ftm_inc,
      3. stamps the dimension table,
      4. loads/links the corresponding fact-table rows.

    Parameters:
        host: (String) URL of the MySQL server
        user: (String) Username for MySQL server
        passwd: (String) password for MySQL server
        db: (String) MySQL database name
        type1: (String) Entity type, 'Lawmaker' or 'Candidate'

    SECURITY NOTE(review): the statements below are assembled by string
    concatenation from fixed table/column names, so no external input is
    interpolated directly, but parameterized queries would still be safer.
    '''
    # NOTE(review): `y` is never used.
    y=0
    for i in range (0,5):
        conn = MySQLdb.connect(host,user,passwd,db)
        cursor = conn.cursor()
        # i in 0..3 selects a shared dimension; at i==4 NumToDIM returns
        # None (so x == 'None') and the entity branches below apply.
        x = str(NumToDIM(i))
        if type1=='Lawmaker' and i==4:
            table='lm_dim_lawmaker'
            params=['Lawmaker_ID','full_name','first_name','middle_name','last_name']
            #parameters for Lawmaker SQL Statement
        elif type1=='Candidate' and i==4:
            table='cand_dim_candidate'
            params=['Candidate_ID','full_name','first_name','middle_name','last_name']
            #parameters for Candidate SQL Statements
        elif x=='cycle':
            table='ftm_dim_'+x
            params=['cycle_ID','cycle','cycle_type',]
            #parameters for Cycle SQL Statements
        elif x=='geo':
            table='ftm_dim_'+x
            params=['Geo_ID','State','District']
            #parameters for Geography SQL Statements
        elif x=='office':
            table = 'ftm_dim_'+x
            params=['Office_ID','Office','Office_Code']
            #parameters for Office SQL Statements
        elif x =='party':
            table='ftm_dim_'+x
            params=['Party_ID','General_Party','Specific_Party']
            #parameters for Party SQL statements
        # params[0] is the surrogate key; params[1] is the natural/join
        # column used to match ftm_inc rows against the dimension table.
        join=params[1]
        pk=params[0]
        params3 =list()
        params2=''
        params4=''
        # Build comma-separated column lists: params2 for the dimension
        # table side and params4 with the 'ftm_inc.' prefix for the SELECT
        # side.  NOTE(review): this inner loop reuses the outer variable
        # `i`; harmless in Python (the outer for re-pulls from its own
        # iterator each pass) but confusing to read.
        for i in range(1,len(params)):
            if(i==len(params)-1):
                params2+=params[i]
            else:
                params2+=params[i]+','
            #formats parameters with commas to ensure correct SQL
            params3.append("ftm_inc."+params[i])
        for x in range(0,len(params3)):
            if(x==len(params3)-1):
                params4+=params3[x]
            else:
                params4+=params3[x]+','
            #formats parameters with commas to ensure correct SQL
        if(type1=='Lawmaker'):
            #print params
            SQL="INSERT IGNORE INTO "+table+' ('+params2+')'+'\n\t SELECT DISTINCT '+params4+"\n\t\t FROM ftm_inc \n\t\t LEFT JOIN "+table+" ON ftm_inc."+join+'='+table+'.'+join+'\n\t\t WHERE ftm_inc.'+pk+' IS NULL AND Election_Status IS NULL'# AND ftm_inc.'+params[1]+" IS NOT NULL AND ftm_inc."+params[2]+" IS NOT NULL"
            #Inserts dimension data into dimensionn tables ensuring that there are no duplicates and that the dimension data is associated with lawmakers
            SQL2="UPDATE IGNORE ftm_inc JOIN "+table+' ON ftm_inc.'+join+'='+table+'.'+join+' SET ftm_inc'+'.'+pk+'='+table+'.'+pk+" WHERE Election_Status IS NULL"
            #Set's the dimension ID's on the incubator table to the auto-incremented ID's on the dimension table
            SQL3="UPDATE "+table+" SET timestamp=now()"
            #Sets the time in which the dimensions were added to the dimension table
            SQL4="""INSERT IGNORE INTO lm_fact (contributions) \n\t SELECT DISTINCT contribution FROM ftm_inc WHERE Election_Status IS NULL"""
            #Inserts the contributions associated with each Lawmaker
            SQL5="UPDATE lm_fact JOIN ftm_inc ON lm_fact.contributions=ftm_inc.contribution SET lm_fact."+pk+"=ftm_inc."+pk
            #Sets the dimension ID's on the Lawmaker Fact Table to the auto-incremented ID's on the dimension table
            SQL6="UPDATE lm_fact SET timestamp=now()"
            #Sets timestamp for when Lawmakers were added to the fact-table
            SQL7="UPDATE IGNORE lm_fact JOIN ftm_dim_geo ON ftm_dim_geo.Geo_ID=lm_fact.Geo_ID SET lm_fact.State=ftm_dim_geo.State"
            #Set the State associated with the ID on the Geography Dimension table
            cursor.execute(SQL)
            cursor.execute(SQL2)
            cursor.execute(SQL3)
            cursor.execute(SQL4)
            cursor.execute(SQL5)
            cursor.execute(SQL6)
            cursor.execute(SQL7)
            conn.commit()
            conn.close()
        if(type1=='Candidate'):
            SQL="INSERT IGNORE INTO "+table+' ('+params2+')'+'\n\t SELECT DISTINCT '+params4+"\n\t\t FROM ftm_inc \n\t\t LEFT JOIN "+table+" ON ftm_inc."+join+'='+table+'.'+join+'\n\t\t WHERE ftm_inc.'+pk+' IS NULL AND Election_Status IS NOT NULL'# AND ftm_inc.'+params[1]+" IS NOT NULL AND ftm_inc."+params[2]+" IS NOT NULL"
            #Inserts dimension data into dimensionn tables ensuring that there are no duplicates and that the dimension data is associated with lawmakers
            #Inserts dimension data into dimensionn tables ensuring that there are no duplicates and that the dimension data is associated with candidates
            SQL2="UPDATE IGNORE ftm_inc JOIN "+table+' ON ftm_inc.'+join+'='+table+'.'+join+' SET ftm_inc'+'.'+pk+'='+table+'.'+pk+" WHERE Election_Status IS NOT NULL"
            #Set's the dimension ID's on the incubator table to the auto-incremented ID's on the dimension table
            SQL3="UPDATE "+table+" SET timestamp=now()"
            #Sets the time in which the dimensions were added to the dimension table
            SQL4="INSERT IGNORE INTO cand_fact (contribution) \n\t SELECT DISTINCT contribution FROM ftm_inc WHERE Election_Status IS NOT NULL"
            #Inserts the contributions associated with each Candidate
            SQL5="UPDATE cand_fact JOIN ftm_inc ON cand_fact.contribution=ftm_inc.contribution SET cand_fact."+pk+"=ftm_inc."+pk+" WHERE cand_fact.Candidate_ID IS NULL"
            #Sets the dimension ID's on the Lawmaker Fact Table to the auto-incremented ID's on the dimension table
            # NOTE(review): SQL7 ends with 'WHERE cand_fact.state' (a bare
            # column used as a truthiness test) -- looks like a truncated
            # condition such as 'WHERE cand_fact.state IS NULL'; confirm.
            SQL6="UPDATE cand_fact SET timestamp=now()"
            #Sets timestamp for when Candidates were added to the fact-table
            SQL7="UPDATE IGNORE cand_fact JOIN ftm_dim_geo ON cand_fact.Geo_ID=ftm_dim_geo.Geo_ID SET cand_fact.State=ftm_dim_geo.State WHERE cand_fact.state"
            #Set the State associated with the ID on the Geography Dimension table
            cursor.execute(SQL)
            print SQL
            cursor.execute(SQL2)
            print SQL2
            cursor.execute(SQL3)
            print SQL3
            cursor.execute(SQL4)
            print SQL4
            print SQL5
            cursor.execute(SQL5)
            cursor.execute(SQL6)
            print SQL6
            cursor.execute(SQL7)
            print SQL7
            conn.commit()
            conn.close()
def transform(addr,State):
    '''
    Extract the lawmaker-relevant fields from a cached JSON page and pass
    each record to database() for insertion into the incubator table.
    Only run during the historical-load phase of the ETL.

    Parameters:
        addr: (String) Address of a Lawmaker JSON file (format ALp0.json)
        State: (String) Name of the state (e.g. Alabama)
    '''
    with open(addr) as data_file:
        data=json.load(data_file)
    for item in data['records']:
        Time = time.time()
        Law_Fname=item['Lawmaker']['Lawmaker']
        print Law_Fname
        # Full name arrives as 'LAST, FIRST ...'; split it apart.
        Law_Lname=Law_Fname.split(',')[0]
        Law_Frname1=Law_Fname.split(',')[1]
        Law_Frname=Law_Frname1.split()[0]
        ID= item['Lawmaker']['id']
        e_type='General'
        #Lawmakers have been elected so all of their Elections were General Elections
        office = item['General_Office']['General_Office']
        # NOTE(review): Office_Code is not defined in the visible portion of
        # this module -- presumably defined/imported elsewhere; verify.
        Office_Code(office)
        #Adds the relevant office code to the office dimension table
        party1=item['General_Party']['General_Party']
        #party2=item['Specific_Party']['Specific_Party']
        #Once API_Access is gained Uncomment this
        #Original API call before the cutoff didn't request specific parties
        party2='NA'
        money = item['Total_$']['Total_$']
        year = item['Election_Year']['Election_Year']
        #The Lawmaker API call does not include information about districts
        database('Lawmaker',State,None,year,e_type,None,None,Law_Fname,Law_Frname,'n/a',Law_Lname,office,money,party1,party2,datetime.datetime.fromtimestamp(Time).strftime('%Y-%m-%d'))
def database(Entity,State,District,Cycle,Type,Election_Status,Incumbency_Status,Full_Name,First_Name,Middle_Name,Last_Name,Office,Total_Contribution,General_Party,Specific_Party,date):
    '''
    Populate the incubator table during the historical-load portion of the
    ETL. It is only to be run once.
    Parameters:
        Entity: (String) 'Candidate' or 'Lawmaker'; selects which column set is inserted
        State: (String) Name of State in the United States (ex. Alabama)
        District: (int) Numerical value of US State Senate or House District (ex. 007); None when unknown
        Cycle: (int) Numerical value of US Election Cycle (ex. 2010)
        Type: (String) Type of Election (Ex. Primary Election)
        Election_Status: (String) Status of the Election, whether the candidate won or lost
        Incumbency_Status: (String) The candidate's position relative to the seat: open seat, incumbent or challenger
        Full_Name: (String) Full Name of the Candidate or Lawmaker Format: LAST NAME, (MIDDLE NAME) FIRST NAME
        First_Name: (String) First Name of Candidate or Lawmaker
        Middle_Name: (String) Middle Name of Candidate or Lawmaker ('n/a' when absent)
        Last_Name: (String) Last Name of Candidate or Lawmaker
        Office: (String) The Office the Lawmaker has or the candidate is running for (Ex. Lieutenant Governor, State Senate)
        Total_Contribution: (int) The amount of money contributed to the candidate or lawmaker
        General_Party: (String) The General identifier for a political party (ex. Democrats, 3rd Party)
        Specific_Party: (String) The Specific identifier for a political party (ex. Democrats, Libertarians)
        date: (datetime) The timestamp for the date of insertion into the incubator table Format: Year-Month-Day
    '''
    with open ('db.txt') as f:
        content=f.readlines()
    # db.txt holds one "key=value" per line: host, user, password, database.
    host=content[0][content[0].find("=")+1:].strip()
    user=content[1][content[1].find("=")+1:].strip()
    passwd=content[2][content[2].find("=")+1:].strip()
    db=content[3][content[3].find("=")+1:].strip()
    db='atlas_ftm'  # NOTE(review): hard-coded override of the db.txt value -- confirm intentional
    # A single connection serves the whole call; the original opened a second,
    # never-closed connection inside each entity branch (a connection leak).
    conn = MySQLdb.connect(host,user,passwd,db)
    cursor = conn.cursor()
    try:
        if Entity=='Candidate':
            if District is None:
                Incubator="""INSERT INTO ftm_inc(State,Cycle,Cycle_Type,Election_Status,Incumbency_Status,Full_Name,First_Name,Middle_Name,Last_Name,contribution,Office,General_Party,Specific_Party,timestamp) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
                cursor.execute(Incubator,(State,Cycle,Type,Election_Status,Incumbency_Status,Full_Name,First_Name,Middle_Name,Last_Name,Total_Contribution,Office,General_Party,Specific_Party,date))
            else:
                Incubator="""INSERT INTO ftm_inc(State,Cycle,Cycle_Type,District,Election_Status,Incumbency_Status,Full_Name,First_Name,Middle_Name,Last_Name,contribution,Office,General_Party,Specific_Party,timestamp) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
                cursor.execute(Incubator,(State,Cycle,Type,District,Election_Status,Incumbency_Status,Full_Name,First_Name,Middle_Name,Last_Name,Total_Contribution,Office,General_Party,Specific_Party,date))
            #inserts candidate with fields relevant to candidates into the incubator table
            conn.commit()
        if Entity=='Lawmaker':
            Incubator="""INSERT INTO ftm_inc(State,Cycle,Cycle_Type,Full_Name,First_Name,Middle_Name,Last_Name,contribution,Office,General_Party,Specific_Party,timestamp) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
            cursor.execute(Incubator,(State,Cycle,Type,Full_Name,First_Name,Middle_Name,Last_Name,Total_Contribution,Office,General_Party,Specific_Party,date))
            #inserts lawmaker with fields relevant to lawmakers into the incubator table
            conn.commit()
    finally:
        conn.close()
def transform_Candidate(Addr):
    '''
    Function that extracts data relevant to Candidates from the JSON files
    and passes them onto the database function. Only run during the
    Historical load phase of the ETL.
    Parameters:
        Addr: (String) Address of Candidate JSON file Format: StateCp#.json (ex. ALCp0.json)
    '''
    print(Addr)
    with open(Addr) as f:
        data= json.load(f)
    Time = time.time()
    for item in data['records']:
        #Extracts data relevant to Candidates
        Name=item['Candidate']['Candidate'].split(',')
        # Default the middle name up front: the original only assigned Mname in
        # the comma branch, so a comma-less name raised NameError (or silently
        # reused the previous record's middle name) at the database() call.
        Mname='n/a'
        if len(Name)>1:
            # "LAST, FIRST MIDDLE" -> split first/middle on the inner space.
            Cand_Frname=Name[1].strip().upper()
            if " " in Cand_Frname:
                Namez=Cand_Frname.split(" ")
                Cand_Frname= Namez[0]
                Mname=Namez[1]
            Cand_Lname=Name[0].upper()
            Cand_Fname=(Name[0]+","+Name[1]).upper()
            print(Cand_Fname)
        else:
            # No comma in the name: keep the raw string as the full name.
            Cand_Fname=Name[0]
            Cand_Frname=""
            Cand_Lname=""
        # Strip quote characters that would otherwise break the SQL layer.
        if "'" in Cand_Fname:
            Cand_Frname=Cand_Frname.replace("'"," ")
            Cand_Fname=Cand_Fname.replace("'"," ")
            Cand_Lname=Cand_Lname.replace("'"," ")
        if '"' in Cand_Fname or '"' in Cand_Lname:
            Cand_Frname=Cand_Frname.replace('"',' ')
            Cand_Fname=Cand_Fname.replace('"',' ')
            Cand_Lname=Cand_Lname.replace('"',' ')
        Specific_Party=item['Specific_Party']['Specific_Party']
        General_Party=item['General_Party']['General_Party']
        Office_Sought=item['Office_Sought']['Office_Sought']
        Year = item['Election_Year']['Election_Year']
        digit=re.search(r"\d", Office_Sought)#Finds location of first digit in string
        Office=normalizeOffice(Office_Sought)#Standardizes names
        District = None
        if digit:
            #In the API Candidate Offices are formatted HOUSE 07 or SENATE 22;
            #everything from the first digit onward is the district number.
            District=Office_Sought[digit.start():]
        # Ensure the office code for this (lawmaker-standard) office name is in
        # the office dimension table. The original called this twice in a row;
        # each call opens a database connection, so once is enough.
        Office_Code(Office)
        State= item['Election_Jurisdiction']['id']
        Election_Type=item['Election_Type']['Election_Type']
        Election_Status = item['Election_Status']['Election_Status']
        Incumbency_Status=item['Incumbency_Status']['Incumbency_Status']
        contribution = item['Total_$']['Total_$']
        database('Candidate',State,District,Year,Election_Type,Election_Status,
                 Incumbency_Status,Cand_Fname,Cand_Frname,Mname,Cand_Lname,
                 Office,contribution,General_Party,Specific_Party,
                 datetime.datetime.fromtimestamp(Time).strftime('%Y-%m-%d'))
def state_cycle(daily_api_calls,api_call_limit,start,startPage,pages,type2,update1,skip):
    '''
    The purpose of the method is to cycle through the states
    keeping track of the last page utilized and ensuring the
    daily API call limit is not reached
    Parameters:
        daily_api_calls: (int) the number of times the API has been called
        api_call_limit: (int) the limit of the number of times the API can be called in a day
        start: (int) the number of the state to start on (ex. State #3=Arkansas)
        startPage: (int) the page number to start on (ex. page 0)
        pages: (dict) dictionary containing the maximum amount of pages associated with each state (ex. Alabama:4)
        type2: (String) type of entity being inserted or updated Values: Lawmaker or Candidate
        update1: (boolean) variable that indicates if the program is historically loading data or incrementally updating
        skip: (int) page number to skip because maxpageUpdate already downloaded it;
            callers pass an out-of-range value (ex. 900) when nothing should be skipped
    Returns:
        (int) the number of API calls made so far, so the next invocation can
        keep honoring the daily limit
    '''
    # ISO week number of today; compared with date.txt to decide when to
    # reset the API-call counter.
    Week=datetime.date.today().isocalendar()[1]
    #print daily_api_calls
    #Set this to 50 After Testing Finishes
    #print 'test'
    for y in range (start,50):
        print NumToState(y)
        print ""
        Time = time.time()
        # host/user/passwd/db are module-level globals read from db.txt in __main__.
        conn = MySQLdb.connect(host,user,passwd,db)
        cursor = conn.cursor()
        SQL="SELECT Max_Page From ftm_update WHERE State='%s'"%NumToState(y)
        cursor.execute(SQL)
        conn.commit()
        conn.close()
        # NOTE(review): fetchall() runs after conn.close(); this presumably
        # relies on MySQLdb buffering results client-side -- and `results`
        # is never used below. Confirm whether this query is needed at all.
        results= cursor.fetchall()
        daily_api_calls=int(daily_api_calls)
        # States with no recorded pages still get one pass through the page loop.
        if pages[NumToState(y)]==0:
            pages[NumToState(y)]=1
        for x in range(startPage,pages[NumToState(y)]):
            if daily_api_calls<=api_call_limit and x!=skip:
                print x
                print daily_api_calls
                conn = MySQLdb.connect(host,user,passwd,db)
                cursor = conn.cursor()
                SQL="INSERT INTO ftm_update(State,Update_Date,Page_Num) VALUES(%s,%s,%s) on duplicate key update State=%s,page_Num=%s"
                #SQL Statement that inserts State and Page # into the Update table if the State isn't in the table and
                #performs and update operation with the page number if it is
                SQL2="UPDATE ftm_update SET Max_Page=%s WHERE State='%s'"%(pages[NumToState(y)],NumToState(y))
                #This Sets the Max_page field in the Update table to the maxpage of the state
                #This statement should only be executed once when the State is initially reached
                SQL3="UPDATE ftm_update SET Status='Incomplete' WHERE State='%s'"%NumToState(y)
                #SQL Statement that sets the Update Status of a State to Incomplete
                #This is done only once when the first page of the State is reached
                #print SQL
                cursor.execute(SQL,(NumToState(y), datetime.datetime.fromtimestamp(Time).strftime('%Y-%m-%d %H:%M:%S'),x,NumToState(y),x))
                if(startPage==0):
                    cursor.execute(SQL2)
                    cursor.execute(SQL3)
                conn.commit()
                conn.close()
                Addr=NumToState(y)+'p'+str(x)+".json" #File address of data from Lawmaker API calls
                AddrC=NumToState(y)+'Cp'+str(x)+".json"#File address of data from Candidate API Calls
                #print Addr
                #print AddrC
                # Four load modes: {Lawmaker, Candidate} x {historical load, update}.
                if(type2=='Lawmaker'and not update1):
                    data_extract('Lawmaker',x,NumToState(y))#Extracts the data from the Lawmaker API
                    transform(Addr,NumToState(y))#Inserts the data into the incubator database
                    daily_api_calls+=1
                    #print 'test'
                if(type2=='Candidate' and not update1):
                    data_extract('Candidate',x,NumToState(y))
                    daily_api_calls+=1
                    transform_Candidate(AddrC)
                if(type2=='Lawmaker' and update1):
                    #print 'test'
                    data_extract('Lawmaker',x,NumToState(y))#Extracts the data from the Lawmaker API
                    Update('Lawmaker',Addr,host,user,passwd,db)
                    daily_api_calls+=1
                if(update1 is True and type2=='Candidate'):
                    #data_extract('Candidate',x,NumToState(y))
                    Update('Candidate',AddrC,host,user,passwd,db)
                    daily_api_calls+=1
                # date.txt holds the ISO week number of the last counter reset.
                with open ('date.txt') as f:
                    date=f.readline()
                if daily_api_calls>api_call_limit and Week>int(date):
                    with open('apicalls.txt','w') as f:
                        f.write('0')
                    daily_api_calls=0
                #If the API limit has been reached and the most recently accessed date is not the current date
                #The number of daily api calls is set to 0 both in the program and in the text_file
                # NOTE(review): `Today` is not defined anywhere visible in this
                # file -- confirm it exists (e.g. as a global) before this
                # branch can run without a NameError.
                if daily_api_calls>api_call_limit and str(Today)==str(date):
                    print "API LIMIT REACHED"
                    exit()
                #If the number of api calls made in a day exceed the limit the program will shut-down
                #time.sleep(5)#For Testing Purposes Delete For Production
        conn = MySQLdb.connect(host,user,passwd,db)
        cursor = conn.cursor()
        cursor.execute("UPDATE ftm_update SET STATUS='Complete' WHERE STATE='%s'"%NumToState(y))
        #After a state has been iterated through when page_num=max_page the status is set to complete
        #This should occur after the loop above is iterated through so no if statement and further SQL should be necessary
        conn.commit()
        conn.close()
    #print daily_api_calls
    write_api_calls(daily_api_calls)
    database2(host,user,passwd,db,type2)
    return daily_api_calls
    #This method returns the number of API calls executed when the function was run as a way to pass the number of calls
    #to this function when it is run again to ensure that the daily limit is not exceeded
    #There is probobly a better way to implement this using global variables or recursion
def normalizeOffice(office):
    '''
    Standardize the office field in the incubator and office dimensional
    tables to the naming scheme used by the Lawmaker API calls.
    Parameters:
        office: (string) the name of the office sought by a candidate or
            inhabited by a lawmaker
    Returns:
        (string) the standardized office name; unrecognized offices are
        returned title-cased.
    '''
    # More specific patterns must be tested before their substrings:
    # 'LIEUTENANT GOVERNOR' before 'GOVERNOR', and 'REGIONAL TRANSPORTATION'
    # before 'TRANSPORTATION COMMISSIONER'. The original checked the broad
    # patterns first, which made the specific branches unreachable.
    if 'US HOUSE' in office:
        return 'House of Representatives'
    elif 'HOUSE DISTRICT' in office:
        return "State House/Assembly"
    elif 'ASSEMBLY DISTRICT' in office:
        return 'State House/Assembly'
    elif 'EDUCATION' in office:
        return 'Board of Ed.'
    elif 'SUPREME' in office:
        return 'Supreme Court Seat'
    elif 'APPELLATE' in office:
        return "Apellate Court Seat"
    elif 'SENATE DISTRICT' in office:
        return 'State Senate'
    elif 'State Representative' in office:
        return "State House/Assembly"
    elif 'SENATE' in office and 'US' not in office:
        return "State Senate"
    elif 'LIEUTENANT GOVERNOR' in office:
        return 'Lt. Gov'
    elif 'GOVERNOR' in office:
        return 'Governor'
    elif 'HAWAIIAN AFFAIRS' in office:
        return 'Office of Hawaiian Affairs'
    elif 'PUBLIC REGULATION' in office:
        return 'Public Regulation Comissioner'
    elif "REGENTS" in office:
        return 'Board of Reagents Member'
    elif "SUPERINTENDENT OF PUBLIC" in office:
        return 'Superintendent of Public Instruction'
    elif "REGIONAL TRANSPORTATION" in office:
        return "Regional Transportation Commissioner"
    elif "TRANSPORTATION COMMISSIONER" in office:
        return "Transportation Commissioner"
    elif "SUPERIOR COURT" in office:
        return "Superior Court Seat"
    elif "PUBLIC SERVICE COMMISSIONER" in office:
        return 'Public Service Commissioner'
    else:
        return office.title()
def Office_Code(o):
    '''
    Populate the office_code field of the incubator and office dimensional
    tables for the given office name.
    Note: Where applicable the office codes were taken from those that
    existed in the er_fact table.
    Parameter:
        o: (string) the name of the office sought by a candidate or
           inhabited by a lawmaker
    '''
    # Office-name -> short-code mapping; unlisted offices get an empty code,
    # matching the original if-chain's behaviour.
    # NOTE(review): 'Supreme Court' and 'Lieutenant Governor' never match the
    # strings normalizeOffice produces ('Supreme Court Seat', 'Lt. Gov') --
    # confirm which spellings the tables actually hold.
    codes = {
        'State House/Assembly': 'SLEG',
        'State Senate': 'SSN',
        'Governor': 'GOV',
        'Lieutenant Governor': 'LTGOV',
        'Board of Ed.': 'BOE',
        'Supreme Court': 'SSC',
    }
    Code = codes.get(o, '')
    with open ('db.txt') as f:
        content=f.readlines()
    # db.txt holds one "key=value" per line: host, user, password, database.
    host=content[0][content[0].find("=")+1:].strip()
    user=content[1][content[1].find("=")+1:].strip()
    passwd=content[2][content[2].find("=")+1:].strip()
    db=content[3][content[3].find("=")+1:].strip()
    conn = MySQLdb.connect(host,user,passwd,db)
    cursor = conn.cursor()
    try:
        # Parameterized queries let MySQLdb quote the values, so office names
        # containing apostrophes can no longer break the statement (the
        # original interpolated them with the % operator).
        cursor.execute("UPDATE ftm_dim_office SET Office_Code=%s WHERE Office=%s", (Code, o))
        cursor.execute("UPDATE ftm_inc SET Office_Code=%s WHERE Office=%s", (Code, o))
        conn.commit()
    finally:
        conn.close()
def get_maxPage(api_limit,maxpage,maxpageC,load_type):
    '''
    Populate the candidate max-page dictionary from the Follow The Money
    API (historical load), or restore it from the maxpage.p pickle when
    the program is updating.
    Parameters:
        api_limit: (int) the number of times the API can be called in a day
        maxpage: (dict) a dictionary that associates the maxPage Number with a State for Lawmakers
        maxpageC: (dict) a dictionary that associates the maxPage Number with a State for Candidates
        load_type: (int) either a 1 or a 0. 1 means that a historical load is occuring and a 0 means an update is occuring.
    '''
    if load_type == 0:
        # Update run: the dictionary was already built, just reload it
        # from the persistent pickle file.
        maxpageC = pickle.load(open("maxpage.p","rb"))
        return maxpageC
    calls_made = 0
    for state_num in range (0,50):
        print(state_num)
        if calls_made < api_limit:
            abbrev = NumToState(state_num)
            # Download page 0 for both entity types; the candidate file is
            # the one that carries the paging metadata we need.
            data_extract('Lawmaker',0,abbrev)
            data_extract('Candidate',0,abbrev)
            with open(abbrev+'Cp0.json') as handle:
                meta = json.load(handle)
            # Record the state's page count, keyed by abbreviation (ex. AL:4).
            maxpageC[abbrev] = int(meta['metaInfo']['paging']['maxPage'])
            calls_made += 1
            # Persist after every state so a crash does not lose progress.
            pickle.dump(maxpageC, open( "maxpage.p", "wb" ) )
    return maxpageC
def write_api_calls(api_call):
    '''
    Persist today's API-call count to apicalls.txt. The ETL reads this
    file back on startup so the daily Follow The Money API allowance is
    honored across separate runs.
    Parameters:
        api_call:(int) the current number of API calls performed
    '''
    counter_file = open('apicalls.txt','w')
    try:
        counter_file.write(str(api_call))
    finally:
        counter_file.close()
def write_day():
    '''
    Write the current ISO week number (not the calendar date) to date.txt;
    state_cycle compares it against the stored value to decide when a new
    week has started and the API-call counter may be reset.
    Parameters:
        None
    '''
    week_number = datetime.date.today().isocalendar()[1]
    print(week_number)
    with open('date.txt','w') as marker:
        marker.write(str(week_number))
def getLastStatePage():
    '''
    Gets the last state and page updated and returns the numeric values
    for the next state and page to be updated

    Returns:
        A (State, Page_num) sequence: either the first incomplete row from
        ftm_update, or a freshly computed [state, 0] starting point when
        every state is marked complete.
    '''
    # host/user/passwd/db are module-level globals read from db.txt in __main__.
    conn = MySQLdb.connect(host,user,passwd,db)
    cursor = conn.cursor()
    # NOTE(review): GROUP BY Update_Date is used here where ORDER BY was
    # presumably intended -- confirm the row returned is really the most
    # recently updated one.
    SQL="SELECT State,Page_num FROM ftm_update WHERE Status='Incomplete' GROUP BY Update_Date"
    cursor.execute(SQL)
    results= cursor.fetchall()
    #Connects to the Update Table of the Database to see where the program left off
    #Returns the most recently incomplete page
    if len(results)==0:
        #If there are no incomplete states then another statement is run
        #to find the state and the page that should be updated next
        SQL="SELECT * FROM ftm_update GROUP BY update_date DESC;"
        cursor.execute(SQL)
        results=cursor.fetchall()
        result2=list()
        if results[0][3]=='Complete' and results[0][0]=='WY':
            #If the top result is Wyoming and it is Complete
            #Then a new array containing ('AL',0) will be returned as all states have been covered
            result2.append('AL')
            result2.append(0)
            conn = MySQLdb.connect(host,user,passwd,db)
            cursor = conn.cursor()
            SQL3="UPDATE ftm_update SET Status='Incomplete'"
            SQL4="UPDATE ftm_update SET Page_num=0"
            cursor.execute(SQL3)
            cursor.execute(SQL4)
            conn.commit()
            conn.close()
            #As this part of the function is resetting the update start point
            #it also resets the update table setting all states to incomplete
            # NOTE(review): this fetchall() follows conn.close() and its
            # result is never used -- confirm it can be removed.
            results= cursor.fetchall()
            print result2
            return result2
            #If all states through wyominy are complete then the state
            #to be updated is set to Alabama and the page to 0
        else:
            result2.append(NumToState(statetonum(results[0][0])+1))
            result2.append(0)
            print result2
            return result2
            #If all the states through wyoming are not complete then
            #the numerical value of the next state and page are returned
    return results[0]
def getLastStatePageUser(State):
    '''
    Gets the last page updated for the state that has been defined by the user.
    Parameters:
        State: (String) two-letter state abbreviation (ex. 'TX')
    Returns:
        The Page_num recorded for that state in the ftm_update table
        (where the previous run left off).
    '''
    # host/user/passwd/db are module-level globals read from db.txt in __main__.
    conn = MySQLdb.connect(host,user,passwd,db)
    cursor = conn.cursor()
    try:
        # Parameterized query instead of string concatenation, so a stray
        # quote in the user's input cannot break (or inject into) the SQL.
        cursor.execute("SELECT Page_num FROM ftm_update WHERE State=%s", (State,))
        results = cursor.fetchall()
        # Raises IndexError when the state has no row yet, same as before.
        return results[0][0]
    finally:
        # The original never closed this connection; release it here.
        conn.close()
def maxpageUpdate(maxpage_Cand,Page,State):
    '''
    Check whether the number of candidate pages for a state has changed
    and, if so, refresh the persistent maxpage dictionary.
    Parameters:
        maxpage_Cand: (dict) maxpage_Cand is the dictionary that contains the number of maxpages
            it can be read from the maxpage.p pickle file after getmaxpage has been run.
        Page:(int) the page number that is currently being accessed as part of the update. (Ex. page 2)
        State: (String) the string value of a state (ex. 'AL')
    Returns:
        The page number that was just downloaded -- so the caller can skip
        re-downloading it and save an API call -- or None when the stored
        page count is unchanged.
    '''
    data_extract('Candidate',Page,State)#Downloads the pertinent JSON File associated with the State and Page
    addr=State+"Cp"+str(Page)+".json"#Address for the Json file that contains the maxPage Info
    with open(addr) as f:
        data1= json.load(f)
    new_max = data1['metaInfo']['paging']['maxPage']
    if maxpage_Cand[State] == new_max:
        return None
    # The page count changed: store the new maximum plus one and persist the
    # dictionary. (The original stored maxPage+1 here via its `End` variable;
    # NOTE(review): get_maxPage stores the raw maxPage -- confirm which
    # convention state_cycle's range() actually expects.)
    maxpage_Cand[State] = new_max + 1
    pickle.dump(maxpage_Cand, open( "maxpage.p", "wb" ) )
    # Return the page already downloaded as part of this check; state_cycle
    # skips it so the API call is not wasted. (The original routed this value
    # through a single-entry temporary dict, but it is simply `Page`.)
    return Page
if __name__ == "__main__":
    # NOTE(review): this script is Python 2 (print statements, raw_input).
    # Read MySQL connection settings (one "key=value" per line) from db.txt.
    with open ('db.txt') as f:
        content=f.readlines()
    host=content[0][content[0].find("=")+1:].strip()
    user=content[1][content[1].find("=")+1:].strip()
    passwd=content[2][content[2].find("=")+1:].strip()
    db=content[3][content[3].find("=")+1:].strip()
    maxpage=dict()#Dictionary of MaxPages and States for Lawmakers
    maxpage_Cand=dict()#Dictionary of MaxPages and States for Candidates
    #get_maxPage(200,maxpage,maxpage_Cand)
    # The incubator table's row count decides between historical load
    # (empty) and incremental update (non-empty).
    conn = MySQLdb.connect(host,user,passwd,db)
    cursor = conn.cursor()
    SQL="SELECT COUNT(*) FROM ftm_inc"
    cursor.execute(SQL)
    results= cursor.fetchall()
    daily_api_calls=0#Number of API Calls made in a day
    api_call_limit=999#Maximum number of API calls allowed in a day
    write_day()
    if results[0][0]==0:
        maxpage_Cand=get_maxPage(api_call_limit,maxpage,maxpage_Cand,0)
        state_cycle(daily_api_calls,api_call_limit,0,0,maxpage_Cand,'Candidate',False,900)
        #If the incubator table is empty the historical load process is initiated
    if results[0][0]>0:
        maxpage_Cand=get_maxPage(api_call_limit,maxpage,maxpage_Cand,0)
        #maxpage_Cand is a persistent dictionary that contains the number of pages that each state has
        #It is created and accessed using the get_maxPage function
        with open('apicalls.txt') as f:
            api_calls=f.readline()
        #The total number of API Calls made in a week are contained in the apicalls.txt file
        StartInfo= getLastStatePage()
        #getLastStatePage() returns a list that contains the State and the page number to be accessed next
        State=StartInfo[0]
        Page= StartInfo[1]
        Entity=raw_input("Lawmaker or Candidate?")
        if(Entity=='Candidate'):
            State_input = raw_input("Do you want to Start the Updates with a particular state?")
            if State_input =='Yes':
                State_Start=raw_input("Which State?(Please Enter State Abbrv. ex.Texas->TX)")
                Page2=getLastStatePageUser(State_Start)
                Skip=maxpageUpdate(maxpage_Cand,Page,State)
                #maxpageUpdate checks to see if the number of pages for the state being accessed has changed
                #If it has then one version of the state_cycle method is called if it hasn't another is called
                # NOTE(review): the check above uses the auto-detected State/Page
                # rather than the user-chosen State_Start/Page2 -- confirm which
                # state should be checked here.
                if Skip is not None:
                    state_cycle(api_calls,api_call_limit,statetonum(State_Start),Page2,maxpage_Cand,'Candidate',True,Skip)
                    #Skip is the page that will be skipped in the maxpage update. For more info about this look at the Readme
                else:
                    state_cycle(api_calls,api_call_limit,statetonum(State_Start),Page2,maxpage_Cand,'Candidate',True,900)
                    #Skip is set to 900 if no page is to be skipped as no State will likely have page 900.None or 0 could
                    #also be plausible placeholders.
            else:
                Skip=maxpageUpdate(maxpage_Cand,Page,State)
                print str(Skip)+"TEST"
                if Skip is not None:
                    state_cycle(api_calls,api_call_limit,statetonum(State),0,maxpage_Cand,'Candidate',True,Skip)
                else:
                    state_cycle(api_calls,api_call_limit,statetonum(State),Page,maxpage_Cand,'Candidate',True,900)
        #If the incubator table is not empty and the user inidcates that they want to update Candidates the Candidate fact and dimension tables
        #and the dimensions associated with those lawmakers are updated incrementall
| mit |
eoncloud-dev/eontools | latency_dump.py | 2 | 1467 | #!/usr/bin/env python
import json
import argparse
# Latency perf counters to report, grouped by the Ceph daemon section
# ("osd" or "filestore") under which they appear in a `perf dump` JSON.
# handle_data walks this list to pick the counters out of the dump.
latencys = [
    { 'osd':[
        'op_latency',
        'op_process_latency',
        'op_r_latency',
        'op_r_process_latency',
        'op_w_rlat',
        'op_w_latency',
        'op_w_process_latency',
        'op_rw_rlat',
        'op_rw_process_latency',
        'subop_latency',
        'subop_w_latency',
        'subop_pull_latency',
        'subop_push_latency',
        ]
    },
    { 'filestore':[
        'journal_latency',
        'commitcycle_latency',
        'apply_latency',
        'queue_transaction_latency_avg',
        ]
    }
]
def format_data(item, sum, avgcount):
    """Return one formatted report row for a latency counter.

    Parameters:
        item: counter name (first column).
        sum: total time accumulated by the counter.
            (Parameter name kept for backward compatibility even though it
            shadows the builtin ``sum``.)
        avgcount: number of operations sampled.

    The last column is the average latency per operation in milliseconds;
    it falls back to 0 when no operations were recorded.
    """
    try:
        result = sum / avgcount * 1000
    except ZeroDivisionError:
        result = 0
    return '%-30s | %-30f | %-30f | %-30f' % (item, sum, avgcount, result)


def handle_data(latencys, perf_data=None):
    """Build the report lines: a header plus one row per counter.

    Parameters:
        latencys: list of {component: [counter, ...]} mappings describing
            which counters to report.
        perf_data: parsed ``perf dump`` JSON. Defaults to the module-level
            ``data`` global for backward compatibility with the original
            script entry point.

    Returns:
        List of formatted strings, ready to be printed line by line.
    """
    if perf_data is None:
        perf_data = data  # global set in __main__ (kept for compatibility)
    output = ['%-30s | %-30s | %-30s | %-30s' % ('', 'sum', 'avgcount', 'latency/op (ms)')]
    for group in latencys:
        for component, items in group.items():
            for item in items:
                counter = perf_data[component][item]
                output.append(format_data(item, counter['sum'], counter['avgcount']))
    return output
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', help='the full path of json file')
    args = parser.parse_args()
    # Load the Ceph "perf dump" JSON; handle_data reads it through the
    # module-level global `data`.
    # NOTE(review): the file handle is never closed -- acceptable for a
    # short-lived script, but worth tidying.
    f = open(args.file)
    data = json.load(f)
    output = handle_data(latencys)
    for i in output:
        print i
| apache-2.0 |
apeyser/nest-simulator | pynest/nest/lib/hl_api_info.py | 4 | 7678 | # -*- coding: utf-8 -*-
#
# hl_api_info.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions to get information on NEST.
"""
from .hl_api_helper import *
import sys
import os
import webbrowser
@check_stack
def sysinfo():
    """Print information on the platform on which NEST was compiled."""
    # Delegates to the SLI interpreter command `sysinfo`, which prints
    # directly to stdout; nothing is returned to Python.
    sr("sysinfo")
@check_stack
def version():
    """Return the NEST version.

    Returns
    -------
    str:
        The version of NEST, as "<kernelname> <version>".
    """
    # Look up /kernelname and /version in the SLI statusdict; spp() pops
    # the resulting pair, which is joined into a single string.
    sr("statusdict [[ /kernelname /version ]] get")
    return " ".join(spp())
@check_stack
def authors():
    """Print the authors of NEST."""
    # Delegates to the SLI command `authors`, which prints to stdout.
    sr("authors")
@check_stack
def helpdesk():
    """Open the NEST helpdesk in browser.

    Use the system default browser, except on Windows (where
    ``webbrowser.open`` is incomplete and ``os.startfile`` is used) and
    on macOS (where Safari has to be requested explicitly).
    """
    if sys.version_info < (2, 7, 8):
        print("The NEST Helpdesk is only available with Python 2.7.8 or "
              "later. \n")
        return

    if 'NEST_DOC_DIR' not in os.environ:
        print(
            'NEST help needs to know where NEST is installed.'
            'Please source nest_vars.sh or define NEST_DOC_DIR manually.')
        return

    helpfile = os.path.join(os.environ['NEST_DOC_DIR'], 'help',
                            'helpindex.html')

    # Under Windows systems webbrowser.open is incomplete
    # See <https://bugs.python.org/issue8232>
    if sys.platform[:3] == "win":
        os.startfile(helpfile)
    # Under MacOs we need to ask for the browser explicitly.
    # See <https://bugs.python.org/issue30392>.
    # This branch was a plain `if` before, so on Windows the helpdesk was
    # opened twice (os.startfile above AND webbrowser.open_new below);
    # `elif` makes the three platform branches mutually exclusive.
    elif sys.platform[:3] == "dar":
        webbrowser.get('safari').open_new(helpfile)
    else:
        webbrowser.open_new(helpfile)
@check_stack
def help(obj=None, pager=None, return_text=False):
    """Show the help page for the given object using the given pager.

    The default pager is more.

    Parameters
    ----------
    obj : object, optional
        Object to display help for
    pager : str, optional
        Pager to use
    return_text : bool, optional
        Option for returning the help text
    """
    if obj is None:
        # Nothing to look up: print a short orientation banner instead.
        banner = (
            "Type 'nest.helpdesk()' to access the online documentation "
            "in a browser.",
            "Type 'nest.help(object)' to get help on a NEST object or "
            "command.\n",
            "Type 'nest.Models()' to see a list of available models "
            "in NEST.",
            "Type 'nest.authors()' for information about the makers "
            "of NEST.",
            "Type 'nest.sysinfo()' to see details on the system "
            "configuration.",
            "Type 'nest.version()' for information about the NEST "
            "version.\n",
            "For more information visit http://www.nest-simulator.org.",
        )
        for line in banner:
            print(line)
        return
    if return_text:
        return load_help(obj)
    show_help_with_pager(obj, pager)
@check_stack
def get_argv():
    """Return argv as seen by NEST.

    This is similar to Python sys.argv but might have changed after
    MPI initialization.

    Returns
    -------
    tuple:
        Argv, as seen by NEST.
    """
    # Push the interpreter's status dictionary onto the SLI stack and pop
    # it back into Python; its 'argv' entry holds the command line.
    sr('statusdict')
    statusdict = spp()
    return statusdict['argv']
@check_stack
def message(level, sender, text):
    """Print a message using NEST's message system.

    Parameters
    ----------
    level :
        Level
    sender :
        Message sender
    text : str
        Text to be sent in the message
    """
    # Push the three arguments onto the SLI stack in the order the SLI
    # `message` command expects them, then invoke it.
    sps(level)
    sps(sender)
    sps(text)
    sr('message')
@check_stack
def SetStatus(nodes, params, val=None):
    """Set the parameters of nodes or connections to params.

    If val is given, params has to be the name
    of an attribute, which is set to val on the nodes/connections. val
    can be a single value or a list of the same size as nodes.

    Parameters
    ----------
    nodes : list or tuple
        Either a list of global ids of nodes, or a tuple of connection
        handles as returned by GetConnections()
    params : str or dict or list
        Dictionary of parameters or list of dictionaries of parameters of
        same length as nodes. If val is given, this has to be the name of
        a model property as a str.
    val : str, optional
        If given, params has to be the name of a model property.

    Raises
    ------
    TypeError
        If nodes is not a list of nodes or synapses, or if the number of
        parameter dictionaries does not match the number of nodes.
    """
    if not is_coercible_to_sli_array(nodes):
        raise TypeError("nodes must be a list of nodes or synapses")
    # This was added to ensure that the function is a nop (instead of,
    # for instance, raising an exception) when applied to an empty list,
    # which is an artifact of the API operating on lists, rather than
    # relying on language idioms, such as comprehensions
    #
    if len(nodes) == 0:
        return
    # Shorthand form SetStatus(nodes, "key", val): a list-valued val is
    # spread element-wise over the nodes, a scalar applies to all of them.
    if val is not None and is_literal(params):
        if is_iterable(val) and not isinstance(val, (uni_str, dict)):
            params = [{params: x} for x in val]
        else:
            params = {params: val}
    # Expand a single dict to one dict per node; length mismatches are
    # caught by the check below.
    params = broadcast(params, len(nodes), (dict,), "params")
    if len(nodes) != len(params):
        raise TypeError(
            "status dict must be a dict, or list of dicts of length 1 "
            "or len(nodes)")
    # Connections are pushed as a connection-datum array; plain node GIDs
    # can be pushed directly.
    if is_sequence_of_connections(nodes):
        pcd(nodes)
    else:
        sps(nodes)
    # Pair each node with its dict on the SLI stack, then apply the SLI
    # SetStatus command to every (node, dict) pair.
    sps(params)
    sr('2 arraystore')
    sr('Transpose { arrayload pop SetStatus } forall')
@check_stack
def GetStatus(nodes, keys=None):
    """Return the parameter dictionaries of nodes or connections.

    If keys is given, a list of values is returned instead. keys may also be a
    list, in which case the returned list contains lists of values.

    Parameters
    ----------
    nodes : list or tuple
        Either a list of global ids of nodes, or a tuple of connection
        handles as returned by GetConnections()
    keys : str or list, optional
        String or a list of strings naming model properties. GetStatus then
        returns a single value or a list of values belonging to the keys
        given.

    Returns
    -------
    dict:
        All parameters
    type:
        If keys is a string, the corresponding parameter is returned
    list:
        If keys is a list of strings, a list of corresponding parameters
        is returned

    Raises
    ------
    TypeError
        If nodes is not a list of nodes or synapses, or keys is neither a
        string nor an iterable of strings.
    """
    if not is_coercible_to_sli_array(nodes):
        raise TypeError("nodes must be a list of nodes or synapses")
    # Nop on an empty list, mirroring SetStatus.
    if len(nodes) == 0:
        return nodes
    # Build the SLI expression: plain GetStatus over all nodes, a
    # single-key extraction, or a multi-key extraction mapped over every
    # status dictionary.
    if keys is None:
        cmd = '{ GetStatus } Map'
    elif is_literal(keys):
        cmd = '{{ GetStatus /{0} get }} Map'.format(keys)
    elif is_iterable(keys):
        keys_str = " ".join("/{0}".format(x) for x in keys)
        cmd = '{{ GetStatus }} Map {{ [ [ {0} ] ] get }} Map'.format(keys_str)
    else:
        raise TypeError("keys should be either a string or an iterable")
    # Connections are pushed as a connection-datum array; plain node GIDs
    # can be pushed directly.
    if is_sequence_of_connections(nodes):
        pcd(nodes)
    else:
        sps(nodes)
    sr(cmd)
    return spp()
| gpl-2.0 |
DivineHime/seishirou | lib/pip/_vendor/appdirs.py | 327 | 22368 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - macOS: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 0)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
# Python 3 removed the `unicode` builtin; alias it to `str` so the rest of
# the module can refer to `unicode` uniformly on both major versions.
if PY3:
    unicode = str
# Normalize the platform identifier: under Jython, sys.platform reports the
# JVM rather than the OS, so derive a CPython-style value from java_ver().
if sys.platform.startswith('java'):
    import platform
    os_name = platform.java_ver()[3][0]
    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
        system = 'win32'
    elif os_name.startswith('Mac'): # "macOS", etc.
        system = 'darwin'
    else: # "Linux", "SunOS", "FreeBSD", etc.
        # Setting this to "linux2" is not ideal, but only Windows or Mac
        # are actually checked for and the rest of the module expects
        # *sys.platform* style strings.
        system = 'linux2'
else:
    system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the full path to the user-specific data dir for this application.

    "appname" is the name of the application; if None, just the base
        system directory is returned.
    "appauthor" (only used on Windows) is the author or distributing
        body, typically the owning company name.  It falls back to
        "appname"; pass False to omit it from the path.
    "version" is an optional version path segment, appended only when
        "appname" is given.  Use e.g. "<major>.<minor>" if you want
        multiple versions of the app to run independently.
    "roaming" (boolean, default False) selects the Windows roaming
        appdata directory, which is sync'd on login for users on
        networks set up for roaming profiles.  See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
        for a discussion of issues.

    Typical user data directories are:
        macOS:                  ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or $XDG_DATA_HOME/<AppName>
        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>

    On Unix the XDG spec is followed ($XDG_DATA_HOME), so the default is
    "~/.local/share/<AppName>".
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        # Roaming vs. local AppData is selected by the CSIDL constant.
        csidl = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(csidl))
        if appname:
            segments = [path, appname] if appauthor is False else [path, appauthor, appname]
            path = os.path.join(*segments)
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Application Support/')
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    """Return the full path to the shared (site-wide) data dir for this application.

    "appname" is the name of the application; if None, just the base
        system directory is returned.
    "appauthor" (only used on Windows) is the author or distributing
        body, typically the owning company name.  It falls back to
        "appname"; pass False to omit it from the path.
    "version" is an optional version path segment, appended only when
        "appname" is given (typically "<major>.<minor>").
    "multipath" (*nix only) requests the entire list of data dirs.  By
        default only the first entry of XDG_DATA_DIRS is returned, or
        '/usr/local/share/<AppName>' when XDG_DATA_DIRS is not set.

    Typical site data directories are:
        macOS:    /Library/Application Support/<AppName>
        Unix:     /usr/local/share/<AppName> or /usr/share/<AppName>
        Win XP:   C:\\Documents and Settings\\All Users\\Application Data\\<AppAuthor>\\<AppName>
        Vista:    (Fail! "C:\\ProgramData" is a hidden *system* directory on Vista.)
        Win 7:    C:\\ProgramData\\<AppAuthor>\\<AppName>   # Hidden, but writeable on Win 7.

    For Unix this uses the $XDG_DATA_DIRS[0] default.
    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            segments = [path, appname] if appauthor is False else [path, appauthor, appname]
            path = os.path.join(*segments)
    elif system == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for $XDG_DATA_DIRS; the version segment is folded
        # into each candidate entry here, so return directly without the
        # trailing version append below.
        raw = os.getenv('XDG_DATA_DIRS',
                        os.pathsep.join(['/usr/local/share', '/usr/share']))
        candidates = [os.path.expanduser(d.rstrip(os.sep)) for d in raw.split(os.pathsep)]
        if appname:
            suffix = os.path.join(appname, version) if version else appname
            candidates = [os.sep.join([d, suffix]) for d in candidates]
        return os.pathsep.join(candidates) if multipath else candidates[0]
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the full path to the user-specific config dir for this application.

    "appname" is the name of the application; if None, just the base
        system directory is returned.
    "appauthor" (only used on Windows) is the author or distributing
        body, typically the owning company name.  It falls back to
        "appname"; pass False to omit it from the path.
    "version" is an optional version path segment, appended only when
        "appname" is given (typically "<major>.<minor>").
    "roaming" (boolean, default False) selects the Windows roaming
        appdata directory, which is sync'd on login for users on
        networks set up for roaming profiles.  See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
        for a discussion of issues.

    Typical user config directories are:
        macOS:    same as user_data_dir
        Unix:     ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
        Win *:    same as user_data_dir

    On Unix the XDG spec is followed ($XDG_CONFIG_HOME), so the default
    is "~/.config/<AppName>".
    """
    if system in ("win32", "darwin"):
        # Windows and macOS make no config/data distinction; note the
        # version is appended below, not passed through.
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    """Return the full path to the shared (site-wide) config dir for this application.

    "appname" is the name of the application; if None, just the base
        system directory is returned.
    "appauthor" (only used on Windows) is the author or distributing
        body, typically the owning company name.  It falls back to
        "appname"; pass False to omit it from the path.
    "version" is an optional version path segment, appended only when
        "appname" is given (typically "<major>.<minor>").
    "multipath" (*nix only) requests the entire list of config dirs.
        By default only the first entry of XDG_CONFIG_DIRS is returned,
        or '/etc/xdg/<AppName>' when XDG_CONFIG_DIRS is not set.

    Typical site config directories are:
        macOS:    same as site_data_dir
        Unix:     /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
                  $XDG_CONFIG_DIRS
        Win *:    same as site_data_dir
        Vista:    (Fail! "C:\\ProgramData" is a hidden *system* directory on Vista.)

    For Unix this uses the $XDG_CONFIG_DIRS[0] default if multipath=False.
    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    """
    if system in ("win32", "darwin"):
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
        return path
    # XDG default for $XDG_CONFIG_DIRS; only the first entry unless
    # multipath was requested.
    raw = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
    candidates = [os.path.expanduser(d.rstrip(os.sep)) for d in raw.split(os.pathsep)]
    if appname:
        suffix = os.path.join(appname, version) if version else appname
        candidates = [os.sep.join([d, suffix]) for d in candidates]
    return os.pathsep.join(candidates) if multipath else candidates[0]
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return the full path to the user-specific cache dir for this application.

    "appname" is the name of the application; if None, just the base
        system directory is returned.
    "appauthor" (only used on Windows) is the author or distributing
        body, typically the owning company name.  It falls back to
        "appname"; pass False to omit it from the path.
    "version" is an optional version path segment, appended only when
        "appname" is given (typically "<major>.<minor>").
    "opinion" (boolean) can be False to skip appending "Cache" to the
        base app data dir on Windows -- see the note below.

    Typical user cache directories are:
        macOS:    ~/Library/Caches/<AppName>
        Unix:     ~/.cache/<AppName> (XDG default)
        Win XP:   C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
        Vista:    C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    On Windows the only suggestion in the MSDN docs is that local
    settings go in the `CSIDL_LOCAL_APPDATA` directory, identical to the
    non-roaming dir returned by `user_data_dir`.  Apps typically put
    cache data somewhere *under* that dir, e.g.:
        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
        ...\Acme\SuperApp\Cache\1.0
    OPINION: this function therefore appends "Cache" to the
    `CSIDL_LOCAL_APPDATA` value; disable with `opinion=False`.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            segments = [path, appname] if appauthor is False else [path, appauthor, appname]
            path = os.path.join(*segments)
            if opinion:
                path = os.path.join(path, "Cache")
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return the full path to the user-specific log dir for this application.

    "appname" is the name of the application; if None, just the base
        system directory is returned.
    "appauthor" (only used on Windows) is the author or distributing
        body, typically the owning company name.  It falls back to
        "appname"; pass False to disable it.
    "version" is an optional version path segment, appended only when
        "appname" is given (typically "<major>.<minor>").
    "opinion" (boolean) can be False to disable the appending of "Logs"
        to the base app data dir for Windows, and "log" to the base
        cache dir for Unix.  See discussion below.

    Typical user log directories are:
        macOS:    ~/Library/Logs/<AppName>
        Unix:     ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
        Win XP:   C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
        Vista:    C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs

    On Windows the only suggestion in the MSDN docs is that local settings
    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
    examples of what some windows apps use for a logs dir.)

    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
    value for Windows and appends "log" to the user cache dir for Unix.
    This can be disabled with the `opinion=False` option.
    """
    if system == "darwin":
        path = os.path.expanduser('~/Library/Logs')
        # BUGFIX: previously os.path.join(base, appname) was called
        # unconditionally, raising TypeError for appname=None even though
        # the docstring promises the bare system dir in that case.
        if appname:
            path = os.path.join(path, appname)
    elif system == "win32":
        path = user_data_dir(appname, appauthor, version)
        # version was already applied by user_data_dir; don't append twice.
        version = False
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        # version was already applied by user_cache_dir; don't append twice.
        version = False
        if opinion:
            path = os.path.join(path, "log")
    if appname and version:
        path = os.path.join(path, version)
    return path
class AppDirs(object):
    """Convenience wrapper around the module-level ``*_dir`` functions.

    Construct once with the application's name (plus, optionally, the
    author, a version segment, and the roaming/multipath flags), then
    read each directory off the matching read-only property.
    """

    def __init__(self, appname, appauthor=None, version=None, roaming=False,
                 multipath=False):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming
        self.multipath = multipath

    @property
    def user_data_dir(self):
        # Per-user (optionally roaming) data directory.
        return user_data_dir(
            self.appname, self.appauthor,
            version=self.version, roaming=self.roaming)

    @property
    def site_data_dir(self):
        # System-wide data directory (or path list when multipath=True).
        return site_data_dir(
            self.appname, self.appauthor,
            version=self.version, multipath=self.multipath)

    @property
    def user_config_dir(self):
        return user_config_dir(
            self.appname, self.appauthor,
            version=self.version, roaming=self.roaming)

    @property
    def site_config_dir(self):
        return site_config_dir(
            self.appname, self.appauthor,
            version=self.version, multipath=self.multipath)

    @property
    def user_cache_dir(self):
        return user_cache_dir(
            self.appname, self.appauthor, version=self.version)

    @property
    def user_log_dir(self):
        return user_log_dir(
            self.appname, self.appauthor, version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    import _winreg
    # Map the CSIDL constant name onto the value name used under the
    # "Shell Folders" registry key.
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]
    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    # QueryValueEx returns (value, type); only the path value is used.
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir
def _get_win_folder_with_pywin32(csidl_name):
    # Resolve a CSIDL special folder via the pywin32 shell bindings.
    from win32com.shell import shellcon, shell
    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        dir = unicode(dir)
        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api
                dir = win32api.GetShortPathName(dir)
            except ImportError:
                # win32api is optional; keep the long-form path.
                pass
    except UnicodeError:
        # Conversion failed; return whatever SHGetFolderPath produced.
        pass
    return dir
def _get_win_folder_with_ctypes(csidl_name):
    # Resolve a CSIDL special folder through the Win32 API via ctypes
    # (works on plain CPython with no third-party packages).
    import ctypes
    # Numeric CSIDL values (from shlobj.h).
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]
    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in buf:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf2 = ctypes.create_unicode_buffer(1024)
        # GetShortPathNameW returns 0 on failure; keep the long path then.
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2
    return buf.value
def _get_win_folder_with_jna(csidl_name):
    """Resolve a CSIDL special folder under Jython via the JNA win32 bindings."""
    import array
    from com.sun import jna
    from com.sun.jna.platform import win32
    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros('c', buf_size)
    shell = win32.Shell32.INSTANCE
    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in dir:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf = array.zeros('c', buf_size)
        kernel = win32.Kernel32.INSTANCE
        # BUGFIX: this previously referenced the misspelled name ``kernal``,
        # raising NameError whenever the resolved path had high-bit chars.
        if kernel.GetShortPathName(dir, buf, buf_size):
            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
    return dir
if system == "win32":
    # Pick the best available implementation for resolving Windows
    # special folders, in order of preference: pywin32, ctypes
    # (standard CPython), JNA (Jython), and finally the registry
    # fallback when nothing else is importable.
    try:
        import win32com.shell
        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            from ctypes import windll
            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            try:
                import com.sun.jna
                _get_win_folder = _get_win_folder_with_jna
            except ImportError:
                _get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
    appname = "MyApp"
    appauthor = "MyCompany"
    props = ("user_data_dir", "site_data_dir",
             "user_config_dir", "site_config_dir",
             "user_cache_dir", "user_log_dir")

    def _show(header, dirs):
        # Print one demo section: a header line, then every directory
        # property of the given AppDirs instance.
        print(header)
        for prop in props:
            print("%s: %s" % (prop, getattr(dirs, prop)))

    _show("-- app dirs (with optional 'version')",
          AppDirs(appname, appauthor, version="1.0"))
    _show("\n-- app dirs (without optional 'version')",
          AppDirs(appname, appauthor))
    _show("\n-- app dirs (without optional 'appauthor')",
          AppDirs(appname))
    _show("\n-- app dirs (with disabled 'appauthor')",
          AppDirs(appname, appauthor=False))
| gpl-3.0 |
drcoms/openwrt | gui/usr/lib/python2.7/json/tests/test_unicode.py | 10 | 3594 | from collections import OrderedDict
from json.tests import PyTest, CTest
class TestUnicode(object):
    """Shared unicode round-trip tests for the json package (Python 2).

    Mixed into the concrete classes below together with PyTest/CTest,
    which supply ``self.json``, ``self.dumps`` and ``self.loads`` bound
    to the pure-Python or C-accelerated implementation respectively.
    """
    def test_encoding1(self):
        # A unicode string and its UTF-8 byte form must encode identically.
        encoder = self.json.JSONEncoder(encoding='utf-8')
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        s = u.encode('utf-8')
        ju = encoder.encode(u)
        js = encoder.encode(s)
        self.assertEqual(ju, js)
    def test_encoding2(self):
        # Same as test_encoding1, but via the module-level dumps().
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        s = u.encode('utf-8')
        ju = self.dumps(u, encoding='utf-8')
        js = self.dumps(s, encoding='utf-8')
        self.assertEqual(ju, js)
    def test_encoding3(self):
        # With the ensure_ascii default, non-ASCII chars become \uXXXX escapes.
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps(u)
        self.assertEqual(j, '"\\u03b1\\u03a9"')
    def test_encoding4(self):
        # Same escaping applies inside containers.
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps([u])
        self.assertEqual(j, '["\\u03b1\\u03a9"]')
    def test_encoding5(self):
        # ensure_ascii=False keeps the raw unicode characters.
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps(u, ensure_ascii=False)
        self.assertEqual(j, u'"{0}"'.format(u))
    def test_encoding6(self):
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps([u], ensure_ascii=False)
        self.assertEqual(j, u'["{0}"]'.format(u))
    def test_big_unicode_encode(self):
        # Non-BMP characters are emitted as UTF-16 surrogate-pair escapes.
        u = u'\U0001d120'
        self.assertEqual(self.dumps(u), '"\\ud834\\udd20"')
        self.assertEqual(self.dumps(u, ensure_ascii=False), u'"\U0001d120"')
    def test_big_unicode_decode(self):
        # Both raw non-BMP chars and surrogate-pair escapes decode alike.
        u = u'z\U0001d120x'
        self.assertEqual(self.loads('"' + u + '"'), u)
        self.assertEqual(self.loads('"z\\ud834\\udd20x"'), u)
    def test_unicode_decode(self):
        # Every BMP code point below the surrogate range round-trips.
        for i in range(0, 0xd7ff):
            u = unichr(i)
            s = '"\\u{0:04x}"'.format(i)
            self.assertEqual(self.loads(s), u)
    def test_object_pairs_hook_with_unicode(self):
        s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
        p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
             (u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
        self.assertEqual(self.loads(s), eval(s))
        self.assertEqual(self.loads(s, object_pairs_hook = lambda x: x), p)
        od = self.loads(s, object_pairs_hook = OrderedDict)
        self.assertEqual(od, OrderedDict(p))
        self.assertEqual(type(od), OrderedDict)
        # the object_pairs_hook takes priority over the object_hook
        self.assertEqual(self.loads(s,
                                    object_pairs_hook = OrderedDict,
                                    object_hook = lambda x: None),
                         OrderedDict(p))
    def test_default_encoding(self):
        # Byte-string input defaults to UTF-8 decoding.
        self.assertEqual(self.loads(u'{"a": "\xe9"}'.encode('utf-8')),
                         {'a': u'\xe9'})
    def test_unicode_preservation(self):
        # Decoded strings are always unicode, even pure-ASCII ones.
        self.assertEqual(type(self.loads(u'""')), unicode)
        self.assertEqual(type(self.loads(u'"a"')), unicode)
        self.assertEqual(type(self.loads(u'["a"]')[0]), unicode)
        # Issue 10038.
        self.assertEqual(type(self.loads('"foo"')), unicode)
    def test_bad_encoding(self):
        # Invalid ``encoding`` arguments must raise, not be ignored.
        self.assertRaises(UnicodeEncodeError, self.loads, '"a"', u"rat\xe9")
        self.assertRaises(TypeError, self.loads, '"a"', 1)
# Concrete classes: run the shared TestUnicode cases against the
# pure-Python (PyTest) and C-accelerated (CTest) json implementations.
class TestPyUnicode(TestUnicode, PyTest): pass
class TestCUnicode(TestUnicode, CTest): pass
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.