repo_name
stringlengths 5
104
| path
stringlengths 4
248
| content
stringlengths 102
99.9k
|
|---|---|---|
bjodah/pyodesys
|
pyodesys/native/odeint.py
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import copy
from ..util import import_
from ._base import _NativeCodeBase, _NativeSysBase, _compile_kwargs
pyodeint = import_('pyodeint')
class NativeOdeintCode(_NativeCodeBase):
    """Native-code backend that compiles against the pyodeint headers."""

    wrapper_name = '_odeint_wrapper'

    def __init__(self, *args, **kwargs):
        # Deep-copy the shared defaults so this instance can append to the
        # nested lists without mutating the module-level template.
        self.compile_kwargs = copy.deepcopy(_compile_kwargs)
        self.compile_kwargs['include_dirs'].append(pyodeint.get_include())
        # Link against libm for the math routines used by the generated code.
        self.compile_kwargs['libraries'].append('m')
        super(NativeOdeintCode, self).__init__(*args, **kwargs)
class NativeOdeintSys(_NativeSysBase):
    """ODE system whose native code generation is handled by pyodeint."""
    # Code-generation backend used when compiling this system.
    _NativeCode = NativeOdeintCode
    # Identifier used to select this backend by name.
    _native_name = 'odeint'
|
Alex-Ian-Hamilton/sunpy
|
sunpy/spectra/tests/test_callisto.py
|
# -*- coding: utf-8 -*-
# Author: Florian Mayer <florian.mayer@bitsrc.org>
from __future__ import absolute_import
import shutil
from tempfile import mkdtemp
from datetime import datetime
import pytest
import os
import glob
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
import sunpy.data.test
from sunpy.spectra.sources.callisto import (
CallistoSpectrogram, query, download, minimal_pairs
)
@pytest.fixture
def CALLISTO_IMAGE():
    """Absolute path of the bundled Callisto FITS test file."""
    return os.path.join(sunpy.data.test.rootdir,
                        'BIR_20110922_050000_01.fit')
@pytest.fixture
def CALLISTO_IMAGE_GLOB_KEY():
    """Glob pattern matching the bundled BIR-station Callisto test files."""
    return 'BIR_*'
@pytest.fixture
def CALLISTO_IMAGE_GLOB_INDEX(CALLISTO_IMAGE, CALLISTO_IMAGE_GLOB_KEY):
    """Position of CALLISTO_IMAGE within the glob matches of the test dir."""
    pattern = os.path.join(sunpy.data.test.rootdir, CALLISTO_IMAGE_GLOB_KEY)
    return glob.glob(pattern).index(CALLISTO_IMAGE)
def test_read(CALLISTO_IMAGE):
    # Checks header-derived metadata of the bundled BIR test file against
    # the values known to be stored in that FITS file.
    ca = CallistoSpectrogram.read(CALLISTO_IMAGE)
    assert ca.start == datetime(2011, 9, 22, 5, 0, 0, 454000)
    assert ca.t_init == 18000.0
    assert ca.shape == (200, 3600)
    assert ca.t_delt == 0.25
    # Test linearity of time axis.
    assert np.array_equal(
        ca.time_axis, np.linspace(0, 0.25 * (ca.shape[1] - 1), ca.shape[1])
    )
    assert ca.dtype == np.uint8
@pytest.mark.online
def test_query():
    """query() must return every known BIR file in the one-hour interval."""
    URL = 'http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/'
    found = list(query(
        datetime(2011, 9, 22, 5), datetime(2011, 9, 22, 6), set(["BIR"])
    ))
    expected = sorted([
        "BIR_20110922_050000_01.fit.gz",
        "BIR_20110922_051500_01.fit.gz",
        "BIR_20110922_053000_01.fit.gz",
        "BIR_20110922_050000_03.fit.gz",
        "BIR_20110922_051500_03.fit.gz",
        "BIR_20110922_053000_03.fit.gz",
        "BIR_20110922_054500_03.fit.gz",
    ])
    # Should be sorted anyway, but better to assume as little as possible.
    found.sort()
    for name in expected:
        assert URL + name in found
@pytest.mark.online
@pytest.mark.xfail
def test_query_number():
    """Restricting the query to instrument number 1 yields exactly 3 files."""
    URL = 'http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/'
    found = list(query(
        datetime(2011, 9, 22, 5), datetime(2011, 9, 22, 6), set([("BIR", 1)])
    ))
    expected = sorted([
        "BIR_20110922_050000_01.fit.gz",
        "BIR_20110922_051500_01.fit.gz",
        "BIR_20110922_053000_01.fit.gz",
    ])
    # Should be sorted anyway, but better to assume as little as possible.
    found.sort()
    assert len(found) == len(expected)
@pytest.mark.online
@pytest.mark.xfail
def test_download():
    """Files returned by query() must land in the download directory.

    Fix: the original re-sorted ``os.listdir()`` on every loop iteration;
    the listing is now taken (and sorted once) after the download.  Sorting
    is not needed for the membership test but is kept for stable output.
    """
    directory = mkdtemp()
    try:
        result = query(
            datetime(2011, 9, 22, 5), datetime(2011, 9, 22, 6), set([("BIR", 1)])
        )
        RESULTS = [
            "BIR_20110922_050000_01.fit.gz",
            "BIR_20110922_051500_01.fit.gz",
            "BIR_20110922_053000_01.fit.gz",
        ]
        download(result, directory)
        downloaded = sorted(os.listdir(directory))
        for item in RESULTS:
            assert item in downloaded
    finally:
        # Always remove the temporary directory, even on failure.
        shutil.rmtree(directory)
def test_create_file(CALLISTO_IMAGE):
    """create() from a plain filename matches read() of the same file."""
    created = CallistoSpectrogram.create(CALLISTO_IMAGE)
    reference = CallistoSpectrogram.read(CALLISTO_IMAGE)
    assert np.array_equal(created.data, reference.data)
def test_create_file_kw(CALLISTO_IMAGE):
    """create() accepts the filename as an explicit keyword argument."""
    created = CallistoSpectrogram.create(filename=CALLISTO_IMAGE)
    reference = CallistoSpectrogram.read(CALLISTO_IMAGE)
    assert np.array_equal(created.data, reference.data)
@pytest.mark.online
def test_create_url():
    """create() from a positional URL matches read() of the same URL."""
    URL = (
        "http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/"
        "BIR_20110922_050000_01.fit.gz"
    )
    created = CallistoSpectrogram.create(URL)
    reference = CallistoSpectrogram.read(URL)
    assert np.array_equal(created.data, reference.data)
@pytest.mark.online
def test_create_url_kw():
    """create() accepts the URL as an explicit keyword argument."""
    URL = (
        "http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/"
        "BIR_20110922_050000_01.fit.gz"
    )
    created = CallistoSpectrogram.create(url=URL)
    reference = CallistoSpectrogram.read(URL)
    assert np.array_equal(created.data, reference.data)
def test_create_single_glob(CALLISTO_IMAGE, CALLISTO_IMAGE_GLOB_INDEX, CALLISTO_IMAGE_GLOB_KEY):
    """create() from a glob pattern includes the known test image."""
    pattern = os.path.join(os.path.dirname(CALLISTO_IMAGE), CALLISTO_IMAGE_GLOB_KEY)
    specs = CallistoSpectrogram.create(pattern)
    reference = CallistoSpectrogram.read(CALLISTO_IMAGE)
    assert_allclose(specs[CALLISTO_IMAGE_GLOB_INDEX].data, reference.data)
# NOTE: the test below no longer works and its original purpose is unclear;
# kept commented out for reference.
#def test_create_single_glob_kw(CALLISTO_IMAGE):
# PATTERN = os.path.join( os.path.dirname(CALLISTO_IMAGE), "BIR_*")
# ca = CallistoSpectrogram.create(singlepattern=PATTERN)
# assert np.array_equal(ca[0].data, CallistoSpectrogram.read(CALLISTO_IMAGE).data)
def test_create_glob_kw(CALLISTO_IMAGE, CALLISTO_IMAGE_GLOB_INDEX, CALLISTO_IMAGE_GLOB_KEY):
    """create(pattern=...) from a glob includes the known test image."""
    pattern = os.path.join(os.path.dirname(CALLISTO_IMAGE), CALLISTO_IMAGE_GLOB_KEY)
    spec = CallistoSpectrogram.create(pattern=pattern)[CALLISTO_IMAGE_GLOB_INDEX]
    reference = CallistoSpectrogram.read(CALLISTO_IMAGE)
    assert_allclose(spec.data, reference.data)
def test_create_glob(CALLISTO_IMAGE_GLOB_KEY):
    """A multi-match glob yields one spectrogram per matching test file."""
    pattern = os.path.join(
        os.path.dirname(sunpy.data.test.__file__),
        CALLISTO_IMAGE_GLOB_KEY,
    )
    specs = CallistoSpectrogram.create(pattern)
    # The test data directory ships exactly two BIR_* files.
    assert len(specs) == 2
def test_minimum_pairs_commotative():
    # NOTE: "commotative" is a long-standing typo for "commutative"; the
    # name is kept unchanged so the test ID stays stable.
    A = [0, 1, 2]
    B = [1, 2, 3]
    swapped = [(b, a, d) for a, b, d in minimal_pairs(B, A)]
    assert list(minimal_pairs(A, B)) == swapped
def test_minimum_pairs_end():
    """Pairing stops cleanly when both sequences end together."""
    expected = [(1, 0, 0), (2, 1, 0), (3, 3, 0)]
    assert list(minimal_pairs([0, 1, 2, 4], [1, 2, 3, 4])) == expected
def test_minimum_pairs_end_more():
    """Trailing unmatched elements in the first sequence are ignored."""
    expected = [(1, 0, 0), (2, 1, 0), (3, 3, 0)]
    assert list(minimal_pairs([0, 1, 2, 4, 8], [1, 2, 3, 4])) == expected
def test_minimum_pairs_end_diff():
    """The last pair carries the residual distance (8 vs 4 -> 4)."""
    expected = [(1, 0, 0), (2, 1, 0), (3, 3, 4)]
    assert list(minimal_pairs([0, 1, 2, 8], [1, 2, 3, 4])) == expected
def test_closest():
    """Each left element pairs with its nearest right element."""
    expected = [(0, 5, 1), (1, 6, 8)]
    assert list(minimal_pairs([50, 60], [0, 10, 20, 30, 40, 51, 52])) == expected
def test_homogenize_factor():
    """_homogenize_params must recover a pure scale factor (b = 2a -> 0.5).

    Fix: the two 14-positional-argument constructor calls are deduplicated
    into a local helper; arguments and behavior are unchanged.
    """
    def make_spec(data):
        # Minimal single-channel spectrogram for the homogenize tests.
        return CallistoSpectrogram(
            data, np.arange(3600), np.array([1]),
            datetime(2011, 1, 1), datetime(2011, 1, 1, 1),
            0, 1, 'Time', 'Frequency', 'Test', None, None, None, False
        )

    a = np.float64(np.random.randint(0, 255, 3600))[np.newaxis, :]
    c1 = make_spec(a)
    b = 2 * a
    c2 = make_spec(b)

    pairs_indices, factors, constants = c1._homogenize_params(
        c2, 0
    )
    assert pairs_indices == [(0, 0)]
    assert_array_almost_equal(factors, [0.5], 2)
    assert_array_almost_equal(constants, [0], 2)
    # Applying the recovered transform to b must reproduce a.
    assert_array_almost_equal(factors[0] * b + constants[0], a)
def test_homogenize_constant():
    """_homogenize_params must recover a pure offset (b = a + 10 -> -10).

    Fix: the two 14-positional-argument constructor calls are deduplicated
    into a local helper; arguments and behavior are unchanged.
    """
    def make_spec(data):
        # Minimal single-channel spectrogram for the homogenize tests.
        return CallistoSpectrogram(
            data, np.arange(3600), np.array([1]),
            datetime(2011, 1, 1), datetime(2011, 1, 1, 1),
            0, 1, 'Time', 'Frequency', 'Test', None, None, None, False
        )

    a = np.float64(np.random.randint(0, 255, 3600))[np.newaxis, :]
    c1 = make_spec(a)
    b = a + 10
    c2 = make_spec(b)

    pairs_indices, factors, constants = c1._homogenize_params(
        c2, 0
    )
    assert pairs_indices == [(0, 0)]
    assert_array_almost_equal(factors, [1], 2)
    assert_array_almost_equal(constants, [-10], 2)
    # Applying the recovered transform to b must reproduce a.
    assert_array_almost_equal(factors[0] * b + constants[0], a)
def test_homogenize_both():
    """_homogenize_params must recover scale and offset (b = 2a + 1).

    Fix: the two 14-positional-argument constructor calls are deduplicated
    into a local helper; arguments and behavior are unchanged.
    """
    def make_spec(data):
        # Minimal single-channel spectrogram for the homogenize tests.
        return CallistoSpectrogram(
            data, np.arange(3600), np.array([1]),
            datetime(2011, 1, 1), datetime(2011, 1, 1, 1),
            0, 1, 'Time', 'Frequency', 'Test', None, None, None, False
        )

    a = np.float64(np.random.randint(0, 255, 3600))[np.newaxis, :]
    c1 = make_spec(a)
    b = 2 * a + 1
    c2 = make_spec(b)

    pairs_indices, factors, constants = c1._homogenize_params(
        c2, 0
    )
    assert pairs_indices == [(0, 0)]
    assert_array_almost_equal(factors, [0.5], 2)
    assert_array_almost_equal(constants, [-0.5], 2)
    # Applying the recovered transform to b must reproduce a.
    assert_array_almost_equal(factors[0] * b + constants[0], a)
def test_homogenize_rightfq():
    """Pairing must pick the matching frequency channel out of several.

    Fix: the two 14-positional-argument constructor calls are deduplicated
    into a local helper; arguments and behavior are unchanged.
    """
    def make_spec(data, freqs):
        # Minimal spectrogram with a caller-chosen frequency axis.
        return CallistoSpectrogram(
            data, np.arange(3600), freqs,
            datetime(2011, 1, 1), datetime(2011, 1, 1, 1),
            0, 1, 'Time', 'Frequency', 'Test', None, None, None, False
        )

    a = np.float64(np.random.randint(0, 255, 3600))[np.newaxis, :]
    c1 = make_spec(a, np.array([1]))
    b = 2 * a + 1
    # Surround the matching channel (frequency 1) with two dummy channels so
    # the pairing has to select the middle row.
    c2 = make_spec(
        np.concatenate([
            np.arange(3600)[np.newaxis, :], b,
            np.arange(3600)[np.newaxis, :]
        ], 0),
        np.array([0, 1, 2]),
    )

    pairs_indices, factors, constants = c1._homogenize_params(
        c2, 0
    )
    assert pairs_indices == [(0, 1)]
    assert_array_almost_equal(factors, [0.5], 2)
    assert_array_almost_equal(constants, [-0.5], 2)
    # Applying the recovered transform to b must reproduce a.
    assert_array_almost_equal(factors[0] * b + constants[0], a)
@pytest.mark.online
def test_extend(CALLISTO_IMAGE):
    """extend() fetches the following file, doubling the time samples."""
    spec = CallistoSpectrogram.create(CALLISTO_IMAGE)
    extended = spec.extend()
    # Not too stable a test, but works.
    assert extended.data.shape == (200, 7200)
|
thisch/python-falafel
|
examples/project1/testrunner.py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
from falafel import findout_terminal_width
from falafel import test_list
from falafel.runners import FalafelTestRunner
from falafel.loaders import FalafelTestLoader
# Command-line interface for the project1 test runner.
# Fixes: user-facing help-string typos ("dirctory", "testcases(s)",
# "it gets automatically."); exit() replaced with raise SystemExit, which
# does not depend on the site module.
parser = argparse.ArgumentParser(description='custom test runner for '
                                 'project1')
parser.add_argument("-S", "--suite", help="Test suite",
                    choices=['moda', 'modb'], type=str, required=True)
parser.add_argument("--test", action='append', help="Testcase(s)/Test(s) "
                    "to run")
parser.add_argument("-L", "--list", action="store_true",
                    help="List tests which match the specified "
                    "suite/testcase(s)")
parser.add_argument("--pdb", action="store_true",
                    help="drop into pdb/ipdb in case of a failure/error")
parser.add_argument("--log", action="store_true",
                    help='write all log messages + header and footer of '
                    'each test to logfiles in a directory specified with '
                    '--logdirectory. If the log directory does not exist '
                    'it gets created automatically.')
parser.add_argument("-d", "--logdirectory",
                    help="log directory [default=%(default)s]",
                    default='log')
parser.add_argument("--debug", action="store_true",
                    help="print logging messages")
parser.add_argument("--interactive", '--enable-interactive-tests',
                    action="store_true", dest='interactive',
                    help="if not set then all interactive tests are skipped")
args = parser.parse_args()

pkg = args.suite
allowed_tests = args.test

# Print a short information header before discovery/run.
width = findout_terminal_width()
print(" info ".center(width, '='))
print("suite: %s" % pkg)
print("tests: %s" % allowed_tests)
print("interactive tests: %s" % args.interactive)
print('=' * width)

if args.interactive:
    # Interactive tests read this environment variable to decide skipping.
    os.environ['INTERACTIVE_TESTS'] = '1'

loader = FalafelTestLoader(allowed_tests=allowed_tests)
suite = loader.discover('mypackage.' + pkg)

tdata = []
if args.debug or args.list:
    with_skipped = args.list
    tdata = test_list(suite, with_skipped=with_skipped)
    if not with_skipped:
        print("The following tests will be run:", end='')
    try:
        from tabulate import tabulate
    except ImportError:
        # Plain-text fallback when tabulate is not installed.
        for data in tdata:
            print(" %-30s\tin %-30s\tskipped: %s" % data)
    else:
        headers = ['class.method', 'module']
        if with_skipped:
            headers.append('skipped')
        print('\n%s' % tabulate(tdata, headers=headers))
    print("%d tests available" % len(tdata))
    if args.list:
        # --list only lists; do not run the suite.
        raise SystemExit

logdir = args.logdirectory if args.log else None
runner = FalafelTestRunner(
    verbosity=2, logger='st', debug=args.debug,
    logdirectory=logdir, width=width, pdb=args.pdb)
runner.run(suite)
|
castelao/CoTeDe
|
cotede/qctests/gradient.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
"""
import logging
import numpy as np
from numpy import ma
from cotede.qctests import QCCheckVar
try:
import pandas as pd
PANDAS_AVAILABLE = True
except ImportError:
PANDAS_AVAILABLE = False
module_logger = logging.getLogger(__name__)
def gradient(x):
    # Backwards-compatible alias: the QC test historically called "gradient"
    # is actually the curvature of the series (see curvature()).
    return curvature(x)
def _curvature_pandas(x):
    """Equivalent to curvature() but using pandas.

    It looks like the numpy implementation is faster even for larger
    datasets, so the default is with numpy.

    Fixes: removed a dead ``y = np.nan * x`` assignment that was immediately
    overwritten, and stopped mutating the caller's masked array in place
    (previously ``x[x.mask] = np.nan`` modified the input).

    Note
    ----
    - In the future this will be useful to handle specific window widths.
    """
    if isinstance(x, ma.MaskedArray):
        # Work on a NaN-filled copy instead of writing NaNs into the
        # caller's array.  Like the previous in-place assignment, this
        # requires a float dtype.
        x = x.filled(np.nan)
    if not PANDAS_AVAILABLE:
        return curvature(x)
    if hasattr(x, "to_series"):
        x = x.to_series()
    elif not isinstance(x, pd.Series):
        x = pd.Series(x)
    # Curvature: offset of each point from the midpoint of its neighbours;
    # shift() leaves NaN at both ends automatically.
    y = x - (x.shift(1) + x.shift(-1)) / 2.0
    return np.array(y)
def curvature(x):
    """Curvature of a timeseries.

    This test is commonly known as gradient for historical reasons, but that
    is a bad name choice since it is not the actual gradient, like:
    d/dx + d/dy + d/dz,
    but as defined by GTSPP, EuroGOOS and others, which is actually the
    curvature of the timeseries.

    Fix: no longer mutates the caller's masked array in place (previously
    ``x[x.mask] = np.nan`` modified the input).

    Note
    ----
    - Pandas.Series operates with indexes, so it should be done different. In
      that case, call for _curvature_pandas.
    """
    if isinstance(x, ma.MaskedArray):
        # NaN-filled copy of the data; leaves the caller's array untouched.
        # Like the previous in-place assignment, this requires a float dtype.
        x = x.filled(np.nan)
    if PANDAS_AVAILABLE and isinstance(x, pd.Series):
        return _curvature_pandas(x)
    x = np.atleast_1d(x)
    # Start from an all-NaN array so the endpoints stay undefined.
    y = np.nan * x
    y[1:-1] = x[1:-1] - (x[:-2] + x[2:]) / 2.0
    return y
class Gradient(QCCheckVar):
    """QC check flagging samples whose "gradient" (curvature) exceeds a threshold."""

    def set_features(self):
        # The feature is the curvature of the checked variable (see curvature()).
        self.features = {"gradient": curvature(self.data[self.varname])}

    def test(self):
        self.flags = {}
        try:
            threshold = self.cfg["threshold"]
        except KeyError:
            module_logger.debug(
                "Deprecated cfg format. It should contain a threshold item."
            )
            # Old-style config: the cfg value itself is the threshold.
            threshold = self.cfg
        # Require a single, finite threshold value.
        # NOTE(review): assert is stripped under python -O; consider raising
        # a ValueError instead -- confirm callers' expectations first.
        assert (
            (np.size(threshold) == 1)
            and (threshold is not None)
            and (np.isfinite(threshold))
        )
        flag = np.zeros(np.shape(self.data[self.varname]), dtype="i1")
        feature = np.absolute(self.features["gradient"])
        # NaN comparisons are False for both branches, so NaN features keep 0
        # here and are overwritten with 9 below.
        flag[feature > threshold] = self.flag_bad
        flag[feature <= threshold] = self.flag_good
        x = np.atleast_1d(self.data[self.varname])
        # Masked or non-finite input samples get flag 9 (missing value).
        flag[ma.getmaskarray(x) | ~np.isfinite(x)] = 9
        self.flags["gradient"] = flag
|
mikewolfli/django-goflow
|
sampleproject/urls.py
|
from django.conf.urls import *
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routing for the sample project (legacy string-view Django patterns()).
urlpatterns = patterns('',
    # django-flags for internationalization
    (r'^lang/', include('sampleproject.flags.urls')),
    # FOR DEBUG AND TEST ONLY
    (r'^.*switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user'),
    # home page
    (r'^$', 'sampleproject.sampleapp.views.home'),
    # home redirection
    (r'^.*home/$', 'django.views.generic.simple.redirect_to', {'url':'/'}),
    # login/logout
    (r'^logout/$', 'django.contrib.auth.views.logout'),
    (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name':'goflow/login.html'}),
    # Example:
    (r'^sampleapp/', include('sampleproject.sampleapp.urls')),
    # Uncomment the next line to enable admin documentation:
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # FOR TEST - insert before admin/(.*)
    (r'^admin/workflow/', include('goflow.apptools.urls_admin')),
    # special
    # NOTE(review): includes the same urlconf as ^admin/workflow/ above;
    # presumably one of the two should point at a different module -- verify.
    (r'^admin/apptools/', include('goflow.apptools.urls_admin')),
    # Uncomment the next line to enable the admin:
    (r'^admin/(.*)', admin.site.root),
    # workflow pages
    (r'^workflow/', include('goflow.urls')),
    # static files
    (r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
|
dimagi/commcare-hq
|
corehq/form_processor/tests/test_sql_update_strategy.py
|
from django.test import TestCase
from freezegun import freeze_time
from unittest.mock import patch
from testil import eq
from corehq.util.soft_assert.core import SoftAssert
from casexml.apps.case.exceptions import ReconciliationError
from casexml.apps.case.xml.parser import CaseUpdateAction, KNOWN_PROPERTIES
from corehq.form_processor.backends.sql.processor import FormProcessorSQL
from corehq.form_processor.backends.sql.update_strategy import SqlCaseUpdateStrategy
from corehq.form_processor.interfaces.processor import ProcessedForms
from corehq.form_processor.models import (
CommCareCase,
CaseTransaction,
RebuildWithReason,
)
from corehq.form_processor.utils import TestFormMetadata
from corehq.form_processor.tests.utils import sharded, FormProcessorTestUtils
from corehq.util.test_utils import get_form_ready_to_save
import uuid
from datetime import datetime
@sharded
class SqlUpdateStrategyTest(TestCase):
    """Tests for SqlCaseUpdateStrategy transaction reconciliation.

    freeze_time is used to construct transactions whose client and server
    dates disagree; the indentation of the ``with freeze_time`` blocks was
    lost in extraction and has been reconstructed -- verify against upstream.
    """
    # Unique domain per run so sharded/parallel runs do not collide.
    DOMAIN = 'update-strategy-test-' + uuid.uuid4().hex
    USER_ID = 'mr_wednesday_'

    @classmethod
    def setUpClass(cls):
        super(SqlUpdateStrategyTest, cls).setUpClass()
        # Start from a clean slate in the sharded SQL backend.
        FormProcessorTestUtils.delete_all_sql_forms()
        FormProcessorTestUtils.delete_all_sql_cases()

    @classmethod
    def tearDownClass(cls):
        FormProcessorTestUtils.delete_all_sql_forms()
        FormProcessorTestUtils.delete_all_sql_cases()
        super(SqlUpdateStrategyTest, cls).tearDownClass()

    @patch.object(SoftAssert, '_call')
    def test_reconcile_transactions(self, soft_assert_mock):
        """Tests a transaction with an early client date and late server date."""
        with freeze_time("2018-10-10"):
            case = self._create_case()
        with freeze_time("2018-10-11"):
            new_old_xform = self._create_form()
        with freeze_time("2018-10-08"):
            # Client date (2018-10-08) predates the case creation date.
            new_old_trans = self._create_case_transaction(case, new_old_xform)
        with freeze_time("2018-10-11"):
            self._save(new_old_xform, case, new_old_trans)
        case = CommCareCase.objects.get_case(case.case_id)
        update_strategy = SqlCaseUpdateStrategy(case)
        # Out-of-order transaction should trigger a reconciliation...
        self.assertTrue(update_strategy.reconcile_transactions_if_necessary())
        self._check_for_reconciliation_error_soft_assert(soft_assert_mock)
        case.save(with_tracked_models=True)
        case = CommCareCase.objects.get_case(case.case_id)
        update_strategy = SqlCaseUpdateStrategy(case)
        # ...and once saved, a second pass must find nothing to reconcile.
        self.assertFalse(update_strategy.reconcile_transactions_if_necessary())
        self._check_for_reconciliation_error_soft_assert(soft_assert_mock)

    def test_reconcile_not_necessary(self):
        # In-order transactions must not trigger a reconciliation.
        with freeze_time("2018-10-10"):
            case = self._create_case()
        with freeze_time("2018-10-11"):
            new_old_xform = self._create_form()
            new_old_trans = self._create_case_transaction(case, new_old_xform)
            self._save(new_old_xform, case, new_old_trans)
        case = CommCareCase.objects.get_case(case.case_id)
        update_strategy = SqlCaseUpdateStrategy(case)
        self.assertFalse(update_strategy.reconcile_transactions_if_necessary())

    def test_ignores_before_rebuild_transaction(self):
        # Out-of-order transactions that precede a rebuild are ignored.
        with freeze_time("2018-10-10"):
            case = self._create_case()
        with freeze_time("2018-10-11"):
            new_old_xform = self._create_form()
        with freeze_time("2018-10-08"):
            new_old_trans = self._create_case_transaction(case, new_old_xform)
        with freeze_time("2018-10-11"):
            self._save(new_old_xform, case, new_old_trans)
        self.assertFalse(case.check_transaction_order())
        with freeze_time("2018-10-13"):
            # A later rebuild supersedes the earlier disorder.
            new_rebuild_xform = self._create_form()
            rebuild_detail = RebuildWithReason(reason="shadow's golden coin")
            rebuild_transaction = CaseTransaction.rebuild_transaction(case, rebuild_detail)
            self._save(new_rebuild_xform, case, rebuild_transaction)
        case = CommCareCase.objects.get_case(case.case_id)
        update_strategy = SqlCaseUpdateStrategy(case)
        self.assertFalse(update_strategy.reconcile_transactions_if_necessary())

    def test_first_transaction_not_create(self):
        # If reordering would put a non-create transaction first,
        # reconciliation must fail loudly.
        with freeze_time("2018-10-10"):
            case = self._create_case()
        with freeze_time("2018-10-08"):
            new_old_xform = self._create_form()
            new_old_trans = self._create_case_transaction(case, new_old_xform)
            self._save(new_old_xform, case, new_old_trans)
        self.assertTrue(case.check_transaction_order())
        case = CommCareCase.objects.get_case(case.case_id)
        update_strategy = SqlCaseUpdateStrategy(case)
        self.assertRaises(ReconciliationError, update_strategy.reconcile_transactions)

    @patch.object(SoftAssert, '_call')
    def test_reconcile_transactions_within_fudge_factor(self, soft_assert_mock):
        """Tests a transaction with an early client date and late server date."""
        with freeze_time("2018-10-10"):
            case = self._create_case()
        with freeze_time("2018-10-11 06:00"):
            new_old_xform = self._create_form()
        with freeze_time("2018-10-10 18:00"):
            # Client date only half a day earlier: within the fudge factor.
            new_old_trans = self._create_case_transaction(case, new_old_xform)
        with freeze_time("2018-10-11 06:00"):
            self._save(new_old_xform, case, new_old_trans)
        with freeze_time("2018-10-11"):
            new_old_xform = self._create_form()
            new_old_trans = self._create_case_transaction(case, new_old_xform)
            self._save(new_old_xform, case, new_old_trans)
        case = CommCareCase.objects.get_case(case.case_id)
        update_strategy = SqlCaseUpdateStrategy(case)
        self.assertTrue(update_strategy.reconcile_transactions_if_necessary())
        self._check_for_reconciliation_error_soft_assert(soft_assert_mock)
        case.save(with_tracked_models=True)
        case = CommCareCase.objects.get_case(case.case_id)
        update_strategy = SqlCaseUpdateStrategy(case)
        self.assertFalse(update_strategy.reconcile_transactions_if_necessary())
        self._check_for_reconciliation_error_soft_assert(soft_assert_mock)

    def _create_form(self, user_id=None, received_on=None):
        """
        Create the models directly so that these tests aren't dependent on any
        other apps.
        :return: XFormInstance
        """
        user_id = user_id or 'mr_wednesday'
        received_on = received_on or datetime.utcnow()
        metadata = TestFormMetadata(
            domain=self.DOMAIN,
            received_on=received_on,
            user_id=user_id
        )
        form = get_form_ready_to_save(metadata)
        return form

    def _create_case_transaction(self, case, form=None, submitted_on=None, action_types=None):
        # The transaction's client date is whatever freeze_time says "now" is.
        form = form or self._create_form()
        submitted_on = submitted_on or datetime.utcnow()
        return CaseTransaction.form_transaction(case, form, submitted_on, action_types)

    def _create_case(self, case_type=None, user_id=None, case_id=None):
        # Build and persist a case plus its initial create (128) transaction.
        case_id = case_id or uuid.uuid4().hex
        user_id = user_id or 'mr_wednesday'
        utcnow = datetime.utcnow()
        case = CommCareCase(
            case_id=case_id,
            domain=self.DOMAIN,
            type=case_type or '',
            owner_id=user_id,
            opened_on=utcnow,
            modified_on=utcnow,
            modified_by=utcnow,
            server_modified_on=utcnow
        )
        form = self._create_form(user_id, utcnow)
        trans = self._create_case_transaction(case, form, utcnow, action_types=[128])
        self._save(form, case, trans)
        return CommCareCase.objects.get_case(case_id)

    def _save(self, form, case, transaction):
        # disable publish to Kafka to avoid intermittent errors caused by
        # the nexus of kafka's consumer thread and freeze_time
        with patch.object(FormProcessorSQL, "publish_changes_to_kafka"):
            case.track_create(transaction)
            FormProcessorSQL.save_processed_models(ProcessedForms(form, []), [case])

    def _check_for_reconciliation_error_soft_assert(self, soft_assert_mock):
        # No soft-assert fired during reconciliation may mention an error.
        for call in soft_assert_mock.call_args_list:
            self.assertNotIn('ReconciliationError', call[0][1])
        soft_assert_mock.reset_mock()
def test_update_known_properties_with_empty_values():
    # Generator-style test (nose/testil protocol): yields one (func, arg)
    # pair per known case property whose default is not None, checking that
    # an empty-string update actually clears the property.
    def test(prop):
        case = SqlCaseUpdateStrategy.case_implementation_class()
        setattr(case, prop, "value")
        action = CaseUpdateAction(block=None, **{prop: ""})
        SqlCaseUpdateStrategy(case)._update_known_properties(action)
        eq(getattr(case, prop), "")

    # verify that at least one property will be tested
    assert any(v is not None for v in KNOWN_PROPERTIES.values()), KNOWN_PROPERTIES
    for prop, default in KNOWN_PROPERTIES.items():
        if default is not None:
            yield test, prop
|
GenericMappingTools/gmt-python
|
pygmt/src/subplot.py
|
"""
subplot - Manage modern mode figure subplot configuration and selection.
"""
import contextlib
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
build_arg_string,
fmt_docstring,
is_nonstr_iter,
kwargs_to_strings,
use_alias,
)
@fmt_docstring
@contextlib.contextmanager
@use_alias(
    Ff="figsize",
    Fs="subsize",
    A="autolabel",
    B="frame",
    C="clearance",
    J="projection",
    M="margins",
    R="region",
    SC="sharex",
    SR="sharey",
    T="title",
    V="verbose",
    X="xshift",
    Y="yshift",
)
@kwargs_to_strings(Ff="sequence", Fs="sequence", M="sequence", R="sequence")
def subplot(self, nrows=1, ncols=1, **kwargs):
    r"""
    Create multi-panel subplot figures.

    This function is used to split the current figure into a rectangular layout
    of subplots that each may contain a single self-contained figure. Begin by
    defining the layout of the entire multi-panel illustration. Several
    parameters are available to specify the systematic layout, labeling,
    dimensions, and more for the subplots.

    Full option list at :gmt-docs:`subplot.html#synopsis-begin-mode`

    {aliases}

    Parameters
    ----------
    nrows : int
        Number of vertical rows of the subplot grid.
    ncols : int
        Number of horizontal columns of the subplot grid.
    figsize : tuple
        Specify the final figure dimensions as (*width*, *height*).
    subsize : tuple
        Specify the dimensions of each subplot directly as (*width*, *height*).
        Note that only one of ``figsize`` or ``subsize`` can be provided at
        once.
    autolabel : bool or str
        [*autolabel*][**+c**\ *dx*\ [/*dy*]][**+g**\ *fill*][**+j**\|\ **J**\
        *refpoint*][**+o**\ *dx*\ [/*dy*]][**+p**\ *pen*][**+r**\|\ **R**]
        [**+v**].
        Specify automatic tagging of each subplot. Append either a number or
        letter [a]. This sets the tag of the first, top-left subplot and others
        follow sequentially. Surround the number or letter by parentheses on
        any side if these should be typeset as part of the tag. Use
        **+j**\|\ **J**\ *refpoint* to specify where the tag should be placed
        in the subplot [TL]. Note: **+j** sets the justification of the tag to
        *refpoint* (suitable for interior tags) while **+J** instead selects
        the mirror opposite (suitable for exterior tags). Append
        **+c**\ *dx*\[/*dy*] to set the clearance between the tag and a
        surrounding text box requested via **+g** or **+p** [3p/3p, i.e., 15%
        of the :gmt-term:`FONT_TAG` size dimension]. Append **+g**\ *fill* to
        paint the tag's text box with *fill* [no painting]. Append
        **+o**\ *dx*\ [/*dy*] to offset the tag's reference point in the
        direction implied by the justification [4p/4p, i.e., 20% of the
        :gmt-term:`FONT_TAG` size]. Append **+p**\ *pen* to draw the outline of
        the tag's text box using selected *pen* [no outline]. Append **+r** to
        typeset your tag numbers using lowercase Roman numerals; use **+R** for
        uppercase Roman numerals [Arabic numerals]. Append **+v** to increase
        tag numbers vertically down columns [horizontally across rows].
    {B}
    clearance : str or list
        [*side*]\ *clearance*.
        Reserve a space of dimension *clearance* between the margin and the
        subplot on the specified side, using *side* values from **w**, **e**,
        **s**, or **n**; or **x** for both **w** and **e**; or **y** for both
        **s** and **n**. No *side* means all sides (i.e. ``clearance='1c'``
        would set a clearance of 1 cm on all sides). The option is repeatable
        to set aside space on more than one side (e.g. ``clearance=['w1c',
        's2c']`` would set a clearance of 1 cm on west side and 2 cm on south
        side). Such space will be left untouched by the main map plotting but
        can be accessed by modules that plot scales, bars, text, etc.
    {J}
    margins : str or list
        This is margin space that is added between neighboring subplots (i.e.,
        the interior margins) in addition to the automatic space added for tick
        marks, annotations, and labels. The margins can be specified as either:

        - a single value (for same margin on all sides). E.g. '5c'.
        - a pair of values (for setting separate horizontal and vertical
          margins). E.g. ['5c', '3c'].
        - a set of four values (for setting separate left, right, bottom, and
          top margins). E.g. ['1c', '2c', '3c', '4c'].

        The actual gap created is always a sum of the margins for the two
        opposing sides (e.g., east plus west or south plus north margins)
        [Default is half the primary annotation font size, giving the full
        annotation font size as the default gap].
    {R}
    sharex : bool or str
        Set subplot layout for shared x-axes. Use when all subplots in a column
        share a common *x*-range. If ``sharex=True``, the first (i.e.,
        **t**\ op) and the last (i.e., **b**\ ottom) rows will have
        *x*-annotations; use ``sharex='t'`` or ``sharex='b'`` to select only
        one of those two rows [both]. Append **+l** if annotated *x*-axes
        should have a label [none]; optionally append the label if it is the
        same for the entire subplot. Append **+t** to make space for subplot
        titles for each row; use **+tc** for top row titles only [no subplot
        titles].
    sharey : bool or str
        Set subplot layout for shared y-axes. Use when all subplots in a row
        share a common *y*-range. If ``sharey=True``, the first (i.e.,
        **l**\ eft) and the last (i.e., **r**\ ight) columns will have
        *y*-annotations; use ``sharey='l'`` or ``sharey='r'`` to select only
        one of those two columns [both]. Append **+l** if annotated *y*-axes
        will have a label [none]; optionally, append the label if it is the
        same for the entire subplot. Append **+p** to make all annotations
        axis-parallel [horizontal]; if not used you may have to set
        ``clearance`` to secure extra space for long horizontal annotations.

        Notes for ``sharex``/``sharey``:

        - Labels and titles that depends on which row or column are specified
          as usual via a subplot's own ``frame`` setting.
        - Append **+w** to the ``figsize`` or ``subsize`` parameter to draw
          horizontal and vertical lines between interior panels using selected
          pen [no lines].
    title : str
        While individual subplots can have titles (see ``sharex``/``sharey`` or
        ``frame``), the entire figure may also have an overarching *heading*
        [no heading]. Font is determined by setting :gmt-term:`FONT_HEADING`.
    {V}
    {XY}
    """
    kwargs = self._preprocess(**kwargs)  # pylint: disable=protected-access
    # allow for spaces in string without needing double quotes
    if isinstance(kwargs.get("A"), str):
        kwargs["A"] = f'"{kwargs.get("A")}"'
    kwargs["T"] = f'"{kwargs.get("T")}"' if kwargs.get("T") else None

    if nrows < 1 or ncols < 1:
        raise GMTInvalidInput("Please ensure that both 'nrows'>=1 and 'ncols'>=1.")
    # figsize (Ff) and subsize (Fs) are mutually exclusive in GMT.
    if kwargs.get("Ff") and kwargs.get("Fs"):
        raise GMTInvalidInput(
            "Please provide either one of 'figsize' or 'subsize' only."
        )

    with Session() as lib:
        try:
            # "subplot begin" enters subplot mode for the nested plotting calls.
            arg_str = " ".join(["begin", f"{nrows}x{ncols}", build_arg_string(kwargs)])
            lib.call_module("subplot", arg_str)
            yield
        finally:
            # Always leave subplot mode, even if the body raised; only the
            # verbosity flag is forwarded to "subplot end".
            v_arg = build_arg_string({"V": kwargs.get("V")})
            lib.call_module("subplot", f"end {v_arg}")
@fmt_docstring
@contextlib.contextmanager
@use_alias(A="fixedlabel", C="clearance", V="verbose")
def set_panel(self, panel=None, **kwargs):
    r"""
    Set the current subplot panel to plot on.

    Before you start plotting you must first select the active subplot. Note:
    If any *projection* option is passed with the question mark **?** as scale
    or width when plotting subplots, then the dimensions of the map are
    automatically determined by the subplot size and your region. For Cartesian
    plots: If you want the scale to apply equally to both dimensions then you
    must specify ``projection="x"`` [The default ``projection="X"`` will fill
    the subplot by using unequal scales].

    {aliases}

    Parameters
    ----------
    panel : str or list
        *row,col*\|\ *index*.
        Sets the current subplot until further notice. **Note**: First *row*
        or *col* is 0, not 1. If not given we go to the next subplot by order
        specified via ``autolabel`` in :meth:`pygmt.Figure.subplot`. As an
        alternative, you may bypass using :meth:`pygmt.Figure.set_panel` and
        instead supply the common option **panel**\ =[*row,col*] to the first
        plot command you issue in that subplot. GMT maintains information about
        the current figure and subplot. Also, you may give the one-dimensional
        *index* instead which starts at 0 and follows the row or column order
        set via ``autolabel`` in :meth:`pygmt.Figure.subplot`.
    fixedlabel : str
        Overrides the automatic labeling with the given string. No modifiers
        are allowed. Placement, justification, etc. are all inherited from how
        ``autolabel`` was specified by the initial :meth:`pygmt.Figure.subplot`
        command.
    clearance : str or list
        [*side*]\ *clearance*.
        Reserve a space of dimension *clearance* between the margin and the
        subplot on the specified side, using *side* values from **w**, **e**,
        **s**, or **n**. The option is repeatable to set aside space on more
        than one side (e.g. ``clearance=['w1c', 's2c']`` would set a clearance
        of 1 cm on west side and 2 cm on south side). Such space will be left
        untouched by the main map plotting but can be accessed by modules that
        plot scales, bars, text, etc. This setting overrides the common
        clearances set by ``clearance`` in the initial
        :meth:`pygmt.Figure.subplot` call.
    {V}
    """
    kwargs = self._preprocess(**kwargs)  # pylint: disable=protected-access
    # allow for spaces in string without needing double quotes
    kwargs["A"] = f'"{kwargs.get("A")}"' if kwargs.get("A") is not None else None
    # convert tuple or list to comma-separated str
    panel = ",".join(map(str, panel)) if is_nonstr_iter(panel) else panel

    with Session() as lib:
        # "subplot set" activates the chosen panel for subsequent plotting.
        arg_str = " ".join(["set", f"{panel}", build_arg_string(kwargs)])
        lib.call_module(module="subplot", args=arg_str)
        yield
|
ergodicbreak/evennia
|
evennia/server/evennia_launcher.py
|
#!/usr/bin/env python
"""
EVENNIA SERVER LAUNCHER SCRIPT
This is the start point for running Evennia.
Sets the appropriate environmental variables and launches the server
and portal through the evennia_runner. Run without arguments to get a
menu. Run the script with the -h flag to see usage information.
"""
from __future__ import print_function
from builtins import input, range
import os
import sys
import signal
import shutil
import importlib
from argparse import ArgumentParser
from subprocess import Popen, check_output, call, CalledProcessError, STDOUT
import django
# Signal processing
SIG = signal.SIGINT
# Set up the main python paths to Evennia
EVENNIA_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import evennia
EVENNIA_LIB = os.path.join(os.path.dirname(os.path.abspath(evennia.__file__)))
EVENNIA_SERVER = os.path.join(EVENNIA_LIB, "server")
EVENNIA_RUNNER = os.path.join(EVENNIA_SERVER, "evennia_runner.py")
EVENNIA_TEMPLATE = os.path.join(EVENNIA_LIB, "game_template")
EVENNIA_PROFILING = os.path.join(EVENNIA_SERVER, "profiling")
EVENNIA_DUMMYRUNNER = os.path.join(EVENNIA_PROFILING, "dummyrunner.py")
TWISTED_BINARY = "twistd"
# Game directory structure
SETTINGFILE = "settings.py"
SERVERDIR = "server"
CONFDIR = os.path.join(SERVERDIR, "conf")
SETTINGS_PATH = os.path.join(CONFDIR, SETTINGFILE)
SETTINGS_DOTPATH = "server.conf.settings"
CURRENT_DIR = os.getcwd()
GAMEDIR = CURRENT_DIR
# Operational setup
SERVER_LOGFILE = None
PORTAL_LOGFILE = None
HTTP_LOGFILE = None
SERVER_PIDFILE = None
PORTAL_PIDFILE = None
SERVER_RESTART = None
PORTAL_RESTART = None
SERVER_PY_FILE = None
PORTAL_PY_FILE = None
PYTHON_MIN = '2.7'
TWISTED_MIN = '16.0.0'
DJANGO_MIN = '1.8'
DJANGO_REC = '1.9'
sys.path[1] = EVENNIA_ROOT
#------------------------------------------------------------
#
# Messages
#
#------------------------------------------------------------
CREATED_NEW_GAMEDIR = \
"""
Welcome to Evennia!
Created a new Evennia game directory '{gamedir}'.
You can now optionally edit your new settings file
at {settings_path}. If you don't, the defaults
will work out of the box. When ready to continue, 'cd' to your
game directory and run:
evennia migrate
This initializes the database. To start the server for the first
time, run:
evennia start
Make sure to create a superuser when asked for it (the email can
be blank if you want). You should now be able to (by default)
connect to your server on 'localhost', port 4000 using a
telnet/mud client or http://localhost:8000 using your web browser.
If things don't work, check so those ports are open.
"""
ERROR_INPUT = \
"""
Command
{args} {kwargs}
raised an error: '{traceback}'.
"""
ERROR_NO_GAMEDIR = \
"""
ERROR: No Evennia settings file was found. Evennia looks for the
file in your game directory as server/conf/settings.py.
You must run this command from somewhere inside a valid game
directory first created with
evennia --init mygamename
If you are in a game directory but is missing a settings.py file,
it may be because you have git-cloned an existing game directory.
The settings.py file is not cloned by git (it's in .gitignore)
since it can contain sensitive and/or server-specific information.
You can create a new, empty settings file with
evennia --initsettings
If cloning the settings file is not a problem you could manually
copy over the old settings file or remove its entry in .gitignore
"""
WARNING_MOVING_SUPERUSER = \
"""
WARNING: Evennia expects a Player superuser with id=1. No such
Player was found. However, another superuser ('{other_key}',
id={other_id}) was found in the database. If you just created this
superuser and still see this text it is probably due to the
database being flushed recently - in this case the database's
internal auto-counter might just start from some value higher than
one.
We will fix this by assigning the id 1 to Player '{other_key}'.
Please confirm this is acceptable before continuing.
"""
WARNING_RUNSERVER = \
"""
WARNING: There is no need to run the Django development
webserver to test out Evennia web features (the web client
will in fact not work since the Django test server knows
nothing about MUDs). Instead, just start Evennia with the
webserver component active (this is the default).
"""
ERROR_SETTINGS = \
"""
ERROR: There was an error importing Evennia's config file
{settingspath}.
There is usually one of three reasons for this:
1) You are not running this command from your game directory.
Change directory to your game directory and try again (or
create a new game directory using evennia --init <dirname>)
2) The settings file contains a syntax error. If you see a
traceback above, review it, resolve the problem and try again.
3) Django is not correctly installed. This usually shows as
errors mentioning 'DJANGO_SETTINGS_MODULE'. If you run a
virtual machine, it might be worth to restart it to see if
this resolves the issue.
""".format(settingsfile=SETTINGFILE, settingspath=SETTINGS_PATH)
ERROR_INITSETTINGS = \
"""
ERROR: 'evennia --initsettings' must be called from the root of
your game directory, since it tries to (re)create the new
settings.py file in a subfolder server/conf/.
"""
RECREATED_SETTINGS = \
"""
(Re)created an empty settings file in server/conf/settings.py.
Note that if you were using an existing database, the password
salt of this new settings file will be different from the old one.
This means that any existing players may not be able to log in to
their accounts with their old passwords.
"""
ERROR_DATABASE = \
"""
ERROR: Your database does not seem to be set up correctly.
(error was '{traceback}')
Standing in your game directory, run
evennia migrate
to initialize/update the database according to your settings.
"""
ERROR_WINDOWS_WIN32API = \
"""
ERROR: Unable to import win32api, which Twisted requires to run.
You may download it from:
http://sourceforge.net/projects/pywin32/files/pywin32/
If you are running in a virtual environment, browse to the
location of the latest win32api exe file for your computer and
Python version and copy the url to it; then paste it into a call
to easy_install:
easy_install http://<url to win32api exe>
"""
INFO_WINDOWS_BATFILE = \
"""
INFO: Since you are running Windows, a file 'twistd.bat' was
created for you. This is a simple batch file that tries to call
the twisted executable. Evennia determined this to be:
{twistd_path}
If you run into errors at startup you might need to edit
twistd.bat to point to the actual location of the Twisted
executable (usually called twistd.py) on your machine.
This procedure is only done once. Run evennia.py again when you
are ready to start the server.
"""
CMDLINE_HELP = \
"""
Starts or operates the Evennia MU* server. Also allows for
initializing a new game directory and manages the game's database.
You can also pass most standard django-admin arguments and
options.
"""
VERSION_INFO = \
"""
Evennia {version}
OS: {os}
Python: {python}
Twisted: {twisted}
Django: {django}{about}
"""
ABOUT_INFO = \
"""
Evennia MUD/MUX/MU* development system
Licence: BSD 3-Clause Licence
Web: http://www.evennia.com
Irc: #evennia on FreeNode
Forum: http://www.evennia.com/discussions
Maintainer (2010-): Griatch (griatch AT gmail DOT com)
Maintainer (2006-10): Greg Taylor
Use -h for command line options.
"""
HELP_ENTRY = \
"""
Enter 'evennia -h' for command-line options.
Use option (1) in a production environment. During development (2) is
usually enough, portal debugging is usually only useful if you are
adding new protocols or are debugging Evennia itself.
Reload with (5) to update the server with your changes without
disconnecting any players.
Note: Reload and stop are sometimes poorly supported in Windows. If you
have issues, log into the game to stop or restart the server instead.
"""
MENU = \
"""
+----Evennia Launcher-------------------------------------------+
| |
+--- Starting --------------------------------------------------+
| |
| 1) (normal): All output to logfiles |
| 2) (server devel): Server logs to terminal (-i option) |
| 3) (portal devel): Portal logs to terminal |
| 4) (full devel): Both Server and Portal logs to terminal |
| |
+--- Restarting ------------------------------------------------+
| |
| 5) Reload the Server |
| 6) Reload the Portal (only works with portal/full debug) |
| |
+--- Stopping --------------------------------------------------+
| |
| 7) Stopping both Portal and Server |
| 8) Stopping only Server |
| 9) Stopping only Portal |
| |
+---------------------------------------------------------------+
| h) Help i) About info q) Abort |
+---------------------------------------------------------------+
"""
ERROR_LOGDIR_MISSING = \
"""
ERROR: One or more log-file directory locations could not be
found:
{logfiles}
This is simple to fix: Just manually create the missing log
directory (or directories) and re-launch the server (the log files
will be created automatically).
(Explanation: Evennia creates the log directory automatically when
initializating a new game directory. This error usually happens if
you used git to clone a pre-created game directory - since log
files are in .gitignore they will not be cloned, which leads to
the log directory also not being created.)
"""
ERROR_PYTHON_VERSION = \
"""
ERROR: Python {pversion} used. Evennia requires version
{python_min} or higher (but not 3.x).
"""
ERROR_TWISTED_VERSION = \
"""
ERROR: Twisted {tversion} found. Evennia requires
version {twisted_min} or higher.
"""
ERROR_NOTWISTED = \
"""
ERROR: Twisted does not seem to be installed.
"""
ERROR_DJANGO_MIN = \
"""
ERROR: Django {dversion} found. Evennia requires version {django_min}
or higher.
Install it with for example `pip install --upgrade django`
or with `pip install django=={django_min}` to get a specific version.
It's also a good idea to run `evennia migrate` after this upgrade.
"""
NOTE_DJANGO_MIN = \
"""
NOTE: Django {dversion} found. This will work, but v{django_rec}
is recommended for production.
"""
NOTE_DJANGO_NEW = \
"""
NOTE: Django {dversion} found. This is newer than Evennia's
recommended version (v{django_rec}). It might work, but may be new
enough to not be fully tested yet. Report any issues.
"""
ERROR_NODJANGO = \
"""
ERROR: Django does not seem to be installed.
"""
NOTE_KEYBOARDINTERRUPT = \
"""
STOP: Caught keyboard interrupt while in interactive mode.
"""
#------------------------------------------------------------
#
# Functions
#
#------------------------------------------------------------
def evennia_version():
    """
    Get the Evennia version info from the main package.

    Returns:
        version (str): The version string, possibly extended with the
            short git revision hash if the source tree is a git checkout.
    """
    version = "Unknown"
    try:
        import evennia
        version = evennia.__version__
    except ImportError:
        pass
    try:
        rev = check_output(
            "git rev-parse --short HEAD",
            shell=True, cwd=EVENNIA_ROOT, stderr=STDOUT).strip()
        if isinstance(rev, bytes):
            # check_output returns bytes on Python 3; decode so the
            # interpolated string does not render as b'...'.
            rev = rev.decode("utf-8", "replace")
        version = "%s (rev %s)" % (version, rev)
    except (IOError, CalledProcessError):
        # not a git checkout, or git is not installed - keep plain version
        pass
    return version
EVENNIA_VERSION = evennia_version()
def check_main_evennia_dependencies():
    """
    Checks and imports the Evennia dependencies. This must be done
    already before the paths are set up.

    Returns:
        not_error (bool): True if no dependency error was found.
    """
    def _version_tuple(version_string):
        # "1.10.2" -> (1, 10, 2). Numeric comparison is required: plain
        # string comparison (as originally used) wrongly claims e.g.
        # "9.0.0" > "16.0.0". Non-numeric suffixes ("rc1") are dropped.
        parts = []
        for part in version_string.split("."):
            if not part.isdigit():
                break
            parts.append(int(part))
        return tuple(parts)

    error = False

    # Python
    pversion = ".".join(str(num) for num in sys.version_info if type(num) == int)
    if _version_tuple(pversion) < _version_tuple(PYTHON_MIN):
        print(ERROR_PYTHON_VERSION.format(pversion=pversion, python_min=PYTHON_MIN))
        error = True
    # Twisted
    try:
        import twisted
        tversion = twisted.version.short()
        if _version_tuple(tversion) < _version_tuple(TWISTED_MIN):
            print(ERROR_TWISTED_VERSION.format(
                tversion=tversion, twisted_min=TWISTED_MIN))
            error = True
    except ImportError:
        print(ERROR_NOTWISTED)
        error = True
    # Django
    try:
        dversion = ".".join(str(num) for num in django.VERSION if type(num) == int)
        # only the main version (1.5, not 1.5.4.0)
        dversion_main = ".".join(dversion.split(".")[:2])
        dtuple = _version_tuple(dversion)
        if dtuple < _version_tuple(DJANGO_MIN):
            print(ERROR_DJANGO_MIN.format(
                dversion=dversion_main, django_min=DJANGO_MIN))
            error = True
        elif _version_tuple(DJANGO_MIN) <= dtuple < _version_tuple(DJANGO_REC):
            print(NOTE_DJANGO_MIN.format(
                dversion=dversion_main, django_rec=DJANGO_REC))
        elif _version_tuple(DJANGO_REC) < _version_tuple(dversion_main):
            print(NOTE_DJANGO_NEW.format(
                dversion=dversion_main, django_rec=DJANGO_REC))
    except ImportError:
        print(ERROR_NODJANGO)
        error = True
    if error:
        sys.exit()
    # return True/False if error was reported or not
    return not error
def set_gamedir(path):
    """
    Set GAMEDIR based on path, by figuring out where the setting file
    is inside the directory tree.

    Walks upwards from the current working directory (at most ten
    levels) until it finds a `server/conf/settings.py`; exits with an
    error message if none is found.
    """
    global GAMEDIR

    # NOTE: `path` is currently unused; the search starts from the cwd.
    settings_path = os.path.join("server", "conf", "settings.py")
    for _ in range(10):
        current_dir = os.getcwd()
        if "server" in os.listdir(current_dir) and os.path.isfile(settings_path):
            GAMEDIR = current_dir
            return
        # not found here - step one level up and try again
        os.chdir(os.pardir)
    print(ERROR_NO_GAMEDIR)
    sys.exit()
def create_secret_key():
    """
    Randomly create the secret key for the settings file.

    Returns:
        secret_key (str): A 40-character random key containing none of
            the characters (\\, ', {, }) that would break the settings
            template's quoting or str.format placeholders.
    """
    import random
    import string
    # string.letters does not exist on Python 3; ascii_letters is
    # available on both Python 2 and 3.
    pool = string.ascii_letters + string.digits + string.punctuation
    # strip/replace characters that would break the settings template
    pool = pool.replace("\\", "").replace("'", '"').replace("{", "_").replace("}", "-")
    secret_key = list(pool)
    random.shuffle(secret_key)
    return "".join(secret_key[:40])
def create_settings_file(init=True):
    """
    Uses the template settings file to build a working settings file.

    Args:
        init (bool): This is part of the normal evennia --init
            operation. If false, this function will copy a fresh
            template file in (asking if it already exists).
    """
    settings_path = os.path.join(GAMEDIR, "server", "conf", "settings.py")
    if not init:
        # if not --init mode, settings file may already exist from before
        if os.path.exists(settings_path):
            # use the py2/3-compatible `input` imported from `builtins` at
            # the top of this module; the original called the py2-only
            # `raw_input`, which is a NameError on Python 3.
            inp = input("server/conf/settings.py already exists. "
                        "Do you want to reset it? y/[N]> ")
            if not inp.lower() == 'y':
                print("Aborted.")
                sys.exit()
            else:
                print("Reset the settings file.")
        # copy a fresh settings template into place before tweaking it
        default_settings_path = os.path.join(EVENNIA_TEMPLATE, "server", "conf", "settings.py")
        shutil.copy(default_settings_path, settings_path)
    with open(settings_path, 'r') as f:
        settings_string = f.read()
    # tweak the settings: fill in the template's format placeholders
    setting_dict = {
        "settings_default": os.path.join(EVENNIA_LIB, "settings_default.py"),
        "servername": "\"%s\"" % GAMEDIR.rsplit(os.path.sep, 1)[1].capitalize(),
        "secret_key": "\'%s\'" % create_secret_key()}
    settings_string = settings_string.format(**setting_dict)
    with open(settings_path, 'w') as f:
        f.write(settings_string)
def create_game_directory(dirname):
    """
    Initialize a new game directory named dirname
    at the current path. This means copying the
    template directory from evennia's root.

    Args:
        dirname (str): The directory name to create.
    """
    global GAMEDIR
    GAMEDIR = os.path.abspath(os.path.join(CURRENT_DIR, dirname))
    # refuse to clobber an existing directory
    if os.path.exists(GAMEDIR):
        print("Cannot create new Evennia game dir: '%s' already exists." % dirname)
        sys.exit()
    # copy the game template tree into place, then seed its settings file
    shutil.copytree(EVENNIA_TEMPLATE, GAMEDIR)
    create_settings_file()
def create_superuser():
    """
    Create the superuser player.

    Interactively prompts for credentials via Django's
    `createsuperuser` management command; the resulting account is
    Player #1, the 'owner' account of the server.
    """
    print(
        "\nCreate a superuser below. The superuser is Player #1, the 'owner' "
        "account of the server.\n")
    # NOTE(review): relies on `django.core.management` being reachable as
    # an attribute of the plain `import django` done at module top -
    # presumably made available by earlier django machinery; confirm.
    django.core.management.call_command("createsuperuser", interactive=True)
def check_database():
    """
    Check so the database exists.

    Returns:
        exists (bool): `True` if the database exists, otherwise `False`.
    """
    # Check so a database exists and is accessible
    from django.db import connection
    tables = connection.introspection.get_table_list(connection.cursor())
    if not tables or not isinstance(tables[0], basestring): # django 1.8+
        # django 1.8+ returns TableInfo objects instead of plain name strings
        tables = [tableinfo.name for tableinfo in tables]
    if tables and u'players_playerdb' in tables:
        # database exists and seems set up. Initialize evennia.
        import evennia
        evennia._init()
    # Try to get Player#1 (the superuser account)
    from evennia.players.models import PlayerDB
    try:
        PlayerDB.objects.get(id=1)
    except django.db.utils.OperationalError as e:
        # database not migrated/reachable - tell the user how to fix it
        print(ERROR_DATABASE.format(traceback=e))
        sys.exit()
    except PlayerDB.DoesNotExist:
        # no superuser yet. We need to create it.
        other_superuser = PlayerDB.objects.filter(is_superuser=True)
        if other_superuser:
            # Another superuser was found, but not with id=1. This may
            # happen if using flush (the auto-id starts at a higher
            # value). We copy this superuser into id=1. To do
            # this we must deepcopy it, delete it then save the copy
            # with the new id. This allows us to avoid the UNIQUE
            # constraint on usernames.
            other = other_superuser[0]
            other_id = other.id
            other_key = other.username
            print(WARNING_MOVING_SUPERUSER.format(
                other_key=other_key, other_id=other_id))
            res = ""
            while res.upper() != "Y":
                # ask for permission
                res = input("Continue [Y]/N: ")
                if res.upper() == "N":
                    sys.exit()
                elif not res:
                    # empty answer counts as the default "Y"
                    break
            # continue with moving the existing superuser to id=1
            from copy import deepcopy
            new = deepcopy(other)
            other.delete()
            new.id = 1
            new.save()
        else:
            create_superuser()
            # recurse once to re-verify the newly created superuser
            check_database()
    return True
def getenv():
    """
    Get a copy of the current environment with PYTHONPATH set.

    Returns:
        env (dict): Environment dict whose PYTHONPATH mirrors the
            current `sys.path`.
    """
    # Windows separates path entries with ';', POSIX with ':'
    separator = ";" if os.name == 'nt' else ":"
    env = dict(os.environ)
    env['PYTHONPATH'] = separator.join(sys.path)
    return env
def get_pid(pidfile):
    """
    Get the PID (Process ID) by trying to access an PID file.

    Args:
        pidfile (str): The path of the pid file.

    Returns:
        pid (str or None): The process id read from the file, or
            None if the file does not exist.
    """
    if os.path.exists(pidfile):
        # use a context manager so the handle is always closed
        # (the original opened the file and never closed it).
        with open(pidfile, 'r') as f:
            return f.read()
    return None
def del_pid(pidfile):
    """
    The pidfile should normally be removed after a process has
    finished, but when sending certain signals they remain, so we need
    to clean them manually.

    Args:
        pidfile (str): The path of the pid file.
    """
    # nothing to do if the file is already gone
    if not os.path.exists(pidfile):
        return
    os.remove(pidfile)
def kill(pidfile, signal=SIG, succmsg="", errmsg="",
         restart_file=SERVER_RESTART, restart=False):
    """
    Send a kill signal to a process based on PID. A customized
    success/error message will be printed.

    Before signalling, a restart flag ("reload" or "shutdown") is
    written to `restart_file` so the dying process knows whether it
    should come back up.

    Args:
        pidfile (str): The path of the pidfile to get the PID from.
        signal (int, optional): Signal identifier.
        succmsg (str, optional): Message to log on success.
        errmsg (str, optional): Message to log on failure.
        restart_file (str, optional): Restart file location.
            NOTE(review): the default binds the value SERVER_RESTART had
            at import time (None - init_game_directory() only sets it
            later), so callers must always pass this explicitly; all call
            sites in this module do.
        restart (bool, optional): Are we in restart mode or not.
    """
    pid = get_pid(pidfile)
    if pid:
        if os.name == 'nt':
            # on Windows the pidfile is removed up front - presumably
            # because the signalled process cannot clean it up itself;
            # confirm.
            os.remove(pidfile)
        # set restart/norestart flag
        if restart:
            django.core.management.call_command(
                'collectstatic', interactive=False, verbosity=0)
            with open(restart_file, 'w') as f:
                f.write("reload")
        else:
            with open(restart_file, 'w') as f:
                f.write("shutdown")
        try:
            os.kill(int(pid), signal)
        except OSError:
            # the recorded PID no longer maps to a live process we may signal
            print("Process %(pid)s cannot be stopped. "\
                  "The PID file 'server/%(pidfile)s' seems stale. "\
                  "Try removing it." % {'pid': pid, 'pidfile': pidfile})
            return
        print("Evennia:", succmsg)
        return
    print("Evennia:", errmsg)
def show_version_info(about=False):
    """
    Display version info.

    Args:
        about (bool): Include ABOUT info as well as version numbers.

    Returns:
        version_info (str): A complete version info string.
    """
    import os
    import sys
    import twisted
    import django

    about_string = ABOUT_INFO if about else ""
    return VERSION_INFO.format(
        version=EVENNIA_VERSION,
        about=about_string,
        os=os.name,
        python=sys.version.split()[0],
        twisted=twisted.version.short(),
        django=django.get_version())
def error_check_python_modules():
    """
    Import settings modules in settings. This will raise exceptions on
    pure python-syntax issues which are hard to catch gracefully with
    exceptions in the engine (since they are formatting errors in the
    python source files themselves). Best they fail already here
    before we get any further.

    Raises:
        DeprecationWarning: For trying to access various modules
            (usually in `settings.py`) which are no longer supported.
    """
    from django.conf import settings

    def imp(path, split=True):
        # import `path`; with split=True, `path` is a dotted
        # "module.member" path - import the module with the member
        # in the fromlist so submodules resolve correctly.
        mod, fromlist = path, "None"
        if split:
            mod, fromlist = path.rsplit('.', 1)
        __import__(mod, fromlist=[fromlist])

    # core modules
    imp(settings.COMMAND_PARSER)
    imp(settings.SEARCH_AT_RESULT)
    imp(settings.CONNECTION_SCREEN_MODULE)
    #imp(settings.AT_INITIAL_SETUP_HOOK_MODULE, split=False)
    for path in settings.LOCK_FUNC_MODULES:
        imp(path, split=False)
    # cmdsets - reject settings keys renamed in newer Evennia versions
    deprstring = ("settings.%s should be renamed to %s. If defaults are used, "
                  "their path/classname must be updated "
                  "(see evennia/settings_default.py).")
    if hasattr(settings, "CMDSET_DEFAULT"):
        raise DeprecationWarning(deprstring % (
            "CMDSET_DEFAULT", "CMDSET_CHARACTER"))
    if hasattr(settings, "CMDSET_OOC"):
        raise DeprecationWarning(deprstring % ("CMDSET_OOC", "CMDSET_PLAYER"))
    if settings.WEBSERVER_ENABLED and not isinstance(settings.WEBSERVER_PORTS[0], tuple):
        raise DeprecationWarning(
            "settings.WEBSERVER_PORTS must be on the form "
            "[(proxyport, serverport), ...]")
    if hasattr(settings, "BASE_COMM_TYPECLASS"):
        raise DeprecationWarning(deprstring % (
            "BASE_COMM_TYPECLASS", "BASE_CHANNEL_TYPECLASS"))
    if hasattr(settings, "COMM_TYPECLASS_PATHS"):
        raise DeprecationWarning(deprstring % (
            "COMM_TYPECLASS_PATHS", "CHANNEL_TYPECLASS_PATHS"))
    if hasattr(settings, "CHARACTER_DEFAULT_HOME"):
        raise DeprecationWarning(
            "settings.CHARACTER_DEFAULT_HOME should be renamed to "
            "DEFAULT_HOME. See also settings.START_LOCATION "
            "(see evennia/settings_default.py).")
    # settings keys merged into TYPECLASS_PATHS
    deprstring = ("settings.%s is now merged into settings.TYPECLASS_PATHS. "
                  "Update your settings file.")
    if hasattr(settings, "OBJECT_TYPECLASS_PATHS"):
        raise DeprecationWarning(deprstring % "OBJECT_TYPECLASS_PATHS")
    if hasattr(settings, "SCRIPT_TYPECLASS_PATHS"):
        raise DeprecationWarning(deprstring % "SCRIPT_TYPECLASS_PATHS")
    if hasattr(settings, "PLAYER_TYPECLASS_PATHS"):
        raise DeprecationWarning(deprstring % "PLAYER_TYPECLASS_PATHS")
    if hasattr(settings, "CHANNEL_TYPECLASS_PATHS"):
        raise DeprecationWarning(deprstring % "CHANNEL_TYPECLASS_PATHS")
    # cmdset import failures only warn (don't abort startup)
    from evennia.commands import cmdsethandler
    if not cmdsethandler.import_cmdset(settings.CMDSET_UNLOGGEDIN, None):
        print("Warning: CMDSET_UNLOGGED failed to load!")
    if not cmdsethandler.import_cmdset(settings.CMDSET_CHARACTER, None):
        print("Warning: CMDSET_CHARACTER failed to load")
    if not cmdsethandler.import_cmdset(settings.CMDSET_PLAYER, None):
        print("Warning: CMDSET_PLAYER failed to load")
    # typeclasses
    imp(settings.BASE_PLAYER_TYPECLASS)
    imp(settings.BASE_OBJECT_TYPECLASS)
    imp(settings.BASE_CHARACTER_TYPECLASS)
    imp(settings.BASE_ROOM_TYPECLASS)
    imp(settings.BASE_EXIT_TYPECLASS)
    imp(settings.BASE_SCRIPT_TYPECLASS)
def init_game_directory(path, check_db=True):
    """
    Try to analyze the given path to find settings.py - this defines
    the game directory and also sets PYTHONPATH as well as the django
    path.

    Args:
        path (str): Path to new game directory, including its name.
        check_db (bool, optional): Check if the database exists.
    """
    # set the GAMEDIR path
    set_gamedir(path)
    # Add gamedir to python path
    sys.path.insert(0, GAMEDIR)
    # NOTE(review): assumes at least one command-line argument is
    # present - sys.argv[1] raises IndexError otherwise; confirm the
    # caller guarantees this.
    if sys.argv[1] == 'test':
        os.environ['DJANGO_SETTINGS_MODULE'] = 'evennia.settings_default'
    else:
        os.environ['DJANGO_SETTINGS_MODULE'] = SETTINGS_DOTPATH
    # required since django1.7
    django.setup()
    # test existence of the settings module
    try:
        from django.conf import settings
    except Exception as ex:
        # only show a traceback for errors other than a missing module
        if not str(ex).startswith("No module named"):
            import traceback
            print(traceback.format_exc().strip())
        print(ERROR_SETTINGS)
        sys.exit()
    # this will both check the database and initialize the evennia dir.
    if check_db:
        check_database()
    # set up the Evennia executables and log file locations
    global SERVER_PY_FILE, PORTAL_PY_FILE
    global SERVER_LOGFILE, PORTAL_LOGFILE, HTTP_LOGFILE
    global SERVER_PIDFILE, PORTAL_PIDFILE
    global SERVER_RESTART, PORTAL_RESTART
    global EVENNIA_VERSION

    SERVER_PY_FILE = os.path.join(EVENNIA_LIB, "server", "server.py")
    PORTAL_PY_FILE = os.path.join(EVENNIA_LIB, "portal", "portal", "portal.py")
    SERVER_PIDFILE = os.path.join(GAMEDIR, SERVERDIR, "server.pid")
    PORTAL_PIDFILE = os.path.join(GAMEDIR, SERVERDIR, "portal.pid")
    SERVER_RESTART = os.path.join(GAMEDIR, SERVERDIR, "server.restart")
    PORTAL_RESTART = os.path.join(GAMEDIR, SERVERDIR, "portal.restart")
    SERVER_LOGFILE = settings.SERVER_LOG_FILE
    PORTAL_LOGFILE = settings.PORTAL_LOG_FILE
    HTTP_LOGFILE = settings.HTTP_LOG_FILE
    # verify existence of log file dir (this can be missing e.g.
    # if the game dir itself was cloned since log files are in .gitignore)
    logdirs = [logfile.rsplit(os.path.sep, 1)
               for logfile in (SERVER_LOGFILE, PORTAL_LOGFILE, HTTP_LOGFILE)]
    if not all(os.path.isdir(pathtup[0]) for pathtup in logdirs):
        errstr = "\n ".join("%s (log file %s)" % (pathtup[0], pathtup[1]) for pathtup in logdirs
                            if not os.path.isdir(pathtup[0]))
        print(ERROR_LOGDIR_MISSING.format(logfiles=errstr))
        sys.exit()
    if os.name == 'nt':
        # We need to handle Windows twisted separately. We create a
        # batchfile in game/server, linking to the actual binary
        global TWISTED_BINARY
        # Windows requires us to use the absolute path for the bat file.
        server_path = os.path.dirname(os.path.abspath(__file__))
        TWISTED_BINARY = os.path.join(server_path, "twistd.bat")
        # add path so system can find the batfile
        sys.path.insert(1, os.path.join(GAMEDIR, SERVERDIR))
        try:
            importlib.import_module("win32api")
        except ImportError:
            print(ERROR_WINDOWS_WIN32API)
            sys.exit()
        batpath = os.path.join(EVENNIA_SERVER, TWISTED_BINARY)
        if not os.path.exists(batpath):
            # Test for executable twisted batch file. This calls the
            # twistd.py executable that is usually not found on the
            # path in Windows. It's not enough to locate
            # scripts.twistd, what we want is the executable script
            # C:\PythonXX/Scripts/twistd.py. Alas we cannot hardcode
            # this location since we don't know if user has Python in
            # a non-standard location. So we try to figure it out.
            twistd = importlib.import_module("twisted.scripts.twistd")
            twistd_dir = os.path.dirname(twistd.__file__)
            # note that we hope the twistd package won't change here, since we
            # try to get to the executable by relative path.
            twistd_path = os.path.abspath(
                os.path.join(twistd_dir, os.pardir, os.pardir, os.pardir,
                             os.pardir, 'scripts', 'twistd.py'))
            with open(batpath, 'w') as bat_file:
                # build a custom bat file for windows
                bat_file.write("@\"%s\" \"%s\" %%*" % (
                    sys.executable, twistd_path))
            print(INFO_WINDOWS_BATFILE.format(twistd_path=twistd_path))
def run_dummyrunner(number_of_dummies):
    """
    Start an instance of the dummyrunner

    Args:
        number_of_dummies (int): The number of dummy players to start.

    Notes:
        The dummy players' behavior can be customized by adding a
        `dummyrunner_settings.py` config file in the game's conf/
        directory.
    """
    # every argv entry must be a string; the original fell back to the
    # integer 1 here, which subprocess would reject.
    number_of_dummies = str(int(number_of_dummies)) if number_of_dummies else "1"
    cmdstr = [sys.executable, EVENNIA_DUMMYRUNNER, "-N", number_of_dummies]
    # look for the optional config in the game's conf/ directory (the
    # original joined onto SETTINGS_PATH - the settings *file* path -
    # producing .../settings.py/dummyrunner_settings.py, which can
    # never exist).
    config_file = os.path.join(GAMEDIR, CONFDIR, "dummyrunner_settings.py")
    if os.path.exists(config_file):
        cmdstr.extend(["--config", config_file])
    try:
        call(cmdstr, env=getenv())
    except KeyboardInterrupt:
        # a Ctrl-C simply stops the dummyrunner; not an error
        pass
def list_settings(keys):
    """
    Display the server settings. We only display the Evennia specific
    settings here. The result will be printed to the terminal.

    Args:
        keys (str or list): Setting key or keys to inspect.
    """
    from importlib import import_module
    from evennia.utils import evtable

    evsettings = import_module(SETTINGS_DOTPATH)
    if len(keys) == 1 and keys[0].upper() == "ALL":
        # list all available setting keys, four per table row
        table = evtable.EvTable()
        all_keys = sorted(key for key in evsettings.__dict__ if key.isupper())
        for start in range(0, len(all_keys), 4):
            table.add_row(*all_keys[start:start + 4])
    else:
        # show the values of the specifically requested keys
        table = evtable.EvTable(width=131)
        requested = {key.upper() for key in keys}
        for key, val in evsettings.__dict__.items():
            if key in requested:
                table.add_row(key, str(val))
    print(table)
def run_menu():
    """
    This launches an interactive menu.

    Loops until the user quits ('q') or a numeric operation has been
    carried out; each valid number maps to one or more
    `server_operation` calls.
    """
    # numeric menu choices -> sequences of server_operation() arguments
    operations = {
        1: [("start", "all", False, False)],          # all, log to files
        2: [("start", "all", True, False)],           # all, server interactive
        3: [("start", "server", False, False),        # portal interactive
            ("start", "portal", True, False)],
        4: [("start", "server", True, False),         # both interactive
            ("start", "portal", True, False)],
        5: [("reload", "server", None, None)],
        6: [("reload", "portal", None, None)],
        7: [("stop", "all", None, None)],
        8: [("stop", "server", None, None)],
        9: [("stop", "portal", None, None)],
    }
    while True:
        # menu loop
        print(MENU)
        choice = input(" option > ")
        lowered = choice.lower()
        # quitting and help
        if lowered == 'q':
            return
        if lowered == 'h':
            print(HELP_ENTRY)
            input("press <return> to continue ...")
            continue
        if lowered in ('v', 'i', 'a'):
            print(show_version_info(about=True))
            input("press <return> to continue ...")
            continue
        # numeric options
        try:
            selection = int(choice)
        except ValueError:
            print("Not a valid option.")
            continue
        if selection not in operations:
            print("Not a valid option.")
            continue
        for op_args in operations[selection]:
            server_operation(*op_args)
        return
def server_operation(mode, service, interactive, profiler, logserver=False):
    """
    Handle argument options given on the command line.

    Args:
        mode (str): Start/stop/restart and so on.
        service (str): "server", "portal" or "all".
        interactive (bool). Use interactive mode or daemon.
        profiler (bool): Run the service under the profiler.
        logserver (bool, optional): Log Server data to logfile
            specified by settings.SERVER_LOG_FILE.
    """
    cmdstr = [sys.executable, EVENNIA_RUNNER]
    errmsg = "The %s does not seem to be running."
    if mode == 'start':
        # launch the error checker. Best to catch the errors already here.
        error_check_python_modules()
        # starting one or many services - build up the runner's argv
        if service == 'server':
            if profiler:
                cmdstr.append('--pserver')
            if interactive:
                cmdstr.append('--iserver')
            if logserver:
                cmdstr.append('--logserver')
            cmdstr.append('--noportal')
        elif service == 'portal':
            if profiler:
                cmdstr.append('--pportal')
            if interactive:
                cmdstr.append('--iportal')
            cmdstr.append('--noserver')
            django.core.management.call_command(
                'collectstatic', verbosity=1, interactive=False)
        else:
            # all
            # for convenience we don't start logging of
            # portal, only of server with this command.
            if profiler:
                # this is the common case
                cmdstr.append('--pserver')
            if interactive:
                cmdstr.append('--iserver')
            if logserver:
                cmdstr.append('--logserver')
            django.core.management.call_command(
                'collectstatic', verbosity=1, interactive=False)
        # positional arguments consumed by the evennia_runner
        cmdstr.extend([
            GAMEDIR, TWISTED_BINARY, SERVER_LOGFILE,
            PORTAL_LOGFILE, HTTP_LOGFILE])
        # start the server
        process = Popen(cmdstr, env=getenv())
        if interactive:
            try:
                process.wait()
            except KeyboardInterrupt:
                server_operation("stop", "portal", False, False)
                return
            finally:
                # NOTE(review): this `finally` also runs after a clean
                # process.wait() exit, so the keyboard-interrupt note is
                # printed on normal shutdown too - confirm intended.
                print(NOTE_KEYBOARDINTERRUPT)
    elif mode == 'reload':
        # restarting services
        if os.name == 'nt':
            print(
                "Restarting from command line is not supported under Windows. "
                "Log into the game to restart.")
            return
        if service == 'server':
            kill(SERVER_PIDFILE, SIG, "Server reloaded.",
                 errmsg % 'Server', SERVER_RESTART, restart=True)
        elif service == 'portal':
            print(
                "Note: Portal usually doesnt't need to be reloaded unless you "
                "are debugging in interactive mode. If Portal was running in "
                "default Daemon mode, it cannot be restarted. In that case "
                "you have to restart it manually with 'evennia.py "
                "start portal'")
            kill(PORTAL_PIDFILE, SIG,
                 "Portal reloaded (or stopped, if it was in daemon mode).",
                 errmsg % 'Portal', PORTAL_RESTART, restart=True)
        else:
            # all
            # default mode, only restart server
            kill(SERVER_PIDFILE, SIG,
                 "Server reload.",
                 errmsg % 'Server', SERVER_RESTART, restart=True)
    elif mode == 'stop':
        # stop processes, avoiding reload
        if service == 'server':
            kill(SERVER_PIDFILE, SIG,
                 "Server stopped.", errmsg % 'Server', SERVER_RESTART)
        elif service == 'portal':
            kill(PORTAL_PIDFILE, SIG,
                 "Portal stopped.", errmsg % 'Portal', PORTAL_RESTART)
        else:
            kill(PORTAL_PIDFILE, SIG,
                 "Portal stopped.", errmsg % 'Portal', PORTAL_RESTART)
            kill(SERVER_PIDFILE, SIG,
                 "Server stopped.", errmsg % 'Server', SERVER_RESTART)
def main():
    """
    Run the evennia launcher main program.

    Parses the command line, then dispatches to exactly one action:
    game-dir creation (--init), settings helpers (--initsettings,
    --altsettings, --list), the dummyrunner, the interactive menu,
    a server operation (start/reload/stop), or pass-through of any
    other command to django-admin.
    """
    # set up argument parser
    parser = ArgumentParser(description=CMDLINE_HELP)
    parser.add_argument(
        '-v', '--version', action='store_true',
        dest='show_version', default=False,
        help="Show version info.")
    parser.add_argument(
        '-i', '--interactive', action='store_true',
        dest='interactive', default=False,
        help="Start given processes in interactive mode.")
    parser.add_argument(
        '-l', '--log', action='store_true',
        dest="logserver", default=False,
        help="Log Server data to log file.")
    parser.add_argument(
        '--init', action='store', dest="init", metavar="name",
        help="Creates a new game directory 'name' at the current location.")
    parser.add_argument(
        '--list', nargs='+', action='store', dest='listsetting', metavar="key",
        help=("List values for server settings. Use 'all' to list all "
              "available keys."))
    parser.add_argument(
        '--profiler', action='store_true', dest='profiler', default=False,
        help="Start given server component under the Python profiler.")
    parser.add_argument(
        '--dummyrunner', nargs=1, action='store', dest='dummyrunner',
        metavar="N",
        help="Tests a running server by connecting N dummy players to it.")
    parser.add_argument(
        '--settings', nargs=1, action='store', dest='altsettings',
        default=None, metavar="filename.py",
        help=("Start evennia with alternative settings file in "
              "gamedir/server/conf/."))
    parser.add_argument(
        '--initsettings', action='store_true', dest="initsettings",
        default=False,
        help="Creates a new, empty settings file as gamedir/server/conf/settings.py.")
    parser.add_argument(
        "option", nargs='?', default="noop",
        help="Operational mode: 'start', 'stop', 'restart' or 'menu'.")
    parser.add_argument(
        "service", metavar="component", nargs='?', default="all",
        help=("Server component to operate on: "
              "'server', 'portal' or 'all' (default)."))
    parser.epilog = (
        "Example django-admin commands: "
        "'migrate', 'flush', 'shell' and 'dbshell'. "
        "See the django documentation for more django-admin commands.")

    # parse_known_args() keeps unrecognized arguments so they can be
    # forwarded verbatim to django-admin below.
    args, unknown_args = parser.parse_known_args()

    # handle arguments
    option, service = args.option, args.service

    # make sure we have everything
    check_main_evennia_dependencies()

    # NOTE(review): parse_known_args() returns a Namespace, which is always
    # truthy, so this branch appears unreachable; confirm the intended check
    # (perhaps `len(sys.argv) == 1`).
    if not args:
        # show help pane
        print(CMDLINE_HELP)
        sys.exit()
    elif args.init:
        # initialization of game directory
        create_game_directory(args.init)
        print(CREATED_NEW_GAMEDIR.format(
            gamedir=args.init,
            settings_path=os.path.join(args.init, SETTINGS_PATH)))
        sys.exit()
    if args.show_version:
        # show the version info
        print(show_version_info(option == "help"))
        sys.exit()
    if args.altsettings:
        # use alternative settings file
        sfile = args.altsettings[0]
        global SETTINGSFILE, SETTINGS_DOTPATH
        SETTINGSFILE = sfile
        SETTINGS_DOTPATH = "server.conf.%s" % sfile.rstrip(".py")
        print("Using settings file '%s' (%s)." % (
            SETTINGSFILE, SETTINGS_DOTPATH))
    if args.initsettings:
        # create a new, empty settings file in the current game dir
        global GAMEDIR
        GAMEDIR = os.getcwd()
        try:
            create_settings_file(init=False)
            print(RECREATED_SETTINGS)
        except IOError:
            print(ERROR_INITSETTINGS)
        sys.exit()
    if args.dummyrunner:
        # launch the dummy runner
        init_game_directory(CURRENT_DIR, check_db=True)
        run_dummyrunner(args.dummyrunner[0])
    elif args.listsetting:
        # display all current server settings
        init_game_directory(CURRENT_DIR, check_db=False)
        list_settings(args.listsetting)
    elif option == 'menu':
        # launch menu for operation
        init_game_directory(CURRENT_DIR, check_db=True)
        run_menu()
    elif option in ('start', 'reload', 'stop'):
        # operate the server directly
        init_game_directory(CURRENT_DIR, check_db=True)
        server_operation(option, service, args.interactive, args.profiler, args.logserver)
    elif option != "noop":
        # pass-through to django manager
        check_db = False
        if option in ('runserver', 'testserver'):
            print(WARNING_RUNSERVER)
        if option == "shell":
            # to use the shell we need to initialize it first,
            # and this only works if the database is set up
            check_db = True
        init_game_directory(CURRENT_DIR, check_db=check_db)

        # `args` is rebound here from the parsed Namespace to the positional
        # argument list handed to django-admin.
        args = [option]
        kwargs = {}
        if service not in ("all", "server", "portal"):
            args.append(service)
        if unknown_args:
            # convert leftover "--flag" / "--key=value" tokens into
            # call_command keyword arguments; everything else is positional
            for arg in unknown_args:
                if arg.startswith("--"):
                    print("arg:", arg)
                    if "=" in arg:
                        arg, value = [p.strip() for p in arg.split("=", 1)]
                    else:
                        value = True
                    kwargs[arg.lstrip("--")] = [value]
                else:
                    args.append(arg)
        try:
            django.core.management.call_command(*args, **kwargs)
        except django.core.management.base.CommandError as exc:
            args = ", ".join(args)
            kwargs = ", ".join(["--%s" % kw for kw in kwargs])
            print(ERROR_INPUT.format(traceback=exc, args=args, kwargs=kwargs))
    else:
        # no input; print evennia info
        print(ABOUT_INFO)
if __name__ == '__main__':
    # Script entry point: start Evennia from the command line.
    main()
|
tommy-u/enable
|
enable/text_field_style.py
|
# Enthought library imports
from traits.api import HasTraits, Int, Bool
from kiva.trait_defs.api import KivaFont
from enable.colors import ColorTrait
class TextFieldStyle(HasTraits):
    """ This class holds style settings for rendering an EnableTextField.

        Color traits take RGBA tuples (components in 0..1) or named colors;
        font traits take kiva font-description strings.

        fixme: See docstring on EnableBoxStyle
    """

    # The color of the text
    text_color = ColorTrait((0,0,0,1.0))
    # The font for the text (must be monospaced!)
    font = KivaFont("Courier 12")

    # The color of highlighted text
    highlight_color = ColorTrait((.65,0,0,1.0))
    # The background color of highlighted items
    highlight_bgcolor = ColorTrait("lightgray")
    # The font for flagged text (must be monospaced!)
    highlight_font = KivaFont("Courier 14 bold")

    # The number of pixels between each line
    line_spacing = Int(3)
    # Space to offset text from the widget's border
    text_offset = Int(5)

    # Cursor properties
    cursor_color = ColorTrait((0,0,0,1))
    cursor_width = Int(2)

    # Drawing properties
    border_visible = Bool(False)
    border_color = ColorTrait((0,0,0,1))
    bgcolor = ColorTrait((1,1,1,1))
|
aledista/django-view-timer
|
django_view_timer/urls.py
|
import warnings
from django.core.urlresolvers import ResolverMatch
from django.core.urlresolvers import (
RegexURLPattern as DjangoRegexURLPattern,
RegexURLResolver
)
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django_view_timer.profiler import ViewTimeProfiler
from django_view_timer.settings import DJANGO_VIEW_TIMER_ENABLED
class RegexURLPattern(DjangoRegexURLPattern):
    """URL pattern whose resolved view callback is wrapped in a
    ViewTimeProfiler when timing is enabled."""

    def resolve(self, path):
        """Match *path* against this pattern.

        Returns a ResolverMatch carrying the (possibly profiled) view,
        or None when the pattern does not match.
        """
        match = self.regex.search(path)
        if match is None:
            return None
        named = match.groupdict()
        # Django convention: positional groups are only used when there
        # are no named groups at all.
        positional = () if named else match.groups()
        named.update(self.default_args)
        if DJANGO_VIEW_TIMER_ENABLED:
            view = ViewTimeProfiler(self.callback)
        else:
            view = self.callback
        return ResolverMatch(view, positional, named, self.name)
def url(regex, view, kwargs=None, name=None, prefix=''):
    """Drop-in replacement for django.conf.urls.url that routes resolved
    views through ViewTimeProfiler when profiling is enabled."""
    if isinstance(view, (list, tuple)):
        # An include(): unpack the (urlconf, app_name, namespace) triple.
        # NOTE(review): when enabled, the whole triple is wrapped in
        # ViewTimeProfiler before unpacking — presumably the profiler is
        # iterable-transparent; verify against its implementation.
        wrapped = ViewTimeProfiler(view) if DJANGO_VIEW_TIMER_ENABLED else view
        urlconf_module, app_name, namespace = wrapped
        return RegexURLResolver(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
    if isinstance(view, six.string_types):
        # Dotted-path views were deprecated in Django 1.x.
        warnings.warn(
            'Support for string view arguments to url() is deprecated and '
            'will be removed in Django 2.0 (got %s). Pass the callable '
            'instead.' % view,
            RemovedInDjango20Warning, stacklevel=2
        )
    if not view:
        raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
    if prefix:
        view = prefix + '.' + view
    return RegexURLPattern(regex, view, kwargs, name)
def patterns(prefix, *args):
    """Legacy patterns() helper: build a list of URL entries, applying
    *prefix* to tuple entries and to bare RegexURLPattern instances."""
    def _prepare(entry):
        # One entry -> one finished pattern (tuples become url() calls).
        if isinstance(entry, (list, tuple)):
            return url(prefix=prefix, *entry)
        if isinstance(entry, RegexURLPattern):
            entry.add_prefix(prefix)
        return entry

    return [_prepare(entry) for entry in args]
|
solarpermit/solarpermit
|
website/models/server.py
|
import datetime
from django.db import models
from django.conf import settings
#server variables that needed to be stored in db
class ServerVariable(models.Model):
    """Simple name/value store for server state kept in the database."""
    # Variable name (lookup key).
    name = models.CharField(max_length=64, blank=True, null=True)
    # Arbitrary string payload.
    value = models.TextField(blank=True, null=True)

    class Meta:
        app_label = 'website'

    @staticmethod
    def get(name):
        """Return the stored value for *name*, or None if no row exists."""
        try:
            server_variable = ServerVariable.objects.get(name=name)
        except ServerVariable.DoesNotExist:
            # Narrowed from a bare `except:`: only a missing row maps to
            # None; unexpected errors (database failures, duplicates) now
            # propagate instead of being silently swallowed.
            return None
        return server_variable.value

    @staticmethod
    def set(name, value):
        """Create or update variable *name* and return the saved instance."""
        try:
            server_variable = ServerVariable.objects.get(name=name)
        except ServerVariable.DoesNotExist:
            server_variable = ServerVariable(name=name)
        server_variable.value = value
        server_variable.save()
        return server_variable
class MigrationHistory(models.Model):
    """Maps a row in a legacy source table to the row it was migrated to,
    so repeated migration runs are idempotent and traceable."""
    jurisdiction_id = models.IntegerField(blank=True, null=True, db_index=True)
    source_table = models.CharField(max_length=64, blank=True, null=True, db_index=True)
    source_id = models.IntegerField(blank=True, null=True, db_index=True)
    target_table = models.CharField(max_length=64, blank=True, null=True, db_index=True)
    target_id = models.IntegerField(blank=True, null=True, db_index=True)
    notes = models.TextField(blank=True, null=True)
    notes2 = models.TextField(blank=True, null=True)
    create_datetime = models.DateTimeField(auto_now_add=True)
    modify_datetime = models.DateTimeField(auto_now=True)

    class Meta:
        app_label = 'website'

    @staticmethod
    def save_history(jurisdiction, source_table, source_id, target_table, target_id, notes='', notes2=''):
        """Record (or update) a source->target mapping and return it."""
        history, created = MigrationHistory.objects.get_or_create(source_table=source_table, source_id=source_id, target_table=target_table, target_id=target_id)
        # `is not None` instead of `!= None` (identity check for None).
        if jurisdiction is not None:
            history.jurisdiction_id = jurisdiction.id
        history.notes = notes
        history.notes2 = notes2
        history.save()
        return history

    @staticmethod
    def get_target_id(source_table, source_id, target_table):
        """Return the migrated target id for a source row, or None if the
        row was never migrated."""
        try:
            history = MigrationHistory.objects.get(source_table=source_table, source_id=source_id, target_table=target_table)
            return history.target_id
        except MigrationHistory.DoesNotExist:
            # Narrowed from a bare `except:` so real errors surface.
            return None
|
WaveBlocks/WaveBlocksND
|
WaveBlocksND/Plot/stemcf3d.py
|
"""The WaveBlocks Project
Function for stem-plotting functions of the type f:IxI -> C
with abs(f) as z-value and phase(f) as color code.
This function makes a three dimensional stem plot.
@author: R. Bourquin
@copyright: Copyright (C) 2012, 2014, 2016 R. Bourquin
@license: Modified BSD License
"""
from numpy import real, squeeze
import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib.pyplot import gcf
from WaveBlocksND.Plot.color_map import color_map
def stemcf3d(gridu, gridv, phase, modulus, darken=None, fig=None, markerp="o", **kwargs):
    r"""Stemplot the modulus of a complex valued function :math:`f:I\times I -> \mathbb{C}` together with its
    phase in a color coded fashion. Additional keyword arguments are passed to the plot function.

    :param gridu: The x components of the grid nodes of the real domain grid :math:`\Gamma`
    :param gridv: The y components of the grid nodes of the real domain grid :math:`\Gamma`
    :param phase: The phase of the complex domain result :math:`f(\Gamma)`
    :param modulus: The modulus of the complex domain result :math:`f(\Gamma)`
    :param darken: Whether to take into account the modulus of the data to darken colors.
    :param fig: The figure instance used for plotting.
    :param markerp: The shape of the stemmed markers.
    """
    # Color mapping
    # NOTE(review): the first argument here is `gridv`, not `gridu` — looks
    # suspicious (color_map of only the y-components); confirm against the
    # color_map signature.
    rgb_colors = squeeze(color_map(gridv, phase=phase, modulus=modulus, darken=darken))

    # Plot to the given axis instance or retrieve the current one
    if fig is None:
        fig = gcf()
    axes = fig.add_subplot(1, 1, 1, projection='3d')

    # One vertical stem per grid node: a line from z=0 up to z=modulus,
    # with the marker drawn only at the top end (markevery=(1, 1)).
    for ui, vi, wi, col in zip(gridu, gridv, modulus, rgb_colors):
        line = art3d.Line3D(*list(zip((ui, vi, 0), (ui, vi, wi))), marker=markerp, markevery=(1, 1), color=col)
        axes.add_line(line)

    # Fit the axes tightly around the data.
    axes.set_xlim3d(real(gridu).min(), real(gridu).max())
    axes.set_ylim3d(real(gridv).min(), real(gridv).max())
    axes.set_zlim3d(real(modulus).min(), real(modulus).max())
|
containers-ftw/apps
|
tests/circle_urls.py
|
#!/usr/bin/env python
'''
circle_urls.py rewrites the links inside a site's .html files so that
references to sibling pages carry an explicit .html extension, and points
the logo image at its absolute GitHub Pages URL.
'''
import sys
import os
from glob import glob

site_dir = os.path.abspath(sys.argv[1])
print("Using site directory %s" % site_dir)

html_files = glob("%s/*.html" % site_dir)

# Bare page names (extension stripped) that may appear as link targets.
page_names = [os.path.basename(path).replace('.html', '') for path in html_files]

for html_path in html_files:
    with open(html_path, 'r') as handle:
        text = handle.read()
    # Append ".html" to every quoted link that targets a sibling page.
    for page in page_names:
        text = text.replace('%s"' % page, '%s.html"' % page)
    text = text.replace('/images/logo/logo.png',
                        'https://sci-f.github.io/apps/assets/img/logo/logo.png')
    with open(html_path, 'w') as handle:
        handle.write(text)
|
aldenjenkins/foobargamingwebsite
|
paypal/standard/ipn/south_migrations/0006_auto__chg_field_paypalipn_custom__chg_field_paypalipn_transaction_subj.py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: widen ``PayPalIPN.custom`` and
    ``PayPalIPN.transaction_subject`` from 255 to 256 characters."""

    def forwards(self, orm):
        """Apply the migration (grow both columns to 256 chars)."""
        # Changing field 'PayPalIPN.custom'
        db.alter_column(u'paypal_ipn', 'custom', self.gf('django.db.models.fields.CharField')(max_length=256))
        # Changing field 'PayPalIPN.transaction_subject'
        db.alter_column(u'paypal_ipn', 'transaction_subject', self.gf('django.db.models.fields.CharField')(max_length=256))

    def backwards(self, orm):
        """Revert the migration (shrink both columns back to 255 chars).
        Values longer than 255 chars may be truncated by the database."""
        # Changing field 'PayPalIPN.custom'
        db.alter_column(u'paypal_ipn', 'custom', self.gf('django.db.models.fields.CharField')(max_length=255))
        # Changing field 'PayPalIPN.transaction_subject'
        db.alter_column(u'paypal_ipn', 'transaction_subject', self.gf('django.db.models.fields.CharField')(max_length=255))

    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        u'ipn.paypalipn': {
            'Meta': {'object_name': 'PayPalIPN', 'db_table': "u'paypal_ipn'"},
            'address_city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'address_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'address_country_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'address_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'address_state': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'address_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'address_street': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount_per_cycle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'auction_buyer_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'auction_closing_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'auction_multi_item': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'auth_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'auth_exp': ('django.db.models.fields.CharField', [], {'max_length': '28', 'blank': 'True'}),
            'auth_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
            'auth_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'business': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'case_creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'case_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'case_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'charset': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'currency_code': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
            'custom': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'exchange_rate': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '16', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'flag_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'flag_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'for_auction': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'from_view': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'handling_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initial_payment_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'invoice': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'ipaddress': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True', 'blank': 'True'}),
            'item_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'item_number': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'mc_amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_currency': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
            'mc_fee': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_handling': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'memo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'mp_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'next_payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'notify_version': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'num_cart_items': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'option_name1': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'option_name2': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'outstanding_balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'parent_txn_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
            'payer_business_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'payer_email': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'payer_id': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
            'payer_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'payment_cycle': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'payment_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'payment_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'payment_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'pending_reason': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'period1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'period2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'period3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'period_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'product_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'product_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'profile_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'protection_eligibility': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
            'query': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'reason_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'reattempt': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'receipt_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
            'receiver_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'recur_times': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'recurring': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'recurring_payment_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'remaining_settle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'residence_country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
            'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'retry_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'rp_invoice_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'settle_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'settle_currency': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'subscr_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'subscr_effective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'subscr_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
            'tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'test_ipn': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'time_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'transaction_entity': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'transaction_subject': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'txn_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'txn_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'verify_sign': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        }
    }

    complete_apps = ['ipn']
|
MwanzanFelipe/rockletonfortune
|
zillions/urls.py
|
from django.conf.urls import *
from . import views
from . import z_queries
from rockletonfortune import settings
from django.contrib.auth.views import login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth import views as auth_views
from django.views.static import serve
from django.db import models
from .views import *
from .z_queries import *
# URL routes for the zillions app. All views except the test route are
# wrapped in login_required.
# NOTE(review): anchoring is inconsistent — some patterns use r'^...$',
# others plain '...$' without the raw prefix or leading ^; since Django
# resolvers use search(), unanchored patterns can match mid-path. Confirm
# this is intended before normalizing (adding ^ would change matching).
urlpatterns = [
    # Show index page
    url(r'^$', login_required(views.index), name='index'),
    # Show list of transactions
    url(r'ajax/transactions/$', login_required(z_queries.q_transaction_data_json), name='q_transaction_data_json'),
    url(r'transactions/viz/$', login_required(views.viz_transaction_list), name = 'viz_transaction_list'),
    url(r'transactions/$', login_required(views.transaction_list), name = 'transaction_list'),
    # Add new transactions
    url(r'transactions/new/$', login_required(AddTransactionView.as_view()), name = 'AddTransactionView'),
    # Edit transactions
    url(r'^transactions/edit/(?P<pk>\d+)/$', login_required(UpdateTransactionView.as_view()), name= 'UpdateTransactionView'),
    #FUTURE: There's got to be a better way to handle this 3 part import workflow
    #Point to the file to import
    url(r'transactions/import/$', login_required(views.import_transactions), name = 'transaction_import_list'),
    #Select transactions to import as new or to replace existing
    url(r'transactions/import/input/$', login_required(views.import_transaction_input), name = 'transaction_import_input'),
    #Enter the percentage per transaction
    url(r'transactions/import/save/$', login_required(views.import_transaction_save), name = 'transaction_import_save'),
    # Export transactions to csv for Google Sheets
    url(r'transactions/csv/$', login_required(views.export_csv), name = 'export_csv'),
    # Edit the weekly/monthly allocation per secondary transaction
    url(r'budget/edit/$', login_required(views.budget_edit), name = 'budget_edit'),
    #Fetch the weekly spend summary per secondary and primary categories
    url('ajax/budget/$', login_required(z_queries.q_budget_view_json), name='q_budget_view_json'),
    #Template to show weekly spend summary per secondary and primary categories
    url(r'budget/$', login_required(views.budget_view), name = 'budget_view'),
    # Media root for js libraries (d3, jquery, css, etc.)
    #url(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
    # Test Area
    url('test/$', views.testview, kwargs={'variable': 'there'}, name='test'),
    url('transfer/$', views.transfer_amount, name='transfer'),
    url(r'^login/$', auth_views.login, name = 'login'),
    url(r'^logout/$', auth_views.logout, name = 'logout'),
]
|
datapythonista/pandas
|
pandas/core/ops/mask_ops.py
|
"""
Ops for masked arrays.
"""
from typing import (
Optional,
Union,
)
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
def kleene_or(
    left: Union[bool, np.ndarray],
    right: Union[bool, np.ndarray],
    left_mask: Optional[np.ndarray],
    right_mask: Optional[np.ndarray],
):
    """
    Boolean ``or`` using Kleene logic.

    Values are NA where we have ``NA | NA`` or ``NA | False``.
    ``NA | True`` is considered True.

    Parameters
    ----------
    left, right : ndarray, NA, or bool
        The values of the array.
    left_mask, right_mask : ndarray, optional
        The masks. Only one of these may be None, which implies that
        the associated `left` or `right` value is a scalar.

    Returns
    -------
    result, mask: ndarray[bool]
        The result of the logical or, and the new mask.
    """
    # `|` is commutative, so normalize such that the array/mask pair is
    # always on the left-hand side.
    if left_mask is None:
        return kleene_or(right, left, right_mask, left_mask)

    assert isinstance(left, np.ndarray)
    raise_for_nan(right, method="or")

    # NA contributes nothing to the values; the mask carries the unknowns.
    result = left.copy() if right is libmissing.NA else left | right

    if right_mask is None:
        # Scalar `right`.
        if right is True:
            # True | anything is True — never unknown.
            mask = np.zeros_like(left_mask)
        elif right is libmissing.NA:
            # Known only where left is a known True.
            mask = (~left & ~left_mask) | left_mask
        else:
            # right is False: result is exactly as (un)known as left.
            mask = left_mask.copy()
    else:
        # Array `right`: output is unknown where (False | NA), (NA | False),
        # or (NA | NA).
        left_false = ~(left | left_mask)
        right_false = ~(right | right_mask)
        mask = (
            (left_false & right_mask)
            | (right_false & left_mask)
            | (left_mask & right_mask)
        )
    return result, mask
def kleene_xor(
    left: Union[bool, np.ndarray],
    right: Union[bool, np.ndarray],
    left_mask: Optional[np.ndarray],
    right_mask: Optional[np.ndarray],
):
    """
    Boolean ``xor`` using Kleene logic.

    This is the same as ``or``, with the following adjustments

    * True, True -> False
    * True, NA -> NA

    Parameters
    ----------
    left, right : ndarray, NA, or bool
        The values of the array.
    left_mask, right_mask : ndarray, optional
        The masks. Only one of these may be None, which implies that
        the associated `left` or `right` value is a scalar.

    Returns
    -------
    result, mask: ndarray[bool]
        The result of the logical xor, and the new mask.
    """
    # `^` is commutative; normalize so the array/mask pair is on the left.
    if left_mask is None:
        return kleene_xor(right, left, right_mask, left_mask)

    raise_for_nan(right, method="xor")
    if right is libmissing.NA:
        # NA ^ x is always unknown; fill the values with a placeholder.
        result = np.zeros_like(left)
    else:
        # error: Incompatible types in assignment (expression has type
        # "Union[bool, Any]", variable has type "ndarray")
        result = left ^ right  # type: ignore[assignment]

    # xor is unknown whenever either operand is unknown.
    if right_mask is not None:
        mask = left_mask | right_mask
    elif right is libmissing.NA:
        mask = np.ones_like(left_mask)
    else:
        mask = left_mask.copy()

    return result, mask
def kleene_and(
    left: Union[bool, libmissing.NAType, np.ndarray],
    right: Union[bool, libmissing.NAType, np.ndarray],
    left_mask: Optional[np.ndarray],
    right_mask: Optional[np.ndarray],
):
    """
    Boolean ``and`` using Kleene logic.

    Values are ``NA`` for ``NA & NA`` or ``True & NA``.

    Parameters
    ----------
    left, right : ndarray, NA, or bool
        The values of the array.
    left_mask, right_mask : ndarray, optional
        The masks. Only one of these may be None, which implies that
        the associated `left` or `right` value is a scalar.

    Returns
    -------
    result, mask: ndarray[bool]
        The result of the logical and, and the new mask.
    """
    if left_mask is None:
        # Normalize so the array operand is always `left` (and commutes).
        return kleene_and(right, left, right_mask, left_mask)
    assert isinstance(left, np.ndarray)

    raise_for_nan(right, method="and")

    right_is_na = right is libmissing.NA
    result = np.zeros_like(left) if right_is_na else left & right

    if right_mask is not None:
        # Array & array: a known False on either side makes the output
        # known; otherwise the output is unknown if either side is unknown.
        left_false = ~(left | left_mask)
        right_false = ~(right | right_mask)
        mask = (left_mask & ~right_false) | (right_mask & ~left_false)
    elif right_is_na:
        # Scalar NA: True & NA -> NA, False & NA -> False.
        mask = (left & ~left_mask) | left_mask
    elif right is False:
        # Scalar False: every entry collapses to a known False.
        mask = np.zeros_like(left_mask)
    else:
        # Scalar True: the existing mask carries over (copied).
        mask = left_mask.copy()
    return result, mask
def raise_for_nan(value, method: str):
    """Raise if *value* is a floating-point NaN scalar.

    NaN is not a valid missing-value marker for the Kleene logical ops;
    callers must use ``NA`` instead.
    """
    if not lib.is_float(value):
        return
    if np.isnan(value):
        raise ValueError(f"Cannot perform logical '{method}' with floating NaN")
|
glassesfactory/Shimehari
|
shimehari/core/manage/commands/create.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
===============================
Shimehari.core.manage.commands.create
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
アプリケーションを新たに作成する create コマンド
各コマンドモジュールは共通インターフェースとして
Command クラスを持ちます。
===============================
"""
import os
import sys
import errno
import shutil
from optparse import make_option
import shimehari
from shimehari.core.manage import CreatableCommand
from shimehari.core.helpers import importFromString
from shimehari.core.exceptions import CommandError
# Log record formats written into the generated config.py:
# `debugFormat` is the verbose development format, `outputFormat` the
# leaner production format.
debugFormat = ('-' * 80 + '\\n' + '%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\\n' + '%(message)s\\n' + '-' * 80)
outputFormat = ('%(asctime)s %(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\\n' + '%(message)s\\n' + '-' * 80)
u"""
===============================
::pkg:: Shimehari.core.manage.commands.create
Command
~~~~~~~
コマンドの実装
===============================
"""
class Command(CreatableCommand):
    """``create`` command: scaffold a new Shimehari application.

    Copies the bundled (or user-supplied) project template into the target
    directory, creates the standard ``views``/``assets``/``log`` directories
    and generates a ``config.py`` from the packaged template.
    """

    name = 'create'
    summary = 'Create Shimehari Application'
    usage = "Usage: %prog APPLICATION_NAME [OPTIONS]"
    option_list = CreatableCommand.option_list + (
        make_option('--path', '-p', action='store', type='string', dest='path', help='target create path'),
        make_option('--template', '-t', action='store', type='string', dest='template', help='using project template')
    )

    def __init__(self):
        super(Command, self).__init__()

    def handle(self, appDir='app', *args, **options):
        """Create the application skeleton.

        :param appDir: name of the application package to create.
        :raises CommandError: if the app already exists or the target
                              directory cannot be created.
        """
        # If the app package is already importable, it already exists.
        try:
            importFromString(appDir)
        except ImportError:
            pass
        else:
            raise CommandError('%s mou aru' % appDir)

        path = options.get('path')
        if path is None:
            appRootDir = os.path.join(os.getcwd(), appDir)
            try:
                os.makedirs(appRootDir)
            # BUGFIX: was the Python 2-only form "except OSError, error",
            # which is a SyntaxError on Python 3.
            except OSError as error:
                if error.errno == errno.EEXIST:
                    msg = '%s is already exists' % appRootDir
                else:
                    msg = error
                raise CommandError(msg)
        else:
            appRootDir = os.path.abspath(os.path.expanduser(path))
            if not os.path.exists(appRootDir):
                raise CommandError("auau")

        template = options.get('template')
        if template is None:
            appTemplateDir = os.path.join(shimehari.__path__[0], 'core', 'conf', 'app_template')
        else:
            appTemplateDir = template

        # Mirror the template tree under appRootDir, renaming 'app_name'
        # placeholders and skipping compiled artifacts.
        prefixLen = len(appTemplateDir) + 1
        for root, dirs, files in os.walk(appTemplateDir):
            pathRest = root[prefixLen:]
            relativeDir = pathRest.replace('app_name', 'app')
            if relativeDir:
                targetDir = os.path.join(appRootDir, relativeDir)
                if not os.path.exists(targetDir):
                    os.mkdir(targetDir)
            for dirname in dirs[:]:
                # Don't descend into hidden directories (.git etc.).
                if dirname.startswith('.'):
                    dirs.remove(dirname)
            for filename in files[:]:
                if filename.endswith(('.pyo', '.pyc', '.py.class')):
                    continue
                oldPath = os.path.join(root, filename)
                newPath = os.path.join(appRootDir, relativeDir, filename.replace('app_name', 'app'))
                self.readAndCreateFile(oldPath, newPath)

        # NOTE(review): these live inside the new app root; confirm they
        # shouldn't instead be created next to config.py.
        self.createDirectory(appRootDir, 'views')
        self.createDirectory(appRootDir, 'assets')
        self.createDirectory(appRootDir, 'log')

        # Generate config.py in the current working directory from the
        # packaged template, interpolating app name and log formats.
        confOrgPath = os.path.join(shimehari.__path__[0], 'core', 'conf', 'config.org.py')
        newConfPath = os.path.join(os.getcwd(), 'config.py')
        self.readAndCreateFileWithRename(confOrgPath, newConfPath,
                                         (appDir, appDir, debugFormat, outputFormat))
        sys.stdout.write("New App Create Complete. enjoy!\n")

    def readAndCreateFile(self, old, new):
        """Copy the template file *old* to *new*, preserving its mode.

        :raises CommandError: if *new* already exists.
        """
        if os.path.exists(new):
            raise CommandError('already... %s' % new)
        with open(old, 'r') as template:
            content = template.read()
        with open(new, 'w') as newFile:
            newFile.write(content)
            sys.stdout.write(u"Creating: %s\n" % new)
        try:
            shutil.copymode(old, new)
            self.toWritable(new)
        except OSError:
            sys.stderr.write('permission error')

    def readAndCreateFileWithRename(self, old, new, name):
        """Copy *old* to *new*, interpolating *name* into '%s' placeholders.

        :raises CommandError: if *new* already exists.
        """
        if os.path.exists(new):
            raise CommandError('Controller already exists.')
        with open(old, 'r') as template:
            content = template.read()
            if '%s' in content:
                content = content % name
        with open(new, 'w') as newFile:
            newFile.write(content)
            sys.stdout.write("Creating: %s\n" % new)
        try:
            shutil.copymode(old, new)
            self.toWritable(new)
        except OSError:
            sys.stderr.write('can not setting permission')

    def createDirectory(self, rootDir, dirname):
        """Create *dirname* under *rootDir* if missing, logging the creation."""
        targetName = os.path.join(rootDir, dirname)
        if not os.path.exists(targetName):
            os.mkdir(targetName)
            sys.stdout.write("Creating: %s\n" % targetName)


# Register the command with the manage framework (import side effect kept
# from the original module).
Command()
|
mbdriscoll/asp-old
|
tests/asp_module_tests.py
|
import unittest2 as unittest
import asp.jit.asp_module as asp_module
import asp.codegen.cpp_ast as cpp_ast
from mock import Mock
class TimerTest(unittest.TestCase):
    def test_timer(self):
        # Timing support is currently disabled; this placeholder keeps the
        # suite structure intact until the feature returns.
        pass
        # mod = asp_module.ASPModule()
        # mod.add_function("void test(){;;;;}", "test")
        # # mod.test()
        # self.failUnless("test" in mod.times.keys())
class ASPDBTests(unittest.TestCase):
    """Unit tests for the ASPDB persistence layer.

    The real sqlite connection is closed and replaced with a Mock so the
    tests can assert the exact SQL strings ASPDB emits.
    """

    def test_creating_db(self):
        # Smoke test: constructing must not raise.
        db = asp_module.ASPDB("test_specializer")

    def test_create_db_if_nonexistent(self):
        db = asp_module.ASPDB("test")
        self.assertTrue(db.connection)

    def test_create_table(self):
        db = asp_module.ASPDB("test")
        db.close()  # close the real connection so we can mock it out
        db.connection = Mock()
        db.create_specializer_table()
        db.connection.execute.assert_called_with(
            'create table test (fname text, variant text, key text, perf real)')

    def test_insert(self):
        db = asp_module.ASPDB("test")
        db.close()  # close the real connection so we can mock it out
        db.connection = Mock()
        db.table_exists = Mock(return_value = True)
        db.create_specializer_table()
        db.insert("func", "func", "KEY", 4.321)
        db.connection.execute.assert_called_with(
            'insert into test values (?,?,?,?)', ("func", "func", "KEY", 4.321))

    def test_create_if_insert_into_nonexistent_table(self):
        db = asp_module.ASPDB("test")
        db.close()  # close the real connection so we can mock it out
        db.connection = Mock()
        # this is kind of a complicated situation. we want the cursor to
        # return an array when fetchall() is called on it, and we want this
        # cursor to be created when the mock connection is asked for a cursor
        mock_cursor = Mock()
        mock_cursor.fetchall.return_value = []
        db.connection.cursor.return_value = mock_cursor
        db.create_specializer_table = Mock()
        db.insert("func", "v1", "KEY", 4.321)
        self.assertTrue(db.create_specializer_table.called)

    def test_get(self):
        db = asp_module.ASPDB("test")
        db.close()  # close the real connection so we can mock it out
        db.connection = Mock()
        db.table_exists = Mock(return_value = True)
        db.create_specializer_table()
        # see note about mocks in test_create_if...
        mock_cursor = Mock()
        mock_cursor.fetchall.return_value = ['hello']
        db.connection.cursor.return_value = mock_cursor
        db.create_specializer_table = Mock()
        db.get("func")
        mock_cursor.execute.assert_called_with("select * from test where fname=?",
                                               ("func",))

    def test_update(self):
        db = asp_module.ASPDB("test")
        db.close()  # close the real connection so we can mock it out
        db.connection = Mock()
        db.table_exists = Mock(return_value = True)
        db.update("foo", "foo_v1", "KEY", 3.21)
        db.connection.execute.assert_called_with("update test set perf=? where fname=? and variant=? and key=?",
                                                 (3.21, "foo", "foo_v1", "KEY"))

    def test_delete(self):
        db = asp_module.ASPDB("test")
        db.close()  # close the real connection so we can mock it out
        db.connection = Mock()
        db.table_exists = Mock(return_value = True)
        db.delete("foo", "foo_v1", "KEY")
        db.connection.execute.assert_called_with("delete from test where fname=? and variant=? and key=?",
                                                 ("foo", "foo_v1", "KEY"))
class SpecializedFunctionTests(unittest.TestCase):
    """Tests for SpecializedFunction variant management and dispatch.

    Backends and databases are mocked; the trick used throughout is that
    the mock backend module's compile() returns the mock itself, so the
    "compiled" module can be inspected after a call.
    """

    def test_creating(self):
        a = asp_module.SpecializedFunction("foo", None, Mock())

    def test_add_variant(self):
        mock_backend = asp_module.ASPBackend(Mock(), None, Mock())
        a = asp_module.SpecializedFunction("foo", mock_backend, Mock())
        a.add_variant("foo_1", "void foo_1(){return;}")
        self.assertEqual(a.variant_names[0], "foo_1")
        self.assertEqual(len(a.variant_funcs), 1)
        # also check to make sure the backend added the function
        self.assertTrue(mock_backend.module.add_to_module.called)
        # adding a duplicate variant name must raise
        self.assertRaises(Exception, a.add_variant, "foo_1", None)

    def test_add_variant_at_instantiation(self):
        mock_backend = asp_module.ASPBackend(Mock(), None, Mock())
        a = asp_module.SpecializedFunction("foo", mock_backend, Mock(),
                                           ["foo_1"], ["void foo_1(){return;}"])
        self.assertEqual(len(a.variant_funcs), 1)
        self.assertTrue(mock_backend.module.add_to_module.called)

    def test_call(self):
        # this is a complicated situation. we want the backend to have a fake
        # module, and that fake module should return a fake compiled module.
        # we'll cheat by just returning itself.
        mock_backend_module = Mock()
        mock_backend_module.compile.return_value = mock_backend_module
        mock_backend = asp_module.ASPBackend(mock_backend_module, None, Mock())
        mock_db = Mock()
        mock_db.get.return_value = []
        a = asp_module.SpecializedFunction("foo", mock_backend, mock_db)
        a.add_variant("foo_1", "void foo_1(){return;}")
        # test a call
        a()
        # it should call foo() on the backend module
        self.assertTrue(mock_backend_module.foo_1.called)

    def test_calling_with_multiple_variants(self):
        # this is a complicated situation. we want the backend to have a fake
        # module, and that fake module should return a fake compiled module.
        # we'll cheat by just returning itself.
        mock_backend_module = Mock()
        mock_backend_module.compile.return_value = mock_backend_module
        mock_backend = asp_module.ASPBackend(mock_backend_module, None, Mock())
        mock_db = Mock()
        mock_db.get.return_value = []
        a = asp_module.SpecializedFunction("foo", mock_backend, mock_db)
        a.add_variant("foo_1", "void foo_1(){return;}")
        a.add_variant("foo_2", "void foo_2(){}")
        # test 2 calls
        a()
        # ensure the second one sees that foo_1 was called the first time
        mock_db.get.return_value = [["foo", "foo_1", None, None]]
        a()
        # it should call both variants on the backend module
        self.assertTrue(mock_backend_module.foo_1.called)
        self.assertTrue(mock_backend_module.foo_2.called)

    def test_pick_next_variant(self):
        mock_db = Mock()
        mock_db.get.return_value = []
        a = asp_module.SpecializedFunction("foo", Mock(), mock_db)
        a.add_variant("foo_1", "void foo_1(){return;}")
        a.add_variant("foo_2", "void foo_2(){}")
        self.assertEqual(a.pick_next_variant(), "foo_1")
        # now if one has run
        mock_db.get.return_value = [[None, "foo_1", None, None]]
        self.assertEqual(a.pick_next_variant(), "foo_2")
        # now if both have run
        mock_db.get.return_value = [[None, "foo_1", None, 1.0],
                                    [None, "foo_2", None, 2.0]]
        self.assertEqual(a.pick_next_variant(), "foo_1")
class HelperFunctionTests(unittest.TestCase):
    def test_creating(self):
        # Construction alone must not raise.
        asp_module.HelperFunction("foo", "void foo(){}", Mock())

    def test_call(self):
        # The backend module's compile() returns the mock itself, so the
        # "compiled" module is the same object we can inspect afterwards.
        fake_module = Mock()
        fake_module.compile.return_value = fake_module
        backend = asp_module.ASPBackend(fake_module, None, Mock())
        helper = asp_module.HelperFunction("foo", "void foo(){}", backend)
        helper()
        # Calling the helper must dispatch to foo() on the backend module.
        self.assertTrue(fake_module.foo.called)
class ASPModuleMiscTests(unittest.TestCase):
    def test_generate(self):
        # generate() must delegate to every registered backend's module.
        module = asp_module.ASPModule()
        backend_stub = Mock()
        module.backends["c++"] = backend_stub
        module.generate()
        self.assertTrue(backend_stub.module.generate.called)
class SingleFuncTests(unittest.TestCase):
    """End-to-end tests that really compile and call a single C function."""

    def test_adding_function(self):
        m = asp_module.ASPModule()
        m.add_function("foo", "void foo(){return;}")
        self.assertTrue(isinstance(m.specialized_functions["foo"],
                                   asp_module.SpecializedFunction))

    def test_adding_and_calling(self):
        m = asp_module.ASPModule()
        m.add_function("foo", "PyObject* foo(){Py_RETURN_TRUE;}")
        self.assertTrue(m.foo())

    def test_db_integration(self):
        m = asp_module.ASPModule()
        m.add_function("foo", "void foo(){return;}")
        m.foo()
        # Now let's check the db for what's inside: one timing row expected.
        self.assertEqual(len(m.db.get("foo")), 1)

    def test_helper_function(self):
        m = asp_module.ASPModule()
        m.add_helper_function("foo_helper", "PyObject* foo_helper(){Py_RETURN_TRUE;}")
        self.assertTrue("foo_helper" in m.specialized_functions)
        self.assertTrue(m.foo_helper())
class MultipleFuncTests(unittest.TestCase):
    """Tests for functions registered with several code variants."""

    def test_adding_multiple_variants(self):
        mod = asp_module.ASPModule()
        mod.add_function("foo", ["void foo_1(){};", "void foo_2(){};"],
                         ["foo_1", "foo_2"])
        self.assertTrue("foo_1" in mod.specialized_functions["foo"].variant_names)

    def test_running_multiple_variants(self):
        mod = asp_module.ASPModule()
        # NOTE(review): duplicated construction below — the first instance is
        # discarded; presumably a copy-paste slip, harmless but confirm.
        mod = asp_module.ASPModule()
        mod.add_function("foo", ["void foo_1(){};", "void foo_2(){};"],
                         ["foo_1", "foo_2"])
        mod.foo()
        mod.foo()
        # Two calls -> two timing rows, one per variant.
        self.assertEqual(len(mod.db.get("foo")), 2)

    # The following block is a string literal, not code: a set of disabled
    # tests for an older ASPModule API, kept verbatim for reference.
    """
    def test_adding_multiple_versions(self):
        mod = asp_module.ASPModule()
        mod.add_function_with_variants(
            ["void test_1(){return;}", "void test_2(){return;}"],
            "test",
            ["test_1", "test_2"])
        mod.compile()
        self.failUnless("test" in mod.compiled_methods.keys())
        self.failUnless("test_1" in mod.compiled_methods["test"])
    def test_running_multiple_variants(self):
        mod = asp_module.ASPModule()
        mod.add_function_with_variants(
            ["PyObject* test_1(PyObject* a){return a;}",
             "PyObject* test_2(PyObject* b){Py_RETURN_NONE;}"],
            "test",
            ["test_1", "test_2"])
        result1 = mod.test("a")
        result2 = mod.test("a")
        self.assertEqual(set([result1,result2]) == set(["a", None]), True)
        self.assertNotEqual(
            mod.compiled_methods["test"].database.get_oracular_best("test"),
            False)
    def test_running_multiple_variants_and_inputs(self):
        mod = asp_module.ASPModule()
        key_func = lambda name, *args, **_: (name, args)
        mod.add_function_with_variants(
            ["void test_1(PyObject* a, PyObject* b){ long c = PyInt_AS_LONG(a); for(; c > 0; c--) b = PyNumber_Add(b,a); }",
             "void test_2(PyObject* a, PyObject* b){ long c = PyInt_AS_LONG(b); for(; c > 0; c--) a = PyNumber_Add(a,b); }"] ,
            "test",
            ["test_1", "test_2"],
            key_func )
        val = 2000000
        mod.test(1,val)
        mod.test(1,val)
        mod.test(val,1)
        self.assertNotEqual(
            mod.compiled_methods["test"].database.get_oracular_best(key_func("test",1,val)), # best time found for this input
            False)
        self.assertEqual(
            mod.compiled_methods["test"].database.get_oracular_best(key_func("test",7,7)), # this input never previously tried
            False)
        self.assertEqual(
            mod.compiled_methods["test"].database.get_oracular_best(key_func("test",val,1)), # only one variant timed for this input
            False)
        mod.test(val,1)
        self.assertNotEqual(
            mod.compiled_methods["test"].database.get_oracular_best(key_func("test",val,1)), # now both variants have been timed
            False)
        self.assertEqual(mod.compiled_methods["test"].database.get_oracular_best(key_func("test",1,val)), 'test_1')
        self.assertEqual(mod.compiled_methods["test"].database.get_oracular_best(key_func("test",val,1)), 'test_2')
    def test_adding_variants_incrementally(self):
        mod = asp_module.ASPModule()
        key_func = lambda name, *args, **_: (name, args)
        mod.add_function_with_variants(
            ["PyObject* test_1(PyObject* a, PyObject* b){ long c = PyInt_AS_LONG(a); for(; c > 0; c--) b = PyNumber_Add(b,a); return a;}"],
            "test",
            ["test_1"],
            key_func )
        mod.test(1,20000)
        self.assertNotEqual(
            mod.compiled_methods["test"].database.get_oracular_best(key_func("test",1,20000)), # best time found for this input
            False)
        mod.add_function_with_variants(
            ["PyObject* test_2(PyObject* a, PyObject* b){ long c = PyInt_AS_LONG(b); for(; c > 0; c--) a = PyNumber_Add(a,b); return b;}"] ,
            "test",
            ["test_2"] )
        self.assertEqual(
            mod.compiled_methods["test"].database.get_oracular_best(key_func("test",1,20000)), # time is no longer definitely best
            False)
        mod.test(1,20000)
        mod.test(1,20000)
        self.assertNotEqual(
            mod.compiled_methods["test"].database.get_oracular_best(key_func("test",1,20000)), # best time found again
            False)
        self.assertEqual(mod.compiled_methods["test"].database.get_oracular_best(key_func("test",1,20000)), 'test_1')
    def test_pickling_variants_data(self):
        mod = asp_module.ASPModule()
        key_func = lambda name, *args, **_: (name, args)
        mod.add_function_with_variants(
            ["PyObject* test_1(PyObject* a, PyObject* b){ long c = PyInt_AS_LONG(a); for(; c > 0; c--) b = PyNumber_Add(b,a); return a;}",
             "PyObject* test_2(PyObject* a, PyObject* b){ long c = PyInt_AS_LONG(b); for(; c > 0; c--) a = PyNumber_Add(a,b); return b;}"] ,
            "test",
            ["test_1", "test_2"],
            key_func )
        mod.test(1,2)
        mod.test(1,2)
        mod.test(2,1)
        #mod.save_method_timings("test")
        mod.clear_method_timings("test")
        mod.restore_method_timings("test")
        self.assertNotEqual(
            mod.compiled_methods["test"].database.variant_times[key_func("test",1,2)], # time found for this input
            False)
        self.assertEqual(
            key_func("test",7,7) not in mod.compiled_methods["test"].database.variant_times, # this input never previously tried
            True)
        self.assertEqual(
            len(mod.compiled_methods["test"].database.variant_times[key_func("test",2,1)]), # only one variant timed for this input
            1)
    def test_dealing_with_preidentified_compilation_errors(self):
        mod = asp_module.ASPModule()
        key_func = lambda name, *args, **_: (name, args)
        mod.add_function_with_variants(
            variant_bodies=["PyObject* test_1(PyObject* a, PyObject* b){ long c = PyInt_AS_LONG(a); for(; c > 0; c--) b = PyNumber_Add(b,a); return a;}",
             "PyObject* test_2(PyObject* a, PyObject* b){ /*Dummy*/}",
             "PyObject* test_3(PyObject* a, PyObject* b){ long c = PyInt_AS_LONG(b); for(; c > 0; c--) a = PyNumber_Add(a,b); return b;}"] ,
            "test",
            ["test_1", "test_2", "test_3"],
            key_func,
            [lambda name, *args, **kwargs: True]*3,
            [True, False, True],
            ['a', 'b'] )
        mod.test(1,20000)
        mod.test(1,20000)
        mod.test(1,20000)
        self.assertNotEqual(
            mod.compiled_methods["test"].database.get_oracular_best(key_func("test",1,20000)), # best time found for this input
            False)
        self.assertEqual(
            mod.compiled_methods["test"].database.variant_times[("test",(1,20000))]['test_2'], # second variant was uncompilable
            -1)
    # Disabled, currently failing
    ""
    def test_dealing_with_preidentified_runtime_errors(self):
        mod = asp_module.ASPModule()
        key_func = lambda name, *args, **_: (name, args)
        mod.add_function_with_variants(
            variant_bodies=["PyObject* test_1(PyObject* a, PyObject* b){ long c = PyInt_AS_LONG(a); for(; c > 0; c--) b = PyNumber_Add(b,a); return a;}",
             "PyObject* test_2(PyObject* a, PyObject* b){ long c = PyInt_AS_LONG(a); for(; c > 0; c--) b = PyNumber_Add(b,a); return a;}",
             "PyObject* test_3(PyObject* a, PyObject* b){ long c = PyInt_AS_LONG(b); for(; c > 0; c--) a = PyNumber_Add(a,b); return b;}"] ,
            "test",
            ["test_1", "test_2", "test_3"],
            key_func,
            [lambda name, *args, **kwargs: True, lambda name, *args, **kwargs: args[1] < 10001, lambda name, *args, **kwargs: True],
            [True]*3,
            ['a', 'b'] )
        result1 = mod.test(1,20000)
        result2 = mod.test(1,20000)
        result3 = mod.test(1,20000)
        result1 = mod.test(1,10000)
        result2 = mod.test(1,10000)
        result3 = mod.test(1,10000)
        self.assertNotEqual(
            mod.compiled_methods["test"].database.get_oracular_best(key_func("test",1,20000)), # best time found for this input
            False)
        self.assertNotEqual(
            mod.compiled_methods["test"].database.get_oracular_best(key_func("test",1,10000)), # best time found for this input
            False)
        self.assertEqual(
            mod.compiled_methods["test"].database.variant_times[("test",(1,20000))]['test_2'], # second variant was unrannable for 20000
            -1)
        self.assertNotEqual(
            mod.compiled_methods["test"].database.variant_times[("test",(1,10000))]['test_2'], # second variant was runnable for 10000
            -1)
    """
# Allow running this test module directly: python asp_module_tests.py
if __name__ == '__main__':
    unittest.main()
|
ericmjl/bokeh
|
bokeh/core/validation/__init__.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' The validation module provides the capability to perform integrity
checks on an entire collection of Bokeh models.
To create a Bokeh visualization, the central task is to assemble a collection
model objects from |bokeh.models| into a graph that represents the scene that
should be created in the client. It is possible to do this "by hand", using the
model objects directly. However, to make this process easier, Bokeh provides
higher level interfaces such as |bokeh.plotting| for users.
These interfaces automate common "assembly" steps, to ensure a Bokeh object
graph is created in a consistent, predictable way. However, regardless of what
interface is used, it is possible to put Bokeh models together in ways that are
incomplete, or that do not make sense in some way.
To assist with diagnosing potential problems, Bokeh performs a validation step
when outputting a visualization for display. This module contains error and
warning codes as well as helper functions for defining validation checks.
One use case for warnings is to loudly point users in the right direction
when they accidentally do something that they probably didn't mean to do - this
is the case for EMPTY_LAYOUT for instance. Since warnings don't necessarily
indicate misuse, they are configurable. To silence a warning, use the silence
function provided.
.. code-block:: python
>>> from bokeh.core.validation import silence
>>> from bokeh.core.validation.warnings import EMPTY_LAYOUT
>>> silence(EMPTY_LAYOUT, True)
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from .check import check_integrity, silence, silenced
from .decorators import error, warning
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
p1c2u/openapi-core
|
openapi_core/validation/request/shortcuts.py
|
"""OpenAPI core validation request shortcuts module"""
from functools import partial
from openapi_core.validation.request.validators import RequestBodyValidator
from openapi_core.validation.request.validators import (
RequestParametersValidator,
)
from openapi_core.validation.request.validators import RequestSecurityValidator
from openapi_core.validation.request.validators import RequestValidator
def validate_request(validator, request):
    """Validate *request* with *validator*, raising on any errors.

    Returns the validation result when the request is valid.
    """
    validation_result = validator.validate(request)
    validation_result.raise_for_errors()
    return validation_result
def spec_validate_request(
    spec,
    request,
    request_factory=None,
    validator_class=RequestValidator,
    result_attribute=None,
):
    """Validate *request* against *spec*, raising on errors.

    The raw request is first adapted through *request_factory* when one is
    given.  Returns the full validation result, or just the attribute named
    by *result_attribute* when that is set.
    """
    if request_factory is not None:
        request = request_factory(request)
    result = validator_class(spec).validate(request)
    result.raise_for_errors()
    return result if result_attribute is None else getattr(result, result_attribute)
# Aspect-specific shortcuts: each validates the request with a narrower
# validator and returns only the matching attribute of the result.
spec_validate_parameters = partial(
    spec_validate_request,
    validator_class=RequestParametersValidator,
    result_attribute="parameters",
)

spec_validate_body = partial(
    spec_validate_request,
    validator_class=RequestBodyValidator,
    result_attribute="body",
)

spec_validate_security = partial(
    spec_validate_request,
    validator_class=RequestSecurityValidator,
    result_attribute="security",
)
|
datavisyn/tdp_core
|
tdp_core/config.py
|
from phovea_server.ns import Namespace, abort
from phovea_server.util import jsonify
from phovea_server.config import get as get_config
from phovea_server.plugin import list as list_plugins
import logging
# Namespace (Flask-blueprint-like) exposing read-only access to whitelisted
# configuration keys, plus the module-level logger.
app = Namespace(__name__)
_log = logging.getLogger(__name__)
@app.route('/<path:path>')
def _config(path):
    """Resolve a whitelisted configuration key path and return its value as JSON.

    The first path segment must be the id of a registered
    'tdp-config-safe-keys' plugin; unknown keys yield a 404.
    """
    segments = path.split('/')
    key = segments[0]
    plugin = next((p for p in list_plugins('tdp-config-safe-keys') if p.id == key), None)
    if plugin is None:
        _log.error('404: config key "{}" not found'.format(key))
        abort(404, 'config key "{}" not found'.format(key))
    # Swap the public key for the plugin's real config key before lookup.
    segments[0] = plugin.configKey
    return jsonify(get_config('.'.join(segments)))
def create():
    """Factory entry point: hand the configured namespace to the server."""
    return app
|
kevin-intel/scikit-learn
|
sklearn/impute/_knn.py
|
# Authors: Ashim Bhattarai <ashimb9@gmail.com>
# Thomas J Fan <thomasjpfan@gmail.com>
# License: BSD 3 clause
import numpy as np
from ._base import _BaseImputer
from ..utils.validation import FLOAT_DTYPES
from ..metrics import pairwise_distances_chunked
from ..metrics.pairwise import _NAN_METRICS
from ..neighbors._base import _get_weights
from ..neighbors._base import _check_weights
from ..utils import is_scalar_nan
from ..utils._mask import _get_mask
from ..utils.validation import check_is_fitted
class KNNImputer(_BaseImputer):
"""Imputation for completing missing values using k-Nearest Neighbors.
Each sample's missing values are imputed using the mean value from
`n_neighbors` nearest neighbors found in the training set. Two samples are
close if the features that neither is missing are close.
Read more in the :ref:`User Guide <knnimpute>`.
.. versionadded:: 0.22
Parameters
----------
missing_values : int, float, str, np.nan or None, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to np.nan, since `pd.NA` will be converted to np.nan.
n_neighbors : int, default=5
Number of neighboring samples to use for imputation.
weights : {'uniform', 'distance'} or callable, default='uniform'
Weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood are
weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- callable : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
metric : {'nan_euclidean'} or callable, default='nan_euclidean'
Distance metric for searching neighbors. Possible values:
- 'nan_euclidean'
- callable : a user-defined function which conforms to the definition
of ``_pairwise_callable(X, Y, metric, **kwds)``. The function
accepts two arrays, X and Y, and a `missing_values` keyword in
`kwds` and returns a scalar distance value.
copy : bool, default=True
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible.
add_indicator : bool, default=False
If True, a :class:`MissingIndicator` transform will stack onto the
output of the imputer's transform. This allows a predictive estimator
to account for missingness despite imputation. If a feature has no
missing values at fit/train time, the feature won't appear on the
missing indicator even if there are missing values at transform/test
time.
Attributes
----------
indicator_ : :class:`~sklearn.impute.MissingIndicator`
Indicator used to add binary indicators for missing values.
``None`` if add_indicator is False.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
References
----------
* Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor
Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing
value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17
no. 6, 2001 Pages 520-525.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import KNNImputer
>>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
>>> imputer = KNNImputer(n_neighbors=2)
>>> imputer.fit_transform(X)
array([[1. , 2. , 4. ],
[3. , 4. , 3. ],
[5.5, 6. , 5. ],
[8. , 8. , 7. ]])
"""
    def __init__(self, *, missing_values=np.nan, n_neighbors=5,
                 weights="uniform", metric="nan_euclidean", copy=True,
                 add_indicator=False):
        # Per scikit-learn convention, __init__ only stores the constructor
        # arguments verbatim; all validation is deferred to fit().
        super().__init__(
            missing_values=missing_values,
            add_indicator=add_indicator
        )
        self.n_neighbors = n_neighbors
        self.weights = weights
        self.metric = metric
        self.copy = copy
def _calc_impute(self, dist_pot_donors, n_neighbors,
fit_X_col, mask_fit_X_col):
"""Helper function to impute a single column.
Parameters
----------
dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
Distance matrix between the receivers and potential donors from
training set. There must be at least one non-nan distance between
a receiver and a potential donor.
n_neighbors : int
Number of neighbors to consider.
fit_X_col : ndarray of shape (n_potential_donors,)
Column of potential donors from training set.
mask_fit_X_col : ndarray of shape (n_potential_donors,)
Missing mask for fit_X_col.
Returns
-------
imputed_values: ndarray of shape (n_receivers,)
Imputed values for receiver.
"""
# Get donors
donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1,
axis=1)[:, :n_neighbors]
# Get weight matrix from from distance matrix
donors_dist = dist_pot_donors[
np.arange(donors_idx.shape[0])[:, None], donors_idx]
weight_matrix = _get_weights(donors_dist, self.weights)
# fill nans with zeros
if weight_matrix is not None:
weight_matrix[np.isnan(weight_matrix)] = 0.0
# Retrieve donor values and calculate kNN average
donors = fit_X_col.take(donors_idx)
donors_mask = mask_fit_X_col.take(donors_idx)
donors = np.ma.array(donors, mask=donors_mask)
return np.ma.average(donors, axis=1, weights=weight_matrix).data
    def fit(self, X, y=None):
        """Fit the imputer on X.

        Parameters
        ----------
        X : array-like shape of (n_samples, n_features)
            Input data, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : ignored
            Present only for scikit-learn API compatibility.

        Returns
        -------
        self : object
        """
        # Check data integrity and calling arguments.
        # NaNs are only allowed through validation when NaN *is* the
        # missing-value marker; otherwise a NaN is invalid data.
        if not is_scalar_nan(self.missing_values):
            force_all_finite = True
        else:
            force_all_finite = "allow-nan"
        if self.metric not in _NAN_METRICS and not callable(self.metric):
            raise ValueError(
                "The selected metric does not support NaN values")
        if self.n_neighbors <= 0:
            raise ValueError(
                "Expected n_neighbors > 0. Got {}".format(self.n_neighbors))

        X = self._validate_data(X, accept_sparse=False, dtype=FLOAT_DTYPES,
                                force_all_finite=force_all_finite,
                                copy=self.copy)
        _check_weights(self.weights)
        # Keep the validated training matrix and its missing mask: they form
        # the donor pool consulted at transform time.
        self._fit_X = X
        self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)

        super()._fit_indicator(self._mask_fit_X)

        return self
def transform(self, X):
    """Impute all missing values in X.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The input data to complete.

    Returns
    -------
    X : array-like of shape (n_samples, n_output_features)
        The imputed dataset. `n_output_features` is the number of features
        that is not always missing during `fit`.
    """
    check_is_fitted(self)
    # Match the NaN-handling mode that was used at fit time.
    if not is_scalar_nan(self.missing_values):
        force_all_finite = True
    else:
        force_all_finite = "allow-nan"
    X = self._validate_data(X, accept_sparse=False, dtype=FLOAT_DTYPES,
                            force_all_finite=force_all_finite,
                            copy=self.copy, reset=False)

    mask = _get_mask(X, self.missing_values)
    mask_fit_X = self._mask_fit_X
    # valid_mask marks training columns that had at least one observed value.
    valid_mask = ~np.all(mask_fit_X, axis=0)

    X_indicator = super()._transform_indicator(mask)

    # Removes columns where the training data is all nan
    if not np.any(mask):
        # No missing values in X
        # Remove columns where the training data is all nan
        return X[:, valid_mask]

    row_missing_idx = np.flatnonzero(mask.any(axis=1))

    non_missing_fix_X = np.logical_not(mask_fit_X)

    # Maps from indices from X to indices in dist matrix
    dist_idx_map = np.zeros(X.shape[0], dtype=int)
    dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])

    def process_chunk(dist_chunk, start):
        # Imputes, in place into the outer X, all rows of this distance
        # chunk. `start` is the chunk's offset within row_missing_idx.
        row_missing_chunk = row_missing_idx[start:start + len(dist_chunk)]

        # Find and impute missing by column
        for col in range(X.shape[1]):
            if not valid_mask[col]:
                # column was all missing during training
                continue

            col_mask = mask[row_missing_chunk, col]
            if not np.any(col_mask):
                # column has no missing values
                continue

            potential_donors_idx, = np.nonzero(non_missing_fix_X[:, col])

            # receivers_idx are indices in X
            receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]

            # distances for samples that needed imputation for column
            dist_subset = (dist_chunk[dist_idx_map[receivers_idx] - start]
                           [:, potential_donors_idx])

            # receivers with all nan distances impute with mean
            all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
            all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]

            if all_nan_receivers_idx.size:
                # Fall back to the column mean over observed training values.
                col_mean = np.ma.array(self._fit_X[:, col],
                                       mask=mask_fit_X[:, col]).mean()
                X[all_nan_receivers_idx, col] = col_mean

                if len(all_nan_receivers_idx) == len(receivers_idx):
                    # all receivers imputed with mean
                    continue

                # receivers with at least one defined distance
                receivers_idx = receivers_idx[~all_nan_dist_mask]
                dist_subset = (dist_chunk[dist_idx_map[receivers_idx]
                                          - start]
                               [:, potential_donors_idx])

            n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
            value = self._calc_impute(
                dist_subset,
                n_neighbors,
                self._fit_X[potential_donors_idx, col],
                mask_fit_X[potential_donors_idx, col])
            X[receivers_idx, col] = value

    # process in fixed-memory chunks
    gen = pairwise_distances_chunked(
        X[row_missing_idx, :],
        self._fit_X,
        metric=self.metric,
        missing_values=self.missing_values,
        force_all_finite=force_all_finite,
        reduce_func=process_chunk)
    for chunk in gen:
        # process_chunk modifies X in place. No return value.
        pass

    return super()._concatenate_indicator(X[:, valid_mask], X_indicator)
|
JakubBrachTieto/openthread
|
tests/scripts/thread-cert/Cert_5_6_01_NetworkDataRegisterBeforeAttachLeader.py
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
# Node ids for the 4-device test topology built in setUp().
LEADER = 1
ROUTER = 2
ED1 = 3
SED1 = 4
class Cert_5_6_1_NetworkDataLeaderAsBr(unittest.TestCase):
    """Thread cert 5.6.1: the leader registers network data (two
    prefixes) before the other devices attach; after attaching, each
    child must hold addresses only for the prefixes its mode allows,
    and those addresses must be pingable from the leader."""

    def setUp(self):
        # Topology: LEADER <-> ROUTER <-> (ED1, SED1), enforced via
        # link whitelists.
        self.nodes = {}
        for i in range(1, 5):
            self.nodes[i] = node.Node(i)

        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
        self.nodes[LEADER].enable_whitelist()

        self.nodes[ROUTER].set_panid(0xface)
        self.nodes[ROUTER].set_mode('rsdn')
        self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER].add_whitelist(self.nodes[ED1].get_addr64())
        self.nodes[ROUTER].add_whitelist(self.nodes[SED1].get_addr64())
        self.nodes[ROUTER].enable_whitelist()
        self.nodes[ROUTER].set_router_selection_jitter(1)

        self.nodes[ED1].set_panid(0xface)
        self.nodes[ED1].set_mode('rsn')
        self.nodes[ED1].add_whitelist(self.nodes[ROUTER].get_addr64())
        self.nodes[ED1].enable_whitelist()

        self.nodes[SED1].set_panid(0xface)
        self.nodes[SED1].set_mode('s')
        self.nodes[SED1].add_whitelist(self.nodes[ROUTER].get_addr64())
        self.nodes[SED1].enable_whitelist()
        self.nodes[SED1].set_timeout(3)

    def tearDown(self):
        # Loop variable renamed from ``node``: the original shadowed the
        # imported ``node`` module inside this method.
        for device in list(self.nodes.values()):
            device.stop()
        del self.nodes

    def test(self):
        self.nodes[LEADER].start()
        self.nodes[LEADER].set_state('leader')
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        # Register both prefixes on the leader before anyone attaches.
        self.nodes[LEADER].add_prefix('2001:2:0:1::/64', 'paros')
        self.nodes[LEADER].add_prefix('2001:2:0:2::/64', 'paro')
        self.nodes[LEADER].register_netdata()

        self.nodes[ROUTER].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')

        self.nodes[ED1].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ED1].get_state(), 'child')

        self.nodes[SED1].start()
        time.sleep(5)
        self.assertEqual(self.nodes[SED1].get_state(), 'child')

        # ED1 must configure addresses from both prefixes, all pingable.
        addrs = self.nodes[ED1].get_addrs()
        self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
        self.assertTrue(any('2001:2:0:2' in addr[0:10] for addr in addrs))
        for addr in addrs:
            if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
                self.assertTrue(self.nodes[LEADER].ping(addr))

        # SED1 must configure an address from the first prefix only.
        addrs = self.nodes[SED1].get_addrs()
        self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
        self.assertFalse(any('2001:2:0:2' in addr[0:10] for addr in addrs))
        for addr in addrs:
            if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
                self.assertTrue(self.nodes[LEADER].ping(addr))
# Allow running this certification script directly.
if __name__ == '__main__':
    unittest.main()
|
data-exp-lab/girder_ythub
|
plugin_tests/notebook_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mock
from tests import base
from girder.models.model_base import ValidationException
def setUpModule():
    # Enable the ythub plugin, then boot the embedded girder test server.
    base.enabledPlugins.append('ythub')
    base.startServer()
def tearDownModule():
    # Shut the test server down once every test in this module has run.
    base.stopServer()
class FakeAsyncResult(object):
    """Stand-in for a Celery AsyncResult yielding fixed service info."""

    def __init__(self):
        self.task_id = 'fake_id'

    def get(self):
        # Mimics the payload a real launch task would return.
        result = {
            'nodeId': '123456',
            'volumeId': 'blah_volume',
            'serviceId': 'tmp-blah',
            'urlPath': '?token=foo',
        }
        return result
class FakeAsyncResult2(object):
    """Second fake Celery result, distinguishable from FakeAsyncResult."""

    def __init__(self):
        self.task_id = 'fake_id'

    def get(self):
        # A different fixed payload so tests can tell launches apart.
        result = {
            'nodeId': '654321',
            'volumeId': 'foobar_volume',
            'serviceId': 'tmp-foobar',
            'urlPath': '?token=blah',
        }
        return result
class FakeAsyncResult3(object):
    """Third fake Celery result with its own fixed payload."""

    def __init__(self):
        self.task_id = 'fake_id'

    def get(self):
        result = {
            'nodeId': '162534',
            'volumeId': 'foobaz_volume',
            'serviceId': 'tmp-foobaz',
            'urlPath': '?token=ragl',
        }
        return result
class NotebookTestCase(base.TestCase):
    """End-to-end REST tests for the ythub /notebook resource."""

    def _getUser(self, userDict):
        # Create the user, or authenticate if it already exists
        # (createUser raises ValidationException on duplicates).
        try:
            user = self.model('user').createUser(**userDict)
        except ValidationException:
            resp = self.request(
                path='/user/authentication', method='GET',
                basicAuth='{login}:{password}'.format(**userDict))
            self.assertStatusOk(resp)
            user = resp.json['user']
        return user

    def setUp(self):
        # Imported at runtime: the plugin module only exists after the
        # plugin has been enabled by setUpModule().
        global PluginSettings
        from girder.plugins.ythub.constants import PluginSettings
        self.model('setting').set(
            PluginSettings.TMPNB_URL, "http://tmpnb.null")
        users = ({
            'email': 'root@dev.null',
            'login': 'admin',
            'firstName': 'Root',
            'lastName': 'van Klompf',
            'password': 'secret'
        }, {
            'email': 'joe@dev.null',
            'login': 'joeregular',
            'firstName': 'Joe',
            'lastName': 'Regular',
            'password': 'secret'
        })
        self.admin, self.user = [self._getUser(user) for user in users]

    def testNotebooks(self):
        """Create, list, fetch and delete notebooks with mocked Celery."""
        # Grab the default user folders
        resp = self.request(
            path='/folder', method='GET', user=self.user, params={
                'parentType': 'user',
                'parentId': self.user['_id'],
                'sort': 'name',
                'sortdir': 1
            })
        privateFolder = resp.json[0]
        publicFolder = resp.json[1]

        example_frontend = {
            'imageName': 'xarthisius/ythub',
            'command': './perform_magic',
            'memLimit': '2048m',
            'port': 12345,
            'user': 'user',
            'targetMount': '/blah',
            'urlPath': '?token={token}',
            'description': 'foo',
            'cpuShares': None,
            'public': True,
        }
        # Actually create a new frontend (private)
        resp = self.request(
            path='/frontend', method='POST', params=example_frontend,
            user=self.admin)
        self.assertStatus(resp, 200)
        frontend = resp.json

        # Launching a notebook goes through Celery; stub it out so the
        # fake results above are consumed in order.
        with mock.patch('celery.Celery') as celeryMock:
            with mock.patch('urllib.request.urlopen') as urllibMock:
                instance = celeryMock.return_value
                instance.send_task.side_effect = [
                    FakeAsyncResult(), FakeAsyncResult(),
                    FakeAsyncResult2(), FakeAsyncResult2(),
                    FakeAsyncResult3(), FakeAsyncResult3(),
                    FakeAsyncResult(), FakeAsyncResult()
                ]
                req = urllibMock.return_value
                req.fetch.return_value = {}

                params = {
                    'frontendId': str(frontend['_id']),
                    'folderId': str(privateFolder['_id'])
                }
                resp = self.request(
                    '/notebook', method='POST',
                    user=self.user, params=params)
                self.assertStatus(resp, 200)
                notebook = resp.json

        # Service info must come from the first fake async result.
        self.assertEqual(notebook['serviceInfo']['nodeId'], '123456')
        self.assertEqual(notebook['serviceInfo']['volumeId'], 'blah_volume')
        self.assertEqual(notebook['serviceInfo']['serviceId'], 'tmp-blah')
        self.assertEqual(notebook['url'], 'http://tmp-blah.tmpnb.null/?token=foo')
        self.assertEqual(notebook['frontendId'], str(frontend['_id']))
        self.assertEqual(notebook['folderId'], str(privateFolder['_id']))
        self.assertEqual(notebook['creatorId'], str(self.user['_id']))

        with mock.patch('celery.Celery') as celeryMock:
            with mock.patch('urllib.request.urlopen') as urllibMock:
                params = {
                    'frontendId': str(frontend['_id']),
                    'folderId': str(privateFolder['_id'])
                }
                # Return exisiting
                resp = self.request(
                    path='/notebook', method='POST', user=self.user,
                    params=params)
                self.assertStatus(resp, 200)
                self.assertEqual(resp.json['_id'], notebook['_id'])

                # Create 2nd user's nb
                params['folderId'] = str(publicFolder['_id'])
                resp = self.request(
                    path='/notebook', method='POST', user=self.user,
                    params=params)
                self.assertStatus(resp, 200)
                other_notebook = resp.json

                # Create admin nb
                params['folderId'] = str(publicFolder['_id'])
                resp = self.request(
                    path='/notebook', method='POST', user=self.admin,
                    params=params)
                self.assertStatus(resp, 200)
                admin_notebook = resp.json

        # By default user can list only his/her notebooks
        resp = self.request(
            path='/notebook', method='GET', user=self.user)
        self.assertStatus(resp, 200)
        self.assertEqual([_['_id'] for _ in resp.json],
                         [other_notebook['_id'], notebook['_id']])

        # Filter by folder
        resp = self.request(
            path='/notebook', method='GET', user=self.admin,
            params={'folderId': publicFolder['_id']})
        self.assertStatus(resp, 200)
        self.assertEqual([_['_id'] for _ in resp.json],
                         [admin_notebook['_id'], other_notebook['_id']])

        # Filter by folder and user
        resp = self.request(
            path='/notebook', method='GET', user=self.admin,
            params={'folderId': publicFolder['_id'],
                    'userId': self.user['_id']})
        self.assertStatus(resp, 200)
        self.assertEqual(resp.json[0]['_id'], other_notebook['_id'])

        # Get notebook by Id
        resp = self.request(
            path='/notebook/{_id}'.format(**notebook), method='GET')
        self.assertStatus(resp, 401)

        resp = self.request(
            path='/notebook/{_id}'.format(**admin_notebook), method='GET',
            user=self.user)
        self.assertStatus(resp, 403)

        resp = self.request(
            path='/notebook/{_id}'.format(**notebook), method='GET',
            user=self.admin)
        self.assertStatus(resp, 200)
        self.assertEqual(resp.json['_id'], notebook['_id'])

        with mock.patch('celery.Celery') as celeryMock:
            # A regular user may not delete someone else's notebook.
            resp = self.request(
                path='/notebook/{_id}'.format(**admin_notebook),
                method='DELETE', user=self.user)
            self.assertStatus(resp, 403)

            resp = self.request(
                path='/notebook/{_id}'.format(**notebook), method='DELETE',
                user=self.admin)
            self.assertStatus(resp, 200)

            # Check if notebook is gone
            resp = self.request(
                path='/notebook/{_id}'.format(**notebook), method='GET',
                user=self.admin)
            self.assertStatus(resp, 400)

    def tearDown(self):
        self.model('user').remove(self.user)
        self.model('user').remove(self.admin)
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractSecretchateauWordpressCom.py
|
def extractSecretchateauWordpressCom(item):
    '''
    Parser for 'secretchateau.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    lowered = item['title'].lower()
    if not (chp or vol) or "preview" in lowered:
        return None

    # Title-substring matching takes precedence over tag matching.
    titlemap = [
        ('GRMHCD ', 'Grim Reaper Makes His C-Debut', 'translated'),
        ('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'),
        ('Master of Dungeon', 'Master of Dungeon', 'oel'),
    ]
    for marker, series, tl_type in titlemap:
        if marker.lower() in lowered:
            return buildReleaseMessageWithType(
                item, series, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type)

    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, series, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(
                item, series, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type)

    return False
|
idlesign/uwsgiconf
|
tests/presets/test_nice.py
|
from os import environ
from uwsgiconf.presets.nice import Section, PythonSection
def test_nice_section(assert_lines):
    """Defaults, owner configuration and dedicated logging of Section."""
    assert_lines([
        'env = LANG=en_US.UTF-8',
        'workers = %k',
        'die-on-term = true',
        'vacuum = true',
        'threads = 4',
    ], Section(threads=4))

    # No logto directive unless logging is configured.
    assert_lines([
        'logto',
    ], Section(), assert_in=False)

    assert_lines([
        'enable-threads = true',
        'uid = www-data',
        'gid = www-data',
        'logto = /a/b.log',
    ], Section(threads=True, log_into='/a/b.log').configure_owner())

    assert_lines([
        'workers = 13',
        'touch-reload', 'test_nice.py',
    ], Section(workers=13, touch_reload=__file__))

    assert_lines([
        'disable-write-exception = true',
        'ignore-write-errors = true',
        'ignore-sigpipe = true',
        'log-master = true',
        'threaded-logger = true',
    ], Section(log_dedicated=True, ignore_write_errors=True))

    assert '%(headers) headers in %(hsize) bytes' in Section().get_log_format_default()
def test_get_bundled_static_path(assert_lines):
    """Bundled static files resolve to the packaged uwsgify directory."""
    path = Section.get_bundled_static_path('503.html')
    assert path.endswith('uwsgiconf/contrib/django/uwsgify/static/uwsgify/503.html')
def test_configure_https_redirect(assert_lines):
    """A 301 redirect route is added for non-HTTPS requests."""
    section = Section()
    section.configure_https_redirect()
    assert_lines(
        'route-if-not = eq:${HTTPS};on redirect-301:https://${HTTP_HOST}${REQUEST_URI}',
        section
    )
def test_configure_maintenance_mode(assert_lines, tmpdir):
    """Maintenance routes: static file, redirect and bundled app modes."""
    section = Section()
    section.configure_maintenance_mode('/watch/that/file', '/serve/this/file')
    section.configure_maintenance_mode('/watch/that/file/also', 'http://pythonz.net')
    assert_lines([
        'route-if = exists:/watch/that/file static:/serve/this/file',
        'route-if = exists:/watch/that/file/also redirect-302:http://pythonz.net',
    ], section)

    # 'app' mode with the trigger file absent: env + touch-reload only.
    afile = tmpdir.join('maintenance_file')

    section = Section()
    section.configure_maintenance_mode(f'{afile}', 'app')
    assert_lines([
        f'env = UWSGICONF_MAINTENANCE={afile}',
        f'touch-reload = {afile}',
    ], section)
    assert_lines([
        'wsgi = uwsgiconf.maintenance:app_maintenance',
    ], section, assert_in=False)

    # Create file
    afile.write('')

    # With the trigger file present the maintenance app is wired in-place.
    section = Section()
    section.configure_maintenance_mode(f'{afile}', 'app')
    assert_lines([
        f'env = UWSGICONF_MAINTENANCE={afile}',
        f'touch-reload = {afile}',
        'env = UWSGICONF_MAINTENANCE_INPLACE=1',
        'wsgi = uwsgiconf.maintenance:app_maintenance',
    ], section)
    assert environ['UWSGICONF_MAINTENANCE'] == f'{afile}'
    assert environ['UWSGICONF_MAINTENANCE_INPLACE'] == '1'

    section.configure_maintenance_mode(f'{afile}', 'app::mypack.here.there:myfunc')
    assert_lines([
        'wsgi = mypack.here.there:myfunc',
    ], section)
def test_configure_logging_json(assert_lines):
    """JSON logging wires a request logger and JSON encoders."""
    section = Section()
    section.configure_logging_json()

    assert_lines([
        'logger-req = stdio:',
        'log-format = %(method) %(uri) -> %(status)',
        'log-req-encoder = json {"dt": "${strftime:%%Y-%%m-%%dT%%H:%%M:%%S%%z}", "src": "uwsgi.req"',
        'log-req-encoder = nl',
        '"src": "uwsgi.out"',
    ], section)
def test_configure_certbot_https(assert_lines, monkeypatch):
    """Certbot wiring: webroot mapping, cert paths and HTTP redirect."""
    # Pretend the letsencrypt cert files exist on disk.
    monkeypatch.setattr('pathlib.Path.exists', lambda self: True)

    section = Section()
    section.configure_certbot_https('mydomain.org', '/var/www/', address=':4443')
    assert_lines([
        'static-map2 = /.well-known/=/var/www/',
        'https-socket = :4443,/etc/letsencrypt/live/mydomain.org/fullchain.pem,'
        '/etc/letsencrypt/live/mydomain.org/privkey.pem',
    ], section)

    section = Section.bootstrap(['http://:80'])
    section.configure_certbot_https('mydomain.org', '/var/www/', http_redirect=True)
    assert_lines([
        'shared-socket = :80',
        'shared-socket = :443',
        'http-socket = =0',
        'https-socket = =1,/etc/letsencrypt/live/mydomain.org/fullchain.pem,'
        '/etc/letsencrypt/live/mydomain.org/privkey.pem',
        'route-if-not = eq:${HTTPS};on redirect-301:https://${HTTP_HOST}${REQUEST_URI}',
    ], section)
def test_nice_python(assert_lines):
    """PythonSection emits plugin/venv/wsgi lines; embedded skips plugin."""
    assert_lines([
        'plugin = python',
        'pyhome = /home/idle/venv/\npythonpath = /home/idle/apps/',
        'wsgi = somepackage.module',
        'need-app = true',
    ], PythonSection(
        params_python=dict(
            # We'll run our app using virtualenv.
            python_home='/home/idle/venv/',
            search_path='/home/idle/apps/',
        ),
        wsgi_module='somepackage.module',
        embedded_plugins=None
    ))

    # Embedded plugins = True
    assert_lines('plugin = python', PythonSection(wsgi_module='somepackage.module'), assert_in=False)
|
jeisenma/ProgrammingConcepts
|
11-gui/slidy.py
|
from slider import *
def setup():
    # Processing.py hook: runs once; creates the window and the slider.
    global slidy
    size(400,200)
    slidy = Slider( Rect(50,80,300,40), minVal=100, maxVal=255 )
def draw():
    # Processing.py hook: repaints every frame; the background grey
    # level follows the slider's current value.
    background(slidy.value)
    slidy.draw()
def mousePressed():
    # Forward the mouse-press event to the slider.
    slidy.press()
def mouseDragged():
    # Forward the drag event so the slider knob tracks the mouse.
    slidy.drag()
def mouseReleased():
    # Forward the mouse-release event to the slider.
    slidy.release()
|
pbs/django-cms
|
cms/test_utils/cli.py
|
# -*- coding: utf-8 -*-
import os
def gettext(s):
    """Identity translation marker (PEP 8 E731: prefer ``def`` over
    binding a ``lambda`` to a name)."""
    return s


# Minimal URLconf; this module doubles as ROOT_URLCONF in the settings.
urlpatterns = []
def configure(**extra):
    """Build an in-memory Django settings dict for the cms test suite
    and apply it via ``settings.configure()``.

    Keyword arguments override any of the defaults below.
    """
    from django.conf import settings
    os.environ['DJANGO_SETTINGS_MODULE'] = 'cms.test_utils.cli'
    # In-memory SQLite and locmem backends keep the test run hermetic.
    defaults = dict(
        CACHE_BACKEND='locmem:///',
        DEBUG=True,
        DATABASE_SUPPORTS_TRANSACTIONS=True,
        DATABASES={
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': ':memory:',
            }
        },
        SITE_ID=1,
        USE_I18N=True,
        MEDIA_ROOT='/media/',
        STATIC_ROOT='/static/',
        CMS_MEDIA_ROOT='/cms-media/',
        CMS_MEDIA_URL='/cms-media/',
        MEDIA_URL='/media/',
        STATIC_URL='/static/',
        ADMIN_MEDIA_PREFIX='/static/admin/',
        EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
        SECRET_KEY='key',
        TEMPLATES=[
            {
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [
                    os.path.abspath(
                        os.path.join(os.path.dirname(__file__),
                                     'project',
                                     'templates')
                    )
                ],
                'OPTIONS': {
                    'context_processors': [
                        "django.contrib.auth.context_processors.auth",
                        "django.template.context_processors.i18n",
                        "django.template.context_processors.debug",
                        "django.template.context_processors.request",
                        "django.template.context_processors.media",
                        'django.template.context_processors.csrf',
                        "cms.context_processors.media",
                        "sekizai.context_processors.sekizai",
                        "django.template.context_processors.static",
                    ],
                    'loaders': (
                        'django.template.loaders.filesystem.Loader',
                        'django.template.loaders.app_directories.Loader',
                        'django.template.loaders.eggs.Loader',
                    ),
                },
            },
        ],
        MIDDLEWARE_CLASSES=[
            'django.contrib.sessions.middleware.SessionMiddleware',
            'cms.middleware.multilingual.MultilingualURLMiddleware',
            'django.contrib.auth.middleware.AuthenticationMiddleware',
            'django.contrib.messages.middleware.MessageMiddleware',
            'django.middleware.common.CommonMiddleware',
            'django.contrib.admindocs.middleware.XViewMiddleware',
            'django.middleware.csrf.CsrfViewMiddleware',
            'cms.middleware.user.CurrentUserMiddleware',
            'cms.middleware.page.CurrentPageMiddleware',
            'cms.middleware.toolbar.ToolbarMiddleware',
        ],
        INSTALLED_APPS=[
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.sessions',
            'django.contrib.admin',
            'django.contrib.sites',
            'django.contrib.staticfiles',
            'cms',
            'menus',
            'mptt',
            'cms.test_utils.project.sampleapp',
            'cms.test_utils.project.placeholderapp',
            'cms.test_utils.project.pluginapp',
            'cms.test_utils.project.pluginapp.plugins.manytomany_rel',
            'cms.test_utils.project.pluginapp.plugins.extra_context',
            'cms.test_utils.project.pluginapp.plugins.meta',
            'cms.test_utils.project.fakemlng',
            'cms.test_utils.project.fileapp',
            'cms.plugins.text',
            'cms.plugins.picture',
            'cms.plugins.file',
            'cms.plugins.flash',
            'cms.plugins.link',
            'cms.plugins.snippet',
            'cms.plugins.googlemap',
            'cms.plugins.teaser',
            'cms.plugins.video',
            'cms.plugins.twitter',
            'cms.plugins.inherit',
            'reversion',
            'sekizai',
        ],
        LANGUAGE_CODE="en",
        LANGUAGES=(
            ('en', gettext('English')),
            ('fr', gettext('French')),
            ('de', gettext('German')),
            ('pt-BR', gettext("Brazil")),
            ('nl', gettext("Dutch")),
        ),
        CMS_LANGUAGES=(
            ('en', gettext('English')),
            ('fr', gettext('French')),
            ('de', gettext('German')),
            ('pt-BR', gettext("Brazil")),
            ('nl', gettext("Dutch")),
        ),
        CMS_FRONTEND_LANGUAGES=(
            'fr',
            'de',
            'nl',
        ),
        CMS_LANGUAGE_CONF={
            'de': ['fr', 'en'],
            'en': ['fr', 'de'],
        },
        CMS_SITE_LANGUAGES={
            1: ['en', 'de', 'fr', 'pt-BR'],
            2: ['de', 'fr'],
            3: ['nl'],
        },
        CMS_TEMPLATES=(
            ('col_two.html', gettext('two columns')),
            ('col_three.html', gettext('three columns')),
            ('nav_playground.html', gettext('navigation examples')),
        ),
        CMS_PLACEHOLDER_CONF={
            'col_sidebar': {
                'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin',
                            'PicturePlugin', 'TextPlugin', 'SnippetPlugin'),
                'name': gettext("sidebar column")
            },
            'col_left': {
                'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin',
                            'PicturePlugin', 'TextPlugin', 'SnippetPlugin',
                            'GoogleMapPlugin',),
                'name': gettext("left column")
            },
            'col_right': {
                'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin',
                            'PicturePlugin', 'TextPlugin', 'SnippetPlugin',
                            'GoogleMapPlugin',),
                'name': gettext("right column")
            },
            'extra_context': {
                "plugins": ('TextPlugin',),
                "extra_context": {"width": 250},
                "name": "extra context"
            },
        },
        CMS_SOFTROOT=True,
        CMS_MODERATOR=True,
        CMS_PERMISSION=True,
        CMS_PUBLIC_FOR='all',
        # Zero cache durations so tests always see fresh data.
        CMS_CACHE_DURATIONS={
            'menus': 0,
            'content': 0,
            'permissions': 0,
        },
        CMS_APPHOOKS=[],
        CMS_REDIRECTS=True,
        CMS_SEO_FIELDS=True,
        CMS_FLAT_URLS=False,
        CMS_MENU_TITLE_OVERWRITE=True,
        CMS_HIDE_UNTRANSLATED=False,
        CMS_URL_OVERWRITE=True,
        CMS_SHOW_END_DATE=True,
        CMS_SHOW_START_DATE=True,
        CMS_PLUGIN_PROCESSORS=tuple(),
        CMS_PLUGIN_CONTEXT_PROCESSORS=tuple(),
        CMS_SITE_CHOICES_CACHE_KEY='CMS:site_choices',
        CMS_PAGE_CHOICES_CACHE_KEY='CMS:page_choices',
        CMS_NAVIGATION_EXTENDERS=(
            ('cms.test_utils.project.sampleapp.menu_extender.get_nodes',
             'SampleApp Menu'),
        ),
        TEST_RUNNER='cms.test_utils.runners.NormalTestRunner',
        JUNIT_OUTPUT_DIR='.',
        TIME_TESTS=False,
        ROOT_URLCONF='cms.test_utils.cli',
        # MD5 hashing keeps user creation fast in tests.
        PASSWORD_HASHERS=(
            'django.contrib.auth.hashers.MD5PasswordHasher',
        )
    )

    # Disable migrations for Django 1.7+
    class DisableMigrations(object):

        def __contains__(self, item):
            return True

        def __getitem__(self, item):
            return "notmigrations"

    defaults['MIGRATION_MODULES'] = DisableMigrations()
    defaults.update(extra)
    settings.configure(**defaults)
    import django
    django.setup()
|
douban/douban-sqlstore
|
douban/sqlstore/genconfig.py
|
#!/usr/bin/env python
#coding=utf8
"""Utility script for generating sqlstore configs
sample settings file:
################# starts #####################
default_params = {
'roles': ['m', 's', 'b', 'g', 'h'],
'rw_user': {
'user': 'rw_user',
'passwd': 'password'
},
'ro_user': {
'user': 'ro_user',
'passwd': 'password'
},
'tables': [],
}
farms = {
'luz': {
'port': 3306,
'dbs': ['luz_farm'],
'online': True,
},
}
configs = {
'shire-online.erb': {
'instances': ['luz_m'],
}
}
################## ends ######################
"""
import imp
import sys
import pprint
import json
import argparse
from StringIO import StringIO
from douban.sqlstore import SqlFarm
verbose = False
class DuplicatedTable(Exception):
    """Raised when the same table name appears in more than one farm."""

    def __init__(self, table, farms):
        self.table = table
        self.farms = farms

    def __str__(self):
        joined = ','.join(self.farms)
        return 'There are duplicated tables in these farms: %s' % joined
class FarmManager(object):
    """Loads sqlstore settings and produces per-config farm dicts."""

    def __init__(self, config):
        # ``config`` may be a settings module or a path to one (Python 2:
        # ``basestring`` covers str and unicode).
        if isinstance(config, basestring):
            config = imp.load_source('sqlstore_settings', config)
        self._default_params = config.default_params
        self.farms = config.farms
        self.configs = config.configs

    def get_conf(self, instance):
        """Get MySQLdb compatible config
        instance: luz_m, orc_s etc.
        """
        farm, role = instance.rsplit('_', 1)
        _conf = self._default_params.copy()
        _conf.update(self.farms.get(farm, {}))
        _available_roles = self._default_params.get('roles', [])
        fallback_role = None
        if role not in _conf['roles']:
            # fallback to next higher priority role
            # b -> s -> m
            # if there is not "b" role in the farm, then fallback to "s" for
            # example
            fallback_roles = reversed(_available_roles[:_available_roles.index(role)])
            for _role in fallback_roles:
                if _role in _conf['roles']:
                    fallback_role = _role
                    break
            if not fallback_role:
                # No usable role at all for this farm: empty config.
                return {}
            else:
                role = fallback_role
        # 'm' (master) gets the read-write account; everything else read-only.
        if role == 'm':
            user = _conf['rw_user']['user']
            passwd = _conf['rw_user']['passwd']
        else:
            user = _conf['ro_user']['user']
            passwd = _conf['ro_user']['passwd']
        host_prefix = _conf.get('host_prefix', None)
        host = '%s_%s' % (farm, role) if not host_prefix else '%s_%s' % (host_prefix, role)
        # An explicit 'hostname' entry overrides the derived host name.
        host = _conf['hostname'] if _conf.get('hostname') else host
        conf = {
            'host': host,
            'port': _conf['port'],
            'user': user,
            'passwd': passwd,
            'db': _conf['dbs'][0],
        }
        return conf

    def get_tables(self, farm):
        # Prefer the statically configured table list; otherwise ask the
        # farm's master directly via "show tables".
        tables = self.farms.get(farm, {}).get('tables')
        if tables:
            return tables
        dbcnf = self.get_sqlstore_dbcnf('%s_m' % farm)
        farm = SqlFarm(dbcnf, connect_timeout=1)
        cursor = farm.get_cursor()
        cursor.execute('show tables')
        tables = [r[0] for r in cursor.fetchall()]
        return tables

    def get_sqlstore_dbcnf(self, instance):
        # Serialize to sqlstore's "host:port:db:user:passwd" DSN form;
        # empty string when no config could be derived.
        conf = self.get_conf(instance)
        return '%(host)s:%(port)d:%(db)s:%(user)s:%(passwd)s' % conf if conf else ''

    def gen_config(self, name, instances, extras={}, roles=['m', 's', 'b']):
        # NOTE(review): ``extras`` and ``roles`` are mutable defaults; they
        # appear to be only read here, but None/tuple defaults would be safer.
        conf = {
            'farms': {},
            'migration': {},
            'options': {},
        }
        all_tables = {}
        all_instances = set()
        for index, instance in enumerate(instances):
            if instance in all_instances:
                if verbose:
                    print >>sys.stderr, 'duplicate instance:', instance
            all_instances.add(instance)
            try:
                # FIXME: roles override the roles param
                name, roles = instance.rsplit('_', 1)
            except ValueError:
                # Instance had no explicit role suffix; default to master.
                name = instance
                instance = '%s_m' % instance
            farm_name = '%s_farm' % name
            conf['farms'][farm_name] = {}
            role_names = {
                'm': 'master',
                's': 'slave',
                'b': 'backup',
            }
            for role in roles:
                instance = '%s_%s' % (name, role)
                dbcnf = self.get_sqlstore_dbcnf(instance)
                #TODO: len(roles) == 1 means non-algorithm configs
                if len(roles) == 1:
                    role_name = 'master'
                else:
                    role_name = role_names[role]
                conf['farms'][farm_name][role_name] = dbcnf
            tables = self.get_tables(name)
            if tables is None:
                return None
            # Reject a table that already belongs to another farm.
            for table in tables:
                if table in all_tables:
                    farms = [farm_name, all_tables[table]]
                    raise DuplicatedTable(table, farms)
                all_tables[table] = farm_name
            if index == 0:
                # First farm also acts as the wildcard ('*') catch-all.
                tables.append('*')
            conf['farms'][farm_name]['tables'] = tables
        if verbose:
            print >>sys.stderr, 'done!'
        conf.update(extras)
        return conf
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config',
default='/etc/sqlstore/settings.py')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
global verbose
verbose = args.verbose
try:
config = imp.load_source('sqlstore_settings', args.config)
except Exception, exc:
print >>sys.stderr, 'Read config "%s" "fail: %s' % (args.config, exc)
return 1
fm = FarmManager(config)
skipped = 0
for output_filename, options in fm.configs.items():
if verbose:
print >>sys.stderr, 'Processing config "%s"...' % output_filename,
try:
farms_config = fm.gen_config(output_filename,
options['instances'],
extras=options.get('extras', {}))
except Exception, exc:
print >>sys.stderr, 'Skip generating config file "%s": %s' % (output_filename, exc)
skipped += 1
continue
_format = options.get('format', 'python')
if not _format in ('python', 'json'):
raise Exception('Invaid output format: %s' % _format)
config = farms_config
if _format == 'python':
output = StringIO()
pp = pprint.PrettyPrinter(indent=4, stream=output)
pp.pprint(config)
with open(output_filename, 'w') as cf:
cf.write(output.getvalue())
elif _format == 'json':
json.dump(config, open(output_filename, 'w'), indent=4)
return skipped
# Exit status mirrors the number of skipped config files (0 = success).
if __name__ == '__main__':
    sys.exit(main())
|
saltastro/timDIMM
|
weather.py
|
#!/usr/bin/env python
import sys
import html5lib
import urllib2
from numpy import median, array
from xml_icd import parseICD
from html5lib import treebuilders
def salt():
    """Read SALT TCS/BMS weather from the ICD XML feed.

    Returns a dict of readings, or False when the feed is unreachable
    or its structure does not match expectations.
    """
    wx = {}
    try:
        tcs = parseICD("http://icd.salt/xml/salt-tcs-icd.xml")
        time = tcs['tcs xml time info']
        bms = tcs['bms external conditions']
        temps = bms['Temperatures']
        wx["Temp"] = median(array(temps.values()))
        wx["Temp 2m"] = temps["2m"]
        wx["Temp 30m"] = temps["30m"]

        # get time
        wx["SAST"] = time["SAST"].split()[1]
        wx["Date"] = time["SAST"].split()[0]

        # set up other values of interest
        wx["Air Pressure"] = bms["Air pressure"] * 10.0
        wx["Dewpoint"] = bms["Dewpoint"]
        wx["RH"] = bms["Rel Humidity"]
        # * 3.6 — presumably m/s -> km/h; TODO confirm feed units
        wx["Wind Speed (30m)"] = bms["Wind mag 30m"] * 3.6
        wx["Wind Speed"] = bms["Wind mag 10m"] * 3.6
        wx["Wind Dir (30m)"] = bms["Wind dir 30m"]
        wx["Wind Dir"] = bms["Wind dir 10m"]
        wx["T - DP"] = wx["Temp 2m"] - bms["Dewpoint"]
        wx["Raining"] = bms["Rain detected"]
        return wx
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        return False
def wasp():
    """Scrape weather readings from the SuperWASP gateway page.

    Returns a dict of readings, or False when the page cannot be
    fetched or parsed.
    """
    wx = {}
    try:
        p = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
        doc = p.parse(urllib2.urlopen("http://swaspgateway.suth/",
                                      timeout=1).read())
        t = doc.getElementsByTagName("table")[0]
        tds = t.getElementsByTagName("td")
        wx["Temp"] = float(tds[7].firstChild.nodeValue)
        # The sky cell is either the literal "RAIN" or "<sky>(<temp>)".
        if tds[10].firstChild.nodeValue == "RAIN":
            wx["Sky"] = "Rain"
            wx["Sky Temp"] = wx["Temp"]
        else:
            sky, stemp = tds[10].firstChild.nodeValue.split('(')
            stemp = stemp[0:-1]
            wx["Sky"] = sky
            wx["Sky Temp"] = stemp
        wx["T - DP"] = float(tds[9].firstChild.nodeValue)
        wx["RH"] = float(tds[8].firstChild.nodeValue)
        tds[6].normalize()
        wx["Wind Dir"] = tds[6].firstChild.nodeValue[1:]
        wx["Wind Speed"] = float(tds[5].firstChild.nodeValue)
        rain = tds[4].firstChild.nodeValue
        if rain == "DRY":
            wx["Raining"] = False
        else:
            wx["Raining"] = True
        wx["UT"] = tds[3].firstChild.nodeValue.strip()
        tds[31].normalize()
        wx["Status"] = tds[31].firstChild.nodeValue.strip()
        return wx
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        return False
def grav():
    # Scrape weather readings from the two SG1 status pages. Unlike
    # salt()/wasp(), fetch or parse errors propagate to the caller.
    wx = {}
    p = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
    kan11 = p.parse(urllib2.urlopen("http://sg1.suth/tmp/kan11.htm",
                                    timeout=1).read())
    kan16 = p.parse(urllib2.urlopen("http://sg1.suth/tmp/kan16.htm",
                                    timeout=1).read())
    kan11_tds = kan11.getElementsByTagName("td")
    kan16_tds = kan16.getElementsByTagName("td")
    wx["Date"], wx["UT"] = kan11_tds[12].firstChild.nodeValue.split()
    # normalize() merges adjacent text nodes before reading firstChild.
    kan11_tds[14].normalize()
    kan11_tds[15].normalize()
    wx["Temp"] = float(kan11_tds[14].firstChild.nodeValue)
    wx["RH"] = float(kan11_tds[15].firstChild.nodeValue)
    kan16_tds[13].normalize()
    kan16_tds[14].normalize()
    wx["Wind Dir"] = int(kan16_tds[13].firstChild.nodeValue)
    # * 3.6 — presumably m/s -> km/h; TODO confirm page units
    wx["Wind Speed"] = float(kan16_tds[14].firstChild.nodeValue) * 3.6
    return wx
if __name__ == '__main__':
if len(sys.argv) == 1:
print "Usage: weather.py <salt|wasp|grav>"
else:
wx = eval("%s()" % sys.argv[1].lower())
if wx:
for k, v in sorted(wx.items()):
print "%20s : \t %s" % (k, v)
else:
print "No information received."
|
beregond/jsonmodels
|
setup.py
|
#!/usr/bin/env python
# coding: utf-8
import os
import sys
from setuptools.command.test import test as TestCommand
from jsonmodels import __version__, __author__, __email__
from setuptools import setup
PROJECT_NAME = 'jsonmodels'
# Convenience shortcut: `python setup.py publish` builds an sdist,
# uploads it, and exits without running setup() below.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()
class PyTest(TestCommand):
    """``setup.py test`` command that delegates to py.test.

    Replaces the stdlib unittest runner so ``python setup.py test``
    runs pytest, with coverage for this package enabled by default.
    """

    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # Default: measure coverage of the project package.
        self.pytest_args = ['--cov', PROJECT_NAME]

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported here so setup.py can load even without pytest installed.
        import pytest
        errno = pytest.main(self.pytest_args)
        sys.exit(errno)
# Hacking tests: translate custom command-line switches into flags on the
# local ``tests`` package, then strip them so setuptools doesn't choke.
try:
    import tests
except ImportError:
    pass
else:
    if 'test' in sys.argv and '--no-lint' in sys.argv:
        tests.LINT = False
        del sys.argv[sys.argv.index('--no-lint')]
    if 'test' in sys.argv and '--spelling' in sys.argv:
        tests.CHECK_SPELLING = True
        del sys.argv[sys.argv.index('--spelling')]
# Long description assembled from the README plus the changelog.
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
# Package metadata and distribution configuration.
setup(
    name=PROJECT_NAME,
    version=__version__,
    description='Models to make easier to deal with structures that'
                ' are converted to, or read from JSON.',
    long_description=readme + '\n\n' + history,
    author=__author__,
    author_email=__email__,
    url='https://github.com/beregond/jsonmodels',
    packages=[
        PROJECT_NAME,
    ],
    package_dir={PROJECT_NAME: PROJECT_NAME},
    include_package_data=True,
    install_requires=[
        'python-dateutil',
        'six',
    ],
    license="BSD",
    zip_safe=False,
    keywords=PROJECT_NAME,
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    # Route `setup.py test` through the pytest wrapper above.
    cmdclass={
        'test': PyTest,
    },
)
|
Teekuningas/mne-python
|
mne/preprocessing/_fine_cal.py
|
# -*- coding: utf-8 -*-
# Authors: Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
from ..utils import check_fname, _check_fname
def read_fine_calibration(fname):
    """Read fine calibration information from a .dat file.

    The fine calibration typically includes improved sensor locations,
    calibration coefficients, and gradiometer imbalance information.

    Parameters
    ----------
    fname : str
        The filename.

    Returns
    -------
    calibration : dict
        Fine calibration information.
    """
    fname = _check_fname(fname, overwrite='read', must_exist=True)
    check_fname(fname, 'cal', ('.dat',))
    ch_names, locs, imb_cals = list(), list(), list()
    with open(fname, 'r') as fid:
        for line in fid:
            # Skip comment and blank lines.
            if line[0] in '#\n':
                continue
            fields = line.strip().split()
            if len(fields) not in (14, 16):
                raise RuntimeError('Error parsing fine calibration file, '
                                   'should have 14 or 16 entries per line '
                                   'but found %s on line:\n%s'
                                   % (len(fields), line))
            name = fields[0]
            # Heuristic: 3- or 4-digit entries are bare Neuromag channel
            # numbers that need the 'MEG%04d' name restored.
            if len(name) in (3, 4):
                try:
                    num = int(name)
                except ValueError:  # something other than e.g. 113 or 2642
                    pass
                else:
                    name = 'MEG' + '%04d' % num
            ch_names.append(name)
            # Entries 1-12: (x, y, z) position plus x-, y- and z-norm 3-vecs.
            locs.append(np.array([float(f) for f in fields[1:13]]))
            # Remaining 1 or 3 entries are imbalance terms.
            imb_cals.append([float(f) for f in fields[13:]])
    return dict(ch_names=ch_names, locs=np.array(locs), imb_cals=imb_cals)
def write_fine_calibration(fname, calibration):
    """Write fine calibration information to a .dat file.

    Parameters
    ----------
    fname : str
        The filename to write out.
    calibration : dict
        Fine calibration information.
    """
    fname = _check_fname(fname, overwrite=True)
    check_fname(fname, 'cal', ('.dat',))
    with open(fname, 'wb') as cal_file:
        for ci, chan in enumerate(calibration['ch_names']):
            # One line per channel: name, 12 location values, then the
            # imbalance terms, all at 6 decimal places (field widths match
            # how Elekta writes these files).
            numbers = np.concatenate([calibration['locs'][ci],
                                      calibration['imb_cals'][ci]]).round(6)
            parts = [str(chan)] + ['%.6f' % val for val in numbers]
            cal_file.write((' '.join(parts) + '\n').encode('ASCII'))
|
tpltnt/SimpleCV
|
SimpleCV/examples/detection/MotionTracker.py
|
#!/usr/bin/python
'''
This SimpleCV example uses a technique called frame differencing to determine
if motion has occurred. You take an initial image, then another, and subtract
the difference; what is left over is what has changed between those two images.
These are typically blobs on the images, so we do a blob search to count
the number of blobs, and if they exist then motion has occurred.
'''
from __future__ import print_function
import sys, time, socket
from SimpleCV import *
cam = Camera() #setup the camera
#settings for the project
# Minimum blob area, scaled to 10% of the camera frame so the threshold
# adapts to different camera resolutions.
min_size = 0.1*cam.getProperty("width")*cam.getProperty("height") #make the threshold adapatable for various camera sizes
thresh = 10 # frame diff threshold
show_message_for = 2 # the amount of seconds to show the motion detected message
motion_timestamp = int(time.time())
message_text = "Motion detected"
draw_message = False
lastImg = cam.getImage()
lastImg.show()
# Main loop: frame-difference consecutive captures; any blobs in the
# difference image mean something moved.
while True:
    newImg = cam.getImage()
    trackImg = newImg - lastImg # diff the images
    blobs = trackImg.findBlobs() #use adapative blob detection
    now = int(time.time())
    #If blobs are found then motion has occured
    if blobs:
        motion_timestamp = now
        draw_message = True
    #See if the time has exceeded to display the message
    if (now - motion_timestamp) > show_message_for:
        draw_message = False
    #Draw the message on the screen
    if(draw_message):
        newImg.drawText(message_text, 5,5)
        print(message_text)
    lastImg = newImg # update the image
    newImg.show()
|
ksmaheshkumar/django-DefectDojo
|
dojo/models.py
|
from datetime import date, datetime
import os
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q
from django.utils.timezone import now
from pytz import timezone
localtz = timezone(settings.TIME_ZONE)
def get_current_date():
    # Today's date, normalized to the project's configured TIME_ZONE.
    return localtz.normalize(now()).date()
def get_current_datetime():
    # Current datetime, normalized to the project's configured TIME_ZONE.
    return localtz.normalize(now())
# proxy class for convenience and UI
class Dojo_User(User):
    """Proxy over the auth ``User`` model for convenience and UI display."""

    class Meta:
        proxy = True

    def get_full_name(self):
        """Return "first last (username)", stripped of stray whitespace."""
        parts = (self.first_name, self.last_name, self.username)
        return ('%s %s (%s)' % parts).strip()

    def __unicode__(self):
        return self.get_full_name()
class Contact(models.Model):
    """A point of contact for engagements (name, email, team, permissions)."""
    name = models.CharField(max_length=100)
    email = models.EmailField()
    team = models.CharField(max_length=100)
    is_admin = models.BooleanField(default=False)
    is_globally_read_only = models.BooleanField(default=False)
    # editable=False: maintained by application code, not via admin forms.
    updated = models.DateTimeField(editable=False)
class Product_Type(models.Model):
    """A category of products (e.g. a business unit or technology area)."""

    name = models.CharField(max_length=300)

    def findings_count(self):
        """Return how many open, verified findings of the four standard
        severities exist across all products of this type."""
        findings = Finding.objects.filter(active=True, mitigated__isnull=True,
                                          false_p=False, verified=True)
        # severity__in replaces the original chained Q(...) | Q(...) ORs.
        findings = findings.filter(
            severity__in=["Critical", "High", "Medium", "Low"])
        findings = findings.filter(test__engagement__product__prod_type=self)
        # count() lets the database count rows instead of fetching every
        # matching Finding just to call len() on the list.
        return findings.count()

    def products_count(self):
        """Return the number of products of this type."""
        return Product.objects.filter(prod_type=self).count()

    def __unicode__(self):
        return self.name
class Product_Line(models.Model):
    """A named group of products with a free-form description."""
    name = models.CharField(max_length=300)
    description = models.CharField(max_length=2000)
    def __unicode__(self):
        return self.name
class Report_Type(models.Model):
    """Lookup table naming a kind of report an engagement can produce."""
    name = models.CharField(max_length=300)
class Test_Type(models.Model):
    """Lookup table naming a kind of test (scanner, methodology, etc.)."""
    name = models.CharField(max_length=200)
    def __unicode__(self):
        return self.name
class Product(models.Model):
    """A product (application/system) that engagements are run against."""

    name = models.CharField(max_length=300)
    description = models.CharField(max_length=2000)
    prod_manager = models.CharField(default=0, max_length=200)
    tech_contact = models.CharField(default=0, max_length=200)
    manager = models.CharField(default=0, max_length=200)
    created = models.DateTimeField(editable=False, null=True, blank=True)
    prod_type = models.ForeignKey(Product_Type, related_name='prod_type',
                                  null=True, blank=True)
    updated = models.DateTimeField(editable=False, null=True, blank=True)
    tid = models.IntegerField(default=0, editable=False)
    authorized_users = models.ManyToManyField(User, null=True, blank=True)

    def __unicode__(self):
        return self.name

    class Meta:
        ordering = ('name',)

    @property
    def findings_count(self):
        """Number of open, verified findings across this product's tests.

        A single filtered COUNT replaces the original nested loops over
        engagements and tests, which issued one query per test.
        """
        return Finding.objects.filter(active=True, mitigated__isnull=True,
                                      false_p=False, verified=True,
                                      test__engagement__product=self).count()

    @property
    def endpoint_count(self):
        """Number of distinct endpoints with open, verified findings."""
        return Endpoint.objects.filter(finding__test__engagement__product=self,
                                       finding__active=True,
                                       finding__verified=True,
                                       finding__mitigated__isnull=True).distinct().count()

    def open_findings(self, start_date=None, end_date=None):
        """Count open, verified findings per severity within a date range.

        Returns {} when either bound is missing; otherwise a dict keyed by
        the four severities plus 'Total'.
        """
        if start_date is None or end_date is None:
            return {}
        base = Finding.objects.filter(test__engagement__product=self,
                                      active=True,
                                      mitigated__isnull=True,
                                      false_p=False, verified=True,
                                      date__range=[start_date, end_date])
        counts = {sev: base.filter(severity=sev).count()
                  for sev in ('Critical', 'High', 'Medium', 'Low')}
        counts['Total'] = sum(counts.values())
        return counts

    def reported_findings(self, start_date=None, end_date=None):
        """Return ``(total, [critical, high, medium, low])`` querysets of
        verified findings reported within the date range, or {} when either
        bound is missing (mirrors ``open_findings``)."""
        if start_date is None or end_date is None:
            return {}
        base = Finding.objects.filter(test__engagement__product=self,
                                      false_p=False, verified=True,
                                      date__range=[start_date, end_date])
        # NOTE(review): unlike the other severities, Critical findings are
        # NOT restricted to active/unmitigated here. Preserved as-is from
        # the original implementation -- confirm the asymmetry is intended.
        critical = base.filter(severity="Critical")
        open_base = base.filter(active=True, mitigated__isnull=True)
        high = open_base.filter(severity="High")
        medium = open_base.filter(severity="Medium")
        low = open_base.filter(severity="Low")
        # len() (not count()) evaluates and caches the querysets, which the
        # caller receives in the returned list and will iterate anyway.
        return ((len(critical) + len(high) + len(medium) + len(low)),
                [critical,
                 high,
                 medium,
                 low])
class ScanSettings(models.Model):
    """Per-product configuration for scheduled scans."""

    product = models.ForeignKey(Product, default=1, editable=False)
    addresses = models.TextField(default="none")
    user = models.ForeignKey(User, editable=False)
    date = models.DateTimeField(editable=False, blank=True,
                                default=get_current_datetime)
    frequency = models.CharField(max_length=10000, null=True,
                                 blank=True)
    email = models.CharField(max_length=512)
    protocol = models.CharField(max_length=10, default='TCP')

    def addresses_as_list(self):
        """Split the comma-separated ``addresses`` field into a stripped list."""
        if not self.addresses:
            return []
        return [addr.strip() for addr in self.addresses.split(',')]
"""
Modified by Fatimah and Micheal
removed ip_scans field
"""
class Scan(models.Model):
    """One execution of a ScanSettings configuration."""
    scan_settings = models.ForeignKey(ScanSettings, default=1, editable=False)
    date = models.DateTimeField(editable=False, blank=True,
                                default=get_current_datetime)
    protocol = models.CharField(max_length=10, default='TCP')
    status = models.CharField(max_length=10, default='Pending', editable=False)
    baseline = models.BooleanField(default=False,
                                   verbose_name="Current Baseline")
"""
Modified by Fatimah and Micheal
Changed services from a ManytToMany field to a formatted string
"port,protocol,status"
Added scan_id
"""
class IPScan(models.Model):
    """Scan result for one address; ``services`` holds a formatted
    "port,protocol,status" string (see module note above this class)."""
    address = models.TextField(editable=False, default="none")
    services = models.CharField(max_length=800, null=True)
    scan = models.ForeignKey(Scan, default=1, editable=False)
class Engagement_Type(models.Model):
    """Lookup table naming a kind of engagement."""
    name = models.CharField(max_length=200)
class Engagement(models.Model):
    """A testing engagement against a product: schedule, scope flags,
    status/progress tracking and linked artifacts (threat model path,
    risk acceptances)."""
    name = models.CharField(max_length=300, null=True, blank=True)
    version = models.CharField(max_length=100, null=True, blank=True)
    eng_type = models.ForeignKey(Engagement_Type, null=True, blank=True)
    first_contacted = models.DateField(null=True, blank=True)
    target_start = models.DateField(null=False, blank=False)
    target_end = models.DateField(null=False, blank=False)
    lead = models.ForeignKey(User, editable=True, null=True)
    requester = models.ForeignKey(Contact, null=True, blank=True)
    reason = models.CharField(max_length=2000, null=True, blank=True)
    report_type = models.ForeignKey(Report_Type, null=True, blank=True)
    product = models.ForeignKey(Product)
    updated = models.DateTimeField(editable=False, null=True, blank=True)
    active = models.BooleanField(default=True, editable=False)
    test_strategy = models.URLField(editable=True, blank=True, null=True)
    # Scope flags: which activities this engagement includes.
    threat_model = models.BooleanField(default=True)
    api_test = models.BooleanField(default=True)
    pen_test = models.BooleanField(default=True)
    check_list = models.BooleanField(default=True)
    status = models.CharField(editable=True, max_length=2000, default='',
                              null=True,
                              choices=(('In Progress', 'In Progress'),
                                       ('On Hold', 'On Hold'),
                                       ('Completed', 'Completed')))
    progress = models.CharField(max_length=100,
                                default='threat_model', editable=False)
    tmodel_path = models.CharField(max_length=1000, default='none',
                                   editable=False, blank=True, null=True)
    risk_path = models.CharField(max_length=1000, default='none',
                                 editable=False, blank=True, null=True)
    risk_acceptance = models.ManyToManyField("Risk_Acceptance",
                                             default=None, null=True,
                                             editable=False, blank=True)
    done_testing = models.BooleanField(default=False, editable=False)
    class Meta:
        # Most recent engagements first.
        ordering = ['-target_start']
    def __unicode__(self):
        return "Engagement: %s (%s)" % (self.name if self.name else '',
                                        self.target_start.strftime(
                                            "%b %d, %Y"))
class CWE(models.Model):
    """A Common Weakness Enumeration entry (number, description, URL)."""
    url = models.CharField(max_length=1000)
    description = models.CharField(max_length=2000)
    number = models.IntegerField()
class Endpoint(models.Model):
    """A URL-like location a finding applies to, stored as its parts
    (protocol, host, path, query, fragment)."""
    # NOTE(review): 'protocl' typo below is in user-visible help_text;
    # fixing it would touch a migration, so it is only flagged here.
    protocol = models.CharField(null=True, blank=True, max_length=10,
                                help_text="The communication protocl such as 'http', 'ftp', etc.")
    host = models.CharField(null=True, blank=True, max_length=500,
                            help_text="The host name or IP address, you can also include the port number. For example"
                                      "'127.0.0.1', '127.0.0.1:8080', 'localhost', 'yourdomain.com'.")
    path = models.CharField(null=True, blank=True, max_length=500,
                            help_text="The location of the resource, it should start with a '/'. For example"
                                      "/endpoint/420/edit")
    query = models.CharField(null=True, blank=True, max_length=5000,
                             help_text="The query string, the question mark should be omitted."
                                       "For example 'group=4&team=8'")
    fragment = models.CharField(null=True, blank=True, max_length=500,
                                help_text="The fragment identifier which follows the hash mark. The hash mark should "
                                          "be omitted. For example 'section-13', 'paragraph-2'.")
    product = models.ForeignKey(Product, null=True, blank=True, )
    class Meta:
        ordering = ['product', 'protocol', 'host', 'path', 'query', 'fragment']
    def __unicode__(self):
        # Reassemble the parts into a URL string. The joining below mirrors
        # the stdlib urlunsplit algorithm by hand; note urlunsplit itself is
        # imported but unused.
        from urlparse import urlunsplit, uses_netloc
        netloc = self.host
        scheme = self.protocol
        url = self.path if self.path else ''
        query = self.query
        fragment = self.fragment
        if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
            if url and url[:1] != '/': url = '/' + url
            if scheme:
                url = '//' + (netloc or '') + url
            else:
                url = (netloc or '') + url
        if scheme:
            url = scheme + ':' + url
        if query:
            url = url + '?' + query
        if fragment:
            url = url + '#' + fragment
        return url
    def finding_count(self):
        # All active, verified findings that reference this endpoint.
        findings = Finding.objects.filter(endpoints__in=[self],
                                          active=True,
                                          verified=True)
        return findings.count()
    def active_findings(self):
        # Open (unmitigated, non-duplicate, non-template) findings for this
        # endpoint, most severe first.
        return Finding.objects.filter(endpoints__in=[self],
                                      active=True,
                                      verified=True,
                                      mitigated__isnull=True,
                                      false_p=False,
                                      duplicate=False,
                                      is_template=False).order_by('numerical_severity')
class Notes(models.Model):
    """A free-form note; attached to tests/findings via M2M relations."""
    entry = models.CharField(max_length=2400)
    date = models.DateTimeField(null=False, editable=False,
                                default=get_current_datetime)
    author = models.ForeignKey(User, editable=False)
    class Meta:
        # Newest notes first.
        ordering = ['-date']
    def __unicode__(self):
        return self.entry
class Development_Environment(models.Model):
    """Lookup table naming the environment a test ran in (dev, prod, ...)."""
    name = models.CharField(max_length=200)
    def __unicode__(self):
        return self.name
class Test(models.Model):
    """One test run within an engagement (type, schedule, environment)."""
    engagement = models.ForeignKey(Engagement, editable=False)
    test_type = models.ForeignKey(Test_Type)
    target_start = models.DateTimeField()
    target_end = models.DateTimeField()
    estimated_time = models.TimeField(null=True, blank=True, editable=False)
    actual_time = models.TimeField(null=True, blank=True, editable=False, )
    percent_complete = models.IntegerField(null=True, blank=True,
                                           editable=True)
    notes = models.ManyToManyField(Notes, null=True, blank=True,
                                   editable=False)
    environment = models.ForeignKey(Development_Environment, null=True,
                                    blank=False)
    def __unicode__(self):
        return "%s (%s)" % (self.test_type,
                            self.target_start.strftime("%b %d, %Y"))
class VA(models.Model):
    """A vulnerability-assessment run against an address."""
    address = models.TextField(editable=False, default="none")
    user = models.ForeignKey(User, editable=False)
    result = models.ForeignKey(Test, editable=False, null=True, blank=True)
    status = models.BooleanField(default=False, editable=False)
    start = models.CharField(max_length=100)
class Finding(models.Model):
    """A single security finding reported against a Test."""
    title = models.TextField(max_length=1000)
    date = models.DateField(default=get_current_date)
    cwe = models.IntegerField(default=0, null=True, blank=True)
    url = models.TextField(null=True, blank=True, editable=False)
    severity = models.CharField(max_length=200)
    description = models.TextField()
    mitigation = models.TextField()
    impact = models.TextField()
    # will deprecate in version 1.0.3 (superseded by the M2M below)
    endpoint = models.TextField()
    endpoints = models.ManyToManyField(Endpoint, null=True, blank=True, )
    references = models.TextField(null=True, blank=True, db_column="refs")
    test = models.ForeignKey(Test, editable=False)
    is_template = models.BooleanField(default=False)
    active = models.BooleanField(default=True)
    verified = models.BooleanField(default=True)
    false_p = models.BooleanField(default=False, verbose_name="False Positive")
    duplicate = models.BooleanField(default=False)
    out_of_scope = models.BooleanField(default=False)
    thread_id = models.IntegerField(default=0, editable=False)
    # Set when the finding is fixed; null means still open.
    mitigated = models.DateTimeField(editable=False, null=True, blank=True)
    reporter = models.ForeignKey(User, editable=False)
    notes = models.ManyToManyField(Notes, null=True, blank=True,
                                   editable=False)
    numerical_severity = models.CharField(max_length=4)
    class Meta:
        ordering = ('numerical_severity', '-date')
    def __unicode__(self):
        return self.title
    def status(self):
        # Human-readable, comma-separated status labels derived from the
        # boolean flags plus any linked risk acceptances.
        status = []
        if self.active:
            status += ['Active']
        if self.verified:
            status += ['Verified']
        if self.mitigated:
            status += ['Mitigated']
        if self.false_p:
            status += ['False Positive']
        if self.out_of_scope:
            status += ['Out Of Scope']
        if self.duplicate:
            status += ['Duplicate']
        if len(self.risk_acceptance_set.all()) > 0:
            status += ['Accepted']
        if not len(status):
            status += ['Unknown']
        return ", ".join([str(s) for s in status])
    def age(self):
        # Days the finding has been (or was, if mitigated) open, clamped
        # to a minimum of 0. Both sides are reduced to dates in the
        # configured local timezone before subtracting.
        if self.mitigated:
            days = (self.mitigated.date() - localtz.localize(datetime.combine(self.date,
                                                                              datetime.min.time())).date()).days
        else:
            days = (get_current_date() - localtz.localize(datetime.combine(self.date, datetime.min.time())).date()).days
        return days if days > 0 else 0
    def long_desc(self):
        # Wiki-markup style long description used for exports/reports.
        long_desc = ''
        long_desc += '=== ' + self.title + ' ===\n\n'
        long_desc += '*Severity:* ' + self.severity + '\n\n'
        long_desc += '*Systems*: \n' + self.endpoint + '\n\n'
        long_desc += '*Description*: \n' + self.description + '\n\n'
        long_desc += '*Impact*: \n' + self.impact + '\n\n'
        long_desc += '*References*:' + self.references
        return long_desc
    def clean(self):
        # Backfill placeholder text into empty free-text fields so
        # templates/reports never render blank sections.
        no_check = ["test", "reporter"]
        bigfields = ["description", "mitigation", "references", "impact",
                     "endpoint", "url"]
        for field_obj in self._meta.fields:
            field = field_obj.name
            if field not in no_check:
                val = getattr(self, field)
                if not val and field == "title":
                    setattr(self, field, "No title given")
                if not val and field in bigfields:
                    setattr(self, field, "No %s given" % field)
class Check_List(models.Model):
    """Per-engagement security checklist: one status field per review
    area, each paired with the findings that justify the status."""
    session_management = models.CharField(max_length=50, default='none')
    session_issues = models.ManyToManyField(Finding,
                                            related_name='session_issues',
                                            blank=True, null=True)
    encryption_crypto = models.CharField(max_length=50, default='none')
    crypto_issues = models.ManyToManyField(Finding,
                                           related_name='crypto_issues',
                                           blank=True, null=True)
    configuration_management = models.CharField(max_length=50, default='')
    config_issues = models.ManyToManyField(Finding,
                                           related_name='config_issues',
                                           blank=True, null=True)
    authentication = models.CharField(max_length=50, default='none')
    auth_issues = models.ManyToManyField(Finding,
                                         related_name='auth_issues',
                                         blank=True, null=True)
    authorization_and_access_control = models.CharField(max_length=50,
                                                        default='none')
    author_issues = models.ManyToManyField(Finding,
                                           related_name='author_issues',
                                           blank=True, null=True)
    data_input_sanitization_validation = models.CharField(max_length=50,
                                                          default='none')
    data_issues = models.ManyToManyField(Finding, related_name='data_issues',
                                         blank=True, null=True)
    sensitive_data = models.CharField(max_length=50, default='none')
    sensitive_issues = models.ManyToManyField(Finding,
                                              related_name='sensitive_issues',
                                              blank=True, null=True)
    other = models.CharField(max_length=50, default='none')
    other_issues = models.ManyToManyField(Finding, related_name='other_issues',
                                          blank=True, null=True)
    engagement = models.ForeignKey(Engagement, editable=False,
                                   related_name='eng_for_check')
    @staticmethod
    def get_status(pass_fail):
        # Map a checklist answer to a UI context class (presumably
        # Bootstrap's success/danger/warning -- confirm against templates).
        if pass_fail == 'Pass':
            return 'success'
        elif pass_fail == 'Fail':
            return 'danger'
        else:
            return 'warning'
class BurpRawRequestResponse(models.Model):
    """Raw base64-encoded HTTP request/response pair captured by Burp,
    linked to a finding."""
    finding = models.ForeignKey(Finding, blank=True, null=True)
    burpRequestBase64 = models.BinaryField()
    burpResponseBase64 = models.BinaryField()
class Risk_Acceptance(models.Model):
    """A signed-off acceptance of one or more findings, backed by an
    uploaded document."""
    path = models.FileField(upload_to='risk/%Y/%m/%d',
                            editable=False, null=False,
                            blank=False, verbose_name="Risk Acceptance File")
    accepted_findings = models.ManyToManyField(Finding, null=False)
    reporter = models.ForeignKey(User, editable=False)
    notes = models.ManyToManyField(Notes, editable=False)
    created = models.DateTimeField(null=False, editable=False,
                                   default=now)
    def __unicode__(self):
        return "Risk Acceptance added on %s" % self.created.strftime(
            "%b %d, %Y")
    def filename(self):
        # Basename of the uploaded file; guarded against a missing file.
        return os.path.basename(self.path.name) \
            if self.path is not None else ''
# Expose the core models in the Django admin site.
admin.site.register(Test)
admin.site.register(Finding)
admin.site.register(Engagement)
admin.site.register(Risk_Acceptance)
admin.site.register(Check_List)
admin.site.register(Test_Type)
admin.site.register(Endpoint)
admin.site.register(Product)
|
dimagi/commcare-hq
|
corehq/apps/data_interfaces/migrations/0006_case_rule_refactor.py
|
# Generated by Django 1.10.6 on 2017-04-04 12:54
import django.db.models.deletion
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration for the case-rule refactor.

    Creates the new criteria/action definition models, wires them to
    CaseRuleCriteria/CaseRuleAction and AutomaticUpdateRule, and adds a
    ``migrated`` flag to automaticupdaterule. Generated code -- do not
    hand-edit the operations list.
    """
    dependencies = [
        ('data_interfaces', '0005_remove_match_type_choices'),
    ]
    operations = [
        migrations.CreateModel(
            name='CaseRuleAction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='CaseRuleCriteria',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='ClosedParentDefinition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('identifier', models.CharField(default='parent', max_length=126)),
                ('relationship_id', models.PositiveSmallIntegerField(default=1)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CustomActionDefinition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=126)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CustomMatchDefinition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=126)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='MatchPropertyDefinition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('property_name', models.CharField(max_length=126)),
                ('property_value', models.CharField(max_length=126, null=True)),
                ('match_type', models.CharField(max_length=15)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='UpdateCaseDefinition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('properties_to_update', jsonfield.fields.JSONField(default=list)),
                ('close_case', models.BooleanField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='automaticupdaterule',
            name='migrated',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='caserulecriteria',
            name='closed_parent_definition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.ClosedParentDefinition'),
        ),
        migrations.AddField(
            model_name='caserulecriteria',
            name='custom_match_definition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.CustomMatchDefinition'),
        ),
        migrations.AddField(
            model_name='caserulecriteria',
            name='match_property_definition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.MatchPropertyDefinition'),
        ),
        migrations.AddField(
            model_name='caserulecriteria',
            name='rule',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='data_interfaces.AutomaticUpdateRule'),
        ),
        migrations.AddField(
            model_name='caseruleaction',
            name='custom_action_definition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.CustomActionDefinition'),
        ),
        migrations.AddField(
            model_name='caseruleaction',
            name='rule',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='data_interfaces.AutomaticUpdateRule'),
        ),
        migrations.AddField(
            model_name='caseruleaction',
            name='update_case_definition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.UpdateCaseDefinition'),
        ),
    ]
|
CountZer0/PipelineConstructionSet
|
python/maya/site-packages/pymel-1.0.5/pymel/internal/cmdcache.py
|
# Built-in imports
import os, re, inspect, keyword
# Maya imports
import maya.cmds as cmds
import maya.mel as mm
# PyMEL imports
import pymel.util as util
import pymel.versions as versions
# Module imports
from . import plogging
from . import startup
_logger = plogging.getLogger(__name__)
moduleNameShortToLong = {
'modeling' : 'Modeling',
'rendering' : 'Rendering',
'effects' : 'Effects',
'animation' : 'Animation',
'windows' : 'Windows',
'system' : 'System',
'general' : 'General',
'language' : 'Language'
}
#: these are commands which need to be manually added to the list parsed from the docs
moduleCommandAdditions = {
'windows' : ['connectControl', 'deleteUI','uiTemplate','setUITemplate','renameUI','setParent','objectTypeUI','lsUI', 'disable', 'dimWhen'],
'general' : ['encodeString', 'format', 'assignCommand', 'commandEcho', 'condition', 'evalDeferred', 'isTrue', 'itemFilter', 'itemFilterAttr',
'itemFilterRender', 'itemFilterType', 'pause', 'refresh', 'stringArrayIntersector', 'selectionConnection']
}
#: secondary flags can only be used in conjunction with other flags so we must exclude them when creating classes from commands.
#: because the maya docs do not specify in any parsable way which flags are secondary modifiers, we must maintain this dictionary.
#: once this list is reliable enough and includes default values, we can use them as keyword arguments in the class methods that they modify.
secondaryFlags = {
'xform' : ( ( 'absolute', None,[] ),
( 'relative', None,[] ),
( 'euler', None,['relative'] ),
( 'objectSpace', True, ['scalePivot', 'rotatePivot', 'rotateAxis', 'rotation', 'rotateTranslation', 'translation', 'matrix', 'boundingBox', 'boundingBoxInvisible', 'pivots'] ),
( 'worldSpace', False, ['scalePivot', 'rotatePivot', 'rotateAxis', 'rotation', 'rotateTranslation', 'translation', 'matrix', 'boundingBox', 'boundingBoxInvisible', 'pivots'] ),
( 'preserve', None,['scalePivot', 'rotatePivot', 'rotateOrder', 'rotateAxis', 'centerPivots'] ),
( 'worldSpaceDistance', None,['scalePivot', 'rotatePivot', 'scaleTranslation', 'rotateTranslation', 'translation', 'pivots'] )
),
'file' : ( ( 'loadAllDeferred', False, ['open'] ),
( 'loadNoReferences', False, ['open', 'i', 'reference', 'loadReference'] ),
( 'loadReferenceDepth', None, ['open', 'i', 'reference', 'loadReference'] ),
( 'force', False, ['open', 'newFile', 'save', 'exportAll', 'exportSelected', 'exportAnim',
'exportSelectedAnim', 'exportAnimFromReference', 'exportSelectedAnimFromReference' ] ),
( 'constructionHistory', True, ['exportSelected'] ),
( 'channels', True, ['exportSelected'] ),
( 'constraints', True, ['exportSelected'] ),
( 'expressions', True, ['exportSelected'] ),
( 'shader', True, ['exportSelected'] ),
( 'defaultNamespace', False, ['reference', 'i'] ),
( 'deferReference', False, ['reference', 'i'] ),
( 'editCommand', None, ['cleanReference'] ),
( 'groupReference', False, ['reference', 'i'] ),
( 'groupLocator', None,['reference'] ),
( 'groupName', None,['reference', 'i'] ),
( 'namespace', None,['reference', 'exportAsReference', 'namespace'] ),
( 'referenceNode', None,['reference', 'exportAnimFromReference', 'exportSelectedAnimFromReference'] ),
( 'renameAll', None,['i'] ),
( 'renamingPrefix', None,['reference', 'i','exportAsReference'] ),
#( 'saveTextures', "unlessRef", ['saveAs']),
( 'swapNamespace', None, ['reference', 'i'] ),
( 'sharedReferenceFile', None, ['reference'] ),
( 'sharedNodes', None, ['reference'] ),
( 'returnNewNodes', False, ['open', 'reference', 'i', 'loadReference' ] ),
#( 'loadSettings', ),
( 'preserveReferences', False, ['i', 'exportAll', 'exportSelected'] ),
( 'preSaveScript', None, ['save'] ),
( 'postSaveScript', None, ['save'] ),
( 'type', None, ['open', 'newFile', 'save', 'exportAll', 'exportSelected', 'exportAnim',
'exportSelectedAnim', 'exportAnimFromReference', 'exportSelectedAnimFromReference' ] ),
),
'joint' : ( ( 'absolute', True, ['position'] ),
( 'relative', True, ['position'] ) )
}
UI_COMMANDS ="""attrColorSliderGrp attrControlGrp
attrEnumOptionMenu attrEnumOptionMenuGrp
attrFieldGrp attrFieldSliderGrp
attrNavigationControlGrp attributeMenu
colorIndexSliderGrp colorSliderButtonGrp
colorSliderGrp columnLayout
colorEditor floatField
floatFieldGrp floatScrollBar
floatSlider floatSlider2
floatSliderButtonGrp floatSliderGrp
frameLayout iconTextButton
iconTextCheckBox iconTextRadioButton
iconTextRadioCollection iconTextScrollList
iconTextStaticLabel intField
intFieldGrp intScrollBar
intSlider intSliderGrp
paneLayout panel
radioButton radioButtonGrp
radioCollection radioMenuItemCollection
symbolButton symbolCheckBox
textCurves textField
textFieldButtonGrp textFieldGrp
text textScrollList
toolButton toolCollection
window blendShapeEditor
blendShapePanel button
checkBox checkBoxGrp
confirmDialog fontDialog
formLayout menu
menuBarLayout menuEditor
menuItem menuSet
promptDialog scrollField
scrollLayout scriptedPanel
scriptedPanelType shelfButton
shelfLayout shelfTabLayout
tabLayout outlinerEditor
optionMenu outlinerPanel
optionMenuGrp animCurveEditor
animDisplay separator
visor layout
layoutDialog layerButton
hyperGraph hyperPanel
hyperShade rowColumnLayout
rowLayout renderLayerButton
renderWindowEditor glRenderEditor
scriptTable keyframeStats
keyframeOutliner canvas
channelBox gradientControl
gradientControlNoAttr gridLayout
messageLine popupMenu
modelEditor modelPanel
helpLine hardwareRenderPanel
image nodeIconButton
commandLine progressBar
defaultLightListCheckBox exclusiveLightCheckBox
shellField clipSchedulerOutliner
clipEditor deviceEditor
devicePanel dynRelEdPanel
dynRelEditor dynPaintEditor
nameField cmdScrollFieldExecuter
cmdScrollFieldReporter cmdShell
nameField palettePort """.split()
#: creation commands whose names do not match the type of node they return require this dict
#: to resolve which command the class should wrap
nodeTypeToNodeCommand = {
#'failed' : 'clip',
#'failed' : 'clipSchedule',
'airField' : 'air',
'dragField' : 'drag',
'emitter' : 'emitter',
'turbulenceField' : 'turbulence',
#'failed' : 'effector',
'volumeAxisField' : 'volumeAxis',
'uniformField' : 'uniform',
'gravityField' : 'gravity',
#'failed' : 'event',
#'failed' : 'pointCurveConstraint',
#'failed' : 'deformer',
#'failed' : 'constrain',
'locator' : 'spaceLocator',
'vortexField' : 'vortex',
'makeNurbTorus' : 'torus',
'makeNurbCone' : 'cone',
'makeNurbCylinder' : 'cylinder',
'nurbsCurve' : 'curve', # returns a single transform, but creates a nurbsCurve
'makeNurbSphere' : 'sphere',
'makeNurbCircle' : 'circle',
'makeNurbPlane' : 'nurbsPlane',
'makeNurbsSquare' : 'nurbsSquare',
'makeNurbCube' : 'nurbsCube',
'skinPercent' : 'skinCluster',
'file' : None, # prevent File node from using cmds.file
'nurbsSurface' : 'surface',
'annotationShape' : 'annotate',
'condition' : None, # prevent Condition node from using cmds.condition (which is for script conditions)
}
# Hand-maintained overrides applied on top of the command info parsed from
# the Maya docs / `cmds.help` output.
cmdlistOverrides = {}
#util.setCascadingDictItem( cmdlistOverrides, ( 'optionMenu', 'shortFlags', 'sl', 'modes' ), ['create', 'query', 'edit'] )
util.setCascadingDictItem( cmdlistOverrides, ( 'optionMenu', 'flags', 'select', 'modes' ), ['create', 'query', 'edit'] )
util.setCascadingDictItem( cmdlistOverrides, ( 'ikHandle', 'flags', 'jointList', 'modes' ), ['query'] )
#util.setCascadingDictItem( cmdlistOverrides, ( 'ikHandle', 'shortFlags', 'jl', 'modes' ), ['query'] )
util.setCascadingDictItem( cmdlistOverrides, ( 'keyframe', 'flags', 'index', 'args' ), 'timeRange' ) # make sure this is a time range so it gets proper slice syntax
# Need to override this, rather than having it detected from testNodeCmd, because
# it crashes testNodeCmd
util.setCascadingDictItem( cmdlistOverrides, ( 'pointOnPolyConstraint', 'resultNeedsUnpacking', ), True )
def getCmdInfoBasic( command ):
    """Build a minimal command-info dict for *command* by parsing the output of
    ``cmds.help(command)``.

    Returns a dict with keys 'flags', 'shortFlags', 'description', 'example',
    'type', and (only when flag names collided with python keywords)
    'removedFlags'.  Each entry in 'flags' records the long/short names, the
    python type(s) of its argument(s), the mel argument count, and an empty
    docstring to be filled in later from the html docs.
    """
    # mel doc type names -> python types ('name' becomes the string 'PyNode',
    # resolved later; unknown names fall through as uncapitalized strings)
    typemap = {
        'string' : unicode,
        'length' : float,
        'float' : float,
        'angle' : float,
        'int' : int,
        'unsignedint' : int,
        'on|off' : bool,
        'script' : callable,
        'name' : 'PyNode'
    }
    flags = {}
    shortFlags = {}
    removedFlags = {}
    try:
        lines = cmds.help( command ).split('\n')
    except RuntimeError:
        # no mel help available for this command; return the empty skeleton
        pass
    else:
        synopsis = lines.pop(0)
        # certain commands on certain platforms have an empty first line
        if not synopsis:
            synopsis = lines.pop(0)
        #_logger.debug(synopsis)
        if lines:
            lines.pop(0) # 'Flags'
        #_logger.debug(lines)
        for line in lines:
            line = line.replace( '(Query Arg Mandatory)', '' )
            line = line.replace( '(Query Arg Optional)', '' )
            tokens = line.split()
            # '(multi-use)' marker may appear anywhere in the token list
            try:
                tokens.remove('(multi-use)')
                multiuse = True
            except ValueError:
                multiuse = False
            #_logger.debug(tokens)
            # a flag line looks like: -sn -shortName <argType> [<argType> ...]
            if len(tokens) > 1 and tokens[0].startswith('-'):
                args = [ typemap.get(x.lower(), util.uncapitalize(x) ) for x in tokens[2:] ]
                numArgs = len(args)
                # flags with no args in mel require a boolean val in python
                if numArgs == 0:
                    args = bool
                    # numArgs will stay at 0, which is the number of mel arguments.
                    # this flag should be renamed to numMelArgs
                    #numArgs = 1
                elif numArgs == 1:
                    args = args[0]
                longname = str(tokens[1][1:])
                shortname = str(tokens[0][1:])
                # flags whose long (or short) name is a python keyword cannot be
                # passed as keyword args; keep only the non-keyword spelling
                if longname in keyword.kwlist:
                    removedFlags[ longname ] = shortname
                    longname = shortname
                elif shortname in keyword.kwlist:
                    removedFlags[ shortname ] = longname
                    shortname = longname
                # sometimes the longname is empty, so we'll use the shortname for both
                elif longname == '':
                    longname = shortname
                flags[longname] = { 'longname' : longname, 'shortname' : shortname, 'args' : args, 'numArgs' : numArgs, 'docstring' : '' }
                if multiuse:
                    flags[longname].setdefault('modes', []).append('multiuse')
                shortFlags[shortname] = longname
    #except:
    #    pass
    #_logger.debug("could not retrieve command info for", command)
    res = { 'flags': flags, 'shortFlags': shortFlags, 'description' : '', 'example': '', 'type' : 'other' }
    if removedFlags:
        res['removedFlags'] = removedFlags
    return res
def getCmdInfo( command, version, python=True ):
    """Return a full command-info dict for *command*, merging mel-help data
    with data parsed from the installed maya html documentation.

    Since many maya Python commands are builtins we can't use getargspec on
    them, and most use keyword args whose precise meaning we need (whether
    they can be used with edit or query flags, the shortnames of flags, etc.),
    so we have to parse the maya docs.

    :param command: maya command name
    :param version: maya version string used to locate the docs
    :param python: parse the CommandsPython docs if True, mel Commands docs otherwise
    :return: dict with 'flags', 'shortFlags', 'description', 'example' and
        optionally 'removedFlags'; falls back to ``getCmdInfoBasic(command)``
        when the html doc file cannot be opened.
    """
    from parsers import CommandDocParser, mayaDocsLocation

    basicInfo = getCmdInfoBasic(command)

    try:
        docloc = mayaDocsLocation(version)
        if python:
            docloc = os.path.join( docloc , 'CommandsPython/%s.html' % (command) )
        else:
            docloc = os.path.join( docloc , 'Commands/%s.html' % (command) )
        f = open( docloc )
        parser = CommandDocParser(command)
        parser.feed( f.read() )
        f.close()

        example = parser.example
        example = example.rstrip()

        # start with basic info, gathered using mel help command, then update
        # with info parsed from docs.  we copy because we need access to the
        # original basic info below
        basicFlags = basicInfo.get('flags', {})
        flags = basicInfo['flags'].copy()
        flags.update( parser.flags )

        # if we have a "true" mel boolean flag, then getCmdInfoBasic will return
        # numArgs == 0, but parsing the PYTHON docs will return a numArgs of 1;
        # keep the numArgs of 0
        for flag, flagInfo in parser.flags.iteritems():
            if flagInfo.get('args') == bool and flagInfo.get('numArgs') == 1:
                basicFlagInfo = basicFlags.get(flag, {})
                if (basicFlagInfo.get('args') == bool
                        and basicFlagInfo.get('numArgs') == 0):
                    flagInfo['numArgs'] = 0

        if command in secondaryFlags:
            for secondaryFlag, defaultValue, modifiedList in secondaryFlags[command]:
                #_logger.debug(command, "2nd", secondaryFlag)
                flags[secondaryFlag]['modified'] = modifiedList
                for primaryFlag in modifiedList:
                    #_logger.debug(command, "1st", primaryFlag)
                    if 'secondaryFlags' in parser.flags[primaryFlag]:
                        flags[primaryFlag]['secondaryFlags'].append(secondaryFlag)
                    else:
                        flags[primaryFlag]['secondaryFlags'] = [secondaryFlag]

        # args and numArgs is more reliable from mel help command than from
        # parsed docs, so, here we put that back in place and create
        # shortflags.  also use original 'multiuse' info...
        for flag, flagData in flags.items():
            basicFlagData = basicFlags.get(flag)
            if basicFlagData:
                # BUG FIX: the key is spelled 'numArgs' (see getCmdInfoBasic);
                # the previous test for lowercase 'numargs' was always False,
                # which silently disabled this restore step.
                if 'args' in basicFlagData and 'numArgs' in basicFlagData:
                    flagData['args'] = basicFlagData['args']
                    flagData['numArgs'] = basicFlagData['numArgs']
                if ( 'multiuse' in basicFlagData.get('modes', [])
                        and 'multiuse' not in flagData.get('modes', [])):
                    flagData.setdefault('modes', []).append('multiuse')
        shortFlags = basicInfo['shortFlags']
        res = { 'flags': flags,
                'shortFlags': shortFlags,
                'description' : parser.description,
                'example': example }
        try:
            res['removedFlags'] = basicInfo['removedFlags']
        except KeyError: pass
        return res

    except IOError:
        # html doc file missing for this command: best effort with mel help data
        _logger.debug("could not find docs for %s" % command)
        return basicInfo
        #raise IOError, "cannot find maya documentation directory"
def fixCodeExamples(style='maya', force=False):
    """cycle through all examples from the maya docs, replacing maya.cmds with pymel and inserting pymel output.

    Results are accumulated incrementally in CmdProcessedExamplesCache so a
    crash does not lose prior work; pass ``force=True`` to redo commands that
    were already processed.  ``style='doctest'`` emits ``>>>``-prefixed lines,
    anything else emits ``# Result: ... #`` markers.

    NOTE: this can only be run from gui mode
    WARNING: back up your preferences before running

    TODO: auto backup and restore of maya prefs
    """
    # snapshot interactive settings that executing the examples may clobber;
    # they are restored at the end of this function
    manipOptions = cmds.manipOptions( q=1, handleSize=1, scale=1 )
    animOptions = []
    animOptions.append( cmds.animDisplay( q=1, timeCode=True ) )
    animOptions.append( cmds.animDisplay( q=1, timeCodeOffset=True ) )
    animOptions.append( cmds.animDisplay( q=1, modelUpdate=True ) )

    openWindows = cmds.lsUI(windows=True)
    examples = CmdExamplesCache().read()
    processedExamples = CmdProcessedExamplesCache().read()
    processedExamples = {} if processedExamples is None else processedExamples
    allCmds = set(examples.keys())
    # put commands that require manual interaction first
    manualCmds = ['fileBrowserDialog', 'fileDialog', 'fileDialog2', 'fontDialog']
    # commands whose examples are stored but never evaluated (crashes / dialogs)
    skipCmds = ['colorEditor', 'emit', 'finder', 'doBlur', 'messageLine', 'renderWindowEditor',
                'ogsRender', 'webBrowser', 'deleteAttrPattern', 'grabColor']
    allCmds.difference_update(manualCmds)
    sortedCmds = manualCmds + sorted(allCmds)
    for command in sortedCmds:
        example = examples[command]

        if not force and command in processedExamples:
            _logger.info("%s: already completed. skipping." % command)
            continue

        _logger.info("Starting command %s", command)

        if style == 'doctest' :
            DOC_TEST_SKIP = ' #doctest: +SKIP'
        else:
            DOC_TEST_SKIP = ''

        # change from cmds to pymel
        reg = re.compile(r'\bcmds\.')
        example = example.replace('import maya.cmds as cmds', 'import pymel.core as pm' + DOC_TEST_SKIP, 1)
        example = reg.sub( 'pm.', example )
        #example = example.replace( 'import maya.cmds as cmds', 'import pymel as pm\npm.newFile(f=1) #fresh scene' )

        lines = example.split('\n')
        if len(lines)==1:
            _logger.info("removing empty example for command %s", command)
            examples.pop(command)
            processedExamples[command] = ''
            # write out after each success so that if we crash we don't have to start from scratch
            CmdProcessedExamplesCache().write(processedExamples)
            continue

        if command in skipCmds:
            # store the translated-but-unevaluated example
            # NOTE(review): there is no `continue` here, so processing falls
            # through and the entry is overwritten below with `evaluate=False`
            # output — confirm this is intentional.
            example = '\n'.join( lines )
            processedExamples[command] = example
            # write out after each success so that if we crash we don't have to start from scratch
            CmdProcessedExamplesCache().write(processedExamples)

        #lines.insert(1, 'pm.newFile(f=1) #fresh scene')
        # create a fresh scene. this does not need to be in the docstring unless we plan on using it in doctests, which is probably unrealistic
        cmds.file(new=1,f=1)

        newlines = []
        statement = []

        # narrowed down the commands that cause maya to crash to these prefixes
        if re.match( '(dis)|(dyn)|(poly)', command) :
            evaluate = False
        elif command in skipCmds:
            evaluate = False
        else:
            evaluate = True

        # gives a little leniency for where spaces are placed in the result line
        resultReg = re.compile('# Result:\s*(.*) #$')
        try: # funky things can happen when executing maya code: some exceptions somehow occur outside the eval/exec
            for i, line in enumerate(lines):
                res = None
                # replace with pymel results '# Result: 1 #'
                m = resultReg.match(line)
                if m:
                    # keep the docs' result line only when we are not
                    # re-evaluating; otherwise the real result is appended below
                    if evaluate is False:
                        line = m.group(1)
                        newlines.append(' ' + line)
                else:
                    if evaluate:
                        # buffer continuation lines of a compound statement
                        if line.strip().endswith(':') or line.startswith(' ') or line.startswith('\t'):
                            statement.append(line)
                        else:
                            # evaluate the compiled statement using exec, which can do multi-line if statements and so on
                            if statement:
                                try:
                                    #_logger.debug("executing %s", statement)
                                    exec( '\n'.join(statement) )
                                    # reset statement
                                    statement = []
                                except Exception, e:
                                    _logger.info("stopping evaluation %s", str(e))# of %s on line %r" % (command, line)
                                    evaluate = False
                            try:
                                _logger.debug("evaluating: %r" % line)
                                res = eval( line )
                                #if res is not None: _logger.info("result", repr(repr(res)))
                                #else: _logger.info("no result")
                            except:
                                #_logger.debug("failed evaluating:", str(e))
                                try:
                                    exec( line )
                                except (Exception, TypeError), e:
                                    _logger.info("stopping evaluation %s", str(e))# of %s on line %r" % (command, line)
                                    evaluate = False
                    if style == 'doctest':
                        if line.startswith(' ') or line.startswith('\t'):
                            newlines.append(' ... ' + line )
                        else:
                            newlines.append(' >>> ' + line + DOC_TEST_SKIP )

                        if res is not None:
                            newlines.append( ' ' + repr(res) )
                    else:
                        newlines.append(' ' + line )
                        if res is not None:
                            newlines.append( ' # Result: %r #' % (res,) )

            if evaluate:
                _logger.info("successful evaluation! %s", command)

            example = '\n'.join( newlines )
            processedExamples[command] = example
        except Exception, e:
            raise
            #_logger.info("FAILED: %s: %s" % (command, e) )
        else:
            # write out after each success so that if we crash we don't have to start from scratch
            CmdProcessedExamplesCache().write(processedExamples)

        # cleanup opened windows
        for ui in set(cmds.lsUI(windows=True)).difference(openWindows):
            try: cmds.deleteUI(ui, window=True)
            except: pass

    _logger.info("Done Fixing Examples")

    # restore manipulators and anim options
    print manipOptions
    cmds.manipOptions( handleSize=manipOptions[0], scale=manipOptions[1] )
    print animOptions
    cmds.animDisplay( e=1, timeCode=animOptions[0], timeCodeOffset=animOptions[1], modelUpdate=animOptions[2])

    #CmdExamplesCache(examples)
def getModuleCommandList( category, version=None ):
    """Return the command names documented under *category* by parsing the
    maya docs for the given *version* (current version when None)."""
    from parsers import CommandModuleDocParser
    return CommandModuleDocParser(category, version).parse()
def getCallbackFlags(cmdInfo):
    """Use parsed data and naming convention to determine which flags are callbacks.

    A flag is considered a callback when its argument type is a script
    (mel 'script' doc type or python ``callable``) or its name contains
    'command'.  Returns a flat list alternating long and short names.
    """
    if 'flags' not in cmdInfo:
        return []
    callbackFlags = []
    for flagName, flagData in cmdInfo['flags'].items():
        takesScript = flagData['args'] in ['script', callable]
        if takesScript or 'command' in flagName.lower():
            callbackFlags.extend([flagName, flagData['shortname']])
    return callbackFlags
def getModule(funcName, knownModuleCmds):
    """Determine to which pymel module *funcName* belongs.

    Returns None for commands that would shadow python builtins, 'context'
    for tool-context commands (by naming convention), the matching key of
    *knownModuleCmds* when listed there, 'runtime' for mel Run Time Commands,
    and 'other' as the fallback.
    """
    # these collide with python builtins and are deliberately left unassigned
    if funcName in ['eval', 'file', 'filter', 'help', 'quit']:
        return None
    # naming convention for tool contexts
    if funcName.startswith('ctx') or funcName.endswith('Ctx') or funcName.endswith('Context'):
        return 'context'
    #elif funcName in self.uiClassList:
    #    module = 'uiClass'
    #elif funcName in nodeHierarchyTree or funcName in nodeTypeToNodeCommand.values():
    #    module = 'node'
    for moduleName, commands in knownModuleCmds.iteritems():
        if funcName in commands:
            return moduleName
    # not in any known doc category: ask mel whether it is a runtime command
    if mm.eval('whatIs "%s"' % funcName ) == 'Run Time Command':
        return 'runtime'
    return 'other'
#-----------------------------------------------
# Command Help Documentation
#-----------------------------------------------

# Memoized result of cmdArgMakers(); populated lazily on first call.
_cmdArgMakers = {}
def cmdArgMakers(force=False):
    """Return a mapping of command name -> tuple of zero-arg factory functions.

    Each factory creates a scene object suitable as a positional argument when
    test-driving that command.  The result is memoized in the module-level
    ``_cmdArgMakers``; pass ``force=True`` to rebuild it.
    """
    global _cmdArgMakers

    if _cmdArgMakers and not force:
        return _cmdArgMakers

    # ---- factories for the scene objects the tested commands consume ----
    def makeCircle():
        return cmds.circle()[0]

    def makeEp():
        # an edit point on a fresh nurbs circle
        return makeCircle() + '.ep[1]'

    def makeSphere():
        return cmds.polySphere()[0]

    def makeCube():
        return cmds.polyCube()[0]

    def makeIk():
        startJoint = cmds.joint()
        endJoint = cmds.joint()
        return cmds.ikHandle(startJoint, endJoint, solver='ikRPsolver')[0]

    def makeJoint():
        return cmds.joint()

    def makeSkin():
        jointA = cmds.joint()
        jointB = cmds.joint()
        sphere = makeSphere()
        return cmds.skinCluster(jointA, jointB, sphere)[0]

    _cmdArgMakers = {
        'tangentConstraint'   : ( makeCircle, makeCube ),
        'poleVectorConstraint': ( makeSphere, makeIk ),
        'pointCurveConstraint': ( makeEp, ),
        'skinPercent'         : ( makeJoint, makeJoint, makeSphere ),
    } if False else \
        { 'tangentConstraint'    : ( makeCircle, makeCube ),
          'poleVectorConstraint' : ( makeSphere, makeIk ),
          'pointCurveConstraint' : ( makeEp, ),
          'skinCluster'          : ( makeJoint, makeJoint, makeSphere ),
        }

    # every real constraint command defaults to (sphere, cube) arguments
    constraintCmds = [x for x in dir(cmds)
                      if x.endswith('onstraint')
                      and not cmds.runTimeCommand(x, q=1, exists=1)
                      and x != 'polySelectConstraint']

    for constrCmd in constraintCmds:
        _cmdArgMakers.setdefault(constrCmd, ( makeSphere, makeCube ))

    return _cmdArgMakers
def nodeCreationCmd(func, nodeType):
    """Invoke creation command *func*, first building any scene objects that
    *nodeType* requires as positional arguments (see cmdArgMakers)."""
    makers = cmdArgMakers().get(nodeType, [])
    # realize each factory into an actual scene object, then create the node
    return func(*[makeArg() for makeArg in makers])
def testNodeCmd( funcName, cmdInfo, nodeCmd=False, verbose=False ):
    """Empirically probe command *funcName* by creating a node (when
    ``nodeCmd``) and exercising every flag in query and edit mode, updating
    and returning *cmdInfo* in place with what was learned: per-flag 'modes',
    'resultNeedsUnpacking'/'resultNeedsCasting' markers, and removal of flags
    maya reports as invalid.

    WARNING: mutates the current scene; any objects created are deleted at
    the end.
    """
    _logger.info(funcName.center( 50, '='))

    # these commands hang or misbehave under automated flag probing
    if funcName in [ 'character', 'lattice', 'boneLattice', 'sculpt', 'wire' ]:
        _logger.debug("skipping")
        return cmdInfo

    # These cause crashes... confirmed that pointOnPolyConstraint still
    # crashes in 2012
    dangerousCmds = ['doBlur', 'pointOnPolyConstraint']
    if funcName in dangerousCmds:
        _logger.debug("skipping 'dangerous command'")
        return cmdInfo

    def _formatCmd( cmd, args, kwargs ):
        # render a human-readable call string for logging only
        args = [ x.__repr__() for x in args ]
        kwargs = [ '%s=%s' % (key, val.__repr__()) for key, val in kwargs.items() ]
        return '%s( %s )' % ( cmd, ', '.join( args+kwargs ) )

    def _objectToType( result ):
        "convert an instance or list of instances to a python type or list of types"
        if isinstance(result, list):
            return [ type(x) for x in result ]
        else:
            return type(result)

    # types whose query/edit asymmetry can safely be corrected by casting
    _castList = [float, int, bool]

#    def _listIsCastable(resultType):
#        "ensure that all elements are the same type and that the types are castable"
#        try:
#            typ = resultType[0]
#            return typ in _castList and all([ x == typ for x in resultType ])
#        except IndexError:
#            return False

    module = cmds

    try:
        func = getattr(module, funcName)
    except AttributeError:
        _logger.warning("could not find function %s in modules %s" % (funcName, module.__name__))
        return cmdInfo

    # get the current list of objects in the scene so we can cleanup later, after we make nodes
    allObjsBegin = set( cmds.ls(l=1) )
    try:
        # Attempt to create the node
        cmds.select(cl=1)

        # the arglist passed from creation to general testing
        args = []
        constrObj = None
        if nodeCmd:

            #------------------
            # CREATION
            #------------------
            obj = nodeCreationCmd(func, funcName)

            if isinstance(obj, list):
                _logger.debug("Return %s", obj)
                if len(obj) == 1:
                    _logger.info("%s: creation return values need unpacking" % funcName)
                    cmdInfo['resultNeedsUnpacking'] = True
                elif not obj:
                    raise ValueError, "returned object is an empty list"
                objTransform = obj[0]  # (kept, but unused below)
                obj = obj[-1]

            if obj is None:
                #emptyFunctions.append( funcName )
                raise ValueError, "Returned object is None"

            elif not cmds.objExists( obj ):
                raise ValueError, "Returned object %s is Invalid" % obj

            args = [obj]

    except (TypeError,RuntimeError, ValueError), msg:
        _logger.debug("failed creation: %s", msg)

    else:
        # NOTE(review): `obj` is only bound when nodeCmd is True; callers in
        # this file always pass nodeCmd=True for this path — confirm.
        objType = cmds.objectType(obj)
        #------------------
        # TESTING
        #------------------

        #(func, args, data) = cmdList[funcName]
        #(usePyNode, baseClsName, nodeName)
        flags = cmdInfo['flags']

        hasQueryFlag = flags.has_key( 'query' )
        hasEditFlag = flags.has_key( 'edit' )
        anyNumRe = re.compile('\d+')

        for flag in sorted(flags.keys()):
            flagInfo = flags[flag]
            if flag in ['query', 'edit']:
                continue

            assert flag != 'ype', "%s has bad flag" % funcName

            # special case for constraints
            if constrObj and flag in ['weight']:
                flagargs = [constrObj] + args
            else:
                flagargs = args

            try:
                modes = flagInfo['modes']
                testModes = False
            except KeyError, msg:
                #raise KeyError, '%s: %s' % (flag, msg)
                #_logger.debug(flag, "Testing modes")
                flagInfo['modes'] = []
                modes = []
                # modes unknown from docs: probe both query and edit below
                testModes = True

            # QUERY
            val = None
            argtype = flagInfo['args']

            if 'query' in modes or testModes == True:
                if hasQueryFlag:
                    kwargs = {'query':True, flag:True}
                else:
                    kwargs = { flag:True }

                cmd = _formatCmd(funcName, flagargs, kwargs)
                try:
                    _logger.debug(cmd)
                    val = func( *flagargs, **kwargs )
                    #_logger.debug(val)
                    resultType = _objectToType(val)

                    # ensure symmetry between edit and query commands:
                    # if this flag is queryable and editable, then its queried value should be symmetric to its edit arguments
                    if 'edit' in modes and argtype != resultType:
                        # there are certain patterns of asymmetry which we can safely correct:
                        singleItemList = (isinstance( resultType, list)
                                          and len(resultType) ==1
                                          and 'multiuse' not in flagInfo.get('modes', []))

                        # [bool] --> bool
                        if singleItemList and resultType[0] == argtype:
                            _logger.info("%s, %s: query flag return values need unpacking" % (funcName, flag))
                            flagInfo['resultNeedsUnpacking'] = True
                            val = val[0]

                        # [int] --> bool
                        elif singleItemList and argtype in _castList and resultType[0] in _castList:
                            _logger.info("%s, %s: query flag return values need unpacking and casting" % (funcName, flag))
                            flagInfo['resultNeedsUnpacking'] = True
                            flagInfo['resultNeedsCasting'] = True
                            val = argtype(val[0])

                        # int --> bool
                        elif argtype in _castList and resultType in _castList:
                            _logger.info("%s, %s: query flag return values need casting" % (funcName, flag))
                            flagInfo['resultNeedsCasting'] = True
                            val = argtype(val)
                        else:
                            # no valid corrections found
                            _logger.info(cmd)
                            _logger.info("\treturn mismatch")
                            _logger.info('\tresult: %s', val.__repr__())
                            _logger.info('\tpredicted type: %s', argtype)
                            _logger.info('\tactual type: %s', resultType)
                            # value is no good. reset to None, so that a default will be generated for edit
                            val = None
                    else:
                        _logger.debug("\tsucceeded")
                        _logger.debug('\tresult: %s', val.__repr__())
                        _logger.debug('\tresult type: %s', resultType)

                except TypeError, msg:
                    # flag is no longer supported
                    if str(msg).startswith( 'Invalid flag' ):
                        #if verbose:
                        _logger.info("removing flag %s %s %s", funcName, flag, msg)
                        shortname = flagInfo['shortname']
                        # NOTE(review): pops from flagInfo (the per-flag dict),
                        # not from the enclosing `flags` dict — looks like the
                        # intent was flags.pop(...); confirm before changing.
                        flagInfo.pop(flag,None)
                        flagInfo.pop(shortname,None)
                        modes = [] # stop edit from running
                    else:
                        _logger.info(cmd)
                        _logger.info("\t" + str(msg).rstrip('\n'))
                    val = None

                except RuntimeError, msg:
                    _logger.info(cmd)
                    _logger.info("\tRuntimeError: " + str(msg).rstrip('\n') )
                    val = None
                except ValueError, msg:
                    _logger.info(cmd)
                    _logger.info("\tValueError: " + str(msg).rstrip('\n') )
                    val = None
                else:
                    # some flags are only in mel help and not in maya docs, so we don't know their
                    # supported per-flag modes. we fill that in here
                    if 'query' not in flagInfo['modes']:
                        flagInfo['modes'].append('query')

            # EDIT
            if 'edit' in modes or testModes == True:

                #_logger.debug("Args:", argtype)
                try:
                    # we use the value returned from query above as defaults for putting back in as edit args
                    # but if the return was empty we need to produce something to test on.
                    # NOTE: this is just a guess
                    if val is None:

                        if isinstance(argtype, list):
                            val = []
                            for typ in argtype:
                                # NOTE(review): tests the builtin `type`, not
                                # the loop variable `typ` — the string branch
                                # can never trigger here; confirm intent.
                                if type == unicode or isinstance(type,basestring):
                                    val.append('persp')
                                else:
                                    if 'query' in modes:
                                        val.append( typ(0) )
                                    # edit only, ensure that bool args are True
                                    else:
                                        val.append( typ(1) )
                        else:
                            if argtype == unicode or isinstance(argtype,basestring):
                                val = 'persp'
                            elif 'query' in modes:
                                val = argtype(0)
                            else:
                                # edit only, ensure that bool args are True
                                val = argtype(1)

                    kwargs = {'edit':True, flag:val}
                    cmd = _formatCmd(funcName, args, kwargs)
                    _logger.debug(cmd)

                    # some commands will either delete or rename a node, ie:
                    #     spaceLocator(e=1, name=...)
                    #     container(e=1, removeContainer=True )
                    # ...which will then make subsequent cmds fail.
                    # To get around this, we need to undo the cmd.
                    try:
                        cmds.undoInfo(openChunk=True)
                        editResult = func( *args, **kwargs )
                    finally:
                        cmds.undoInfo(closeChunk=True)

                    if not cmds.objExists(obj):
                        # cmds.camera(e=1, name=...) does weird stuff - it
                        # actually renames the parent transform, even if you give
                        # the name of the shape... which means the shape
                        # then gets a second 'Shape1' tacked at the end...
                        # ...and in addition, undo is broken as well.
                        # So we need a special case for this, where we rename...
                        if objType == 'camera' and flag == 'name':
                            _logger.info('\t(Undoing camera rename)')
                            renamePattern = anyNumRe.sub('*', obj)
                            possibleRenames = cmds.ls(renamePattern, type=objType)
                            possibleRenames = [x for x in possibleRenames
                                               if x not in allObjsBegin]
                            # newName might not be the exact same as our original,
                            # but as long as it's the same maya type, and isn't
                            # one of the originals, it shouldn't matter...
                            newName = possibleRenames[-1]
                            cmds.rename(newName, obj)
                        else:
                            _logger.info('\t(Undoing cmd)')
                            cmds.undo()
                    _logger.debug("\tsucceeded")
                    #_logger.debug('\t%s', editResult.__repr__())
                    #_logger.debug('\t%s %s', argtype, type(editResult))
                    #_logger.debug("SKIPPING %s: need arg of type %s" % (flag, flagInfo['argtype']))
                except TypeError, msg:
                    if str(msg).startswith( 'Invalid flag' ):
                        #if verbose:
                        # flag is no longer supported
                        _logger.info("removing flag %s %s %s", funcName, flag, msg)
                        shortname = flagInfo['shortname']
                        # NOTE(review): same flagInfo-vs-flags question as in
                        # the query branch above.
                        flagInfo.pop(flag,None)
                        flagInfo.pop(shortname,None)
                    else:
                        _logger.info(funcName)
                        _logger.info("\t" + str(msg).rstrip('\n'))
                        _logger.info("\tpredicted arg: %s", argtype)
                        if not 'query' in modes:
                            _logger.info("\tedit only")
                except RuntimeError, msg:
                    _logger.info(cmd)
                    _logger.info("\t" + str(msg).rstrip('\n'))
                    _logger.info("\tpredicted arg: %s", argtype)
                    if not 'query' in modes:
                        _logger.info("\tedit only")
                except ValueError, msg:
                    _logger.info(cmd)
                    _logger.info("\tValueError: " + str(msg).rstrip('\n') )
                    val = None
                else:
                    if 'edit' not in flagInfo['modes']:
                        flagInfo['modes'].append('edit')

    # cleanup: delete everything this probe added to the scene
    allObjsEnd = set( cmds.ls(l=1) )
    newObjs = list(allObjsEnd.difference( allObjsBegin ) )
    if newObjs:
        cmds.delete( newObjs )
    return cmdInfo
def _getNodeHierarchy( version=None ):
    """
    get node hierarchy as a list of 3-value tuples:
        ( nodeType, parents, children )

    On maya >= 2012 the hierarchy is derived from the api inheritance data;
    on older versions it is parsed out of the html node docs.
    """
    import pymel.util.trees as trees
    import pymel.internal.apicache as apicache

    if versions.current() >= versions.v2012:
        # We now have nodeType(isTypeName)! yay!
        inheritances = {}
        for nodeType in apicache._getAllMayaTypes():
            try:
                inheritances[nodeType] = apicache.getInheritance(nodeType)
            except apicache.ManipNodeTypeError:
                # manipulator node types are deliberately excluded
                continue

        parentTree = {}
        # Convert inheritance lists node=>parent dict
        for nodeType, inheritance in inheritances.iteritems():
            for i in xrange(len(inheritance)):
                child = inheritance[i]
                if i == 0:
                    if child == 'dependNode':
                        continue
                    else:
                        parent = 'dependNode'
                else:
                    parent = inheritance[i - 1]

                if child in parentTree:
                    assert parentTree[child] == parent, "conflicting parents: node type '%s' previously determined parent was '%s'. now '%s'" % (child, parentTree[child], parent)
                else:
                    parentTree[child] = parent
        nodeHierarchyTree = trees.treeFromDict(parentTree)
    else:
        from .parsers import NodeHierarchyDocParser
        parser = NodeHierarchyDocParser(version)
        nodeHierarchyTree = trees.IndexedTree(parser.parse())
    # flatten the tree, preorder, into (type, ancestors, descendants) tuples
    return [ (x.value, tuple(y.value for y in x.parents()), tuple(y.value for y in x.childs()) ) \
             for x in nodeHierarchyTree.preorder() ]
class CmdExamplesCache(startup.PymelCache):
    """On-disk cache of the raw maya command examples, keyed per maya version."""
    NAME = 'mayaCmdsExamples'
    DESC = 'the list of Maya command examples'
    USE_VERSION = True
class CmdProcessedExamplesCache(CmdExamplesCache):
    """Cache of examples after pymel translation (see fixCodeExamples);
    version-independent, unlike its base class."""
    USE_VERSION = False
class CmdDocsCache(startup.PymelCache):
    """On-disk cache of command/flag docstrings, split out for lazy loading."""
    NAME = 'mayaCmdsDocs'
    DESC = 'the Maya command documentation'
class CmdCache(startup.SubItemCache):
    """Master cache of maya command metadata: the command list, the node-type
    hierarchy, UI class names, node-creation command names, and per-module
    command groupings."""
    NAME = 'mayaCmdsList'
    DESC = 'the list of Maya commands'
    _CACHE_NAMES = '''cmdlist nodeHierarchy uiClassList
                      nodeCommandList moduleCmds'''.split()
    CACHE_TYPES = {'nodeHierarchy':list,
                   'uiClassList':list,
                   'nodeCommandList':list,
                   }

    def rebuild(self) :
        """Build and save to disk the list of Maya Python commands and their arguments

        WARNING: will unload existing plugins, then (re)load all maya-installed
        plugins, without making an attempt to return the loaded plugins to the
        state they were at before this command is run.  Also, the act of
        loading all the plugins may crash maya, especially if done from a
        non-GUI session
        """
        # Put in a debug, because this can be crashy
        _logger.debug("Starting CmdCache.rebuild...")

        # With extension can't get docs on unix 64
        # path is
        # /usr/autodesk/maya2008-x64/docs/Maya2008/en_US/Nodes/index_hierarchy.html
        # and not
        # /usr/autodesk/maya2008-x64/docs/Maya2008-x64/en_US/Nodes/index_hierarchy.html
        long_version = versions.installName()

        from parsers import mayaDocsLocation
        cmddocs = os.path.join(mayaDocsLocation(long_version), 'CommandsPython')
        assert os.path.exists(cmddocs), "Command documentation does not exist: %s" % cmddocs

        _logger.info("Rebuilding the maya node hierarchy...")

        # Load all plugins to get the nodeHierarchy / nodeFunctions
        import pymel.api.plugins as plugins

        # We don't want to add in plugin nodes / commands - let that be done
        # by the plugin callbacks.  However, unloading mechanism is not 100%
        # ... sometimes functions get left in maya.cmds... and then trying
        # to use those left-behind functions can cause crashes (ie,
        # FBXExportQuaternion).  So check which methods SHOULD be unloaded
        # first, so we know to skip those if we come across them even after
        # unloading the plugin
        pluginCommands = set()
        loadedPlugins = cmds.pluginInfo(q=True, listPlugins=True)
        if loadedPlugins:
            for plug in loadedPlugins:
                plugCmds = plugins.pluginCommands(plug)
                if plugCmds:
                    pluginCommands.update(plugCmds)
        plugins.unloadAllPlugins()

        self.nodeHierarchy = _getNodeHierarchy(long_version)
        nodeFunctions = [ x[0] for x in self.nodeHierarchy ]
        nodeFunctions += nodeTypeToNodeCommand.values()

        _logger.info("Rebuilding the list of Maya commands...")

        #nodeHierarchyTree = trees.IndexedTree(self.nodeHierarchy)
        self.uiClassList = UI_COMMANDS
        self.nodeCommandList = []

        # per doc-category command lists, used by getModule for classification
        tmpModuleCmds = {}
        for moduleName, longname in moduleNameShortToLong.items():
            tmpModuleCmds[moduleName] = getModuleCommandList( longname, long_version )

        tmpCmdlist = inspect.getmembers(cmds, callable)

        #self.moduleCmds = defaultdict(list)
        self.moduleCmds = dict( (k,[]) for k in moduleNameShortToLong.keys() )
        self.moduleCmds.update( {'other':[], 'runtime': [], 'context': [], 'uiClass': [] } )

        def addCommand(funcName):
            # classify the command, gather its docs, and (for node-creation
            # commands outside the 'windows' module) probe it with testNodeCmd
            _logger.debug('adding command: %s' % funcName)
            module = getModule(funcName, tmpModuleCmds)

            cmdInfo = {}

            if module:
                self.moduleCmds[module].append(funcName)

            if module != 'runtime':
                cmdInfo = getCmdInfo(funcName, long_version)

                if module != 'windows':
                    if funcName in nodeFunctions:
                        self.nodeCommandList.append(funcName)
                        cmdInfo = testNodeCmd( funcName, cmdInfo, nodeCmd=True, verbose=True )
                    #elif module != 'context':
                    #    cmdInfo = testNodeCmd( funcName, cmdInfo, nodeCmd=False, verbose=True )

            cmdInfo['type'] = module
            flags = getCallbackFlags(cmdInfo)
            if flags:
                cmdInfo['callbackFlags'] = flags

            self.cmdlist[funcName] = cmdInfo

#            # func, args, (usePyNode, baseClsName, nodeName)
#            # args = dictionary of command flags and their data
#            # usePyNode = determines whether the class returns its 'nodeName' or uses PyNode to dynamically return
#            # baseClsName = for commands which should generate a class, this is the name of the superclass to inherit
#            # nodeName = most creation commands return a node of the same name, this option is provided for the exceptions
#            try:
#                self.cmdlist[funcName] = args, pymelCmdsList[funcName] )
#            except KeyError:
#                # context commands generate a class based on unicode (which is triggered by passing 'None' to baseClsName)
#                if funcName.startswith('ctx') or funcName.endswith('Ctx') or funcName.endswith('Context'):
#                    self.cmdlist[funcName] = (funcName, args, (False, None, None) )
#                else:
#                    self.cmdlist[funcName] = (funcName, args, () )

        for funcName, _ in tmpCmdlist :
            if funcName in pluginCommands:
                _logger.debug("command %s was a plugin command that should have been unloaded - skipping" % funcName)
                continue
            addCommand(funcName)

        # split the cached data for lazy loading
        cmdDocList = {}
        examples = {}
        for cmdName, cmdInfo in self.cmdlist.iteritems():
            try:
                examples[cmdName] = cmdInfo.pop('example')
            except KeyError:
                pass

            newCmdInfo = {}
            if 'description' in cmdInfo:
                newCmdInfo['description'] = cmdInfo.pop('description')
            newFlagInfo = {}
            if 'flags' in cmdInfo:
                for flag, flagInfo in cmdInfo['flags'].iteritems():
                    newFlagInfo[flag] = { 'docstring' : flagInfo.pop('docstring') }
                newCmdInfo['flags'] = newFlagInfo

            if newCmdInfo:
                cmdDocList[cmdName] = newCmdInfo

        CmdDocsCache().write(cmdDocList)
        CmdExamplesCache().write(examples)

    def build(self):
        """Load (or rebuild) the caches, then apply hand-maintained fixups."""
        super(CmdCache, self).build()

        # corrections that are always made, to both loaded and freshly built caches
        util.mergeCascadingDicts( cmdlistOverrides, self.cmdlist )

        # add in any nodeCommands added after cache rebuild
        self.nodeCommandList = set(self.nodeCommandList).union(nodeTypeToNodeCommand.values())
        self.nodeCommandList = sorted( self.nodeCommandList )

        # re-home commands whose module classification is manually overridden
        for module, funcNames in moduleCommandAdditions.iteritems():
            for funcName in funcNames:
                currModule = self.cmdlist[funcName]['type']
                if currModule != module:
                    self.cmdlist[funcName]['type'] = module
                    id = self.moduleCmds[currModule].index(funcName)
                    self.moduleCmds[currModule].pop(id)
                    self.moduleCmds[module].append(funcName)
        return (self.cmdlist,self.nodeHierarchy,self.uiClassList,self.nodeCommandList,self.moduleCmds)
|
LeoYReyes/GoogleSearchAutomator
|
Crawler.py
|
import google
import re
from bs4 import BeautifulSoup
def findContactPage(url):
    """Fetch *url* and return every <a> anchor whose href mentions 'contact'.

    Returns a (possibly empty) bs4 ResultSet of anchor tags.
    """
    html = google.get_page(url)
    # Pin the parser explicitly: bs4's auto-selection depends on which
    # parsers happen to be installed, which makes results vary by host
    # (and emits a warning on modern bs4).
    soup = BeautifulSoup(html, "html.parser")
    return soup.find_all('a', href=re.compile(r".*?contact", re.IGNORECASE))
if __name__ == "__main__":
url = "http://www.wrangler.com/"
contactStr = findContactPage(url)
if(len(contactStr) > 0):
contactPage = google.get_page(contactStr[0].get("href"))
print contactStr[0].get("href")#.find_parents("a")
soup = BeautifulSoup(contactPage)
emailStr = soup.find_all(text=re.compile("[\w\.-]+@[\w\.-]+"))
if(len(emailStr) > 0) :
print addressStr
else:
print "could not find email"
else:
print "could not find contacts page"
|
clawpack/clawpack-4.x
|
doc/sphinx/conf.py
|
# -*- coding: utf-8 -*-
#
# Clawpack documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 25 12:07:14 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# (puts the clawpack checkout root and the local ./ext extension dir on sys.path)
sys.path.append(os.path.abspath('../..'))
sys.path.append(os.path.abspath('./ext'))

# General configuration
# ---------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
              'sphinx.ext.intersphinx','plot_directive','only_directives',
              'sphinx.ext.inheritance_diagram']

# math rendering: pngmath is used rather than jsmath
# extensions.append('sphinx.ext.jsmath')
extensions.append('sphinx.ext.pngmath')

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Clawpack'
copyright = u'2009, Randall J. LeVeque and others'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.6'
# The full version, including alpha/beta/rc tags.
release = '4.6.3'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['users']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = 'math'

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# html_style = 'mpl.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'clawlogo.jpg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'clawicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Clawpackdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Clawpack.tex', ur'Clawpack Documentation',
ur'RJL', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/dev': None}
jsmath_path = 'jsmath/easy/load.js'
# jsmath_path = '_static/jsMath/easy/load.js'
keep_warnings = 'True'
|
qedsoftware/commcare-hq
|
corehq/sql_accessors/migrations/0035_add_undelete_functions.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.sql_db.operations import RawSQLMigration
from corehq.form_processor.models import XFormInstanceSQL
# Renders raw-SQL templates found under corehq/sql_accessors/sql_templates,
# substituting the soft-deleted form state constant so the SQL can test it.
migrator = RawSQLMigration(('corehq', 'sql_accessors', 'sql_templates'), {
    'FORM_STATE_DELETED': XFormInstanceSQL.DELETED
})
class Migration(migrations.Migration):
    """Install the soft-undelete SQL functions for cases and forms."""

    dependencies = [
        ('sql_accessors', '0034_update_reindex_functions'),
    ]

    operations = [
        migrator.get_migration(template)
        for template in ('soft_undelete_cases.sql', 'soft_undelete_forms.sql')
    ]
|
WaveBlocks/WaveBlocks
|
src/tests/TestComplexMath.py
|
"""The WaveBlocks Project
Test the complex math functions.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
from numpy import *
from matplotlib.pyplot import *
from WaveBlocks.ComplexMath import *
from WaveBlocks.Plot import plotcf
# Continuous complex angle function
a = linspace(0,5,1000)
b = linspace(5,10,1000)
c = hstack([a, b])
y = hstack([ exp(-1.0j*a**2), exp(+1.0j*b**1.6) ])
figure()
plotcf(c, angle(y), abs(y))
plot(c, real(y), "b-", label=r"$\Re y$")
plot(c, imag(y), "g-", label=r"$\Im y$")
plot(c, angle(y), "c-", label=r"$\arg y$")
plot(c, cont_angle(y), "m-", label=r"$\arg_c y$")
plot(c, pi*ones(c.shape), "y--", label=r"$\pi$")
plot(c, -pi*ones(c.shape), "y--", label=r"$-\pi$")
legend(loc="lower left")
savefig("complex_angle_continuous.png")
# Continuous complex sqrt
x = linspace(0, 6*pi, 5000)
y = 2*exp(1.0j*x)
figure()
polar(x, abs(y))
savefig("complex_numbers.png")
# Do it "wrong"
z = sqrt(y)
figure()
subplot(2,1,1)
plotcf(x, angle(y), abs(y))
plot(x, real(y), "-b")
plot(x, imag(y), "-g")
grid(True)
ylim([-2.1,2.1])
#xlabel(r"$\phi$")
ylabel(r"$2 \cdot \exp(i \cdot \phi)$")
title(r"$z = r \cdot \exp(i \cdot \phi)$")
subplot(2,1,2)
plotcf(x, angle(z), abs(z))
plot(x, real(z), "-b")
plot(x, imag(z), "-g")
grid(True)
ylim([-2.1,2.1])
xlabel(r"$\phi$")
ylabel(r"$\sqrt{2 \cdot \exp(i \cdot \phi)}$")
title(r"$\sqrt{z} = \sqrt{r} \cdot \exp \left( i \cdot \frac{\phi}{2} \right)$")
savefig("complex_sqrt_non-continuous.png")
# Do it "right"
z = cont_sqrt(y)
figure()
subplot(2,1,1)
plotcf(x, angle(y), abs(y))
plot(x, real(y), "-b")
plot(x, imag(y), "-g")
grid(True)
ylim([-2.1,2.1])
#xlabel(r"$\phi$")
ylabel(r"$2 \cdot \exp(i \cdot \phi)$")
title(r"$z = r \cdot \exp(i \cdot \phi)$")
subplot(2,1,2)
plotcf(x, angle(z), abs(z))
plot(x, real(z), "-b")
plot(x, imag(z), "-g")
grid(True)
ylim([-2.1,2.1])
xlabel(r"$\phi$")
ylabel(r"$\sqrt{2 \cdot \exp(i \cdot \phi)}$")
title(r"$\sqrt{z} = \sqrt{r} \cdot \exp \left( i \cdot \frac{\phi}{2} \right)$")
savefig("complex_sqrt_continuous.png")
# Another example
x = linspace(0, 6*pi, 5000)
y = 0.4*x*exp(1.0j*x)
figure()
polar(x,abs(y))
savefig("complex_numbers2.png")
# Do it "right"
z = cont_sqrt(y)
figure()
subplot(2,1,1)
plotcf(x, angle(y), abs(y))
plot(x, real(y), "-b")
plot(x, imag(y), "-g")
grid(True)
#xlabel(r"$\phi$")
ylabel(r"$2 \cdot \exp(i \cdot \phi)$")
title(r"$z = r \cdot \exp(i \cdot \phi)$")
subplot(2,1,2)
plotcf(x, angle(z), abs(z), darken=True)
plot(x, real(z), "-b")
plot(x, imag(z), "-g")
grid(True)
xlabel(r"$\phi$")
ylabel(r"$\sqrt{2 \cdot \exp(i \cdot \phi)}$")
title(r"$\sqrt{z} = \sqrt{r} \cdot \exp \left( i \cdot \frac{\phi}{2} \right)$")
savefig("complex_sqrt_continuous2.png")
|
mrgloom/menpofit
|
menpofit/modelinstance.py
|
import numpy as np
from menpo.base import Targetable, Vectorizable
from menpo.model import MeanInstanceLinearModel
from menpofit.differentiable import DP
def similarity_2d_instance_model(shape):
    r"""
    A MeanInstanceLinearModel that encodes all possible 2D similarity
    transforms of a 2D shape (of n_points).

    Parameters
    ----------
    shape : 2D :class:`menpo.shape.Shape`

    Returns
    -------
    model : `menpo.model.linear.MeanInstanceLinearModel`
        Model with four components, linear combinations of which
        represent the original shape under a similarity transform. The
        model is exhaustive (that is, all possible similarity transforms
        can be expressed in the model).
    """
    mean_vector = shape.as_vector()
    n_features = mean_vector.shape[0]
    # Basis vector 2: the shape rotated 90 degrees counter-clockwise,
    # i.e. (x, y) -> (-y, x) for every point.
    rotated = shape.points[:, ::-1].copy()
    rotated[:, 0] *= -1
    # Basis vectors 3 and 4: uniform translation in x and in y
    # (the flattened vector interleaves coordinates, hence the strides).
    t_x = np.zeros(n_features)
    t_x[::2] = 1
    t_y = np.zeros(n_features)
    t_y[1::2] = 1
    basis = np.vstack((mean_vector, rotated.ravel(), t_x, t_y))
    return MeanInstanceLinearModel(basis, mean_vector, shape)
class ModelInstance(Targetable, Vectorizable, DP):
    r"""An instance of a :map:`InstanceBackedModel`.
    This class describes an instance produced from one of Menpo's
    :map:`InstanceBackedModel`. The actual instance provided by the model can
    be found at self.target. This class is targetable, and so
    :meth:`set_target` can be used to update the target - this will produce the
    closest possible instance the Model can produce to the target and set the
    weights accordingly.
    Parameters
    ----------
    model : :map:`InstanceBackedModel`
        The generative model that instances will be taken from
    """
    def __init__(self, model):
        self.model = model
        self._target = None
        # set all weights to 0 (yielding the mean, first call to
        # from_vector_inplace() or set_target() will update this)
        self._weights = np.zeros(self.model.n_active_components)
        self._sync_target_from_state()
    @property
    def n_weights(self):
        r"""
        The number of parameters in the linear model.
        :type: int
        """
        return self.model.n_active_components
    @property
    def weights(self):
        r"""
        In this simple :map:`ModelInstance` the weights are just the weights
        of the model.
        """
        return self._weights
    @property
    def target(self):
        # The current model instance; kept in sync with the weights by
        # _sync_target_from_state() / _sync_state_from_target().
        return self._target
    def _target_setter(self, new_target):
        r"""
        Called by the Targetable framework when set_target() is called.
        This method **ONLY SETS THE NEW TARGET** it does no synchronisation
        logic (for that, see _sync_state_from_target())
        """
        self._target = new_target
    def _new_target_from_state(self):
        r"""
        Return the appropriate target for the parameters provided.
        Subclasses can override this.
        Returns
        -------
        new_target: model instance
        """
        return self.model.instance(self.weights)
    def _sync_state_from_target(self):
        # 1. Find the optimum parameters and set them
        self._weights = self._weights_for_target(self.target)
        # 2. Find the closest target the model can reproduce and trigger an
        # update of our transform
        self._target_setter(self._new_target_from_state())
    def _weights_for_target(self, target):
        r"""
        Return the appropriate model weights for target provided.
        Subclasses can override this.
        Parameters
        ----------
        target: model instance
            The target that the statistical model will try to reproduce
        Returns
        -------
        weights: (P,) ndarray
            Weights of the statistical model that generate the closest
            instance to the requested target
        """
        return self.model.project(target)
    def _as_vector(self):
        r"""
        Return the current parameters of this transform - this is the
        just the linear model's weights
        Returns
        -------
        params : (`n_parameters`,) ndarray
            The vector of parameters
        """
        return self.weights
    def from_vector_inplace(self, vector):
        r"""
        Updates this :map:`ModelInstance` from it's
        vectorized form (in this case, simply the weights on the linear model)
        """
        # NOTE(review): *vector* is stored by reference, not copied; a caller
        # mutating the array afterwards would silently change the weights --
        # confirm callers always pass a fresh array.
        self._weights = vector
        self._sync_target_from_state()
class PDM(ModelInstance, DP):
    r"""Specialization of :map:`ModelInstance` for use with spatial data.
    """
    @property
    def n_dims(self):
        r"""
        The number of dimensions of the spatial instance of the model
        :type: int
        """
        return self.model.template_instance.n_dims

    def d_dp(self, points):
        """
        Returns the Jacobian of the PCA model reshaped to have the standard
        Jacobian shape:
            n_points x n_params x n_dims
        which maps to
            n_features x n_components x n_dims
        on the linear model
        Returns
        -------
        jacobian : (n_features, n_components, n_dims) ndarray
            The Jacobian of the model in the standard Jacobian shape.
        """
        n_active = self.model.n_active_components
        jacobian = self.model.components.reshape(n_active, -1, self.n_dims)
        # Bring the feature (point) axis to the front:
        # (n_components, n_features, n_dims) -> (n_features, n_components, n_dims)
        return jacobian.transpose(1, 0, 2)
class GlobalPDM(PDM):
    r"""A :map:`PDM` augmented with a global transform.

    The parameter vector is the concatenation of the global transform's
    parameters followed by the shape model weights.

    Parameters
    ----------
    model : :map:`InstanceBackedModel`
        The shape model instances are drawn from.
    global_transform_cls : callable
        Transform class instantiated as ``global_transform_cls(mean, mean)``
        (i.e. started at the identity).
    """
    def __init__(self, model, global_transform_cls):
        # Start the global_transform as an identity (first call to
        # from_vector_inplace() or set_target() will update this)
        mean = model.mean()
        self.global_transform = global_transform_cls(mean, mean)
        super(GlobalPDM, self).__init__(model)
    @property
    def n_global_parameters(self):
        r"""
        The number of parameters in the `global_transform`
        :type: int
        """
        return self.global_transform.n_parameters
    @property
    def global_parameters(self):
        r"""
        The parameters for the global transform.
        :type: (`n_global_parameters`,) ndarray
        """
        return self.global_transform.as_vector()
    def _new_target_from_state(self):
        r"""
        Return the appropriate target for the model weights provided,
        accounting for the effect of the global transform
        Returns
        -------
        new_target: :class:`menpo.shape.PointCloud`
            A new target for the weights provided
        """
        return self.global_transform.apply(self.model.instance(self.weights))
    def _weights_for_target(self, target):
        r"""
        Return the appropriate model weights for target provided, accounting
        for the effect of the global transform. Note that this method
        updates the global transform to be in the correct state.
        Parameters
        ----------
        target: :class:`menpo.shape.PointCloud`
            The target that the statistical model will try to reproduce
        Returns
        -------
        weights: (P,) ndarray
            Weights of the statistical model that generate the closest
            PointCloud to the requested target
        """
        self._update_global_transform(target)
        projected_target = self.global_transform.pseudoinverse().apply(target)
        # now we have the target in model space, project it to recover the
        # weights
        new_weights = self.model.project(projected_target)
        # TODO investigate the impact of this, could be problematic
        # the model can't perfectly reproduce the target we asked for -
        # reset the global_transform.target to what it CAN produce
        #refined_target = self._target_for_weights(new_weights)
        #self.global_transform.target = refined_target
        return new_weights
    def _update_global_transform(self, target):
        self.global_transform.set_target(target)
    def _as_vector(self):
        r"""
        Return the current parameters of this transform - this is the
        just the linear model's weights
        Returns
        -------
        params : (`n_parameters`,) ndarray
            The vector of parameters
        """
        return np.hstack([self.global_parameters, self.weights])
    def from_vector_inplace(self, vector):
        # First, update the global transform
        global_parameters = vector[:self.n_global_parameters]
        self._update_global_weights(global_parameters)
        # Now extract the weights, and let super handle the update
        weights = vector[self.n_global_parameters:]
        PDM.from_vector_inplace(self, weights)
    def _update_global_weights(self, global_weights):
        r"""
        Hook that allows for overriding behavior when the global weights are
        set. Default implementation simply asks global_transform to
        update itself from vector.
        """
        self.global_transform.from_vector_inplace(global_weights)
    def d_dp(self, points):
        # d_dp is always evaluated at the mean shape
        points = self.model.mean().points
        # compute dX/dp
        # dX/dq is the Jacobian of the global transform evaluated at the
        # current target
        # (n_points, n_global_params, n_dims)
        dX_dq = self._global_transform_d_dp(points)
        # by application of the chain rule dX/db is the Jacobian of the
        # model transformed by the linear component of the global transform
        # (n_points, n_weights, n_dims)
        # NOTE: PDM.d_dp ignores its `points` argument, hence the empty list.
        dS_db = PDM.d_dp(self, [])
        # (n_points, n_dims, n_dims)
        dX_dS = self.global_transform.d_dx(points)
        # (n_points, n_weights, n_dims)
        # NOTE(review): this einsum sums over l (a dims axis of dX_dS) while
        # pairing j between dS_db and the output; a conventional chain-rule
        # contraction would read 'ilj, idl -> idj'. Confirm the intended
        # semantics before relying on this Jacobian.
        dX_db = np.einsum('ilj, idj -> idj', dX_dS, dS_db)
        # dX/dp is simply the concatenation of the previous two terms
        # (n_points, n_params, n_dims)
        return np.hstack((dX_dq, dX_db))
    def _global_transform_d_dp(self, points):
        return self.global_transform.d_dp(points)
class OrthoPDM(GlobalPDM):
    r"""A :map:`GlobalPDM` whose global transform is parametrised by a 2D
    similarity basis orthonormalized against the shape model, so the global
    and shape parameters are decoupled.
    """
    def __init__(self, model, global_transform_cls):
        # An exhaustive basis of 2D similarity transforms around the mean.
        self.similarity_model = similarity_2d_instance_model(model.mean())
        # Orthonormalize a copy of the shape model against that basis.
        orthonormalized = model.copy()
        orthonormalized.orthonormalize_against_inplace(self.similarity_model)
        self.similarity_weights = self.similarity_model.project(
            orthonormalized.mean())
        super(OrthoPDM, self).__init__(orthonormalized, global_transform_cls)

    @property
    def global_parameters(self):
        r"""
        The parameters for the global transform.
        :type: (`n_global_parameters`,) ndarray
        """
        return self.similarity_weights

    def _update_global_transform(self, target):
        self.similarity_weights = self.similarity_model.project(target)
        self._update_global_weights(self.similarity_weights)

    def _update_global_weights(self, global_weights):
        self.similarity_weights = global_weights
        self.global_transform.set_target(
            self.similarity_model.instance(global_weights))

    def _global_transform_d_dp(self, points):
        # Reshape the similarity basis into the standard Jacobian layout:
        # (n_features, n_global_parameters, n_dims).
        basis = self.similarity_model.components
        return basis.reshape(
            self.n_global_parameters, -1, self.n_dims).transpose(1, 0, 2)
|
desihub/desisurvey
|
py/desisurvey/holdingpen.py
|
import os
import subprocess
import re
import glob
import numpy as np
from astropy.io import fits
from astropy.time import Time
from astropy.table import Table
import desiutil.log
import desisurvey.config
import desisurvey.plan
import desisurvey.tiles
from desisurvey.utils import yesno
logger = desiutil.log.get_logger()
def make_tileid_list(fadir):
    """Return (tileids, filenames) for every fiberassign file below ``fadir``.

    Recursively globs for ``*.fits*`` files and keeps those matching the
    ``fiberassign-NNNNNN.fits[.gz]`` naming convention.

    Parameters
    ----------
    fadir : str
        directory to scan recursively.

    Returns
    -------
    (tileids, filenames) : (ndarray of int, ndarray of str), index-aligned.
    """
    pattern = re.compile(r'.*fiberassign-(\d+)\.fits(\.gz)?')
    tileids = []
    filenames = []
    for path in glob.glob(os.path.join(fadir, '**/*.fits*'), recursive=True):
        m = pattern.match(path)
        if m is None:
            continue
        tileids.append(int(m.group(1)))
        filenames.append(path)
    return np.array(tileids), np.array(filenames)
def tileid_to_clean(faholddir, fadir, mtldone):
    """Identify invalidated fiberassign files for deletion.

    Scans faholddir for fiberassign files. Compares the MTLTIMES with the
    times in the mtldone file. If a fiberassign file was designed before an
    overlapping tile which later had MTL updates, that fiberassign file is
    "invalid" and should be deleted.

    Parameters
    ----------
    faholddir : str
        directory name of fiberassign holding pen
    fadir : str
        directory name of svn-controlled fiber assign directory.
    mtldone : array
        numpy array of finished tile MTL updates. Must contain at least
        TIMESTAMP and TILEID fields.

    Returns
    -------
    array of TILEID whose holding-pen fiberassign files are out of date.
    """
    import dateutil.parser
    cfg = desisurvey.config.Configuration()
    tiles = desisurvey.tiles.get_tiles()
    plan = desisurvey.plan.Planner(restore=cfg.tiles_file())
    # Tiles that have progressed beyond 'unobs'; these should be in svn.
    existing = tiles.tileID[plan.tile_status != 'unobs']
    existing_tileids, existing_fafiles = make_tileid_list(faholddir)
    # Restrict to TILEID known to the current tile file.
    intiles = np.isin(existing_tileids, tiles.tileID)
    existing_tileids = existing_tileids[intiles]
    existing_fafiles = existing_fafiles[intiles]
    existing = existing[np.isin(existing, existing_tileids)]
    logger.info('Reading in MTLTIME header from %d fiberassign files...' %
                len(existing_fafiles))
    # BUG FIX: the default here used to be the *string* 'None', which is
    # never `is None`, so files without an MTLTIME header survived the mask
    # below and later crashed dateutil.parser.parse('None'). Use a real
    # None sentinel so such files are filtered (and warned about) instead.
    mtltime = [fits.getheader(fn).get('MTLTIME', None)
               for fn in existing_fafiles]
    m = np.array([mtltime0 is not None for mtltime0 in mtltime],
                 dtype='bool')
    if np.any(~m):
        logger.warning('MTLTIME not found for tiles {}!'.format(
            ' '.join([x for x in existing_fafiles[~m]])))
    if np.sum(m) == 0:
        return np.zeros(0, dtype='i4')
    existing_tileids = existing_tileids[m]
    existing_fafiles = existing_fafiles[m]
    mtltime = np.array(mtltime)[m]
    mtltime = Time([dateutil.parser.parse(mtltime0)
                    for mtltime0 in mtltime]).mjd
    # we have the mtl times for all existing fa files.
    # we want the largest MTL time of any overlapping tile which has
    # status != 'unobs'
    tilemtltime = np.zeros(tiles.ntiles, dtype='f8') - 1
    index, mask = tiles.index(existing_tileids, return_mask=True)
    if np.sum(~mask) > 0:
        logger.info('Ignoring {} TILEID not in the tile file'.format(
            np.sum(~mask)))
    index = index[mask]
    existing_tileids = existing_tileids[mask]
    mtltime = mtltime[mask]
    tilemtltime[index] = mtltime
    # this has the MTL design time of all of the tiles.
    # we also need the MTL done time of all the tiles.
    index, mask = tiles.index(mtldone['TILEID'], return_mask=True)
    mtldonetime = [dateutil.parser.parse(mtltime0)
                   for mtltime0 in mtldone['TIMESTAMP']]
    mtldonetime = Time(mtldonetime).mjd
    tilemtldonetime = np.zeros(tiles.ntiles, dtype='f8')
    tilemtldonetime[index[mask]] = mtldonetime[mask]
    # For each tile, the latest MTL-done time over its overlapping neighbors.
    maxoverlappingtilemtldonetime = np.zeros(tiles.ntiles, dtype='f8')
    for i, neighbors in enumerate(tiles.overlapping):
        if len(neighbors) == 0:
            continue
        maxoverlappingtilemtldonetime[i] = np.max(tilemtldonetime[neighbors])
    # Expired: still unobserved, has a design time, and some overlapping
    # tile finished an MTL update after this tile was designed.
    expired = ((maxoverlappingtilemtldonetime > tilemtltime)
               & (plan.tile_status == 'unobs') & (tilemtltime > -1))
    # Sanity check: every non-unobs tile seen in the holding pen should
    # already be checked into the svn directory.
    for tileid in existing:
        tileidpadstr = '%06d' % tileid
        fafn = os.path.join(fadir, tileidpadstr[:3],
                            'fiberassign-%s.fits.gz' % tileidpadstr)
        if not os.path.exists(fafn):
            logger.error('Tile {} is not unobs, '.format(fafn) +
                         'but does not exist in SVN?!')
    return tiles.tileID[expired]
def remove_tiles_from_dir(dirname, tileid):
    """Delete fiberassign products (fits.gz, png, log) for the given TILEIDs.

    Files live under ``dirname/NNN/`` where NNN is the first three digits of
    the zero-padded 6-digit TILEID. Raises OSError if any file is missing.
    """
    for tid in tileid:
        padded = '{:06d}'.format(tid)
        subdir = os.path.join(dirname, padded[:3])
        for suffix in ['fits.gz', 'png', 'log']:
            os.remove(os.path.join(
                subdir, 'fiberassign-{}.{}'.format(padded, suffix)))
def missing_tileid(fadir, faholddir):
    """Return missing TILEID and superfluous TILEID.
    The fiberassign holding pen should include all TILEID
    for available, unobserved tiles. It should include no TILEID
    for unavailable or observed tiles. This function computes the list
    of TILEID that should exist, but do not, as well as the list of TILEID
    that should not exist, but do.
    Parameters
    ----------
    fadir : str
        directory name of fiberassign directory
    faholddir : str
        directory name of fiberassign holding pen
    Returns
    -------
    missingtiles, extratiles
    missingtiles : array
        array of TILEID for tiles that do not exist, but should.
        These need to be designed and added to the holding pen.
    extratiles : array
        array of TILEID for tiles that exist, but should not.
        These need to be deleted from the holding pen.
    """
    cfg = desisurvey.config.Configuration()
    tiles = desisurvey.tiles.get_tiles()
    plan = desisurvey.plan.Planner(restore=cfg.tiles_file())
    # NOTE: fafn (the filename array) is unused here; only the TILEIDs matter.
    tileid, fafn = make_tileid_list(faholddir)
    # A tile should be in the holding pen iff it is unobserved and available.
    shouldexist = tiles.tileID[(plan.tile_status == 'unobs') &
                               (plan.tile_priority > 0)]
    missingtiles = set(shouldexist) - set(tileid)
    shouldnotexist = tiles.tileID[(plan.tile_status != 'unobs') |
                                  (plan.tile_priority <= 0)]
    doesexist = np.isin(tileid, shouldnotexist)
    # Before declaring pen files superfluous, confirm each one has made it
    # into the svn-controlled directory; otherwise deleting it would lose it.
    count = 0
    for tileid0 in tileid[doesexist]:
        expidstr = '{:06d}'.format(tileid0)
        if not os.path.exists(os.path.join(
                fadir, expidstr[:3],
                'fiberassign-{}.fits.gz'.format(expidstr))):
            logger.error('TILEID %d should be checked into svn and is not!' %
                         tileid0)
        else:
            count += 1
    if count > 0:
        logger.info('Confirmed %d files in SVN also in holding pen.' %
                    count)
        logger.info('TILEID: ' + ' '.join(
            [str(x) for x in np.sort(tileid[doesexist])]))
    return (np.sort(np.array([x for x in missingtiles])),
            np.sort(tileid[doesexist]))
def get_untracked_fnames(svn):
    """Return the paths ``svn status`` reports as untracked ('?') under *svn*.

    Any status line whose first column is not '?' is reported and skipped.
    """
    status = subprocess.run(['svn', 'status', svn], capture_output=True)
    untracked = []
    for line in status.stdout.decode('utf8').split('\n'):
        if not line:
            continue
        if line[0] != '?':
            print('unrecognized line: "{}", ignoring.'.format(line))
            continue
        # svn status uses an 8-character status/flag prefix; the path follows.
        untracked.append(line[8:])
    return untracked
def maintain_svn(svn, untrackedonly=True, verbose=False):
    """Classify fiberassign files under *svn* into delete/commit lists.

    Files for tiles still 'unobs' should not be in svn (delete); files for
    observed tiles should be committed. With ``untrackedonly=True`` only
    svn-untracked files are examined (and new NNN directories are svn-added
    so their contents become visible); otherwise the whole tree is scanned
    and only deletions are reported.

    Returns
    -------
    (todelete, tocommit) : lists of file paths.
    """
    cfg = desisurvey.config.Configuration()
    tiles = desisurvey.tiles.get_tiles()
    plan = desisurvey.plan.Planner(restore=cfg.tiles_file())
    if untrackedonly:
        fnames = get_untracked_fnames(svn)
        rgxdir = re.compile(svn + '/' + r'\d\d\d$')
        for fname in fnames:
            # if it's a new directory, go ahead and add it
            # so that we can see its contents.
            matchdir = rgxdir.match(fname.strip())
            if matchdir:
                subprocess.run(['svn', 'add', fname,
                                '--depth=empty'])
                print('svn-adding new directory %s' % fname)
        # Re-run status: files inside newly-added directories now show up.
        fnames = get_untracked_fnames(svn)
    else:
        # NOTE: glob is already imported at module level; this local import
        # is redundant but harmless.
        import glob
        fnames = glob.glob(os.path.join(svn, '**/*'), recursive=True)
    rgx = re.compile(svn + '/' +
                     r'\d\d\d/fiberassign-(\d+)\.(fits|fits\.gz|png|log)')
    todelete = []
    tocommit = []
    mintileid = np.min(tiles.tileID)
    maxtileid = np.max(tiles.tileID)
    for fname in fnames:
        match = rgx.match(fname)
        if not match:
            if verbose:
                logger.warn('unrecognized filename: "{}", '
                            'ignoring.'.format(fname))
            continue
        tileid = int(match.group(1))
        idx, mask = tiles.index(tileid, return_mask=True)
        if not mask:
            # Only warn about TILEID that fall inside the survey's ID range;
            # anything else is presumably from another program.
            if verbose and (tileid >= mintileid) and (tileid <= maxtileid):
                logger.warn('unrecognized TILEID {}, ignoring.'.format(tileid))
            continue
        if plan.tile_status[idx] == 'unobs':
            todelete.append(fname)
        else:
            tocommit.append(fname)
    if not untrackedonly:
        # When scanning the full tree everything found is already tracked,
        # so there is nothing new to commit -- only deletions are meaningful.
        tocommit = []
    return todelete, tocommit
def execute_svn_maintenance(todelete, tocommit, echo=False, svnrm=False):
    """Apply a (todelete, tocommit) plan from :func:`maintain_svn`.

    With ``echo=True`` every svn command is prefixed with ``echo`` (dry run)
    and local deletions are printed instead of performed. With ``svnrm=True``
    deletions go through ``svn rm``; otherwise files are removed locally.
    """
    base = ['echo', 'svn'] if echo else ['svn']
    for fname in todelete:
        if svnrm:
            subprocess.run(base + ['rm', fname])
        elif echo:
            print('removing ', fname)
        else:
            os.remove(fname)
    for fname in tocommit:
        subprocess.run(base + ['add', fname])
def maintain_holding_pen_and_svn(fbadir, faholddir, mtldonefn):
    """Interactively reconcile the svn fiberassign directory and holding pen.

    Steps (each gated by a yes/no prompt):
    1. delete/commit files in the svn directory per :func:`maintain_svn`;
    2. purge holding-pen files invalidated by MTL updates (if *mtldonefn*);
    3. purge holding-pen files already in svn; report tiles still to design.
    """
    todelete, tocommit = maintain_svn(fbadir)
    if len(todelete) + len(tocommit) > 0:
        logger.info(('To delete from %s:\n' % fbadir) +
                    '\n'.join([os.path.basename(x) for x in todelete]))
        logger.info(('To commit to %s:\n' % fbadir) +
                    '\n'.join([os.path.basename(x) for x in tocommit]))
        qstr = ('Preparing to perform svn fiberassign maintenance, '
                'deleting {} and committing {} files. Continue?'.format(
                    len(todelete), len(tocommit)))
        okay = yesno(qstr)
        if okay:
            # Dry run first (echo=True), then a second confirmation before
            # actually executing and committing.
            execute_svn_maintenance(todelete, tocommit, echo=True)
            okay = yesno('The following commands will be executed.  '
                         'Still okay?')
            if okay:
                execute_svn_maintenance(todelete, tocommit)
                okay = yesno('Commit to svn?')
                if okay:
                    subprocess.run(['svn', 'ci', fbadir,
                                    '-m "Adding newly observed tiles."'])
    if mtldonefn is not None:
        invalid = tileid_to_clean(faholddir, fbadir, Table.read(mtldonefn))
        if len(invalid) > 0:
            okay = yesno(('Deleting {} out-of-date fiberassign files from ' +
                          'holding pen. Continue?').format(len(invalid)))
            if okay:
                remove_tiles_from_dir(faholddir, invalid)
    missing, extra = missing_tileid(fbadir, faholddir)
    if len(extra) > 0:
        okay = yesno(('Deleting {} fiberassign files in SVN from the '
                      'holding pen. Continue?').format(len(extra)))
        if okay:
            remove_tiles_from_dir(faholddir, extra)
    if len(missing) < 100:
        logger.info('Need to design the following tiles here! ' +
                    ' '.join([str(x) for x in missing]))
    else:
        logger.info('Need to design many (%d) tiles here!' % len(missing))
|
mitsuhiko/sentry
|
tests/sentry/api/endpoints/test_shared_group_details.py
|
from __future__ import absolute_import, print_function
from sentry.testutils import APITestCase
class SharedGroupDetailsTest(APITestCase):
    """Tests for the anonymous shared-issue endpoint (/api/0/shared/issues/)."""
    def test_simple(self):
        # A shared issue is retrievable by its share id and exposes the
        # latest event plus project/organization slugs.
        self.login_as(user=self.user)
        group = self.create_group()
        event = self.create_event(group=group)
        url = '/api/0/shared/issues/{}/'.format(group.get_share_id())
        response = self.client.get(url, format='json')
        assert response.status_code == 200, response.content
        assert response.data['id'] == str(group.id)
        assert response.data['latestEvent']['id'] == str(event.id)
        assert response.data['project']['slug'] == group.project.slug
        assert response.data['project']['organization']['slug'] == group.organization.slug
    def test_feature_disabled(self):
        # When the organization disables shared issues, the endpoint 404s.
        self.login_as(user=self.user)
        group = self.create_group()
        org = group.organization
        org.flags.disable_shared_issues = True
        org.save()
        url = '/api/0/shared/issues/{}/'.format(group.get_share_id())
        response = self.client.get(url, format='json')
        assert response.status_code == 404
|
vlegoff/tsunami
|
src/secondaires/auberge/commandes/auberge/editer.py
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant le paramètre 'éditer' de la commande 'auberge'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmEditer(Parametre):
    """The 'éditer' ('edit') parameter of the 'auberge' (inn) command."""
    def __init__(self):
        """Set up the parameter: schema and (French, user-facing) help text."""
        Parametre.__init__(self, "éditer", "edit")
        self.schema = "<cle>"
        self.aide_courte = "ouvre l'éditeur d'auberge"
        self.aide_longue = \
            "Cette commande permet d'accéder à l'éditeur " \
            "d'une auberge. Cet éditeur permet d'éditer les différentes " \
            "chambres de l'auberge, leur numéro, prix et quelques autres " \
            "informations."
    def interpreter(self, personnage, dic_masques):
        """Interpret the command: open the inn editor for the given key."""
        cle = dic_masques["cle"].cle
        # Reject unknown inn keys with an error message to the player.
        if cle not in importeur.auberge.auberges:
            personnage << "|err|Cette auberge n'existe pas.|ff|"
            return
        auberge = importeur.auberge.auberges[cle]
        # Build the 'aubedit' editor context and attach it to the player.
        editeur = importeur.interpreteur.construire_editeur(
                "aubedit", personnage, auberge)
        personnage.contextes.ajouter(editeur)
        editeur.actualiser()
|
wasit7/PythonDay
|
django/mysite2/mysite2/settings.py
|
"""
Django settings for mysite2 project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f@d3+wz7y8uj!+alcvc!6du++db!-3jh6=vr(%z(e^2n5_fml-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myauthen',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
NigelCleland/Tessen
|
Tessen/generate.py
|
"""
"""
import pandas as pd
import numpy as np
from OfferPandas import Frame, load_offerframe
import sys
import os
import datetime
import time
def create_fan(energy, reserve, fName=None, return_fan=True, break_tp=False,
               force_plsr_only=True, verbose=False, *args, **kargs):
    """ A wrapper which implements some optional filtering arguments
    to speed up the process, otherwise iterating can take a very large time.
    Can handle the breakdown as well into trading period pieces.

    Parameters:
    -----------
    energy: OfferFrame of the energy values
    reserve: OfferFrame of the reserve values
    fName: Optional, file to save the resulting fan data to
    break_tp: Optional, if a save location is specified will break the files
        into smaller trading period pieces
    return_fan: Whether to return the Pandas DataFrame containing the Fan.
    force_plsr_only: Set to True to exclude TWDSR offers
    verbose: Print progress and timing information.
    *args: Filter arguments, e.g. {"Company": "MRPL"} etc (A dictionary)
    **kargs: Keyword filter arguments, e.g. Company="MRPL"

    Returns:
    --------
    fan: The Fan Data as a pandas DataFrame (or None if return_fan=False).
    """
    # Used only to report elapsed wall-clock time at the end.
    begin_time = datetime.datetime.now()

    filtered_energy = energy.efilter(*args, **kargs)
    if force_plsr_only:
        filtered_reserve = reserve.efilter(Product_Type="PLSR", *args, **kargs)
    else:
        # NOTE: Python 2 print statements; this module predates Python 3.
        print """Warning, TWDSR is still a little funky and the visualisation
        of it in a composite fan diagram can be misleading. Most notably it
        is difficult to show how it integrates when PLSR is also
        dispatched\n"""
        filtered_reserve = reserve.efilter(*args, **kargs)

    # Two fans (FIR and SIR) are built per unique node/trading-period pair.
    estimate_number = len(filtered_energy[["Node",
        "Trading_Period_ID"]].drop_duplicates()) * 2
    if verbose:
        print """I'm beginning to create fan curves, I estimate I'll need to do
        at least %s of these which may take at least %s seconds, hold tight""" % (
        estimate_number, estimate_number * 0.008)

    fan = _create_fan(filtered_energy, filtered_reserve)

    elapsed_time = datetime.datetime.now() - begin_time
    number_fans = len(fan[["Node", "Trading_Period_ID", "Reserve_Type",
                           "Reserve Price"]].drop_duplicates())
    if verbose:
        print "I successfully calculated %s fans in %s seconds" % (number_fans,
                elapsed_time.seconds)

    if fName:
        if break_tp:
            if verbose:
                print "I'll now begin saving these to individual trading period files"
            # One CSV per trading period, suffixing the period id onto fName.
            for each in fan["Trading_Period_ID"].unique():
                single = fan[fan["Trading_Period_ID"] == each]
                new_ext = '_' + str(each) + '.csv'
                single_name = fName.replace('.csv', new_ext)
                single.to_csv(single_name, header=True, index=False)
        else:
            if verbose:
                print "I'll now begin saving these to a single file"
            fan.to_csv(fName, header=True, index=False)

    if return_fan:
        return fan
    return None
def _create_fan(energy, reserve):
    """Build the full fan for every (node, trading period) pair.

    For each unique pair in *energy* a FIR and a SIR fan is constructed from
    the matching *reserve* offers via station_fan().  The resulting fans may
    then be filtered and visualised using the visualise functionality.

    Parameters:
    -----------
    energy: energy offer frame; one FIR/SIR fan pair per node and period.
    reserve: the corresponding reserve offer frame.

    Returns
    -------
    DataFrame: all station fans concatenated with a fresh index.
    """
    pairs = energy[["Node", "Trading_Period_ID"]].drop_duplicates()
    fans = [
        station_fan(
            energy.efilter(Trading_Period_ID=period, Node=node),
            reserve.efilter(Trading_Period_ID=period, Node=node,
                            Reserve_Type=reserve_kind),
            assumed_reserve=reserve_kind,
        )
        for _, node, period in pairs.itertuples()
        for reserve_kind in ("FIR", "SIR")
    ]
    return pd.concat(fans, ignore_index=True)
def station_fan(energy, reserve, assumed_reserve=None):
    """ Create the fan information for a given station and single reserve type.
    If multiple reserve types are passed this will fail miserably.

    Parameters:
    -----------
    energy: Energy OfferFrame containing the information about a single
        station and trading period
    reserve: PLSR OfferFrame containing the information about a single
        station, trading period and reserve type, note TWDSR should work.
    assumed_reserve: "FIR"/"SIR" label recorded on energy-only stacks.

    Returns:
    --------
    DataFrame: A stacked DataFrame object containing the fan data for a
        particular station, or None when the station offered no energy.
    """
    energy = energy[energy["Quantity"] > 0]
    if len(energy) == 0:
        return None

    station_metadata = get_station_metadata(energy)

    # Do a check just in case the reserve is equal to zero: return an
    # energy-only version with all reserve set to zero.  No concat needed
    # as there is only a single frame.
    if len(reserve) == 0:
        return create_energy_stack(energy, station_metadata,
                                   assumed_reserve=assumed_reserve)

    if len(reserve["Reserve_Type"].unique()) > 1:
        raise ValueError("Must only pass a single Reserve Type, you passed\
 more than 1")

    # Create the energy stack in 1 MW increments, cheapest bands first.
    sorted_energy = energy.sort("Price")
    energy_stack = incremental_energy_stack(
        sorted_energy[["Price", "Quantity"]].values)

    # Get the nameplate capacity of the station, all values are duplicates
    # so we just take the first one.  Set initial remaining capacity equal to
    # the nameplate capacity.
    nameplate_capacity = remaining_capacity = energy["Max_Output"].values[0]

    # Filter reserve offers, create a band stack for each pairing.
    nonzero_reserve = reserve[reserve["Quantity"] > 0]
    if len(nonzero_reserve) == 0:
        return create_energy_stack(energy, station_metadata,
                                   assumed_reserve=assumed_reserve)

    # If there is no zero-priced reserve offer, seed the stacks with a zero
    # reserve stack: it has no impact upon the actual fan created (it may
    # create additional fans, but the tradeoff is a small one).
    band_stacks = []
    # BUG FIX: ``x in series`` tests membership of the Series *index*, not
    # its values, so the old check almost never reflected the offer prices.
    # ``.values`` makes the check look at the prices themselves.
    if 0. not in nonzero_reserve["Price"].values:
        band_stacks.append(create_energy_stack(energy, station_metadata,
                                               assumed_reserve=assumed_reserve))

    for (index, percent, price, quantity, reserve_type, product_type
         ) in nonzero_reserve[["Percent", "Price", "Quantity", "Reserve_Type",
                               "Product_Type"]].itertuples():
        # TWDSR has no proportionality constraint; emulate by setting the
        # percentage essentially to infinity.
        if product_type == "TWDSR":
            percent = 1000000

        reserve_stack = feasible_reserve_region(energy_stack, price, quantity,
                                                percent, nameplate_capacity,
                                                remaining_capacity,
                                                product_type)
        # Capacity used by this (cheaper) band is unavailable to later bands.
        remaining_capacity -= quantity

        # Create a band DataFrame with the reserve metadata attached.
        full_metadata = update_metadata(station_metadata, reserve_type,
                                        product_type, percent)
        band_stacks.append(band_dataframe(reserve_stack, full_metadata))

    return pd.concat(band_stacks)
def create_energy_stack(energy, station_metadata, assumed_reserve=None):
    """Build an energy-only band frame with zero reserve offers and prices.

    Due to the way the aggregations work this step is required, or units
    which offer reserve only at high prices would have their energy offers
    excluded from the low-priced aggregations.

    Parameters:
    -----------
    energy: energy offers for the station in question.
    station_metadata: metadata dictionary describing the station.
    assumed_reserve: "FIR"/"SIR" label recorded on the zero-reserve band.

    Returns:
    --------
    DataFrame: energy-only incremental stack with metadata columns attached.
    """
    zero_reserve_stack = energy_only(energy)
    metadata = update_metadata(station_metadata, assumed_reserve, "PLSR", 0)
    return band_dataframe(zero_reserve_stack, metadata)
def energy_only(energy):
    """Mimic the fan curve for an energy-only station.

    The first four columns carry the incremental energy stack; the reserve
    portion (columns 4-7) is left at zero.

    Parameters:
    -----------
    energy: DataFrame of the energy offers.

    Returns:
    --------
    numpy array with zeroed reserve columns but full energy information.
    """
    price_quantity = energy.sort("Price")[["Price", "Quantity"]].values
    stack = incremental_energy_stack(price_quantity)
    padded = np.zeros((stack.shape[0], 8))
    padded[:, :4] = stack
    return padded
def update_metadata(station_metadata, reserve_type, product_type, percent):
    """Return a copy of *station_metadata* extended with reserve band info.

    The original dictionary is left untouched; the copy gains the
    Reserve_Type, Product_Type and Reserve_Percent keys.
    """
    merged = dict(station_metadata)
    merged["Reserve_Type"] = reserve_type
    merged["Product_Type"] = product_type
    merged["Reserve_Percent"] = percent
    return merged
def band_dataframe(full_stack, full_metadata):
    """Create a DataFrame for a single band from the 8-column stack array.

    The stack columns become the energy/reserve price and quantity columns;
    every metadata key is appended as a constant column.  The per-band
    frames may then be concatenated to build the station frame.
    """
    columns = ["Energy Price", "Energy Quantity",
               "Incremental Energy Quantity", "Cumulative Energy Quantity",
               "Reserve Price", "Reserve Quantity",
               "Incremental Reserve Quantity", "Cumulative Reserve Quantity"]
    # Create the DataFrame for a single band
    df = pd.DataFrame(full_stack, columns=columns)
    # Add the metadata.  FIX: dict.iteritems() is Python-2-only; .items()
    # iterates identically on both Python 2 and 3.
    for key, value in full_metadata.items():
        df[key] = value
    return df
def get_station_metadata(offer_data):
    """Extract station-level metadata from an offer frame.

    Takes the first row's value of every column that is not band-specific,
    so the metadata can be re-applied at the end without concatenating
    frames together.

    Inputs:
    -------
    offer_data: an OfferPandas.Frame energy offer frame (preferred).

    Returns:
    --------
    dict mapping column name -> first-row value for each metadata column.
    """
    band_specific = {"Band", "Price", "Quantity", "Product_Type",
                     "Reserve_Type", "Is_Injection", "Is_Hvdc",
                     "Created_Date", "Last_Amended_Date"}
    first_row = offer_data.index[0]
    return {column: offer_data[column][first_row]
            for column in offer_data.columns if column not in band_specific}
def incremental_energy_stack(pairs):
    """Expand (price, quantity) offer pairs into a 1 MW incremental stack.

    Inputs:
    -------
    pairs: numpy array of Nx2 dimension containing price and quantity pairs.

    Returns:
    --------
    stack: a (M+1)x4 array where M is the total number of 1 MW increments.
        Columns are (Price, Quantity, Incremental Quantity, Cumulative
        Quantity); the first row is all zeros so the stack starts at the
        origin.

    Raises:
    -------
    ValueError: if *pairs* is not an Nx2 array.
    """
    if pairs.shape[1] != 2:
        raise ValueError("Shape of the array passed to the function must\
 be a Nx2 array, current size is %sx%s" % pairs.shape)

    partial_arrays = [np.zeros((1, 4))]
    for price, quantity in pairs:
        # FIX: np.ceil returns a float; modern NumPy requires integer
        # dimensions, so cast explicitly.
        rows = int(np.ceil(quantity))
        partial = np.zeros((rows, 4))
        partial[:, 2] = 1.0
        # The final increment carries only the fractional remainder, if any.
        if rows != quantity:
            partial[-1, 2] = quantity % 1
        # Price and Quantity are repeated on every increment of the band.
        partial[:, 0] = price
        partial[:, 1] = quantity
        partial_arrays.append(partial)

    full_array = np.concatenate(partial_arrays)
    full_array[:, 3] = np.cumsum(full_array[:, 2])
    return full_array
def feasible_reserve_region(stack, res_price, res_quantity, res_percent,
nameplate_capacity, remaining_capacity,
product_type):
"""
Create a feasible region array with information about energy and reserve
prices for a single band. This array contains information on an incremental
fashion regarding the energy and reserve tradeoff. Ideally should keep
all of the data together in one place:
Parameters:
-----------
stack: The full energy stack as previously calculated.
res_price: The band reserve price
res_quantity: Maximum band reserve quantity
res_percent: The percentage for the reserve band, note TWDSR will be
arbitrarily high
nameplate_capacity: The original capacity (nameplate) of the unit
remaining_capacity: Subtracting the reserve bands at lower price quantities
from the nameplate capacity to leave a residual
quantity
reserve_type: string, either "PLSR" or "TWDSR". indicates some special
behaviour for "TWDSR".
Returns:
--------
reserve_coupling: numpy array of energy and reserve values.
"""
length = stack.shape[0]
# Update the capacity line to be of the size of the full capacity but
# shifted to reflect what capacity has already been used by cheaper reserve
# offers.
utilised_capacity = nameplate_capacity - remaining_capacity
capacity_line = nameplate_capacity - stack[:,3]
capacity_line = np.where(capacity_line - utilised_capacity <= 0,
0, capacity_line - utilised_capacity)
# Create a line due to the proportionality constraint.
# Note percentages are reported as is...
# Still need to figure out how to do TWD here...
reserve_line = stack[:,3] * res_percent /100.
reserve_line = np.where(reserve_line <= res_quantity, reserve_line,
res_quantity)
# Adjust for the modified capacity line
reserve_line = np.where(reserve_line <= capacity_line, reserve_line,
capacity_line)
# Create an incremental reserve line as well
incremental_reserve_line = np.zeros(len(reserve_line))
incremental_reserve_line[1:] = reserve_line[1:] - reserve_line[:-1]
# Create a new array and add the values
reserve_coupling = np.zeros((length, 8))
reserve_coupling[:, :4] = stack
reserve_coupling[:, 4] = res_price
reserve_coupling[:, 5] = res_quantity
reserve_coupling[:, 6] = incremental_reserve_line
reserve_coupling[:, 7] = reserve_line
return reserve_coupling
# Library module: no behaviour when executed directly.
if __name__ == '__main__':
    pass
|
chimeno/wagtail
|
wagtail/wagtailsnippets/widgets.py
|
from __future__ import absolute_import, unicode_literals
import json
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailadmin.widgets import AdminChooser
class AdminSnippetChooser(AdminChooser):
    """Admin form widget for choosing a snippet instance of a fixed model.

    The target model is supplied as a ContentType, either via the class
    attribute or the ``content_type`` constructor argument.
    """

    # ContentType of the snippet model this chooser selects from.
    target_content_type = None

    def __init__(self, content_type=None, **kwargs):
        # 'snippet_type_name' customises the chooser button labels before
        # the base class consumes the remaining kwargs.
        if 'snippet_type_name' in kwargs:
            snippet_type_name = kwargs.pop('snippet_type_name')
            self.choose_one_text = _('Choose %s') % snippet_type_name
            self.choose_another_text = _('Choose another %s') % snippet_type_name

        super(AdminSnippetChooser, self).__init__(**kwargs)
        if content_type is not None:
            self.target_content_type = content_type

    def render_html(self, name, value, attrs):
        """Render the chooser, wrapping the base field HTML in the template."""
        original_field_html = super(AdminSnippetChooser, self).render_html(name, value, attrs)

        model_class = self.target_content_type.model_class()
        instance = self.get_instance(model_class, value)
        return render_to_string("wagtailsnippets/widgets/snippet_chooser.html", {
            'widget': self,
            'original_field_html': original_field_html,
            'attrs': attrs,
            'value': value,
            'item': instance,
        })

    def render_js_init(self, id_, name, value):
        """Return the JS call that activates the chooser for this field."""
        content_type = self.target_content_type
        return "createSnippetChooser({id}, {content_type});".format(
            id=json.dumps(id_),
            content_type=json.dumps('{app}/{model}'.format(
                app=content_type.app_label,
                model=content_type.model)))
|
expfactory/expfactory
|
expfactory/database/relational.py
|
"""
Copyright (c) 2017-2022, Vanessa Sochat
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from expfactory.logger import bot
from expfactory.utils import write_json
from expfactory.defaults import EXPFACTORY_SUBID, EXPFACTORY_DATA
from glob import glob
import os
import uuid
import pickle
import json
import sys
# RELATIONAL ###################################################################
#
# This is an Expfactory Flask Server database plugin. It implements common
# functions (generate_subid, save_data, init_db) that should prepare a
# database and perform actions to save data to it. The functions are added
# to the main application upon initialization of the server. This relational
# module has support for sqlite3, mysql, and postgres
#
################################################################################
def generate_subid(self, token=None, return_user=False):
    """Create and persist a new Participant (sessions are identifier-based).

    Returns the model instance when *return_user* is True, otherwise its id.
    """
    from expfactory.database.models import Participant

    participant = Participant(token=token) if token else Participant()
    self.session.add(participant)
    self.session.commit()
    if return_user is True:
        return participant
    return participant.id
def print_user(self, user):
    """Print and return a one-line summary "<id>\\t<token>[<status>]".

    A token of "finished" or "revoked" is reported as the status; a None
    token is rendered as an empty string with status "active".
    """
    token = user.token
    status = token if token in ("finished", "revoked") else "active"
    shown_token = "" if token is None else token
    line = "%s\t%s[%s]" % (user.id, shown_token, status)
    print(line)
    return line
def list_users(self, user=None):
    """Print and return one summary line per participant in the database.

    Headless experiments use protected tokens; interactive ones rely on
    auto-incremented ids.
    """
    from expfactory.database.models import Participant

    return [self.print_user(participant)
            for participant in Participant.query.all()]
# Actions ######################################################################
def generate_user(self):
    """Create a new participant protected by a freshly generated UUID token.

    Called from the "users new" entrypoint; returns the Participant model.
    """
    fresh_token = str(uuid.uuid4())
    return self.generate_subid(token=fresh_token, return_user=True)
def finish_user(self, subid):
    """Mark *subid* finished by replacing its (revoked) token.

    A "finished" token makes the entry inaccessible in headless mode.
    """
    participant = self.revoke_token(subid)
    participant.token = "finished"
    self.session.commit()
    return participant
def restart_user(self, subid):
    """Restart a user: revoke the current token, then issue a fresh one."""
    self.revoke_token(subid)
    return self.refresh_token(subid)
# Tokens #######################################################################
def validate_token(self, token):
    """Return the participant id for a valid *token*, else None.

    A token is invalid when no participant carries it or when the stored
    token has expired (ends with "finished" or "revoked").
    """
    from expfactory.database.models import Participant

    participant = Participant.query.filter(Participant.token == token).first()
    if participant is None:
        return None
    if participant.token.endswith(("finished", "revoked")):
        return None
    return participant.id
def revoke_token(self, subid):
    """Revoke the token of participant *subid*; done at finish and via CLI.

    Returns the participant, or None when no such id exists.
    """
    from expfactory.database.models import Participant

    participant = Participant.query.filter(Participant.id == subid).first()
    if participant is not None:
        participant.token = "revoked"
        self.session.commit()
    return participant
def refresh_token(self, subid):
    """Issue a brand new UUID token for participant *subid*.

    Returns the participant, or None when no such id exists.
    """
    from expfactory.database.models import Participant

    participant = Participant.query.filter(Participant.id == subid).first()
    if participant is not None:
        participant.token = str(uuid.uuid4())
        self.session.commit()
    return participant
def save_data(self, session, exp_id, content):
    """Persist *content* as a Result for experiment *exp_id*.

    The subject id comes from the (Flask) session; nothing is saved when it
    is missing.  In headless mode the session token must match the
    participant's current token and must not be expired.
    """
    from expfactory.database.models import Participant, Result

    subid = session.get("subid")
    token = session.get("token")

    self.logger.info("Saving data for subid %s" % subid)

    # We only attempt save if there is a subject id, set at start
    if subid is not None:
        p = Participant.query.filter(
            Participant.id == subid
        ).first()  # better query here

        # Does the session token match the participant's (headless only)?
        if self.headless and p.token != token:
            self.logger.warning(
                "%s attempting to use mismatched token [%s] skipping save"
                % (p.id, token)
            )
        elif self.headless and p.token.endswith(("finished", "revoked")):
            self.logger.warning(
                "%s attempting to use expired token [%s] skipping save" % (p.id, token)
            )
        else:
            # Preference is to save data under 'data', otherwise do all of it
            if "data" in content:
                content = content["data"]
            result = Result(
                data=content, exp_id=exp_id, participant_id=p.id
            )  # check if changes from str/int
            # Create and save the result
            self.session.add(result)
            p.results.append(result)
            self.session.commit()
            self.logger.info("Save [participant] %s [result] %s" % (p, result))
# Shared declarative base; init_db attaches a query property and creates tables.
Base = declarative_base()
def init_db(self):
    """Initialize the relational database backend.

    Requires self.database to hold a SQLAlchemy url of the form matching
    the database type, e.g.:

        sqlite:////scif/data/expfactory.db

    Creates the engine, a scoped session, and all model tables.
    """
    # The user can provide a custom string
    if self.database is None:
        self.logger.error("You must provide a database url, exiting.")
        sys.exit(1)

    # NOTE(review): convert_unicode was deprecated and removed in
    # SQLAlchemy 1.4/2.0 -- confirm against the pinned SQLAlchemy version.
    self.engine = create_engine(self.database, convert_unicode=True)
    self.session = scoped_session(
        sessionmaker(autocommit=False, autoflush=False, bind=self.engine)
    )

    # Database Setup
    Base.query = self.session.query_property()

    # import all modules here that might define models so that
    # they will be registered properly on the metadata. Otherwise
    # you will have to import them first before calling init_db()
    import expfactory.database.models

    self.Base = Base
    self.Base.metadata.create_all(bind=self.engine)
|
cmry/ebacs
|
corks.py
|
import bottle
from cork import Cork
from utils import skeleton
# Cork authentication backend; credential files live in the local 'users'
# directory and registration mail goes out through the configured SMTP host.
aaa = Cork('users', email_sender='c.emmery@outlook.com',
           smtp_url='smtp://smtp.magnet.ie')
# Decorator requiring a logged-in session with the "user" role.
authorize = aaa.make_auth_decorator(fail_redirect='/login', role="user")
def postd():
    """Return the POST form dict of the current bottle request."""
    return bottle.request.forms
def post_get(name, default=''):
    """Return POST parameter *name* stripped of whitespace, or *default*."""
    raw = bottle.request.POST.get(name, default)
    return raw.strip()
@bottle.post('/login')
def login():
    """Authenticate the user and start a session.

    Cork redirects to '/' on success and back to '/login' on failure.
    """
    username = post_get('username')
    password = post_get('password')
    aaa.login(username, password, success_redirect='/', fail_redirect='/login')
@bottle.route('/logout')
def logout():
    """End the current session and redirect to the login page."""
    aaa.logout(success_redirect='/login')
@bottle.post('/register')
def register():
    """Send out registration email with a validation link."""
    aaa.register(post_get('username'), post_get('password'),
                 post_get('email_address'))
    return 'Please check your mailbox.'
@bottle.route('/validate_registration/:registration_code')
def validate_registration(registration_code):
    """Validate a registration code and create the user account."""
    aaa.validate_registration(registration_code)
    return 'Thanks. <a href="/login">Go to login</a>'
@bottle.post('/change_password')
def change_password():
    """Complete a password reset using the emailed reset code."""
    aaa.reset_password(post_get('reset_code'), post_get('password'))
    return 'Thanks. <a href="/login">Go to login</a>'
@bottle.post('/create_user')
def create_user():
    """Create a user from the POSTed username/role/password (admin action)."""
    try:
        aaa.create_user(postd().username, postd().role, postd().password)
        return dict(ok=True, msg='')
    except Exception as e:
        # FIX: Exception.message was removed in Python 3; str(e) is portable.
        return dict(ok=False, msg=str(e))
@bottle.post('/delete_user')
def delete_user():
    """Delete the POSTed username (admin action)."""
    try:
        aaa.delete_user(post_get('username'))
        return dict(ok=True, msg='')
    except Exception as e:
        print(repr(e))
        # FIX: Exception.message was removed in Python 3; str(e) is portable.
        return dict(ok=False, msg=str(e))
@bottle.post('/create_role')
def create_role():
    """Create a role with the POSTed name and level (admin action)."""
    try:
        aaa.create_role(post_get('role'), post_get('level'))
        return dict(ok=True, msg='')
    except Exception as e:
        # FIX: Exception.message was removed in Python 3; str(e) is portable.
        return dict(ok=False, msg=str(e))
@bottle.post('/delete_role')
def delete_role():
    """Delete the POSTed role (admin action)."""
    try:
        aaa.delete_role(post_get('role'))
        return dict(ok=True, msg='')
    except Exception as e:
        # FIX: Exception.message was removed in Python 3; str(e) is portable.
        return dict(ok=False, msg=str(e))
# Static pages
@bottle.route('/login')
def login_form():
    """Serve the login form wrapped in the site skeleton."""
    return skeleton(bottle.template('login_form'))
@bottle.route('/sorry_page')
def sorry_page():
    """Serve the not-authorized page."""
    return '<p>Sorry, you are not authorized to perform this action</p>'
|
bitcraft/pyglet
|
contrib/experimental/mt_media/drivers/directsound/__init__.py
|
#!/usr/bin/python
# $Id:$
import ctypes
import math
import sys
import threading
import time
import pyglet
_debug = pyglet.options['debug_media']
import mt_media
from . import lib_dsound as lib
from pyglet.window.win32 import user32, kernel32
class DirectSoundException(mt_media.MediaException):
    """Raised for DirectSound driver errors."""
    pass
def _db(gain):
"""Convert linear gain in range [0.0, 1.0] to 100ths of dB."""
if gain <= 0:
return -10000
return max(-10000, min(int(1000 * math.log(min(gain, 1))), 0))
class DirectSoundWorker(mt_media.MediaThread):
    """Background thread that keeps DirectSound ring buffers filled.

    Players register with add()/remove(); each pass refills whichever
    registered player currently has the most free buffer space.
    """

    # Minimum free space (bytes) worth refilling; below this we nap instead.
    _min_write_size = 9600

    # Time to wait if there are players, but they're all full.
    _nap_time = 0.05

    # Time to wait if there are no players.
    _sleep_time = None

    def __init__(self):
        super().__init__()
        # Set of registered players; guarded by self.condition.
        self.players = set()

    def run(self):
        """Worker main loop; exits when the thread is stopped."""
        while True:
            # This is a big lock, but ensures a player is not deleted while
            # we're processing it -- this saves on extra checks in the
            # player's methods that would otherwise have to check that it's
            # still alive.
            if _debug:
                print('DirectSoundWorker run attempt acquire')
            self.condition.acquire()
            if _debug:
                print('DirectSoundWorker run acquire')

            if self.stopped:
                self.condition.release()
                break
            sleep_time = -1

            if self.players:
                # Pick the player with the most free buffer space.
                player = None
                write_size = 0
                for p in self.players:
                    s = p.get_write_size()
                    if s > write_size:
                        player = p
                        write_size = s

                if write_size > self._min_write_size:
                    player.refill(write_size)
                else:
                    sleep_time = self._nap_time
            else:
                sleep_time = self._sleep_time

            self.condition.release()
            if _debug:
                print('DirectSoundWorker run release')

            # sleep_time == -1 means work was done; loop again immediately.
            if sleep_time != -1:
                self.sleep(sleep_time)

        if _debug:
            print('DirectSoundWorker exiting')

    def add(self, player):
        """Register *player* for refilling (callable from any thread)."""
        if _debug:
            print('DirectSoundWorker add', player)
        self.condition.acquire()
        self.players.add(player)
        self.condition.notify()
        self.condition.release()
        if _debug:
            print('return DirectSoundWorker add', player)

    def remove(self, player):
        """Unregister *player*; a no-op if it was never added."""
        if _debug:
            print('DirectSoundWorker remove', player)
        self.condition.acquire()
        try:
            self.players.remove(player)
        except KeyError:
            pass
        self.condition.notify()
        self.condition.release()
        if _debug:
            print('return DirectSoundWorker remove', player)
class DirectSoundAudioPlayer(mt_media.AbstractAudioPlayer):
    """DirectSound implementation of the pyglet audio player interface."""

    # How many bytes the ring buffer should be
    _buffer_size = 44800 * 1

    # Need to cache these because pyglet API allows update separately, but
    # DSound requires both to be set at once.
    _cone_inner_angle = 360
    _cone_outer_angle = 360
    def __init__(self, source_group, player):
        """Create a DirectSound ring buffer matching the source's PCM format.

        Mono sources get a 3D-capable buffer; all buffers get volume and
        frequency control.  The buffer is primed with one full refill.
        """
        super().__init__(source_group, player)

        # Locking strategy:
        # All DirectSound calls should be locked.  All instance vars relating
        # to buffering/filling/time/events should be locked (used by both
        # application and worker thread).  Other instance vars (consts and
        # 3d vars) do not need to be locked.
        self._lock = threading.RLock()

        # Desired play state (may be actually paused due to underrun -- not
        # implemented yet).
        self._playing = False

        # Up to one audio data may be buffered if too much data was received
        # from the source that could not be written immediately into the
        # buffer.  See refill().
        self._next_audio_data = None

        # Theoretical write and play cursors for an infinite buffer.  play
        # cursor is always <= write cursor (when equal, underrun is
        # happening).
        self._write_cursor = 0
        self._play_cursor = 0

        # Cursor position of end of data.  Silence is written after
        # eos for one buffer size.
        self._eos_cursor = None

        # Indexes into DSound circular buffer.  Complications ensue wrt each
        # other to avoid writing over the play cursor.  See get_write_size
        # and write().
        self._play_cursor_ring = 0
        self._write_cursor_ring = 0

        # List of (play_cursor, MediaEvent), in sort order
        self._events = list()

        # List of (cursor, timestamp), in sort order (cursor gives expiry
        # place of the timestamp)
        self._timestamps = list()

        # Describe the source's PCM format for DirectSound.
        audio_format = source_group.audio_format
        wfx = lib.WAVEFORMATEX()
        wfx.wFormatTag = lib.WAVE_FORMAT_PCM
        wfx.nChannels = audio_format.channels
        wfx.nSamplesPerSec = audio_format.sample_rate
        wfx.wBitsPerSample = audio_format.sample_size
        wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels // 8
        wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign

        dsbdesc = lib.DSBUFFERDESC()
        dsbdesc.dwSize = ctypes.sizeof(dsbdesc)
        dsbdesc.dwFlags = (lib.DSBCAPS_GLOBALFOCUS |
                           lib.DSBCAPS_GETCURRENTPOSITION2 |
                           lib.DSBCAPS_CTRLFREQUENCY |
                           lib.DSBCAPS_CTRLVOLUME)
        # 3D positioning is only possible for mono buffers.
        if audio_format.channels == 1:
            dsbdesc.dwFlags |= lib.DSBCAPS_CTRL3D
        dsbdesc.dwBufferBytes = self._buffer_size
        dsbdesc.lpwfxFormat = ctypes.pointer(wfx)

        # DSound buffer
        self._buffer = lib.IDirectSoundBuffer()
        driver._dsound.CreateSoundBuffer(dsbdesc,
                                         ctypes.byref(self._buffer),
                                         None)

        if audio_format.channels == 1:
            self._buffer3d = lib.IDirectSound3DBuffer()
            self._buffer.QueryInterface(lib.IID_IDirectSound3DBuffer,
                                        ctypes.byref(self._buffer3d))
        else:
            self._buffer3d = None

        self._buffer.SetCurrentPosition(0)

        # Prime the ring buffer before playback starts.
        self.refill(self._buffer_size)
    def __del__(self):
        # Best-effort cleanup; never raise from a finalizer.
        try:
            self.delete()
        except:
            pass
    def delete(self):
        """Release DirectSound buffers and unregister from the worker."""
        if driver and driver.worker:
            driver.worker.remove(self)

        self.lock()
        self._buffer.Stop()
        self._buffer.Release()
        self._buffer = None
        if self._buffer3d:
            self._buffer3d.Release()
            self._buffer3d = None
        self.unlock()
    def lock(self):
        # Acquire the re-entrant lock guarding buffer state and DSound calls.
        self._lock.acquire()
    def unlock(self):
        # Release the lock taken by lock().
        self._lock.release()
    def play(self):
        """Start (or resume) playback and register with the refill worker."""
        if _debug:
            print('DirectSound play')
        driver.worker.add(self)

        self.lock()
        if not self._playing:
            self._playing = True
            # Loop over the ring buffer; refill() keeps supplying fresh data.
            self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
        self.unlock()

        if _debug:
            print('return DirectSound play')
def stop(self):
    """Pause playback and deregister from the refill worker."""
    if _debug:
        print('DirectSound stop')
    driver.worker.remove(self)
    self.lock()
    if self._playing:
        self._playing = False
        self._buffer.Stop()
    self.unlock()
    if _debug:
        print('return DirectSound stop')
def clear(self):
    """Discard all buffered audio plus pending events and timestamps."""
    if _debug:
        print('DirectSound clear')
    self.lock()
    self._buffer.SetCurrentPosition(0)
    self._play_cursor_ring = self._write_cursor_ring = 0
    # Snap the absolute play cursor forward to the write cursor so both
    # monotonic cursors stay consistent after the hardware reposition.
    self._play_cursor = self._write_cursor
    self._eos_cursor = None
    self._next_audio_data = None
    del self._events[:]
    del self._timestamps[:]
    self.unlock()
def refill(self, write_size):
    """Write up to `write_size` bytes of source audio (or silence) into
    the ring buffer, queueing events and timestamps as data is written.

    Called from the worker thread and from __init__ for pre-fill.
    """
    self.lock()
    while write_size > 0:
        if _debug:
            print('refill, write_size =', write_size)
        # Get next audio packet (or remains of last one)
        if self._next_audio_data:
            audio_data = self._next_audio_data
            self._next_audio_data = None
        else:
            audio_data = self.source_group.get_audio_data(write_size)
        # Write it, or silence if there are no more packets
        if audio_data:
            # Add events
            for event in audio_data.events:
                event_cursor = self._write_cursor + event.timestamp * \
                    self.source_group.audio_format.bytes_per_second
                self._events.append((event_cursor, event))
            # Add timestamp (at end of this data packet)
            ts_cursor = self._write_cursor + audio_data.length
            self._timestamps.append(
                (ts_cursor, audio_data.timestamp + audio_data.duration))
            # Write data
            if _debug:
                print('write', audio_data.length)
            length = min(write_size, audio_data.length)
            self.write(audio_data, length)
            # write() consumed `length` bytes; if the packet still holds
            # data, keep the remainder for the next iteration/refill.
            if audio_data.length:
                self._next_audio_data = audio_data
            write_size -= length
        else:
            # Write silence
            if self._eos_cursor is None:
                # First time we ran dry: remember where the stream ended
                # and schedule the end-of-source events at that cursor.
                self._eos_cursor = self._write_cursor
                self._events.append(
                    (self._eos_cursor,
                     mt_media.MediaEvent(0, 'on_eos')))
                self._events.append(
                    (self._eos_cursor,
                     mt_media.MediaEvent(0, 'on_source_group_eos')))
                self._events.sort()
            if self._write_cursor > self._eos_cursor + self._buffer_size:
                # A whole buffer of silence has been written past EOS;
                # playback of real data is certainly finished.
                self.stop()
            else:
                self.write(None, write_size)
            write_size = 0
    self.unlock()
def update_play_cursor(self):
    """Refresh the absolute play cursor from the hardware ring position,
    then dispatch events and drop timestamps the cursor has passed.
    """
    self.lock()
    play_cursor_ring = lib.DWORD()
    self._buffer.GetCurrentPosition(play_cursor_ring, None)
    if play_cursor_ring.value < self._play_cursor_ring:
        # Wrapped around: account for the remainder of the ring first.
        self._play_cursor += self._buffer_size - self._play_cursor_ring
        self._play_cursor_ring = 0
    self._play_cursor += play_cursor_ring.value - self._play_cursor_ring
    self._play_cursor_ring = play_cursor_ring.value
    # Dispatch pending events
    pending_events = list()
    while self._events and self._events[0][0] <= self._play_cursor:
        _, event = self._events.pop(0)
        pending_events.append(event)
    if _debug:
        print('Dispatching pending events:', pending_events)
        print('Remaining events:', self._events)
    # Remove expired timestamps
    while self._timestamps and self._timestamps[0][0] < self._play_cursor:
        del self._timestamps[0]
    self.unlock()
    # Dispatch outside the lock: handlers may call back into this player.
    for event in pending_events:
        event._sync_dispatch_to_player(self.player)
def get_write_size(self):
    """Return the number of free bytes available for writing."""
    self.update_play_cursor()
    self.lock()
    play_cursor = self._play_cursor
    write_cursor = self._write_cursor
    self.unlock()
    # Cursors are absolute (monotonic); their difference is the amount of
    # data already queued ahead of playback.
    return self._buffer_size - (write_cursor - play_cursor)
def write(self, audio_data, length):
    """Copy `length` bytes from `audio_data` into the ring buffer at the
    current write cursor, consuming the data; pass audio_data=None to
    write silence instead.
    """
    # Pass audio_data=None to write silence
    if length == 0:
        return 0
    self.lock()
    # DirectSound may return the locked region in two parts when it wraps
    # the end of the ring: (p1, l1) and (p2, l2).
    p1 = ctypes.c_void_p()
    l1 = lib.DWORD()
    p2 = ctypes.c_void_p()
    l2 = lib.DWORD()
    self._buffer.Lock(self._write_cursor_ring, length,
                      ctypes.byref(p1), l1, ctypes.byref(p2), l2, 0)
    assert length == l1.value + l2.value
    if audio_data:
        ctypes.memmove(p1, audio_data.data, l1.value)
        # consume() advances audio_data.data, so the second memmove below
        # reads the bytes following the first chunk.
        audio_data.consume(l1.value, self.source_group.audio_format)
        if l2.value:
            ctypes.memmove(p2, audio_data.data, l2.value)
            audio_data.consume(l2.value, self.source_group.audio_format)
    else:
        ctypes.memset(p1, 0, l1.value)
        if l2.value:
            ctypes.memset(p2, 0, l2.value)
    self._buffer.Unlock(p1, l1, p2, l2)
    self._write_cursor += length
    self._write_cursor_ring += length
    self._write_cursor_ring %= self._buffer_size
    self.unlock()
def get_time(self):
    """Return the current playback timestamp, or None if unknown."""
    self.lock()
    if self._timestamps:
        # Interpolate from the oldest live timestamp using bytes played
        # since its reference cursor.
        cursor, ts = self._timestamps[0]
        result = ts + (self._play_cursor - cursor) / \
            float(self.source_group.audio_format.bytes_per_second)
    else:
        result = None
    self.unlock()
    return result
def set_volume(self, volume):
    """Apply a [0, 1] gain to the buffer, converted to DirectSound's
    hundredths-of-dB attenuation scale by _db()."""
    attenuation = _db(volume)
    self.lock()
    self._buffer.SetVolume(attenuation)
    self.unlock()
def set_position(self, position):
    """Place the 3D source at (x, y, z); no-op for non-mono buffers."""
    if self._buffer3d:
        x, y, z = position
        self.lock()
        # z is negated: DirectSound uses a left-handed coordinate system.
        self._buffer3d.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
        self.unlock()
def set_min_distance(self, min_distance):
    """Distance under which the source is no louder; 3D buffers only."""
    if self._buffer3d:
        self.lock()
        self._buffer3d.SetMinDistance(min_distance, lib.DS3D_IMMEDIATE)
        self.unlock()
def set_max_distance(self, max_distance):
    """Distance beyond which the source gets no quieter; 3D buffers only."""
    if self._buffer3d:
        self.lock()
        self._buffer3d.SetMaxDistance(max_distance, lib.DS3D_IMMEDIATE)
        self.unlock()
def set_pitch(self, pitch):
    """Set playback pitch as a multiplier of the source's sample rate
    (1.0 = natural speed), by changing the buffer frequency.
    """
    # Bug fix: this player has no `audio_format` attribute — the format
    # lives on the queued source group (see __init__/refill/write, which
    # all read `self.source_group.audio_format`). The previous
    # `self.audio_format` raised AttributeError at runtime.
    frequency = int(pitch * self.source_group.audio_format.sample_rate)
    self.lock()
    self._buffer.SetFrequency(frequency)
    self.unlock()
def set_cone_orientation(self, cone_orientation):
    """Point the sound cone along (x, y, z); 3D buffers only."""
    if self._buffer3d:
        x, y, z = cone_orientation
        self.lock()
        # z is negated: DirectSound uses a left-handed coordinate system.
        self._buffer3d.SetConeOrientation(x, y, -z, lib.DS3D_IMMEDIATE)
        self.unlock()
def set_cone_inner_angle(self, cone_inner_angle):
    """Store the inner cone angle (degrees) and reapply both angles."""
    if not self._buffer3d:
        return
    self._cone_inner_angle = int(cone_inner_angle)
    self._set_cone_angles()
def set_cone_outer_angle(self, cone_outer_angle):
    """Store the outer cone angle (degrees) and reapply both angles."""
    if not self._buffer3d:
        return
    self._cone_outer_angle = int(cone_outer_angle)
    self._set_cone_angles()
def _set_cone_angles(self):
    """Push the inner/outer cone angles to the 3D buffer, swapping them
    if necessary so that inner <= outer."""
    inner, outer = sorted((self._cone_inner_angle, self._cone_outer_angle))
    self.lock()
    self._buffer3d.SetConeAngles(inner, outer, lib.DS3D_IMMEDIATE)
    self.unlock()
def set_cone_outer_gain(self, cone_outer_gain):
    """Set the gain heard outside the outer cone (converted to dB)."""
    if not self._buffer3d:
        return
    attenuation = _db(cone_outer_gain)
    self.lock()
    self._buffer3d.SetConeOutsideVolume(attenuation, lib.DS3D_IMMEDIATE)
    self.unlock()
class DirectSoundDriver(mt_media.AbstractAudioDriver):
    """Module singleton that owns the IDirectSound device, the primary
    buffer, the 3D listener interface, and the refill worker thread.
    """

    def __init__(self):
        self._dsound = lib.IDirectSound()
        lib.DirectSoundCreate(None, ctypes.byref(self._dsound), None)

        # A trick used by mplayer.. use desktop as window handle since it
        # would be complex to use pyglet window handles (and what to do when
        # application is audio only?).
        hwnd = user32.GetDesktopWindow()
        self._dsound.SetCooperativeLevel(hwnd, lib.DSSCL_NORMAL)

        # Create primary buffer with 3D and volume capabilities
        self._buffer = lib.IDirectSoundBuffer()
        dsbd = lib.DSBUFFERDESC()
        dsbd.dwSize = ctypes.sizeof(dsbd)
        dsbd.dwFlags = (lib.DSBCAPS_CTRL3D |
                        lib.DSBCAPS_CTRLVOLUME |
                        lib.DSBCAPS_PRIMARYBUFFER)
        self._dsound.CreateSoundBuffer(dsbd, ctypes.byref(self._buffer), None)

        # Create listener
        self._listener = lib.IDirectSound3DListener()
        self._buffer.QueryInterface(lib.IID_IDirectSound3DListener,
                                    ctypes.byref(self._listener))

        # Create worker thread
        self.worker = DirectSoundWorker()
        self.worker.start()

    def __del__(self):
        # Best-effort: COM objects may already be gone at interpreter exit.
        try:
            if self._buffer:
                self.delete()
        except:
            pass

    def create_audio_player(self, source_group, player):
        return DirectSoundAudioPlayer(source_group, player)

    def delete(self):
        """Stop the worker thread and release primary buffer + listener."""
        self.worker.stop()
        self._buffer.Release()
        self._buffer = None
        self._listener.Release()
        self._listener = None

    # Listener API
    def _set_volume(self, volume):
        self._volume = volume
        self._buffer.SetVolume(_db(volume))

    def _set_position(self, position):
        self._position = position
        x, y, z = position
        # z is negated: DirectSound uses a left-handed coordinate system.
        self._listener.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)

    def _set_forward_orientation(self, orientation):
        self._forward_orientation = orientation
        self._set_orientation()

    def _set_up_orientation(self, orientation):
        self._up_orientation = orientation
        self._set_orientation()

    def _set_orientation(self):
        # Combine the cached forward and up vectors into one listener call.
        x, y, z = self._forward_orientation
        ux, uy, uz = self._up_orientation
        self._listener.SetOrientation(
            x, y, -z, ux, uy, -uz, lib.DS3D_IMMEDIATE)
def create_audio_driver():
    """Create and install the module-level DirectSound driver singleton.

    Players reach the shared worker thread and device via the global
    `driver` set here.
    """
    global driver
    driver = DirectSoundDriver()
    return driver
# Global driver needed for access to worker thread and _dsound
# (None until create_audio_driver() is called).
driver = None
|
jcfr/mystic
|
models/storn.py
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Author: Patrick Hung (patrickh @caltech)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
__doc__ = _doc = """
This is part of Storn's "Differential Evolution" test suite, as defined
in [2], with 'Corana' function definitions drawn from [3,4], 'Griewangk'
function definitions drawn from [5], and 'Zimmermann' function definitions
drawn from [6].
References::
[1] Storn, R. and Price, K. "Differential Evolution - A Simple and
Efficient Heuristic for Global Optimization over Continuous Spaces"
Journal of Global Optimization 11: 341-359, 1997.
[2] Storn, R. and Price, K. "Differential Evolution - A Simple and
Efficient Heuristic for Global Optimization over Continuous Spaces"
TR-95-012, ICSI, 1995. http://www.icsi.berkeley.edu/~storn/TR-95-012.pdf
[3] Ingber, L. "Simulated Annealing: Practice Versus Theory" J. of
Mathematical and Computer Modeling 18(11), 29-57, 1993.
[4] Corana, A. and Marchesi, M. and Martini, C. and Ridella, S.
"Minimizing Multimodal Functions of Continuous Variables with the
'Simulated Annealing Algorithm'" ACM Transactions on Mathematical
Software, March, 272-280, 1987.
[5] Griewangk, A.O. "Generalized Descent for Global Optimization"
Journal of Optimization Theory and Applications 34: 11-39, 1981.
[6] Zimmermann, W. "Operations Research" Oldenbourg Munchen, Wien, 1990.
"""
from abstract_model import AbstractFunction
from numpy import asarray
from math import pow, cos, sqrt
from numpy import sign, floor
class Corana(AbstractFunction):
    __doc__ = \
    """a Corana's parabola function generator

    Corana's parabola function [1,2,3,4] defines a paraboloid whose
    axes are parallel to the coordinate axes. This function has a
    large number of wells that increase in depth with proximity to
    the origin. The global minimum is a plateau around the origin.

    The generated function f(x) is a modified version of equation (22)
    of [2], where len(x) <= 4.
    """ + _doc

    def __init__(self, ndim=4): # is n-dimensional n=[1,4] (n=4 in ref)
        AbstractFunction.__init__(self, ndim=ndim)
        return

    def function(self,coeffs):
        """evaluates a 4-D Corana's parabola function for a list of coeffs

        f(x) = \sum_(i=0)^(3) f_0(x)

        Where for \abs(x_i - z_i) < 0.05:
        f_0(x) = 0.15*(z_i - 0.05*\sign(z_i))^(2) * d_i
        and otherwise:
        f_0(x) = d_i * x_(i)^(2),
        with z_i = \floor(\abs(x_i/0.2)+0.49999)*\sign(x_i)*0.2
        and d_i = 1,1000,10,100.

        For len(x) == 1, x = x_0,0,0,0;
        for len(x) == 2, x = x_0,0,x_1,0;
        for len(x) == 3, x = x_0,0,x_1,x_2;
        for len(x) >= 4, x = x_0,x_1,x_2,x_3.

        Inspect with mystic_model_plotter using::
            mystic.models.corana -b "-1:1:.01, -1:1:.01" -d -x 1

        The minimum is f(x)=0 for \abs(x_i) < 0.05 for all i."""
        d = [1., 1000., 10., 100.]
        _d = [0, 3, 1, 2] # ordering for lower dimensions
        #x = asarray(coeffs) #XXX: converting to numpy.array slows by 10x
        x = [0.]*4 # ensure that there are 4 coefficients
        if len(coeffs) < 4:
            # Pad to 4-D: place the supplied coefficients into the slots
            # given by _d (see the docstring's per-length x layouts).
            _x = x[:]
            _x[:len(coeffs)]=coeffs
            for i in range(4):
                x[_d.index(i)] = _x[i]
        else:
            x = coeffs
        r = 0
        for j in range(4):
            # z_j: x_j snapped to the nearest 0.2 grid point (signed).
            zj =  floor( abs(x[j]/0.2) + 0.49999 ) * sign(x[j]) * 0.2
            if abs(x[j]-zj) < 0.05:
                # inside a well: flattened (plateau) contribution
                r += 0.15 * pow(zj - 0.05*sign(zj), 2) * d[j]
            else:
                # outside a well: plain weighted paraboloid term
                r += d[j] * x[j] * x[j]
        return r

    minimizers = None #FIXME: degenerate minimum... (-0.05, 0.05)
    # minimum is f(x)=0 for \abs(x_i) < 0.05 for all i."""
    pass
class Griewangk(AbstractFunction):
    __doc__ = \
    """a Griewangk's function generator

    Griewangk's function [1,2,5] is a multi-dimensional cosine
    function that provides several periodic local minima, with
    the global minimum at the origin. The local minima are
    fractionally more shallow than the global minimum, such that
    when viewed at a very coarse scale the function appears as
    a multi-dimensional parabola similar to De Jong's sphere.

    The generated function f(x) is a modified version of equation (23)
    of [2], where len(x) >= 0.
    """ + _doc

    def __init__(self, ndim=10):
        # n-dimensional; the reference implementation uses n=10
        AbstractFunction.__init__(self, ndim=ndim)

    def function(self, coeffs):
        """evaluates an N-dimensional Griewangk's function for a list of coeffs

        f(x) = f_0(x) - f_1(x) + 1

        Where:
        f_0(x) = \sum_(i=0)^(N-1) x_(i)^(2) / 4000.
        and:
        f_1(x) = \prod_(i=0)^(N-1) \cos( x_i / (i+1)^(1/2) )

        Inspect with mystic_model_plotter using::
            mystic.models.griewangk -b "-10:10:.1, -10:10:.1" -d -x 5

        The minimum is f(x)=0.0 for x_i=0.0"""
        #x = asarray(x) #XXX: converting to numpy.array slows by 10x
        quadratic = sum(c * c for c in coeffs) / 4000.
        cosine_product = 1
        for idx, c in enumerate(coeffs):
            cosine_product = cosine_product * cos(c / sqrt(idx + 1.0))
        return quadratic - cosine_product + 1

    minimizers = [0.] #XXX: there are many periodic local minima
class Zimmermann(AbstractFunction):
    __doc__ = \
    """a Zimmermann function generator

    A Zimmermann function [1,2,6] poses difficulty for minimizers
    as the minimum is located at the corner of the constrained region.
    A penalty is applied to all values outside the constrained region,
    creating a local minimum.

    The generated function f(x) is a modified version of equation (24-26)
    of [2], and requires len(x) == 2.
    """ + _doc

    def __init__(self, ndim=2):
        AbstractFunction.__init__(self, ndim=ndim)

    def function(self, coeffs):
        """evaluates a Zimmermann function for a list of coeffs

        f(x) = max(f_0(x), p_i(x)), with i = 0,1,2,3

        Where:
        f_0(x) = 9 - x_0 - x_1
        with for x_0 < 0:
        p_0(x) = -100 * x_0
        and for x_1 < 0:
        p_1(x) = -100 * x_1
        and for c_2(x) > 16 and c_3(x) > 14:
        p_i(x) = 100 * c_i(x), with i = 2,3
        c_2(x) = (x_0 - 3)^2 + (x_1 - 2)^2
        c_3(x) = x_0 * x_1

        Otherwise, p_i(x)=0 for i=0,1,2,3 and c_i(x)=0 for i=2,3.

        Inspect with mystic_model_plotter using::
            mystic.models.zimmermann -b "-5:10:.1, -5:10:.1" -d -x 1

        The minimum is f(x)=0.0 at x=(7.0,2.0)"""
        x0, x1 = coeffs  # exactly two values required (x0, x1)
        objective = 9 - x0 - x1
        # penalties for leaving the feasible region
        #XXX: apply penalty p(k) = 100 + 100*k; k = |f(x) - c(x)|
        penalty_neg_x0 = -100 * x0 if x0 < 0 else 0
        penalty_neg_x1 = -100 * x1 if x1 < 0 else 0
        circle = (x0 - 3.) * (x0 - 3) + (x1 - 2.) * (x1 - 2)
        penalty_circle = 100 * (circle - 16) if circle > 16 else 0
        penalty_product = 100 * (x0 * x1 - 14.) if x0 * x1 > 14 else 0
        return max(objective, penalty_neg_x0, penalty_neg_x1,
                   penalty_circle, penalty_product)

    minimizers = [(7., 2.), (2.35477650, 5.94832200)]
    #minima = [0.0, 0.69690150]
# cleanup: _doc was only needed while building the class docstrings
del _doc

# prepared instances: ready-to-call cost functions with default dimensions
corana = Corana().function
griewangk = Griewangk().function
zimmermann = Zimmermann().function

# End of file
|
kaiix/schematics
|
schematics/types/base.py
|
import uuid
import re
import datetime
import decimal
import itertools
import functools
import random
import string
import six
from six import iteritems
from ..exceptions import (
StopValidation, ValidationError, ConversionError, MockCreationError
)
try:
from string import ascii_letters # PY3
except ImportError:
from string import letters as ascii_letters #PY2
try:
basestring #PY2
except NameError:
basestring = str #PY3
try:
unicode #PY2
except:
import codecs
unicode = str #PY3
def utf8_decode(s):
    """Return *s* as text: ``str(s)`` on Python 3, UTF-8 decode on Python 2."""
    if six.PY3:
        return str(s)  # todo: right thing to do?
    return unicode(s, 'utf-8')
def fill_template(template, min_length, max_length):
    """Interpolate a random string into ``template`` (one ``%s`` slot) so
    the final result respects the given length bounds; the padding term
    accounts for the template text minus the two ``%s`` characters."""
    length = get_value_in(
        min_length,
        max_length,
        padding=len(template) - 2,
        required_length=1)
    return template % random_string(length)
def force_unicode(obj, encoding='utf-8'):
    """Coerce *obj* to unicode text; ``None`` passes through unchanged."""
    if obj is None:
        return obj
    if isinstance(obj, basestring):
        if isinstance(obj, unicode):
            return obj
        return utf8_decode(obj)
    # non-string, non-None: stringify via the same decode helper
    return utf8_decode(obj)
def get_range_endpoints(min_length, max_length, padding=0, required_length=0):
    """Normalize mock-length bounds into a concrete (lo, hi) pair.

    Missing bounds default to 0/16 (or ``2 * min_length`` for the upper
    bound when only the lower is given). ``padding`` shrinks both ends;
    ``required_length`` raises the floor and triggers MockCreationError
    when it cannot fit under the ceiling.
    """
    if min_length is None:
        min_length = 0
        if max_length is None:
            max_length = 16
    elif max_length is None:
        max_length = max(min_length * 2, 16)

    if padding:
        max_length -= padding
        min_length = max(min_length - padding, 0)

    if max_length < required_length:
        raise MockCreationError(
            'This field is too short to hold the mock data')

    return max(min_length, required_length), max_length
def get_value_in(min_length, max_length, padding=0, required_length=0):
    """Pick a random integer inside the normalized length bounds."""
    lo, hi = get_range_endpoints(min_length, max_length,
                                 padding, required_length)
    return random.randint(lo, hi)
def random_string(length, chars=ascii_letters + string.digits):
    """Return *length* independently chosen characters from *chars*."""
    picked = [random.choice(chars) for _ in range(length)]
    return ''.join(picked)
# Monotonic counter used to stamp each BaseType instance with a creation
# order (`_position_hint`) so models can keep fields in declaration order.
_last_position_hint = -1
_next_position_hint = itertools.count()
class TypeMeta(type):
    """
    Metaclass for BaseType. Merges the ``MESSAGES`` dicts found along the
    bases (subclass entries win) and accumulates every ``validate_*``
    method into a ``_validators`` list.
    """

    def __new__(mcs, name, bases, attrs):
        merged_messages = {}
        collected_validators = []

        # Walk bases from most-distant to nearest so nearer bases override.
        for parent in reversed(bases):
            merged_messages.update(getattr(parent, 'MESSAGES', {}))
            collected_validators.extend(getattr(parent, '_validators', []))

        merged_messages.update(attrs.get('MESSAGES', {}))
        attrs['MESSAGES'] = merged_messages

        for attr_name, attr in attrs.items():
            if attr_name.startswith("validate_"):
                collected_validators.append(attr)
        attrs["_validators"] = collected_validators

        return type.__new__(mcs, name, bases, attrs)
class BaseType(TypeMeta('BaseTypeBase', (object, ), {})):
    """A base class for Types in a Schematics model. Instances of this
    class may be added to subclasses of ``Model`` to define a model schema.

    Validators that need to access variables on the instance
    can be defined be implementing methods whose names start with ``validate_``
    and accept one parameter (in addition to ``self``)

    :param required:
        Invalidate field when value is None or is not supplied. Default:
        False.
    :param default:
        When no data is provided default to this value. May be a callable.
        Default: None.
    :param serialized_name:
        The name of this field defaults to the class attribute used in the
        model. However if the field has another name in foreign data set this
        argument. Serialized data will use this value for the key name too.
    :param deserialize_from:
        A name or list of named fields for which foreign data sets are
        searched to provide a value for the given field.  This only effects
        inbound data.
    :param choices:
        A list of valid choices. This is the last step of the validator
        chain.
    :param validators:
        A list of callables. Each callable receives the value after it has been
        converted into a rich python type. Default: []
    :param serialize_when_none:
        Dictates if the field should appear in the serialized data even if the
        value is None. Default: True
    :param messages:
        Override the error messages with a dict. You can also do this by
        subclassing the Type and defining a `MESSAGES` dict attribute on the
        class. A metaclass will merge all the `MESSAGES` and override the
        resulting dict with instance level `messages` and assign to
        `self.messages`.
    """
    MESSAGES = {
        'required': u"This field is required.",
        'choices': u"Value must be one of {0}.",
    }

    def __init__(self, required=False, default=None, serialized_name=None,
                 choices=None, validators=None, deserialize_from=None,
                 serialize_when_none=None, messages=None):
        super(BaseType, self).__init__()

        self.required = required
        self._default = default
        self.serialized_name = serialized_name
        if choices and not isinstance(choices, (list, tuple)):
            raise TypeError('"choices" must be a list or tuple')
        self.choices = choices
        self.deserialize_from = deserialize_from

        # Bind the metaclass-collected validate_* methods to this instance,
        # then append any extra callables supplied by the caller.
        self.validators = [functools.partial(v, self) for v in self._validators]
        if validators:
            self.validators += validators

        self.serialize_when_none = serialize_when_none
        # Class-level MESSAGES overridden by instance-level overrides.
        self.messages = dict(self.MESSAGES, **(messages or {}))
        self._position_hint = next(_next_position_hint)  # For ordering of fields

    def __call__(self, value):
        # Calling a field converts a value, mirroring to_native().
        return self.to_native(value)

    def _mock(self, context=None):
        # Subclasses override this to generate type-appropriate mock data.
        return None

    def _setup(self, field_name, owner_model):
        """Perform late-stage setup tasks that are run after the containing model
        has been created.
        """
        self.name = field_name
        self.owner_model = owner_model

    @property
    def default(self):
        # A callable default is invoked on every access.
        default = self._default
        if callable(self._default):
            default = self._default()
        return default

    def to_primitive(self, value, context=None):
        """Convert internal data to a value safe to serialize.
        """
        return value

    def to_native(self, value, context=None):
        """
        Convert untrusted data to a richer Python construct.
        """
        return value

    def allow_none(self):
        # Defer to the owning model when attached; otherwise fall back to
        # this field's own serialize_when_none setting.
        if hasattr(self, 'owner_model'):
            return self.owner_model.allow_none(self)
        else:
            return self.serialize_when_none

    def validate(self, value):
        """
        Validate the field and return a clean value or raise a
        ``ValidationError`` with a list of errors raised by the validation
        chain. Stop the validation process from continuing through the
        validators by raising ``StopValidation`` instead of ``ValidationError``.
        """
        errors = []

        for validator in self.validators:
            try:
                validator(value)
            except ValidationError as exc:
                errors.extend(exc.messages)

                # StopValidation subclasses ValidationError: keep its
                # messages but abort the rest of the chain.
                if isinstance(exc, StopValidation):
                    break

        if errors:
            raise ValidationError(errors)

    def validate_required(self, value):
        if self.required and value is None:
            raise ValidationError(self.messages['required'])

    def validate_choices(self, value):
        if self.choices is not None:
            if value not in self.choices:
                raise ValidationError(self.messages['choices']
                                      .format(unicode(self.choices)))

    def mock(self, context=None):
        # Optional fields randomly fall back to their default value.
        if not self.required and not random.choice([True, False]):
            return self.default

        if self.choices is not None:
            return random.choice(self.choices)

        return self._mock(context)
class UUIDType(BaseType):
    """A field that stores a valid UUID value."""

    MESSAGES = {
        'convert': u"Couldn't interpret '{0}' value as UUID.",
    }

    def _mock(self, context=None):
        return uuid.uuid4()

    def to_native(self, value, context=None):
        """Accept a ``uuid.UUID`` as-is; otherwise parse it as a UUID string."""
        if isinstance(value, uuid.UUID):
            return value
        try:
            return uuid.UUID(value)
        except (AttributeError, TypeError, ValueError):
            raise ConversionError(self.messages['convert'].format(value))

    def to_primitive(self, value, context=None):
        return str(value)
class IPv4Type(BaseType):
    """ A field that stores a valid IPv4 address """

    def _mock(self, context=None):
        # Four random octets joined with dots, e.g. '172.0.255.3'.
        return '.'.join(str(random.randrange(256)) for _ in range(4))

    @classmethod
    def valid_ip(cls, addr):
        """Return True when *addr* is a dotted-quad with octets in 0..255."""
        try:
            addr = addr.strip().split(".")
        except AttributeError:
            # Not string-like at all.
            return False
        try:
            return len(addr) == 4 and all(0 <= int(octet) < 256 for octet in addr)
        except ValueError:
            return False

    def validate(self, value):
        """
        Make sure the value is a IPv4 address:
        http://stackoverflow.com/questions/9948833/validate-ip-address-from-list
        """
        # NOTE(review): overriding validate() wholesale bypasses the
        # metaclass-collected validator chain, so required/choices checks
        # never run for this field — presumably unintentional; confirm
        # before converting this to a validate_* hook.
        if not IPv4Type.valid_ip(value):
            error_msg = 'Invalid IPv4 address'
            raise ValidationError(error_msg)
        return True
class StringType(BaseType):
    """A unicode string field. Default minimum length is one. If you want to
    accept empty strings, init with ``min_length`` 0.
    """

    allow_casts = (int, str)

    MESSAGES = {
        'convert': u"Couldn't interpret '{0}' as string.",
        'max_length': u"String value is too long.",
        'min_length': u"String value is too short.",
        'regex': u"String value did not match validation regex.",
    }

    def __init__(self, regex=None, max_length=None, min_length=None, **kwargs):
        self.regex = regex
        self.max_length = max_length
        self.min_length = min_length
        super(StringType, self).__init__(**kwargs)

    def _mock(self, context=None):
        return random_string(get_value_in(self.min_length, self.max_length))

    def to_native(self, value, context=None):
        """Coerce to unicode text; only int/str inputs may be cast."""
        if value is None:
            return None
        if isinstance(value, unicode):
            return value
        if not isinstance(value, self.allow_casts):
            raise ConversionError(self.messages['convert'].format(value))
        if not isinstance(value, str):
            value = str(value)
        return utf8_decode(value)

    def validate_length(self, value):
        observed = len(value) if value else 0

        if self.max_length is not None and observed > self.max_length:
            raise ValidationError(self.messages['max_length'])

        if self.min_length is not None and observed < self.min_length:
            raise ValidationError(self.messages['min_length'])

    def validate_regex(self, value):
        if self.regex is not None and re.match(self.regex, value) is None:
            raise ValidationError(self.messages['regex'])
class URLType(StringType):
    """A field that validates input as an URL.

    If verify_exists=True is passed the validate function will make sure
    the URL makes a valid connection.
    """

    MESSAGES = {
        'invalid_url': u"Not a well formed URL.",
        'not_found': u"URL does not exist.",
    }

    URL_REGEX = re.compile(
        r'^https?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,2000}[A-Z0-9])?\.)+[A-Z]{2,63}\.?|'
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$', re.IGNORECASE
    )

    def __init__(self, verify_exists=False, **kwargs):
        self.verify_exists = verify_exists
        super(URLType, self).__init__(**kwargs)

    def _mock(self, context=None):
        return fill_template('http://a%s.ZZ', self.min_length,
                             self.max_length)

    def validate_url(self, value):
        """Check the URL shape; optionally open it to verify it exists."""
        if not URLType.URL_REGEX.match(value):
            raise StopValidation(self.messages['invalid_url'])
        if self.verify_exists:
            from six.moves import urllib
            try:
                # Bug fix: six.moves.urllib exposes Request/urlopen under
                # its `request` submodule. The previous `urllib.Request`
                # raised AttributeError, which the broad except turned
                # into a spurious 'not_found' for every URL.
                request = urllib.request.Request(value)
                urllib.request.urlopen(request)
            except Exception:
                raise StopValidation(self.messages['not_found'])
class EmailType(StringType):
    """A field that validates input as an E-Mail-Address.
    """

    MESSAGES = {
        'email': u"Not a well formed email address."
    }

    EMAIL_REGEX = re.compile(
        # dot-atom
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
        # quoted-string
        # Bug fix: the quoted-string class read `\001-011` (missing
        # backslash), making the range \x01-'0' plus literal '1's instead
        # of the intended control-character range \001-\011.
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016'
        r'-\177])*"'
        # domain
        r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,2000}[A-Z0-9])?\.)+[A-Z]{2,63}\.?$',
        re.IGNORECASE
    )

    def _mock(self, context=None):
        return fill_template('%s@example.com', self.min_length,
                             self.max_length)

    def validate_email(self, value):
        if not EmailType.EMAIL_REGEX.match(value):
            raise StopValidation(self.messages['email'])
class NumberType(BaseType):
    """A number field."""

    MESSAGES = {
        'number_coerce': u"Value '{0}' is not {1}.",
        'number_min': u"{0} value should be greater than {1}.",
        'number_max': u"{0} value should be less than {1}.",
    }

    def __init__(self, number_class, number_type,
                 min_value=None, max_value=None, **kwargs):
        # number_class: coercion callable (int, float, ...);
        # number_type: human-readable name used in error messages.
        self.number_class = number_class
        self.number_type = number_type
        self.min_value = min_value
        self.max_value = max_value
        super(NumberType, self).__init__(**kwargs)

    def _mock(self, context=None):
        return get_value_in(self.min_value, self.max_value)

    def to_native(self, value, context=None):
        try:
            return self.number_class(value)
        except (TypeError, ValueError):
            raise ConversionError(
                self.messages['number_coerce'].format(
                    value, self.number_type.lower()))

    def validate_is_a_number(self, value):
        # Re-run the coercion purely as a validity check.
        try:
            self.number_class(value)
        except (TypeError, ValueError):
            raise ConversionError(
                self.messages['number_coerce'].format(
                    value, self.number_type.lower()))

    def validate_range(self, value):
        if self.min_value is not None and value < self.min_value:
            raise ValidationError(
                self.messages['number_min'].format(
                    self.number_type, self.min_value))

        if self.max_value is not None and value > self.max_value:
            raise ValidationError(
                self.messages['number_max'].format(
                    self.number_type, self.max_value))

        return value
class IntType(NumberType):
    """A field that validates input as an Integer
    """

    def __init__(self, *args, **kwargs):
        super(IntType, self).__init__(number_class=int,
                                      number_type='Int',
                                      *args, **kwargs)
class LongType(NumberType):
    """A field that validates input as a Long
    """

    def __init__(self, *args, **kwargs):
        # Python 3 unified int/long; fall back to int when `long` is gone.
        try:
            number_class = long  #PY2
        except NameError:
            number_class = int  #PY3
        super(LongType, self).__init__(number_class=number_class,
                                       number_type='Long',
                                       *args, **kwargs)
class FloatType(NumberType):
    """A field that validates input as a Float
    """

    def __init__(self, *args, **kwargs):
        super(FloatType, self).__init__(number_class=float,
                                        number_type='Float',
                                        *args, **kwargs)
class DecimalType(BaseType):
    """A fixed-point decimal number field."""

    MESSAGES = {
        'number_coerce': u"Number '{0}' failed to convert to a decimal.",
        'number_min': u"Value should be greater than {0}.",
        'number_max': u"Value should be less than {0}.",
    }

    def __init__(self, min_value=None, max_value=None, **kwargs):
        self.min_value = min_value
        self.max_value = max_value
        super(DecimalType, self).__init__(**kwargs)

    def _mock(self, context=None):
        return get_value_in(self.min_value, self.max_value)

    def to_primitive(self, value, context=None):
        return unicode(value)

    def to_native(self, value, context=None):
        """Coerce to ``decimal.Decimal``, stringifying non-strings first."""
        if isinstance(value, decimal.Decimal):
            return value
        if not isinstance(value, basestring):
            value = unicode(value)
        try:
            return decimal.Decimal(value)
        except (TypeError, decimal.InvalidOperation):
            raise ConversionError(self.messages['number_coerce'].format(value))

    def validate_range(self, value):
        if self.min_value is not None and value < self.min_value:
            raise ValidationError(
                self.messages['number_min'].format(self.min_value))

        if self.max_value is not None and value > self.max_value:
            raise ValidationError(
                self.messages['number_max'].format(self.max_value))

        return value
class HashType(BaseType):
    """Base for hex-digest fields; subclasses define LENGTH (hex digits)."""

    MESSAGES = {
        'hash_length': u"Hash value is wrong length.",
        'hash_hex': u"Hash value is not hexadecimal.",
    }

    def _mock(self, context=None):
        return random_string(self.LENGTH, string.hexdigits)

    def to_native(self, value, context=None):
        # NOTE(review): length errors raise ValidationError while hex
        # errors raise ConversionError — an apparent inconsistency, but
        # callers may catch either; confirm before unifying.
        if len(value) != self.LENGTH:
            raise ValidationError(self.messages['hash_length'])
        try:
            # Cheap hex check; the original string is what gets returned.
            int(value, 16)
        except ValueError:
            raise ConversionError(self.messages['hash_hex'])
        return value
class MD5Type(HashType):
    """A field that validates input as resembling an MD5 hash.
    """
    # MD5 digests are 128 bits = 32 hex characters.
    LENGTH = 32
class SHA1Type(HashType):
    """A field that validates input as resembling an SHA1 hash.
    """
    # SHA1 digests are 160 bits = 40 hex characters.
    LENGTH = 40
class BooleanType(BaseType):
    """A boolean field type. In addition to ``True`` and ``False``, coerces these
    values:

    + For ``True``: "True", "true", "1"
    + For ``False``: "False", "false", "0"
    """

    TRUE_VALUES = ('True', 'true', '1')
    FALSE_VALUES = ('False', 'false', '0')

    def _mock(self, context=None):
        return random.choice([True, False])

    def to_native(self, value, context=None):
        """Coerce recognized strings and 0/1 ints; reject everything else."""
        if isinstance(value, basestring):
            if value in self.TRUE_VALUES:
                return True
            if value in self.FALSE_VALUES:
                return False
        elif isinstance(value, int) and value in (0, 1):
            return bool(value)

        if isinstance(value, bool):
            return value
        raise ConversionError(u"Must be either true or false.")
class DateType(BaseType):
    """Defaults to converting to and from ISO8601 date values.
    """

    SERIALIZED_FORMAT = '%Y-%m-%d'

    MESSAGES = {
        'parse': u"Could not parse {0}. Should be ISO8601 (YYYY-MM-DD).",
    }

    def __init__(self, **kwargs):
        self.serialized_format = self.SERIALIZED_FORMAT
        super(DateType, self).__init__(**kwargs)

    def _mock(self, context=None):
        # Day capped at 28 so the value is valid in every month.
        return datetime.datetime(
            year=random.randrange(600) + 1900,
            month=random.randrange(12) + 1,
            day=random.randrange(28) + 1,
        )

    def to_native(self, value, context=None):
        """Pass ``date`` objects through; parse strings as ISO8601 dates."""
        if isinstance(value, datetime.date):
            return value
        try:
            parsed = datetime.datetime.strptime(value, self.serialized_format)
        except (ValueError, TypeError):
            raise ConversionError(self.messages['parse'].format(value))
        return parsed.date()

    def to_primitive(self, value, context=None):
        return value.strftime(self.serialized_format)
class DateTimeType(BaseType):
    """Defaults to converting to and from ISO8601 datetime values.

    :param formats:
        A value or list of values suitable for ``datetime.datetime.strptime``
        parsing. Default: `('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%SZ')`
    :param serialized_format:
        The output format suitable for Python ``strftime``. Default: ``'%Y-%m-%dT%H:%M:%S.%f'``
    """

    DEFAULT_FORMATS = (
        '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%SZ',
    )
    SERIALIZED_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'

    MESSAGES = {
        'parse_formats': u'Could not parse {0}. Valid formats: {1}',
        'parse': u"Could not parse {0}. Should be ISO8601.",
    }

    def __init__(self, formats=None, serialized_format=None, **kwargs):
        # A single format string is promoted to a one-element list.
        if isinstance(formats, basestring):
            formats = [formats]
        self.formats = self.DEFAULT_FORMATS if formats is None else formats
        self.serialized_format = (self.SERIALIZED_FORMAT
                                  if serialized_format is None
                                  else serialized_format)
        super(DateTimeType, self).__init__(**kwargs)

    def _mock(self, context=None):
        # Day capped at 28 so the value is valid in every month.
        return datetime.datetime(
            year=random.randrange(600) + 1900,
            month=random.randrange(12) + 1,
            day=random.randrange(28) + 1,
            hour=random.randrange(24),
            minute=random.randrange(60),
            second=random.randrange(60),
            microsecond=random.randrange(1000000),
        )

    def to_native(self, value, context=None):
        """Pass datetimes through; otherwise try each accepted format."""
        if isinstance(value, datetime.datetime):
            return value

        for pattern in self.formats:
            try:
                return datetime.datetime.strptime(value, pattern)
            except (ValueError, TypeError):
                pass

        # Nothing matched: pick the error message that names the formats
        # only when the caller customized them.
        if self.formats == self.DEFAULT_FORMATS:
            message = self.messages['parse'].format(value)
        else:
            message = self.messages['parse_formats'].format(
                value, ", ".join(self.formats))
        raise ConversionError(message)

    def to_primitive(self, value, context=None):
        # A callable serialized_format acts as a custom formatter.
        if callable(self.serialized_format):
            return self.serialized_format(value)
        return value.strftime(self.serialized_format)
class GeoPointType(BaseType):
    """A list storing a latitude and longitude.

    Accepts a two-element list/tuple, or a dict with exactly two numeric
    values; any other input raises ``ValueError``.
    """
    def _mock(self, context=None):
        return (random.randrange(-90, 90), random.randrange(-180, 180))
    def to_native(self, value, context=None):
        """Make sure that a geo-value is of type (x, y)

        The container type is validated *before* the length so unsupported
        inputs (e.g. an int, which has no len()) raise the documented
        ValueError instead of an opaque TypeError from len().
        """
        if isinstance(value, dict):
            coordinates = list(value.values())
        elif isinstance(value, (list, tuple)):
            coordinates = list(value)
        else:
            raise ValueError('GeoPointType can only accept tuples, lists, or dicts')
        if len(coordinates) != 2:
            raise ValueError('Value must be a two-dimensional point')
        if not all(isinstance(coord, (float, int)) for coord in coordinates):
            raise ValueError('Both values in point must be float or int')
        return value
class MultilingualStringType(BaseType):
    """
    A multilanguage string field, stored as a dict with {'locale': 'localized_value'}.
    Minimum and maximum lengths apply to each of the localized values.
    At least one of ``default_locale`` or ``context['locale']`` must be defined
    when calling ``.to_primitive``.
    """
    # Types that may be cast to unicode on output (Python 2 era whitelist).
    allow_casts = (int, str)
    MESSAGES = {
        'convert': u"Couldn't interpret value as string.",
        'max_length': u"String value in locale {0} is too long.",
        'min_length': u"String value in locale {0} is too short.",
        'locale_not_found': u"No requested locale was available.",
        'no_locale': u"No default or explicit locales were given.",
        'regex_locale': u"Name of locale {0} did not match validation regex.",
        'regex_localized': u"String value in locale {0} did not match validation regex.",
    }
    # Matches 'en' or 'en_US'. Fixed: the original pattern used '(:?' (an
    # optional literal colon) instead of the non-capturing group '(?:',
    # which incorrectly accepted locales like 'en:_US'.
    LOCALE_REGEX = r'^[a-z]{2}(?:_[A-Z]{2})?$'
    def __init__(self, regex=None, max_length=None, min_length=None,
                 default_locale=None, locale_regex=LOCALE_REGEX, **kwargs):
        # Compile validation patterns once; pass None/'' to disable a check.
        self.regex = re.compile(regex) if regex else None
        self.max_length = max_length
        self.min_length = min_length
        self.default_locale = default_locale
        self.locale_regex = re.compile(locale_regex) if locale_regex else None
        super(MultilingualStringType, self).__init__(**kwargs)
    def _mock(self, context=None):
        return random_string(get_value_in(self.min_length, self.max_length))
    def to_native(self, value, context=None):
        """Make sure a MultilingualStringType value is a dict or None."""
        if not (value is None or isinstance(value, dict)):
            raise ValueError('Value must be a dict or None')
        return value
    def to_primitive(self, value, context=None):
        """
        Use a combination of ``default_locale`` and ``context['locale']`` to return
        the best localized string.
        """
        if value is None:
            return None
        context_locale = None
        if context is not None and 'locale' in context:
            context_locale = context['locale']
        # Build a list of all possible locales to try
        possible_locales = []
        for locale in (context_locale, self.default_locale):
            if not locale:
                continue
            if isinstance(locale, basestring):
                possible_locales.append(locale)
            else:
                # default_locale may itself be an ordered list of fallbacks.
                possible_locales.extend(locale)
        if not possible_locales:
            raise ConversionError(self.messages['no_locale'])
        for locale in possible_locales:
            if locale in value:
                localized = value[locale]
                break
        else:
            raise ConversionError(self.messages['locale_not_found'])
        if not isinstance(localized, unicode):
            if isinstance(localized, self.allow_casts):
                if not isinstance(localized, str):
                    localized = str(localized)
                #localized = unicode(localized, 'utf-8')
                localized = utf8_decode(localized)
            else:
                raise ConversionError(self.messages['convert'])
        return localized
    def validate_length(self, value):
        # Length limits apply to each localized value, not the dict itself.
        for locale, localized in value.items():
            len_of_value = len(localized) if localized else 0
            if self.max_length is not None and len_of_value > self.max_length:
                raise ValidationError(self.messages['max_length'].format(locale))
            if self.min_length is not None and len_of_value < self.min_length:
                raise ValidationError(self.messages['min_length'].format(locale))
    def validate_regex(self, value):
        # Validates both the locale keys and the localized values when the
        # corresponding pattern was configured.
        if self.regex is None and self.locale_regex is None:
            return
        for locale, localized in value.items():
            if self.regex is not None and self.regex.match(localized) is None:
                raise ValidationError(
                    self.messages['regex_localized'].format(locale))
            if self.locale_regex is not None and self.locale_regex.match(locale) is None:
                raise ValidationError(
                    self.messages['regex_locale'].format(locale))
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/googlecloudsdk/core/resource/resource_projection_spec.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class that creates resource projection specification."""
import sys
from googlecloudsdk.third_party.py27 import py27_copy as copy
PROJECTION_ARG_DOC = ' projection: The parent ProjectionSpec.'
ALIGN_DEFAULT = 'left'
# Column alignment helpers keyed by the 'align' attribute value; each maps
# (string, width) -> padded string.
ALIGNMENTS = {'left': lambda s, w: s.ljust(w),
              'center': lambda s, w: s.center(w),
              'right': lambda s, w: s.rjust(w)}
class ProjectionSpec(object):
  """Creates a resource projection specification.
  A resource projection is an expression string that contains a list of resource
  keys with optional attributes. A projector is a method that takes a projection
  specification and a resource object as input and produces a new
  JSON-serializable object containing only the values corresponding to the keys
  in the projection specification.
  Optional projection key attributes may transform the values in the output
  JSON-serializable object. Cloud SDK projection attributes are used for output
  formatting.
  A default or empty projection expression still produces a projector that
  converts a resource to a JSON-serializable object.
  This class is used by the resource projection expression parser to create a
  resource projection specification from a projection expression string.
  Attributes:
    aliases: The short key name alias dictionary.
    _active: The transform active level. Incremented each time Defaults() is
      called. Used to determine active transforms.
    attributes: Projection attributes dict indexed by attribute name.
    _columns: A list of (key,_Attribute) tuples used to project a resource to
      a list of columns.
    _compiler: The projection compiler method for nested projections.
    _empty: An empty projection _Tree used by Projector().
    _name: The projection name from the expression string.
    _tree: The projection _Tree root, used by
      resource_projector.Evaluate() to efficiently project each resource.
    symbols: Default and caller-defined transform function dict indexed by
      function name.
  """
  DEFAULT = 0  # _Attribute default node flag.
  INNER = 1  # _Attribute inner node flag.
  PROJECT = 2  # _Attribute project node flag.
  class _Column(object):
    """Column key and transform attribute for self._columns.
    Attributes:
      key: The column key.
      attribute: The column key _Attribute.
    """
    def __init__(self, key, attribute):
      self.key = key
      self.attribute = attribute
  def __init__(self, defaults=None, symbols=None, compiler=None):
    """Initializes a projection.
    Args:
      defaults: resource_projection_spec.ProjectionSpec defaults.
      symbols: Transform function symbol table dict indexed by function name.
      compiler: The projection compiler method for nested projections.
    """
    self.aliases = {}
    self.attributes = {}
    self._columns = []
    self._compiler = compiler
    self._empty = None
    self._name = None
    self._snake_headings = {}
    self._snake_re = None
    if defaults:
      # Deep-copy the defaults' tree and symbols so later edits to this spec
      # cannot mutate the shared defaults.
      self._active = defaults.active
      self._tree = copy.deepcopy(defaults.GetRoot())
      self.Defaults()
      if defaults.symbols:
        self.symbols = copy.deepcopy(defaults.symbols)
        if symbols:
          self.symbols.update(symbols)
      else:
        self.symbols = symbols if symbols else {}
      self.aliases.update(defaults.aliases)
    else:
      self._active = 0
      self._tree = None
      self.symbols = symbols
  @property
  def active(self):
    """Gets the transform active level."""
    return self._active
  @property
  def compiler(self):
    """Returns the projection compiler method for nested projections."""
    return self._compiler
  def _Defaults(self, projection):
    """Defaults() helper -- converts a projection to a default projection.
    Args:
      projection: A node in the original projection _Tree.
    """
    # Recursively mark every node as a DEFAULT node.
    projection.attribute.flag = self.DEFAULT
    for node in projection.tree.values():
      self._Defaults(node)
  def _Print(self, projection, out, level):
    """Print() helper -- prints projection node p and its children.
    Args:
      projection: A _Tree node in the original projection.
      out: The output stream.
      level: The nesting level counting from 1 at the root.
    """
    for key in projection.tree:
      out.write('{indent} {key} : {attribute}\n'.format(
          indent='  ' * level,
          key=key,
          attribute=projection.tree[key].attribute))
      self._Print(projection.tree[key], out, level + 1)
  def AddAttribute(self, name, value):
    """Adds name=value to the attributes.
    Args:
      name: The attribute name.
      value: The attribute value
    """
    self.attributes[name] = value
  def DelAttribute(self, name):
    """Deletes name from the attributes if it is in the attributes.
    Args:
      name: The attribute name.
    """
    if name in self.attributes:
      del self.attributes[name]
  def AddAlias(self, name, key):
    """Adds name as an alias for key to the projection.
    Args:
      name: The short (no dots) alias name for key.
      key: The parsed key to add.
    """
    self.aliases[name] = key
  def AddKey(self, key, attribute):
    """Adds key and attribute to the projection.
    Args:
      key: The parsed key to add.
      attribute: Parsed _Attribute to add.
    """
    self._columns.append(self._Column(key, attribute))
  def SetName(self, name):
    """Sets the projection name.
    The projection name is the rightmost of the names in the expression.
    Args:
      name: The projection name.
    """
    if self._name:
      # Reset the name-specific attributes.
      self.attributes = {}
    self._name = name
  def GetRoot(self):
    """Returns the projection root node.
    Returns:
      The resource_projector_parser._Tree root node.
    """
    return self._tree
  def SetRoot(self, root):
    """Sets the projection root node.
    Args:
      root: The resource_projector_parser._Tree root node.
    """
    self._tree = root
  def GetEmpty(self):
    """Returns the projector resource_projector_parser._Tree empty node.
    Returns:
      The projector resource_projector_parser._Tree empty node.
    """
    return self._empty
  def SetEmpty(self, node):
    """Sets the projector resource_projector_parser._Tree empty node.
    The empty node is used by to apply [] empty slice projections.
    Args:
      node: The projector resource_projector_parser._Tree empty node.
    """
    self._empty = node
  def Columns(self):
    """Returns the projection columns.
    Returns:
      The columns in the projection, None if the entire resource is projected.
    """
    return self._columns
  def ColumnCount(self):
    """Returns the number of columns in the projection.
    Returns:
      The number of columns in the projection, 0 if the entire resource is
      projected.
    """
    return len(self._columns)
  def Defaults(self):
    """Converts the projection to a default projection.
    A default projection provides defaults for attribute values and function
    symbols. An explicit non-default projection value always overrides the
    corresponding default value.
    """
    if self._tree:
      self._Defaults(self._tree)
    self._columns = []
    self._active += 1
  def Aliases(self):
    """Returns the short key name alias dictionary.
    This dictionary maps short (no dots) names to parsed keys.
    Returns:
      The short key name alias dictionary.
    """
    return self.aliases
  def Attributes(self):
    """Returns the projection _Attribute dictionary.
    Returns:
      The projection _Attribute dictionary.
    """
    return self.attributes
  def Alignments(self):
    """Returns the projection column justfication list.
    Returns:
      The ordered list of alignment functions, where each function is one of
      ljust [default], center, or rjust.
    """
    return [ALIGNMENTS[col.attribute.align] for col in self._columns]
  def Labels(self):
    """Returns the ordered list of projection labels.
    Returns:
      The ordered list of projection label strings, None if all labels are
      empty.
    """
    labels = [col.attribute.label or '' for col in self._columns]
    return labels if any(labels) else None
  def Name(self):
    """Returns the projection name.
    The projection name is the rightmost of the names in the expression.
    Returns:
      The projection name, None if none was specified.
    """
    return self._name
  def Order(self):
    """Returns the projection sort key order suitable for use by sorted().
    Example:
      projection = resource_projector.Compile('...')
      order = projection.Order()
      if order:
        rows = sorted(rows, key=itemgetter(*order))
    Returns:
      The list of (sort-key-index, reverse), [] if projection is None
      or if all sort order indices in the projection are None (unordered).
    """
    ordering = []
    for i, col in enumerate(self._columns):
      if col.attribute.order or col.attribute.reverse:
        # sys.maxint (Python 2 only) sorts reverse-only columns last.
        ordering.append(
            (col.attribute.order or sys.maxint, i, col.attribute.reverse))
    return [(i, reverse) for _, i, reverse in sorted(ordering)]
  def Print(self, out=sys.stdout):
    """Prints the projection with indented nesting.
    Args:
      out: The output stream, sys.stdout if None.
    """
    if self._tree:
      self._Print(self._tree, out, 1)
  def Tree(self):
    """Returns the projection tree root.
    Returns:
      The projection tree root.
    """
    return self._tree
|
OpenACalendar/OpenACalendar-Tools-Social
|
example-facebook-post-weekly/facebook-post-weekly.py
|
#!/usr/bin/env python
import logging
from pdb import set_trace
import requests
import simplejson
from time import time
import os
import facebook
# MY_API_URL
# MY_SITE_MSG
# MY_GROUP_NAME
# POST_TO_ID = None
def run():
    """Fetch the week's upcoming events and post the summary message."""
    events = get_from_cal_json()
    post(create_msg(events))
def get_from_cal_json():
    # Fetch the event list from the OpenACalendar JSON API and keep only
    # events that start within the next 7 days and are not deleted.
    print "Getting data from OpenACalendar"
    r = requests.get(MY_API_URL)
    if r.status_code != requests.codes.ok:
        r.raise_for_status()
    j = simplejson.loads(r.text)
    now = time()
    inaweek = now + 60 * 60 * 24 * 7  # 7 days, in seconds
    data = [
        x for x in j['data']
        if x['start']['timestamp'] > now
        and x['start']['timestamp'] < inaweek
        and not x['deleted']
    ]
    print "Got Data From OpenACalendar"
    return data
def create_msg(data):
    """Render the upcoming-events list into a plain-text post body.

    Side effect: each event dict gains a 'displaystart' key copied from its
    start/displaytimezone field (same as the original behaviour).
    """
    for event in data:
        event['displaystart'] = event['start']['displaytimezone']
    lines = [MY_SITE_MSG + ':', '']
    for event in data:
        lines.extend([
            event['displaystart'],
            event['summaryDisplay'],
            event['url'],
            '',
        ])
    return '\n'.join(lines)
def get_group_ids(graph):
    # Resolve the configured group name(s) to Facebook group ids by listing
    # the authenticated user's groups (requires user_groups permission).
    print "Getting Groups ID"
    # need user_groups permission
    # Why doesn't Facebook provide an API for getting the
    # group id from the name?
    my_groups = graph.get_connections('me', 'groups')['data']
    print "Got Group ID"
    # Add your group names here
    group_names = [
        MY_GROUP_NAME,
    ]
    assert group_names, "Need to add some groups to post to"
    return [x['id'] for x in my_groups if x['name'] in group_names]
def post(msg):
    # Post *msg* to the configured Facebook group(s).
    # NOTE(review): the `print msg` / `return` pair below exits before the
    # posting loop, so nothing is ever actually sent -- this looks like a
    # dry-run/debugging leftover. TODO confirm intent before removing.
    token = os.environ['FACEBOOK_ACCESS_TOKEN']
    graph = facebook.GraphAPI(token)
    profile = graph.get_object("me")
    if POST_TO_ID is None:
        group_ids = get_group_ids(graph)
    else:
        group_ids = [ POST_TO_ID, ]
    print msg
    return
    for group_id in group_ids:
        print "Posting to "+str(group_id)
        graph.put_object(str(group_id), "feed", message=msg)
    print "Posted!"
if __name__ == '__main__':
try:
MY_API_URL
except:
print "Set your MY_API_URL e.g. 'http://jazzcal.com/api1/events.json'"
exit (-1)
try:
MY_SITE_MSG
except:
print "Set your MY_SITE_MSG e.g. 'This week's jazz gigs on Jazzcal.com'"
exit (-1)
try:
MY_GROUP_NAME
except:
print "Set your MY_GROUP_NAME"
exit (-1)
try:
token = os.environ['FACEBOOK_ACCESS_TOKEN']
except:
print "Set the env var FACEBOOK_ACCESS_TOKEN"
exit (-1)
run()
# eof
|
docwalter/py3status
|
py3status/modules/whoami.py
|
# -*- coding: utf-8 -*-
"""
Display logged-in username.
Configuration parameters:
format: display format for whoami (default '{username}')
Format placeholders:
{username} display current username
Inspired by i3 FAQ:
https://faq.i3wm.org/question/1618/add-user-name-to-status-bar.1.html
@author ultrabug
SAMPLE OUTPUT
{'full_text': u'ultrabug'}
"""
from getpass import getuser
class Py3status:
    """i3status module that displays the current login name."""
    # available configuration parameters
    format = '{username}'
    class Meta:
        deprecated = {
            'remove': [
                {
                    'param': 'cache_timeout',
                    'msg': 'obsolete parameter',
                },
            ],
        }
    def whoami(self):
        """Build the bar segment for the current user (via getpass)."""
        payload = {'username': getuser()}
        return {
            'cached_until': self.py3.CACHE_FOREVER,
            'full_text': self.py3.safe_format(self.format, payload),
        }
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
kapteyn-astro/kapteyn
|
doc/source/EXAMPLES/mu_minorticks.py
|
from kapteyn import maputils
from matplotlib import pyplot as plt
# Demo: four panels of the same FITS image with different graticule setups
# (sky frame / offset axes), each decorated with minor tick marks.
fitsobj = maputils.FITSimage("m101.fits")
fig = plt.figure()
fig.subplots_adjust(left=0.18, bottom=0.10, right=0.90,
                    top=0.90, wspace=0.95, hspace=0.20)
for i in range(4):
   f = fig.add_subplot(2,2,i+1)
   mplim = fitsobj.Annotatedimage(f)
   # Panel-specific graticule: equatorial, equatorial offset, galactic,
   # galactic offset.
   if i == 0:
      majorgrat = mplim.Graticule()
      majorgrat.setp_gratline(visible=False)
   elif i == 1:
      majorgrat = mplim.Graticule(offsetx=True, unitsx='ARCMIN')
      majorgrat.setp_gratline(visible=False)
   elif i == 2:
      majorgrat = mplim.Graticule(skyout='galactic', unitsx='ARCMIN')
      majorgrat.setp_gratline(color='b')
   else:
      majorgrat = mplim.Graticule(skyout='galactic',
                                  offsetx=True, unitsx='ARCMIN')
      majorgrat.setp_gratline(color='b')
   majorgrat.setp_tickmark(markersize=10)
   majorgrat.setp_ticklabel(fontsize=6)
   majorgrat.setp_plotaxis(plotaxis=[0,1], fontsize=10)
   # 3 minor intervals in x, 5 in y between each pair of major ticks.
   minorgrat = mplim.Minortickmarks(majorgrat, 3, 5,
                                    color="#aa44dd", markersize=3, markeredgewidth=2)
maputils.showall()
plt.show()
|
puttarajubr/commcare-hq
|
corehq/apps/reports/models.py
|
from datetime import datetime, timedelta
import logging
from urllib import urlencode
from django.http import Http404
from django.utils import html
from django.utils.safestring import mark_safe
import pytz
from corehq import Domain
from corehq.apps import reports
from corehq.apps.app_manager.models import get_app, Form, RemoteApp
from corehq.apps.app_manager.util import get_case_properties
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
from corehq.apps.domain.middleware import CCHQPRBACMiddleware
from corehq.apps.export.models import FormQuestionSchema
from corehq.apps.reports.display import xmlns_to_name
from dimagi.ext.couchdbkit import *
from corehq.apps.reports.exportfilters import form_matches_users, is_commconnect_form, default_form_filter, \
default_case_filter
from corehq.apps.users.models import WebUser, CommCareUser, CouchUser
from corehq.util.view_utils import absolute_reverse
from couchexport.models import SavedExportSchema, GroupExportConfiguration, FakeSavedExportSchema, SplitColumn
from couchexport.transforms import couch_to_excel_datetime, identity
from couchexport.util import SerializableFunction
import couchforms
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.couch.database import get_db
from dimagi.utils.decorators.memoized import memoized
from django.conf import settings
from django.core.validators import validate_email
from corehq.apps.reports.dispatcher import ProjectReportDispatcher, CustomProjectReportDispatcher
import json
import calendar
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from dimagi.utils.logging import notify_exception
from django_prbac.exceptions import PermissionDenied
class HQUserType(object):
    """Constants and factory helpers for the report user-type filter."""
    REGISTERED = 0
    DEMO_USER = 1
    ADMIN = 2
    UNKNOWN = 3
    COMMTRACK = 4
    human_readable = [settings.COMMCARE_USER_TERM,
                      ugettext_noop("demo_user"),
                      ugettext_noop("admin"),
                      ugettext_noop("Unknown Users"),
                      ugettext_noop("CommCare Supply")]
    toggle_defaults = (True, False, False, False, False)
    count = len(human_readable)
    included_defaults = (True, True, True, True, False)
    @classmethod
    def use_defaults(cls):
        """Filter set with the standard inclusions and default toggles."""
        return cls._get_manual_filterset(cls.included_defaults,
                                         cls.toggle_defaults)
    @classmethod
    def all_but_users(cls):
        """Filter set with every type except registered users toggled on."""
        toggles = [True] * cls.count
        toggles[cls.REGISTERED] = False
        return cls._get_manual_filterset(cls.included_defaults, toggles)
    @classmethod
    def commtrack_defaults(cls):
        # this is just a convenience method for clarity on commtrack projects
        return cls.all()
    @classmethod
    def all(cls):
        """Filter set including every user type, with the default toggles."""
        return cls._get_manual_filterset((True,) * cls.count,
                                         cls.toggle_defaults)
    @classmethod
    def _get_manual_filterset(cls, included, defaults):
        """
        manually construct a filter set. included and defaults should both be
        arrays of booleans mapping to values in human_readable and whether they should be
        included and defaulted, respectively.
        """
        toggles = []
        for index in range(cls.count):
            if included[index]:
                toggles.append(HQUserToggle(index, defaults[index]))
        return toggles
    @classmethod
    def use_filter(cls, ufilter):
        """Toggles whose on/off state mirrors membership in *ufilter*."""
        return [HQUserToggle(index, unicode(index) in ufilter)
                for index in range(cls.count)]
class HQToggle(object):
    """A named on/off switch used by report filter panels."""
    type = None
    show = False
    name = None
    def __init__(self, type, show, name):
        self.type = type
        self.show = show
        self.name = name
    def __repr__(self):
        return "{0}[{1}:{2}:{3}]".format(
            self.__class__.__name__, self.type, self.show, self.name)
class HQUserToggle(HQToggle):
    # Toggle whose display name is the translated human-readable label for
    # the given HQUserType constant.
    def __init__(self, type, show):
        name = _(HQUserType.human_readable[type])
        super(HQUserToggle, self).__init__(type, show, name)
class TempCommCareUser(CommCareUser):
    # In-memory stand-in for special report rows (demo/admin/unknown users).
    # Never persisted -- save() is explicitly disabled below.
    filter_flag = IntegerProperty()
    def __init__(self, domain, username, uuid):
        # Map the magic usernames back to their HQUserType filter flag.
        if username == HQUserType.human_readable[HQUserType.DEMO_USER]:
            filter_flag = HQUserType.DEMO_USER
        elif username == HQUserType.human_readable[HQUserType.ADMIN]:
            filter_flag = HQUserType.ADMIN
        else:
            filter_flag = HQUserType.UNKNOWN
        super(TempCommCareUser, self).__init__(
            domain=domain,
            username=username,
            _id=uuid,
            date_joined=datetime.utcnow(),
            is_active=False,
            user_data={},
            first_name='',
            last_name='',
            filter_flag=filter_flag
        )
    def save(self, **params):
        # Synthetic users must never be written to couch.
        raise NotImplementedError
    @property
    def userID(self):
        return self._id
    @property
    def username_in_report(self):
        # HTML-escaped display name; markup varies by filter flag.
        if self.filter_flag == HQUserType.UNKNOWN:
            final = mark_safe('%s <strong>[unregistered]</strong>' % html.escape(self.username))
        elif self.filter_flag == HQUserType.DEMO_USER:
            final = mark_safe('<strong>%s</strong>' % html.escape(self.username))
        else:
            final = mark_safe('<strong>%s</strong> (%s)' % tuple(map(html.escape, [self.username, self.user_id])))
        return final
    @property
    def raw_username(self):
        return self.username
    class Meta:
        app_label = 'reports'
# Valid values for ReportConfig.date_range; '' leaves the choice to the report.
DATE_RANGE_CHOICES = ['last7', 'last30', 'lastn', 'lastmonth', 'since', 'range', '']
class ReportConfig(CachedCouchDocumentMixin, Document):
domain = StringProperty()
# the prefix of the report dispatcher class for this report, used to
# get route name for url reversing, and report names
report_type = StringProperty()
report_slug = StringProperty()
subreport_slug = StringProperty(default=None)
name = StringProperty()
description = StringProperty()
owner_id = StringProperty()
filters = DictProperty()
date_range = StringProperty(choices=DATE_RANGE_CHOICES)
days = IntegerProperty(default=None)
start_date = DateProperty(default=None)
end_date = DateProperty(default=None)
datespan_slug = StringProperty(default=None)
def delete(self, *args, **kwargs):
notifications = self.view('reportconfig/notifications_by_config',
reduce=False, include_docs=True, key=self._id).all()
for n in notifications:
n.config_ids.remove(self._id)
if n.config_ids:
n.save()
else:
n.delete()
return super(ReportConfig, self).delete(*args, **kwargs)
@classmethod
def by_domain_and_owner(cls, domain, owner_id, report_slug=None,
stale=True, skip=None, limit=None):
if stale:
#kwargs['stale'] = settings.COUCH_STALE_QUERY
pass
if report_slug is not None:
key = ["name slug", domain, owner_id, report_slug]
else:
key = ["name", domain, owner_id]
db = cls.get_db()
kwargs = {}
if skip is not None:
kwargs['skip'] = skip
if limit is not None:
kwargs['limit'] = limit
result = cache_core.cached_view(
db,
"reportconfig/configs_by_domain",
reduce=False,
include_docs=True,
startkey=key,
endkey=key + [{}],
wrapper=cls.wrap,
**kwargs
)
return result
@classmethod
def default(self):
return {
'name': '',
'description': '',
#'date_range': 'last7',
'days': None,
'start_date': None,
'end_date': None,
'filters': {}
}
def to_complete_json(self):
result = super(ReportConfig, self).to_json()
result.update({
'url': self.url,
'report_name': self.report_name,
'date_description': self.date_description,
'datespan_filters': self.datespan_filters,
'has_ucr_datespan': self.has_ucr_datespan,
})
return result
@property
@memoized
def _dispatcher(self):
from corehq.apps.userreports.reports.view import ConfigurableReport
dispatchers = [
ProjectReportDispatcher,
CustomProjectReportDispatcher,
ConfigurableReport,
]
for dispatcher in dispatchers:
if dispatcher.prefix == self.report_type:
return dispatcher()
raise Exception("Unknown dispatcher: %s" % self.report_type)
def get_date_range(self):
"""Duplicated in reports.config.js"""
date_range = self.date_range
# allow old report email notifications to represent themselves as a
# report config by leaving the default date range up to the report
# dispatcher
if not date_range:
return {}
import datetime
from dateutil.relativedelta import relativedelta
today = datetime.date.today()
if date_range == 'since':
start_date = self.start_date
end_date = today
elif date_range == 'range':
start_date = self.start_date
end_date = self.end_date
elif date_range == 'lastmonth':
end_date = today
start_date = today - relativedelta(months=1) + timedelta(days=1) # add one day to handle inclusiveness
else:
end_date = today
if date_range == 'last7':
days = 7
elif date_range == 'last30':
days = 30
elif date_range == 'lastn':
days = self.days
else:
raise Exception("Invalid date range")
start_date = today - datetime.timedelta(days=days)
if start_date is None or end_date is None:
# this is due to bad validation. see: http://manage.dimagi.com/default.asp?110906
logging.error('scheduled report %s is in a bad state (no startdate or enddate)' % self._id)
return {}
dates = {
'startdate': start_date.isoformat(),
'enddate': end_date.isoformat(),
}
if self.is_configurable_report:
filter_slug = self.datespan_slug
if filter_slug:
return {
'%s-start' % filter_slug: start_date.isoformat(),
'%s-end' % filter_slug: end_date.isoformat(),
filter_slug: '%(startdate)s to %(enddate)s' % dates,
}
return dates
@property
@memoized
def query_string(self):
params = {}
if self._id != 'dummy':
params['config_id'] = self._id
if not self.is_configurable_report:
params.update(self.filters)
params.update(self.get_date_range())
return urlencode(params, True)
@property
@memoized
def view_kwargs(self):
kwargs = {'domain': self.domain,
'report_slug': self.report_slug}
if self.subreport_slug:
kwargs['subreport_slug'] = self.subreport_slug
return kwargs
@property
@memoized
def url(self):
try:
from django.core.urlresolvers import reverse
from corehq.apps.userreports.reports.view import ConfigurableReport
if self.is_configurable_report:
url_base = reverse(ConfigurableReport.slug, args=[self.domain, self.subreport_slug])
else:
url_base = reverse(self._dispatcher.name(), kwargs=self.view_kwargs)
return url_base + '?' + self.query_string
except Exception:
return "#"
@property
@memoized
def report(self):
"""
Returns None if no report is found for that report slug, which happens
when a report is no longer available. All callers should handle this
case.
"""
return self._dispatcher.get_report(
self.domain, self.report_slug, self.subreport_slug
)
@property
def report_name(self):
try:
if self.report is None:
return _("Deleted Report")
else:
return _(self.report.name)
except Exception:
return _("Unsupported Report")
@property
def full_name(self):
if self.name:
return "%s (%s)" % (self.name, self.report_name)
else:
return self.report_name
@property
def date_description(self):
if self.date_range == 'lastmonth':
return "Last Month"
elif self.days and not self.start_date:
day = 'day' if self.days == 1 else 'days'
return "Last %d %s" % (self.days, day)
elif self.end_date:
return "From %s to %s" % (self.start_date, self.end_date)
elif self.start_date:
return "Since %s" % self.start_date
else:
return ''
@property
@memoized
def owner(self):
try:
return WebUser.get_by_user_id(self.owner_id)
except CouchUser.AccountTypeError:
return CommCareUser.get_by_user_id(self.owner_id)
def get_report_content(self, lang, attach_excel=False):
"""
Get the report's HTML content as rendered by the static view format.
"""
try:
if self.report is None:
return _("The report used to create this scheduled report is no"
" longer available on CommCare HQ. Please delete this"
" scheduled report and create a new one using an available"
" report."), None
except Exception:
pass
from django.http import HttpRequest, QueryDict
request = HttpRequest()
request.couch_user = self.owner
request.user = self.owner.get_django_user()
request.domain = self.domain
request.couch_user.current_domain = self.domain
request.couch_user.language = lang
request.GET = QueryDict(
self.query_string
+ '&filterSet=true'
+ ('&'
+ urlencode(self.filters, True)
+ '&'
+ urlencode(self.get_date_range(), True)
if self.is_configurable_report else '')
)
# Make sure the request gets processed by PRBAC Middleware
CCHQPRBACMiddleware.apply_prbac(request)
try:
if self.is_configurable_report:
response = self._dispatcher.dispatch(
request,
self.subreport_slug,
render_as='email',
**self.view_kwargs
)
else:
response = self._dispatcher.dispatch(
request,
render_as='email',
permissions_check=self._dispatcher.permissions_check,
**self.view_kwargs
)
if attach_excel is True:
if self.is_configurable_report:
file_obj = self._dispatcher.dispatch(
request, self.subreport_slug,
render_as='excel',
**self.view_kwargs
)
else:
file_obj = self._dispatcher.dispatch(
request,
render_as='excel',
permissions_check=self._dispatcher.permissions_check,
**self.view_kwargs
)
else:
file_obj = None
return json.loads(response.content)['report'], file_obj
except PermissionDenied:
return _(
"We are sorry, but your saved report '%(config_name)s' "
"is no longer accessible because your subscription does "
"not allow Custom Reporting. Please talk to your Project "
"Administrator about enabling Custom Reports. If you "
"want CommCare HQ to stop sending this message, please "
"visit %(saved_reports_url)s to remove this "
"Emailed Report."
) % {
'config_name': self.name,
'saved_reports_url': absolute_reverse('saved_reports',
args=[request.domain]),
}, None
except Http404:
return _("We are sorry, but your saved report '%(config_name)s' "
"can not be generated since you do not have the correct permissions. "
"Please talk to your Project Administrator about getting permissions for this"
"report.") % {'config_name': self.name}, None
except Exception:
notify_exception(None, "Error generating report: {}".format(self.report_slug), details={
'domain': self.domain,
'user': self.owner.username,
'report': self.report_slug,
'report config': self.get_id
})
return _("An error occurred while generating this report."), None
@property
def is_configurable_report(self):
from corehq.apps.userreports.reports.view import ConfigurableReport
return isinstance(self._dispatcher, ConfigurableReport)
@property
@memoized
def languages(self):
if self.is_configurable_report:
return self.report.spec.get_languages()
return set()
@property
@memoized
def configurable_report(self):
from corehq.apps.userreports.reports.view import ConfigurableReport
return ConfigurableReport.get_report(
self.domain, self.report_slug, self.subreport_slug
)
    @property
    def datespan_filters(self):
        """Datespan filters exposed by the UCR report; empty list otherwise."""
        return (self.configurable_report.datespan_filters
                if self.is_configurable_report else [])
    @property
    def has_ucr_datespan(self):
        """Truthy when this is a UCR report that defines datespan filters."""
        return self.is_configurable_report and self.datespan_filters
class UnsupportedScheduledReportError(Exception):
    """Raised when a notification references a report that can no longer be scheduled."""
    pass
class ReportNotification(CachedCouchDocumentMixin, Document):
    """Scheduled email delivery of one or more saved report configs."""
    domain = StringProperty()
    owner_id = StringProperty()
    recipient_emails = StringListProperty()
    config_ids = StringListProperty()
    send_to_owner = BooleanProperty()
    attach_excel = BooleanProperty()
    # language is only used if some of the config_ids refer to UCRs.
    language = StringProperty()
    hour = IntegerProperty(default=8)
    minute = IntegerProperty(default=0)
    day = IntegerProperty(default=1)
    interval = StringProperty(choices=["daily", "weekly", "monthly"])
    @property
    def is_editable(self):
        """False for legacy docs that still carry a `report_slug` attribute."""
        try:
            self.report_slug
            return False
        except AttributeError:
            return True
    @classmethod
    def by_domain_and_owner(cls, domain, owner_id, stale=True, **kwargs):
        """Fetch notifications for (domain, owner) via a cached couch view."""
        if stale:
            kwargs['stale'] = settings.COUCH_STALE_QUERY
        key = [domain, owner_id]
        db = cls.get_db()
        result = cache_core.cached_view(db, "reportconfig/user_notifications", reduce=False,
                                        include_docs=True, startkey=key, endkey=key + [{}],
                                        wrapper=cls.wrap, **kwargs)
        return result
    @property
    def all_recipient_emails(self):
        """Recipient addresses plus (optionally) the owner's validated email."""
        # handle old documents
        if not self.owner_id:
            return [self.owner.get_email()]
        emails = []
        if self.send_to_owner:
            if self.owner.is_web_user():
                emails.append(self.owner.username)
            else:
                email = self.owner.get_email()
                try:
                    validate_email(email)
                    emails.append(email)
                except Exception:
                    # Mobile workers may have no valid email address; skip.
                    pass
        emails.extend(self.recipient_emails)
        return emails
    @property
    @memoized
    def owner(self):
        """The owning user; falls back to CommCareUser for non-web accounts."""
        id = self.owner_id
        try:
            return WebUser.get_by_user_id(id)
        except CouchUser.AccountTypeError:
            return CommCareUser.get_by_user_id(id)
    @property
    @memoized
    def configs(self):
        """
        Access the notification's associated configs as a list, transparently
        returning an appropriate dummy for old notifications which have
        `report_slug` instead of `config_ids`.
        """
        if self.config_ids:
            configs = ReportConfig.view('_all_docs', keys=self.config_ids,
                                        include_docs=True).all()
            configs = [c for c in configs if not hasattr(c, 'deleted')]
        elif self.report_slug == 'admin_domains':
            raise UnsupportedScheduledReportError("admin_domains is no longer "
                "supported as a schedulable report for the time being")
        else:
            # create a new ReportConfig object, useful for its methods and
            # calculated properties, but don't save it
            class ReadonlyReportConfig(ReportConfig):
                def save(self, *args, **kwargs):
                    pass
            config = ReadonlyReportConfig()
            object.__setattr__(config, '_id', 'dummy')
            config.report_type = ProjectReportDispatcher.prefix
            config.report_slug = self.report_slug
            config.domain = self.domain
            config.owner_id = self.owner_id
            configs = [config]
        return configs
    @property
    def day_name(self):
        """Human-readable description of the schedule's day."""
        if self.interval == 'weekly':
            return calendar.day_name[self.day]
        # NOTE(review): interpolation happens before ugettext, so translators
        # never see the '%s' placeholder -- consider _("...") % self.day.
        return {
            "daily": _("Every day"),
            "monthly": _("Day %s of every month" % self.day),
        }[self.interval]
    @classmethod
    def day_choices(cls):
        """Tuples for day of week number and human-readable day of week"""
        return tuple([(val, calendar.day_name[val]) for val in range(7)])
    @classmethod
    def hour_choices(cls):
        """Tuples for hour number and human-readable hour"""
        return tuple([(val, "%s:00" % val) for val in range(24)])
    def send(self):
        """Render each config and email the result to all recipients."""
        from dimagi.utils.django.email import send_HTML_email
        from corehq.apps.reports.views import get_scheduled_report_response
        # Scenario: user has been removed from the domain that they
        # have scheduled reports for. Delete this scheduled report
        if not self.owner.is_member_of(self.domain):
            self.delete()
            return
        if self.all_recipient_emails:
            title = "Scheduled report from CommCare HQ"
            if hasattr(self, "attach_excel"):
                attach_excel = self.attach_excel
            else:
                attach_excel = False
            body, excel_files = get_scheduled_report_response(self.owner, self.domain, self._id, attach_excel=attach_excel)
            for email in self.all_recipient_emails:
                send_HTML_email(title, email, body.content, email_from=settings.DEFAULT_FROM_EMAIL, file_attachments=excel_files)
class AppNotFound(Exception):
    """Raised when an export references an application that no longer exists."""
    pass
class HQExportSchema(SavedExportSchema):
    """Base saved export schema carrying a domain and a date-transform toggle."""
    doc_type = 'SavedExportSchema'
    domain = StringProperty()
    transform_dates = BooleanProperty(default=True)
    @property
    def global_transform_function(self):
        # Convert couch datetimes to Excel-friendly values when enabled.
        if self.transform_dates:
            return couch_to_excel_datetime
        else:
            return identity
    @classmethod
    def wrap(cls, data):
        # Old docs predate transform_dates: default them to False, and
        # backfill the domain from the index.
        if 'transform_dates' not in data:
            data['transform_dates'] = False
        self = super(HQExportSchema, cls).wrap(data)
        if not self.domain:
            self.domain = self.index[0]
        return self
class FormExportSchema(HQExportSchema):
    """Custom export schema for form submissions of a single app/xmlns."""
    doc_type = 'SavedExportSchema'
    app_id = StringProperty()
    include_errors = BooleanProperty(default=False)
    split_multiselects = BooleanProperty(default=False)
    def update_schema(self):
        """Extend base schema update; wire SplitColumn options from questions."""
        super(FormExportSchema, self).update_schema()
        if self.split_multiselects:
            self.update_question_schema()
            for column in [column for table in self.tables for column in table.columns]:
                if isinstance(column, SplitColumn):
                    question = self.question_schema.question_schema.get(column.index)
                    column.options = question.options
                    column.ignore_extras = True
    def update_question_schema(self):
        schema = self.question_schema
        schema.update_schema()
    @property
    def question_schema(self):
        return FormQuestionSchema.get_or_create(self.domain, self.app_id, self.xmlns)
    @property
    @memoized
    def app(self):
        """Latest build of the app, None when no app_id; raises AppNotFound."""
        if self.app_id:
            try:
                return get_app(self.domain, self.app_id, latest=True)
            except Http404:
                logging.error('App %s in domain %s not found for export %s' % (
                    self.app_id,
                    self.domain,
                    self.get_id
                ))
                raise AppNotFound()
        else:
            return None
    @classmethod
    def wrap(cls, data):
        self = super(FormExportSchema, cls).wrap(data)
        if self.filter_function == 'couchforms.filters.instances':
            # grandfather in old custom exports
            self.include_errors = False
            self.filter_function = None
        return self
    @property
    def filter(self):
        """SerializableFunction restricting forms to this domain's users."""
        user_ids = set(CouchUser.ids_by_domain(self.domain))
        user_ids.update(CouchUser.ids_by_domain(self.domain, is_active=False))
        user_ids.add('demo_user')
        def _top_level_filter(form):
            # careful, closures used
            return form_matches_users(form, user_ids) or is_commconnect_form(form)
        f = SerializableFunction(_top_level_filter)
        if self.app_id is not None:
            f.add(reports.util.app_export_filter, app_id=self.app_id)
        if not self.include_errors:
            f.add(couchforms.filters.instances)
        actual = SerializableFunction(default_form_filter, filter=f)
        return actual
    @property
    def domain(self):
        return self.index[0]
    @property
    def xmlns(self):
        return self.index[1]
    @property
    def formname(self):
        return xmlns_to_name(self.domain, self.xmlns, app_id=self.app_id)
    @property
    @memoized
    def question_order(self):
        """Dotted question indexes in app-defined order ([] when unavailable)."""
        try:
            if not self.app:
                return []
        except AppNotFound:
            if settings.DEBUG:
                return []
            raise
        else:
            questions = self.app.get_questions(self.xmlns)
        order = []
        for question in questions:
            if not question['value']:  # question probably belongs to a broken form
                continue
            index_parts = question['value'].split('/')
            assert index_parts[0] == ''
            index_parts[1] = 'form'
            index = '.'.join(index_parts[1:])
            order.append(index)
        return order
    def get_default_order(self):
        return {'#': self.question_order}
    def uses_cases(self):
        """Whether the exported form has case actions (False for remote apps)."""
        if not self.app or isinstance(self.app, RemoteApp):
            return False
        form = self.app.get_form_by_xmlns(self.xmlns)
        if form and isinstance(form, Form):
            return bool(form.active_actions())
        return False
class FormDeidExportSchema(FormExportSchema):
    """Form export schema variant used for de-identified exports."""
    @property
    def transform(self):
        return SerializableFunction()
    @classmethod
    def get_case(cls, doc, case_id):
        # De-identified exports never resolve cases.
        pass
class CaseExportSchema(HQExportSchema):
    """Saved export schema for cases of one case type within a domain."""
    doc_type = 'SavedExportSchema'
    @property
    def filter(self):
        return SerializableFunction(default_case_filter)
    @property
    def domain(self):
        return self.index[0]
    @property
    def domain_obj(self):
        return Domain.get_by_name(self.domain)
    @property
    def case_type(self):
        return self.index[1]
    @property
    def applications(self):
        return self.domain_obj.full_applications(include_builds=False)
    @property
    def case_properties(self):
        """Union of this case type's properties across all domain applications."""
        props = set([])
        for app in self.applications:
            prop_map = get_case_properties(app, [self.case_type], defaults=("name",))
            props |= set(prop_map[self.case_type])
        return props
class FakeFormExportSchema(FakeSavedExportSchema):

    def remap_tables(self, tables):
        # Drop internal bookkeeping tables, then give the main table a
        # readable name.
        pruned = _apply_removal(
            tables, ('#|#export_tag|#', '#|location_|#', '#|history|#'))
        return _apply_mapping(pruned, {'#': 'Forms'})
def _apply_mapping(export_tables, mapping_dict):
def _clean(tabledata):
def _clean_tablename(tablename):
return mapping_dict.get(tablename, tablename)
return (_clean_tablename(tabledata[0]), tabledata[1])
return map(_clean, export_tables)
def _apply_removal(export_tables, removal_list):
return [tabledata for tabledata in export_tables if not tabledata[0] in removal_list]
class HQGroupExportConfiguration(CachedCouchDocumentMixin, GroupExportConfiguration):
    """
    HQ's version of a group export, tagged with a domain
    """
    domain = StringProperty()
    def get_custom_exports(self):
        """Yield custom exports, rewrapped into their HQ-specific schema classes."""
        def _rewrap(export):
            # custom wrap if relevant
            try:
                return {
                    'form': FormExportSchema,
                    'case': CaseExportSchema,
                }[export.type].wrap(export._doc)
            except KeyError:
                return export
        for custom in list(self.custom_export_ids):
            custom_export = self._get_custom(custom)
            if custom_export:
                yield _rewrap(custom_export)
    def exports_of_type(self, type):
        return self._saved_exports_from_configs([
            config for config, schema in self.all_exports if schema.type == type
        ])
    @property
    @memoized
    def form_exports(self):
        return self.exports_of_type('form')
    @property
    @memoized
    def case_exports(self):
        return self.exports_of_type('case')
    @classmethod
    def by_domain(cls, domain):
        return cache_core.cached_view(cls.get_db(), "groupexport/by_domain",
            key=domain,
            reduce=False,
            include_docs=True,
            wrapper=cls.wrap,
        )
    @classmethod
    def get_for_domain(cls, domain):
        """
        For when we only expect there to be one of these per domain,
        which right now is always.
        """
        groups = cls.by_domain(domain)
        if groups:
            if len(groups) > 1:
                logging.error("Domain %s has more than one group export config! This is weird." % domain)
            return groups[0]
        return HQGroupExportConfiguration(domain=domain)
    @classmethod
    def add_custom_export(cls, domain, export_id):
        """Idempotently add export_id to the domain's group config."""
        group = cls.get_for_domain(domain)
        if export_id not in group.custom_export_ids:
            group.custom_export_ids.append(export_id)
            group.save()
        return group
    @classmethod
    def remove_custom_export(cls, domain, export_id):
        """Remove every occurrence of export_id; save only when changed."""
        group = cls.get_for_domain(domain)
        updated = False
        while export_id in group.custom_export_ids:
            group.custom_export_ids.remove(export_id)
            updated = True
        if updated:
            group.save()
        return group
|
kennethlove/django_bookmarks
|
dj_bookmarks/bookmarks/migrations/0006_bookmark_collections.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-15 17:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the Bookmark <-> Collection many-to-many relation.
    dependencies = [
        ('bookmarks', '0005_auto_20170915_1015'),
    ]
    operations = [
        migrations.AddField(
            model_name='bookmark',
            name='collections',
            field=models.ManyToManyField(to='bookmarks.Collection'),
        ),
    ]
|
leprikon-cz/leprikon
|
leprikon/views/journals.py
|
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.urls.base import reverse_lazy as reverse
from django.utils.translation import ugettext_lazy as _
from ..forms.journals import JournalEntryForm, JournalForm, JournalLeaderEntryForm
from ..models.journals import Journal, JournalEntry, JournalLeaderEntry, Subject
from .generic import CreateView, DeleteView, DetailView, TemplateView, UpdateView
class AlternatingView(TemplateView):
    """Overview of journal entries where the leader acted as an alternate."""
    template_name = "leprikon/alternating.html"
    def get_title(self):
        return _("Alternating in school year {}").format(self.request.school_year)
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["alternate_leader_entries"] = self.request.leader.get_alternate_leader_entries(self.request.school_year)
        return context
class JournalQuerySetMixin:
    """Restrict journals to the current leader unless the user is staff."""

    def get_queryset(self):
        queryset = super().get_queryset()
        if self.request.user.is_staff:
            return queryset
        return queryset.filter(leaders=self.request.leader)
class JournalView(JournalQuerySetMixin, DetailView):
    """Display a single journal (leaders only see their own)."""
    model = Journal
    template_name_suffix = "_journal"
class JournalCreateView(CreateView):
    """Create a journal for a subject the current leader may manage."""
    model = Journal
    form_class = JournalForm
    template_name = "leprikon/journal_form.html"
    title = _("New journal")
    def dispatch(self, request, subject):
        # Non-staff users may only create journals for their own subjects.
        kwargs = {"id": subject}
        if not self.request.user.is_staff:
            kwargs["leaders"] = self.request.leader
        self.subject = get_object_or_404(Subject, **kwargs)
        self.success_url = reverse("leprikon:subject_journals", args=(self.subject.subject_type.slug, self.subject.id))
        return super().dispatch(request)
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs["subject"] = self.subject
        return kwargs
    def get_message(self):
        return _("New journal {} has been created.").format(self.object)
class JournalUpdateView(JournalQuerySetMixin, UpdateView):
    """Edit an existing journal (leaders only see their own)."""
    model = Journal
    form_class = JournalForm
    success_url = reverse("leprikon:summary")
    template_name = "leprikon/journal_form.html"
    title = _("Change journal")
class JournalDeleteView(DeleteView):
    """Delete a journal, but only when it has no entries."""

    model = Journal
    title = _("Delete journal")
    message = _("Journal has been deleted.")

    def get_queryset(self):
        queryset = super().get_queryset()
        if self.request.user.is_staff:
            return queryset
        return queryset.filter(subject__leaders=self.request.leader)

    def get_object(self):
        journal = super().get_object()
        if journal.all_journal_entries:
            # Journals that already contain entries must not be deleted.
            raise Http404()
        return journal

    def get_question(self):
        return _("Do You really want to delete the journal {}?").format(self.object)
class JournalEntryCreateView(CreateView):
    """Add an entry to a journal the current leader may manage."""
    model = JournalEntry
    form_class = JournalEntryForm
    template_name = "leprikon/journalentry_form.html"
    title = _("New journal entry")
    message = _("The journal entry has been created.")
    def dispatch(self, request, *args, **kwargs):
        # Staff may write into any journal; leaders only into their own.
        if self.request.user.is_staff:
            self.journal = get_object_or_404(Journal, id=int(kwargs.pop("journal")))
        else:
            self.journal = get_object_or_404(Journal, id=int(kwargs.pop("journal")), leaders=self.request.leader)
        return super().dispatch(request, *args, **kwargs)
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs["journal"] = self.journal
        return kwargs
class JournalEntryUpdateView(UpdateView):
    """Edit a journal entry; staff, journal leaders and alternates may edit."""

    model = JournalEntry
    form_class = JournalEntryForm
    template_name = "leprikon/journalentry_form.html"
    title = _("Change journal entry")
    message = _("The journal entry has been updated.")

    def get_object(self):
        entry = super().get_object()
        may_edit = self.request.user.is_staff or (
            self.request.leader in entry.journal.all_leaders + entry.all_alternates
        )
        if not may_edit:
            raise Http404()
        return entry
class JournalEntryDeleteView(DeleteView):
    """Delete a journal entry unless it affects submitted timesheets."""
    model = JournalEntry
    title = _("Delete journal entry")
    message = _("The journal entry has been deleted.")
    def get_queryset(self):
        qs = super().get_queryset()
        if not self.request.user.is_staff:
            qs = qs.filter(journal__leaders=self.request.leader)
        return qs
    def get_object(self):
        obj = super().get_object()
        if obj.affects_submitted_timesheets:
            # Entries already accounted on a submitted timesheet are immutable.
            raise Http404()
        return obj
    def get_question(self):
        return _("Do You really want to delete journal entry?")
class JournalLeaderEntryUpdateView(UpdateView):
    """Edit a leader's timesheet entry.

    Allowed for staff, the timesheet owner, or any leader of the journal.
    """
    model = JournalLeaderEntry
    form_class = JournalLeaderEntryForm
    template_name = "leprikon/journalleaderentry_form.html"
    title = _("Change timesheet entry")
    message = _("The timesheet entry has been updated.")
    def get_object(self):
        obj = super().get_object()
        if (
            self.request.user.is_staff
            or obj.timesheet.leader == self.request.leader
            or self.request.leader in obj.journal_entry.journal.all_leaders
        ):
            return obj
        else:
            raise Http404()
class JournalLeaderEntryDeleteView(DeleteView):
    """Delete a leader's timesheet entry from a not-yet-submitted timesheet."""

    model = JournalLeaderEntry
    title = _("Delete timesheet entry")
    message = _("The timesheet entry has been deleted.")

    def get_queryset(self):
        queryset = super().get_queryset()
        # Only the owner's own, unsubmitted timesheets are deletable.
        return queryset.filter(
            timesheet__leader=self.request.leader,
            timesheet__submitted=False,
        )

    def get_question(self):
        return _("Do You really want to delete timesheet entry?")
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/firewalls_utils.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and functions for firewall rules."""
import re
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions as calliope_exceptions
ALLOWED_METAVAR = 'PROTOCOL[:PORT[-PORT]]'
LEGAL_SPECS = re.compile(
r"""
(?P<protocol>[a-zA-Z0-9+.-]+) # The protocol group.
(:(?P<ports>\d+(-\d+)?))? # The optional ports group.
# May specify a range.
$ # End of input marker.
""",
re.VERBOSE)
def AddCommonArgs(parser, for_update=False):
  """Adds common arguments for firewall create or update subcommands.

  Args:
    parser: the argparse parser to add the flags to.
    for_update: when True, list flags become optional (min_length 0) and may
        be passed bare to clear the existing values.
  """
  min_length = 0 if for_update else 1
  # A [] switch value lets update commands accept the bare flag to clear.
  switch = [] if min_length == 0 else None
  allow = parser.add_argument(
      '--allow',
      metavar=ALLOWED_METAVAR,
      type=arg_parsers.ArgList(min_length=min_length),
      action=arg_parsers.FloatingListValuesCatcher(switch_value=switch),
      help='The list of IP protocols and ports which will be allowed.',
      required=not for_update)
  allow.detailed_help = """\
      A list of protocols and ports whose traffic will be allowed.

      PROTOCOL is the IP protocol whose traffic will be allowed.
      PROTOCOL can be either the name of a well-known protocol
      (e.g., tcp or icmp) or the IP protocol number.
      A list of IP protocols can be found at
      link:http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml[].

      A port or port range can be specified after PROTOCOL to
      allow traffic through specific ports. If no port or port range
      is specified, connections through all ranges are allowed. For
      example, the following will create a rule that allows TCP traffic
      through port 80 and allows ICMP traffic:

        $ {command} MY-RULE --allow tcp:80 icmp

      TCP and UDP rules must include a port or port range.
      """
  if for_update:
    allow.detailed_help += """
      Setting this will override the current values.
      """
  parser.add_argument(
      '--description',
      help='A textual description for the firewall rule.{0}'.format(
          ' Set to an empty string to clear existing.' if for_update else ''))
  source_ranges = parser.add_argument(
      '--source-ranges',
      default=None if for_update else [],
      metavar='CIDR_RANGE',
      type=arg_parsers.ArgList(min_length=min_length),
      action=arg_parsers.FloatingListValuesCatcher(switch_value=switch),
      help=('A list of IP address blocks that may make inbound connections '
            'in CIDR format.'))
  source_ranges.detailed_help = """\
      A list of IP address blocks that are allowed to make inbound
      connections that match the firewall rule to the instances on
      the network. The IP address blocks must be specified in CIDR
      format:
      link:http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing[].
      """
  if for_update:
    source_ranges.detailed_help += """
      Setting this will override the existing source ranges for the firewall.
      The following will clear the existing source ranges:

        $ {command} MY-RULE --source-ranges
      """
  else:
    source_ranges.detailed_help += """
      If neither --source-ranges nor --source-tags is provided, then this
      flag will default to 0.0.0.0/0, allowing all sources. Multiple IP
      address blocks can be specified if they are separated by spaces.
      """
  source_tags = parser.add_argument(
      '--source-tags',
      default=None if for_update else [],
      metavar='TAG',
      type=arg_parsers.ArgList(min_length=min_length),
      action=arg_parsers.FloatingListValuesCatcher(switch_value=switch),
      help=('A list of instance tags indicating the set of instances on the '
            'network which may make network connections that match the '
            'firewall rule.'))
  source_tags.detailed_help = """\
      A list of instance tags indicating the set of instances on the
      network which may make network connections that match the
      firewall rule. If omitted, all instances on the network can
      make connections that match the rule.

      Tags can be assigned to instances during instance creation.
      """
  if for_update:
    source_tags.detailed_help += """
      Setting this will override the existing source tags for the firewall.
      The following will clear the existing source tags:

        $ {command} MY-RULE --source-tags
      """
  target_tags = parser.add_argument(
      '--target-tags',
      default=None if for_update else [],
      metavar='TAG',
      type=arg_parsers.ArgList(min_length=min_length),
      action=arg_parsers.FloatingListValuesCatcher(switch_value=switch),
      help=('A list of instance tags indicating the set of instances on the '
            'network which may make accept inbound connections that match '
            'the firewall rule.'))
  target_tags.detailed_help = """\
      A list of instance tags indicating the set of instances on the
      network which may make accept inbound connections that match the
      firewall rule. If omitted, all instances on the network can
      receive inbound connections that match the rule.

      Tags can be assigned to instances during instance creation.
      """
  if for_update:
    target_tags.detailed_help += """
      Setting this will override the existing target tags for the firewall.
      The following will clear the existing target tags:

        $ {command} MY-RULE --target-tags
      """
  parser.add_argument(
      'name',
      help='The name of the firewall rule to {0}'.format(
          'update.' if for_update else 'create.'))
def ParseAllowed(allowed, message_classes):
  """Parse PROTOCOL[:PORT[-PORT]] specs into AllowedValueListEntry messages."""
  entries = []
  for spec in allowed or []:
    match = LEGAL_SPECS.match(spec)
    if not match:
      raise calliope_exceptions.ToolException(
          'Firewall rules must be of the form {0}; received [{1}].'
          .format(ALLOWED_METAVAR, spec))
    port_spec = match.group('ports')
    ports = [port_spec] if port_spec else []
    entries.append(message_classes.Firewall.AllowedValueListEntry(
        IPProtocol=match.group('protocol'),
        ports=ports))
  return entries
|
westurner/pkgsetcomp
|
pkgsetcomp/pyrpo.py
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
"""Search for code repositories and generate reports"""
import datetime
import errno
import logging
import os
import pprint
import re
import subprocess
import sys
from collections import deque, namedtuple
from distutils.util import convert_path
from itertools import chain, imap, izip_longest
# TODO: arrow
from dateutil.parser import parse as parse_date
try:
from collections import OrderedDict as Dict
except ImportError as e:
Dict = dict
# def parse_date(*args, **kwargs):
# print(args)
# print(kwargs)
# logging.basicConfig()
log = logging.getLogger('repos')
def dtformat(dt):
    """Format a datetime as 'YYYY-MM-DD HH:MM:SS +zzzz'.

    Replaces the original ``lambda`` assignment (PEP 8: use ``def`` when
    binding a function to a name) -- same call signature and result.
    """
    return dt.strftime('%Y-%m-%d %H:%M:%S %z')
def itersplit(s, sep=None):
    """Lazily split *s* on *sep* (runs of whitespace when sep is None).

    Mirrors str.split semantics: explicit separators keep empty fields,
    the whitespace mode drops them.
    """
    if not s:
        yield s
        return
    pattern = re.compile(r'\s+') if sep is None else re.compile(re.escape(sep))
    start = 0
    while True:
        match = pattern.search(s, start)
        if match is None:
            # Trailing piece (always emitted for explicit separators).
            if start < len(s) or sep is not None:
                yield s[start:]
            return
        if start < match.start() or sep is not None:
            yield s[start:match.start()]
        start = match.end()
DEFAULT_FSEP = ' ||| '
DEFAULT_LSEP = ' |..|'
# DEFAULT_FSEP=u' %s ' % unichr(0xfffd)
# DEFAULT_LSEP=unichr(0xfffc)
def itersplit_to_fields(_str,
                        fsep=DEFAULT_FSEP,
                        revtuple=None,
                        fields=[],
                        preparse=None):
    """Split *_str* on *fsep* into *revtuple* (if given) or (field, value) pairs.

    NOTE(review): ``fields=[]`` is a mutable default; it is only iterated
    here, so it is harmless, but a tuple default would be safer.
    """
    if preparse:
        _str = preparse(_str)
    _fields = itersplit(_str, fsep)
    if revtuple is not None:
        try:
            values = (t[1] for t in izip_longest(revtuple._fields, _fields))
            return revtuple(*values)
        except:
            # Log the inputs before re-raising for easier debugging.
            log.error(revtuple)
            log.error(_fields)
            raise
    return tuple(izip_longest(fields, _fields, fillvalue=None))
_missing = unichr(822)
class cached_property(object):
    """Non-data descriptor that computes a value once and memoizes it.

    The wrapped function runs on first attribute access; the result is
    stored in the instance ``__dict__`` under the same name, so later
    accesses bypass the descriptor entirely. The class must therefore
    have a ``__dict__`` (e.g. be a subclass of object).

    :copyright: BSD
    see: https://github.com/mitsuhiko/werkzeug/blob/master/werkzeug/utils.py
    """

    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func

    def __get__(self, obj, _type=None):
        if obj is None:
            # Accessed on the class itself: return the descriptor.
            return self
        cached = obj.__dict__.get(self.__name__, _missing)
        if cached is not _missing:
            return cached
        cached = self.func(obj)
        obj.__dict__[self.__name__] = cached
        return cached
# TODO: sarge
def sh(cmd, ignore_error=False, cwd=None, *args, **kwargs):
    """Run *cmd* through the shell, returning combined stdout/stderr.

    Raises when the command exits non-zero unless *ignore_error* is set.
    """
    kwargs.update({
        'shell': True,
        'cwd': cwd,
        'stderr': subprocess.STDOUT,
        'stdout': subprocess.PIPE})
    log.debug('cmd: %s %s' % (cmd, kwargs))
    process = subprocess.Popen(cmd, **kwargs)
    output = process.communicate()[0]
    if process.returncode and not ignore_error:
        raise Exception("Subprocess return code: %d\n%r\n%r" % (
            process.returncode, cmd, output))
    return output
class Repository(object):
    """Abstract base for a local VCS working copy.

    Subclasses set ``label``/``prefix``/``fields``/``template`` and implement
    the shell-backed accessors (status, remote_url, log, ...).
    """
    label = None         # short VCS name, e.g. 'hg' or 'git'
    prefix = None        # metadata directory, e.g. '.hg'
    preparse = None      # optional callable applied to raw log text
    fsep = DEFAULT_FSEP  # field separator used in log templates
    lsep = DEFAULT_LSEP  # log-entry separator
    fields = []          # per-subclass (name, template, parser) tuples
    clone_cmd = 'clone'
    def __init__(self, fpath):
        # fpath: path to the repository working directory
        self.fpath = os.path.abspath(fpath)
        self.symlinks = []
    def __new__(cls, name):
        # NOTE(review): forwarding `name` to object.__new__ relies on
        # Python 2 leniency; under Python 3 this raises TypeError.
        self = super(Repository, cls).__new__(cls, name)
        self._tuple = self._namedtuple
        return self
    @property
    def relpath(self):
        """Path of this repo relative to the current working directory."""
        here = os.path.abspath(os.path.curdir)
        relpath = os.path.relpath(self.fpath, here)
        return relpath
    @cached_property
    def _namedtuple(cls):
        # `cls` is actually the instance here: cached_property passes the
        # object through __get__ -- the parameter name is only conventional.
        return namedtuple(
            ''.join((str.capitalize(cls.label), "Rev")),
            (f[0] for f in cls.fields))
    def unique_id(self):
        """
        :returns: str
        """
        pass
    def status(self):
        """
        :returns: str
        """
        pass
    def remote_url(self):
        """
        :returns: str
        """
        pass
    def diff(self):
        """
        :returns: str
        """
        pass
    def current_id(self):
        """
        :returns: str
        """
        pass
    def branch(self):
        """
        :returns: str
        """
        pass
    @cached_property
    def last_commit(self):
        # Most recent log entry (Python 2 generator .next()).
        return self.log_iter(maxentries=1).next()
    def log(self, n=None, **kwargs):
        """
        :returns: str
        """
        pass
    def itersplit_to_fields(self, _str):
        """Split one raw log entry into this repo's revision namedtuple."""
        if self.preparse:
            _str = self.preparse(_str)
        _fields = itersplit(_str, self.fsep)
        try:
            values = (
                t[1] for t in izip_longest(self._tuple._fields, _fields))
            return self._tuple(*values)
        except:
            log.error(self._tuple)
            log.error(_fields)
            raise
    _parselog = itersplit_to_fields
    def log_iter(self, maxentries=None, template=None, **kwargs):
        """Yield parsed revision tuples from the VCS log output."""
        # op = self.sh((
        #     "hg log %s --template"
        #     % (maxentries and ('-l%d' % maxentries) or '')),
        #     ignore_error=True
        # )
        template = repr(template or self.template)
        op = self.log(n=maxentries, template=template, **kwargs)
        if not op:
            return
        print(op)
        for l in itersplit(op, self.lsep):
            l = l.strip()
            if not l:
                continue
            try:
                yield self._parselog(l,)
            except Exception:
                log.error("%s %r" % (str(self), l))
                raise
        return
    # def search_upwards():
    #     """ Implemented for Repositories that store per-directory
    #     metadata """
    #     pass
    def full_report(self):
        """Yield a multi-line report: origin, last commit, status, log."""
        yield ''
        yield "# %s" % self.origin_report().next()
        yield "%s [%s]" % (self.last_commit, self)
        if self.status:
            for l in self.status.split('\n'):
                yield l
        yield ''
        if hasattr(self, 'log_iter'):
            for r in self.log_iter():
                yield r
        return
    @cached_property
    def eggname(self):
        return os.path.basename(self.fpath)
    @classmethod
    def to_normal_url(cls, url):
        return url
    def str_report(self):
        yield pprint.pformat(self.to_dict())
    def sh_report(self):
        """Yield a shell command that would re-clone this repository."""
        output = []
        if not self.remote_url:
            output.append('#')
        output.extend([
            self.label,
            self.clone_cmd,
            repr(self.remote_url),  # TODO: shell quote?
            repr(self.relpath)
        ])
        yield ' '.join(output)
    def pip_report(self):
        """Yield a pip requirements line (only for setup.py projects)."""
        comment = '#' if not self.remote_url else ''
        if os.path.exists(os.path.join(self.fpath, 'setup.py')):
            yield u"%s-e %s+%s@%s#egg=%s" % (
                comment,
                self.label,
                self.to_normal_url(self.remote_url),
                self.current_id,
                self.eggname)
        return
    def origin_report(self):
        yield "%s://%s = %s" % (
            self.label,
            self.fpath,
            self.remote_url,
            # revid
        )
        return
    def status_report(self):
        yield '######'
        yield self.sh_report().next()
        yield self.last_commit
        yield self.status
        yield ""
    def hgsub_report(self):
        """Yield an .hgsub line for this repo (skipped for the root repo)."""
        if self.relpath == '.':
            return
        yield "%s = [%s]%s" % (
            self.fpath.lstrip('./'),
            self.label,
            self.remote_url)
    def gitsubmodule_report(self):
        """Yield .gitmodules stanza lines for this repo (skipped for the root)."""
        fpath = self.relpath
        if fpath == '.':
            return
        yield '[submodule "%s"]' % fpath.replace(os.path.sep, '_')
        yield "path = %s" % fpath
        yield "url = %s" % self.remote_url
        yield ""
    def __unicode__(self):
        return '%s://%s' % (self.label, self.fpath)
    def __str__(self):
        return self.__unicode__()
    @cached_property
    def mtime(self, fpath=None):
        # NOTE(review): the fpath argument is unreachable through cached_property.
        return dtformat(
            datetime.datetime.utcfromtimestamp(
                os.path.getmtime(fpath or self.fpath)))
    @cached_property
    def ctime(self, fpath=None):
        return dtformat(
            datetime.datetime.utcfromtimestamp(
                os.path.getctime(fpath or self.fpath)))
    @cached_property
    def find_symlinks(self):
        cmd = ("find . -type l -printf '%p -> %l\n'")
        return self.sh(cmd)
    def lately(self, count=15):
        """Yield (mtime string, filename) for the most recently modified files."""
        excludes = '|'.join(('*.pyc', '*.swp', '*.bak', '*~'))
        cmd = ('''find . -printf "%%T@ %%p\\n" '''
               '''| egrep -v '%s' '''
               '''| sort -n '''
               '''| tail -n %d''') % (excludes, count)
        op = self.sh(cmd)
        for l in op.split('\n'):
            l = l.strip()
            if not l:
                continue
            mtime, fname = l.split(' ', 1)
            mtime = datetime.datetime.fromtimestamp(float(mtime))
            mtimestr = dtformat(mtime)
            yield mtimestr, fname
    def sh(self, cmd, ignore_error=False, cwd=None, *args, **kwargs):
        """Run *cmd* with this repo's directory as the default cwd."""
        kwargs.update({
            'shell': True,
            'cwd': cwd or self.fpath,
            'stderr': subprocess.STDOUT,
            'stdout': subprocess.PIPE})
        log.debug('cmd: %s %s' % (cmd, kwargs))
        return sh(cmd, ignore_error=ignore_error, **kwargs)
        # p = subprocess.Popen(cmd, **kwargs)
        # p_stdout = p.communicate()[0]
        # if p.returncode and not ignore_error:
        #     raise Exception("Subprocess return code: %d\n%r\n%r" % (
        #         p.returncode, cmd, p_stdout))
        # return p_stdout  #.rstrip()
    def to_dict(self):
        return self.__dict__
class MercurialRepository(Repository):
label = 'hg'
prefix = '.hg'
fields = (
('datestr', '{date|isodatesec}', parse_date),
('noderev', '{node|short}', None),
('author', '{author|firstline}', None),
('tags', '{tags}', lambda x: x.strip().split()),
('desc', '{desc}', None),
)
template = ''.join((
DEFAULT_FSEP.join(f[1] for f in fields),
DEFAULT_LSEP)
)
@property
def unique_id(self):
return self.fpath # self.sh('hg id -r 0').rstrip()
@cached_property
def status(self):
return self.sh('hg status').rstrip()
@cached_property
def remote_url(self):
return self.sh('hg showconfig paths.default',
ignore_error=True).strip()
@cached_property
def remote_urls(self):
return self.sh('hg showconfig paths')
@cached_property
def diff(self):
return self.sh('hg diff -g')
@cached_property
def current_id(self):
return self.sh('hg id -i').rstrip().rstrip('+') # TODO
@cached_property
def branch(self):
return self.sh('hg branch')
def log(self, n=None, **kwargs):
# TODO: nested generator
return self.sh(' '.join((
'hg log',
('-l%d' % n) if n else '',
' '.join(
('--%s=%s' % (k, v)) for (k, v) in kwargs.iteritems()
)
))
)
def loggraph(self):
return self.sh('hg log --graph')
def unpushed(self):
raise NotImplementedError()
def serve(self):
return self.sh('hg serve')
# @cached_property # TODO: once
@staticmethod
def _get_url_scheme_regexes():
output = sh("hg showconfig | grep '^schemes.'").split('\n')
log.debug(output)
schemes = (
l.split('.', 1)[1].split('=') for l in output if '=' in l)
regexes = sorted(
((k, v, re.compile(v.replace('{1}', '(.*)')+'(.*)'))
for k, v in schemes),
key=lambda x: (len(x[0]), x),
reverse=True)
return regexes
@classmethod
def to_hg_scheme_url(cls, url):
"""
convert a URL to local mercurial URL schemes
example::
# schemes.gh = git://github.com/
>> remote_url = git://github.com/westurner/dotfiles'
>> to_hg_scheme_url(remote_url)
<< gh://westurner/dotfiles
"""
regexes = cls._get_url_scheme_regexes()
for scheme_key, pattern, regex in regexes:
match = regex.match(url)
if match is not None:
groups = match.groups()
if len(groups) == 2:
return u''.join(
scheme_key,
'://',
pattern.replace('{1}', groups[0]),
groups[1])
elif len(groups) == 1:
return u''.join(
scheme_key,
'://',
pattern,
groups[0])
@classmethod
def to_normal_url(cls, url):
    """
    convert a URL from local mercurial URL schemes to "normal" URLS

    example::

        # schemes.gh = git://github.com/
        # remote_url = "gh://westurner/dotfiles"
        >> to_normal_url(remote_url)
        << 'git://github.com/westurner/dotfiles'

    :param url: possibly scheme-abbreviated URL
    :returns: expanded URL (unchanged when no scheme matches)
    """
    regexes = cls._get_url_scheme_regexes()
    _url = url[:]
    for scheme_key, pattern, regex in regexes:
        if not _url.startswith(scheme_key):
            continue
        # BUG FIX: the original used str.lstrip(scheme_key), which strips
        # any run of *characters from the set*, not the prefix (e.g. a
        # 'gh' scheme could also eat leading 'g'/'h' path characters),
        # and the '{1}' branch never removed the '://' separator.
        rest = _url[len(scheme_key):]
        if rest.startswith('://'):
            rest = rest[len('://'):]
        if '{1}' in pattern:
            _url = pattern.replace('{1}', rest)
        else:
            _url = pattern + rest
    return _url
# def to_pip_compatible_url(cls, url):
# PATTERNS = (
# ('gh+ssh://','https://github.com/'),
# ('bb+ssh://', 'https://bitbucket.org/'),
# )
# ('gcode', '') ,
# ('gcode+svn', ''),
# for p in PATTERNS:
# url = url.replace(*p)
class GitRepository(Repository):
    """Repository adapter that shells out to ``git``."""
    label = 'git'
    prefix = '.git'
    # (attrname, git --format placeholder, postprocess[, dateparse])
    fields = (
        ('datestr', '%ai', None, parse_date),
        ('noderev', '%h', None),
        ('author', '%an', None),
        ('tags', '%d', lambda x: x.strip(' ()').split(', ')),
        ('desc', '%s ', None),
    )
    template = ''.join((
        DEFAULT_FSEP.join(f[1] for f in fields),
        DEFAULT_LSEP)
    )

    @property
    def unique_id(self):
        """str: the working-directory path, used as this repo's unique key."""
        return self.fpath

    @cached_property
    def status(self):
        """str: short-format working tree status (``git status -s``)."""
        return self.sh('git status -s')

    @cached_property
    def remote_url(self):
        """str: URL of the ``origin`` remote ('' when unset)."""
        return self.sh('git config remote.origin.url',
                       ignore_error=True).strip()

    @cached_property
    def remote_urls(self):
        """str: every configured url entry from ``git config -l``."""
        return self.sh('git config -l | grep "url"',
                       ignore_error=True).strip()

    @cached_property
    def current_id(self):
        """str: abbreviated hash of HEAD."""
        return self.sh('git rev-parse --short HEAD').rstrip()

    def diff(self):
        """Return the working tree diff."""
        return self.sh('git diff')

    @cached_property
    def branch(self):
        """str: output of ``git branch``."""
        return self.sh('git branch')

    def log(self, n=None, **kwargs):
        """Return ``git log`` output.

        :param n: maximum number of entries (all when None)
        :param kwargs: passed through as ``--key=value`` options; the
            cross-VCS ``template`` key is translated to git's ``--format``
        :returns: log text, or None when the command fails
        """
        # BUG FIX: kwargs.pop('template') raised KeyError whenever the
        # caller omitted the template keyword; translate only if present.
        if 'template' in kwargs:
            kwargs['format'] = kwargs.pop('template')
        cmd = ' '.join((
            'git log',
            ('-n%d' % n) if n else '',
            ' '.join(
                ('--%s=%s' % (k, v)) for (k, v) in kwargs.iteritems()
            )
        ))
        try:
            # "fatal: bad default revision 'HEAD'" (empty repository) is
            # returned verbatim, like any other output.
            return self.sh(cmd)
        except Exception as e:
            # Preserve best-effort semantics (callers treat None as "no
            # log") but record the failure instead of dropping it.
            log.error('%r failed: %s' % (cmd, e))
            return None

    def loggraph(self):
        """Return an ASCII revision graph (``git log --graph``)."""
        return self.sh('git log --graph')

    @cached_property
    def last_commit(self):
        """Most recent log entry (see ``log_iter`` on the base class)."""
        return self.log_iter(maxentries=1).next()

    def unpushed(self):
        """Return commits on master absent from every remote master."""
        return self.sh("git log master --not --remotes='*/master'")

    def serve(self):
        """Serve the repository (``git serve``; blocks)."""
        return self.sh("git serve")
class BzrRepository(Repository):
    """Bazaar repository adapter (shells out to ``bzr``)."""
    label = 'bzr'
    prefix = '.bzr'
    # bzr has no log templating; records are parsed from plain output.
    template = None
    lsep = '-'*60
    fsep = '\n'
    # (attrname, placeholder(unused), postprocess[, dateparse])
    fields = (
        ('datestr', None, parse_date),
        ('noderev', None, None),
        ('author', None, None),
        ('tags', None, None),
        ('branchnick', None, None),
        ('desc', None, None),
    )
    # Map bzr log labels to the canonical field names above.
    field_trans = {
        'branch nick': 'branchnick',
        'timestamp': 'datestr',
        'revno': 'noderev',
        'committer': 'author',
        'message': 'desc'
    }
    # Matches "label: value" header lines in bzr log output.
    logrgx = re.compile(
        r'^(revno|tags|committer|branch\snick|timestamp|message):\s?(.*)\n?')
    # bzr's clone equivalent is "branch".
    clone_cmd = 'branch'

    @property
    def unique_id(self):
        """str: the working-directory path, used as this repo's unique key."""
        return self.fpath

    @cached_property
    def status(self):
        """str: working tree status."""
        return self.sh('bzr status')

    @cached_property
    def remote_url(self):
        """str: the parent branch URL parsed from ``bzr info``."""
        return self.sh(
            """bzr info | egrep '^ parent branch:' | awk '{ print $3 }'""",
            ignore_error=True)

    def diff(self):
        """Return the working tree diff."""
        return self.sh('bzr diff')

    @cached_property
    def current_id(self):
        """str: the current revision id."""
        return self.sh("bzr version-info --custom --template='{revision_id}'")

    @cached_property
    def branch(self):
        """str: the branch nickname (``bzr nick``)."""
        return self.sh('bzr nick')

    def log(self, n=None, template=None):
        """Return ``bzr log`` output (at most ``n`` entries).

        ``template`` is accepted for API symmetry with the other
        Repository subclasses but is unused here.
        """
        return self.sh(' '.join((
            'bzr log',
            '-l%d' % n if n else '')))

    # @cached_property
    # def last_commit(self):
    #     op = self.sh('bzr log -l1')
    #     return self._parselog(op)

    @classmethod
    def _logmessage_transform(cls, s, by=2):
        """Drop ``by`` leading indent characters (and surrounding
        newlines) from a log-message continuation line."""
        if len(s) >= by:
            return s[by:].strip('\n')
        return s.strip('\n')

    @classmethod
    def _parselog(self, r):
        """
        Parse bazaar log file format
        ::

            $ bzr log -l1
            ------------------------------------------------------------
            revno: 1
            committer: ubuntu <ubuntu@ubuntu-desktop>
            branch nick: ubuntu-desktop /etc repository
            timestamp: Wed 2011-10-12 01:16:55 -0500
            message:
              Initial commit
        """
        # NOTE(review): decorated @classmethod but the first parameter is
        # named ``self``; it actually receives the class object.
        def __parselog(entry):
            # Yield (fieldname, value) pairs for one log record.
            bufname = None
            buf = deque()
            # NOTE(review): looks like leftover debug output -- confirm.
            print(entry)
            if entry == ['']:
                return
            for l in itersplit(entry, '\n'):
                if not l:
                    continue
                mobj = self.logrgx.match(l)
                if not mobj:
                    # " - Log message" continuation line
                    buf.append(self._logmessage_transform(l))
                if mobj:
                    mobjlen = len(mobj.groups())
                    if mobjlen == 2:
                        # "attr: value" header line
                        attr, value = mobj.groups()
                        if attr == 'message':
                            # Subsequent lines accumulate into the
                            # description buffer.
                            bufname = 'desc'
                        else:
                            # NOTE(review): attr is translated twice
                            # (here and in the yield) -- harmless since
                            # the mapping is not chained, but confirm.
                            attr = self.field_trans.get(attr, attr)
                            yield (self.field_trans.get(attr, attr), value)
                    else:
                        raise Exception()
            if bufname is not None:
                if len(buf):
                    buf.pop()
                # Drop a leading blank line when more than one remains.
                len(buf) > 1 and buf.popleft()
                yield (bufname, '\n'.join(buf))
            return
        kwargs = dict(__parselog(r))  # FIXME
        if kwargs:
            if 'tags' not in kwargs:
                kwargs['tags'] = tuple()
            else:
                # NOTE(review): result of split() is discarded -- confirm.
                kwargs['tags'].split(' ')  # TODO
            if 'branchnick' not in kwargs:
                kwargs['branchnick'] = None
            # NOTE(review): bare except around a plain yield can mask
            # errors raised in the consumer -- consider narrowing.
            try:
                yield kwargs  # TODO
                # return self._tuple(**kwargs)
            except:
                log.error(r)
                log.error(kwargs)
                raise
        else:
            log.error("failed to parse: %r" % r)
class SvnRepository(Repository):
    """Subversion repository adapter (shells out to ``svn``)."""
    label = 'svn'
    prefix = '.svn'
    fsep = ' | '
    lsep = ''.join(('-' * 72, '\n'))
    # svn log has no templating; records are parsed from plain output.
    template = None
    fields = (
        ('noderev', None, None),
        ('author', None, None),
        ('datestr', None, None),
        ('changecount', None, None),
        ('desc', None, None),
        # TODO:
    )

    @cached_property
    def unique_id(self):
        """str or None: the Repository UUID reported by ``svn info``."""
        cmdo = self.sh('svn info | grep "^Repository UUID"',
                       ignore_error=True)
        if cmdo:
            return cmdo.split(': ', 1)[1].rstrip()
        return None

    @cached_property
    def status(self):
        """str: working copy status."""
        return self.sh('svn status')

    @cached_property
    def remote_url(self):
        """str: the repository root URL."""
        return (
            self.sh('svn info | grep "^Repository Root:"')
            .split(': ', 1)[1]).strip()

    def diff(self):
        """Return the working copy diff."""
        return self.sh('svn diff')

    def current_id(self):
        """Return the current revision number as a string."""
        # from xml.etree import ElementTree as ET
        # info = ET.fromstringlist(self.sh('svn info --xml'))
        # return info.find('entry').get('revision')
        return (
            self.sh('svn info | grep "^Revision: "')
            .split(': ', 1)[1].strip())

    def log(self, n=None, template=None, **kwargs):
        """Return ``svn log`` output (at most ``n`` entries).

        ``template`` is accepted for API symmetry but unused.
        """
        return (
            self.sh(' '.join((
                'svn log',
                # BUG FIX: was '-l%n' -- '%n' is not a valid printf-style
                # conversion and raised ValueError whenever n was given.
                ('-l%d' % n) if n else '',
                ' '.join(('--%s=%s' % (k, v)) for (k, v) in kwargs.items())
            ))
            )
        )

    @cached_property
    def _last_commit(self):
        """
        ::

            $ svn log -l1
            ------------------------------------------------------------------------
            r25701 | bhendrix | 2010-08-02 12:14:25 -0500 (Mon, 02 Aug 2010) | 1 line

            added selection range traits to make it possible for users to replace
            ------------------------------------------------------------------------

        .. note:: svn log references the svn server
        """
        op = self.sh('svn log -l1')
        # Skip the leading dashed separator line.
        data, rest = op.split('\n', 2)[1:]
        revno, user, datestr, lc = data.split(' | ', 3)
        desc = '\n'.join(rest.split('\n')[1:-2])
        revno = revno[1:]  # drop the leading 'r'
        return datestr, (revno, user, None, desc)

    @cached_property
    def __last_commit(self):
        """
        $ svn info
        Path: .
        URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python
        Repository Root: http://python-dlp.googlecode.com/svn
        Repository UUID: d0ad5f6e-b329-0410-b51c-492c9c4f233d
        Revision: 378
        Node Kind: directory
        Schedule: normal
        Last Changed Author: chimezie
        Last Changed Rev: 378
        Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011)
        """
        op = self.sh("svn info")
        if not op:
            return None
        # Lines 8-10 are author / rev / date (see docstring sample).
        author, rev, datestr = op.split('\n')[7:10]
        author = author.split(': ', 1)[1].strip()
        rev = rev.split(': ', 1)[1].strip()
        datestr = datestr.split(': ', 1)[1].split('(', 1)[0].strip()
        return datestr, (rev, author, None, None)

    @cached_property
    def last_commit(self):
        """Most recent log entry (see ``log_iter`` on the base class)."""
        return self.log_iter().next()

    def search_upwards(self, fpath=None, repodirname='.svn', upwards=None):
        """
        Traverse filesystem upwards, searching for .svn directories
        with matching UUIDs

        repo/.svn
        repo/dir1/.svn
        repo/dir1/dir2/.svn

        >> search_upwards('repo/')
        << 'repo/'
        >> search_upwards('repo/dir1')
        << 'repo/'
        >> search_upwards('repo/dir1/dir2')
        << 'repo/'

        repo/.svn
        repo/dirA/
        repo/dirA/dirB/.svn

        >> search_upwards('repo/dirA')
        << 'repo/'
        >> search_upwards('repo/dirA/dirB')
        >> 'repo/dirB')

        :param fpath: starting path (defaults to ``self.fpath``)
        :param repodirname: control directory name
        :param upwards: optional cache dict mapping control-dir path -> UUID
        :returns: the topmost SvnRepository sharing this repo's UUID
        """
        # BUG FIX: ``upwards={}`` was a shared mutable default that leaked
        # cached UUIDs across unrelated calls; allocate per call unless
        # the caller provides a cache (find_unique_repos does).
        if upwards is None:
            upwards = {}
        fpath = fpath or self.fpath
        uuid = self.unique_id
        last_path = self
        path_comp = fpath.split(os.path.sep)
        # [0:-1], [0:-2], [0:-1*len(path_comp)]
        for n in xrange(1, len(path_comp)-1):
            checkpath = os.path.join(*path_comp[0:-1 * n])
            repodir = os.path.join(checkpath, repodirname)
            upw_uuid = upwards.get(repodir)
            if upw_uuid:
                if upw_uuid == uuid:
                    last_path = SvnRepository(checkpath)
                    continue
                else:
                    break
            elif os.path.exists(repodir):
                repo = SvnRepository(checkpath)
                upw_uuid = repo.unique_id
                upwards[repodir] = upw_uuid
                # TODO: match on REVISION too
                if upw_uuid == uuid:
                    last_path = repo
                    continue
                else:
                    break
        return last_path
# Repository adapter classes to detect, in priority order.
REPO_REGISTRY = [
    MercurialRepository,
    GitRepository,
    BzrRepository,
    # SvnRepository, # NOP'ing this functionality for now. requires net access.
]
# Map control-directory name (e.g. '.git') -> adapter class.
REPO_PREFIXES = dict((r.prefix, r) for r in REPO_REGISTRY)
# egrep alternation matching any control dir at end of a path (e.g. '/\.git').
REPO_REGEX = (
    '|'.join('/%s' % r.prefix for r in REPO_REGISTRY)).replace('.', '\.')
def listdir_find_repos(where):
    """Walk ``where`` with os.listdir, yielding a Repository instance for
    every VCS control directory found.

    Unreadable directories (EACCES) are logged and skipped; other OS
    errors propagate.

    :param where: root path to scan
    :yields: Repository subclasses rooted at each control dir's parent
    """
    stack = deque([(convert_path(where), '')])
    while stack:
        where, prefix = stack.pop()
        try:
            for name in sorted(os.listdir(where), reverse=True):
                fn = os.path.join(where, name)
                if os.path.isdir(fn):
                    if name in REPO_PREFIXES:
                        # BUG FIX: the original derived the repo root with
                        # fn.rstrip(name)[:-1]; rstrip() strips a
                        # *character set*, mangling roots whose path ends
                        # in characters of the control-dir name (e.g.
                        # '/srv/git.git'). The parent directory is what
                        # was intended.
                        repo = REPO_PREFIXES[name](os.path.dirname(fn))
                        yield repo
                    stack.append((fn, prefix + name + '/'))
        except OSError as e:
            if e.errno == errno.EACCES:
                log.error("Skipping: %s", e)
            else:
                raise
def find_find_repos(where, ignore_error=True):
    """Scan ``where`` for VCS control directories using ``find(1)`` and
    yield a Repository instance per match.

    :param where: root path to scan
    :param ignore_error: when False, raise on a nonzero find exit status
    :yields: Repository subclasses rooted at each control dir's parent
    """
    # BSD find on Darwin needs -E for extended regexes; GNU find uses
    # -regextype posix-egrep.
    if os.uname()[0] == 'Darwin':
        cmd = ("find",
               " -E",
               repr(where),
               ' -type d',
               " -regex '.*(%s)$'" % REPO_REGEX)
    else:
        cmd = ("find",
               " -O3 ",
               repr(where),  # " .",
               " -type d",
               " -regextype posix-egrep",
               " -regex '.*(%s)$'" % REPO_REGEX)
    cmd = ' '.join(cmd)
    log.debug("find_find_repos(%r) = %s" % (where, cmd))
    kwargs = {
        'shell': True,
        'cwd': where,
        'stderr': sys.stderr,
        'stdout': subprocess.PIPE}
    p = subprocess.Popen(cmd, **kwargs)
    # NOTE(review): returncode is None until the process has been waited
    # on, so this branch presumably never fires -- confirm and move the
    # check after the output has been consumed.
    if p.returncode and not ignore_error:
        p_stdout = p.communicate()[0]
        raise Exception("Subprocess return code: %d\n%r\n%r" % (
            p.returncode, cmd, p_stdout))
    # Stream find output line by line (py2: str sentinel).
    for l in iter(p.stdout.readline, ''):
        path = l.rstrip()
        _path, _prefix = os.path.dirname(path), os.path.basename(path)
        repo = REPO_PREFIXES.get(_prefix)
        if repo is None:
            log.error("repo for path %r and prefix %r is None" %
                      (path, _prefix))
        if repo:
            yield repo(_path)
        # yield repo
def find_unique_repos(where):
    """Yield repositories under ``where``, de-duplicated by root path.

    Adapters providing ``search_upwards`` (nested-checkout VCSes such as
    svn) are resolved to their topmost matching root first.
    """
    seen = Dict()
    path_uuids = Dict()
    log.debug("find_unique_repos(%r)" % where)
    for repo in find_find_repos(where):
        if hasattr(repo, 'search_upwards'):
            top = repo.search_upwards(upwards=path_uuids)
            if top:
                if top == repo:
                    # Already yielded (or will be) at its own root.
                    continue
                repo = top
        if repo.fpath in seen:
            continue
        log.debug("%s | %s | %s" %
                  (repo.prefix, repo.fpath, repo.unique_id))
        seen[repo.fpath] = repo
        yield repo
# Map report name -> unbound Repository.<name>_report generator method.
REPORT_TYPES = dict(
    (attr, getattr(Repository, "%s_report" % attr)) for attr in (
        "str",
        "sh",
        "origin",
        "full",
        "pip",
        "status",
        "hgsub",
        "gitsubmodule",
    )
)
def do_repo_report(repos, report='full', output=sys.stdout, *args, **kwargs):
    """Render ``report`` for each repository, printing lines to ``output``.

    Generator: yields each repo after reporting on it, so callers can
    chain further processing.

    :param repos: iterable of Repository objects (None entries skipped)
    :param report: key into REPORT_TYPES
    :param output: writable file object
    :raises Exception: on an unknown report type or a failing report
    """
    for i, repo in enumerate(repos):
        try:
            if repo is not None:
                # BUG FIX: this debug probe previously ran before the
                # None check and outside the try block, so a None repo or
                # a failing origin_report crashed without the error
                # context logged below.
                log.debug(str((i, repo.origin_report().next())))
                reportfunc = REPORT_TYPES.get(report)
                if reportfunc is None:
                    raise Exception("Unrecognized report type: %r (%s)" %
                                    (report, ', '.join(REPORT_TYPES.keys())))
                for l in reportfunc(repo, *args, **kwargs):
                    print(l, file=output)
        except Exception as e:
            log.error(repo)
            log.error(report)
            log.error(e)
            raise
        yield repo
def do_tortoisehg_report(repos, output):
    """generate a thg-reporegistry.xml file from a list of repos and print
    to output

    :param repos: iterable of Repository objects
    :param output: writable file object receiving the XML document
    """
    import operator
    import xml.etree.ElementTree as ET
    root = ET.Element('reporegistry')
    item = ET.SubElement(root, 'treeitem')
    group = ET.SubElement(item, 'group', attrib=Dict(name='groupname'))

    def fullname_to_shortname(fullname):
        # Abbreviate $HOME to '~' and drop any leading './'.
        shortname = fullname.replace(os.environ['HOME'], '~')
        shortname = shortname.lstrip('./')
        return shortname

    for repo in sorted(repos, key=operator.attrgetter('fpath')):
        fullname = os.path.join(
            os.path.dirname(repo.fpath),
            os.path.basename(repo.fpath))
        shortname = fullname_to_shortname(fullname)
        # TortoiseHg only manages hg repos natively; tag others by prefix.
        if repo.prefix != '.hg':
            shortname = "%s%s" % (shortname, repo.prefix)
        ET.SubElement(group, 'repo',
                      attrib=Dict(
                          root=repo.fpath,
                          shortname=shortname,
                          basenode='0'*40))
    print('<?xml version="1.0" encoding="UTF-8"?>', file=output)
    print("<!-- autogenerated: %s -->" % "TODO", file=output)
    # BUG FIX: ET.dump() writes to sys.stdout and returns None, so
    # ``output`` used to receive the literal string "None" while the XML
    # went to stdout; serialize the tree and write it to ``output``.
    print(ET.tostring(root), file=output)
def main():
    """
    mainfunc

    Parse CLI options, configure logging, then run the requested
    repository reports over each scan path.
    """
    import optparse
    import logging

    prs = optparse.OptionParser(usage="./")

    prs.add_option('-s', '--scan',
                   dest='scan',
                   action='append',
                   default=[],
                   help='Path(s) to scan for repositories')
    prs.add_option('-r', '--report',
                   dest='reports',
                   action='append',
                   default=[],
                   help='pip || full || status || hgsub || thg')
    prs.add_option('--thg',
                   dest='thg_report',
                   action='store_true',
                   help='Write a thg-reporegistry.xml file to stdout')
    prs.add_option('--template',
                   dest='report_template',
                   action='store',
                   help='Report template')
    prs.add_option('-v', '--verbose',
                   dest='verbose',
                   action='store_true',)
    prs.add_option('-q', '--quiet',
                   dest='quiet',
                   action='store_true',)
    (opts, args) = prs.parse_args()

    if not opts.quiet:
        _format = None
        _format = "%(levelname)s\t%(message)s"
        # _format = "%(message)s"
        logging.basicConfig(format=_format)
    # NOTE(review): shadows the module-level ``log`` within main only.
    log = logging.getLogger('repos')
    if opts.verbose:
        log.setLevel(logging.DEBUG)
    elif opts.quiet:
        log.setLevel(logging.ERROR)
    else:
        log.setLevel(logging.INFO)

    # Default to scanning the current directory.
    if not opts.scan:
        opts.scan = ['.']
    if opts.scan:
        # if not opts.reports:
        #     opts.reports = ['pip']
        if opts.reports or opts.thg_report:
            opts.reports = [s.strip().lower() for s in opts.reports]
            # 'thg' is an alias for the --thg flag, not a REPORT_TYPES key.
            if 'thg' in opts.reports:
                opts.thg_report = True
                opts.reports.remove('thg')
            # repos = []
            # for _path in opts.scan:
            #     repos.extend(find_unique_repos(_path))
            log.debug("SCANNING PATHS: %s" % opts.scan)
            # Lazily chain the repo generators from every scan path.
            repos = chain(*imap(find_unique_repos, opts.scan))
            if opts.reports and opts.thg_report:
                # Both consumers iterate the sequence; materialize once.
                repos = list(repos)
                # TODO: tee
            if opts.reports:
                for report in opts.reports:
                    list(do_repo_report(repos, report=report))
            if opts.thg_report:
                import sys
                do_tortoisehg_report(repos, output=sys.stdout)
        else:
            # No reports requested: emit the 'sh' report for the cwd.
            opts.scan = '.'
            list(do_repo_report(
                find_unique_repos(opts.scan),
                report='sh'))
# Script entry point.
if __name__ == "__main__":
    main()
|
ihmeuw/vivarium
|
src/vivarium/framework/resource.py
|
"""
===================
Resource Management
===================
This module provides a tool to manage dependencies on resources within a
:mod:`vivarium` simulation. These resources take the form of things that can
be created and utilized by components, for example columns in the
:mod:`state table <vivarium.framework.population>`
or :mod:`named value pipelines <vivarium.framework.values>`.
Because these resources need to be created before they can be used, they are
sensitive to ordering. The intent behind this tool is to provide an interface
that allows other managers to register resources with the resource manager
and in turn ask for ordered sequences of these resources according to their
dependencies or raise exceptions if this is not possible.
"""
from types import MethodType
from typing import Any, Callable, Iterable, List
import networkx as nx
from loguru import logger
from vivarium.exceptions import VivariumError
class ResourceError(VivariumError):
    """Error raised when a dependency requirement is violated."""

    # Rooted at VivariumError so callers can catch all framework errors.
    pass
# The resource kinds the manager knows how to track.
RESOURCE_TYPES = {
    "value",
    "value_source",
    "missing_value_source",
    "value_modifier",
    "column",
    "stream",
}
# Type assigned to producers that create no externally visible resource
# but still declare dependencies (population initializers).
NULL_RESOURCE_TYPE = "null"
class ResourceGroup:
    """A node in the resource dependency graph.

    Bundles the pool of resources produced by a single callable together
    with every dependency needed to produce them: conceptually one graph
    vertex plus all of its in-edges. Collecting one group per producer
    gives enough local information to assemble the complete dependency
    graph once all resources have been registered.
    """

    def __init__(
        self,
        resource_type: str,
        resource_names: List[str],
        producer: Callable,
        dependencies: List[str],
    ):
        self._resource_type = resource_type
        self._resource_names = resource_names
        self._producer = producer
        self._dependencies = dependencies

    @property
    def type(self) -> str:
        """The kind of resource this group's producer makes.

        Must be one of `RESOURCE_TYPES`.
        """
        return self._resource_type

    @property
    def names(self) -> List[str]:
        """Fully-qualified ("type.name") names of the group's resources."""
        kind = self._resource_type
        return [f"{kind}.{short}" for short in self._resource_names]

    @property
    def producer(self) -> Any:
        """The method or object that produces this group of resources."""
        return self._producer

    @property
    def dependencies(self) -> List[str]:
        """Fully-qualified names of the resources this producer consumes."""
        return self._dependencies

    def __iter__(self) -> Iterable[str]:
        return iter(self.names)

    def __repr__(self) -> str:
        return f"ResourceProducer({', '.join(self)})"

    def __str__(self) -> str:
        return f"({', '.join(self)})"
class ResourceManager:
    """Manages all the resources needed for population initialization."""

    def __init__(self):
        # This will be a dict with string keys representing the the resource
        # and the resource group they belong to. This is a one to many mapping
        # as some resource groups contain many resources.
        self._resource_group_map = {}
        # null producers are those that don't produce any resources externally
        # but still consume other resources (i.e., have dependencies) - these
        # are only pop initializers as of 9/26/2019. Tracker is here to assign
        # them unique ids.
        self._null_producer_count = 0
        # Attribute used for lazy (but cached) graph initialization.
        self._graph = None
        # Attribute used for lazy (but cached) graph topo sort.
        self._sorted_nodes = None

    @property
    def name(self) -> str:
        """The name of this manager."""
        return "resource_manager"

    @property
    def graph(self) -> nx.DiGraph:
        """The networkx graph representation of the resource pool."""
        # Built lazily so every resource can be registered first.
        if self._graph is None:
            self._graph = self._to_graph()
        return self._graph

    @property
    def sorted_nodes(self) -> List[ResourceGroup]:
        """Returns a topological sort of the resource graph.

        Notes
        -----
        Topological sorts are not stable. Be wary of depending on order
        where you shouldn't.
        """
        if self._sorted_nodes is None:
            try:
                self._sorted_nodes = list(nx.algorithms.topological_sort(self.graph))
            except nx.NetworkXUnfeasible:
                # A cycle makes ordering impossible; surface it clearly.
                raise ResourceError(
                    f"The resource pool contains at least one cycle: "
                    f"{nx.find_cycle(self.graph)}."
                )
        return self._sorted_nodes

    def add_resources(
        self,
        resource_type: str,
        resource_names: List[str],
        producer: Any,
        dependencies: List[str],
    ):
        """Adds managed resources to the resource pool.

        Parameters
        ----------
        resource_type
            The type of the resources being added. Must be one of
            `RESOURCE_TYPES`.
        resource_names
            A list of names of the resources being added.
        producer
            A method or object that will produce the resources.
        dependencies
            A list of resource names formatted as
            ``resource_type.resource_name`` that the producer requires.

        Raises
        ------
        ResourceError
            If either the resource type is invalid, a component has multiple
            resource producers for the ``column`` resource type, or
            there are multiple producers of the same resource.
        """
        if resource_type not in RESOURCE_TYPES:
            raise ResourceError(
                f"Unknown resource type {resource_type}. "
                f"Permitted types are {RESOURCE_TYPES}."
            )

        resource_group = self._get_resource_group(
            resource_type, resource_names, producer, dependencies
        )

        for resource in resource_group:
            # Each fully-qualified resource may have exactly one producer.
            if resource in self._resource_group_map:
                other_producer = self._resource_group_map[resource].producer
                raise ResourceError(
                    f"Both {producer} and {other_producer} are registered as "
                    f"producers for {resource}."
                )
            self._resource_group_map[resource] = resource_group

    def _get_resource_group(
        self,
        resource_type: str,
        resource_names: List[str],
        producer: MethodType,
        dependencies: List[str],
    ) -> ResourceGroup:
        """Packages resource information into a resource group.

        See Also
        --------
        :class:`ResourceGroup`
        """
        if not resource_names:
            # We have a "producer" that doesn't produce anything, but
            # does have dependencies. This is necessary for components that
            # want to track private state information.
            resource_type = NULL_RESOURCE_TYPE
            resource_names = [str(self._null_producer_count)]
            self._null_producer_count += 1

        return ResourceGroup(resource_type, resource_names, producer, dependencies)

    def _to_graph(self) -> nx.DiGraph:
        """Constructs the full resource graph from information in the groups.

        Components specify local dependency information during setup time.
        When the resources are required at population creation time,
        the graph is generated as all resources must be registered at that
        point.

        Notes
        -----
        We are taking advantage of lazy initialization to sneak this in
        between post setup time when the :class:`values manager
        <vivarium.framework.values.ValuesManager>` finalizes pipeline
        dependencies and population creation time.
        """
        resource_graph = nx.DiGraph()
        # networkx ignores duplicates
        resource_graph.add_nodes_from(self._resource_group_map.values())

        for resource_group in resource_graph.nodes:
            for dependency in resource_group.dependencies:
                if dependency not in self._resource_group_map:
                    # Warn here because this sometimes happens naturally
                    # if observer components are missing from a simulation.
                    logger.warning(
                        f"Resource {dependency} is not provided by any component but is needed to "
                        f"compute {resource_group}."
                    )
                    continue
                dependency_group = self._resource_group_map[dependency]
                # Edge direction: dependency must be produced first.
                resource_graph.add_edge(dependency_group, resource_group)

        return resource_graph

    def __iter__(self) -> Iterable[MethodType]:
        """Returns a dependency-sorted iterable of population initializers.

        We exclude all non-initializer dependencies. They were necessary in
        graph construction, but we only need the column producers at population
        creation time.
        """
        return iter(
            [
                r.producer
                for r in self.sorted_nodes
                if r.type in {"column", NULL_RESOURCE_TYPE}
            ]
        )

    def __repr__(self):
        out = {}
        for resource_group in set(self._resource_group_map.values()):
            produced = ", ".join(resource_group)
            out[produced] = ", ".join(resource_group.dependencies)
        return "\n".join([f"{produced} : {depends}" for produced, depends in out.items()])
class ResourceInterface:
    """The resource management system.

    A :mod:`vivarium` resource is something like a state table column or
    a randomness stream: a thing used to initialize or alter simulation
    state. Many resources depend on one another and so must be created
    or updated in a particular order, and those dependency chains can be
    long and complex.

    Making end users responsible for that ordering would leave
    simulations fragile and hard to understand. Instead, users declare
    only local dependencies; the system assembles the full dependency
    graph from them, verifies it contains no cycles, and hands back
    resources and their producers in a sensible order.
    """

    def __init__(self, manager: ResourceManager):
        self._manager = manager

    def add_resources(
        self,
        resource_type: str,
        resource_names: List[str],
        producer: Any,
        dependencies: List[str],
    ):
        """Adds managed resources to the resource pool.

        Parameters
        ----------
        resource_type
            The type of the resources being added. Must be one of
            `RESOURCE_TYPES`.
        resource_names
            A list of names of the resources being added.
        producer
            A method or object that will produce the resources.
        dependencies
            A list of resource names formatted as
            ``resource_type.resource_name`` that the producer requires.

        Raises
        ------
        ResourceError
            If either the resource type is invalid, a component has multiple
            resource producers for the ``column`` resource type, or
            there are multiple producers of the same resource.
        """
        # Pure delegation; the manager owns all bookkeeping.
        self._manager.add_resources(
            resource_type, resource_names, producer, dependencies
        )

    def __iter__(self):
        """Returns a dependency-sorted iterable of population initializers.

        Non-initializer dependencies are excluded: they matter for graph
        construction, but only column producers are needed at population
        creation time.
        """
        return iter(self._manager)
|
gustavla/self-supervision
|
selfsup/caffe.py
|
from .util import DummyDict
from .util import tprint
import deepdish as dd
import numpy as np
# CAFFE WEIGHTS: O x I x H x W
# TFLOW WEIGHTS: H x W x I x O
def to_caffe(tfW, name=None, shape=None, color_layer='', conv_fc_transitionals=None, info=DummyDict()):
    """Convert a TensorFlow weight array to Caffe's memory layout.

    Caffe stores conv kernels as O x I x H x W; TensorFlow as H x W x I x O.
    The first conv layer (or ``color_layer``) has its 3 input channels
    reversed, and layers listed in ``conv_fc_transitionals`` are reshaped
    from fc form back into their caffe conv shape.

    :param tfW: TensorFlow weight array
    :param name: layer name (required when conv_fc_transitionals is given)
    :param shape: unused (kept for call compatibility)
    :param color_layer: extra layer name whose 3-channel input is reversed
    :param conv_fc_transitionals: dict of layer name -> caffe conv shape
    :param info: dict-like receiving a human-readable note per layer
    :returns: weight array in caffe layout
    """
    assert conv_fc_transitionals is None or name is not None
    if tfW.ndim == 4:
        # Convolutional kernel: optionally reverse input channels, then
        # HWIO -> OIHW.
        if tfW.shape[2] == 3 and name in ('conv1_1', 'conv1', color_layer):
            tfW = tfW[:, :, ::-1]
            info[name] = 'flipped'
        return tfW.transpose(3, 2, 0, 1)
    # Fully connected weights.
    if conv_fc_transitionals is not None and name in conv_fc_transitionals:
        cf_shape = conv_fc_transitionals[name]
        tf_shape = (cf_shape[2], cf_shape[3], cf_shape[1], cf_shape[0])
        cfW = (tfW.reshape(tf_shape)
                  .transpose(3, 2, 0, 1)
                  .reshape(cf_shape[0], -1))
        info[name] = 'fc->c transitioned with caffe shape {}'.format(cf_shape)
        return cfW
    return tfW.T
def from_caffe(cfW, name=None, color_layer='', conv_fc_transitionals=None, info=DummyDict()):
    """Convert a Caffe weight array to TensorFlow's memory layout.

    Mirror of ``to_caffe``: caffe conv kernels are O x I x H x W,
    TensorFlow expects H x W x I x O.

    :param cfW: caffe weight array
    :param name: layer name (required when conv_fc_transitionals is given)
    :param color_layer: extra layer name whose 3-channel input is reversed
    :param conv_fc_transitionals: dict of layer name -> caffe conv shape
        for layers that transition from conv to fully connected
    :param info: dict-like receiving a human-readable note per layer
    :returns: weight array in TensorFlow layout
    """
    assert conv_fc_transitionals is None or name is not None
    if cfW.ndim == 4:
        # Convolutional kernel: OIHW -> HWIO.
        tfW = cfW.transpose(2, 3, 1, 0)
        # (The original repeated the precondition assert here; removed as
        # redundant with the check on entry.)
        if (name == 'conv1_1' or name == 'conv1' or name == color_layer) and tfW.shape[2] == 3:
            # Reverse the 3 input channels on the color input layer.
            # NOTE(review): presumably BGR<->RGB, mirroring to_caffe --
            # confirm against the training pipeline.
            tfW = tfW[:, :, ::-1]
            info[name] = 'flipped'
        return tfW
    else:
        if conv_fc_transitionals is not None and name in conv_fc_transitionals:
            cf_shape = conv_fc_transitionals[name]
            tfW = cfW.reshape(cf_shape).transpose(2, 3, 1, 0).reshape(-1, cf_shape[0])
            info[name] = 'c->fc transitioned with caffe shape {}'.format(cf_shape)
            return tfW
        else:
            return cfW.T
def load_caffemodel(path, session, prefix='', ignore=set(),
                    conv_fc_transitionals=None, renamed_layers=DummyDict(),
                    color_layer='', verbose=False, pre_adjust_batch_norm=False):
    """Load weights from a caffemodel HDF5 dump into a TensorFlow session.

    :param path: HDF5 file with a '/data' group of caffe layer blobs
    :param session: TensorFlow session whose variables are assigned
    :param prefix: prepended to layer names when locating tf variables
    :param ignore: set of caffe layer names to skip
    :param conv_fc_transitionals: dict of layer name -> caffe conv shape
        for conv->fc transition layers (see ``from_caffe``)
    :param renamed_layers: maps caffe layer names to tf layer names
    :param color_layer: extra layer treated as the color input layer
    :param verbose: log every assignment when True
    :param pre_adjust_batch_norm: fold stored 'batch_<layer>' statistics
        into the loaded weights and biases
    :returns: list of human-readable strings describing what was loaded
    """
    import tensorflow as tf

    def find_weights(name, which='weights'):
        # Locate the trainable variable named "<name>/<which>".
        for tw in tf.trainable_variables():
            if tw.name.split(':')[0] == name + '/' + which:
                return tw
        return None

    data = dd.io.load(path, '/data')
    assigns = []
    loaded = []
    info = {}
    for key in data:
        local_key = prefix + renamed_layers.get(key, key)
        if key not in ignore:
            bn_name = 'batch_' + key
            if '0' in data[key]:
                weights = find_weights(local_key, 'weights')
                if weights is not None:
                    W = from_caffe(data[key]['0'], name=key, info=info,
                                   conv_fc_transitionals=conv_fc_transitionals,
                                   color_layer=color_layer)
                    # BUG FIX: the original compared W.ndim (an int) to the
                    # variable's shape *list*, which is never equal, so a
                    # redundant reshape ran on every layer. Compare the
                    # actual shapes instead.
                    if list(W.shape) != weights.get_shape().as_list():
                        W = W.reshape(weights.get_shape().as_list())
                    init_str = ''
                    if pre_adjust_batch_norm and bn_name in data:
                        # Fold batch norm into the weights: scale by 1/sigma.
                        bn_data = data[bn_name]
                        sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
                        W /= sigma
                        init_str += ' batch-adjusted'
                    assigns.append(weights.assign(W))
                    loaded.append('{}:0 -> {}:weights{} {}'.format(key, local_key, init_str, info.get(key, '')))
            if '1' in data[key]:
                biases = find_weights(local_key, 'biases')
                if biases is not None:
                    bias = data[key]['1']
                    init_str = ''
                    if pre_adjust_batch_norm and bn_name in data:
                        # Fold batch norm into the bias: (b - mu) / sigma.
                        bn_data = data[bn_name]
                        sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
                        mu = bn_data['0'] / bn_data['2']
                        bias = (bias - mu) / sigma
                        init_str += ' batch-adjusted'
                    assigns.append(biases.assign(bias))
                    loaded.append('{}:1 -> {}:biases{}'.format(key, local_key, init_str))

    # Check batch norm and load them (unless they have been folded into)
    # if not pre_adjust_batch_norm:
    session.run(assigns)
    if verbose:
        tprint('Loaded model from', path)
        for l in loaded:
            tprint('-', l)
    return loaded
def save_caffemodel(path, session, layers, prefix='',
                    conv_fc_transitionals=None, color_layer='', verbose=False,
                    save_batch_norm=False, lax_naming=False):
    """Dump TensorFlow weights for ``layers`` into a caffe-style HDF5 file.

    :param path: output HDF5 path ('/data' group, caffe blob layout)
    :param session: TensorFlow session holding the variable values
    :param layers: iterable of layer names, or (caffe_name, tf_name) pairs
    :param prefix: prepended to tf layer names when locating variables
    :param conv_fc_transitionals: dict of layer name -> caffe conv shape
        for fc->conv transition layers (see ``to_caffe``)
    :param color_layer: extra layer treated as the color input layer
    :param verbose: log every saved entry when True
    :param save_batch_norm: also save 'bn_mean'/'bn_var' statistics as
        'batch_<layer>' blobs
    :param lax_naming: match variable names by suffix instead of exactly
    :returns: list of human-readable strings describing what was saved
    """
    import tensorflow as tf

    def find_weights(name, which='weights'):
        # Locate the trainable variable named "<name>/<which>".
        for tw in tf.trainable_variables():
            if lax_naming:
                ok = tw.name.split(':')[0].endswith(name + '/' + which)
            else:
                ok = tw.name.split(':')[0] == name + '/' + which
            if ok:
                return tw
        return None

    def find_batch_norm(name, which='mean'):
        # Locate the (non-trainable) batch-norm statistic variable.
        for tw in tf.all_variables():
            #if name + '_moments' in tw.name and tw.name.endswith(which + '/batch_norm:0'):
            if tw.name.endswith(name + '/bn_' + which + ':0'):
                return tw
        return None

    data = {}
    saved = []
    info = {}
    for lay in layers:
        # Entries may be 'name' or ('caffe_name', 'tf_name') pairs.
        if isinstance(lay, tuple):
            lay, p_lay = lay
        else:
            p_lay = lay
        weights = find_weights(prefix + p_lay, 'weights')
        d = {}
        if weights is not None:
            tfW = session.run(weights)
            cfW = to_caffe(tfW, name=lay,
                           conv_fc_transitionals=conv_fc_transitionals,
                           info=info, color_layer=color_layer)
            d['0'] = cfW  # caffe blob 0: weights
            saved.append('{}:weights -> {}:0 {}'.format(prefix + p_lay, lay, info.get(lay, '')))
        biases = find_weights(prefix + p_lay, 'biases')
        if biases is not None:
            b = session.run(biases)
            d['1'] = b  # caffe blob 1: biases
            saved.append('{}:biases -> {}:1'.format(prefix + p_lay, lay))
        if d:
            data[lay] = d

        if save_batch_norm:
            mean = find_batch_norm(lay, which='mean')
            variance = find_batch_norm(lay, which='var')
            if mean is not None and variance is not None:
                d = {}
                d['0'] = np.squeeze(session.run(mean))
                d['1'] = np.squeeze(session.run(variance))
                # Blob 2 is the caffe scale factor; 1.0 means "as-is".
                d['2'] = np.array([1.0], dtype=np.float32)
                data['batch_' + lay] = d
                saved.append('batch_norm({}) saved'.format(lay))

    dd.io.save(path, dict(data=data), compression=None)
    if verbose:
        tprint('Saved model to', path)
        for l in saved:
            tprint('-', l)
    return saved
|
all-of-us/raw-data-repository
|
rdr_service/lib_fhir/fhirclient_4_0_0/models/binary.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Binary) on 2019-05-07.
# 2019, SMART Health IT.
from . import resource
class Binary(resource.Resource):
    """ Pure binary content defined by a format other than FHIR.

    A resource that represents the data of a single raw artifact as digital
    content accessible in its native format. A Binary resource can contain any
    content, whether text, image, pdf, zip archive, etc.
    """

    resource_type = "Binary"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # MimeType of the binary content. Type `str`.
        self.contentType = None
        # The actual content. Type `str`.
        self.data = None
        # Identifies another resource to use as proxy when enforcing access
        # control. Type `FHIRReference` (represented as `dict` in JSON).
        self.securityContext = None
        super(Binary, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (name, json_name, type, is_list, of_many, not_optional)
        properties = super(Binary, self).elementProperties()
        properties.extend([
            ("contentType", "contentType", str, False, None, True),
            ("data", "data", str, False, None, False),
            ("securityContext", "securityContext",
             fhirreference.FHIRReference, False, None, False),
        ])
        return properties
import sys
# NOTE(review): presumably guards against circular-import ordering during
# package initialization (fhirreference also imports resource modules);
# fall back to the already-loaded module object -- confirm.
try:
    from . import fhirreference
except ImportError:
    fhirreference = sys.modules[__package__ + '.fhirreference']
|
patrickm/chromium.src
|
content/test/gpu/gpu_tests/cloud_storage_test_base.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base classes for a test and validator which upload results
(reference images, error images) to cloud storage."""
import os
import re
import tempfile
from telemetry import test
from telemetry.core import bitmap
from telemetry.page import cloud_storage
from telemetry.page import page_test
# Absolute path to the GPU test data directory (../../data/gpu relative to
# this file).
test_data_dir = os.path.abspath(os.path.join(
    os.path.dirname(__file__), '..', '..', 'data', 'gpu'))
# Default on-disk location for images generated during local (non-bot) runs.
default_generated_data_dir = os.path.join(test_data_dir, 'generated')
# Cloud storage bucket receiving failure/diff images from ValidatorBase.
error_image_cloud_storage_bucket = 'chromium-browser-gpu-tests'
def _CompareScreenshotSamples(screenshot, expectations, device_pixel_ratio):
    """Check sampled pixels of a screenshot against color expectations.

    Each expectation has a "location" [x, y] (scaled by device_pixel_ratio),
    an expected RGB "color", and a per-channel "tolerance". Raises
    page_test.Failure on the first sample that is out of range or whose
    color does not match within tolerance.
    """
    for expectation in expectations:
        location = expectation["location"]
        x = location[0] * device_pixel_ratio
        y = location[1] * device_pixel_ratio
        # Valid pixel coordinates are 0..width-1 / 0..height-1, so x == width
        # (or y == height) is already out of range. The previous '>' check
        # let those boundary values through to GetPixelColor.
        if x < 0 or y < 0 or x >= screenshot.width or y >= screenshot.height:
            raise page_test.Failure(
                'Expected pixel location [%d, %d] is out of range on [%d, %d] image' %
                (x, y, screenshot.width, screenshot.height))
        actual_color = screenshot.GetPixelColor(x, y)
        expected_color = bitmap.RgbaColor(
            expectation["color"][0],
            expectation["color"][1],
            expectation["color"][2])
        if not actual_color.IsEqual(expected_color, expectation["tolerance"]):
            raise page_test.Failure('Expected pixel at ' + str(location) +
                                    ' to be ' +
                                    str(expectation["color"]) + " but got [" +
                                    str(actual_color.r) + ", " +
                                    str(actual_color.g) + ", " +
                                    str(actual_color.b) + "]")
class ValidatorBase(page_test.PageTest):
    """Base page-test validator that compares screenshots against reference
    images stored either on local disk (local runs) or in cloud storage
    (bot runs), and uploads failure/diff artifacts."""

    def __init__(self, test_method_name):
        super(ValidatorBase, self).__init__(test_method_name)
        # Parameters for cloud storage reference images.
        self.vendor_id = None
        self.device_id = None
        self.vendor_string = None
        self.device_string = None
        self.msaa = False

    ###
    ### Routines working with the local disk (only used for local
    ### testing without a cloud storage account -- the bots do not use
    ### this code path).
    ###

    def _UrlToImageName(self, url):
        # Flatten a page URL into a filesystem-safe image name: strip the
        # scheme, drop parent-dir escapes, and map separators to underscores.
        image_name = re.sub(r'^(http|https|file)://(/*)', '', url)
        image_name = re.sub(r'\.\./', '', image_name)
        image_name = re.sub(r'(\.|/|-)', '_', image_name)
        return image_name

    def _WriteImage(self, image_path, png_image):
        # Create the destination directory on demand before writing the PNG.
        output_dir = os.path.dirname(image_path)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        png_image.WritePngFile(image_path)

    def _WriteErrorImages(self, img_dir, img_name, screenshot, ref_png):
        # Write FAIL_/reference/DIFF_ images for a failing comparison.
        full_image_name = img_name + '_' + str(self.options.build_revision)
        full_image_name = full_image_name + '.png'
        # Always write the failing image.
        self._WriteImage(
            os.path.join(img_dir, 'FAIL_' + full_image_name), screenshot)
        if ref_png:
            # Save the reference image.
            # This ensures that we get the right revision number.
            self._WriteImage(
                os.path.join(img_dir, full_image_name), ref_png)
            # Save the difference image.
            diff_png = screenshot.Diff(ref_png)
            self._WriteImage(
                os.path.join(img_dir, 'DIFF_' + full_image_name), diff_png)

    ###
    ### Cloud storage code path -- the bots use this.
    ###

    def _ComputeGpuInfo(self, tab):
        # Populate vendor/device identifiers from the browser's system info
        # (numeric ids preferred, descriptive strings as fallback). No-op if
        # either pair is already set.
        if ((self.vendor_id and self.device_id) or
            (self.vendor_string and self.device_string)):
            return
        browser = tab.browser
        if not browser.supports_system_info:
            raise Exception('System info must be supported by the browser')
        system_info = browser.GetSystemInfo()
        if not system_info.gpu:
            raise Exception('GPU information was absent')
        device = system_info.gpu.devices[0]
        if device.vendor_id and device.device_id:
            self.vendor_id = device.vendor_id
            self.device_id = device.device_id
        elif device.vendor_string and device.device_string:
            self.vendor_string = device.vendor_string
            self.device_string = device.device_string
        else:
            raise Exception('GPU device information was incomplete')
        self.msaa = not (
            'disable_multisampling' in system_info.gpu.driver_bug_workarounds)

    def _FormatGpuInfo(self, tab):
        # Build the os/vendor/device/msaa suffix used in reference image names.
        self._ComputeGpuInfo(tab)
        msaa_string = '_msaa' if self.msaa else '_non_msaa'
        if self.vendor_id:
            return '%s_%04x_%04x%s' % (
                self.options.os_type, self.vendor_id, self.device_id,
                msaa_string)
        else:
            return '%s_%s_%s%s' % (
                self.options.os_type, self.vendor_string, self.device_string,
                msaa_string)

    def _FormatReferenceImageName(self, img_name, page, tab):
        # Reference images are versioned by the page's revision and keyed by
        # the GPU/OS configuration.
        return '%s_v%s_%s.png' % (
            img_name,
            page.revision,
            self._FormatGpuInfo(tab))

    def _UploadBitmapToCloudStorage(self, bucket, name, bitmap, public=False):
        # This sequence of steps works on all platforms to write a temporary
        # PNG to disk, following the pattern in bitmap_unittest.py. The key to
        # avoiding PermissionErrors seems to be to not actually try to write to
        # the temporary file object, but to re-open its name for all operations.
        temp_file = tempfile.NamedTemporaryFile().name
        bitmap.WritePngFile(temp_file)
        cloud_storage.Insert(bucket, name, temp_file, publicly_readable=public)

    def _ConditionallyUploadToCloudStorage(self, img_name, page, tab, screenshot):
        """Uploads the screenshot to cloud storage as the reference image
        for this test, unless it already exists. Returns True if the
        upload was actually performed."""
        if not self.options.refimg_cloud_storage_bucket:
            raise Exception('--refimg-cloud-storage-bucket argument is required')
        cloud_name = self._FormatReferenceImageName(img_name, page, tab)
        if not cloud_storage.Exists(self.options.refimg_cloud_storage_bucket,
                                    cloud_name):
            self._UploadBitmapToCloudStorage(self.options.refimg_cloud_storage_bucket,
                                             cloud_name,
                                             screenshot)
            return True
        return False

    def _DownloadFromCloudStorage(self, img_name, page, tab):
        """Downloads the reference image for the given test from cloud
        storage, returning it as a Telemetry Bitmap object."""
        # TODO(kbr): there's a race condition between the deletion of the
        # temporary file and gsutil's overwriting it.
        if not self.options.refimg_cloud_storage_bucket:
            raise Exception('--refimg-cloud-storage-bucket argument is required')
        temp_file = tempfile.NamedTemporaryFile().name
        cloud_storage.Get(self.options.refimg_cloud_storage_bucket,
                          self._FormatReferenceImageName(img_name, page, tab),
                          temp_file)
        return bitmap.Bitmap.FromPngFile(temp_file)

    def _UploadErrorImagesToCloudStorage(self, image_name, screenshot, ref_img):
        """For a failing run, uploads the failing image, reference image (if
        supplied), and diff image (if reference image was supplied) to cloud
        storage. This subsumes the functionality of the
        archive_gpu_pixel_test_results.py script."""
        # NOTE(review): '\W+' is a non-raw string; it works because \W is not
        # a recognized string escape, but r'\W+' would be the clearer form.
        machine_name = re.sub('\W+', '_', self.options.test_machine_name)
        upload_dir = '%s_%s_telemetry' % (self.options.build_revision, machine_name)
        base_bucket = '%s/runs/%s' % (error_image_cloud_storage_bucket, upload_dir)
        image_name_with_revision = '%s_%s.png' % (
            image_name, self.options.build_revision)
        self._UploadBitmapToCloudStorage(
            base_bucket + '/gen', image_name_with_revision, screenshot,
            public=True)
        if ref_img:
            self._UploadBitmapToCloudStorage(
                base_bucket + '/ref', image_name_with_revision, ref_img, public=True)
            diff_img = screenshot.Diff(ref_img)
            self._UploadBitmapToCloudStorage(
                base_bucket + '/diff', image_name_with_revision, diff_img,
                public=True)
        print ('See http://%s.commondatastorage.googleapis.com/'
               'view_test_results.html?%s for this run\'s test results') % (
            error_image_cloud_storage_bucket, upload_dir)

    def _ValidateScreenshotSamples(self, url,
                                   screenshot, expectations, device_pixel_ratio):
        """Samples the given screenshot and verifies pixel color values.
        The sample locations and expected color values are given in expectations.
        In case any of the samples do not match the expected color, it raises
        a Failure and dumps the screenshot locally or cloud storage depending on
        what machine the test is being run."""
        try:
            _CompareScreenshotSamples(screenshot, expectations, device_pixel_ratio)
        except page_test.Failure:
            image_name = self._UrlToImageName(url)
            if self.options.test_machine_name:
                self._UploadErrorImagesToCloudStorage(image_name, screenshot, None)
            else:
                self._WriteErrorImages(self.options.generated_dir, image_name,
                                       screenshot, None)
            raise
class TestBase(test.Test):
    """Base Telemetry test that registers the command-line flags consumed by
    ValidatorBase (reference-image buckets, build revision, machine name)."""

    @classmethod
    def AddTestCommandLineArgs(cls, group):
        # These options surface on self.options in the validator.
        group.add_option('--build-revision',
            help='Chrome revision being tested.',
            default="unknownrev")
        group.add_option('--upload-refimg-to-cloud-storage',
            dest='upload_refimg_to_cloud_storage',
            action='store_true', default=False,
            help='Upload resulting images to cloud storage as reference images')
        group.add_option('--download-refimg-from-cloud-storage',
            dest='download_refimg_from_cloud_storage',
            action='store_true', default=False,
            help='Download reference images from cloud storage')
        group.add_option('--refimg-cloud-storage-bucket',
            help='Name of the cloud storage bucket to use for reference images; '
            'required with --upload-refimg-to-cloud-storage and '
            '--download-refimg-from-cloud-storage. Example: '
            '"chromium-gpu-archive/reference-images"')
        group.add_option('--os-type',
            help='Type of operating system on which the pixel test is being run, '
            'used only to distinguish different operating systems with the same '
            'graphics card. Any value is acceptable, but canonical values are '
            '"win", "mac", and "linux", and probably, eventually, "chromeos" '
            'and "android").',
            default='')
        group.add_option('--test-machine-name',
            help='Name of the test machine. Specifying this argument causes this '
            'script to upload failure images and diffs to cloud storage directly, '
            'instead of relying on the archive_gpu_pixel_test_results.py script.',
            default='')
        group.add_option('--generated-dir',
            help='Overrides the default on-disk location for generated test images '
            '(only used for local testing without a cloud storage account)',
            default=default_generated_data_dir)
|
pansapiens/mytardis
|
tardis/tardis_portal/tests/test_download.py
|
# -*- coding: utf-8 -*-
from os import makedirs
from os.path import abspath, basename, dirname, join, exists, getsize
from shutil import rmtree
from zipfile import is_zipfile, ZipFile
from tarfile import is_tarfile, TarFile
from tempfile import NamedTemporaryFile
from compare import expect
from django.test import TestCase
from django.test.client import Client
from django.conf import settings
from django.contrib.auth.models import User
from tardis.tardis_portal.models import \
Experiment, Dataset, DataFile, DataFileObject
try:
from wand.image import Image # pylint: disable=C0411
IMAGEMAGICK_AVAILABLE = True
except (AttributeError, ImportError):
IMAGEMAGICK_AVAILABLE = False
def get_size_and_sha512sum(testfile):
    """Return ``(size_in_bytes, sha512_hexdigest)`` for the file at *testfile*.

    Reads the file in fixed-size chunks so arbitrarily large files do not
    have to fit in memory (the previous version slurped the whole file).
    """
    import hashlib
    size = 0
    digest = hashlib.sha512()
    with open(testfile, 'rb') as f:
        # iter(..., b'') yields chunks until EOF.
        for chunk in iter(lambda: f.read(65536), b''):
            size += len(chunk)
            digest.update(chunk)
    return (size, digest.hexdigest())
def _generate_test_image(testfile):
    """Write a TIFF image to *testfile*.

    Uses ImageMagick (via wand) when available; otherwise writes the 4-byte
    little-endian TIFF magic so the file is recognizable as a TIFF.
    """
    if IMAGEMAGICK_AVAILABLE:
        with Image(filename='logo:') as img:
            img.format = 'tiff'
            img.save(filename=testfile)
    else:
        # Apparently ImageMagick isn't installed...
        # Write a "fake" TIFF file. Open in binary mode: text mode ('w')
        # would newline-translate the magic bytes on Windows, and the
        # with-statement guarantees the handle is closed.
        with open(testfile, 'wb') as f:
            f.write(b"II\x2a\x00")
class DownloadTestCase(TestCase):
    """End-to-end tests of the datafile view/download endpoints.

    setUp builds one fully-public experiment and one non-public experiment,
    each with a dataset containing a single registered file; the tests then
    exercise access control and tar packaging against those fixtures.
    """

    def setUp(self):
        # create a test user
        self.user = User.objects.create_user(username='DownloadTestUser',
                                             email='',
                                             password='secret')
        # create a public experiment
        self.experiment1 = Experiment(
            title='Experiment 1',
            created_by=self.user,
            public_access=Experiment.PUBLIC_ACCESS_FULL)
        self.experiment1.save()
        # create a non-public experiment
        self.experiment2 = Experiment(
            title='Experiment 2',
            created_by=self.user,
            public_access=Experiment.PUBLIC_ACCESS_NONE)
        self.experiment2.save()
        # dataset1 belongs to experiment1
        self.dataset1 = Dataset()
        self.dataset1.save()
        self.dataset1.experiments.add(self.experiment1)
        self.dataset1.save()
        # dataset2 belongs to experiment2
        self.dataset2 = Dataset()
        self.dataset2.save()
        self.dataset2.experiments.add(self.experiment2)
        self.dataset2.save()
        # absolute path first
        filename1 = 'testfile.txt'
        filename2 = 'testfile.tiff'
        self.dest1 = abspath(join(settings.FILE_STORE_PATH, '%s/%s/'
                                  % (self.experiment1.id,
                                     self.dataset1.id)))
        self.dest2 = abspath(join(settings.FILE_STORE_PATH,
                                  '%s/%s/'
                                  % (self.experiment2.id,
                                     self.dataset2.id)))
        if not exists(self.dest1):
            makedirs(self.dest1)
        if not exists(self.dest2):
            makedirs(self.dest2)
        testfile1 = abspath(join(self.dest1, filename1))
        f = open(testfile1, 'w')
        f.write("Hello World!\n")
        f.close()
        testfile2 = abspath(join(self.dest2, filename2))
        _generate_test_image(testfile2)
        self.datafile1 = self._build_datafile(testfile1, filename1,
                                              self.dataset1)
        self.datafile2 = self._build_datafile(testfile2, filename2,
                                              self.dataset2)

    def _build_datafile(self, testfile, filename, dataset, checksum=None,
                        size=None, mimetype=''):
        # Register `testfile` as a DataFile plus backing DataFileObject.
        # Explicit checksum/size arguments override the real on-disk values
        # (used by testDatasetFile to exercise metadata overrides).
        filesize, sha512sum = get_size_and_sha512sum(testfile)
        datafile = DataFile(dataset=dataset, filename=filename,
                            mimetype=mimetype,
                            size=size if size is not None else filesize,
                            sha512sum=(checksum if checksum else sha512sum))
        datafile.save()
        dfo = DataFileObject(
            datafile=datafile,
            storage_box=datafile.get_default_storage_box())
        dfo.save()
        with open(testfile, 'r') as sourcefile:
            dfo.file_object = sourcefile
        return DataFile.objects.get(pk=datafile.pk)

    def tearDown(self):
        self.user.delete()
        self.experiment1.delete()
        self.experiment2.delete()
        rmtree(self.dest1)
        rmtree(self.dest2)

    def testView(self):
        client = Client()
        # check view of file1
        response = client.get('/datafile/view/%i/' % self.datafile1.id)
        self.assertEqual(response['Content-Disposition'],
                         'inline; filename="%s"'
                         % self.datafile1.filename)
        self.assertEqual(response.status_code, 200)
        response_content = ""
        for c in response.streaming_content:
            response_content += c
        self.assertEqual(response_content, 'Hello World!\n')
        # check view of file2
        response = client.get('/datafile/view/%i/' % self.datafile2.id)
        # Should be forbidden
        self.assertEqual(response.status_code, 403)
        self.experiment2.public_access = Experiment.PUBLIC_ACCESS_FULL
        self.experiment2.save()
        # check view of file2 again
        response = client.get('/datafile/view/%i/' % self.datafile2.id)
        self.assertEqual(response.status_code, 200)
        # The following behaviour relies on ImageMagick
        if IMAGEMAGICK_AVAILABLE:
            # file2 should have a ".png" filename
            self.assertEqual(response['Content-Disposition'],
                             'inline; filename="%s"'
                             % (self.datafile2.filename+'.png'))
            # file2 should be a PNG
            self.assertEqual(response['Content-Type'], 'image/png')
            png_signature = "\x89PNG\r\n\x1a\n"
            self.assertEqual(response.content[0:8], png_signature)
        else:
            # file2 should have a ".tiff" filename
            self.assertEqual(response['Content-Disposition'],
                             'inline; filename="%s"'
                             % (self.datafile2.filename))
            # file2 should be a TIFF
            self.assertEqual(response['Content-Type'], 'image/tiff')
            tiff_signature = "II\x2a\x00"
            self.assertEqual(response.content[0:4], tiff_signature)

    def _check_tar_file(self, content, rootdir, datafiles,
                        simpleNames=False, noTxt=False):
        # Spool the streamed response into a temp file, then verify the tar
        # contents. NOTE(review): if TarFile() itself raises, `tf` is unbound
        # in the finally clause (latent NameError).
        with NamedTemporaryFile('w') as tempfile:
            for c in content:
                tempfile.write(c)
            tempfile.flush()
            if getsize(tempfile.name) > 0:
                expect(is_tarfile(tempfile.name)).to_be_truthy()
                try:
                    tf = TarFile(tempfile.name, 'r')
                    self._check_names(datafiles, tf.getnames(),
                                      rootdir, simpleNames, noTxt)
                finally:
                    tf.close()
            else:
                self._check_names(datafiles, [],
                                  rootdir, simpleNames, noTxt)

    def _check_zip_file(self, content, rootdir, datafiles,
                        simpleNames=False, noTxt=False):
        # Same as _check_tar_file but for zip responses.
        with NamedTemporaryFile('w') as tempfile:
            for c in content:
                tempfile.write(c)
            tempfile.flush()
            # It should be a zip file
            expect(is_zipfile(tempfile.name)).to_be_truthy()
            try:
                zf = ZipFile(tempfile.name, 'r')
                self._check_names(datafiles, zf.namelist(),
                                  rootdir, simpleNames, noTxt)
            finally:
                zf.close()

    def _check_names(self, datafiles, names, rootdir, simpleNames, noTxt):
        # SimpleNames says if we expect basenames or pathnames
        # NoTxt says if we expect '.txt' files to be filtered out
        expect(len(names)).to_equal(len(datafiles))

    def testDownload(self):
        client = Client()
        # check download for experiment1 as tar
        response = client.get('/download/experiment/%i/tar/' %
                              self.experiment1.id)
        self.assertEqual(response['Content-Disposition'],
                         'attachment; filename="%s-complete.tar"'
                         % self.experiment1.title.replace(' ', '_'))
        self.assertEqual(response.status_code, 200)
        # NOTE(review): `reduce` is used without an import, i.e. this relies
        # on the Python 2 builtin.
        self._check_tar_file(
            response.streaming_content, str(self.experiment1.title
                                            .replace(' ', '_')),
            reduce(lambda x, y: x + y,
                   [ds.datafile_set.all()
                    for ds in self.experiment1.datasets.all()]))
        # check download of file1
        response = client.get('/download/datafile/%i/' % self.datafile1.id)
        self.assertEqual(response['Content-Disposition'],
                         'attachment; filename="%s"'
                         % self.datafile1.filename)
        self.assertEqual(response.status_code, 200)
        response_content = ""
        for c in response.streaming_content:
            response_content += c
        self.assertEqual(response_content, 'Hello World!\n')
        # requesting file2 should be forbidden...
        response = client.get('/download/datafile/%i/' % self.datafile2.id)
        self.assertEqual(response.status_code, 403)
        # check dataset1 download as tar
        response = client.post('/download/datafiles/',
                               {'expid': self.experiment1.id,
                                'dataset': [self.dataset1.id],
                                'datafile': [],
                                'comptype': 'tar'})
        self.assertEqual(response.status_code, 200)
        self._check_tar_file(response.streaming_content,
                             'Experiment 1-selection',
                             self.dataset1.datafile_set.all())
        # check dataset2 download
        response = client.post('/download/datafiles/',
                               {'expid': self.experiment2.id,
                                'dataset': [self.dataset2.id],
                                'datafile': []})
        self.assertEqual(response.status_code, 403)
        # check datafile1 download via POST
        response = client.post('/download/datafiles/',
                               {'expid': self.experiment1.id,
                                'dataset': [],
                                'datafile': [self.datafile1.id]})
        self.assertEqual(response.status_code, 200)
        self._check_tar_file(response.streaming_content,
                             'Experiment 1-selection',
                             [self.datafile1])
        # check datafile2 download via POST
        response = client.post('/download/datafiles/',
                               {'expid': self.experiment2.id,
                                'dataset': [],
                                'datafile': [self.datafile2.id]})
        self.assertEqual(response.status_code, 403)
        # Check datafile2 download with second experiment to "metadata only"
        self.experiment2.public_access = Experiment.PUBLIC_ACCESS_METADATA
        self.experiment2.save()
        response = client.get('/download/datafile/%i/' % self.datafile2.id)
        # Metadata-only means "no file access"!
        self.assertEqual(response.status_code, 403)
        # Check datafile2 download with second experiment to public
        self.experiment2.public_access = Experiment.PUBLIC_ACCESS_FULL
        self.experiment2.save()
        response = client.get('/download/datafile/%i/' % self.datafile2.id)
        self.assertEqual(response.status_code, 200)
        # This should be a TIFF (which often starts with "II\x2a\x00")
        self.assertEqual(response['Content-Type'], 'image/tiff')
        response_content = ""
        for c in response.streaming_content:
            response_content += c
        self.assertEqual(response_content[0:4], "II\x2a\x00")
        # check experiment tar download with alternative organization
        response = client.get('/download/experiment/%i/tar/' %
                              self.experiment1.id)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Disposition'],
                         'attachment; filename="%s-complete.tar"'
                         % self.experiment1.title.replace(' ', '_'))
        self._check_tar_file(
            response.streaming_content, str(self.experiment1.id),
            reduce(lambda x, y: x + y,
                   [ds.datafile_set.all()
                    for ds in self.experiment1.datasets.all()]),
            simpleNames=True)
        # check experiment1 download with '.txt' filtered out (none left)
        response = client.get('/download/experiment/%i/tar/' %
                              self.experiment1.id)
        self.assertEqual(response.status_code, 200)
        # check experiment2 download with '.txt' filtered out
        response = client.get('/download/experiment/%i/tar/' %
                              self.experiment2.id)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Disposition'],
                         'attachment; filename="%s-complete.tar"'
                         % self.experiment2.title.replace(' ', '_'))
        self._check_tar_file(
            response.streaming_content, str(self.experiment2.id),
            reduce(lambda x, y: x + y,
                   [ds.datafile_set.all()
                    for ds in self.experiment2.datasets.all()]),
            simpleNames=True, noTxt=True)

    def testDatasetFile(self):
        # check registered text file for physical file meta information
        df = DataFile.objects.get(pk=self.datafile1.id)  # skipping test # noqa # pylint: disable=W0101
        try:
            from magic import Magic
            self.assertEqual(df.mimetype, 'text/plain; charset=us-ascii')
        except:
            # XXX Test disabled because lib magic can't be loaded
            pass
        self.assertEqual(df.size, 13)
        self.assertEqual(df.md5sum, '8ddd8be4b179a529afa5f2ffae4b9858')
        # Now check we can calculate checksums and infer the mime type
        # for a JPG file.
        filename = 'tardis/tardis_portal/tests/test_data/ands-logo-hi-res.jpg'
        dataset = Dataset.objects.get(pk=self.dataset1.id)
        pdf1 = self._build_datafile(filename, basename(filename), dataset)
        self.assertEqual(pdf1.file_objects.get().verify(), True)
        pdf1 = DataFile.objects.get(pk=pdf1.pk)
        try:
            from magic import Magic  # noqa
            self.assertEqual(pdf1.mimetype, 'image/jpeg')
        except:
            # XXX Test disabled because lib magic can't be loaded
            pass
        self.assertEqual(pdf1.size, 14232)
        self.assertEqual(pdf1.md5sum, 'c450d5126ffe3d14643815204daf1bfb')
        # Now check that we can override the physical file meta information
        # We are setting size/checksums that don't match the actual file, so
        # the
        pdf2 = self._build_datafile(filename, filename, dataset,
                                    checksum='cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e',
                                    size=0,
                                    mimetype='application/vnd.openxmlformats-officedocument.presentationml.presentation')  # noqa
        self.assertEqual(pdf2.size, 0)
        self.assertEqual(pdf2.md5sum, '')
        self.assertEqual(pdf2.file_objects.get().verified, False)
        pdf2 = DataFile.objects.get(pk=pdf2.pk)
        try:
            from magic import Magic  # noqa
            self.assertEqual(pdf2.mimetype, 'application/vnd.openxmlformats-officedocument.presentationml.presentation')  # noqa
        except:
            # XXX Test disabled because lib magic can't be loaded
            pass
        self.assertEqual(pdf2.size, 0)
        self.assertEqual(pdf2.md5sum, '')
        pdf2.mimetype = ''
        pdf2.save()
        pdf2.file_objects.get().save()
        pdf2 = DataFile.objects.get(pk=pdf2.pk)
        try:
            from magic import Magic  # noqa
            self.assertEqual(pdf2.mimetype, 'application/pdf')
        except:
            # XXX Test disabled because lib magic can't be loaded
            pass
|
vlegoff/mud
|
menu/validate_account.py
|
"""
This module contains the 'validate_account' menu node.
"""
from textwrap import dedent
from menu.character import _options_choose_characters
def validate_account(caller, input):
    """Prompt the user to enter the received validation code.

    Returns an evennia-style (text, options) pair: on a correct code the
    account is marked valid and the character-selection options are shown;
    otherwise an error message with retry/back options is returned.
    """
    player = caller.db._player
    code = input.strip()

    if player.db.validation_code == code:
        # Correct code: mark the account valid, discard the one-shot code and
        # move straight to character selection.
        player.db.valid = True
        player.attributes.remove("validation_code")
        return "", _options_choose_characters(player)

    # Wrong code: explain and let the user retry or go back.
    text = dedent("""
        |rSorry, the specified validation code {} doesn't match
        the one stored for this account. Is it the code you
        received by e-mail? You can try to enter it again, or
        enter |yb|n to choose a different e-mail address.
    """.strip("\n")).format(code)
    options = (
        {
            "key": "b",
            "desc": "Go back to the e-mail address menu.",
            "goto": "email_address",
        },
        {
            "key": "_default",
            "desc": "Enter the validation code.",
            "goto": "validate_account",
        },
    )
    return text, options
|
katiecheng/Bombolone
|
env/lib/python2.7/site-packages/requests/_oauth.py
|
# -*- coding: utf-8 -*-
"""
requests._oauth
~~~~~~~~~~~~~~~
This module contains the path hack necessary for oauthlib to be vendored into requests
while allowing upstream changes.
"""
import os
import sys
# Import oauthlib, preferring an installed copy; on ImportError, push the
# vendored ./packages directory onto sys.path and retry the identical
# imports so the bundled oauthlib is picked up instead.
try:
    from oauthlib.oauth1 import rfc5849
    from oauthlib.common import extract_params
    from oauthlib.oauth1.rfc5849 import (Client, SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER)
except ImportError:
    path = os.path.abspath('/'.join(__file__.split('/')[:-1]+['packages']))
    sys.path.insert(0, path)
    from oauthlib.oauth1 import rfc5849
    from oauthlib.common import extract_params
    from oauthlib.oauth1.rfc5849 import (Client, SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER)
|
disqus/django-old
|
tests/regressiontests/forms/localflavor/co.py
|
from django.contrib.localflavor.co.forms import CODepartmentSelect
from utils import LocalFlavorTestCase
class COLocalFlavorTests(LocalFlavorTestCase):
    """Tests for the Colombian (CO) localflavor form widgets."""

    def test_CODepartmentSelect(self):
        # Rendering with value 'COR' must emit every department option and
        # mark Córdoba as selected.
        d = CODepartmentSelect()
        out = u"""<select name="department">
<option value="AMA">Amazonas</option>
<option value="ANT">Antioquia</option>
<option value="ARA">Arauca</option>
<option value="ATL">Atl\xe1ntico</option>
<option value="DC">Bogot\xe1</option>
<option value="BOL">Bol\xedvar</option>
<option value="BOY">Boyac\xe1</option>
<option value="CAL">Caldas</option>
<option value="CAQ">Caquet\xe1</option>
<option value="CAS">Casanare</option>
<option value="CAU">Cauca</option>
<option value="CES">Cesar</option>
<option value="CHO">Choc\xf3</option>
<option value="COR" selected="selected">C\xf3rdoba</option>
<option value="CUN">Cundinamarca</option>
<option value="GUA">Guain\xeda</option>
<option value="GUV">Guaviare</option>
<option value="HUI">Huila</option>
<option value="LAG">La Guajira</option>
<option value="MAG">Magdalena</option>
<option value="MET">Meta</option>
<option value="NAR">Nari\xf1o</option>
<option value="NSA">Norte de Santander</option>
<option value="PUT">Putumayo</option>
<option value="QUI">Quind\xedo</option>
<option value="RIS">Risaralda</option>
<option value="SAP">San Andr\xe9s and Providencia</option>
<option value="SAN">Santander</option>
<option value="SUC">Sucre</option>
<option value="TOL">Tolima</option>
<option value="VAC">Valle del Cauca</option>
<option value="VAU">Vaup\xe9s</option>
<option value="VID">Vichada</option>
</select>"""
        self.assertEqual(d.render('department', 'COR'), out)
|
almarklein/scikit-image
|
skimage/util/shape.py
|
__all__ = ['view_as_blocks', 'view_as_windows']
import numpy as np
from numpy.lib.stride_tricks import as_strided
def view_as_blocks(arr_in, block_shape):
    """Block view of the input n-dimensional array (using re-striding).

    Blocks are non-overlapping views of the input array.

    Parameters
    ----------
    arr_in: ndarray
        The n-dimensional input array.
    block_shape: tuple
        The shape of the block. Each dimension must divide evenly into the
        corresponding dimensions of `arr_in`.

    Returns
    -------
    arr_out: ndarray
        Block view of the input array.

    Examples
    --------
    >>> import numpy as np
    >>> A = np.arange(4*4).reshape(4,4)
    >>> B = view_as_blocks(A, block_shape=(2, 2))
    >>> B[0, 1]
    array([[2, 3],
           [6, 7]])
    >>> B[1, 0, 1, 1]
    13
    """
    # -- basic checks on arguments
    if not isinstance(block_shape, tuple):
        raise TypeError('block needs to be a tuple')
    block_shape = np.array(block_shape)
    if (block_shape <= 0).any():
        raise ValueError("'block_shape' elements must be strictly positive")
    if block_shape.size != arr_in.ndim:
        raise ValueError("'block_shape' must have the same length "
                         "as 'arr_in.shape'")
    arr_shape = np.array(arr_in.shape)
    if (arr_shape % block_shape).sum() != 0:
        raise ValueError("'block_shape' is not compatible with 'arr_in'")
    # -- restride the array to build the block view
    arr_in = np.ascontiguousarray(arr_in)
    # Floor division is required here: plain '/' is true division under
    # Python 3 and would yield a float shape, which as_strided rejects.
    new_shape = tuple(arr_shape // block_shape) + tuple(block_shape)
    new_strides = tuple(arr_in.strides * block_shape) + arr_in.strides
    arr_out = as_strided(arr_in, shape=new_shape, strides=new_strides)
    return arr_out
def view_as_windows(arr_in, window_shape, step=1):
    """Rolling window view of the input n-dimensional array.

    Windows are overlapping views of the input array; adjacent windows are
    shifted by `step` elements along every axis.

    Parameters
    ----------
    arr_in: ndarray
        The n-dimensional input array.
    window_shape: tuple
        Shape of the rolling window; must have one entry per dimension of
        `arr_in`.
    step : int
        Number of elements to skip when moving the window forward (by
        default, move forward by one).

    Returns
    -------
    arr_out: ndarray
        (rolling) window view of the input array.

    Notes
    -----
    Rolling views are memory-cheap as views, but any computation that
    materializes one (e.g. a reshape) can produce a (much) larger array
    than the input — be careful with memory usage.

    Examples
    --------
    >>> import numpy as np
    >>> A = np.arange(4*4).reshape(4,4)
    >>> view_as_windows(A, (2, 2))[0, 1]
    array([[1, 2],
           [5, 6]])
    """
    # -- validate arguments before touching the strides
    if not isinstance(arr_in, np.ndarray):
        raise TypeError("`arr_in` must be a numpy ndarray")
    if not isinstance(window_shape, tuple):
        raise TypeError("`window_shape` must be a tuple")
    if len(window_shape) != arr_in.ndim:
        raise ValueError("`window_shape` is incompatible with `arr_in.shape`")
    if step < 1:
        raise ValueError("`step` must be >= 1")

    arr_shape = np.array(arr_in.shape)
    window_shape = np.array(window_shape, dtype=arr_shape.dtype)
    if ((arr_shape - window_shape) < 0).any():
        raise ValueError("`window_shape` is too large")
    if ((window_shape - 1) < 0).any():
        raise ValueError("`window_shape` is too small")

    # -- build the rolling window view via re-striding
    arr_in = np.ascontiguousarray(arr_in)
    # Number of window positions along each axis.
    win_indices_shape = (arr_shape - window_shape) // step + 1
    out_shape = tuple(win_indices_shape) + tuple(window_shape)
    strides = np.array(arr_in.strides)
    out_strides = tuple(strides * step) + tuple(strides)
    return as_strided(arr_in, shape=out_shape, strides=out_strides)
|
rustyrazorblade/machete
|
machete/wiki/tests/test_create_page.py
|
from unittest import TestCase
from machete.base.tests import IntegrationTestCase
from machete.wiki.models import Wiki, Page
class CreatePageTest(TestCase):
    """Unit test for Wiki.create_page."""

    def test_create_page(self):
        wiki = Wiki.create()
        page = wiki.create_page("test name [Some link]",
                                "/index.html",
                                u"this is a test")
        # create_page should hand back a Page with rendered HTML.
        assert isinstance(page, Page)
        assert page.html == u'<p>this is a test</p>'
class PageIntegrationTest(IntegrationTestCase):
    """HTTP round trip: create a wiki page, then fetch it and the wiki index."""

    def test_create_page(self):
        # Create the page via POST to the project's wiki root.
        url = "/projects/{}/wiki/".format(self.project.vid)
        response = self.post(url, {"url":"TestPage",
                                   "name":"Whatever bro",
                                   "text":"this is a test"})
        self.assert200(response)
        # The new page must be retrievable.
        url = "/projects/{}/wiki/TestPage".format(self.project.vid)
        response = self.get(url)
        self.assert200(response)
        # The wiki index must still render.
        url = "/projects/{}/wiki/".format(self.project.vid)
        response = self.get(url)
        self.assert200(response)
|
pauloschilling/sentry
|
src/sentry/interfaces/exception.py
|
"""
sentry.interfaces.exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ('Exception',)
from django.conf import settings
from sentry.interfaces.base import Interface
from sentry.interfaces.stacktrace import Stacktrace, is_newest_frame_first
from sentry.utils.safe import trim
from sentry.web.helpers import render_to_string
class SingleException(Interface):
    """
    A standard exception with a ``type`` and value argument, and an optional
    ``module`` argument describing the exception class type and
    module namespace. Either ``type`` or ``value`` must be present.
    You can also optionally bind a stacktrace interface to an exception. The
    spec is identical to ``sentry.interfaces.Stacktrace``.
    >>> {
    >>>     "type": "ValueError",
    >>>     "value": "My exception value",
    >>>     "module": "__builtins__"
    >>>     "stacktrace": {
    >>>         # see sentry.interfaces.Stacktrace
    >>>     }
    >>> }
    """
    score = 900
    display_score = 1200

    @classmethod
    def to_python(cls, data):
        # The interface contract requires at least one of type/value.
        assert data.get('type') or data.get('value')
        # Only build a Stacktrace when there are actual frames to show.
        if data.get('stacktrace') and data['stacktrace'].get('frames'):
            stacktrace = Stacktrace.to_python(data['stacktrace'])
        else:
            stacktrace = None
        # Trim free-form strings to bounded lengths before storage.
        kwargs = {
            'type': trim(data.get('type'), 128),
            'value': trim(data.get('value'), 4096),
            'module': trim(data.get('module'), 128),
            'stacktrace': stacktrace,
        }
        return cls(**kwargs)

    def to_json(self):
        # Serialize for storage; the nested stacktrace serializes itself.
        if self.stacktrace:
            stacktrace = self.stacktrace.to_json()
        else:
            stacktrace = None
        return {
            'type': self.type,
            'value': self.value,
            'module': self.module,
            'stacktrace': stacktrace,
        }

    def get_api_context(self):
        # Same shape as to_json() but using the API representation of the
        # stacktrace.
        if self.stacktrace:
            stacktrace = self.stacktrace.get_api_context()
        else:
            stacktrace = None
        return {
            'type': self.type,
            'value': self.value,
            'module': self.module,
            'stacktrace': stacktrace,
        }

    def get_alias(self):
        return 'exception'

    def get_path(self):
        return 'sentry.interfaces.Exception'

    def get_hash(self):
        # Prefer a stacktrace-based hash (plus the exception type) for
        # grouping; fall back to type/value when no stacktrace hash exists.
        output = None
        if self.stacktrace:
            output = self.stacktrace.get_hash()
            if output and self.type:
                output.append(self.type)
        if not output:
            # NOTE: Python 2 filter() returns a list here.
            output = filter(bool, [self.type, self.value])
        return output

    def get_context(self, event, is_public=False, **kwargs):
        # Template context for rendering a single exception panel.
        last_frame = None
        interface = event.interfaces.get('sentry.interfaces.Stacktrace')
        if interface is not None and interface.frames:
            last_frame = interface.frames[-1]
        e_module = self.module
        e_type = self.type
        e_value = self.value
        if self.module:
            fullname = '%s.%s' % (e_module, e_type)
        else:
            fullname = e_type
        # With only a value present, display it in the type slot.
        if e_value and not e_type:
            e_type = e_value
            e_value = None
        return {
            'is_public': is_public,
            'event': event,
            'exception_type': e_type,
            'exception_value': e_value,
            'exception_module': e_module,
            'fullname': fullname,
            'last_frame': last_frame,
        }
class Exception(Interface):
    """
    An exception consists of a list of values. In most cases, this list
    contains a single exception, with an optional stacktrace interface.
    Each exception has a mandatory ``value`` argument and optional ``type`` and
    ``module`` arguments describing the exception class type and module
    namespace.
    You can also optionally bind a stacktrace interface to an exception. The
    spec is identical to ``sentry.interfaces.Stacktrace``.
    >>> {
    >>>     "values": [{
    >>>         "type": "ValueError",
    >>>         "value": "My exception value",
    >>>         "module": "__builtins__"
    >>>         "stacktrace": {
    >>>             # see sentry.interfaces.Stacktrace
    >>>         }
    >>>     }]
    >>> }
    Values should be sent oldest to newest, this includes both the stacktrace
    and the exception itself.
    .. note:: This interface can be passed as the 'exception' key in addition
              to the full interface path.
    """
    score = 2000

    @classmethod
    def to_python(cls, data):
        # Accept both the bare single-exception shape and the
        # {'values': [...]} chained shape.
        if 'values' not in data:
            data = {'values': [data]}
        assert data['values']
        # May shrink data['values'] in place and set data['exc_omitted'].
        trim_exceptions(data)
        kwargs = {
            'values': [
                SingleException.to_python(v)
                for v in data['values']
            ],
        }
        if data.get('exc_omitted'):
            # exc_omitted is a (first_kept, first_dropped_from_tail) pair.
            assert len(data['exc_omitted']) == 2
            kwargs['exc_omitted'] = data['exc_omitted']
        else:
            kwargs['exc_omitted'] = None
        return cls(**kwargs)

    def to_json(self):
        return {
            'values': [v.to_json() for v in self.values],
            'exc_omitted': self.exc_omitted,
        }

    def get_api_context(self):
        return {
            'values': [v.get_api_context() for v in self.values],
            'excOmitted': self.exc_omitted,
        }

    # The interface behaves like a sequence of SingleException values.
    def __getitem__(self, key):
        return self.values[key]

    def __iter__(self):
        return iter(self.values)

    def __len__(self):
        return len(self.values)

    def get_alias(self):
        return 'exception'

    def get_path(self):
        return 'sentry.interfaces.Exception'

    def compute_hashes(self, platform):
        # Group on the full (system-frame) hash, plus the app-only hash when
        # it differs — so in-app changes can regroup without losing the
        # broader grouping.
        system_hash = self.get_hash(system_frames=True)
        if not system_hash:
            return []
        app_hash = self.get_hash(system_frames=False)
        if system_hash == app_hash or not app_hash:
            return [system_hash]
        return [system_hash, app_hash]

    def get_hash(self, system_frames=True):
        # optimize around the fact that some exceptions might have stacktraces
        # while others may not and we ALWAYS want stacktraces over values
        output = []
        for value in self.values:
            if not value.stacktrace:
                continue
            stack_hash = value.stacktrace.get_hash(
                system_frames=system_frames,
            )
            if stack_hash:
                output.extend(stack_hash)
                output.append(value.type)
        if not output:
            # No usable stacktrace anywhere: fall back to per-value hashes.
            for value in self.values:
                output.extend(value.get_hash())
        return output

    def get_context(self, event, is_public=False, **kwargs):
        newest_first = is_newest_frame_first(event)
        # First pass: classify every frame as system / app / unknown.
        system_frames = 0
        app_frames = 0
        unknown_frames = 0
        for exc in self.values:
            if not exc.stacktrace:
                continue
            for frame in exc.stacktrace.frames:
                if frame.in_app is False:
                    system_frames += 1
                elif frame.in_app is True:
                    app_frames += 1
                else:
                    unknown_frames += 1
        # TODO(dcramer): this should happen in normalize
        # We need to ensure that implicit values for in_app are handled
        # appropriately
        if unknown_frames and (app_frames or system_frames):
            for exc in self.values:
                if not exc.stacktrace:
                    continue
                for frame in exc.stacktrace.frames:
                    if frame.in_app is None:
                        frame.in_app = bool(system_frames)
                        if frame.in_app:
                            app_frames += 1
                        else:
                            system_frames += 1
        # if there is a mix of frame styles then we indicate that system frames
        # are present and should be represented as a split
        has_system_frames = app_frames and system_frames
        context_kwargs = {
            'event': event,
            'is_public': is_public,
            'newest_first': newest_first,
            'has_system_frames': has_system_frames,
        }
        exceptions = []
        last = len(self.values) - 1
        for num, e in enumerate(self.values):
            context = e.get_context(**context_kwargs)
            if e.stacktrace:
                context['stacktrace'] = e.stacktrace.get_context(
                    with_stacktrace=False, **context_kwargs)
            else:
                context['stacktrace'] = {}
            context['stack_id'] = 'exception_%d' % (num,)
            # The chain is stored oldest-to-newest; the last entry is the
            # root exception actually raised.
            context['is_root'] = num == last
            exceptions.append(context)
        if newest_first:
            exceptions.reverse()
        if self.exc_omitted:
            first_exc_omitted, last_exc_omitted = self.exc_omitted
        else:
            first_exc_omitted, last_exc_omitted = None, None
        return {
            'newest_first': newest_first,
            'system_frames': system_frames if has_system_frames else 0,
            'exceptions': exceptions,
            'stacktrace': self.get_stacktrace(event, newest_first=newest_first),
            'first_exc_omitted': first_exc_omitted,
            'last_exc_omitted': last_exc_omitted,
        }

    def to_html(self, event, **kwargs):
        if not self.values:
            return ''
        # Single stacktrace-less exception: render the simple template.
        if len(self.values) == 1 and not self.values[0].stacktrace:
            exception = self.values[0]
            context = exception.get_context(event=event, **kwargs)
            return render_to_string('sentry/partial/interfaces/exception.html', context)
        context = self.get_context(event=event, **kwargs)
        return render_to_string('sentry/partial/interfaces/chained_exception.html', context)

    def to_string(self, event, is_public=False, **kwargs):
        # Plain-text summary: one "Type: value" line per exception plus a
        # short app-only stacktrace excerpt where available.
        if not self.values:
            return ''
        output = []
        for exc in self.values:
            output.append(u'{0}: {1}\n'.format(exc.type, exc.value))
            if exc.stacktrace:
                output.append(exc.stacktrace.get_stacktrace(
                    event, system_frames=False, max_frames=5,
                    header=False) + '\n\n')
        return (''.join(output)).strip()

    def get_stacktrace(self, *args, **kwargs):
        # Delegates to the first (oldest) exception's stacktrace.
        exc = self.values[0]
        if exc.stacktrace:
            return exc.stacktrace.get_stacktrace(*args, **kwargs)
        return ''
def trim_exceptions(data, max_values=settings.SENTRY_MAX_EXCEPTIONS):
    """Trim ``data['values']`` in place to at most ``max_values`` entries.

    Keeps the oldest and newest halves of the exception chain, drops the
    middle, and records the dropped slice bounds in ``data['exc_omitted']``
    as ``(first_kept_tail_index, first_dropped_from_tail_index)``.
    """
    # TODO: this doesnt account for cases where the client has already omitted
    # exceptions
    values = data['values']
    exc_len = len(values)
    if exc_len <= max_values:
        return
    # Explicit floor division: identical to the old `/` under Python 2 ints,
    # and still an int under Python 3.
    half_max = max_values // 2
    data['exc_omitted'] = (half_max, exc_len - half_max)
    # One slice deletion replaces the original per-item `del values[half_max]`
    # loop: same result, O(n) instead of O(n^2) list shifting.
    del values[half_max:exc_len - half_max]
|
haijieg/SFrame
|
oss_src/unity/python/sframe/data_structures/__init__.py
|
"""
GraphLab Create offers several data structures for data analysis.
Concise descriptions of the data structures and their methods are contained in
the API documentation, along with a small number of simple examples. For more
detailed descriptions and examples, please see the `User Guide
<https://dato.com/learn/userguide/>`_, `API Translator
<https://dato.com/learn/translator/>`_, `How-Tos
<https://dato.com/learn/how-to/>`_, and data science `Gallery
<https://dato.com/learn/gallery/>`_.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
__all__ = ['sframe', 'sarray', 'sgraph', 'sketch', 'image']
from . import image
from . import sframe
from . import sarray
from . import sgraph
from . import sketch
|
vlinhart/django-smsbrana
|
smsbrana/views.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
from django.http import HttpResponse
from smsbrana import SmsConnect
from smsbrana import signals
from smsbrana.const import DELIVERY_STATUS_DELIVERED, DATETIME_FORMAT
from smsbrana.models import SentSms
def smsconnect_notification(request):
    """Gateway callback: mark delivered SMS messages and fan out a signal.

    Polls the smsbrana inbox, flags matching SentSms rows as delivered,
    clears the remote inbox when it grows large, and notifies listeners.
    """
    connection = SmsConnect()
    inbox = connection.inbox()
    # print result
    for report in inbox['delivery_report']:
        if report['status'] != DELIVERY_STATUS_DELIVERED:
            continue
        try:
            sent = SentSms.objects.get(sms_id=report['idsms'])
        except SentSms.DoesNotExist:
            # logger.error('sms delivered which wasn\'t sent' + str(delivered))
            continue
        if sent.delivered:
            # Already processed in an earlier callback; skip.
            continue
        sent.delivered = True
        sent.delivered_date = datetime.strptime(report['time'], DATETIME_FORMAT)
        sent.save()
    # delete the inbox if there are 100+ items
    if len(inbox['delivery_report']) > 100:
        connection.inbox(delete=True)
    signals.smsconnect_notification_received.send(sender=None, inbox=inbox, request=request)
    return HttpResponse('OK')
|
carlohamalainen/volgenmodel-nipype
|
new_data_to_atlas_space.py
|
#!/usr/bin/env python3
import os
import os.path
from nipype.interfaces.utility import IdentityInterface, Function
from nipype.interfaces.io import SelectFiles, DataSink, DataGrabber
from nipype.pipeline.engine import Workflow, Node, MapNode
from nipype.interfaces.minc import Resample, BigAverage, VolSymm
import argparse
def create_workflow(
        xfm_dir,
        xfm_pattern,
        atlas_dir,
        atlas_pattern,
        source_dir,
        source_pattern,
        work_dir,
        out_dir,
        name="new_data_to_atlas_space"):
    """Build a nipype workflow that resamples source volumes into atlas space.

    Grabs source images, per-image transforms, and the atlas template via
    three DataGrabbers, resamples each source through its transform onto the
    atlas grid, averages the results, and sinks everything to ``out_dir``.
    Returns the (unrun) Workflow.
    """
    wf = Workflow(name=name)
    wf.base_dir = os.path.join(work_dir)

    # Input images to be moved into atlas space.
    datasource_source = Node(
        interface=DataGrabber(
            sort_filelist=True
        ),
        name='datasource_source'
    )
    datasource_source.inputs.base_directory = os.path.abspath(source_dir)
    datasource_source.inputs.template = source_pattern

    # One transform per source image; sort_filelist keeps the two grabbers
    # aligned so image N pairs with transform N in the MapNode below.
    datasource_xfm = Node(
        interface=DataGrabber(
            sort_filelist=True
        ),
        name='datasource_xfm'
    )
    datasource_xfm.inputs.base_directory = os.path.abspath(xfm_dir)
    datasource_xfm.inputs.template = xfm_pattern

    # Atlas template providing the target sampling grid ('like' input).
    datasource_atlas = Node(
        interface=DataGrabber(
            sort_filelist=True
        ),
        name='datasource_atlas'
    )
    datasource_atlas.inputs.base_directory = os.path.abspath(atlas_dir)
    datasource_atlas.inputs.template = atlas_pattern

    # Resample every (image, transform) pair onto the atlas grid.
    resample = MapNode(
        interface=Resample(
            sinc_interpolation=True
        ),
        name='resample_',
        iterfield=['input_file', 'transformation']
    )
    wf.connect(datasource_source, 'outfiles', resample, 'input_file')
    wf.connect(datasource_xfm, 'outfiles', resample, 'transformation')
    wf.connect(datasource_atlas, 'outfiles', resample, 'like')

    # Voxel-wise average of all resampled volumes.
    bigaverage = Node(
        interface=BigAverage(
            output_float=True,
            robust=False
        ),
        name='bigaverage',
        iterfield=['input_file']
    )
    wf.connect(resample, 'output_file', bigaverage, 'input_files')

    # Collect the average, the per-image resampled volumes, and the
    # transforms that were applied.
    datasink = Node(
        interface=DataSink(
            base_directory=out_dir,
            container=out_dir
        ),
        name='datasink'
    )
    wf.connect([(bigaverage, datasink, [('output_file', 'average')])])
    wf.connect([(resample, datasink, [('output_file', 'atlas_space')])])
    wf.connect([(datasource_xfm, datasink, [('outfiles', 'transforms')])])
    return wf
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--name",
type=str,
required=True
)
parser.add_argument(
"--xfm_dir",
type=str,
required=True
)
parser.add_argument(
"--xfm_pattern",
type=str,
required=True
)
parser.add_argument(
"--source_dir",
type=str,
required=True
)
parser.add_argument(
"--source_pattern",
type=str,
required=True
)
parser.add_argument(
"--atlas_dir",
type=str,
required=True
)
parser.add_argument(
"--atlas_pattern",
type=str,
required=True
)
parser.add_argument(
"--work_dir",
type=str,
required=True
)
parser.add_argument(
"--out_dir",
type=str,
required=True
)
parser.add_argument(
'--debug',
dest='debug',
action='store_true',
help='debug mode'
)
args = parser.parse_args()
if args.debug:
from nipype import config
config.enable_debug_mode()
config.set('execution', 'stop_on_first_crash', 'true')
config.set('execution', 'remove_unnecessary_outputs', 'false')
config.set('execution', 'keep_inputs', 'true')
config.set('logging', 'workflow_level', 'DEBUG')
config.set('logging', 'interface_level', 'DEBUG')
config.set('logging', 'utils_level', 'DEBUG')
wf = create_workflow(
xfm_dir=os.path.abspath(args.xfm_dir),
xfm_pattern=args.xfm_pattern,
atlas_dir=os.path.abspath(args.atlas_dir),
atlas_pattern=args.atlas_pattern,
source_dir=os.path.abspath(args.source_dir),
source_pattern=args.source_pattern,
work_dir=os.path.abspath(args.work_dir),
out_dir=os.path.abspath(args.out_dir),
name=args.name
)
wf.run(
plugin='MultiProc',
plugin_args={
'n_procs': int(
os.environ["NCPUS"] if "NCPUS" in os.environ else os.cpu_count
)
}
)
|
shub0/algorithm-data-structure
|
python/sum_roof_to_leaf.py
|
#! /usr/bin/python
'''
Given a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.
An example is the root-to-leaf path 1->2->3 which represents the number 123.
Find the total sum of all root-to-leaf numbers.
For example,
1
/ \
2 3
The root-to-leaf path 1->2 represents the number 12.
The root-to-leaf path 1->3 represents the number 13.
Return the sum = 12 + 13 = 25.
'''
from node_struct import TreeNode
class Solution:
    """Sum every root-to-leaf number in a binary tree of digit-valued nodes."""

    def leafNode(self, root):
        # A node with neither child is a leaf.
        return not root.left and not root.right

    def inOrderTraversal(self, root, currentPath, path):
        """Depth-first walk; append the accumulated number at each leaf."""
        if not root:
            return
        # Shift the running number one decimal place and add this digit.
        currentPath = currentPath * 10 + root.val
        if self.leafNode(root):
            path.append(currentPath)
            return
        self.inOrderTraversal(root.left, currentPath, path)
        self.inOrderTraversal(root.right, currentPath, path)

    # @param root, a tree node
    # @return an integer
    def sumNumbers(self, root):
        collected = []
        self.inOrderTraversal(root, 0, collected)
        return sum(collected)
if __name__ == '__main__':
    # Demo: the example tree from the module docstring, extended with two
    # extra leaves: paths 1-2-5, 1-2-4, 1-3 -> 125 + 124 + 13 = 262.
    solution = Solution()
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.right = TreeNode(4)
    root.left.left = TreeNode(5)
    # print() calls instead of Python 2 print statements: identical output
    # for a single argument under Python 2, and the file stays parseable
    # under Python 3.
    print(solution.sumNumbers(root))
    print(solution.sumNumbers(None))
|
goddardl/gaffer
|
python/GafferSceneUI/TransformUI.py
|
##########################################################################
#
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferScene
# Attach node- and plug-level help text to GafferScene.Transform; the UI
# surfaces these strings as tooltips/documentation.
Gaffer.Metadata.registerNodeDescription(

    GafferScene.Transform,

    """Modifies the transforms of all locations matched by the filter.""",

    "space",
    """The space in which the transform is applied.""",

    "transform",
    """The transform to be applied.""",

)

# Present the 'space' plug as an enum menu limited to the two valid spaces.
GafferUI.PlugValueWidget.registerCreator(
    GafferScene.Transform,
    "space",
    GafferUI.EnumPlugValueWidget,
    labelsAndValues = (
        ( "World", GafferScene.Transform.Space.World ),
        ( "Object", GafferScene.Transform.Space.Object ),
    )
)
|
jonzobrist/Percona-Server-5.1
|
kewpie/lib/util/mysqlBaseTestCase.py
|
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
import os
import time
import difflib
import subprocess
import MySQLdb
servers = None
class mysqlBaseTestCase(unittest.TestCase):
    # Base TestCase for kewpie MySQL tests: wraps query execution, slave
    # consistency checks, mysqldump handling, and randgen invocation.
    # NOTE: Python 2 code (old except syntax, dict.has_key).

    def setUp(self):
        """ If we need to do anything pre-test, we do it here.
            Any code here is executed before any test method we
            may execute
        """
        # `servers` is a module-level global populated by the test harness.
        self.servers = servers
        return

    def tearDown(self):
        #server_manager.reset_servers(test_executor.name)
        # Recreate the 'test' schema on every server so tests stay isolated.
        queries = ["DROP SCHEMA IF EXISTS test"
                  ,"CREATE SCHEMA IF NOT EXISTS test"
                  ]
        for server in self.servers:
            retcode, result = self.execute_queries(queries, server, schema='mysql')
            self.assertEqual(retcode,0,result)

    # Begin our utility code here
    # This is where we add methods that enable a test to do magic : )

    def execute_cmd(self, cmd, stdout_path, exec_path=None, get_output=False):
        # Run a shell command, capturing stdout+stderr to stdout_path.
        # Returns (returncode, output-or-None).
        stdout_file = open(stdout_path,'w')
        cmd_subproc = subprocess.Popen( cmd
                                      , shell=True
                                      , cwd=exec_path
                                      , stdout = stdout_file
                                      , stderr = subprocess.STDOUT
                                      )
        cmd_subproc.wait()
        retcode = cmd_subproc.returncode
        stdout_file.close()
        if get_output:
            data_file = open(stdout_path,'r')
            output = ''.join(data_file.readlines())
        else:
            output = None
        return retcode, output

    def get_tables(self, server, schema):
        """ Return a list of the tables in the
            schema on the server
        """
        results = []
        query = "SHOW TABLES IN %s" %(schema)
        retcode, table_set = self.execute_query(query, server)
        for table_data in table_set:
            table_name = table_data[0]
            results.append(table_name)
        return results

    def check_slaves_by_query( self
                             , master_server
                             , other_servers
                             , query
                             , expected_result = None
                             ):
        """ We execute the query across all servers
            and return a dict listing any diffs found,
            None if all is good.
            If a user provides an expected_result, we
            will skip executing against the master
            This is done as it is assumed the expected
            result has been generated / tested against
            the master
        """
        comp_results = {}
        if expected_result:
            pass # don't bother getting it
        else:
            # run against master for 'good' value
            retcode, expected_result = self.execute_query(query, master_server)
        for server in other_servers:
            retcode, slave_result = self.execute_query(query, server)
            #print "%s: expected_result= %s | slave_result= %s" % ( server.name
            #                                                     , expected_result
            #                                                     , slave_result_
            #                                                     )
            if not expected_result == slave_result:
                comp_data = "%s: expected_result= %s | slave_result= %s" % ( server.name
                                                                           , expected_result
                                                                           , slave_result
                                                                           )
                # Accumulate per-server lists of mismatch descriptions.
                if comp_results.has_key(server.name):
                    comp_results[server.name].append(comp_data)
                else:
                    comp_results[server.name]=[comp_data]
        if comp_results:
            return comp_results
        return None

    def check_slaves_by_checksum( self
                                , master_server
                                , other_servers
                                , schemas=['test']
                                , tables=[]
                                ):
        """ We compare the specified tables (default = all)
            from the specified schemas between the 'master'
            and the other servers provided (via list)
            via CHECKSUM
            We return a dictionary listing the server
            and any tables that differed
        """
        # NOTE(review): mutable default args (schemas/tables) are shared
        # across calls — safe only while callers never mutate them.
        comp_results = {}
        for server in other_servers:
            for schema in schemas:
                for table in self.get_tables(master_server, schema):
                    query = "CHECKSUM TABLE %s.%s" %(schema, table)
                    retcode, master_checksum = self.execute_query(query, master_server)
                    retcode, slave_checksum = self.execute_query(query, server)
                    #print "%s: master_checksum= %s | slave_checksum= %s" % ( table
                    #                                                       , master_checksum
                    #                                                       , slave_checksum
                    #                                                       )
                    if not master_checksum == slave_checksum:
                        comp_data = "%s: master_checksum= %s | slave_checksum= %s" % ( table
                                                                                     , master_checksum
                                                                                     , slave_checksum
                                                                                     )
                        if comp_results.has_key(server.name):
                            comp_results[server.name].append(comp_data)
                        else:
                            comp_results[server.name]=[comp_data]
        if comp_results:
            return comp_results
        return None

    def take_mysqldump( self
                      , server
                      , databases=[]
                      , tables=[]
                      , dump_path = None
                      , cmd_root = None):
        """ Take a mysqldump snapshot of the given
            server, storing the output to dump_path
        """
        if not dump_path:
            dump_path = os.path.join(server.vardir, 'dumpfile.dat')
        if cmd_root:
            dump_cmd = cmd_root
        else:
            dump_cmd = "%s --no-defaults --user=root --port=%d --host=127.0.0.1 --protocol=tcp --result-file=%s" % ( server.mysqldump
                                                                                                                   , server.master_port
                                                                                                                   , dump_path
                                                                                                                   )
        if databases:
            if len(databases) > 1:
                # We have a list of db's that are to be dumped so we handle things
                dump_cmd = ' '.join([dump_cmd, '--databases', ' '.join(databases)])
            else:
                # Single schema: optionally restrict to specific tables.
                dump_cmd = ' '.join([dump_cmd, databases[0], ' '.join(tables)])
        self.execute_cmd(dump_cmd, os.devnull)

    def diff_dumpfiles(self, orig_file_path, new_file_path):
        """ diff two dumpfiles useful for comparing servers """
        orig_file = open(orig_file_path,'r')
        restored_file = open(new_file_path,'r')
        orig_file_data = []
        rest_file_data = []
        # Strip the 'Dump completed' timestamp lines, which always differ.
        orig_file_data= self.filter_data(orig_file.readlines(),'Dump completed')
        rest_file_data= self.filter_data(restored_file.readlines(),'Dump completed')
        server_diff = difflib.unified_diff( orig_file_data
                                          , rest_file_data
                                          , fromfile=orig_file_path
                                          , tofile=new_file_path
                                          )
        diff_output = []
        for line in server_diff:
            diff_output.append(line)
        output = '\n'.join(diff_output)
        orig_file.close()
        restored_file.close()
        # Returns (files_match, diff_text).
        return (diff_output==[]), output

    def filter_data(self, input_data, filter_text ):
        # Return input_data minus any lines containing filter_text.
        return_data = []
        for line in input_data:
            if filter_text in line.strip():
                pass
            else:
                return_data.append(line)
        return return_data

    def execute_query( self
                     , query
                     , server
                     , schema='test'):
        # Run a single query; returns (0, result_set) or (1, error_string).
        try:
            conn = MySQLdb.connect( host = '127.0.0.1'
                                  , port = server.master_port
                                  , user = 'root'
                                  , db = schema)
            cursor = conn.cursor()
            cursor.execute(query)
            result_set = cursor.fetchall()
            cursor.close()
        except MySQLdb.Error, e:
            return 1, ("Error %d: %s" %(e.args[0], e.args[1]))
        conn.commit()
        conn.close()
        return 0, result_set

    def execute_queries( self
                       , query_list
                       , server
                       , schema= 'test'):
        """ Execute a set of queries as a single transaction """
        # Results are keyed by query text + list index so duplicate queries
        # don't overwrite each other.
        results = {}
        retcode = 0
        try:
            conn = MySQLdb.connect( host = '127.0.0.1'
                                  , port = server.master_port
                                  , user = 'root'
                                  , db = schema)
            cursor = conn.cursor()
            for idx, query in enumerate(query_list):
                try:
                    cursor.execute(query)
                    result_set = cursor.fetchall()
                except MySQLdb.Error, e:
                    # Record the error in place of the result and mark failure.
                    result_set = "Error %d: %s" %(e.args[0], e.args[1])
                    retcode = 1
                finally:
                    results[query+str(idx)] = result_set
            conn.commit()
            cursor.close()
            conn.close()
        except Exception, e:
            retcode = 1
            results = (Exception, e)
        finally:
            return retcode, results

    def execute_randgen(self, test_cmd, test_executor, servers, schema='test'):
        # Run the random query generator against servers[0]; returns
        # (returncode, output) with output suppressed on quiet success.
        randgen_outfile = os.path.join(test_executor.logdir,'randgen.out')
        randgen_output = open(randgen_outfile,'w')
        server_type = test_executor.master_server.type
        if server_type in ['percona','galera']:
            # it is mysql for dbd::perl purposes
            server_type = 'mysql'
        dsn = "--dsn=dbi:%s:host=127.0.0.1:port=%d:user=root:password="":database=%s" %( server_type
                                                                                       , servers[0].master_port
                                                                                       , schema)
        randgen_cmd = " ".join([test_cmd, dsn])
        randgen_subproc = subprocess.Popen( randgen_cmd
                                          , shell=True
                                          , cwd=test_executor.system_manager.randgen_path
                                          , env=test_executor.working_environment
                                          , stdout = randgen_output
                                          , stderr = subprocess.STDOUT
                                          )
        randgen_subproc.wait()
        retcode = randgen_subproc.returncode
        randgen_output.close()
        randgen_file = open(randgen_outfile,'r')
        output = ''.join(randgen_file.readlines())
        randgen_file.close()
        if retcode == 0:
            if not test_executor.verbose:
                output = None
        return retcode, output
|
globocom/database-as-a-service
|
dbaas/maintenance/async_jobs/remove_instance_database.py
|
from maintenance.async_jobs import BaseJob
from maintenance.models import RemoveInstanceDatabase
__all__ = ('RemoveInstanceDatabase',)
class RemoveInstanceDatabaseJob(BaseJob):
    """Async job that removes a single read-only instance from a database."""

    # NOTE: 'step_manger_class' spelling is part of the BaseJob contract.
    step_manger_class = RemoveInstanceDatabase
    get_steps_method = 'remove_readonly_instance_steps'
    success_msg = 'Instance removed with success'
    error_msg = 'Could not remove instance'

    def __init__(self, request, database, task, instance, since_step=None,
                 step_manager=None, scheduled_task=None,
                 auto_rollback=False, auto_cleanup=False):
        # Everything except the target instance is handled by the base job.
        super(RemoveInstanceDatabaseJob, self).__init__(
            request, database, task, since_step, step_manager,
            scheduled_task, auto_rollback, auto_cleanup)
        self._instance = instance

    @property
    def instances(self):
        # This job always operates on exactly one instance.
        return [self._instance]
|
seymour1/label-virusshare
|
test/test_hashes.py
|
import json
import argparse
import logging
import glob
# Logging Information
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s: %(message)s')
fh = logging.FileHandler('test_hashes.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
parser = argparse.ArgumentParser()
parser.add_argument("hash_num", help="file that we want to verify")
args = parser.parse_args()
hashes = set()
hash_num = args.hash_num
logger.info("Verifying consistency for VirusShare_00" + str(hash_num).zfill(3))
logger.debug("Generating hashes from ../hashes/VirusShare_00" + str(hash_num).zfill(3) + ".md5")
with open(("../hashes/VirusShare_00" + str(hash_num).zfill(3) + ".md5"),'r') as file:
for line in file.readlines()[6:]:
hashes.add(line.strip())
for filename in glob.glob("../analyses/VirusShare_00" + str(hash_num).zfill(3) + ".*"):
logger.debug("Removing hashes from " + filename)
with open(filename,'r') as file:
for line in file.readlines():
hashes.remove(json.loads(line.strip())["md5"])
if len(hashes) == 0:
logger.info("VirusShare_00" + str(hash_num).zfill(3) + ".ldjson is consistent with hashfile")
else:
logger.error("VirusShare_00" + str(hash_num).zfill(3) + ".ldjson is inconsistent with hashfile")
|
neiljdo/readysaster-icannhas-web
|
readysaster-icannhas-web/hazard/utils.py
|
import json
import urllib
import urllib2
from django.core.files import File
from django.conf import settings
from django.contrib.gis.geos import Point
from .models import FloodMap, ReturnPeriod
def get_geoserver_baseurl():
    '''
    Return the NOAH geoserver WMS GetMap base URL with the fixed query
    parameters (service, version, srs, output format) already encoded.

    Callers append the layer-specific parameters (layers, styles, size,
    bbox) themselves — see get_floodmap_kml_file.
    '''
    url = settings.NOAH_GEOSERVER_URL + '?'
    url += urllib.urlencode({
        'request': 'GetMap',
        'service': 'WMS',
        'version': '1.1.1',
        'srs': 'EPSG:4326',
        'format': 'kml',
    })
    return url
def get_floodmap_kml_file(layers, bbox, height=720, width=330, styles=''):
    """Download the flood-map KML for *layers* within *bbox* from geoserver.

    Saves the response under uploads/floodmaps/<layers>.kml and returns the
    (closed) file object it was written to.
    """
    url = get_geoserver_baseurl() + '&' + urllib.urlencode({
        'layers': layers,
        'styles': styles,
        'height': height,
        'width': width,
        'bbox': bbox,
    })
    data = urllib2.urlopen(url)
    path = 'uploads/floodmaps/{}.kml'.format(layers)
    with open(path, 'wb+') as destination:
        # BUG FIX: urllib2 responses have no .chunks() — that is Django's
        # File API — so the original raised AttributeError before writing
        # anything. Stream the body with read() instead.
        while True:
            chunk = data.read(8192)
            if not chunk:
                break
            destination.write(chunk)
    return destination
def get_floodmap_instances(municipality):
    """Fetch NOAH flood maps and upsert FloodMap rows for *municipality*.

    Queries the NOAH flood_maps API, keeps only flood maps whose center
    falls inside the municipality geometry, downloads each matching KML,
    and creates/updates a FloodMap per return period.
    """
    url = settings.NOAH_API_URL + 'flood_maps'
    data = urllib2.urlopen(url)
    data = data.read()
    events = json.loads(data)
    for event in events:
        return_period = event['verbose_name']
        # Assuming that NOAH API uses 'N-Years' format in its verbose names
        return_period = int(return_period.split('-')[0])
        floodmaps = event['flood']
        for floodmap in floodmaps:
            flood_center = floodmap['center']
            layer = floodmap['geoserver_layer']
            flood_center = Point(flood_center['lng'], flood_center['lat'])
            print floodmap
            if municipality.geom.contains(flood_center):
                # NOTE(review): 'return_period' is reused here — the int
                # above is replaced by a ReturnPeriod model instance.
                return_period, created = ReturnPeriod.objects.get_or_create(years=return_period)
                # Build a comma-separated bbox string from the geometry
                # extent (xmin, ymin, xmax, ymax).
                coords = municipality.geom.extent
                bbox = ''
                for coord in coords:
                    bbox += (str(coord) + ',')
                bbox = bbox[:-1]
                print bbox
                map_kml = get_floodmap_kml_file(layer, bbox)
                # Upsert: reuse an existing FloodMap for this municipality
                # and return period, otherwise create one.
                try:
                    fm = FloodMap.objects.get(municipality=municipality, return_period=return_period)
                except FloodMap.DoesNotExist:
                    fm = FloodMap(municipality=municipality, return_period=return_period)
                fm.map_kml = map_kml
                fm.save()
|
sdss/marvin
|
tests/tools/test_map.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2017-07-02
# @Filename: test_map.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: andrews
# @Last modified time: 2019-11-22 12:11:29
import operator
from copy import deepcopy
import matplotlib
import numpy as np
import pytest
from astropy import units as u
from marvin.core.exceptions import MarvinError
from tests import marvin_test_if
from marvin.tools.maps import Maps
from marvin.tools.quantities import EnhancedMap, Map
from marvin.utils.datamodel.dap import datamodel
from marvin.utils.general.maskbit import Maskbit
# ---------------------------------------------------------------------------
# Expected-value fixtures for Map arithmetic tests: two synthetic 2x2
# value/ivar pairs plus the precomputed results of combining them
# (product, log10, summed/propagated inverse variances, powers).
# ---------------------------------------------------------------------------
value1 = np.array([[16.35, 0.8],
                   [0, -10.]])
value2 = np.array([[591., 1e-8],
                   [4., 10]])
value_prod12 = np.array([[9.66285000e+03, 8e-9],
                         [0, -100]])
value_log2 = np.array([[2.77158748, -8.],
                       [0.60205999, 1.]])
ivar1 = np.array([[4, 1],
                  [6.97789734e+36, 1e8]])
ivar2 = np.array([[10, 1e-8],
                  [5.76744385e+36, 0]])
ivar_sum12 = np.array([[2.85714286e+00, 9.99999990e-09],
                       [3.15759543e+36, 0]])
ivar_prod12 = np.array([[1.10616234e-05, 1.56250000e-08],
                        [0, 0.]])
# Expected inverse variances of value1 ** p for various exponents p.
ivar_pow_2 = np.array([[5.23472002e-08, 9.53674316e-01],
                       [0, 25]])
ivar_pow_05 = np.array([[3.66072168e-03, 7.81250000e+00],
                        [0, 0]])
ivar_pow_0 = np.array([[0, 0],
                       [0, 0]])
ivar_pow_m1 = np.array([[4, 1.],
                        [0, 1e+08]])
ivar_pow_m2 = np.array([[2.67322500e+02, 1.6e-01],
                        [0, 2.5e+09]])
ivar_pow_m05 = np.array([[0.97859327, 5],
                         [0, 0]])
ivar_log1 = np.array([[3.67423420e-04, 4.34294482e+07],
                      [4.11019127e-20, 4.34294482e-06]])
# Per-spaxel surface-brightness unit used by the maps under test.
u_flux = u.erg / u.cm**2 / u.s / u.def_unit('spaxel')
u_flux2 = u_flux * u_flux
# Names of every numpy ufunc, used to exercise Map's ufunc handling.
ufuncs = [it for it in dir(np) if isinstance(getattr(np, it), np.ufunc)]
def _get_maps_kwargs(galaxy, data_origin):
if data_origin == 'file':
maps_kwargs = dict(filename=galaxy.mapspath)
else:
maps_kwargs = dict(plateifu=galaxy.plateifu, release=galaxy.release,
bintype=galaxy.bintype, template_kin=galaxy.template,
mode='local' if data_origin == 'db' else 'remote')
return maps_kwargs
@pytest.fixture(scope='function', params=[('emline_gflux', 'ha_6564'),
                                          ('emline_gvel', 'oiii_5008'),
                                          ('stellar_vel', None),
                                          ('stellar_sigma', None)])
def map_(request, galaxy, data_origin):
    """Parametrized fixture yielding one Map per (property, channel) pair.

    Combined with the galaxy/data_origin fixtures, this exercises every
    map property against every data origin (file, db, remote).
    """
    maps = Maps(**_get_maps_kwargs(galaxy, data_origin))
    map_ = maps.getMap(property_name=request.param[0], channel=request.param[1])
    # Tag the quantity so tests can branch on where the data came from.
    map_.data_origin = data_origin
    return map_
class TestMap(object):
    """Basic behaviour of `~marvin.tools.quantities.Map`: construction,
    plotting, (de)serialisation, copying, and invalid-input errors."""

    def test_map(self, map_, galaxy):
        # Shapes of value/ivar/mask must match the galaxy cube footprint,
        # and the masked array must combine value with the boolean mask.
        assert map_.getMaps().release == galaxy.release
        assert tuple(map_.shape) == tuple(galaxy.shape)
        assert map_.value.shape == tuple(galaxy.shape)
        assert map_.ivar.shape == tuple(galaxy.shape)
        assert map_.mask.shape == tuple(galaxy.shape)
        assert (map_.masked.data == map_.value).all()
        assert (map_.masked.mask == map_.mask.astype(bool)).all()
        # SNR is |value| * sqrt(ivar) (ivar is an inverse variance).
        assert map_.snr == pytest.approx(np.abs(map_.value * np.sqrt(map_.ivar)))
        assert datamodel[map_.getMaps()._dapver][map_.datamodel.full()].unit == map_.unit

    def test_plot(self, map_):
        # plot() returns the (figure, axes) pair created by matplotlib.
        fig, ax = map_.plot()
        assert isinstance(fig, matplotlib.figure.Figure)
        assert isinstance(ax, matplotlib.axes._subplots.Subplot)
        assert 'Make single panel map or one panel of multi-panel map plot.' in map_.plot.__doc__

    @marvin_test_if(mark='skip', map_={'data_origin': ['db']})
    def test_save_and_restore(self, temp_scratch, map_):
        # Round-trip through Map.save / Map.restore preserves the shape.
        fout = temp_scratch.join('test_map.mpf')
        map_.save(str(fout))
        assert fout.check() is True
        map_restored = Map.restore(str(fout), delete=True)
        assert tuple(map_.shape) == tuple(map_restored.shape)

    @pytest.mark.parametrize('property_name, channel',
                             [('emline_gflux', 'ha_6564'),
                              ('stellar_vel', None)])
    def test_deepcopy(self, galaxy, property_name, channel):
        # deepcopy must clone every public attribute; each attribute type
        # needs its own comparison strategy (arrays, masked arrays,
        # Maskbit objects, and the parent Maps, which is skipped).
        maps = Maps(plateifu=galaxy.plateifu)
        map1 = maps.getMap(property_name=property_name, channel=channel)
        map2 = deepcopy(map1)
        for attr in vars(map1):
            if not attr.startswith('_'):
                value = getattr(map1, attr)
                value2 = getattr(map2, attr)
                if isinstance(value, np.ndarray):
                    assert np.isclose(value, value2).all()
                elif isinstance(value, np.ma.core.MaskedArray):
                    assert (np.isclose(value.data, value2.data).all() and
                            (value.mask == value2.mask).all())
                elif isinstance(value, Maskbit) or isinstance(value[0], Maskbit):
                    if isinstance(value, Maskbit):
                        value = [value]
                        value2 = [value2]
                    for mb, mb2 in zip(value, value2):
                        for it in ['bits', 'description', 'labels', 'mask', 'name']:
                            assert getattr(mb, it) == getattr(mb2, it)
                        assert (mb.schema == mb2.schema).all().all()
                elif isinstance(value, Maps):
                    pass
                else:
                    assert value == value2, attr

    def test_getMap_invalid_property(self, galaxy):
        maps = Maps(plateifu=galaxy.plateifu)
        with pytest.raises(ValueError) as ee:
            maps.getMap(property_name='mythical_property')
        assert 'Your input value is too ambiguous.' in str(ee.value)

    def test_getMap_invalid_channel(self, galaxy):
        maps = Maps(plateifu=galaxy.plateifu)
        with pytest.raises(ValueError) as ee:
            maps.getMap(property_name='emline_gflux', channel='mythical_channel')
        assert 'Your input value is too ambiguous.' in str(ee.value)

    # NOTE(review): method name has a typo ('quatities' -> 'quantities');
    # renaming it would change pytest reporting only.
    @marvin_test_if(mark='include', maps={'plateifu': '8485-1901',
                                          'release': 'MPL-6',
                                          'mode': 'local',
                                          'data_origin': 'file'})
    def test_quatities_reorder(self, maps):
        """Asserts the unit survives a quantity reorder (issue #374)."""
        ha = maps['emline_gflux_ha']
        assert ha is not None
        assert ha.unit is not None
        reordered_ha = np.moveaxis(ha, 0, -1)
        assert reordered_ha.unit is not None

    @marvin_test_if(mark='include', maps={'plateifu': '8485-1901',
                                          'release': 'MPL-6',
                                          'bintype': ['SPX']})
    def test_get_spaxel(self, maps):
        """Tests `.Map.getSpaxel`."""
        ha = maps['emline_gflux_ha']
        spaxel = ha.getSpaxel(x=10, y=10, xyorig='lower')
        assert spaxel is not None
        assert spaxel.x == 10 and spaxel.y == 10

    @marvin_test_if(mark='skip', galaxy=dict(release=['MPL-6']))
    def test_stellar_sigma_values(self, maps, galaxy):
        ''' Assert values for stellar_sigma and stellar_sigmacorr are different (issue #411) '''
        ss = maps.stellar_sigma
        sc = maps.stellar_sigmacorr
        compare = sum(ss.value == sc.value)
        assert len(np.unique(compare)) > 1
        # Spot-check one spaxel against the reference values in the galaxy
        # fixture's DAP data.
        x = galaxy.dap['x']
        y = galaxy.dap['y']
        ssvalue = galaxy.dap['stellar_sigma'][galaxy.bintype.name]
        scvalue = galaxy.dap['stellar_sigmacorr'][galaxy.bintype.name]
        assert ssvalue == pytest.approx(ss[x, y].value, 1e-4)
        assert scvalue == pytest.approx(sc[x, y].value, 1e-4)

    def test_datamodel(self, maps):
        gew_ha = maps.emline_gew_ha_6564
        assert gew_ha.datamodel.description == ('Gaussian-fitted equivalent widths measurements '
                                                '(based on EMLINE_GFLUX). Channel = H-alpha 6564.')

    @marvin_test_if(mark='include', galaxy=dict(release=['MPL-6']))
    def test_stellar_sigma_mpl6(self, maps, galaxy):
        # In MPL-6 accessing stellar_sigmacorr must raise rather than
        # return an unreliable correction.
        with pytest.raises(MarvinError) as cm:
            __ = maps.stellar_sigmacorr
        assert 'stellar_sigmacorr is unreliable in MPL-6. Please use MPL-7.' in str(cm.value)
class TestMapArith(object):
    """Arithmetic on Map objects: scalar ops, map-map ops, powers, logs,
    unit propagation, and the instrumental sigma corrections."""

    def test_add_constant(self, galaxy):
        # Adding a scalar shifts values; ivar and mask are unchanged.
        maps = Maps(plateifu=galaxy.plateifu)
        ha = maps['emline_gflux_ha_6564']
        ha10 = ha + 10.
        assert ha10.value == pytest.approx(ha.value + 10.)
        assert ha10.ivar == pytest.approx(ha.ivar)
        assert ha10.mask == pytest.approx(ha.mask)

    def test_reflexive_add_constant(self, galaxy):
        maps = Maps(plateifu=galaxy.plateifu)
        ha = maps['emline_gflux_ha_6564']
        ha10 = 10. + ha
        assert ha10.value == pytest.approx(ha.value + 10.)
        assert ha10.ivar == pytest.approx(ha.ivar)
        assert ha10.mask == pytest.approx(ha.mask)

    def test_subtract_constant(self, galaxy):
        maps = Maps(plateifu=galaxy.plateifu)
        ha = maps['emline_gflux_ha_6564']
        ha10 = ha - 10.
        assert ha10.value == pytest.approx(ha.value - 10.)
        assert ha10.ivar == pytest.approx(ha.ivar)
        assert ha10.mask == pytest.approx(ha.mask)

    def test_reflexive_subtract_constant(self, galaxy):
        maps = Maps(plateifu=galaxy.plateifu)
        ha = maps['emline_gflux_ha_6564']
        ha10 = 10. - ha
        assert ha10.value == pytest.approx(10. - ha.value)
        assert ha10.ivar == pytest.approx(ha.ivar)
        assert ha10.mask == pytest.approx(ha.mask)

    def test_multiply_constant(self, galaxy):
        # Scaling by c divides the inverse variance by c**2.
        maps = Maps(plateifu=galaxy.plateifu)
        ha = maps['emline_gflux_ha_6564']
        ha10 = ha * 10.
        assert ha10.value == pytest.approx(ha.value * 10.)
        assert ha10.ivar == pytest.approx(ha.ivar / 10.**2)
        assert ha10.mask == pytest.approx(ha.mask)

    def test_reflexive_multiply_constant(self, galaxy):
        maps = Maps(plateifu=galaxy.plateifu)
        ha = maps['emline_gflux_ha_6564']
        ha10 = 10. * ha
        assert ha10.value == pytest.approx(ha.value * 10.)
        assert ha10.ivar == pytest.approx(ha.ivar / 10.**2)
        assert ha10.mask == pytest.approx(ha.mask)

    def test_divide_constant(self, galaxy):
        maps = Maps(plateifu=galaxy.plateifu)
        ha = maps['emline_gflux_ha_6564']
        ha10 = ha / 10.
        assert ha10.value == pytest.approx(ha.value / 10.)
        assert ha10.ivar == pytest.approx(ha.ivar * 10.**2)
        assert ha10.mask == pytest.approx(ha.mask)

    def test_reflexive_divide_constant(self, galaxy):
        # NOTE(review): this pins current behaviour — the ivar of 10./ha is
        # asserted unchanged, i.e. no error propagation on __rtruediv__;
        # confirm this is intentional in Map's implementation.
        maps = Maps(plateifu=galaxy.plateifu)
        ha = maps['emline_gflux_ha_6564']
        ha10 = 10. / ha
        assert ha10.value == pytest.approx(10. / ha.value)
        assert ha10.ivar == pytest.approx(ha.ivar)
        assert ha10.mask == pytest.approx(ha.mask)

    @pytest.mark.parametrize('ivar1, ivar2, expected',
                             [(ivar1, ivar2, ivar_sum12)])
    def test_add_ivar(self, ivar1, ivar2, expected):
        assert Map._add_ivar(ivar1, ivar2) == pytest.approx(expected)

    @pytest.mark.parametrize('ivar1, ivar2, value1, value2, value_prod12, expected',
                             [(ivar1, ivar2, value1, value2, value_prod12, ivar_prod12)])
    def test_mul_ivar(self, ivar1, ivar2, value1, value2, value_prod12, expected):
        # NaN/inf entries (from zero ivars/values) are zeroed before comparing.
        ivar = Map._mul_ivar(ivar1, ivar2, value1, value2, value_prod12)
        ivar[np.isnan(ivar)] = 0
        ivar[np.isinf(ivar)] = 0
        assert ivar == pytest.approx(expected)

    @pytest.mark.parametrize('power, expected',
                             [(2, ivar_pow_2),
                              (0.5, ivar_pow_05),
                              (0, ivar_pow_0),
                              (-1, ivar_pow_m1),
                              (-2, ivar_pow_m2),
                              (-0.5, ivar_pow_m05)])
    @pytest.mark.parametrize('ivar, value,',
                             [(ivar1, value1)])
    def test_pow_ivar(self, ivar, value, power, expected):
        ivar = Map._pow_ivar(ivar, value, power)
        ivar[np.isnan(ivar)] = 0
        ivar[np.isinf(ivar)] = 0
        assert ivar == pytest.approx(expected)

    @pytest.mark.parametrize('power', [2, 0.5, 0, -1, -2, -0.5])
    def test_pow_ivar_none(self, power):
        # A missing ivar propagates as all-zero ivar regardless of power.
        assert Map._pow_ivar(None, np.arange(4), power) == pytest.approx(np.zeros(4))

    @pytest.mark.parametrize('ivar, value, expected',
                             [(ivar1, value2, ivar_log1)])
    def test_log10_ivar(self, ivar, value, expected):
        actual = Map._log10_ivar(ivar, value)
        assert actual == pytest.approx(expected)

    def test_log10(self, maps_release_only):
        # NOTE(review): nii is divided by itself here (ratio == 1); the
        # assertions are self-consistent either way, but this may have
        # been intended as nii / ha — confirm.
        niiha = maps_release_only.emline_gflux_nii_6585 / maps_release_only.emline_gflux_nii_6585
        log_niiha = np.log10(niiha)
        # Expected ivar of log10(x): log10(e) * sigma / x, folded into ivar.
        ivar = np.log10(np.e) * niiha.ivar**-0.5 / niiha.value
        assert log_niiha.value == pytest.approx(np.log10(niiha.value), nan_ok=True)
        assert log_niiha.ivar == pytest.approx(ivar, nan_ok=True)
        assert (log_niiha.mask == niiha.mask).all()
        assert log_niiha.unit == u.dimensionless_unscaled

    @pytest.mark.runslow
    @marvin_test_if(mark='skip', ufunc=['log10'])
    @pytest.mark.parametrize('ufunc', ufuncs)
    def test_np_ufunc_notimplemented(self, maps_release_only, ufunc):
        # Every numpy ufunc except log10 must raise NotImplementedError
        # with a descriptive message when applied to a Map.
        ha = maps_release_only.emline_gflux_ha_6564
        nii = maps_release_only.emline_gflux_nii_6585
        with pytest.raises(NotImplementedError) as ee:
            if getattr(getattr(np, ufunc), 'nargs') <= 2:
                getattr(np, ufunc)(ha)
            else:
                getattr(np, ufunc)(nii, ha)
        expected = 'np.{0} is not implemented for Map.'.format(getattr(np, ufunc).__name__)
        assert str(ee.value) == expected

    @pytest.mark.parametrize('unit1, unit2, op, expected',
                             [(u_flux, u_flux, '+', u_flux),
                              (u_flux, u_flux, '-', u_flux),
                              (u_flux, u_flux, '*', u_flux2),
                              (u_flux, u_flux, '/', u.dimensionless_unscaled),
                              (u.km, u.s, '*', u.km * u.s),
                              (u.km, u.s, '/', u.km / u.s)])
    def test_unit_propagation(self, unit1, unit2, op, expected):
        assert Map._unit_propagation(unit1, unit2, op) == expected

    @pytest.mark.parametrize('unit1, unit2, op',
                             [(u_flux, u.km, '+'),
                              (u_flux, u.km, '-')])
    def test_unit_propagation_mismatch(self, unit1, unit2, op):
        # Adding/subtracting incompatible units warns and yields no unit.
        with pytest.warns(UserWarning):
            assert Map._unit_propagation(unit1, unit2, op) is None

    @pytest.mark.parametrize('property1, channel1, property2, channel2',
                             [('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
                              ('emline_gvel', 'ha_6564', 'stellar_vel', None)])
    def test_add_maps(self, galaxy, property1, channel1, property2, channel2):
        maps = Maps(plateifu=galaxy.plateifu)
        map1 = maps.getMap(property_name=property1, channel=channel1)
        map2 = maps.getMap(property_name=property2, channel=channel2)
        map12 = map1 + map2
        assert map12.value == pytest.approx(map1.value + map2.value)
        assert map12.ivar == pytest.approx(map1._add_ivar(map1.ivar, map2.ivar))
        # Masks combine with bitwise OR.
        assert map12.mask == pytest.approx(map1.mask | map2.mask)

    @pytest.mark.parametrize('property1, channel1, property2, channel2',
                             [('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
                              ('emline_gvel', 'ha_6564', 'stellar_vel', None)])
    def test_subtract_maps(self, galaxy, property1, channel1, property2, channel2):
        maps = Maps(plateifu=galaxy.plateifu)
        map1 = maps.getMap(property_name=property1, channel=channel1)
        map2 = maps.getMap(property_name=property2, channel=channel2)
        map12 = map1 - map2
        assert map12.value == pytest.approx(map1.value - map2.value)
        assert map12.ivar == pytest.approx(map1._add_ivar(map1.ivar, map2.ivar))
        assert map12.mask == pytest.approx(map1.mask | map2.mask)

    @pytest.mark.parametrize('property1, channel1, property2, channel2',
                             [('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
                              ('emline_gvel', 'ha_6564', 'stellar_vel', None)])
    def test_multiply_maps(self, galaxy, property1, channel1, property2, channel2):
        maps = Maps(plateifu=galaxy.plateifu)
        map1 = maps.getMap(property_name=property1, channel=channel1)
        map2 = maps.getMap(property_name=property2, channel=channel2)
        map12 = map1 * map2
        ivar = map1._mul_ivar(map1.ivar, map2.ivar, map1.value, map2.value, map12.value)
        ivar[np.isnan(ivar)] = 0
        ivar[np.isinf(ivar)] = 0
        assert map12.value == pytest.approx(map1.value * map2.value)
        assert map12.ivar == pytest.approx(ivar)
        assert map12.mask == pytest.approx(map1.mask | map2.mask)

    @pytest.mark.parametrize('property1, channel1, property2, channel2',
                             [('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
                              ('emline_gvel', 'ha_6564', 'stellar_vel', None)])
    def test_divide_maps(self, galaxy, property1, channel1, property2, channel2):
        maps = Maps(plateifu=galaxy.plateifu)
        map1 = maps.getMap(property_name=property1, channel=channel1)
        map2 = maps.getMap(property_name=property2, channel=channel2)
        map12 = map1 / map2
        ivar = map1._mul_ivar(map1.ivar, map2.ivar, map1.value, map2.value, map12.value)
        ivar[np.isnan(ivar)] = 0
        ivar[np.isinf(ivar)] = 0
        # Division by zero produces NaN/inf pixels, which must be flagged
        # DONOTUSE in the combined mask.
        mask = map1.mask | map2.mask
        bad = np.isnan(map12.value) | np.isinf(map12.value)
        mask[bad] = mask[bad] | map12.pixmask.labels_to_value('DONOTUSE')
        with np.errstate(divide='ignore', invalid='ignore'):
            assert map12.value == pytest.approx(map1.value / map2.value, nan_ok=True)
        assert map12.ivar == pytest.approx(ivar)
        assert map12.mask == pytest.approx(mask)

    @pytest.mark.runslow
    @pytest.mark.parametrize('power', [2, 0.5, 0, -1, -2, -0.5])
    @pytest.mark.parametrize('property_name, channel',
                             [('emline_gflux', 'ha_6564'),
                              ('stellar_vel', None)])
    def test_pow(self, galaxy, property_name, channel, power):
        maps = Maps(plateifu=galaxy.plateifu)
        map_orig = maps.getMap(property_name=property_name, channel=channel)
        map_new = map_orig**power
        # Standard error propagation for x**p: sigma_new = |p| * x**p * sigma/x.
        sig_orig = np.sqrt(1. / map_orig.ivar)
        sig_new = map_new.value * power * sig_orig * map_orig.value
        ivar_new = 1 / sig_new**2.
        ivar_new[np.isnan(ivar_new)] = 0
        ivar_new[np.isinf(ivar_new)] = 0
        assert map_new.value == pytest.approx(map_orig.value**power, nan_ok=True)
        assert map_new.ivar == pytest.approx(ivar_new)
        assert (map_new.mask == map_orig.mask).all()

    @marvin_test_if(mark='skip', galaxy=dict(release=['MPL-4', 'MPL-6']))
    def test_stellar_sigma_correction(self, galaxy):
        # Corrected sigma is sqrt(sigma**2 - correction**2); pixels where
        # the correction exceeds sigma are zeroed.
        maps = Maps(plateifu=galaxy.plateifu)
        stsig = maps['stellar_sigma']
        stsigcorr = maps['stellar_sigmacorr']
        expected = (stsig**2 - stsigcorr**2)**0.5
        expected.ivar = (expected.value / stsig.value) * stsig.ivar
        expected.ivar[stsig.ivar == 0] = 0
        expected.ivar[stsigcorr.value >= stsig.value] = 0
        expected.value[stsigcorr.value >= stsig.value] = 0
        actual = stsig.inst_sigma_correction()
        assert actual.value == pytest.approx(expected.value, nan_ok=True)
        assert actual.ivar == pytest.approx(expected.ivar)
        assert (actual.mask == expected.mask).all()
        assert actual.datamodel == stsig.datamodel

    @marvin_test_if(mark='include', galaxy=dict(release=['MPL-4', 'MPL-6']))
    def test_stellar_sigma_correction_MPL4(self, galaxy):
        # MPL-4 and MPL-6 must refuse the correction, each with its own message.
        maps = Maps(plateifu=galaxy.plateifu)
        stsig = maps['stellar_sigma']
        if galaxy.release == 'MPL-4':
            errmsg = 'Instrumental broadening correction not implemented for MPL-4.'
        elif galaxy.release == 'MPL-6':
            errmsg = 'The stellar sigma corrections in MPL-6 are unreliable. Please use MPL-7.'
        with pytest.raises(MarvinError) as ee:
            stsig.inst_sigma_correction()
        assert errmsg in str(ee.value)

    def test_stellar_sigma_correction_invalid_property(self, galaxy):
        maps = Maps(plateifu=galaxy.plateifu)
        ha = maps['emline_gflux_ha_6564']
        with pytest.raises(MarvinError) as ee:
            ha.inst_sigma_correction()
        assert ('Cannot correct {0}_{1} '.format(ha.datamodel.name, ha.datamodel.channel) +
                'for instrumental broadening.') in str(ee.value)

    def test_emline_sigma_correction(self, galaxy):
        # Same quadrature-subtraction contract as the stellar correction,
        # using the emission-line instrumental sigma map.
        maps = Maps(plateifu=galaxy.plateifu)
        hasig = maps['emline_gsigma_ha_6564']
        emsigcorr = maps['emline_instsigma_ha_6564']
        expected = (hasig**2 - emsigcorr**2)**0.5
        expected.ivar = (expected.value / hasig.value) * hasig.ivar
        expected.ivar[hasig.ivar == 0] = 0
        expected.ivar[emsigcorr.value >= hasig.value] = 0
        expected.value[emsigcorr.value >= hasig.value] = 0
        actual = hasig.inst_sigma_correction()
        assert actual.value == pytest.approx(expected.value, nan_ok=True)
        assert actual.ivar == pytest.approx(expected.ivar)
        assert (actual.mask == expected.mask).all()
        assert actual.datamodel == hasig.datamodel

    @marvin_test_if(mark='skip', galaxy=dict(release=['MPL-4', 'MPL-5']))
    @pytest.mark.parametrize('channel, op',
                             [('hb', '*'),
                              ('d4000', '*'),
                              ('cn1', '+'),
                              ])
    def test_specindex_sigma_correction(self, galaxy, channel, op):
        # NOTE(review): 'specindex_corr' + channel has no underscore before
        # the channel (vs 'specindex_' + channel above) — relies on Maps'
        # fuzzy key lookup; confirm this resolves to the intended map.
        maps = Maps(plateifu=galaxy.plateifu)
        si = maps['specindex_' + channel]
        sicorr = maps['specindex_corr' + channel]
        # The correction is applied multiplicatively or additively
        # depending on the spectral index.
        ops = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv}
        expected = ops[op](si, sicorr)
        actual = si.specindex_correction()
        assert actual.value == pytest.approx(expected.value, nan_ok=True)
        assert actual.ivar == pytest.approx(expected.ivar)
        assert (actual.mask == expected.mask).all()
        assert actual.datamodel == si.datamodel
class TestMaskbit(object):
    """Maskbit handling on Map: masked arrays, value/bit/label conversion,
    and the presence of the targeting/pixel flag attributes.  MPL-4 uses a
    single-bit mask scheme, hence the separate *_mpl4 variants."""

    def test_masked(self, maps_release_only):
        # The masked array must hide exactly the pixels flagged by the
        # datamodel's default plotting bitmasks.
        params = maps_release_only.datamodel.parent.get_default_plot_params()
        ha = maps_release_only['emline_gflux_ha_6564']
        expected = ha.pixmask.get_mask(params['default']['bitmasks'], dtype=bool)
        assert ha.masked.data == pytest.approx(ha.value)
        assert (ha.masked.mask == expected).all()

    @marvin_test_if(mark='include', maps_release_only=dict(release=['MPL-4']))
    def test_values_to_bits_mpl4(self, maps_release_only):
        ha = maps_release_only['emline_gflux_ha_6564']
        assert ha.pixmask.values_to_bits(1) == [0]

    @marvin_test_if(mark='skip', maps_release_only=dict(release=['MPL-4']))
    def test_values_to_bits(self, maps_release_only):
        ha = maps_release_only['emline_gflux_ha_6564']
        # 3 == bit 0 + bit 1.
        assert ha.pixmask.values_to_bits(3) == [0, 1]

    @marvin_test_if(mark='include', maps_release_only=dict(release=['MPL-4']))
    def test_values_to_labels_mpl4(self, maps_release_only):
        ha = maps_release_only['emline_gflux_ha_6564']
        assert ha.pixmask.values_to_labels(1) == ['DONOTUSE']

    @marvin_test_if(mark='skip', maps_release_only=dict(release=['MPL-4']))
    def test_values_to_labels(self, maps_release_only):
        ha = maps_release_only['emline_gflux_ha_6564']
        assert ha.pixmask.values_to_labels(3) == ['NOCOV', 'LOWCOV']

    @marvin_test_if(mark='include', maps_release_only=dict(release=['MPL-4']))
    def test_labels_to_value_mpl4(self, maps_release_only):
        ha = maps_release_only['emline_gflux_ha_6564']
        assert ha.pixmask.labels_to_value('DONOTUSE') == 1

    @marvin_test_if(mark='skip', maps_release_only=dict(release=['MPL-4']))
    @pytest.mark.parametrize('names, expected',
                             [(['NOCOV', 'LOWCOV'], 3),
                              ('DONOTUSE', 1073741824)])
    def test_labels_to_value(self, maps_release_only, names, expected):
        # Accepts a single label or a list of labels; DONOTUSE is bit 30.
        ha = maps_release_only['emline_gflux_ha_6564']
        assert ha.pixmask.labels_to_value(names) == expected

    @pytest.mark.parametrize('flag',
                             ['manga_target1',
                              'manga_target2',
                              'manga_target3',
                              'target_flags',
                              'pixmask'])
    def test_flag(self, flag, maps_release_only):
        # Every flag accessor must exist on the Map.
        ha = maps_release_only['emline_gflux_ha_6564']
        assert getattr(ha, flag, None) is not None
class TestEnhancedMap(object):
    """Tests for maps produced by map arithmetic (``EnhancedMap``)."""

    def test_overridden_methods(self, galaxy):
        """Map-construction helpers must be disabled on an EnhancedMap."""
        galaxy_maps = Maps(plateifu=galaxy.plateifu)
        ha = galaxy_maps['emline_gflux_ha_6564']
        nii = galaxy_maps['emline_gflux_nii_6585']
        ratio = nii / ha
        assert isinstance(ratio, EnhancedMap)

        disabled = ('_init_map_from_maps', '_get_from_file', '_get_from_db',
                    '_get_from_api', 'inst_sigma_correction')
        for name in disabled:
            with pytest.raises(AttributeError) as ee:
                getattr(ratio, name)()
            assert "'EnhancedMap' has no attribute '{}'.".format(name) in str(ee.value)
|
hep-cce/hpc-edge-service
|
argo/test_jobs/test_submit_alpgen.py
|
#!/usr/bin/env python
import sys,logging,optparse
from AlpgenArgoJob import AlpgenArgoJob
sys.path.append('/users/hpcusers/balsam/argo_deploy/argo_core')
from MessageInterface import MessageInterface
def main():
    """Parse command-line options and prepare an AlpGen job submission to ARGO.

    Every option is mandatory; a missing value triggers ``parser.error``,
    which prints a usage message and exits.  On success the job working
    directory ``<cwd>/<jobID>`` is created (recursively) if needed.
    """
    # The original module imported only sys/logging/optparse, so the body
    # below crashed with NameError on `os` (and `time` is needed for the
    # task-id fix); import them locally to keep the module header untouched.
    import os
    import time

    logger = logging.getLogger(__name__)

    parser = optparse.OptionParser(description='submit alpgen job to ARGO')
    parser.add_option('-e','--evts-per-iter',dest='evts_per_iter',help='number of events per warmup iteration',type='int')
    parser.add_option('-i','--num-iter',dest='numiters',help='number of iterations for the warmup',type='int')
    parser.add_option('-w','--warmup-weighted',dest='num_warmup',help='number of event to in the warmup, after the iterations complete',type='int')
    parser.add_option('-n','--num-weighted',dest='num_weighted',help='number of weighted events to generate.',type='int')
    parser.add_option('-p','--process',dest='process',help='define the process to generate, 2Q,4Q,hjet,top,wjet,zjet,Njet,etc.')
    parser.add_option('-o','--num-nodes',dest='numnodes',help='number of nodes to use on destination machine',type='int')
    parser.add_option('-c','--cpus-per-node',dest='cpus_per_node',help='number of CPUs per node to use on destination machine',type='int')
    parser.add_option('-a','--alpgen-input',dest='alpgen_input_file',help='The AlpGen input file which carries all the options for this generation job')
    parser.add_option('-t','--wall-time',dest='walltime',help='The wall time to submit to the queue in minutes.',type='int')
    # The original read options.resubmitjobid without ever declaring the
    # option, which raised AttributeError; declare it (optional).
    parser.add_option('-r','--resubmit-jobid',dest='resubmitjobid',
                      help='job id of a previous submission to resubmit',type='int')
    options,args = parser.parse_args()

    # All generation parameters are required.
    if options.numiters is None:
        parser.error('Must define the number of warmup iterations')
    if options.process is None:
        parser.error('Must define the process to generate')
    if options.numnodes is None:
        parser.error('Must define the number of nodes to use')
    if options.cpus_per_node is None:
        parser.error('Must define the number of CPUs per node to use')
    if options.evts_per_iter is None:
        parser.error('Must define the number of events per warmup iteration')
    if options.num_weighted is None:
        parser.error('Must define the number of weighted events to produce')
    if options.num_warmup is None:
        parser.error('Must define the number of weighted events to produce in the warmup step.')
    if options.alpgen_input_file is None:
        parser.error('Must define the AlpGen input file')
    if options.walltime is None:
        parser.error('Must specify a wall time')

    user = os.environ.get('USER','nobody')
    if(user == 'apf'): # AutoPyFactory
        user= os.environ.get('prodUserID','nobody')

    # NOTE(review): the original referenced an undefined ``taskID``.  A
    # millisecond timestamp is used as a unique task id here — TODO confirm
    # against the ARGO job-numbering convention.
    taskID = str(int(time.time() * 1000))
    jobID = taskID + '0'
    if options.resubmitjobid is not None:
        jobID = int(options.resubmitjobid)

    TOP_PATH = os.getcwd() # directory in which script was run
    RUNPATH = os.path.join(TOP_PATH,str(jobID)) # directory in which to store files
    if not os.path.exists(RUNPATH):
        os.makedirs(RUNPATH) # make directories recursively like 'mkdir -p'
    logger.info('JobID: ' + str(jobID))


if __name__ == '__main__':
    main()
|
cluck/dnspython
|
tests/test_namedict.py
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
try:
import unittest2 as unittest
except ImportError:
import unittest
import dns.name
import dns.namedict
class NameTestCase(unittest.TestCase):
def setUp(self):
self.ndict = dns.namedict.NameDict()
n1 = dns.name.from_text('foo.bar.')
n2 = dns.name.from_text('bar.')
self.ndict[n1] = 1
self.ndict[n2] = 2
self.rndict = dns.namedict.NameDict()
n1 = dns.name.from_text('foo.bar', None)
n2 = dns.name.from_text('bar', None)
self.rndict[n1] = 1
self.rndict[n2] = 2
def testDepth(self):
self.failUnless(self.ndict.max_depth == 3)
def testLookup1(self):
k = dns.name.from_text('foo.bar.')
self.failUnless(self.ndict[k] == 1)
def testLookup2(self):
k = dns.name.from_text('foo.bar.')
self.failUnless(self.ndict.get_deepest_match(k)[1] == 1)
def testLookup3(self):
k = dns.name.from_text('a.b.c.foo.bar.')
self.failUnless(self.ndict.get_deepest_match(k)[1] == 1)
def testLookup4(self):
k = dns.name.from_text('a.b.c.bar.')
self.failUnless(self.ndict.get_deepest_match(k)[1] == 2)
def testLookup5(self):
def bad():
n = dns.name.from_text('a.b.c.')
(k, v) = self.ndict.get_deepest_match(n)
self.failUnlessRaises(KeyError, bad)
def testLookup6(self):
def bad():
(k, v) = self.ndict.get_deepest_match(dns.name.empty)
self.failUnlessRaises(KeyError, bad)
def testLookup7(self):
self.ndict[dns.name.empty] = 100
n = dns.name.from_text('a.b.c.')
(k, v) = self.ndict.get_deepest_match(n)
self.failUnless(v == 100)
def testLookup8(self):
def bad():
self.ndict['foo'] = 100
self.failUnlessRaises(ValueError, bad)
def testRelDepth(self):
self.failUnless(self.rndict.max_depth == 2)
def testRelLookup1(self):
k = dns.name.from_text('foo.bar', None)
self.failUnless(self.rndict[k] == 1)
def testRelLookup2(self):
k = dns.name.from_text('foo.bar', None)
self.failUnless(self.rndict.get_deepest_match(k)[1] == 1)
def testRelLookup3(self):
k = dns.name.from_text('a.b.c.foo.bar', None)
self.failUnless(self.rndict.get_deepest_match(k)[1] == 1)
def testRelLookup4(self):
k = dns.name.from_text('a.b.c.bar', None)
self.failUnless(self.rndict.get_deepest_match(k)[1] == 2)
def testRelLookup7(self):
self.rndict[dns.name.empty] = 100
n = dns.name.from_text('a.b.c', None)
(k, v) = self.rndict.get_deepest_match(n)
self.failUnless(v == 100)
if __name__ == '__main__':
unittest.main()
|
metamarcdw/PyBitmessage-I2P
|
src/i2p/test/test_socket.py
|
# --------------------------------------------------------
# test_socket.py: Unit tests for socket, select.
# --------------------------------------------------------
# Make sure we can import i2p
import sys; sys.path += ['../../']
import traceback, time, thread, threading, random, copy
from i2p import socket, select
def test_passed(s, msg='OK'):
    """Notify user that the given unit test passed."""
    # Python 2 print statement: pad the test name into a 50-char column.
    print ' ' + (s + ':').ljust(50) + msg
def verify_html(s):
    """Raise an error if s does not end with </html>"""
    # Case-insensitive check after trimming surrounding whitespace.
    cleaned = s.strip().lower()
    assert cleaned.endswith('</html>')
def resolve_test(name='duck.i2p'):
    """Unit test for resolve."""
    try:
        rname = socket.resolve(name)
    except:
        # NOTE(review): bare except is deliberate — any failure aborts the
        # whole test harness after printing the traceback.
        print 'Unit test failed for socket.resolve'
        traceback.print_exc(); sys.exit()
    test_passed('socket.resolve', 'See below')
    # The destination cannot be checked programmatically here, so ask the
    # user to compare it against hosts.txt manually.
    print ' Use hosts.txt to verify that ' + name + '=' + \
        rname[:15] + '...'
def stream_client(dest):
    """Sub-unit test for socket.socket in SOCK_STREAM mode."""
    conn = socket.socket('Alice', socket.SOCK_STREAM)
    conn.connect(dest)
    # Issue a minimal HTTP request over the I2P stream.
    conn.send('GET / HTTP/1.0\r\n\r\n')
    reader = conn.makefile()
    # Skip header lines until the blank separator line.
    while reader.readline().strip() != '':
        pass
    body = reader.read()
    reader.close()
    conn.close()
def stream_client_test():
    """Unit test for socket.socket in SOCK_STREAM mode."""
    host = 'duck.i2p'
    # Exercise every accepted destination spelling, in the same order
    # as the original test: with/without scheme and trailing slash,
    # then the resolved destination itself.
    targets = ('http://' + host + '/',
               host,
               host + '/',
               'http://' + host,
               socket.resolve('http://' + host + '/'))
    for target in targets:
        stream_client(target)
    test_passed('socket.socket stream client')
def packet_test(raw=True):
    """Unit test for socket.socket in SOCK_DGRAM or SOCK_RAW modes."""
    # Stress test: n threads each exchange m random packets between two
    # endpoints C and D, logging what each side *should* receive, then the
    # received sets are compared against the logs.  Module-level globals
    # (C_recv, D_recv, C_got, D_got, __lock, __done_count) are shared
    # between the worker threads and this coordinator.
    try:
        multithread_wait_time = 500.0
        may_need_increase = False
        kwargs = {'in_depth': 0, 'out_depth': 0}
        if raw:
            C = socket.socket('Carola', socket.SOCK_RAW, **kwargs)
            D = socket.socket('Davey', socket.SOCK_RAW, **kwargs)
        else:
            C = socket.socket('Carol', socket.SOCK_DGRAM, **kwargs)
            D = socket.socket('Dave', socket.SOCK_DGRAM, **kwargs)
        global C_recv, D_recv, C_got, D_got, __lock
        C_recv = [] # Packets C *should* receive
        D_recv = [] # Packets D *should* receive
        C_got = [] # Packets C actually got
        D_got = [] # Packets D actually got
        n = 50 # Create n threads
        m = 40 # Each thread sends m packets
        global __done_count
        __done_count = 0
        __lock = threading.Lock()
        # Use C and D to send and read in many different threads.
        def f():
            # This code is run in each separate thread
            global C_recv, D_recv, C_got, D_got, __lock, __done_count
            for i in range(m):
                # Random binary string of length 2-80.
                index_list = range(random.randrange(2, 80))
                s = ''.join([chr(random.randrange(256)) for j in index_list])
                if random.randrange(2) == 0:
                    # Send packet from C to D, and log it.
                    C.sendto(s, 0, D.dest)
                    __lock.acquire()
                    D_recv += [s]
                    __lock.release()
                else:
                    # Send packet from D to C, and log it.
                    D.sendto(s, 0, C.dest)
                    __lock.acquire()
                    C_recv += [s]
                    __lock.release()
                time.sleep(0.01*random.uniform(0.0,1.0))
                # Read any available packets.  Raw sockets carry no sender
                # address, so fromaddr is only checked in datagram mode.
                try: (p, fromaddr) = C.recvfrom(1000, socket.MSG_DONTWAIT)
                except socket.BlockError: p = None
                if p != None and not raw: assert fromaddr == D.dest
                __lock.acquire()
                if p != None: C_got += [p]
                __lock.release()
                try: (p, fromaddr) = D.recvfrom(1000, socket.MSG_DONTWAIT)
                except socket.BlockError: p = None
                if p != None and not raw: assert fromaddr == C.dest
                __lock.acquire()
                if p != None: D_got += [p]
                __lock.release()
            __lock.acquire()
            __done_count += 1
            __lock.release()
        # Create n threads.
        for i in range(n):
            threading.Thread(target=f).start()
        # Wait for them to finish.
        while __done_count < n: time.sleep(0.01)
        # Read any left-over received packets.
        end_time = time.time() + multithread_wait_time
        while time.time() < end_time:
            # Read any available packets.
            try: (p, fromaddr) = C.recvfrom(1000, socket.MSG_DONTWAIT)
            except socket.BlockError: p = None
            if p != None and not raw: assert fromaddr == D.dest
            if p != None: C_got += [p]
            try: (p, fromaddr) = D.recvfrom(1000, socket.MSG_DONTWAIT)
            except socket.BlockError: p = None
            if p != None and not raw: assert fromaddr == C.dest
            if p != None: D_got += [p]
            if len(C_got) == len(C_recv) and len(D_got) == len(D_recv):
                break
        if time.time() >= end_time:
            may_need_increase = True
        # Order is not guaranteed for packets, so compare sorted lists.
        C_got.sort()
        D_got.sort()
        C_recv.sort()
        D_recv.sort()
        assert C_got == C_recv
        assert D_got == D_recv
        C.close()
        D.close()
    except:
        if raw:
            print 'Unit test failed for socket.socket (SOCK_RAW).'
            print 'Raw packets are not reliable.'
        else:
            print 'Unit test failed for socket.socket (SOCK_DGRAM).'
            print 'Datagram packets are not reliable.'
        if may_need_increase:
            print 'Try increasing multithread_wait_time.'
        traceback.print_exc(); sys.exit()
    if raw:
        test_passed('socket.socket (SOCK_RAW)')
    else:
        test_passed('socket.socket (SOCK_DGRAM)')
def stream_test():
    """Multithreaded unit test for socket.socket (SOCK_STREAM)."""
    # Same stress pattern as packet_test, but over reliable streams:
    # each endpoint has a listening socket (C, D) and an outbound socket
    # (Cout, Dout); n threads interleave sends and non-blocking reads.
    # Because streams preserve order and may coalesce reads, the
    # comparison joins everything into one string per direction.
    try:
        multithread_wait_time = 200.0
        may_need_increase = False
        kwargs = {'in_depth':0, 'out_depth':0}
        C = socket.socket('Carolic', socket.SOCK_STREAM, **kwargs)
        D = socket.socket('David', socket.SOCK_STREAM, **kwargs)
        Cout = socket.socket('Carolic', socket.SOCK_STREAM, **kwargs)
        Dout = socket.socket('David', socket.SOCK_STREAM, **kwargs)
        # Sockets created with the same session name share a destination.
        assert C.dest == Cout.dest
        assert D.dest == Dout.dest
        C.listen(5)
        D.listen(5)
        Cout.connect(D.dest)
        Dout.connect(C.dest)
        (Cin, ignoredest) = C.accept()
        (Din, ignoredest) = D.accept()
        global C_recv, D_recv, C_got, D_got, __lock
        C_recv = [] # String data C *should* receive
        D_recv = [] # String data D *should* receive
        C_got = [] # String data C actually got
        D_got = [] # String data D actually got
        n = 50 # Create n threads
        m = 40 # Each thread sends m strings
        global __done_count
        __done_count = 0
        __lock = threading.Lock()
        # Use C and D to send and read in many different threads.
        def f():
            # This code is run in each separate thread
            global C_recv, D_recv, C_got, D_got, __lock, __done_count
            for i in range(m):
                # Random binary string of length 2-80.
                index_list = range(random.randrange(2, 80))
                s = ''.join([chr(random.randrange(256)) for j in index_list])
                if random.randrange(2) == 0:
                    # Send packet from C to D, and log it.
                    __lock.acquire()
                    Cout.send(s)
                    D_recv += [s]
                    __lock.release()
                else:
                    # Send packet from D to C, and log it.
                    __lock.acquire()
                    Dout.send(s)
                    C_recv += [s]
                    __lock.release()
                time.sleep(0.01*random.uniform(0.0,1.0))
                # Read any available string data, non-blocking.
                __lock.acquire()
                try: p = Cin.recv(100000, socket.MSG_DONTWAIT)
                except socket.BlockError: p = None
                if p != None: C_got += [p]
                __lock.release()
                __lock.acquire()
                try: p = Din.recv(100000, socket.MSG_DONTWAIT)
                except socket.BlockError: p = None
                if p != None: D_got += [p]
                __lock.release()
            __lock.acquire()
            __done_count += 1
            __lock.release()
        # Create n threads.
        for i in range(n):
            threading.Thread(target=f).start()
        # Wait for them to finish.
        while __done_count < n: time.sleep(0.01)
        # Read any left-over received string data.
        end_time = time.time() + multithread_wait_time
        while time.time() < end_time:
            # Read any available string data, non-blocking.
            try: p = Cin.recv(100000, socket.MSG_DONTWAIT)
            except socket.BlockError: p = None
            if p != None: C_got += [p]
            try: p = Din.recv(100000, socket.MSG_DONTWAIT)
            except socket.BlockError: p = None
            if p != None: D_got += [p]
            if len(''.join(C_got)) == len(''.join(C_recv)) and \
               len(''.join(D_got)) == len(''.join(D_recv)):
                break
        if time.time() >= end_time:
            may_need_increase = True
        # Streams preserve byte order, so the concatenations must be equal.
        C_got = ''.join(C_got)
        D_got = ''.join(D_got)
        C_recv = ''.join(C_recv)
        D_recv = ''.join(D_recv)
        assert C_got == C_recv
        assert D_got == D_recv
        Cin.close()
        Din.close()
        Cout.close()
        Dout.close()
        C.close()
        D.close()
    except:
        print 'Unit test failed for socket.socket ' + \
              '(SOCK_STREAM, multithreaded).'
        if may_need_increase:
            print 'Try increasing multithread_wait_time.'
        traceback.print_exc(); sys.exit()
    test_passed('socket.socket (SOCK_STREAM, multithreaded)')
def noblock_stream_test():
    """Unit test for non-blocking stream commands and listen.

    Spawns one accept-loop server thread and `nconnects` client threads
    over the I2P socket wrapper; each side exchanges one fixed message.
    Synchronisation is via module-level flags polled in the main thread.
    """
    kwargs = {'in_depth': 0, 'out_depth': 0}
    serv = socket.socket('Allison',socket.SOCK_STREAM,**kwargs)
    serv.setblocking(False)
    serv.listen(100)
    # setblocking(False) is expected to behave like settimeout(0.0).
    assert serv.gettimeout() == 0.0
    msg_to_client = 'Hi, client!!!!'
    msg_to_server = 'Hi, server!'
    nconnects = 5
    global server_done, client_count, client_lock
    server_done = False
    client_count = 0
    client_lock = threading.Lock()
    def serv_func(n = nconnects):
        # Accept and service exactly n connections, polling because the
        # listening socket is non-blocking (accept raises BlockError).
        while True:
            try:
                (C, ignoredest) = serv.accept()
                C.send(msg_to_client)
                rmsg = C.recv(len(msg_to_server), socket.MSG_WAITALL)
                if rmsg != msg_to_server:
                    raise ValueError('message should have been: ' +
                                     repr(msg_to_server) + ' was: ' + repr(rmsg))
                C.close()
                n -= 1
                if n == 0: break
            except socket.BlockError:
                # Nothing pending yet; retry after a short sleep.
                pass
            time.sleep(0.01)
        global server_done
        server_done = True
    def client_func():
        # FIXME: i2p.socket.NetworkError('TIMEOUT', '') errors are produced
        # for our streams if we use '' for all clients. Why?
        C = socket.socket('Bobb', socket.SOCK_STREAM, **kwargs)
        C.setblocking(False)
        try:
            C.connect(serv.dest)
        except socket.BlockError:
            # One could also use timeout=0.1 and loop
            (Rlist, Wlist, Elist) = select.select([C], [C], [C])
            if len(Elist) > 0:
                assert Elist[0] == C
                raise Elist[0].sessobj.err
        C.send(msg_to_server)
        # Switch to blocking mode so MSG_WAITALL below can wait for the
        # full server reply.
        C.setblocking(True)
        rmsg = C.recv(len(msg_to_client), socket.MSG_WAITALL)
        if rmsg != msg_to_client:
            raise ValueError('message should have been: ' +
                             repr(msg_to_client) + ' was: ' + repr(rmsg))
        C.close()
        global client_count, client_lock
        # Synchronized
        client_lock.acquire()
        try: client_count += 1
        finally: client_lock.release()
    thread.start_new_thread(serv_func, ())
    for i in range(nconnects):
        thread.start_new_thread(client_func, ())
    # Busy-wait until the server thread and every client have finished.
    while True:
        if server_done and client_count == nconnects: break
        time.sleep(0.01)
    test_passed('socket.listen (SOCK_STREAM), and non-blocking IO')
def multi_stream_test(n):
    """See if we can have n streams open at once."""
    # One listener with a backlog large enough for every client.
    server = socket.socket('Aligi', socket.SOCK_STREAM, in_depth=0, out_depth=0)
    server.listen(n)
    # Phase 1: create every client socket up front.
    clients = [socket.socket('Bobo', socket.SOCK_STREAM, in_depth=0, out_depth=0)
               for _ in range(n)]
    # Phase 2: connect each one and push a byte of traffic through it.
    for conn in clients:
        conn.connect(server.dest)
        conn.send('Hi')
    # Phase 3: tear everything down.
    for conn in clients:
        conn.close()
    server.close()
    test_passed(str(n) + ' streams open at once')
# Todo:
# select, poll
# More nonblocking unit tests
def test():
    # Run the whole suite in sequence (Python 2 script); each sub-test
    # prints its own pass/fail status via test_passed().
    print 'Testing:'
    print "Comment and uncomment tests manually, if they don't finish."
    resolve_test()
    noblock_stream_test()
    stream_client_test()
    packet_test(raw=True)
    packet_test(raw=False)
    stream_test()
    multi_stream_test(200)
if __name__ == '__main__':
test()
|
pombredanne/opc-diag
|
ez_setup.py
|
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "0.9.6"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(tarball, install_args=()):
    """Unpack *tarball* into a temp dir and run ``setup.py install`` there.

    Returns 2 if the install subprocess fails (used as a process exit
    code), otherwise None.  The extraction directory is always removed
    and the original working directory restored.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # installing
        log.warn('Installing Setuptools')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball* and build a setuptools egg into *to_dir*.

    Raises IOError if the expected *egg* file does not exist afterwards.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # building an egg
        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Ensure an egg for *version* exists in *to_dir*, then import from it.

    Downloads and builds the egg only when it is not already present;
    the egg is prepended to sys.path and recorded in
    ``setuptools.bootstrap_install_from``.
    """
    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15):
    """Ensure setuptools >= *version* is importable, bootstrapping if needed.

    Exits the process with status 2 when an incompatible setuptools is
    already imported, since it cannot be swapped out while in use.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        import pkg_resources
    except ImportError:
        # No setuptools installed at all: fetch and bootstrap it.
        return _do_download(version, download_base, to_dir, download_delay)
    try:
        pkg_resources.require("setuptools>=" + version)
        return
    except pkg_resources.VersionConflict:
        e = sys.exc_info()[1]
        if was_imported:
            sys.stderr.write(
                "The required version of setuptools (>=%s) is not available,"
                "\nand can't be installed while this script is running. Plea"
                "se\ninstall a more recent version first, using\n'easy_insta"
                "ll -U setuptools'.\n\n(Currently using %r)\n" %
                (version, e.args[0])
            )
            sys.exit(2)
        else:
            del pkg_resources, sys.modules['pkg_resources'] # reload ok
            return _do_download(version, download_base, to_dir,
                                download_delay)
    except pkg_resources.DistributionNotFound: # noqa
        return _do_download(version, download_base, to_dir,
                            download_delay)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # NOTE(review): `delay` is accepted but never used in this body.
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Python 3 / Python 2 compatible urlopen import.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "setuptools-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().
    """
    # Backport of TarFile.extractall for old Pythons; `self` is a
    # tarfile.TarFile instance (this is a free function, not a method).
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448 # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    if sys.version_info < (2, 4):
        # list.sort(key=...) does not exist before 2.4; use a cmp function.
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
    """
    Parse the command line for options
    """
    cli = optparse.OptionParser()
    cli.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    cli.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    # positional arguments are ignored
    opts, _positional = cli.parse_args()
    return opts
def main(version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall.

    Returns the exit code from the install step (2 on failure, None on
    success).  Note the `version` parameter is currently unused; the
    downloaded version comes from download_setuptools defaults.
    """
    options = _parse_args()
    tarball = download_setuptools(download_base=options.download_base)
    return _install(tarball, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main())
|
frappe/frappe
|
frappe/core/doctype/prepared_report/test_prepared_report.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and Contributors
# License: MIT. See LICENSE
import frappe
import unittest
import json
class TestPreparedReport(unittest.TestCase):
    """Integration tests for the Prepared Report doctype."""

    def setUp(self):
        # Reference an existing core report rather than creating one.
        self.report = frappe.get_doc({
            "doctype": "Report",
            "name": "Permitted Documents For User"
        })
        self.filters = {
            "user": "Administrator",
            "doctype": "Role"
        }
        # Inserting a Prepared Report enqueues its background generation.
        self.prepared_report_doc = frappe.get_doc({
            "doctype": "Prepared Report",
            "report_name": self.report.name,
            "filters": json.dumps(self.filters),
            "ref_report_doctype": self.report.name
        }).insert()

    def tearDown(self):
        # Restore the privileged user before deleting the fixture.
        frappe.set_user("Administrator")
        self.prepared_report_doc.delete()

    def test_for_creation(self):
        # Freshly inserted prepared reports start in the Queued state.
        self.assertTrue('QUEUED' == self.prepared_report_doc.status.upper())
        self.assertTrue(self.prepared_report_doc.report_start_time)
|
xkong/baniugui
|
dict4ini/p3.py
|
# $Id: p3.py,v 1.2 2003/11/18 19:04:03 phr Exp phr $
# Simple p3 encryption "algorithm": it's just SHA used as a stream
# cipher in output feedback mode.
# Author: Paul Rubin, Fort GNOX Cryptography, <phr-crypto at nightsong.com>.
# Algorithmic advice from David Wagner, Richard Parker, Bryan
# Olson, and Paul Crowley on sci.crypt is gratefully acknowledged.
# Copyright 2002,2003 by Paul Rubin
# Copying license: same as Python 2.3 license
# Please include this revision number in any bug reports: $Revision: 1.2 $.
from string import join
from array import array
try:
import hashlib as sha
except:
import sha
from time import time
class CryptError(Exception): pass
def _hash(str): return sha.new(str).digest()
_ivlen = 16
_maclen = 8
_state = _hash(`time()`)
try:
import os
_pid = `os.getpid()`
except ImportError, AttributeError:
_pid = ''
def _expand_key(key, clen):
    # Stretch `key` into at least `clen` bytes of keystream by chaining
    # SHA digests (20 bytes per block); returned as 32-bit words for the
    # XOR loops in p3_encrypt/p3_decrypt.
    blocks = (clen+19)/20
    xkey=[]
    seed=key
    for i in xrange(blocks):
        seed=sha.new(key+seed).digest()
        xkey.append(seed)
    j = join(xkey,'')
    return array ('L', j)
def p3_encrypt(plain,key):
    """Encrypt `plain` under `key`; returns nonce || ciphertext || MAC."""
    global _state
    H = _hash
    # change _state BEFORE using it to compute nonce, in case there's
    # a thread switch between computing the nonce and folding it into
    # the state. This way if two threads compute a nonce from the
    # same data, they won't both get the same nonce. (There's still
    # a small danger of a duplicate nonce--see below).
    _state = 'X'+_state
    # Attempt to make nlist unique for each call, so we can get a
    # unique nonce. It might be good to include a process ID or
    # something, but I don't know if that's portable between OS's.
    # Since is based partly on both the key and plaintext, in the
    # worst case (encrypting the same plaintext with the same key in
    # two separate Python instances at the same time), you might get
    # identical ciphertexts for the identical plaintexts, which would
    # be a security failure in some applications. Be careful.
    nlist = [`time()`, _pid, _state, `len(plain)`,plain, key]
    nonce = H(join(nlist,','))[:_ivlen]
    _state = H('update2'+_state+nonce)
    # Separate keys for encryption and authentication, both nonce-bound.
    k_enc, k_auth = H('enc'+key+nonce), H('auth'+key+nonce)
    n=len(plain) # cipher size not counting IV
    stream = array('L', plain+'0000'[n&3:]) # pad to fill 32-bit words
    # XOR the plaintext with the expanded keystream, word by word.
    xkey = _expand_key(k_enc, n+4)
    for i in xrange(len(stream)):
        stream[i] = stream[i] ^ xkey[i]
    ct = nonce + stream.tostring()[:n]
    # MAC covers nonce+ciphertext (encrypt-then-MAC), truncated to _maclen.
    auth = _hmac(ct, k_auth)
    return ct + auth[:_maclen]
def p3_decrypt(cipher,key):
    """Inverse of p3_encrypt; raises CryptError on short input or bad MAC."""
    H = _hash
    n=len(cipher)-_ivlen-_maclen # length of ciphertext
    if n < 0:
        raise CryptError, "invalid ciphertext"
    # Split into nonce, padded ciphertext words, and trailing MAC.
    nonce,stream,auth = \
        cipher[:_ivlen], cipher[_ivlen:-_maclen]+'0000'[n&3:],cipher[-_maclen:]
    k_enc, k_auth = H('enc'+key+nonce), H('auth'+key+nonce)
    # Verify the MAC over nonce+ciphertext before decrypting anything.
    vauth = _hmac (cipher[:-_maclen], k_auth)[:_maclen]
    if auth != vauth:
        raise CryptError, "invalid key or ciphertext"
    stream = array('L', stream)
    xkey = _expand_key (k_enc, n+4)
    for i in xrange (len(stream)):
        stream[i] = stream[i] ^ xkey[i]
    plain = stream.tostring()[:n]
    return plain
# RFC 2104 HMAC message authentication code
# This implementation is faster than Python 2.2's hmac.py, and also works in
# old Python versions (at least as old as 1.5.2).
from string import translate
def _hmac_setup():
    # Precompute the RFC 2104 ipad/opad constants and the XOR translation
    # tables used by _hmac; must run once at import time before _hmac.
    global _ipad, _opad, _itrans, _otrans
    _itrans = array('B',[0]*256)
    _otrans = array('B',[0]*256)
    for i in xrange(256):
        _itrans[i] = i ^ 0x36
        _otrans[i] = i ^ 0x5c
    _itrans = _itrans.tostring()
    _otrans = _otrans.tostring()
    _ipad = '\x36'*64
    _opad = '\x5c'*64
def _hmac(msg, key):
    # RFC 2104 HMAC using string translation tables for the key XOR
    # (faster than the per-byte loop in old hmac.py).
    if len(key)>64:
        key=sha.new(key).digest()
    ki = (translate(key,_itrans)+_ipad)[:64] # inner
    ko = (translate(key,_otrans)+_opad)[:64] # outer
    return sha.new(ko+sha.new(ki+msg).digest()).digest()
#
# benchmark and unit test
#
def _time_p3(n=1000,len=20):
    # Benchmark: encrypt `n` plaintexts of `len` bytes and print throughput.
    # NOTE(review): parameter `len` shadows the builtin; kept as-is because
    # callers pass it by keyword.
    plain="a"*len
    t=time()
    for i in xrange(n):
        p3_encrypt(plain,"abcdefgh")
    dt=time()-t
    print "plain p3:", n,len,dt,"sec =",n*len/dt,"bytes/sec"
def _speed():
    """Benchmark p3 encryption at several plaintext sizes."""
    for kwargs in ({'len': 5}, {}, {'len': 200}, {'len': 2000, 'n': 100}):
        _time_p3(**kwargs)
def _test():
    # Round-trip, nonce-freshness and tamper-detection self-test.
    e=p3_encrypt
    d=p3_decrypt
    plain="test plaintext"
    key = "test key"
    c1 = e(plain,key)
    c2 = e(plain,key)
    # Fresh nonce per call: identical inputs must not repeat ciphertext.
    assert c1!=c2
    assert d(c2,key)==plain
    assert d(c1,key)==plain
    c3 = c2[:20]+chr(1+ord(c2[20]))+c2[21:] # change one ciphertext character
    try:
        print d(c3,key) # should throw exception
        print "auth verification failure"
    except CryptError:
        pass
    try:
        print d(c2,'wrong key') # should throw exception
        print "test failure"
    except CryptError:
        pass
_hmac_setup()
#_test()
# _speed() # uncomment to run speed test
|
mmottahedi/neuralnilm_prototype
|
scripts/e127.py
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
"""
def exp_a(name):
    """Build the Net for experiment 'a': one LSTM layer pre-trained, then a
    second LSTM inserted at epoch 501 (layer-by-layer training)."""
    source = RealApplianceSource(
        filename='/data/dk3810/ukdale.h5',
        appliances=[
            ['fridge freezer', 'fridge', 'freezer'],
            'hair straighteners',
            'television'
            # 'dish washer',
            # ['washer dryer', 'washing machine']
        ],
        max_appliance_powers=[300, 500, 200], #, 2500, 2400],
        on_power_thresholds=[20, 20, 20], #, 20, 20],
        max_input_power=1000,
        min_on_durations=[60, 60, 60], #, 1800, 1800],
        window=("2013-06-01", "2014-07-01"),
        seq_length=1000,
        output_one_appliance=False,
        boolean_targets=False,
        min_off_duration=60,
        train_buildings=[1],
        validation_buildings=[1],
        skip_probability=0,
        n_seq_per_batch=50
    )
    net = Net(
        experiment_name=name,
        source=source,
        save_plot_interval=SAVE_PLOT_INTERVAL,
        loss_function=crossentropy,
        updates=partial(nesterov_momentum, learning_rate=1.0),
        # Initial architecture: single LSTM -> sigmoid dense output.
        layers_config=[
            {
                'type': LSTMLayer,
                'num_units': 50,
                'W_in_to_cell': Uniform(25),
                'gradient_steps': GRADIENT_STEPS,
                'peepholes': False
            },
            {
                'type': DenseLayer,
                'num_units': source.n_outputs,
                'nonlinearity': sigmoid
            }
        ],
        # At epoch 501 replace the output head with LSTM+Dense, keeping
        # the pre-trained lower layer.
        layer_changes={
            501: {
                'remove_from': -3,
                'new_layers':
                [
                    {
                        'type': LSTMLayer,
                        'num_units': 50,
                        'W_in_to_cell': Uniform(1),
                        'gradient_steps': GRADIENT_STEPS,
                        'peepholes': False
                    },
                    {
                        'type': DenseLayer,
                        'num_units': source.n_outputs,
                        'nonlinearity': sigmoid
                    }
                ]
            }
        }
    )
    return net
def init_experiment(experiment):
    """Build the Net for experiment letter *experiment* (e.g. 'a').

    Fix: look up the module-level ``exp_<letter>`` factory via globals()
    instead of eval()-ing a generated source string — same behaviour,
    no string evaluation.
    """
    full_exp_name = NAME + experiment
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    exp_func = globals()['exp_' + experiment]
    net = exp_func(full_exp_name)
    return net
def main():
    """Run each configured experiment for up to 5000 epochs."""
    for experiment in list('a'):
        full_exp_name = NAME + experiment
        path = os.path.join(PATH, full_exp_name)
        try:
            net = init_experiment(experiment)
            run_experiment(net, path, epochs=5000)
        except KeyboardInterrupt:
            # Allow a clean manual stop from the terminal.
            break
        except TrainingError as e:
            # Log the failure and continue with the next experiment.
            print("EXCEPTION:", e)
|
g2p/xtraceback
|
xtraceback/lexer.py
|
try:
    from pygments.lexer import bygroups, include, using
    from pygments.lexers.agile import PythonLexer, PythonTracebackLexer
    from pygments.token import Text, Name, Number, Generic, String, Operator
except ImportError: # pragma: no cover
    # this is for nose coverage which does a recursive import on the package
    pass
else:
    # Common identifier fragment reused in several token regexes below.
    BASE_NAME = r"[a-zA-Z_][a-zA-Z0-9_]*"

    class XPythonLexer(PythonLexer):
        # Python lexer extended to recognise xtraceback's <ref offset=...>
        # markers and (possibly quoted) dotted class names.
        tokens = PythonLexer.tokens.copy()
        tokens["classname"] = [
            ("'?[a-zA-Z_][a-zA-Z0-9_.]*'?", Name.Class, "#pop")
        ]
        # Marker __repr__
        ref = "(<ref offset)(=)(\-\d+)( ?)((?:name)?)(=?)((?:%s)?)(>?)" % BASE_NAME
        tokens["root"].insert(0, (ref, bygroups(Name.Builtin, Name.Operator,
                                                Number, Text, Name.Builtin,
                                                Name.Operator, Name.Variable,
                                                Name.Builtin)))

    class PythonXTracebackLexer(PythonTracebackLexer):
        # Traceback lexer aware of xtraceback's extra per-frame output
        # (source context lines and local-variable dumps).
        tokens = {
            "root": [
                include("entry"),
                include("exception"),
                (r"^.*\n", Generic.Error),
            ],
            "entry": [
                (r"^Traceback \(most recent call last\):\n",
                 Generic.Error,
                 "frame"),
                # file - path is colored differently if under working directory
                (r'^( File )((?:"[./<][^"]+")?)((?:"[^"]+")?)' \
                 '(, line )(\d+)((?:, in )?)(.*)(\n)',
                 bygroups(Generic.Error, Name.Builtin, Operator.Word,
                          Generic.Error, Number, Generic.Error, Name.Function,
                          Text),
                 "frame"),
            ],
            "exception": [
                (r"^(AssertionError: )(.+\n)", bygroups(Generic.Error,
                                                        using(XPythonLexer))),
                (r"^(%s:?)(.+\n)" % BASE_NAME, bygroups(Generic.Error, String)),
            ],
            "frame": [
                include("entry"),
                include("exception"),
                # line of python code
                (r"^((?:-+>)?)( +)(\d+)(.+\n)",
                 bygroups(Generic.Error, Text, Number, using(XPythonLexer))),
                # variable continuation
                (r"^([ ]+)('[^']+')(: )(.*)([,}]?\n)",
                 bygroups(Text, String, Name.Operator, using(XPythonLexer), Text)),
                # variable
                (r"^([ ]+)((?:g:)?)(\**%s)( = )(.+\n)" % BASE_NAME,
                 bygroups(Text, Name.Builtin, Name.Variable, Name.Operator,
                          using(XPythonLexer))),
                # plain python
                (r"^( )(.+)(\n)",
                 bygroups(Text, using(XPythonLexer), Text)),
            ],
        }
|
ee08b397/LeetCode-4
|
070 Text Justification.py
|
"""
Given an array of words and a length L, format the text such that each line has exactly L characters and is fully (left
and right) justified.
You should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces
' ' when necessary so that each line has exactly L characters.
Extra spaces between words should be distributed as evenly as possible. If the number of spaces on a line do not divide
evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.
For the last line of text, it should be left justified and no extra space is inserted between words.
For example,
words: ["This", "is", "an", "example", "of", "text", "justification."]
L: 16.
Return the formatted lines as:
[
"This is an",
"example of text",
"justification. "
]
Note: Each word is guaranteed not to exceed L in length.
click to show corner cases.
Corner Cases:
A line other than the last line might contain only one word. What should you do in this case?
In this case, that line should be left-justified.
"""
__author__ = 'Danyang'
class Solution:
    def fullJustify(self, words, L):
        """
        Greedy two-pass algorithm: first pack words into lines, then pad
        each line with spaces to exactly width L.
        :param words: a list of str
        :param L: int
        :return: a list of str
        """
        result = []
        self.break_line(words, L, result)
        return self.distribute_space(L, result)

    def break_line(self, words, L, result):
        # Recursively pack as many words as fit (with single separating
        # spaces) into each line; appends lists of words to `result`.
        if not words:
            return
        # Start at -1 so the first word does not pay for a leading space.
        cur_length = -1
        lst = []
        i = 0
        while i<len(words):
            word = words[i]
            cur_length += 1 # space in left justified
            cur_length += len(word)
            if cur_length>L: break
            lst.append(word)
            i += 1
        result.append(lst)
        self.break_line(words[i:], L, result)

    def distribute_space(self, L, result):
        # Pad each packed line to exactly L characters.
        new_result = []
        for ind, line in enumerate(result):
            word_cnt = len(line)
            str_builder = []
            space_cnt = L-sum(len(word) for word in line)
            hole_cnt = word_cnt-1
            if ind<len(result)-1:
                if hole_cnt>0:
                    # Python 2 integer division: base gap size, with one
                    # extra space for the leftmost `remain` gaps.
                    space = space_cnt/hole_cnt
                    remain = space_cnt%hole_cnt
                    for word in line[:-1]:
                        str_builder.append(word)
                        str_builder.append(" "*space)
                        if remain>0:
                            str_builder.append(" ")
                            remain -= 1
                    str_builder.append(line[-1])
                else:
                    # Single-word line: left-justify with trailing spaces.
                    str_builder.append(line[-1])
                    str_builder.append(" "*space_cnt)
            else: # last line, special handling
                # Left-justified: single spaces between words, pad the rest.
                str_builder = [" ".join(line)]
                str_builder.append(" "*(space_cnt-hole_cnt))
            new_result.append("".join(str_builder))
        return new_result
if __name__=="__main__":
print Solution().fullJustify(["This", "is", "an", "example", "of", "text", "justification."], 16)
print Solution().fullJustify(["What","must","be","shall","be."], 12)
|
dracarysX/flask_restapi
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
class MyTest(TestCommand):
    """setup.py `test` command: run the project's unittest suite."""

    def run_tests(self):
        # Discover every tests/test_*.py module and execute it.
        suite = unittest.TestLoader().discover('tests', pattern='test_*.py')
        unittest.TextTestRunner(verbosity=1).run(suite)
# Package metadata; `python setup.py test` runs the MyTest command above.
setup(
    name='flask_restapi',
    version='0.2.0',
    license='MIT',
    description=u'A simple rest query framework by flask, peewee, rest_query',
    author='dracarysX',
    author_email='huiquanxiong@gmail.com',
    url='https://github.com/dracarysX/flask_restapi',
    packages=find_packages(include=['flask_restapi']),
    install_requires=[
        'peewee',
        'flask',
        'wtforms',
        'flask_bcrypt',
        'flask-script',
        'peewee-rest-query'
    ],
    test_suite='nose.collector',
    tests_require=['nose'],
    # NOTE(review): 'License :: MIT' is not a standard trove classifier
    # ('License :: OSI Approved :: MIT License' is) — confirm on PyPI.
    classifiers=[
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'License :: MIT',
    ],
    keywords='Python, Flask, APIMethodView, Filtering Query API, Mysql, Peewee, RestAPI',
    long_description='A simple rest query framework. Web framework use flask, '
                     'orm by peewee, form by wtform and query by rest_query.'
                     'The framework implements custom query api(like this: /?select=id,name&id=gte.20), '
                     'save form data, model object serializer, APIMethodView(get, post, put,delete) and errorhandler.'
)
|
OpenSourcePolicyCenter/webapp-public
|
webapp/apps/pages/urls.py
|
from django.conf.urls import url
from .views import (homepage, aboutpage, newspage, gallerypage, hellopage,
embedpage, widgetpage, newsdetailpage, apps_landing_page,
border_adjustment_plot, docspage, gettingstartedpage)
# URL routes for the static "pages" app.
# Fix: the exact-duplicate `^gallery/$` route (it appeared twice with the
# same view and name) has been removed; Django only ever matched the first.
urlpatterns = [
    # url(r'^apps/$', apps_landing_page, name='apps'),
    url(r'^$', homepage, name='home'),
    url(r'^about/$', aboutpage, name='about'),
    url(r'^getting-started/$', gettingstartedpage, name='gettingstartedpage'),
    url(r'^hello/$', hellopage, name='hello'),
    url(r'^gallery/$', gallerypage, name='gallery'),
    url(r'^news/$', newspage, name='news'),
    url(r'^news/news-detail$', newsdetailpage, name='newsdetail'),
    url(r'^gallery/(?P<widget_id>\w+)/$', widgetpage),
    url(r'^gallery/embed/(?P<widget_id>\w+)/$', embedpage),
    url(r'^gallery/embed/(?P<widget_id>\w+)/(?P<layout>\w+)/$', embedpage),
    url(r'^docs/$', docspage, name='docs'),
    url(r'^gallery/border_adjustment$',
        border_adjustment_plot,
        name='border_adjustment'),
    # NOTE(review): this shares name='border_adjustment' with the route
    # above, which makes reverse() ambiguous — confirm which is canonical.
    url(r'^bac/$', border_adjustment_plot, name='border_adjustment')
]
|
aaiijmrtt/JUCSE
|
Compilers/parser.py
|
_first = dict()
_follow = dict()
_table = dict()
_endsymbol = '$'
_emptysymbol = '#'
# function to remove left recursion from a subgrammar
def recursion(grammar):
    """Rewrite a subgrammar (all rules share one head) to eliminate
    immediate left recursion, introducing a fresh '<head>bar' tail
    nonterminal with an epsilon alternative."""
    head = grammar[0][0]
    tail = head + 'bar'
    rewritten = []
    leftrecursive = []
    # Split rules into left-recursive (head == first body symbol) and not.
    for rule in grammar:
        if rule[0] == rule[1]:
            leftrecursive.append(rule)
        else:
            rewritten.append(list(rule) + [tail])
    # A -> ... tail ; tail -> epsilon ; tail -> (recursive suffix) tail
    rewritten.append([tail, _emptysymbol])
    for rule in leftrecursive:
        rewritten.append([rule[0] + 'bar'] + rule[2: ] + [tail])
    return rewritten
# function to left factor a subgrammar
def factoring(grammar):
    # Left-factor a subgrammar (all rules share the same head): runs of
    # rules with a common prefix are rewritten as prefix + a fresh
    # '<head>star...' nonterminal carrying the differing suffixes.
    section = grammar[0][0]
    factoredgrammar = list()
    index = 0
    while index + 1 < len(grammar):
        # Positions where this rule and the next first differ.
        mismatches = [subindex for subindex in range(min(len(grammar[index]), len(grammar[index + 1]))) if grammar[index][subindex] != grammar[index + 1][subindex]]
        maxmatch = min(mismatches) if mismatches else min(len(grammar[index]), len(grammar[index + 1]))
        if maxmatch == 1:
            # Only the head symbol matches: nothing to factor here.
            index += 1
            continue
        # Count how many consecutive rules share the same prefix.
        subindex = 2
        while index + subindex < len(grammar):
            if grammar[index][: maxmatch] != grammar[index + subindex][: maxmatch]:
                break
            subindex += 1
        factoredgrammar.append(grammar[index][: maxmatch] + [section + 'star' * (index + 1)])
        for subsubindex in range(subindex):
            if grammar[index + subsubindex][maxmatch: ]:
                factoredgrammar.append([section + 'star' * (index + 1)] + grammar[index + subsubindex][maxmatch: ])
            else:
                # An empty suffix becomes an epsilon production.
                factoredgrammar.append([section + 'star' * (index + 1)] + [_emptysymbol])
        # Consume the factored run (mutates the caller's list).
        del grammar[index : index + subindex]
    return factoredgrammar + grammar
# function to calculate first of expressions
def first(expression, grammar, terminals):
    # Memoised FIRST-set computation; `_first` caches results across calls.
    if expression in _first:
        return _first[expression]
    if expression in terminals:
        # FIRST(terminal) = {terminal}; not cached.
        return set((expression, ))
    _first[expression] = set()
    for rule in grammar:
        if expression == rule[0]:
            flag = True
            for production in rule[1: ]:
                foremost = first(production, grammar, terminals)
                _first[expression].update(foremost)
                if _emptysymbol in foremost:
                    # Nullable symbol: keep scanning the rest of the body.
                    _first[expression].remove(_emptysymbol)
                else:
                    flag = False
                    break
            if flag:
                # Every symbol in the body was nullable.
                _first[expression].add(_emptysymbol)
    return _first[expression]
# function to calculate follow of expressions
def follow(expression, grammar, terminals, startsymbol):
    # Memoised FOLLOW-set computation; `_follow` caches results across calls.
    if expression in _follow:
        return _follow[expression]
    temporary = frozenset()
    if expression == startsymbol:
        # End-of-input marker always follows the start symbol.
        temporary = temporary.union(frozenset([_endsymbol]))
    for rule in grammar:
        if expression in rule[1: ]:
            # NOTE(review): only the first occurrence of `expression` in
            # each rule body is considered; later duplicates are skipped.
            index = rule[1: ].index(expression)
            index += 2
            flag = True
            for production in rule[index: ]:
                foremost = first(production, grammar, terminals)
                temporary = temporary.union(foremost)
                if _emptysymbol in foremost:
                    # Epsilon never belongs to a FOLLOW set.
                    temporary = temporary.difference(frozenset([_emptysymbol]))
                else:
                    flag = False
                    break
            if flag and rule[0] != expression:
                # Everything after was nullable: include FOLLOW(head).
                temporary = temporary.union(follow(rule[0], grammar, terminals, startsymbol))
    _follow[expression] = temporary
    return _follow[expression]
# function to create parsing table
def table(grammar, terminals, startsymbol):
    # Build the LL(1) parse table into the module-level `_table`:
    # _table[nonterminal][lookahead] -> set of production bodies (tuples).
    # Sets with more than one entry indicate grammar conflicts.
    for rule in grammar:
        if rule[0] not in _table:
            _table[rule[0]] = dict()
            for terminal in terminals:
                _table[rule[0]][terminal] = set()
            _table[rule[0]][_endsymbol] = set()
    for rule in grammar:
        flag = True
        for production in rule[1: ]:
            foremost = first(production, grammar, terminals)
            # Add the rule under every terminal in FIRST of its body.
            for symbol in foremost:
                if symbol in terminals or symbol == _endsymbol:
                    _table[rule[0]][symbol].add(tuple(rule[1: ]))
            if _emptysymbol not in foremost:
                flag = False
                break
        if flag:
            # Nullable body: also add the rule under FOLLOW(head).
            rearmost = follow(rule[0], grammar, terminals, startsymbol)
            for symbol in rearmost:
                if symbol in terminals or symbol == _endsymbol:
                    _table[rule[0]][symbol].add(tuple(rule[1: ]))
            if _endsymbol in rearmost:
                _table[rule[0]][_endsymbol].add(tuple(rule[1: ]))
    return _table
# function to syntactically parse code
def parser(code, start):
_input = line.strip().split() + [_endsymbol]
_stack = [_endsymbol, start]
while _stack:
if not _input or _stack[-1] == _emptysymbol:
print '[?POP', _stack.pop(), ']', _input, _stack
continue
if _stack[-1] == _input[0]:
print '[POP', _stack.pop(), ']', _input, _stack
_input = _input[1: ]
continue
if not _table[_stack[-1]][_input[0]]:
print '[!POP', _stack.pop(), ']', _input, _stack
continue
ex = list(_table[_stack[-1]][_input[0]])[0]
a = _stack.pop()
_stack += [x for x in reversed(ex)]
print '[RULE', a, '->', ' '.join(ex), ']', _input, _stack
# function to replace rules while removing indirect left recursion
def replace(replacable, replacing):
    """For every rule in `replacable`, substitute each body occurrence of a
    `replacing` rule's head with that rule's body (one output rule per
    match); rules with no match are kept unchanged, order preserved."""
    out = []
    for rule in replacable:
        substituted = False
        for other in replacing:
            head = other[0]
            for pos in range(1, len(rule)):
                if rule[pos] == head:
                    out.append(rule[: pos] + other[1: ] + rule[pos + 1:])
                    substituted = True
        if not substituted:
            out.append(rule)
    return out
if __name__ == '__main__':
    # Load rules: "LHS -> sym sym ..." becomes [LHS, sym, ...] (the arrow
    # token at position 1 is dropped).
    grammar = [rule.split() for rule in open('grammar.txt', 'r').readlines()]
    grammar = [[rule[0]] + rule[2: ] for rule in grammar]
    # Pass 1: left-factor each run of rules sharing the same head.
    temporarygrammar = list()
    index = 0
    while index < len(grammar):
        subindex = 1
        while index + subindex < len(grammar):
            if grammar[index][0] != grammar[index + subindex][0]:
                break
            subindex += 1
        temporarygrammar = temporarygrammar + factoring(grammar[index: index + subindex])
        index += subindex
    grammar = temporarygrammar
    # Pass 2: remove indirect (via replace) then immediate left recursion.
    temporarygrammar = list()
    index = 0
    while index < len(grammar):
        subindex = 1
        while index + subindex < len(grammar):
            if grammar[index][0] != grammar[index + subindex][0]:
                break
            subindex += 1
        temporarygrammar = temporarygrammar + recursion(replace(grammar[index: index + subindex], temporarygrammar))
        index += subindex
    grammar = temporarygrammar
    startsymbol = 'program'
    print '\n\nGRAMMAR RULES\n\n'
    for rule in grammar:
        print rule
    # Terminals are symbols that never appear as a rule head.
    terms = set([term for rule in grammar for term in rule])
    nonterminals = set([rule[0] for rule in grammar])
    terminals = terms - nonterminals
    print '\n\nFIRSTS\n\n'
    for nonterminal in nonterminals:
        print nonterminal, first(nonterminal, grammar, terminals)
    print '\n\nFOLLOWS\n\n'
    for nonterminal in nonterminals:
        print nonterminal, follow(nonterminal, grammar, terminals, startsymbol)
    # Populate the module-level LL(1) table, then parse each input line.
    table(grammar, terminals, startsymbol)
    print '\n\nTABLE\n\n'
    for left in _table:
        for top in _table[left]:
            if _table[left][top]:
                print left, top, _table[left][top]
    with open('intermediate.txt', 'r') as filein:
        for line in filein:
            print '\n\nSYNTACTIC PARSE\n\n'
            parser(line.strip().split(), startsymbol)
|
kmike/DAWG-Python
|
tests/utils.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import zipfile
# Directory holding development fixtures, relative to this test module.
DEV_DATA_PATH = os.path.join(os.path.dirname(__file__), '..', 'dev_data')


def data_path(*args):
    """Return a path to a file inside the dev_data directory."""
    parts = (DEV_DATA_PATH,) + args
    return os.path.join(*parts)
def words100k():
    """Load the 100k-word fixture, returning it as a list of lines."""
    archive = zipfile.ZipFile(data_path('words100k.txt.zip'))
    first_member = archive.namelist()[0]
    raw = archive.open(first_member).read()
    return raw.decode('utf8').splitlines()
|
BitcoinUnlimited/BitcoinUnlimited
|
qa/rpc-tests/electrum_reorg.py
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Unlimited developers
"""
Tests to check if basic electrum server integration works
"""
import random
from test_framework.util import waitFor, assert_equal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.loginit import logging
from test_framework.electrumutil import compare, bitcoind_electrum_args
class ElectrumReorgTests(BitcoinTestFramework):
    """Check that the electrum server follows chain reorgs: invalidated
    blocks are unwound and their transactions reappear in its mempool."""

    def __init__(self):
        super().__init__()
        # Fresh chain, single node running with electrum enabled.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [bitcoind_electrum_args()]

    def run_test(self):
        n = self.nodes[0]
        # Mine past coinbase maturity so sendtoaddress has spendable funds.
        n.generate(200)

        # waitFor throws on timeout, failing the test
        waitFor(10, lambda: compare(n, "index_height", n.getblockcount()))
        waitFor(10, lambda: compare(n, "mempool_count", 0, True))

        # One wallet tx; both bitcoind and the electrum server should see it.
        n.sendtoaddress(n.getnewaddress(), 1)
        assert_equal(1, len(n.getrawmempool()))
        waitFor(10, lambda: compare(n, "mempool_count", 1, True))

        # Confirm the tx, then invalidate the confirming blocks.
        blocks = n.generate(50)
        waitFor(10, lambda: compare(n, "index_height", n.getblockcount()))
        waitFor(10, lambda: compare(n, "mempool_count", 0, True))

        logging.info("invalidating %d blocks", len(blocks))
        n.invalidateblock(blocks[0])

        # electrum server should trim its chain as well and see our
        # transaction go back into mempool
        waitFor(10, lambda: compare(n, "index_height", n.getblockcount()))
        waitFor(10, lambda: compare(n, "mempool_count", 1, True))

        # Re-confirm on the new chain; mempool drains again.
        n.generate(50)
        waitFor(10, lambda: compare(n, "index_height", n.getblockcount()))
        waitFor(10, lambda: compare(n, "mempool_count", 0, True))

    def setup_network(self, dummy = None):
        # Single node; no peer connections needed for this test.
        self.nodes = self.setup_nodes()
# Allow running this test file directly.
if __name__ == '__main__':
    ElectrumReorgTests().main()
|
YaoQ/zigbee-on-pcduino
|
zigbee.py
|
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
import urllib
import urllib2
import json
import serial
import time
import gpio
import re
import binascii
import threading
import datetime
import sys
# use your deviceID and apikey
deviceID="xxxxxxxxxx"       # linksprite.io device id (placeholder)
apikey = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx"  # linksprite.io API key (placeholder)
# GPIO pin wired to the "add device" push button.
key_pin = "gpio12"
# Latest reported sensor states ('1' = alarm); written by update()/recovery().
s = ""        # SOS state
door = ""
PIR = ""
Leak = ""
Smoke = ""
Remote = ""
# Short-address hex strings of paired sensors, persisted in *.txt files.
Door_mac = ""
PIR_mac = ""
Leak_mac = ""
Smoke_mac = ""
Remote_mac = ""
# use USB UART or UART on pcDuino to communicate with zigbee gateway
try:
    ser = serial.Serial("/dev/ttyUSB0", 115200,timeout = 0.1)
except serial.serialutil.SerialException:
    try:
        # Fall back to the on-board UART; writing '3' to the gpio mode
        # files muxes pins 0/1 to the UART function on the pcDuino.
        ser = serial.Serial("/dev/ttyS1", 115200,timeout = 0.1)
        with open("/sys/devices/virtual/misc/gpio/mode/gpio0",'w') as UART_RX:
            UART_RX.write('3')
        with open("/sys/devices/virtual/misc/gpio/mode/gpio1",'w') as UART_TX:
            UART_TX.write('3')
    except serial.serialutil.SerialException:
        print "serial failed!"
        exit()
def setup():
    # Configure the add-device key pin as an input.
    gpio.pinMode(key_pin,gpio.INPUT)
def key_interrupt():
    """Debounced poll of the add-device key.

    Returns '1' when the (active-low) key reads pressed and is still
    pressed after a 10 ms debounce delay, '0' otherwise.
    """
    val = gpio.digitalRead(key_pin)
    if val == 0:
        time.sleep(0.010)
        # Bug fix: re-read the pin after the debounce delay.  The original
        # re-tested the stale value, so the sleep never filtered glitches.
        val = gpio.digitalRead(key_pin)
        if val == 0:
            return '1'
    return '0'
def http_post(data):
    """POST *data* as JSON to the linksprite.io HTTP API.

    Returns the response body on success, or the string "connect failed"
    (deliberate best-effort: never raises) on any URL error.
    """
    try:
        url = 'http://www.linksprite.io/api/http'
        jdata = json.dumps(data)
        req = urllib2.Request(url, jdata)
        req.add_header('Content-Type','application/json')
        response = urllib2.urlopen(req)
        return response.read()
    except urllib2.URLError:
        print "connect failed"
        return "connect failed"
        pass
def hexShow(argv):
    """Return the bytes of *argv* as a lowercase spaced hex dump.

    Each character becomes two hex digits followed by one space, so the
    result carries a trailing space for non-empty input — the exact format
    the frame-matching code (register(), cluster(), alarm(), ...) searches.
    """
    # ''.join is O(n) and works on Python 2 and 3, unlike the previous
    # xrange-indexed loop that concatenated with += (quadratic, Py2-only).
    return ''.join('%02x ' % ord(ch) for ch in argv)
def register():
    """Put the gateway in pairing mode and wait for a device to join.

    Repeats the 0x02 0x75 0x1e permit-join command until a reply holding
    the "0e fc 02 e1" announce frame arrives, then returns the hex-dump
    substring "<short addr> <ieee mac>" (29 chars).  If a long-enough
    reply lacks the frame the loop breaks and None is returned implicitly.
    """
    while True:
        ser.write('\x02')
        ser.write('\x75')
        ser.write('\x1e')
        data = ser.readline()
        val=hexShow(data)
        leng = len(val)
        if leng > 45:
            a = val.find("0e fc 02 e1",1)
            if a != -1:
                print "add equipment ok"
                # Payload starts 4 dump bytes (12 chars) past the marker:
                # 5 chars short address + space + 23 chars of MAC.
                b=a+12
                mac = val[b:b+29]
                return mac
            break
        time.sleep(0.2)
def set_target(short_mac):
    """Point the gateway's data endpoint at the device *short_mac*.

    Retries the FC 02 set-target frame every 0.2 s until the
    "04 fd 02 01" acknowledgement is seen on the serial line.
    """
    send = "0c fc 02 01 04 01 01 01 02"+short_mac+"02 0a"
    s = send.replace(' ','')
    a=binascii.a2b_hex(s)
    while True:
        ser.write(a)
        recv=ser.readline()
        rec=hexShow(recv)
        # 'a' is reused here as the search result, shadowing the frame.
        a = rec.find("04 fd 02 01",0)
        if a != -1:
            print "set target ok"
            break
        time.sleep(0.2)
def gateway_mac():
    """Query the gateway's IEEE MAC address.

    Polls with the 0x02 0x14 0x6f command; on a "0c 15 00 6f" reply
    returns the MAC as a 23-char spaced hex string.  A long reply without
    that header breaks the loop (implicitly returning None).
    """
    while True:
        ser.write('\x02')
        ser.write('\x14')
        ser.write('\x6f')
        data = ser.readline()
        dat = hexShow(data)
        leng = len(dat)
        if leng > 30:
            a = dat.find("0c 15 00 6f",0)
            if a != -1:
                # Fixed offset: the 8 MAC bytes start at dump position 15.
                dt = dat[15:38]
                return dt
            break
        time.sleep(1)
def bind(eq_mac,gat_mac):
    """Bind sensor *eq_mac* to gateway *gat_mac* (ZDO bind request).

    Retries every 0.2 s until the "02 d9 00" bind response arrives.
    """
    send = "16 d8"+eq_mac+"01 01 00 03"+gat_mac+"01"
    s = send.replace(' ','')
    a=binascii.a2b_hex(s)
    # NOTE(review): 'start' is never used — no timeout is applied here.
    start = datetime.datetime.now()
    while True:
        ser.write(a)
        recv=ser.readline()
        rec=hexShow(recv)
        b = rec.find("02 d9 00")
        if b != -1:
            print "bind ok"
            break
        time.sleep(0.2)
def cluster():
    """Read the newly added device's IAS zone type.

    Returns the zone type as spaced hex ("15 00" door, "0d 00" PIR,
    "2a 00" leak, "28 00" smoke), "11 00" when the device answers with the
    remote's default-response frame, or "xxxx" after a 5 second timeout.
    """
    send = "08 FC 00 00 05 00 01 01 00"
    s = send.replace(' ','')
    a=binascii.a2b_hex(s)
    start = datetime.datetime.now()
    while True:
        ser.write(a)
        recv=ser.readline()
        rec=hexShow(recv)
        leng = len(rec)
        finsh = datetime.datetime.now()
        tim = (finsh-start).seconds
        if tim > 5:
            print "failure! please add again"
            return "xxxx"
            break
        if leng > 30:
            b = rec.find("0b fe 03")
            c = rec.find("00 01 07 fe 03 00")
            if b != -1:
                # Zone type sits 10 dump bytes (30 chars) past the marker.
                return rec[b+30:b+35]
                break
            elif c != -1:
                # Remote controls reply with this frame instead.
                return "11 00"
        time.sleep(0.2)
def report():
    """Configure attribute reporting on the freshly bound device.

    Retries the FC 00 configure-reporting frame every 0.2 s until the
    "06 fd 00" acknowledgement is received.
    """
    send = "11 FC 00 01 00 06 01 00 21 00 20 f0 00 f0 00 01 00 00"
    s = send.replace(' ','')
    a=binascii.a2b_hex(s)
    while True:
        ser.write(a)
        recv=ser.readline()
        rec=hexShow(recv)
        leng = len(rec)
        if leng > 15:
            b = rec.find("06 fd 00")
            if b != -1:
                print "send report ok"
                break
        time.sleep(0.2)
def alarm():
    """Poll the serial port for one IAS zone-status notification.

    When an "fe 01" alarm frame appears in the hex dump, returns the
    6-char string "<short addr><status>" (5 dump chars + '0'/'1').
    Returns -1 when nothing relevant was received.
    """
    line = ser.readline()
    val = hexShow(line)
    leng = len(val)
    if leng >= 56:
        #print val
        po = val.find("fe 01")
        if po != -1:
            # Fixed offsets into the spaced dump relative to the marker:
            # sender short address, then the zone-status digit.
            aa = val[po+21:po+26]
            sta = val[po+46]
            s = aa+sta
            return s
    return -1
def open_socket():
    """Send the FC 01 on/off cluster command to switch the socket ON."""
    send = "05 FC 01 06 00 01"
    s = send.replace(' ','')
    a = binascii.a2b_hex(s)
    # Bug fix: the frame was built but never transmitted, so the
    # function was a no-op.
    ser.write(a)
def close_socket():
    """Send the FC 01 on/off cluster command to switch the socket OFF."""
    send = "05 FC 01 06 00 00"
    s = send.replace(' ','')
    a = binascii.a2b_hex(s)
    # Bug fix: the frame was built but never transmitted, so the
    # function was a no-op.
    ser.write(a)
def recovery():
    """Auto-clear the momentary SOS and PIR alarms and push the reset.

    Scheduled by update() on a 2 s threading.Timer after either fires.
    """
    global s
    global PIR
    s = '0'
    PIR = '0'
    values ={
        "action":"update",
        "apikey":apikey,
        "deviceid":deviceID,
        "params":
        {
            "PIR":PIR,
            "SOS":s
        }}
    http_post(values)
def update(mac,sta):
    """Match an alarm to its sensor and upload all states to linksprite.io.

    mac: 5-char short-address string produced by alarm().
    sta: status character, '0' (clear) or '1' (alarm).
    The persisted short addresses are re-read from the *.txt files on
    every call so sensors paired by an earlier run are still recognised.
    """
    global Door_mac
    global PIR_mac
    global Leak_mac
    global Smoke_mac
    global Remote_mac
    global s
    global door
    global PIR
    global Leak
    global Smoke
    global Remote
    # Best-effort reload of each sensor's saved address; a missing file
    # simply means that sensor type was never paired.
    try:
        f = open('door.txt','r')
        Door_mac=f.read()
        f.close()
    except IOError:
        pass
    try:
        f = open('pir.txt','r')
        PIR_mac=f.read()
        f.close()
    except IOError:
        pass
    try:
        f = open('leak.txt','r')
        Leak_mac=f.read()
        f.close()
    except IOError:
        pass
    try:
        f = open('smoke.txt','r')
        Smoke_mac=f.read()
        f.close()
    except IOError:
        pass
    try:
        f = open('remote.txt','r')
        Remote_mac=f.read()
        f.close()
    except IOError:
        pass
    # Match the sender against the known sensors.
    if mac == Door_mac:
        door = sta
    elif mac == PIR_mac:
        PIR = sta
    elif mac == Leak_mac:
        Leak = sta
    elif mac == Smoke_mac:
        Smoke = sta
    elif mac == Remote_mac:
        Remote = sta
        # The remote press doubles as the SOS flag.
        # NOTE(review): indentation reconstructed from a flattened source —
        # confirm this "if" belongs only to the remote branch.
        if sta == '1':
            s = sta
    else:
        print "You should add the equipment first"
    values ={
        "action":"update",
        "apikey":apikey,
        "deviceid":deviceID,
        "params":
        {
            "Door":door,
            "PIR":PIR,
            "Leak":Leak,
            "Smoke":Smoke,
            "Remote":Remote,
            "SOS":s
        }}
    http_post(values)
    # SOS and PIR are momentary: schedule the automatic reset in 2 s.
    if s == '1'or PIR == '1':
        timer = threading.Timer(2,recovery)
        timer.start()
def main():
    """Main loop: pair new devices on key press and relay sensor alarms."""
    global Door_mac
    global PIR_mac
    global Leak_mac
    global Smoke_mac
    global Remote_mac
    setup()
    if ser.isOpen() == True:
        print "serial open succeed!"
    else:
        print "serial open failure!"
    while True:
        # If check the GPIO12's status, if it is high, excuete commands to
        # add new zigbee device into zigbee gateway
        a = key_interrupt()
        if a == '1':
            print "Add equipment!"
            # Set gateway to allow adding device
            val=register()
            # register() returns "<short addr> <mac>" as one hex string.
            short = val[0:5]
            print "short:"+short
            mac = val[6:29]
            print "mac:"+mac
            # Get the gateway MAC address
            gatmac=gateway_mac()
            print "gatewaymac:"+gatmac
            # Configure the communication with zigbee device
            set_target(short)
            # Bind the zigbee device
            bind(mac,gatmac)
            # Read the zone type to check the type of zigbee device
            # which can identify the alarm information from different zigbee sensor.
            zone_type=cluster()
            print "zone_type:"+zone_type
            # Persist the short address per sensor type so update() can
            # recognise the device again after a restart.
            if zone_type == "15 00":
                Door_mac = short
                f = open('door.txt','w')
                f.write(short)
                f.close()
                report()
            elif zone_type == "0d 00":
                PIR_mac = short
                f=open('pir.txt','w')
                f.write(short)
                f.close()
                report()
            elif zone_type == "2a 00":
                Leak_mac = short
                f=open('leak.txt','w')
                f.write(short)
                f.close()
                report()
            elif zone_type == "28 00":
                Smoke_mac = short
                f=open('smoke.txt','w')
                f.write(short)
                f.close()
                report()
            elif zone_type == "11 00":
                Remote_mac = short
                f=open('remote.txt','w')
                f.write(short)
                f.close()
                report()
        # Check the alarm information from zigbee sensor node
        data=alarm()
        if data != -1:
            short_mac = data[0:5]
            print"short mac:"+short_mac
            status = data[5]
            print"status:"+status
            # upload the alarm information to linksprite.io server
            update(short_mac,status)
        time.sleep(0.2)
if __name__=='__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: release the serial port before exiting.
        ser.close()
|
MBARIMike/biofloat
|
biofloat/converters.py
|
# -*- coding: utf-8 -*-
# Module containing functions for converting biofloat DataFrames to other formats
from collections import OrderedDict
def to_odv(df, odv_file_name, vars=None):
    '''Output biofloat DataFrame in Ocean Data View spreadsheet format to
    file named odv_file_name. Pass in a OrderedDict named vars to override
    the default variable list of TEMP_ADJUSTED, PSAL_ADJUSTED, DOXY_ADJUSTED.

    df is expected to be indexed by (cruise, time, lon, lat, station, depth)
    tuples, with one column per variable name in vars.
    '''
    # ODV wants a bottom depth; profiling floats don't report one, so a
    # fixed placeholder value is written for every record.
    fixed_bot_depth = 4000.0
    if not vars:
        vars = OrderedDict([
            ('TEMP_ADJUSTED', 'degree_Celsius'),
            ('PSAL_ADJUSTED', 'psu'),
            ('DOXY_ADJUSTED', 'micromole/kg'),
        ])

    header_base = ('Cruise\tStation\tType\tmon/day/yr\thh:mm\t'
                   'Lon (degrees_east)\t' 'Lat (degrees_north)\t'
                   'Bot. Depth [m]\tDEPTH [m]\tQF\t')
    # Fix: use .items() instead of the Python-2-only .iteritems() so the
    # module also runs under Python 3; column order is preserved because
    # vars is an OrderedDict.
    header_vars = '\t'.join([('{} [{}]\tQF').format(v, u)
                             for v, u in vars.items()])
    fmt_base = '{}\t' * 10
    header = header_base + header_vars + '\n'

    with open(odv_file_name, 'w') as odv:
        odv.write(header)
        for i, r in df.iterrows():
            # Index layout: (cruise, datetime, lon, lat, station, depth).
            rec_base = [i[0], i[4], 'C', i[1].strftime('%m/%d/%Y'),
                        i[1].strftime('%H:%M'), i[2], i[3], fixed_bot_depth,
                        i[5], 0]
            # Every variable gets a value plus a QF column of 0.
            rec_vars = '\t'.join([('{:f}\t0').format(r[v])
                                  for v in vars.keys()])
            odv.write(fmt_base.format(*rec_base) + rec_vars + '\n')
|
VirusTotal/content
|
Packs/CofenseTriage/Scripts/CofenseTriageThreatEnrichment/CofenseTriageThreatEnrichment.py
|
from CommonServerPython import *
''' STANDALONE FUNCTION '''
def get_threat_indicator_list(args: Dict[str, Any]) -> list:
    """
    Executes cofense-threat-indicator-list command for given arguments.

    :type args: ``Dict[str, Any]``
    :param args: The script arguments provided by the user.

    :return: List of responses.
    :rtype: ``list``
    """
    # The integration command enriches the supplied threat value; the raw
    # command results are handed back to the caller untouched.
    command_args = {'threat_value': f"{args.get('threat_value')}"}
    return execute_command('cofense-threat-indicator-list',
                           command_args,
                           extract_contents=False)
''' MAIN FUNCTION '''
def main():
    # Run the enrichment and surface any failure to the XSOAR war room.
    try:
        return_results(get_threat_indicator_list(demisto.args()))
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute CofenseTriageThreatEnrichment. Error: {str(e)}')
''' ENTRY POINT '''
# Runs both as a standalone script and inside the XSOAR script runner,
# where __name__ can be '__builtin__' (Py2) or 'builtins' (Py3).
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
lidingpku/open-conference-data
|
iswc-metadata/src/mu/lib_entity.py
|
"""
syntax
entity ::= {id(utc millisecond), type, id_from, id_to, status, data, source, note}
data example1: search wikipedia
{ "id":1378327851001,
"type":"name-name",
"id-from":"MIT",
"id-to":"Massachusetts Institute of Technology",
"status":"auto",
"date":"2013-09-04",
"source":"wikipedia+dbpedia"
}
data example2: search dbpedia
{ "id":1378327851002,
"type":"name-uri",
"id-from":"Massachusetts Institute of Technology",
"id-to":"http://dbpedia.org/resource/Massachusetts_Institute_of_Technology",
"status":"auto",
"date":"2013-09-04",
"source":"dbpedia"
}
data example3: manual assert
{ "id":1378327851003,
"type":"name-name",
"id-from":"Mit",
"id-to":"MIT",
"status":"auto",
"date":"2013-09-04",
"source":"man"
}
data
* map_data id_from > type > record
* list_new [record]
* list_fail [id_from] -- avoid retry
* map_type_lookup type > lookup-function input: id_from; output:{type>record}
operation
* load(dir_name, entity_type)
pass1: remove obsoleted relation r1 where r1.id_from=r2.id_from and r1.created<r2.created
alternatively, use a hashtable with keys {type, id-from}
* find(id_from, type, recursive=False)
* add(record)
notes
* persistent storage in csv file
* records is ordered by created
1. api
2. web test
3. algorithm develop
"""
class DataNamedEntity(object):
    """CSV-backed store of named-entity records (one file per entity type).

    NOTE(review): the original class did not compile — a ``for`` loop was
    missing its ':', ``add_new_data`` had a comments-only body
    (SyntaxError), ``__log__`` called ``print("...",format(...))`` (comma
    instead of dot), and ``os`` was never imported.  This revision restores
    syntactic validity while keeping the intended structure.  UtilCsv,
    UnicodeWriter, UtilJson, UtilString, EntityPerson and DbpediaApi are
    project helpers defined elsewhere.
    """

    ENTITY_TYPE_ORGANIZATION = "organisation"
    ENTITY_TYPE_PERSON = "person"
    ENTITY_TYPE_PLACE = "place"

    # LIST_HEADER = ["altLabel","title","subtitle", "uri","source","status","redirects","disambiguates","matched_entity_type"]
    LIST_HEADER = ["altLabel", "title", "subtitle", "uri", "source", "status",
                   "redirects", "disambiguates", "matched_entity_type",
                   "row_type"]

    def __log__(self, msg):
        # Bug fix: was ``print("[{}]{}",format(type(self),msg))``, which
        # printed the raw template plus a stray formatted string.
        print("[{}]{}".format(type(self), msg))

    def __init__(self, dir_data, entity_type):
        import os  # local import: the original module never imported os

        # init config
        self.config = {
            "entity_type": entity_type,
            "fn_data": "%s/%s.csv" % (dir_data, entity_type),
            "fn_new": "%s/%s.new.csv" % (dir_data, entity_type),
        }

        # load data
        data_json = []
        if os.path.exists(self.config["fn_data"]):
            data_json = UtilCsv.csv2json(self.config["fn_data"])
            self.__log__("load {} entries from [{}]".format(
                len(data_json),
                self.config["fn_data"]))
        else:
            with open(self.config["fn_data"], 'w') as f:
                csvwriter = UnicodeWriter(f)
                # TODO(review): original referenced EntityPerson.LIST_HEADER;
                # this class's own LIST_HEADER may be what was intended.
                headers = EntityPerson.LIST_HEADER
                csvwriter.writerow(headers)

        # init internal_memory
        self.dict_name = {}
        for entry in data_json:
            # default label_type
            if not entry["label_type"]:
                entry["label_type"] = "text"
            data_person = {}
            for p in ["name", "sense", "modified"]:  # bug fix: missing ':'
                data_person[p] = entry[p]
            UtilJson.add_init_dict(
                self.dict_name[entry["label_type"]],
                entry["label_text"],
                data_person
            )

        # init new row
        self.list_new_entity = []

    def add_new_data(self, entry):
        # Bug fix: the body contained only comments, a SyntaxError.
        # Expected fields of *entry* per the original notes:
        # source_id, email, homepage, name, organization, country.
        raise NotImplementedError("add_new_data is not implemented yet")

    def write_new_data(self, filemode="w"):
        # TODO(review): original referenced DbpediaApi.LIST_HEADER; this
        # class's own LIST_HEADER may be what was intended.
        headers = DbpediaApi.LIST_HEADER
        print("{0} new mapping entries added ".format(len(self.list_new_entity)))
        # start the new data file, to be merged to original data
        with open(self.config["fn_new"], filemode) as f:
            csvwriter = UnicodeWriter(f)
            for entry in self.list_new_entity:
                row = UtilString.json2list(entry, headers)
                csvwriter.writerow(row)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.