from app import app, grabber, merge, segment
from flask import render_template, request, url_for, jsonify
import cv2
import numpy as np
import os, re
def rm(dir, pattern):
    """Remove every file in `dir` whose name matches the regex `pattern`."""
for f in os.listdir(dir):
if re.search(pattern, f):
os.remove(os.path.join(dir, f))
@app.route('/')
@app.route('/index')
def home():
return render_template('index.html')
@app.route('/grabber/', methods=['POST'])
def doGrabber():
# clean up files generated by previous runs (rm() treats its pattern as a regex)
rm('app/static/img', r'dg')
rm('app/ma_prediction_400', r'dg')
data = request.form
lat = data['lat']
lon = data['lon']
zoom = data['zoom']
with open('app/static/secrets.txt') as f: token = f.read()
# get the location from digital globe
g = grabber.Grabber('app/static/img', token,'png')
time = g.grab(lat, lon, zoom)
# 'smart' means that the image went through the neural net prediction script
smart_contours = segment.predict(time,'app/ma_prediction_400/dg%s.png'%(time), 'app/static/img/nn_dg'+time+'.png')
smart_areas = segment.get_areas(smart_contours.values())
# 'dumb' means that the segmentation ran on the original image
dumb_contours = segment.dumb_contours('app/static/img/dg'+time+'.png','app/static/img/dumy_dg'+time+'.png')
dumb_areas = segment.get_areas(dumb_contours.values())
# uses 'smart' locations to pick out contours in the 'dumb' image
buildings = merge.intersect(smart_contours, dumb_contours)
merge.mkimage('app/static/img/dg'+time+'.png','app/static/img/merge_dg'+time+'.png', buildings)
areas = segment.get_areas(buildings.values())
url_nn = url_for('static', filename='img/nn_base_dg'+time+'.png')
url_smart = url_for('static', filename='img/nn_dg'+time+'.png')
url_dumb = url_for('static', filename='img/dumy_dg'+time+'.png')
url_merge = url_for('static', filename='img/merge_dg'+time+'.png')
# # for cameron
# dumb_contours = segment.dumb_contours('app/static/img/dg'+time+'.png','app/static/img/dumy_dg'+time+'.png')
# dumb_areas = segment.get_areas(dumb_contours.values())
# areas = dumb_areas
# url_nn = ''
# url_smart = ''
# url_merge = ''
# url_dumb = url_for('static', filename='img/dumy_dg'+time+'.png')
return jsonify(url_nn=url_nn, url_smart=url_smart, url_dumb=url_dumb, url_merge=url_merge,
areas=areas
)
import re
import textwrap
__all__ = ['dumps', 'loads']
# a new item starts at any line that is not indented; indented lines
# continue the previous item's value
SPLIT_ITEMS = re.compile(r'\n(?!\s)').split
MATCH_ITEM = re.compile(r'''
(?P<key>\w+): # key
\s?
(?P<value>.*?)$ # first line
(?P<value2>.+)? # optional continuation line(s)
''', re.MULTILINE | re.DOTALL | re.VERBOSE).match
def dumps(data, comments={}):
s = ''
for k, v in data.items():
comment = comments.get(k, None)
if comment:
s += '# ' + '\n '.join(comment.splitlines()) + '\n'
value = v or ''
s += '{}: {}\n'.format(k, value.replace('\n', '\n '))
return s
def loads(serialized):
data = {}
lineno = 0
for item in SPLIT_ITEMS(serialized):
if not item.startswith('#') and item.strip():
m = MATCH_ITEM(item)
if not m:
raise ValueError('syntax error on line {}'.format(lineno + 1))
value = m.group('value')
value += textwrap.dedent(m.group('value2') or '')
data[m.group('key')] = value or None
lineno += item.count('\n') + 1
return data
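# --- Illustrative round-trip (not part of the original module) -------------
# A minimal sketch of the format defined above: one "key: value" item per
# unindented line, continuation lines indented by a single space, and lines
# starting with '#' treated as comments. The sample data is invented.
if __name__ == '__main__':
    data = {'title': 'My note', 'body': 'first line\nsecond line'}
    text = dumps(data, comments={'title': 'a short comment'})
    # text now reads:
    #   # a short comment
    #   title: My note
    #   body: first line
    #    second line
    assert loads(text) == data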
class MutableValue:
"""
Used to avoid warnings (and in future errors) from aiohttp when the app context is modified.
"""
__slots__ = 'value',
def __init__(self, value=None):
self.value = value
def change(self, new_value):
self.value = new_value
def __len__(self):
return len(self.value)
def __repr__(self):
return repr(self.value)
def __str__(self):
return str(self.value)
def __bool__(self):
return bool(self.value)
def __eq__(self, other):
return MutableValue(self.value == other)
def __add__(self, other):
return self.value + other
def __getattr__(self, item):
return getattr(self.value, item)
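# --- Illustrative usage (not part of the original module) ------------------
# A minimal sketch: every holder of the wrapper sees updates made through
# change(), because the wrapper object itself is never replaced, only its
# payload. The names below are invented.
if __name__ == '__main__':
    config = MutableValue('initial')
    alias = config                      # second reference to the same wrapper
    config.change('updated')
    assert str(alias) == 'updated'      # the alias observes the new payload
    assert alias.upper() == 'UPDATED'   # attribute access proxies to the value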
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-06 06:33
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Expense',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('expense_date', models.DateField()),
('expense_detail', models.CharField(help_text='Enter expense details', max_length=200, null=True)),
('expense_amount', models.FloatField(help_text='Enter expense amount', null=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
unlink(pyo)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_execute_bit_not_copied(self):
# Issue 6070: under posix .pyc files got their execute bit set if
# the .py file had the execute bit set, but they aren't executable.
with temp_umask(0o022):
sys.path.insert(0, os.curdir)
try:
fname = TESTFN + os.extsep + "py"
open(fname, 'w').close()
os.chmod(fname, (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
__import__(TESTFN)
fn = imp.cache_from_source(fname)
if not os.path.exists(fn):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
s = os.stat(fn)
self.assertEqual(
stat.S_IMODE(s.st_mode),
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
def test_imp_module(self):
# Verify that the imp module can correctly load and find .py files
# XXX (ncoghlan): It would be nice to use support.CleanImport
# here, but that breaks because the os module registers some
# handlers in copy_reg on import. Since CleanImport doesn't
# revert that registration, the module is left in a broken
# state after reversion. Reinitialising the module contents
# and just reverting os.environ to its previous state is an OK
# workaround
orig_path = os.path
orig_getenv = os.getenv
with EnvironmentVarGuard():
x = imp.find_module("os")
self.addCleanup(x[0].close)
new_os = imp.load_module("os", *x)
self.assertIs(os, new_os)
self.assertIs(orig_path, new_os.path)
self.assertIsNot(orig_getenv, new_os.getenv)
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc (or .pyo).
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertTrue(x is test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertTrue(y is test.support, y.__name__)
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, imp.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNot(mod, None, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertIn(ext, ('.pyc', '.pyo'))
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace.
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w.
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_import_initless_directory_warning(self):
with check_warnings(('', ImportWarning)):
# Just a random non-package directory we always expect to be
# somewhere in sys.path...
self.assertRaises(ImportError, __import__, "site-packages")
"""nox-poetry c | onfiguration file."""
from calcipy.dev.noxfile import build_check, build_dist, check_safety, coverage, tests  # noqa: F401
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.hashes import SHA256, Hash
def sha256(bytes):
digest = Hash(SHA256(), backend=default_backend())
digest.update(bytes)
return digest.finalize()
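# --- Illustrative usage (not part of the original module) ------------------
# sha256() returns the raw digest bytes, so hex-encode them for display;
# binascii.hexlify works on both Python 2 and 3. The payload is invented.
if __name__ == '__main__':
    import binascii
    print(binascii.hexlify(sha256(b"example payload")))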
removed = p['commit_set'] - v['commit_set']
added = v['commit_set'] - p['commit_set']
for c in history.get(v['sha1'], []):
if c['sha1'] in added:
v['commits'].append(dict(c))
v['commits'][-1]['removed'] = False
for c in history.get(p['sha1'], [])[::-1]:
if c['sha1'] in removed:
v['commits'].append(dict(c))
v['commits'][-1]['removed'] = True
for c in v['commits']:
c['known'] = commit_known(c)
have_displayable_commits = False
for v in versions:
if v['commits']:
have_displayable_commits = True
break
f.write('''
<!DOCTYPE html>
<html>
<head>
<title>Release notes for version {0}</title>
<meta charset="utf-8">
<script>
var versions = ["{1}"];
function toggle(version)
{{
var changes = document.getElementById("changes" + version);
if (changes != null)
changes.style.display = changes.style.display == "none" ? "block" : "none";
var link = document.getElementById("toggle" + version);
if (link != null)
link.innerHTML = link.innerHTML == "[-]" ? "[+]" : "[-]";
return false;
}}
function toggle_lower(version)
{{
if (versions.indexOf(version) == -1)
return;
var version_found = false;
var captions = document.getElementsByTagName("h3");
for (var i = 0; i < captions.length; i++) {{
var parts = captions[i].id.split("caption");
if (!parts || parts.length != 2)
continue;
var rebased = captions[i].className.search(/rebased/) != -1;
var current_version = parts[1] == version;
if (version_found) {{
captions[i].className += " old";
toggle(parts[1]);
}}
if (current_version)
version_found = true;
}}
}}
</script>
<style>
html
{{
font-family: sans-serif;
}}
h3 a
{{
font-family: monospace;
}}
h3.old
{{
color: gray;
}}
.removed
{{
text-decoration: line-through;
}}
</style>
</head>
<body>
'''.format(manifest['tag']['name'], '", "'.join(str(v['internal_version']) for v in versions)))
if have_displayable_commits:
for v in versions[::-1]:
removed_class = ' class="removed"'
extra_style = removed_class if not v['known'] else ""
expand_link = ' <a id="toggle{0}" href="#caption{0}" onclick="return toggle(\'{0}\')">[-]</a>'.format(v['internal_version']) if v['commits'] else ""
caption = '<h3 id="caption{0}"{2}>Release notes for version {1}{3}</h3>'
caption = caption.format(v['internal_version'], v['user_version'], extra_style, expand_link)
f.write(caption)
if len(v['commits']):
url = 'https://github.com/{0}/obs-studio/commit/{1}'
change_fmt = '<li><a href="{0}"{2}>(view)</a> {1}</li>'
f.write('<ul id="changes{0}">'.format(v['internal_version']))
for c in v['commits']:
extra_style = removed_class if not c['known'] else ""
text = ("<span{0}>{1}</span>" if c['removed'] else "{1}").format(removed_class, c['desc'])
url_formatted = url.format(manifest['user'], c['sha1'])
f.write(change_fmt.format(url_formatted, text, extra_style))
f.write('</ul>')
f.write('<h2>Release notes for version {0}</h2>'.format(manifest['tag']['name']))
write_tag_html(f, manifest['tag']['description'])
f.write('''
<script>
parts = window.location.href.toString().split("#");
if (parts.length == 2 && parts[1].search(/^\d+$/) == 0)
toggle_lower(parts[1]);
</script>
</body>
</html>
''')
def dump_xml(file, element):
with open(file, 'w') as f:
f.write('<?xml version="1.0" encoding="UTF-8" standalone="no"?>')
ET.ElementTree(element).write(f, encoding='utf-8', method='xml')
def create_update(package, key, manifest_file):
manifest = load_manifest(manifest_file)
channel = create_channel(manifest)
feed_ele = load_or_create_feed(channel)
history = load_or_create_history(channel)
from distutils.version import LooseVersion
if manifest['stable']:
my_version = LooseVersion(manifest['tag']['name'])
else:
my_version = LooseVersion(manifest['jenkins_build'])
versions = []
seen_versions = set()
for item in feed_ele.findall('channel/item'):
en_ele = item.find('enclosure')
internal_version = LooseVersion(en_ele.get(qn_tag('sparkle', 'version')))
user_version = en_ele.get(qn_tag('sparkle', 'shortVersionString'))
sha1 = en_ele.get(qn_tag('ce', 'sha1'))
if internal_version == my_version:
# shouldn't happen, delete
feed_ele.find('channel').remove(item)
continue
if str(internal_version) in seen_versions:
continue
seen_versions.add(str(internal_version))
versions.append({
'internal_version': internal_version,
'user_version': user_version,
'sha1': sha1
})
versions.append(dict(
internal_version = my_version,
user_version = create_version(manifest),
sha1 = manifest['sha1']
))
import StringIO
notes = StringIO.StringIO()
write_notes_html(notes, manifest, versions, history)
new_item = ET.SubElement(feed_ele.find('channel'), 'item')
populate_item(new_item, package, key, manifest, channel, 'mpkg')
new_item = ET.SubElement(feed_ele.find('channel'), 'item')
populate_item(new_item, package, key, manifest, channel, 'app')
from os import path
deploy_path = path.join('deploy', channel)
mkdir(deploy_path)
feed_ele = ET.fromstring(ET.tostring(feed_ele, encoding='utf-8', method='xml'))
dump_xml(path.join(deploy_path, 'updates.xml'), feed_ele)
with open(path.join(deploy_path, 'notes.html'), 'w') as f:
f.write(notes.getvalue())
with open(path.join(deploy_path, 'history'), 'w') as f:
import cPickle
cPickle.dump(history, f)
import shutil
shutil.copy('{0}-mpkg.zip'.format(package), path.join(deploy_path, '{0}-mpkg.zip'.format(create_version(manifest))))
shutil.copy('{0}-app.zip'.format(package), path.join(deploy_path, '{0}-app.zip'.format(create_version(manifest))))
if __name__ == "__main__":
ET.register_namespace('sparkle', 'http://www.andymatuschak.org/xml-namespaces/sparkle')
ET.register_namespace('ce', 'http://catchexception.org/xml-namespaces/ce')
import argparse
parser = argparse.ArgumentParser(description='obs-studio release util')
parser.add_argument('-m', '--manifest', dest='manifest', default='manifest')
parser.add_argument('-p', '--package', dest='package', default='OBS')
parser.add_argument('-k', '--key', dest='key')
args = parser.parse_args()
create_update(args.package, args.key, args.manifest)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from neutronclient.neutron.v2_0.vpn import ipsecpolicy
from neutronclient.tests.unit import test_cli20
class CLITestV20VpnIpsecPolicyJSON(test_cli20.CLITestV20Base):
def test_create_ipsecpolicy_all_params(self):
"""vpn-ipsecpolicy-create all params with dashes."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
description = 'first-ipsecpolicy1'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-256'
encapsulation_mode = 'tunnel'
pfs = 'group5'
transform_protocol = 'ah'
tenant_id = 'my-tenant'
my_id = 'my-id'
lifetime = 'units=seconds,value=20000'
args = [name,
'--description', description,
'--tenant-id', tenant_id,
'--auth-algorithm', auth_algorithm,
'--encryption-algorithm', encryption_algorithm,
'--transform-protocol', transform_protocol,
'--encapsulation-mode', encapsulation_mode,
'--lifetime', lifetime,
'--pfs', pfs]
position_names = ['name', 'auth_algorithm', 'encryption_algorithm',
'encapsulation_mode', 'description',
'transform_protocol', 'pfs',
'tenant_id']
position_values = [name, auth_algorithm, encryption_algorithm,
encapsulation_mode, description,
transform_protocol, pfs,
tenant_id]
extra_body = {
'lifetime': {
'units': 'seconds',
'value': 20000,
},
}
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
extra_body=extra_body)
def test_create_ipsecpolicy_with_limited_params(self):
"""vpn-ipsecpolicy-create with limited params."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-128'
encapsulation_mode = 'tunnel'
pfs = 'group5'
transform_protocol = 'esp'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = [name,
'--tenant-id', tenant_id]
position_names = ['name', 'auth_algorithm', 'encryption_algorithm',
'encapsulation_mode',
'transform_protocol', 'pfs',
'tenant_id']
position_values = [name, auth_algorithm, encryption_algorithm,
encapsulation_mode,
transform_protocol, pfs,
tenant_id]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values)
def _test_lifetime_values(self, lifetime):
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
description = 'my-ipsec-policy'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-256'
ike_version = 'v1'
phase1_negotiation_mode = 'main'
pfs = 'group5'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = [name,
'--description', description,
'--tenant-id', tenant_id,
'--auth-algorithm', auth_algorithm,
'--encryption-algorithm', encryption_algorithm,
'--ike-version', ike_version,
'--phase1-negotiation-mode', phase1_negotiation_mode,
'--lifetime', lifetime,
'--pfs', pfs]
position_names = ['name', 'description',
'auth_algorithm', 'encryption_algorithm',
'phase1_negotiation_mode',
'ike_version', 'pfs',
'tenant_id']
position_values = [name, description,
auth_algorithm, encryption_algorithm,
phase1_negotiation_mode, ike_version, pfs,
tenant_id]
try:
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values)
except Exception:
return
self.fail("IPsecPolicy Lifetime Error")
def test_create_ipsecpolicy_with_invalid_lifetime_keys(self):
lifetime = 'uts=seconds,val=20000'
self._test_lifetime_values(lifetime)
def test_create_ipsecpolicy_with_invalid_lifetime_values(self):
lifetime = 'units=minutes,value=0'
self._test_lifetime_values(lifetime)
def test_list_ipsecpolicy(self):
"""vpn-ipsecpolicy-list."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_ipsecpolicy_pagination(self):
"""vpn-ipsecpolicy-list."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ipsecpolicy_sort(self):
"""vpn-ipsecpolicy-list --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ipsecpolicy_limit(self):
"""vpn-ipsecpolicy-list -P."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_ipsecpolicy_id(self):
"""vpn-ipsecpolicy-show ipsecpolicy_id."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.ShowIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_ipsecpolicy_id_name(self):
"""vpn-ipsecpolicy-show."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.ShowIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_update_ipsecpolicy(self):
"""vpn-ipsecpolicy-update myid --name newname --tags a b."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.UpdateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'newname'],
{'name': 'newname', })
def test_delete_ipsecpolicy(self):
"""vpn-ipsecpolicy-delete my-id."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.DeleteIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
class CLITestV20VpnIpsecPolicyXML(CLITestV20VpnIpsecPolicyJSON):
    format = 'xml'
# -*- coding: utf-8 -*-
"""Reusable mixins for SQLAlchemy declarative models."""
from __future__ import unicode_literals
import datetime
import sqlalchemy as sa
class Timestamps(object):
created = sa.Column(
sa.DateTime,
default=datetime.datetime.utcnow,
server_default=sa.func.now(),
nullable=False,
)
updated = sa.Column(
sa.DateTime,
server_default=sa.func.now(),
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
nullable=False,
)
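# --- Illustrative usage (not part of the original module) ------------------
# A minimal sketch: SQLAlchemy copies mixin columns onto each mapped
# subclass, so the invented Widget model below gains created/updated
# alongside its own columns.
if __name__ == '__main__':
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Widget(Timestamps, Base):
        __tablename__ = 'widget'
        id = sa.Column(sa.Integer, primary_key=True)

    assert {'created', 'updated'} <= {c.name for c in Widget.__table__.columns}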
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify output when a Progress() call is initialized with the list
that represents a canonical "spinner" on the output.
"""
import os
import TestSCons
test = TestSCons.TestSCons(universal_newlines=None)
test.write('SConstruct', r"""
env = Environment()
env['BUILDERS']['C'] = Builder(action=Copy('$TARGET', '$SOURCE'))
Progress(['-\r', '\\\r', '|\r', '/\r'])
env.C('S1.out', 'S1.in')
env.C('S2.out', 'S2.in')
env.C('S3.out', 'S3.in')
env.C('S4.out', 'S4.in')
""")
test.write('S1.in', "S1.in\n")
test.write('S2.in', "S2.in\n")
test.write('S3.in', "S3.in\n")
test.write('S4.in', "S4.in\n")
expect = """\
\\\r|\rCopy("S1.out", "S1.in")
/\r-\rCopy("S2.out", "S2.in")
\\\r|\rCopy("S3.out", "S3.in")
/\r-\rCopy("S4.out", "S4.in")
\\\r|\r"""
if os.linesep != '\n':
expect = expect.replace('\n', os.linesep)
test.run(arguments = '-Q .', stdout=expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
#!/usr/bin/env python
#
# This is run by Travis-CI before an upgrade to load some data into the
# database. After the upgrade is complete, the data is verified by
# upgrade-after.py to make sure that the upgrade of the database went smoothly.
#
import logging
import unittest
import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../pynipap')
sys.path.insert(0, '../nipap')
sys.path.insert(0, '../nipap-cli')
from nipap.backend import Nipap
from nipap.authlib import SqliteAuth
from nipap.nipapconfig import NipapConfig
from pynipap import AuthOptions, VRF, Pool, Prefix, NipapNonExistentError, NipapDuplicateError, NipapValueError
import pynipap
pynipap.xmlrpc_uri = 'http://unittest:gottatest@127.0.0.1:1337'
o = AuthOptions({
'authoritative_source': 'nipap'
})
class TestHelper:
@classmethod
def clear_database(cls):
cfg = NipapConfig('/etc/nipap/nipap.conf')
n = Nipap()
# have to delete hosts before we can delete the rest
n._execute("DELETE FROM ip_net_plan WHERE masklen(prefix) = 32")
# the rest
n._execute("DELETE FROM ip_net_plan")
# delete all except for the default VRF with id 0
n._execute("DELETE FROM ip_net_vrf WHERE id > 0")
# set default info for VRF 0
n._execute("UPDATE ip_net_vrf SET name = 'default', description = 'The default VRF, typically the Internet.' WHERE id = 0")
n._execute("DELETE FROM ip_net_pool")
n._execute("DELET | E FROM ip_net_asn")
def add_prefix(self, prefix, type, description, tags=None):
if tags is None:
tags = []
p = Prefix()
p.prefix = prefix
p.type = type
p.description = description
p.tags = tags
p.save()
return p
class TestLoad(unittest.TestCase):
""" Load some data into the database
"""
def test_load_data(self):
"""
"""
th = TestHelper()
p1 = th.add_prefix('192.168.0.0/16', 'reservation', 'test')
p2 = th.add_prefix('192.168.0.0/20', 'reservation', 'test')
p3 = th.add_prefix('192.168.0.0/24', 'reservation', 'test')
p4 = th.add_prefix('192.168.1.0/24', 'reservation', 'test')
p5 = th.add_prefix('192.168.2.0/24', 'reservation', 'test')
p6 = th.add_prefix('192.168.32.0/20', 'reservation', 'test')
p7 = th.add_prefix('192.168.32.0/24', 'reservation', 'test')
p8 = th.add_prefix('192.168.32.1/32', 'reservation', 'test')
ps1 = th.add_prefix('2001:db8:1::/48', 'reservation', 'test')
ps2 = th.add_prefix('2001:db8:1::/64', 'reservation', 'test')
ps3 = th.add_prefix('2001:db8:2::/48', 'reservation', 'test')
pool1 = Pool()
pool1.name = 'upgrade-test'
pool1.ipv4_default_prefix_length = 31
pool1.ipv6_default_prefix_length = 112
pool1.save()
p2.pool = pool1
p2.save()
ps1.pool = pool1
ps1.save()
pool2 = Pool()
pool2.name = 'upgrade-test2'
pool2.save()
vrf1 = VRF()
vrf1.name = 'foo'
vrf1.rt = '123:123'
vrf1.save()
if __name__ == '__main__':
# set up logging
log = logging.getLogger()
logging.basicConfig()
log.setLevel(logging.INFO)
if sys.version_info >= (2,7):
unittest.main(verbosity=2)
else:
unittest.main()
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
"""
_, data = yield self.transport.perform_request(
'GET', _make_path(index, doc_type, id), params=params)
raise gen.Return(data)
@gen.coroutine
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'local')
def get_alias(self, index=None, name=None, params=None):
"""
Retrieve a specified alias.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_
:arg index: A comma-separated list of index names to filter aliases
:arg name: A comma-separated list of alias names to return
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both., default 'all',
valid choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg local: Return local information, do not retrieve the state from
master node (default: false)
"""
_, result = yield self.transport.perform_request(
'GET', _make_path(index, '_alias', name), params=params)
raise gen.Return(result)
@gen.coroutine
@query_params('_source_exclude', '_source_include', 'parent', 'preference',
'realtime', 'refresh', 'routing')
def get_source(self, index, id, doc_type='_all', params=None):
"""
Get the source of a document by its index, type and id.
`<http://elasticsearch.org/guide/reference/api/get/>`_
:arg index: The name of the index
:arg doc_type: The type of the document (uses `_all` by default to
fetch the first document matching the ID across all types)
:arg id: The document ID
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
"""
_, data = yield self.transport.perform_request(
'GET', _make_path(index, doc_type, id, '_source'), params=params)
raise gen.Return(data)
@gen.coroutine
@query_params('_source', '_source_exclude', '_source_include', 'fields',
'parent', 'preference', 'realtime', 'refresh', 'routing')
def mget(self, body, index=None, doc_type=None, params=None):
"""
Get multiple documents based on an index, type (optional) and ids.
`<http://elasticsearch.org/guide/reference/api/multi-get/>`_
:arg body: Document identifiers; can be either `docs` (containing full
document information) or `ids` (when index and type are provided
in the URL).
:arg index: The name of the index
:arg doc_type: The type of the document
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg fields: A comma-separated list of fields to return in the response
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
"""
_, data = yield self.transport.perform_request(
'GET', _make_path(index, doc_type, '_mget'),
params=params, body=body)
raise gen.Return(data)
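    # Illustrative body shapes for mget, matching the docstring above
    # (index/type/id values are invented):
    #   {"docs": [{"_index": "my-index", "_type": "my-type", "_id": "1"},
    #             {"_index": "my-index", "_type": "my-type", "_id": "2"}]}
    # or, when index and doc_type are supplied in the URL:
    #   {"ids": ["1", "2"]}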
@gen.coroutine
@query_params('consistency', 'fields', 'lang', 'parent', 'percolate',
'refresh', 'replication', 'retry_on_conflict', 'routing',
'script', 'timeout', 'timestamp', 'ttl', 'version',
'version_type')
def update(self, index, doc_type, id, body=None, params=None):
"""
Update a document based on a script or partial data provided.
`<http://elasticsearch.org/guide/reference/api/update/>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: Document ID
:arg body: The request definition using either `script` or partial `doc`
:arg consistency: Explicit write consistency setting for the operation
:arg fields: A comma-separated list of fields to return in the response
:arg lang: The script language (default: mvel)
:arg parent: ID of the parent document
:arg percolate: Perform percolation during the operation; use specific
registered query name, attribute, or wildcard
:arg refresh: Refresh the index after performing the operation
:arg replication: Specific replication type (default: sync)
:arg retry_on_conflict: Specify how many times should the operation be
retried when a conflict occurs (default: 0)
:arg routing: Specific routing value
:arg script: The URL-encoded script definition (instead of using
request body)
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type
"""
_, data = yield self.transport.perform_request('POST',
_make_path(index,
doc_type, id,
'_update'),
params=params, body=body)
raise gen.Return(data)
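    # Illustrative update bodies matching the docstring above (field names
    # and the counter script are invented):
    #   {"doc": {"status": "published"}}           # partial-document merge
    #   {"script": "ctx._source.counter += 1"}     # scripted update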
@gen.coroutine
@query_params('_source', '_source_exclude', '_source_include',
'analyze_wildcard', 'analyzer', 'default_operator', 'df',
'explain', 'fields', 'ignore_indices', 'indices_boost',
'lenient', 'lowercase_expanded_terms', 'from_', 'preference',
'q', 'routing', 'scroll', 'search_type', 'size', 'sort',
'source', 'stats', 'suggest_field', 'suggest_mode',
'suggest_size', 'suggest_text', 'timeout', 'version')
def search(self, index=None, doc_type=None, body=None, params=None):
"""
Execute a search query and get back search hits that match the query.
`<http://www.elasticsearch.org/guide/reference/api/search/>`_
:arg index: A comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg doc_type: A comma-separated list of document types to search;
leave empty to perform the operation on all types
:arg body: The search definition using the Query DSL
:arg _source: True or false to return the _source field or not, or a
list of fields to return
from pulp.bindings import auth, consumer, consumer_groups, repo_groups, repository
from pulp.bindings.actions import ActionsAPI
from pulp.bindings.content import OrphanContentAPI, ContentSourceAPI, ContentCatalogAPI
from pulp.bindings.event_listeners import EventListenerAPI
from pulp.bindings.server_info import ServerInfoAPI, ServerStatusAPI
from pulp.bindings.tasks import TasksAPI, TaskSearchAPI
from pulp.bindings.upload import UploadAPI
class Bindings(object):
def __init__(self, pulp_connection):
"""
@type pulp_connection: pulp.bindings.server.PulpConnection
"""
# Please keep the following in alphabetical order to ease reading
self.actions = ActionsAPI(pulp_connection)
self.bind = consumer.BindingsAPI(pulp_connection)
self.bindings = consumer.BindingSearchAPI(pulp_connection)
self.profile = consumer.ProfilesAPI(pulp_connection)
self.consumer = consumer.ConsumerAPI(pulp_connection)
self.consumer_content = consumer.ConsumerContentAPI(pulp_connection)
self.consumer_content_schedules = consumer.ConsumerContentSchedulesAPI(pulp_connection)
self.consumer_group = consumer_groups.ConsumerGroupAPI(pulp_connection)
self.consumer_group_search = consumer_groups.ConsumerGroupSearchAPI(pulp_connection)
self.consumer_group_actions = consumer_groups.ConsumerGroupActionAPI(pulp_connection)
self.consumer_group_bind = consumer_groups.ConsumerGroupBindAPI(pulp_connection)
self.consumer_group_content = consumer_groups.ConsumerGroupContentAPI(pulp_connection)
self.consumer_history = consumer.ConsumerHistoryAPI(pulp_connection)
self.consumer_search = consumer.ConsumerSearchAPI(pulp_connection)
self.content_orphan = OrphanContentAPI(pulp_connection)
self.content_source = ContentSourceAPI(pulp_connection)
self.content_catalog = ContentCatalogAPI(pulp_connection)
self.event_listener = EventListenerAPI(pulp_connection)
self.permission = auth.PermissionAPI(pulp_connection)
self.repo = repository.RepositoryAPI(pulp_connection)
self.repo_actions = repository.RepositoryActionsAPI(pulp_connection)
self.repo_distributor = repository.RepositoryDistributorAPI(pulp_connection)
self.repo_group = repo_groups.RepoGroupAPI(pulp_connection)
self.repo_group_actions = repo_groups.RepoGroupActionAPI(pulp_connection)
self.repo_group_distributor = repo_groups.RepoGroupDistributorAPI(pulp_connection)
self.repo_group_distributor_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
self.repo_group_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
self.repo_history = repository.RepositoryHistoryAPI(pulp_connection)
self.repo_importer = repository.RepositoryImporterAPI(pulp_connection)
self.repo_publish_schedules = repository.RepositoryPublishSchedulesAPI(pulp_connection)
self.repo_search = repository.RepositorySearchAPI(pulp_connection)
self.repo_sync_schedules = repository.RepositorySyncSchedulesAPI(pulp_connection)
self.repo_unit = repository.RepositoryUnitAPI(pulp_connection)
self.role = auth.RoleAPI(pulp_connection)
self.server_info = ServerInfoAPI(pulp_connection)
self.server_status = ServerStatusAPI(pulp_connection)
self.tasks = TasksAPI(pulp_connection)
self.tasks_search = TaskSearchAPI(pulp_connection)
self.uploads = UploadAPI(pulp_connection)
self.user = auth.UserAPI(pulp_connection)
self.user_search = auth.UserSearchAPI(pulp_connection)
import mock
from django.utils import timezone
from rest_framework.test import APIRequestFactory
from elections.api.next.api_views import BallotViewSet
class TestBallotViewSet:
def test_get_queryset_last_updated_ordered_by_modified(self):
factory = APIRequestFactory()
timestamp = timezone.now().isoformat()
request = factory.get("/next/ballots/", {"last_updated": timestamp})
request.query_params = request.GET
view = BallotViewSet(request=request)
view.queryset = mock.MagicMock()
view.get_queryset()
view.queryset.with_last_updated.assert_called_once()
def test_get_queryset_last_updated_not_ordered(self):
factory = APIRequestFactory()
request = factory.get("/next/ballots/")
request.query_params = request.GET
view = BallotViewSet(request=request)
view.queryset = mock.MagicMock()
view.get_queryset()
view.queryset.with_last_updated.assert_not_called()
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('WebPage')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import os
import web
from viz import Viz
import WebPage
import WebHandler
class WebHandlerDyn(WebHandler.WebHandler):
def getPage(self,subResource,username):
return self.getPageDyn(dynPath=self.getDynPath(),
subResource=subResource,
username=username)
def getData(self,subResource,username):
return self.getDataDyn(dynPath=self.getDynPath(),
subResource=subResource,
username=username)
def postData(self,receivedData,subResource,username):
return self.postDataDyn(receivedData=receivedData,
dynPath=self.getDynPath(),
subResource=subResource,
username=username)
def getDynPath(self):
elems = WebPage.WebPage.urlStringTolist(web.ctx.path)
for e in elems:
if e.startswith('_'):
return e[1:]
class WebPageDyn(WebPage.WebPage):
def __init__(self,subPageLister=None,
subPageHandler=None,
**fvars):
assert callable(subPageLister)
# store params
self.subPageLister = subPageLister
self.subPageHandler = subPageHandler
# initialize parent class
WebPage.WebPage.__init__(self,**fvars)
# register subPageHandler
self.registerPage(WebPage.WebPage(webServer = self.webServer,
url = '_[.%%\w-]*',
title = '',
webHandler = self.subPageHandler))
def getUrlHierarchy(self,parentPath=[]):
# run the parent class' function
returnVal = WebPage.WebPage.getUrlHierarchy(self,parentPath)
# modify the children
returnVal['children'] = []
for sub in self.subPageLister():
classUrl = parentPath+[self.url]+[sub['url']]
if len(classUrl) and not classUrl[0]:
classUrl = classUrl[1:]
returnVal['children'] += [
{
'url': self.urlListToString(parentPath+[self.url]+['_'+sub['url']]),
'title': sub['title'],
'class': self.webServer.getDocumentation().getClass(classUrl),
'children': [],
}
]
return returnVal
# REST API Backend for the Radiocontrol Project
#
# Copyright (C) 2017 Stefan Derkits <stefan@derkits.at>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
# Register your models here.
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test # noqa
from tempest_lib.common.utils import data_utils # noqa
from tempest_lib import exceptions as lib_exc # noqa
from manila_tempest_tests import clients_share as clients
from manila_tempest_tests.tests.api import base
class ShareTypesAdminNegativeTest(base.BaseSharesAdminTest):
def _create_share_type(self):
name = data_utils.rand_name("unique_st_name")
extra_specs = self.add_required_extra_specs_to_dict({"key": "value"})
return self.create_share_type(name, extra_specs=extra_specs)
@classmethod
def resource_setup(cls):
super(ShareTypesAdminNegativeTest, cls).resource_setup()
cls.member_shares_client = clients.Manager().shares_client
@test.attr(type=["gate", "smoke", ])
def test_create_share_with_nonexistent_share_type(self):
self.assertRaises(lib_exc.NotFound,
self.create_share,
share_type_id=data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_create_share_type_with_empty_name(self):
self.assertRaises(lib_exc.BadRequest, self.create_share_type, '')
@test.attr(type=["gate", "smoke", ])
def test_create_share_type_with_too_big_name(self):
self.assertRaises(lib_exc.BadRequest,
self.create_share_type,
"x" * 256)
@test.attr(type=["gate", "smoke", ])
def test_get_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.get_share_type,
data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_delete_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.delete_share_type,
data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_create_duplicate_of_share_type(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.create_share_type,
st["share_type"]["name"],
extra_specs=self.add_required_extra_specs_to_dict())
@test.attr(type=["gate", "smoke", ])
def test_add_share_type_allowed_for_public(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.shares_client.add_access_to_share_type,
st["share_type"]["id"],
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_remove_share_type_allowed_for_public(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.shares_client.remove_access_from_share_type,
st["share_type"]["id"],
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_add_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.add_access_to_share_type,
data_utils.rand_name("fake"),
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_remove_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.remove_access_from_share_type,
data_utils.rand_name("fake"),
self.shares_client.tenant_id)
"""
Tests for CourseData utility class.
"""
from __future__ import absolute_import
import six
from mock import patch
from lms.djangoapps.course_blocks.api import get_course_blocks
from openedx.core.djangoapps.content.block_structure.api import get_course_in_cache
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..course_data import CourseData
class CourseDataTest(ModuleStoreTestCase):
"""
Simple tests to ensure CourseData works as advertised.
"""
def setUp(self):
super(CourseDataTest, self).setUp()
with self.store.default_store(ModuleStoreEnum.Type.split):
self.course = CourseFactory.create()
# need to re-retrieve the course since the version on the original course isn't accurate.
self.course = self.store.get_course(self.course.id)
self.user = UserFactory.create()
self.collected_structure = get_course_in_cache(self.course.id)
self.one_true_structure = get_course_blocks(
self.user, self.course.location, collected_block_structure=self.collected_structure,
)
self.expected_results = {
'course': self.course,
'collected_block_structure': self.collected_structure,
'structure': self.one_true_structure,
'course_key': self.course.id,
'location': self.course.location,
}
@patch('lms.djangoapps.grades.course_data.get_course_blocks')
def test_fill_course_data(self, mock_get_blocks):
"""
Tests to ensure that course data is fully filled with just a single input.
"""
mock_get_blocks.return_value = self.one_true_structure
for kwarg in self.expected_results: # We iterate instead of ddt due to dependence on 'self'
if kwarg == 'location':
continue # This property is purely output; it's never able to be used as input
kwargs = {kwarg: self.expected_results[kwarg]}
course_data = CourseData(self.user, **kwargs)
for arg in self.expected_results:
# No point validating the data we used as input, and c_b_s is input-only
if arg != kwarg and arg != "collected_block_structure":
expected = self.expected_results[arg]
actual = getattr(course_data, arg)
self.assertEqual(expected, actual)
def test_properties(self):
expected_edited_on = getattr(
self.one_true_structure[self.one_true_structure.root_block_usage_key],
'subtree_edited_on',
)
for kwargs in [
dict(course=self.course),
dict(collected_block_structure=self.one_true_structure),
dict(structure=self.one_true_structure),
dict(course_key=self.course.id),
]:
course_data = CourseData(self.user, **kwargs)
self.assertEquals(course_data.course_key, self.course.id)
self.assertEquals(course_data.location, self.course.location)
self.assertEquals(course_data.structure.root_block_usage_key, self.one_true_structure.root_block_usage_key)
self.assertEquals(course_data.course.id, self.course.id)
self.assertEquals(course_data.version, self.course.course_version)
self.assertEquals(course_data.edited_on, expected_edited_on)
self.assertIn(u'Course: course_key', six.text_type(course_data))
self.assertIn(u'Course: course_key', course_data.full_string())
def test_no_data(self):
with self.assertRaises(ValueError):
_ = CourseData(self.user)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_full_string(self):
empty_structure = get_course_blocks(self.user, self.course.location)
self.assertFalse(empty_structure)
# full_string retrieves value from collected_structure when structure is empty.
course_data = CourseData(
self.user, structure=empty_structure, collected_block_structure=self.collected_structure,
)
self.assertIn(u'Course: course_key: {}, version:'.format(self.course.id), course_data.full_string())
# full_string returns minimal value when structures aren't readily available.
course_data = CourseData(self.user, course_key=self.course.id)
self.assertIn(u'empty course structure', course_data.full_string())
# Copyright 2014 Sebastien Maccagnoni-Munch
#
# This file is part of Calaos Web Installer.
#
# Calaos Web Installer is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Calaos Web Installer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Calaos Web Installer. If not, see <http://www.gnu.org/licenses/>.
import os.path
import pickle
class CalaosApi:
def __init__(self, io, rules):
self.io_path = io
self.rules_path = rules
self.readfiles()
def readfiles(self):
if os.path.exists(self.io_path):
self.io = pickle.load(file(self.io_path))
else:
self.io = []
if os.path.exists(self.rules_path):
self.rules = pickle.load(file(self.rules_path))
else:
self.rules = []
def get_config(self):
return {
'io': self.io,
'rules': self.rules
}
def writefiles(self):
pickle.dump(self.io, file(self.io_path, 'w'))
pickle.dump(self.rules, file(self.rules_path, 'w'))
"""
Deployment file to facilitate releases of pymatgen.
Note that this file is meant to be run from the root directory of the pymatgen
repo.
"""
__author__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "Sep 1, 2014"
import glob
import os
import json
import webbrowser
import requests
import re
import subprocess
from fabric.api import local, lcd
from pymatgen import __version__ as ver
def make_doc():
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split("\-{3,}", contents)
n = len(toks[0].split()[-1])
changes = [toks[0]]
changes.append("\n" + "\n".join(toks[1].strip().split("\n")[0:-1]))
changes = ("-" * n).join(changes)
with open("docs/latest_changes.rst", "w") as f:
f.write(changes)
with lcd("examples"):
local("ipython nbconvert --to html *.ipynb")
local("mv *.html ../docs/_static")
with lcd("docs"):
local("cp ../CHANGES.rst change_log.rst")
local("sphinx-apidoc -d 6 -o . -f ../pymatgen")
local("rm pymatgen.*.tests.rst")
for f in glob.glob("docs/*.rst"):
if f.startswith('docs/pymatgen') and f.endswith('rst'):
newoutput = []
suboutput = []
subpackage = False
with open(f, 'r') as fid:
for line in fid:
clean = line.strip()
if clean == "Subpackages":
subpackage = True
if not subpackage and not clean.endswith("tests"):
newoutput.append(line)
else:
if not clean.endswith("tests"):
suboutput.append(line)
if clean.startswith("pymatgen") and not clean.endswith("tests"):
newoutput.extend(suboutput)
subpackage = False
suboutput = []
with open(f, 'w') as fid:
fid.write("".join(newoutput))
local("make html")
local("cp _static/* _build/html/_static")
# This makes sure pymatgen.org works to redirect to the GitHub page
local("echo \"pymatgen.org\" > _build/html/CNAME")
# Avoid the use of jekyll so that _dir works as intended.
local("touch _build/html/.nojekyll")
def publish():
local("python setup.py release")
def setver():
local("sed s/version=.*,/version=\\\"{}\\\",/ setup.py > newsetup"
.format(ver))
local("mv newsetup setup.py")
def update_doc():
make_doc()
with lcd("docs/_build/html/"):
local("git add .")
local("git commit -a -m \"Update dev docs\"")
local("git push origin gh-pages")
def merge_stable():
local("git commit -a -m \"v%s release\"" % ver)
local("git push")
local("git checkout stable")
local("git pull")
local("git merge master")
local("git push")
local("git checkout master")
def release_github():
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split("\-+", contents)
desc = toks[1].strip()
payload = {
"tag_name": "v" + ver,
"target_commitish": "master",
"name": "v" + ver,
"body": desc,
"draft": False,
"prerelease": False
}
response = requests.post(
"https://api.github.com/repos/materialsproject/pymatgen/releases",
data=json.dumps(payload),
headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]})
print response.text
def update_changelog():
output = subprocess.check_output(["git", "log", "--pretty=format:%s",
"v%s..HEAD" % ver])
lines = ["* " + l for l in output.strip().split("\n")]
with open("CHANGES.rst") as f:
contents = f.read()
toks = contents.split("==========")
toks.insert(-1, "\n\n" + "\n".join(lines))
with open("CHANGES.rst", "w") as f:
f.write("==========".join(toks))
def log_ver():
filepath = os.path.join(os.environ["HOME"], "Dropbox", "Public",
"pymatgen", ver)
with open(filepath, "w") as f:
f.write("Release")
def release(skip_test=False):
setver()
if not skip_test:
local("nosetests")
publish()
log_ver()
update_doc()
merge_stable()
release_github()
def open_doc():
pth = os.path.abspath("docs/_build/html/index.html")
webbrowser.open("file://" + pth)
v\xedk</option>
<option value="113">113 Reykjav\xedk</option>
<option value="116">116 Kjalarnes</option>
<option value="121">121 Reykjav\xedk</option>
<option value="123">123 Reykjav\xedk</option>
<option value="124">124 Reykjav\xedk</option>
<option value="125">125 Reykjav\xedk</option>
<option value="127">127 Reykjav\xedk</option>
<option value="128">128 Reykjav\xedk</option>
<option value="129">129 Reykjav\xedk</option>
<option value="130">130 Reykjav\xedk</option>
<option value="132">132 Reykjav\xedk</option>
<option value="150">150 Reykjav\xedk</option>
<option value="155">155 Reykjav\xedk</option>
<option value="170">170 Seltjarnarnes</option>
<option value="172">172 Seltjarnarnes</option>
<option value="190">190 Vogar</option>
<option value="200">200 K\xf3pavogur</option>
<option value="201">201 K\xf3pavogur</option>
<option value="202">202 K\xf3pavogur</option>
<option value="203">203 K\xf3pavogur</option>
<option value="210">210 Gar\xf0ab\xe6r</option>
<option value="212">212 Gar\xf0ab\xe6r</option>
<option value="220">220 Hafnarfj\xf6r\xf0ur</option>
<option value="221">221 Hafnarfj\xf6r\xf0ur</option>
<option value="222">222 Hafnarfj\xf6r\xf0ur</option>
<option value="225">225 \xc1lftanes</option>
<option value="230">230 Reykjanesb\xe6r</option>
<option value="232">232 Reykjanesb\xe6r</option>
<option value="233">233 Reykjanesb\xe6r</option>
<option value="235">235 Keflav\xedkurflugv\xf6llur</option>
<option value="240">240 Grindav\xedk</option>
<option value="245">245 Sandger\xf0i</option>
<option value="250">250 Gar\xf0ur</option>
<option value="260">260 Reykjanesb\xe6r</option>
<option value="270">270 Mosfellsb\xe6r</option>
<option value="271">271 Mosfellsb\xe6r</option>
<option value="276">276 Mosfellsb\xe6r</option>
<option value="300">300 Akranes</option>
<option value="301">301 Akranes</option>
<option value="302">302 Akranes</option>
<option value="310">310 Borgarnes</option>
<option value="311">311 Borgarnes</option>
<option value="320">320 Reykholt \xed Borgarfir\xf0i</option>
<option value="340">340 Stykkish\xf3lmur</option>
<option value="345">345 Flatey \xe1 Brei\xf0afir\xf0i</option>
<option value="350">350 Grundarfj\xf6r\xf0ur</option>
<option value="355">355 \xd3lafsv\xedk</option>
<option value="356">356 Sn\xe6fellsb\xe6r</option>
<option value="360">360 Hellissandur</option>
<option value="370">370 B\xfa\xf0ardalur</option>
<option value="371">371 B\xfa\xf0ardalur</option>
<option value="380">380 Reykh\xf3lahreppur</option>
<option value="400">400 \xcdsafj\xf6r\xf0ur</option>
<option value="401">401 \xcdsafj\xf6r\xf0ur</option>
<option value="410">410 Hn\xedfsdalur</option>
<option value="415">415 Bolungarv\xedk</option>
<option value="420">420 S\xfa\xf0av\xedk</option>
<option value="425">425 Flateyri</option>
<option value="430">430 Su\xf0ureyri</option>
<option value="450">450 Patreksfj\xf6r\xf0ur</option>
<option value="451">451 Patreksfj\xf6r\xf0ur</option>
<option value="460">460 T\xe1lknafj\xf6r\xf0ur</option>
<option value="465">465 B\xedldudalur</option>
<option value="470">470 \xdeingeyri</option>
<option value="471">471 \xdeingeyri</option>
<option value="500">500 Sta\xf0ur</option>
<option value="510">510 H\xf3lmav\xedk</option>
<option value="512">512 H\xf3lmav\xedk</option>
<option value="520">520 Drangsnes</option>
<option value="522">522 Kj\xf6rvogur</option>
<option value="523">523 B\xe6r</option>
<option value="524">524 Nor\xf0urfj\xf6r\xf0ur</option>
<option value="530">530 Hvammstangi</option>
<option value="531">531 Hvammstangi</option>
<option value="540">540 Bl\xf6ndu\xf3s</option>
<option value="541">541 Bl\xf6ndu\xf3s</option>
<option value="545">545 Skagastr\xf6nd</option>
<option value="550">550 Sau\xf0\xe1rkr\xf3kur</option>
<option value="551">551 Sau\xf0\xe1rkr\xf3kur</option>
<option value="560">560 Varmahl\xed\xf0</option>
<option value="565">565 Hofs\xf3s</option>
<option value="566">566 Hofs\xf3s</option>
<option value="570">570 Flj\xf3t</option>
<option value="580">580 Siglufj\xf6r\xf0ur</option>
<option value="600">600 Akureyri</option>
<option value="601">601 Akureyri</option>
<option value="602">602 Akureyri</option>
<option value="603">603 Akureyri</option>
<option value="610">610 Greniv\xedk</option>
<option value="611">611 Gr\xedmsey</option>
<option value="620">620 Dalv\xedk</option>
<option value="621">621 Dalv\xedk</option>
<option value="625">625 \xd3lafsfj\xf6r\xf0ur</option>
<option value="630">630 Hr\xedsey</option>
<option value="640">640 H\xfasav\xedk</option>
<option value="641">641 H\xfasav\xedk | </option>
<option value="645">645 Fossh\xf3ll</option>
<option value="650">650 Laugar</option>
<option value="660">660 M\xfdvatn</option>
<option value="670">670 K\xf3pasker</option>
<option value="671">671 K\xf3pasker</option>
<option value="675">675 Raufarh\xf6fn</option>
<option value="680">680 \xde\xf3rsh\xf6fn</option>
<option value="681">681 \xde\xf3rsh\xf6fn</option>
<option value="685">685 Bakkafj\xf6r\xf0ur</option>
<option value="690">690 Vopnafj\xf6r\xf0ur</option>
<option value="700">700 Egilssta\xf0ir</option>
<option value="701">701 Egilssta\xf0ir</option>
<option value="710">710 Sey\xf0isfj\xf6r\xf0ur</option>
<option value="715">715 Mj\xf3ifj\xf6r\xf0ur</option>
<option value="720">720 Borgarfj\xf6r\xf0ur eystri</option>
<option value="730">730 Rey\xf0arfj\xf6r\xf0ur</option>
<option value="735">735 Eskifj\xf6r\xf0ur</option>
<option value="740">740 Neskaupsta\xf0ur</option>
<option value="750">750 F\xe1skr\xfa\xf0sfj\xf6r\xf0ur</option>
<option value="755">755 St\xf6\xf0varfj\xf6r\xf0ur</option>
<option value="760">760 Brei\xf0dalsv\xedk</option>
<option value="765">765 Dj\xfapivogur</option>
<option value="780">780 H\xf6fn \xed Hornafir\xf0i</option>
<option value="781">781 H\xf6fn \xed Hornafir\xf0i</option>
<option value="785">785 \xd6r\xe6fi</option>
<option value="800">800 Selfoss</option>
<option value="801">801 Selfoss</option>
<option value="802">802 Selfoss</option>
<option value="810">810 Hverager\xf0i</option>
<option value="815">815 \xdeorl\xe1ksh\xf6fn</option>
<option value="816">816 \xd6lfus</option>
<option value="820">820 Eyrarbakki</option>
<option value="825">825 Stokkseyri</option>
<option value="840">840 Laugarvatn</option>
<option value="845">845 Fl\xfa\xf0ir</option>
<option value="850">850 Hella</option>
<option value="851">851 Hella</option>
<option value="860">860 Hvolsv\xf6llur</option>
<option value="861">861 Hvolsv\xf6llur</option>
<option value="870">870 V\xedk</option>
<option value="871">871 V\xedk</option>
<option value="880">880 Kirkjub\xe6jarklaustur</option>
<option value="900">900 Vestmannaeyjar</option>
<option value="902">902 Vestmannaeyjar</option>
</select>'''
self.assertHTMLEqual(f.render('foo', 'bar'), out)
def test_ISIdNumberField(self):
error_atleast = ['Ensure this value has at least 10 characters (it has 9).']
error_invalid = ['Enter a valid Icelandic identification number. The format is XXXXXX-XXXX.']
error_atmost = ['Ensure this value has at most 11 characters (it has 12).']
error_notvalid = ['The Icelandic identification number is not valid.']
valid = {
'2308803449': '230880-3449',
'230880-3449': '230880-3449',
'230880 3449': '230880-3449',
'2308803440': '230880-3440',
}
invalid = {
'230880343': error_atleast + error_invalid,
'230880343234': error_atmost + error_invalid,
'abcdefghijk': error_invalid,
'2308803439': error_notvalid,
}
self.assertFieldOutput(ISIdNumberField, valid, invalid)
def test_ISPhoneNumberField(self):
error_invalid = ['Enter a valid value.']
error_atleast = ['Ensure this value has at least 7 characters (it has 6).']
error_atmost = ['Ensure this value has at most 8 characters (it has 9).']
valid = {
'1234567': '1234567',
'123 4567': '1234567',
'123-4567': '1234567',
}
invalid = {
'123-456': error_invalid,
'123456': error_atleast + error_invalid,
            '123456555': error_atmost + error_invalid,
        }
        self.assertFieldOutput(ISPhoneNumberField, valid, invalid)
import tkinter as tk
from tkinter.filedialog import askdirectory
from tkinter.messagebox import showwarning, showerror, showinfo
from tkinter import ttk
import logging
import sys
from threading import Thread
from spider_board.client import Browser
from spider_board.utils import time_job, LOG_FILE, get_logger, humansize
# Create the logging handlers and attach them
logger = get_logger(__name__, LOG_FILE)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
class Gui:
def __init__(self):
logger.info('Instantiating GUI')
self.root = tk.Tk()
self.browser = None
self.make_gui()
def make_gui(self):
logger.info('Building GUI')
self.main_frame = ttk.Frame(self.root)
self.main_frame.pack(expand=True, fill=tk.BOTH, pady=10, padx=10)
# Make the username label and box
ttk.Label(self.main_frame, text='Username:').grid(row=0, column=2)
self.username = tk.StringVar()
self.username_box = ttk.Entry(self.main_frame,
textvariable=self.username)
self.username_box.grid(row=0, column=3, sticky='nsew')
# Make the password label and box
ttk.Label(self.main_frame, text='Password:').grid(row=1, column=2)
self.password = tk.StringVar()
self.password_box = ttk.Entry(self.main_frame,
textvariable=self.password)
self.password_box.grid(row=1, column=3, sticky='nsew')
# Make the savefile label and box
self.savefile_btn = ttk.Button(self.main_frame, text='Browse',
command=self.ask_find_directory)
self.savefile_btn.grid(row=2, column=2)
self.savefile = tk.StringVar()
self.savefile_box = ttk.Entry(self.main_frame,
textvariable=self.savefile)
self.savefile_box.grid(row=2, column=3, sticky='nsew')
# Set up the column weightings
self.main_frame.columnconfigure(3, weight=1)
self.main_frame.columnconfigure(0, weight=5)
self.main_frame.rowconfigure(3, weight=1)
# Make the listbox (and scrollbar) for selecting units
self.unit_box = tk.Listbox(self.main_frame, relief=tk.SUNKEN,
selectmode=tk.EXTENDED)
self.unit_box.grid(row=0, column=0,
rowspan=5, columnspan=2,
sticky='nsew')
scrollbar = tk.Scrollbar(self.main_frame)
scrollbar.config(command=self.unit_box.yview)
self.unit_box.config(yscrollcommand=scrollbar.set)
scrollbar.grid(row=0, column=1, rowspan=5, sticky='nsew')
# Make the "login" button
self.go_button = ttk.Button(self.main_frame, text='Login',
command=self.login)
self.go_button.grid(row=4, column=2, sticky='es')
# Make the "start downloading" button
self.go_button = ttk.Button(self.main_frame, text='Start Downloading',
command=self.start_downloading)
self.go_button.grid(row=4, column=3, sticky='es')
def login(self):
logger.info('Login button pressed')
username = self.username.get()
password = self.password.get()
savefile = self.savefile.get()
# Check all required fields are filled in
if username and password and savefile:
logger.info('Attempting login')
self.browser = Browser(username, password, savefile)
self.bootstrap_browser(self.browser)
# Do the login in a different thread
Thread(target=self.browser.login).start()
else:
showwarning('Ok', 'Please fill in all necessary fields.')
            logger.warning("Required fields haven't been filled in")
def start_downloading(self):
logger.info('Download button pressed')
if self.browser and self.browser.is_logged_in:
self.browser.spider_concurrent()
self.browser.download_concurrent()
else:
logger.info('Not logged in')
showerror('Ok', 'Not logged in')
def ask_find_directory(self):
save_location = askdirectory()
self.savefile.set(save_location)
def mainloop(self):
self.root.mainloop()
def quit(self):
        self.root.destroy()
def update_units(self):
self.unit_box.delete(0, tk.END)
for unit in self.browser.units:
self.unit_box.insert(tk.END, unit.title)
self.root.after(1000, self.update_units)
def bootstrap_browser(self, browser):
"""
Add in any hooks to the browser so they will be run on certain events.
"""
def on_quit(browser_instance, gui):
"""Close the GUI"""
gui.quit()
def on_login_successful(browser_instance, gui):
"""Fire off an info dialog and get units (in another thread)"""
# Thread(target=browser_instance.get_units).start()
gui.root.after(0, showinfo, 'Ok', 'Login Successful')
def on_login_failed(browser_instance, gui):
"""Fire off an error dialog"""
showerror('Ok', 'Login Unsuccessful')
def on_get_units(browser_instance, gui):
gui.root.after(0, gui.update_units)
hooks = [on_quit, on_login_successful, on_login_failed,
on_get_units]
# Do the actual bootstrapping
for hook in hooks:
            # Bind ``hook`` via a default argument so each callback keeps its
            # own hook; a plain closure would late-bind to the last hook in the list.
            callback = lambda browser_instance, hook=hook: hook(browser_instance, self)
setattr(browser, hook.__name__, callback)
browser.on_login_failed(self)
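        # With the hooks bound above, the Browser signals the GUI by calling
        # e.g. ``browser.on_login_successful(browser)`` from its worker thread;
        # each hook then schedules the real UI work on the Tk event loop.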
#!/usr/bin/env python
import sys
sys.path.append('/var/www/html/modules/libraries')
import avahi
import dbus
from time import sleep
import mysql.connector
file = open('/var/www/html/config.php', 'r')
for line in file:
if "db_name" in line: MySQL_database = line.split('"')[3]
elif "db_user" in line: MySQL_username = line.split('"')[3]
elif "db_password" in line: MySQL_password = line.split('"')[3]
cnx = mysql.connector.connect(user=MySQL_username,password=MySQL_password,database=MySQL_database)
cursor = cnx.cursor()
query = ("SELECT Setting,value FROM Settings")
cursor.execute(query)
for (Setting, value) in cursor:
if Setting == "MQTT_ip_address":
MQTT_ip_address = value
cursor.close()
cnx.close()
class ServiceAnnouncer:
def __init__(self, name, service, port, txt):
bus = dbus.SystemBus()
        server = dbus.Interface(bus.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
group = dbus.Interface(bus.get_object(avahi.DBUS_NAME, server.EntryGroupNew()),
avahi.DBUS_INTERFACE_ENTRY_GROUP)
self._service_name = name
index = 1
while True:
try:
group.AddService(avahi.IF_UNSPEC, avahi.PROTO_INET, 0, self._service_name, service, '', '', port, avahi.string_array_to_txt_array(txt))
except dbus.DBusException: # name collision -> rename
index += 1
self._service_name = '%s #%s' % (name, str(index))
else:
break
group.Commit()
def get_service_name(self):
return self._service_name
if __name__ == '__main__':
announcer = ServiceAnnouncer(MQTT_ip_address, '_irulez._tcp.', 80,'')
    print(announcer.get_service_name())
sleep(10000)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from tempest.lib import exceptions
from neutronclient.tests.functional import base
class SimpleReadOnlyNeutronClientTest(base.ClientTestBase):
"""This is a first pass at a simple read only python-neutronclient test.
This only exercises client commands that are read only.
This should test commands:
* as a regular user
* as a admin user
* with and without optional parameters
* initially just check return codes, and later test command outputs
"""
def test_admin_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-neutron-exist')
# NOTE(mestery): Commands in order listed in 'neutron help'
# Optional arguments:
def test_neutron_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-not-exist')
def test_neutron_net_list(self):
net_list = self.parser.listing(self.neutron('net-list'))
self.assertTableStruct(net_list, ['id', 'name', 'subnets'])
def test_neutron_ext_list(self):
ext = self.parser.listing(self.neutron('ext-list'))
self.assertTableStruct(ext, ['alias', 'name'])
def test_neutron_dhcp_agent_list_hosting_net(self):
self.neutron('dhcp-agent-list-hosting-net',
params='private')
def test_neutron_agent_list(self):
agents = self.parser.listing(self.neutron('agent-list'))
field_names = ['id', 'agent_type', 'host', 'alive', 'admin_state_up']
self.assertTableStruct(agents, field_names)
def test_neutron_floatingip_list(self):
self.neutron('floatingip-list')
def test_neutron_meter_label_list(self):
self.neutron('meter-label-list')
def test_neutron_meter_label_rule_list(self):
self.neutron('meter-label-rule-list')
def _test_neutron_lbaas_command(self, command):
try:
self.neutron(command)
except exceptions.CommandFailed as e:
if '404 Not Found' not in e.stderr:
self.fail('%s: Unexpected failure.' % command)
def test_neutron_lb_healthmonitor_list(self):
self._test_neutron_lbaas_command('lb-healthmonitor-list')
def test_neutron_lb_member_list(self):
self._test_neutron_lbaas_command('lb-member-list')
def test_neutron_lb_pool_list(self):
self._test_neutron_lbaas_command('lb-pool-list')
def test_neutron_lb_vip_list(self):
self._test_neutron_lbaas_command('lb-vip-list')
def test_neutron_net_external_list(self):
net_ext_list = self.parser.listing(self.neutron('net-external-list'))
self.assertTableStruct(net_ext_list, ['id', 'name', 'subnets'])
def test_neutron_port_list(self):
port_list = self.parser.listing(self.neutron('port-list'))
self.assertTableStruct(port_list, ['id', 'name', 'mac_address',
'fixed_ips'])
def test_neutron_quota_list(self):
self.neutron('quota-list')
def test_neutron_router_list(self):
router_list = self.parser.listing(self.neutron('router-list'))
self.assertTableStruct(router_list, ['id', 'name',
'external_gateway_info'])
def test_neutron_security_group_list(self):
security_grp = self.parser.listing(self.neutron('security-group-list'))
self.assertTableStruct(security_grp, ['id', 'name',
'security_group_rules'])
def test_neutron_security_group_rule_list(self):
security_grp = self.parser.listing(self.neutron
('security-group-rule-list'))
self.assertTableStruct(security_grp, ['id', 'security_group',
                                           'direction', 'ethertype',
'port/protocol', 'remote'])
def test_neutron_subnet_list(self):
subnet_list = self.parser.listing(self.neutron('subnet-list'))
        self.assertTableStruct(subnet_list, ['id', 'name', 'cidr',
'allocation_pools'])
def test_neutron_firewall_list(self):
firewall_list = self.parser.listing(self.neutron
('firewall-list'))
self.assertTableStruct(firewall_list, ['id', 'name',
'firewall_policy_id'])
def test_neutron_firewall_policy_list(self):
firewall_policy = self.parser.listing(self.neutron
('firewall-policy-list'))
self.assertTableStruct(firewall_policy, ['id', 'name',
'firewall_rules'])
def test_neutron_firewall_rule_list(self):
firewall_rule = self.parser.listing(self.neutron
('firewall-rule-list'))
self.assertTableStruct(firewall_rule, ['id', 'name',
'firewall_policy_id',
'summary', 'enabled'])
def test_neutron_help(self):
help_text = self.neutron('help')
lines = help_text.split('\n')
self.assertFirstLineStartsWith(lines, 'usage: neutron')
commands = []
cmds_start = lines.index('Commands for API v2.0:')
        command_pattern = re.compile(r'^ {2}([a-z0-9\-\_]+)')
for line in lines[cmds_start:]:
match = command_pattern.match(line)
if match:
commands.append(match.group(1))
commands = set(commands)
wanted_commands = set(('net-create', 'subnet-list', 'port-delete',
'router-show', 'agent-update', 'help'))
self.assertFalse(wanted_commands - commands)
# Optional arguments:
def test_neutron_version(self):
self.neutron('', flags='--version')
def test_neutron_debug_net_list(self):
self.neutron('net-list', flags='--debug')
def test_neutron_quiet_net_list(self):
self.neutron('net-list', flags='--quiet')
from OctaHomeCore.OctaFiles.urls.base import *
from OctaHomeTempControl.views import *
class TempControlOctaUrls(OctaUrls):
@classmethod
def getUrls(cls):
return [
url(r'^TempControl/command/(?P<command>\w+)/$', handleTempCommand.as_view(), name='TempControlCommandWithOutDevice'),
            url(r'^TempControl/command/(?P<command>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlCommand'),
url(r'^TempControl/command/(?P<command>\w+)/(?P<house>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlCommand'),
            url(r'^TempControl/command/(?P<command>\w+)/(?P<house>\w+)/(?P<room>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlCommand'),
url(r'^TempControl/page/(?P<page>\w+)/$', handleTempView.as_view(), name='TempControlPage'),
url(r'^TempControl/page/(?P<page>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlPage'),
url(r'^TempControl/page/(?P<house>\w+)/(?P<page>\w+)/$', handleTempView.as_view(), name='TempControlPage'),
url(r'^TempControl/page/(?P<house>\w+)/(?P<room>\w+)/(?P<page>\w+)/$', handleTempView.as_view(), name='TempControlPage'),
url(r'^TempControl/$', handleTempView.as_view(), name='TempControl'),
url(r'^TempControl/(?P<house>\w+)/$', handleTempView.as_view(), name='TempControl'),
url(r'^TempControl/(?P<house>\w+)/(?P<room>\w+)/$', handleTempView.as_view(), name='TempControl'),
url(r'^TempControl/(?P<house>\w+)/(?P<room>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempView.as_view(), name='TempControl'),
]
self.assertEqual(expected_ret, ret, "Not expected return")
def test_rc_send_success(self):
methodname = 'POST'
url = '/controller/dc/esdk/v2.0/test_url'
expected_resp = {'errorCode': u'0', 'reason': None,
'response': 'ok', 'status': 204}
with mock.patch.object(self.restc,
                               'process_request',
                               return_value=self._mock_req_resp
(requests.codes.no_content)):
ret = ac_rest.RestClient().send(self.host, self.port,
methodname, url,
hex(10),
test_create_network_req)
self.assertEqual(expected_resp, ret, "Not expected response")
def test_rc_send_del_network(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
expected_resp = {'errorCode': None, 'reason': None,
'response': None, 'status': 200}
resp = self._mock_req_resp(requests.codes.ok)
resp.content = ""
with mock.patch.object(self.restc, 'process_request',
return_value=resp):
ret = ac_rest.RestClient().send(self.host, self.port,
methodname, url,
hex(10),
test_create_network_req)
self.assertEqual(expected_resp, ret, "Not expected response")
def test_rc_send_del_network_resp_valid(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
expected_resp = {'errorCode': None, 'reason': None,
'response': None, 'status': 300}
resp = self._mock_req_resp(requests.codes.multiple_choices)
with mock.patch.object(self.restc, 'process_request',
return_value=resp):
ret = ac_rest.RestClient().send(self.host, self.port,
methodname, url,
hex(10),
test_create_network_req)
self.assertEqual(expected_resp, ret, "Not expected response")
def test_rc_process_request(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
auth = (cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password)
headers = {'Accept': 'application/json',
'Content-type': 'application/json'}
data = {"network": {"routerExternal": False,
"id": "d897e21a-dfd6-4331-a5dd-7524fa421c3e",
"serviceName": "physnet1",
"status": "ACTIVE",
"shared": False,
"adminStateUp": True,
"tenant_id": "test-tenant",
"segmentationId": None,
"physicalNetwork": None,
"networkType": "local",
"name": "net1"}}
resp = self._mock_req_resp(requests.codes.no_content)
kwargs = {'url': url, 'data': data}
with mock.patch('requests.request',
return_value=resp) as mock_method:
ac_rest.RestClient().process_request(methodname, auth,
url, headers,
data)
mock_method.\
assert_called_once_with(
methodname,
headers={'Content-type':
'application/json',
'Accept':
'application/json'},
timeout=float(cfg.CONF.
huawei_ac_config.
request_timeout),
verify=False,
auth=(cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password),
**kwargs)
def test_rc_process_request_timeout_exception(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
auth = (cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password)
headers = {'Accept': 'application/json',
'Content-type': 'application/json'}
data = {"network": {"routerExternal": False,
"id": "d897e21a-dfd6-4331-a5dd-7524fa421c3e",
"serviceName": "physnet1",
"status": "ACTIVE",
"shared": False,
"adminStateUp": True,
"tenant_id": "test-tenant",
"segmentationId": None,
"physicalNetwork": None,
"networkType": "local",
"name": "net1"}}
resp = self._mock_req_resp(requests.codes.no_content)
kwargs = {'url': url, 'data': data}
with mock.patch('requests.request',
return_value=resp) as mock_method:
mock_method.side_effect = requests.exceptions.\
Timeout(mock.Mock(msg="Timeout Exceptions"))
ac_rest.RestClient().\
process_request(methodname, auth, url, headers, data)
mock_method.\
assert_any_call(methodname,
headers={'Content-type':
'application/json',
'Accept':
'application/json'},
timeout=float(cfg.CONF.
huawei_ac_config.
request_timeout),
verify=False,
auth=(cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password),
**kwargs)
def test_rc_process_request_exception(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
auth = (cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password)
headers = {'Accept': 'application/json',
'Content-type': 'application/json'}
data = {"network": {"routerExternal": False,
"id": "d897e21a-dfd6-4331-a5dd-7524fa421c3e",
"serviceName": "physnet1",
"status": "ACTIVE",
"shared": False,
"adminStateUp": True,
"tenant_id": "test-tenant",
"segmentationId": None,
"physicalNetwork": None,
"networkType": "local",
"name": "net1"}}
resp = self._mock_req_resp(requests.codes.no_content)
kwargs = {'url': url, 'data': data}
with mock.patch('requests.request',
return_value=resp) as mock_method:
mock_method.side_effect = Exception(mock.Mock(msg="Timeout "
"Exceptions"))
ac_rest.RestClient().process_request(methodname, auth,
url,
headers, data)
mock_method.\
assert_any_call(methodname,
headers={'Content-type':
'application/json',
'Accept':
'application/json'},
timeout=float(cfg.CONF.
                                            huawei_ac_config.
                                            request_timeout),
                            verify=False,
                            auth=(cfg.CONF.huawei_ac_config.username,
                                  cfg.CONF.huawei_ac_config.password),
                            **kwargs)
import unittest
from graph_diff.graph import rnr_graph, lr_node
from graph_diff.graph.graph_with_repetitive_nodes_exceptions import GraphWithRepetitiveNodesKeyError
class GraphWithRepetitiveNodesWithRootTest(unittest.TestCase):
def setUp(self):
self.test_graph = rnr_graph()
def test_add_node(self):
self.assertFalse(lr_node(1, 1) in self.test_graph)
self.test_graph.add_node(lr_node(1, 1))
self.assertTrue(lr_node(1, 1) in self.test_graph)
def test_add_edge(self):
self.assertFalse(lr_node(1, 1) in self.test_graph)
        self.assertFalse(lr_node(1, 2) in self.test_graph)
self.test_graph.add_edge(lr_node(1, 1), lr_node(1, 2))
self.assertTrue(lr_node(1, 1) in self.test_graph)
self.assertTrue(lr_node(1, 2) in self.test_graph)
def test_add_edge_exp(self):
self.assertFalse(lr_node(1, 1) in self.test_graph)
self.assertFalse(lr_node(1, 2) in self.test_graph)
self.assertRaises(GraphWithRepetitiveNodesKeyError,
self.test_graph.add_edge_exp,
                          lr_node(1, 1),
lr_node(1, 2))
if __name__ == '__main__':
unittest.main()
from setuptools import setup
__version__ = "0.5.0"
# Get the long description by reading the README
try:
readme_content = open("README.md").read()
except IOError:
readme_content = ""
# Create the actual setup method
setup(name='pypred',
version=__version__,
description='A Python library for simple evaluation of natural language predicates',
long_description=readme_content,
author='Armon Dadgar',
author_email='armon@kiip.me',
maintainer='Armon Dadgar',
maintainer_email='armon@kiip.me',
url="https://github.com/armon/pypred/",
license="MIT License",
      keywords=["python", "predicate", "natural language"],
packages=['pypred'],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating | System :: POSIX",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Libraries"
],
install_requires=["ply>=3.4"]
)
summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.audio'])
def audio(name, tensor, sample_rate, max_outputs=3, collections=None,
family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/audio/0', '*name*/audio/1', etc
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
      which controls the tab name used for display on TensorBoard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family=family, values=[tensor]) as (tag, scope):
sample_rate = _ops.convert_to_tensor(
sample_rate, dtype=_dtypes.float32, name='sample_rate')
val = _gen_logging_ops.audio_summary_v2(
tag=tag, tensor=tensor, max_outputs=max_outputs,
sample_rate=sample_rate, name=scope)
    _summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
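# A minimal graph-mode usage sketch for the audio summary above (illustrative
# names; assumes this module is exposed as ``tf.summary`` in TF 1.x):
#   waveform = tf.placeholder(tf.float32, shape=[None, 16000])  # [batch, frames]
#   audio_summ = tf.summary.audio('speech', waveform, sample_rate=16000.0)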
@tf_export(v1=['summary.text'])
def text(name, tensor, collections=None):
"""Summarizes textual data.
Text data summarized via this plugin will be visible in the Text Dashboard
  in TensorBoard. The standard TensorBoard Text Dashboard will render markdown
in the strings, and will automatically organize 1d and 2d tensors into tables.
If a tensor with more than 2 dimensions is provided, a 2d subarray will be
displayed along with a warning message. (Note that this behavior is not
intrinsic to the text summary api, but rather to the default TensorBoard text
plugin.)
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: a string-type Tensor to summarize.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
Returns:
A TensorSummary op that is configured so that TensorBoard will recognize
that it contains textual data. The TensorSummary is a scalar `Tensor` of
type `string` which contains `Summary` protobufs.
Raises:
ValueError: If tensor has the wrong type.
"""
if tensor.dtype != _dtypes.string:
raise ValueError('Expected tensor %s to have dtype string, got %s' %
(tensor.name, tensor.dtype))
summary_metadata = _SummaryMetadata(
plugin_data=_SummaryMetadata.PluginData(plugin_name='text'))
t_summary = tensor_summary(
name=name,
tensor=tensor,
summary_metadata=summary_metadata,
collections=collections)
return t_summary
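# Usage sketch for the text summary above (illustrative, TF 1.x graph mode):
#   note = tf.constant('training started')
#   text_summ = tf.summary.text('notes', note)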
@tf_export(v1=['summary.tensor_summary'])
def tensor_summary(name,
tensor,
summary_description=None,
collections=None,
summary_metadata=None,
family=None,
display_name=None):
"""Outputs a `Summary` protocol buffer with a serialized tensor.proto.
Args:
name: A name for the generated node. If display_name is not set, it will
also serve as the tag name in TensorBoard. (In that case, the tag
name will inherit tf name scopes.)
tensor: A tensor of any type and shape to serialize.
summary_description: A long description of the summary sequence. Markdown
is supported.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
summary_metadata: Optional SummaryMetadata proto (which describes which
plugins may use the summary value).
family: Optional; if provided, used as the prefix of the summary tag,
which controls the name used for display on TensorBoard when
display_name is not set.
display_name: A string used to name this data in TensorBoard. If this is
not set, then the node name will be used instead.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if summary_metadata is None:
summary_metadata = _SummaryMetadata()
if summary_description is not None:
summary_metadata.summary_description = summary_description
if display_name is not None:
summary_metadata.display_name = display_name
serialized_summary_metadata = summary_metadata.SerializeToString()
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.tensor_summary_v2(
tensor=tensor,
tag=tag,
name=scope,
serialized_summary_metadata=serialized_summary_metadata)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
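# Usage sketch for tensor_summary (illustrative, TF 1.x graph mode):
#   values = tf.constant([[1.0, 2.0], [3.0, 4.0]])
#   ts = tf.summary.tensor_summary('values', values, display_name='My values')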
@tf_export(v1=['summary.merge'])
def merge(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
Raises:
RuntimeError: If called with eager mode enabled.
@compatibility(eager)
Not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
@end_compatibility
"""
# pylint: enable=line-too-long
if _context.executing_eagerly():
raise RuntimeError(
'Merging tf.summary.* ops is not compatible with eager execution. '
'Use tf.contrib.summary instead.')
if _summary_op_util.skip_summary():
return _constant_op.constant('')
name = _summary_op_util.clean_tag(name)
with _ops.name_scope(name, 'Merge', inputs):
val = _gen_logging_ops.merge_summary(inputs=inputs, name=name)
_summary_op_util.collect(val, collections, [])
return val
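# Usage sketch: merge explicit summaries, or everything registered in the
# default graph (illustrative, TF 1.x graph mode):
#   merged = tf.summary.merge([audio_summ, text_summ])
#   merged_all = tf.summary.merge_all()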
@tf_export(v1=['summary.merge_all'])
def merge_all(key=_ops.GraphKeys.SUMMARIES, scope=None, name=None):
"""Merges all summaries collected in the default graph.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
scope: Optional scope used to filter the summary ops, using `re.match`
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from th |
from os.path import split  # needed by AdminFileWidget.render
from django import forms
from django.forms.widgets import *
from django.utils.safestring import mark_safe
from madrona.analysistools.widgets import SliderWidget, DualSliderWidget
class AdminFileWidget(forms.FileInput):
"""
A FileField Widget that shows its current value if it has one.
"""
def __init__(self, attrs={}):
super(AdminFileWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
output = []
if value and hasattr(value, "name"):
filename = split(value.name)[-1]
output.append('Current File: <a href="%s" target="_blank">%s</a> : <input style="top:0px;margin-bottom:0px" type="checkbox" name="clear_%s" /> Remove </p>' % (value._get_url(), filename, name))
output.append('<p> Change:')
output.append(super(AdminFileWidget, self).render(name, value, attrs))
#output.append("</p>")
return mark_safe(u''.join(output))
class SliderWidgetWithTooltip(SliderWidget):
def __init__(self, min, max, step, id):
super(SliderWidgetWithTooltip, self).__init__(min, max, step)
self.id = id
def render(self, *args, **kwargs):
output = super(SliderWidgetWithTooltip, self).render(*args,**kwargs)
img_id = self.id
span_id = "%s_content" %self.id
#grabbing flatblock outright as including the flatblock template tag in the output html resulted in a literal output of the template tag
        from flatblocks.models import FlatBlock
try:
flatblock = str(FlatBlock.objects.get(slug=self.id).content)
        except Exception:
flatblock = ""
        output = output.replace('\n', ' <img src="/media/marco/img/info.png" id="%s" class="info" />\n' %img_id, 1)
output = output.replace('\n', ' <span id="%s" style="display: none;">%s</span>\n' %(span_id, flatblock), 1)
return mark_safe(output)
class DualSliderWidgetWithTooltip(DualSliderWidget):
def __init__(self, param1, param2, min, max, step, id):
super(DualSliderWidgetWithTooltip, self).__init__(param1, param2, min, max, step)
self.id = id
def render(self, *args, **kwargs):
output = super(DualSliderWidgetWithTooltip, self).render(*args,**kwargs)
output = output.replace('\n', '<img src="/media/marco/img/info.png" id="%s" class="info" />\n' %self.id, 1)
return mark_safe(output)
class CheckboxSelectMultipleWithObjTooltip(forms.CheckboxSelectMultiple):
def __init__(self, queryset=None, attrs=None):
super(CheckboxSelectMultipleWithObjTooltip, self).__init__(attrs)
self.queryset = queryset
self.attrs = attrs
def render(self, *args, **kwargs):
output = super(CheckboxSelectMultipleWithObjTooltip, self).render(*args,**kwargs)
for obj in self.queryset:
output = output.replace(str(obj), '%s <img src="/media/marco/img/info.png" id="info_%s" class="info" />' %(str(obj), obj.objective.short_name) )
#print output
return mark_safe(output)
class CheckboxSelectMultipleWithTooltip(forms.CheckboxSelectMultiple):
def __init__(self, queryset=None, substrate=None, attrs=None):
super(CheckboxSelectMultipleWithTooltip, self).__init__(attrs)
self.queryset = queryset
self.substrate = substrate
self.attrs = attrs
def render(self, *args, **kwargs):
output = super(CheckboxSelectMultipleWithTooltip, self).render(*args,**kwargs)
for param in self.queryset:
tidal_substrate = False
try:
if param.parameter.short_name == 'substrate' and self.substrate is None and 'tidal' in self.attrs['class']:
tidal_substrate = True
            except Exception:
pass
if param.parameter.short_name == 'substrate' and self.substrate is not None:
output = output.replace(str(param), '%s <img src="/media/marco/img/info.png" id="info_%s" class="info" />' %(str(param), self.substrate) )
elif tidal_substrate:
output = output.replace(str(param), '%s <img src="/media/marco/img/info.png" id="info_tidal_substrate" class="info" />' %(str(param)) )
else:
output = output.replace(str(param), '%s <img src="/media/marco/img/info.png" id="info_%s" class="info" />' %(str(param), param.parameter.short_name) )
#print output
return mark_safe(output)
from sympy import S, Integral, sin, cos, pi, sqrt, symbols
from sympy.physics.mechanics import (Dyadic, Particle, Point, ReferenceFrame,
RigidBody, Vector)
from sympy.physics.mechanics import (angular_momentum, dynamicsymbols,
inertia, inertia_of_point_mass,
                                     kinetic_energy, linear_momentum,
outer, potential_energy)
from sympy.physics.mechanics.functions import _mat_inv_mul
from sympy.utilities.pytest import raises
Vector.simp = True
q1, q2, q3, q4, q5 = symbols('q1 q2 q3 q4 q5')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = A.orientnew('B', 'Axis', [q2, A.x])
C = B.orientnew('C', 'Axis', [q3, B.y])
def test_inertia():
N = ReferenceFrame('N')
ixx, iyy, izz = symbols('ixx iyy izz')
ixy, iyz, izx = symbols('ixy iyz izx')
assert inertia(N, ixx, iyy, izz) == (ixx * (N.x | N.x) + iyy *
(N.y | N.y) + izz * (N.z | N.z))
assert inertia(N, 0, 0, 0) == 0 * (N.x | N.x)
assert inertia(N, ixx, iyy, izz, ixy, iyz, izx) == (ixx * (N.x | N.x) +
ixy * (N.x | N.y) + izx * (N.x | N.z) + ixy * (N.y | N.x) + iyy *
(N.y | N.y) + iyz * (N.y | N.z) + izx * (N.z | N.x) + iyz * (N.z |
N.y) + izz * (N.z | N.z))
def test_inertia_of_point_mass():
r, s, t, m = symbols('r s t m')
N = ReferenceFrame('N')
px = r * N.x
I = inertia_of_point_mass(m, px, N)
assert I == m * r**2 * (N.y | N.y) + m * r**2 * (N.z | N.z)
py = s * N.y
I = inertia_of_point_mass(m, py, N)
assert I == m * s**2 * (N.x | N.x) + m * s**2 * (N.z | N.z)
pz = t * N.z
I = inertia_of_point_mass(m, pz, N)
assert I == m * t**2 * (N.x | N.x) + m * t**2 * (N.y | N.y)
p = px + py + pz
I = inertia_of_point_mass(m, p, N)
assert I == (m * (s**2 + t**2) * (N.x | N.x) -
m * r * s * (N.x | N.y) -
m * r * t * (N.x | N.z) -
m * r * s * (N.y | N.x) +
m * (r**2 + t**2) * (N.y | N.y) -
m * s * t * (N.y | N.z) -
m * r * t * (N.z | N.x) -
m * s * t * (N.z | N.y) +
m * (r**2 + s**2) * (N.z | N.z))
def test_linear_momentum():
N = ReferenceFrame('N')
Ac = Point('Ac')
Ac.set_vel(N, 25 * N.y)
I = outer(N.x, N.x)
A = RigidBody('A', Ac, N, 20, (I, Ac))
P = Point('P')
Pa = Particle('Pa', P, 1)
Pa.point.set_vel(N, 10 * N.x)
assert linear_momentum(N, A, Pa) == 10 * N.x + 500 * N.y
def test_angular_momentum_and_linear_momentum():
m, M, l1 = symbols('m M l1')
q1d = dynamicsymbols('q1d')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, q1d * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
assert linear_momentum(
N, A, Pa) == 2 * m * q1d* l1 * N.y + M * l1 * q1d * N.y
assert angular_momentum(
O, N, A, Pa) == 4 * m * q1d * l1**2 * N.z + q1d * N.z
def test_kinetic_energy():
m, M, l1 = symbols('m M l1')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
    Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
assert 0 == kinetic_energy(N, Pa, A) - (M*l1**2*omega**2/2
+ 2*l1**2*m*omega**2 + omega**2/2)
def test_potential_energy():
m, M, l1, g, h, H = symbols('m M l1 g h H')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
Pa.set_potential_energy(m * g * h)
A.set_potential_energy(M * g * H)
assert potential_energy(A, Pa) == m * g * h + M * g * H
def test_mat_inv_mul():
# Uses SymPy generated primes as matrix entries, so each entry in
# each matrix should be symbolic and unique, allowing proper comparison.
# Checks _mat_inv_mul against Matrix.inv / Matrix.__mul__.
from sympy import Matrix, prime
# going to form 3 matrices
# 1 n x n
# different n x n
# 1 n x 2n
n = 3
m1 = Matrix(n, n, lambda i, j: prime(i * n + j + 2))
m2 = Matrix(n, n, lambda i, j: prime(i * n + j + 5))
m3 = Matrix(n, n, lambda i, j: prime(i + j * n + 2))
assert _mat_inv_mul(m1, m2) == m1.inv() * m2
assert _mat_inv_mul(m1, m3) == m1.inv() * m3
'''Find valid tags and usernames.
The file will contain things like:
tag:12345:romance
'''
import gzip
import re
import requests
import string
import sys
import time
import random
DEFAULT_HEADERS = {'User-Agent': 'ArchiveTeam'}
class FetchError(Exception):
    '''Custom error class when fetching does not meet our expectation.'''
def main():
# Take the program arguments given to this script
    # Normal programs use 'argparse' but this keeps things simple
start_num = int(sys.argv[1])
end_num = int(sys.argv[2])
output_filename = sys.argv[3] # this should be something like myfile.txt.gz
assert start_num <= end_num
print('Starting', start_num, end_num)
gzip_file = gzip.GzipFile(output_filename, 'wb')
for shortcode in check_range(start_num, end_num):
# Write the valid result one per line to the file
line = '{0}\n'.format(shortcode)
gzip_file.write(line.encode('ascii'))
gzip_file.close()
print('Done')
def check_range(start_num, end_num):
'''Check if page exists.
Each line is like tag:12345:romance
'''
for num in range(start_num, end_num + 1):
shortcode = num
url = 'http://www.panoramio.com/user/{0}'.format(shortcode)
counter = 0
while True:
# Try 20 times before giving up
if counter > 20:
# This will stop the script with an error
raise Exception('Giving up!')
try:
text = fetch(url)
except FetchError:
# The server may be overloaded so wait a bit
print('Sleeping... If you see this')
time.sleep(10)
else:
if text:
                    for user in extract_user(text):
                        for tag in extract_tags(text):
                            yield 'tag:{0}:{1}'.format(user, tag)
break # stop the while loop
counter += 1
def fetch(url):
'''Fetch the URL and check if it returns OK.
    If it returns OK (200), returns the response text; if the page does not exist (404), returns None.
'''
print('Fetch', url)
response = requests.get(url, headers=DEFAULT_HEADERS)
# response doesn't have a reason attribute all the time??
    print('Got', response.status_code, getattr(response, 'reason', None))
if response.status_code == 200:
# The item exists
if not response.text:
# If HTML is empty maybe server broke
raise FetchError()
return response.text
elif response.status_code == 404:
# Does not exist
return
else:
# Problem
raise FetchError()
def extract_user(text):
    '''Return a list of user IDs from the text.'''
# Search for <a href="/user/1707816/tags/Bell%27Italia">Bell'Italia</a>
return re.findall(r'"/user/([^/]+)/tags/', text)
def extract_tags(text):
'''Return a list of tags from the text.'''
# Search for <a href="/user/1707816/tags/Bell%27Italia">Bell'Italia</a>
return re.findall(r'"/user/[0-9]+/tags/([^"]+)"', text)
if __name__ == '__main__':
main()
"""
Router.py uses bot_packages in this file to set up command and sensor value routing to the correct bot_role.
"""
settings = {
    "bot_name": "rp4.solalla.ardyh",
    "bot_roles": "bot",
    "bot_packages": [],
    "subscriptions": [],
}
from astropy import units as u
K_kepler = 0.01720209895 # ua^(3/2) m_{sun} d^(−1)
K = 0.01720209908 * u.au ** (3 / 2) / u.d # ua^(3/2) d^(−1)
UA = 149597870700 * u.m  # m
GM1 = 1.32712442099E20 * u.m ** 3 / u.s ** 2 # m^(3) s^(−2)
# m1/m2
Mercury = 6023600
Venus = 408523.719
Earth_Moon = 328900.561400
Mars = 3098703.59
Jupiter = 1047.348644
Saturn = 3497.9018
Uranus = 22902.98
Neptune = 19412.26
Pluto = 136566000
Eris = 119100000
Ceres = 2119000000
Palas = 9700000000
Vesta = 7400000000
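# Usage sketch: with the Gaussian gravitational constant K above, the mean
# motion of a (nearly massless) heliocentric orbit is n = K / a**1.5, e.g.:
#   a = 1.0 * u.au
#   n = K / a ** 1.5   # -> ~0.0172 rad / d, i.e. one revolution per year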
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Piotrek Wasilewski <wasilewski.piotrek@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from core import Chart, ChartColumn
CHART_TOOLS_PACKAGES = ['corechart', 'gauge', 'geochart', 'table', \
'treemap', 'annotatedtimeline']
class InvalidChartsPackage(Exception):
pass
class DatatableColumn(ChartColumn):
"""
"""
type_name = ''
def __init__(self, name, data):
self.name = name
self._data = data
def format(self, value):
return value
def get_data(self):
return [self.format(value) for value in self._data]
data = property(get_data)
class NumberColumn(DatatableColumn):
type_name = 'number'
class StringColumn(DatatableColumn):
type_name = 'string'
def format(self, value):
return "'%s'" % value
class DateColumn(DatatableColumn):
type_name = 'date'
def format(self, value):
return 'new Date(%i, %i, %i)' % \
(value.year, value.month, value.day)
class DatetimeColumn(DatatableColumn):
type_name = 'datetime'
def format(self, value):
return 'new Date(%i, %i, %i, %i, %i, %i)' % \
(value.year, value.month, value.day,
value.hour, value.minute, value.second)
class ChartToolsChart(Chart):
"""
"""
chart_type = ''
def add_column(self, name, data, column_class):
col = column_class(name, data)
self.columns.append(col)
return col
def num_rows(self):
if self.columns:
# we assume that all columns have the same length
            # DatatableColumn defines no __len__; measure the formatted data list
            return len(self.columns[0].data)
return 0
class LineChart(ChartToolsChart):
chart_type = 'LineChart'
class ColumnChart(ChartToolsChart):
chart_type = 'ColumnChart'
class ScatterChart(ChartToolsChart):
chart_type = 'ScatterChart'
class AnnotatedTimeLine(ChartToolsChart):
chart_type = 'AnnotatedTimeLine'
class PieChart(ChartToolsChart):
    chart_type = 'PieChart'
"""
Get the stem of a word, given a declined form and its gender.
TODO: Check this logic with von Soden's Grundriss der akkadischen Grammatik.
TODO: Deal with j/y issue.
"""
__author__ = ['M. Willis Monroe <willismonroe@gmail.com>']
__license__ = 'MIT License. See LICENSE.'
ENDINGS = {
'm': {
'singular': {
'nominative': 'um',
'accusative': 'am',
'genitive': 'im'
},
'dual': {
'nominative': 'ān',
'oblique': 'īn'
},
'plural': {
'nominative': 'ū',
'oblique': 'ī'
}
},
'f': {
'singular': {
'nominative': 'tum',
| 'accusative': 'tam',
'genitive': | 'tim'
},
'dual': {
'nominative': 'tān',
'oblique': 'tīn'
},
'plural': {
'nominative': ['ātum', 'ētum', 'ītum'],
            'oblique': ['ātim', 'ētim', 'ītim']
}
}
}
class Stemmer(object):
"""Stem Akkadian words with a simple algorithm based on Huehnergard"""
def __init__(self):
self.endings = ENDINGS
def get_stem(self, noun, gender, mimation=True):
"""Return the stem of a noun, given its gender"""
stem = ''
if mimation and noun[-1:] == 'm':
# noun = noun[:-1]
pass
# Take off ending
if gender == 'm':
if noun[-2:] in list(self.endings['m']['singular'].values()) + \
list(self.endings['m']['dual'].values()):
stem = noun[:-2]
elif noun[-1] in list(self.endings['m']['plural'].values()):
stem = noun[:-1]
else:
print("Unknown masculine noun: {}".format(noun))
elif gender == 'f':
if noun[-4:] in self.endings['f']['plural']['nominative'] + \
self.endings['f']['plural']['oblique']:
stem = noun[:-4] + 't'
elif noun[-3:] in list(self.endings['f']['singular'].values()) + \
list(self.endings['f']['dual'].values()):
stem = noun[:-3] + 't'
elif noun[-2:] in list(self.endings['m']['singular'].values()) + \
list(self.endings['m']['dual'].values()):
stem = noun[:-2]
else:
print("Unknown feminine noun: {}".format(noun))
else:
print("Unknown noun: {}".format(noun))
return stem
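# Usage sketch (hypothetical forms; the stemmer simply strips case endings):
#   stemmer = Stemmer()
#   stemmer.get_stem('šarrum', 'm')    # -> 'šarr'   ('um' = masc. nom. singular)
#   stemmer.get_stem('šarratum', 'f')  # -> 'šarrat' ('tum' = fem. nom. singular)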
from chill import *
source('include.c')
destination('includemodified.c')
procedure('main')
loop(0)
original()
print_code()
#!/usr/bin/python
from sys import argv
from modules.helpers.wpdetector import WordpressDetector
from modules.net.scan import is_good_response
from modules.const import ERR, NO, OK, INFO
def main():
    if len(argv) > 1:
        print(INFO + 'Checking site...')
        if not is_good_response(argv[1]):
            print(ERR + 'Site is unavailable! :(')
            exit(-1)
        print(INFO + 'Detecting wordpress...')
        wpd = WordpressDetector(argv[1])
        if wpd.detect_by_pages():
            print(OK + 'Wordpress Detected!')
            if raw_input('Try to detect Wordpress version? (y/n): ') == 'y':
                print(INFO + 'Detecting Wordpress version...')
                dec = wpd.detect_version()
                if dec is not None:
                    print(OK + 'Wordpress Version Detected! ' + dec)
                else:
                    print(NO + 'Failed to detect the Wordpress version!')
            exit(0)
        else:
            print(NO + 'This is not Wordpress! :(')
    else:
        print(ERR + 'Example: ./detector.py http://blabla.com')
if __name__ == '__main__':
    main()
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from rigour.errors import ValidationFailed
from rigour.types import *
from rigour.constraints import length_between
import rigour
import pytest
def test_secrecy_declared_before():
t = String().secret().constrain(length_between(4,6))
with pytest.raises(ValidationFailed) as excinfo:
t.check("xxx")
message = str(excinfo)
assert "xxx" not in message
def test_secrecy_declared_after():
t = String().constrain(length_between(4,6)).secret()
with pytest.raises(ValidationFailed) as excinfo:
        t.check("xxx")
message = str(excinfo)
assert "xxx" not in message
import pygame
import intro
import game
class Intro2(intro.Intro):
def load_image(self):
        self.fondo = pygame.image.load('ima/intro2.png').convert()
def go_to_next(self):
new_scene = game.Game(self.world)
        self.world.change_scene(new_scene)
#!/usr/bin/env python
# PyQt tutorial 3
import sys
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
window = QtGui.QWidget()
window.resize(200, 120)
quit = QtGui.QPushButton("Quit", window)
quit.setFont(QtGui.QFont("Times", 18, QtGui.QFont.Bold))
quit.setGeometry(10, 40, 180, 40)
quit.clicked.connect(app.quit)
window.show()
sys.exit(app.exec_())
class GameStateInterface(object):
    def __init__(self):
self._team_ids_to_names = None
self._service_ids_to_names = None
def _team_id_to_name_map(self):
raise NotImplementedError
def _service_id_to_name_map(self):
raise NotImplementedError
def _scored_events_for_tick(self, tick):
raise NotImplementedError
@property
def team_id_to_name_map(self):
if self._team_ids_to_names is None:
            self._team_ids_to_names = self._team_id_to_name_map()
return self._team_ids_to_names
@property
def service_id_to_name_map(self):
if self._service_ids_to_names is None:
self._service_ids_to_names = self._service_id_to_name_map()
return self._service_ids_to_names
def scored_events_for_tick(self, tick):
# TODO: maybe cache here? or do we cache in the database side?
return self._scored_events_for_tick(tick)
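# Minimal in-memory implementation sketch (illustrative only), showing how a
# concrete backend fills in the three abstract lookups above:
class InMemoryGameState(GameStateInterface):
    def __init__(self, teams, services, events_by_tick):
        super(InMemoryGameState, self).__init__()
        self._teams = dict(teams)
        self._services = dict(services)
        self._events_by_tick = dict(events_by_tick)
    def _team_id_to_name_map(self):
        return self._teams
    def _service_id_to_name_map(self):
        return self._services
    def _scored_events_for_tick(self, tick):
        return self._events_by_tick.get(tick, [])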
"""Test cases that are in common among wemo platform modules.
This is not a test module. These test methods are used by the platform test modules.
"""
import asyncio
import threading
from unittest.mock import patch
from pywemo.ouimeaux_device.api.service import ActionException
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_UNAVAILABLE
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
def _perform_registry_callback(hass, pywemo_registry, pywemo_device):
"""Return a callable method to trigger a state callback from the device."""
@callback
def async_callback():
# Cause a state update callback to be triggered by the device.
pywemo_registry.callbacks[pywemo_device.name](pywemo_device, "", "")
return hass.async_block_till_done()
return async_callback
def _perform_async_update(hass, wemo_entity):
"""Return a callable method to cause hass to update the state of the entity."""
@callback
def async_callback():
return hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
return async_callback
async def _async_multiple_call_helper(
hass,
pywemo_registry,
wemo_entity,
pywemo_device,
call1,
call2,
update_polling_method=None,
):
"""Create two calls (call1 & call2) in parallel; verify only one polls the device.
The platform entity should only perform one update poll on the device at a time.
Any parallel updates that happen at the same time should be ignored. This is
verified by blocking in the update polling method. The polling method should
only be called once as a result of calling call1 & call2 simultaneously.
"""
# get_state is called outside the event loop. Use non-async Python Event.
event = threading.Event()
def get_update(force_update=True):
event.wait()
update_polling_method = update_polling_method or pywemo_device.get_state
update_polling_method.side_effect = get_update
# One of these two calls will block on `event`. The other will return right
# away because the `_update_lock` is held.
_, pending = await asyncio.wait(
[call1(), call2()], return_when=asyncio.FIRST_COMPLETED
)
# Allow the blocked call to return.
event.set()
if pending:
await asyncio.wait(pending)
# Make sure the state update only happened once.
update_polling_method.assert_called_once()
async def test_async_update_locked_callback_and_update(
hass, pywemo_registry, wemo_entity, pywemo_device, **kwargs
):
"""Test that a callback and a state update request can't both happen at the same time.
When a state update is received via a callback from the device at the same time
as hass is calling `async_update`, verify that only one of the updates proceeds.
"""
await async_setup_component(hass, HA_DOMAIN, {})
callback = _perform_registry_callback(hass, pywemo_registry, pywemo_device)
update = _perform_async_update(hass, wemo_entity)
await _async_multiple_call_helper(
hass, pywemo_registry, wemo_entity, pywemo_device, callback, update, **kwargs
)
async def test_async_update_locked_multiple_updates(
hass, pywemo_registry, wemo_entity, pywemo_device, **kwargs
):
"""Test that two hass async_update state updates do not proceed at the same time."""
await async_setup_component(hass, HA_DOMAIN, {})
update = _perform_async_update(hass, wemo_entity)
await _async_multiple_call_helper(
hass, pywemo_registry, wemo_entity, pywemo_device, update, update, **kwargs
)
async def test_async_update_locked_multiple_callbacks(
hass, pywemo_registry, wemo_entity, pywemo_device, **kwargs
):
"""Test that two device callback state updates do not proceed at the same time."""
await async_setup_component(hass, HA_DOMAIN, {})
callback = _perform_registry_callback(hass, pywemo_registry, pywemo_device)
await _async_multiple_call_helper(
hass, pywemo_registry, wemo_entity, pywemo_device, callback, callback, **kwargs
)
async def test_async_locked_update_with_exception(
hass, wemo_entity, pywemo_device, update_polling_method=None
):
"""Test that the entity becomes unavailable when communication is lost."""
assert hass.states.get(wemo_entity.entity_id).state == STATE_OFF
await async_setup_component(hass, HA_DOMAIN, {})
update_polling_method = update_polling_method or pywemo_device.get_state
update_polling_method.side_effect = ActionException
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_UNAVAILABLE
async def test_async_update_with_timeout_and_recovery(hass, wemo_entity, pywemo_device):
"""Test that the entity becomes unavailable after a timeout, and that it recovers."""
assert hass.states.get(wemo_entity.entity_id).state == STATE_OFF
await async_setup_component(hass, HA_DOMAIN, {})
with patch("async_timeout.timeout", side_effect=asyncio.TimeoutError):
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id | ).state == STATE_UNAVAILABLE
# Check that the entity recovers and is available after the update succeeds.
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blockin | g=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_OFF
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 14 14:10:41 2016
@author: sigurdja
"""
from setuptools import setup, find_packages
setup(
name="psse_models",
| version= | "0.1",
packages=find_packages(),
) |
# Thanks to Kurt Othmer for the BioExplorer design this is translated from
from flow import *
class Flow(object):
def init(self, context):
ch1 = context.get_channel('Channel 1')
#ch1 = Notch(50, input=ch1)
ch1_dc = DCBlock(ch1).ac
ch1_raw = BandPass(0.0, 40.0, input=ch1_dc)
ch1_theta = BandPass(3.0, 7.0, input=ch1_raw, type='elliptic', order=3).output
ch1_beta = BandPass(15.0, 18.0, input=ch1_raw, type='elliptic', order=3).output
ch1_hibeta = BandPass(22, 38.0, input=ch1_raw, type='elliptic', order=3).output
ch1_raw.set(label='Left Raw: 0-40', color='white')
ch1_theta.set(label='Left Theta', color='violet')
ch1_beta.set(label='Left Beta', color='green')
ch1_hibeta.set(label='Left Hi Beta', color='yellow')
self.ch1_theta_threshold = Threshold('L Theta', input=RMS(ch1_theta), mode='decrease', auto_target=90)
self.ch1_beta_threshold = Threshold('L Beta', input=RMS(ch1_beta), mode='range', low_target=90, high_target=95)
self.ch1_hibeta_threshold = Threshold('L Hi-Beta', input=RMS(ch1_hibeta), mode='decrease', auto_target=95)
self.ch1_osci = Oscilloscope('Left Side', moving=False,
channels=[ch1_raw, ch1_theta, ch1_beta, ch1_hibeta])
self.left_spectrum = BarSpectrogram('Left', lo=2.0, hi=30.0, input=ch1_raw, align='right')
ch2 = context.get_channel('Channel 2')
#ch2 = Notch(50, input=ch2)
ch2_dc = DCBlock(ch2).ac
ch2_raw = BandPass(0.0, 40.0, input=ch2_dc)
ch2_theta = BandPass(3.0, 7.0, input=ch2_raw, type='elliptic', order=3).output
ch2_smr = BandPass(12.0, 15.0, input=ch2_raw, type='elliptic', order=3).output
ch2_hibeta = BandPass(22, 38.0, input=ch2_raw, type='elliptic', order=3).output
ch2_raw.set(label='Right Raw: 0-40', color='white')
ch2_theta.set(label='Right Theta', color='violet')
ch2_smr.set(label='Right SMR', color='blue')
ch2_hibeta.set(label='Right Hi Beta', color='yellow')
self.ch2_theta_threshold = Threshold('R Theta', input=RMS(ch2_theta), mode='decrease', auto_target=90)
self.ch2_smr_threshold = Threshold('R SMR', input=RMS(ch2_smr), mode='range', low_target=90, high_target=95)
self.ch2_hibeta_threshold = Threshold('R Hi-Beta', input=RMS(ch2_hibeta), mode='decrease', auto_target=95)
self.ch2_osci = Oscilloscope('Right Side', moving=False,
channels=[ch2_raw, ch2_theta, ch2_smr, ch2_hibeta])
self.right_spectrum = BarSpectrogram('Right', lo=2.0, hi=30.0, input=ch2_raw, align='left')
and_cond = Expression(lambda *args: all(args),
self.ch1_theta_threshold.passfail, self.ch1_beta_threshold.passfail, self.ch1_hibeta_threshold.passfail,
#self.ch2_theta_threshold.passfail, self.ch2_smr_threshold.passfail, self.ch2_hibeta_threshold.passfail
)
video_path = '/Users/jonathansieber/Movies/Adventure.Time.S06E22.The.Cooler.720p.HDTV.x264-W4F.mkv'
self.video = MPlayerControl(video_path, enable=and_cond)
def widget(self):
w = QtGui.QWidget()
layout = QtGui.QGridLayout()
w.setLayout(layout)
layout.addWidget(self.ch1_osci.widget(), 0, 0, 1, 4)
layout.addWidget(self.ch1_theta_threshold.widget(), 1, 0)
layout.addWidget(self.ch1_beta_threshold.widget(), 1, 1)
layout.addWidget(self.ch1_hibeta_threshold.widget(), 1, 2)
layout.addWidget(self.left_spectrum.widget(), 1, 3)
layout.addWidget(self.ch2_osci.widget(), 0, 4, 1, 4)
layout.addWidget(self.ch2_theta_threshold.widget(), 1, 5)
layout.addWidget(self.ch2_smr_threshold.widget(), 1, 6)
layout.addWidget(self.ch2_hibeta_threshold.widget(), 1, 7)
layout.addWidget(self.right_spectrum.widget(), 1, 4)
return | w
def | flow():
return Flow()
|
hub_url = "https://scihub.copernicus.eu/apihub/search?q="
requests.packages.urllib3.disable_warnings()
def calculate_md5(fname):
hasher = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hasher.update(chunk)
return hasher.hexdigest().upper()
def do_query(query, user="guest", passwd="guest"):
"""
A simple function to pass a query to the Sentinel scihub website. If
successful this function will return the XML file back for further
processing.
query: str
A query string, such as "https://scihub.copernicus.eu/dhus/odata/v1/"
"Products?$orderby=IngestionDate%20desc&$top=100&$skip=100"
Returns:
The relevant XML file, or raises error
"""
r = requests.get(query, auth=(user, passwd), verify=False)
if r.status_code == 200:
return r.text
else:
raise IOError("Something went wrong! Error code %d" % r.status_code)
def download_product(source, target, user="guest", passwd="guest"):
"""
Download a product from the SentinelScihub site, and save it to a named
local disk location given by ``target``.
source: str
A product fully qualified URL
target: str
A filename where to download the URL specified
"""
md5_source = source.replace("$value", "/Checksum/Value/$value")
r = requests.get(md5_source, auth=(user, passwd), verify=False)
md5 = r.text
if os.path.exists(target):
md5_file = calculate_md5(target)
if md5 == md5_file:
return
chunks = 1048576 # 1MiB...
while True:
LOG.debug("Getting %s" % source)
r = requests.get(source, auth=(user, passwd), stream=True,
verify=False)
if not r.ok:
raise IOError("Can't start download... [%s]" % source)
file_size = int(r.headers['content-length'])
LOG.info("Downloading to -> %s" % target)
LOG.info("%d bytes..." % file_size)
with open(target, 'wb') as fp:
cntr = 0
dload = 0
for chunk in r.iter_content(chunk_size=chunks):
if chunk:
cntr += 1
if cntr > 100:
dload += cntr * chunks
LOG.info("\tWriting %d/%d [%5.2f %%]" % (dload, file_size,
100. * float(dload) /
float(file_size)))
sys.stdout.flush()
cntr = 0
fp.write(chunk)
fp.flush()
os.fsync(fp)
md5_file = calculate_md5(target)
if md5_file == md5:
break
return
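# Usage sketch (the product URL below is illustrative, not a real UUID;
# "guest"/"guest" are the public demo credentials used elsewhere in this file).
# The loop above re-downloads until the local MD5 matches the server's:
#   download_product(
#       "https://scihub.copernicus.eu/dhus/odata/v1/Products('xxxx')/$value",
#       "/tmp/S2_product.zip", user="guest", passwd="guest")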
def parse_xml(xml):
"""
Parse an OData XML file to harvest some relevant information re products
available and so on. It will return a list of dictionaries, with one
dictionary per product returned from the query. Each dictionary will have a
number of keys (see ``fields_of_interest``), as well as ``link`` and
``quicklook`` keys.
"""
fields_of_interest = ["filename", "identifier", "instrumentshortname",
"orbitnumber", "orbitdirection", "producttype",
"beginposition", "endposition"]
tree = ET.ElementTree(ET.fromstring(xml))
# Search for all the acquired images...
granules = []
for elem in tree.iter(tag="{http://www.w3.org/2005/Atom}entry"):
granule = {}
for img in elem.getchildren():
if img.tag.find("id") >= 0:
granule['id'] = img.text
if img.tag.find("link") and img.attrib.has_key("href"):
if img.attrib['href'].find("Quicklook") >= 0:
granule['quicklook'] = img.attrib['href']
elif img.attrib['href'].find("$value") >= 0:
granule['link'] = img.attrib['href'].replace("$value", "")
if img.attrib.has_key("name"):
if img.attrib['name'] in fields_of_interest:
granule[img.attrib['name']] = img.text
granules.append(granule)
return granules
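# A returned granule dict looks roughly like this (values illustrative):
#   {'id': '...', 'quicklook': 'https://.../Products(...)/Quicklook/$value',
#    'link': 'https://.../Products(...)/', 'filename': 'S2A_..._T30TVK.SAFE',
#    'producttype': 'S2MSI1C', 'orbitdirection': 'DESCENDING', ...}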
def download_sentinel(location, input_start_date, input_sensor, output_dir,
input_end_date=None, username="guest", password="guest"):
input_sensor = input_sensor.upper()
sensor_list = ["S1", "S2", "S3"]
if input_sensor not in sensor_list:
raise ValueError("Sensor can only be S1, S2 or S3. You provided %s"
% input_sensor)
else:
if input_sensor.upper() == "S1":
sensor = "Sentinel-1"
elif input_sensor.upper() == "S2":
sensor = "Sentinel-2"
elif input_sensor.upper() == "S3":
sensor= "Sentinel-3"
sensor_str = 'platformname:%s' % sensor
#sensor_str = 'filename:%s' % input_sensor.upper()
try:
start_date = datetime.datetime.strptime(input_start_date,
"%Y.%m.%d").isoformat()
except ValueError:
try:
start_date = datetime.datetime.strptime(input_start_date,
"%Y-%m-%d").isoformat()
except ValueError:
start_date = datetime.datetime.strptime(input_start_date,
"%Y/%j").isoformat()
start_date = start_date + "Z"
if input_end_date is None:
end_date = "NOW"
else:
try:
end_date = datetime.datetime.strptime(input_end_date,
"%Y.%m.%d").isoformat()
except ValueError:
try:
end_date = datetime.datetime.strptime(input_end_date,
"%Y-%m-%d").isoformat()
except ValueError:
end_date = datetime.datetime.strptime(input_end_date,
| "%Y/%j").isoformat()
if len(location) == 2:
location_str = 'footprint:"Intersects(%f, %f)"' % (location[0], location[1])
| elif len(location) == 4:
location_str = 'footprint:"Intersects( POLYGON(( ' + \
'%f %f, %f %f, %f %f, %f %f, %f %f) ))"' % (
location[0], location[0],
location[0], location[1],
location[1], location[1],
location[1], location[0],
location[0], location[0])
time_str = 'beginposition:[%s TO %s]' % (start_date, end_date)
query = "%s AND %s AND %s" % (location_str, time_str, sensor_str)
query = "%s%s" % (hub_url, query)
# query = "%s%s" % ( hub_url, urllib2.quote(query ) )
LOG.debug(query)
result = do_query(query, user=username, passwd=password)
granules = parse_xml(result)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
ret_files = []
for granule in granules:
download_product(granule['link'] + "$value", os.path.join(output_dir,
granule['filename'].replace("SAFE", "zip")),
user=username, passwd=password)
ret_files.append(os.path.join(output_dir,
granule['filename'].replace("SAFE", "zip")))
return granules, ret_files
if __name__ == "__main__": # location = (43.3650, -8.4100)
# input_start_date = "2015.01.01"
# input_end_date = None
# username = "guest"
# password = "guest"
# input_sensor = "S2"
# output_dir = "/data/selene/ucfajlg/tmp/"
# granules, retfiles = download_sentinel ( location, input_start_date,
# input_sensor, output_dir )
lng = -8.4100
lat = 43.3650
#lat = 39.0985 # Barrax
#lng = -2.1082
#lat = 28.55 # Libya 4
#lng = 23.39
print "Testing S2 on COPERNICUS scientific hub"
location=(lat,lng)
input_start_date="2017.1.1"
input_sensor="S3"
output_dir="/tmp/"
username="s3guest"
password="s3guest"
|
ce': '/tool'}
try:
r = connection.request('POST',
'/dynamic/login',
fields=credentials,
retries=1,
redirect=False)
except (HTTPError, SSLError):
error('Connection to Hub failed!')
exit(-1)
if r.status != 200:
if r.status == 301:
redirect_location = r.headers.get('location', '')
end_domain = redirect_location.find('/dynamic/login')
error('Login is being redirected to "%s". Please verify the Hub URL.' % redirect_location[:end_domain])
else:
error('Wrong user login information!')
exit(-1)
cookie = r.headers.get('set-cookie', None)
login_info = json_loads(r.data)
# pylint: disable=E1103
if not cookie or HUB_COOKIE_NAME not in cookie or login_info.get('source') != credentials['source']:
error('Hub login failed!')
exit(-1)
# pylint: enable=E1103
return cookie
def logout(connection, cookie):
try:
connection.request('POST',
'/dynamic/logout',
headers={'Cookie': cookie},
redirect=False)
except (HTTPError, SSLError) as e:
error(str(e))
def _request_data(options):
daterange = options.daterange
params = { 'start_time': daterange.start,
'end_time': daterange.end,
'version': __version__ }
connection = connection_from_url(options.hub, timeout=8.0)
cookie = login(connection, options)
try:
r = connection.request('GET',
DATATYPE_URL[options.type] % options.project,
headers={'Cookie': cookie,
'Accept-Encoding': 'gzip'},
fields=params,
redirect=False)
except (HTTPError, SSLError) as e:
error(e)
exit(-1)
# pylint: disable=E1103
r_data = json_loads(r.data)
if r.status != 200:
error_msg = 'Wrong Hub answer.'
if r_data.get('msg', None):
error_msg += ' ' + r_data['msg']
if r.status == 403:
error_msg += ' Make sure the project you\'ve s | pecified exists and you have access to it.'
error(error_msg)
exit(-1)
# pylint: enable=E1103
| if options.verbose:
log('Data received from the hub')
log('Logging out')
logout(connection, cookie)
return r_data
def write_to_file(options, data, filename=None, output_path=None, force_overwrite=False):
if not filename:
filename = '%s-%s-%s.json' % (options.project, options.type, options.daterange.filename_str())
try:
if not output_path:
output_path = normpath(path_join(options.outputdir, filename))
if path_exists(output_path):
if options.overwrite or force_overwrite:
if not options.silent:
warning('Overwriting existing file: %s' % output_path)
elif not options.silent:
warning('Skipping existing file: %s' % output_path)
return
indentation = None
if options.indent:
indentation = 4
if isinstance(data, str):
data = json_loads(data)
with open(output_path, 'wb') as fout:
if isinstance(data, str):
fout.write(data)
else:
json_dump(data, fout, indent=indentation)
if options.verbose:
log('Finished writing to: %s' % output_path)
except (IOError, OSError) as e:
error(e)
exit(-1)
try:
# pylint: disable=F0401
from Crypto.Cipher.AES import new as aes_new, MODE_CBC
# pylint: enable=F0401
def decrypt_data(data, key):
# Need to use a key of length 32 bytes for AES-256
if len(key) != 32:
error('Invalid key length for AES-256')
exit(-1)
# IV is the last 16 bytes
iv = data[-16:]
data = data[:-16]
data = aes_new(key, MODE_CBC, iv).decrypt(data)
# Strip PKCS7 padding required for CBC
if len(data) % 16:
error('Corrupted data - invalid length')
exit(-1)
num_padding = ord(data[-1])
if num_padding > 16:
error('Corrupted data - invalid padding')
exit(-1)
return data[:-num_padding]
except ImportError:
from io import BytesIO
from subprocess import Popen, STDOUT, PIPE
from struct import pack
def decrypt_data(data, key):
# Need to use a key of length 32 bytes for AES-256
if len(key) != 32:
error('Invalid key length for AES-256')
exit(-1)
aesdata = BytesIO()
aesdata.write(key)
aesdata.write(pack('I', len(data)))
aesdata.write(data)
process = Popen('aesdecrypt', stderr=STDOUT, stdout=PIPE, stdin=PIPE, shell=True)
output, _ = process.communicate(input=aesdata.getvalue())
retcode = process.poll()
if retcode != 0:
error('Failed to run aesdecrypt, check it is on the path or install PyCrypto')
exit(-1)
return str(output)
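# Both implementations above assume the same layout: PKCS#7-padded
# AES-256-CBC ciphertext with the 16-byte IV appended at the end. A call
# sketch (the key here is a placeholder, not a real secret):
#   plaintext = decrypt_data(encrypted_blob, '0' * 32)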
def get_log_files_local(options, files_list, enc_key):
verbose = options.verbose
silent = options.silent
overwrite = options.overwrite
output_dir = options.outputdir
filename_prefix = options.project + '-'
try:
for filename in files_list:
if filename.startswith('http'):
error('Unexpected file to retrieve')
exit(-1)
# Format v1: 'eventlogspath/gamefolder/events-yyyy-mm-dd.json.gz'
# Format v2: 'eventlogspath/gamefolder/events-yyyy-mm-dd.bin'
# Convert to 'gameslug-events-yyyy-mm-dd.json'
filename_patched = filename_prefix + filename.rsplit('/', 1)[-1].split('.', 1)[0] + '.json'
output_path = normpath(path_join(output_dir, filename_patched))
if not overwrite and path_exists(output_path):
if not silent:
warning('Skipping existing file: %s' % output_path)
continue
if verbose:
log('Retrieving file: %s' % filename_patched)
if filename.endswith('.bin'):
with open(filename, 'rb') as fin:
file_content = fin.read()
file_content = decrypt_data(file_content, enc_key)
file_content = zlib_decompress(file_content)
else: # if filename.endswith('.json.gz'):
gzip_file = GzipFile(filename=filename, mode='rb')
file_content = gzip_file.read()
gzip_file.close()
file_content = decrypt_data(file_content, enc_key)
write_to_file(options, file_content, filename=filename_patched, output_path=output_path)
except (IOError, OSError) as e:
error(e)
exit(-1)
def get_log_files_s3(options, files_list, enc_key, connection):
verbose = options.verbose
silent = options.silent
overwrite = options.overwrite
output_dir = options.outputdir
filename_prefix = options.project + '-'
try:
for filename in files_list:
# Format v1: 'https://bucket.s3.amazonaws.com/gamefolder/events-yyyy-mm-dd.json?AWSAccessKeyId=keyid
# &Expires=timestamp&Signature=signature'
# Format v2: 'https://bucket.s3.amazonaws.com/gamefolder/events-yyyy-mm-dd.bin?AWSAccessKeyId=keyid
# &Expires=timestamp&Signature=signature'
# Convert to 'gameslug-events-yyyy-mm-dd.json'
filename_cleaned = filename.split('?', 1)[0].rsplit('/', 1)[-1]
filename_patched = filename_prefix + filename_cleaned.split('.', 1)[0] + '.json'
output_path = normpath(path_join(output_dir, filename_patched))
if not overwrite and path_exists(output_path):
if not silent:
warning('Sk |
entry = InstructorTask.objects.get(id=task_entry.id)
status = json.loads(entry.task_output)
self.assertEquals(status.get('attempted'), succeeded + failed)
self.assertEquals(status.get('succeeded'), succeeded)
self.assertEquals(status.get('skipped'), skipped)
self.assertEquals(status.get('failed'), failed)
self.assertEquals(status.get('total'), total)
self.assertEquals(status.get('action_name'), action_name)
self.assertGreater(status.get('duration_ms'), 0)
self.assertEquals(entry.task_state, SUCCESS)
self._assert_single_subtask_status(entry, succeeded, failed, skipped, retried_nomax, retried_withmax)
return entry
def test_successful(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
self._create_students(num_emails - 1)
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([None])
self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails, num_emails)
def test_successful_twice(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
self._create_students(num_emails - 1)
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([None])
task_entry = self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails, num_emails)
# submit the same task a second time, and confirm that it is not run again.
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([Exception("This should not happen!")])
parent_status = self._run_task_with_mock_celery(send_bulk_course_email, task_entry.id, task_entry.task_id)
self.assertEquals(parent_status.get('total'), num_emails)
self.assertEquals(parent_status.get('succeeded'), num_emails)
self.assertEquals(parent_status.get('failed'), 0)
def test_unactivated_user(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
students = self._create_students(num_emails - 1)
# mark a student as not yet having activated their email:
student = students[0]
student.is_active = False
student.save()
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.ret | urn_value.send_messages.side_effect = cycle([None])
self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails - 1, num_emails - 1)
def test_disabled_user(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to th | e instructor:
students = self._create_students(num_emails - 1)
# mark a student disabled:
student = students[0]
UserStanding.objects.create(user=student, account_status=UserStanding.ACCOUNT_DISABLED, changed_by=student)
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([None])
self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails - 1, num_emails - 1)
def test_skipped(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
students = self._create_students(num_emails - 1)
# have every fourth student optout:
expected_skipped = int((num_emails + 3) / 4.0)
expected_succeeds = num_emails - expected_skipped
for index in range(0, num_emails, 4):
Optout.objects.create(user=students[index], course_id=self.course.id)
# mark some students as opting out
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([None])
self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails, expected_succeeds, skipped=expected_skipped)
def test_skipped_include_optout(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
students = self._create_students(num_emails - 1)
# have every fourth student optout and every eighth student force disabled:
expected_skipped = int((num_emails + 7) / 8.0)
expected_succeeds = num_emails - expected_skipped
for index in range(0, num_emails, 4):
if index % 8 == 0:
Optout.objects.create(user=students[index], course_id=self.course.id, force_disabled=True)
else:
Optout.objects.create(user=students[index], course_id=self.course.id)
# Mark some students as opting out. With the include-optout option,
# only the force-disabled students are skipped.
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([None])
task_entry = self._create_input_entry(to_option=SEND_TO_ALL_INCLUDE_OPTOUT)
self._test_run_with_entry(send_bulk_course_email, task_entry, 'emailed', num_emails, expected_succeeds, skipped=expected_skipped)
def _test_email_address_failures(self, exception):
"""Test that celery handles bad address errors by failing and not retrying."""
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
self._create_students(num_emails - 1)
expected_fails = int((num_emails + 3) / 4.0)
expected_succeeds = num_emails - expected_fails
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
# have every fourth email fail due to some address failure:
get_conn.return_value.send_messages.side_effect = cycle([exception, None, None, None])
self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails, expected_succeeds, failed=expected_fails)
def test_smtp_blacklisted_user(self):
# Test that celery handles permanent SMTPDataErrors by failing and not retrying.
self._test_email_address_failures(SMTPDataError(554, "Email address is blacklisted"))
def test_ses_blacklisted_user(self):
# Test that celery handles permanent SMTPDataErrors by failing and not retrying.
self._test_email_address_failures(SESAddressBlacklistedError(554, "Email address is blacklisted"))
def test_ses_illegal_address(self):
# Test that celery handles permanent SMTPDataErrors by failing and not retrying.
self._test_email_address_failures(SESIllegalAddressError(554, "Email address is illegal"))
def test_ses_local_address_character_error(self):
# Test that celery handles permanent SMTPDataErrors by failing and not retrying.
self._test_email_address_failures(SESLocalAddressCharacterError(554, "Email address contains a bad character"))
def test_ses_domain_ends_with_dot(self):
# Test that celery handles permanent SMTPDataErrors by failing and not retrying.
self._test_email_address_failures(SESDomainEndsWithDotError(554, "Email address ends with a dot"))
def _test_retry_after_limited_retry_error(self, exception):
"""Test that celery handles connection failures by retrying."""
# If we want the batch to succeed, we need to send fewer emails
# than the max retries, so that the max is not triggered.
num_emails = settings.BULK_EMA |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join
from workspace_tools.toolchains import mbedToolchain
from workspace_tools.settings import ARM_BIN, ARM_INC, ARM_LIB, MY_ARM_CLIB, ARM_CPPLIB
from workspace_tools.hooks import hook_tool
from workspace_tools.settings import GOANNA_PATH
class ARM(mbedToolchain):
LINKER_EXT = '.sct'
LIBRARY_EXT = '.ar'
STD_LIB_NAME = "%s.ar"
DIAGNOSTIC_PATTERN = re.compile(r'"(?P<file>[^"]+)", line (?P<line>\d+)( \(column (?P<column>\d+)\)|): (?P<severity>Warning|Error): (?P<message>.+)')
DEP_PATTERN = re.compile(r'\S+:\s(?P<file>.+)\n')
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
mbedToolchain.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
if target.core == "Cortex-M0+":
cpu = "Cortex-M0"
elif target.core == "Cortex-M4F":
cpu = "Cortex-M4.fp"
elif target.core == "Cortex-M7F":
cpu = "Cortex-M7.fp.sp"
else:
cpu = target.core
main_cc = join(ARM_BIN, "armcc")
common = ["-c",
"--cpu=%s" % cpu, "--gnu",
"-Otime", "--split_sections", "--apcs=interwork",
"--brief_diagnostics", "--restrict", "--multibyte_chars"
]
if "save-asm" in self.options:
common.extend(["--asm", "--interleave"])
if "debug-info" in self.options:
common.append("-g")
common.append("-O0")
else:
common.append("-O3")
common_c = [
"--md", "--no_depend_system_headers",
'-I%s' % ARM_INC
]
self.asm = [main_cc] + common + ['-I%s' % ARM_INC]
if not "analyze" in self.options:
self.cc = [main_cc] + common + common_c + ["--c99"]
self.cppc = [main_cc] + common + common_c + ["--cpp", "--no_rtti"]
else:
self.cc = [join(GOANNA_PATH, "goannacc"), "--with-cc=" + main_cc.replace('\\', '/'), "--dialect=armcc", '--output-format="%s"' % self.GOANNA_FORMAT] + common + common_c + ["--c99"]
self.cppc= [join(GOANNA_PATH, "goannac++"), "--with-cxx=" + main_cc.replace('\\', '/'), "--dialect=armcc", '--output-format="%s"' % self.GOANNA_FORMAT] + common + common_c + ["--cpp", "--no_rtti"]
self.ld = [join(ARM_BIN, "armlink")]
self.sys_libs = []
self.ar = join(ARM_BIN, "armar")
self.elf2bin = join(ARM_BIN, "fromelf")
def remove_option(self, option):
for tool in [self.asm, self.cc, self.cppc]:
if option in tool:
tool.remove(option)
def assemble(self, source, object, includes):
# Preprocess first, then assemble
tempfile = object + '.E.s'
return [
self.asm + ['-D%s' % s for s in self.get_symbols() + self.macros] + ["-I%s" % i for i in includes] + ["-E", "-o", tempfile, source],
self.hook.get_cmdline_assembler(self.asm + ["-o", object, tempfile])
]
def parse_dependencies(self, dep_path):
dependencies = []
for line in open(dep_path).readlines():
match = ARM.DEP_PATTERN.match(line)
if match is not None:
dependencies.append(match.group('file'))
return dependencies
def parse_output(self, output):
for line in output.splitlines():
match = ARM.DIAGNOSTIC_PATTERN.match(line)
if match is not None:
self.cc_info(
match.group('severity').lower(),
match.group('file'),
match.group('line'),
match.group('message'),
target_name=self.target.name,
toolchain_name=self.name
)
| match = self.goanna_parse_line(line)
if match is not None:
self.cc_info(
| match.group('severity').lower(),
match.group('file'),
match.group('line'),
match.group('message')
)
def get_dep_opt(self, dep_path):
return ["--depend", dep_path]
def archive(self, objects, lib_path):
self.default_cmd([self.ar, '-r', lib_path] + objects)
def link(self, output, objects, libraries, lib_dirs, mem_map):
if len(lib_dirs):
args = ["-o", output, "--userlibpath", ",".join(lib_dirs), "--info=totals", "--list=.link_totals.txt"]
else:
args = ["-o", output, "--info=totals", "--list=.link_totals.txt"]
if mem_map:
args.extend(["--scatter", mem_map])
if hasattr(self.target, "link_cmdline_hook"):
args = self.target.link_cmdline_hook(self.__class__.__name__, args)
self.default_cmd(self.ld + args + objects + libraries + self.sys_libs)
@hook_tool
def binary(self, resources, elf, bin):
args = [self.elf2bin, '--bin', '-o', bin, elf]
if hasattr(self.target, "binary_cmdline_hook"):
args = self.target.binary_cmdline_hook(self.__class__.__name__, args)
self.default_cmd(args)
class ARM_STD(ARM):
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
ARM.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
self.cc += ["-D__ASSERT_MSG"]
self.cppc += ["-D__ASSERT_MSG"]
self.ld.append("--libpath=%s" % ARM_LIB)
class ARM_MICRO(ARM):
PATCHED_LIBRARY = False
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
ARM.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
# Compiler
self.asm += ["-D__MICROLIB"]
self.cc += ["--library_type=microlib", "-D__MICROLIB", "-D__ASSERT_MSG"]
self.cppc += ["--library_type=microlib", "-D__MICROLIB", "-D__ASSERT_MSG"]
# Linker
self.ld.append("--library_type=microlib")
# We had to patch microlib to add C++ support
# In later releases this patch should have entered mainline
if ARM_MICRO.PATCHED_LIBRARY:
self.ld.append("--noscanlib")
# System Libraries
self.sys_libs.extend([join(MY_ARM_CLIB, lib+".l") for lib in ["mc_p", "mf_p", "m_ps"]])
if target.core == "Cortex-M3":
self.sys_libs.extend([join(ARM_CPPLIB, lib+".l") for lib in ["cpp_ws", "cpprt_w"]])
elif target.core in ["Cortex-M0", "Cortex-M0+"]:
self.sys_libs.extend([join(ARM_CPPLIB, lib+".l") for lib in ["cpp_ps", "cpprt_p"]])
else:
self.ld.append("--libpath=%s" % ARM_LIB)
|
from monitor import Monitor
try:
from python_libtorrent import get_libtorrent
lt = get_libtorrent()
except Exception:
import libtorrent as lt
class Dispatcher(Monitor):
def __init__(self, client):
super(Dispatcher, self).__init__(client)
def do_start(self, th, ses):
self._th = th
| self._ses = ses
self.start()
def run(self):
if not self._ses:
raise Exception('Invalid state, session is not initialized')
while self.running:
a = self._ses.wait_for_alert(1000)
if a:
alerts = self._ses.pop_alerts()
for alert in alerts:
with self.lock:
| for cb in self.listeners:
cb(lt.alert.what(alert), alert)
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016, 2018, 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The WAF plugin is useful to build waf based parts
waf bases projects are projects that drive configuration and build via
a local waf python helper - see https://github.com/waf-project/waf for more
details.
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
In addition, this plugin uses the following plugin-specific keywords:
- configflags:
(list of strings)
configure flags to pass to the build such as those shown by running
./waf --help
"""
from snapcraft.plugins.v1 import PluginV1
class WafPlugin(PluginV1):
"""plugin to build via waf build system"""
@classmethod
def schema(cls):
schema = super().schema()
schema["properties"]["configflags"] = {
"type": "array",
"minitems": 1,
| "uniqueItems": True,
"items": {"type": "string"},
"default": [],
}
schema["required"] = ["source"]
return schema
def __init__(self, name, options, project):
super().__init__(name, options, project)
self._setup_base_tools()
def _setup_base_tools(self):
self.build_packages.append("python-dev:native")
@classmethod
def get_build_properties(cls):
| # Inform Snapcraft of the properties associated with building. If these
# change in the YAML Snapcraft will consider the build step dirty.
return ["configflags"]
def env(self, root):
env = super().env(root)
if self.project.is_cross_compiling:
env.extend(
[
"CC={}-gcc".format(self.project.arch_triplet),
"CXX={}-g++".format(self.project.arch_triplet),
]
)
return env
def enable_cross_compilation(self):
# Let snapcraft know that this plugin can cross-compile
# If the method isn't implemented an exception is raised
pass
def build(self):
super().build()
self.run(["./waf", "distclean"])
self.run(["./waf", "configure"] + self.options.configflags)
self.run(["./waf", "build"])
self.run(
["./waf", "install", "--destdir=" + self.installdir]
) # target from snappy env
|
20,
'other_seed': 'r1'}}
expected_report = """\
remap r1,100,1,10,r2
remap r2,200,2,20,r1
"""
write_remap_counts(writer, counts, 'remap', distance_report)
self.assertEqual(expected_report, report.getvalue())
# noinspection DuplicatedCode
class ConvertPrelimTest(unittest.TestCase):
def setUp(self):
self.projects = ProjectConfig()
self.projects.load(StringIO("""\
{
"regions": {
"R1-seed": {
"seed_group": "main",
"reference": ["ACTAAAGGG"]
},
"R2-seed": {
"seed_group": "main",
"reference": ["ACTAAAGGGAAA"]
}
}
}
"""))
self.sam_file = StringIO()
self.remap_counts = StringIO()
self.remap_counts_writer = DictWriter(
self.remap_counts,
['type', 'filtered_count', 'count'],
lineterminator=os.linesep)
self.remap_counts_writer.writeheader()
def test_simple(self):
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
example1,89,R1-seed,1,0,9M,=,1,0,AAACCCTTT,BBBBBBBBB
""")
count_threshold = 2
expected_sam_file = """\
@HD VN:1.0 SO:unsorted
@SQ SN:R1-seed LN:9
@SQ SN:R2-seed LN:12
@PG ID:bowtie2 PN:bowtie2 VN:2.2.3 CL:""
example1\t89\tR1-seed\t1\t0\t9M\t=\t1\t0\tAAACCCTTT\tBBBBBBBBB
"""
expected_remap_counts = """\
type,filtered_count,count
prelim R1-seed,0,1
"""
expected_seed_counts = {}
seed_counts = convert_prelim(prelim_csv,
self.sam_file,
self.remap_counts_writer,
count_threshold,
self.projects)
self.assertEqual(expected_sam_file, self.sam_file.getvalue())
self.assertEqual(expected_remap_counts, self.remap_counts.getvalue())
self.assertEqual(expected_seed_counts, seed_counts)
def test_two_regions(self):
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
example1,89,R1-seed,1,0,9M,=,1,0,AAACCCTTT,BBBBBBBBB
example2,89,R2-seed,1,0,9M,=,1,0,AAAACCTTT,BBBBBBBBB
example3,89,R2-seed,1,0,9M,=,1,0,AAAAACTTT,BBBBBBBBB
""")
count_threshold = 2
expected_sam_file = """\
@HD VN:1.0 SO:unsorted
@SQ SN:R1-seed LN:9
@SQ SN:R2-seed LN:12
@PG ID:bowtie2 PN:bowtie2 VN:2.2.3 CL:""
example1\t89\tR1-seed\t1\t0\t9M\t=\t1\t0\tAAACCCTTT\tBBBBBBBBB
example2\t89\tR2-seed\t1\t0\t9M\t=\t1\t0\tAAAACCTTT\tBBBBBBBBB
example3\t89\tR2-seed\t1\t0\t9M\t=\t1\t0\tAAAAACTTT\tBBBBBBBBB
"""
expected_remap_counts = """\
type,filtered_count,count
prelim R1-seed,0,1
prelim R2-seed,0,2
"""
expected_seed_counts = {}
seed_counts = convert_prelim(prelim_csv,
self.sam_file,
self.remap_counts_writer,
count_threshold,
self.projects)
self.assertEqual(expected_sam_file, self.sam_file.getvalue())
self.assertEqual(expected_remap_counts, self.remap_counts.getvalue())
self.assertEqual(expected_seed_counts, seed_counts)
def test_long_reads(self):
self.maxDiff = None
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
example1,89,R1-seed,1,0,54M,=,1,0,\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2,89,R1-seed,1,0,54M,=,1,0,\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
""")
count_threshold = 2
expected_sam_file = """\
@HD VN:1.0 SO:unsorted
@SQ SN:R1-seed LN:9
@SQ SN:R2-seed LN:12
@PG ID:bowtie2 PN:bowtie2 VN:2.2.3 CL:""
example1\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
"""
expected_remap_counts = """\
type,filtered_count,count
prelim R1-seed,2,2
"""
| expected_seed_counts = {'R1-seed': 2}
seed_counts = convert_prelim(prelim_csv,
self.sam_file,
self.remap_counts_writer,
count_threshold,
self.projects)
self.assertEqual(expected_sam_file, self.sam_file.getvalue())
self.assertEqual(expected_remap_counts, self.remap_counts.getval | ue())
self.assertEqual(expected_seed_counts, seed_counts)
def test_star_region(self):
self.maxDiff = None
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
example1,89,R1-seed,1,0,54M,=,1,0,\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2,89,R1-seed,1,0,54M,=,1,0,\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example3,93,*,*,*,*,*,*,*,\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
""")
count_threshold = 2
expected_sam_file = """\
@HD VN:1.0 SO:unsorted
@SQ SN:R1-seed LN:9
@SQ SN:R2-seed LN:12
@PG ID:bowtie2 PN:bowtie2 VN:2.2.3 CL:""
example1\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example3\t93\t*\t*\t*\t*\t*\t*\t*\t\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
"""
expected_remap_counts = """\
type,filtered_count,count
prelim *,0,1
prelim R1-seed,2,2
"""
expected_seed_counts = {'R1-seed': 2}
seed_counts = convert_prelim(prelim_csv,
self.sam_file,
self.remap_counts_writer,
count_threshold,
self.projects)
self.assertEqual(expected_sam_file, self.sam_file.getvalue())
self.assertEqual(expected_remap_counts, self.remap_counts.getvalue())
self.assertEqual(expected_seed_counts, seed_counts)
def test_best_in_group(self):
self.maxDiff = None
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
example1,89,R1-seed,1,0,54M,=,1,0,\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2,89,R2-seed,1,0,54M,=,1,0,\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example3,89,R1-seed,1,0,54M,=,1,0,\
AAAAAATTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example4,89,R2-seed,1,0,54M,=,1,0,\
AAAAAAAATAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example5,89,R2-seed,1,0,54M,=,1,0,\
AAAAAAAAAAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
""")
count_threshold = 2
expected_sam_file = """\
@HD VN:1.0 SO:unsorted
@SQ SN:R1-seed LN:9
@SQ SN:R2-seed LN:12
@PG ID:bowtie2 PN:bowtie2 VN:2.2.3 CL:""
example1\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2\t89\tR2-seed\t1\t0\t54M\t=\t1\t0\t\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example3\t89\tR1-s |
import re
simple_cmd_match = re.compile(r'\\([^\\]+?)\{(.*?)\}')
graphics_cmd_match = re.compile(r'\\includegraphics\[.*?\]?\{(.*?)\}')
begin_cmd_match = re.compile(r'\\begin{([^}]+?)}(?:(?:\[([^\]]+?)\])|.*)')
newcmd_match = | re.compile(r'\\.+?\{(.*?)\}\{(.*)\}')
# newcmd_match_with_var = re.compile(r'\\[^\\]+?\{(.*?)\}\{(.*?)\}')
vars_match = re.compile(r'\{(.+?)\}')
def get_vars(line):
res = list()
open_braces = 0
one_var = ''
for char in line.strip():
| if char == '}':
open_braces -= 1
if open_braces > 0:
one_var += char
elif open_braces == 0 and one_var:
res.append(one_var)
one_var = ''
if char == '{':
open_braces += 1
return res
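# Worked example of the brace matching above (input illustrative):
#   get_vars(r'\newcommand{\foo}{bar {baz}}') == [r'\foo', 'bar {baz}']
# Nested braces stay inside a variable; only top-level groups are split.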
class FileIter:
def __init__(self, filename):
self.fn = filename
self.f = open(self.fn, 'r')
def get_line(self):
for line in self.f:
yield line
self.f.close()
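# Usage sketch ('main.tex' is illustrative): lines are yielded lazily and
# the file is closed once the generator is exhausted.
#   for line in FileIter('main.tex').get_line():
#       print(line)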
|
LOG = logging.getLogger(__name__)
# Import third party libs
HAS_NEUTRON = False
try:
from neutronclient.v2_0 import client
HAS_NEUTRON = True
except ImportError:
pass
__opts__ = {}
def __virtual__():
'''
Only load this module if neutron
is installed on this minion.
'''
if HAS_NEUTRON:
return 'neutron'
return False
def _autheticate(func_name):
'''
Authenticate requests with the salt keystone module and format return data
'''
@wraps(func_name)
def decorator_method(*args, **kwargs):
'''
Authenticate request and format return data
'''
connection_args = {'profile': kwargs.get('profile', None)}
nkwargs = {}
for kwarg in kwargs:
if 'connection_' in kwarg:
connection_args.update({kwarg: kwargs[kwarg]})
elif '__' not in kwarg:
nkwargs.update({kwarg: kwargs[kwarg]})
kstone = __salt__['keystone.auth'](**connection_args)
token = kstone.auth_token
endpoint = kstone.service_catalog.url_for(
service_type='network',
endpoint_type='publicURL')
neutron_interface = client.Client(
endpoint_url=endpoint, token=token)
LOG.debug('calling with args ' + str(args))
LOG.debug('calling with kwargs ' + str(nkwargs))
return_data = func_name(neutron_interface, *args, **nkwargs)
LOG.debug('got return data ' + str(return_data))
if isinstance(return_data, list):
# format list as a dict for rendering
return {data.get('name', None) or data['id']: data
for data in return_data}
return return_data
return decorator_method
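# Sketch of the kwarg split performed above (names illustrative): given
#   list_networks(connection_user='admin', connection_password='s3cr3t',
#                 shared=True)
# the decorator routes the connection_* kwargs (plus 'profile') to
# keystone.auth() and forwards only shared=True to the wrapped neutron call.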
@_autheticate
def list_floatingips(neutron_interface, **kwargs):
'''
list all floatingips
CLI Example:
.. code-block:: bash
salt '*' neutron.list_floatingips
'''
return neutron_interface.list_floatingips(**kwargs)['floatingips']
@_autheticate
def list_security_groups(neutron_interface, **kwargs):
'''
list all security_groups
CLI Example:
.. code-block:: bash
salt '*' neutron.list_security_groups
'''
return neutron_interface.list_security_groups(**kwargs)['security_groups']
@_autheticate
def list_subnets(neutron_interface, **kwargs):
'''
list all subnets
CLI Example:
.. code-block:: bash
salt '*' neutron.list_subnets
'''
return neutron_interface.list_subnets(**kwargs)['subnets']
@_autheticate
def list_networks(neutron_interface, **kwargs):
'''
| list all networks
CLI Example:
.. code-block:: bash
salt '*' neutron.list_networks
'''
return neutron_interface.list_networks(**kwargs)['networks']
@_autheticate
def list_ports(neutron_interface, **kwargs):
'''
list all ports
CLI Example:
.. code-block:: bash
salt '*' neutron.list_ports
'''
return neutron_interface.list_ports(**kwargs)['ports']
@_autheticate
def list_routers(neutron_interface, **kwargs):
'''
list all routers
|
CLI Example:
.. code-block:: bash
salt '*' neutron.list_routers
'''
return neutron_interface.list_routers(**kwargs)['routers']
@_autheticate
def update_floatingip(neutron_interface, fip, port_id=None):
'''
Update a floating IP. Used to associate or disassociate a
floating IP with an instance's port.
CLI Example:
.. code-block:: bash
to associate with an instance's port
salt '*' neutron.update_floatingip openstack-floatingip-id port-id
to disassociate from an instance's port
salt '*' neutron.update_floatingip openstack-floatingip-id
'''
neutron_interface.update_floatingip(fip, {"floatingip":
{"port_id": port_id}})
@_autheticate
def update_subnet(neutron_interface, subnet_id, **subnet_params):
'''
update given subnet
CLI Example:
.. code-block:: bash
salt '*' neutron.update_subnet openstack-subnet-id name='new_name'
'''
neutron_interface.update_subnet(subnet_id, {'subnet': subnet_params})
@_autheticate
def update_router(neutron_interface, router_id, **router_params):
'''
update given router
CLI Example:
.. code-block:: bash
salt '*' neutron.update_router openstack-router-id name='new_name'
external_gateway='openstack-network-id' administrative_state=true
'''
neutron_interface.update_router(router_id, {'router': router_params})
@_autheticate
def router_gateway_set(neutron_interface, router_id, external_gateway):
'''
Set external gateway for a router
CLI Example:
.. code-block:: bash
salt '*' neutron.update_router openstack-router-id openstack-network-id
'''
neutron_interface.update_router(
router_id, {'router': {'external_gateway_info':
{'network_id': external_gateway}}})
@_autheticate
def router_gateway_clear(neutron_interface, router_id):
'''
Clear external gateway for a router
CLI Example:
.. code-block:: bash
salt '*' neutron.update_router openstack-router-id
'''
neutron_interface.update_router(
router_id, {'router': {'external_gateway_info': None}})
@_autheticate
def create_router(neutron_interface, **router_params):
'''
Create OpenStack Neutron router
CLI Example:
.. code-block:: bash
salt '*' neutron.create_router name=R1
'''
response = neutron_interface.create_router({'router': router_params})
if 'router' in response and 'id' in response['router']:
return response['router']['id']
@_autheticate
def router_add_interface(neutron_interface, router_id, subnet_id):
'''
Attach router to a subnet
CLI Example:
.. code-block:: bash
salt '*' neutron.router_add_interface openstack-router-id subnet-id
'''
neutron_interface.add_interface_router(router_id, {'subnet_id': subnet_id})
@_autheticate
def router_rem_interface(neutron_interface, router_id, subnet_id):
'''
Detach router from a subnet
CLI Example:
.. code-block:: bash
salt '*' neutron.router_rem_interface openstack-router-id subnet-id
'''
neutron_interface.remove_interface_router(
router_id, {'subnet_id': subnet_id})
@_autheticate
def create_security_group(neutron_interface, **sg_params):
'''
Create a new security group
CLI Example:
.. code-block:: bash
salt '*' neutron.create_security_group name='new_rule'
description='test rule'
'''
response = neutron_interface.create_security_group(
{'security_group': sg_params})
if 'security_group' in response and 'id' in response['security_group']:
return response['security_group']['id']
@_autheticate
def create_security_group_rule(neutron_interface, **rule_params):
'''
Create a rule entry for a security group
CLI Example:
.. code-block:: bash
salt '*' neutron.create_security_group_rule
'''
neutron_interface.create_security_group_rule(
{'security_group_rule': rule_params})
@_autheticate
def create_floatingip(neutron_interface, **floatingip_params):
'''
Create a new floating IP
CLI Example:
.. code-block:: bash
salt '*' neutron.create_floatingip floating_network_id=ext-net-id
'''
response = neutron_interface.create_floatingip(
{'floatingip': floatingip_params})
if 'floatingip' in response and 'id' in response['floatingip']:
return response['floatingip']['id']
@_autheticate
def create_subnet(neutron_interface, **subnet_params):
'''
Create a new subnet in OpenStack
CLI Example:
.. code-block:: bash
salt '*' neutron.create_subnet name='subnet name'
network_id='openstack-network-id' cidr='192.168.10.0/24' \\
gateway_ip='192.168.10.1' ip_version='4' enable_dhcp=false \\
start_ip='192.168.10.10' end_ip='192.168.10.20'
'''
if 'start_ip' in subnet_params:
subnet_params.update(
|
from dj | ango.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from .views import HomeView
# Uncomment the next two lines to enable the admin:
admin.autodiscover()
urlpatterns = (
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) +
patterns(
'',
url('^$', HomeView.as_view(), name='home'),
ur | l(r'^admin/', include(admin.site.urls)),
url(r'^social/', include('socialregistration.urls', namespace='socialregistration')),
)
)
|
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
import cachalot
# This sets up Django, necessary for autodoc
import runtests
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-cachalot'
copyright = '2014-2016, Bertrand Bordage'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '%s.%s' % cachalot.VERSION[:2]
# The full version, including alpha/beta/rc tags.
release = cachalot.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain | a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xh | tml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-cachalotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'django-cachalot.tex', u'django-cachalot Documentation',
u'Bertrand Bordage', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-cachalot', u'django-cachalot Documentation',
[u'Bertrand Bordage'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-cachalot', u'django-cachalot Documentation',
u'Bertrand Bordage', 'django-cachalot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to |
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
product = Table('product', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('product_name', VARCHAR),
Column('bar_code', INTEGER),
Column('price', NUMERIC),
Column('picture_id', INTEGER),
Column('category', VARCHAR),
Column('inprice', NUMERIC),
Column('size', VARCHAR),
Column('supply', INTEGER),
)
product = Table('product', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('product_name', String),
Column('category', String),
Column('bar_code', Integer),
Column('size', String),
Column('inprice', Numeric),
Column('price', Numeric),
Column('supply_id', Integer),
Co | lumn('picture_id', Integer),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['product'].columns['supply'].drop()
post_meta.tables['product'].columns['supply_id'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['product'].columns['supply'].create()
post_meta.tables['product'].columns['supply_id'].drop()
import pytest
import six
from mock import call, patch
from tests import utils
from week_parser.base import parse_row, parse_week, populate_extra_data
from week_parser.main import PrettyPrinter
def test_populate_extra_data_no_days():
"""
If we haven't found any days data, there is not extra data to add
"""
week_data = {}
description = '__DESCRIPTION__'
populate_extra_data(week_data, description)
assert week_data == {}
def test_populate_extra_data_square_day():
"""
If we have found a 'square' day, the description and square value is added
"""
value = 7
week_data = {'mon': {'value': value}}
description = '__DESCRIPTION__'
populate_extra_data(week_data, description)
assert week_data == {
'mon': {
'value': value,
'square': value ** 2,
'description': '{} {}'.format(description, value ** 2)
}
}
def test_populate_extra_data_double_day():
"""
If we have found a 'double' day, the description and double value is added
"""
value = 7
week_data = {'thu': {'value': value}}
description = '__DESCRIPTION__'
populate_extra_data(week_data, description)
assert week_data == {
'thu': {
'value': value,
'double': value * 2,
'description': '{} {}'.format(description, value * 2)
}
}
def test_parse_row_single_day():
"""
If the input row contains a single day, it is outputted
"""
row = {'mon': '3', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
    assert week_data == {'mon': {'day': 'mon', 'value': 3}}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_day_range():
"""
If the input row contains a day range, it is outputted
"""
row = {'mon-wed': '3', 'description': '__DESCRIPTION__'}
    with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {
'mon': {'day': 'mon', 'value': 3},
'tue': {'day': 'tue', 'value': 3},
'wed': {'day': 'wed', 'value': 3},
}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_extra_columns():
"""
If the input row contains any extra columns, they are skipped
"""
row = {'wed': '2', 'description': '__DESCRIPTION__',
'__FOO__': '__BAR__', '__ANYTHING__': '__ELSE__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {'wed': {'day': 'wed', 'value': 2}}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_not_int_value():
"""
If the day value is not an integer, we get a ValueError
"""
row = {'mon': '__NOT_A_NUMBER__', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
with pytest.raises(ValueError) as exc:
parse_row(row)
assert mock_populate.call_count == 0
assert str(exc.value) == (
"invalid literal for int() with base 10: '__NOT_A_NUMBER__'")
def test_parse_row_invalid_day_range():
"""
If the input row contains an invalid day range, we skip it
"""
row = {'foo-bar': '3', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row():
"""
An input row may contain any combination of day ranges
"""
row = {'mon-tue': '3', 'wed-thu': '2', 'fri': '1',
'__SOME__': '__DATA__', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {
'mon': {'day': 'mon', 'value': 3},
'tue': {'day': 'tue', 'value': 3},
'wed': {'day': 'wed', 'value': 2},
'thu': {'day': 'thu', 'value': 2},
'fri': {'day': 'fri', 'value': 1},
}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_week_empty_file():
"""
We can process an empty file
"""
filename = 'anything.csv'
with utils.mock_open(file_content='') as mock_open:
with patch('week_parser.base.parse_row') as mock_parse_week:
result = parse_week(filename)
assert result == []
assert mock_open.call_args_list == [call(filename)]
assert mock_parse_week.call_count == 0
def test_parse_week_valid_file():
"""
We can process a file with valid content
"""
filename = 'anything.csv'
csv_data = ('mon,tue,some_column1,wed,thu,fri,description\n'
'1,5,data,2,3,3,first_desc\n')
expected_row = {'mon': '1', 'tue': '5', 'wed': '2', 'thu': '3', 'fri': '3',
'description': 'first_desc', 'some_column1': 'data'}
with utils.mock_open(file_content=csv_data) as mock_open:
with patch('week_parser.base.parse_row') as mock_parse_row:
mock_parse_row.return_value = {'mon': {'day': 'mon'}}
result = parse_week(filename)
assert result == [{'day': 'mon'}]
assert mock_open.call_args_list == [call(filename)]
assert mock_parse_row.call_args_list == [call(expected_row)]
def test_pprint_bytes(capsys):
printer = PrettyPrinter()
printer.pprint(six.b('__FOO__'))
out, err = capsys.readouterr()
assert err == ''
assert out == "'__FOO__'\n"
def test_pprint_unicode(capsys):
printer = PrettyPrinter()
printer.pprint(six.u('__FOO__'))
out, err = capsys.readouterr()
assert err == ''
assert out == "'__FOO__'\n"
dtypedescr = dtype
valid_filemodes = ["r", "c", "r+", "w+"]
writeable_filemodes = ["r+", "w+"]
mode_equivalents = {
"readonly":"r",
"copyonwrite":"c",
"readwrite":"r+",
"write":"w+"
}
@set_module('numpy')
class memmap(ndarray):
"""Create a memory-map to an array stored in a *binary* file on disk.
Memory-mapped files are used for accessing small segments of large files
on disk, without reading the entire file into memory. NumPy's
memmaps are array-like objects. This differs from Python's ``mmap``
module, which uses file-like objects.
This subclass of ndarray has some unpleasant interactions with
some operations, because it doesn't quite fit properly as a subclass.
An alternative to using this subclass is to create the ``mmap``
object yourself, then create an ndarray with ndarray.__new__ directly,
passing the object created in its 'buffer=' parameter.
This class may at some point be turned into a factory function
which returns a view into an mmap buffer.
Flush the memmap instance to write the changes to the file. Currently there
is no API to close the underlying ``mmap``. It is tricky to ensure the
resource is actually closed, since it may be shared between different
memmap instances.
Parameters
----------
filename : str, file-like object, or pathlib.Path instance
The file name or file object to be used as the array data buffer.
dtype : data-type, optional
The data-type used to interpret the file contents.
Default is `uint8`.
mode : {'r+', 'r', 'w+', 'c'}, optional
The file is opened in this mode:
+------+-------------------------------------------------------------+
| 'r'  | Open existing file for reading only.                        |
+------+-------------------------------------------------------------+
| 'r+' | Open existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'w+' | Create or overwrite existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'c' | Copy-on-write: assignments affect data in memory, but |
| | changes are not saved to disk. The file on disk is |
| | read-only. |
+------+-------------------------------------------------------------+
Default is 'r+'.
offset : int, optional
In the file, array data starts at this offset. Since `offset` is
measured in bytes, it should normally be a multiple of the byte-size
of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
file are valid; The file will be extended to accommodate the
additional data. By default, ``memmap`` will start at the beginning of
the file, even if ``filename`` is a file pointer ``fp`` and
``fp.tell() != 0``.
shape : tuple, optional
The desired shape of the array. If ``mode == 'r'`` and the number
of remaining bytes after `offset` is not a multiple of the byte-size
of `dtype`, you must specify `shape`. By default, the returned array
will be 1-D with the number of elements determined by file size
and data-type.
order : {'C', 'F'}, optional
Specify the order of the ndarray memory layout:
:term:`row-major`, C-style or :term:`column-major`,
Fortran-style. This only has an effect if the shape is
greater than 1-D. The default order is 'C'.
Attributes
----------
filename : str or pathlib.Path instance
Path to the mapped file.
offset : int
Offset position in the file.
mode : str
File mode.
Methods
-------
flush
Flush any changes in memory to file on disk.
When you delete a memmap object, flush is called first to write
changes to disk.
See also
--------
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
The memmap object can be used anywhere an ndarray is accepted.
Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
``True``.
Memory-mapped files cannot be larger than 2GB on 32-bit systems.
When a memmap causes a file to be created or extended beyond its
current size in the filesystem, the contents of the new part are
unspecified. On systems with POSIX filesystem semantics, the extended
part will be filled with zero bytes.
Examples
--------
>>> data = np.arange(12, dtype='float32')
>>> data.resize((3,4))
This example uses a temporary file so that doctest doesn't write
files to your directory. You would use a 'normal' filename.
>>> from tempfile import mkdtemp
>>> import os.path as path
>>> filename = path.join(mkdtemp(), 'newfile.dat')
Create a memmap with dtype and shape that matches our data:
>>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
>>> fp
memmap([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]], dtype=float32)
Write data to memmap array:
>>> fp[:] = data[:]
>>> fp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fp.filename == path.abspath(filename)
True
Flushes memory changes to disk in order to read them back
>>> fp.flush()
Load the memmap and verify data was stored:
>>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> newfp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Read-only memmap:
>>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> fpr.flags.writeable
False
Copy-on-write memmap:
>>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
>>> fpc.flags.writeable
True
It's possible to assign to copy-on-write array, but values are only
written into the memory copy of the array, and not written to disk:
>>> fpc
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fpc[0,:] = 0
>>> fpc
memmap([[ 0., 0., 0., 0.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
File on disk is unchanged:
>>> fpr
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Offset into a memmap:
>>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
>>> fpo
memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
"""
__array_priority__ = -100.0
def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
shape=None, order='C'):
# Import here to minimize 'import numpy' overhead
import mmap
import os.path
try:
mode = mode_equivalents[mode]
except KeyError as e:
if mode not in valid_filemodes:
raise ValueError(
"mode must be one of {!r} (got {!r})"
.format(valid_filemodes + list(mode_equivalents.keys()), mode)
) from None
if mode == 'w+' and shape is None:
raise ValueError("shape must be given")
if hasattr(filename, 'read'):
f_ctx = nullcontext(filename)
else:
f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')
with f_ctx as fid:
fid.seek(0, 2)
flen = fid.tell()
descr = dtypedescr(dtype)
_dbytes = descr.itemsize
if shape is None:
bytes = flen - offset
if bytes % _dbytes:
raise ValueError("Size of available data is not a "
|
# libxmlquery documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 5 15:13:45 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libxmlquery'
copyright = u'2010, Frederico Gonçalves, Vasco Fernandes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libxmlquerydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'libxmlquery.tex', u'libxmlquery Documentation',
u'Frederico Gonçalves, Vasco Fernandes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libxmlquery', u'libxmlquery Documentation',
[u'Frederico Gonçalves, Vasco Fernandes'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'libxmlquery'
epub_author = u'Frederico Gonçalves, Vasco Fernandes'
epub_publisher = u'Frederico Gonçalves, Vasco Fernandes'
epub_copyright = u'2010, Frederico Gonçalves, Vasco Fernandes'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
def main(request, response):
headers = {
# CORS-safelisted
"content-type": "text/plain",
"c | ache-control": "no cache",
"content-language": "en",
"expires": "Fri, 30 Oct 1998 14:19:41 GMT",
"last-modified": "Tue, 15 Nov 1994 12:45:26 GMT",
"pragma": "no-cache",
        # Non-CORS-safelisted
"x-test": "foobar",
"Access-Control-Allow-Origin": "*"
}
for header in headers:
response.headers.set(header, headers[header])
response.content = "PASS: Cross-domain access allowed."
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of cpu_temperature.py
"""
# pylint: disable=unused-argument
import logging
import unittest
from devil import devil_env
from devil.android import cpu_temperature
from devil.android import device_utils
from devil.utils import mock_calls
from devil.android.sdk import adb_wrapper
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
class CpuTemperatureTest(mock_calls.TestCase):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def setUp(self):
# Mock the device
self.mock_device = mock.Mock(spec=device_utils.DeviceUtils)
self.mock_device.build_product = 'blueline'
self.mock_device.adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
self.mock_device.FileExists.return_value = True
self.cpu_temp = cpu_temperature.CpuTemperature(self.mock_device)
self.cpu_temp.InitThermalDeviceInformation()
class CpuTemperatureInitTest(unittest.TestCase):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testInitWithDeviceUtil(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
c = cpu_temperature.CpuTemperature(d)
self.assertEqual(d, c.GetDeviceForTesting())
def testInitWithMissing_fails(self):
with self.assertRaises(TypeError):
cpu_temperature.CpuTemperature(None)
with self.assertRaises(TypeError):
cpu_temperature.CpuTemperature('')
class CpuTemperatureGetThermalDeviceInformationTest(CpuTemperatureTest):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testGetThermalDeviceInformation_noneWhenIncorrectLabel(self):
invalid_device = mock.Mock(spec=device_utils.DeviceUtils)
invalid_device.build_product = 'invalid_name'
c = cpu_temperature.CpuTemperature(invalid_device)
c.InitThermalDeviceInformation()
self.assertEqual(c.GetDeviceInfoForTesting(), None)
def testGetThermalDeviceInformation_getsCorrectInformation(self):
correct_information = {
'cpu0': '/sys/class/thermal/thermal_zone11/temp',
'cpu1': '/sys/class/thermal/thermal_zone12/temp',
'cpu2': '/sys/class/thermal/thermal_zone13/temp',
'cpu3': '/sys/class/thermal/thermal_zone14/temp',
'cpu4': '/sys/class/thermal/thermal_zone15/temp',
'cpu5': '/sys/class/thermal/thermal_zone16/temp',
'cpu6': '/sys/class/thermal/thermal_zone17/temp',
'cpu7': '/sys/class/thermal/thermal_zone18/temp'
}
self.assertEqual(
        correct_information,
        self.cpu_temp.GetDeviceInfoForTesting().get('cpu_temps'))
class CpuTemperatureIsSupportedTest(CpuTemperatureTest):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testIsSupported_returnsTrue(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
d.FileExists.return_value = True
c = cpu_temperature.CpuTemperature(d)
self.assertTrue(c.IsSupported())
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testIsSupported_returnsFalse(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
d.FileExists.return_value = False
c = cpu_temperature.CpuTemperature(d)
    self.assertFalse(c.IsSupported())
class CpuTemperatureLetCpuCoolToTemperatureTest(CpuTemperatureTest):
# Return values for the mock side effect
  cooling_down0 = ([45000 for _ in range(8)] +
                   [43000 for _ in range(8)] +
                   [41000 for _ in range(8)])
@mock.patch('time.sleep', mock.Mock())
  def testLetBatteryCoolToTemperature_coolWithin24Calls(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.cooling_down0)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
    self.assertEqual(self.mock_device.ReadFile.call_count, 24)
cooling_down1 = [45000 for _ in range(8)] + [41000 for _ in range(16)]
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_coolWithin16Calls(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.cooling_down1)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
    self.assertEqual(self.mock_device.ReadFile.call_count, 16)
constant_temp = [45000 for _ in range(40)]
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_timeoutAfterThree(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.constant_temp)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
    self.assertEqual(self.mock_device.ReadFile.call_count, 24)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
#coding:utf-8
#################################
#Copyright(c) 2014 dtysky
#################################
import G2R
class ScSp(G2R.SpSyntax):
def Show(self,Flag,Attrs,US,UT,Tmp,FS):
sw=''
name,Attrs=self.Check(Flag,Attrs,UT,FS)
if Attrs['k']=='Main':
sw+=' $ store.chapter='
sw+="'Chapter."+Attrs['cp']+Attrs['sc']+"'\ | n"
        return sw
##
# Copyright 2012-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Declaration of toolchains.compiler namespace.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
import pkg_resources
pkg_resources.declare_namespace(__name__)
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base spout for integration tests"""
import copy
from heron.common.src.python.utils.log import Log
from heron.api.src.python.spout.spout import Spout
from heron.api.src.python.stream import Stream
from heron.api.src.python.component.component_spec import HeronComponentSpec
import heron.common.src.python.pex_loader as pex_loader
from ..core import constants as integ_const
class IntegrationTestSpout(Spout):
"""Base spout for integration test
    Every spout in an integration test topology is an instance of this class, each delegating to the user's spout.
"""
outputs = [Stream(fields=[integ_const.INTEGRATION_TEST_TERMINAL],
name=integ_const.INTEGRATION_TEST_CONTROL_STREAM_ID)]
@classmethod
def spec(cls, name, par, config, user_spout_classpath, user_output_fields=None):
python_class_path = "%s.%s" % (cls.__module__, cls.__name__)
config[integ_const.USER_SPOUT_CLASSPATH] = user_spout_classpath
# avoid modification to cls.outputs
_outputs = copy.copy(cls.outputs)
if user_output_fields is not None:
_outputs.extend(user_output_fields)
return HeronComponentSpec(name, python_class_path, is_spout=True, par=par,
inputs=None, outputs=_outputs, config=config)
def initialize(self, config, context):
user_spout_classpath = config.get(integ_const.USER_SPOUT_CLASSPATH, None)
if user_spout_classpath is None:
raise RuntimeError("User defined integration test spout was not found")
user_spout_cls = self._load_user_spout(context.get_topology_pex_path(), user_spout_classpath)
self.user_spout = user_spout_cls(delegate=self)
    self.max_executions = config.get(integ_const.USER_MAX_EXECUTIONS, integ_const.MAX_EXECUTIONS)
assert isinstance(self.max_executions, int) and self.max_executions > 0
Log.info("Max executions: %d" % self.max_executions)
self.tuples_to_complete = 0
self.user_spout.initialize(config, context)
@staticmethod
def _load_user_spout(pex_file, classpath):
pex_loader.load_pex(pex_file)
    cls = pex_loader.import_and_get_class(pex_file, classpath)
return cls
@property
def is_done(self):
return self.max_executions == 0
def next_tuple(self):
if self.is_done:
return
self.max_executions -= 1
Log.info("max executions: %d" % self.max_executions)
self.user_spout.next_tuple()
if self.is_done:
self._emit_terminal_if_needed()
Log.info("This topology is finished.")
def ack(self, tup_id):
Log.info("Received an ack with tuple id: %s" % str(tup_id))
self.tuples_to_complete -= 1
if tup_id != integ_const.INTEGRATION_TEST_MOCK_MESSAGE_ID:
self.user_spout.ack(tup_id)
self._emit_terminal_if_needed()
def fail(self, tup_id):
Log.info("Received a fail message with tuple id: %s" % str(tup_id))
self.tuples_to_complete -= 1
if tup_id != integ_const.INTEGRATION_TEST_MOCK_MESSAGE_ID:
self.user_spout.fail(tup_id)
self._emit_terminal_if_needed()
def emit(self, tup, tup_id=None, stream=Stream.DEFAULT_STREAM_ID,
direct_task=None, need_task_ids=None):
"""Emits from this integration test spout
    Overridden method which will be called when the user's spout calls emit()
"""
# if is_control True -> control stream should not count
self.tuples_to_complete += 1
if tup_id is None:
Log.info("Add tup_id for tuple: %s" % str(tup))
_tup_id = integ_const.INTEGRATION_TEST_MOCK_MESSAGE_ID
else:
_tup_id = tup_id
super(IntegrationTestSpout, self).emit(tup, _tup_id, stream, direct_task, need_task_ids)
def _emit_terminal_if_needed(self):
Log.info("is_done: %s, tuples_to_complete: %s" % (self.is_done, self.tuples_to_complete))
if self.is_done and self.tuples_to_complete == 0:
Log.info("Emitting terminals to downstream")
super(IntegrationTestSpout, self).emit([integ_const.INTEGRATION_TEST_TERMINAL],
stream=integ_const.INTEGRATION_TEST_CONTROL_STREAM_ID)
from django.db import models
from cms.models import CMSPlugin
CLASS_CHOICES = ['container', 'content', 'teaser']
CLASS_CHOICES = tuple((entry, entry) for entry in CLASS_CHOICES)
TAG_CHOICES = [
'div', 'article', 'section', 'header', 'footer', 'aside',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6'
]
TAG_CHOICES = tuple((entry, entry) for entry in TAG_CHOICES)
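# Illustrative note (not part of the original module): the comprehension above
# is the usual idiom for building Django "choices" where the stored value and
# the human-readable label coincide. After it runs, for example:
#
#     CLASS_CHOICES == (('container', 'container'), ('content', 'content'),
#                       ('teaser', 'teaser'))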
class Style(CMSPlugin):
"""
Renders a given ``TAG_CHOICES`` element with additional attributes
"""
label = models.CharField(
verbose_name='Label',
blank=True,
max_length=255,
help_text='Overrides the display name in the structure mode.',
)
tag_type = models.CharField(
verbose_name='Tag type',
choices=TAG_CHOICES,
default=TAG_CHOICES[0][0],
max_length=255,
)
class_name = models.CharField(
verbose_name='Class name',
        choices=CLASS_CHOICES,
        default=CLASS_CHOICES[0][0],
blank=True,
max_length=255,
)
additional_classes = models.CharField(
verbose_name='Additional classes',
blank=True,
max_length=255,
)
def __str__(self):
return self.label or self.tag_type or str(self.pk)
def get_short_description(self):
# display format:
# Style label <tag> .list.of.classes #id
display = []
classes = []
if self.label:
display.append(self.label)
if self.tag_type:
display.append('<{0}>'.format(self.tag_type))
if self.class_name:
classes.append(self.class_name)
if self.additional_classes:
classes.extend(item.strip() for item in self.additional_classes.split(',') if item.strip())
display.append('.{0}'.format('.'.join(classes)))
return ' '.join(display)
def get_additional_classes(self):
return ' '.join(item.strip() for item in self.additional_classes.split(',') if item.strip())
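# Worked example (illustrative, not from the original file): an instance with
# label='Intro', tag_type='section', class_name='container' and
# additional_classes='wide, boxed' gives
# get_short_description() == 'Intro <section> .container.wide.boxed'
# and get_additional_classes() == 'wide boxed'.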
"""
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
"""
The 'geometry_columns' table from the PostGIS. See the PostGIS
documentation at Ch. 4.2.2.
"""
f_table_catalog = models.CharField(max_length=256)
f_table_schema = models.CharField(max_length=256)
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
type = models.CharField(max_length=30)
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
        Returns the name of the metadata column used to store
        the feature table name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
        Returns the name of the metadata column used to store
        the feature geometry column.
"""
return 'f_geometry_column'
def __unicode__(self):
return "%s.%s - %dD %s | field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
                self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from PostGIS. See the PostGIS
    documentation at Ch. 4.2.1.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
srtext = models.CharField(max_length=2048)
proj4text = models.CharField(max_length=2048)
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
@property
def wkt(self):
return self.srtext
@classmethod
def wkt_col(cls):
return 'srtext'
__source__ = 'https://leetcode.com/problems/valid-anagram/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/valid-anagram.py
# Time: O(n)
# Space: O(1)
#
# Description: Leetcode # 242. Valid Anagram
#
# Given two strings s and t, write a function to
# determine if t is an anagram of s.
#
# For example,
# s = "anagram", t = "nagaram", return true.
# s = "rat", t = "car", return false.
#
# Note:
# You may assume the string contains only lowercase alphabets.
#
# Companies
# Amazon Uber Yelp
# Related Topics
# Hash Table Sort
# Similar Questions
# Group Anagrams Palindrome Permutation Find All Anagrams in a String
#
import unittest
class Solution:
# @param {string} s
# @param {string} t
# @return {boolean}
def isAnagram(self, s, t):
if len(s) != len(t):
return False
count = {}
for c in s:
if c.lower() in count:
count[c.lower()] += 1
else:
count[c.lower()] = 1
for c in t:
if c.lower() in count:
count[c.lower()] -= 1
else:
count[c.lower()] = -1
if count[c.lower()] < 0:
return False
return True
# Time: O(nlogn)
# Space: O(n)
class Solution2:
# @param {string} s
# @param {string} t
# @return {boolean}
def isAnagram(self, s, t):
return sorted(s) == sorted(t)
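# An extra variant (not from the original post) using collections.Counter;
# still O(n) time and O(1) extra space for a fixed-size alphabet:
import collections

class Solution3:
    # @param {string} s
    # @param {string} t
    # @return {boolean}
    def isAnagram(self, s, t):
        # two equal character multisets <=> t is an anagram of s
        return collections.Counter(s) == collections.Counter(t)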
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
        print(Solution().isAnagram('a', 'a'))
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/valid-anagram/solution/
#
# 4ms 71.69%
class Solution {
public boolean isAnagram(String s, String t) {
if (s.length() != t.length()) {
return false;
}
int[] count = new int[26];
for (int i = 0; i < s.length(); i++) {
count[s.charAt(i) - 'a']++;
}
for (int i = 0; i < t.length(); i++) {
count[t.charAt(i) - 'a']--;
}
for (int i = 0; i < 26; i++) {
if (count[i] != 0) {
return false;
}
}
return true;
}
}
Approach #1 (Sorting) [Accepted]
# Time: O(nlogn)
# Space: O(1)
#7ms 41.66%
class Solution {
public boolean isAnagram(String s, String t) {
if (s.length() != t.length()) {
return false;
}
char[] str1 = s.toCharArray();
char[] str2 = t.toCharArray();
Arrays.sort(str1);
Arrays.sort(str2);
return Arrays.equals(str1, str2);
}
}
# 3ms 81.95%
class Solution {
public boolean isAnagram(String s, String t) {
int [] alp = new int[26];
for(int i = 0;i<s.length();i++) alp[s.charAt(i) - 'a']++;
for(int i = 0;i<t.length();i++) alp[t.charAt(i) - 'a']--;
for(int i : alp) if(i!=0) return false;
return true;
}
}
# 6ms 49.29%
class Solution {
public boolean isAnagram(String s, String t) {
return Arrays.equals(countCharacters(s), countCharacters(t));
}
private int[] countCharacters(String s) {
int[] count = new int[26];
for (int i = 0; i < s.length(); i++) {
count[s.charAt(i) - 'a']++;
}
return count;
}
}
'''
from typing import Dict
from unittest import mock
from conductor.accounts.forms import DeactivateForm, SignupForm
from conductor.tests import TestCase
class TestSignupForm(TestCase):
def test_valid(self) -> None:
product_plan = self.ProductPlanFactory.create()
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": "12345",
}
form = SignupForm(product_plan, data=data)
self.assertTrue(form.is_valid())
self.assertEqual(product_plan, form.product_plan)
def test_required(self) -> None:
product_plan = self.ProductPlanFactory.create()
data: Dict[str, str] = {}
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("username", form.errors)
self.assertIn("email", form.errors)
self.assertIn("password", form.errors)
self.assertIn("stripe_token", form.errors)
self.assertNotIn("postal_code", form.errors)
def test_invalid_password(self) -> None:
product_plan = self.ProductPlanFactory.create()
# Test similar username and password to ensure a user instance
# is present and valuable.
data = {
"username": "mattlayman",
"email": "matt@test.com",
"password": "mattlayman",
"stripe_token": "tok_1234",
"postal_code": "12345",
}
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("password", form.errors)
def test_unique_email(self) -> None:
product_plan = self.ProductPlanFactory.create()
self.UserFactory.create(email="matt@test.com")
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": "12345",
}
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("email", form.errors)
def test_unique_username(self) -> None:
product_plan = self.ProductPlanFactory.create()
self.UserFactory.create(username="matt")
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": "12345",
}
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("username", form.errors)
@mock.patch("conductor.accounts.forms.stripe_gateway")
def test_creates_user(self, stripe_gateway: mock.MagicMock) -> None:
product_plan = self.ProductPlanFactory.create()
stripe_gateway.create_customer.return_value = "cus_1234"
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": "21702",
}
form = SignupForm(product_plan, data=data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertEqual(user.username, "matt")
self.assertEqual(user.email, "matt@test.com")
self.assertEqual(user.profile.postal_code, "21702")
self.assertEqual(user.profile.stripe_customer_id, "cus_1234")
@mock.patch("conductor.accounts.forms.stripe_gateway")
    def test_missing_postal_code(self, stripe_gateway: mock.MagicMock) -> None:
product_plan = self.ProductPlanFactory.create()
stripe_gateway.create_customer.return_value = "cus_1234"
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": None,
}
form = SignupForm(product_plan, data=data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertEqual(user.profile.postal_code, "")
class TestDeactivateForm(TestCase):
def test_matching_email(self) -> None:
user = self.UserFactory.create()
data = {"email": user.email}
form = DeactivateForm(user, data=data)
is_valid = form.is_valid()
self.assertTrue(is_valid)
def test_mismatched_email(self) -> None:
user = self.UserFactory.create()
data = {"email": f"nomatch-{user.email}"}
form = DeactivateForm(user, data=data)
is_valid = form.is_valid()
self.assertFalse(is_valid)
self.assertIn("email", form.errors)
@mock.patch("conductor.accounts.forms.stripe_gateway")
def test_save(self, stripe_gateway: mock.MagicMock) -> None:
"""The user subscription gets cancelled and the user is marked inactive."""
user = self.UserFactory.create()
form = DeactivateForm(user)
form.save()
stripe_gateway.cancel_subscription.assert_called_once_with(user)
user.refresh_from_db()
self.assertFalse(user.is_active)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'pasportaservo',
'USER': 'guillaume',
}
}
LANGUAGE_CODE = 'en'
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
    'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'django_countries',
'phonenumber_field',
'bootstrapform',
'leaflet',
'postman',
'hosting',
'pages',
'debug_toolbar',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2013 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
import newfies
from django.conf import settings
def newfies_version(request):
    return {'newfies_version': newfies.__version__, 'SURVEYDEV': settings.SURVEYDEV}
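# Usage note (illustrative; the dotted path below is a guess at this project's
# layout, not taken from the original file): a context processor only runs if
# it is registered in the Django settings, e.g. with the TEMPLATES setting of
# Django >= 1.8:
#
# TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'APP_DIRS': True,
#     'OPTIONS': {
#         'context_processors': [
#             'newfies.context_processors.newfies_version',  # hypothetical path
#         ],
#     },
# }]
#
# Templates can then use {{ newfies_version }} and {{ SURVEYDEV }} directly.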
from shutit_module import ShutItModule
import base64
class openshift_airflow(ShutItModule):
def build(self, shutit):
shutit.send('cd /tmp/openshift_vm')
shutit.login(command='vagrant ssh')
        shutit.login(command='sudo su -',password='vagrant',note='Become root (there is a problem logging in as admin with the vagrant user)')
# AIRFLOW BUILD
# Takes too long.
#shutit.send('oc describe buildconfig airflow',note='Ideally you would take this github url, and update your github webhooks for this project. But there is no public URL for this server so we will skip and trigger a build manually.')
#shutit.send('oc start-build airflow',note='Trigger a build by hand')
#shutit.send('sleep 60 && oc logs -f build/airflow-1',note='Follow the build and wait for it to terminate')
# IMAGE STREAM
shutit.send_file('/tmp/imagestream.json','''
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "airflow"
},
"spec": {},
"status": {
"dockerImageRepository": ""
}
}''')
shutit.send('oc create -f /tmp/imagestream.json')
# BUILD CONFIG
shutit.send_file('secret.json','''{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mysecret"
},
"namespace": "user2",
"data": {
"username": "''' + base64.b64encode('myusername') + '''"
}
}''')
shutit.send('oc create -f secret.json')
shutit.send_file('/tmp/buildconfig.json','''
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "airflow",
"labels": {
"name": "airflow-build"
}
},
"spec": {
"source": {
"type": "Git",
"git": {
"uri": "https://github.com/ianmiell/shutit-airflow"
}
},
"strategy": {
"type": "Docker"
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "airflow:latest"
}
},
"volumes": {
"name": "secvol",
"secret": {
"secretname": "mysecret"
}
}
}
}
''')
shutit.send('oc create -f /tmp/buildconfig.json')
# DEPLOYMENT CONFIG
shutit.send_file('/tmp/deploymentconfig.json','''
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "airflow"
},
"spec": {
"strategy": {
"type": "Rolling",
"rollingParams": {
"updatePeriodSeconds": 1,
"intervalSeconds": 1,
"timeoutSeconds": 120
},
"resources": {}
},
"triggers": [
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"nodejs-helloworld"
],
"from": {
"kind": "ImageStreamTag",
"name": "airflow:latest"
            }
}
},
{
"type": "ConfigChange"
}
],
"replicas": 1,
"selector": {
"name":"airflow"
},
"template": {
"metadata": {
"labels": {
"name": "airflow"
}
},
"spec": {
"containers": [
{
| "name": "airflow",
"image": "airflow",
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "IfNotPresent",
"securityContext": {
"capabilities": {},
"privileged": false
}
}
],
"restartPolicy": "Always",
"dnsPolicy": "ClusterFirst"
}
}
},
"status": {}
}
''')
shutit.send('oc create -f /tmp/deploymentconfig.json')
shutit.logout()
shutit.logout()
return True
def module():
return openshift_airflow(
'shutit.openshift_vm.openshift_vm.openshift_airflow', 1418326706.005,
description='',
maintainer='',
delivery_methods=['bash'],
depends=['shutit.openshift_vm.openshift_vm.openshift_vm']
)
from tkinter import *
from gui import GUI
from reminder import Reminder
import argparse
import time
if __name__ == '__main__':
print("""
Copyright (C) 2016 Logvinov Dima.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions.
""")
parser = argparse.ArgumentParser(description="PyReminder - python | reminder app for ElementaryOS.")
parser.add_argument('--add', help="Add new event.\n"
"$ pyreminder --add 'Event text:Event time hours.minutes.day.month.year'", type=str)
    parser.add_argument('--list', help="Print list of events.", action="store_true")
parser.add_argument('--delete', help="Delete event.\n "
"$ pyreminder --delete event_id ", type=int)
parser.add_argument('--gui', help="Run gui program.", action="store_true")
args = parser.parse_args()
reminder = Reminder()
if args.gui:
root = Tk()
root.geometry("500x200+350+500")
app = GUI(root, reminder)
root.mainloop()
if args.add:
event_text, event_date = args.add.split(":")
reminder.add_task(event_date, event_text)
reminder.update_db()
if args.list:
tasks = reminder.get_tasks_list()
if len(tasks) > 0:
for task_id in range(0, len(tasks)):
task_id = str(task_id)
print("id:{0} time:{1} text:{2}".
format(task_id, tasks[task_id][0], tasks[task_id][1]))
if args.delete:
if not reminder.delete_task(str(args.delete)):
print("Task: {} not found.".format(str(args.delete)))
    Output (2a): ::
doing a slow thing... done
Output (2b): ::
doing a slow thing... ERROR
<stack trace>
:param quiet: True to skip the message altogether
"""
def __init__(self, msg,
sameline=True,
quiet=False,
file=sys.stderr):
self._msg = msg
self._file = file
self._sameline = sameline
self._quiet = quiet
self._start = 0
self._end = 0
def __enter__(self):
# we grab the wall time instead of using time.clock() (A)
        # because we are not using this for profiling but just to
# get a rough idea what's going on, and (B) because we want
# to include things like IO into the mix
self._start = time.time()
if self._quiet:
return
elif self._sameline:
print(self._msg, end="... ", file=self._file)
else:
print("[start]", self._msg, file=self._file)
def __exit__(self, type, value, tb):
self._end = time.time()
if tb is None:
if not self._quiet:
done = "done" if self._sameline else "[-end-] " + self._msg
ms_elapsed = 1000 * (self._end - self._start)
final_msg = u"{} [{:.0f} ms]".format(done, ms_elapsed)
print(final_msg, file=self._file)
else:
if not self._quiet:
oops = "ERROR!" if self._sameline else "ERROR! " + self._msg
print(oops, file=self._file)
traceback.print_exception(type, value, tb)
sys.exit(1)
    # pylint: disable=redefined-builtin, invalid-name
# ---------------------------------------------------------------------
# tables
# ---------------------------------------------------------------------
def load_edus(edu_file):
"""
Read EDUs (see :doc:`../input`)
:rtype: [EDU]
.. _format: https://github.com/kowey/attelo/doc/inputs.rst
"""
def read_edu(row):
'interpret a single row'
expected_len = 6
if len(row) != expected_len:
oops = ('This row in the EDU file {efile} has {num} '
'elements instead of the expected {expected}: '
'{row}')
raise IoException(oops.format(efile=edu_file,
num=len(row),
expected=expected_len,
row=row))
[global_id, txt, grouping, subgrouping, start_str, end_str] = row
start = int(start_str)
end = int(end_str)
return EDU(global_id,
txt.decode('utf-8'),
start,
end,
grouping,
subgrouping)
with open(edu_file, 'rb') as instream:
reader = csv.reader(instream, dialect=csv.excel_tab)
return [read_edu(r) for r in reader if r]
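# Illustrative input sketch (not from the original file): load_edus() expects
# a tab-separated file whose rows carry exactly six fields, e.g. the row
#
#     d1_e2<TAB>Hello there.<TAB>dialogue1<TAB>turn1<TAB>0<TAB>12
#
# which read_edu() turns into
# EDU('d1_e2', u'Hello there.', 0, 12, 'dialogue1', 'turn1').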
def load_pairings(edu_file):
"""
Read and return EDU pairings (see :doc:`../input`).
We assume the order is parent, child
:rtype: [(string, string)]
.. _format: https://github.com/kowey/attelo/doc/inputs.rst
"""
def read_pair(row):
'interpret a single row'
if len(row) < 2 or len(row) > 3:
oops = ('This row in the pairings file {efile} has '
'{num} elements instead of the expected 2 or 3')
raise IoException(oops.format(efile=edu_file,
num=len(row),
row=row))
return tuple(row[:2])
with open(edu_file, 'rb') as instream:
reader = csv.reader(instream, dialect=csv.excel_tab)
return [read_pair(r) for r in reader if r]
def load_labels(feature_file):
"""
Read the very top of a feature file and read the labels comment,
return the sequence of labels, else return None
:rtype: [string] or None
"""
with codecs.open(feature_file, 'r', 'utf-8') as stream:
line = stream.readline()
if line.startswith('#'):
seq = line[1:].split()
if seq[0] == 'labels:':
return seq[1:]
# fall-through case, no labels found
return None
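# Illustrative sketch (not from the original file): load_labels() only
# inspects the first line of the feature file, so a file beginning with
#
#     # labels: UNRELATED elaboration narration
#
# returns ['UNRELATED', 'elaboration', 'narration'], while any other first
# line yields None.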
def _process_edu_links(edus, pairings):
"""
Convert from the results of :py:method:load_edus: and
:py:method:load_pairings: to a sequence of edus and pairings
respectively
:rtype: ([EDU], [(EDU,EDU)])
"""
edumap = {e.id: e for e in edus}
enames = frozenset(chain.from_iterable(pairings))
if FAKE_ROOT_ID in enames:
edus2 = [FAKE_ROOT] + edus
edumap[FAKE_ROOT_ID] = FAKE_ROOT
else:
edus2 = copy.copy(edus)
naughty = [x for x in enames if x not in edumap]
if naughty:
oops = ('The pairings file mentions the following EDUs but the EDU '
'file does not actually include EDUs to go with them: {}')
raise DataPackException(oops.format(truncate(', '.join(naughty),
1000)))
pairings2 = [(edumap[e1], edumap[e2]) for e1, e2 in pairings]
return edus2, pairings2
def load_multipack(edu_file, pairings_file, feature_file, vocab_file,
verbose=False):
"""
Read EDUs and features for edu pairs.
Perform some basic sanity checks, raising
:py:class:`IoException` if they should fail
:rtype: :py:class:`Multipack` or None
"""
vocab = load_vocab(vocab_file)
with Torpor("Reading edus and pairings", quiet=not verbose):
edus, pairings = _process_edu_links(load_edus(edu_file),
load_pairings(pairings_file))
with Torpor("Reading features", quiet=not verbose):
        labels = [UNKNOWN] + (load_labels(feature_file) or [])  # guard: load_labels may return None
# pylint: disable=unbalanced-tuple-unpacking
data, targets = load_svmlight_file(feature_file,
n_features=len(vocab))
# pylint: enable=unbalanced-tuple-unpacking
with Torpor("Build data packs", quiet=not verbose):
dpack = DataPack.load(edus, pairings, data, targets,
labels, vocab)
return {k: dpack.selected(idxs) for
k, idxs in groupings(pairings).items()}
def load_vocab(filename):
"read feature vocabulary"
features = []
with codecs.open(filename, 'r', 'utf-8') as stream:
for line in stream:
features.append(line.split('\t')[0])
return features
# ---------------------------------------------------------------------
# predictions
# ---------------------------------------------------------------------
def write_predictions_output(dpack, predicted, filename):
"""
Write predictions to an output file whose format
is documented in :doc:`../output`
"""
links = {}
for edu1, edu2, label in predicted:
links[(edu1, edu2)] = label
def mk_row(edu1, edu2):
'return a list of columns'
        edu1_id = edu1.id
edu2_id = edu2.id
row = [edu1_id,
edu2_id,
links.get((edu1_id, edu2_id), UNRELATED)]
return [x.encode('utf-8') for x in row]
with open(filename, 'wb') as fout:
writer = csv.writer(fout, dialect=csv.excel_tab)
# by convention the zeroth edu is the root node
for edu1, edu2 in dpack.pairings:
writer.writerow(mk_row(edu1, edu2))
def load_predictions(edu_file):
"""
Read back predictions (see :doc:`../output`), returning a list
of triples: parent id, child id, relation label (or 'UNRELATED')
:rtype: [(string, string, string)]
"""
def mk_pair(row):
'interpret a single row'
expected_len = 3
if len(row) < expected_len:
oops = ('This row in the predictions file {efile} has {num} '
'elements instead of the expected {expected}: '
'{row}')
raise IoException(oops.format(efile=edu_file,
num=len(row),
                                      expected=expected_len,
                                      row=row))
def printMap(the_map,note):
print(note)
for row in the_map:
row_str = ""
for cell in row:
row_str += " {0:3d}".format(cell)
print(row_str)
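# Note (added commentary, not from the original file): pathFinder() below
# floods the grid recursively from the bottom-right corner, writing into each
# reachable cell the smallest step count found so far; a negative entry marks
# a cell reached on a path that has already spent its single allowed wall
# removal. solution() then reads the shortest escape length out of
# testMap[0][0] (the top-left cell).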
def pathFinder(x, y, the_map, steps, lastX, lastY, wall):
# count possible moves
debug = False
options = []
if x-1 >= 0: # East
options.append([-1, 0])
if x+1 <= lastX: # West
options.append([ 1, 0])
if y-1 >= 0: # North
options.append([ 0,-1])
if y+1 <= lastY: # South
options.append([ 0, 1])
# increment step
steps += 1
if debug:
printMap(the_map,"({0:2d},{1:2d}) steps:{2:3d} {3:6} before options ---------------------------------".format(x,y,steps,wall))
for option in options:
# new x and y
newX = x + option[0]
# print("x({0:2d}) + option[0]({1:2d}) -> newX({2:2d})".format(x,option[0],newX) )
newY = y + option[1]
# print("y({0:2d}) + option[1]({1:2d}) -> newY({2:2d})".format(y,option[1],newY) )
if debug:
| print(" looking at ({0:2d},{1:2d}) with value={2:2d} and with steps:{3:3d} {4:6} from ({5:2d},{6:2d})".format(newX,newY,the_map[newY][newX],steps,wall,x,y))
# if statements
if the_map[newY][newX] == 0:
the_map[newY][newX] = steps
if newX != 0 or newY != 0:
                pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
elif the_map[newY][newX] > 1 and steps <= the_map[newY][newX]:
the_map[newY][newX] = steps
if newX != 0 or newY != 0:
pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
elif ( the_map[newY][newX] == 1 or the_map[newY][newX] < 0 ) and not wall and (newX != lastX or newY != lastY):
if debug:
print("Removing a wall at {0:2d}:{1:2d}".format(newX,newY))
wall = True
the_map[newY][newX] = steps * -1
pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
wall = False
elif the_map[newY][newX] > 1 and steps < abs(the_map[newY][newX]):
if(the_map[newY][newX] < 0):
the_map[newY][newX] = steps * -1
if(the_map[newY][newX] > 0):
the_map[newY][newX] = steps
if newX != 0 or newY != 0:
pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
if debug:
printMap(the_map,"({0:2d},{1:2d}) steps:{2:3d} {3:6} after options ---------------------------------".format(x,y,steps,wall))
def solution(the_map):
debug = False
steps = 1
lastX = len(the_map[0]) - 1
lastY = len(the_map) - 1
x = lastX
y = lastY
testMap = the_map[:]
testMap[y][x] = 1
pathFinder(x, y, testMap, steps, lastX, lastY, False)
if debug:
printMap(the_map,"All done. {0:3d} ------------------------------".format(testMap[0][0]))
return(testMap[0][0])
#print(solution([[0, 1], [0, 0]]))
#print(solution([[0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0]]))
print(solution([[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]))
#print(solution([[0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]))
# -*- coding: utf-8 -*-
# Copyright (C) 2010 by RoboLab - University of Extremadura
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import Ice, sys, math, traceback
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class C(QWidget):
def __init__(self, endpoint, modules):
QWidget.__init__(self)
self.ic = Ice.initialize(sys.argv)
self.mods = modules
		print('Endpoint', endpoint)
self.prx = self.ic.stringToProxy(endpoint)
self.proxy = self.mods['RoboCompRoimant'].RoimantPrx.checkedCast(self.prx)
self.leftPyrList = []
self.rightPyrList = []
for level in range(4):
self.leftPyrList.append(None)
self.rightPyrList.append(None)
self.wdth = self.proxy.getRoiParams().width
self.hght = self.proxy.getRoiParams().height
self.job()
def job(self):
output = self.proxy.getBothPyramidsRGBAndLeftROIList()
pos=0
size=self.wdth*self.hght*3
for level in range(4):
self.leftPyrList[level] = output[0][pos:pos+size]
self.rightPyrList[level] = output[2][pos:pos+size]
pos = pos + size
			size = size // 4
def paintEvent(self, event=None):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing, True)
xPos = -self.wdth/2
yPos = self.height()
		for level in range(len(self.leftPyrList)):
xPos = xPos + (self.wdth/2)/(2**level)
yPos = yPos - self.hght/(2**level)
			qimage = QImage(self.leftPyrList[level], self.wdth//(2**level), self.hght//(2**level), QImage.Format_RGB888)
painter.drawImage(QPointF(xPos, yPos), qimage)
			qimage = QImage(self.rightPyrList[level], self.wdth//(2**level), self.hght//(2**level), QImage.Format_RGB888)
painter.drawImage(QPointF(xPos+self.wdth, yPos), qimage)
painter.end()
		painter = None
'find', 'find_one', 'find_one_and_delete',
'find_one_and_replace', 'find_one_and_update',
'update_one', 'update_many', 'replace_one',
'delete_one', 'delete_many',
'create_index', 'create_indexes', 'reindex',
'index_information', 'list_indexes',
'drop', 'drop_index', 'drop_indexes',
'aggregate', 'group', 'inline_map_reduce', 'map_reduce',
'bulk_write',
'initialize_ordered_bulk_op', 'initialize_unordered_bulk_op',
'rename', 'count', 'distinct', 'options', 'with_options',
]:
locals()[cmd] = partialmethod(run_command, cmd=cmd)
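# The loop above turns each listed pymongo Collection method name into a
# partialmethod that forwards to run_command with cmd preset, so a call such
# as obj.find(...) resolves to run_command(..., cmd='find').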
class InitMixin(object):
def __init__(self, data=None):
self._refs = {}
self._data = {}
self._defaults = {}
if data:
for name, field in self._fields.items():
if name in data:
value = data[name]
else:
value = field.default
if callable(value):
value = value()
if value is not None:
self._defaults[name] = value
value = field.to_storage(value)
self._data[name] = value
class ValidationMixin(object):
def validate(self):
for name, field in self._fields.items():
if name in self._data:
value = field.to_python(self._data[name])
field.validate(value)
def to_dict(self):
d = {}
for name, field in self._fields.items():
value = field.to_python(self._data.get(name))
if isinstance(value, list):
ovalue, value = value, []
for v in ovalue:
if isinstance(v, EmbeddedDocument):
v = v.to_dict()
value.append(v)
d[name] = value
return d
class MetaMixin(object):
""" helper methods for "Meta" info """
@classproperty
def unique_fields(cls):
names = set()
for idx in cls.Meta._indexes or []:
if idx.kwargs.get('unique'):
for key in idx.keys:
if isinstance(key, tuple):
names.add(key[0])
else:
names.add(key)
return names
@classmethod
def prepare(cls):
cls.ensure_indexes()
cls.ensure_shards()
@classmethod
def ensure_indexes(cls):
allowed_keys = set(['name', 'unique', 'background', 'sparse',
'bucketSize', 'min', 'max', 'expireAfterSeconds'])
for idx in cls.Meta._indexes or []:
if set(idx.kwargs.keys()) - allowed_keys:
raise ArgumentError(MetaMixin.ensure_indexes, idx.kwargs)
cls._coll.create_index(idx.keys, **idx.kwargs)
@classmethod
def ensure_shards(cls):
if cls.Meta._shardkey:
admin = cls._conn.admin
dbname = cls._db.name
try:
admin.command('enableSharding', dbname)
except Exception as e:
            if 'already' in str(e):
try:
admin.command(
'shardCollection',
'{}.{}'.format(dbname,
cls.Meta.__collection__),
key=cls.Meta._shardkey.key)
except Exception as e:
                if 'already' not in str(e):
log.warning('shard collection failed: '
'{}'.format(str(e)))
else:
log.warning('enable shard failed: '
'{}'.format(str(e)))
class MapperMixin(object):
""" ORM only method mixins """
def refresh(self):
_id = self._data.get('_id')
self._data = {}
if _id:
doc = self._coll.find_one({'_id': _id})
if doc:
self._data = doc
self.validate()
@classmethod
def query(cls, *args, **kwargs):
""" Same as collection.find, but return Document then dict """
for doc in cls._coll.find(*args, **kwargs):
yield cls.from_storage(doc)
@classmethod
def query_one(cls, *args, **kwargs):
""" Same as collection.find_one, but return Document then dict """
doc = cls._coll.find_one(*args, **kwargs)
if doc:
return cls.from_storage(doc)
def update(self, update):
""" Update self """
self._coll.update_one({'_id': self._data['_id']},
update)
def upsert(self, null=False):
""" Insert or Update Document
:param null: whether update null values
Wisely select unique field values as filter,
Update with upsert=True
"""
self._pre_save()
self.validate()
filter_ = self._upsert_filter()
if filter_:
update = self._upsert_update(filter_, null)
if update['$set']:
r = self._coll.find_one_and_update(filter_, update,
upsert=True, new=True)
self._data['_id'] = r['_id']
else:
r = self._coll.insert_one(self._data)
self._data['_id'] = r.inserted_id
def save(self):
self._pre_save()
self._ensure_id()
self.validate()
if '_id' in self._data:
doc = self._data.copy()
del doc['_id']
self._coll.update_one({'_id': self._data['_id']},
{'$set': doc},
upsert=True)
else:
self._coll.insert_one(self._data)
@classmethod
def bulk_upsert(cls, docs, null=False):
if len(docs) == 0:
return 0
requests = []
for doc in docs:
if not isinstance(doc, cls):
raise ArgumentError(cls, docs)
doc._pre_save()
doc.validate()
filter_ = doc._upsert_filter()
if filter_:
update = doc._upsert_update(filter_, null)
if update['$set']:
requests.append(UpdateOne(filter_, update, upsert=True))
else:
requests.append(InsertOne(doc._data))
r = cls._coll.bulk_write(requests, ordered=False)
return r.upserted_count
def remove(self):
_id = self._ensure_id()
if _id:
self._coll.delete_one({'_id': _id})
else:
log.warning("This document has no _id, it can't be deleted")
@classmethod
def cached(cls, timeout=60, cache_none=False):
""" Cache queries
:param timeout: cache timeout
:param cache_none: cache None result
Usage::
>>> Model.cached(60).query({...})
"""
return CachedModel(cls=cls, timeout=timeout, cache_none=cache_none)
    def _pre_save(self):
for name, field in self._fields.items():
value = field.pre_save_val(self._data.get(name))
if value:
setattr(self, name, value)
if not field.required and name in self._data \
and self._data[name] is None:
del self._data[name]
    def _upsert_filter(self):
filter_ = {}
if self._ensure_id():
filter_['_id'] = self._data['_id']
for name in self.unique_fields:
value = self._data.get(name)
if value:
filter_[name] = value
return filter_
def _upsert_update(self, filter_, null=False):
to_update = {}
to_insert = {}
for key, value in self._data.items():
if key not in filter_ and (null or value is not None):
if self._defaults.get(key) == value:
                # default values should only be applied when the upsert inserts a new document
to_insert[key] = value
else:
to_update[key] = value
        return {'$set': to_update, '$setOnInsert': to_insert}
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START aiplatform_cancel_data_labeling_job_sample]
from google.cloud import aiplatform
def cancel_data_labeling_job_sample(
project: str,
data_labeling_job_id: str,
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    # The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.JobServiceClient(client_options=client_options)
name = client.data_labeling_job_path(
        project=project, location=location, data_labeling_job=data_labeling_job_id
)
response = client.cancel_data_labeling_job(name=name)
print("response:", response)
# [END aiplatform_cancel_data_labeling_job_sample]
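# Example invocation; the identifiers below are placeholders, not real resources:
#   cancel_data_labeling_job_sample(
#       project="example-project", data_labeling_job_id="1234567890")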
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
import copy
from PIL import Image
import os
import random
import time
import math
imagesize = (120, 120)
peak = 100
gusti = ["margherita", "crudo", "funghi", "salame", "rucola", "4formaggi", "americana"]
def buildnet():
inputs = len(gusti)
outputs = imagesize[0] * imagesize[1] * 3 # R G B
hiddens = (120 * 3) # lol, I have no idea
return buildNetwork(inputs, hiddens, outputs)
def getSwitchTuple(index, lengt, disturb=0):
ret = []
for i in range(lengt):
if i == index:
ret.append((1.0 + disturb) * peak)
else:
ret.append(0.0)
return tuple(ret)
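# getSwitchTuple yields a one-hot input vector scaled by `peak`, optionally
# disturbing the active element, e.g. getSwitchTuple(1, 3) == (0.0, 100.0, 0.0).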
def buildtrainset():
inputs = len(gusti)
outputs = imagesize[0] * imagesize[1] * 3
ds = SupervisedDataSet(inputs, outputs)
for gusto in gusti:
indice = gusti.index(gusto)
pizzaset = os.listdir("./pizze/" + gusto + "/")
        print("Training set for gusto: %s (%s)" % (gusto, ",".join(map(str, getSwitchTuple(indice, inputs)))))
for pizzaname in pizzaset:
pizza = "./pizze/" + gusto + "/" + pizzaname
print(" Training with %s" % pizza, end=" ")
            ds.addSample(getSwitchTuple(indice, inputs, disturb=random.uniform(-0.3, 0.3)), processImg(pizza))
print("done")
return ds
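# Each training target is a flat (R, G, B, R, G, B, ...) tuple in the same
# pixel order that processImg below produces; outimage inverts that mapping.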
def outimage(outtuple, name):
img = Image.new('RGB', imagesize, "white")
pixels = img.load()
for i in range(img.size[0]):
for j in range(img.size[1]):
            tup_index = (i*img.size[1] + j) * 3
pixels[i,j] = (int(outtuple[tup_index]), int(outtuple[tup_index + 1]), int(outtuple[tup_index + 2]))
img.save(name)
#img.show()
def calcETA(timestep, remaining):
totsec = timestep * remaining
totmin = math.floor(totsec / 60)
remsec = totsec - (totmin * 60)
return totmin, remsec
def letsrock(rounds=25):
minimum = 999999999999
bestnet = None
print("Initializing neural network...")
net = buildnet()
print("Building training set...")
trset = buildtrainset()
trainer = BackpropTrainer(net, trset)
started = time.time()
for i in range(rounds):
print("training: %d%%... " % ((i*100) / rounds), end="")
err = trainer.train()
timestep = (time.time() - started) / (i+1)
        eta_min, eta_sec = calcETA(timestep, rounds - i - 1)
if err < minimum:
minimum = err
bestnet = copy.deepcopy(net)
print("error: %.05f - ETA: %02d:%02d" % (err, min, sec), end="\r")
#trainer.trainUntilConvergence(verbose=True)
print("training: complete! ")
return bestnet
def fullShow():
net = letsrock()
for gusto in gusti:
print("Creating pizza, gusto: %s" % gusto)
indice = gusti.index(gusto)
activ = getSwitchTuple(indice, len(gusti))
name = "oven/" + gusto + ".jpg"
rgb = net.activate(activ)
datum = list(rgb)
outimage(datum, name)
def processImg(filename):
img = Image.open(filename)
img = img.resize(imagesize, Image.ANTIALIAS)
rgb_img = img.convert('RGB')
pixels = []
for x in range(imagesize[0]):
for y in range(imagesize[1]):
tup = tuple(rgb_img.getpixel((x, y)))
pixels.extend(tup)
return tuple(pixels)
if __name__ == "__main__":
    fullShow()
from allauth.socialaccount.providers.base import AuthAction, ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class Scope(object):
ACCESS = 'read-only'
class YNABAccount(ProviderAccount):
pass
class YNABProvider(OAuth2Provider):
id = 'ynab'
name = 'YNAB'
account_class = YNABAccount
def get_default_scope(self):
scope = [Scope.ACCESS]
return scope
def get_auth_params(self, request, action):
ret = super(YNABProvider, self).get_auth_params(request,
action)
if action == AuthAction.REAUTHENTICATE:
ret['prompt'] = 'select_account consent'
        return ret
def extract_uid(self, data):
return str(data['data']['user']['id'])
provider_classes = [YNABProvider]
import sys
from healthcareai.common.healthcareai_error import HealthcareAIError
def validate_pyodbc_is_loaded():
    """ Simple check that alerts the user if pyodbc is not installed, since it is not a requirement. """
if 'pyodbc' not in sys.modules:
raise HealthcareAIError('Using this function requires installation of pyodbc.')
def validate_sqlite3_is_loaded():
""" Simple check that alerts user if they are do not have sqlite installed, | which is not a requirement. """
if 'sqlite3' not in sys.modules:
raise HealthcareAIError('Using this function requires installation of sqlite3.')
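# Typical usage (sketch): call the matching validator before touching the
# optional dependency, e.g.
#   validate_pyodbc_is_loaded()
#   connection = pyodbc.connect(connection_string)  # hypothetical caller code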
"""
Test basic DataFrame functionality.
"""
import pandas as pd
import pytest
import weld.grizzly as gr
def get_frames(cls, strings):
"""
Returns two DataFrames for testing binary operators.
The DataFrames have columns of overlapping/different names, types, etc.
"""
df1 = pd.DataFrame({
'name': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'Smith', 'Narayanan', 'Thomas', 'Thaker'],
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = pd.DataFrame({
'firstName': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'smith', 'narayanan', 'Thomas', 'thaker'],
'age': [25, 30, 45, 20, 60, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
if not strings:
df1 = df1.drop(['name', 'lastName'], axis=1)
df2 = df2.drop(['firstName', 'lastName'], axis=1)
return (cls(df1), cls(df2))
def _test_binop(pd_op, gr_op, strings=True):
"""
Test a binary operator.
Binary operators align on column name. For columns that don't exist in both
    DataFrames, the column is filled with NaN (for non-comparison operations)
    or False (for comparison operations).
If the RHS is a Series, the Series should be added to all columns.
"""
df1, df2 = get_frames(pd.DataFrame, strings)
gdf1, gdf2 = get_frames(gr.GrizzlyDataFrame, strings)
expect = pd_op(df1, df2)
result = gr_op(gdf1, gdf2).to_pandas()
assert expect.equals(result)
def test_evaluation():
    # Test to make sure that evaluating a DataFrame once caches the result and
    # doesn't cause another evaluation.
df1 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
        'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df3 = (df1 + df2) * df2 + df1 / df2
assert not df3.is_value
df3.evaluate()
assert df3.is_value
weld_value = df3.weld_value
df3.evaluate()
# The same weld_value should be returned.
assert weld_value is df3.weld_value
def test_add():
_test_binop(pd.DataFrame.add, gr.GrizzlyDataFrame.add, strings=False)
def test_sub():
_test_binop(pd.DataFrame.sub, gr.GrizzlyDataFrame.sub, strings=False)
def test_mul():
_test_binop(pd.DataFrame.mul, gr.GrizzlyDataFrame.mul, strings=False)
def test_div():
_test_binop(pd.DataFrame.div, gr.GrizzlyDataFrame.div, strings=False)
def test_eq():
_test_binop(pd.DataFrame.eq, gr.GrizzlyDataFrame.eq, strings=True)
def test_ne():
_test_binop(pd.DataFrame.ne, gr.GrizzlyDataFrame.ne, strings=True)
def test_le():
_test_binop(pd.DataFrame.le, gr.GrizzlyDataFrame.le, strings=False)
def test_lt():
_test_binop(pd.DataFrame.lt, gr.GrizzlyDataFrame.lt, strings=False)
def test_ge():
_test_binop(pd.DataFrame.ge, gr.GrizzlyDataFrame.ge, strings=False)
def test_gt():
_test_binop(pd.DataFrame.gt, gr.GrizzlyDataFrame.gt, strings=False)
    n3 = 1.7
n1 = 2.1
n2 = 1.3
b = DMatrix([n1])
Ai = [ blkdiag([n3,-n3])]
C = blkdiag([n2,-n2])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("primal"),DMatrix(n2/n3),digits=5)
self.checkarray(dsp.output("p"),DMatrix.zeros(2,2),digits=5)
self.checkarray(dsp.output("dual")[0,0]-dsp.output("dual")[1,1],DMatrix(n1/n3),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation1(self):
self.message("linear interpolation1")
# min 2*x0 + x1*3
# x0,x1
# x0+x1 - 1 >=0 --> x0+x1>=1
    # x0 >=0
# x1 >=0
#
# solution: x0=1, x1=0
b = DMatrix([2,3])
Ai = [ blkdiag([1,1,0]), blkdiag([1,0,1])]
C = blkdiag([1,0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
    self.checkarray(dsp.output("primal_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([1,0]),digits=5)
self.checkarray(dsp.output("p"),DMatrix([[0,0,0],[0,1,0],[0,0,0]]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix([[2,0,0],[0,0,0],[0,0,1]]),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation2(self):
self.message("linear interpolation2")
# min 2*x0 + 3*x1
# x0,x1
# -(x0 + x1 -1) >=0 --> x0 + x1 <= 1
# x0 >=0
# x1 >=0
#
# solution: x0=0 , x1=0
b = DMatrix([2,3])
Ai = [ blkdiag([-1,1,0]), blkdiag([-1,0,1])]
C = blkdiag([-1,0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(0),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(0),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([0,0]),digits=5)
self.checkarray(dsp.output("p"),DMatrix([[1,0,0],[0,0,0],[0,0,0]]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix([[0,0,0],[0,2,0],[0,0,3]]),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation(self):
self.message("linear interpolation")
# min 2*a + (1-a)*4
# a
# 0 <= a <= 1
#
# Translates to:
# min 2*x0 + 4*x1
# x0,x1
# x0 + x1 -1 >= 0 |__ x0 + x1 == 1
# -(x0 + x1 -1) >= 0 |
# x0 >= 0
# x1 >= 0
b = DMatrix([2,4])
Ai = [ blkdiag([1,-1,1,0]), blkdiag([1,-1,0,1])]
e = 1e-6
C = blkdiag([1,-(1+e),0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([1,0]),digits=5)
self.checkarray(dsp.output("p"),diag([0,0,1,0]),digits=5)
self.checkarray(dsp.output("dual"),diag([2,0,0,2]),digits=2)
@requires("DSDPSolver")
def test_example1(self):
self.message("Example1")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([48,-8,20])
Ai = [DMatrix([[10,4],[4,0]]),DMatrix([[0,0],[0,-8]]),DMatrix([[0,-8],[-8,-2]])]
A = vertcat(Ai)
makeSparse(A)
A.printMatrix()
C = DMatrix([[-11,0],[0,23]])
makeSparse(C)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(-41.9),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(-41.9),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([-1.1,-2.7375,-0.55]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix([[5.9,-1.375],[-1.375,1]]),digits=5)
self.checkarray(dsp.output("p"),DMatrix.zeros(2,2),digits=5)
V = struct_ssym([
entry("L",shape=C.shape),
entry("x",shape=b.size())
])
L = V["L"]
x = V["x"]
P = mul(L,L.T)
g = []
g.append(sum([Ai[i]*x[i] for i in range(3)]) - C - P)
f = SXFunction([V],[mul(b.T,x)])
g = SXFunction([V],[veccat(g)])
sol = IpoptSolver(f,g)
sol.init()
sol.setInput(0,"lbg")
sol.setInput(0,"ubg")
sol.setInput(1,"x0")
sol.evaluate()
sol_ = V(sol.output())
self.checkarray(sol_["x"],DMatrix([-1.1,-2.7375,-0.55]),digits=5)
@requires("DSDPSolver")
def test_example2(self):
self.message("Example2")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([1.1, -10, 6.6 , 19 , 4.1])
C = blkdiag([DMatrix([[-1.4,-3.2],[-3.2,-28]]),DMatrix([[15,-12,2.1],[-12,16,-3.8],[2.1,-3.8,15]]),1.8,-4.0]);
sp = C.sparsity()
flatdata = [[0.5,5.2,5.2,-5.3,7.8,-2.4,6.0,-2.4,4.2,6.5,6.0,6.5,2.1,-4.5,-3.5],
[1.7,7.0,7.0,-9.3,-1.9,-0.9,-1.3,-0.9,-0.8,-2.1,-1.3,-2.1,4.0,-0.2,-3.7],
[6.3,-7.5,-7.5,-3.3,0.2,8.8,5.4,8.8,3.4,-0.4,5.4,-0.4,7.5,-3.3,-4.0],
[-2.4,-2.5,-2.5,-2.9,3.4,-3.2,-4.5,-3.2,3.0,-4.8,-4.5,-4.8,3.6,4.8,9.7],
[-6.5,-5.4,-5.4,-6.6,6.7,-7.2,-3.6,-7.2,7.3,-3.0,-3.6,-3.0,-1.4,6.1,-1.5]]
A = vertcat([DMatrix(sp,data) for data in flatdata])
makeSparse(A)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
DMatrix.setPrecision(10)
self.checkarray(dsp.output("primal_cost"),DMatrix(3.20626934048e1),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(3.20626923535e1),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([1.551644595,0.6709672545,0.9814916693,1.406569511,0.9421687787]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix(sp,[2.640261206,0.5605636589,0.5605636589,3.717637107,0.7615505416,-1.513524657,1.139370202,-1.513524657,3.008016978,-2.264413045,1.139370202,-2.264413045,1.704633559,0,0]),digits=5)
self.checkarray(dsp.output("p"),DMatrix(sp,[0,0,0,0,7.119155551,5.024671489,1.916294752,5.024671489,4.414745792,2.506021978,1.916294752,2.506021978,2.048124139,0.3432465654,4.391169489]),digits=5)
@requires("DSDPSolver")
def test_example2_perm(self):
self.message("Example2_permuted")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([1.1, -10, 6.6 , 19 , 4.1])
perm = [5,2,1,0,6,3,4]
permi = lookupvector(perm,len(perm))
C = blkdiag([DMatrix([[-1.4,-3.2],[-3.2,-28]]),DMatrix([[15,-12,2.1],[-12,16,-3.8],[2.1,-3.8,15]]),1.8,-4.0]);
sp = C.sparsity()
flatdata = [[0.5,5.2,5.2,-5.3,7.8,-2.4,6.0,-2.4,4.2,6.5,6.0,6.5,2.1,-4.5,-3.5],
[1.7,7.0,7.0,-9.3,-1.9,-0.9,-1.3,-0.9,-0.8,-2.1,-1.3,-2.1,4.0,-0.2,-3.7],
[6.3,-7.5,-7.5,-3.3,0.2,8.8,5.4,8.8,3.4,-0.4,5.4,-0.4,7.5,-3.3,-4.0],
[-2.4,-2.5,-2.5,-2.9,3.4,-3.2,-4.5,-3.2,3.0,-4.8,-4.5,-4.8,3.6,4.8,9.7],
[-6.5,-5.4,-5.4,-6.6,6.7,-7.2,-3.6,-7.2,7.3,-3.0,-3.6,-3.0,-1.4,6.1,-1.5]]
A = vertcat([DMatrix(sp,data)[perm,perm] for data in flatdata])
makeSparse(A)
C = C[perm,perm]
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
DMatrix.setPrecision(10)
self.checkarray(dsp.output("primal_cost"),DMatrix(3.20626934048e1),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(3.20 |
#! /usr/bin/env python
class ParserError(Exception):
pass
class Sentence(object):
def __init__(self, subject, verb, object):
# remember we take ('noun', 'princess') tuples and convert them
self.subject = subject[1]
self.verb = verb[1]
self.object = object[1]
def get_sentence(self):
self.sentence = ' '.join([self.subject, self.verb, self.object])
return self.sentence
def peek(word_list):
if word_list:
word = word_list[0]
return word[0]
else:
return None
def match(word_list, expecting):
if word_list:
word = word_list.pop(0)
if word[0] == expecting:
return word
else:
return None
else:
return None
def skip(word_list, word_type):
while peek(word_list) == word_type:
match(word_list, word_type)
def parse_verb(word_list):
skip(word_list, 'stop')
if peek(word_list) == 'verb':
return match(word_list, 'verb')
else:
raise ParserError("Expected a verb next.")
def parse_object(word_list):
skip(word_list, 'stop')
    next_word = peek(word_list)
    if next_word == 'noun':
        return match(word_list, 'noun')
    elif next_word == 'direction':
        return match(word_list, 'direction')
else:
raise ParserError("Expected a noun or direction next.")
def parse_subject(word_list, subj):
verb = parse_verb(word_list)
obj = parse_object(word_list)
return Sentence(subj, verb, obj)
def parse_sentence(word_list):
skip(word_list, 'stop')
start = peek(word_list)
if start == 'noun':
subj = match(word_list, 'noun')
return parse_subject(word_list, subj)
elif start == 'verb':
# assume the subject is the player then
return parse_subject(word_list, ('noun', 'player'))
else:
raise ParserError("Must start with subject, object or verb not: %s" % start)
from grslra import testdata
from grslra.grslra_batch import grslra_batch, slra_by_factorization
from grslra.structures import Hankel
from grslra.scaling import Scaling
import numpy as np
import time
# The goal of this experiment is to identify an LTI system from a noisy outlier-contaminated and subsampled observation of its impulse response
PROFILE = 0
if PROFILE:
import cProfile
N = 80
m = 20
k = 5
sigma=0.05
outlier_rate = 0.05
outlier_amplitude = 1
rate_Omega=0.5
N_f = 20
scaling = Scaling(centering=True)
p = 0.1
x, x_0, U, Y = testdata.testdata_lti_outliers(N + N_f, m, k, rho=outlier_rate, amplitude=outlier_amplitude, sigma=sigma)
# determine scaling factor
scaling.scale_reference(x)
mu = (1-p) * (3 * sigma / scaling.factor) ** 2
# draw sampling set
card_Omega = np.int(np.round(rate_Omega * N))
Omega = np.random.choice(N, card_Omega, replace=False)
# create binary support vectors for Omega and Omega_not
entries = np.zeros((N + N_f, ))
entries[Omega] = 1
entries_not = np.ones_like(entries) - entries
# set unobserved entries in x to zero
x *= entries
x_Omega = x[Omega]
n = N + N_f - m + 1
hankel = Hankel(m, n)
grslra_params = {"PRINT": None, "VERBOSE": 1}
if PROFILE:
profile = cProfile.Profile()
profile.enable()
t_start = time.time()
l_grslra, U, Y = grslra_batch(x_Omega, hankel, k, p, mu, params=grslra_params, Omega=Omega, x_0=x_0, scaling=scaling)
t_grslra = time.time() - t_start
if PROFILE:
profile.disable()
profile.dump_stats("grslra.bin")
print "error GRSLRA: ", np.linalg.norm(l_grslra - x_0) / np.linalg.norm(x_0)
print "time GRSLRA: ", t_grslra
if PROFILE:
profile = cProfile.Profile()
profile.enable()
t_start = time.time()
l_slrabyF = slra_by_factorization(x_Omega, m, k, PRINT=0, x_0=x_0, Omega=Omega, N=N + N_f)
t_slrabyf = time.time() - t_start
if PROFILE:
profile.disable()
profile.dump_stats("slrabyf.bin")
print "error SLRA by F: ", np.linalg.norm(l_slrabyF - x_0) / np.linalg.norm(x_0)
print "time SLRA by F: ", t_slrabyf
np.savez('result_sysid_lti.npz', x_Omega=x_Omega, Omega=Omega, x_0=x_0, t_grslra=t_grslra, l_grslra=l_grslra, t_slrabyf=t_slrabyf, l_slrabyF=l_slrabyF)
"""LaTeX Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from traitlets import Unicode, default
from traitlets.config import Config
from nbconvert.filters.highlight import Highlight2Latex
from nbconvert.filters.filter_links import resolve_references
from .templateexporter import TemplateExporter
class LatexExporter(TemplateExporter):
"""
Exports to a Latex template. Inherit from this class if your template is
LaTeX based and you need custom tranformers/filters. Inherit from it if
you are writing your own HTML template and need custom tranformers/filters.
If you don't need custom tranformers/filters, just change the
'template_file' config option. Place your template in the special "/latex"
subfolder of the "../templates" folder.
"""
@default('file_extension')
def _file_extension_default(self):
return '.tex'
@default('template_file')
def _template_file_default(self):
return 'article.tplx'
# Latex constants
@default('default_template_path')
def _default_template_path_default(self):
return os.path.join("..", "templates", "latex")
@default('template_skeleton_path')
def _template_skeleton_path_default(self):
return os.path.join("..", "templates", "latex", "skeleton")
#Extension that the template files use.
template_extension = Unicode(".tplx").tag(config=True)
output_mimetype = 'text/latex'
def default_filters(self):
        for x in super(LatexExporter, self).default_filters():
yield x
yield ('resolve_references', resolve_references)
@property
def default_config(self):
c = Config({
'NbConvertBase': {
                'display_data_priority' : ['text/latex', 'application/pdf', 'image/png', 'image/jpeg', 'image/svg+xml', 'text/markdown', 'text/plain']
},
'ExtractOutputPreprocessor': {
'enabled':True
},
'SVG2PDFPreprocessor': {
'enabled':True
},
'LatexPreprocessor': {
'enabled':True
},
'SphinxPreprocessor': {
'enabled':True
},
'HighlightMagicsPreprocessor': {
'enabled':True
}
})
c.merge(super(LatexExporter,self).default_config)
return c
def from_notebook_node(self, nb, resources=None, **kw):
langinfo = nb.metadata.get('language_info', {})
lexer = langinfo.get('pygments_lexer', langinfo.get('name', None))
self.register_filter('highlight_code',
Highlight2Latex(pygments_lexer=lexer, parent=self))
return super(LatexExporter, self).from_notebook_node(nb, resources, **kw)
def _create_environment(self):
environment = super(LatexExporter, self)._create_environment()
# Set special Jinja2 syntax that will not conflict with latex.
environment.block_start_string = "((*"
environment.block_end_string = "*))"
environment.variable_start_string = "((("
environment.variable_end_string = ")))"
environment.comment_start_string = "((="
environment.comment_end_string = "=))"
return environment
R_ID, 'BGP')
self.ryu_bgp_driver = ryu_driver.RyuBgpDriver(cfg.CONF.BGP)
mock_ryu_speaker_p = mock.patch.object(bgpspeaker, 'BGPSpeaker')
self.mock_ryu_speaker = mock_ryu_speaker_p.start()
def test_add_new_bgp_speaker(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.mock_ryu_speaker.assert_called_once_with(
as_number=FAKE_LOCAL_AS1, router_id=FAKE_ROUTER_ID,
bgp_server_port=0,
best_path_change_handler=ryu_driver.best_path_change_cb,
peer_down_handler=ryu_driver.bgp_peer_down_cb,
peer_up_handler=ryu_driver.bgp_peer_up_cb)
def test_remove_bgp_speaker(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(0,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.assertEqual(1, speaker.shutdown.call_count)
def test_add_bgp_peer_without_password(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
FAKE_PEER_IP,
FAKE_PEER_AS)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_add.assert_called_once_with(
address=FAKE_PEER_IP,
remote_as=FAKE_PEER_AS,
password=None,
connect_mode=CONNECT_MODE_ACTIVE)
def test_add_bgp_peer_with_password(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
FAKE_PEER_IP,
FAKE_PEER_AS,
                                         FAKE_AUTH_TYPE,
FAKE_PEER_PASSWORD)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_add.assert_called_once_with(
address=FAKE_PEER_IP,
remote_as=FAKE_PEER_AS,
            password=FAKE_PEER_PASSWORD,
connect_mode=CONNECT_MODE_ACTIVE)
def test_remove_bgp_peer(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.delete_bgp_peer(FAKE_LOCAL_AS1, FAKE_PEER_IP)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_del.assert_called_once_with(address=FAKE_PEER_IP)
def test_advertise_route(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.advertise_route(FAKE_LOCAL_AS1,
FAKE_ROUTE,
FAKE_NEXTHOP)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.prefix_add.assert_called_once_with(prefix=FAKE_ROUTE,
next_hop=FAKE_NEXTHOP)
def test_withdraw_route(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.withdraw_route(FAKE_LOCAL_AS1, FAKE_ROUTE)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.prefix_del.assert_called_once_with(prefix=FAKE_ROUTE)
def test_add_same_bgp_speakers_twice(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.BgpSpeakerAlreadyScheduled,
self.ryu_bgp_driver.add_bgp_speaker, FAKE_LOCAL_AS1)
def test_add_different_bgp_speakers_when_one_already_added(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled,
self.ryu_bgp_driver.add_bgp_speaker,
FAKE_LOCAL_AS2)
def test_add_bgp_speaker_with_invalid_asnum_paramtype(self):
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_speaker, '12345')
def test_add_bgp_speaker_with_invalid_asnum_range(self):
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_speaker, -1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_speaker, 65536)
def test_add_bgp_peer_with_invalid_paramtype(self):
# Test with an invalid asnum data-type
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, '12345')
# Test with an invalid auth-type and an invalid password
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'sha-1', 1234)
# Test with an invalid auth-type and a valid password
self.assertRaises(bgp_driver_exc.InvaildAuthType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'hmac-md5', FAKE_PEER_PASSWORD)
# Test with none auth-type and a valid password
self.assertRaises(bgp_driver_exc.InvaildAuthType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'none', FAKE_PEER_PASSWORD)
# Test with none auth-type and an invalid password
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'none', 1234)
# Test with a valid auth-type and no password
self.assertRaises(bgp_driver_exc.PasswordNotSpecified,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
FAKE_AUTH_TYPE, None)
def test_add_bgp_peer_with_invalid_asnum_range(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, -1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, 65536)
def test_add_bgp_peer_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS)
def test_remove_bgp_peer_with_invalid_paramtype(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.as
els import *
import isisdata.tasks as dtasks
import curation.taskslib.citation_tasks as ctasks
import curation.taskslib.authority_tasks as atasks
from isisdata.filters import CitationFilter
import json
# TODO: refactor these actions to use bulk apply methods and then explicitly
# trigger search indexing (or whatever other post-save actions are needed).
class BaseAction(object):
def __init__(self):
if hasattr(self, 'default_value_field'):
self.value_field = self.default_value_field
if hasattr(self, 'default_value_field_kwargs'):
self.value_field_kwargs = self.default_value_field_kwargs
if hasattr(self, 'extra'):
self.extra_fields = self.extra
def get_value_field(self, **kwargs):
self.value_field_kwargs.update(kwargs)
return self.value_field(**self.value_field_kwargs)
def get_extra_fields(self, **kwargs):
if hasattr(self, 'extra_fields'):
return [(name, field(**kwargs)) for name, field, kwargs in self.extra_fields]
return []
def _build_filter_label(filter_params_raw):
citation_filter = CitationFilter(QueryDict(filter_params_raw, mutable=True))
filter_form = citation_filter.form
filter_data = {}
if filter_form.is_valid():
filter_data = filter_form.cleaned_data
return ', '.join([ '%s: %s' % (key, value) for key, value in list(filter_data.items()) if value ])
class PrependToRecordHistory(BaseAction):
model = Citation
label = u'Update record history'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Prepend to record history',
'widget': forms.widgets.Textarea(attrs={'class': 'action-value'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = bulk_prepend_record_history.delay(user.id, filter_params_raw,
value, task.id, type)
else:
result = bulk_prepend_record_history.delay(user.id, filter_params_raw,
value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('record_status_explanation', value)
task.label = 'Updating set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
class StoreCreationDataToModel(BaseAction):
model = Citation
label = u'Store creation data to citations'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Storing creation data to citations',
'widget': forms.widgets.Textarea(attrs={'class': 'action-value', 'readonly': True, 'initial': 'Storing creation data'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
        if type:
            result = save_creation_to_citation.delay(user.id, filter_params_raw,
                                                     value, task.id, type)
        else:
            result = save_creation_to_citation.delay(user.id, filter_params_raw,
                                                     value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('created_native', '')
task.label = 'Storing creator in citation for set with filters: ' + _build_filter_label(filter_params_raw)
        task.save()
return task.id
class SetRecordStatus(BaseAction):
model = Citation
label = u'Set record status'
default_value_field = forms.ChoiceField
default_value_field_kwargs = {
'choices': CuratedMixin.STATUS_CHOICES,
'label': 'Set record status',
        'widget': forms.widgets.Select(attrs={'class': 'action-value'}),
}
def apply(self, user, filter_params_raw, value, **extra):
# We need this to exist first so that we can keep it up to date as the
# group of tasks is executed.
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_value',
value, task.id, type)
else:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_value',
value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('record_status_value', value)
task.label = 'Updating set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
class SetRecordStatusExplanation(BaseAction):
model = Citation
label = u'Set record status explanation'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Set record status explanation',
'widget': forms.widgets.TextInput(attrs={'class': 'action-value'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_explanation',
value, task.id, type)
else:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_explanation',
value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('record_status_explanation', value)
task.label = 'Updating set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
def get_tracking_transition_counts(qs):
states = list(zip(*qs.model.TRACKING_CHOICES))[0]
transitions = dict(list(zip(states, [qs.filter(tracking_state=state).count() for state in states])))
# bugfix for Zotero imports: tracking_state is None not "NO"
transitions[qs.model.NONE] += qs.filter(tracking_state=None).count()
return transitions
def get_allowable_transition_states():
from curation.tracking import TrackingWorkflow
return dict([(target, source) for source, target in TrackingWorkflow.transitions])
def get_transition_labels():
from curation.tracking import TrackingWorkflow
return dict(Tracking.TYPE_CHOICES)
class SetTrackingStatus(BaseAction):
model = Citation
label = u'Set record tracking status'
default_value_field = forms.ChoiceField
default_value_field_kwargs = {
'choices': Tracking.TYPE_CHOICES,
'label': 'Set record tracking status',
'widget': forms.widgets.Select(attrs={'class': 'action-value'}),
}
extra_js = 'curation/js/bulktracking.js'
extra_fields = (
        ('info', forms.CharField, {'label': 'Tracking Info', 'required': False, 'widget': forms.widgets.TextInput(attrs={'class': 'form-control', 'part_of': 'SetTrackingStatus', 'required': False
    def ack(self, slave, event_name):
"""Acknowledge a slave's message"""
self.send(slave, 'ack {}'.format(event_name))
def monitor_shutdown(self, slave):
# non-daemon so slaves get every opportunity to shut down cleanly
shutdown_thread = Thread(target=self._monitor_shutdown_t,
args=(slave.id, slave.process))
shutdown_thread.start()
def _monitor_shutdown_t(self, slaveid, process):
# a KeyError here means self.slaves got mangled, indicating a problem elsewhere
        if process is None:
            self.log.warning('Slave was missing when trying to monitor shutdown')
            return
def sleep_and_poll():
start_time = time()
# configure the polling logic
polls = 0
# how often to poll
poll_sleep_time = .5
# how often to report (calculated to be around once a minute based on poll_sleep_time)
poll_report_modulo = 60 / poll_sleep_time
# maximum time to wait
poll_num_sec = 300
while (time() - start_time) < poll_num_sec:
polls += 1
yield
if polls % poll_report_modulo == 0:
                    remaining_time = int(poll_num_sec - (time() - start_time))
self.print_message(
'{} still shutting down, '
'will continue polling for {} seconds '
.format(slaveid, remaining_time), blue=True)
                sleep(poll_sleep_time)
# start the poll
for poll in sleep_and_poll():
ec = process.poll()
if ec is None:
continue
else:
if ec == 0:
self.print_message('{} exited'.format(slaveid), green=True)
else:
self.print_message('{} died'.format(slaveid), red=True)
break
else:
self.print_message('{} failed to shut down gracefully; killed'.format(slaveid),
red=True)
process.kill()
def interrupt(self, slave, **kwargs):
"""Nicely ask a slave to terminate"""
slave.forbid_restart = True
if slave.poll() is None:
slave.process.send_signal(subprocess.signal.SIGINT)
self.monitor_shutdown(slave, **kwargs)
def kill(self, slave, **kwargs):
"""Rudely kill a slave"""
slave.forbid_restart = True
if slave.poll() is None:
slave.process.kill()
self.monitor_shutdown(slave, **kwargs)
def send_tests(self, slave):
"""Send a slave a group of tests"""
try:
tests = list(self.failed_slave_test_groups.popleft())
except IndexError:
tests = self.get(slave)
self.send(slave, tests)
slave.tests.update(tests)
collect_len = len(self.collection)
tests_len = len(tests)
self.sent_tests += tests_len
if tests:
self.print_message('sent {} tests to {} ({}/{}, {:.1f}%)'.format(
tests_len, slave.id, self.sent_tests, collect_len,
self.sent_tests * 100. / collect_len
))
return tests
def pytest_sessionstart(self, session):
"""pytest sessionstart hook
- sets up distributed terminal reporter
- sets up zmp ipc socket for the slaves to use
- writes pytest options and args to slave_config.yaml
- starts the slaves
- register atexit kill hooks to destroy slaves at the end if things go terribly wrong
"""
# If reporter() gave us a fake terminal reporter in __init__, the real
# terminal reporter is registered by now
self.terminal = store.terminalreporter
self.trdist = TerminalDistReporter(self.config, self.terminal)
self.config.pluginmanager.register(self.trdist, "terminaldistreporter")
self.session = session
def pytest_runtestloop(self):
"""pytest runtest loop
- Disable the master terminal reporter hooks, so we can add our own handlers
that include the slaveid in the output
- Send tests to slaves when they ask
- Log the starting of tests and test results, including slave id
- Handle clean slave shutdown when they finish their runtest loops
- Restore the master terminal reporter after testing so we get the final report
"""
# Build master collection for slave diffing and distribution
self.collection = [item.nodeid for item in self.session.items]
# Fire up the workers after master collection is complete
# master and the first slave share an appliance, this is a workaround to prevent a slave
# from altering an appliance while master collection is still taking place
for slave in self.slaves.values():
slave.start()
try:
self.print_message("Waiting for {} slave collections".format(len(self.slaves)),
red=True)
# Turn off the terminal reporter to suppress the builtin logstart printing
terminalreporter.disable()
while True:
# spawn/kill/replace slaves if needed
self._slave_audit()
if not self.slaves:
# All slaves are killed or errored, we're done with tests
self.print_message('all slaves have exited', yellow=True)
self.session_finished = True
if self.session_finished:
break
slave, event_data, event_name = self.recv()
if event_name == 'message':
message = event_data.pop('message')
markup = event_data.pop('markup')
# messages are special, handle them immediately
self.print_message(message, slave, **markup)
self.ack(slave, event_name)
elif event_name == 'collectionfinish':
slave_collection = event_data['node_ids']
# compare slave collection to the master, all test ids must be the same
self.log.debug('diffing {} collection'.format(slave.id))
diff_err = report_collection_diff(
slave.id, self.collection, slave_collection)
if diff_err:
self.print_message(
'collection differs, respawning', slave.id,
purple=True)
self.print_message(diff_err, purple=True)
self.log.error('{}'.format(diff_err))
self.kill(slave)
slave.start()
else:
self.ack(slave, event_name)
elif event_name == 'need_tests':
self.send_tests(slave)
self.log.info('starting master test distribution')
elif event_name == 'runtest_logstart':
self.ack(slave, event_name)
self.trdist.runtest_logstart(
slave.id,
event_data['nodeid'],
event_data['location'])
elif event_name == 'runtest_logreport':
self.ack(slave, event_name)
report = unserialize_report(event_data['report'])
if report.when in ('call', 'teardown'):
slave.tests.discard(report.nodeid)
self.trdist.runtest_logreport(slave.id, report)
elif event_name == 'internalerror':
self.ack(slave, event_name)
self.print_message(event_data['message'], slave, purple=True)
self.kill(slave)
elif event_name == 'shutdown':
self.config.hook.pytest_miq_node_shutdown(
config=self.config, nodeinfo=slave.appliance.url)
self.ack(slave, event_name)
    emagarray=numpy.sqrt(stararray['e1']*stararray['e1']+stararray['e2']*stararray['e2'])
emagautocorr=numpy.zeros((starlen*(starlen-1))/2)
edistautocorr=numpy.zeros((starlen*(starlen-1))/2)
iterator=0
# I'm sure there's a better way to do this.
for i in range(len(emagarray)):
for j in range(i+1,len(emagarray)):
emagautocorr[iterator]=emagarray[i]*emagarray[j]
edistautocorr[iterator]=math.sqrt(((stararray['x'][i]-stararray['x'][j])*\
(stararray['x'][i]-stararray['x'][j]))+\
((stararray['y'][i]-stararray['y'][j])*\
(stararray['y'][i]-stararray['y'][j])))
iterator=iterator + 1
return xi_pp, distcorr, emagautocorr, edistautocorr
def make_gal_corrarray( objarray, n ):
m = len(objarray)
a = numpy.array(numpy.zeros((n*m)))
for k in range(n):
a[k*m:(k+1)*m]=objarray
return a
def make_star_corrarray( objarray, n ):
m = len(objarray)
a = numpy.array(numpy.zeros((n*m)))
for k in range(m):
a[k*n:(k+1)*n]=objarray[k]
return a
def do_bootstrap_error(inputarray, nbootstraps=100):
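    # Draw `nbootstraps` resamples of the input (with replacement), average each
    # resample, and report the spread of those resampled means as the error.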
n = len(inputarray)
npars=inputarray[numpy.random.random_integers(0,n-1,(n,nbootstraps))]
meanlist = numpy.mean(npars,0)
if len(meanlist) != nbootstraps:
print 'averaging across wrong axis'
return numpy.std(meanlist)
#
# switch to polar coordinates
#
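# numpy.arccos alone returns angles in [0, pi]; points with y < 0 are mapped to
# 2*pi - phi below so that phi covers the full [0, 2*pi) range.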
def cartesianToPolar(x, y):
r = numpy.sqrt(x**2+y**2)
phi = numpy.arccos(x/r)
    phi2 = 2. * numpy.pi - phi
phi_yp = y>=0.
phi2_yp = y<0.
phi = phi* phi_yp +phi2* phi2_yp
return r, phi
#
# make the plots
#
def make_scatter_inputs(yvals, xvals,therange, nbins=10):
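    # Bin xvals into `nbins` bins over `therange`; for each bin report the mean
    # of yvals and the standard error of that mean, derived from the first and
    # second weighted moments of the bin.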
if len(yvals) != len(xvals):
        print len(yvals), ' does not equal ', len(xvals)
vals, thebins = pylab.histogram(xvals, weights=yvals, bins=nbins,range=therange)
vals_sq, thebins = pylab.histogram(xvals, weights=yvals*yvals, bins=nbins ,range=therange)
vals_n, thebins = pylab.histogram(xvals, bins=nbins,range=therange)
val_errs = numpy.sqrt((vals_sq/vals_n) - (vals/vals_n)*(vals/vals_n))/numpy.sqrt(vals_n)
bincenters=[]
binerrs=[]
# print 'The Bins = ', thebins
for k in range(len(thebins)-1):
bincenters.append((thebins[k]+thebins[k+1])/2.)
binerrs.append((thebins[k+1]-thebins[k])/2.)
# print 'bincenters = ',bincenters
return bincenters, vals/vals_n, binerrs, val_errs
def get_percentiles(arr):
# return 10 and 90 %iles
    arr_sorted = numpy.sort(arr)
    n = len(arr_sorted)
    val = n/10
    return arr_sorted[val], arr_sorted[n-val]
if __name__ == "__main__":
filename_gal = sys.argv[1]
filename_star = sys.argv[2]
if len(sys.argv)==3:
outfilename = 'psfplots.png'
elif len(sys.argv)==4:
outfilename = sys.argv[3]
else:
print 'usage: ./quality_studies_psf.py [galaxy_shear.cat] [star.cat] [output=psfplots.png]'
sys.exit(1)
galcat = open_and_get_shearcat(filename_gal,'OBJECTS')
starcat = open_and_get_shearcat(filename_star,'OBJECTS')
if galcat:
print ' got Galaxy cat'
if starcat:
print ' got Star cat'
maxrg=numpy.max(starcat['rg'])
galcat = galcat.filter(galcat['rg']>maxrg)
galcat = galcat.filter(galcat['Flag']==0)
gal_g1arr = numpy.array(galcat['gs1'])
gal_g2arr = numpy.array(galcat['gs2'])
gal_xarr = numpy.array(galcat['x'])
gal_yarr = numpy.array(galcat['y'])
gal_e1corr = numpy.array(galcat['e1corrpol'])
gal_e2corr = numpy.array(galcat['e2corrpol'])
star_xarr = numpy.array(starcat['x'])
star_yarr = numpy.array(starcat['y'])
star_e1corr = numpy.array(starcat['e1corrpol'])
star_e2corr = numpy.array(starcat['e2corrpol'])
star_e1 = numpy.array(starcat['e1'])
star_e2 = numpy.array(starcat['e2'])
pylab.rc('text', usetex=True)
pylab.figure(figsize=(15,10) ,facecolor='w')
pylab.subplots_adjust(wspace=0.3,hspace=0.3)
pylab.subplot(231,axisbg='w')
pylab.cool()
# Qualtest 1 : Average shear:
avg_gs1 = numpy.mean(gal_g1arr)
err_gs1 = numpy.std(gal_g1arr)/math.sqrt(len(gal_g1arr*1.0))
err_gs1bs = do_bootstrap_error(gal_g1arr)
avg_gs2 = numpy.mean(gal_g2arr)
err_gs2 = numpy.std(gal_g2arr)/math.sqrt(len(gal_g2arr*1.0))
err_gs2bs = do_bootstrap_error(gal_g2arr)
pylab.errorbar(y=[avg_gs2,avg_gs2],x=[avg_gs1,avg_gs1],
xerr=[err_gs1,err_gs1bs], yerr=[err_gs2,err_gs2bs], fmt='r.',
label='''$<\gamma_{1,2}> $''')
pylab.axis([-0.04,0.04,-0.04,0.04])
pylab.xlabel('$<\gamma_{1}>$', horizontalalignment='right')
pylab.ylabel('$<\gamma_{2}>$')
pylab.legend(loc=0)
pylab.grid()
# Qualtest 2 : Average shear in aniso corr bins.
# e1anisocorr : left over from correction
# e1corrpol : the correction.
    # the anisotropy polynomial values for all the objects.
bincenters, gamma1vals, binerrs, gamma1errs = \
make_scatter_inputs(gal_g1arr, gal_e1corr, (-0.02,0.03), 10)
bincenters2, gamma2vals, binerrs2, gamma2errs = \
make_scatter_inputs(gal_g2arr, gal_e2corr, (-0.02,0.03), 10)
pylab.subplot(232,axisbg='w')
pylab.errorbar(x=bincenters,y=gamma1vals,yerr=gamma1errs,xerr=binerrs,
fmt='b.',label='''$<\gamma_{1}>$''')
pylab.errorbar(x=bincenters2,y=gamma2vals,yerr=gamma2errs,xerr=binerrs2,
fmt='r.',label='$<\gamma_{2}>$')
pylab.axis([-0.05,0.05,-0.2,0.2])
pylab.xlabel('$e^{*pol}_{1,2}$', horizontalalignment='right')
pylab.ylabel('''$<\gamma_{1,2}>$''')
pylab.legend(loc=0)
pylab.grid()
# Qualtest 3 : <epol gamma>
eg1,eg2, eg1err, eg2err, eg1errbs, eg2errbs = \
        avg_epol_gamma(gal_g1arr, gal_g2arr, gal_e1corr, gal_e2corr)
pylab.subplot(233)
pylab.errorbar(x=[eg1, eg1],y=[eg2,eg2],
xerr=[eg1err,eg1errbs], yerr=[eg2err, eg2errbs], fmt='b.',
label='''$<e^{pol}_{1,2}\gamma_{1,2}>$ ''')
pylab.cool()
pylab.legend(loc=0)
pylab.axis([-0.0004,0.0004,-0.0004,0.0004])
pylab.xlabel('''$<e^{pol}_{1}\gamma_{1}>$ ''', horizontalalignment='right')
pylab.ylabel('''$<e^{pol}_{2}\gamma_{2}>$ ''')
pylab.grid()
# Qualtest 5 : epol * g
pylab.subplot(234)
galarray={'x':gal_xarr, 'y':gal_yarr, 'g1':gal_g1arr, 'g2':gal_g2arr}
stararray={'x':star_xarr,
'y':star_yarr,
'e1':star_e1,
'e2':star_e2,
'e1pol':star_e1corr,
'e2pol':star_e2corr}
xi_pp, distcorr,emagautocorr, edistautocorr = \
star_gal_correlation(galarray, stararray)
xv, yv, xe, ye = make_scatter_inputs(xi_pp, distcorr,(0,10000), nbins=10 )
pylab.errorbar(x=xv,y=yv,yerr=ye,xerr=xe,fmt='b.',label='data')
#######################
# Here we create the random star
# need e1 and e2 arrays
# First Generate same ellipticity distribution
#######################
# |ellipticity| distribution
elldist = numpy.sqrt(star_e1*star_e1+star_e2*star_e2)
rxi_ppt=[]
distcorrt=[]
rxivals1 = [0,0,0,0,0,0,0,0,0,0]
rxierrs1 = [0,0,0,0,0,0,0,0,0,0]
rxivalssq1 = [0,0,0,0,0,0,0,0,0,0]
rxivals1n = [0,0,0,0,0,0,0,0,0,0]
ntrials=10
rxivals1_sum = numpy.zeros(ntrials)
rxierrs1_sum = numpy.zeros(ntrials)
for isim in range(10):
# for each trial, generate random numbers to sample from
# the ellipticity distribution
        ellindex = numpy.random.random_integers(0,len(elldist)-1,len(elldist))
        # Set up the array
        rand_ell_arr = numpy.zeros(len(elldist))
        # fill the array, I think there's a fast way to do this...
        for i in range(len(elldist)):
            rand_ell_arr[i] = elldist[ellindex[i]]
# now the random angle 0-pi
        rand_phi_arr = numpy.random.uniform(0,math.pi,len(elldist))
if sys.stdout.isatty():
RED = "\x1B[31m"
GREEN = "\x1B[32m"
NORM = "\x1B[39m"
else:
RED = ""
GREEN = ""
NORM = ""
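# VethNetworkConfig pairs up veth devices: the switch claims the even-numbered
# ends (veth0, veth2, ...) while OFTest binds the odd ends, each tagged with
# its OpenFlow port number ("1@veth1", "2@veth3", ...).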
class VethNetworkConfig(object):
def __init__(self, portCount):
self.caddr = "127.0.0.1"
self.cport = randomports(1)[0]
self.switchInterfaces = ["veth%d" % (i*2) for i in range(portCount)]
self.oftestInterfaces = ["%d@veth%d" % (i+1, i*2+1) for i in range(portCount)]
def listOFTests(spec=None, testfile=None, openflowVersion=None, testDir=None):
args = [ OFT, "--list-test-names" ]
if spec:
args.append(spec)
if testfile:
args.append("--test-file=%s" % testfile)
if openflowVersion:
args.append("-V%s" % openflowVersion)
if testDir:
args.append("--test-dir=%s" % testDir)
stdout = subprocess.check_output(args);
return stdout.splitlines();
def runOFTest(test, networkConfig, logDir, openflowVersion, testDir=None, oftArgs=None):
args = [ OFT,
"-H", str(networkConfig.caddr),
"-p", str(networkConfig.cport),
"--verbose",
"--log-file", "%s/oft.log" % logDir,
"--fail-skipped" ]
args.append("-V%s" % openflowVersion)
for iface in networkConfig.oftestInterfaces:
args.append('-i')
args.append(iface)
if testDir:
args.append("--test-dir=%s" % testDir)
if oftArgs:
args = args + oftArgs
args.append(test)
with open("%s/oft.stdout.log" % (logDir), "w") as logfile:
child = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=logfile,
stderr=subprocess.STDOUT)
if not child:
raise Exception("Failed to start: ", args)
child.wait()
return child.returncode;
class IVS(object):
def __init__(self, networkConfig, logDir, openflowVersion, ivsArgs=None):
self.networkConfig = networkConfig
self.logDir = logDir
self.openflowVersion = openflowVersion
self.ivsArgs = ivsArgs
self.child = None
def start(self):
args = [ IVS_BINARY,
"-c", "%s:%d" % (self.networkConfig.caddr, self.networkConfig.cport) ]
args.append("-V%s" % self.openflowVersion)
if self.ivsArgs:
args += self.ivsArgs
for iface in self.networkConfig.switchInterfaces:
args.append("-i");
args.append(iface);
with open("%s/ivs.log" % (self.logDir), "w") as logfile:
self.child = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=logfile,
stderr=subprocess.STDOUT)
if self.child is None:
raise Exception("Failed to start IVS")
def stop(self):
if self.child:
self.child.send_signal(signal.SIGTERM)
self.child.wait()
self.child = None
# BSN test system integration
class AbatTask(object):
def __init__(self):
        self.abatId = os.getenv("ABAT_ID")
        assert self.abatId
        self.abatTimestamp = os.getenv("ABAT_TIMESTAMP")
        self.abatTask = os.getenv("ABAT_TASK")
        self.abatWorkspace = "%s-%s" % (self.abatTimestamp, self.abatTask)
        self.bscBaseDir = requirePathEnv("BSC")
self.runIds = {}
def addTestcase(self, test, testLogDir):
logUrl = "http://%s/abat/%s/%s" % (platform.node(), self.abatWorkspace, testLogDir)
runId = os.popen("%s/build/add-testcase.py %s %s %s %s | tail -n 1" % (
self.bscBaseDir, self.abatId, test, "OFTest", logUrl)).read().rstrip()
self.runIds[test] = runId
def updateTestcase(self, test, result):
system("%s/build/update-testcase.py %s %s" % (
self.bscBaseDir, self.runIds[test], result))
class AutotestIVS(object):
def __init__(self, config):
self.config = config
self.results = []
if os.getenv("ABAT_TASK"):
print "Running in ABAT."
self.abat = AbatTask()
else:
self.abat = None
self.__setup()
def __setup(self):
self.oftests = listOFTests(spec=self.config.test_spec,
testfile=self.config.test_file,
openflowVersion=self.config.openflow_version,
testDir=self.config.test_dir)
def runTests(self):
results = { 'FAILED' : [], 'PASSED' : [] }
for test in self.oftests:
result = self.runTest(test)
results[result].append(test)
        print
        print "%d PASSED, %d FAILED." % (len(results['PASSED']), len(results['FAILED']))
        if results['FAILED']:
            print "Failing tests:"
for test in results['FAILED']:
print test
self.outputResultXml()
def runTest(self, test):
if self.config.test_prefix:
testName = "%s.%s" % (self.config.test_prefix, test)
elif self.config.openflow_version == "1.3":
testName = "of13.%s" % test
else:
testName = test
testLogDir = "%s/%s" % (LOG_BASEDIR, testName)
system("mkdir -p %s" % (testLogDir))
sys.stdout.write("Running %s ... " % testName)
sys.stdout.flush()
if self.abat:
self.abat.addTestcase(testName, testLogDir)
networkConfig = VethNetworkConfig(8)
ivs = IVS(networkConfig, testLogDir, self.config.openflow_version, self.config.ivs_args)
ivs.start()
rv = runOFTest(test, networkConfig, testLogDir, self.config.openflow_version,
self.config.test_dir, self.config.oft_args)
ivs.stop()
if rv == 0:
result = 'PASSED'
sys.stdout.write(GREEN + "OK" + NORM + "\n")
else:
            result = 'FAILED'
            sys.stdout.write(RED + "FAIL" + NORM + "\n")
print "Test logs in %s" % testLogDir
if self.abat:
self.abat.updateTestcase(testName, result)
self.updateResultXml(testName, result, testLogDir)
return result
def updateResultXml(self, testName, result, logDir):
self.results.append((testName, result, logDir))
def outputResultXml(self):
if not self.config.xml:
return
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
from xml.dom import minidom
root = Element("testsuite", { 'tests': str(len(self.results)) })
for name, result, logDir in self.results:
            def readLog(filename):
                return open(logDir + "/" + filename).read()
classname, testname = name.rsplit(".", 1)
testcase = SubElement(root, 'testcase', { 'classname': classname, 'name': testname })
if result == 'FAILED':
failure = SubElement(testcase, 'failure', { 'type': 'Failure' })
failure.text = re.search(r'-{70}(.*?)-{70}', readLog("oft.stdout.log"), re.DOTALL).group(1).strip()
system_out = SubElement(testcase, 'system-out')
system_out.text = readLog("oft.log")
system_err = SubElement(testcase, 'system-err')
system_err.text = readLog("ivs.log")
with open(self.config.xml, 'w') as f:
f.write(minidom.parseString(tostring(root)).toprettyxml(indent=" "))
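# For orientation, the file written above is JUnit-style XML; a minimal sketch
# of the output for a single failing test (all names illustrative):
#
#   <testsuite tests="1">
#     <testcase classname="of13" name="SomeTest">
#       <failure type="Failure">...assertion excerpt from oft.stdout.log...</failure>
#       <system-out>...oft.log contents...</system-out>
#       <system-err>...ivs.log contents...</system-err>
#     </testcase>
#   </testsuite>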
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser(description="",
epilog=_help,
formatter_class=argparse.RawDescriptionHelpFormatter)
ap.add_argument("-T", "--test-spec", help="OFTest test specification", default=None)
ap.add_argument("-f", "--test-file", help="OFTest test file", default=None)
ap.add_argument("--ivs-args", action="append", help="Additional arguments passed to IVS.")
ap.add_argument("--oft-args", action="append", help="Additional arguments passed to oft.")
ap.add_argument("--log-base-dir", he |
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_at | tribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3ConfigStore_methods(root_module, cls):
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore(ns3::ConfigStore const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConfigStore const &', 'arg0')])
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore() [constructor]
cls.add_constructor([])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureAttributes() [member function]
cls.add_method('ConfigureAttributes',
'void',
[])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureDefaults() [member function]
cls.add_method('ConfigureDefaults',
'void',
[])
## config-store.h (module 'config-store'): ns3::TypeId ns3::ConfigStore::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## config-store.h (module 'config-store'): static ns3::TypeId ns3::ConfigStore::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFileFormat(ns3::ConfigStore::FileFormat format) [member function]
cls.add_method('SetFileFormat',
'void',
[param('ns3::ConfigStore::FileFormat', 'format')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetMode(ns3::ConfigStore::Mode mode) [member function]
cls.add_method('SetMode',
'void',
[param('ns3::ConfigStore::Mode', 'mode')])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    def tearDown(self):
self.post_draft.delete()
self.post_published.delete()
def test_list_published_posts(self):
"""
Test API list all published blog posts
"""
url = reverse('blogpost-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response['Content-type'], 'application/json')
self.assertEqual(response.data['count'], 1)
self.assertEqual(response.data['results'][0]['title'], self.post_published.title)
def test_retrieve_published_post(self):
"""
Test API retrieve the published blog post that we created earlier
"""
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['title'], self.post_published.title)
def test_retrieve_draft_post(self):
"""
Test that retrieving a draft post fails since the API only allows read access to published posts
"""
url = '/api/posts/{}'.format(self.post_draft.pk)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_create_as_superuser_token(self):
"""
Test API POST CREATE whilst authenticated via OAuth2 as a superuser
"""
# Note: we do not directly provide user here, as API should automatically get and
# authenticate current user as the author
post_data = {'title': 'title1', 'content': 'content1', 'publish_date': '2016-01-01T00:00Z',
'categories': 'Machine Learning,Statistics'}
url = '/api/posts'
response = self.client.post(url, post_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Post.objects.get(pk=response.data['id']).user, self.superuser)
self.assertEqual(Post.objects.get(pk=response.data['id']).title, post_data['title'])
self.assertEqual(Post.objects.get(pk=response.data['id']).content, post_data['content'])
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=response.data['id'])),
post_data['categories'])
def test_create_as_superuser(self):
"""
Test API POST CREATE whilst authenticated as a superuser
"""
post_data = {'title': 'title2', 'content': 'content2', 'publish_date': '2016-01-01T00:00Z',
'categories': 'Machine Learning'}
url = '/api/posts'
self.client.force_authenticate(user=self.superuser)
response = self.client.post(url, post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Post.objects.get(pk=response.data['id']).user, self.superuser)
self.assertEqual(Post.objects.get(pk=response.data['id']).title, post_data['title'])
self.assertEqual(Post.objects.get(pk=response.data['id']).content, post_data['content'])
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=response.data['id'])),
post_data['categories'])
def test_create_as_user(self):
"""
Test API POST CREATE whilst authenticated as a standard user
"""
post_data = {'title': 'a', 'content': 'b', 'publish_date': '2016-01-01T00:00Z'}
url = '/api/posts'
self.client.force_authenticate(user=self.user)
response = self.client.post(url, post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_as_guest(self):
"""
Test API POST CREATE whilst unauthenticated as a guest
"""
post_data = {'title': 'a', 'content': 'b', 'publish_date': '2016-01-01T00:00Z'}
url = '/api/posts'
self.client.force_authenticate(user=None)
response = self.client.post(url, post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_as_superuser_token(self):
"""
Test API PUT UPDATE whilst authenticated via OAuth2 as a superuser
"""
put_data = {'title': 'a', 'content': 'b', 'categories': 'cat1,cat2'}
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.put(url, put_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).title, put_data['title'])
self.assertEqual(Post.objects.get(pk=self.post_published.pk).content, put_data['content'])
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk)),
put_data['categories'])
def test_update_as_user(self):
"""
Test API PUT UPDATE whilst authenticated as a standard user
"""
put_data = {'title': 'a', 'content': 'b'}
url = '/api/posts/{}'.format(self.post_published.pk)
self.client.force_authenticate(user=self.user)
response = self.client.put(url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_update_as_guest(self):
"""
Test API PUT UPDATE whilst unauthenticated as a guest
"""
put_data = {'title': 'a', 'content': 'b'}
url = '/api/posts/{}'.format(self.post_published.pk)
self.client.force_authenticate(user=None)
response = self.client.put(url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_categories_unchanged(self):
"""
Test API PUT UPDATE title and test categories remain unchanged
whilst authenticated via OAuth2 as a superuser
"""
original_content = Post.objects.get(pk=self.post_published.pk).content
original_categories = self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk))
put_data = {'title': 'updated title'}
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.put(url, put_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).title, put_data['title'])
self.assertEqual(Post.objects.get(pk=self.post_published.pk).content, original_content)
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk)),
original_categories)
def test_update_categories_disassociate_one(self):
"""
Test API PUT UPDATE disassociate category 'cat2'
whilst authenticated via OAuth2 as a superuser
"""
original_title = Post.objects.get(pk=self.post_published.pk).title
original_content = Post.objects.get(pk=self.post_published.pk).content
put_data = {'categories': 'cat1'}
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.put(url, put_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).title, original_title)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).content, original_content)
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk)),
put_data['categories'])
def test_update_categories_associate_three(self):
"""
Test API PUT UPDATE associate three new categories
        whilst authenticated via OAuth2 as a superuser
        """
original_title = Post.objects.get(pk=self.post_published.pk).title
        original_content = Post.objects.get(pk=self.post_published.pk).content
from typing import (
Any,
List,
)
from pcs import resource
from pcs.cli.common.parse_args import InputModifiers
from pcs.cli.common.routing import (
CliCmdInterface,
create_router,
)
def resource_defaults_cmd(parent_cmd: List[str]) -> CliCmdInterface:
def _get_router(
lib: Any, argv: List[str], modifiers: InputModifiers
) -> None:
"""
Options:
* -f - CIB file
* --force - allow unknown options
"""
if argv and "=" in argv[0]:
# DEPRECATED legacy command
return resource.resource_defaults_legacy_cmd(
lib, argv, modifiers, deprecated_syntax_used=True
)
router = create_router(
{
"config": resource.resource_defaults_config_cmd,
"set": create_router(
{
"create": resource.resource_defaults_set_create_cmd,
"delete": resource.resource_defaults_set_remove_cmd,
"remove": resource.resource_defaults_set_remove_cmd,
"update": resource.resource_defaults_set_update_cmd,
},
parent_cmd + ["set"],
),
"update": resource.resource_defaults_legacy_cmd,
},
parent_cmd,
default_cmd="config",
)
        return router(lib, argv, modifiers)
return _get_router
def resource_op_defaults_cmd(parent_cmd: List[str]) -> CliCmdInterface:
def _get_router(
lib: Any, argv: List[str], modifiers: InputModifiers
) -> None:
"""
Options:
* -f - CIB file
* --force - allow unknown options
"""
if argv and "=" in argv[0]:
            # DEPRECATED legacy command
return resource.resource_op_defaults_legacy_cmd(
lib, argv, modifiers, deprecated_syntax_used=True
)
router = create_router(
{
"config": resource.resource_op_defaults_config_cmd,
"set": create_router(
{
"create": resource.resource_op_defaults_set_create_cmd,
"delete": resource.resource_op_defaults_set_remove_cmd,
"remove": resource.resource_op_defaults_set_remove_cmd,
"update": resource.resource_op_defaults_set_update_cmd,
},
parent_cmd + ["set"],
),
"update": resource.resource_op_defaults_legacy_cmd,
},
parent_cmd,
default_cmd="config",
)
return router(lib, argv, modifiers)
return _get_router
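

# A toy illustration (not the pcs API; all names here are hypothetical) of the
# nested-dict dispatch idea used above: string keys map to handlers, nested
# dicts map to sub-commands, and a default command kicks in for empty argv.
def _toy_router(table, argv, default=None):
    cmd = argv[0] if argv else default
    target = table[cmd]
    if isinstance(target, dict):
        return _toy_router(target, argv[1:], default)
    return target(argv[1:] if argv else [])


_handlers = {
    "config": lambda rest: ("config", rest),
    "set": {"create": lambda rest: ("set create", rest)},
}
assert _toy_router(_handlers, ["set", "create", "x=1"]) == ("set create", ["x=1"])
assert _toy_router(_handlers, [], default="config") == ("config", [])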
|
#### PATTERN | EN ##################################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
# English linguistic tools using fast regular expressions.
from inflect import \
article, referenced, DEFINITE, INDEFINITE, \
pluralize, singularize, NOUN, VERB, ADJECTIVE, \
conjugate, lemma, lexeme, tenses, VERBS, \
grade, comparative, superlative, COMPARATIVE, SUPERLATIVE, \
predicative, attributive, \
INFINITIVE, PRESENT, PAST, FUTURE, \
FIRST, SECOND, THIRD, \
SINGULAR, PLURAL, SG, PL, \
PROGRESSIVE, \
PARTICIPLE
from inflect.quantify import \
number, numerals, quantify, reflect
from inflect.spelling import \
suggest as spelling
from parser import tokenize, parse, tag
from parser.tree import Text, Sentence, Slice, Chunk, PNPChunk, Chink, Word, table
from parser.tree import SLASH, WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA, AND, OR
from parser.modality import mood, INDICATIVE, IMPERATIVE, CONDITIONAL, SUBJUNCTIVE
from parser.modality import modality, EPISTEMIC
from parser.modality import negated
from parser.sentiment import sentiment, polarity, subjectivity, positive
from parser.sentiment import NOUN, VERB, ADJECTIVE, ADVERB
import wordnet
import wordlist
def parsetree(s, *args, **kwargs):
""" Returns a parsed Text from the given string.
"""
return Text(parse(s, *args, **kwargs))
def split(s, token=[WORD, POS, CHUNK, PNP]):
""" Returns a parsed Text from the given parsed string.
"""
return Text(s, token)
def pprint(string, token=[WORD, POS, CHUNK, PNP], column=4):
""" Pretty-prints the output of parse() as a table with outlined columns.
Alternatively, you can supply a Text or Sentence object.
"""
if isinstance(string, basestring):
print "\n\n".join([table(sentence, fill=column) for sentence in Text(string, token)])
if isinstance(string, Text):
print "\n\n".join([table(sentence, fill=column) for sentence in string])
if isinstance(string, Sentence):
print table(string, fill=column)
def ngrams(string, n=3, continuous=False):
""" Returns a list of n-grams (tuples of n successive words) from the given string.
Alternatively, you can supply a Text or Sentence object.
With continuous=False, n-grams will not run over sentence markers (i.e., .!?).
"""
def strip_period(s, punctuation=set(".:;,!?()[]'\"")):
return [w for w in s if (isinstance(w, Word) and w.string or w) not in punctuation]
if n <= 0:
return []
if isinstance(string, basestring):
s = [strip_period(s.split(" ")) for s in tokenize(string)]
if isinstance(string, Sentence):
s = [strip_period(string)]
if isinstance(string, Text):
s = [strip_period(s) for s in string]
if continuous:
s = [sum(s, [])]
g = []
    for tokens in s:
        #tokens = [None] + tokens + [None]
        g.extend([tuple(tokens[i:i+n]) for i in range(len(tokens)-n+1)])
return g |
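
# A quick illustration of the sentence-boundary behaviour described in the
# docstring (assuming tokenize() splits the two sentences; the output shown is
# what the bigram logic yields under that assumption):
#
#   >>> ngrams("The cat sat. The dog ran.", n=2)
#   [('The', 'cat'), ('cat', 'sat'), ('The', 'dog'), ('dog', 'ran')]
#
# There is no ('sat', 'The') bigram: with continuous=False, n-grams do not run
# across the sentence break.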
"""
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
                            random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
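
# A possible follow-up (not part of the original example): rather than
# eyeballing the plot, pick the smallest `n_estimators` whose OOB error is
# within a small tolerance of the best error seen. The tolerance value is a
# hypothetical choice.
for label, clf_err in error_rate.items():
    xs, ys = zip(*clf_err)
    tol = 0.005  # hypothetical stabilisation tolerance
    chosen = next(x for x, err in zip(xs, ys) if err <= min(ys) + tol)
    print("%s: OOB error stabilises near n_estimators=%d" % (label, chosen))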
|
self.assertEqual(indices, offsets_2)
def test_iter_advanced(self):
"""test that pointers to other mappings are detected"""
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
self.pointerEnum1 = haystack.reverse.enumerators.WordAlignedEnumerator(self.mmap, matcher, self.feedback, self.word_size)
offsets1, values1 = zip(*self.pointerEnum1.search())
self.pointerEnum2 = haystack.reverse.enumerators.WordAlignedEnumerator(self.mmap2, matcher, self.feedback, self.word_size)
offsets2, values2 = zip(*self.pointerEnum2.search())
self.assertEqual(values1, values2)
self.assertEqual(len(values1), len(self.seq)+1)
class TestPointerEnumeratorReal(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._memory_handler = folder.load(zeus_856_svchost_exe.dumpname)
#cls._memory_handler = folder.load(putty_1_win7.dumpname)
cls._utils = cls._memory_handler.get_target_platform().get_target_ctypes_utils()
return
@classmethod
def tearDownClass(cls):
cls._utils = None
cls._memory_handler.reset_mappings()
cls._memory_handler = None
return
def setUp(self):
self._heap_finder = self._memory_handler.get_heap_finder()
return
def tearDown(self):
self._heap_finder = None
return
def _stats(self, heap_addrs):
# get the weight per mapping
mapdict = {}
for m in self._memory_handler.get_mappings():
mapdict[m.start] = 0
for addr in heap_addrs:
m = self._memory_handler.get_mapping_for_address(addr)
mapdict[m.start] += 1
        res = [(v, k) for k, v in mapdict.items()]
        res.sort(reverse=True)
print('Most used mappings:')
for cnt,s in res:
if cnt == 0:
continue
m = self._memory_handler.get_mapping_for_address(s)
print(cnt, m)
def test_pointer_enumerators(self):
"""
Search pointers values in one HEAP
:return:
"""
# prep the workers
dumpfilename = self._memory_handler.get_name()
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
walker = walkers[0]
heap_addr = walker.get_heap_address()
heap = walker.get_heap_mapping()
# create the enumerator on the whole mapping
enumerator1 = haystack.reverse.enumerators.WordAlignedEnumerator(heap, matcher, feedback, word_size)
# collect the pointers
if False:
###
ts1 = timeit.timeit(enumerator1.search, number=3)
import cProfile, pstats, StringIO
pr = cProfile.Profile()
pr.enable()
# ... do something ...
heap_enum = enumerator1.search()
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
###
else:
heap_enum = enumerator1.search()
ts1 = 0.0
heap_addrs1, heap_values1 = zip(*heap_enum)
print('WordAlignedEnumerator: %d pointers, timeit %0.2f' % (len(heap_addrs1), ts1))
self._stats(heap_addrs1)
def test_pointer_enumerators_allocated(self):
"""
Search pointers values in allocated chunks from one HEAP
:return:
"""
# prep the workers
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
        matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
heap_walker = walkers[0]
# create the enumerator on the allocated chunks mapping
enumerator2 = haystack.reverse.enumerators.AllocatedWordAlignedEnumerator(heap_walker, matcher, feedback, word_size)
# collect the pointers
if False:
###
ts2 = timeit.timeit(enumerator2.search, number=3)
            import cProfile, pstats, StringIO
pr = cProfile.Profile()
pr.enable()
# ... do something ...
heap_enum2 = enumerator2.search()
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
###
else:
heap_enum2 = enumerator2.search()
ts2 = 0.0
heap_addrs2, heap_values2 = zip(*heap_enum2)
logging.debug('AllocatedWordAlignedEnumerator: %d pointers, timeit %0.2f', len(heap_addrs2), ts2)
self._stats(heap_addrs2)
def test_pointer_enumerators_all(self):
"""
Search pointers values in all HEAP
:return:
"""
# prep the workers
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
all_heaps_addrs = []
for walker in walkers:
#if heap.start != 0x03360000:
# continue
heap = walker.get_heap_mapping()
log.debug('heap is %s', heap)
# create the enumerator on the allocated chunks mapping
enumerator2 = haystack.reverse.enumerators.WordAlignedEnumerator(heap, matcher, feedback, word_size)
# collect the pointers
heap_enum2 = enumerator2.search()
ts2 = 0.0
if len(heap_enum2) == 0:
logging.debug('Heap %s has no pointers in allocated blocks', heap)
else:
heap_addrs2, heap_values2 = zip(*heap_enum2)
logging.debug('WordAlignedEnumerator: %d pointers, timeit %0.2f', len(heap_addrs2), ts2)
all_heaps_addrs.extend(heap_addrs2)
##
if False:
print("Pointers:")
for k,v in heap_enum2:
print(hex(k), hex(v))
self._stats(all_heaps_addrs)
def test_pointer_enumerators_allocated_all(self):
"""
Search pointers values in allocated chunks from all HEAP
:return:
"""
# prep the workers
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
all_heaps_addrs = []
for heap_walker in walkers:
#if heap.start != 0x03360000:
# continue
heap = heap_walker.get_heap_mapping()
log.debug('heap is %s', heap)
# create the enumerator on the allocated chunks mapping
enumerator2 = haystack.reverse.enumerators.AllocatedWordAlignedEnumerator(heap_walker, matcher, feedback, word_size)
# collect the pointers
heap_enum2 = enumerator2.search()
ts2 = 0.0
if len(heap_enum2) == 0:
logging.debug('Heap %s has no pointers in allocated blocks', heap)
else:
heap_addrs2, heap_values2 = zip(*heap_enum2)
logging.debug('AllocatedWordAlignedEnumerator: %d pointers, timeit %0.2f', len(heap_addrs2), ts2)
all_heaps_addrs.extend(heap_addrs2)
##
if False:
print("Pointers:")
            for k, v in heap_enum2:
                print(hex(k), hex(v))
        self._stats(all_heaps_addrs)
class hheap(dict):
    @staticmethod
    def _parent(i):  # index arithmetic uses bit operations (same below)
        return (i - 1) >> 1
@staticmethod
def _left(i):
return (i<<1) + 1
@staticmethod
def _right(i):
return (i<<1) + 2
    '''
    Structure: the heap is a list of entries
        [position, value, key]
    and the dict maps each key to that same entry list, so the two views
    share state. An optional fourth element records the 'via' key
    (see update_if_better).
    '''
def __init__(self):
self.heap = []
self.hLength = -1
dict.__init__(self)
    def __setitem__(self, key, value):
        if dict.__contains__(self, key):
            item = dict.__getitem__(self, key)
            item[1] = value
            # guard the root: index 0 has no parent to compare against
            if item[0] > 0 and item[1] < self.heap[self._parent(item[0])][1]:
                self.heapup(item[0])
            else:
                self.heapdown(item[0])
        else:
            self.hLength += 1
            self.heap.append([self.hLength, value, key])
            dict.__setitem__(self, key, self.heap[-1])
            self.heapup(self.hLength)
def __getitem__(self,key):
'''Get item retrieves the value of the given key '''
if dict.__contains__(self,key):
return dict.__getitem__(self,key)[1]
raise KeyError("Key does not exist")
def heapup(self,index):
''' Maintains the property of a heap by checking its parent, mostly used after insertion'''
        parent = self._parent(index)
        if parent < 0:
            return
if self.heap[index][1] < self.heap[parent][1]:
self._swap(index,parent)
return self.heapup(parent)
if self.heap[index][1] == self.heap[parent][1]:
if self.heap[index][2] < self.heap[parent][2]:
self._swap(index,parent)
return self.heapup(parent)
return
def heapdown(self,index=0):
''' Maintains the property of a heap by checking its children '''
leftChild = self._left(index)
rightChild = self._right(index)
last = len(self.heap)-1
if leftChild > last:
return
elif rightChild > last:
if self.heap[leftChild][1] < self.heap[index][1]:
self._swap(index,leftChild)
return self.heapdown(leftChild)
        else:
            if self.heap[rightChild][1] < self.heap[leftChild][1]:
                smaller = rightChild
            else:
                smaller = leftChild
            if self.heap[index][1] > self.heap[smaller][1]:
                self._swap(index, smaller)
            if self.heap[index][1] == self.heap[smaller][1]:
                if self.heap[index][2] > self.heap[smaller][2]:
                    self._swap(index, smaller)
            return self.heapdown(smaller)
def _swap(self, i, j):
| """swap the contents b/w indices i and j; update hash accordingly"""
#swap within the heap
self.heap[i][0],self.heap[j][0] = j,i
self.heap[i],self.heap[j] = self.heap[j],self.heap[i]
    def pop(self):
        """remove and return the root (smallest) entry"""
        popped = self.heap[0]
        # remove from the dict, move the last leaf to the root,
        # then restore the heap property
        dict.__delitem__(self, popped[2])
        self._swap(0, self.hLength)
        self.heap.pop()
        self.heapdown()
        self.hLength -= 1
        return popped
def update_if_better(self, key, newvalue,viakey=None):
"""update if newvalue is better than the current value for key
or insert if key is not here yet."""
        if dict.__contains__(self, key):
            self[key] = min(self[key], newvalue)
            info = dict.__getitem__(self, key)
            if self[key] == newvalue:
                if len(info) == 3:
                    info.append(viakey)
                else:
                    info[3] = viakey
        else:
            self[key] = newvalue
            if viakey is not None:
                # record the via key on first insertion as well
                dict.__getitem__(self, key).append(viakey)
def Display(self,arry):
#print arry
        if len(arry) == 4:
print arry[2]+" "+str(arry[1])+ " (via "+arry[3]+")"
else:
if arry[1] == float("+inf"):
print str(arry[2])+" "+"unreachable"
else:
print str(arry[2])+" "+str(arry[1])
# def GenerateItems(self):
# for x in self.heap:
# yield x
def __str__(self):
string = "{"
string += ', '.join(["'" + item[0]+ "'" + ": "+str(item[1][1]) for item in sorted(self.items(),key = lambda x: x[1][1])])
string +="}"
return string
__repr__ = __str__
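

# A minimal usage sketch (illustrative values, not from the original): the
# structure behaves like a dict whose pop() always yields the entry with the
# smallest value, which is what Dijkstra-style relaxation via
# update_if_better needs.
if __name__ == "__main__":
    h = hheap()
    h['a'] = 5
    h['b'] = 2
    h.update_if_better('a', 3, viakey='b')  # 3 < 5: 'a' improves, via key recorded
    h.update_if_better('b', 7)              # 7 > 2: 'b' keeps its better value
    best = h.pop()
    print best[2], best[1]                  # -> b 2
    best = h.pop()
    print best[2], best[1], best[3]         # -> a 3 b  (improved via 'b')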
|
import importlib
import os
import sys
here = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_version() -> str:
"""
Return version.
"""
sys.path.insert(0, here)
return importlib.import_module("a2wsgi").__version__
os.chdir(here)
os.system(f"poetry version {get_version()}")
os.system("git add a2wsgi/* pyproject.t | oml")
os.system(f'git commit -m "v{get_version()}"')
os.system("git push")
os.system("git tag v{0}".format(get_version()))
os.system("git p | ush --tags")
|