"""This module cooks up a docstring when imported. Its only purpose is to
be displayed in the sphinx documentation.
"""
from collections import defaultdict
from ..core import Add, Eq, Symbol
from ..core.compatibility import default_sort_key
from ..printing import latex
from .meijerint import _create_lookup_table
t = defaultdict(list)
_create_lookup_table(t)
doc = ''
for about, category in sorted(t.items(), key=default_sort_key):
if about == ():
doc += 'Elementary functions:\n\n'
else:
doc += 'Functions involving ' + ', '.join('`%s`' % latex(
list(category[0][0].atoms(func))[0]) for func in about) + ':\n\n'
for formula, gs, cond, hint in category:
if not isinstance(gs, list):
g = Symbol('\\text{generated}')
else:
g = Add(*[fac*f for (fac, f) in gs])
obj = Eq(formula, g)
if cond is True:
cond = ''
else:
cond = ',\\text{ if } %s' % latex(cond)
doc += f'.. math::\n {latex(obj)}{cond}\n\n'
__doc__ = doc
# ---- skirpichev/omg | diofant/integrals/meijerint_doc.py | Python | bsd-3-clause | 1,056 bytes ----
__import__('pkg_resources').declare_namespace(__name__) # pragma NO COVERAGE
# ---- dairiki/humpty | tests/dist2/dist2/plugins/__init__.py | Python | bsd-3-clause | 78 bytes ----
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyAdd(ref, indices, updates):
# Since numpy advanced assignment does not support repeated indices,
# we run a simple loop to perform scatter_add.
for i, indx in np.ndenumerate(indices):
ref[indx] += updates[i]
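def _DemoRepeatedIndices():
  # Illustrative sketch (not part of the original test file): numpy
  # fancy-index assignment applies each update to a repeated index only
  # once, while np.add.at (like the loop above) accumulates all of them.
  ref = np.zeros(3)
  ref[np.array([0, 0])] += 1.0  # ref[0] becomes 1.0, not 2.0
  np.add.at(ref, np.array([0, 0]), 1.0)  # ref[0] becomes 3.0
  return ref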
def _NumpyAddScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] += update
def _NumpySub(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] -= updates[i]
def _NumpySubScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] -= update
def _NumpyMul(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] *= updates[i]
def _NumpyMulScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] *= update
def _NumpyDiv(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] /= updates[i]
def _NumpyDivScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] /= update
def _NumpyMin(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], updates[i])
def _NumpyMinScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], update)
def _NumpyMax(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], updates[i])
def _NumpyMaxScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], update)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = updates[i]
def _NumpyUpdateScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = update
_TF_OPS_TO_NUMPY = {
state_ops.scatter_update: _NumpyUpdate,
state_ops.scatter_add: _NumpyAdd,
state_ops.scatter_sub: _NumpySub,
state_ops.scatter_mul: _NumpyMul,
state_ops.scatter_div: _NumpyDiv,
state_ops.scatter_min: _NumpyMin,
state_ops.scatter_max: _NumpyMax,
}
_TF_OPS_TO_NUMPY_SCALAR = {
state_ops.scatter_update: _NumpyUpdateScalar,
state_ops.scatter_add: _NumpyAddScalar,
state_ops.scatter_sub: _NumpySubScalar,
state_ops.scatter_mul: _NumpyMulScalar,
state_ops.scatter_div: _NumpyDivScalar,
state_ops.scatter_min: _NumpyMinScalar,
state_ops.scatter_max: _NumpyMaxScalar,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False):
np.random.seed(8)
with self.test_session(use_gpu=True):
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
# Generate random indices with no duplicates for easy numpy comparison
size = np.prod(indices_shape, dtype=itype)
first_dim = 3 * size
indices = np.arange(first_dim)
np.random.shuffle(indices)
indices = indices[:size]
if size > 1 and repeat_indices:
# Add some random repeats.
indices = indices[:size // 2]
for _ in range(size - size // 2):
# Randomly append some repeats.
indices = np.append(indices,
indices[np.random.randint(size // 2)])
np.random.shuffle(indices)
indices = indices.reshape(indices_shape)
if updates_are_scalar:
updates = _AsType(np.random.randn(), vtype)
else:
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
# Clips small values to avoid division by zero.
def clip_small_values(x):
threshold = 1e-4
sign = np.sign(x)
if isinstance(x, np.int32):
threshold = 1
sign = np.random.choice([-1, 1])
return threshold * sign if np.abs(x) < threshold else x
updates = np.vectorize(clip_small_values)(updates)
old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
if updates_are_scalar:
np_scatter = _TF_OPS_TO_NUMPY_SCALAR[tf_scatter]
else:
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = variables.Variable(old)
ref.initializer.run()
tf_scatter(ref, indices, updates).eval()
self.assertAllClose(ref.eval(), new)
def _VariableRankTests(self,
tf_scatter,
repeat_indices=False,
updates_are_scalar=False):
vtypes = [np.float32, np.float64]
if tf_scatter != state_ops.scatter_div:
vtypes.append(np.int32)
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(tf_scatter, vtype, itype, repeat_indices,
updates_are_scalar)
def testVariableRankUpdate(self):
self._VariableRankTests(state_ops.scatter_update, False)
def testVariableRankAdd(self):
self._VariableRankTests(state_ops.scatter_add, False)
def testVariableRankSub(self):
self._VariableRankTests(state_ops.scatter_sub, False)
def testVariableRankMul(self):
self._VariableRankTests(state_ops.scatter_mul, False)
def testVariableRankDiv(self):
self._VariableRankTests(state_ops.scatter_div, False)
def testVariableRankMin(self):
self._VariableRankTests(state_ops.scatter_min, False)
def testVariableRankMax(self):
self._VariableRankTests(state_ops.scatter_max, False)
def testRepeatIndicesAdd(self):
self._VariableRankTests(state_ops.scatter_add, True)
def testRepeatIndicesSub(self):
self._VariableRankTests(state_ops.scatter_sub, True)
def testRepeatIndicesMul(self):
self._VariableRankTests(state_ops.scatter_mul, True)
def testRepeatIndicesDiv(self):
self._VariableRankTests(state_ops.scatter_div, True)
def testRepeatIndicesMin(self):
self._VariableRankTests(state_ops.scatter_min, True)
def testRepeatIndicesMax(self):
self._VariableRankTests(state_ops.scatter_max, True)
def testVariableRankUpdateScalar(self):
self._VariableRankTests(state_ops.scatter_update, False, True)
def testVariableRankAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, False, True)
def testVariableRankSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, False, True)
def testVariableRankMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, False, True)
def testVariableRankDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, False, True)
def testVariableRankMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, False, True)
def testVariableRankMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, False, True)
def testRepeatIndicesAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, True, True)
def testRepeatIndicesSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, True, True)
def testRepeatIndicesMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, True, True)
def testRepeatIndicesDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, True, True)
def testRepeatIndicesMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, True, True)
def testRepeatIndicesMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, True, True)
def testBooleanScatterUpdate(self):
if not test.is_gpu_available():
with self.test_session(use_gpu=False) as session:
var = variables.Variable([True, False])
update0 = state_ops.scatter_update(var, 1, True)
update1 = state_ops.scatter_update(
var, constant_op.constant(
0, dtype=dtypes.int64), False)
var.initializer.run()
session.run([update0, update1])
self.assertAllEqual([False, True], var.eval())
def testScatterOutOfRangeCpu(self):
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
if not test.is_gpu_available():
with self.test_session(use_gpu=False):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = -1 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = 6 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if test.is_gpu_available():
return
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
      # We don't test the implementation; we just test that there are no failures.
with self.test_session(force_gpu=True):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
        # Indices out of range should not fail.
indices = np.array([-1, 0, 5])
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
op(ref, indices, updates).eval()
if __name__ == '__main__':
test.main()
# ---- nburn42/tensorflow | tensorflow/python/kernel_tests/scatter_ops_test.py | Python | apache-2.0 | 11,250 bytes ----
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Separate titles: map French titles to English, and group the rare ones.
def engTitle(title):
if title in ["Miss", "Mrs", "Mr", "Dr", "Master"]:
return title
elif title in ["Mme", "Ms"]:
return "Mrs"
elif title == "Mlle":
return "Miss"
else:
return "Rare" #Include Major, Sir, the Countess, Lady, Jonkheer, Rev etc
def getTitleFromName(name):
name = name.split(",")
name = name[1].split(".")
return engTitle( name[0].strip() )
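# Illustrative examples (not in the original script) of the two helpers above:
#   getTitleFromName("Braund, Mr. Owen Harris")  -> "Mr"
#   getTitleFromName("Sagesser, Mlle. Emma")     -> "Miss"  (Mlle is mapped)
#   getTitleFromName("Uruchurtu, Don. Manuel E") -> "Rare"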
df = pd.read_csv("train.csv")
df["Title"] = df.apply(lambda row: getTitleFromName(row["Name"]), axis = 1)
titles = df["Title"].unique()
index = np.arange( len(titles) )
opacity = 0.5
bar_width = 0.3
total = []
survived = []
for title in titles:
t_all = df[ df["Title"] == title ]
total.append( len(t_all) )
survived.append( len( t_all[ t_all["Survived"] == 1] ) )
for i in range(0, len(titles)):
s = titles[i] + "\t-\t tot: " + str(total[i]) + ", surv: " + str(survived[i]) + ", ratio: " + str(survived[i] / total[i])
print(s)
plt.bar(index, tuple(total), bar_width, alpha = opacity, color = 'b', label = "Total" )
plt.bar(index + bar_width, tuple(survived), bar_width, alpha = opacity, color = 'r', label = "Survived" )
plt.xlabel("Title")
plt.ylabel("Count")
plt.legend()
plt.xticks(index + bar_width / 2, tuple(titles) )
plt.show()
# ---- cybercomgroup/Big_Data | Cloudera/Code/Titanic_Dataset/title_surv.py | Python | gpl-3.0 | 1,344 bytes ----
#!/usr/bin/env python
# Pretty print 9p simpletrace log
# Usage: ./analyse-9p-simpletrace.py <trace-events> <trace-pid>
#
# Author: Harsh Prateek Bora
import simpletrace
class VirtFSRequestTracker(simpletrace.Analyzer):
def begin(self):
print "Pretty printing 9p simpletrace log ..."
def v9fs_rerror(self, tag, id, err):
print "RERROR (tag =", tag, ", id =", id, ",err =", err, ")"
def v9fs_version(self, tag, id, msize, version):
print "TVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"
def v9fs_version_return(self, tag, id, msize, version):
print "RVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"
def v9fs_attach(self, tag, id, fid, afid, uname, aname):
print "TATTACH (tag =", tag, ", fid =", fid, ", afid =", afid, ", uname =", uname, ", aname =", aname, ")"
def v9fs_attach_return(self, tag, id, type, version, path):
print "RATTACH (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"
def v9fs_stat(self, tag, id, fid):
print "TSTAT (tag =", tag, ", fid =", fid, ")"
def v9fs_stat_return(self, tag, id, mode, atime, mtime, length):
print "RSTAT (tag =", tag, ", mode =", mode, ", atime =", atime, ", mtime =", mtime, ", length =", length, ")"
def v9fs_getattr(self, tag, id, fid, request_mask):
print "TGETATTR (tag =", tag, ", fid =", fid, ", request_mask =", hex(request_mask), ")"
def v9fs_getattr_return(self, tag, id, result_mask, mode, uid, gid):
print "RGETATTR (tag =", tag, ", result_mask =", hex(result_mask), ", mode =", oct(mode), ", uid =", uid, ", gid =", gid, ")"
def v9fs_walk(self, tag, id, fid, newfid, nwnames):
print "TWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", nwnames =", nwnames, ")"
def v9fs_walk_return(self, tag, id, nwnames, qids):
print "RWALK (tag =", tag, ", nwnames =", nwnames, ", qids =", hex(qids), ")"
def v9fs_open(self, tag, id, fid, mode):
print "TOPEN (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ")"
def v9fs_open_return(self, tag, id, type, version, path, iounit):
print "ROPEN (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
def v9fs_lcreate(self, tag, id, dfid, flags, mode, gid):
print "TLCREATE (tag =", tag, ", dfid =", dfid, ", flags =", oct(flags), ", mode =", oct(mode), ", gid =", gid, ")"
def v9fs_lcreate_return(self, tag, id, type, version, path, iounit):
print "RLCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
def v9fs_fsync(self, tag, id, fid, datasync):
print "TFSYNC (tag =", tag, ", fid =", fid, ", datasync =", datasync, ")"
def v9fs_clunk(self, tag, id, fid):
print "TCLUNK (tag =", tag, ", fid =", fid, ")"
def v9fs_read(self, tag, id, fid, off, max_count):
print "TREAD (tag =", tag, ", fid =", fid, ", off =", off, ", max_count =", max_count, ")"
def v9fs_read_return(self, tag, id, count, err):
print "RREAD (tag =", tag, ", count =", count, ", err =", err, ")"
def v9fs_readdir(self, tag, id, fid, offset, max_count):
print "TREADDIR (tag =", tag, ", fid =", fid, ", offset =", offset, ", max_count =", max_count, ")"
def v9fs_readdir_return(self, tag, id, count, retval):
print "RREADDIR (tag =", tag, ", count =", count, ", retval =", retval, ")"
def v9fs_write(self, tag, id, fid, off, count, cnt):
print "TWRITE (tag =", tag, ", fid =", fid, ", off =", off, ", count =", count, ", cnt =", cnt, ")"
def v9fs_write_return(self, tag, id, total, err):
print "RWRITE (tag =", tag, ", total =", total, ", err =", err, ")"
def v9fs_create(self, tag, id, fid, name, perm, mode):
print "TCREATE (tag =", tag, ", fid =", fid, ", perm =", oct(perm), ", name =", name, ", mode =", oct(mode), ")"
def v9fs_create_return(self, tag, id, type, version, path, iounit):
print "RCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
def v9fs_symlink(self, tag, id, fid, name, symname, gid):
print "TSYMLINK (tag =", tag, ", fid =", fid, ", name =", name, ", symname =", symname, ", gid =", gid, ")"
def v9fs_symlink_return(self, tag, id, type, version, path):
print "RSYMLINK (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"
def v9fs_flush(self, tag, id, flush_tag):
print "TFLUSH (tag =", tag, ", flush_tag =", flush_tag, ")"
def v9fs_link(self, tag, id, dfid, oldfid, name):
print "TLINK (tag =", tag, ", dfid =", dfid, ", oldfid =", oldfid, ", name =", name, ")"
def v9fs_remove(self, tag, id, fid):
print "TREMOVE (tag =", tag, ", fid =", fid, ")"
def v9fs_wstat(self, tag, id, fid, mode, atime, mtime):
print "TWSTAT (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", atime =", atime, "mtime =", mtime, ")"
def v9fs_mknod(self, tag, id, fid, mode, major, minor):
print "TMKNOD (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", major =", major, ", minor =", minor, ")"
def v9fs_lock(self, tag, id, fid, type, start, length):
print "TLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")"
def v9fs_lock_return(self, tag, id, status):
print "RLOCK (tag =", tag, ", status =", status, ")"
def v9fs_getlock(self, tag, id, fid, type, start, length):
print "TGETLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")"
def v9fs_getlock_return(self, tag, id, type, start, length, proc_id):
print "RGETLOCK (tag =", tag, "type =", type, ", start =", start, ", length =", length, ", proc_id =", proc_id, ")"
def v9fs_mkdir(self, tag, id, fid, name, mode, gid):
print "TMKDIR (tag =", tag, ", fid =", fid, ", name =", name, ", mode =", mode, ", gid =", gid, ")"
def v9fs_mkdir_return(self, tag, id, type, version, path, err):
print "RMKDIR (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, err =", err, ")"
def v9fs_xattrwalk(self, tag, id, fid, newfid, name):
print "TXATTRWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", xattr name =", name, ")"
def v9fs_xattrwalk_return(self, tag, id, size):
print "RXATTRWALK (tag =", tag, ", xattrsize =", size, ")"
def v9fs_xattrcreate(self, tag, id, fid, name, size, flags):
print "TXATTRCREATE (tag =", tag, ", fid =", fid, ", name =", name, ", xattrsize =", size, ", flags =", flags, ")"
def v9fs_readlink(self, tag, id, fid):
print "TREADLINK (tag =", tag, ", fid =", fid, ")"
def v9fs_readlink_return(self, tag, id, target):
print "RREADLINK (tag =", tag, ", target =", target, ")"
simpletrace.run(VirtFSRequestTracker())
# ---- KernelAnalysisPlatform/KlareDbg | tracers/qemu/decaf/scripts/analyse-9p-simpletrace.py | Python | gpl-3.0 | 7,579 bytes ----
# The past examples are not robust: they require arguments to be passed in,
# but what happens if the wrong arguments are provided?
# Look at the times-table solution, which changes numbers to text - what
# happens if you provide the number 30?
#
# One way of controlling these situations is to use conditions.
# These enable specific operations to be carried out "if" something is the
# case, or "else" if something else is the case.
a = 5
# first condition trial
if a >= 5:
    print("Value is greater than or equal to 5")
else:
    print("Value is less than 5")
# second condition trial
if a > 5:
    print("Value is greater than 5")
elif a < 5:
    print("Value is less than 5")
else:
    print("Value is 5")
# if and (2 conditions)
a=3
b=5
if (a==3) and (b==5):
print("a and b are as expected - great :)")
else:
print("a and b not as expected - not great :(")
# ---- Chris35Wills/Chris35Wills.github.io | courses/examples/Beginners_python/conditions.py | Python | mit | 826 bytes ----
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets
from PyQt5.QtCore import QTimer, QProcess
import gr3
from gui.gl_widget import GLWidget
from core import file
import os.path
import shutil
import sys
import tempfile
class ImageVideoTabDock(QtWidgets.QDockWidget):
"""
DockWidget for the 'image/video'-tab
"""
def __init__(self, parent):
QtWidgets.QDockWidget.__init__(self, "image/video", parent)
self.setWidget(QtWidgets.QWidget())
self.layout = QtWidgets.QHBoxLayout()
self.image_video_tab = ImageVideoTab(self.widget(), parent)
self.layout.addWidget(self.image_video_tab)
self.widget().setLayout(self.layout)
self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable | QtWidgets.QDockWidget.DockWidgetFloatable)
self.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
class ImageVideoTab(QtWidgets.QWidget):
"""
tab 'image/video' in the main widget
"""
def __init__(self, parent, main_window):
QtWidgets.QWidget.__init__(self, parent)
self.main_window = main_window
self.init_gui()
def init_gui(self):
self.vbox = QtWidgets.QVBoxLayout()
self.screenshot_button = QtWidgets.QPushButton('Save screenshot for the current frame', self)
self.video_button = QtWidgets.QPushButton('Save video for all selected frames', self)
self.mass_screenshot_button = QtWidgets.QPushButton('Save screenshot for all selected frames', self)
self.screenshot_button.clicked.connect(self.save_screenshot)
self.screenshot_button.setDisabled(True)
self.mass_screenshot_button.clicked.connect(self.save_screenshot_for_all_selected_frames)
self.mass_screenshot_button.setDisabled(True)
self.video_button.clicked.connect(self.save_video_for_all_selected_frames)
self.video_button.setDisabled(True)
self.vbox.addWidget(self.screenshot_button)
self.vbox.addWidget(self.mass_screenshot_button)
self.vbox.addWidget(self.video_button)
self.vbox.addStretch()
self.setLayout(self.vbox)
self.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
def save_screenshot(self):
file_name = QtWidgets.QFileDialog.getSaveFileName(self,
'Save screenshot...', filter='Portable Network Graphics (*.png)')[0]
if file_name:
ext = os.path.splitext(file_name)[1]
if not ext:
file_name += '.png'
self.main_window.control.visualization.save_screenshot(file_name)
def get_selected_frames(self):
file_list = self.main_window.file_dock.file_tab.file_list
selection = file_list.get_selection()
if not selection:
QtWidgets.QMessageBox.information(self, "No frame selected", "Please use the file tab to select at least one frame.",
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
return
frames_to_write = []
for file_name, frame_numbers in selection.items():
if frame_numbers == [-1]:
num_frames = file.File.open(file_name).info.num_frames
frame_numbers = range(num_frames)
for frame_number in frame_numbers:
frames_to_write.append((file_name, frame_number))
return frames_to_write
def save_video_for_all_selected_frames(self):
frames_to_write = self.get_selected_frames()
if not frames_to_write:
return
file_name = QtWidgets.QFileDialog.getSaveFileName(self,
'Save video...', filter='QuickTime Movie (*.mov)')[0]
if file_name:
ext = os.path.splitext(file_name)[1]
if not ext:
file_name += '.mov'
dialog = MassScreenshotAndVideoDialog(self, frames_to_write, should_save_video=True, video_file_name=file_name)
dialog.show()
return
def save_screenshot_for_all_selected_frames(self):
frames_to_write = self.get_selected_frames()
if not frames_to_write:
return
dir_name = QtWidgets.QFileDialog.getExistingDirectory(self, 'Save screenshots to folder...')
if dir_name:
dialog = MassScreenshotAndVideoDialog(self, frames_to_write, dir_name)
dialog.show()
return
class MassScreenshotAndVideoDialog(QtWidgets.QDialog):
def __init__(self, parent, frames_to_write, dir_name=None, should_save_video=False, video_file_name=None):
super(MassScreenshotAndVideoDialog, self).__init__(parent)
self.control = parent.main_window.control
self.setModal(True)
self.frames_to_write = frames_to_write
self.images_written = []
if not dir_name:
dir_name = tempfile.mkdtemp()
self.delete_dir = True
else:
self.delete_dir = False
self.dir_name = dir_name
self.should_save_video = should_save_video
self.video_file_name = video_file_name
self.vbox = QtWidgets.QVBoxLayout()
self.progress_bar = QtWidgets.QProgressBar(self)
self.progress_bar.setValue(0)
self.progress_bar.setMinimum(0)
if self.should_save_video:
self.progress_bar.setMaximum(int(2.5*len(frames_to_write)))
else:
self.progress_bar.setMaximum(len(frames_to_write))
self.vbox.addWidget(self.progress_bar)
self.setLayout(self.vbox)
self.is_rejected = False
self.process = None
self.is_first_frame = True
if self.should_save_video:
self.setWindowTitle("Saving video...")
else:
self.setWindowTitle("Saving screenshots...")
QTimer.singleShot(10, self.save_single_screenshot)
def save_single_screenshot(self):
if self.is_rejected:
return
current_frame = self.frames_to_write.pop(0)
file_name, frame_number = current_frame
width, height = 1920, 1080
image_file_name = os.path.join(self.dir_name, "{}.{:06d}.png".format(os.path.basename(file_name), frame_number+1))
self.control.visualize(file_name, frame_number)
self.control.visualization.create_scene()
self.control.visualization.save_screenshot(image_file_name, width, height,
self.is_first_frame, not self.frames_to_write)
self.images_written.append(image_file_name)
self.is_first_frame = False
if self.frames_to_write:
self.progress_bar.setValue(len(self.images_written))
self.update()
QTimer.singleShot(10, self.save_single_screenshot)
elif self.should_save_video:
QTimer.singleShot(10, self.save_video)
else:
self.done(0)
def save_video(self):
self.process = QProcess()
self.process.start('gr', ['util/video_output.py', self.video_file_name] + self.images_written)
self.process.readyReadStandardOutput.connect(self.process_output)
self.process.readyReadStandardError.connect(self.process_error)
self.process.finished.connect(lambda *args: self.finished_video())
def finished_video(self):
if self.delete_dir:
shutil.rmtree(self.dir_name)
self.done(0)
def process_output(self):
output = self.process.readAllStandardOutput()
for line in str(output).splitlines():
try:
number = int(line)
except ValueError:
pass
else:
self.progress_bar.setValue(len(self.images_written)+number)
def process_error(self):
output = self.process.readAllStandardError()
        sys.stderr.write(str(output))
def reject(self):
self.is_rejected = True
if self.process is not None:
self.process.terminate()
super(MassScreenshotAndVideoDialog, self).reject()
# ---- sciapp/pyMolDyn | src/gui/tabs/image_video_tab.py | Python | mit | 8,054 bytes ----
from django.test import TestCase
from .models import ImportTask, generate_file_path
class ImportTest(TestCase):
def test_csv_import(self):
pass
def test_generate_file_path(self):
self.assertEquals(generate_file_path(ImportTask(), 'allo.csv'), 'csv_imports/allo.csv')
self.assertEquals(generate_file_path(ImportTask(), 'allo.xlsx'), 'csv_imports/allo.xlsx')
self.assertEquals(generate_file_path(ImportTask(), 'allo.foo.bar'), 'csv_imports/allo.foo.bar')
long_name = 'foo' * 100
test_file_name = '%s.xls.csv' % long_name
self.assertEquals(len(generate_file_path(ImportTask(), test_file_name)), 100)
self.assertEquals(generate_file_path(ImportTask(), test_file_name), 'csv_imports/%s.csv' % long_name[:84])
test_file_name = '%s.abc.xlsx' % long_name
self.assertEquals(len(generate_file_path(ImportTask(), test_file_name)), 100)
self.assertEquals(generate_file_path(ImportTask(), test_file_name), 'csv_imports/%s.xlsx' % long_name[:83])
# ---- nyaruka/smartmin | smartmin/csv_imports/tests.py | Python | bsd-3-clause | 1,036 bytes ----
#
# Copyright 2013-2015 BloomReach, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from briefly.core import *
import unittest
import time
class PrinterExecutor(NodeExecutor):
  '''An executor used for debugging; it records the order of execution.
'''
def __init__(self, service, task_done_callback=None):
'''Constructor, initialize executor'''
super(PrinterExecutor, self).__init__(service)
service.execution_queue = []
self.verbose = False
def execute_node(self, node):
'''Simulate executing a node.'''
time.sleep(node.identifier * 0.1)
self.executed = True
self.service.execution_queue.append(node)
#print ' - %s : executed.' % (node.hash(),)
class MockTarget(object):
DEFAULT_RETRY_COUNT = 3
def __init__(self, identifier):
self.identifier = identifier
self.deps = []
self.configured = True
self.executed = False
self.exe_error = None
def __getattribute__(self, attr):
if attr == 'prop':
class property(object):
pass
prop = property()
prop.num_retry = self.DEFAULT_RETRY_COUNT
return prop
else:
return super(MockTarget, self).__getattribute__(attr)
def hash(self):
return self.identifier
def log(self, *args):
pass
def check_configure(self):
return self
def check_execute(self):
pass
def reset_log(self):
pass
class TestCaseWithSetup(unittest.TestCase):
def get_edges(self):
return tuple()
  def get_executor_service(self):
prop = Properties(
run_threads = 5,
)
self.prop = prop
return ExecutorService(self)
def setUp(self):
    self.service = self.get_executor_service()
self.service.executor_factory = PrinterExecutor
self.edges = self.get_edges()
self.nodes = set()
self.target_map = {}
for p, c in self.edges:
self.nodes.add(p)
self.nodes.add(c)
self.targets = []
for n in self.nodes:
target = MockTarget(n)
self.target_map[n] = target
self.targets.append(target)
for p, c in self.edges:
self.target_map[c].deps.append(self.target_map[p])
class TestCaseExecutionService(TestCaseWithSetup):
def get_edges(self):
return ((11,2),
(11,9),
(11,10),
(8,9),
(7,11),
(7,8),
(5,11),
(3,8),
(3,10))
  def get_executor_service(self):
prop = Properties(
run_threads = 5,
)
self.prop = prop
return ExecutorService(self)
def test_setup(self):
for p, c in self.edges:
child = self.target_map[c]
parent = self.target_map[p]
self.assertTrue(parent in child.deps)
def test_run(self):
self.service.execute(self.targets)
execution_sequence = self.service.execution_queue
results = tuple(e.identifier for e in execution_sequence)
self.assertTrue(results == (3, 5, 7, 8, 11, 2, 9, 10))
class TestCaseExecutionServiceParallel(TestCaseWithSetup):
def get_edges(self):
return ((1,2),
(3,4),
(5,6),
(7,8))
  def get_executor_service(self):
prop = Properties(
run_threads = 1,
)
self.prop = prop
return ExecutorService(self)
def test_run(self):
self.service.execute(self.targets)
execution_sequence = self.service.execution_queue
results = tuple(e.identifier for e in execution_sequence)
self.assertTrue(results == (1, 2, 3, 4, 5, 6, 7, 8))
if __name__ == '__main__':
unittest.main()
# ---- bloomreach/briefly | tests/briefly/test_core.py | Python | apache-2.0 | 3,993 bytes ----
class Animal:
"""Abstract class to be implemented by all animals."""
def __init__(self, name) -> None:
self.name = name
def make_sound(self) -> str:
raise NotImplementedError
class Cat(Animal): # Error: Method 'make_sound' is not overridden
"""A worthy companion."""
pass
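# A minimal conforming subclass (illustrative; not part of the original
# example file): overriding make_sound satisfies the abstract interface,
# so pylint would not flag it with W0223.
class Dog(Animal):
    """A good dog."""
    def make_sound(self) -> str:
        return "Woof!"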
# ---- RyanDJLee/pyta | examples/pylint/W0223_abstract_method.py | Python | gpl-3.0 | 313 bytes ----
from __future__ import unicode_literals
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
# ---- KSG-IT/ksg-nett | api/models.py | Python | gpl-3.0 | 395 bytes ----
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
# Modified by Benoit Boissinot:
# - fix for digest auth (inspired from urllib2.py @ Python v2.4)
# Modified by Dirkjan Ochtman:
# - import md5 function from a local util module
# Modified by Augie Fackler:
# - add safesend method and use it to prevent broken pipe errors
# on large POST requests
"""An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
>>> import urllib2
>>> from keepalive import HTTPHandler
>>> keepalive_handler = HTTPHandler()
>>> opener = urlreq.buildopener(keepalive_handler)
>>> urlreq.installopener(opener)
>>>
>>> fo = urlreq.urlopen('http://www.python.org')
If a connection to a given host is requested, and all of the existing
connections are still in use, another connection will be opened. If
the handler tries to use an existing connection but it fails in some
way, it will be closed and removed from the pool.
To remove the handler, simply re-run build_opener with no arguments, and
install that opener.
You can explicitly close connections by using the close_connection()
method of the returned file-like object (described below) or you can
use the handler methods:
close_connection(host)
close_all()
open_connections()
NOTE: using the close_connection and close_all methods of the handler
should be done with care when using multiple threads.
* there is nothing that prevents another thread from creating new
connections immediately after connections are closed
* no checks are done to prevent in-use connections from being closed
>>> keepalive_handler.close_all()
EXTRA ATTRIBUTES AND METHODS
Upon a status of 200, the object returned has a few additional
attributes and methods, which should not be used if you want to
remain consistent with the normal urllib2-returned objects:
close_connection() - close the connection to the host
readlines() - you know, readlines()
status - the return status (e.g. 404)
reason - English translation of status (e.g. 'File not found')
If you want the best of both worlds, use this inside an
AttributeError-catching try:
>>> try: status = fo.status
>>> except AttributeError: status = None
Unfortunately, these are ONLY there if status == 200, so it's not
easy to distinguish between non-200 responses. The reason is that
urllib2 tries to do clever things with error codes 301, 302, 401,
and 407, and it wraps the object upon return.
For python versions earlier than 2.4, you can avoid this fancy error
handling by setting the module-level global HANDLE_ERRORS to zero.
You see, prior to 2.4, it's the HTTP Handler's job to determine what
to handle specially, and what to just pass up. HANDLE_ERRORS == 0
means "pass everything up". In python 2.4, however, this job no
longer belongs to the HTTP Handler and is now done by a NEW handler,
HTTPErrorProcessor. Here's the bottom line:
python version < 2.4
HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
errors
HANDLE_ERRORS == 0 pass everything up, error processing is
left to the calling code
python version >= 2.4
HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
HANDLE_ERRORS == 0 (default) pass everything up, let the
other handlers (specifically,
HTTPErrorProcessor) decide what to do
In practice, setting the variable either way makes little difference
in python 2.4, so for the most consistent behavior across versions,
you probably just want to use the defaults, which will give you
exceptions on errors.
"""
# $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
from __future__ import absolute_import, print_function
import errno
import hashlib
import socket
import sys
import threading
from . import (
util,
)
httplib = util.httplib
urlerr = util.urlerr
urlreq = util.urlreq
DEBUG = None
if sys.version_info < (2, 4):
    HANDLE_ERRORS = 1
else:
    HANDLE_ERRORS = 0
class ConnectionManager(object):
"""
The connection manager must be able to:
* keep track of all existing
"""
def __init__(self):
self._lock = threading.Lock()
self._hostmap = {} # map hosts to a list of connections
self._connmap = {} # map connections to host
self._readymap = {} # map connection to ready state
def add(self, host, connection, ready):
self._lock.acquire()
try:
if host not in self._hostmap:
self._hostmap[host] = []
self._hostmap[host].append(connection)
self._connmap[connection] = host
self._readymap[connection] = ready
finally:
self._lock.release()
def remove(self, connection):
self._lock.acquire()
try:
try:
host = self._connmap[connection]
except KeyError:
pass
else:
del self._connmap[connection]
del self._readymap[connection]
self._hostmap[host].remove(connection)
if not self._hostmap[host]: del self._hostmap[host]
finally:
self._lock.release()
def set_ready(self, connection, ready):
try:
self._readymap[connection] = ready
except KeyError:
pass
def get_ready_conn(self, host):
conn = None
self._lock.acquire()
try:
if host in self._hostmap:
for c in self._hostmap[host]:
if self._readymap[c]:
self._readymap[c] = 0
conn = c
break
finally:
self._lock.release()
return conn
def get_all(self, host=None):
if host:
return list(self._hostmap.get(host, []))
else:
return dict(self._hostmap)
class KeepAliveHandler(object):
def __init__(self):
self._cm = ConnectionManager()
#### Connection Management
def open_connections(self):
"""return a list of connected hosts and the number of connections
to each. [('foo.com:80', 2), ('bar.org', 1)]"""
return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
def close_connection(self, host):
"""close connection(s) to <host>
host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
no error occurs if there is no connection to that host."""
for h in self._cm.get_all(host):
self._cm.remove(h)
h.close()
def close_all(self):
"""close all open connections"""
for host, conns in self._cm.get_all().iteritems():
for h in conns:
self._cm.remove(h)
h.close()
def _request_closed(self, request, host, connection):
"""tells us that this request is now closed and that the
connection is ready for another request"""
self._cm.set_ready(connection, 1)
def _remove_connection(self, host, connection, close=0):
if close:
connection.close()
self._cm.remove(connection)
#### Transaction Execution
def http_open(self, req):
return self.do_open(HTTPConnection, req)
def do_open(self, http_class, req):
host = req.get_host()
if not host:
raise urlerr.urlerror('no host given')
try:
h = self._cm.get_ready_conn(host)
while h:
r = self._reuse_connection(h, req, host)
# if this response is non-None, then it worked and we're
# done. Break out, skipping the else block.
if r:
break
# connection is bad - possibly closed by server
# discard it and ask for the next free connection
h.close()
self._cm.remove(h)
h = self._cm.get_ready_conn(host)
else:
# no (working) free connections were found. Create a new one.
h = http_class(host)
if DEBUG:
DEBUG.info("creating new connection to %s (%d)",
host, id(h))
self._cm.add(host, h, 0)
self._start_transaction(h, req)
r = h.getresponse()
except (socket.error, httplib.HTTPException) as err:
raise urlerr.urlerror(err)
# if not a persistent connection, don't try to reuse it
if r.will_close:
self._cm.remove(h)
if DEBUG:
DEBUG.info("STATUS: %s, %s", r.status, r.reason)
r._handler = self
r._host = host
r._url = req.get_full_url()
r._connection = h
r.code = r.status
r.headers = r.msg
r.msg = r.reason
if r.status == 200 or not HANDLE_ERRORS:
return r
else:
return self.parent.error('http', req, r,
r.status, r.msg, r.headers)
def _reuse_connection(self, h, req, host):
"""start the transaction with a re-used connection
return a response object (r) upon success or None on failure.
        This does NOT close or remove bad connections in cases where
it returns. However, if an unexpected exception occurs, it
will close and remove the connection before re-raising.
"""
try:
self._start_transaction(h, req)
r = h.getresponse()
# note: just because we got something back doesn't mean it
# worked. We'll check the version below, too.
except (socket.error, httplib.HTTPException):
r = None
except: # re-raises
# adding this block just in case we've missed
# something we will still raise the exception, but
# lets try and close the connection and remove it
# first. We previously got into a nasty loop
# where an exception was uncaught, and so the
# connection stayed open. On the next try, the
# same exception was raised, etc. The trade-off is
# that it's now possible this call will raise
# a DIFFERENT exception
if DEBUG:
DEBUG.error("unexpected exception - closing "
"connection to %s (%d)", host, id(h))
self._cm.remove(h)
h.close()
raise
if r is None or r.version == 9:
# httplib falls back to assuming HTTP 0.9 if it gets a
# bad header back. This is most likely to happen if
# the socket has been closed by the server since we
# last used the connection.
if DEBUG:
DEBUG.info("failed to re-use connection to %s (%d)",
host, id(h))
r = None
else:
if DEBUG:
DEBUG.info("re-using connection to %s (%d)", host, id(h))
return r
def _start_transaction(self, h, req):
# What follows mostly reimplements HTTPConnection.request()
# except it adds self.parent.addheaders in the mix.
headers = req.headers.copy()
if sys.version_info >= (2, 4):
headers.update(req.unredirected_hdrs)
headers.update(self.parent.addheaders)
headers = dict((n.lower(), v) for n, v in headers.items())
skipheaders = {}
for n in ('host', 'accept-encoding'):
if n in headers:
skipheaders['skip_' + n.replace('-', '_')] = 1
try:
if req.has_data():
data = req.get_data()
h.putrequest('POST', req.get_selector(), **skipheaders)
if 'content-type' not in headers:
h.putheader('Content-type',
'application/x-www-form-urlencoded')
if 'content-length' not in headers:
h.putheader('Content-length', '%d' % len(data))
else:
h.putrequest('GET', req.get_selector(), **skipheaders)
except socket.error as err:
raise urlerr.urlerror(err)
for k, v in headers.items():
h.putheader(k, v)
h.endheaders()
if req.has_data():
h.send(data)
class HTTPHandler(KeepAliveHandler, urlreq.httphandler):
pass
class HTTPResponse(httplib.HTTPResponse):
# we need to subclass HTTPResponse in order to
# 1) add readline() and readlines() methods
# 2) add close_connection() methods
# 3) add info() and geturl() methods
# in order to add readline(), read must be modified to deal with a
# buffer. example: readline must read a buffer and then spit back
# one line at a time. The only real alternative is to read one
# BYTE at a time (ick). Once something has been read, it can't be
# put back (ok, maybe it can, but that's even uglier than this),
# so if you THEN do a normal read, you must first take stuff from
# the buffer.
# the read method wraps the original to accommodate buffering,
# although read() never adds to the buffer.
# Both readline and readlines have been stolen with almost no
# modification from socket.py
def __init__(self, sock, debuglevel=0, strict=0, method=None):
httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
self.fileno = sock.fileno
self.code = None
self._rbuf = ''
self._rbufsize = 8096
self._handler = None # inserted by the handler later
self._host = None # (same)
self._url = None # (same)
self._connection = None # (same)
_raw_read = httplib.HTTPResponse.read
def close(self):
if self.fp:
self.fp.close()
self.fp = None
if self._handler:
self._handler._request_closed(self, self._host,
self._connection)
def close_connection(self):
self._handler._remove_connection(self._host, self._connection, close=1)
self.close()
def info(self):
return self.headers
def geturl(self):
return self._url
def read(self, amt=None):
# the _rbuf test is only in this first if for speed. It's not
# logically necessary
if self._rbuf and not amt is None:
L = len(self._rbuf)
if amt > L:
amt -= L
else:
s = self._rbuf[:amt]
self._rbuf = self._rbuf[amt:]
return s
s = self._rbuf + self._raw_read(amt)
self._rbuf = ''
return s
# stolen from Python SVN #68532 to fix issue1088
def _read_chunked(self, amt):
chunk_left = self.chunk_left
value = ''
# XXX This accumulates chunks by repeated string concatenation,
# which is not efficient as the number or size of chunks gets big.
while True:
if chunk_left is None:
line = self.fp.readline()
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
chunk_left = int(line, 16)
except ValueError:
# close the connection as protocol synchronization is
# probably lost
self.close()
raise httplib.IncompleteRead(value)
if chunk_left == 0:
break
if amt is None:
value += self._safe_read(chunk_left)
elif amt < chunk_left:
value += self._safe_read(amt)
self.chunk_left = chunk_left - amt
return value
elif amt == chunk_left:
value += self._safe_read(amt)
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return value
else:
value += self._safe_read(chunk_left)
amt -= chunk_left
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline()
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line == '\r\n':
break
# we read everything; close the "file"
self.close()
return value
def readline(self, limit=-1):
i = self._rbuf.find('\n')
while i < 0 and not (0 < limit <= len(self._rbuf)):
new = self._raw_read(self._rbufsize)
if not new:
break
i = new.find('\n')
if i >= 0:
i = i + len(self._rbuf)
self._rbuf = self._rbuf + new
if i < 0:
i = len(self._rbuf)
else:
i = i + 1
if 0 <= limit < len(self._rbuf):
i = limit
data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
return data
def readlines(self, sizehint=0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
def safesend(self, str):
"""Send `str' to the server.
Shamelessly ripped off from httplib to patch a bad behavior.
"""
# _broken_pipe_resp is an attribute we set in this function
# if the socket is closed while we're sending data but
# the server sent us a response before hanging up.
# In that case, we want to pretend to send the rest of the
# outgoing data, and then let the user use getresponse()
# (which we wrap) to get this last response before
# opening a new socket.
if getattr(self, '_broken_pipe_resp', None) is not None:
return
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise httplib.NotConnected
# send the data to the server. if we get a broken pipe, then close
# the socket. we want to reconnect when somebody tries to send again.
#
# NOTE: we DO propagate the error, though, because we cannot simply
# ignore the error... the caller will know if they can retry.
if self.debuglevel > 0:
print("send:", repr(str))
try:
blocksize = 8192
read = getattr(str, 'read', None)
if read is not None:
if self.debuglevel > 0:
print("sending a read()able")
data = read(blocksize)
while data:
self.sock.sendall(data)
data = read(blocksize)
else:
self.sock.sendall(str)
except socket.error as v:
reraise = True
if v[0] == errno.EPIPE: # Broken pipe
if self._HTTPConnection__state == httplib._CS_REQ_SENT:
self._broken_pipe_resp = None
self._broken_pipe_resp = self.getresponse()
reraise = False
self.close()
if reraise:
raise
def wrapgetresponse(cls):
"""Wraps getresponse in cls with a broken-pipe sane version.
"""
def safegetresponse(self):
# In safesend() we might set the _broken_pipe_resp
# attribute, in which case the socket has already
# been closed and we just need to give them the response
# back. Otherwise, we use the normal response path.
r = getattr(self, '_broken_pipe_resp', None)
if r is not None:
return r
return cls.getresponse(self)
safegetresponse.__doc__ = cls.getresponse.__doc__
return safegetresponse
class HTTPConnection(httplib.HTTPConnection):
# use the modified response class
response_class = HTTPResponse
send = safesend
getresponse = wrapgetresponse(httplib.HTTPConnection)
#########################################################################
##### TEST FUNCTIONS
#########################################################################
def error_handler(url):
global HANDLE_ERRORS
orig = HANDLE_ERRORS
keepalive_handler = HTTPHandler()
opener = urlreq.buildopener(keepalive_handler)
urlreq.installopener(opener)
pos = {0: 'off', 1: 'on'}
for i in (0, 1):
print(" fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i))
HANDLE_ERRORS = i
try:
fo = urlreq.urlopen(url)
fo.read()
fo.close()
try:
status, reason = fo.status, fo.reason
except AttributeError:
status, reason = None, None
except IOError as e:
print(" EXCEPTION: %s" % e)
raise
else:
print(" status = %s, reason = %s" % (status, reason))
HANDLE_ERRORS = orig
hosts = keepalive_handler.open_connections()
print("open connections:", hosts)
keepalive_handler.close_all()
def continuity(url):
md5 = hashlib.md5
format = '%25s: %s'
# first fetch the file with the normal http handler
opener = urlreq.buildopener()
urlreq.installopener(opener)
fo = urlreq.urlopen(url)
foo = fo.read()
fo.close()
m = md5(foo)
print(format % ('normal urllib', m.hexdigest()))
# now install the keepalive handler and try again
opener = urlreq.buildopener(HTTPHandler())
urlreq.installopener(opener)
fo = urlreq.urlopen(url)
foo = fo.read()
fo.close()
m = md5(foo)
print(format % ('keepalive read', m.hexdigest()))
fo = urlreq.urlopen(url)
foo = ''
while True:
f = fo.readline()
if f:
foo = foo + f
else: break
fo.close()
m = md5(foo)
print(format % ('keepalive readline', m.hexdigest()))
def comp(N, url):
print(' making %i connections to:\n %s' % (N, url))
sys.stdout.write(' first using the normal urllib handlers')
# first use normal opener
opener = urlreq.buildopener()
urlreq.installopener(opener)
t1 = fetch(N, url)
print(' TIME: %.3f s' % t1)
sys.stdout.write(' now using the keepalive handler ')
# now install the keepalive handler and try again
opener = urlreq.buildopener(HTTPHandler())
urlreq.installopener(opener)
t2 = fetch(N, url)
print(' TIME: %.3f s' % t2)
print(' improvement factor: %.2f' % (t1 / t2))
def fetch(N, url, delay=0):
import time
lens = []
starttime = time.time()
for i in range(N):
if delay and i > 0:
time.sleep(delay)
fo = urlreq.urlopen(url)
foo = fo.read()
fo.close()
lens.append(len(foo))
diff = time.time() - starttime
j = 0
for i in lens[1:]:
j = j + 1
if not i == lens[0]:
print("WARNING: inconsistent length on read %i: %i" % (j, i))
return diff
def test_timeout(url):
global DEBUG
dbbackup = DEBUG
class FakeLogger(object):
def debug(self, msg, *args):
print(msg % args)
info = warning = error = debug
DEBUG = FakeLogger()
print(" fetching the file to establish a connection")
fo = urlreq.urlopen(url)
data1 = fo.read()
fo.close()
i = 20
print(" waiting %i seconds for the server to close the connection" % i)
while i > 0:
sys.stdout.write('\r %2i' % i)
sys.stdout.flush()
time.sleep(1)
i -= 1
sys.stderr.write('\r')
print(" fetching the file a second time")
fo = urlreq.urlopen(url)
data2 = fo.read()
fo.close()
if data1 == data2:
print(' data are identical')
else:
print(' ERROR: DATA DIFFER')
DEBUG = dbbackup
def test(url, N=10):
print("checking error handler (do this on a non-200)")
try: error_handler(url)
except IOError:
print("exiting - exception will prevent further tests")
sys.exit()
print('')
print("performing continuity test (making sure stuff isn't corrupted)")
continuity(url)
print('')
print("performing speed comparison")
comp(N, url)
print('')
print("performing dropped-connection check")
test_timeout(url)
if __name__ == '__main__':
import time
try:
N = int(sys.argv[1])
url = sys.argv[2]
except (IndexError, ValueError):
print("%s <integer> <url>" % sys.argv[0])
else:
test(url, N)
# ---- dscho/hg | mercurial/keepalive.py | Python | gpl-2.0 | 25,789 bytes ----
# -*- coding:utf-8 -*-
'''
Author: Bu Kun
E-mail: bukun@osgeo.cn
Copyright: http://www.yunsuan.org
'''
import tornado.web
import tornado.escape
import json
from torlite.core import tools
from torlite.core.base_handler import BaseHandler
from torlite.model.mwiki import MWiki
from torlite.model.mcatalog import MCatalog
from torlite.model.mspec import SpesubModel
from torlite.model.mwiki_hist import MWikiHist
from torlite.model.muser import MUser
from torlite.model.mreply import MReply
from torlite.model.mreply2user import MReply2User
from torlite.model.core_tab import CabReply
from torlite.model.core_tab import CabUser2Reply
class ReplyHandler(BaseHandler):
def initialize(self):
self.muser = MUser()
self.mreply = MReply()
self.mreply2user = MReply2User()
if self.get_current_user():
self.userinfo = self.muser.get_by_id(self.get_current_user())
else:
self.userinfo = None
def get(self, url_str=''):
if url_str == '':
return
url_arr = url_str.split(r'/')
if url_arr[0] == 'delete_reply':
self.delete_by_id(url_arr[1])
elif url_arr[0] == 'get':
self.get_by_id(url_arr[1])
elif url_arr[0] == 'zan':
self.zan(url_arr[1])
def get_by_id(self, reply_id):
reply = self.mreply.get_reply_by_uid(reply_id)
self.render( 'tplite/reply/show_reply.html',
cnt = reply.cnt_md,
username=reply.user_name,
date=reply.date,
vote=reply.vote,
uid=reply.uid,
userinfo=self.userinfo,
)
# @tornado.web.authenticated
# def add_one(self, id_reply):
# # post_data = {}
# # for key in self.request.arguments:
# # post_data[key] = self.get_arguments(key)
# # post_data['user_id'] = self.userinfo.uid
#
# cur_count = self.mreply2user.insert_data(self.userinfo.uid, id_reply)
# if cur_count:
# self.mreply.update_vote(id_reply, cur_count)
#
# out_dic = {'zan_count': cur_count}
# return json.dump(out_dic)
@tornado.web.authenticated
def zan(self, id_reply):
post_data = {}
for key in self.request.arguments:
post_data[key] = self.get_arguments(key)
post_data['user_id'] = self.userinfo.uid
# zs = CabUser2Reply.select().where(CabUser2Reply.reply_id == id_reply).count()
cur_count = self.mreply2user.insert_data(self.userinfo.uid, id_reply)
if cur_count:
self.mreply.update_vote(id_reply, cur_count)
output = {
'text_zan': cur_count,
}
else:
output = {
'text_zan': 0,
}
        self.write(json.dumps(output))
def delete_by_id(self, del_id):
is_deleted = self.mreply2user.delete(del_id)
# self.redirect('/post/{0}.html'.format(del_id))
if is_deleted:
output = {
'del_zan': 1
}
else:
output = {
'del_zan': 0,
}
        self.write(json.dumps(output))
# ---- Geoion/TorCMS | torlite/handlers/reply_handler.py | Python | mit | 3,236 bytes ----
"""
WSGI config for istari project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "istari.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# ---- gilsondev/istari | istari/wsgi.py | Python | mit | 387 bytes ----
# Copyright 2019 KMEE
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models, _
from odoo.exceptions import AccessError
class SacTicket(models.Model):
_name = 'sac.ticket'
_description = 'Sac Ticket'
_inherit = ['mail.thread', 'mail.activity.mixin', 'base.kanban.abstract']
name = fields.Char(
default=lambda self: _('New'),
)
assunto_id = fields.Many2one(
'sac.assunto',
)
partner_id = fields.Many2one('res.partner')
partner_name = fields.Char()
partner_gender = fields.Selection(
selection=[
('m', 'Masculino'),
('f', 'Feminino'),
]
)
partner_birthday = fields.Date()
partner_profession = fields.Char()
partner_phone = fields.Char()
partner_email = fields.Char()
partner_zip = fields.Char()
partner_street = fields.Char()
partner_street2 = fields.Char()
partner_district = fields.Char()
partner_state_id = fields.Many2one(
comodel_name='res.country.state',
domain=[('country_id.code', '=', 'BR')]
)
partner_city = fields.Char()
mensagem = fields.Text()
@api.onchange('partner_id')
def onchange_partner_id(self):
if self.partner_id:
self.partner_name = self.partner_id.name
self.partner_phone = self.partner_id.phone
self.partner_email = self.partner_id.email
self.partner_street = self.partner_id.street
self.partner_street2 = self.partner_id.street2
self.partner_zip = self.partner_id.zip
self.partner_state_id = self.partner_id.state_id
self.partner_city = self.partner_id.city
@api.model
def create(self, vals):
vals['name'] = self.env['ir.sequence'].next_by_code('sac') or _('New')
result = super(SacTicket, self).create(vals)
return result
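    # Note (added for clarity; not part of the original module): the sequence
    # lookup above assumes an ``ir.sequence`` record with code ``sac`` is shipped
    # in the module's data files; if it is missing, ``next_by_code`` returns a
    # falsy value and the ticket name falls back to "New".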
@api.multi
def message_get_suggested_recipients(self):
recipients = super(SacTicket, self).message_get_suggested_recipients()
try:
for record in self:
if record.partner_id:
record._message_add_suggested_recipient(
recipients,
partner=record.partner_id,
reason=_('Customer'))
elif record.partner_email:
record._message_add_suggested_recipient(
recipients,
email=record.partner_email,
reason=_('Customer Email'))
except AccessError:
pass
return recipients
def enviar_sms(self):
for record in self:
if record.partner_phone:
record.message_post_send_sms(
sms_message='Nº do Chamado {}'.format(record.name),
numbers=[record.partner_phone],
)
|
kmee/kmee_odoo_addons
|
sac/models/sac_ticket.py
|
Python
|
agpl-3.0
| 2,925
|
#!/usr/bin/env python
import sys
import argparse
from Bio import SeqIO
from Bio.SeqFeature import SeqFeature
from Bio.SeqFeature import FeatureLocation
from cpt_gffParser import gffParse, gffWrite, gffSeqFeature
import logging
logging.basicConfig(level=logging.INFO)
def mga_to_gff3(mga_output, genome):
seq_dict = SeqIO.to_dict(SeqIO.parse(genome, "fasta"))
current_record = None
for line in mga_output:
if line.startswith("#"):
if line.startswith("# gc = ") or line.startswith("# self:"):
continue
chromId = line.strip().replace("# ", "")
if " " in chromId:
chromId = chromId[0 : chromId.index(" ")]
if chromId in seq_dict:
if current_record is not None:
yield current_record
current_record = seq_dict[chromId]
else:
raise Exception(
"Found results for sequence %s which was not in fasta file sequences (%s)"
% (chromId, ", ".join(seq_dict.keys()))
)
else:
(
gene_id,
start,
end,
strand,
phase,
complete,
score,
model,
rbs_start,
rbs_end,
rbs_score,
) = line.strip().split("\t")
start = int(start)
end = int(end)
strand = +1 if strand == "+" else -1
# Correct for gff3
start -= 1
rbs_feat = None
if rbs_start != "-":
rbs_start = int(rbs_start)
rbs_end = int(rbs_end)
rbs_feat = gffSeqFeature(
FeatureLocation(rbs_start, rbs_end),
type="Shine_Dalgarno_sequence",
strand=strand,
qualifiers={
"ID": "%s.rbs_%s" % (current_record.id, gene_id),
"Source": "MGA",
},
shift=phase,
source="MGA"
)
cds_feat = gffSeqFeature(
FeatureLocation(start, end),
type="CDS",
strand=strand,
qualifiers={
"Source": "MGA",
"ID": "%s.cds_%s" % (current_record.id, gene_id),
},
shift=phase,
source="MGA"
)
if rbs_feat is not None:
if strand > 0:
gene_start = rbs_start
gene_end = end
else:
gene_start = start
gene_end = rbs_end
else:
gene_start = start
gene_end = end
gene = gffSeqFeature(
FeatureLocation(gene_start, gene_end),
type="gene",
strand=strand,
qualifiers={
"Source": "MGA",
"ID": "%s.%s" % (current_record.id, gene_id),
},
shift=phase,
source="MGA"
)
gene.sub_features = [cds_feat]
if rbs_feat is not None:
gene.sub_features.append(rbs_feat)
current_record.features.append(gene)
yield current_record
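# Illustrative note (not part of the original script): each non-comment MGA line
# is expected to carry eleven tab-separated fields, matching the tuple unpacking
# above.  The sample record below is hypothetical and only shows the column order:
#
#   gene_1  337  2799  +  0  11  1790.1  b  313  318  8.3
#   (gene_id, start, end, strand, phase, complete, score, model,
#    rbs_start, rbs_end, rbs_score)
#
# MGA coordinates are 1-based; the converter subtracts 1 from ``start`` (see the
# "# Correct for gff3" comment above) so the FeatureLocation objects follow the
# 0-based convention used throughout.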
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Convert MGA to GFF3", epilog="")
parser.add_argument(
"mga_output", type=argparse.FileType("r"), help="MetaGeneAnnotator Output"
)
parser.add_argument("genome", type=argparse.FileType("r"), help="Fasta Genome")
args = parser.parse_args()
for result in mga_to_gff3(**vars(args)):
gffWrite([result], sys.stdout)
|
TAMU-CPT/galaxy-tools
|
tools/phage/cpt_convert_mga_to_gff3.py
|
Python
|
gpl-3.0
| 3,886
|
import platform
import sys
import os
THIS_DIR = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(os.path.abspath(THIS_DIR), "../ciscoconfparse/"))
sys.path.insert(0, os.path.abspath(THIS_DIR))
import pytest
from ciscoconfparse import CiscoConfParse
c01 = """policy-map QOS_1
class GOLD
priority percent 10
!
class SILVER
bandwidth 30
random-detect
!
class BRONZE
random-detect
!
interface Serial 1/0
encapsulation ppp
ip address 1.1.1.1 255.255.255.252
!
interface GigabitEthernet4/1
switchport
switchport access vlan 100
switchport voice vlan 150
power inline static max 7000
!
interface GigabitEthernet4/2
switchport
switchport access vlan 100
switchport voice vlan 150
power inline static max 7000
!
interface GigabitEthernet4/3
switchport
switchport access vlan 100
switchport voice vlan 150
!
interface GigabitEthernet4/4
shutdown
!
interface GigabitEthernet4/5
switchport
switchport access vlan 110
!
interface GigabitEthernet4/6
switchport
switchport access vlan 110
!
interface GigabitEthernet4/7
switchport
switchport access vlan 110
!
interface GigabitEthernet4/8
switchport
switchport access vlan 110
!
access-list 101 deny tcp any any eq 25 log
access-list 101 permit ip any any
!
!
logging 1.1.3.5
logging 1.1.3.17
!
banner login ^C
This is a router, and you cannot have it.
Log off now while you still can type. I break the fingers
of all tresspassers.
^C
alias exec showthang show ip route vrf THANG""".splitlines()
config_c01_default_gige = """policy-map QOS_1
class GOLD
priority percent 10
!
class SILVER
bandwidth 30
random-detect
!
class BRONZE
random-detect
!
interface Serial 1/0
encapsulation ppp
ip address 1.1.1.1 255.255.255.252
!
default interface GigabitEthernet4/1
interface GigabitEthernet4/1
switchport
switchport access vlan 100
switchport voice vlan 150
power inline static max 7000
!
default interface GigabitEthernet4/2
interface GigabitEthernet4/2
switchport
switchport access vlan 100
switchport voice vlan 150
power inline static max 7000
!
default interface GigabitEthernet4/3
interface GigabitEthernet4/3
switchport
switchport access vlan 100
switchport voice vlan 150
!
default interface GigabitEthernet4/4
interface GigabitEthernet4/4
shutdown
!
default interface GigabitEthernet4/5
interface GigabitEthernet4/5
switchport
switchport access vlan 110
!
default interface GigabitEthernet4/6
interface GigabitEthernet4/6
switchport
switchport access vlan 110
!
default interface GigabitEthernet4/7
interface GigabitEthernet4/7
switchport
switchport access vlan 110
!
default interface GigabitEthernet4/8
interface GigabitEthernet4/8
switchport
switchport access vlan 110
!
access-list 101 deny tcp any any eq 25 log
access-list 101 permit ip any any
!
!
logging 1.1.3.5
logging 1.1.3.17
!
banner login ^C
This is a router, and you cannot have it.
Log off now while you still can type. I break the fingers
of all tresspassers.
^C
alias exec showthang show ip route vrf THANG""".splitlines()
config_c01_insert_serial_replace = """policy-map QOS_1
class GOLD
priority percent 10
!
class SILVER
bandwidth 30
random-detect
!
class BRONZE
random-detect
!
default interface Serial 2/0
interface Serial 2/0
encapsulation ppp
ip address 1.1.1.1 255.255.255.252
!
interface GigabitEthernet4/1
switchport
switchport access vlan 100
switchport voice vlan 150
power inline static max 7000
!
interface GigabitEthernet4/2
switchport
switchport access vlan 100
switchport voice vlan 150
power inline static max 7000
!
interface GigabitEthernet4/3
switchport
switchport access vlan 100
switchport voice vlan 150
!
interface GigabitEthernet4/4
shutdown
!
interface GigabitEthernet4/5
switchport
switchport access vlan 110
!
interface GigabitEthernet4/6
switchport
switchport access vlan 110
!
interface GigabitEthernet4/7
switchport
switchport access vlan 110
!
interface GigabitEthernet4/8
switchport
switchport access vlan 110
!
access-list 101 deny tcp any any eq 25 log
access-list 101 permit ip any any
!
!
logging 1.1.3.5
logging 1.1.3.17
!
banner login ^C
This is a router, and you cannot have it.
Log off now while you still can type. I break the fingers
of all tresspassers.
^C
alias exec showthang show ip route vrf THANG""".splitlines()
# A smaller version of c01...
c02 = """policy-map QOS_1
class GOLD
priority percent 10
!
class SILVER
bandwidth 30
random-detect
!
class BRONZE
random-detect
!
interface GigabitEthernet4/1
switchport
switchport access vlan 100
switchport voice vlan 150
power inline static max 7000
!""".splitlines()
## For historical reasons, I use c03 for configs/sample_01.ios (i.e. c01 was
## already taken)
c03 = """!
service timestamps debug datetime msec localtime show-timezone
service timestamps log datetime msec localtime show-timezone
!
errdisable recovery cause bpduguard
errdisable recovery interval 400
!
aaa new-model
!
ip vrf TEST_100_001
route-target 100:1
rd 100:1
!
interface Serial 1/0
description Uplink to SBC F923X2K425
bandwidth 1500
clock rate 1500
delay 70
encapsulation ppp
ip address 1.1.1.1 255.255.255.252
!
interface Serial 1/1
description Uplink to AT&T
encapsulation hdlc
ip address 1.1.1.9 255.255.255.254
hold-queue 1000 in
hold-queue 1000 out
mpls mtu 1540
ip mtu 1500
mpls ip
!
interface GigabitEthernet4/1
description
switchport
switchport access vlan 100
switchport voice vlan 150
power inline static max 7000
!
interface GigabitEthernet4/2
switchport
switchport access vlan 100
switchport voice vlan 150
power inline static max 7000
speed 100
duplex full
!
interface GigabitEthernet4/3
mtu 9216
switchport
switchport access vlan 100
switchport voice vlan 150
!
interface GigabitEthernet4/4
shutdown
!
interface GigabitEthernet4/5
switchport
switchport access vlan 110
switchport port-security
switchport port-security maximum 3
switchport port-security mac-address sticky
switchport port-security mac-address 1000.2000.3000
switchport port-security mac-address 1000.2000.3001
switchport port-security mac-address 1000.2000.3002
switchport port-security violation shutdown
!
interface GigabitEthernet4/6
description Simulate a Catalyst6500 access port
switchport
switchport access vlan 110
switchport mode access
switchport nonegotiate
switchport port-security
switchport port-security maximum 2
switchport port-security violation restrict
switchport port-security aging type inactivity
switchport port-security aging time 5
spanning-tree portfast
spanning-tree portfast bpduguard
storm-control action shutdown
storm-control broadcast level 0.40
storm-control multicast level 0.35
!
interface GigabitEthernet4/7
description Dot1Q trunk allowing vlans 2-4,7,10,11-19,21-4094
switchport
switchport trunk encapsulation dot1q
switchport mode trunk
switchport trunk native vlan 4094
switchport trunk allowed vlan remove 1,5-10,20
switchport trunk allowed vlan add 7,10
switchport nonegotiate
!
interface GigabitEthernet4/8.120
no switchport
encapsulation dot1q 120
ip vrf forwarding TEST_100_001
ip address 1.1.2.254 255.255.255.0
!
interface ATM5/0/0
no ip address
no ip redirects
no ip unreachables
no ip proxy-arp
load-interval 30
carrier-delay msec 100
no atm ilmi-keepalive
bundle-enable
max-reserved-bandwidth 100
hold-queue 500 in
!
interface ATM5/0/0.32 point-to-point
ip address 1.1.1.5 255.255.255.252
no ip redirects
no ip unreachables
no ip proxy-arp
ip accounting access-violations
pvc 0/32
vbr-nrt 704 704
!
interface ATM5/0/1
shutdown
!
router ospf 100 vrf TEST_100_001
router-id 1.1.2.254
network 1.1.2.0 0.0.0.255 area 0
!
policy-map QOS_1
class GOLD
priority percent 10
!
class SILVER
bandwidth 30
random-detect
!
class BRONZE
random-detect
!
access-list 101 deny tcp any any eq 25 log
access-list 101 permit ip any any
!
!
logging 1.1.3.5
logging 1.1.3.17
!
banner login ^C
This is a router, and you cannot have it.
Log off now while you still can type. I break the fingers
of all tresspassers.
^C
!
alias exec showthang show ip route vrf THANG""".splitlines()
j01 = """## Last commit: 2015-06-28 13:00:59 CST by mpenning
system {
host-name TEST01_EX;
domain-name pennington.net;
domain-search [ pennington.net lab.pennington.net ];
location {
country-code 001;
building HQ_005;
floor 1;
}
root-authentication {
encrypted-password "$1$y7ArHxKU$zUbdeLfBirgkCsKiOJ5Qa0"; ## SECRET-DATA
}
name-server {
172.16.3.222;
}
login {
announcement "Test Lab Switch";
message "Unauthorized access is prohibited";
user mpenning {
full-name "Mike Pennington";
uid 1000;
class super-user;
authentication {
encrypted-password "$1$y7ArHxKU$zUbdeLfBirgkCsKiOJ5Qa0"; ## SECRET-DATA
}
}
}
services {
ssh {
root-login allow;
}
telnet;
web-management {
http;
}
}
syslog {
user * {
any emergency;
}
file messages {
any notice;
authorization info;
}
file interactive-commands {
interactive-commands any;
}
}
ntp {
Management {
vlan-id 1;
interface {
ge-0/0/0.0;
ge-0/0/1.0;
ge-0/0/2.0;
ge-0/0/3.0;
}
}
VLAN_FOO {
vlan-id 5;
}
vlan1 {
vlan-id 1;
l3-interface vlan.1;
}
vlan800 {
vlan-id 800;
}
}
ethernet-switching-options {
storm-control {
interface all;
}
}
interfaces {
ge-0/0/0 {
unit 0 {
family ethernet-switching {
port-mode access;
vlan {
members VLAN_FOO;
}
}
}
}
ge-0/0/1 {
unit 0 {
family ethernet-switching {
port-mode trunk;
vlan {
members all;
}
native-vlan-id 1;
}
}
}
vlan {
unit 0 {
family inet {
address 172.16.15.5/22;
}
}
}
}
routing-options {
static {
route 0.0.0.0/0 next-hop 172.16.12.1;
route 192.168.36.0/25 next-hop 172.16.12.1;
}
}""".splitlines()
a01 = """hostname TEST-FW
!
name 1.1.2.20 loghost01
name 1.1.3.10 dmzsrv00
name 1.1.3.11 dmzsrv01
name 1.1.3.12 dmzsrv02
name 1.1.3.13 dmzsrv03
!
interface Ethernet0/0
description Uplink to SBC F923X2K425
nameif OUTSIDE
security-level 0
delay 70
ip address 1.1.1.1 255.255.255.252
!
interface Ethernet0/1
nameif INSIDE
security-level 100
ip address 1.1.2.1 255.255.255.0
!
interface Ethernet0/2
switchport access vlan 100
!
interface VLAN100
nameif DMZ
security-level 50
ip address 1.1.3.1 255.255.255.0
!
object-group network ANY_addrs
network-object 0.0.0.0 0.0.0.0
!
object-group network INSIDE_addrs1
network-object host 1.1.2.1
network-object 1.1.2.2 255.255.255.255
network-object 1.1.2.0 255.255.255.0
!
object-group network INSIDE_addrs1
network-object host 1.1.2.1
network-object 1.1.2.2 255.255.255.255
network-object 1.1.2.0 255.255.255.0
!
object-group service DNS_svc
service-object udp destination eq dns
!
object-group service NTP_svc
service-object udp destination eq ntp
!
object-group service FTP_svc
service-object tcp destination eq ftp
!
object-group service HTTP_svc
service-object tcp destination eq http
!
object-group service HTTPS_svc
service-object tcp destination eq https
!
access-list INSIDE_in extended permit object-group FTP_svc object-group INSIDE_addrs1 object-group ANY_addrs log
access-list INSIDE_in remark Overlap for test purposes
access-list INSIDE_in extended permit ip object-group INSIDE_addrs1 object-group ANY_addrs log
access-list INSIDE_in extended deny ip any any log
!
!
clock timezone CST -6
clock summer-time CDT recurring
!
logging enable
logging timestamp
logging buffer-size 1048576
logging buffered informational
logging trap informational
logging asdm informational
logging facility 22
logging host INSIDE loghost01
no logging message 302021
!
access-group OUTSIDE_in in interface OUTSIDE
access-group INSIDE_in in interface INSIDE
!""".splitlines()
a02 = """: Saved
: Written by mpenning at 05:37:43.184 CDT Sun Jun 29 2015
!
ASA Version 9.0(3)
!
command-alias exec slog show log | i Deny|denied
command-alias exec sacl sh access-list INSIDE_out | e hitcnt=0 |remark|elements
hostname fw
domain-name pennington.net
enable password 2KFQnbNIdI.2KYOU encrypted
xlate per-session deny tcp any4 any4
xlate per-session deny tcp any4 any6
xlate per-session deny tcp any6 any4
xlate per-session deny tcp any6 any6
xlate per-session deny udp any4 any4 eq domain
xlate per-session deny udp any4 any6 eq domain
xlate per-session deny udp any6 any4 eq domain
xlate per-session deny udp any6 any6 eq domain
passwd 2KFQnbNIdI.2KYOU encrypted
names
name 192.0.2.13 Machine01 description machine01
name 192.0.2.17 Machine02_Windows
name 10.0.0.6 Machine03
name 74.125.130.125 GTalk01 description Google talk server
name 74.125.134.125 GTalk02 description Google talk server
name 74.125.139.125 GTalk03 description Google Talk server
name 74.125.142.125 GTalk04 description Google Talk server
name 74.125.192.125 GTalk05 description Google Talk server
name 74.125.140.125 GTalk06 description Google Talk server
name 74.125.137.125 GTalk07
name 74.125.138.125 GTalk08
name 74.125.141.125 GTalk09
name 74.125.136.125 GTalk10
name 74.125.135.125 GTalk11
name 108.160.160.0 AS19679_Dropbox__108-160-160-0__20
name 199.47.216.0 AS19679_Dropbox__199.47.216.0__22
name 173.194.64.109 GmailSMTP01
name 173.194.64.108 GmailSMTP02
name 128.223.51.103 route-views.oregon-ix.net description Route-Views route server
ip local pool SSL_VPN_ADDRS 10.1.1.240-10.1.1.241 mask 255.255.255.0
!
interface Ethernet0/0
description Internet ISP
switchport access vlan 100
!
interface Ethernet0/1
switchport access vlan 200
!
interface Ethernet0/2
switchport access vlan 200
shutdown
!
interface Ethernet0/3
switchport access vlan 200
!
interface Ethernet0/4
switchport access vlan 200
!
interface Ethernet0/5
switchport access vlan 200
!
interface Ethernet0/6
switchport access vlan 200
!
interface Ethernet0/7
shutdown
!
interface Vlan1
no nameif
no security-level
no ip address
!
interface Vlan100
mac-address 0030.dead.beef
nameif OUTSIDE
security-level 0
ip address dhcp setroute
!
interface Vlan200
nameif INSIDE
security-level 100
ip address 192.0.2.1 255.255.255.0
!
banner motd
banner motd Test banner for $(hostname)
banner motd
banner motd *******************************
boot system disk0:/asa903-k8.bin
ftp mode passive
clock timezone CST -6
clock summer-time CDT recurring
dns domain-lookup INSIDE
dns server-group DefaultDNS
name-server Machine01
domain-name pennington.net
object network GTalk01
host 74.125.130.125
description Created during name migration
object network GTalk02
host 74.125.134.125
description Created during name migration
object network GTalk03
host 74.125.139.125
description Created during name migration
object network GTalk04
host 74.125.142.125
description Created during name migration
object network GTalk05
host 74.125.192.125
description Created during name migration
object network GTalk06
host 74.125.140.125
description Created during name migration
object network GTalk07
host 74.125.137.125
description Created during name migration
object network GTalk08
host 74.125.138.125
description Created during name migration
object network GTalk09
host 74.125.141.125
description Created during name migration
object network GTalk10
host 74.125.136.125
description Created during name migration
object network GTalk11
host 74.125.135.125
description Created during name migration
object network AS19679_Dropbox__108-160-160-0__20
subnet 108.160.160.0 255.255.240.0
description Created during name migration
object network AS19679_Dropbox__199.47.216.0__22
subnet 199.47.216.0 255.255.252.0
description Created during name migration
object network Machine01
host 192.0.2.5
description Created during name migration
object network obj_any
subnet 0.0.0.0 0.0.0.0
object network Machine02_Windows
host 192.0.2.17
description Created during name migration
object-group network GoogleTalk
network-object object GTalk01
network-object object GTalk02
network-object object GTalk03
network-object object GTalk04
network-object object GTalk05
network-object object GTalk06
network-object object GTalk07
network-object object GTalk08
network-object object GTalk09
network-object object GTalk10
network-object object GTalk11
object-group service GoogleTalkPorts
service-object tcp destination eq 5222
service-object tcp destination eq https
service-object udp destination range 19302 19309
object-group network Inside
network-object 192.0.2.0 255.255.255.0
network-object 192.0.22.0 255.255.255.0
network-object 192.0.23.0 255.255.255.0
object-group network DROPBOX_AS19679
network-object object AS19679_Dropbox__108-160-160-0__20
network-object object AS19679_Dropbox__199.47.216.0__22
object-group network GOOGLE_addrs
description dig -t TXT _netblocks.google.com 8.8.8.8
network-object 216.239.32.0 255.255.224.0
network-object 64.233.160.0 255.255.224.0
network-object 66.249.80.0 255.255.240.0
network-object 72.14.192.0 255.255.192.0
network-object 209.85.128.0 255.255.128.0
network-object 66.102.0.0 255.255.240.0
network-object 74.125.0.0 255.255.0.0
network-object 64.18.0.0 255.255.240.0
network-object 207.126.144.0 255.255.240.0
network-object 173.194.0.0 255.255.0.0
object-group network SSH_addrs
network-object 192.168.1.0 255.255.255.0
object-group network ANY_addrs
network-object 0.0.0.0 0.0.0.0
object-group network INSIDE_addrs
network-object 192.0.2.0 255.255.255.0
network-object 10.0.0.0 255.0.0.0
object-group service GOOGLE_svc
description Google's push service for Android
service-object tcp destination eq www
service-object tcp destination eq https
service-object tcp destination eq 5228
service-object tcp destination eq 5222
service-object tcp destination eq 587
object-group service TELNET_svc
service-object tcp destination eq telnet
object-group service WHOIS_svc
service-object tcp destination eq whois
object-group service SSH_svc
service-object tcp destination eq ssh
object-group service WEB_svc
description Standard web services - http, https, ftp
service-object tcp destination eq ftp
service-object tcp destination eq www
service-object tcp destination eq https
service-object icmp
object-group service DNS_svc
service-object udp destination eq domain
service-object tcp destination eq domain
object-group network MACHINE01_addrs
network-object object Machine01
object-group service ANDROID_svc
description Google's push service for Android
service-object tcp destination eq 5228
object-group service GMAILSMTP_svc
service-object tcp destination eq 2525
object-group service NTP_svc
service-object udp destination eq ntp
object-group service SKYPE_svc
service-object udp destination eq 5555
object-group service XBOX_svc
service-object tcp destination eq domain
service-object udp destination eq domain
service-object udp destination eq 88
service-object tcp destination eq 3074
service-object udp destination eq 3074
object-group network ANY
object-group service NaverLine_svc
service-object udp destination eq 11000
service-object udp destination range 9401 9405
object-group network NaverLine_addrs
network-object 174.35.127.0 255.255.255.0
object-group network Facebook_addrs
network-object 66.220.144.0 255.255.240.0
network-object 69.63.176.0 255.255.248.0
network-object 69.63.184.0 255.255.248.0
network-object 69.171.224.0 255.255.240.0
network-object 69.171.239.0 255.255.255.0
network-object 69.171.240.0 255.255.240.0
network-object 69.171.253.0 255.255.255.0
network-object 69.171.255.0 255.255.255.0
network-object 74.119.76.0 255.255.252.0
network-object 103.4.96.0 255.255.252.0
network-object 173.252.64.0 255.255.192.0
network-object 204.15.20.0 255.255.252.0
network-object 31.13.24.0 255.255.248.0
network-object 31.13.64.0 255.255.192.0
network-object 31.13.96.0 255.255.224.0
object-group service IP_SLA_PathTrace_svc
service-object udp destination range 33400 33499
object-group service FTP_svc
service-object tcp destination eq ftp
object-group service TeamViewerPorts
service-object tcp destination eq 5938
object-group service SSLVPN_svc
service-object udp destination eq 443
object-group service TEST_PORTS tcp
port-object eq domain
port-object eq smtp
access-list SPLIT_TUNNEL_NETS remark [[ destinations available via the VPN ]]
access-list SPLIT_TUNNEL_NETS standard permit 192.0.2.0 255.255.255.0
access-list NO_SSLVPN_NAT remark [[ prevent inadvertent nat of sslvpn traffic ]]
access-list NO_SSLVPN_NAT extended permit ip 192.0.2.0 255.255.255.0 192.0.2.0 255.255.255.0
access-list INSIDE_in extended deny object-group SKYPE_svc object-group INSIDE_addrs object-group ANY_addrs log disable
access-list INSIDE_in extended permit object-group GOOGLE_svc object-group INSIDE_addrs object-group GOOGLE_addrs log
access-list INSIDE_in extended permit object-group ANDROID_svc object-group INSIDE_addrs object-group GOOGLE_addrs log
access-list INSIDE_in extended permit object-group IP_SLA_PathTrace_svc any host 4.2.2.2 log
access-list INSIDE_in extended permit object-group DNS_svc object-group INSIDE_addrs object-group ANY_addrs log
access-list INSIDE_in extended permit object-group NTP_svc object-group INSIDE_addrs object-group ANY_addrs log
access-list INSIDE_in extended permit object-group TELNET_svc object-group INSIDE_addrs host 128.223.51.103 log
access-list INSIDE_in extended permit object-group FTP_svc object-group INSIDE_addrs object-group ANY_addrs log
access-list INSIDE_in extended permit object-group WEB_svc object-group INSIDE_addrs object-group ANY_addrs log
access-list INSIDE_in extended permit object-group SSH_svc object-group INSIDE_addrs object-group SSH_addrs log
access-list INSIDE_in extended permit object-group GMAILSMTP_svc object-group TSUNAMI_addrs object-group ANY_addrs log
access-list INSIDE_in extended permit object-group WHOIS_svc object-group TSUNAMI_addrs object-group ANY_addrs log
access-list INSIDE_in extended deny ip any4 any4 log
access-list ANY extended permit ip object-group Inside any4
access-list ANY extended permit ip any4 object-group Inside
access-list VOIP extended permit object-group GoogleTalkPorts object-group Inside object-group GoogleTalk
access-list VOIP extended permit object-group GoogleTalkPorts object-group GoogleTalk object-group Inside
access-list MAINTENANCE extended deny ip any4 any4 log
access-list OUTSIDE_in extended deny ip host 4.2.2.2 any4 log
access-list OUTSIDE_in extended permit icmp any4 0.0.0.0 0.0.0.0 unreachable log interval 1
access-list OUTSIDE_in extended permit icmp any4 0.0.0.0 0.0.0.0 time-exceeded log interval 1
access-list OUTSIDE_in extended deny ip any4 any4 log
pager lines 23
logging enable
logging timestamp
logging buffer-size 1048576
logging buffered informational
logging trap informational
logging asdm informational
logging facility 22
logging host INSIDE Machine01
logging class sys buffered informational
no logging message 302021
no logging message 302020
mtu OUTSIDE 1500
mtu INSIDE 1500
ip verify reverse-path interface INSIDE
icmp unreachable rate-limit 1 burst-size 1
asdm image disk0:/asdm-645.bin
no asdm history enable
arp timeout 14400
no arp permit-nonconnected
!
object network obj_any
nat (INSIDE,OUTSIDE) dynamic interface
access-group OUTSIDE_in in interface OUTSIDE
access-group INSIDE_in in interface INSIDE
route INSIDE 10.0.0.0 255.0.0.0 192.0.2.2 1
timeout xlate 3:00:00
timeout pat-xlate 0:00:30
timeout conn 1:00:00 half-closed 0:59:00 udp 0:02:00 icmp 0:00:02
timeout sunrpc 0:10:00 h323 0:05:00 h225 1:00:00 mgcp 0:05:00 mgcp-pat 0:05:00
timeout sip 0:30:00 sip_media 0:02:00 sip-invite 0:03:00 sip-disconnect 0:02:00
timeout sip-provisional-media 0:02:00 uauth 0:05:00 absolute
timeout tcp-proxy-reassembly 0:01:00
timeout floating-conn 0:00:00
dynamic-access-policy-record DfltAccessPolicy
user-identity default-domain LOCAL
aaa authentication ssh console LOCAL
aaa authentication enable console LOCAL
aaa authentication http console LOCAL
aaa authorization command LOCAL
aaa local authentication attempts max-fail 16
filter java 1-65535 192.0.2.0 255.255.255.0 0.0.0.0 0.0.0.0
http server enable
http 192.0.2.0 255.255.255.0 INSIDE
snmp-server host INSIDE Machine01 poll community public
snmp-server location ServerRoom
snmp-server contact mike@pennington.net
snmp-server community public
snmp-server enable traps snmp authentication linkup linkdown coldstart
crypto ipsec security-association pmtu-aging infinite
crypto ca trustpoint LOCAL_CERT_fw
enrollment self
fqdn fw.pennington.net
subject-name CN=fw.pennington.net
crl configure
crypto ca trustpool policy
telnet timeout 5
ssh scopy enable
ssh 192.0.2.0 255.255.255.0 INSIDE
ssh 10.0.0.0 255.0.0.0 INSIDE
ssh timeout 60
ssh version 2
console timeout 5
no vpn-addr-assign aaa
no vpn-addr-assign dhcp
dhcpd dns 68.94.156.1 Machine01
dhcpd lease 604800
dhcpd domain pennington.net
dhcpd auto_config OUTSIDE
!
threat-detection basic-threat
threat-detection scanning-threat shun duration 30
threat-detection statistics host
threat-detection statistics port
threat-detection statistics protocol
threat-detection statistics access-list
no threat-detection statistics tcp-intercept
ntp server 17.151.16.20
ntp server 17.151.16.21
ntp server 17.151.16.22
ntp server 17.151.16.23
group-policy SSL_VPN_Policy01 internal
group-policy SSL_VPN_Policy01 attributes
dns-server value 192.0.2.13
vpn-idle-timeout none
vpn-filter none
vpn-tunnel-protocol ssl-client ssl-clientless
split-tunnel-policy tunnelspecified
split-tunnel-network-list value SPLIT_TUNNEL_NETS
default-domain value pennington.net
webvpn
anyconnect keep-installer installed
anyconnect ssl rekey time 30
anyconnect ssl rekey method ssl
anyconnect ask none default anyconnect
username mpenning password dXRTaA5wrZ3OL8gz encrypted privilege 15
tunnel-group DefaultWEBVPNGroup general-attributes
address-pool SSL_VPN_ADDRS
default-group-policy SSL_VPN_Policy01
!
!
policy-map type inspect dns preset_dns_map
parameters
message-length maximum client auto
message-length maximum 512
policy-map global_policy
class inspection_default
inspect dns preset_dns_map
inspect h323 h225
inspect h323 ras
inspect rsh
inspect rtsp
inspect esmtp
inspect sqlnet
inspect skinny
inspect sunrpc
inspect xdmcp
inspect sip
inspect netbios
inspect tftp
inspect ip-options
inspect icmp
inspect http
!
service-policy global_policy global
prompt hostname context
no call-home reporting anonymous
call-home
profile CiscoTAC-1
no active
destination address http https://tools.cisco.com/its/service/oddce/services/DDCEService
destination address email callhome@cisco.com
destination transport-method http
subscribe-to-alert-group diagnostic
subscribe-to-alert-group environment
subscribe-to-alert-group inventory periodic monthly
subscribe-to-alert-group configuration periodic monthly
subscribe-to-alert-group telemetry periodic daily
Cryptochecksum:571d01b7b08342e35db838e9acec00f6
: end""".splitlines()
@pytest.yield_fixture(scope='session')
def c01_default_gigethernets(request):
yield config_c01_default_gige
@pytest.yield_fixture(scope='session')
def c01_insert_serial_replace(request):
yield config_c01_insert_serial_replace
@pytest.yield_fixture(scope='function')
def parse_c01(request):
"""Preparsed c01"""
parse_c01 = CiscoConfParse(c01, factory=False)
yield parse_c01
@pytest.yield_fixture(scope='function')
def parse_c01_factory(request):
"""Preparsed c01 with factory option"""
parse_c01_factory = CiscoConfParse(c01, factory=True)
yield parse_c01_factory
@pytest.yield_fixture(scope='function')
def parse_c02(request):
"""Preparsed c02"""
parse_c02 = CiscoConfParse(c02, factory=False)
yield parse_c02
@pytest.yield_fixture(scope='function')
def parse_c02_factory(request):
"""Preparsed c02"""
parse_c02 = CiscoConfParse(c02, factory=True)
yield parse_c02
## parse_c03 yields configs/sample_01.ios
@pytest.yield_fixture(scope='function')
def parse_c03(request):
"""Preparsed c03"""
parse_c03 = CiscoConfParse(c03, factory=False)
yield parse_c03
## parse_c03_factory yields configs/sample_01.ios
@pytest.yield_fixture(scope='function')
def parse_c03_factory(request):
"""Preparsed c01 with factory option"""
parse_c03_factory = CiscoConfParse(c03, factory=True)
yield parse_c03_factory
## parse_j01 yields configs/sample_01.junos
@pytest.yield_fixture(scope='function')
def parse_j01(request):
"""Preparsed j01"""
parse_j01 = CiscoConfParse(j01, syntax='junos', comment='#!', factory=False)
yield parse_j01
## parse_j01_factory yields configs/sample_01.junos
@pytest.yield_fixture(scope='function')
def parse_j01_factory(request):
"""Preparsed j01 with factory option"""
parse_j01_factory = CiscoConfParse(j01, syntax='junos', comment='#!',
factory=True)
yield parse_j01_factory
## parse_a01 yields the asa configuration
@pytest.yield_fixture(scope='function')
def parse_a01(request):
"""Preparsed a01"""
    parse_a01 = CiscoConfParse(a01, syntax='asa', factory=False)
    yield parse_a01
## parse_a01_factory yields the asa configuration
@pytest.yield_fixture(scope='function')
def parse_a01_factory(request):
"""Preparsed a01 with factory option"""
parse_a01_factory = CiscoConfParse(a01, syntax='asa', factory=True)
yield parse_a01_factory
## config_a02 yields an asa configuration
@pytest.yield_fixture(scope='function')
def config_a02(request):
"""Unparsed a02"""
yield a02
## parse_a02 yields an asa configuration
@pytest.yield_fixture(scope='function')
def parse_a02(request):
"""Preparsed a02"""
    parse_a02 = CiscoConfParse(a02, syntax='asa', factory=False)
    yield parse_a02
## parse_a02_factory yields an asa configuration
@pytest.yield_fixture(scope='function')
def parse_a02_factory(request):
"""Preparsed a02 with factory option"""
parse_a02_factory = CiscoConfParse(a02, syntax='asa', factory=True)
yield parse_a02_factory
@pytest.mark.skipif(sys.version_info[0]>=3,
reason="No Python3 MockSSH support")
@pytest.mark.skipif('windows' in platform.system().lower(),
reason="No Windows MockSSH support")
@pytest.yield_fixture(scope='session')
def cisco_sshd_mocked(request):
"""Mock Cisco IOS SSH"""
from fixtures.devices.mock_cisco import start_cisco_mock, stop_cisco_mock
try:
## Start the SSH Server
start_cisco_mock()
yield True
except:
yield False
stop_cisco_mock()
stop_cisco_mock()
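## --- Illustrative usage sketch (not part of the original conftest) ----------
## A minimal example of how a test module in this suite might consume the
## fixtures defined above.  The helper is deliberately not named ``test_*`` so
## pytest will not collect it; the expected interface count is read off the c01
## sample config.
def _example_fixture_usage(parse_c01):
    """Hypothetical illustration of typical CiscoConfParse fixture usage."""
    gige_intfs = parse_c01.find_objects(r'^interface GigabitEthernet')
    # c01 defines GigabitEthernet4/1 through GigabitEthernet4/8
    assert len(gige_intfs) == 8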
|
SivagnanamCiena/ciscoconfparse
|
tests/conftest.py
|
Python
|
gpl-3.0
| 31,317
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/services')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_request_initial(
subscription_id: str,
resource_group_name: str,
account_name: str,
service_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/services/{serviceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str', max_length=50, min_length=3),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
account_name: str,
service_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/services/{serviceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str', max_length=50, min_length=3),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
account_name: str,
service_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/services/{serviceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str', max_length=50, min_length=3),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class ServiceOperations(object):
"""ServiceOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cosmosdb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> Iterable["_models.ServiceResourceListResult"]:
"""Gets the status of service.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceResourceListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.ServiceResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ServiceResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/services'} # type: ignore
def _create_initial(
self,
resource_group_name: str,
account_name: str,
service_name: str,
create_update_parameters: "_models.ServiceResourceCreateUpdateParameters",
**kwargs: Any
) -> Optional["_models.ServiceResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServiceResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(create_update_parameters, 'ServiceResourceCreateUpdateParameters')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
service_name=service_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServiceResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/services/{serviceName}'} # type: ignore
@distributed_trace
def begin_create(
self,
resource_group_name: str,
account_name: str,
service_name: str,
create_update_parameters: "_models.ServiceResourceCreateUpdateParameters",
**kwargs: Any
) -> LROPoller["_models.ServiceResource"]:
"""Creates a service.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param service_name: Cosmos DB service name.
:type service_name: str
:param create_update_parameters: The Service resource parameters.
:type create_update_parameters:
~azure.mgmt.cosmosdb.models.ServiceResourceCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ServiceResource or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ServiceResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
service_name=service_name,
create_update_parameters=create_update_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ServiceResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/services/{serviceName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
account_name: str,
service_name: str,
**kwargs: Any
) -> "_models.ServiceResource":
"""Gets the status of service.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param service_name: Cosmos DB service name.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceResource, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ServiceResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
service_name=service_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/services/{serviceName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
account_name: str,
service_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
service_name=service_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/services/{serviceName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
account_name: str,
service_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes service with the given serviceName.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param service_name: Cosmos DB service name.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
account_name=account_name,
service_name=service_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/services/{serviceName}'} # type: ignore
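# ------------------------------------------------------------------------------
# Illustrative usage sketch (added for documentation; not part of the generated
# code).  The client class name ``CosmosDBManagementClient`` and its ``service``
# operations attribute are assumptions based on this package's layout, and
# ``DefaultAzureCredential`` comes from the separate azure-identity package;
# consult the SDK documentation for authoritative usage.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.cosmosdb import CosmosDBManagementClient
#
#   client = CosmosDBManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   for service in client.service.list("<resource-group>", "<account-name>"):
#       print(service.name)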
|
Azure/azure-sdk-for-python
|
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_service_operations.py
|
Python
|
mit
| 24,443
|
import datetime
from django.http import HttpResponse
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(datetime.tzinfo):
"""UTC
Optimized UTC implementation. It unpickles using the single module global
instance defined beneath this class declaration.
"""
zone = "UTC"
_utcoffset = ZERO
_dst = ZERO
_tzname = zone
def fromutc(self, dt):
if dt.tzinfo is None:
return self.localize(dt)
return super(utc.__class__, self).fromutc(dt)
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
def __reduce__(self):
return _UTC, ()
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return "<UTC>"
def __str__(self):
return "UTC"
UTC = utc = UTC()  # UTC is a singleton
def _UTC():
    """Factory used by UTC.__reduce__ so unpickled instances resolve to the module singleton."""
    return utc
class JSONResponse(HttpResponse):
def __init__(self, *args, **kwargs):
# Content-Type override
if 'content_type' not in kwargs:
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(*args, **kwargs)
class JSONResponseBadRequest(JSONResponse):
status_code = 400
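# Illustrative sketch (not part of the original module): a hypothetical Django
# view showing how the two response classes above might be used.  The view name
# and payload are assumptions for illustration only.
def _example_json_view(request):
    import json
    if request.method != 'POST':
        return JSONResponseBadRequest(json.dumps({'error': 'POST required'}))
    payload = {'ok': True, 'received_at': datetime.datetime.now(tz=utc).isoformat()}
    return JSONResponse(json.dumps(payload))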
|
HackUCF/collabCTF
|
tools/misc.py
|
Python
|
mit
| 1,711
|
from jedi._compatibility import u
from jedi import parser
from ..helpers import unittest
class TokenTest(unittest.TestCase):
def test_end_pos_one_line(self):
parsed = parser.Parser(u('''
def testit():
a = "huhu"
'''))
tok = parsed.module.subscopes[0].statements[0]._token_list[2]
self.assertEqual(tok.end_pos, (3, 14))
def test_end_pos_multi_line(self):
parsed = parser.Parser(u('''
def testit():
a = """huhu
asdfasdf""" + "h"
'''))
tok = parsed.module.subscopes[0].statements[0]._token_list[2]
self.assertEqual(tok.end_pos, (4, 11))
|
Eddy0402/Environment
|
vim/ycmd/third_party/jedi/test/test_parser/test_token.py
|
Python
|
gpl-3.0
| 602
|
#!/usr/bin/env python3
#
# Electron Cash - lightweight Bitcoin client
# Copyright (C) 2019 Axel Gembe <derago@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import List, Dict, Callable, Any
from abc import ABC, abstractmethod
from PyQt5.QtGui import QColor
from PyQt5.QtCore import Qt
from electrum_mona.i18n import _
from electrum_mona.qrreader import QrCodeResult
from electrum_mona.gui.qt.util import ColorScheme, QColorLerp
class QrReaderValidatorResult():
"""
Result of a QR code validator
"""
def __init__(self):
self.accepted: bool = False
self.message: str = None
self.message_color: QColor = None
        self.simple_result: str = None
self.result_usable: Dict[QrCodeResult, bool] = {}
self.result_colors: Dict[QrCodeResult, QColor] = {}
self.result_messages: Dict[QrCodeResult, str] = {}
self.selected_results: List[QrCodeResult] = []
class AbstractQrReaderValidator(ABC):
"""
Abstract base class for QR code result validators.
"""
@abstractmethod
def validate_results(self, results: List[QrCodeResult]) -> QrReaderValidatorResult:
"""
Checks a list of QR code results for usable codes.
"""
class QrReaderValidatorCounting(AbstractQrReaderValidator):
"""
This QR code result validator doesn't directly accept any results but maintains a dictionary
of detection counts in `result_counts`.
"""
result_counts: Dict[QrCodeResult, int] = {}
def validate_results(self, results: List[QrCodeResult]) -> QrReaderValidatorResult:
res = QrReaderValidatorResult()
for result in results:
# Increment the detection count
            if result not in self.result_counts:
self.result_counts[result] = 0
self.result_counts[result] += 1
# Search for missing results, iterate over a copy because the loop might modify the dict
for result in self.result_counts.copy():
# Count down missing results
if result in results:
continue
self.result_counts[result] -= 2
# When the count goes to zero, remove
if self.result_counts[result] < 1:
del self.result_counts[result]
return res
class QrReaderValidatorColorizing(QrReaderValidatorCounting):
"""
This QR code result validator doesn't directly accept any results but colorizes the results
based on the counts maintained by `QrReaderValidatorCounting`.
"""
WEAK_COLOR: QColor = QColor(Qt.red)
STRONG_COLOR: QColor = QColor(Qt.green)
strong_count: int = 10
def validate_results(self, results: List[QrCodeResult]) -> QrReaderValidatorResult:
res = super().validate_results(results)
# Colorize the QR code results by their detection counts
for result in results:
# Enforce strong_count as upper limit
self.result_counts[result] = min(self.result_counts[result], self.strong_count)
# Interpolate between WEAK_COLOR and STRONG_COLOR based on count / strong_count
lerp_factor = (self.result_counts[result] - 1) / self.strong_count
lerped_color = QColorLerp(self.WEAK_COLOR, self.STRONG_COLOR, lerp_factor)
res.result_colors[result] = lerped_color
return res
class QrReaderValidatorStrong(QrReaderValidatorColorizing):
"""
This QR code result validator doesn't directly accept any results but passes every strong
detection in the return values `selected_results`.
"""
def validate_results(self, results: List[QrCodeResult]) -> QrReaderValidatorResult:
res = super().validate_results(results)
for result in results:
if self.result_counts[result] >= self.strong_count:
res.selected_results.append(result)
break
return res
class QrReaderValidatorCounted(QrReaderValidatorStrong):
"""
This QR code result validator accepts a result as soon as there is at least `minimum` and at
most `maximum` QR code(s) with strong detection.
"""
def __init__(self, minimum: int = 1, maximum: int = 1):
super().__init__()
self.minimum = minimum
self.maximum = maximum
def validate_results(self, results: List[QrCodeResult]) -> QrReaderValidatorResult:
res = super().validate_results(results)
num_results = len(res.selected_results)
if num_results < self.minimum:
if num_results > 0:
res.message = _('Too few QR codes detected.')
res.message_color = ColorScheme.RED.as_color()
elif num_results > self.maximum:
res.message = _('Too many QR codes detected.')
res.message_color = ColorScheme.RED.as_color()
else:
res.accepted = True
res.simple_result = (results and results[0].data) or '' # hack added by calin just to take the first one
return res
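# --- Editor's hedged usage sketch (not part of the original file) ---
# One validate_results() call per decoded camera frame; a result is accepted
# once it has been seen strong_count (10) times. How QrCodeResult instances
# are produced is up to the reader backend, so the frames are left abstract.
def _demo_counted_validator(frames: List[List[QrCodeResult]]) -> None:
    validator = QrReaderValidatorCounted(minimum=1, maximum=1)
    for frame_results in frames:
        res = validator.validate_results(frame_results)
        if res.accepted:
            print('accepted:', res.simple_result)
            break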
|
wakiyamap/electrum-mona
|
electrum_mona/gui/qt/qrreader/qtmultimedia/validator.py
|
Python
|
mit
| 6,051
|
# flake8: noqa
"""
Django settings split into different files for better maintenance and
visibility.
"""
from settings.base_settings import *
from settings.installed_apps import *
from settings.staticfiles_settings import *
from settings.middleware_settings import *
from settings.django_settings import *
from settings.templates_settings import *
from settings.local.local_settings import *
|
bitmazk/webfaction-django-boilerplate
|
website/webapps/django/project/settings/__init__.py
|
Python
|
mit
| 392
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._everything import Everything
#-------------------------------------------------------------------------
#
# Everyone
#
#-------------------------------------------------------------------------
class AllNotes(Everything):
"""Matches every note"""
name = _('Every note')
description = _('Matches every note in the database')
|
Forage/Gramps
|
gramps/gen/filters/rules/note/_allnotes.py
|
Python
|
gpl-2.0
| 1,602
|
#
# Copyright 2009-2010 Goran Sterjov
# This file is part of Myelin.
#
# Myelin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Myelin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Myelin. If not, see <http://www.gnu.org/licenses/>.
#
import ctypes
from type import Type
# get library
import myelin.library
_lib = myelin.library.get_library()
_types = []
def add_type (klass):
_types.append (klass)
def get_type (type):
for klass in _types:
if klass._class.get_type().get_atom() == type.get_atom():
return klass
return None
def get_types ():
return _types
class Value (object):
def __init__ (self, ptr = None):
if ptr is None:
ptr = _lib.myelin_value_new ()
self._ptr = ptr
def __del__ (self):
_lib.myelin_value_unref (self)
def __repr__ (self):
return ("<%s.%s object at %#x with an instance of type %s at %#x>" %
(self.__module__,
self.__class__.__name__,
id(self),
self.get_type().get_name(),
self.as_pointer()))
@classmethod
def from_pointer (cls, ptr):
if ptr is None:
raise ValueError ("Value pointer cannot be 'None'")
instance = cls (ptr)
_lib.myelin_value_ref (instance)
return instance
def from_param (self):
return self._ptr
def get (self):
# empty value
if self.is_empty(): return None
# get value type
type = self.get_type()
atom = type.get_atom()
# convert value types
if not type.is_pointer() and not type.is_reference():
# fundamental types
if atom == Type.type_bool (): return self.get_bool ()
elif atom == Type.type_char (): return self.get_char ()
elif atom == Type.type_uchar (): return self.get_uchar ()
elif atom == Type.type_int (): return self.get_int ()
elif atom == Type.type_uint (): return self.get_uint ()
elif atom == Type.type_long (): return self.get_long ()
elif atom == Type.type_ulong (): return self.get_ulong ()
elif atom == Type.type_int64 (): return self.get_int64 ()
elif atom == Type.type_uint64 (): return self.get_uint64 ()
elif atom == Type.type_float (): return self.get_float ()
elif atom == Type.type_double (): return self.get_double ()
# elif atom == Type.type_string (): return self.get_string ()
# convert value to meta class instance
class_type = get_type (type)
if class_type is not None:
return class_type (instance = self)
        # don't know how to convert value so just return it as is
else:
return self
def set (self, value, atom = None):
from myelin.module import MetaObject
# convert python types
if type(value) is bool: self.set_bool (value)
# set the right integer type
elif type(value) is int or type(value) is long:
if atom is not None:
if atom == Type.type_char(): self.set_char (value)
elif atom == Type.type_uchar(): self.set_uchar (value)
elif atom == Type.type_int(): self.set_int (value)
elif atom == Type.type_uint(): self.set_uint (value)
elif atom == Type.type_long(): self.set_long (value)
elif atom == Type.type_ulong(): self.set_ulong (value)
# for long only
elif type(value) is long:
if atom == Type.type_int64(): self.set_int64 (value)
elif atom == Type.type_uint64(): self.set_uint64 (value)
else:
if type(value) is int: self.set_long (value)
else: self.set_int64 (value)
elif type(value) is float:
if atom is not None:
if atom == Type.type_float(): self.set_float (value)
elif atom == Type.type_double(): self.set_double (value)
else: self.set_double (value)
elif type(value) is str: self.set_string (value)
# set meta object instance
elif isinstance(value, MetaObject):
val = value._object.get_instance()
self.set_pointer (val.get_type(), val.as_pointer())
else:
raise TypeError ("Cannot determine an equivalent type for the " \
"value type '%s'. Conversion failed." %
type(value))
def get_type (self):
type = _lib.myelin_value_get_type (self)
return Type.from_pointer (type)
def is_empty (self):
return _lib.myelin_value_is_empty (self)
def clear (self):
_lib.myelin_value_clear (self)
def get_bool (self):
return _lib.myelin_value_get_bool (self)
def set_bool (self, value):
_lib.myelin_value_set_bool (self, value)
def get_char (self):
return _lib.myelin_value_get_char (self)
def set_char (self, value):
_lib.myelin_value_set_char (self, value)
def get_uchar (self):
return _lib.myelin_value_get_uchar (self)
def set_uchar (self, value):
_lib.myelin_value_set_uchar (self, value)
def get_int (self):
return _lib.myelin_value_get_int (self)
def set_int (self, value):
_lib.myelin_value_set_int (self, value)
def get_uint (self):
return _lib.myelin_value_get_uint (self)
def set_uint (self, value):
_lib.myelin_value_set_uint (self, value)
def get_long (self):
return _lib.myelin_value_get_long (self)
def set_long (self, value):
_lib.myelin_value_set_long (self, value)
def get_ulong (self):
return _lib.myelin_value_get_ulong (self)
def set_ulong (self, value):
_lib.myelin_value_set_ulong (self, value)
def get_int64 (self):
return _lib.myelin_value_get_int64 (self)
def set_int64 (self, value):
_lib.myelin_value_set_int64 (self, value)
def get_uint64 (self):
return _lib.myelin_value_get_uint64 (self)
def set_uint64 (self, value):
_lib.myelin_value_set_uint64 (self, value)
def get_float (self):
return _lib.myelin_value_get_float (self)
def set_float (self, value):
_lib.myelin_value_set_float (self, value)
def get_double (self):
return _lib.myelin_value_get_double (self)
def set_double (self, value):
_lib.myelin_value_set_double (self, value)
def get_string (self):
return _lib.myelin_value_get_string (self)
def set_string (self, value):
_lib.myelin_value_set_string (self, value)
def as_pointer (self):
return _lib.myelin_value_as_pointer (self)
def set_pointer (self, type, pointer):
_lib.myelin_value_set_pointer (self, type, pointer)
###############################################
# Prototypes #
###############################################
_lib.myelin_value_new.argtypes = None
_lib.myelin_value_new.restype = ctypes.c_void_p
_lib.myelin_value_ref.argtypes = [Value]
_lib.myelin_value_ref.restype = ctypes.c_void_p
_lib.myelin_value_unref.argtypes = [Value]
_lib.myelin_value_unref.restype = None
_lib.myelin_value_get_type.argtypes = [Value]
_lib.myelin_value_get_type.restype = ctypes.c_void_p
_lib.myelin_value_is_empty.argtypes = [Value]
_lib.myelin_value_is_empty.restype = ctypes.c_bool
_lib.myelin_value_clear.argtypes = [Value]
_lib.myelin_value_clear.restype = None
# boolean
_lib.myelin_value_get_bool.argtypes = [Value]
_lib.myelin_value_get_bool.restype = ctypes.c_bool
_lib.myelin_value_set_bool.argtypes = [Value, ctypes.c_bool]
_lib.myelin_value_set_bool.restype = None
# char
_lib.myelin_value_get_char.argtypes = [Value]
_lib.myelin_value_get_char.restype = ctypes.c_char
_lib.myelin_value_set_char.argtypes = [Value, ctypes.c_char]
_lib.myelin_value_set_char.restype = None
# uchar
_lib.myelin_value_get_uchar.argtypes = [Value]
_lib.myelin_value_get_uchar.restype = ctypes.c_ubyte
_lib.myelin_value_set_uchar.argtypes = [Value, ctypes.c_ubyte]
_lib.myelin_value_set_uchar.restype = None
# integer
_lib.myelin_value_get_int.argtypes = [Value]
_lib.myelin_value_get_int.restype = ctypes.c_int
_lib.myelin_value_set_int.argtypes = [Value, ctypes.c_int]
_lib.myelin_value_set_int.restype = None
# uint
_lib.myelin_value_get_uint.argtypes = [Value]
_lib.myelin_value_get_uint.restype = ctypes.c_uint  # was c_bool, a copy-paste bug that truncated the value
_lib.myelin_value_set_uint.argtypes = [Value, ctypes.c_uint]
_lib.myelin_value_set_uint.restype = None
# long
_lib.myelin_value_get_long.argtypes = [Value]
_lib.myelin_value_get_long.restype = ctypes.c_long
_lib.myelin_value_set_long.argtypes = [Value, ctypes.c_long]
_lib.myelin_value_set_long.restype = None
# ulong
_lib.myelin_value_get_ulong.argtypes = [Value]
_lib.myelin_value_get_ulong.restype = ctypes.c_ulong
_lib.myelin_value_set_ulong.argtypes = [Value, ctypes.c_ulong]
_lib.myelin_value_set_ulong.restype = None
# 64bit integer
_lib.myelin_value_get_int64.argtypes = [Value]
_lib.myelin_value_get_int64.restype = ctypes.c_int64
_lib.myelin_value_set_int64.argtypes = [Value, ctypes.c_int64]
_lib.myelin_value_set_int64.restype = None
# unsigned 64bit integer
_lib.myelin_value_get_uint64.argtypes = [Value]
_lib.myelin_value_get_uint64.restype = ctypes.c_uint64
_lib.myelin_value_set_uint64.argtypes = [Value, ctypes.c_uint64]
_lib.myelin_value_set_uint64.restype = None
# float
_lib.myelin_value_get_float.argtypes = [Value]
_lib.myelin_value_get_float.restype = ctypes.c_float
_lib.myelin_value_set_float.argtypes = [Value, ctypes.c_float]
_lib.myelin_value_set_float.restype = None
# double
_lib.myelin_value_get_double.argtypes = [Value]
_lib.myelin_value_get_double.restype = ctypes.c_double
_lib.myelin_value_set_double.argtypes = [Value, ctypes.c_double]
_lib.myelin_value_set_double.restype = None
# string
_lib.myelin_value_get_string.argtypes = [Value]
_lib.myelin_value_get_string.restype = ctypes.c_char_p
_lib.myelin_value_set_string.argtypes = [Value, ctypes.c_char_p]
_lib.myelin_value_set_string.restype = None
# pointer
_lib.myelin_value_as_pointer.argtypes = [Value]
_lib.myelin_value_as_pointer.restype = ctypes.c_void_p
_lib.myelin_value_set_pointer.argtypes = [Value, Type, ctypes.c_void_p]
_lib.myelin_value_set_pointer.restype = None
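# --- Editor's hedged usage sketch (not part of the original file) ---
# Round-trips an int through the C library; assumes the Myelin shared
# library was found by myelin.library.get_library() above.
def _demo_value_roundtrip():
    v = Value()          # fresh value from myelin_value_new
    v.set_int(42)
    print(v.get_int())   # expected: 42
    v.clear()
    print(v.is_empty())  # expected: True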
|
gsterjov/Myelin
|
bindings/python/myelin/introspection/value.py
|
Python
|
gpl-3.0
| 11,453
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Roboterclub Aachen e.V.
# All Rights Reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
from logger import Logger  # assumed: xpcc's tools ship a Logger helper; this import was missing
class ParameterDB:
""" Parameter Data Base
Manages Parameters
"""
def __init__(self, userParameters=None, logger=None):
        if logger is None:
            self.log = Logger()
        else:
            self.log = logger
self._parameters = []
self.addUserParameters(userParameters)
def addUserParameters(self, items):
"""
        Parses and adds items from the [parameters] section in `project.cfg`
"""
if items is None:
return
for item in items:
p = UserParameter.fromUserConfigItem(item, self.log)
if p is not None:
self._parameters.append(p)
def addDriverParameter(self, param):
"""
"""
self.log.error("Please implement ParameterDB.addDriverParameter")
def getParametersForDriver(self, driver):
parameters = []
for param in self._parameters:
if param.driver_type == driver.type and param.driver_name == driver.name:
parameters.append({'name': param.name, 'value': param.value, 'instance': param.driver_instance})
return parameters
@classmethod
    def fromDictionary(cls, dictionary, logger=None):
"""fromDictionary
"""
p = ParameterDB(userParameters=None, logger=logger)
for param in dictionary['_parameters']:
p._parameters.append(UserParameter.fromDictionary(param, p.log))
return p
def toDictionary(self):
""" Serializes this object into a dictionary that can be used for
dependency tracking with scons.
"""
param_dicts = []
for p in self._parameters:
param_dicts.append(p.toDictionary())
return { '_parameters': param_dicts }
class UserParameter:
""" UserParameter
Represents a parameter declared by the user...
"""
def __init__(self, logger=None):
        if logger is None:
            self.log = Logger()
        else:
            self.log = logger
# initialize all members to None
self.driver_type = None
self.driver_name = None
self.driver_instance = None
self.level = None
self.name = None
self.value = None
@classmethod
    def fromUserConfigItem(cls, user_item, logger=None):
"""fromUserConfigString
"""
incorrect_string_msg = ("Incorrect parameter config line!\n"
"Valid inputs are:\n"
"\tdriver_type.parameter_name = value\n"
"\tdriver_type.driver_instance.parameter_name = value\n"
"\tdriver_type.driver_name.parameter_name = value\n"
"\tdriver_type.driver_name.driver_instance.parameter_name = value")
# Create Parameter
p = UserParameter(logger)
# Sanity check user input
parts = user_item[0].split('.')
if len(parts) not in [2,3,4]:
p.log.error(incorrect_string_msg)
return None
p.driver_type = parts[0]
        p.name = parts[-1]
if len(parts) == 3:
if parts[1].isdigit():
p.driver_instance = int(parts[1])
else:
p.driver_name = parts[1]
elif len(parts) == 4:
p.driver_name = parts[1]
p.driver_instance = int(parts[2])
p.value = user_item[1]
p.level = 'user' # this parameter comes directly from the user
return p
@classmethod
    def fromDictionary(cls, dictionary, logger=None):
"""fromDictionary
"""
p = UserParameter(logger)
p.driver_type = dictionary['driver_type']
p.driver_name = dictionary['driver_name']
p.driver_instance = dictionary['driver_instance']
p.level = dictionary['level']
p.name = dictionary['name']
p.value = dictionary['value']
return p
def toDictionary(self):
""" Serializes this object into a dictionary that can be used for
dependency tracking with scons.
"""
return {'driver_type': self.driver_type,
'driver_name': self.driver_name,
'driver_instance': self.driver_instance,
'level': self.level,
'name': self.name,
'value': self.value}
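# --- Editor's hedged usage sketch (not part of the original file) ---
# Parses one [parameters] item as it would appear in project.cfg; the items
# arrive as (key, value) pairs. "1" is numeric, so it is read as the driver
# instance rather than a driver name. Assumes the Logger import above works.
def _demo_user_parameter():
    p = UserParameter.fromUserConfigItem(('uart.1.tx_buffer', '16'))
    assert p.driver_type == 'uart'
    assert p.driver_instance == 1
    assert p.name == 'tx_buffer' and p.value == '16'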
|
dergraaf/xpcc
|
tools/device_files/parameters.py
|
Python
|
bsd-3-clause
| 3,905
|
# -*- coding: utf-8 -*-
"""
This module solves a linear system by Gaussian elimination with pivoting.
Almost a copy of Mike Zingale's code, spring 2013.
"""
import numpy as npy
import os
def gaussElim(A, b):
""" perform gaussian elimination with pivoting, solving A x = b A
is an NxN matrix, x and b are an N-element vectors. Note: A
and b are changed upon exit to be in upper triangular (row
echelon) form """
# b is a vector
if not b.ndim == 1:
print("ERROR: b should be a vector")
return None
N = len(b)
# A is square, with each dimension of length N
if not (A.shape[0] == N and A.shape[1] == N):
print("ERROR: A should be square with each dim of same length as b")
return None
    # allocate the solution array
x = npy.zeros((N), dtype=A.dtype)
# find the scale factors for each row -- this is used when pivoting
scales = npy.max(npy.abs(A), 1)
# keep track of the number of times we swapped rows
numRowSwap = 0
# main loop over rows
for k in range(N):
# find the pivot row based on the size of column k -- only consider
# the rows beyond the current row
rowMax = npy.argmax(A[k:, k] / scales[k:])
if (k > 0):
rowMax += k # we sliced A from k:, correct for total rows
# swap the row with the largest scaled element in the current column
# with the current row (pivot) -- do this with b too!
if not rowMax == k:
A[[k, rowMax], :] = A[[rowMax, k], :]
b[[k, rowMax]] = b[[rowMax, k]]
numRowSwap += 1
# do the forward-elimination for all rows below the current
for i in range(k + 1, N):
coeff = A[i, k] / A[k, k]
for j in range(k + 1, N):
A[i, j] += -A[k, j] * coeff
A[i, k] = 0.0
b[i] += -coeff * b[k]
# last solution is easy
x[N - 1] = b[N - 1] / A[N - 1, N - 1]
for i in reversed(range(N - 1)):
isum = b[i]
for j in range(i + 1, N):
isum += -A[i, j] * x[j]
x[i] = isum / A[i, i]
return x
os.system("pause")
|
NicovincX2/Python-3.5
|
Analyse (mathématiques)/Analyse numérique/Conditionnement/gaussElimination.py
|
Python
|
gpl-3.0
| 2,193
|
import numpy as np
from pystella.rf import Band
from pystella.rf.rad_func import Flux2MagAB
from pystella.util.phys_var import phys
__author__ = 'bakl'
class Star:
def __init__(self, name, spec=None, is_flux_eq_luminosity=False):
"""Creates a Star with Spectrum instance. Required parameters: name."""
self._name = name
self._sp = spec
self.is_flux_eq_luminosity = is_flux_eq_luminosity
self.radius_ph = None
self._z = None
self._magnification = 1.
self.distance = None
self.Tcol = {}
self.zeta = {}
def set_radius_ph(self, radius):
self.radius_ph = radius
def set_distance(self, distance):
"""
Set distance to the star [cm]
:param distance:
"""
self.distance = distance
def set_redshift(self, z): # shift spectrum to rest frame
self._z = z
    def set_magnification(self, m):  # magnification (e.g. lensing) factor applied to the flux
self._magnification = m
def set_Tcol(self, Tcol, bset):
self.Tcol[bset] = Tcol
def get_Tcol(self, bset):
if bset in self.Tcol:
return self.Tcol[bset]
return None
def set_zeta(self, zeta, bset):
self.zeta[bset] = zeta
def get_zeta(self, bset):
if bset in self.zeta:
return self.zeta[bset]
return None
@property
def Name(self):
return self._name
@property
def z(self):
return self._z
@property
def IsRedshift(self):
return self.z is not None and self.z > 0.
@property
def IsRadius(self):
return self.radius_ph is not None
@property
def IsDistance(self):
return self.distance is not None
@property
def IsRadiusDist(self):
return self.IsRadius and self.IsDistance
@property
def Freq(self):
if self._sp is None:
raise ValueError("Spectrum has not been defined. ")
if self.IsRedshift:
return self._sp.Freq / (1. + self.z) # redshift the flux
else:
return self._sp.Freq
@property
def Wl(self):
if self._sp is None:
raise ValueError("Spectrum has not been defined. ")
return phys.c / self.Freq
@property
def Flux(self):
if self._sp is None:
raise ValueError("Spectrum has not been defined. ")
flux = self._sp.Flux * self._magnification
if self.IsRedshift:
return Star.flux_to_redshift(flux, self.z)
else:
return flux
@property
def Flux_wl(self):
        return self.Flux * self.Freq ** 2 / phys.c  # flux [erg/cm^2/cm]
@property
def Luminosity(self):
if self.is_flux_eq_luminosity:
return self.Flux
if self.radius_ph is None:
raise ValueError("Photospheric radius has not been defined. ")
return 4. * np.pi * self.radius_ph ** 2 * self.Flux
@property
def FluxObs(self):
if self.IsRadiusDist:
return self.Luminosity / (4 * np.pi * self.distance ** 2)
elif self.IsDistance:
return self.Flux / (4 * np.pi * self.distance ** 2)
# return self.Flux / (4 * np.pi * self.distance ** 2)
else:
return self.Flux
@property
def FluxWlObs(self):
if self.IsRadiusDist:
if self.is_flux_eq_luminosity:
return self.Flux_wl / (4 * np.pi * self.distance ** 2)
else:
return self.Flux_wl * (self.radius_ph / self.distance) ** 2
elif self.IsDistance:
return self.Flux_wl / (4 * np.pi * self.distance ** 2)
else:
return self.Flux_wl
@property
def FluxAB(self):
return -2.5 * np.log10(self.FluxObs) + phys.ZP_AB
def _response_lmb(self, band, is_b_spline=True):
"""
Compute response flux using provided spectral band
:param band: photometric band
:param is_b_spline: the method of interpolation
:return: :raise ValueError:
"""
from scipy import integrate
from scipy import interpolate
wl = self.Wl
if min(wl) > band.wl[0] or max(wl) < band.wl[-1]:
raise ValueError("Spectrum must be wider then band: " + str(band))
flux = self.FluxWlObs / phys.cm_to_angs # to flux [erg/cm^2/A) ]
wl_s = wl * phys.cm_to_angs
wl_b = band.wl * phys.cm_to_angs
if is_b_spline:
tck = interpolate.splrep(wl_s, flux, s=0)
flux_spline = interpolate.splev(wl_b, tck, der=0)
else:
flux_spline = np.interp(wl_b, wl_s, flux, 0, 0) # One-dimensional linear interpolation.
a = integrate.simps(flux_spline * band.resp_wl * wl_b, wl_b) / (phys.c * phys.cm_to_angs) / phys.h
return a
def magAB(self, b, kind='spline'): # kind='spline' log line
response = Band.response_nu(self.Freq, self.FluxObs, b)
if response <= 0:
raise ValueError("Spectrum should be more 0: %f" % response)
# mag = -2.5 * np.log10(conv) + phys.ZP_AB - band.zp
mag = Flux2MagAB(response / b.Norm) - b.zp
# print('mag= ', mag)
return mag
#
# # todo check
# response1 = b.response(self.Wl, self.FluxWlObs, kind=kind, is_out2zero=True, is_photons=True) #
# if response1 <= 0:
# raise ValueError("The Response of Spectrum should be > 0: %f" % response1)
#
# # norm = b.response(b.wl, np.ones(len(b.wl)), kind='spline')
# mag1 = -2.5 * np.log10(response1 / b.NormWl) - 21.1 - b.zp
# # mag1 = -2.5 * np.log10(response1 ) - 21.1 - b.zp
# return mag1
def magBol(self, b, kind='spline'): # kind='spline' log line
"""
Bolometric magnitude via Luminosity of Sun
:return:
"""
lum = Band.response_nu(self.Freq, self.Flux, b, is_freq_norm=False)
M = phys.Mag_sun + 5. * np.log10(self.distance/phys.pc) - 5
bol = M - 2.5 * np.log10(np.abs(lum) / phys.L_sun)
# print('bol= ', bol)
return bol
def magBolOld(self):
"""
Bolometric magnitude via Luminosity of Sun
:return:
"""
from scipy.integrate import simps
lum = simps(self.Flux[::-1], self.Freq[::-1])
# lum = np.trapz(self.Flux[::-1], self.Freq[::-1])
M = phys.Mag_sun + 5. * np.log10(self.distance/phys.pc) - 5
bol = M - 2.5 * np.log10(np.abs(lum) / phys.L_sun)
# print('bol= ', bol)
return bol
def k_cor(self, band_r, band_o, z=0.):
"""
Compute K-correction for observed and rest-frame bands.
Args:
band_r: Rest-frame band.
band_o: Observed band.
z: redshift
Returns:
* K: K-correction
* If failed return None
"""
        # todo: make k-correction with b-splines
if z > 0:
self.set_redshift(z)
z_o = z
if self.IsRedshift:
z_o = self.z
self.set_redshift(0.)
resp_0 = self._response_lmb(band_r, is_b_spline=False)
self.set_redshift(z_o)
resp_z = self._response_lmb(band_o, is_b_spline=False)
if resp_0 < 0 or resp_z <= 0:
return None
else:
kcor = -2.5 * np.log10(resp_z / resp_0 / (1 + z_o)) + band_r.zp - band_o.zp
return kcor
@staticmethod
def flux_to_redshift(flux, z):
if z <= 0.:
return flux
flux_z = flux * (1.+z)
# flux_z = flux / (1.+z)
# flux_z = flux
# flux_z = np.interp(freq / (1. + z), freq[::-1], flux[::-1])
return flux_z
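# --- Editor's hedged usage sketch (not part of the original file) ---
# flux_to_redshift is pure, so it can be checked in isolation: for z > 0 the
# flux is scaled by (1 + z); otherwise it is returned unchanged.
def _demo_flux_to_redshift():
    flux = np.array([1.0, 2.0])
    assert np.allclose(Star.flux_to_redshift(flux, 0.5), flux * 1.5)
    assert np.allclose(Star.flux_to_redshift(flux, 0.0), flux)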
|
baklanovp/pystella
|
pystella/rf/star.py
|
Python
|
mit
| 7,789
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example downloads HTML Tags for a given campaign and placement ID.
To create campaigns, run create_campaign.py. To create placements, run
create_placement.py.
"""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to download tags for')
argparser.add_argument(
'campaign_id', type=int,
help='The ID of the campaign to download tags for')
argparser.add_argument(
'placement_id', type=int,
help='The ID of the placement to download tags for')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.2', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
campaign_id = flags.campaign_id
placement_id = flags.placement_id
try:
# Construct the request.
request = service.placements().generatetags(
profileId=profile_id, campaignId=campaign_id,
placementIds=[placement_id])
# Execute request and print response.
response = request.execute()
for placement_tag in response['placementTags']:
print_placement_tag(placement_tag)
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
def print_placement_tag(placement_tag):
for tag_data in placement_tag['tagDatas']:
print ('%s - %s\n'
% (placement_tag['placementId'], tag_data['format']))
if 'impressionTag' in tag_data:
print '%s\n\n' % (tag_data['impressionTag'])
if 'clickTag' in tag_data:
print '%s\n\n' % (tag_data['clickTag'])
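# --- Editor's hedged usage sketch (not part of the original file) ---
# print_placement_tag consumes one entry of response['placementTags']; the
# dict below is a plausible shape for illustration, not taken verbatim from
# the API reference.
def _demo_print_placement_tag():
    print_placement_tag({
        'placementId': '12345',
        'tagDatas': [{
            'format': 'PLACEMENT_TAG_STANDARD',
            'impressionTag': '<img src="https://ad.example/imp"/>',
        }],
    })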
if __name__ == '__main__':
main(sys.argv)
|
falbassini/googleads-dfa-reporting-samples
|
python/v2.2/download_placement_tags.py
|
Python
|
apache-2.0
| 2,591
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2016 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: neorun.py <cmd=arg>
--start=path/to/neo4j/home <cmd> [arg]
: start the neo4j server in the folder specified by the path
-v version : download the version provided if no neo4j detected
-n neo4j-version: download this specific neo4j enterprise nightly version from teamcity with basic access auth
-l download-url : download the neo4j provided by this url if no neo4j found
-p new-password : change the default password to this new password
--stop=path/to/neo4j/home : stop a neo4j server
-h : show this help message
Example: neorun.py -h
neorun.py --start=path/to/neo4j/home -v 3.0.1 -p TOUFU
neorun.py --start=path/to/neo4j/home -n 3.0 -p TOUFU
neorun.py --start=path/to/neo4j/home -n 3.1
neorun.py --stop=path/to/neo4j/home
"""
import getopt
from sys import argv, stdout, exit
from neoget import neo4j_default_archive, neo4j_archive, download
from neoctl import neo4j_start, neo4j_stop, neo4j_update_default_password
from os import path, rename, getenv
import socket
from time import time, sleep, strftime
KNOWN_HOST = path.join(path.expanduser("~"), ".neo4j", "known_hosts")
NEORUN_START_ARGS_NAME = "NEORUN_START_ARGS"
class Enum(set):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
ServerStatus = Enum(["STARTED", "STOPPED" ])
def main():
if len(argv) <= 1:
print_help()
exit(2)
try:
opts, args = getopt.getopt(argv[1:], "hv:n:l:p:", ["start=", "stop="])
except getopt.GetoptError as err:
print(str(err))
print_help()
exit(2)
else:
exit_code = 0
for opt, arg in opts:
if opt == '-h':
print_help()
exit(2)
if opt == "--start":
neo4j_home = path.abspath(arg)
if neo4j_status() == ServerStatus.STARTED:
stdout.write("Failed to start neo4j as a neo4j server is already running on this machine.\n")
exit(2)
# get the opts from env
env = getenv(NEORUN_START_ARGS_NAME)
if env:
stdout.write("WARNING: using env var `NEORUN_START_ARGS=%s`\n" % env)
try:
start_opts, start_args = getopt.getopt(env.split(), "v:n:l:p:")
except getopt.GetoptError as err:
print(str(err))
print_help()
exit(2)
else:
start_opts = opts
# parse the opts under --start
archive_url, archive_name, require_basic_auth = neo4j_default_archive()
password = ''
for start_opt, start_arg in start_opts:
if start_opt == "-p":
password = start_arg
elif start_opt in ['-v', '-n', '-l']:
archive_url, archive_name, require_basic_auth = neo4j_archive(start_opt, start_arg)
exit_code = handle_start(archive_url, archive_name, neo4j_home, require_basic_auth)
if exit_code == 0 and password:
exit_code = neo4j_update_default_password("localhost", 7474, new_password=password) or 0
elif opt == "--stop":
if neo4j_status() == ServerStatus.STOPPED:
stdout.write("Failed to stop server as no neo4j server is running on this machine.\n")
exit(2)
exit_code = neo4j_stop(neo4j_home=arg) or test_neo4j_status(ServerStatus.STOPPED) or 0
if exit_code != 0:
break
exit(exit_code)
def handle_start(archive_url, archive_name, neo4j_home, require_basic_auth):
if not path.exists(neo4j_home):
        folder_name = download(archive_url, archive_name, path.dirname(neo4j_home), require_basic_auth)
if not path.exists(neo4j_home):
            # the untarred name differs from what the user gave
rename(path.join(path.dirname(neo4j_home), folder_name), neo4j_home)
if path.exists(KNOWN_HOST):
known_host_backup_name = KNOWN_HOST + strftime("%Y%m%d-%H%M%S") + ".backup"
stdout.write("Found an existing known_host file, renaming it to %s.\n" % (known_host_backup_name))
rename(KNOWN_HOST, known_host_backup_name)
exit_code = neo4j_start(neo4j_home) or 0
if exit_code == 0:
exit_code = test_neo4j_status()
return exit_code
# Test if the neo4j server is started (status = STARTED)
# or if the neo4j server is stopped (status = STOPPED) within 4 mins.
# Return 0 if the test success, otherwise 1
def test_neo4j_status(status = ServerStatus.STARTED):
success = False
start_time = time()
timeout = 60 * 4 # in seconds
count = 0
while not success:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
actual_status = s.connect_ex(("localhost", 7474))
if status == ServerStatus.STARTED:
success = True if actual_status == 0 else False
else:
success = True if actual_status != 0 else False
s.close()
current_time = time()
if current_time - start_time > timeout:
            # failed to reach the expected status within the timeout
            stdout.write("Server did not reach status %s within 4 mins\n" % status)
return 1
count += 1
if count % 10 == 0:
stdout.write(".") # print .... to indicate working on it
sleep(0.1) # sleep for 100ms
    # server reached the expected status
stdout.write("\n")
return 0
def neo4j_status():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = ServerStatus.STARTED if s.connect_ex(("localhost", 7474)) == 0 else ServerStatus.STOPPED
s.close()
return server_status
def print_help():
print(__doc__)
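# --- Editor's hedged usage sketch (not part of the original file) ---
# Enum above is just a set whose members read back as attribute strings;
# this is how ServerStatus.STARTED / ServerStatus.STOPPED behave in main().
def _demo_enum():
    colors = Enum(["RED", "GREEN"])
    assert colors.RED == "RED"
    assert "GREEN" in colors
    try:
        colors.BLUE  # unknown member
    except AttributeError:
        pass  # typos in status names fail loudly instead of silently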
if __name__ == "__main__":
main()
|
mjbradburn/masters_project
|
node_modules/neo4j-driver/neokit/neorun.py
|
Python
|
apache-2.0
| 6,800
|
# tempfile.py unit tests.
import tempfile
import os
import sys
import re
import warnings
import unittest
from test import test_support
warnings.filterwarnings("ignore",
category=RuntimeWarning,
message="mktemp", module=__name__)
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform in ('openbsd3', 'openbsd4'):
TEST_FILES = 48
else:
TEST_FILES = 100
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
# Common functionality.
class TC(unittest.TestCase):
str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
def failOnException(self, what, ei=None):
if ei is None:
ei = sys.exc_info()
self.fail("%s raised %s: %s" % (what, ei[0], ei[1]))
def nameCheck(self, name, dir, pre, suf):
(ndir, nbase) = os.path.split(name)
npre = nbase[:len(pre)]
nsuf = nbase[len(nbase)-len(suf):]
# check for equality of the absolute paths!
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
"file '%s' not in directory '%s'" % (name, dir))
self.assertEqual(npre, pre,
"file '%s' does not begin with '%s'" % (nbase, pre))
self.assertEqual(nsuf, suf,
"file '%s' does not end with '%s'" % (nbase, suf))
nbase = nbase[len(pre):len(nbase)-len(suf)]
self.assertTrue(self.str_check.match(nbase),
"random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
% nbase)
test_classes = []
class test_exports(TC):
def test_exports(self):
# There are no surprising symbols in the tempfile module
dict = tempfile.__dict__
expected = {
"NamedTemporaryFile" : 1,
"TemporaryFile" : 1,
"mkstemp" : 1,
"mkdtemp" : 1,
"mktemp" : 1,
"TMP_MAX" : 1,
"gettempprefix" : 1,
"gettempdir" : 1,
"tempdir" : 1,
"template" : 1,
"SpooledTemporaryFile" : 1
}
unexp = []
for key in dict:
if key[0] != '_' and key not in expected:
unexp.append(key)
self.assertTrue(len(unexp) == 0,
"unexpected keys: %s" % unexp)
test_classes.append(test_exports)
class test__RandomNameSequence(TC):
"""Test the internal iterator object _RandomNameSequence."""
def setUp(self):
self.r = tempfile._RandomNameSequence()
def test_get_six_char_str(self):
# _RandomNameSequence returns a six-character string
s = self.r.next()
self.nameCheck(s, '', '', '')
def test_many(self):
# _RandomNameSequence returns no duplicate strings (stochastic)
dict = {}
r = self.r
for i in xrange(TEST_FILES):
s = r.next()
self.nameCheck(s, '', '', '')
self.assertNotIn(s, dict)
dict[s] = 1
def test_supports_iter(self):
# _RandomNameSequence supports the iterator protocol
i = 0
r = self.r
try:
for s in r:
i += 1
if i == 20:
break
except:
self.failOnException("iteration")
test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
"""Test the internal function _candidate_tempdir_list."""
def test_nonempty_list(self):
# _candidate_tempdir_list returns a nonempty list of strings
cand = tempfile._candidate_tempdir_list()
self.assertFalse(len(cand) == 0)
for c in cand:
self.assertIsInstance(c, basestring)
def test_wanted_dirs(self):
# _candidate_tempdir_list contains the expected directories
# Make sure the interesting environment variables are all set.
with test_support.EnvironmentVarGuard() as env:
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname:
env[envname] = os.path.abspath(envname)
cand = tempfile._candidate_tempdir_list()
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname: raise ValueError
self.assertIn(dirname, cand)
try:
dirname = os.getcwd()
except (AttributeError, os.error):
dirname = os.curdir
self.assertIn(dirname, cand)
# Not practical to try to verify the presence of OS-specific
# paths in this list.
test_classes.append(test__candidate_tempdir_list)
# We test _get_default_tempdir by testing gettempdir.
class test__get_candidate_names(TC):
"""Test the internal function _get_candidate_names."""
def test_retval(self):
# _get_candidate_names returns a _RandomNameSequence object
obj = tempfile._get_candidate_names()
self.assertIsInstance(obj, tempfile._RandomNameSequence)
def test_same_thing(self):
# _get_candidate_names always returns the same object
a = tempfile._get_candidate_names()
b = tempfile._get_candidate_names()
self.assertTrue(a is b)
test_classes.append(test__get_candidate_names)
class test__mkstemp_inner(TC):
"""Test the internal function _mkstemp_inner."""
class mkstemped:
_bflags = tempfile._bin_openflags
_tflags = tempfile._text_openflags
_close = os.close
_unlink = os.unlink
def __init__(self, dir, pre, suf, bin):
if bin: flags = self._bflags
else: flags = self._tflags
(self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
def write(self, str):
os.write(self.fd, str)
def __del__(self):
self._close(self.fd)
self._unlink(self.name)
def do_create(self, dir=None, pre="", suf="", bin=1):
if dir is None:
dir = tempfile.gettempdir()
try:
file = self.mkstemped(dir, pre, suf, bin)
except:
self.failOnException("_mkstemp_inner")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# _mkstemp_inner can create files
self.do_create().write("blat")
self.do_create(pre="a").write("blat")
self.do_create(suf="b").write("blat")
self.do_create(pre="a", suf="b").write("blat")
self.do_create(pre="aa", suf=".txt").write("blat")
def test_basic_many(self):
# _mkstemp_inner can create many files (stochastic)
extant = range(TEST_FILES)
for i in extant:
extant[i] = self.do_create(pre="aa")
def test_choose_directory(self):
# _mkstemp_inner can create files in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir).write("blat")
finally:
os.rmdir(dir)
def test_file_mode(self):
# _mkstemp_inner creates files with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
file = self.do_create()
mode = stat.S_IMODE(os.stat(file.name).st_mode)
expected = 0600
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
def test_noinherit(self):
# _mkstemp_inner file handles are not inherited by child processes
if not has_spawnl:
return # ugh, can't use SkipTest.
if test_support.verbose:
v="v"
else:
v="q"
file = self.do_create()
fd = "%d" % file.fd
try:
me = __file__
except NameError:
me = sys.argv[0]
# We have to exec something, so that FD_CLOEXEC will take
# effect. The core of this test is therefore in
# tf_inherit_check.py, which see.
tester = os.path.join(os.path.dirname(os.path.abspath(me)),
"tf_inherit_check.py")
# On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
# but an arg with embedded spaces should be decorated with double
# quotes on each end
if sys.platform in ('win32',):
decorated = '"%s"' % sys.executable
tester = '"%s"' % tester
else:
decorated = sys.executable
retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
self.assertFalse(retval < 0,
"child process caught fatal signal %d" % -retval)
self.assertFalse(retval > 0, "child process reports failure %d"%retval)
def test_textmode(self):
# _mkstemp_inner can create files in text mode
if not has_textmode:
return # ugh, can't use SkipTest.
self.do_create(bin=0).write("blat\n")
# XXX should test that the file really is a text file
test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
"""Test gettempprefix()."""
def test_sane_template(self):
# gettempprefix returns a nonempty prefix string
p = tempfile.gettempprefix()
self.assertIsInstance(p, basestring)
self.assertTrue(len(p) > 0)
def test_usable_template(self):
# gettempprefix returns a usable prefix string
# Create a temp directory, avoiding use of the prefix.
# Then attempt to create a file whose name is
# prefix + 'xxxxxx.xxx' in that directory.
p = tempfile.gettempprefix() + "xxxxxx.xxx"
d = tempfile.mkdtemp(prefix="")
try:
p = os.path.join(d, p)
try:
fd = os.open(p, os.O_RDWR | os.O_CREAT)
except:
self.failOnException("os.open")
os.close(fd)
os.unlink(p)
finally:
os.rmdir(d)
test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
"""Test gettempdir()."""
def test_directory_exists(self):
# gettempdir returns a directory which exists
dir = tempfile.gettempdir()
self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
"%s is not an absolute path" % dir)
self.assertTrue(os.path.isdir(dir),
"%s is not a directory" % dir)
def test_directory_writable(self):
# gettempdir returns a directory writable by the user
# sneaky: just instantiate a NamedTemporaryFile, which
# defaults to writing into the directory returned by
# gettempdir.
try:
file = tempfile.NamedTemporaryFile()
file.write("blat")
file.close()
except:
self.failOnException("create file in %s" % tempfile.gettempdir())
def test_same_thing(self):
# gettempdir always returns the same object
a = tempfile.gettempdir()
b = tempfile.gettempdir()
self.assertTrue(a is b)
test_classes.append(test_gettempdir)
class test_mkstemp(TC):
"""Test mkstemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
(fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
(ndir, nbase) = os.path.split(name)
adir = os.path.abspath(dir)
self.assertEqual(adir, ndir,
"Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
except:
self.failOnException("mkstemp")
try:
self.nameCheck(name, dir, pre, suf)
finally:
os.close(fd)
os.unlink(name)
def test_basic(self):
# mkstemp can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
self.do_create(dir=".")
def test_choose_directory(self):
# mkstemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir)
finally:
os.rmdir(dir)
test_classes.append(test_mkstemp)
class test_mkdtemp(TC):
"""Test mkdtemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("mkdtemp")
try:
self.nameCheck(name, dir, pre, suf)
return name
except:
os.rmdir(name)
raise
def test_basic(self):
# mkdtemp can create directories
os.rmdir(self.do_create())
os.rmdir(self.do_create(pre="a"))
os.rmdir(self.do_create(suf="b"))
os.rmdir(self.do_create(pre="a", suf="b"))
os.rmdir(self.do_create(pre="aa", suf=".txt"))
def test_basic_many(self):
# mkdtemp can create many directories (stochastic)
extant = range(TEST_FILES)
try:
for i in extant:
extant[i] = self.do_create(pre="aa")
finally:
for i in extant:
                if isinstance(i, basestring):
os.rmdir(i)
def test_choose_directory(self):
# mkdtemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
os.rmdir(self.do_create(dir=dir))
finally:
os.rmdir(dir)
def test_mode(self):
# mkdtemp creates directories with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
dir = self.do_create()
try:
mode = stat.S_IMODE(os.stat(dir).st_mode)
mode &= 0777 # Mask off sticky bits inherited from /tmp
expected = 0700
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
finally:
os.rmdir(dir)
test_classes.append(test_mkdtemp)
class test_mktemp(TC):
"""Test mktemp()."""
# For safety, all use of mktemp must occur in a private directory.
# We must also suppress the RuntimeWarning it generates.
def setUp(self):
self.dir = tempfile.mkdtemp()
def tearDown(self):
if self.dir:
os.rmdir(self.dir)
self.dir = None
class mktemped:
_unlink = os.unlink
_bflags = tempfile._bin_openflags
def __init__(self, dir, pre, suf):
self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
# Create the file. This will raise an exception if it's
# mysteriously appeared in the meanwhile.
os.close(os.open(self.name, self._bflags, 0600))
def __del__(self):
self._unlink(self.name)
def do_create(self, pre="", suf=""):
try:
file = self.mktemped(self.dir, pre, suf)
except:
self.failOnException("mktemp")
self.nameCheck(file.name, self.dir, pre, suf)
return file
def test_basic(self):
# mktemp can choose usable file names
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_many(self):
# mktemp can choose many usable file names (stochastic)
extant = range(TEST_FILES)
for i in extant:
extant[i] = self.do_create(pre="aa")
## def test_warning(self):
## # mktemp issues a warning when used
## warnings.filterwarnings("error",
## category=RuntimeWarning,
## message="mktemp")
## self.assertRaises(RuntimeWarning,
## tempfile.mktemp, dir=self.dir)
test_classes.append(test_mktemp)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class test_NamedTemporaryFile(TC):
"""Test NamedTemporaryFile()."""
def do_create(self, dir=None, pre="", suf="", delete=True):
if dir is None:
dir = tempfile.gettempdir()
try:
file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
delete=delete)
except:
self.failOnException("NamedTemporaryFile")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# NamedTemporaryFile can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_creates_named(self):
# NamedTemporaryFile creates files with names
f = tempfile.NamedTemporaryFile()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s does not exist" % f.name)
def test_del_on_close(self):
# A NamedTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.NamedTemporaryFile(dir=dir)
f.write('blat')
f.close()
self.assertFalse(os.path.exists(f.name),
"NamedTemporaryFile %s exists after close" % f.name)
finally:
os.rmdir(dir)
def test_dis_del_on_close(self):
# Tests that delete-on-close can be disabled
dir = tempfile.mkdtemp()
tmp = None
try:
f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
tmp = f.name
f.write('blat')
f.close()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s missing after close" % f.name)
finally:
if tmp is not None:
os.unlink(tmp)
os.rmdir(dir)
def test_multiple_close(self):
# A NamedTemporaryFile can be closed many times without error
f = tempfile.NamedTemporaryFile()
f.write('abc\n')
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_context_manager(self):
# A NamedTemporaryFile can be used as a context manager
with tempfile.NamedTemporaryFile() as f:
self.assertTrue(os.path.exists(f.name))
self.assertFalse(os.path.exists(f.name))
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
# How to test the mode and bufsize parameters?
test_classes.append(test_NamedTemporaryFile)
class test_SpooledTemporaryFile(TC):
"""Test SpooledTemporaryFile()."""
def do_create(self, max_size=0, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("SpooledTemporaryFile")
return file
def test_basic(self):
# SpooledTemporaryFile can create files
f = self.do_create()
self.assertFalse(f._rolled)
f = self.do_create(max_size=100, pre="a", suf=".txt")
self.assertFalse(f._rolled)
def test_del_on_close(self):
# A SpooledTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
self.assertFalse(f._rolled)
f.write('blat ' * 5)
self.assertTrue(f._rolled)
filename = f.name
f.close()
self.assertFalse(os.path.exists(filename),
"SpooledTemporaryFile %s exists after close" % filename)
finally:
os.rmdir(dir)
def test_rewrite_small(self):
        # A SpooledTemporaryFile can be written to multiple times within max_size
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
for i in range(5):
f.seek(0, 0)
f.write('x' * 20)
self.assertFalse(f._rolled)
def test_write_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.write('x' * 20)
self.assertFalse(f._rolled)
f.write('x' * 10)
self.assertFalse(f._rolled)
f.write('x')
self.assertTrue(f._rolled)
def test_writelines(self):
# Verify writelines with a SpooledTemporaryFile
f = self.do_create()
f.writelines((b'x', b'y', b'z'))
f.seek(0)
buf = f.read()
self.assertEqual(buf, b'xyz')
def test_writelines_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=35)
f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_sparse(self):
        # Seeking past max_size and writing there rolls the file over, even
        # though the earlier bytes were never written
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.seek(100, 0)
self.assertFalse(f._rolled)
f.write('x')
self.assertTrue(f._rolled)
def test_fileno(self):
# A SpooledTemporaryFile should roll over to a real file on fileno()
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
self.assertTrue(f.fileno() > 0)
self.assertTrue(f._rolled)
def test_multiple_close_before_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile()
f.write('abc\n')
self.assertFalse(f._rolled)
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_multiple_close_after_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write('abc\n')
self.assertTrue(f._rolled)
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_bound_methods(self):
# It should be OK to steal a bound method from a SpooledTemporaryFile
# and use it independently; when the file rolls over, those bound
# methods should continue to function
f = self.do_create(max_size=30)
read = f.read
write = f.write
seek = f.seek
write("a" * 35)
write("b" * 35)
seek(0, 0)
self.assertTrue(read(70) == 'a'*35 + 'b'*35)
def test_context_manager_before_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_during_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
f.write('abc\n')
f.flush()
self.assertTrue(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_after_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write('abc\n')
f.flush()
self.assertTrue(f._rolled)
with f:
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
test_classes.append(test_SpooledTemporaryFile)
class test_TemporaryFile(TC):
"""Test TemporaryFile()."""
def test_basic(self):
# TemporaryFile can create files
# No point in testing the name params - the file has no name.
try:
tempfile.TemporaryFile()
except:
self.failOnException("TemporaryFile")
def test_has_no_name(self):
# TemporaryFile creates files with no names (on this system)
dir = tempfile.mkdtemp()
f = tempfile.TemporaryFile(dir=dir)
f.write('blat')
# Sneaky: because this file has no name, it should not prevent
# us from removing the directory it was created in.
try:
os.rmdir(dir)
except:
ei = sys.exc_info()
# cleanup
f.close()
os.rmdir(dir)
self.failOnException("rmdir", ei)
def test_multiple_close(self):
# A TemporaryFile can be closed many times without error
f = tempfile.TemporaryFile()
f.write('abc\n')
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
# How to test the mode and bufsize parameters?
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
test_classes.append(test_TemporaryFile)
def test_main():
test_support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
duducosmos/pgs4a
|
python-install/lib/python2.7/test/test_tempfile.py
|
Python
|
lgpl-2.1
| 26,716
|
"""Functions that transform a Context object to a different representation."""
import json
import six
from http_prompt.utils import smart_quote
def _noop(s):
return s
def _extract_httpie_options(context, quote=False, join_key_value=False,
excluded_keys=None):
if quote:
quote_func = smart_quote
else:
quote_func = _noop
if join_key_value:
def form_new_opts(k, v): return [k + '=' + v]
else:
def form_new_opts(k, v): return [k, v]
excluded_keys = excluded_keys or []
opts = []
for k, v in sorted(six.iteritems(context.options)):
if k not in excluded_keys:
if v is not None:
v = quote_func(v)
new_opts = form_new_opts(k, v)
else:
new_opts = [k]
opts += new_opts
return opts
def _extract_httpie_request_items(context, quote=False):
if quote:
quote_func = smart_quote
else:
quote_func = _noop
items = []
operators_and_items = [
# (separator, dict_of_request_items)
('==', context.querystring_params),
(':=', context.body_json_params),
('=', context.body_params),
(':', context.headers)
]
for sep, item_dict in operators_and_items:
for k, value in sorted(six.iteritems(item_dict)):
if sep == ':=':
json_str = json.dumps(value,
sort_keys=True).replace("'", "\\'")
if isinstance(value, six.string_types) and quote:
json_str = "'" + json_str + "'"
item = quote_func('%s:=%s' % (k, json_str))
items.append(item)
elif isinstance(value, (list, tuple)):
for v in value:
item = quote_func('%s%s%s' % (k, sep, v))
items.append(item)
else:
item = quote_func('%s%s%s' % (k, sep, value))
items.append(item)
return items
def extract_args_for_httpie_main(context, method=None):
"""Transform a Context object to a list of arguments that can be passed to
HTTPie main function.
"""
args = _extract_httpie_options(context)
if method:
args.append(method.upper())
args.append(context.url)
args += _extract_httpie_request_items(context)
return args
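# Hedged usage sketch (added for illustration; the Context constructor call is
# an assumption about http_prompt.context.Context):
#
#   from http_prompt.context import Context
#   c = Context('http://localhost:8000')
#   c.headers['Accept'] = 'application/json'
#   extract_args_for_httpie_main(c, method='get')
#   # -> ['GET', 'http://localhost:8000', 'Accept:application/json']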
def format_to_curl(context, method=None):
"""Format a Context object to a cURL command."""
raise NotImplementedError("curl format is not supported yet")
def format_to_raw(context, method=None):
"""Format a Context object to HTTP raw text."""
raise NotImplementedError("raw format is not supported yet")
def format_to_httpie(context, method=None):
"""Format a Context object to an HTTPie command."""
cmd = ['http'] + _extract_httpie_options(context, quote=True,
join_key_value=True)
if method:
cmd.append(method.upper())
cmd.append(context.url)
cmd += _extract_httpie_request_items(context, quote=True)
return ' '.join(cmd) + '\n'
def format_to_http_prompt(context, excluded_options=None):
"""Format a Context object to HTTP Prompt commands."""
cmds = _extract_httpie_options(context, quote=True, join_key_value=True,
excluded_keys=excluded_options)
cmds.append('cd ' + smart_quote(context.url))
cmds += _extract_httpie_request_items(context, quote=True)
return '\n'.join(cmds) + '\n'
|
eliangcs/http-prompt
|
http_prompt/context/transform.py
|
Python
|
mit
| 3,546
|
#!/usr/bin/env python
import re
from collections import OrderedDict
from funcparserlib.lexer import make_tokenizer
tokval = lambda tok: tok.value
Spec = lambda name, value: (name, (value,))
operators = OrderedDict([
("**", 1), ("++", 1), ("--", 1), ("+=",
1), ("-=", 1), ("*=", 1), ("/=", 1),
("<<", 1), (">>", 1), ("==", 0), ("!=",
0), ("<=", 0), (">=", 0), ("..", 1),
("+", 1), ("-", 1), ("*", 1), ("/", 1), ("=", 1), ("<", 0), (">", 0),
("!", 0), ("%", 1), ("|", 0), ("^", 0), ("&", 0), ("?", 1), (":", 1),
("in", 0), ("is", 0), ("or", 0), ("and", 0), ("not", 0),
("return", 0), ("yield", 0), ("from",
1), ("import", 1), ("raise", 0), ("assert", 0),
])
strtpl = """
([bu])?
{start:s}
[^\\{quote:s}]*?
(
( \\\\[\000-\377]
| {quote:s}
( \\\\[\000-\377]
| [^\\{quote:s}]
| {quote:s}
( \\\\[\000-\377]
| [^\\{quote:s}]
)
)
)
[^\\{quote:s}]*?
)*?
{end:s}
"""
quotes = [
{"quote": "'", "start": "'''", "end": "'''"},
{"quote": '"', "start": '"""', "end": '"""'},
{"quote": "'", "start": "'", "end": "'"},
{"quote": '"', "start": '"', "end": '"'}
]
strre = "".join(strtpl.split())
strre = "|".join([strre.format(**quote) for quote in quotes])
strre = re.compile(strre.format(**quotes[3]))
encoding = "utf-8"
ops = "|".join([re.escape(op) for op in operators])
specs = [
Spec("comment", r'#.*'),
Spec("whitespace", r"[ \t]+"),
Spec('string', strre),
Spec('number', r'(-?(0|([1-9][0-9]*))(\.[0-9]+)?([Ee]-?[0-9]+)?)'),
Spec('identifier', r'[A-Za-z_][A-Za-z0-9_]*'),
Spec('operator', ops),
Spec('op', r'[(){}\[\],:;\n\r]'),
]
useless = ["comment", "whitespace"]
tokenizer = make_tokenizer(specs)
def tokenize(s):
return [x for x in tokenizer(s) if x.type not in useless]
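# Hedged usage sketch (added): funcparserlib tokens expose .type and .value,
# so a small expression tokenizes roughly as follows.
#
#   >>> [(t.type, t.value) for t in tokenize("x = 1 + 2")]
#   [('identifier', 'x'), ('operator', '='), ('number', '1'),
#    ('operator', '+'), ('number', '2')]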
|
prologic/mio
|
mio/lexer.py
|
Python
|
mit
| 2,010
|
import housing_price.housing_price as hp
import yaml
hp.collect()
with open('config.yml', 'r') as f:
config = yaml.load(f)
|
ftan84/housing_price
|
main.py
|
Python
|
mit
| 128
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
This tests the SRC 'targz' packager, which does the following:
- create a targz package containing the specified files.
"""
import TestSCons
python = TestSCons.python
test = TestSCons.TestSCons()
tar = test.detect('TAR', 'tar')
if tar:
test.subdir('src')
test.write( [ 'src', 'main.c' ], r"""
int main( int argc, char* argv[] )
{
return 0;
}
""")
test.write('SConstruct', """
Program( 'src/main.c' )
env=Environment(tools=['default', 'packaging'])
env.Package( PACKAGETYPE = 'src_targz',
target = 'src.tar.gz',
PACKAGEROOT = 'test',
source = [ 'src/main.c', 'SConstruct' ] )
""")
test.run(arguments='', stderr = None)
test.must_exist( 'src.tar.gz' )
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
azatoth/scons
|
test/packaging/tar/gz.py
|
Python
|
mit
| 2,021
|
import util.func
import util.data
import pytest
import tornado.concurrent
from unittest import mock
def test_future():
f = tornado.concurrent.Future()
f2 = util.data.freeze(f)
val = [1, 2]
f.set_result(val)
val.append(3)
assert f2.result() == [1, 2]
def test_unicode_synonymous_with_str():
assert util.data.freeze({u'a': 'b'}) == {'a': 'b'}
assert util.data.freeze(u'asdf') == 'asdf'
def test_setitem_freeze():
with pytest.raises(Exception):
util.data.freeze({'a': 1})['a'] = 2
def test_pop_freeze():
with pytest.raises(Exception):
util.data.freeze({'a': 1}).pop()
def test_popitem_freeze():
with pytest.raises(Exception):
util.data.freeze({'a': 1}).popitem()
def test_update_freeze():
with pytest.raises(Exception):
util.data.freeze({'a': 1}).update()
def test_clear_freeze():
with pytest.raises(Exception):
util.data.freeze({'a': 1}).clear()
def test_append_freeze():
with pytest.raises(ValueError):
util.data.freeze([]).append(1)
def test_getitem_freeze():
assert 1 == util.data.freeze({'a': 1})['a']
def test_set_freeze():
x = {1, 2, 3}
y = util.data.freeze(x)
x.add(4)
assert y == {1, 2, 3}
def test_nested_freeze():
x = {1}
y = util.data.freeze({'val': x})
x.add(2)
assert len(y['val']) == 1
def test_list_freeze():
x = [1, 2, 3]
y = util.data.freeze(x)
x.append(4)
assert y == [1, 2, 3]
def test_dont_refreeze():
fn = util.data.freeze
with mock.patch.object(util.data, 'freeze') as m:
m.side_effect = fn
x = [1, 2, 3]
assert m.call_count == 0
x = util.data.freeze(x)
assert m.call_count == 4 # called once for [] and once for each element
util.data.freeze(x)
assert m.call_count == 5 # called once for [] and shortcircuit
util.data.freeze(x)
assert m.call_count == 6 # called once for [] and shortcircuit
def test_equality():
assert util.data.freeze([1, 2]) == [1, 2]
assert util.data.freeze((1, 2)) == (1, 2)
assert not util.data.freeze([1, 2]) == (x for x in range(1, 3))
|
nathants/s
|
tests/test_data.py
|
Python
|
mit
| 2,143
|
import sys
try:
# Our match_hostname function is the same as 3.5's, so we only want to
# import the match_hostname function if it's at least that good.
if sys.version_info < (3, 5):
raise ImportError("Fallback to vendored code")
from ssl import CertificateError, match_hostname
except ImportError:
try:
# Backport of the function from a pypi module
from backports.ssl_match_hostname import CertificateError, match_hostname
except ImportError:
# Our vendored copy
from ._implementation import CertificateError, match_hostname
# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')
|
zer0yu/ZEROScan
|
thirdparty/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py
|
Python
|
mit
| 707
|
from crianza.errors import CompileError
from crianza.interpreter import Machine, isconstant, isstring, isbool, isnumber
from crianza import instructions
from crianza import optimizer
EMBEDDED_PUSH_TAG = "embedded_push"
def make_embedded_push(value):
"""Returns a closure that pushed the given value onto a Machine's stack.
We use this to embed stack pushes in the VM code, so that the interpreter
can assume that all instructions are callable Python functions. This makes
dispatching much faster than checking if an instruction is a constant
(number, string, etc) or a Python function.
"""
push = lambda vm: vm.push(value)
push.tag = EMBEDDED_PUSH_TAG
return push
def is_embedded_push(obj):
"""Checks if an instruction object is an embedded push."""
return callable(obj) and hasattr(obj, "tag") and obj.tag==EMBEDDED_PUSH_TAG
def get_embedded_push_value(obj):
"""Extracts the embedded push value."""
assert(is_embedded_push(obj))
assert(len(obj.__closure__) == 1)
return obj.__closure__[0].cell_contents
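# Minimal round-trip illustration (added for clarity): embed a constant and
# read it back using only the helpers above.
_example_push = make_embedded_push(42)
assert is_embedded_push(_example_push)
assert get_embedded_push_value(_example_push) == 42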
def check(code):
"""Checks code for obvious errors."""
def safe_lookup(op):
try:
return instructions.lookup(op)
except Exception:
return op
for i, a in enumerate(code):
b = code[i+1] if i+1 < len(code) else None
# Does instruction exist?
if not isconstant(a):
try:
instructions.lookup(a)
except KeyError as err:
# Skip embedded push closures
if not (len(err.args)==1 and is_embedded_push(err.args[0])):
raise CompileError("Instruction at index %d is unknown: %s"
% (i, a))
# Invalid: <str> int
if isstring(a) and safe_lookup(b) == instructions.cast_int:
raise CompileError(
"Cannot convert string to integer (index %d): %s %s" % (i, a,
b))
        # Invalid: <non-boolean> <boolean op>
        boolean_ops = [instructions.boolean_not,
            instructions.boolean_or,
            instructions.boolean_and]
        if not isbool(a) and safe_lookup(b) in boolean_ops:
            raise CompileError(
                "Can only use boolean operators on booleans (index %d): %s %s" %
                (i, a, b))
return code
def compile(code, silent=True, ignore_errors=False, optimize=True):
"""Compiles subroutine-forms into a complete working code.
A program such as:
: sub1 <sub1 code ...> ;
: sub2 <sub2 code ...> ;
sub1 foo sub2 bar
is compiled into:
<sub1 address> call
foo
<sub2 address> call
exit
<sub1 code ...> return
<sub2 code ...> return
Optimizations are first done on subroutine bodies, then on the main loop
and finally, symbols are resolved (i.e., placeholders for subroutine
addresses are replaced with actual addresses).
Args:
silent: If set to False, will print optimization messages.
        ignore_errors: If set to True, the final sanity check of the compiled
            code is skipped. The actual compilation will still raise errors.
optimize: Flag to control whether to optimize code.
Raises:
        CompileError - Raised if invalid code is detected.
Returns:
An array of code that can be run by a Machine. Typically, you want to
pass this to a Machine without doing optimizations.
Usage:
source = parse("<source code>")
code = compile(source)
machine = Machine(code, optimize=False)
machine.run()
"""
assert(isinstance(code, list))
output = []
subroutine = {}
builtins = Machine([]).instructions
# Gather up subroutines
try:
it = code.__iter__()
while True:
word = next(it)
if word == ":":
name = next(it)
if name in builtins:
raise CompileError("Cannot shadow internal word definition '%s'." % name)
if name in [":", ";"]:
raise CompileError("Invalid word name '%s'." % name)
subroutine[name] = []
while True:
op = next(it)
if op == ";":
subroutine[name].append(instructions.lookup(instructions.return_))
break
else:
subroutine[name].append(op)
else:
output.append(word)
except StopIteration:
pass
# Expand all subroutine words to ["<name>", "call"]
for name, code in subroutine.items():
# For subroutines
xcode = []
for op in code:
xcode.append(op)
if op in subroutine:
xcode.append(instructions.lookup(instructions.call))
subroutine[name] = xcode
# Compile main code (code outside of subroutines)
xcode = []
for op in output:
xcode.append(op)
if op in subroutine:
xcode.append(instructions.lookup(instructions.call))
# Because main code comes before subroutines, we need to explicitly add an
# exit instruction
output = xcode
if len(subroutine) > 0:
output += [instructions.lookup(instructions.exit)]
# Optimize main code
if optimize:
output = optimizer.optimized(output, silent=silent, ignore_errors=False)
# Add subroutines to output, track their locations
location = {}
for name, code in subroutine.items():
location[name] = len(output)
if optimize:
output += optimizer.optimized(code, silent=silent, ignore_errors=False)
else:
output += code
# Resolve all subroutine references
for i, op in enumerate(output):
if op in location:
output[i] = location[op]
output = native_types(output)
if not ignore_errors:
check(output)
return output
def to_bool(instr):
if isinstance(instr, bool):
return instr
elif instr == instructions.lookup(instructions.true_):
return True
elif instr == instructions.lookup(instructions.false_):
return False
else:
raise CompileError("Unknown instruction: %s" % instr)
def native_types(code):
"""Convert code elements from strings to native Python types."""
out = []
for c in code:
if isconstant(c, quoted=True):
if isstring(c, quoted=True):
v = c[1:-1]
elif isbool(c):
v = to_bool(c)
elif isnumber(c):
v = c
else:
raise CompileError("Unknown type %s: %s" % (type(c).__name__, c))
# Instead of pushing constants in the code, we always push callable
# Python functions, for fast dispatching:
out.append(make_embedded_push(v))
else:
try:
out.append(instructions.lookup(c))
except KeyError:
raise CompileError("Unknown word '%s'" % c)
return out
|
cslarsen/crianza
|
crianza/compiler.py
|
Python
|
bsd-3-clause
| 7,198
|
from flask_restful import reqparse
from huginn.cli import argtypes
# the waypoint request parser is used to parse the waypoint data from a web
# request
waypoint = reqparse.RequestParser()
waypoint.add_argument("latitude", required=True,
location="json", type=argtypes.latitude)
waypoint.add_argument("longitude", required=True,
location="json", type=argtypes.longitude)
waypoint.add_argument("altitude", required=True,
location="json", type=argtypes.altitude)
|
pmatigakis/Huginn
|
huginn/request_parsers.py
|
Python
|
bsd-3-clause
| 529
|
import unittest, random, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_util
print "have to add leading/trailing whitespace and single/double quotes?"
# A time token is built up from the definition of the time subtokens
# A time token can have single quote/double quote/white space like any
# other string/number token, and is stripped of that before parsing.
# # I'd like to see this rule met. Not sure if it's possible.
# If the type for the column is guessed to be Time,
# then anything that doesn't match the time token definition must be NA'ed by h2o.
#
#
# In all cases where there are multiple integer digits, leading zeroes can be present or individually dropped.
# This could be one or two leading zeroes in some cases. Dropping all zeroes to create nothing, is not legal.
#
# dd : two digit day, from 00 to 31. Is there any checking for the day being legal for the particular month and year?
# MMM: three character month. Legal:
# months = [
# ['Jan', 'JAN', 'jan'],
# ['Feb', 'FEB', 'feb'],
# ['Mar', 'MAR', 'mar'],
# ['Apr', 'APR', 'apr'],
# ['May', 'MAY', 'may'],
# ['Jun', 'JUN', 'jun'],
# ['Jul', 'JUL', 'jul'],
# ['Aug', 'AUG', 'aug'],
# ['Sep', 'SEP', 'sep'],
# ['Oct', 'OCT', 'oct'],
# ['Nov', 'NOV', 'nov'],
# ['Dec', 'DEC', 'dec']
# ]
#
# yy: two digit year, from 00 to 99.
# MM: two digit month, from 00 to 12.
# HH: two digit hour, from 00 to 23.
# MM: two digit minute, from 00 to 59.
# SS: two digit second, from 00 to 59.
# SSS: three digit millisecond, from 000 to 999. (note here that one or two leading zeroes can be dropped).
#
# Subtokens can then be combined in these 4 formats. Note the "-", ":" or "." in the particular format is never optional.
# Anything that doesn't match these formats, or has a subtoken that doesn't match the legal cases, should be NA'ed.
#
# dd-MMM-yy
# yyyy-MM-dd
# yyyy-MM-dd HH:mm:ss
# yyyy-MM-dd HH:mm:ss.SSSS
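#
# Hedged illustration (added, not part of the original test): the four layouts
# above map onto strptime patterns; CPython matches month names
# case-insensitively, so "25-NOV-10" parses with %b.
from datetime import datetime
assert datetime.strptime("25-NOV-10", "%d-%b-%y").month == 11 # dd-MMM-yy
assert datetime.strptime("2010-11-25", "%Y-%m-%d").day == 25 # yyyy-MM-dd
assert datetime.strptime("2010-11-25 13:05:07", "%Y-%m-%d %H:%M:%S").hour == 13
assert datetime.strptime("2010-11-25 13:05:07.123", "%Y-%m-%d %H:%M:%S.%f").microsecond == 123000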
print "test dd-MMM-yy format. caps for month?"
print "apparently h2o NAs some cases. illegal dates in a month?"
print "seems to be that we require leading zero in year, but it's okay to not have it in the date?"
ROWS = 5
COLS = 20
RESTRICT_TO_28 = False
RESTRICT_MONTH_TO_UPPER = True
if RESTRICT_MONTH_TO_UPPER:
months = [
['nullForZero'],
['JAN'],
['FEB'],
['MAR'],
['APR'],
['MAY'],
['JUN'],
['JUL'],
['AUG'],
['SEP'],
['OCT'],
['NOV'],
['DEC']
]
else:
months = [
['nullForZero'],
['Jan', 'JAN', 'jan'],
['Feb', 'FEB', 'feb'],
['Mar', 'MAR', 'mar'],
['Apr', 'APR', 'apr'],
['May', 'MAY', 'may'],
['Jun', 'JUN', 'jun'],
['Jul', 'JUL', 'jul'],
['Aug', 'AUG', 'aug'],
['Sep', 'SEP', 'sep'],
['Oct', 'OCT', 'oct'],
['Nov', 'NOV', 'nov'],
['Dec', 'DEC', 'dec']
]
# increase weight for Feb
monthWeights = [1 if i!=1 else 5 for i in range(len(months))]
if RESTRICT_TO_28:
days = map(str, range(1,29))
else:
days = map(str, range(1,32))
# increase weight for picking near end of month
dayWeights = [1 if i<27 else 8 for i in range(len(days))]
def getRandomTimeStamp():
    # assume leading zero is optional
day = days[h2o_util.weighted_choice(dayWeights)]
# may or may not leading zero fill the day
# if random.randint(0,1) == 1:
# day = day.zfill(2)
# yy year
    timestampFormat = random.randint(0,5)
    timestampFormat = 0 # restrict to the dd-MMM-yy format (0) for now
# always 4 digit
yearInt = random.randint(1970, 2016)
yearStr = str(yearInt)
if timestampFormat==0:
# may or may not leading zero fill the year
if random.randint(0,1) == 1:
if str(yearStr[-2])=='0':
# drop the leading zero
year = int(str(yearStr)[-1:])
else:
                # keep leading zero
year = int(str(yearStr)[-2:])
else:
# last two digits. (always zero filled)
year = int(str(yearStr)[-2:])
# yyyy year
else:
year = yearInt
if timestampFormat==0:
# once we pick the month, we have to pick from the choices for the name of the month
# monthIndex = range(1,13)[h2o_util.weighted_choice(monthWeights)]
monthIndex = random.randint(1,12)
month = random.choice(months[monthIndex])
else:
month = str(random.randint(1,12))
# may or may not leading zero fill the month
# if random.randint(0,1) == 1:
# month = month.zfill(2)
# use calendar to make sure the day is legal for that month/year
import calendar
legalDays = calendar.monthrange(yearInt, monthIndex)[1]
    # day is a string here, so compare numerically (a bare str/int comparison
    # is always True on Python 2 and would clamp every day to month-end)
    if int(day) > legalDays:
        day = str(legalDays)
# may or may not leading zero fill the hour
hour = str(random.randint(0,23))
if random.randint(0,1) == 1:
hour = hour.zfill(2)
minute = str(random.randint(0,59))
if random.randint(0,1) == 1:
minute = minute.zfill(2)
second = str(random.randint(0,59))
if random.randint(0,1) == 1:
second = second.zfill(2)
milli = str(random.randint(0,999))
# can be zero filled to 2 if <= 99
    r = random.randint(0,2)
    if r==1:
        milli = milli.zfill(2)
    elif r==2:
        milli = milli.zfill(3)
# "dd-MMM-yy"
# "yyyy-MM-dd",
# "yyyy-MM-dd HH:mm:ss" };
# "yyyy-MM-dd HH:mm:ss.SSS",
if timestampFormat==0:
a = "%s-%s-%02d" % (day, month, year)
elif timestampFormat==1:
a = "%04d-%s-%s" % (year, month, day)
elif timestampFormat==2:
a = "%04d-%s-%s %s:%s:%s" % (year, month, day, hour, minute, second)
# elif timestampFormat==3:
else:
a = "%04d-%s-%s %s:%s:%s:%s" % (year, month, day, hour, minute, second, milli)
return a
def rand_rowData(colCount=6):
a = [getRandomTimeStamp() for fields in range(colCount)]
# put a little white space in!
b = ", ".join(map(str,a))
return b
def write_syn_dataset(csvPathname, rowCount, colCount, headerData=None, rowData=None):
dsf = open(csvPathname, "w+")
if headerData is not None:
dsf.write(headerData + "\n")
for i in range(rowCount):
rowData = rand_rowData(colCount)
dsf.write(rowData + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1,java_heap_GB=1,use_flatfile=True)
@classmethod
def tearDownClass(cls):
### time.sleep(3600)
h2o.tear_down_cloud(h2o.nodes)
def test_parse_time(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
csvFilename = "syn_time.csv"
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
headerData = None
colCount = COLS
# rowCount = 1000
rowCount = ROWS
write_syn_dataset(csvPathname, rowCount, colCount, headerData)
for trial in range (20):
rowData = rand_rowData()
# make sure all key names are unique, when we re-put and re-parse (h2o caching issues)
src_key = csvFilename + "_" + str(trial)
hex_key = csvFilename + "_" + str(trial) + ".hex"
start = time.time()
parseResultA = h2i.import_parse(path=csvPathname, schema='put', src_key=src_key, hex_key=hex_key)
print "\nA trial #", trial, "parse end on ", csvFilename, 'took', time.time() - start, 'seconds'
inspect = h2o_cmd.runInspect(key=hex_key)
missingValuesListA = h2o_cmd.infoFromInspect(inspect, csvPathname)
print "missingValuesListA", missingValuesListA
numColsA = inspect['numCols']
numRowsA = inspect['numRows']
byteSizeA = inspect['byteSize']
self.assertEqual(missingValuesListA, [], "missingValuesList should be empty")
self.assertEqual(numColsA, colCount)
self.assertEqual(numRowsA, rowCount)
# do a little testing of saving the key as a csv
csvDownloadPathname = SYNDATASETS_DIR + "/csvDownload.csv"
h2o.nodes[0].csv_download(src_key=hex_key, csvPathname=csvDownloadPathname)
# remove the original parsed key. source was already removed by h2o
h2o.nodes[0].remove_key(hex_key)
# interesting. what happens when we do csv download with time data?
start = time.time()
parseResultB = h2i.import_parse(path=csvDownloadPathname, schema='put', src_key=src_key, hex_key=hex_key)
print "B trial #", trial, "parse end on ", csvFilename, 'took', time.time() - start, 'seconds'
inspect = h2o_cmd.runInspect(key=hex_key)
missingValuesListB = h2o_cmd.infoFromInspect(inspect, csvPathname)
print "missingValuesListB", missingValuesListB
numColsB = inspect['numCols']
numRowsB = inspect['numRows']
byteSizeB = inspect['byteSize']
self.assertEqual(missingValuesListA, missingValuesListB,
"missingValuesList mismatches after re-parse of downloadCsv result")
self.assertEqual(numColsA, numColsB,
"numCols mismatches after re-parse of downloadCsv result")
# H2O adds a header to the csv created. It puts quotes around the col numbers if no header
# so I guess that's okay. So allow for an extra row here.
self.assertEqual(numRowsA, numRowsB,
"numRowsA: %s numRowsB: %s mismatch after re-parse of downloadCsv result" % (numRowsA, numRowsB) )
print "H2O writes the internal format (number) out for time."
# ==> syn_time.csv <==
# 31-Oct-49, 25-NOV-10, 08-MAR-44, 23-Nov-34, 19-Feb-96, 23-JUN-30
# 31-Oct-49, 25-NOV-10, 08-MAR-44, 23-Nov-34, 19-Feb-96, 23-JUN-30
# ==> csvDownload.csv <==
# "0","1","2","3","4","5"
# 2.5219584E12,1.293264E12,2.3437116E12,2.0504736E12,3.9829788E12,1.9110204E12
if 1==0:
# extra line for column headers?
self.assertEqual(byteSizeA, byteSizeB,
"byteSize mismatches after re-parse of downloadCsv result %d %d" % (byteSizeA, byteSizeB) )
# FIX! should do some comparison of values?
# maybe can use exec to checksum the columns and compare column list.
# or compare to expected values? (what are the expected values for the number for time inside h2o?)
# FIX! should compare the results of the two parses. The infoFromInspect result?
### h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
h2o.check_sandbox_for_errors()
if __name__ == '__main__':
h2o.unit_main()
|
h2oai/h2o
|
py/testdir_multi_jvm/test_parse_time_fvec.py
|
Python
|
apache-2.0
| 11,031
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This bot replicates all pages (from specific namespaces) in a wiki to a second wiki within one family.
Example:
python replicate_wiki.py [-r] -ns 10 -f wikipedia -o nl li fy
to copy all templates from nlwiki to liwiki and fywiki. It will show which pages have to be changed
if -r is not present, and will only actually write pages if -r /is/ present.
You can add replicate_replace to your user_config.py, which has the following format:
replicate_replace = {
'wikipedia:li': {'Hoofdpagina': 'Veurblaad'}
}
to replace all occurrences of 'Hoofdpagina' with 'Veurblaad' when writing to liwiki. Note that this does
not take the origin wiki into account.
'''
#
# (C) Kasper Souren 2012-2013
# (C) 2013 Pywikipediabot team
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: d6e9ba21483108d2cf378dd25dcce1788c2d0a93 $'
#
import sys
import re
from wikipedia import *
from itertools import imap
def namespaces(site):
'''dict from namespace number to prefix'''
ns = dict(map(lambda n: (site.getNamespaceIndex(n), n),
site.namespaces()))
ns[0] = ''
return ns
def multiple_replace(text, word_dict):
'''Replace all occurrences in text of key value pairs in word_dict'''
for key in word_dict:
text = text.replace(key, word_dict[key])
return text
class SyncSites:
'''Work is done in here.'''
def __init__(self, options):
self.options = options
if options.original_wiki:
original_wiki = options.original_wiki
else:
original_wiki = config.mylang
print "Syncing from " + original_wiki
family = options.family or config.family
sites = options.destination_wiki
self.original = getSite(original_wiki, family)
if options.namespace and 'help' in options.namespace:
nsd = namespaces(self.original)
for k in nsd:
print k, nsd[k]
sys.exit()
self.sites = map(lambda s: getSite(s, family), sites)
self.differences = {}
self.user_diff = {}
print 'Syncing to',
for s in self.sites:
self.differences[s] = []
self.user_diff[s] = []
print s,
print
def check_sysops(self):
'''Check if sysops are the same
TODO: make optional
'''
def get_users(site):
userlist = site.getUrl(site.get_address('Special:Userlist&group=sysop'))
# Hackery but working. At least on MW 1.15.0
# User namespace is number 2
return set(re.findall(site.namespace(2) + ':(\w+)["\&]', userlist))
ref_users = get_users(self.original)
for site in self.sites:
users = get_users(site)
diff = list(ref_users.difference(users))
diff.sort()
self.user_diff[site] = diff
def check_namespaces(self):
'''Check all namespaces, to be ditched for clarity'''
namespaces = [
0, # Main
8, # MediaWiki
152, # DPL
102, # Eigenschap
104, # Type
106, # Formulier
108, # Concept
10, # Sjabloon
]
if self.options.namespace:
print options.namespace
namespaces = [int(options.namespace)]
print "Checking these namespaces", namespaces, "\n"
for ns in namespaces:
self.check_namespace(ns)
def check_namespace(self, namespace):
'''Check an entire namespace'''
print "\nCHECKING NAMESPACE", namespace
pages = imap(lambda p: p.title(),
self.original.allpages('!', namespace))
for p in pages:
if not p in ['MediaWiki:Sidebar', 'MediaWiki:Mainpage',
'MediaWiki:Sitenotice', 'MediaWiki:MenuSidebar']:
try:
self.check_page(p)
except pywikibot.exceptions.NoPage:
print 'Bizarre NoPage exception that we are just going to ignore'
except pywikibot.exceptions.IsRedirectPage:
print 'error: Redirectpage - todo: handle gracefully'
print
def generate_overviews(self):
'''Create page on wikis with overview of bot results'''
for site in self.sites:
sync_overview_page = Page(site, 'User:' + site.loggedInAs() + '/sync.py overview')
output = "== Pages that differ from original ==\n\n"
if self.differences[site]:
output += "".join(map(lambda l: '* [[:' + l + "]]\n", self.differences[site]))
else:
output += "All important pages are the same"
output += "\n\n== Admins from original that are missing here ==\n\n"
if self.user_diff[site]:
output += "".join(map(lambda l: '* ' + l.replace('_', ' ') + "\n", self.user_diff[site]))
else:
output += "All users from original are also present on this wiki"
print output
sync_overview_page.put(output, self.put_message(site))
def put_message(self, site):
return site.loggedInAs() + ' sync.py synchronization from ' + str(self.original)
def check_page(self, pagename):
'''Check one page'''
print "\nChecking", pagename,
sys.stdout.flush()
page1 = Page(self.original, pagename)
txt1 = page1.get()
for site in self.sites:
if options.dest_namespace:
prefix = namespaces(site)[int(options.dest_namespace)]
if prefix:
prefix += ':'
new_pagename = prefix + page1.titleWithoutNamespace()
print "\nCross namespace, new title: ", new_pagename
else:
new_pagename = pagename
page2 = Page(site, new_pagename)
if page2.exists():
txt2 = page2.get()
else:
txt2 = ''
if config.replicate_replace.has_key(str(site)):
txt_new = multiple_replace(txt1, config.replicate_replace[str(site)])
if txt1 != txt_new:
print 'NOTE: text replaced using config.sync_replace'
print txt1, txt_new, txt2
txt1 = txt_new
if txt1 != txt2:
print "\n", site, 'DIFFERS'
self.differences[site].append(pagename)
if self.options.replace:
page2.put(txt1, self.put_message(site))
else:
sys.stdout.write('.')
sys.stdout.flush()
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-f", "--family", dest="family",
help="wiki family")
parser.add_argument("-r", "--replace", action="store_true",
help="actually replace pages (without this option you will only get an overview page)")
parser.add_argument("-o", "--original", dest="original_wiki",
help="original wiki")
parser.add_argument('destination_wiki', metavar='N', type=str, nargs='+',
help='destination wiki(s)')
parser.add_argument("-ns", "--namespace", dest="namespace",
help="specify namespace")
parser.add_argument("-dns", "--dest-namespace", dest="dest_namespace",
help="destination namespace (if different)")
(options, args) = parser.parse_known_args()
# sync is global for convenient IPython debugging
sync = SyncSites(options)
sync.check_sysops()
sync.check_namespaces()
sync.generate_overviews()
|
races1986/SafeLanguage
|
CEM/replicate_wiki.py
|
Python
|
epl-1.0
| 7,898
|
"""Settings for logging within Connect"""
# pylint: disable=line-too-long,invalid-name
import environ
env = environ.Env(
LOG_LEVEL=(str, 'WARNING')
)
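# Hedged usage note (added): the level is read from the environment with a
# WARNING fallback, e.g. `LOG_LEVEL=DEBUG python manage.py runserver`.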
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'debug': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': env('LOG_LEVEL'),
'propagate': True,
},
'messages.models': {
'handlers': ['debug'],
'level': env('LOG_LEVEL'),
'propagate': True
},
'groups.views': {
'handlers': ['debug'],
'level': env('LOG_LEVEL'),
'propagate': True
},
'accounts.models': {
'handlers': ['debug'],
'level': env('LOG_LEVEL'),
'propagate': True
},
'celery': {
'handlers': ['console'],
'level': env('LOG_LEVEL'),
},
'groups.tasks': {
'handlers': ['debug'],
'level': env('LOG_LEVEL'),
'propagate': True
},
'connectmessages.models': {
'handlers': ['debug'],
'level': env('LOG_LEVEL'),
'propagate': True
}
}
}
|
lpatmo/actionify_the_news
|
connect/settings/logging.py
|
Python
|
mit
| 2,029
|
"""Support for KNX/IP climate devices."""
from __future__ import annotations
from typing import Any
from xknx import XKNX
from xknx.devices import Climate as XknxClimate, ClimateMode as XknxClimateMode
from xknx.dpt.dpt_hvac_mode import HVACControllerMode, HVACOperationMode
from xknx.telegram.address import parse_device_group_address
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_OFF,
PRESET_AWAY,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import CONTROLLER_MODES, CURRENT_HVAC_ACTIONS, DOMAIN, PRESET_MODES
from .knx_entity import KnxEntity
from .schema import ClimateSchema
ATTR_COMMAND_VALUE = "command_value"
CONTROLLER_MODES_INV = {value: key for key, value in CONTROLLER_MODES.items()}
PRESET_MODES_INV = {value: key for key, value in PRESET_MODES.items()}
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up climate(s) for KNX platform."""
if not discovery_info or not discovery_info["platform_config"]:
return
platform_config = discovery_info["platform_config"]
xknx: XKNX = hass.data[DOMAIN].xknx
_async_migrate_unique_id(hass, platform_config)
async_add_entities(
KNXClimate(xknx, entity_config) for entity_config in platform_config
)
@callback
def _async_migrate_unique_id(
hass: HomeAssistant, platform_config: list[ConfigType]
) -> None:
"""Change unique_ids used in 2021.4 to include target_temperature GA."""
entity_registry = er.async_get(hass)
for entity_config in platform_config:
# normalize group address strings - ga_temperature_state was the old uid
ga_temperature_state = parse_device_group_address(
entity_config[ClimateSchema.CONF_TEMPERATURE_ADDRESS][0]
)
old_uid = str(ga_temperature_state)
entity_id = entity_registry.async_get_entity_id("climate", DOMAIN, old_uid)
if entity_id is None:
continue
ga_target_temperature_state = parse_device_group_address(
entity_config[ClimateSchema.CONF_TARGET_TEMPERATURE_STATE_ADDRESS][0]
)
target_temp = entity_config.get(ClimateSchema.CONF_TARGET_TEMPERATURE_ADDRESS)
ga_target_temperature = (
parse_device_group_address(target_temp[0])
if target_temp is not None
else None
)
setpoint_shift = entity_config.get(ClimateSchema.CONF_SETPOINT_SHIFT_ADDRESS)
ga_setpoint_shift = (
parse_device_group_address(setpoint_shift[0])
if setpoint_shift is not None
else None
)
new_uid = (
f"{ga_temperature_state}_"
f"{ga_target_temperature_state}_"
f"{ga_target_temperature}_"
f"{ga_setpoint_shift}"
)
entity_registry.async_update_entity(entity_id, new_unique_id=new_uid)
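# Hedged example (added; the group addresses below are made up): an entity that
# was keyed by the old uid "1/2/3" would be re-registered under something like
# "1/2/3_1/2/4_1/2/5_None" when no setpoint-shift address is configured.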
def _create_climate(xknx: XKNX, config: ConfigType) -> XknxClimate:
"""Return a KNX Climate device to be used within XKNX."""
climate_mode = XknxClimateMode(
xknx,
name=f"{config[CONF_NAME]} Mode",
group_address_operation_mode=config.get(
ClimateSchema.CONF_OPERATION_MODE_ADDRESS
),
group_address_operation_mode_state=config.get(
ClimateSchema.CONF_OPERATION_MODE_STATE_ADDRESS
),
group_address_controller_status=config.get(
ClimateSchema.CONF_CONTROLLER_STATUS_ADDRESS
),
group_address_controller_status_state=config.get(
ClimateSchema.CONF_CONTROLLER_STATUS_STATE_ADDRESS
),
group_address_controller_mode=config.get(
ClimateSchema.CONF_CONTROLLER_MODE_ADDRESS
),
group_address_controller_mode_state=config.get(
ClimateSchema.CONF_CONTROLLER_MODE_STATE_ADDRESS
),
group_address_operation_mode_protection=config.get(
ClimateSchema.CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS
),
group_address_operation_mode_night=config.get(
ClimateSchema.CONF_OPERATION_MODE_NIGHT_ADDRESS
),
group_address_operation_mode_comfort=config.get(
ClimateSchema.CONF_OPERATION_MODE_COMFORT_ADDRESS
),
group_address_operation_mode_standby=config.get(
ClimateSchema.CONF_OPERATION_MODE_STANDBY_ADDRESS
),
group_address_heat_cool=config.get(ClimateSchema.CONF_HEAT_COOL_ADDRESS),
group_address_heat_cool_state=config.get(
ClimateSchema.CONF_HEAT_COOL_STATE_ADDRESS
),
operation_modes=config.get(ClimateSchema.CONF_OPERATION_MODES),
controller_modes=config.get(ClimateSchema.CONF_CONTROLLER_MODES),
)
return XknxClimate(
xknx,
name=config[CONF_NAME],
group_address_temperature=config[ClimateSchema.CONF_TEMPERATURE_ADDRESS],
group_address_target_temperature=config.get(
ClimateSchema.CONF_TARGET_TEMPERATURE_ADDRESS
),
group_address_target_temperature_state=config[
ClimateSchema.CONF_TARGET_TEMPERATURE_STATE_ADDRESS
],
group_address_setpoint_shift=config.get(
ClimateSchema.CONF_SETPOINT_SHIFT_ADDRESS
),
group_address_setpoint_shift_state=config.get(
ClimateSchema.CONF_SETPOINT_SHIFT_STATE_ADDRESS
),
setpoint_shift_mode=config.get(ClimateSchema.CONF_SETPOINT_SHIFT_MODE),
setpoint_shift_max=config[ClimateSchema.CONF_SETPOINT_SHIFT_MAX],
setpoint_shift_min=config[ClimateSchema.CONF_SETPOINT_SHIFT_MIN],
temperature_step=config[ClimateSchema.CONF_TEMPERATURE_STEP],
group_address_on_off=config.get(ClimateSchema.CONF_ON_OFF_ADDRESS),
group_address_on_off_state=config.get(ClimateSchema.CONF_ON_OFF_STATE_ADDRESS),
on_off_invert=config[ClimateSchema.CONF_ON_OFF_INVERT],
group_address_active_state=config.get(ClimateSchema.CONF_ACTIVE_STATE_ADDRESS),
group_address_command_value_state=config.get(
ClimateSchema.CONF_COMMAND_VALUE_STATE_ADDRESS
),
min_temp=config.get(ClimateSchema.CONF_MIN_TEMP),
max_temp=config.get(ClimateSchema.CONF_MAX_TEMP),
mode=climate_mode,
)
class KNXClimate(KnxEntity, ClimateEntity):
"""Representation of a KNX climate device."""
_device: XknxClimate
_attr_temperature_unit = TEMP_CELSIUS
def __init__(self, xknx: XKNX, config: ConfigType) -> None:
"""Initialize of a KNX climate device."""
super().__init__(_create_climate(xknx, config))
self._attr_supported_features = SUPPORT_TARGET_TEMPERATURE
if self.preset_modes:
self._attr_supported_features |= SUPPORT_PRESET_MODE
self._attr_target_temperature_step = self._device.temperature_step
self._attr_unique_id = (
f"{self._device.temperature.group_address_state}_"
f"{self._device.target_temperature.group_address_state}_"
f"{self._device.target_temperature.group_address}_"
f"{self._device._setpoint_shift.group_address}"
)
self.default_hvac_mode: str = config[ClimateSchema.CONF_DEFAULT_CONTROLLER_MODE]
async def async_update(self) -> None:
"""Request a state update from KNX bus."""
await self._device.sync()
if self._device.mode is not None:
await self._device.mode.sync()
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self._device.temperature.value
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
return self._device.target_temperature.value
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
temp = self._device.target_temperature_min
return temp if temp is not None else super().min_temp
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
temp = self._device.target_temperature_max
return temp if temp is not None else super().max_temp
async def async_set_temperature(self, **kwargs: Any) -> None:
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
await self._device.set_target_temperature(temperature)
self.async_write_ha_state()
@property
def hvac_mode(self) -> str:
"""Return current operation ie. heat, cool, idle."""
if self._device.supports_on_off and not self._device.is_on:
return HVAC_MODE_OFF
if self._device.mode is not None and self._device.mode.supports_controller_mode:
return CONTROLLER_MODES.get(
self._device.mode.controller_mode.value, self.default_hvac_mode
)
return self.default_hvac_mode
@property
def hvac_modes(self) -> list[str]:
"""Return the list of available operation/controller modes."""
ha_controller_modes: list[str | None] = []
if self._device.mode is not None:
for knx_controller_mode in self._device.mode.controller_modes:
ha_controller_modes.append(
CONTROLLER_MODES.get(knx_controller_mode.value)
)
if self._device.supports_on_off:
if not ha_controller_modes:
ha_controller_modes.append(self.default_hvac_mode)
ha_controller_modes.append(HVAC_MODE_OFF)
hvac_modes = list(set(filter(None, ha_controller_modes)))
return hvac_modes if hvac_modes else [self.default_hvac_mode]
@property
def hvac_action(self) -> str | None:
"""Return the current running hvac operation if supported.
Need to be one of CURRENT_HVAC_*.
"""
if self._device.supports_on_off and not self._device.is_on:
return CURRENT_HVAC_OFF
if self._device.is_active is False:
return CURRENT_HVAC_IDLE
if self._device.mode is not None and self._device.mode.supports_controller_mode:
return CURRENT_HVAC_ACTIONS.get(
self._device.mode.controller_mode.value, CURRENT_HVAC_IDLE
)
return None
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set operation mode."""
if self._device.supports_on_off and hvac_mode == HVAC_MODE_OFF:
await self._device.turn_off()
else:
if self._device.supports_on_off and not self._device.is_on:
await self._device.turn_on()
if (
self._device.mode is not None
and self._device.mode.supports_controller_mode
):
knx_controller_mode = HVACControllerMode(
CONTROLLER_MODES_INV.get(hvac_mode)
)
await self._device.mode.set_controller_mode(knx_controller_mode)
self.async_write_ha_state()
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
if self._device.mode is not None and self._device.mode.supports_operation_mode:
return PRESET_MODES.get(self._device.mode.operation_mode.value, PRESET_AWAY)
return None
@property
def preset_modes(self) -> list[str] | None:
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
if self._device.mode is None:
return None
presets = [
PRESET_MODES.get(operation_mode.value)
for operation_mode in self._device.mode.operation_modes
]
return list(filter(None, presets))
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if self._device.mode is not None and self._device.mode.supports_operation_mode:
knx_operation_mode = HVACOperationMode(PRESET_MODES_INV.get(preset_mode))
await self._device.mode.set_operation_mode(knx_operation_mode)
self.async_write_ha_state()
@property
def extra_state_attributes(self) -> dict[str, Any] | None:
"""Return device specific state attributes."""
attr: dict[str, Any] = {}
if self._device.command_value.initialized:
attr[ATTR_COMMAND_VALUE] = self._device.command_value.value
return attr
async def async_added_to_hass(self) -> None:
"""Store register state change callback."""
await super().async_added_to_hass()
if self._device.mode is not None:
self._device.mode.register_device_updated_cb(self.after_update_callback)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect device object when removed."""
await super().async_will_remove_from_hass()
if self._device.mode is not None:
self._device.mode.unregister_device_updated_cb(self.after_update_callback)
|
sander76/home-assistant
|
homeassistant/components/knx/climate.py
|
Python
|
apache-2.0
| 13,659
|
'''
Created on Dec 24, 2014
@author: dave
'''
import re
import dragonfly
from dragonfly.actions.action_focuswindow import FocusWindow
from dragonfly.actions.action_key import Key
from dragonfly.actions.action_waitwindow import WaitWindow
from caster.asynch.hmc import squeue
from caster.asynch.hmc import h_launch, homunculus
from caster.lib import utilities, context, settings
def add_vocab():
engine=dragonfly.get_engine()
if engine.name!="natlink":
utilities.report("feature unavailable in your speech recognition engine", speak=True)
return
# attempts to get what was highlighted first
highlighted=context.read_selected_without_altering_clipboard(True)
# change the following regex to accept alphabetical only
disallow="^[A-Za-z]*$"
selected=None
if highlighted[0]==0 and highlighted[1]!="":
if not re.match(disallow, highlighted[1]):
utilities.report("only used for single words", speak=True)
return
selected=highlighted[1]
try:
h_launch.launch(settings.QTYPE_SET, process_set, selected)
except Exception:
utilities.simple_log(False)
def del_vocab():
try:
h_launch.launch(settings.QTYPE_REM, process_delete, None)
except Exception:
utilities.simple_log(False)
def process_set(data):
    '''Add a word to the Natlink vocabulary, optionally with a pronunciation.'''
word=data["word"]
pronunciation=data["pronunciation"]
if pronunciation=="":
pronunciation=None
word_info=data["word_info"]
#missingno
import natlink
result=0
if pronunciation==None:
result=natlink.addWord(word, word_info)
if result==0 and data["force"]==1:
process_delete(data)
result=natlink.addWord(word, word_info)
else:
result=natlink.addWord(word, word_info, str(pronunciation))
if result==0 and data["force"]==1:
process_delete(data)
result=natlink.addWord(word, word_info, str(pronunciation))
if result==1:
utilities.report("word added successfully: "+word, False)
else:
utilities.report("word add failed: "+word, False)
def process_delete(data):
import natlink
natlink.deleteWord(data["word"])
|
j127/caster
|
caster/asynch/hmc/vocabulary_processing.py
|
Python
|
lgpl-3.0
| 2,241
|
import os
c = get_config()
# Notebook config
# user_path = os.path.expanduser(u'~')
# c.NotebookApp.certfile = os.path.join(user_path, u'.ipython/profile_default/mycert.pem')
# c.NotebookApp.certfile = u'~/.ipython/profile_default/mycert.pem'
# c.NotebookApp.ip = '*'
# c.NotebookApp.open_browser = False
# c.NotebookApp.password = u'sha1:b800bdf0bcf4:0d755e297492c8b3e52a646ec745102871bde5ec'
# It is a good idea to put it on a known, fixed port
c.NotebookApp.port = 9999
# This starts plotting support always with matplotlib
c.IPKernelApp.pylab = 'inline'
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'NoColor'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'Inny [\\#]: '
# c.ZMQPromptManager.in_template = 'Inny [\\#]: '
#InteractiveShell.prompt_in1 is deprecated, use PromptManager.in_template
# WARNING: InteractiveShell.prompt_in1 is deprecated, use PromptManager.in_template
# c.PromptManager.in_template = '[nny]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#import os
# from IPython.core import ipapi
# ip = ipapi.get()
# def myPrompt(self):
# print '$'
# ip.set_hook('pre_prompt_hook', myPrompt)
# raw_input('stopped')
c.TerminalIPythonApp.extensions = [
'line_profiler_ext',
'memory_profiler_ext',
]
c.InteractiveShellApp.extensions = [
'line_profiler_ext',
'memory_profiler_ext',
]
|
wd15/env
|
roles/common/files/ipython_notebook_config.py
|
Python
|
mit
| 5,522
|
# Contributed by Seva Alekseyev <sevaa@nih.gov> with National Institutes of Health, 2016
# Cura is released under the terms of the LGPLv3 or higher.
from math import pi, sin, cos, sqrt
from typing import Dict
import numpy
from UM.Job import Job
from UM.Logger import Logger
from UM.Math.Matrix import Matrix
from UM.Math.Vector import Vector
from UM.Mesh.MeshBuilder import MeshBuilder
from UM.Mesh.MeshReader import MeshReader
from cura.Scene.CuraSceneNode import CuraSceneNode as SceneNode
MYPY = False
try:
if not MYPY:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
# TODO: preserve the structure of scenes that contain several objects
# Use CADPart, for example, to distinguish between separate objects
DEFAULT_SUBDIV = 16 # Default subdivision factor for spheres, cones, and cylinders
EPSILON = 0.000001
class Shape:
    # Expects verts in MeshBuilder-ready format, as an n by 3 ndarray
# with vertices stored in rows
def __init__(self, verts, faces, index_base, name):
self.verts = verts
self.faces = faces
# Those are here for debugging purposes only
self.index_base = index_base
self.name = name
class X3DReader(MeshReader):
def __init__(self) -> None:
super().__init__()
self._supported_extensions = [".x3d"]
self._namespaces = {} # type: Dict[str, str]
# Main entry point
# Reads the file, returns a SceneNode (possibly with nested ones), or None
def _read(self, file_name):
try:
self.defs = {}
self.shapes = []
tree = ET.parse(file_name)
xml_root = tree.getroot()
if xml_root.tag != "X3D":
return None
            scale = 1000 # Default X3D unit is one meter, while Cura's is one millimeter
if xml_root[0].tag == "head":
for head_node in xml_root[0]:
if head_node.tag == "unit" and head_node.attrib.get("category") == "length":
scale *= float(head_node.attrib["conversionFactor"])
break
xml_scene = xml_root[1]
else:
xml_scene = xml_root[0]
if xml_scene.tag != "Scene":
return None
self.transform = Matrix()
self.transform.setByScaleFactor(scale)
self.index_base = 0
# Traverse the scene tree, populate the shapes list
self.processChildNodes(xml_scene)
if self.shapes:
builder = MeshBuilder()
builder.setVertices(numpy.concatenate([shape.verts for shape in self.shapes]))
builder.setIndices(numpy.concatenate([shape.faces for shape in self.shapes]))
builder.calculateNormals()
builder.setFileName(file_name)
mesh_data = builder.build()
# Manually try and get the extents of the mesh_data. This should prevent nasty NaN issues from
# leaving the reader.
mesh_data.getExtents()
node = SceneNode()
node.setMeshData(mesh_data)
node.setSelectable(True)
node.setName(file_name)
else:
return None
except Exception:
Logger.logException("e", "Exception in X3D reader")
return None
return node
# ------------------------- XML tree traversal
def processNode(self, xml_node):
xml_node = self.resolveDefUse(xml_node)
if xml_node is None:
return
tag = xml_node.tag
if tag in ("Group", "StaticGroup", "CADAssembly", "CADFace", "CADLayer", "Collision"):
self.processChildNodes(xml_node)
if tag == "CADPart":
self.processTransform(xml_node) # TODO: split the parts
elif tag == "LOD":
self.processNode(xml_node[0])
elif tag == "Transform":
self.processTransform(xml_node)
elif tag == "Shape":
self.processShape(xml_node)
def processShape(self, xml_node):
# Find the geometry and the appearance inside the Shape
geometry = appearance = None
for sub_node in xml_node:
if sub_node.tag == "Appearance" and not appearance:
appearance = self.resolveDefUse(sub_node)
elif sub_node.tag in self.geometry_importers and not geometry:
geometry = self.resolveDefUse(sub_node)
# TODO: appearance is completely ignored. At least apply the material color...
if not geometry is None:
try:
self.verts = self.faces = [] # Safeguard
self.geometry_importers[geometry.tag](self, geometry)
m = self.transform.getData()
verts = m.dot(self.verts)[:3].transpose()
self.shapes.append(Shape(verts, self.faces, self.index_base, geometry.tag))
self.index_base += len(verts)
except Exception:
Logger.logException("e", "Exception in X3D reader while reading %s", geometry.tag)
# Returns the referenced node if the node has USE, the same node otherwise.
    # May return None if USE points at a nonexistent node
# In X3DOM, when both DEF and USE are in the same node, DEF is ignored.
# Big caveat: XML element objects may evaluate to boolean False!!!
# Don't ever use "if node:", use "if not node is None:" instead
def resolveDefUse(self, node):
USE = node.attrib.get("USE")
if USE:
return self.defs.get(USE, None)
DEF = node.attrib.get("DEF")
if DEF:
self.defs[DEF] = node
return node
def processChildNodes(self, node):
for c in node:
self.processNode(c)
Job.yieldThread()
# Since this is a grouping node, will recurse down the tree.
# According to the spec, the final transform matrix is:
# T * C * R * SR * S * -SR * -C
# Where SR corresponds to the rotation matrix to scaleOrientation
# C and SR are rather exotic. S, slightly less so.
def processTransform(self, node):
        rot = readRotation(node, "rotation", (0, 0, 1, 0)) # (angle, axisVector) tuple
trans = readVector(node, "translation", (0, 0, 0)) # Vector
scale = readVector(node, "scale", (1, 1, 1)) # Vector
center = readVector(node, "center", (0, 0, 0)) # Vector
        scale_orient = readRotation(node, "scaleOrientation", (0, 0, 1, 0)) # (angle, axisVector) tuple
# Store the previous transform; in Cura, the default matrix multiplication is in place
prev = Matrix(self.transform.getData()) # It's deep copy, I've checked
# The rest of transform manipulation will be applied in place
got_center = (center.x != 0 or center.y != 0 or center.z != 0)
T = self.transform
if trans.x != 0 or trans.y != 0 or trans.z != 0:
T.translate(trans)
if got_center:
T.translate(center)
if rot[0] != 0:
T.rotateByAxis(*rot)
if scale.x != 1 or scale.y != 1 or scale.z != 1:
got_scale_orient = scale_orient[0] != 0
if got_scale_orient:
T.rotateByAxis(*scale_orient)
# There is no in-place scale-by-vector operation in UM
S = Matrix()
S.setByScaleVector(scale)
T.multiply(S)
if got_scale_orient:
T.rotateByAxis(-scale_orient[0], scale_orient[1])
if got_center:
T.translate(-center)
self.processChildNodes(node)
self.transform = prev
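# Illustrative sketch, not part of the original reader: the X3D Transform node
# composes to T * C * R * SR * S * -SR * -C as noted above, i.e. scale about the
# scaleOrientation, then rotate, then translate, everything relative to the given
# center. The helper below only demonstrates that ordering with plain 4x4 numpy
# matrices; the helper name and its use of numpy.linalg.inv are assumptions for
# demonstration, processTransform above is the actual implementation.
def _composeX3DTransformSketch(T, C, R, SR, S):
    inv = numpy.linalg.inv  # -SR and -C denote the inverse rotation/translation
    return T.dot(C).dot(R).dot(SR).dot(S).dot(inv(SR)).dot(inv(C))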
# ------------------------- Geometry importers
# They are supposed to fill the self.verts and self.faces arrays, the caller will do the rest
# Primitives
def processGeometryBox(self, node):
(dx, dy, dz) = readFloatArray(node, "size", [2, 2, 2])
dx /= 2
dy /= 2
dz /= 2
self.reserveFaceAndVertexCount(12, 8)
# xz plane at +y, ccw
self.addVertex(dx, dy, dz)
self.addVertex(-dx, dy, dz)
self.addVertex(-dx, dy, -dz)
self.addVertex(dx, dy, -dz)
# xz plane at -y
self.addVertex(dx, -dy, dz)
self.addVertex(-dx, -dy, dz)
self.addVertex(-dx, -dy, -dz)
self.addVertex(dx, -dy, -dz)
self.addQuad(0, 1, 2, 3) # +y
self.addQuad(4, 0, 3, 7) # +x
self.addQuad(7, 3, 2, 6) # -z
self.addQuad(6, 2, 1, 5) # -x
self.addQuad(5, 1, 0, 4) # +z
self.addQuad(7, 6, 5, 4) # -y
# The sphere is subdivided into nr rings and ns segments
def processGeometrySphere(self, node):
r = readFloat(node, "radius", 0.5)
subdiv = readIntArray(node, "subdivision", None)
if subdiv:
if len(subdiv) == 1:
nr = ns = subdiv[0]
else:
(nr, ns) = subdiv
else:
nr = ns = DEFAULT_SUBDIV
lau = pi / nr # Unit angle of latitude (rings) for the given tessellation
lou = 2 * pi / ns # Unit angle of longitude (segments)
self.reserveFaceAndVertexCount(ns*(nr*2 - 2), 2 + (nr - 1)*ns)
# +y and -y poles
self.addVertex(0, r, 0)
self.addVertex(0, -r, 0)
# The non-polar vertices go from x=0, negative z plane counterclockwise -
# to -x, to +z, to +x, back to -z
for ring in range(1, nr):
for seg in range(ns):
self.addVertex(-r*sin(lou * seg) * sin(lau * ring),
r*cos(lau * ring),
-r*cos(lou * seg) * sin(lau * ring))
vb = 2 + (nr - 2) * ns # First vertex index for the bottom cap
# Faces go in order: top cap, sides, bottom cap.
# Sides go by ring then by segment.
# Caps
# Top cap face vertices go in order: down right up
# (starting from +y pole)
# Bottom cap goes: up left down (starting from -y pole)
for seg in range(ns):
self.addTri(0, seg + 2, (seg + 1) % ns + 2)
self.addTri(1, vb + (seg + 1) % ns, vb + seg)
# Sides
# Side face vertices go in order: down right upleft, downright up left
for ring in range(nr - 2):
tvb = 2 + ring * ns
# First vertex index for the top edge of the ring
bvb = tvb + ns
# First vertex index for the bottom edge of the ring
for seg in range(ns):
nseg = (seg + 1) % ns
self.addQuad(tvb + seg, bvb + seg, bvb + nseg, tvb + nseg)
def processGeometryCone(self, node):
r = readFloat(node, "bottomRadius", 1)
height = readFloat(node, "height", 2)
bottom = readBoolean(node, "bottom", True)
side = readBoolean(node, "side", True)
n = readInt(node, "subdivision", DEFAULT_SUBDIV)
d = height / 2
angle = 2 * pi / n
self.reserveFaceAndVertexCount((n if side else 0) + (n-2 if bottom else 0), n+1)
# Vertex 0 is the apex, vertices 1..n are the bottom
self.addVertex(0, d, 0)
for i in range(n):
self.addVertex(-r * sin(angle * i), -d, -r * cos(angle * i))
# Side face vertices go: up down right
if side:
for i in range(n):
self.addTri(1 + (i + 1) % n, 0, 1 + i)
if bottom:
for i in range(2, n):
self.addTri(1, i, i+1)
def processGeometryCylinder(self, node):
r = readFloat(node, "radius", 1)
height = readFloat(node, "height", 2)
bottom = readBoolean(node, "bottom", True)
side = readBoolean(node, "side", True)
top = readBoolean(node, "top", True)
n = readInt(node, "subdivision", DEFAULT_SUBDIV)
nn = n * 2
angle = 2 * pi / n
hh = height/2
self.reserveFaceAndVertexCount((nn if side else 0) + (n - 2 if top else 0) + (n - 2 if bottom else 0), nn)
# The seam is at x=0, z=-r, vertices go ccw -
# to pos x, to neg z, to neg x, back to neg z
for i in range(n):
rs = -r * sin(angle * i)
rc = -r * cos(angle * i)
self.addVertex(rs, hh, rc)
self.addVertex(rs, -hh, rc)
if side:
for i in range(n):
ni = (i + 1) % n
self.addQuad(ni * 2 + 1, ni * 2, i * 2, i * 2 + 1)
for i in range(2, nn-3, 2):
if top:
self.addTri(0, i, i+2)
if bottom:
self.addTri(1, i+1, i+3)
# Semi-primitives
def processGeometryElevationGrid(self, node):
dx = readFloat(node, "xSpacing", 1)
dz = readFloat(node, "zSpacing", 1)
nx = readInt(node, "xDimension", 0)
nz = readInt(node, "zDimension", 0)
height = readFloatArray(node, "height", False)
ccw = readBoolean(node, "ccw", True)
if nx <= 0 or nz <= 0 or len(height) < nx*nz:
return # That's weird, the wording of the standard suggests grids with zero quads are somehow valid
self.reserveFaceAndVertexCount(2*(nx-1)*(nz-1), nx*nz)
for z in range(nz):
for x in range(nx):
self.addVertex(x * dx, height[z*nx + x], z * dz)
for z in range(1, nz):
for x in range(1, nx):
self.addTriFlip((z - 1)*nx + x - 1, z*nx + x, (z - 1)*nx + x, ccw)
self.addTriFlip((z - 1)*nx + x - 1, z*nx + x - 1, z*nx + x, ccw)
def processGeometryExtrusion(self, node):
ccw = readBoolean(node, "ccw", True)
begin_cap = readBoolean(node, "beginCap", True)
end_cap = readBoolean(node, "endCap", True)
cross = readFloatArray(node, "crossSection", (1, 1, 1, -1, -1, -1, -1, 1, 1, 1))
cross = [(cross[i], cross[i+1]) for i in range(0, len(cross), 2)]
spine = readFloatArray(node, "spine", (0, 0, 0, 0, 1, 0))
spine = [(spine[i], spine[i+1], spine[i+2]) for i in range(0, len(spine), 3)]
orient = readFloatArray(node, "orientation", None)
if orient:
# This converts X3D's axis/angle rotation to a 3x3 numpy matrix
def toRotationMatrix(rot):
(x, y, z) = rot[:3]
a = rot[3]
s = sin(a)
c = cos(a)
t = 1-c
return numpy.array((
(x * x * t + c, x * y * t - z*s, x * z * t + y * s),
(x * y * t + z*s, y * y * t + c, y * z * t - x * s),
(x * z * t - y * s, y * z * t + x * s, z * z * t + c)))
orient = [toRotationMatrix(orient[i:i+4]) if orient[i+3] != 0 else None for i in range(0, len(orient), 4)]
scale = readFloatArray(node, "scale", None)
if scale:
scale = [numpy.array(((scale[i], 0, 0), (0, 1, 0), (0, 0, scale[i+1])))
if scale[i] != 1 or scale[i+1] != 1 else None for i in range(0, len(scale), 2)]
# Special treatment for the closed spine and cross section.
# Let's save some memory by not creating identical but distinct vertices;
# later we'll introduce conditional logic to link the last vertex with
# the first one where necessary.
crossClosed = cross[0] == cross[-1]
if crossClosed:
cross = cross[:-1]
nc = len(cross)
cross = [numpy.array((c[0], 0, c[1])) for c in cross]
ncf = nc if crossClosed else nc - 1
# Face count along the cross; for closed cross, it's the same as the
# respective vertex count
spine_closed = spine[0] == spine[-1]
if spine_closed:
spine = spine[:-1]
ns = len(spine)
spine = [Vector(*s) for s in spine]
nsf = ns if spine_closed else ns - 1
# This will be used for fallback, where the current spine point joins
# two collinear spine segments. No need to recheck the case of the
# closed spine/last-to-first point juncture; if there's an angle there,
# it would kick in on the first iteration of the main loop by spine.
def findFirstAngleNormal():
for i in range(1, ns - 1):
spt = spine[i]
z = (spine[i + 1] - spt).cross(spine[i - 1] - spt)
if z.length() > EPSILON:
return z
# All the spine points are collinear. Fall back to the rotated source
# XZ plane.
# TODO: handle the situation where the first two spine points match
if len(spine) < 2:
return Vector(0, 0, 1)
v = spine[1] - spine[0]
orig_y = Vector(0, 1, 0)
orig_z = Vector(0, 0, 1)
if v.cross(orig_y).length() > EPSILON:
# Spine at angle with global y - rotate the z accordingly
a = v.cross(orig_y) # Axis of rotation to get to the Z
(x, y, z) = a.normalized().getData()
s = a.length()/v.length()
c = sqrt(1-s*s)
t = 1-c
m = numpy.array((
(x * x * t + c, x * y * t + z*s, x * z * t - y * s),
(x * y * t - z*s, y * y * t + c, y * z * t + x * s),
(x * z * t + y * s, y * z * t - x * s, z * z * t + c)))
orig_z = Vector(*m.dot(orig_z.getData()))
return orig_z
self.reserveFaceAndVertexCount(2*nsf*ncf + (nc - 2 if begin_cap else 0) + (nc - 2 if end_cap else 0), ns*nc)
z = None
for i, spt in enumerate(spine):
if (i > 0 and i < ns - 1) or spine_closed:
snext = spine[(i + 1) % ns]
sprev = spine[(i - 1 + ns) % ns]
y = snext - sprev
vnext = snext - spt
vprev = sprev - spt
try_z = vnext.cross(vprev)
# Might be zero, then all kinds of fallback
if try_z.length() > EPSILON:
if z is not None and try_z.dot(z) < 0:
try_z = -try_z
z = try_z
elif not z: # No z, and no previous z.
# Look ahead, see if there's at least one point where
# spines are not collinear.
z = findFirstAngleNormal()
elif i == 0: # And non-crossed
snext = spine[i + 1]
y = snext - spt
z = findFirstAngleNormal()
else: # last point and not crossed
sprev = spine[i - 1]
y = spt - sprev
# If there's more than one point in the spine, z is already set.
# One point in the spine is an error anyway.
z = z.normalized()
y = y.normalized()
x = y.cross(z) # Already normalized
m = numpy.array(((x.x, y.x, z.x), (x.y, y.y, z.y), (x.z, y.z, z.z)))
# Columns are the unit vectors for the xz plane for the cross-section
if orient:
mrot = orient[i] if len(orient) > 1 else orient[0]
if not mrot is None:
m = m.dot(mrot) # Tested against X3DOM, the result matches, still not sure :(
if scale:
mscale = scale[i] if len(scale) > 1 else scale[0]
if not mscale is None:
m = m.dot(mscale)
# First the cross-section 2-vector is scaled,
# then rotated (which may make it a 3-vector),
# then applied to the xz plane unit vectors
sptv3 = numpy.array(spt.getData()[:3])
for cpt in cross:
v = sptv3 + m.dot(cpt)
self.addVertex(*v)
if begin_cap:
self.addFace([x for x in range(nc - 1, -1, -1)], ccw)
# Order of edges in the face: forward along cross, forward along spine,
# backward along cross, backward along spine, flipped if now ccw.
# This order is assumed later in the texture coordinate assignment;
# please don't change without syncing.
for s in range(ns - 1):
for c in range(ncf):
self.addQuadFlip(s * nc + c, s * nc + (c + 1) % nc,
(s + 1) * nc + (c + 1) % nc, (s + 1) * nc + c, ccw)
if spine_closed:
# The faces between the last and the first spine points
b = (ns - 1) * nc
for c in range(ncf):
self.addQuadFlip(b + c, b + (c + 1) % nc,
(c + 1) % nc, c, ccw)
if end_cap:
self.addFace([(ns - 1) * nc + x for x in range(0, nc)], ccw)
# Triangle meshes
# Helper for numerous nodes with a Coordinate subnode holding vertices
# That is: all triangle meshes and IndexedFaceSet
# num_faces can be a function, in case the face count is a function of vertex count
def startCoordMesh(self, node, num_faces):
ccw = readBoolean(node, "ccw", True)
self.readVertices(node) # This will allocate and fill the vertex array
if hasattr(num_faces, "__call__"):
num_faces = num_faces(self.getVertexCount())
self.reserveFaceCount(num_faces)
return ccw
def processGeometryIndexedTriangleSet(self, node):
index = readIntArray(node, "index", [])
num_faces = len(index) // 3
ccw = int(self.startCoordMesh(node, num_faces))
for i in range(0, num_faces*3, 3):
self.addTri(index[i + 1 - ccw], index[i + ccw], index[i+2])
def processGeometryIndexedTriangleStripSet(self, node):
strips = readIndex(node, "index")
ccw = int(self.startCoordMesh(node, sum([len(strip) - 2 for strip in strips])))
for strip in strips:
sccw = ccw # Running CCW value, reset for each strip
for i in range(len(strip) - 2):
self.addTri(strip[i + 1 - sccw], strip[i + sccw], strip[i+2])
sccw = 1 - sccw
def processGeometryIndexedTriangleFanSet(self, node):
fans = readIndex(node, "index")
ccw = int(self.startCoordMesh(node, sum([len(fan) - 2 for fan in fans])))
for fan in fans:
for i in range(1, len(fan) - 1):
self.addTri(fan[0], fan[i + 1 - ccw], fan[i + ccw])
def processGeometryTriangleSet(self, node):
ccw = int(self.startCoordMesh(node, lambda num_vert: num_vert // 3))
for i in range(0, self.getVertexCount(), 3):
self.addTri(i + 1 - ccw, i + ccw, i+2)
def processGeometryTriangleStripSet(self, node):
strips = readIntArray(node, "stripCount", [])
ccw = int(self.startCoordMesh(node, sum([n-2 for n in strips])))
vb = 0
for n in strips:
sccw = ccw
for i in range(n-2):
self.addTri(vb + i + 1 - sccw, vb + i + sccw, vb + i + 2)
sccw = 1 - sccw
vb += n
def processGeometryTriangleFanSet(self, node):
fans = readIntArray(node, "fanCount", [])
ccw = int(self.startCoordMesh(node, sum([n-2 for n in fans])))
vb = 0
for n in fans:
for i in range(1, n-1):
self.addTri(vb, vb + i + 1 - ccw, vb + i + ccw)
vb += n
# Quad geometries from the CAD module, might be relevant for printing
def processGeometryQuadSet(self, node):
ccw = self.startCoordMesh(node, lambda num_vert: 2*(num_vert // 4))
for i in range(0, self.getVertexCount(), 4):
self.addQuadFlip(i, i+1, i+2, i+3, ccw)
def processGeometryIndexedQuadSet(self, node):
index = readIntArray(node, "index", [])
num_quads = len(index) // 4
ccw = self.startCoordMesh(node, num_quads*2)
for i in range(0, num_quads*4, 4):
self.addQuadFlip(index[i], index[i+1], index[i+2], index[i+3], ccw)
# 2D polygon geometries
# Won't work for now, since Cura expects every mesh to have a nontrivial convex hull
# The only way around that is merging meshes.
def processGeometryDisk2D(self, node):
innerRadius = readFloat(node, "innerRadius", 0)
outerRadius = readFloat(node, "outerRadius", 1)
n = readInt(node, "subdivision", DEFAULT_SUBDIV)
angle = 2 * pi / n
self.reserveFaceAndVertexCount(n*4 if innerRadius else n-2, n*2 if innerRadius else n)
for i in range(n):
s = sin(angle * i)
c = cos(angle * i)
self.addVertex(outerRadius*c, outerRadius*s, 0)
if innerRadius:
self.addVertex(innerRadius*c, innerRadius*s, 0)
ni = (i+1) % n
self.addQuad(2*i, 2*ni, 2*ni+1, 2*i+1)
if not innerRadius:
for i in range(2, n):
self.addTri(0, i-1, i)
def processGeometryRectangle2D(self, node):
(x, y) = readFloatArray(node, "size", (2, 2))
self.reserveFaceAndVertexCount(2, 4)
self.addVertex(-x/2, -y/2, 0)
self.addVertex(x/2, -y/2, 0)
self.addVertex(x/2, y/2, 0)
self.addVertex(-x/2, y/2, 0)
self.addQuad(0, 1, 2, 3)
def processGeometryTriangleSet2D(self, node):
verts = readFloatArray(node, "vertices", ())
num_faces = len(verts) // 6
verts = [(verts[i], verts[i+1], 0) for i in range(0, 6 * num_faces, 2)]
self.reserveFaceAndVertexCount(num_faces, num_faces * 3)
for vert in verts:
self.addVertex(*vert)
# The front face is on the +Z side, so CCW is a variable
for i in range(0, num_faces*3, 3):
a = Vector(*verts[i+2]) - Vector(*verts[i])
b = Vector(*verts[i+1]) - Vector(*verts[i])
self.addTriFlip(i, i+1, i+2, a.x*b.y > a.y*b.x)
# General purpose polygon mesh
def processGeometryIndexedFaceSet(self, node):
faces = readIndex(node, "coordIndex")
ccw = self.startCoordMesh(node, sum([len(face) - 2 for face in faces]))
for face in faces:
if len(face) == 3:
self.addTriFlip(face[0], face[1], face[2], ccw)
elif len(face) > 3:
self.addFace(face, ccw)
geometry_importers = {
"IndexedFaceSet": processGeometryIndexedFaceSet,
"IndexedTriangleSet": processGeometryIndexedTriangleSet,
"IndexedTriangleStripSet": processGeometryIndexedTriangleStripSet,
"IndexedTriangleFanSet": processGeometryIndexedTriangleFanSet,
"TriangleSet": processGeometryTriangleSet,
"TriangleStripSet": processGeometryTriangleStripSet,
"TriangleFanSet": processGeometryTriangleFanSet,
"QuadSet": processGeometryQuadSet,
"IndexedQuadSet": processGeometryIndexedQuadSet,
"TriangleSet2D": processGeometryTriangleSet2D,
"Rectangle2D": processGeometryRectangle2D,
"Disk2D": processGeometryDisk2D,
"ElevationGrid": processGeometryElevationGrid,
"Extrusion": processGeometryExtrusion,
"Sphere": processGeometrySphere,
"Box": processGeometryBox,
"Cylinder": processGeometryCylinder,
"Cone": processGeometryCone
}
# Parses the Coordinate.@point field, fills the verts array.
def readVertices(self, node):
for c in node:
if c.tag == "Coordinate":
c = self.resolveDefUse(c)
if not c is None:
pt = c.attrib.get("point")
if pt:
# allow the list of float values in 'point' attribute to
# be separated by commas or whitespace as per spec of
# XML encoding of X3D
# Ref ISO/IEC 19776-1:2015 : Section 5.1.2
co = [float(x) for vec in pt.split(',') for x in vec.split()]
num_verts = len(co) // 3
self.verts = numpy.empty((4, num_verts), dtype=numpy.float32)
self.verts[3,:] = numpy.ones((num_verts), dtype=numpy.float32)
# Group by three
for i in range(num_verts):
self.verts[:3,i] = co[3*i:3*i+3]
# Mesh builder helpers
def reserveFaceAndVertexCount(self, num_faces, num_verts):
# Unlike the Cura MeshBuilder, we use 4-vectors stored as columns for easier transform
self.verts = numpy.zeros((4, num_verts), dtype=numpy.float32)
self.verts[3,:] = numpy.ones((num_verts), dtype=numpy.float32)
self.num_verts = 0
self.reserveFaceCount(num_faces)
def reserveFaceCount(self, num_faces):
self.faces = numpy.zeros((num_faces, 3), dtype=numpy.int32)
self.num_faces = 0
def getVertexCount(self):
return self.verts.shape[1]
def addVertex(self, x, y, z):
self.verts[0, self.num_verts] = x
self.verts[1, self.num_verts] = y
self.verts[2, self.num_verts] = z
self.num_verts += 1
# Indices are 0-based for this shape, but they won't be zero-based in the merged mesh
def addTri(self, a, b, c):
self.faces[self.num_faces, 0] = self.index_base + a
self.faces[self.num_faces, 1] = self.index_base + b
self.faces[self.num_faces, 2] = self.index_base + c
self.num_faces += 1
def addTriFlip(self, a, b, c, ccw):
if ccw:
self.addTri(a, b, c)
else:
self.addTri(b, a, c)
# Needs to be convex, but not necessarily planar
# Assumed ccw, cut along the ac diagonal
def addQuad(self, a, b, c, d):
self.addTri(a, b, c)
self.addTri(c, d, a)
def addQuadFlip(self, a, b, c, d, ccw):
if ccw:
self.addTri(a, b, c)
self.addTri(c, d, a)
else:
self.addTri(a, c, b)
self.addTri(c, a, d)
# Arbitrary polygon triangulation.
# Doesn't assume convexity and doesn't check the "convex" flag in the file.
# Works by the "cutting of ears" algorithm:
# - Find an outer vertex with the smallest angle and no vertices inside its adjacent triangle
# - Remove the triangle at that vertex
# - Repeat until done
# Vertex coordinates are supposed to be already set
def addFace(self, indices, ccw):
# Resolve indices to coordinates for faster math
face = [Vector(data=self.verts[0:3, i]) for i in indices]
# Need a normal to the plane so that we can know which vertices form inner angles
normal = findOuterNormal(face)
if not normal: # Couldn't find an outer edge, non-planar polygon maybe?
return
# Find the vertex with the smallest inner angle and no points inside, cut off. Repeat until done
n = len(face)
vi = [i for i in range(n)] # We'll be using this to kick vertices from the face
while n > 3:
max_cos = EPSILON # We don't want to check anything on Pi angles
i_min = 0 # max cos corresponds to min angle
for i in range(n):
inext = (i + 1) % n
iprev = (i + n - 1) % n
v = face[vi[i]]
next = face[vi[inext]] - v
prev = face[vi[iprev]] - v
nextXprev = next.cross(prev)
if nextXprev.dot(normal) > EPSILON: # If it's an inner angle
cos = next.dot(prev) / (next.length() * prev.length())
if cos > max_cos:
# Check if there are vertices inside the triangle
no_points_inside = True
for j in range(n):
if j != i and j != iprev and j != inext:
vx = face[vi[j]] - v
if pointInsideTriangle(vx, next, prev, nextXprev):
no_points_inside = False
break
if no_points_inside:
max_cos = cos
i_min = i
self.addTriFlip(indices[vi[(i_min + n - 1) % n]], indices[vi[i_min]], indices[vi[(i_min + 1) % n]], ccw)
vi.pop(i_min)
n -= 1
self.addTriFlip(indices[vi[0]], indices[vi[1]], indices[vi[2]], ccw)
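# Minimal standalone sketch of the "cutting of ears" idea used by addFace above.
# This helper is an illustration only (it is not called by the reader): it
# triangulates a simple CCW polygon given as (x, y) tuples by repeatedly cutting
# off a convex corner with no other vertex inside its triangle. It assumes a
# simple, counterclockwise polygon in general position.
def _earClipSketch(poly):
    def cross(o, a, b):
        # z component of (a - o) x (b - o)
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
    def blocks(p, a, b, c):
        # p inside or on the border of CCW triangle abc; such a point blocks the ear
        return cross(a, b, p) >= 0 and cross(b, c, p) >= 0 and cross(c, a, p) >= 0
    idx = list(range(len(poly)))
    tris = []
    while len(idx) > 3:
        for k in range(len(idx)):
            ia, ib, ic = idx[k - 1], idx[k], idx[(k + 1) % len(idx)]
            a, b, c = poly[ia], poly[ib], poly[ic]
            if cross(a, b, c) <= 0:
                continue # reflex corner, not an ear
            if any(blocks(poly[j], a, b, c) for j in idx if j not in (ia, ib, ic)):
                continue # another vertex would end up inside this triangle
            tris.append((ia, ib, ic))
            idx.pop(k)
            break
        else:
            break # degenerate input; give up rather than loop forever
    tris.append(tuple(idx))
    return tris
# Example: _earClipSketch([(0, 0), (2, 0), (2, 2), (1, 1), (0, 2)]) returns three
# index triples that cover this concave polygon without spilling into the notch.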
# ------------------------------------------------------------
# X3D field parsers
# ------------------------------------------------------------
def readFloatArray(node, attr, default):
s = node.attrib.get(attr)
if not s:
return default
return [float(x) for x in s.split()]
def readIntArray(node, attr, default):
s = node.attrib.get(attr)
if not s:
return default
return [int(x, 0) for x in s.split()]
def readFloat(node, attr, default):
s = node.attrib.get(attr)
if not s:
return default
return float(s)
def readInt(node, attr, default):
s = node.attrib.get(attr)
if not s:
return default
return int(s, 0)
def readBoolean(node, attr, default):
s = node.attrib.get(attr)
if not s:
return default
return s.lower() == "true"
def readVector(node, attr, default):
v = readFloatArray(node, attr, default)
return Vector(v[0], v[1], v[2])
def readRotation(node, attr, default):
v = readFloatArray(node, attr, default)
return (v[3], Vector(v[0], v[1], v[2]))
# Returns the -1-separated runs
def readIndex(node, attr):
v = readIntArray(node, attr, [])
chunks = []
chunk = []
for i in range(len(v)):
if v[i] == -1:
if chunk:
chunks.append(chunk)
chunk = []
else:
chunk.append(v[i])
if chunk:
chunks.append(chunk)
return chunks
# Given a face as a sequence of vectors, returns a normal to the polygon plane that forms a right triple
# with a vector along the polygon sequence and a vector backwards
def findOuterNormal(face):
n = len(face)
for i in range(n):
for j in range(i+1, n):
edge = face[j] - face[i]
if edge.length() > EPSILON:
edge = edge.normalized()
prev_rejection = Vector()
is_outer = True
for k in range(n):
if k != i and k != j:
pt = face[k] - face[i]
pte = pt.dot(edge)
rejection = pt - edge*pte
if rejection.dot(prev_rejection) < -EPSILON: # points on both sides of the edge - not an outer one
is_outer = False
break
elif rejection.length() > prev_rejection.length(): # Pick a greater rejection for numeric stability
prev_rejection = rejection
if is_outer: # Found an outer edge, prev_rejection is the rejection inside the face. Generate a normal.
return edge.cross(prev_rejection)
return False
# Given two *collinear* vectors a and b, returns the coefficient that takes b to a.
# No error handling.
# For stability, taking the ratio between the biggest coordinates would be better...
def ratio(a, b):
if b.x > EPSILON or b.x < -EPSILON:
return a.x / b.x
elif b.y > EPSILON or b.y < -EPSILON:
return a.y / b.y
else:
return a.z / b.z
def pointInsideTriangle(vx, next, prev, nextXprev):
vxXprev = vx.cross(prev)
r = ratio(vxXprev, nextXprev)
if r < 0:
return False
vxXnext = vx.cross(next)
s = -ratio(vxXnext, nextXprev)
return s > 0 and (s + r) < 1
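# Illustrative note (an assumption, not used elsewhere in the reader):
# pointInsideTriangle is a barycentric test. Writing vx = r*next + s*prev, the two
# cross products give r = ratio(vx x prev, next x prev) and s = -ratio(vx x next,
# next x prev); the point lies inside the triangle spanned by next and prev when
# r >= 0, s > 0 and r + s < 1, which is exactly what the function returns. The
# sketch below exercises it with the UM Vector API used above; it is never called.
def _pointInsideTriangleSketch():
    nxt = Vector(1, 0, 0)
    prv = Vector(0, 1, 0)
    nxp = nxt.cross(prv)
    assert pointInsideTriangle(Vector(0.25, 0.25, 0), nxt, prv, nxp) # r = s = 0.25
    assert not pointInsideTriangle(Vector(0.75, 0.75, 0), nxt, prv, nxp) # r + s = 1.5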
|
Ultimaker/Cura
|
plugins/X3DReader/X3DReader.py
|
Python
|
lgpl-3.0
| 35,800
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('msgs', '0057_update_triggers'),
('flows', '0053_auto_20160414_0642'),
]
operations = [
migrations.AddField(
model_name='flowstep',
name='broadcasts',
field=models.ManyToManyField(help_text='Any broadcasts that are associated with this step (only sent)', related_name='steps', to='msgs.Broadcast'),
),
]
|
ewheeler/rapidpro
|
temba/flows/migrations/0054_flowstep_broadcasts.py
|
Python
|
agpl-3.0
| 555
|
from django.contrib.auth.models import User
from django.test import TestCase
from hc.api.models import Check
class AddCheckTestCase(TestCase):
def setUp(self):
self.alice = User(username="alice")
self.alice.set_password("password")
self.alice.save()
def test_it_works(self):
url = "/checks/add/"
self.client.login(username="alice", password="password")
r = self.client.post(url)
assert r.status_code == 302
assert Check.objects.count() == 1
|
avoinsystems/healthchecks
|
hc/front/tests/test_add_check.py
|
Python
|
bsd-3-clause
| 518
|
#!/usr/bin/python3
#
# from:
# https://stackoverflow.com/questions/19457227/how-to-print-like-printf-in-python3
#
import sys
def printf(format, *args):
sys.stdout.write(format % args)
# Example output:
i = 7
pi = 3.14159265359
printf("hi there, i=%d, pi=%.2f\n", i, pi)
# hi there, i=7, pi=3.14
s = 'aaa'
try:
printf("s=%d\n", s)
except Exception as e:
print("Caught ... ", e.__class__.__name__);
print(e)
try:
printf("s=%y\n", s)
except Exception as e:
print("Caught ... ", e.__class__.__name__);
print(e)
exit()
|
ombt/analytics
|
apex/python/ofc_online_python_tutorial/misc/printf.py
|
Python
|
mit
| 552
|
#!/usr/bin/env python
# Copyright (C) 2010 Sebastian Bittl
# This file is part of Relaying Schemes Implementation.
# Relaying Schemes Implementation is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Relaying Schemes Implementation is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Relaying Schemes Implementation. If not, see <http://www.gnu.org/licenses/>.
from Tkinter import *
from tkFileDialog import *
from subprocess import Popen
sys.argv = [""]
sys.argv[0] = "GUI" # clue to distinguish in the alternating module whether there is a GUI or not
from relaying import *
import os # operating system functionality (e.g. signals)
import re # regular expressions
import ctypes # for renaming the subprocess
class App:
myframe = None
# variables for basic set-up
nc = None
nc_button = None
myself = None
direct_link = None
direct_button = None
frequency = None
rate = None
tx_gain = None
rx_gain = None
side = None
# variables for relay control
relay_frame = None
timeout = 2
# variables for node control
node_id = None
label_id = None
A_button = None
B_button = None
node_frame = None
file_source = None
file_name_field = None
file_name_button = None
fixed_data = None
# variables for displaying results
frame_errors = None
throughput = None
tx_num = None
tx_data = None
rx_num = None
rx_data = None
num_timeouts = None
running = False
timeout_entry = None
timeout_label = None
# variables for controlling the flow graph
tb_tx = None
tb_rx = None
# variables for controlling the run state
run_button = None
cancel_button = None
pid = None
def __init__(self, master):
master.title("GNU Radio Relaying")
network_frame = Frame(master)
network_frame.grid(sticky=NW)
Label(network_frame, text="Network set-up:", font=("Times", "12", "bold underline")).grid(sticky=NW)
frame = Frame(network_frame)
self.myframe = frame
frame.grid(column=0, row=1, sticky=NW)
# Selection for enabling peer2peer, one way or two way communication
self.dualway = IntVar()
p2p = Radiobutton(frame, state=NORMAL, text="Point to point (Relay <-> A)", command=self.setValue, variable=self.dualway, value=0)
p2p.grid(sticky=NW)
Radiobutton(frame, state=NORMAL, text="One-way relaying (A -> B)", command=self.setValue, variable=self.dualway, value=1).grid(sticky=NW)
Radiobutton(frame, state=NORMAL, text="Two-way relaying (A <-> B)", command=self.setValue, variable=self.dualway, value=2).grid(sticky=NW)
# Checkbox for enabling network coding
self.nc = IntVar()
self.nc_button = Checkbutton(frame, state=NORMAL, text="Network Coding", variable=self.nc)
self.nc_button.grid(sticky=NW)
# Checkbox for enabling usage of direct link
self.direct_link = IntVar()
self.direct_button = Checkbutton(frame, state=NORMAL, text="Direct link usage", variable=self.direct_link)
self.direct_button.grid(sticky=NW)
# list with different channel coding possibilities
Label(frame, text="Channel Coding:").grid(sticky=NW)
self.channel_code = IntVar()
cc = Radiobutton(frame, state=NORMAL, text="No channel coding", command=self.setValue, variable=self.channel_code, value=0)
cc.grid(sticky=NW)
Radiobutton(frame, state=NORMAL, text="Reed-Solomon Code", command=self.setValue, variable=self.channel_code, value=1).grid(sticky=NW)
#space
space_frame = Frame(network_frame)
space_frame.grid(column=1, row=1, sticky=NW)
Label(space_frame, text=" ").grid()
# one out of multiple selection of the type of the station
type_frame_general = Frame(network_frame)
type_frame_general.grid(column=2, row=1, sticky=NE)
type_frame = Frame(type_frame_general)
type_frame.grid()
Label(type_frame, text="Role of this station in the network:").grid(sticky=NW)
self.myself = IntVar()
relay_button = Radiobutton(type_frame, text="Relay", variable=self.myself, value=0, command=self.type_selection)
relay_button.grid(sticky=NW)
node_button = Radiobutton(type_frame, text="Node", variable=self.myself, value=1, command=self.type_selection)
node_button.grid(sticky=NW)
# selection of the node-ID
id_frame = Frame(type_frame_general)
id_frame.grid(sticky=NW)
self.label_id = Label(id_frame, text="Select an ID:")
self.label_id.grid(sticky=NW)
self.node_id = StringVar()
self.A_button = Radiobutton(id_frame, text="A", variable=self.node_id, value='A')
self.A_button.grid(sticky=NW)
self.B_button = Radiobutton(id_frame, text="B", variable=self.node_id, value='B')
self.B_button.grid(sticky=NW)
# general parameters which must be equal for the whole network
parameter_frame = Frame(network_frame)
parameter_frame.grid(sticky=SW)
# frequency
Label(parameter_frame, text="Frequency: ").grid(sticky=NW)
self.frequency=StringVar()
Entry(parameter_frame, textvariable=self.frequency, width=5).grid(column=1, row=0, sticky=NW)
self.frequency.set('5375M')
# rate
Label(parameter_frame, text="Rate: ").grid(sticky=NW)
self.rate = StringVar()
Entry(parameter_frame, textvariable=self.rate, width=5).grid(column=1, row=1, sticky=NW)
self.rate.set('1024k') #512 is also possible
# setting of general settings for this station
settings_frame = Frame(master)
settings_frame.grid(sticky=NW)
Label(settings_frame, text="General settings of this station:", font=("Times", "12", "bold underline")).grid(sticky=NW)
settings_frame_details = Frame(settings_frame)
settings_frame_details.grid(row=1, sticky=NW)
# tx-gain
Label(settings_frame_details, text="TX-gain: ").grid(sticky=NW)
self.tx_gain=StringVar()
Entry(settings_frame_details, textvariable=self.tx_gain, width=3).grid(column=1, row=0, sticky=NW)
self.tx_gain.set('20')
# rx-gain
Label(settings_frame_details, text="RX-gain: ").grid(sticky=NW)
self.rx_gain = StringVar()
Entry(settings_frame_details, textvariable=self.rx_gain, width=3).grid(column=1, row=1, sticky=NW)
self.rx_gain.set('46')
add_settings_frame = Frame(master)
add_settings_frame.grid(sticky=NW)
# side
self.side=StringVar()
Label(add_settings_frame, text='Side of daughterboard:').grid(sticky=NW)
db = Radiobutton(add_settings_frame, text='A', variable=self.side, value='A', state=NORMAL)
db.grid(sticky=NW)
Radiobutton(add_settings_frame, text='B', variable=self.side, value='B', state=NORMAL).grid(sticky=NW)
db.invoke()
# setting of relay settings
self.relay_frame= Frame(master)
self.relay_frame.grid(sticky=NW)
Label(self.relay_frame, text="Relay settings:", font=("Times", "12", "bold underline")).grid(sticky=NW)
# timeout:
Label(self.relay_frame, text="Timeoutlimit (in s): ").grid(sticky=NW)
self.timeout=IntVar()
Entry(self.relay_frame, textvariable=self.timeout, width=4).grid(column=1, row=1, sticky=NW)
self.timeout.set(2) # give a default value
# setting of node settings
self.node_frame= Frame(master)
self.node_frame.grid(sticky=NW)
Label(self.node_frame, text="Node settings:", font=("Times", "12", "bold underline")).grid(sticky=NW)
Label(self.node_frame, text="Burst size:").grid(sticky=NW)
self.burst_size = IntVar()
self.burst_size_field = Entry(self.node_frame, textvariable=self.burst_size, width=5)
self.burst_size_field.grid(row=1,sticky=N)
self.burst_size.set(1)
Label(self.node_frame, text="Select data source:").grid(sticky=NW)
self.type_transmission = StringVar()
self.fixed_data = Radiobutton(self.node_frame, text="Fixed data (213 times a 5)", variable=self.type_transmission, value='C', command=self.source_select)
self.fixed_data.grid(sticky=NW)
Radiobutton(self.node_frame, text="File transfer", variable=self.type_transmission, value='F', command=self.source_select).grid(sticky=NW)
self.file_source = StringVar()
self.file_name_field = Entry(self.node_frame, textvariable=self.file_source, width=25)
self.file_name_field.grid(column=0, row=5, sticky=NW)
self.file_name_button = Button(self.node_frame, text="Select", command=self.select_file)
self.file_name_button.grid(column=1, row=5, sticky=NW)
Radiobutton(self.node_frame, text="Video transfer", variable=self.type_transmission, value='V', command=self.source_select).grid(sticky=NW)
Radiobutton(self.node_frame, text="Random data (from /dev/urandom)", variable=self.type_transmission, value='R', command=self.source_select).grid(sticky=NW)
# space
space_frame = Frame(master)
space_frame.grid(column=3)
Label(space_frame, text=" ").grid()
# part of the window to display measurement results
result_frame = Frame(master)
result_frame.grid(column=4, row = 0, sticky=NW)
Label(result_frame, text="Measurement Results:", font=("Times", "12", "bold underline")).grid(row=0, sticky=NW)
result_frame_details = Frame(result_frame)
result_frame_details.grid(row=1, sticky=NW)
# frame error rate
Label(result_frame_details, text="Acutal frame error rate (in %): ").grid(sticky=NW)
self.frame_errors = IntVar()
Entry(result_frame_details, textvariable=self.frame_errors, width=10).grid(column=1, row=0, sticky=NW)
# throughput
Label(result_frame_details, text="Actual throughput (bit / second): ").grid(sticky=NW)
self.throughput = IntVar()
Entry(result_frame_details, textvariable=self.throughput, width=10).grid(column=1, row=1, sticky=NW)
# empty line
Label(result_frame_details, text=" ").grid(row=2, sticky=NW)
# transmitted packets
Label(result_frame_details, text="Number of transmitted packets: ").grid(sticky=NW)
self.tx_num = StringVar()
Entry(result_frame_details, textvariable=self.tx_num, width=10).grid(column=1, row=3, sticky=NW)
self.tx_num.set("0")
Label(result_frame_details, text="Amount of transmitted data (byte): ").grid(sticky=NW)
self.tx_data = StringVar()
Entry(result_frame_details, textvariable=self.tx_data, width=10).grid(column=1, row=4, sticky=NW)
self.tx_data.set("0")
# empty line
Label(result_frame_details, text=" ").grid(row=5, sticky=NW)
# received packets
Label(result_frame_details, text="Number of received packets: ").grid(sticky=NW)
self.rx_num = StringVar()
Entry(result_frame_details, textvariable=self.rx_num, width=10).grid(column=1, row=6, sticky=NW)
self.rx_num.set("0")
Label(result_frame_details, text="Amount of received data (byte): ").grid(sticky=NW)
self.rx_data = StringVar()
Entry(result_frame_details, textvariable=self.rx_data, width=10).grid(column=1, row=7, sticky=NW)
self.rx_data.set("0")
# empty line
Label(result_frame_details, text=" ").grid(row=8, sticky=NW)
# number of timeouts that occurred; only shown for the relay
self.timeout_label = Label(result_frame_details, text="Number of timeouts that occurred: ")
self.timeout_label.grid(sticky=NW)
self.num_timeouts = StringVar()
self.timeout_entry = Entry(result_frame_details, textvariable=self.num_timeouts, width=10)
self.timeout_entry.grid(column=1, row=9, sticky=NW)
self.num_timeouts.set("0")
# bottom part of the window
lower_frame = Frame(master)
lower_frame.grid(column=4, sticky=SE)
# buttons for starting, stopping gnuradio and quitting the whole program
self.run_button = Button(lower_frame, text="Run", command=self.runstate)
self.run_button.grid(column=0)
self.cancel_button = Button(lower_frame, text="Cancel", command=self.stop, state = DISABLED)
self.cancel_button.grid(column=1, row=0)
Button(lower_frame, text="QUIT", fg="red", command=frame.quit).grid(column=2, row=0, sticky=SE)
p2p.invoke()
cc.invoke()
self.A_button.invoke()
node_button.invoke()
relay_button.invoke()
signal.signal(signal.SIGALRM, self.timeout_handler)
libc = ctypes.CDLL("libc.so.6")
libc.prctl(15, 'relaying\x00', 0, 0, 0)
def type_selection(self):
if self.myself.get() == 0:
self.label_id["state"]=DISABLED
self.A_button["state"]=DISABLED
self.B_button["state"]=DISABLED
# disable the frame with the node settings
for child in self.node_frame.winfo_children():
child["state"]=DISABLED
# enable the frame with the relay settings
for child in self.relay_frame.winfo_children():
child["state"]=NORMAL
self.direct_button["state"] = DISABLED
self.timeout_label["state"] = NORMAL
self.timeout_entry["state"] = NORMAL
else:
self.label_id["state"]=NORMAL
self.A_button["state"]=NORMAL
self.B_button["state"]=NORMAL
# enable the frame with the node settings
for child in self.node_frame.winfo_children():
child["state"]=NORMAL
self.fixed_data.invoke()
# disable the frame with the relay settings
for child in self.relay_frame.winfo_children():
child["state"]=DISABLED
if (self.dualway.get() != 0):
self.direct_button["state"] = NORMAL
self.timeout_label["state"] = DISABLED
self.timeout_entry["state"] = DISABLED
def select_file(self):
myPath = askopenfilename(filetypes=[("all formats", "*")])
self.file_source.set(myPath)
def source_select(self):
if self.type_transmission.get() == 'F': # only when file transfer is selected
self.file_name_field["state"]=NORMAL
self.file_name_button["state"]=NORMAL
else:
self.file_name_field["state"]=DISABLED
self.file_name_button["state"]=DISABLED
def runstate(self):
if self.running == True:
print "Already running!"
else:
self.running = True
self.run_button["state"] = DISABLED
self.cancel_button["state"] = NORMAL
#reset all statistics
self.frame_errors.set(str(0))
self.throughput.set(str(0)) # bit / second
self.tx_num.set(str(0))
self.tx_data.set(str(0)) # in byte
self.rx_num.set(str(0))
self.rx_data.set(str(0)) # in byte
self.num_timeouts.set(str(0))
if self.myself.get() == 0:
relay = True
else:
relay = False
if (self.dualway.get() == 0) or (self.dualway.get() == 1):
bidirectional = False
else:
bidirectional = True
if self.dualway.get() == 0:
benchmark = True
else:
benchmark = False
timeout = self.timeout.get()
burst = self.burst_size.get()
if self.nc.get() == 0:
nc = False
else:
nc = True
if self.direct_link.get() == 0:
direct_link = False
else:
direct_link = True
try:
if (self.type_transmission.get() == 'V') and (self.node_id.get() == 'B'):
self.video = True
else:
self.video = False
self.read, self.write = os.pipe() # these are file descriptors, not file objects
self.pid = os.fork()
if self.pid != 0: # parent
signal.signal(signal.SIGCONT, self.update_statistic)
else: # child
self.myframe.quit()
libc = ctypes.CDLL("libc.so.6")
libc.prctl(15, 'GNURadio\x00', 0, 0, 0)
main(relay, self.side.get(), self.frequency.get(), self.rate.get(), self.tx_gain.get(), self.rx_gain.get(), self.type_transmission.get(), nc, direct_link, bidirectional, benchmark, self, self.write, timeout, self.node_id.get(), burst, self.channel_code.get())
except:
print "Stopped due to exception!"
pass # stopped due to user interaction or due to timeout
def stop(self):
if self.running == True:
print "Stopping the running gnuradio."
signal.alarm(0)
signal.alarm(5) # start timeout, the following commands have 5 s to complete
os.kill(self.pid, signal.SIGTERM)
if self.video == True:
Popen(["killall", "-9", "vlc"]) # shut down VLC
try:
os.waitpid(self.pid, 0) # there will be a last update of the gui which will interrupt this
except:
pass
self.running = False
self.cancel_button["state"] = DISABLED
self.run_button["state"] = NORMAL # it is assumed that gnuradio can be started again
else:
print "Nothing to stop!"
signal.alarm(0)
def timeout_handler(self, signum, frame):
Popen(["killall", "-9", "GNURadio"]) # killing gnuradio as it did not terminate normally
def setValue(self):
if (self.dualway.get() == 0):
self.nc_button["state"]=DISABLED
self.nc.set(False)
self.direct_button["state"]=DISABLED
self.direct_link.set(False)
elif (self.dualway.get() == 1):
self.nc_button["state"]=DISABLED
self.nc.set(False)
if self.myself.get() != 0:
self.direct_button["state"]=NORMAL
else:
self.nc_button["state"]=NORMAL
if self.myself.get() != 0:
self.direct_button["state"]=NORMAL
def update_statistic(self, signum, frame):
try:
update = os.read(self.read, 100) # read the update information from the pipe
except:
print "update failed"
#print "I read from pipe nr. " + str(self.read)
content = re.match(r"(\w+) (\w+) (\w+) (\w+\.\w+) (\w+) (\w+) (\w+)", update) # 7 parts
if content is not None:
#print "updated my statistic!"
self.update_statistic_direct(int(content.group(1)), int(content.group(2)), int(content.group(3)), float(content.group(4)), int(content.group(5)), int(content.group(6)), int(content.group(7)))
else:
print str(update)
print "update failed"
# function to update the displayed statistical information inside the GUI
def update_statistic_direct(self, rx_num, rx_right, tx_num, elapsed_time, rx_data, tx_data, timeouts = 0):
#error rate
if (rx_num != 0) or (tx_num != 0): # only if any packet has been sent or received
if rx_num != 0:
self.frame_errors.set(str((rx_num - rx_right + 0.0) / rx_num * 100)) # in percent
#throughput
self.throughput.set(str((rx_data + 0.0)*8/elapsed_time)) # bit / second
self.tx_num.set(tx_num)
self.tx_data.set(str(tx_data)) # in byte
self.rx_num.set(rx_num)
self.rx_data.set(str(rx_data)) # in byte
#timeouts
if self.myself.get() == 0: # only the relay has a timeout
self.num_timeouts.set(str(timeouts))
else: # data in pipe consists only of zeros, nothing to do
pass
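# Illustrative sketch (an assumption; the real writer lives in the relaying module):
# whatever the child process writes into the pipe has to match the 7-field pattern
# parsed in update_statistic above, i.e.
# "rx_num rx_right tx_num elapsed_time rx_data tx_data timeouts"
# with elapsed_time formatted as a float. A producer could build such a line like this:
def format_status_line(rx_num, rx_right, tx_num, elapsed_time, rx_data, tx_data, timeouts=0):
    return "%d %d %d %.3f %d %d %d" % (rx_num, rx_right, tx_num, elapsed_time, rx_data, tx_data, timeouts)
# e.g. format_status_line(10, 9, 12, 1.5, 2130, 2556) -> "10 9 12 1.500 2130 2556 0"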
root = Tk()
app = App(root)
root.mainloop()
sys.exit()
|
UpYou/relay
|
gui.py
|
Python
|
gpl-3.0
| 20,915
|
# -*- coding: utf-8 -*-
import socket
import fcntl
import struct
import time
import os
from bartendro import app
from flask import Flask, request, render_template, Response
from werkzeug.exceptions import Unauthorized
from flask.ext.login import login_required
from bartendro.model.version import DatabaseVersion
def get_ip_address_from_interface(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, # SIOCGIFADDR
                                     struct.pack('256s', ifname[:15]))[20:24])
except IOError:
return "[none]"
@app.route('/admin/options')
@login_required
def admin_options():
ver = DatabaseVersion.query.one()
recover = not request.remote_addr.startswith("10.0.0")
wlan0 = get_ip_address_from_interface("wlan0")
eth0 = get_ip_address_from_interface("eth0")
return render_template("admin/options",
options=app.options,
show_passwd_recovery=recover,
title="Options",
eth0=eth0,
wlan0=wlan0,
version = app.version,
schema = ver.schema)
@app.route('/admin/lost-passwd')
def admin_lost_passwd():
if request.remote_addr.startswith("10.0.0"):
raise Unauthorized
return render_template("admin/lost-passwd",
options=app.options)
@app.route('/admin/upload')
@login_required
def admin_upload_db():
return render_template("admin/upload",
title="Upload database",
options=app.options)
|
wyolum/bartendro
|
ui/bartendro/view/admin/options.py
|
Python
|
gpl-2.0
| 1,689
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test_base import TestBase
from tests.st.utils.docker_host import DockerHost
"""
Test calicoctl status
Most of the status output is checked by the BGP tests, so this module just
contains a simple return code check.
"""
class TestStatus(TestBase):
def test_status(self):
"""
Test that the status command can be executed.
"""
with DockerHost('host', dind=False, start_calico=False) as host:
host.calicoctl("status")
|
alexhersh/calico-docker
|
tests/st/test_status.py
|
Python
|
apache-2.0
| 1,054
|
#!/usr/bin/env python
__usage__ = """
A simple script to automatically produce sitemaps for a webserver,
in the Google Sitemap Protocol (GSP).
Usage: python sitemap_gen.py --config=config.xml [--help] [--testing]
--config=config.xml, specifies config file location
--help, displays usage message
--testing, specified when user is experimenting
""".lstrip()
import sys
if sys.hexversion < 0x02020000:
print 'This script requires Python 2.2 or later.'
print 'Currently run with version: %s' % sys.version
sys.exit(1)
import fnmatch
import glob
import gzip
import md5
import os
import re
import imp
import json
import stat
import time
import types
import urllib
import urlparse
import xml.sax
import datetime
# CONSTANTS:#1
# Text encodings
ENC_ASCII = "ASCII"
ENC_UTF8 = "UTF-8"
ENC_IDNA = "IDNA"
ENC_ASCII_LIST = ['ASCII', 'US-ASCII', 'US', 'IBM367', 'CP367', 'ISO646-US',
'ISO_646.IRV:1991', 'ISO-IR-6', 'ANSI_X3.4-1968',
'ANSI_X3.4-1986', 'CPASCII' ]
ENC_DEFAULT_LIST = ['ISO-8859-1', 'ISO-8859-2', 'ISO-8859-5']
# Maximum number of urls in each sitemap, before next Sitemap is created
MAXURLS_PER_SITEMAP = 50000
# Suffix on a Sitemap index file
SITEINDEX_SUFFIX = '_index.xml'
# Regular expressions tried for extracting URLs from access logs.
ACCESSLOG_CLF_PATTERN = re.compile(r'.+\s+"([^\s]+)\s+([^\s]+)\s+HTTP/\d+\.\d+"\s+200\s+(\d+).*')
# Match patterns for lastmod attributes
LASTMOD_PATTERNS = map(re.compile, [
r'^\d\d\d\d$',
r'^\d\d\d\d-\d\d$',
r'^\d\d\d\d-\d\d-\d\d$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\dZ$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d[+-]\d\d:\d\d$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?[+-]\d\d:\d\d$',
])
# Match patterns for changefreq attributes
CHANGEFREQ_PATTERNS = [
'always', 'hourly', 'daily', 'weekly', 'monthly', 'yearly', 'never'
]
# XML formats
SITEINDEX_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<sitemapindex\n' \
' xmlns="http://www.google.com/schemas/sitemap/0.84"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84\n' \
' http://www.google.com/schemas/sitemap/0.84/' \
'siteindex.xsd">\n'
SITEINDEX_FOOTER = '</sitemapindex>\n'
SITEINDEX_ENTRY = \
' <sitemap>\n' \
' <loc>%(loc)s</loc>\n' \
' <lastmod>%(lastmod)s</lastmod>\n' \
' </sitemap>\n'
SITEMAP_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<urlset\n' \
' xmlns="http://www.google.com/schemas/sitemap/0.84"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84\n' \
' http://www.google.com/schemas/sitemap/0.84/' \
'sitemap.xsd">\n'
SITEMAP_FOOTER = '</urlset>\n'
SITEURL_XML_PREFIX = ' <url>\n'
SITEURL_XML_SUFFIX = ' </url>\n'
# Search engines to notify with the updated sitemaps
#
# This list is very non-obvious in what's going on. Here's the gist:
# Each item in the list is a 6-tuple of items. The first 5 are "almost"
# the same as the input arguments to urlparse.urlunsplit():
# 0 - schema
# 1 - netloc
# 2 - path
# 3 - query <-- EXCEPTION: specify a query map rather than a string
# 4 - fragment
# Additionally, add item 5:
# 5 - query attribute that should be set to the new Sitemap URL
# Clear as mud, I know.
NOTIFICATION_SITES = [
('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap')
]
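# Illustrative sketch (an assumption, not the script's own notification routine):
# expanding one of the tuples above into a ping URL amounts to copying the query
# map, setting the attribute named in item 5 to the new Sitemap URL, and gluing
# the pieces back together with urlunsplit.
def _example_ping_url(site, sitemap_url):
  (scheme, netloc, path, query_map, frag, attribute) = site
  query_map = dict(query_map)
  query_map[attribute] = sitemap_url
  return urlparse.urlunsplit(
      (scheme, netloc, path, urllib.urlencode(query_map), frag))
# e.g. _example_ping_url(NOTIFICATION_SITES[0], 'http://example.com/sitemap.xml')
# -> 'http://www.google.com/webmasters/sitemaps/ping?sitemap=http%3A%2F%2Fexample.com%2Fsitemap.xml'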
# endfold
def to_timestamp(dt):
""" Convert datetime object to unixtime string """
return "%0.f" % (dt - datetime.datetime(1970,1,1)).total_seconds()
class Error(Exception):#1
"""
Base exception class. In this module we tend not to use our own exception
types for very much, but they come in very handy on XML parsing with SAX.
"""
pass
#Endfold
class SchemaError(Error):#1
"""Failure to process an XML file according to the schema we know."""
pass
#endfold
class Encoder:#1
"""
Manages wide-character/narrow-character conversions for just about all
text that flows into or out of the script.
You should always use this class for string coercion, as opposed to
letting Python handle coercions automatically. Reason: Python
usually assumes ASCII (7-bit) as a default narrow character encoding,
which is not the kind of data we generally deal with.
General high-level methodologies used in sitemap_gen:
[PATHS]
File system paths may be wide or narrow, depending on platform.
This works fine, just be aware of it and be very careful to not
mix them. That is, if you have to pass several file path arguments
into a library call, make sure they are all narrow or all wide.
This class has MaybeNarrowPath() which should be called on every
file system path you deal with.
[URLS]
URL locations are stored in Narrow form, already escaped. This has the
benefit of keeping escaping and encoding as close as possible to the format
we read them in. The downside is we may end up with URLs that have
intermingled encodings -- the root path may be encoded in one way
while the filename is encoded in another. This is obviously wrong, but
it should hopefully be an issue hit by very few users. The workaround
from the user level (assuming they notice) is to specify a default_encoding
parameter in their config file.
[OTHER]
Other text, such as attributes of the URL class, configuration options,
etc, are generally stored in Unicode for simplicity.
"""
def __init__(self):
self._user = None # User-specified default encoding
self._learned = [] # Learned default encodings
self._widefiles = False # File system can be wide
# Can the file system be Unicode?
try:
self._widefiles = os.path.supports_unicode_filenames
except AttributeError:
try:
self._widefiles = sys.getwindowsversion() == os.VER_PLATFORM_WIN32_NT
except AttributeError:
pass
# Try to guess a working default
try:
encoding = sys.getfilesystemencoding()
if encoding and not (encoding.upper() in ENC_ASCII_LIST):
self._learned = [ encoding ]
except AttributeError:
pass
if not self._learned:
encoding = sys.getdefaultencoding()
if encoding and not (encoding.upper() in ENC_ASCII_LIST):
self._learned = [ encoding ]
# If we had no guesses, start with some European defaults
if not self._learned:
self._learned = ENC_DEFAULT_LIST
#end def __init__
def SetUserEncoding(self, encoding):
self._user = encoding
#end def SetUserEncoding
def NarrowText(self, text, encoding):
""" Narrow a piece of arbitrary text """
if type(text) != types.UnicodeType:
return text
# Try the passed in preference
if encoding:
try:
result = text.encode(encoding)
if not encoding in self._learned:
self._learned.append(encoding)
return result
except UnicodeError:
pass
except LookupError:
output.Warn('Unknown encoding: %s' % encoding)
# Try the user preference
if self._user:
try:
return text.encode(self._user)
except UnicodeError:
pass
except LookupError:
temp = self._user
self._user = None
output.Warn('Unknown default_encoding: %s' % temp)
# Look through learned defaults, knock any failing ones out of the list
while self._learned:
try:
return text.encode(self._learned[0])
except:
del self._learned[0]
# When all other defaults are exhausted, use UTF-8
try:
return text.encode(ENC_UTF8)
except UnicodeError:
pass
# Something is seriously wrong if we get to here
return text.encode(ENC_ASCII, 'ignore')
#end def NarrowText
def MaybeNarrowPath(self, text):
""" Paths may be allowed to stay wide """
if self._widefiles:
return text
return self.NarrowText(text, None)
#end def MaybeNarrowPath
def WidenText(self, text, encoding):
""" Widen a piece of arbitrary text """
if type(text) != types.StringType:
return text
# Try the passed in preference
if encoding:
try:
result = unicode(text, encoding)
if not encoding in self._learned:
self._learned.append(encoding)
return result
except UnicodeError:
pass
except LookupError:
output.Warn('Unknown encoding: %s' % encoding)
# Try the user preference
if self._user:
try:
return unicode(text, self._user)
except UnicodeError:
pass
except LookupError:
temp = self._user
self._user = None
output.Warn('Unknown default_encoding: %s' % temp)
# Look through learned defaults, knock any failing ones out of the list
while self._learned:
try:
return unicode(text, self._learned[0])
except:
del self._learned[0]
# Getting here means it wasn't UTF-8 and we had no working default.
# We really don't have anything "right" we can do anymore.
output.Warn('Unrecognized encoding in text: %s' % text)
if not self._user:
output.Warn('You may need to set a default_encoding in your '
'configuration file.')
return text.decode(ENC_ASCII, 'ignore')
#end def WidenText
#Endfold
class Output:#1
"""
Exposes logging functionality, and tracks how many errors
we have thus output.
Logging levels should be used as thus:
Fatal -- extremely sparingly
Error -- config errors, entire blocks of user 'intention' lost
Warn -- individual URLs lost
Log(,0) -- Un-suppressable text that's not an error
Log(,1) -- touched files, major actions
Log(,2) -- parsing notes, filtered or duplicated URLs
Log(,3) -- each accepted URL
"""
def __init__(self):
self.num_errors = 0 # Count of errors
self.num_warns = 0 # Count of warnings
self._errors_shown = {} # Shown errors
self._warns_shown = {} # Shown warnings
self._verbose = 0 # Level of verbosity
#end def __init__
def Log(self, text, level):
""" Output a blurb of diagnostic text, if the verbose level allows it """
if text:
text = encoder.NarrowText(text, None)
if self._verbose >= level:
print text
#end def Log
def Warn(self, text):
""" Output and count a warning. Suppress duplicate warnings. """
if text:
text = encoder.NarrowText(text, None)
hash = md5.new(text).digest()
if not self._warns_shown.has_key(hash):
self._warns_shown[hash] = 1
print '[WARNING] ' + text
else:
self.Log('(suppressed) [WARNING] ' + text, 3)
self.num_warns = self.num_warns + 1
#end def Warn
def Error(self, text):
""" Output and count an error. Suppress duplicate errors. """
if text:
text = encoder.NarrowText(text, None)
hash = md5.new(text).digest()
if not self._errors_shown.has_key(hash):
self._errors_shown[hash] = 1
print '[ERROR] ' + text
else:
self.Log('(suppressed) [ERROR] ' + text, 3)
self.num_errors = self.num_errors + 1
#end def Error
def Fatal(self, text):
""" Output an error and terminate the program. """
if text:
text = encoder.NarrowText(text, None)
print '[FATAL] ' + text
else:
print 'Fatal error.'
sys.exit(1)
#end def Fatal
def SetVerbose(self, level):
""" Sets the verbose level. """
try:
if type(level) != types.IntType:
level = int(level)
if (level >= 0) and (level <= 3):
self._verbose = level
return
except ValueError:
pass
self.Error('Verbose level (%s) must be between 0 and 3 inclusive.' % level)
#end def SetVerbose
#endfold
encoder = Encoder()
output = Output()
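# Illustrative sketch (an assumption, for orientation only): typical use of the two
# module-level singletons created above, following the verbosity levels documented
# on the Output class. It is not called anywhere in the script.
def _example_logging():
  output.SetVerbose(2)
  output.Log('Opened sitemap file for writing', 1) # major action, shown at verbose >= 1
  output.Log('Filtered out a duplicate URL', 2) # parsing note, shown at verbose >= 2
  output.Warn('Lastmod does not appear to be in ISO8601 format')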
class URL(object):#1
""" URL is a smart structure grouping together the properties we
care about for a single web reference. """
__slots__ = 'loc', 'lastmod', 'changefreq', 'priority', "size", "timestamp"
def __init__(self):
self.loc = None # URL -- in Narrow characters
self.lastmod = None # ISO8601 timestamp of last modify
self.changefreq = None # Text term for update frequency
self.priority = None # Float between 0 and 1 (inc)
#end def __init__
def __cmp__(self, other):
if self.loc < other.loc:
return -1
if self.loc > other.loc:
return 1
return 0
#end def __cmp__
def TrySetAttribute(self, attribute, value):
""" Attempt to set the attribute to the value, with a pretty try
block around it. """
if attribute == 'loc':
self.loc = self.Canonicalize(value)
else:
try:
setattr(self, attribute, value)
except AttributeError:
output.Warn('Unknown URL attribute: %s' % attribute)
#end def TrySetAttribute
def IsAbsolute(loc):
""" Decide if the URL is absolute or not """
if not loc:
return False
narrow = encoder.NarrowText(loc, None)
(scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
if (not scheme) or (not netloc):
return False
return True
#end def IsAbsolute
IsAbsolute = staticmethod(IsAbsolute)
def Canonicalize(loc):
""" Do encoding and canonicalization on a URL string """
if not loc:
return loc
# Let the encoder try to narrow it
narrow = encoder.NarrowText(loc, None)
# Escape components individually
(scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
unr = '-._~'
sub = '!$&\'()*+,;='
netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
path = urllib.quote(path, unr + sub + '%:@/')
query = urllib.quote(query, unr + sub + '%:@/?')
frag = urllib.quote(frag, unr + sub + '%:@/?')
# Try built-in IDNA encoding on the netloc
try:
(ignore, widenetloc, ignore, ignore, ignore) = urlparse.urlsplit(loc)
for c in widenetloc:
if c >= unichr(128):
netloc = widenetloc.encode(ENC_IDNA)
netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
break
except UnicodeError:
# urlsplit must have failed, based on implementation differences in the
# library. There is not much we can do here, except ignore it.
pass
except LookupError:
output.Warn('An International Domain Name (IDN) is being used, but this '
'version of Python does not have support for IDNA encoding. '
' (IDNA support was introduced in Python 2.3) The encoding '
'we have used instead is wrong and will probably not yield '
'valid URLs.')
bad_netloc = False
if '%' in netloc:
bad_netloc = True
# Put it all back together
narrow = urlparse.urlunsplit((scheme, netloc, path, query, frag))
# I let '%' through. Fix any that aren't pre-existing escapes.
HEXDIG = '0123456789abcdefABCDEF'
list = narrow.split('%')
narrow = list[0]
del list[0]
for item in list:
if (len(item) >= 2) and (item[0] in HEXDIG) and (item[1] in HEXDIG):
narrow = narrow + '%' + item
else:
narrow = narrow + '%25' + item
# Issue a warning if this is a bad URL
if bad_netloc:
output.Warn('Invalid characters in the host or domain portion of a URL: '
+ narrow)
while narrow.startswith("//"):
narrow = narrow[1:]
if not narrow:
narrow = "/"
return narrow
#end def Canonicalize
Canonicalize = staticmethod(Canonicalize)
def Validate(self, base_url, allow_fragment):
""" Verify the data in this URL is well-formed, and override if not. """
assert type(base_url) == types.StringType
# Test (and normalize) the ref
if not self.loc:
output.Warn('Empty URL')
return False
if allow_fragment:
self.loc = urlparse.urljoin(base_url, self.loc)
if not self.loc.startswith(base_url):
output.Warn('Discarded URL for not starting with the base_url: %s' %
self.loc)
self.loc = None
return False
# Test the lastmod
if self.lastmod:
match = False
self.lastmod = self.lastmod.upper()
for pattern in LASTMOD_PATTERNS:
match = pattern.match(self.lastmod)
if match:
break
if not match:
output.Warn('Lastmod "%s" does not appear to be in ISO8601 format on '
'URL: %s' % (self.lastmod, self.loc))
self.lastmod = None
# Test the changefreq
if self.changefreq:
match = False
self.changefreq = self.changefreq.lower()
for pattern in CHANGEFREQ_PATTERNS:
if self.changefreq == pattern:
match = True
break
if not match:
output.Warn('Changefreq "%s" is not a valid change frequency on URL '
': %s' % (self.changefreq, self.loc))
self.changefreq = None
# Test the priority
if self.priority:
priority = -1.0
try:
priority = float(self.priority)
except ValueError:
pass
if (priority < 0.0) or (priority > 1.0):
output.Warn('Priority "%s" is not a number between 0 and 1 inclusive '
'on URL: %s' % (self.priority, self.loc))
self.priority = None
return True
#end def Validate
def MakeHash(self):
""" Provides a uniform way of hashing URLs """
if not self.loc:
return None
if self.loc.endswith('/'):
return md5.new(self.loc[:-1]).digest()
return md5.new(self.loc).digest()
#end def MakeHash
def Log(self, prefix='URL', level=3):
""" Dump the contents, empty or not, to the log. """
out = prefix + ':'
for attribute in self.__slots__:
value = getattr(self, attribute)
if not value:
value = ''
out = out + (' %s=[%s]' % (attribute, value))
output.Log('%s' % encoder.NarrowText(out, None), level)
#end def Log
def WriteXML(self, file):
""" Dump non-empty contents to the output file, in XML format. """
if not self.loc:
return
out = SITEURL_XML_PREFIX
for attribute in self.__slots__:
# PATCH BEGIN
if not attribute in ["loc", "lastmod", "changefreq", "lastmod"]:
continue
# PATCH END
value = getattr(self, attribute)
if value:
if type(value) == types.UnicodeType:
value = encoder.NarrowText(value, None)
elif type(value) != types.StringType:
value = str(value)
value = xml.sax.saxutils.escape(value)
out = out + (' <%s>%s</%s>\n' % (attribute, value, attribute))
out = out + SITEURL_XML_SUFFIX
file.write(out)
#end def WriteXML
#Endfold
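# --- Editor's note (added): a small hypothetical sketch of how the URL class
# above is typically driven.  It is never called; the host name, path and the
# expected canonical form below are illustrative assumptions, not captured
# output.
def _example_url_usage():
  u = URL()
  u.TrySetAttribute('loc', 'http://www.example.com/some dir/page.html')
  # Canonicalize (invoked via TrySetAttribute for 'loc') percent-escapes the
  # path, so u.loc should come back as
  # 'http://www.example.com/some%20dir/page.html'.
  u.TrySetAttribute('changefreq', 'weekly')
  ok = u.Validate('http://www.example.com/', False)
  return (ok, u.loc, u.MakeHash())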
class Filter:#1
"""
A filter on the stream of URLs we find. A filter is, in essence,
a wildcard applied to the stream. You can think of this as an
operator that returns a tri-state when given a URL:
True -- this URL is to be included in the sitemap
None -- this URL is undecided
False -- this URL is to be dropped from the sitemap
"""
def __init__(self, attributes):
self._wildcard = None # Pattern for wildcard match
self._regexp = None # Pattern for regexp match
self._pass = False # "Drop" filter vs. "Pass" filter
if not ValidateAttributes('FILTER', attributes,
('pattern', 'type', 'action')):
return
# Check error count on the way in
num_errors = output.num_errors
# Fetch the attributes
pattern = attributes.get('pattern')
type = attributes.get('type', 'wildcard')
action = attributes.get('action', 'drop')
if type:
type = type.lower()
if action:
action = action.lower()
# Verify the attributes
if not pattern:
output.Error('On a filter you must specify a "pattern" to match')
elif (not type) or ((type != 'wildcard') and (type != 'regexp')):
output.Error('On a filter you must specify either \'type="wildcard"\' '
'or \'type="regexp"\'')
elif (action != 'pass') and (action != 'drop'):
output.Error('If you specify a filter action, it must be either '
'\'action="pass"\' or \'action="drop"\'')
# Set the rule
if action == 'drop':
self._pass = False
elif action == 'pass':
self._pass = True
if type == 'wildcard':
self._wildcard = pattern
elif type == 'regexp':
try:
self._regexp = re.compile(pattern.lower())
except re.error:
output.Error('Bad regular expression: %s' % pattern)
# Log the final results iff we didn't add any errors
if num_errors == output.num_errors:
output.Log('Filter: %s any URL that matches %s "%s"' %
(action, type, pattern), 2)
#end def __init__
def Apply(self, path_url):
""" Process the URL, as above. """
if not path_url:
return None
if self._wildcard:
if fnmatch.fnmatchcase(path_url.lower(), self._wildcard.lower()):
return self._pass
return None
if self._regexp:
if self._regexp.search(path_url.lower()):
return self._pass
return None
assert False # unreachable
#endfold
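# --- Editor's note (added): hypothetical sketch of the tri-state behaviour
# documented in the Filter docstring above.  The attribute dictionary mimics
# what the SAX config parser would hand in; the pattern and URLs are
# assumptions for illustration only.
def _example_filter_usage():
  gif_filter = Filter({'pattern': '*.gif', 'type': 'wildcard', 'action': 'drop'})
  dropped = gif_filter.Apply('http://www.example.com/images/logo.gif')  # False
  undecided = gif_filter.Apply('http://www.example.com/index.html')     # None
  return (dropped, undecided)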
class InputURL:#1
"""
Each Input class knows how to yield a set of URLs from a data source.
This one handles a single URL, manually specified in the config file.
"""
def __init__(self, attributes):
self._url = None # The lonely URL
if not ValidateAttributes('URL', attributes,
('href', 'lastmod', 'changefreq', 'priority')):
return
url = URL()
for attr in attributes.keys():
if attr == 'href':
url.TrySetAttribute('loc', attributes[attr])
else:
url.TrySetAttribute(attr, attributes[attr])
if not url.loc:
output.Error('Url entries must have an href attribute.')
return
self._url = url
output.Log('Input: From URL "%s"' % self._url.loc, 2)
def ProduceURLs(self, consumer):
""" Produces URLs from our data source, hands them in to the consumer. """
if self._url:
consumer(self._url, True)
#Endfold
class InputURLList:#1
"""
Each Input class knows how to yield a set of URLs from a data source.
This one handles a text file with a list of URLs
"""
def __init__(self, attributes):
self._path = None # The file path
self._encoding = None # Encoding of that file
if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding')):
return
self._path = attributes.get('path')
self._encoding = attributes.get('encoding', ENC_UTF8)
if self._path:
self._path = encoder.MaybeNarrowPath(self._path)
if os.path.isfile(self._path):
output.Log('Input: From URLLIST "%s"' % self._path, 2)
else:
output.Error('Can not locate file: %s' % self._path)
self._path = None
else:
output.Error('Urllist entries must have a "path" attribute.')
def ProduceURLs(self, consumer):
""" Produces URLs from our data source, hands them in to the consumer. """
# Open the file
(frame, file) = OpenFileForRead(self._path, 'URLLIST')
if not file:
return
# Iterate lines
linenum = 0
for line in file.readlines():
linenum = linenum + 1
# Strip comments and empty lines
if self._encoding:
line = encoder.WidenText(line, self._encoding)
line = line.strip()
if (not line) or line[0] == '#':
continue
# Split the line on space
url = URL()
cols = line.split()
for i in range(0,len(cols)):
cols[i] = cols[i].strip()
url.TrySetAttribute('loc', cols[0])
# Extract attributes from the other columns
for i in range(1,len(cols)):
if cols[i]:
try:
(attr_name, attr_val) = cols[i].split('=', 1)
url.TrySetAttribute(attr_name, attr_val)
except ValueError:
output.Warn('Line %d: Unable to parse attribute: %s' %
(linenum, cols[i]))
# Pass it on
consumer(url, False)
file.close()
if frame:
frame.close()
#Endfold
class InputDirectory:#1
"""
Each Input class knows how to yield a set of URLs from a data source.
This one handles a directory that acts as base for walking the filesystem.
"""
def __init__(self, attributes, base_url):
self._path = None # The directory
    self._url = None             # The URL equivalent
self._default_file = None
if not ValidateAttributes('DIRECTORY', attributes, ('path', 'url',
'default_file')):
return
# Prep the path -- it MUST end in a sep
path = attributes.get('path')
if not path:
output.Error('Directory entries must have both "path" and "url" '
'attributes')
return
path = encoder.MaybeNarrowPath(path)
if not path.endswith(os.sep):
path = path + os.sep
if not os.path.isdir(path):
output.Error('Can not locate directory: %s' % path)
return
# Prep the URL -- it MUST end in a sep
url = attributes.get('url')
if not url:
output.Error('Directory entries must have both "path" and "url" '
'attributes')
return
url = URL.Canonicalize(url)
if not url.endswith('/'):
url = url + '/'
if not url.startswith(base_url):
url = urlparse.urljoin(base_url, url)
if not url.startswith(base_url):
output.Error('The directory URL "%s" is not relative to the '
'base_url: %s' % (url, base_url))
return
# Prep the default file -- it MUST be just a filename
file = attributes.get('default_file')
if file:
file = encoder.MaybeNarrowPath(file)
if os.sep in file:
output.Error('The default_file "%s" can not include path information.'
% file)
file = None
self._path = path
self._url = url
self._default_file = file
if file:
output.Log('Input: From DIRECTORY "%s" (%s) with default file "%s"'
% (path, url, file), 2)
else:
output.Log('Input: From DIRECTORY "%s" (%s) with no default file'
% (path, url), 2)
#end def __init__
def ProduceURLs(self, consumer):
""" Produces URLs from our data source, hands them in to the consumer. """
if not self._path:
return
root_path = self._path
root_URL = self._url
root_file = self._default_file
def PerFile(dirpath, name):
"""
Called once per file.
Note that 'name' will occasionally be None -- for a directory itself
"""
# Pull a timestamp
url = URL()
isdir = False
try:
if name:
path = os.path.join(dirpath, name)
else:
path = dirpath
isdir = os.path.isdir(path)
time = None
if isdir and root_file:
file = os.path.join(path, root_file)
try:
time = os.stat(file)[stat.ST_MTIME];
except OSError:
pass
if not time:
time = os.stat(path)[stat.ST_MTIME];
url.lastmod = TimestampISO8601(time)
except OSError:
pass
except ValueError:
pass
# Build a URL
middle = dirpath[len(root_path):]
if os.sep != '/':
middle = middle.replace(os.sep, '/')
if middle:
middle = middle + '/'
if name:
middle = middle + name
if isdir:
middle = middle + '/'
url.TrySetAttribute('loc', root_URL + encoder.WidenText(middle, None))
# Suppress default files. (All the way down here so we can log it.)
if name and (root_file == name):
url.Log(prefix='IGNORED (default file)', level=2)
return
consumer(url, False)
#end def PerFile
def PerDirectory(ignore, dirpath, namelist):
"""
Called once per directory with a list of all the contained files/dirs.
"""
ignore = ignore # Avoid warnings of an unused parameter
if not dirpath.startswith(root_path):
output.Warn('Unable to decide what the root path is for directory: '
'%s' % dirpath)
return
for name in namelist:
PerFile(dirpath, name)
#end def PerDirectory
output.Log('Walking DIRECTORY "%s"' % self._path, 1)
PerFile(self._path, None)
os.path.walk(self._path, PerDirectory, None)
#end def ProduceURLs
#Endfold
class InputAccessLog:#1
"""
Each Input class knows how to yield a set of URLs from a data source.
This one handles access logs. It's non-trivial in that we want to
auto-detect log files in the Common Logfile Format (as used by Apache,
for instance) and the Extended Log File Format (as used by IIS, for
instance).
"""
def __init__(self, attributes):
self._path = None # The file path
self._encoding = None # Encoding of that file
self._is_elf = False # Extended Log File Format?
self._is_clf = False # Common Logfile Format?
self._elf_status = -1 # ELF field: '200'
self._elf_method = -1 # ELF field: 'HEAD'
self._elf_uri = -1 # ELF field: '/foo?bar=1'
self._elf_urifrag1 = -1 # ELF field: '/foo'
self._elf_urifrag2 = -1 # ELF field: 'bar=1'
self._elf_bytes = -1 # ELF field: response bytes
self._elf_date = -1 # ELF field: date
self._elf_time = -1 # ELF field: time
if not ValidateAttributes('ACCESSLOG', attributes, ('path', 'encoding')):
return
self._path = attributes.get('path')
self._encoding = attributes.get('encoding', ENC_UTF8)
if self._path:
self._path = encoder.MaybeNarrowPath(self._path)
if os.path.isfile(self._path):
output.Log('Input: From ACCESSLOG "%s"' % self._path, 2)
else:
output.Error('Can not locate file: %s' % self._path)
self._path = None
else:
output.Error('Accesslog entries must have a "path" attribute.')
def RecognizeELFLine(self, line):
""" Recognize the Fields directive that heads an ELF file """
if not line.startswith('#Fields:'):
return False
fields = line.split()
del fields[0]
for i in range(0, len(fields)):
field = fields[i].strip()
if field == 'sc-status':
self._elf_status = i
elif field == 'cs-method':
self._elf_method = i
elif field == 'cs-uri':
self._elf_uri = i
elif field == 'cs-uri-stem':
self._elf_urifrag1 = i
elif field == 'cs-uri-query':
self._elf_urifrag2 = i
elif field == "sc-bytes":
self._elf_bytes = i
elif field == "date":
self._elf_date = i
elif field == "time":
self._elf_time = i
output.Log('Recognized an Extended Log File Format file.', 2)
return True
def GetELFLine(self, line):
""" Fetch the requested URL from an ELF line """
fields = line.split()
count = len(fields)
size = 0
# Verify status was Ok
if self._elf_status >= 0:
if self._elf_status >= count:
return (None, size)
if not fields[self._elf_status].strip() == '200':
return (None, size)
# Verify method was HEAD or GET
if self._elf_method >= 0:
if self._elf_method >= count:
return (None, size)
if not fields[self._elf_method].strip() in ('HEAD', 'GET'):
return (None, size)
# Get "sc-bytes" as possible
if self._elf_bytes >= 0:
size = fields[self._elf_bytes].strip()
# Get "date" as possible
if self._elf_date >= 0:
_date = fields[self._elf_date].strip()
# Get "time" as possible
if self._elf_time >= 0:
_time = fields[self._elf_time].strip()
timestamp = None
if _date and _time:
timestamp = to_timestamp(datetime.datetime.strptime((_date + " " + _time).split(".")[0], "%Y-%m-%d %H:%M:%S"))
# Pull the full URL if we can
if self._elf_uri >= 0:
if self._elf_uri >= count:
return (None, size)
url = fields[self._elf_uri].strip()
if url != '-':
return (url, size, timestamp)
# Put together a fragmentary URL
if self._elf_urifrag1 >= 0:
if self._elf_urifrag1 >= count or self._elf_urifrag2 >= count:
        return (None, size, timestamp)
urlfrag1 = fields[self._elf_urifrag1].strip()
urlfrag2 = None
if self._elf_urifrag2 >= 0:
urlfrag2 = fields[self._elf_urifrag2]
if urlfrag1 and (urlfrag1 != '-'):
if urlfrag2 and (urlfrag2 != '-'):
urlfrag1 = urlfrag1 + '?' + urlfrag2
return (urlfrag1, size, timestamp)
return (None, size)
def RecognizeCLFLine(self, line):
""" Try to tokenize a logfile line according to CLF pattern and see if
it works. """
match = ACCESSLOG_CLF_PATTERN.match(line)
recognize = match and (match.group(1) in ('HEAD', 'GET'))
if recognize:
output.Log('Recognized a Common Logfile Format file.', 2)
return recognize
def GetCLFLine(self, line):
""" Fetch the requested URL from a CLF line """
match = ACCESSLOG_CLF_PATTERN.match(line)
if match:
request = match.group(1)
if request in ('HEAD', 'GET'):
return (match.group(2), match.group(3))
return (None, 0)
def ProduceURLs(self, consumer):
""" Produces URLs from our data source, hands them in to the consumer. """
# Open the file
(frame, file) = OpenFileForRead(self._path, 'ACCESSLOG')
if not file:
return
# PATCH BEGIN
if not "line_rewrite" in globals():
try:
from sitemap_gen_custom import line_rewrite
except ImportError:
line_rewrite = lambda x: x
else:
line_rewrite = globals()["line_rewrite"]
# PATCH END
# Iterate lines
for line in file.readlines():
if self._encoding:
line = encoder.WidenText(line, self._encoding)
line = line.strip()
# PATCH BEGIN
line = line_rewrite(line)
if not line:
continue
# PATCH END
# If we don't know the format yet, try them both
if (not self._is_clf) and (not self._is_elf):
self._is_elf = self.RecognizeELFLine(line)
self._is_clf = self.RecognizeCLFLine(line)
# Digest the line
timestamp = None
match = None
if self._is_elf:
pieces = self.GetELFLine(line)
if len(pieces) > 2:
match, size, timestamp = pieces
else:
match, size = pieces
elif self._is_clf:
match, size = self.GetCLFLine(line)
if not match:
continue
# Pass it on
url = URL()
url.TrySetAttribute('loc', match)
url.TrySetAttribute("size", size)
if timestamp:
url.TrySetAttribute("timestamp", timestamp)
consumer(url, True)
file.close()
if frame:
frame.close()
#Endfold
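# --- Editor's note (added): hypothetical sketch of the log-format
# auto-detection performed by InputAccessLog.  The path, the ELF header and
# both log lines are invented samples, not real server output.
def _example_accesslog_detection():
  log = InputAccessLog({'path': '/tmp/access.log'})
  # An IIS-style Extended Log File Format header names the columns...
  is_elf = log.RecognizeELFLine(
      '#Fields: date time cs-method cs-uri-stem cs-uri-query sc-status sc-bytes')
  # ...after which GetELFLine can pull (url, size, timestamp) out of a row.
  hit = log.GetELFLine('2016-01-01 12:00:00 GET /index.html - 200 5120')
  # An Apache-style Common Logfile Format line is recognized by regexp instead.
  is_clf = log.RecognizeCLFLine(
      '127.0.0.1 - - [01/Jan/2016:12:00:00 +0000] "GET /index.html HTTP/1.0" 200 5120')
  return (is_elf, hit, is_clf)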
class InputSitemap(xml.sax.handler.ContentHandler):#1
"""
Each Input class knows how to yield a set of URLs from a data source.
This one handles Sitemap files and Sitemap index files. For the sake
of simplicity in design (and simplicity in interfacing with the SAX
package), we do not handle these at the same time, recursively. Instead
we read an index file completely and make a list of Sitemap files, then
go back and process each Sitemap.
"""
class _ContextBase(object):
"""Base class for context handlers in our SAX processing. A context
handler is a class that is responsible for understanding one level of
depth in the XML schema. The class knows what sub-tags are allowed,
and doing any processing specific for the tag we're in.
This base class is the API filled in by specific context handlers,
all defined below.
"""
def __init__(self, subtags):
"""Initialize with a sequence of the sub-tags that would be valid in
this context."""
self._allowed_tags = subtags # Sequence of sub-tags we can have
self._last_tag = None # Most recent seen sub-tag
#end def __init__
def AcceptTag(self, tag):
"""Returns True iff opening a sub-tag is valid in this context."""
valid = tag in self._allowed_tags
if valid:
self._last_tag = tag
else:
self._last_tag = None
return valid
#end def AcceptTag
def AcceptText(self, text):
"""Returns True iff a blurb of text is valid in this context."""
return False
#end def AcceptText
def Open(self):
"""The context is opening. Do initialization."""
pass
#end def Open
def Close(self):
"""The context is closing. Return our result, if any."""
pass
#end def Close
def Return(self, result):
"""We're returning to this context after handling a sub-tag. This
method is called with the result data from the sub-tag that just
closed. Here in _ContextBase, if we ever see a result it means
the derived child class forgot to override this method."""
if result:
raise NotImplementedError
#end def Return
#end class _ContextBase
class _ContextUrlSet(_ContextBase):
"""Context handler for the document node in a Sitemap."""
def __init__(self):
InputSitemap._ContextBase.__init__(self, ('url',))
#end def __init__
#end class _ContextUrlSet
class _ContextUrl(_ContextBase):
"""Context handler for a URL node in a Sitemap."""
def __init__(self, consumer):
"""Initialize this context handler with the callable consumer that
wants our URLs."""
InputSitemap._ContextBase.__init__(self, URL.__slots__)
self._url = None # The URL object we're building
self._consumer = consumer # Who wants to consume it
#end def __init__
def Open(self):
"""Initialize the URL."""
assert not self._url
self._url = URL()
#end def Open
def Close(self):
"""Pass the URL to the consumer and reset it to None."""
assert self._url
self._consumer(self._url, False)
self._url = None
#end def Close
def Return(self, result):
"""A value context has closed, absorb the data it gave us."""
assert self._url
if result:
self._url.TrySetAttribute(self._last_tag, result)
#end def Return
#end class _ContextUrl
class _ContextSitemapIndex(_ContextBase):
"""Context handler for the document node in an index file."""
def __init__(self):
InputSitemap._ContextBase.__init__(self, ('sitemap',))
self._loclist = [] # List of accumulated Sitemap URLs
#end def __init__
def Open(self):
"""Just a quick verify of state."""
assert not self._loclist
#end def Open
def Close(self):
"""Return our list of accumulated URLs."""
if self._loclist:
temp = self._loclist
self._loclist = []
return temp
#end def Close
def Return(self, result):
"""Getting a new loc URL, add it to the collection."""
if result:
self._loclist.append(result)
#end def Return
#end class _ContextSitemapIndex
class _ContextSitemap(_ContextBase):
"""Context handler for a Sitemap entry in an index file."""
def __init__(self):
InputSitemap._ContextBase.__init__(self, ('loc', 'lastmod'))
self._loc = None # The URL to the Sitemap
#end def __init__
def Open(self):
"""Just a quick verify of state."""
assert not self._loc
#end def Open
def Close(self):
"""Return our URL to our parent."""
if self._loc:
temp = self._loc
self._loc = None
return temp
output.Warn('In the Sitemap index file, a "sitemap" entry had no "loc".')
#end def Close
def Return(self, result):
"""A value has closed. If it was a 'loc', absorb it."""
if result and (self._last_tag == 'loc'):
self._loc = result
#end def Return
#end class _ContextSitemap
class _ContextValue(_ContextBase):
"""Context handler for a single value. We return just the value. The
higher level context has to remember what tag led into us."""
def __init__(self):
InputSitemap._ContextBase.__init__(self, ())
self._text = None
#end def __init__
def AcceptText(self, text):
"""Allow all text, adding it to our buffer."""
if self._text:
self._text = self._text + text
else:
self._text = text
return True
#end def AcceptText
def Open(self):
"""Initialize our buffer."""
self._text = None
#end def Open
def Close(self):
"""Return what's in our buffer."""
text = self._text
self._text = None
if text:
text = text.strip()
return text
#end def Close
#end class _ContextValue
def __init__(self, attributes):
"""Initialize with a dictionary of attributes from our entry in the
config file."""
xml.sax.handler.ContentHandler.__init__(self)
self._pathlist = None # A list of files
self._current = -1 # Current context in _contexts
self._contexts = None # The stack of contexts we allow
self._contexts_idx = None # ...contexts for index files
self._contexts_stm = None # ...contexts for Sitemap files
if not ValidateAttributes('SITEMAP', attributes, ['path']):
return
# Init the first file path
path = attributes.get('path')
if path:
path = encoder.MaybeNarrowPath(path)
if os.path.isfile(path):
output.Log('Input: From SITEMAP "%s"' % path, 2)
self._pathlist = [path]
else:
output.Error('Can not locate file "%s"' % path)
else:
output.Error('Sitemap entries must have a "path" attribute.')
#end def __init__
def ProduceURLs(self, consumer):
"""In general: Produces URLs from our data source, hand them to the
callable consumer.
In specific: Iterate over our list of paths and delegate the actual
processing to helper methods. This is a complexity no other data source
needs to suffer. We are unique in that we can have files that tell us
to bring in other files.
Note the decision to allow an index file or not is made in this method.
If we call our parser with (self._contexts == None) the parser will
grab whichever context stack can handle the file. IE: index is allowed.
If instead we set (self._contexts = ...) before parsing, the parser
will only use the stack we specify. IE: index not allowed.
"""
# Set up two stacks of contexts
self._contexts_idx = [InputSitemap._ContextSitemapIndex(),
InputSitemap._ContextSitemap(),
InputSitemap._ContextValue()]
self._contexts_stm = [InputSitemap._ContextUrlSet(),
InputSitemap._ContextUrl(consumer),
InputSitemap._ContextValue()]
# Process the first file
assert self._pathlist
path = self._pathlist[0]
self._contexts = None # We allow an index file here
self._ProcessFile(path)
# Iterate over remaining files
self._contexts = self._contexts_stm # No index files allowed
for path in self._pathlist[1:]:
self._ProcessFile(path)
#end def ProduceURLs
def _ProcessFile(self, path):
"""Do per-file reading/parsing/consuming for the file path passed in."""
assert path
# Open our file
(frame, file) = OpenFileForRead(path, 'SITEMAP')
if not file:
return
# Rev up the SAX engine
try:
self._current = -1
xml.sax.parse(file, self)
except SchemaError:
output.Error('An error in file "%s" made us abort reading the Sitemap.'
% path)
except IOError:
output.Error('Cannot read from file "%s"' % path)
except xml.sax._exceptions.SAXParseException, e:
output.Error('XML error in the file "%s" (line %d, column %d): %s' %
(path, e._linenum, e._colnum, e.getMessage()))
# Clean up
file.close()
if frame:
frame.close()
#end def _ProcessFile
def _MungeLocationListIntoFiles(self, urllist):
"""Given a list of URLs, munge them into our self._pathlist property.
We do this by assuming all the files live in the same directory as
the first file in the existing pathlist. That is, we assume a
Sitemap index points to Sitemaps only in the same directory. This
is not true in general, but will be true for any output produced
by this script.
"""
assert self._pathlist
path = self._pathlist[0]
path = os.path.normpath(path)
dir = os.path.dirname(path)
wide = False
if type(path) == types.UnicodeType:
wide = True
for url in urllist:
url = URL.Canonicalize(url)
output.Log('Index points to Sitemap file at: %s' % url, 2)
(scheme, netloc, path, query, frag) = urlparse.urlsplit(url)
file = os.path.basename(path)
file = urllib.unquote(file)
if wide:
file = encoder.WidenText(file)
if dir:
file = dir + os.sep + file
if file:
self._pathlist.append(file)
output.Log('Will attempt to read Sitemap file: %s' % file, 1)
#end def _MungeLocationListIntoFiles
def startElement(self, tag, attributes):
"""SAX processing, called per node in the config stream.
As long as the new tag is legal in our current context, this
becomes an Open call on one context deeper.
"""
# If this is the document node, we may have to look for a context stack
if (self._current < 0) and not self._contexts:
assert self._contexts_idx and self._contexts_stm
if tag == 'urlset':
self._contexts = self._contexts_stm
elif tag == 'sitemapindex':
self._contexts = self._contexts_idx
output.Log('File is a Sitemap index.', 2)
else:
output.Error('The document appears to be neither a Sitemap nor a '
'Sitemap index.')
raise SchemaError
# Display a kinder error on a common mistake
if (self._current < 0) and (self._contexts == self._contexts_stm) and (
tag == 'sitemapindex'):
output.Error('A Sitemap index can not refer to another Sitemap index.')
raise SchemaError
# Verify no unexpected attributes
if attributes:
text = ''
for attr in attributes.keys():
# The document node will probably have namespaces
if self._current < 0:
if attr.find('xmlns') >= 0:
continue
if attr.find('xsi') >= 0:
continue
if text:
text = text + ', '
text = text + attr
if text:
output.Warn('Did not expect any attributes on any tag, instead tag '
'"%s" had attributes: %s' % (tag, text))
# Switch contexts
if (self._current < 0) or (self._contexts[self._current].AcceptTag(tag)):
self._current = self._current + 1
assert self._current < len(self._contexts)
self._contexts[self._current].Open()
else:
output.Error('Can not accept tag "%s" where it appears.' % tag)
raise SchemaError
#end def startElement
def endElement(self, tag):
"""SAX processing, called per node in the config stream.
This becomes a call to Close on one context followed by a call
to Return on the previous.
"""
tag = tag # Avoid warning on unused argument
assert self._current >= 0
retval = self._contexts[self._current].Close()
self._current = self._current - 1
if self._current >= 0:
self._contexts[self._current].Return(retval)
elif retval and (self._contexts == self._contexts_idx):
self._MungeLocationListIntoFiles(retval)
#end def endElement
def characters(self, text):
"""SAX processing, called when text values are read. Important to
note that one single text value may be split across multiple calls
of this method.
"""
if (self._current < 0) or (
not self._contexts[self._current].AcceptText(text)):
if text.strip():
output.Error('Can not accept text "%s" where it appears.' % text)
raise SchemaError
#end def characters
#endfold
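# --- Editor's note (added): hypothetical sketch of driving InputSitemap by
# hand.  The path is an assumption; in the real flow the Sitemap class below
# builds this input from the config file and passes its own ConsumeURL method
# as the consumer callback.
def _example_inputsitemap_usage():
  collected = []
  def consumer(url, allow_fragment):
    collected.append(url.loc)
  reader = InputSitemap({'path': '/tmp/old_sitemap.xml'})
  reader.ProduceURLs(consumer)   # SAX-parses the Sitemap (or index) file
  return collected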
class FilePathGenerator:#1
"""
This class generates filenames in a series, upon request.
You can request any iteration number at any time, you don't
have to go in order.
Example of iterations for '/path/foo.xml.gz':
0 --> /path/foo.xml.gz
1 --> /path/foo1.xml.gz
2 --> /path/foo2.xml.gz
_index.xml --> /path/foo_index.xml
"""
def __init__(self):
self.is_gzip = False # Is this a GZIP file?
self._path = None # '/path/'
self._prefix = None # 'foo'
self._suffix = None # '.xml.gz'
#end def __init__
def Preload(self, path):
""" Splits up a path into forms ready for recombination. """
path = encoder.MaybeNarrowPath(path)
# Get down to a base name
path = os.path.normpath(path)
base = os.path.basename(path).lower()
if not base:
output.Error('Couldn\'t parse the file path: %s' % path)
return False
lenbase = len(base)
# Recognize extension
lensuffix = 0
compare_suffix = ['.xml', '.xml.gz', '.gz']
for suffix in compare_suffix:
if base.endswith(suffix):
lensuffix = len(suffix)
break
if not lensuffix:
output.Error('The path "%s" doesn\'t end in a supported file '
'extension.' % path)
return False
self.is_gzip = suffix.endswith('.gz')
# Split the original path
lenpath = len(path)
self._path = path[:lenpath-lenbase]
self._prefix = path[lenpath-lenbase:lenpath-lensuffix]
self._suffix = path[lenpath-lensuffix:]
return True
#end def Preload
def GeneratePath(self, instance):
""" Generates the iterations, as described above. """
prefix = self._path + self._prefix
if type(instance) == types.IntType:
if instance:
return '%s%d%s' % (prefix, instance, self._suffix)
return prefix + self._suffix
return prefix + instance
#end def GeneratePath
def GenerateURL(self, instance, root_url):
""" Generates iterations, but as a URL instead of a path. """
prefix = root_url + self._prefix
retval = None
if type(instance) == types.IntType:
if instance:
retval = '%s%d%s' % (prefix, instance, self._suffix)
else:
retval = prefix + self._suffix
else:
retval = prefix + instance
return URL.Canonicalize(retval)
#end def GenerateURL
def GenerateWildURL(self, root_url):
""" Generates a wildcard that should match all our iterations """
prefix = URL.Canonicalize(root_url + self._prefix)
temp = URL.Canonicalize(prefix + self._suffix)
suffix = temp[len(prefix):]
return prefix + '*' + suffix
  #end def GenerateWildURL
#Endfold
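# --- Editor's note (added): hypothetical sketch of the filename series that
# the FilePathGenerator docstring describes, using an assumed store_into path
# and assuming SITEINDEX_SUFFIX is the '_index.xml' suffix defined earlier.
def _example_filepathgenerator():
  gen = FilePathGenerator()
  gen.Preload('/var/www/sitemap.xml.gz')
  first = gen.GeneratePath(0)                  # '/var/www/sitemap.xml.gz'
  second = gen.GeneratePath(1)                 # '/var/www/sitemap1.xml.gz'
  index = gen.GeneratePath(SITEINDEX_SUFFIX)   # '/var/www/sitemap_index.xml'
  wild = gen.GenerateWildURL('http://www.example.com/')
  # wild should match every generated URL, e.g.
  # 'http://www.example.com/sitemap*.xml.gz'
  return (first, second, index, wild)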
class PerURLStatistics:#1
""" Keep track of some simple per-URL statistics, like file extension. """
def __init__(self):
self._extensions = {} # Count of extension instances
#end def __init__
def Consume(self, url):
""" Log some stats for the URL. At the moment, that means extension. """
if url and url.loc:
(scheme, netloc, path, query, frag) = urlparse.urlsplit(url.loc)
if not path:
return
# Recognize directories
if path.endswith('/'):
if self._extensions.has_key('/'):
self._extensions['/'] = self._extensions['/'] + 1
else:
self._extensions['/'] = 1
return
# Strip to a filename
i = path.rfind('/')
if i >= 0:
assert i < len(path)
path = path[i:]
# Find extension
i = path.rfind('.')
if i > 0:
assert i < len(path)
ext = path[i:].lower()
if self._extensions.has_key(ext):
self._extensions[ext] = self._extensions[ext] + 1
else:
self._extensions[ext] = 1
else:
if self._extensions.has_key('(no extension)'):
self._extensions['(no extension)'] = self._extensions[
'(no extension)'] + 1
else:
self._extensions['(no extension)'] = 1
#end def Consume
def Log(self):
""" Dump out stats to the output. """
if len(self._extensions):
output.Log('Count of file extensions on URLs:', 1)
set = self._extensions.keys()
set.sort()
for ext in set:
output.Log(' %7d %s' % (self._extensions[ext], ext), 1)
#end def Log
#Endfold
class Sitemap(xml.sax.handler.ContentHandler):#1
"""
This is the big workhorse class that processes your inputs and spits
  out sitemap files. It is built as a SAX handler for setup purposes.
That is, it processes an XML stream to bring itself up.
"""
def __init__(self, suppress_notify):
xml.sax.handler.ContentHandler.__init__(self)
self._filters = [] # Filter objects
self._inputs = [] # Input objects
self._urls = {} # Maps URLs to count of dups
self._set = [] # Current set of URLs
self._filegen = None # Path generator for output files
self._wildurl1 = None # Sitemap URLs to filter out
self._wildurl2 = None # Sitemap URLs to filter out
self._sitemaps = 0 # Number of output files
# We init _dup_max to 2 so the default priority is 0.5 instead of 1.0
self._dup_max = 2 # Max number of duplicate URLs
self._stat = PerURLStatistics() # Some simple stats
self._in_site = False # SAX: are we in a Site node?
    self._in_site_ever = False          # SAX: were we ever in a Site?
self._default_enc = None # Best encoding to try on URLs
self._base_url = None # Prefix to all valid URLs
self._store_into = None # Output filepath
self._suppress = suppress_notify # Suppress notify of servers
self._lastmod_dict = None
def ValidateBasicConfig(self):
""" Verifies (and cleans up) the basic user-configurable options. """
all_good = True
if self._default_enc:
encoder.SetUserEncoding(self._default_enc)
# Canonicalize the base_url
if all_good and not self._base_url:
output.Error('A site needs a "base_url" attribute.')
all_good = False
if all_good and not URL.IsAbsolute(self._base_url):
output.Error('The "base_url" must be absolute, not relative: %s' %
self._base_url)
all_good = False
if all_good:
self._base_url = URL.Canonicalize(self._base_url)
if not self._base_url.endswith('/'):
self._base_url = self._base_url + '/'
output.Log('BaseURL is set to: %s' % self._base_url, 2)
# Load store_into into a generator
if all_good:
if self._store_into:
if not self._store_into.endswith(".xml"):
output.Error("store_into attribute must be end with .xml")
all_good = False
else:
self._filegen = FilePathGenerator()
if not self._filegen.Preload(self._store_into):
all_good = False
else:
output.Error('A site needs a "store_into" attribute.')
all_good = False
# Ask the generator for patterns on what its output will look like
if all_good:
self._wildurl1 = self._filegen.GenerateWildURL(self._base_url)
self._wildurl2 = self._filegen.GenerateURL(SITEINDEX_SUFFIX,
self._base_url)
# Unify various forms of False
if all_good:
if self._suppress:
if (type(self._suppress) == types.StringType) or (type(self._suppress)
== types.UnicodeType):
if (self._suppress == '0') or (self._suppress.lower() == 'false'):
self._suppress = False
# Done
if not all_good:
output.Log('See "example_config.xml" for more information.', 0)
return all_good
#end def ValidateBasicConfig
def Generate(self):
""" Run over all the Inputs and ask them to Produce """
# Run the inputs
for input in self._inputs:
input.ProduceURLs(self.ConsumeURL)
# Do last flushes
if len(self._set):
self.FlushSet()
if not self._sitemaps:
output.Warn('No URLs were recorded, writing an empty sitemap.')
self.FlushSet()
# Write an index as needed
if self._sitemaps > 1:
self.WriteIndex()
# Save lastmod cache
    cache_path = os.path.abspath("%s.cache" % self._store_into[:-4])
    cache_file = open(cache_path, "w")
    json.dump(self._lastmod_dict, cache_file)
    cache_file.close()
# Notify
self.NotifySearch()
# Dump stats
self._stat.Log()
#end def Generate
def ConsumeURL(self, url, allow_fragment):
"""
All per-URL processing comes together here, regardless of Input.
Here we run filters, remove duplicates, spill to disk as needed, etc.
"""
if not url:
return
# Validate
path_url = url.loc
if not url.Validate(self._base_url, allow_fragment):
return
# Run filters
accept = None
for filter in self._filters:
accept = filter.Apply(path_url)
if accept != None:
break
if not (accept or (accept == None)):
url.Log(prefix='FILTERED', level=2)
return
    # Ignore our own output URLs
if fnmatch.fnmatchcase(url.loc, self._wildurl1) or fnmatch.fnmatchcase(
url.loc, self._wildurl2):
url.Log(prefix='IGNORED (output file)', level=2)
return
if self._lastmod_dict is None:
try:
self._lastmod_dict = json.load(open(os.path.abspath("%s.cache" % self._store_into[:-4]), "r"))
except:
self._lastmod_dict = {}
timestamp = getattr(url, "timestamp", None)
if not timestamp:
timestamp = to_timestamp(datetime.datetime.now())
if not url.loc in self._lastmod_dict:
self._lastmod_dict[url.loc] = [url.size, timestamp]
if self._lastmod_dict[url.loc][0] != url.size:
self._lastmod_dict[url.loc] = [url.size, timestamp]
url.lastmod = TimestampISO8601(int(self._lastmod_dict[url.loc][1]))
# Note the sighting
hash = url.MakeHash()
if self._urls.has_key(hash):
dup = self._urls[hash]
if dup > 0:
dup = dup + 1
self._urls[hash] = dup
if self._dup_max < dup:
self._dup_max = dup
url.Log(prefix='DUPLICATE')
return
# Acceptance -- add to set
self._urls[hash] = 1
self._set.append(url)
self._stat.Consume(url)
url.Log()
# Flush the set if needed
if len(self._set) >= MAXURLS_PER_SITEMAP:
self.FlushSet()
#end def ConsumeURL
def FlushSet(self):
"""
Flush the current set of URLs to the output. This is a little
slow because we like to sort them all and normalize the priorities
before dumping.
"""
# Sort and normalize
output.Log('Sorting and normalizing collected URLs.', 1)
self._set.sort()
for url in self._set:
hash = url.MakeHash()
dup = self._urls[hash]
if dup > 0:
self._urls[hash] = -1
if not url.priority:
url.priority = '%.4f' % (float(dup) / float(self._dup_max))
# Get the filename we're going to write to
filename = self._filegen.GeneratePath(self._sitemaps)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output filename.')
self._sitemaps = self._sitemaps + 1
output.Log('Writing Sitemap file "%s" with %d URLs' %
(filename, len(self._set)), 1)
# Write to it
frame = None
file = None
try:
if self._filegen.is_gzip:
basename = os.path.basename(filename);
frame = open(filename, 'wb')
file = gzip.GzipFile(fileobj=frame, filename=basename, mode='wt')
else:
file = open(filename, 'wt')
file.write(SITEMAP_HEADER)
for url in self._set:
url.WriteXML(file)
file.write(SITEMAP_FOOTER)
file.close()
if frame:
frame.close()
frame = None
file = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
# Flush
self._set = []
#end def FlushSet
def WriteIndex(self):
""" Write the master index of all Sitemap files """
# Make a filename
filename = self._filegen.GeneratePath(SITEINDEX_SUFFIX)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output index filename.')
output.Log('Writing index file "%s" with %d Sitemaps' %
(filename, self._sitemaps), 1)
# Make a lastmod time
lastmod = TimestampISO8601(time.time())
# Write to it
try:
fd = open(filename, 'wt')
fd.write(SITEINDEX_HEADER)
for mapnumber in range(0,self._sitemaps):
# Write the entry
mapurl = self._filegen.GenerateURL(mapnumber, self._base_url)
mapattributes = { 'loc' : mapurl, 'lastmod' : lastmod }
fd.write(SITEINDEX_ENTRY % mapattributes)
fd.write(SITEINDEX_FOOTER)
fd.close()
fd = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
#end def WriteIndex
def NotifySearch(self):
""" Send notification of the new Sitemap(s) to the search engines. """
if self._suppress:
output.Log('Search engine notification is suppressed.', 1)
return
output.Log('Notifying search engines.', 1)
# Override the urllib's opener class with one that doesn't ignore 404s
class ExceptionURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
output.Log('HTTP error %d: %s' % (errcode, errmsg), 2)
raise IOError
#end def http_error_default
    #end class ExceptionURLopener
old_opener = urllib._urlopener
urllib._urlopener = ExceptionURLopener()
# Build the URL we want to send in
if self._sitemaps > 1:
url = self._filegen.GenerateURL(SITEINDEX_SUFFIX, self._base_url)
else:
url = self._filegen.GenerateURL(0, self._base_url)
# Test if we can hit it ourselves
try:
u = urllib.urlopen(url)
u.close()
except IOError:
output.Error('When attempting to access our generated Sitemap at the '
'following URL:\n %s\n we failed to read it. Please '
'verify the store_into path you specified in\n'
                   ' your configuration file is web-accessible. Consult '
'the FAQ for more\n information.' % url)
      output.Warn('Proceeding to notify with an unverifiable URL.')
# Cycle through notifications
# To understand this, see the comment near the NOTIFICATION_SITES comment
for ping in NOTIFICATION_SITES:
query_map = ping[3]
query_attr = ping[5]
query_map[query_attr] = url
query = urllib.urlencode(query_map)
notify = urlparse.urlunsplit((ping[0], ping[1], ping[2], query, ping[4]))
# Send the notification
output.Log('Notifying: %s' % ping[1], 1)
output.Log('Notification URL: %s' % notify, 2)
try:
u = urllib.urlopen(notify)
u.read()
u.close()
except IOError:
output.Warn('Cannot contact: %s' % ping[1])
if old_opener:
urllib._urlopener = old_opener
#end def NotifySearch
def startElement(self, tag, attributes):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
if self._in_site:
output.Error('Can not nest Site entries in the configuration.')
else:
self._in_site = True
if not ValidateAttributes('SITE', attributes,
('verbose', 'default_encoding', 'base_url', 'store_into',
'suppress_search_engine_notify')):
return
verbose = attributes.get('verbose', 0)
if verbose:
output.SetVerbose(verbose)
self._default_enc = attributes.get('default_encoding')
self._base_url = attributes.get('base_url')
self._store_into = attributes.get('store_into')
if not self._suppress:
self._suppress = attributes.get('suppress_search_engine_notify',
False)
self.ValidateBasicConfig()
elif tag == 'filter':
self._filters.append(Filter(attributes))
elif tag == 'url':
self._inputs.append(InputURL(attributes))
elif tag == 'urllist':
for attributeset in ExpandPathAttribute(attributes, 'path'):
self._inputs.append(InputURLList(attributeset))
elif tag == 'directory':
self._inputs.append(InputDirectory(attributes, self._base_url))
elif tag == 'accesslog':
for attributeset in ExpandPathAttribute(attributes, 'path'):
self._inputs.append(InputAccessLog(attributeset))
elif tag == 'sitemap':
for attributeset in ExpandPathAttribute(attributes, 'path'):
self._inputs.append(InputSitemap(attributeset))
else:
output.Error('Unrecognized tag in the configuration: %s' % tag)
#end def startElement
def endElement(self, tag):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
assert self._in_site
self._in_site = False
self._in_site_ever = True
#end def endElement
def endDocument(self):
""" End of SAX, verify we can proceed. """
if not self._in_site_ever:
output.Error('The configuration must specify a "site" element.')
else:
if not self._inputs:
output.Warn('There were no inputs to generate a sitemap from.')
#end def endDocument
#endfold
def ValidateAttributes(tag, attributes, goodattributes):#1
""" Makes sure 'attributes' does not contain any attribute not
listed in 'goodattributes' """
all_good = True
for attr in attributes.keys():
if not attr in goodattributes:
output.Error('Unknown %s attribute: %s' % (tag, attr))
all_good = False
return all_good
#Endfold
def ExpandPathAttribute(src, attrib):#1
""" Given a dictionary of attributes, return a list of dictionaries
with all the same attributes except for the one named attrib.
That one, we treat as a file path and expand into all its possible
variations. """
# Do the path expansion. On any error, just return the source dictionary.
path = src.get(attrib)
if not path:
return [src]
path = encoder.MaybeNarrowPath(path);
pathlist = glob.glob(path)
if not pathlist:
return [src]
# If this isn't actually a dictionary, make it one
if type(src) != types.DictionaryType:
tmp = {}
for key in src.keys():
tmp[key] = src[key]
src = tmp
# Create N new dictionaries
retval = []
for path in pathlist:
dst = src.copy()
dst[attrib] = path
retval.append(dst)
return retval
#Endfold
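# --- Editor's note (added): hypothetical sketch of ExpandPathAttribute.  The
# wildcard and the file names it might expand to are assumptions.
def _example_expand_path_attribute():
  attrs = {'path': '/var/log/apache/access*.log', 'encoding': 'UTF-8'}
  expanded = ExpandPathAttribute(attrs, 'path')
  # If the glob matches, say, access1.log and access2.log, this returns two
  # dictionaries differing only in 'path'; if nothing matches, the original
  # dictionary comes back unchanged inside a one-element list.
  return expanded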
def OpenFileForRead(path, logtext):#1
""" Opens a text file, be it GZip or plain """
frame = None
file = None
if not path:
return (frame, file)
try:
if path.endswith('.gz'):
frame = open(path, 'rb')
file = gzip.GzipFile(fileobj=frame, mode='rt')
else:
file = open(path, 'rt')
if logtext:
output.Log('Opened %s file: %s' % (logtext, path), 1)
else:
output.Log('Opened file: %s' % path, 1)
except IOError:
output.Error('Can not open file: %s' % path)
return (frame, file)
#Endfold
def TimestampISO8601(t):#1
""" Seconds since epoch (1970-01-01) to ISO 8601 time string """
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(t))
#Endfold
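# --- Editor's note (added): tiny illustration of TimestampISO8601; the epoch
# value is arbitrary.
def _example_timestamp_iso8601():
  return TimestampISO8601(0)   # '1970-01-01T00:00:00Z'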
def CreateSitemapFromFile(configpath, suppress_notify):#1
""" Sets up a new Sitemap object from the specified configuration file. """
# Remember error count on the way in
num_errors = output.num_errors
# Rev up SAX to parse the config
sitemap = Sitemap(suppress_notify)
try:
output.Log('Reading configuration file: %s' % configpath, 0)
xml.sax.parse(configpath, sitemap)
except IOError:
output.Error('Cannot read configuration file: %s' % configpath)
except xml.sax._exceptions.SAXParseException, e:
output.Error('XML error in the config file (line %d, column %d): %s' %
(e._linenum, e._colnum, e.getMessage()))
except xml.sax._exceptions.SAXReaderNotAvailable:
output.Error('Some installs of Python 2.2 did not include complete support'
' for XML.\n Please try upgrading your version of Python'
' and re-running the script.')
# If we added any errors, return no sitemap
if num_errors == output.num_errors:
return sitemap
return None
#Endfold
def ProcessCommandFlags(args):#1
"""
Parse command line flags per specified usage, pick off key, value pairs
All flags of type "--key=value" will be processed as __flags[key] = value,
"--option" will be processed as __flags[option] = option
"""
flags = {}
rkeyval = '--(?P<key>\S*)[=](?P<value>\S*)' # --key=val
roption = '--(?P<option>\S*)' # --key
r = '(' + rkeyval + ')|(' + roption + ')'
rc = re.compile(r)
for a in args:
try:
rcg = rc.search(a).groupdict()
      # groupdict() always contains both named groups, so test for a real
      # (non-None) match rather than mere key presence.
      if rcg.get('key') is not None:
        flags[rcg['key']] = rcg['value']
      if rcg.get('option') is not None:
        flags[rcg['option']] = rcg['option']
except AttributeError:
return None
return flags
#endfold
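# --- Editor's note (added): hypothetical sketch of the flag parsing done by
# ProcessCommandFlags, mirroring how the __main__ block below consumes it.
# The config filename is an assumption.
def _example_process_command_flags():
  flags = ProcessCommandFlags(['--config=example_config.xml', '--testing'])
  # Expected shape: {'config': 'example_config.xml', 'testing': 'testing'}
  return flags and flags.get('config')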
if __name__ == '__main__':#1
flags = ProcessCommandFlags(sys.argv[1:])
if not flags or not flags.has_key('config') or flags.has_key('help'):
output.Log(__usage__, 0)
else:
if "rewriter" in flags:
line_rewrite = imp.load_source("", "%s.py" % os.path.abspath(flags["rewriter"])).line_rewrite
suppress_notify = flags.has_key('testing')
sitemap = CreateSitemapFromFile(flags['config'], suppress_notify)
if not sitemap:
output.Log('Configuration file errors -- exiting.', 0)
else:
sitemap.Generate()
output.Log('Number of errors: %d' % output.num_errors, 1)
output.Log('Number of warnings: %d' % output.num_warns, 1)
#endfold
|
pagefreezer/SitemapGenerator
|
sitemap_gen.py
|
Python
|
bsd-3-clause
| 79,973
|
# Twisted imports
from twisted.protocols import msn, loopback
from twisted.internet.defer import Deferred
from twisted.trial import unittest
# System imports
import StringIO
class StringIOWithoutClosing(StringIO.StringIO):
def close(self): pass
class DummySwitchboardClient(msn.MSNSwitchboardClient):
def userTyping(self, message):
self.state = 'TYPING'
def gotSendRequest(self, fileName, fileSize, cookie, message):
if fileName == 'foobar.ext' and fileSize == 31337 and cookie == 1234: self.state = 'INVITATION'
class DummyNotificationClient(msn.MSNNotificationClient):
def loggedIn(self, userHandle, screenName, verified):
if userHandle == 'foo@bar.com' and screenName == 'Test Screen Name' and verified: self.state = 'LOGIN'
def gotProfile(self, message):
self.state = 'PROFILE'
def gotContactStatus(self, code, userHandle, screenName):
if code == msn.STATUS_AWAY and userHandle == "foo@bar.com" and screenName == "Test Screen Name": self.state = 'INITSTATUS'
def contactStatusChanged(self, code, userHandle, screenName):
if code == msn.STATUS_LUNCH and userHandle == "foo@bar.com" and screenName == "Test Name": self.state = 'NEWSTATUS'
def contactOffline(self, userHandle):
if userHandle == "foo@bar.com": self.state = 'OFFLINE'
def statusChanged(self, code):
if code == msn.STATUS_HIDDEN: self.state = 'MYSTATUS'
class NotificationTests(unittest.TestCase):
""" testing the various events in MSNNotificationClient """
def setUp(self):
self.client = DummyNotificationClient()
self.client.state = 'START'
def tearDown(self):
self.client = None
def testLogin(self):
self.client.lineReceived('USR 1 OK foo@bar.com Test%20Screen%20Name 1')
self.failUnless((self.client.state == 'LOGIN'), message='Failed to detect successful login')
def testProfile(self):
m = 'MSG Hotmail Hotmail 353\r\nMIME-Version: 1.0\r\nContent-Type: text/x-msmsgsprofile; charset=UTF-8\r\n'
m += 'LoginTime: 1016941010\r\nEmailEnabled: 1\r\nMemberIdHigh: 40000\r\nMemberIdLow: -600000000\r\nlang_preference: 1033\r\n'
m += 'preferredEmail: foo@bar.com\r\ncountry: AU\r\nPostalCode: 90210\r\nGender: M\r\nKid: 0\r\nAge:\r\nsid: 400\r\n'
m += 'kv: 2\r\nMSPAuth: 2CACCBCCADMoV8ORoz64BVwmjtksIg!kmR!Rj5tBBqEaW9hc4YnPHSOQ$$\r\n\r\n'
map(self.client.lineReceived, m.split('\r\n')[:-1])
self.failUnless((self.client.state == 'PROFILE'), message='Failed to detect initial profile')
def testStatus(self):
t = [('ILN 1 AWY foo@bar.com Test%20Screen%20Name', 'INITSTATUS', 'Failed to detect initial status report'),
('NLN LUN foo@bar.com Test%20Name', 'NEWSTATUS', 'Failed to detect contact status change'),
('FLN foo@bar.com', 'OFFLINE', 'Failed to detect contact signing off'),
('CHG 1 HDN', 'MYSTATUS', 'Failed to detect my status changing')]
for i in t:
self.client.lineReceived(i[0])
self.failUnless((self.client.state == i[1]), message=i[2])
class MessageHandlingTests(unittest.TestCase):
""" testing various message handling methods from MSNSwichboardClient """
def setUp(self):
self.client = DummySwitchboardClient()
self.client.state = 'START'
def tearDown(self):
self.client = None
def testTypingCheck(self):
m = msn.MSNMessage()
m.setHeader('Content-Type', 'text/x-msmsgscontrol')
m.setHeader('TypingUser', 'foo@bar')
self.client.checkMessage(m)
self.failUnless((self.client.state == 'TYPING'), message='Failed to detect typing notification')
def testFileInvitation(self):
m = msn.MSNMessage()
m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8')
m.message += 'Application-Name: File Transfer\r\n'
m.message += 'Invitation-Command: Invite\r\n'
m.message += 'Invitation-Cookie: 1234\r\n'
m.message += 'Application-File: foobar.ext\r\n'
m.message += 'Application-FileSize: 31337\r\n\r\n'
self.client.checkMessage(m)
self.failUnless((self.client.state == 'INVITATION'), message='Failed to detect file transfer invitation')
def testFileResponse(self):
d = Deferred()
d.addCallback(self.fileResponse)
self.client.cookies['iCookies'][1234] = (d, None)
m = msn.MSNMessage()
m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8')
m.message += 'Invitation-Command: ACCEPT\r\n'
m.message += 'Invitation-Cookie: 1234\r\n\r\n'
self.client.checkMessage(m)
self.failUnless((self.client.state == 'RESPONSE'), message='Failed to detect file transfer response')
def testFileInfo(self):
d = Deferred()
d.addCallback(self.fileInfo)
self.client.cookies['external'][1234] = (d, None)
m = msn.MSNMessage()
m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8')
m.message += 'Invitation-Command: ACCEPT\r\n'
m.message += 'Invitation-Cookie: 1234\r\n'
m.message += 'IP-Address: 192.168.0.1\r\n'
m.message += 'Port: 6891\r\n'
m.message += 'AuthCookie: 4321\r\n\r\n'
self.client.checkMessage(m)
self.failUnless((self.client.state == 'INFO'), message='Failed to detect file transfer info')
def fileResponse(self, (accept, cookie, info)):
if accept and cookie == 1234: self.client.state = 'RESPONSE'
def fileInfo(self, (accept, ip, port, aCookie, info)):
if accept and ip == '192.168.0.1' and port == 6891 and aCookie == 4321: self.client.state = 'INFO'
class FileTransferTestCase(unittest.TestCase):
""" test MSNFileSend against MSNFileReceive """
def setUp(self):
self.input = StringIOWithoutClosing()
self.input.writelines(['a'] * 7000)
self.input.seek(0)
self.output = StringIOWithoutClosing()
def tearDown(self):
self.input = None
self.output = None
def testFileTransfer(self):
auth = 1234
sender = msn.MSNFileSend(self.input)
sender.auth = auth
sender.fileSize = 7000
client = msn.MSNFileReceive(auth, "foo@bar.com", self.output)
client.fileSize = 7000
loopback.loopback(sender, client)
self.failUnless((client.completed and sender.completed), message="send failed to complete")
self.failUnless((self.input.getvalue() == self.output.getvalue()), message="saved file does not match original")
|
fxia22/ASM_xf
|
PythonD/site_python/twisted/test/test_msn.py
|
Python
|
gpl-2.0
| 6,590
|
# -*- coding: utf-8 -*-
"""
flask.views
~~~~~~~~~~~
This module provides class-based views inspired by the ones in Django.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from .globals import request
from ._compat import with_metaclass
http_method_funcs = frozenset(['get', 'post', 'head', 'options',
'delete', 'put', 'trace', 'patch'])
class View(object):
"""Alternative way to use view functions. A subclass has to implement
:meth:`dispatch_request` which is called with the view arguments from
the URL routing system. If :attr:`methods` is provided the methods
do not have to be passed to the :meth:`~flask.Flask.add_url_rule`
method explicitly::
class MyView(View):
methods = ['GET']
def dispatch_request(self, name):
return 'Hello %s!' % name
app.add_url_rule('/hello/<name>', view_func=MyView.as_view('myview'))
When you want to decorate a pluggable view you will have to either do that
when the view function is created (by wrapping the return value of
:meth:`as_view`) or you can use the :attr:`decorators` attribute::
class SecretView(View):
methods = ['GET']
decorators = [superuser_required]
def dispatch_request(self):
...
The decorators stored in the decorators list are applied one after another
when the view function is created. Note that you can *not* use the class
based decorators since those would decorate the view class and not the
generated view function!
"""
#: A list of methods this view can handle.
methods = None
#: The canonical way to decorate class-based views is to decorate the
    #: return value of as_view().  However, since this moves parts of the
    #: logic from the class declaration to the place where it's hooked
    #: into the routing system, this attribute can be used instead.
#:
#: You can place one or more decorators in this list and whenever the
#: view function is created the result is automatically decorated.
#:
#: .. versionadded:: 0.8
decorators = ()
def dispatch_request(self):
"""Subclasses have to override this method to implement the
actual view function code. This method is called with all
the arguments from the URL rule.
"""
raise NotImplementedError()
@classmethod
def as_view(cls, name, *class_args, **class_kwargs):
"""Converts the class into an actual view function that can be used
with the routing system. Internally this generates a function on the
fly which will instantiate the :class:`View` on each request and call
the :meth:`dispatch_request` method on it.
The arguments passed to :meth:`as_view` are forwarded to the
constructor of the class.
"""
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# We attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods
return view
class MethodViewType(type):
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
if 'methods' not in d:
methods = set(rv.methods or [])
for key in d:
if key in http_method_funcs:
methods.add(key.upper())
# If we have no method at all in there we don't want to
# add a method list. (This is for instance the case for
# the base class or another subclass of a base method view
# that does not introduce new methods).
if methods:
rv.methods = sorted(methods)
return rv
class MethodView(with_metaclass(MethodViewType, View)):
"""Like a regular class-based view but that dispatches requests to
particular methods. For instance if you implement a method called
:meth:`get` it means it will respond to ``'GET'`` requests and
the :meth:`dispatch_request` implementation will automatically
forward your request to that. Also :attr:`options` is set for you
automatically::
class CounterAPI(MethodView):
def get(self):
return session.get('counter', 0)
def post(self):
session['counter'] = session.get('counter', 0) + 1
return 'OK'
app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))
"""
def dispatch_request(self, *args, **kwargs):
meth = getattr(self, request.method.lower(), None)
# If the request method is HEAD and we don't have a handler for it
# retry with GET.
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
assert meth is not None, 'Unimplemented method %r' % request.method
return meth(*args, **kwargs)
|
wildchildyn/autism-website
|
yanni_env/lib/python3.6/site-packages/flask/views.py
|
Python
|
gpl-3.0
| 5,630
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
gkc1000/pyscf
|
pyscf/x2c/__init__.py
|
Python
|
apache-2.0
| 611
|
# -*- encoding: utf-8 -*-
from plone.server.addons import Addon
from zope.interface import Interface
from plone.server import configure
from plone.server.api.service import Service
from plone.server.interfaces import ISite
from plone.server.testing import PloneFunctionalTestCase
from plone.server.content import Item
import json
class TestConfigure(PloneFunctionalTestCase):
"""Functional testing of the API REST."""
def test_register_service(self):
cur_count = len(configure.get_configurations('plone.server.tests', 'service'))
class TestService(Service):
async def __call__(self):
return {
"foo": "bar"
}
configure.register_configuration(TestService, dict(
context=ISite,
name="@foobar",
permission='plone.ViewContent'
), 'service')
self.assertEqual(
len(configure.get_configurations('plone.server.tests', 'service')),
cur_count + 1)
# now test it...
configure.load_configuration(
self.layer.app.app.config, 'plone.server.tests', 'service')
self.layer.app.app.config.execute_actions()
resp = self.layer.requester('GET', '/plone/plone/@foobar')
response = json.loads(resp.text)
self.assertEqual(response['foo'], 'bar')
def test_register_contenttype(self):
cur_count = len(
configure.get_configurations('plone.server.tests', 'contenttype'))
class IMyType(Interface):
pass
class MyType(Item):
pass
configure.register_configuration(MyType, dict(
context=ISite,
schema=IMyType,
portal_type="MyType1",
behaviors=["plone.server.behaviors.dublincore.IDublinCore"]
), 'contenttype')
self.assertEqual(
len(configure.get_configurations('plone.server.tests', 'contenttype')),
cur_count + 1)
# now test it...
configure.load_configuration(
self.layer.app.app.config, 'plone.server.tests', 'contenttype')
self.layer.app.app.config.execute_actions()
resp = self.layer.requester('GET', '/plone/plone/@types')
response = json.loads(resp.text)
self.assertTrue(any("MyType1" in s['title'] for s in response))
def test_register_behavior(self):
cur_count = len(
configure.get_configurations('plone.server.tests', 'behavior'))
from plone.server.interfaces import IFormFieldProvider
from zope.interface import provider
from zope import schema
@provider(IFormFieldProvider)
class IMyBehavior(Interface):
foobar = schema.Text()
configure.behavior(
title="MyBehavior",
provides=IMyBehavior,
factory="plone.behavior.AnnotationStorage",
for_="plone.server.interfaces.IResource"
)()
self.assertEqual(
len(configure.get_configurations('plone.server.tests', 'behavior')),
cur_count + 1)
class IMyType(Interface):
pass
class MyType(Item):
pass
configure.register_configuration(MyType, dict(
context=ISite,
schema=IMyType,
portal_type="MyType2",
behaviors=[IMyBehavior]
), 'contenttype')
# now test it...
configure.load_configuration(
self.layer.app.app.config, 'plone.server.tests', 'contenttype')
self.layer.app.app.config.execute_actions()
resp = self.layer.requester('GET', '/plone/plone/@types')
response = json.loads(resp.text)
type_ = [s for s in response if s['title'] == 'MyType2'][0]
self.assertTrue('foobar' in type_['definitions']['IMyBehavior']['properties'])
def test_register_addon(self):
cur_count = len(
configure.get_configurations('plone.server.tests', 'addon'))
@configure.addon(
name="myaddon",
title="My addon")
class MyAddon(Addon):
@classmethod
def install(cls, site, request):
# install code
pass
@classmethod
def uninstall(cls, site, request):
# uninstall code
pass
self.assertEqual(
len(configure.get_configurations('plone.server.tests', 'addon')),
cur_count + 1)
# now test it...
configure.load_configuration(
self.layer.app.app.config, 'plone.server.tests', 'addon')
self.layer.app.app.config.execute_actions()
resp = self.layer.requester('GET', '/plone/plone/@addons')
response = json.loads(resp.text)
self.assertTrue('myaddon' in [a['id'] for a in response['available']])
|
plone/plone.server
|
src/plone.server/plone/server/tests/test_configure.py
|
Python
|
bsd-2-clause
| 4,868
|
import bpy, platform
from single_track.panels import Panel as SingleTrackPanel
from functions import *
class ListPanel(bpy.types.Panel):
	'''Panel containing the add-on's multi-track controls'''
bl_space_type = "CLIP_EDITOR"
bl_region_type = "TOOLS"
bl_label = "Multi track: Tracks list"
bl_category = "Curve Anim"
def draw(self, context):
		'''Draw the add-on UI'''
context.scene.curve_to_frame.draw_track_list_panel( self.layout, context )
class AmplitudePanel(bpy.types.Panel):
	'''Panel containing the amplitude and peaks settings for the multi-track feature'''
bl_space_type = "CLIP_EDITOR"
bl_region_type = "TOOLS"
bl_label = "Multi track: Amplitude & Peaks Settings"
bl_category = "Curve Anim"
def draw(self, context):
		'''Draw the add-on UI'''
layout = self.layout
context.scene.curve_to_frame.draw_amplitude_panel( context, layout)
class OutputPanel(bpy.types.Panel):
	'''Panel containing the output settings for the multi-track feature'''
bl_space_type = "CLIP_EDITOR"
bl_region_type = "TOOLS"
bl_label = "Multi track: Output Settings"
bl_category = "Curve Anim"
def draw(self, context):
		'''Draw the panel'''
layout = self.layout
scene = context.scene
scene.curve_to_frame.draw_output( layout, scene )
# draw run button or error message
#scene.curve_to_frame.draw_run_button( layout )
class SwitchPanel(bpy.types.Panel):
	'''Panel containing the track-switching settings for the multi-track feature'''
bl_space_type = "CLIP_EDITOR"
bl_region_type = "TOOLS"
bl_category = "Curve Anim"
bl_label = "Multi track: Track Switch Settings"
def draw(self, context):
		'''Draw the panel'''
layout = self.layout
context.scene.curve_to_frame.draw_switch( layout )
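# Registration sketch (hypothetical; assuming the add-on's register() follows
# the usual bpy conventions):
#
#     def register():
#         for cls in (ListPanel, AmplitudePanel, OutputPanel, SwitchPanel):
#             bpy.utils.register_class(cls)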
class Panel(SingleTrackPanel):
	'''Class containing all the methods needed to draw the panels'''
def draw_track_list_panel( self, layout, context ):
'''Draw the tracks list Panel'''
# track adding field
row = layout.row()
col = row.column()
col.prop_search(self, "track_add", bpy.data, "movieclips")
col = row.column()
col.operator(
"clip.open", text='', icon='FILESEL')
		# error message if the track is invalid
if self.track_add != '':
row = layout.row()
if self.track_add not in bpy.data.movieclips.keys():
row.label( ' Error: movieclip not found', icon = 'ERROR' )
else:
				row.label( ' Invalid choice: only image sequences can be used.',
icon = 'ERROR' )
# display Tracks list
row = layout.row()
col = row.column()
col.template_list(
"TrackItem",
"",
self,
"tracks",
self,
"selected_track",
rows=5)
# track list action button
col = row.column( align=True )
col.operator("curve_to_frame.tracks_list_action", icon='TRIA_UP', text="").action = 'UP'
col.operator("curve_to_frame.tracks_list_action", icon='FILE_TICK', text="").action = 'CHECK'
col.operator("curve_to_frame.tracks_list_action", icon='X', text="").action = 'REMOVE'
col.operator("curve_to_frame.tracks_list_action", icon='TRIA_DOWN', text="").action = 'DOWN'
# display selected track settings
if (self.selected_track >= 0
and self.selected_track < len(self.tracks) ):
track = self.tracks[self.selected_track]
track_clip = track.get()
if track_clip is not None:
track_info = track_clip.curve_to_frame
# Display selected track directory path
layout.separator()
row = layout.row()
row.label( text = "Frame Directory path:" )
row = layout.row()
row.label( text= track_info.path )
# Display selected track source file extension
row = layout.row()
col = row.column()
col.label( text="File type: "+track_info.ext )
# Display first to last accepted frame name range
col = row.column()
col.label( text="Valid frames: "\
+track_info.get_frame_name(track_info.first)+' to '\
+track_info.get_frame_name(track_info.last) )
# Display Start/End settings
layout.separator()
row = layout.row()
col = row.column()
col.prop(track, "start")
col = row.column()
col.prop(track, "end")
# Display Random following track
if self.follow_rules:
row = layout.row()
row.prop(track, "followers")
else:
layout.separator()
row = layout.row()
row.label( text = "Can't find any movie clip corresponding to this track!", icon = 'ERROR' )
def draw_amplitude_panel(self, context, layout):
'''Draw the amplitude panel for multi track'''
refresh_curve = "curve_to_frame.generate_multi_track_curves"
refresh_mini_maxi = "curve_to_frame.multi_track_get_amplitude_range"
restore_peak_shape = "curve_to_frame.multi_track_default_peak_shape"
# draw amplitude settings
self.draw_amplitude( layout,
refresh_curve, refresh_mini_maxi )
		# draw peaks rhythm settings
self.draw_peak(layout, refresh_curve )
# draw peaks profile settings
self.draw_peak_shape( layout, refresh_curve,
restore_peak_shape )
# draw combination node settings and combination and output value
self.draw_combination( layout, refresh_curve, True )
def draw_output( self, layout, scene ):
'''Draw multi track output panel'''
warning = (not scene.ctf_real_copy \
and platform.system().lower() not in ['linux', 'unix'])
row = layout.row()
col = row.column()
if( check_driver(self.id_data, 'curve_to_frame.' ) ):
			# check that no driver is used on a curve_to_frame property
col.label(text='This function can\'t be used with driver!',
icon='ERROR')
elif(warning):
			# a warning is active: require explicit confirmation
col.operator(
"curve_to_frame.render_multi_track",
text="Ignore warning and generate animation",
icon = 'ERROR')
# A checkbox to get real frame file copy
col = row.column()
col.prop( scene, "ctf_real_copy", icon='ERROR' )
warning = True
else:
			# draw standard run button
col.operator(
"curve_to_frame.render_multi_track",
text="Generate animation")
# A checkbox to get real frame file copy
col = row.column()
col.prop( scene, "ctf_real_copy" )
def draw_switch( self, layout ):
'''Draw multi track switch panel'''
row = layout.row()
col = row.column()
col.prop(self, 'switch_mode')
if self.switch_mode == 'random':
row = layout.row()
col = row.column()
col.prop(self, 'follow_rules')
col = row.column()
col.prop(self, 'never_the_same')
if self.follow_rules:
col.enabled = False
elif self.switch_mode == 'manual':
row = layout.row()
row.prop(self, 'manual_switch')
elif self.switch_mode == 'cyclic':
row = layout.row()
col = row.column()
col.prop(self, 'cyclic_mode')
if self.cyclic_mode == 'custom':
row = layout.row()
col = row.column()
col.prop(self, 'custom_cycle')
if self.get_custom_cycle() is None:
col = row.column()
					col.label(icon = 'ERROR', text='Invalid input.')
self.draw_switch_moment( layout )
def draw_switch_moment( self, layout ):
'''Draw the switch moment settings in switch panel'''
layout.separator()
row = layout.row()
row.label(text = 'Switching moment:')
if self.switch_mode == 'manual':
			# switch at perfect frame option when in manual switching mode
row = layout.row()
row.prop(self, 'switch_at_perfect_frame')
else:
			# switch at a custom instant when not in manual switching mode
row = layout.row()
col = row.column()
col.prop( self, 'switch_at_custom_keyframe' )
col = row.column()
col.prop( self, 'custom_keyframe' )
if not self.switch_at_custom_keyframe:
col.enabled = False
# switch at each peaks starting or keyframe option
row = layout.row()
col = row.column()
col.prop( self, 'switch_at_peaks' )
col = row.column()
col.prop( self, 'switch_at_peaks_keyframes' )
layout.separator()
row = layout.row()
row.label('Switch when:' )
# switch at peaks value option
row = layout.row()
col = row.column()
col.prop( self, 'switch_when_peaks_get_over' )
col = row.column()
col.prop( self, 'peaks_over_trigger_values' )
if not self.switch_when_peaks_get_over:
col.enabled = False
row = layout.row()
col = row.column()
col.prop( self, 'switch_when_peaks_get_under' )
col = row.column()
col.prop( self, 'peaks_under_trigger_values' )
if not self.switch_when_peaks_get_under:
col.enabled = False
# switch at amplitude value option
row = layout.row()
col = row.column()
col.prop( self, 'switch_when_amplitude_get_over' )
col = row.column()
col.prop( self, 'amplitude_over_trigger_values' )
if not self.switch_when_amplitude_get_over:
col.enabled = False
row = layout.row()
col = row.column()
col.prop( self, 'switch_when_amplitude_get_under' )
col = row.column()
col.prop( self, 'amplitude_under_trigger_values' )
if not self.switch_when_amplitude_get_under:
col.enabled = False
# switch at combination value option
row = layout.row()
col = row.column()
col.prop( self, 'switch_when_combination_get_over' )
col = row.column()
col.prop( self, 'combination_over_trigger_values' )
if not self.switch_when_combination_get_over:
col.enabled = False
row = layout.row()
col = row.column()
col.prop( self, 'switch_when_combination_get_under' )
col = row.column()
col.prop( self, 'combination_under_trigger_values' )
if not self.switch_when_combination_get_under:
col.enabled = False
# accuracy settings
layout.separator()
row = layout.row()
row.prop( self, 'values_evaluation_accuracy' )
		# switch maximal gap
row = layout.row()
col = row.column()
col.prop( self, 'maximal_switch_gap_option' )
col = row.column()
col.prop( self, 'maximal_switch_gap' )
if not self.maximal_switch_gap_option:
col.enabled = False
col = row.column()
col.prop( self, 'maximal_switch_gap_proportional_option' )
if not self.maximal_switch_gap_option:
col.enabled = False
# switch minimal gap
row = layout.row()
col = row.column()
col.prop( self, 'minimal_switch_gap_option' )
col = row.column()
col.prop( self, 'minimal_switch_gap' )
if not self.minimal_switch_gap_option:
col.enabled = False
elif self.maximal_switch_gap_option and \
self.minimal_switch_gap > self.maximal_switch_gap:
row = layout.row()
row.label(
icon='ERROR',
text='Error: Minimal gap must be smaller than maximal gap!'
)
layout.separator()
row = layout.row()
row.operator(
"curve_to_frame.generate_track_switching_curve",
icon='FILE_REFRESH',
text="refresh switch curve")
|
CaptainDesAstres/Frames-Animated-By-Curve
|
multi_track/panels.py
|
Python
|
gpl-3.0
| 10,702
|
######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Module for the Poisson distribution node.
"""
import numpy as np
from scipy import special
from .expfamily import ExponentialFamily
from .expfamily import ExponentialFamilyDistribution
from .node import Moments
from .gamma import GammaMoments
from bayespy.utils import utils
class PoissonMoments(Moments):
"""
Class for the moments of Poisson variables
"""
def compute_fixed_moments(self, x):
"""
Compute the moments for a fixed value
"""
# Make sure the values are integers in valid range
x = np.asanyarray(x)
if not utils.isinteger(x):
raise ValueError("Count not integer")
# Now, the moments are just the counts
return [x]
def compute_dims_from_values(self, x):
"""
Return the shape of the moments for a fixed value.
The realizations are scalars, thus the shape of the moment is ().
"""
return ( (), )
class PoissonDistribution(ExponentialFamilyDistribution):
"""
Class for the VMP formulas of Poisson variables.
"""
def compute_message_to_parent(self, parent, index, u, u_lambda):
"""
Compute the message to a parent node.
"""
if index == 0:
m0 = -1
m1 = np.copy(u[0])
return [m0, m1]
else:
raise ValueError("Index out of bounds")
def compute_phi_from_parents(self, u_lambda, mask=True):
"""
Compute the natural parameter vector given parent moments.
"""
l = u_lambda[0]
logl = u_lambda[1]
phi0 = logl
return [phi0]
def compute_moments_and_cgf(self, phi, mask=True):
"""
Compute the moments and :math:`g(\phi)`.
"""
u0 = np.exp(phi[0])
u = [u0]
g = -u0
return (u, g)
def compute_cgf_from_parents(self, u_lambda):
"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
l = u_lambda[0]
g = -l
return g
def compute_fixed_moments_and_f(self, x, mask=True):
"""
Compute the moments and :math:`f(x)` for a fixed value.
"""
# Check the validity of x
x = np.asanyarray(x)
if not utils.isinteger(x):
raise ValueError("Values must be integers")
if np.any(x < 0):
raise ValueError("Values must be positive")
# Compute moments
u0 = np.copy(x)
u = [u0]
# Compute f(x)
f = -special.gammaln(x+1)
return (u, f)
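# Note on the exponential-family form implemented above: writing the Poisson
# pmf as p(x|lambda) = exp(phi*u(x) + g(phi) + f(x)) gives the natural
# parameter phi = log(lambda), sufficient statistic u(x) = x, log-normalizer
# g(phi) = -exp(phi) = -lambda, and f(x) = -log(x!) = -gammaln(x+1), which is
# exactly what the compute_* methods of PoissonDistribution return.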
class Poisson(ExponentialFamily):
"""
Node for Poisson random variables.
"""
dims = ( (), )
_moments = PoissonMoments()
_parent_moments = [GammaMoments()]
_distribution = PoissonDistribution()
def __init__(self, l, **kwargs):
"""
Create Poisson random variable node
"""
super().__init__(l, **kwargs)
def random(self):
"""
Draw a random sample from the distribution.
"""
raise NotImplementedError("Poisson random sampling not yet implemented")
def show(self):
"""
Print the distribution using standard parameterization.
"""
l = self.u[0]
print("%s ~ Categorical(lambda)" % self.name)
print(" lambda = ")
print(l)
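# Minimal usage sketch (assuming the usual BayesPy node API of this version;
# 'data' is a hypothetical array of non-negative integer counts). The rate
# parameter may be a fixed value or a Gamma node, matching _parent_moments:
#
#     from bayespy.inference.vmp.nodes.gamma import Gamma
#     l = Gamma(1e-3, 1e-3)
#     x = Poisson(l)
#     x.observe(data)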
|
nipunreddevil/bayespy
|
bayespy/inference/vmp/nodes/poisson.py
|
Python
|
gpl-3.0
| 4,417
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Communique.surfase'
db.delete_column('lizard_area_communique', 'surfase')
# Adding field 'Communique.surface'
db.add_column('lizard_area_communique', 'surface', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=1, blank=True), keep_default=False)
def backwards(self, orm):
# Adding field 'Communique.surfase'
db.add_column('lizard_area_communique', 'surfase', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=1, blank=True), keep_default=False)
# Deleting field 'Communique.surface'
db.delete_column('lizard_area_communique', 'surface')
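    # Net effect: the misspelled 'surfase' column is replaced by 'surface'
    # (existing values are not carried over). Applied with South via e.g.
    # `python manage.py migrate lizard_area`.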
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_area.area': {
'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'area_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']", 'null': 'True', 'blank': 'True'}),
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
'dt_created': ('django.db.models.fields.DateTimeField', [], {}),
'dt_latestchanged': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dt_latestsynchronized': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
},
'lizard_area.areawfsconfiguration': {
'Meta': {'object_name': 'AreaWFSConfiguration'},
'area_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'cql_filter': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maxFeatures': ('django.db.models.fields.IntegerField', [], {'default': '64000'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'typeName': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'lizard_area.category': {
'Meta': {'ordering': "('name',)", 'object_name': 'Category'},
'geo_object_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_geo.GeoObjectGroup']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapnik_xml_style_sheet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.MapnikXMLStyleSheet']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'lizard_area.communique': {
'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
'areasort': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'areasort_krw': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'dt_latestchanged_krw': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'surface': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '1', 'blank': 'True'}),
'watertype_krw': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'lizard_area.dataadministrator': {
'Meta': {'object_name': 'DataAdministrator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_area.mapnikxmlstylesheet': {
'Meta': {'object_name': 'MapnikXMLStyleSheet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'source_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'style': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'})
},
'lizard_area.synchronizationhistory': {
'Meta': {'object_name': 'SynchronizationHistory'},
'amount_activated': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'amount_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'amount_deactivated': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'amount_synchronized': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'amount_updated': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
'dt_finish': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dt_start': ('django.db.models.fields.DateTimeField', [], {}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_geo.geoobject': {
'Meta': {'object_name': 'GeoObject'},
'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'lizard_geo.geoobjectgroup': {
'Meta': {'object_name': 'GeoObjectGroup'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_security.dataset': {
'Meta': {'ordering': "['name']", 'object_name': 'DataSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
}
}
complete_apps = ['lizard_area']
|
lizardsystem/lizard-area
|
lizard_area/migrations/0012_auto__del_field_communique_surfase__add_field_communique_surface.py
|
Python
|
gpl-3.0
| 12,275
|
# -*- coding: utf-8 -*-
# ##########################################################
# ## make sure administrator is on localhost
# ###########################################################
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils
try:
import pygraphviz as pgv
except ImportError:
pgv = None
is_gae = request.env.web2py_runtime_gae or False
# ## critical --- make a copy of the environment
global_env = copy.copy(globals())
global_env['datetime'] = datetime
http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
hosts = (http_host, socket.gethostname(),
socket.gethostbyname(http_host),
'::1', '127.0.0.1', '::ffff:127.0.0.1')
except:
hosts = (http_host, )
if request.is_https:
session.secure()
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1") and \
(request.function != 'manage'):
raise HTTP(200, T('appadmin is disabled because insecure channel'))
if request.function == 'manage':
if not 'auth' in globals() or not request.args:
redirect(URL(request.controller, 'index'))
manager_action = auth.settings.manager_actions.get(request.args(0), None)
if manager_action is None and request.args(0) == 'auth':
manager_action = dict(role=auth.settings.auth_manager_role,
heading=T('Manage Access Control'),
tables=[auth.table_user(),
auth.table_group(),
auth.table_permission()])
manager_role = manager_action.get('role', None) if manager_action else None
if not (gluon.fileutils.check_credentials(request) or auth.has_membership(manager_role)):
raise HTTP(403, "Not authorized")
menu = False
elif (request.application == 'admin' and not session.authorized) or \
(request.application != 'admin' and not gluon.fileutils.check_credentials(request)):
redirect(URL('admin', 'default', 'index',
vars=dict(send=URL(args=request.args, vars=request.vars))))
else:
response.subtitle = T('Database Administration (appadmin)')
menu = True
ignore_rw = True
response.view = 'appadmin.html'
if menu:
response.menu = [[T('design'), False, URL('admin', 'default', 'design',
args=[request.application])], [T('db'), False,
URL('index')], [T('state'), False,
URL('state')], [T('cache'), False,
URL('ccache')]]
# ##########################################################
# ## auxiliary functions
# ###########################################################
if False and request.tickets_db:
from gluon.restricted import TicketStorage
ts = TicketStorage()
ts._get_table(request.tickets_db, ts.tablename, request.application)
def get_databases(request):
dbs = {}
for (key, value) in global_env.items():
try:
cond = isinstance(value, GQLDB)
except:
cond = isinstance(value, SQLDB)
if cond:
dbs[key] = value
return dbs
databases = get_databases(None)
def eval_in_global_env(text):
exec ('_ret=%s' % text, {}, global_env)
return global_env['_ret']
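# e.g. eval_in_global_env('db.mytable.id>0') evaluates a query string in the
# copied global environment ('db' and 'mytable' being placeholder names);
# this is what powers the free-form Query: field in select() below.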
def get_database(request):
if request.args and request.args[0] in databases:
return eval_in_global_env(request.args[0])
else:
session.flash = T('invalid request')
redirect(URL('index'))
def get_table(request):
db = get_database(request)
if len(request.args) > 1 and request.args[1] in db.tables:
return (db, request.args[1])
else:
session.flash = T('invalid request')
redirect(URL('index'))
def get_query(request):
try:
return eval_in_global_env(request.vars.query)
except Exception:
return None
def query_by_table_type(tablename, db, request=request):
keyed = hasattr(db[tablename], '_primarykey')
if keyed:
firstkey = db[tablename][db[tablename]._primarykey[0]]
cond = '>0'
if firstkey.type in ['string', 'text']:
cond = '!=""'
qry = '%s.%s.%s%s' % (
request.args[0], request.args[1], firstkey.name, cond)
else:
qry = '%s.%s.id>0' % tuple(request.args[:2])
return qry
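# For a keyed table with a string primary key this builds a query string such
# as 'db.person.name!=""'; for ordinary id-based tables it builds
# 'db.person.id>0' ('db' and 'person' are placeholder names).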
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
return dict(databases=databases)
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
(db, table) = get_table(request)
form = SQLFORM(db[table], ignore_rw=ignore_rw)
if form.accepts(request.vars, session):
response.flash = T('new record inserted')
return dict(form=form, table=db[table])
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
import os
db = get_database(request)
return response.download(request, db)
def csv():
import gluon.contenttype
response.headers['Content-Type'] = \
gluon.contenttype.contenttype('.csv')
db = get_database(request)
query = get_query(request)
if not query:
return None
response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv'\
% tuple(request.vars.query.split('.')[:2])
return str(db(query, ignore_common_filters=True).select())
def import_csv(table, file):
table.import_from_csv_file(file)
def select():
import re
db = get_database(request)
dbname = request.args[0]
try:
is_imap = db._uri.startswith("imap://")
except (KeyError, AttributeError, TypeError):
is_imap = False
regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'):
regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
if request.vars.query:
match = regex.match(request.vars.query)
if match:
request.vars.query = '%s.%s.%s==%s' % (request.args[0],
match.group('table'), match.group('field'),
match.group('value'))
else:
request.vars.query = session.last_query
query = get_query(request)
if request.vars.start:
start = int(request.vars.start)
else:
start = 0
nrows = 0
step = 100
fields = []
if is_imap:
step = 3
stop = start + step
table = None
rows = []
orderby = request.vars.orderby
if orderby:
orderby = dbname + '.' + orderby
if orderby == session.last_orderby:
if orderby[0] == '~':
orderby = orderby[1:]
else:
orderby = '~' + orderby
session.last_orderby = orderby
session.last_query = request.vars.query
form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px',
_name='query', _value=request.vars.query or '',
requires=IS_NOT_EMPTY(
error_message=T("Cannot be empty")))), TR(T('Update:'),
INPUT(_name='update_check', _type='checkbox',
value=False), INPUT(_style='width:400px',
_name='update_fields', _value=request.vars.update_fields
or '')), TR(T('Delete:'), INPUT(_name='delete_check',
_class='delete', _type='checkbox', value=False), ''),
TR('', '', INPUT(_type='submit', _value=T('submit')))),
_action=URL(r=request, args=request.args))
tb = None
if form.accepts(request.vars, formname=None):
regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+')
match = regex.match(form.vars.query.strip())
if match:
table = match.group('table')
try:
nrows = db(query, ignore_common_filters=True).count()
if form.vars.update_check and form.vars.update_fields:
db(query, ignore_common_filters=True).update(
**eval_in_global_env('dict(%s)' % form.vars.update_fields))
response.flash = T('%s %%{row} updated', nrows)
elif form.vars.delete_check:
db(query, ignore_common_filters=True).delete()
response.flash = T('%s %%{row} deleted', nrows)
nrows = db(query, ignore_common_filters=True).count()
if is_imap:
fields = [db[table][name] for name in
("id", "uid", "created", "to",
"sender", "subject")]
if orderby:
rows = db(query, ignore_common_filters=True).select(
*fields, limitby=(start, stop),
orderby=eval_in_global_env(orderby))
else:
rows = db(query, ignore_common_filters=True).select(
*fields, limitby=(start, stop))
except Exception, e:
import traceback
tb = traceback.format_exc()
(rows, nrows) = ([], 0)
response.flash = DIV(T('Invalid Query'), PRE(str(e)))
# begin handle upload csv
csv_table = table or request.vars.table
if csv_table:
formcsv = FORM(str(T('or import from csv file')) + " ",
INPUT(_type='file', _name='csvfile'),
INPUT(_type='hidden', _value=csv_table, _name='table'),
INPUT(_type='submit', _value=T('import')))
else:
formcsv = None
if formcsv and formcsv.process().accepted:
try:
import_csv(db[request.vars.table],
request.vars.csvfile.file)
response.flash = T('data uploaded')
except Exception, e:
response.flash = DIV(T('unable to parse csv file'), PRE(str(e)))
# end handle upload csv
return dict(
form=form,
table=table,
start=start,
stop=stop,
step=step,
nrows=nrows,
rows=rows,
query=request.vars.query,
formcsv=formcsv,
tb=tb
)
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
(db, table) = get_table(request)
keyed = hasattr(db[table], '_primarykey')
record = None
db[table]._common_filter = None
if keyed:
key = [f for f in request.vars if f in db[table]._primarykey]
if key:
record = db(db[table][key[0]] == request.vars[key[
0]]).select().first()
else:
record = db(db[table].id == request.args(
2)).select().first()
if not record:
qry = query_by_table_type(table, db)
session.flash = T('record does not exist')
redirect(URL('select', args=request.args[:1],
vars=dict(query=qry)))
if keyed:
for k in db[table]._primarykey:
db[table][k].writable = False
form = SQLFORM(
db[table], record, deletable=True, delete_label=T('Check to delete'),
ignore_rw=ignore_rw and not keyed,
linkto=URL('select',
args=request.args[:1]), upload=URL(r=request,
f='download', args=request.args[:1]))
if form.accepts(request.vars, session):
session.flash = T('done!')
qry = query_by_table_type(table, db)
redirect(URL('select', args=request.args[:1],
vars=dict(query=qry)))
return dict(form=form, table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
return dict()
def ccache():
if is_gae:
form = FORM(
P(TAG.BUTTON(T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")))
else:
cache.ram.initialize()
cache.disk.initialize()
form = FORM(
P(TAG.BUTTON(
T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")),
P(TAG.BUTTON(
T("Clear RAM"), _type="submit", _name="ram", _value="ram")),
P(TAG.BUTTON(
T("Clear DISK"), _type="submit", _name="disk", _value="disk")),
)
if form.accepts(request.vars, session):
session.flash = ""
if is_gae:
if request.vars.yes:
cache.ram.clear()
session.flash += T("Cache Cleared")
else:
clear_ram = False
clear_disk = False
if request.vars.yes:
clear_ram = clear_disk = True
if request.vars.ram:
clear_ram = True
if request.vars.disk:
clear_disk = True
if clear_ram:
cache.ram.clear()
session.flash += T("Ram Cleared")
if clear_disk:
cache.disk.clear()
session.flash += T("Disk Cleared")
redirect(URL(r=request))
try:
from guppy import hpy
hp = hpy()
except ImportError:
hp = False
import shelve
import os
import copy
import time
import math
from gluon import portalocker
ram = {
'entries': 0,
'bytes': 0,
'objects': 0,
'hits': 0,
'misses': 0,
'ratio': 0,
'oldest': time.time(),
'keys': []
}
disk = copy.copy(ram)
total = copy.copy(ram)
disk['keys'] = []
total['keys'] = []
def GetInHMS(seconds):
hours = math.floor(seconds / 3600)
seconds -= hours * 3600
minutes = math.floor(seconds / 60)
seconds -= minutes * 60
seconds = math.floor(seconds)
return (hours, minutes, seconds)
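    # e.g. GetInHMS(3725) == (1.0, 2.0, 5.0), i.e. 1 h 2 m 5 s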
if is_gae:
gae_stats = cache.ram.client.get_stats()
try:
gae_stats['ratio'] = ((gae_stats['hits'] * 100) /
(gae_stats['hits'] + gae_stats['misses']))
except ZeroDivisionError:
gae_stats['ratio'] = T("?")
gae_stats['oldest'] = GetInHMS(time.time() - gae_stats['oldest_item_age'])
total.update(gae_stats)
else:
# get ram stats directly from the cache object
ram_stats = cache.ram.stats[request.application]
ram['hits'] = ram_stats['hit_total'] - ram_stats['misses']
ram['misses'] = ram_stats['misses']
try:
ram['ratio'] = ram['hits'] * 100 / ram_stats['hit_total']
except (KeyError, ZeroDivisionError):
ram['ratio'] = 0
for key, value in cache.ram.storage.iteritems():
if hp:
ram['bytes'] += hp.iso(value[1]).size
ram['objects'] += hp.iso(value[1]).count
ram['entries'] += 1
if value[0] < ram['oldest']:
ram['oldest'] = value[0]
ram['keys'].append((key, GetInHMS(time.time() - value[0])))
for key in cache.disk.storage:
value = cache.disk.storage[key]
if isinstance(value[1], dict):
disk['hits'] = value[1]['hit_total'] - value[1]['misses']
disk['misses'] = value[1]['misses']
try:
disk['ratio'] = disk['hits'] * 100 / value[1]['hit_total']
except (KeyError, ZeroDivisionError):
disk['ratio'] = 0
else:
if hp:
disk['bytes'] += hp.iso(value[1]).size
disk['objects'] += hp.iso(value[1]).count
disk['entries'] += 1
if value[0] < disk['oldest']:
disk['oldest'] = value[0]
disk['keys'].append((key, GetInHMS(time.time() - value[0])))
ram_keys = ram.keys() # ['hits', 'objects', 'ratio', 'entries', 'keys', 'oldest', 'bytes', 'misses']
ram_keys.remove('ratio')
ram_keys.remove('oldest')
for key in ram_keys:
total[key] = ram[key] + disk[key]
try:
total['ratio'] = total['hits'] * 100 / (total['hits'] +
total['misses'])
except (KeyError, ZeroDivisionError):
total['ratio'] = 0
if disk['oldest'] < ram['oldest']:
total['oldest'] = disk['oldest']
else:
total['oldest'] = ram['oldest']
ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
total['oldest'] = GetInHMS(time.time() - total['oldest'])
def key_table(keys):
return TABLE(
TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
*[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
**dict(_class='cache-keys',
_style="border-collapse: separate; border-spacing: .5em;"))
if not is_gae:
ram['keys'] = key_table(ram['keys'])
disk['keys'] = key_table(disk['keys'])
total['keys'] = key_table(total['keys'])
return dict(form=form, total=total,
ram=ram, disk=disk, object_stats=hp != False)
def table_template(table):
from gluon.html import TR, TD, TABLE, TAG
def FONT(*args, **kwargs):
return TAG.font(*args, **kwargs)
def types(field):
f_type = field.type
if not isinstance(f_type,str):
return ' '
elif f_type == 'string':
return field.length
elif f_type == 'id':
return B('pk')
elif f_type.startswith('reference') or \
f_type.startswith('list:reference'):
return B('fk')
else:
return ' '
    # This is horrible HTML but the only kind graphviz understands
rows = []
cellpadding = 4
color = "#000000"
bgcolor = "#FFFFFF"
face = "Helvetica"
face_bold = "Helvetica Bold"
border = 0
rows.append(TR(TD(FONT(table, _face=face_bold, _color=bgcolor),
_colspan=3, _cellpadding=cellpadding,
_align="center", _bgcolor=color)))
for row in db[table]:
rows.append(TR(TD(FONT(row.name, _color=color, _face=face_bold),
_align="left", _cellpadding=cellpadding,
_border=border),
TD(FONT(row.type, _color=color, _face=face),
_align="left", _cellpadding=cellpadding,
_border=border),
TD(FONT(types(row), _color=color, _face=face),
_align="center", _cellpadding=cellpadding,
_border=border)))
return "< %s >" % TABLE(*rows, **dict(_bgcolor=bgcolor, _border=1,
_cellborder=0, _cellspacing=0)
).xml()
def bg_graph_model():
graph = pgv.AGraph(layout='dot', directed=True, strict=False, rankdir='LR')
subgraphs = dict()
for tablename in db.tables:
if hasattr(db[tablename],'_meta_graphmodel'):
meta_graphmodel = db[tablename]._meta_graphmodel
else:
meta_graphmodel = dict(group=request.application, color='#ECECEC')
group = meta_graphmodel['group'].replace(' ', '')
if group not in subgraphs:
subgraphs[group] = dict(meta=meta_graphmodel, tables=[])
subgraphs[group]['tables'].append(tablename)
graph.add_node(tablename, name=tablename, shape='plaintext',
label=table_template(tablename))
for n, key in enumerate(subgraphs.iterkeys()):
graph.subgraph(nbunch=subgraphs[key]['tables'],
name='cluster%d' % n,
style='filled',
color=subgraphs[key]['meta']['color'],
label=subgraphs[key]['meta']['group'])
for tablename in db.tables:
for field in db[tablename]:
f_type = field.type
if isinstance(f_type,str) and (
f_type.startswith('reference') or
f_type.startswith('list:reference')):
referenced_table = f_type.split()[1].split('.')[0]
n1 = graph.get_node(tablename)
n2 = graph.get_node(referenced_table)
graph.add_edge(n1, n2, color="#4C4C4C", label='')
graph.layout()
if not request.args:
response.headers['Content-Type'] = 'image/png'
return graph.draw(format='png', prog='dot')
else:
response.headers['Content-Disposition']='attachment;filename=graph.%s'%request.args(0)
if request.args(0) == 'dot':
return graph.string()
else:
return graph.draw(format=request.args(0), prog='dot')
def graph_model():
return dict(databases=databases, pgv=pgv)
def manage():
tables = manager_action['tables']
if isinstance(tables[0], str):
db = manager_action.get('db', auth.db)
db = globals()[db] if isinstance(db, str) else db
tables = [db[table] for table in tables]
if request.args(0) == 'auth':
auth.table_user()._plural = T('Users')
auth.table_group()._plural = T('Roles')
auth.table_membership()._plural = T('Memberships')
auth.table_permission()._plural = T('Permissions')
if request.extension != 'load':
return dict(heading=manager_action.get('heading',
T('Manage %(action)s') % dict(action=request.args(0).replace('_', ' ').title())),
tablenames=[table._tablename for table in tables],
labels=[table._plural.title() for table in tables])
table = tables[request.args(1, cast=int)]
formname = '%s_grid' % table._tablename
linked_tables = orderby = None
if request.args(0) == 'auth':
auth.table_group()._id.readable = \
auth.table_membership()._id.readable = \
auth.table_permission()._id.readable = False
auth.table_membership().user_id.label = T('User')
auth.table_membership().group_id.label = T('Role')
auth.table_permission().group_id.label = T('Role')
auth.table_permission().name.label = T('Permission')
if table == auth.table_user():
linked_tables=[auth.settings.table_membership_name]
elif table == auth.table_group():
orderby = 'role' if not request.args(3) or '.group_id' not in request.args(3) else None
elif table == auth.table_permission():
orderby = 'group_id'
kwargs = dict(user_signature=True, maxtextlength=1000,
orderby=orderby, linked_tables=linked_tables)
smartgrid_args = manager_action.get('smartgrid_args', {})
kwargs.update(**smartgrid_args.get('DEFAULT', {}))
kwargs.update(**smartgrid_args.get(table._tablename, {}))
grid = SQLFORM.smartgrid(table, args=request.args[:2], formname=formname, **kwargs)
return grid
def hooks():
import functools
import inspect
list_op=['_%s_%s' %(h,m) for h in ['before', 'after'] for m in ['insert','update','delete']]
tables=[]
with_build_it=False
for db_str in sorted(databases):
db = databases[db_str]
for t in db.tables:
method_hooks=[]
for op in list_op:
functions = []
for f in getattr(db[t], op):
if hasattr(f, '__call__'):
try:
if isinstance(f, (functools.partial)):
f = f.func
filename = inspect.getsourcefile(f)
details = {'funcname':f.__name__,
'filename':filename[len(request.folder):] if request.folder in filename else None,
'lineno': inspect.getsourcelines(f)[1]}
                            if details['filename']: # Built-in functions such as delete_uploaded_files are not editable
details['url'] = URL(a='admin',c='default',f='edit', args=[request['application'], details['filename']],vars={'lineno':details['lineno']})
if details['filename'] or with_build_it:
functions.append(details)
                            # compiled apps and the Windows build don't support code inspection
except:
pass
if len(functions):
method_hooks.append({'name':op, 'functions':functions})
if len(method_hooks):
tables.append({'name':"%s.%s" % (db_str,t), 'slug': IS_SLUG()("%s.%s" % (db_str,t))[0], 'method_hooks':method_hooks})
# Render
ul_main = UL(_class='nav nav-list')
for t in tables:
ul_main.append(A(t['name'], _onclick="collapse('a_%s')" % t['slug']))
ul_t = UL(_class='nav nav-list', _id="a_%s" % t['slug'], _style='display:none')
for op in t['method_hooks']:
ul_t.append(LI (op['name']))
ul_t.append(UL([LI(A(f['funcname'], _class="editor_filelink", _href=f['url']if 'url' in f else None, **{'_data-lineno':f['lineno']-1})) for f in op['functions']]))
ul_main.append(ul_t)
return ul_main
|
allthroughthenight/aces
|
web2py/applications/welcome/controllers/appadmin.py
|
Python
|
gpl-3.0
| 25,689
|
# Macros
#
CODING_BUG = """It looks like you've hit a bug in the server. Please, \
do not hesitate to report it at http://bugs.cherokee-project.com/ so \
the developer team can fix it."""
UNKNOWN_CAUSE = """An unexpected error has just occurred in the \
server. The cause of the issue is unknown. Please, do not hesitate to \
report it at http://bugs.cherokee-project.com/ so the developer team \
can fix it."""
SYSTEM_ISSUE = """The issue seems to be related to your system."""
BROKEN_CONFIG = """The configuration file seems to be broken."""
INTERNAL_ISSUE = """The server found an internal problem. """
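# Each e() entry below defines one error message: 'title' is a printf-style
# format string (with ${errno} expanded to the system error string), 'desc'
# is a longer user-facing explanation, 'debug' adds extra context, 'admin'
# links into the admin interface, and 'show_bt' controls whether a backtrace
# is attached. (Field meanings inferred from the entries themselves.)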
# cherokee/proxy_host.c
#
e('PROXY_HEADER_PARSE',
title = "Could not parse header from the back-end",
desc = "It looks like the back-end server sent a malformed HTTP response.",
debug = "Dump of the header buffer (len=%d): %s")
# cherokee/source.c
#
e('SOURCE_NONBLOCK',
title = "Failed to set nonblocking (fd=%d): ${errno}",
desc = SYSTEM_ISSUE)
# cherokee/rrd_tools.c
#
e('RRD_NO_BINARY',
title = "Could not find the rrdtool binary.",
desc = "A custom rrdtool binary has not been defined, and the server could not find one in the $PATH.",
debug = "PATH=%s",
admin = '/general#tabs_general-0',
show_bt = False)
e('RRD_EXECV',
title = "execv failed cmd='%s': ${errno}",
desc = SYSTEM_ISSUE)
e('RRD_FORK',
title = "Fork failed pid=%d: ${errno}",
desc = SYSTEM_ISSUE)
e('RRD_WRITE',
title = "Cannot write in %s: ${errno}",
desc = SYSTEM_ISSUE)
e('RRD_DIR_PERMS',
title = "Insufficient permissions to work with the RRD directory: %s",
desc = SYSTEM_ISSUE,
admin = '/general#tabs_general-0')
e('RRD_MKDIR_WRITE',
title = "Cannot create the '%s' directory; or the directory doesn't have write permissions",
desc = SYSTEM_ISSUE,
admin = '/general#tabs_general-0')
# cherokee/balancer_ip_hash.c
#
e('BALANCER_IP_REACTIVE',
title = "Taking source='%s' back on-line: %d active.",
desc = "The server is re-enabling one of the Information Sources.")
e('BALANCER_IP_DISABLE',
title = "Taking source='%s' off-line. Active %d.",
desc = "The server is disabling one of the Information Sources.")
e('BALANCER_IP_EXHAUSTED',
title = "Sources exhausted: re-enabling one.",
desc = "All the information sources are disabled at this moment. Cherokee needs to re-enable at least one.")
# cherokee/balancer_failover.c
#
e('BALANCER_FAILOVER_REACTIVE',
title = "Taking source='%s' back on-line.",
desc = "The server is re-enabling one of the Information Sources.")
e('BALANCER_FAILOVER_DISABLE',
title = "Taking source='%s' off-line.",
desc = "The server is disabling one of the Information Sources.")
e('BALANCER_FAILOVER_ENABLE_ALL',
title = "Taking all sources back on-line.",
desc = "All the Information Sources have been off-lined. The server is re-enabling all of them in order to start over again.")
# cherokee/resolv_cache.c
#
e('RESOLVE_TIMEOUT',
title = "Timed out while resolving '%s'",
desc = "For some reason, Cherokee could not resolve the hostname.")
# cherokee/validator_authlist.c
#
e('VALIDATOR_AUTHLIST_USER',
title = "Could not read 'user' value for '%s'",
desc = BROKEN_CONFIG)
e('VALIDATOR_AUTHLIST_PASSWORD',
title = "Could not read 'password' value for '%s'",
desc = BROKEN_CONFIG)
e('VALIDATOR_AUTHLIST_EMPTY',
title = "Empty authlist: Access will be denied.",
desc = "The access to this resource will be denied as long as the list of allowed users is empty.")
# cherokee/validator_pam.c
#
e('VALIDATOR_PAM_DELAY',
title = "Setting pam fail delay failed",
desc = "Cherokee could not configure PAM propertly. Most likely you have found an incompatibility issue between Cherokee and your system PAM library.")
e('VALIDATOR_PAM_AUTH',
title = "User '%s' - not authenticated: %s",
desc = "Most likely the password did not match")
e('VALIDATOR_PAM_ACCOUNT',
title = "User '%s' - invalid account: %s",
desc = "The specified user does not exist on the system.")
# cherokee/validator_ldap.c
#
e('VALIDATOR_LDAP_KEY',
title = "Validator LDAP: Unknown key: '%s'",
desc = BROKEN_CONFIG)
e('VALIDATOR_LDAP_PROPERTY',
title = "The LDAP validation module requires a '%s' property",
desc = "It looks like you did not fill a required property. Check the LDAP details and try again.")
e('VALIDATOR_LDAP_SECURITY',
title = "Security problem found in LDAP validation config",
desc = "LDAP validator: Potential security problem found: anonymous bind validation. Check (RFC 2251, section 4.2.2)")
e('VALIDATOR_LDAP_CONNECT',
title = "Could not connect to LDAP: %s:%d: '${errno}'",
desc = SYSTEM_ISSUE)
e('VALIDATOR_LDAP_V3',
title = "Could not set the LDAP version 3: %s",
desc = SYSTEM_ISSUE)
e('VALIDATOR_LDAP_CA',
title = "Could not set CA file %s: %s",
desc = SYSTEM_ISSUE)
e('VALIDATOR_LDAP_STARTTLS',
title = "cannot StartTLS, it is not supported by LDAP client libraries",
desc = SYSTEM_ISSUE)
e('VALIDATOR_LDAP_BIND',
title = "Could not bind (%s:%d): %s:%s : %s",
desc = SYSTEM_ISSUE)
e('VALIDATOR_LDAP_SEARCH',
title = "Could not search in LDAP server: %s",
desc = SYSTEM_ISSUE)
# cherokee/validator_file.c
#
e('VALIDATOR_FILE',
title = "Unknown path type '%s'",
desc = BROKEN_CONFIG)
e('VALIDATOR_FILE_NO_FILE',
title = "File based validators need a password file",
desc = "This validation modules reads a local file in order to get the authorizated user list. The configuration specifies. Please try to reconfigure the details and ensure a filename is provided.")
# cherokee/validator.c
#
e('VALIDATOR_METHOD_UNKNOWN',
title = "Unknown authentication method '%s'",
desc = BROKEN_CONFIG)
# cherokee/handler_*.c
#
e('HANDLER_REGEX_GROUPS',
title = "Too many groups in the regex",
desc = "The specified regular expression is wrong. Please double check it.")
e('HANDLER_NO_BALANCER',
title = "The handler needs a balancer",
desc = BROKEN_CONFIG)
# cherokee/handler_secdownload.c
#
e('HANDLER_SECDOWN_SECRET',
title = "Handler secdownload needs a secret",
desc = "You must define a passphrase to be used as shared secret between the Hidden Downloads handler and the script you use to generate the URLs.")
# cherokee/handler_server_info.c
#
e('HANDLER_SRV_INFO_MOD',
title = "Unknown module type (%d)",
desc = CODING_BUG)
e('HANDLER_SRV_INFO_TYPE',
title = "Unknown ServerInfo type: '%s'",
desc = "Your configuration file is either broken, obsolete or has been tampered with. You need to reconfigure the verbosity of your ServerInfo handler.")
# cherokee/handler_file.c
#
e('HANDLER_FILE_TIME_PARSE',
title = "Unparseable time '%s'")
# cherokee/handler_ssi.c
#
e('HANDLER_SSI_PROPERTY',
title = "Unknown SSI property: '%s'",
desc = BROKEN_CONFIG)
# cherokee/handler_fcgi.c
#
e('HANDLER_FCGI_VERSION',
title = "Parsing error: unknown version")
e('HANDLER_FCGI_PARSING',
title = "Parsing error: unknown type")
e('HANDLER_FCGI_STDERR',
title = "%s")
e('HANDLER_FCGI_BALANCER',
title = "Found a FastCGI handler without a Load Balancer",
desc = BROKEN_CONFIG)
# cherokee/handler_error_redir.c
#
e('HANDLER_ERROR_REDIR_CODE',
title = "Wrong error code: '%s'",
desc = BROKEN_CONFIG)
e('HANDLER_ERROR_REDIR_URL',
title = "HTTP Error %d redirection: An 'url' property is required",
desc = BROKEN_CONFIG)
# cherokee/handler_dirlist.c
#
e('HANDLER_DIRLIST_THEME',
title = "Could not load theme '%s': %s",
desc = "Either the directory where your theme resides has been deleted, or the permissions are wrong.")
e('HANDLER_DIRLIST_BAD_THEME',
title = "The theme is incomplete",
desc = "Most likely someone has inadvertedly deleted some of the files of your theme. Please try to restore the files or change your theme selection.")
# cherokee/handler_post_report.c
#
e('HANDLER_POST_REPORT_LANG',
title = "Unrecognized language '%s'",
desc = "Cherokee's POST status reporter supports a number of output languages and formats, including: JSON, Python, PHP and Ruby.")
# cherokee/handler_dbslayer.c
#
e('HANDLER_DBSLAYER_LANG',
title = "Unrecognized language '%s'",
desc = "Cherokee's DBSlayer supports a number of output languages and formats, including: JSON, Python, PHP and Ruby. Please reconfigure the DBSlayer rule to match one of those.")
e('HANDLER_DBSLAYER_BALANCER',
title = "DBSlayer handler needs a balancer",
desc = "The DBSlayer handler needs must specify a load balancing strategy and a list of target hosts to attend the load. At least one host is required. Please ensure it is correctly configured.")
# cherokee/handler_custom_error.c
#
e('HANDLER_CUSTOM_ERROR_HTTP',
title = "Handler custom error needs an HTTP error value.",
desc = BROKEN_CONFIG)
# cherokee/handler_cgi.c
#
e('HANDLER_CGI_SET_PROP',
title = "Setting pipe properties fd=%d: '${errno}'",
desc = SYSTEM_ISSUE)
e('HANDLER_CGI_SETID',
title = "%s: could not set UID %d",
desc = "Most probably the server is not running as root, and therefore it cannot switch to a new user. If you want Cherokee to be able to change use UID to execute CGIs, you'll have to run it as root.")
e('HANDLER_CGI_EXECUTE',
title = "Could not execute '%s': %s",
desc = SYSTEM_ISSUE)
e('HANDLER_CGI_GET_HOSTNAME',
title = "Error getting host name.",
desc = SYSTEM_ISSUE)
# cherokee/config_entry.c
#
e('CONFIG_ENTRY_BAD_TYPE',
title = "Wrong plug-in: The module must implement a handler.",
desc = "The server tried to set a handler, but the loaded plug-in contained another sort of module.")
# cherokee/balancer_*.c
#
e('BALANCER_EMPTY',
title = "The Load Balancer cannot be empty",
desc = BROKEN_CONFIG)
e('BALANCER_UNDEFINED',
title = "Balancer defined without a value",
desc = BROKEN_CONFIG)
e('BALANCER_NO_KEY',
title = "Balancer: No '%s' log has been defined.",
desc = BROKEN_CONFIG)
e('BALANCER_BAD_SOURCE',
title = "Could not find source '%s'",
desc = "For some reason the load balancer module is using a missing Information Source. Please recheck that it uses a correct one.",
admin = "/source")
e('BALANCER_ONLINE_SOURCE',
title = "Taking source='%s' back on-line",
desc = "The information source is being re-enabled.")
e('BALANCER_OFFLINE_SOURCE',
title = "Taking source='%s' back on-line",
desc = "The information source is being disabled.")
e('BALANCER_EXHAUSTED',
title = "Sources exhausted: re-enabling one.",
desc = "All the Information Sources have been off-lined. The server needs to re-enable at least one of them.")
# cherokee/encoder_*.c
#
e('ENCODER_NOT_SET_VALUE',
title = "Encoder init error",
desc = "The server did not found a valid initialization value for the encoder",
debug = "%s")
e('ENCODER_DEFLATEINIT2',
title = "deflateInit2(): %s",
desc = SYSTEM_ISSUE)
e('ENCODER_DEFLATEEND',
title = "deflateEnd(): %s",
desc = SYSTEM_ISSUE)
e('ENCODER_DEFLATE',
title = "deflate(): err=%s, avail=%d",
desc = SYSTEM_ISSUE)
# cherokee/logger_*.c
#
e('LOGGER_NO_KEY',
title = "Logger: No '%s' log has been defined.",
desc = BROKEN_CONFIG)
e('LOGGER_NO_WRITER',
title = "Logger writer type is required.",
desc = BROKEN_CONFIG)
e('LOGGER_WRITER_UNKNOWN',
title = "Unknown logger writer type '%s'",
desc = BROKEN_CONFIG)
e('LOGGER_WRITER_READ',
title = "Logger writer (%s): Could not read the filename.",
desc = "A property of the log writer is missing. Odds are you selected to write the output of the log into a file but you did not define it.")
e('LOGGER_WRITER_APPEND',
title = "Could not open '%s' for appending",
desc = "This is probably related to the file permissions. Please make sure that it is writable for the user under which Cherokee is run.")
e('LOGGER_WRITER_ALLOC',
title = "Allocation logger->max_bufsize %d failed.",
desc = "The system might have run out of memory.")
e('LOGGER_WRITER_PIPE',
title = "Could not create pipe (errno=%d): ${errno}",
desc = SYSTEM_ISSUE)
e('LOGGER_WRITER_FORK',
title = "Could not fork (errno=%d): ${errno}",
desc = SYSTEM_ISSUE)
e('LOGGER_X_REAL_IP_PARSE',
title = "Could not parse X-Real-IP access list",
desc = "You must define an access list in order to activate the X-Real-IP support.")
# cherokee/logger_custom.c
#
e('LOGGER_CUSTOM_NO_TEMPLATE',
title = "A template is needed for logging connections: %s",
desc = "Since you are trying to use a custom logging template, providing the template is mandatory.")
e('LOGGER_CUSTOM_TEMPLATE',
title = "Could not parse custom log: '%s'",
desc = "The server found a problem while processing the logging template. Please ensure it is correct.")
# cherokee/fdpoll-epoll.c
#
e('FDPOLL_EPOLL_CTL_ADD',
title = "epoll_ctl: ep_fd %d, fd %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_EPOLL_CTL_DEL',
title = "epoll_ctl: ep_fd %d, fd %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_EPOLL_CTL_MOD',
title = "epoll_ctl: ep_fd %d, fd %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_EPOLL_CREATE',
title = "epoll_create: %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_EPOLL_CLOEXEC',
title = "Could not set CloseExec to the epoll descriptor: fcntl: '${errno}'",
desc = SYSTEM_ISSUE)
# cherokee/fdpoll-port.c
#
e('FDPOLL_PORTS_FD_ASSOCIATE',
title = "fd_associate: fd %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_PORTS_ASSOCIATE',
title = "port_associate: fd %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_PORTS_GETN',
title = "port_getn: '${errno}'",
desc = SYSTEM_ISSUE)
# cherokee/fdpoll-poll.c
#
e('FDPOLL_POLL_FULL',
title = "The FD Poll is full",
desc = "The server reached the file descriptor limit. This usaully happens when many simultaneous connections are kept open. Try to increase this limit.",
admin = "/advanced#Resources-2")
e('FDPOLL_POLL_DEL',
title = "Could not remove fd %d (idx=%d) from the poll",
desc = CODING_BUG)
# cherokee/fdpoll-kqueue.c
#
e('FDPOLL_KQUEUE',
title = "kevent returned: '${errno}'",
desc = SYSTEM_ISSUE)
# cherokee/gen_evhost.c
#
e('GEN_EVHOST_TPL_DROOT',
title = "EvHost needs a 'tpl_document_root property'",
desc = BROKEN_CONFIG)
e('GEN_EVHOST_PARSE',
title = "EvHost: Could not parse template '%s'",
desc = "Could not parse the template definining how virtual servers are located. You need to re-define the Dynamic Document Root for your Advanced Virtual Hosting.")
# cherokee/vrule_*.c
#
e('VRULE_NO_PROPERTY',
title = "Virtual Server Rule prio=%d needs a '%s' property",
desc = BROKEN_CONFIG)
# cherokee/vrule_target_ip.c
#
e('VRULE_TARGET_IP_PARSE',
title = "Could not parse 'to' entry: '%s'",
desc = BROKEN_CONFIG)
# cherokee/vrule_rehost.c
#
e('VRULE_REHOST_NO_DOMAIN',
title = "Virtual Server '%s' regex vrule needs a 'domain' entry",
desc = BROKEN_CONFIG)
# cherokee/rule_*.c
#
e('RULE_NO_PROPERTY',
title = "Rule prio=%d needs a '%s' property",
desc = BROKEN_CONFIG)
# cherokee/rule_request.c
#
e('RULE_REQUEST_NO_TABLE',
title = "Could not access to the RegEx table",
desc = CODING_BUG)
e('RULE_REQUEST_NO_PCRE_PTR',
title = "RegExp rule has null pcre",
desc = CODING_BUG)
# cherokee/rule_method.c
#
e('RULE_METHOD_UNKNOWN',
title = "Could not recognize HTTP method '%s'",
desc = "The rule found an entry with an unsupported HTTP method. Probably the configuration file has been tampered with.")
# cherokee/rule_header.c
#
e('RULE_HEADER_UNKNOWN_HEADER',
title = "Unknown header '%s'",
desc = "The rule found an entry with an unsupported header. Probably the configuration file has been tampered with.")
e('RULE_HEADER_UNKNOWN_TYPE',
title = "Unknown type '%s'",
desc = "The rule found an entry with an unsupported type. Probably the configuration file has been tampered with.")
# cherokee/rule_from.c
#
e('RULE_FROM_ENTRY',
title = "Could not parse 'from' entry: '%s'",
desc = "The entries of this rule must be either IP address or network masks. Both IPv4 and IPv6 addresses and masks are supported.")
# cherokee/rule_bind.c
#
e('RULE_BIND_PORT',
title = "Rule prio=%d type='bind', invalid port='%s'",
desc = BROKEN_CONFIG)
# cherokee/server.c
#
e('SERVER_GROUP_NOT_FOUND',
title = "Group '%s' not found in the system",
desc = "Seem like you've specified a wrong GID. Change the specified one or try to create it using the addgroup/groupadd command.",
admin = "/general#Permissions-3")
e('SERVER_USER_NOT_FOUND',
title = "User '%s' not found in the system",
desc = "Looks like you've specified a wrong UID. Either change the specified one or try to create it using the adduser/useradd command.",
admin = "/general#Permissions-3")
e('SERVER_THREAD_IGNORE',
title = "Ignoring thread_policy entry '%s'",
desc = "It looks like an error ocurred with the selected OS thread policy and it has been ignored. Once a valid one is selected, the issue will be fixed.",
admin = "/advanced#Resources-2")
e('SERVER_THREAD_POLICY',
title = "Unknown thread policy '%s'",
desc = "The specified OS thread policy is unknown. You should try re-selecting one.",
admin = "/advanced#Resources-2")
e('SERVER_TOKEN',
title = "Unknown server token '%s'",
desc = "An incorrect server token was specified. Please choose one that is available in you Network behavior settings.",
admin = "/general")
e('SERVER_POLLING_UNRECOGNIZED',
title = "Polling method '%s' has not been recognized",
desc = "An incorrect polling method was specified. Please try to fix that in your advanced settings.",
admin = "/advanced#Connections-1")
e('SERVER_POLLING_UNSUPPORTED',
title = "Polling method '%s' is not supported by this OS",
desc = "The specified polling method does not work on your platform. Please try to choose another one in your advanced settings.",
admin = "/advanced#Connections-1")
e('SERVER_POLLING_UNKNOWN',
title = "Unknown polling method '%s'",
desc = "An incorrect polling method was specified. Please try to fix that in your advanced settings.",
admin = "/advanced#Connections-1")
e('SERVER_NO_BIND',
title = "Not listening on any port.",
desc = "The web server needs to be associated to a TCP port. Please try to specify that in your general settings.",
admin = "/general#Ports_to_listen-2")
e('SERVER_IGNORE_TLS',
title = "Ignoring TLS port %d",
desc = "No TLS backend is specified, but the configuration specifies a secure port and it is being ignored. Either enable a TLS backend or disable the TLS checkbox for the specified port.",
admin = "/general#Network-1",
show_bt = False)
e('SERVER_TLS_DEFAULT',
title = "TLS/SSL support required for 'default' Virtual Server.",
desc = "TLS/SSL support must be set up in the 'default' Virtual Server. Its certificate will be used by the server in case TLS SNI information is not provided by the client.")
e('SERVER_NO_CRYPTOR',
title = "Virtual Server '%s' is trying to use SSL/TLS, but no Crypto engine is active.",
desc = "For a Virtual Server to use SSL/TLS, a Crypto engine must be available server-wide.")
e('SERVER_PARSE',
title = "Server parser: Unknown key '%s'",
desc = BROKEN_CONFIG)
e('SERVER_INITGROUPS',
title = "initgroups: Unable to set groups for user '%s' and GID %d",
desc = SYSTEM_ISSUE)
e('SERVER_SETGID',
title = "cannot change group to GID %d, running with GID=%d",
desc = "Most probably you the server did not have enough permissions to change its execution group.")
e('SERVER_SETUID',
title = "cannot change group to UID %d, running with UID=%d",
desc = "Most probably you the server did not have enough permissions to change its execution user.")
e('SERVER_GET_FDLIMIT',
title = "Could not get File Descriptor limit",
desc = SYSTEM_ISSUE,
debug = "poll_type = %d")
e('SERVER_FDS_SYS_LIMIT',
title = "The server FD limit seems to be higher than the system limit",
desc = "The opened file descriptor limit of the server is %d, while the limit of the system is %d. This is an unlikely situation. You could try to raise the opened file descriptor limit of your system.")
e('SERVER_THREAD_POLL',
title = "The FD limit of the thread is greater than the limit of the poll",
desc = "It seems that an internal server thread assumed a file descriptor limit of %d. However, its FD poll has a lower limit of %d descriptors. The limit has been reduced to the poll limit.")
e('SERVER_NEW_THREAD',
title = "Could not create an internal server thread",
desc = "This is a extremely unusual error. For some reason the server could not create a thread while launching the server.",
debug = "ret = %d")
e('SERVER_TLS_INIT',
title = "cannot initialize TLS for '%s' virtual host",
desc = "This is usually caused by an error with a certificate or private key.")
e('SERVER_FD_SET',
title = "Unable to raise file descriptor limit to %d",
desc = SYSTEM_ISSUE,
show_bt = False)
e('SERVER_FD_GET',
title = "Unable to read the file descriptor limit of the system",
desc = SYSTEM_ISSUE)
e('SERVER_LOW_FD_LIMIT',
title = "The number of available file descriptors is too low",
desc = "The number of available file descriptors: %d, is too low. At least there should be %d available. Please, try to raise your system file descriptor limit.")
e('SERVER_UID_GET',
title = "Could not get information about the UID %d",
desc = SYSTEM_ISSUE)
e('SERVER_CHROOT',
title = "Could not chroot() to '%s': '${errno}'",
desc = SYSTEM_ISSUE)
e('SERVER_CHDIR',
title = "Could not chdir() to '%s': '${errno}'",
desc = SYSTEM_ISSUE)
e('SERVER_SOURCE',
title = "Invalid Source entry '%s'",
desc = BROKEN_CONFIG)
e('SERVER_SOURCE_TYPE',
title = "Source %d: An entry 'type' is required",
desc = BROKEN_CONFIG)
e('SERVER_SOURCE_TYPE_UNKNOWN',
title = "Source %d has an unknown type: '%s'",
desc = BROKEN_CONFIG)
e('SERVER_VSERVER_PRIO',
title = "Invalid Virtual Server entry '%s'",
desc = BROKEN_CONFIG)
e('SERVER_NO_VSERVERS',
title = "No virtual hosts have been configured",
desc = "There should exist at least one virtual server.")
e('SERVER_NO_DEFAULT_VSERVER',
title = "Lowest priority virtual server must be 'default'",
desc = "The lowest priority virtual server should be named 'default'.")
e('SERVER_FORK',
title = "Could not fork()",
desc = SYSTEM_ISSUE)
e('SERVER_PANIC',
title = "Could not execute the Panic handler: '%s', status %d",
desc = "Something happened with the server, and it felt panic. It tried to call an external program to report it to the administrator, but it failed.")
# cherokee/source_interpreter.c
#
e('SRC_INTER_NO_USER',
title = "User '%s' not found in the system",
desc = "The server is configured to execute an interpreter as a different user. However, it seems that the user does not exist in the system.",
admin = "/source/%d")
e('SRC_INTER_NO_GROUP',
title = "Group '%s' not found in the system",
desc = "The server is configured to execute an interpreter as a different group. However, it seems that the group does not exist in the system.",
admin = "/source/%d")
e('SRC_INTER_EMPTY_INTERPRETER',
title = "There is a 'Interpreter Source' witout an interpreter.",
desc = "The server configuration defines an 'interpreter' information source that does not specify an interpreter.",
admin = "/source/%d")
e('SRC_INTER_NO_INTERPRETER',
title = "Could not find interpreter '%s'",
desc = "The server configuration refers to an interpreter that is not installed in this system.",
admin = "/source/%d")
e('SRC_INTER_ENV_IN_COMMAND',
title = "The command to launch the interpreter contains environment variables",
desc = "Please remove the environment variables from the command, and add them as such.",
admin = "/source/%d",
debug = "Command: %s")
e('SRC_INTER_SPAWN',
title = "Could not spawn '%s'",
desc = SYSTEM_ISSUE)
e('SRC_INTER_SETUID',
title = "Can't change setuid %d",
desc = SYSTEM_ISSUE)
e('SRC_INTER_SETGID',
title = "Can't change setgid %d",
desc = SYSTEM_ISSUE)
e('SRC_INTER_CHROOT',
title = "Could not chroot() to '%s'",
desc = SYSTEM_ISSUE)
# cherokee/config_reader.c
#
e('CONF_READ_ACCESS_FILE',
title = "Could not access file",
desc = "The configuration file '%s' could not be accessed. Most probably the server user does not have enough permissions to read it, or lacks search permission on the file path.",
show_bt = False)
e('CONF_OPEN_DIR',
title = "Could not open directory",
desc = "Could not open directory '%s'. Please check the server user and file permissions.",
show_bt = False)
e('CONF_READ_CHILDREN_SAME_NODE',
title = "'%s' and '%s' as child of the same node",
desc = CODING_BUG)
e('CONF_READ_PARSE',
title = "Parsing error",
desc = "The server could not parse the configuration. Something must be wrong with formation. At this stage the lexical is checked.",
debug = "%s")
# cherokee/flcache.c
#
e('FLCACHE_CHOWN',
title = "Could not chown the FLCache directory '%s' to user '%s' and group '%s'",
desc = SYSTEM_ISSUE)
e('FLCACHE_MKDIR',
title = "Could not create the '%s' directory, or it doesn't have %s permissions",
desc = SYSTEM_ISSUE)
e('FLCACHE_MKDIRS',
title = "Could not create the FLCache temporal directy neither under %s nor under %s, or it doesn't have %s permissions",
desc = SYSTEM_ISSUE)
e('FLCACHE_CREATE_FILE',
title = "Could not create the '%s' cache object file: ${errno}",
desc = SYSTEM_ISSUE)
# cherokee/template.c
#
e('TEMPLATE_NO_TOKEN',
title = "Token not found '%s'",
desc = "It seems that the template uses an undefined token.")
# cherokee/services-client.c
#
e('CLIENT_ALREADY_INIT',
title = "Could not initialise service client, already initialised.",
desc = CODING_BUG)
# cherokee/services-server.c
#
e('SERVER_ALREADY_INIT',
title = "Could not initialise service server, already initialised.",
desc = CODING_BUG)
e('SERVER_CANNOT_SOCKETPAIR',
title = "Could not create socket pair for service server: ${errno}",
desc = SYSTEM_ISSUE)
# cherokee/http.c
#
e('HTTP_UNKNOWN_CODE',
title = "Unknown HTTP status code %d")
# cherokee/icons.c
#
e('ICONS_NO_DEFAULT',
title = "A default icon is needed",
desc = "Please, specify a default icon. It is the icon that Cherokee will use whenever no other icon is used.",
admin = "/general#Icons-4")
e('ICONS_ASSIGN_SUFFIX',
title = "Could not assign suffix '%s' to file '%s'",
desc = UNKNOWN_CAUSE,
admin = "/general#Icons-4")
e('ICONS_DUP_SUFFIX',
title = "Duped suffix (case insensitive) '%s', pointing to '%s'",
desc = UNKNOWN_CAUSE,
admin = "/general#Icons-4")
# cherokee/header.c
#
e('HEADER_EMPTY',
title = "Calling cherokee_header_parse() with an empty header",
desc = CODING_BUG)
e('HEADER_NO_EOH',
title = "Could not find the End Of Header",
desc = CODING_BUG,
debug = "len=%d, buf=%s")
e('HEADER_TOO_MANY_CRLF',
title = "Too many initial CRLF",
desc = CODING_BUG)
e('HEADER_ADD_HEADER',
title = "Failed to store a header entry while parsing",
desc = CODING_BUG)
# cherokee/socket.c
#
e('SOCKET_NO_IPV6',
title = "IPv6 support is disabled. Configuring for IPv4 support only.",
desc = SYSTEM_ISSUE,
admin = "/general#Network-4",
show_bt = False)
e('SOCKET_NEW_SOCKET',
title = "Could not create socket: ${errno}",
desc = SYSTEM_ISSUE)
e('SOCKET_SET_LINGER',
title = "Could not set SO_LINGER on fd=%d: ${errno}",
desc = CODING_BUG)
e('SOCKET_RM_NAGLES',
title = "Could not disable Nagle's algorithm",
desc = SYSTEM_ISSUE)
e('SOCKET_NON_BLOCKING',
title = "Could not set non-blocking, fd %d",
desc = CODING_BUG)
e('SOCKET_NO_SOCKET',
title = "%s is not a socket",
desc = "The file is supposed to be a Unix socket, although it does not look like one.")
e('SOCKET_REMOVE',
title = "Could not remove %s",
desc = "Could not remove the Unix socket because: ${errno}")
e('SOCKET_WRITE',
title = "Could not write to socket: write(%d, ..): '${errno}'",
desc = CODING_BUG)
e('SOCKET_READ',
title = "Could not read from socket: read(%d, ..): '${errno}'",
desc = CODING_BUG)
e('SOCKET_WRITEV',
title = "Could not write a vector to socket: writev(%d, ..): '${errno}'",
desc = CODING_BUG)
e('SOCKET_CONNECT',
title = "Could not connect: ${errno}",
desc = SYSTEM_ISSUE)
e('SOCKET_BAD_FAMILY',
title = "Unknown socket family: %d",
desc = CODING_BUG)
e('SOCKET_SET_NODELAY',
title = "Could not set TCP_NODELAY to fd %d: ${errno}",
desc = CODING_BUG)
e('SOCKET_RM_NODELAY',
title = "Could not remove TCP_NODELAY from fd %d: ${errno}",
desc = CODING_BUG)
e('SOCKET_SET_CORK',
title = "Could not set TCP_CORK to fd %d: ${errno}",
desc = CODING_BUG)
e('SOCKET_RM_CORK',
title = "Could not set TCP_CORK from fd %d: ${errno}",
desc = CODING_BUG)
# cherokee/thread.c
#
e('THREAD_RM_FD_POLL',
title = "Could not remove fd(%d) from fdpoll",
desc = CODING_BUG)
e('THREAD_HANDLER_RET',
title = "Unknown ret %d from handler %s",
desc = CODING_BUG)
e('THREAD_OUT_OF_FDS',
title = "Run out of file descriptors",
desc = "The server is under heavy load and it has run out of file descriptors. It can be fixed by raising the file descriptor limit and restarting the server.",
admin = "/advanced")
e('THREAD_GET_CONN_OBJ',
title = "Trying to get a new connection object",
desc = "Either the system run out of memory, or you've hit a bug in the code.")
e('THREAD_SET_SOCKADDR',
title = "Could not set sockaddr",
desc = CODING_BUG)
e('THREAD_CREATE',
title = "Could not create a system thread: '${errno}'",
desc = "This is a extremely unusual error. For some reason your system could not create a thread while launching the server. You might have hit some system restriction.",
debug = "pthread_create() error = %d")
# cherokee/connection.c
#
e('CONNECTION_AUTH',
title = "Unknown authentication method",
desc = BROKEN_CONFIG)
e('CONNECTION_LOCAL_DIR',
title = "Could not build the local directory string",
desc = CODING_BUG)
e('CONNECTION_GET_VSERVER',
title = "Could not get virtual server: '%s'",
desc = CODING_BUG)
# cherokee/ncpus.c
#
e('NCPUS_PSTAT',
title = "pstat_getdynamic() failed: '${errno}'",
desc = SYSTEM_ISSUE)
e('NCPUS_HW_NCPU',
title = "sysctl(CTL_HW:HW_NCPU) failed: '${errno}'",
desc = SYSTEM_ISSUE)
e('NCPUS_SYSCONF',
title = "sysconf(_SC_NPROCESSORS_ONLN) failed: '${errno}'",
desc = SYSTEM_ISSUE)
# cherokee/init.c
#
e('INIT_CPU_NUMBER',
title = "Could not figure the CPU/core number of your server. Read %d, set to 1")
e('INIT_GET_FD_LIMIT',
title = "Could not get the file descriptor limit of your system",
desc = SYSTEM_ISSUE)
# cherokee/utils.c
#
e('UTIL_F_GETFL',
title = "fcntl (F_GETFL, fd=%d, 0): ${errno}",
desc = CODING_BUG)
e('UTIL_F_SETFL',
title = "fcntl (F_GETFL, fd=%d, flags=%d (+%s)): ${errno}",
desc = CODING_BUG)
e('UTIL_F_GETFD',
title = "fcntl (F_GETFD, fd=%d, 0): ${errno}",
desc = CODING_BUG)
e('UTIL_F_SETFD',
title = "fcntl (F_GETFD, fd=%d, flags=%d (+%s)): ${errno}",
desc = CODING_BUG)
e('UTIL_MKDIR',
title = "Could not mkdir '%s' (UID %d): ${errno}",
desc = "Most probably there you have to adjust some permissions.")
# cherokee/avl.c
#
e('AVL_PREVIOUS',
title = "AVL Tree inconsistency: Right child",
desc = CODING_BUG)
e('AVL_NEXT',
title = "AVL Tree inconsistency: Left child",
desc = CODING_BUG)
e('AVL_BALANCE',
title = "AVL Tree inconsistency: Balance",
desc = CODING_BUG)
# cherokee/buffer.c
#
e('BUFFER_NEG_ESTIMATION',
title = "Buffer: Bad memory estimation. The format '%s' estimated a negative length: %d.",
desc = CODING_BUG)
e('BUFFER_NO_SPACE',
title = "Buffer: No target memory. The format '%s' got a free size of %d (estimated %d).",
desc = CODING_BUG)
e('BUFFER_BAD_ESTIMATION',
title = "Buffer: Bad estimation. Too few memory: '%s' -> '%s', esti=%d real=%d size=%d.",
desc = CODING_BUG)
e('BUFFER_AVAIL_SIZE',
title = "Buffer: Bad estimation: Estimation=%d, needed=%d available size=%d: %s.",
desc = CODING_BUG)
e('BUFFER_OPEN_FILE',
title = "Could not open the file: %s, ${errno}",
desc = "Please check that the file exists and the server has read access.")
e('BUFFER_READ_FILE',
title = "Could not read from fd: read(%d, %d, ..) = ${errno}",
desc = "Please check that the file exists and the server has read access.")
# cherokee/plugin_loader.c
#
UNAVAILABLE_PLUGIN = """Either you are trying to use an unavailable
(uninstalled?) plugin, or there is a installation issue."""
e('PLUGIN_LOAD_NO_SYM',
title = "Could not get simbol '%s': %s",
desc = INTERNAL_ISSUE)
e('PLUGIN_DLOPEN',
title = "Something just happened while opening a plug-in file",
desc = "The operating system reported '%s' while trying to load '%s'.")
e('PLUGIN_NO_INIT',
title = "The plug-in initialization function (%s) could not be found",
desc = CODING_BUG)
e('PLUGIN_NO_OPEN',
title = "Could not open the '%s' module",
desc = UNAVAILABLE_PLUGIN)
e('PLUGIN_NO_INFO',
title = "Could not access the 'info' entry of the %s plug-in",
desc = UNAVAILABLE_PLUGIN)
# cherokee/virtual_server.c
#
e('VSERVER_BAD_METHOD',
title = "Unsupported method '%s'",
admin = "/vserver/%d/rule/%d",
desc = "For some reason the configuration file is trying to use an invalid authentication method. Either the file has been tampered with, or you are using a legacy configuration from a system that was compiled with support for more authentication methods.")
e('VSERVER_TIME_MISSING',
title = "Expiration time without a 'time' property",
admin = "/vserver/%d/rule/%d",
desc = "The expiration time feature is being used but no amount of time has been specified. Either provide on or disable Expiration.")
e('VSERVER_RULE_UNKNOWN_KEY',
title = "Virtual Server Rule, Unknown key '%s'",
admin = "/vserver/%d/rule/%d",
desc = "Most probably you are using an old configuration file that contains a deprecated key. Loading and then saving it through Cherokee-Admin should update the old entries for you automatically.")
e('VSERVER_TYPE_MISSING',
title = "Rule matches must specify a 'type' property",
admin = "/vserver/%d/rule/%d",
desc = "For some reason the rule is incomplete. Try editing or recreating it within Cherokee-Admin.")
e('VSERVER_LOAD_MODULE',
title = "Could not load rule module '%s'",
admin = "/vserver/%d",
desc = "The server could not load a plug-in file. This might be due to some problem in the installation.")
e('VSERVER_BAD_PRIORITY',
title = "Invalid priority '%s'",
admin = "/vserver/%d",
desc = "For some reason your configuration file contains invalid priority values, which must be an integer higher than 0. Most likely it has been edited by hand and the value must be fixed manually or the rule has to be discarded.")
e('VSERVER_RULE_MATCH_MISSING',
title = "Rules must specify a 'match' property",
admin = "/vserver/%d/rule/%d",
desc = "For some reason there is an incomplete rule in your configuration file. Try locating it in Cherokee-Admin and fill in all the mandatory fields.")
e('VSERVER_MATCH_MISSING',
title = "Virtual Server must specify a 'match' property",
admin = "/vserver/%d#Host_Match-2",
desc = "Try filling in the fields under the 'Host Match' tab.")
e('VSERVER_UNKNOWN_KEY',
title = "Virtual Server, Unknown key '%s'",
admin = "/vserver/%d",
desc = "Most probably you are using an old configuration file that contains a deprecated key. Loading and then saving it through Cherokee-Admin should update the old entries for you automatically.")
e('VSERVER_NICK_MISSING',
title = "Virtual Server without a 'nick' property",
admin = "/vserver/%d#Basics-1",
desc = "For some reason, a mandatory property is not present in your configuration. Fill in the 'Virtual Server nickname' field, under the 'Basics' tab.")
e('VSERVER_DROOT_MISSING',
title = "Virtual Server without a 'document_root' property",
admin = "/vserver/%d#Basics-1",
desc = "You seem to have forgotten to provide a valid Document Root. This is the root path that contains the files and directories that will be made publicly available through the web server. It can be an empty path and even /dev/null, but it is a mandatory property.")
e('VSERVER_FLCACHE_UNKNOWN_POLICY',
title = "Unknown Front-Line Cache caching policy: %s",
admin = "/vserver/%d/rule/%d",
desc = BROKEN_CONFIG)
# cherokee/regex.c
#
e('REGEX_COMPILATION',
title = "Could not compile <<%s>>: %s (offset=%d)",
desc = "For some reason, PCRE could not compile the regular expression. Please modify the regular expression in order to solve this problem.")
# cherokee/access.c
#
e('ACCESS_IPV4_MAPPED',
title = "This IP '%s' is IPv6-mapped IPv6 address",
desc = "It can be solved by specifying the IP in IPv4 style: a.b.c.d, instead of IPv6 style: ::ffff:a.b.c.d style")
e('ACCESS_INVALID_IP',
title = "The IP address '%s' seems to be invalid",
desc = "You must have made a mistake. Please, try to fix the IP and try again.")
e('ACCESS_INVALID_MASK',
title = "The network mask '%s' seems to be invalid",
desc = "You must have made a mistake. Please, try to fix the IP and try again.")
# cherokee/bind.c
#
e('BIND_PORT_NEEDED',
title = "A port entry is need",
desc = "It seems that the configuration file includes a port listening entry with the wrong format. It should contain one port specification, but it does not in this case.",
admin = "/general#Ports_to_listen-2")
e('BIND_COULDNT_BIND_PORT',
title = "Could not bind() port=%d (UID=%d, GID=%d)",
desc = "Most probably there is another web server listening to the same port. You will have to shut it down before launching Cherokee. It could also be a permissions issue as well. Remember that non-root user cannot listen to ports < 1024.",
admin = "/general#Ports_to_listen-2")
# cherokee/handler_rrd.c
#
e('HANDLER_RENDER_RRD_EXEC',
title = "Could not execute RRD command: %s",
desc = SYSTEM_ISSUE)
e('HANDLER_RENDER_RRD_EMPTY_REPLY',
title = "RRDtool empty response",
desc = SYSTEM_ISSUE)
e('HANDLER_RENDER_RRD_MSG',
title = "RRDtool replied an error message: %s",
desc = SYSTEM_ISSUE)
e('HANDLER_RENDER_RRD_INVALID_REQ',
title = "Invalid request: %s",
desc = SYSTEM_ISSUE)
# cherokee/collector_rrd.c
#
e('COLLECTOR_COMMAND_EXEC',
title = "Could not execute RRD command: %s",
desc = SYSTEM_ISSUE,
admin = "/general#Network-1")
e('COLLECTOR_NEW_THREAD',
title = "Could not create the RRD working thread: error=%d",
desc = SYSTEM_ISSUE)
e('COLLECTOR_NEW_MUTEX',
title = "Could not create the RRD working mutex: error=%d",
desc = SYSTEM_ISSUE)
# cherokee/validator_mysql.c
#
e('VALIDATOR_MYSQL_HASH',
title = "Validator MySQL: Unknown hash type: '%s'",
desc = CODING_BUG)
e('VALIDATOR_MYSQL_KEY',
title = "Validator MySQL: Unknown key: '%s'",
desc = CODING_BUG)
e('VALIDATOR_MYSQL_USER',
title = "MySQL validator: a 'user' entry is needed",
desc = "Make sure that a valid MySQL user-name has been provided.")
e('VALIDATOR_MYSQL_DATABASE',
title = "MySQL validator: a 'database' entry is needed",
desc = "Make sure that a valid MySQL database-name has been provided.")
e('VALIDATOR_MYSQL_QUERY',
title = "MySQL validator: a 'query' entry is needed",
desc = "Make sure that a MySQL query has been provided.")
e('VALIDATOR_MYSQL_SOURCE',
title = "MySQL validator misconfigured: A Host or Unix socket is needed.",
desc = "Make sure that a working database host is specified for MySQL validation.")
e('VALIDATOR_MYSQL_NOCONN',
title = "Unable to connect to MySQL server: %s:%d %s",
desc = "Most probably the MySQL server is down or you've mistyped a connetion parameter")
# cherokee/error_log.c
#
e('ERRORLOG_PARAM',
title = "Unknown parameter type '%c'",
desc = "Accepted parameter are 's' and 'd'")
# cherokee/cryptor_libssl.c
#
e('SSL_NO_ENTROPY',
title = "Not enough entropy in the pool",
desc = SYSTEM_ISSUE)
e('SSL_SOCKET',
title = "Could not get the socket struct: %p",
desc = SYSTEM_ISSUE)
e('SSL_SRV_MATCH',
title = "Servername did not match: '%s'",
desc = "A TLS negotiation using SNI is sending a domain name that does not match any of the available ones. This makes it impossible to present a certificate with a correct CA. Check the list of TLS enabled Virtual Servers if you expect otherwise.")
e('SSL_CHANGE_CTX',
title = "Could not change the SSL context: servername='%s'",
desc = SYSTEM_ISSUE)
e('SSL_ALLOCATE_CTX',
title = "OpenSSL: Could not allocate OpenSSL context",
desc = SYSTEM_ISSUE)
e('SSL_CIPHER',
title = "OpenSSL: cannot set cipher list '%s': %s",
desc = SYSTEM_ISSUE)
e('SSL_CERTIFICATE',
title = "OpenSSL: cannot use certificate file '%s': %s",
desc = "An error occured while trying to load a certificate into the SSL context structure. Most likely the certificate file is wrong or has been corrupted.")
e('SSL_KEY',
title = "OpenSSL: cannot use private key file '%s': %s",
desc = "An error occured while trying to load a private key the SSL context structure. Most likely the file is wrong or has been corrupted.")
e('SSL_KEY_MATCH',
title = "OpenSSL: Private key does not match the certificate public key",
desc = "The private key must agree with the corresponding public key in the certificate associated with a specific SSL context. Double check both private key and certificate.")
e('SSL_CA_READ',
title = "OpenSSL: cannot read trusted CA list '%s': %s",
desc = "If this happens, CA certificates for verification purposes cannot be located. It is likely there is a problem with your private key.")
e('SSL_CA_LOAD',
title = "SSL_load_client_CA_file '%s': %s",
desc = "A file of PEM formatted certificates should be read to extract data of the certificates found. It is likely there is a problem with your private key.")
e('SSL_SESSION_ID',
title = "Unable to set SSL session-id context for '%s': %s",
desc = SYSTEM_ISSUE)
e('SSL_SNI',
title = "Could not activate TLS SNI for '%s': %s",
desc = "It looks like Cherokee was compiled with TLS SNI support. However, it is currently using a SSL library (libssl/openssl) without TLS SNI support, and thus SNI is disabled.")
e('SSL_CONNECTION',
title = "OpenSSL: Unable to create a new SSL connection from the SSL context: %s",
desc = SYSTEM_ISSUE)
e('SSL_FD',
title = "OpenSSL: cannot set fd(%d): %s",
desc = SYSTEM_ISSUE)
e('SSL_INIT',
title = "Init OpenSSL: %s",
desc = SYSTEM_ISSUE)
e('SSL_SW_DEFAULT',
title = "SSL_write: unknown errno: ${errno}",
desc = SYSTEM_ISSUE)
e('SSL_SW_ERROR',
title = "SSL_write (%d, ..) -> err=%d '%s'",
desc = SYSTEM_ISSUE)
e('SSL_SR_DEFAULT',
title = "SSL_read: unknown errno: ${errno}",
desc = SYSTEM_ISSUE)
e('SSL_SR_ERROR',
title = "OpenSSL: SSL_read (%d, ..) -> err=%d '%s'",
desc = SYSTEM_ISSUE)
e('SSL_CREATE_CTX',
title = "OpenSSL: Unable to create a new SSL context: %s",
desc = SYSTEM_ISSUE)
e('SSL_CTX_LOAD',
title = "OpenSSL: '%s': %s",
desc = SYSTEM_ISSUE)
e('SSL_CTX_SET',
title = "OpenSSL: cannot set certificate verification paths: %s",
desc = SYSTEM_ISSUE)
e('SSL_SNI_SRV',
title = "OpenSSL: Could not set SNI server name: %s",
desc = SYSTEM_ISSUE)
e('SSL_CONNECT',
title = "OpenSSL: cannot connect: %s",
desc = SYSTEM_ISSUE)
e('SSL_PKCS11',
title = "Could not init pkcs11 engine",
desc = SYSTEM_ISSUE)
e('SSL_DEFAULTS',
title = "Could not set all defaults",
desc = SYSTEM_ISSUE)
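# --- Editorial sketch (not part of the original file) -----------------------
# The e() calls above register error definitions. The real e() helper and the
# shared description constants (SYSTEM_ISSUE, CODING_BUG, BROKEN_CONFIG, ...)
# are defined earlier in this file and are not shown here; the following is
# only an assumed, minimal stand-in illustrating the shape of the registrar
# these entries rely on.

_example_errors = {}

def _example_e(code, title, desc='', admin='', debug='', show_bt=True):
    """Hypothetical stand-in for e(): record one error definition by code."""
    _example_errors[code] = dict(title=title, desc=desc, admin=admin,
                                 debug=debug, show_bt=show_bt)

# e.g.: _example_e('SSL_DEFAULTS', title="Could not set all defaults")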
| cherokee/webserver | cherokee/error_list.py | Python | gpl-2.0 | 43,677 |
#
# This code is part of Ansible, but is an independent component.
#
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat, Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list, ComplexList
from ansible.module_utils.connection import exec_command
from ansible.module_utils.six import iteritems
from ansible.module_utils.urls import fetch_url
_DEVICE_CONNECTION = None
nxos_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE'])),
'use_ssl': dict(type='bool'),
'validate_certs': dict(type='bool'),
'timeout': dict(type='int'),
'transport': dict(default='cli', choices=['cli', 'nxapi'])
}
nxos_argument_spec = {
'provider': dict(type='dict', options=nxos_provider_spec),
}
nxos_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9),
'use_ssl': dict(removed_in_version=2.9, type='bool'),
'validate_certs': dict(removed_in_version=2.9, type='bool'),
'timeout': dict(removed_in_version=2.9, type='int'),
'transport': dict(removed_in_version=2.9, default='cli', choices=['cli', 'nxapi'])
}
nxos_argument_spec.update(nxos_top_spec)
def get_provider_argspec():
return nxos_provider_spec
def check_args(module, warnings):
pass
def load_params(module):
provider = module.params.get('provider') or dict()
for key, value in iteritems(provider):
if key in nxos_argument_spec:
if module.params.get(key) is None and value is not None:
module.params[key] = value
def get_connection(module):
global _DEVICE_CONNECTION
if not _DEVICE_CONNECTION:
load_params(module)
if is_nxapi(module):
conn = Nxapi(module)
else:
conn = Cli(module)
_DEVICE_CONNECTION = conn
return _DEVICE_CONNECTION
class Cli:
def __init__(self, module):
self._module = module
self._device_configs = {}
def exec_command(self, command):
if isinstance(command, dict):
command = self._module.jsonify(command)
return exec_command(self._module, command)
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
rc, out, err = self.exec_command(cmd)
if rc != 0:
self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
cfg = to_text(out, errors='surrogate_then_replace').strip()
self._device_configs[cmd] = cfg
return cfg
def run_commands(self, commands, check_rc=True):
"""Run list of commands on remote device and return results
"""
responses = list()
for item in to_list(commands):
if item['output'] == 'json' and not is_json(item['command']):
cmd = '%s | json' % item['command']
elif item['output'] == 'text' and is_json(item['command']):
cmd = item['command'].split('|')[0]
else:
cmd = item['command']
rc, out, err = self.exec_command(cmd)
out = to_text(out, errors='surrogate_then_replace')
if check_rc and rc != 0:
self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
try:
out = self._module.from_json(out)
except ValueError:
out = str(out).strip()
responses.append(out)
return responses
def load_config(self, config, return_error=False):
"""Sends configuration commands to the remote device
"""
rc, out, err = self.exec_command('configure')
if rc != 0:
self._module.fail_json(msg='unable to enter configuration mode', output=to_text(err, errors='surrogate_then_replace'))
msgs = []
for cmd in config:
rc, out, err = self.exec_command(cmd)
if rc != 0:
self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
elif out:
msgs.append(out)
self.exec_command('end')
return msgs
class Nxapi:
OUTPUT_TO_COMMAND_TYPE = {
'text': 'cli_show_ascii',
'json': 'cli_show',
'bash': 'bash',
'config': 'cli_conf'
}
def __init__(self, module):
self._module = module
self._nxapi_auth = None
self._device_configs = {}
self._module.params['url_username'] = self._module.params['username']
self._module.params['url_password'] = self._module.params['password']
host = self._module.params['host']
port = self._module.params['port']
if self._module.params['use_ssl']:
proto = 'https'
port = port or 443
else:
proto = 'http'
port = port or 80
self._url = '%s://%s:%s/ins' % (proto, host, port)
def _error(self, msg, **kwargs):
self._nxapi_auth = None
if 'url' not in kwargs:
kwargs['url'] = self._url
self._module.fail_json(msg=msg, **kwargs)
def _request_builder(self, commands, output, version='1.0', chunk='0', sid=None):
"""Encodes a NXAPI JSON request message
"""
try:
command_type = self.OUTPUT_TO_COMMAND_TYPE[output]
except KeyError:
msg = 'invalid format, received %s, expected one of %s' % \
(output, ','.join(self.OUTPUT_TO_COMMAND_TYPE.keys()))
self._error(msg=msg)
if isinstance(commands, (list, set, tuple)):
commands = ' ;'.join(commands)
msg = {
'version': version,
'type': command_type,
'chunk': chunk,
'sid': sid,
'input': commands,
'output_format': 'json'
}
return dict(ins_api=msg)
def send_request(self, commands, output='text', check_status=True, return_error=False):
        # only 10 show commands can be encoded in each request
        # message sent to the remote device
if output != 'config':
commands = collections.deque(to_list(commands))
stack = list()
requests = list()
while commands:
stack.append(commands.popleft())
if len(stack) == 10:
body = self._request_builder(stack, output)
data = self._module.jsonify(body)
requests.append(data)
stack = list()
if stack:
body = self._request_builder(stack, output)
data = self._module.jsonify(body)
requests.append(data)
else:
body = self._request_builder(commands, 'config')
requests = [self._module.jsonify(body)]
headers = {'Content-Type': 'application/json'}
result = list()
timeout = self._module.params['timeout']
for req in requests:
if self._nxapi_auth:
headers['Cookie'] = self._nxapi_auth
response, headers = fetch_url(
self._module, self._url, data=req, headers=headers,
timeout=timeout, method='POST'
)
self._nxapi_auth = headers.get('set-cookie')
if headers['status'] != 200:
self._error(**headers)
try:
response = self._module.from_json(response.read())
except ValueError:
self._module.fail_json(msg='unable to parse response')
if response['ins_api'].get('outputs'):
output = response['ins_api']['outputs']['output']
for item in to_list(output):
if check_status and item['code'] != '200':
if return_error:
result.append(item)
else:
self._error(output=output, **item)
elif 'body' in item:
result.append(item['body'])
# else:
# error in command but since check_status is disabled
# silently drop it.
# result.append(item['msg'])
return result
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
out = self.send_request(cmd)
cfg = str(out[0]).strip()
self._device_configs[cmd] = cfg
return cfg
def run_commands(self, commands, check_rc=True):
"""Run list of commands on remote device and return results
"""
output = None
queue = list()
responses = list()
def _send(commands, output):
return self.send_request(commands, output, check_status=check_rc)
for item in to_list(commands):
if is_json(item['command']):
item['command'] = str(item['command']).split('|')[0]
item['output'] = 'json'
if all((output == 'json', item['output'] == 'text')) or all((output == 'text', item['output'] == 'json')):
responses.extend(_send(queue, output))
queue = list()
output = item['output'] or 'json'
queue.append(item['command'])
if queue:
responses.extend(_send(queue, output))
return responses
def load_config(self, commands, return_error=False):
"""Sends the ordered set of commands to the device
"""
commands = to_list(commands)
msg = self.send_request(commands, output='config', check_status=True, return_error=return_error)
if return_error:
return msg
else:
return []
def is_json(cmd):
return str(cmd).endswith('| json')
def is_text(cmd):
return not is_json(cmd)
def is_nxapi(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
return 'nxapi' in (transport, provider_transport)
def to_command(module, commands):
if is_nxapi(module):
default_output = 'json'
else:
default_output = 'text'
transform = ComplexList(dict(
command=dict(key=True),
output=dict(default=default_output),
prompt=dict(),
answer=dict()
), module)
commands = transform(to_list(commands))
for item in commands:
if is_json(item['command']):
item['output'] = 'json'
return commands
def get_config(module, flags=None):
flags = [] if flags is None else flags
conn = get_connection(module)
return conn.get_config(flags)
def run_commands(module, commands, check_rc=True):
conn = get_connection(module)
return conn.run_commands(to_command(module, commands), check_rc)
def load_config(module, config, return_error=False):
conn = get_connection(module)
return conn.load_config(config, return_error=return_error)
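# --- Editorial sketch (not part of the original file) -----------------------
# A minimal Ansible module body built on the helpers above. The command and
# the function name are illustrative assumptions; real nxos modules follow
# the same pattern: build an AnsibleModule with nxos_argument_spec, then call
# run_commands()/load_config(), which route through Cli or Nxapi depending on
# the 'transport' setting.
from ansible.module_utils.basic import AnsibleModule

def _example_module_main():
    """Hypothetical module entry point: run one show command and return it."""
    module = AnsibleModule(argument_spec=nxos_argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    # to_command() (called inside run_commands) normalizes plain strings into
    # {'command': ..., 'output': ...} dicts before execution.
    output = run_commands(module, ['show version'])
    module.exit_json(changed=False, stdout=output, warnings=warnings)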
| rmfitzpatrick/ansible | lib/ansible/module_utils/nxos.py | Python | gpl-3.0 | 13,537 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
SIZE = 8
with open("../benchmarks/many-images.yaml", "w") as text_file:
text_file.write("root:\n")
text_file.write(" items:\n")
for y in range(0, 64):
yb = SIZE * y
for x in range(0, 128):
xb = SIZE * x
text_file.write(" - image: solid-color({0}, {1}, 0, 255, {2}, {2})\n".format(x, y, SIZE))
text_file.write(" bounds: {0} {1} {2} {2}\n".format(xb, yb, SIZE))
| servo/webrender | wrench/script/gen-many-images.py | Python | mpl-2.0 | 634 |
"""
Module contains tools for processing files into DataFrames or other objects
"""
from __future__ import print_function
from collections import defaultdict
import re
import csv
import sys
import warnings
import datetime
from textwrap import fill
import numpy as np
from pandas import compat
from pandas.compat import (range, lrange, PY3, StringIO, lzip,
zip, string_types, map, u)
from pandas.core.dtypes.common import (
is_integer, _ensure_object,
is_list_like, is_integer_dtype,
is_float, is_dtype_equal,
is_object_dtype, is_string_dtype,
is_scalar, is_categorical_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.index import (Index, MultiIndex, RangeIndex,
_ensure_index_from_sequences)
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.categorical import Categorical
from pandas.core import algorithms
from pandas.core.common import AbstractMethodError
from pandas.io.date_converters import generic_parser
from pandas.errors import ParserWarning, ParserError, EmptyDataError
from pandas.io.common import (get_filepath_or_buffer, is_file_like,
_validate_header_arg, _get_handle,
UnicodeReader, UTF8Recoder, _NA_VALUES,
BaseIterator, _infer_compression)
from pandas.core.tools import datetimes as tools
from pandas.util._decorators import Appender
import pandas._libs.lib as lib
import pandas._libs.parsers as parsers
from pandas._libs.tslibs import parsing
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = u('\ufeff')
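# For example, a UTF-8-sig encoded CSV whose raw first line is '\ufeffa,b,c'
# must yield the header 'a,b,c'; the parser strips a leading _BOM from the
# first field before interpreting column names.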
_parser_params = """Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.
Parameters
----------
filepath_or_buffer : str, pathlib.Path, py._path.local.LocalPath or any \
object with a read() method (such as a file handle or StringIO)
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file could
    be file://localhost/path/to/table.csv
%s
delim_whitespace : boolean, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
.. versionadded:: 0.18.1 support for the Python parser.
header : int or list of ints, default 'infer'
Row number(s) to use as the column names, and the start of the data.
Default behavior is as if set to 0 if no ``names`` passed, otherwise
``None``. Explicitly pass ``header=0`` to be able to replace existing
names. The header can be a list of integers that specify row locations for
a multi-index on the columns e.g. [0,1,3]. Intervening rows that are not
specified will be skipped (e.g. 2 in this example is skipped). Note that
this parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so header=0 denotes the first line of data
rather than the first line of the file.
names : array-like, default None
List of column names to use. If file contains no header row, then you
should explicitly pass header=None. Duplicates in this list will cause
a ``UserWarning`` to be issued.
index_col : int or sequence or False, default None
Column to use as the row labels of the DataFrame. If a sequence is given, a
MultiIndex is used. If you have a malformed file with delimiters at the end
of each line, you might consider index_col=False to force pandas to _not_
use the first column as the index (row names)
usecols : array-like or callable, default None
Return a subset of the columns. If array-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid array-like
`usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz'].
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
as_recarray : boolean, default False
.. deprecated:: 0.19.0
Please call `pd.read_csv(...).to_records()` instead.
Return a NumPy recarray instead of a DataFrame after parsing the data.
If set to True, this option takes precedence over the `squeeze` parameter.
In addition, as row indices are not available in such a format, the
`index_col` parameter will be ignored.
squeeze : boolean, default False
If the parsed data only contains one column then return a Series
prefix : str, default None
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : boolean, default True
Duplicate columns will be specified as 'X.0'...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `str` or `object` to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
%s
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels
true_values : list, default None
Values to consider as True
false_values : list, default None
Values to consider as False
skipinitialspace : boolean, default False
Skip spaces after delimiter.
skiprows : list-like or integer or callable, default None
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c')
skip_footer : int, default 0
.. deprecated:: 0.19.0
Use the `skipfooter` parameter instead, as they are identical
nrows : int, default None
Number of rows of file to read. Useful for reading pieces of large files
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '""" + fill("', '".join(sorted(_NA_VALUES)),
70, subsequent_indent=" ") + """'.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
na_filter : boolean, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
skip_blank_lines : boolean, default True
If True, skip over blank lines rather than interpreting as NaN values
parse_dates : boolean or list of ints or names or list of lists or dict, \
default False
* boolean. If True -> try parsing the index.
* list of ints or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result
'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. For non-standard
datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : boolean, default False
If True and `parse_dates` is enabled, pandas will attempt to infer the
format of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : boolean, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, default None
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : boolean, default False
DD/MM format dates, international and European format
iterator : boolean, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
chunksize : int, default None
Return TextFileReader object for iteration.
See the `IO Tools docs
<http://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
.. versionadded:: 0.18.1 support for 'zip' and 'xz' compression.
thousands : str, default None
Thousands separator
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
float_precision : string, default None
Specifies which converter the C engine should use for floating-point
values. The options are `None` for the ordinary converter,
`high` for the high-precision converter, and `round_trip` for the
round-trip converter.
lineterminator : str (length 1), default None
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : boolean, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), default None
One-character string used to escape delimiter when quoting is QUOTE_NONE.
comment : str, default None
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if comment='#', parsing '#empty\\na,b,c\\n1,2,3'
with `header=0` will result in 'a,b,c' being
treated as the header.
encoding : str, default None
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_
dialect : str or csv.Dialect instance, default None
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
tupleize_cols : boolean, default False
.. deprecated:: 0.21.0
This argument will be removed and will always convert to MultiIndex
Leave a list of tuples on columns as is (default is to convert to
a MultiIndex on the columns)
error_bad_lines : boolean, default True
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
    If False, then these "bad lines" will be dropped from the DataFrame
    that is returned.
warn_bad_lines : boolean, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
low_memory : boolean, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser)
buffer_lines : int, default None
.. deprecated:: 0.19.0
This argument is not respected by the parser
compact_ints : boolean, default False
.. deprecated:: 0.19.0
Argument moved to ``pd.to_numeric``
If compact_ints is True, then for any column that is of integer dtype,
the parser will attempt to cast it as the smallest integer dtype possible,
either signed or unsigned depending on the specification from the
`use_unsigned` parameter.
use_unsigned : boolean, default False
.. deprecated:: 0.19.0
Argument moved to ``pd.to_numeric``
If integer columns are being compacted (i.e. `compact_ints=True`), specify
whether the column should be compacted to the smallest signed or unsigned
integer dtype.
memory_map : boolean, default False
If a filepath is provided for `filepath_or_buffer`, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
Returns
-------
result : DataFrame or TextParser
"""
# engine is not used in read_fwf() so is factored out of the shared docstring
_engine_doc = """engine : {'c', 'python'}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete."""
_sep_doc = r"""sep : str, default {default}
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used and automatically detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``
delimiter : str, default ``None``
Alternative argument name for sep."""
_read_csv_doc = """
Read CSV (comma-separated) file into DataFrame
%s
""" % (_parser_params % (_sep_doc.format(default="','"), _engine_doc))
_read_table_doc = """
Read general delimited file into DataFrame
%s
""" % (_parser_params % (_sep_doc.format(default="\\t (tab-stop)"),
_engine_doc))
_fwf_widths = """\
colspecs : list of pairs (int, int) or 'infer', optional
A list of pairs (tuples) giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of ints, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
delimiter : str, default ``'\t' + ' '``
Characters to consider as filler characters in the fixed-width file.
Can be used to specify the filler character of the fields
if it is not spaces (e.g., '~').
"""
_read_fwf_doc = """
Read a table of fixed-width formatted lines into DataFrame
%s
""" % (_parser_params % (_fwf_widths, ''))
def _validate_integer(name, val, min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
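    Examples
    --------
    A minimal illustration of the accepted values:
    >>> _validate_integer('nrows', 3.0)   # a float with no fractional part
    3
    >>> _validate_integer('nrows', None)  # None passes through unchanged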
"""
msg = "'{name:s}' must be an integer >={min_val:d}".format(name=name,
min_val=min_val)
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
return val
def _validate_names(names):
"""
Check if the `names` parameter contains duplicates.
If duplicates are found, we issue a warning before returning.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output DataFrame.
Returns
-------
names : array-like or None
The original `names` parameter.
"""
if names is not None:
if len(names) != len(set(names)):
msg = ("Duplicate names specified. This "
"will raise an error in the future.")
warnings.warn(msg, UserWarning, stacklevel=3)
return names
def _read(filepath_or_buffer, kwds):
"""Generic reader of line files."""
encoding = kwds.get('encoding', None)
if encoding is not None:
encoding = re.sub('_', '-', encoding).lower()
kwds['encoding'] = encoding
compression = kwds.get('compression')
compression = _infer_compression(filepath_or_buffer, compression)
filepath_or_buffer, _, compression = get_filepath_or_buffer(
filepath_or_buffer, encoding, compression)
kwds['compression'] = compression
if kwds.get('date_parser', None) is not None:
if isinstance(kwds['parse_dates'], bool):
kwds['parse_dates'] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get('iterator', False)
chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)
nrows = _validate_integer('nrows', kwds.get('nrows', None))
# Check for duplicates in names.
_validate_names(kwds.get("names", None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
try:
data = parser.read(nrows)
finally:
parser.close()
return data
_parser_defaults = {
'delimiter': None,
'doublequote': True,
'escapechar': None,
'quotechar': '"',
'quoting': csv.QUOTE_MINIMAL,
'skipinitialspace': False,
'lineterminator': None,
'header': 'infer',
'index_col': None,
'names': None,
'prefix': None,
'skiprows': None,
'na_values': None,
'true_values': None,
'false_values': None,
'converters': None,
'dtype': None,
'skipfooter': 0,
'keep_default_na': True,
'thousands': None,
'comment': None,
'decimal': b'.',
# 'engine': 'c',
'parse_dates': False,
'keep_date_col': False,
'dayfirst': False,
'date_parser': None,
'usecols': None,
'nrows': None,
# 'iterator': False,
'chunksize': None,
'verbose': False,
'encoding': None,
'squeeze': False,
'compression': None,
'mangle_dupe_cols': True,
'tupleize_cols': False,
'infer_datetime_format': False,
'skip_blank_lines': True
}
_c_parser_defaults = {
'delim_whitespace': False,
'as_recarray': False,
'na_filter': True,
'compact_ints': False,
'use_unsigned': False,
'low_memory': True,
'memory_map': False,
'buffer_lines': None,
'error_bad_lines': True,
'warn_bad_lines': True,
'tupleize_cols': False,
'float_precision': None
}
_fwf_defaults = {
'colspecs': 'infer',
'widths': None,
}
_c_unsupported = {'skipfooter'}
_python_unsupported = {
'low_memory',
'buffer_lines',
'float_precision',
}
_deprecated_defaults = {
'as_recarray': None,
'buffer_lines': None,
'compact_ints': None,
'use_unsigned': None,
'tupleize_cols': None
}
_deprecated_args = {
'as_recarray',
'buffer_lines',
'compact_ints',
'use_unsigned',
'tupleize_cols',
}
def _make_parser_function(name, sep=','):
default_sep = sep
def parser_f(filepath_or_buffer,
sep=sep,
delimiter=None,
# Column and Index Locations and Names
header='infer',
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression='infer',
thousands=None,
decimal=b'.',
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
tupleize_cols=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
skip_footer=0, # deprecated
# Internal
doublequote=True,
delim_whitespace=False,
as_recarray=None,
compact_ints=None,
use_unsigned=None,
low_memory=_c_parser_defaults['low_memory'],
buffer_lines=None,
memory_map=False,
float_precision=None):
# Alias sep -> delimiter.
if delimiter is None:
delimiter = sep
if delim_whitespace and delimiter is not default_sep:
raise ValueError("Specified a delimiter with both sep and"
" delim_whitespace=True; you can only"
" specify one.")
if engine is not None:
engine_specified = True
else:
engine = 'c'
engine_specified = False
if skip_footer != 0:
warnings.warn("The 'skip_footer' argument has "
"been deprecated and will be removed "
"in a future version. Please use the "
"'skipfooter' argument instead.",
FutureWarning, stacklevel=2)
kwds = dict(delimiter=delimiter,
engine=engine,
dialect=dialect,
compression=compression,
engine_specified=engine_specified,
doublequote=doublequote,
escapechar=escapechar,
quotechar=quotechar,
quoting=quoting,
skipinitialspace=skipinitialspace,
lineterminator=lineterminator,
header=header,
index_col=index_col,
names=names,
prefix=prefix,
skiprows=skiprows,
na_values=na_values,
true_values=true_values,
false_values=false_values,
keep_default_na=keep_default_na,
thousands=thousands,
comment=comment,
decimal=decimal,
parse_dates=parse_dates,
keep_date_col=keep_date_col,
dayfirst=dayfirst,
date_parser=date_parser,
nrows=nrows,
iterator=iterator,
chunksize=chunksize,
skipfooter=skipfooter or skip_footer,
converters=converters,
dtype=dtype,
usecols=usecols,
verbose=verbose,
encoding=encoding,
squeeze=squeeze,
memory_map=memory_map,
float_precision=float_precision,
na_filter=na_filter,
compact_ints=compact_ints,
use_unsigned=use_unsigned,
delim_whitespace=delim_whitespace,
as_recarray=as_recarray,
warn_bad_lines=warn_bad_lines,
error_bad_lines=error_bad_lines,
low_memory=low_memory,
buffer_lines=buffer_lines,
mangle_dupe_cols=mangle_dupe_cols,
tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format,
skip_blank_lines=skip_blank_lines)
return _read(filepath_or_buffer, kwds)
parser_f.__name__ = name
return parser_f
read_csv = _make_parser_function('read_csv', sep=',')
read_csv = Appender(_read_csv_doc)(read_csv)
read_table = _make_parser_function('read_table', sep='\t')
read_table = Appender(_read_table_doc)(read_table)
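# Illustrative usage sketch (comment only, nothing runs at import); the file
# name and column names below are hypothetical:
#
#     df = read_csv('data.csv', parse_dates=['date'], dtype={'id': 'int64'})
#     for chunk in read_csv('data.csv', chunksize=10000):
#         ...  # each chunk is a DataFrame of up to 10000 rows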
@Appender(_read_fwf_doc)
def read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds):
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, 'infer') and widths is not None:
raise ValueError("You must specify only one of 'widths' and "
"'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds['colspecs'] = colspecs
kwds['engine'] = 'python-fwf'
return _read(filepath_or_buffer, kwds)
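# Illustrative sketch of equivalent read_fwf calls (the file name is
# hypothetical); a widths list is expanded into the matching colspecs:
#
#     read_fwf('fixed.txt', colspecs=[(0, 5), (5, 12)])
#     read_fwf('fixed.txt', widths=[5, 7])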
class TextFileReader(BaseIterator):
"""
    Iterating wrapper around the parser engines. Any dialect passed in
    overrides the related parser options (delimiter, doublequote, escapechar,
    skipinitialspace, quotechar, and quoting).
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = 'python'
engine_specified = False
self._engine_specified = kwds.get('engine_specified', engine_specified)
if kwds.get('dialect') is not None:
dialect = kwds['dialect']
if dialect in csv.list_dialects():
dialect = csv.get_dialect(dialect)
# Any valid dialect should have these attributes.
# If any are missing, we will raise automatically.
for param in ('delimiter', 'doublequote', 'escapechar',
'skipinitialspace', 'quotechar', 'quoting'):
try:
dialect_val = getattr(dialect, param)
except AttributeError:
raise ValueError("Invalid dialect '{dialect}' provided"
.format(dialect=kwds['dialect']))
provided = kwds.get(param, _parser_defaults[param])
# Messages for conflicting values between the dialect instance
# and the actual parameters provided.
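                # For example (hypothetical call): dialect='excel' combined
                # with an explicit delimiter=';' warns below and keeps the
                # dialect's ',' as the delimiter.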
conflict_msgs = []
if dialect_val != provided:
conflict_msgs.append((
"Conflicting values for '{param}': '{val}' was "
"provided, but the dialect specifies '{diaval}'. "
"Using the dialect-specified value.".format(
param=param, val=provided, diaval=dialect_val)))
if conflict_msgs:
warnings.warn('\n\n'.join(conflict_msgs), ParserWarning,
stacklevel=2)
kwds[param] = dialect_val
if kwds.get('header', 'infer') == 'infer':
kwds['header'] = 0 if kwds.get('names') is None else None
self.orig_options = kwds
# miscellanea
self.engine = engine
self._engine = None
self._currow = 0
options = self._get_options_with_defaults(engine)
self.chunksize = options.pop('chunksize', None)
self.nrows = options.pop('nrows', None)
self.squeeze = options.pop('squeeze', False)
# might mutate self.engine
self.engine = self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if 'has_index_names' in kwds:
self.options['has_index_names'] = kwds['has_index_names']
self._make_engine(self.engine)
def close(self):
self._engine.close()
def _get_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in compat.iteritems(_parser_defaults):
value = kwds.get(argname, default)
# see gh-12935
if argname == 'mangle_dupe_cols' and not value:
raise ValueError('Setting mangle_dupe_cols=False is '
'not supported yet')
else:
options[argname] = value
for argname, default in compat.iteritems(_c_parser_defaults):
if argname in kwds:
value = kwds[argname]
if engine != 'c' and value != default:
if ('python' in engine and
argname not in _python_unsupported):
pass
elif value == _deprecated_defaults.get(argname, default):
pass
else:
raise ValueError(
'The %r option is not supported with the'
' %r engine' % (argname, engine))
else:
value = _deprecated_defaults.get(argname, default)
options[argname] = value
if engine == 'python-fwf':
for argname, default in compat.iteritems(_fwf_defaults):
options[argname] = kwds.get(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f):
next_attr = "__next__" if PY3 else "next"
# The C engine doesn't need the file-like to have the "next" or
# "__next__" attribute. However, the Python engine explicitly calls
# "next(...)" when iterating through such an object, meaning it
# needs to have that attribute ("next" for Python 2.x, "__next__"
# for Python 3.x)
if engine != "c" and not hasattr(f, next_attr):
msg = ("The 'python' engine cannot iterate "
"through this file buffer.")
raise ValueError(msg)
return engine
def _clean_options(self, options, engine):
result = options.copy()
engine_specified = self._engine_specified
fallback_reason = None
sep = options['delimiter']
delim_whitespace = options['delim_whitespace']
# C engine not supported yet
if engine == 'c':
if options['skipfooter'] > 0:
fallback_reason = "the 'c' engine does not support"\
" skipfooter"
engine = 'python'
encoding = sys.getfilesystemencoding() or 'utf-8'
if sep is None and not delim_whitespace:
if engine == 'c':
fallback_reason = "the 'c' engine does not support"\
" sep=None with delim_whitespace=False"
engine = 'python'
elif sep is not None and len(sep) > 1:
if engine == 'c' and sep == '\s+':
result['delim_whitespace'] = True
del result['delimiter']
elif engine not in ('python', 'python-fwf'):
# wait until regex engine integrated
fallback_reason = "the 'c' engine does not support"\
" regex separators (separators > 1 char and"\
" different from '\s+' are"\
" interpreted as regex)"
engine = 'python'
elif delim_whitespace:
if 'python' in engine:
result['delimiter'] = '\s+'
elif sep is not None:
encodeable = True
try:
if len(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ('python', 'python-fwf'):
fallback_reason = "the separator encoded in {encoding}" \
" is > 1 char long, and the 'c' engine" \
" does not support such separators".format(
encoding=encoding)
engine = 'python'
quotechar = options['quotechar']
if (quotechar is not None and
isinstance(quotechar, (str, compat.text_type, bytes))):
if (len(quotechar) == 1 and ord(quotechar) > 127 and
engine not in ('python', 'python-fwf')):
fallback_reason = ("ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support "
"such quotechars")
engine = 'python'
if fallback_reason and engine_specified:
raise ValueError(fallback_reason)
if engine == 'c':
for arg in _c_unsupported:
del result[arg]
if 'python' in engine:
for arg in _python_unsupported:
if fallback_reason and result[arg] != _c_parser_defaults[arg]:
msg = ("Falling back to the 'python' engine because"
" {reason}, but this causes {option!r} to be"
" ignored as it is not supported by the 'python'"
" engine.").format(reason=fallback_reason,
option=arg)
raise ValueError(msg)
del result[arg]
if fallback_reason:
warnings.warn(("Falling back to the 'python' engine because"
" {0}; you can avoid this warning by specifying"
" engine='python'.").format(fallback_reason),
ParserWarning, stacklevel=5)
index_col = options['index_col']
names = options['names']
converters = options['converters']
na_values = options['na_values']
skiprows = options['skiprows']
# really delete this one
keep_default_na = result.pop('keep_default_na')
_validate_header_arg(options['header'])
depr_warning = ''
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
msg = ("The '{arg}' argument has been deprecated "
"and will be removed in a future version."
.format(arg=arg))
if arg == 'as_recarray':
                msg += ' Please call pd.read_csv(...).to_records() instead.'
elif arg == 'tupleize_cols':
msg += (' Column tuples will then '
'always be converted to MultiIndex.')
if result.get(arg, depr_default) != depr_default:
depr_warning += msg + '\n\n'
else:
result[arg] = parser_default
if depr_warning != '':
warnings.warn(depr_warning, FutureWarning, stacklevel=2)
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not isinstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result['index_col'] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not isinstance(converters, dict):
raise TypeError('Type converters must be a dict or'
' subclass, input was '
'a {0!r}'.format(type(converters).__name__))
else:
converters = {}
# Converting values to NA
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is internally handled by the
# c-engine, so only need for python parsers
if engine != 'c':
if is_integer(skiprows):
skiprows = lrange(skiprows)
if skiprows is None:
skiprows = set()
elif not callable(skiprows):
skiprows = set(skiprows)
# put stuff back
result['names'] = names
result['converters'] = converters
result['na_values'] = na_values
result['na_fvalues'] = na_fvalues
result['skiprows'] = skiprows
return result, engine
def __next__(self):
try:
return self.get_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine='c'):
if engine == 'c':
self._engine = CParserWrapper(self.f, **self.options)
else:
if engine == 'python':
klass = PythonParser
elif engine == 'python-fwf':
klass = FixedWidthFieldParser
else:
raise ValueError('Unknown engine: {engine} (valid options are'
' "c", "python", or' ' "python-fwf")'.format(
engine=engine))
self._engine = klass(self.f, **self.options)
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
if nrows is not None:
if self.options.get('skipfooter'):
raise ValueError('skipfooter not supported for iteration')
ret = self._engine.read(nrows)
if self.options.get('as_recarray'):
return ret
# May alter columns / col_dict
index, columns, col_dict = self._create_index(ret)
if index is None:
if col_dict:
# Any column is actually fine:
new_rows = len(compat.next(compat.itervalues(col_dict)))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = len(index)
df = DataFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and len(df.columns) == 1:
return df[df.columns[0]].copy()
return df
def _create_index(self, ret):
index, columns, col_dict = ret
return index, columns, col_dict
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = min(size, self.nrows - self._currow)
return self.read(nrows=size)
def _is_index_col(col):
return col is not None and col is not False
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
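    For example (hypothetical column names):
    >>> _evaluate_usecols(lambda n: n.startswith('a'), ['ab', 'cd', 'ad'])
    {0, 2}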
"""
if callable(usecols):
return set([i for i, name in enumerate(names)
if usecols(name)])
return usecols
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : array-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
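    For example (illustrative values):
    >>> _validate_usecols_arg([0, 2])
    ({0, 2}, 'integer')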
"""
msg = ("'usecols' must either be all strings, all unicode, "
"all integers or a callable")
if usecols is not None:
if callable(usecols):
return usecols, None
usecols_dtype = lib.infer_dtype(usecols)
if usecols_dtype not in ('empty', 'integer',
'string', 'unicode'):
raise ValueError(msg)
return set(usecols), usecols_dtype
return usecols, None
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
    is a non-boolean scalar. Raises a TypeError if
that is the case.
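    For example (illustrative value):
    >>> _validate_parse_dates_arg(['date'])
    ['date']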
"""
msg = ("Only booleans, lists, and "
"dictionaries are accepted "
"for the 'parse_dates' parameter")
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
class ParserBase(object):
def __init__(self, kwds):
self.names = kwds.get('names')
self.orig_names = None
self.prefix = kwds.pop('prefix', None)
self.index_col = kwds.get('index_col', None)
self.index_names = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(
kwds.pop('parse_dates', False))
self.date_parser = kwds.pop('date_parser', None)
self.dayfirst = kwds.pop('dayfirst', False)
self.keep_date_col = kwds.pop('keep_date_col', False)
self.na_values = kwds.get('na_values')
self.na_fvalues = kwds.get('na_fvalues')
self.true_values = kwds.get('true_values')
self.false_values = kwds.get('false_values')
self.as_recarray = kwds.get('as_recarray', False)
self.tupleize_cols = kwds.get('tupleize_cols', False)
self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True)
self.infer_datetime_format = kwds.pop('infer_datetime_format', False)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_format=self.infer_datetime_format
)
# validate header options for mi
self.header = kwds.get('header')
if isinstance(self.header, (list, tuple, np.ndarray)):
if not all(map(is_integer, self.header)):
raise ValueError("header must be integer or list of integers")
if kwds.get('as_recarray'):
raise ValueError("cannot specify as_recarray when "
"specifying a multi-index header")
if kwds.get('usecols'):
raise ValueError("cannot specify usecols when "
"specifying a multi-index header")
if kwds.get('names'):
raise ValueError("cannot specify names when "
"specifying a multi-index header")
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = isinstance(self.index_col, (list, tuple,
np.ndarray))
if not (is_sequence and
all(map(is_integer, self.index_col)) or
is_integer(self.index_col)):
raise ValueError("index_col must only contain row numbers "
"when specifying a multi-index header")
# GH 16338
elif self.header is not None and not is_integer(self.header):
raise ValueError("header must be integer or list of integers")
self._name_processed = False
self._first_chunk = True
# GH 13932
# keep references to file handles opened by the parser itself
self.handles = []
def close(self):
for f in self.handles:
f.close()
@property
def _has_complex_date_col(self):
return (isinstance(self.parse_dates, dict) or
(isinstance(self.parse_dates, list) and
len(self.parse_dates) > 0 and
isinstance(self.parse_dates[0], list)))
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
return ((j == self.parse_dates) or
(name is not None and name == self.parse_dates))
else:
return ((j in self.parse_dates) or
(name is not None and name in self.parse_dates))
def _extract_multi_indexer_columns(self, header, index_names, col_names,
passed_names=False):
""" extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers """
if len(header) < 2:
return header[0], index_names, col_names, passed_names
# the names are the tuples of the header that are not the index cols
# 0 is the name of the index, assuming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not isinstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header.pop(-1)
index_names, names, index_col = _clean_index_names(index_names,
self.index_col)
# extract the columns
field_count = len(header[0])
def extract(r):
return tuple([r[i] for i in range(field_count) if i not in sic])
columns = lzip(*[extract(r) for r in header])
names = ic + columns
def tostr(x):
return str(x) if not isinstance(x, compat.string_types) else x
# if we find 'Unnamed' all of a single level, then our header was too
# long
for n in range(len(columns[0])):
if all(['Unnamed' in tostr(c[n]) for c in columns]):
raise ParserError(
"Passed header=[%s] are too many rows for this "
"multi_index of columns"
% ','.join([str(x) for x in self.header])
)
# clean the column names (if we have an index_col)
if len(ic):
col_names = [r[0] if len(r[0]) and 'Unnamed' not in r[0] else None
for r in header]
else:
col_names = [None] * len(header)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
# see gh-7160 and gh-9424: this helps to provide
# immediate alleviation of the duplicate names
# issue and appears to be satisfactory to users,
# but ultimately, not needing to butcher the names
# would be nice!
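        # For example (hypothetical names):
        #   ['a', 'a', 'b'] -> ['a', 'a.1', 'b']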
if self.mangle_dupe_cols:
names = list(names) # so we can index
counts = defaultdict(int)
for i, col in enumerate(names):
cur_count = counts[col]
while cur_count > 0:
counts[col] = cur_count + 1
col = '%s.%d' % (col, cur_count)
cur_count = counts[col]
names[i] = col
counts[col] = cur_count + 1
return names
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if (not self.tupleize_cols and len(columns) and
not isinstance(columns, MultiIndex) and
all([isinstance(c, tuple) for c in columns])):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._get_simple_index(alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _,
self.index_col) = _clean_index_names(list(columns),
self.index_col)
self._name_processed = True
index = self._get_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = len(indexnamerow) - len(columns)
index = index.set_names(indexnamerow[:coffset])
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _get_simple_index(self, data, columns):
def ix(col):
if not isinstance(col, compat.string_types):
return col
raise ValueError('Index %s invalid' % col)
index = None
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.append(i)
index.append(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in reversed(sorted(to_remove)):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _get_complex_date_index(self, data, col_names):
def _get_name(icol):
if isinstance(icol, compat.string_types):
return icol
if col_names is None:
raise ValueError(('Must supply column order to use %s as '
'index') % str(icol))
for i, c in enumerate(col_names):
if i == icol:
return c
index = None
to_remove = []
index = []
for idx in self.index_col:
name = _get_name(idx)
to_remove.append(name)
index.append(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in reversed(sorted(to_remove)):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True):
arrays = []
for i, arr in enumerate(index):
if (try_parse_dates and self._should_parse_dates(i)):
arr = self._date_conv(arr)
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
if isinstance(self.na_values, dict):
col_name = self.index_names[i]
if col_name is not None:
col_na_values, col_na_fvalues = _get_na_values(
col_name, self.na_values, self.na_fvalues)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
names = self.index_names
index = _ensure_index_from_sequences(arrays, names)
return index
def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
converters=None, dtypes=None):
result = {}
for c, values in compat.iteritems(dct):
conv_f = None if converters is None else converters.get(c, None)
if isinstance(dtypes, dict):
cast_type = dtypes.get(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _get_na_values(
c, na_values, na_fvalues)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(("Both a converter and dtype were specified "
"for column {0} - only the converter will "
"be used").format(c), ParserWarning,
stacklevel=7)
try:
values = lib.map_infer(values, conv_f)
except ValueError:
mask = algorithms.isin(
values, list(na_values)).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues,
try_num_bool=False)
else:
# skip inference if specified dtype is object
try_num_bool = not (cast_type and is_string_dtype(cast_type))
# general type inference and conversion
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues,
try_num_bool)
            # type specified in dtype param
if cast_type and not is_dtype_equal(cvals, cast_type):
cvals = self._cast_types(cvals, cast_type, c)
if issubclass(cvals.dtype.type, np.integer) and self.compact_ints:
cvals = lib.downcast_int64(
cvals, parsers.na_values,
self.use_unsigned)
result[c] = cvals
if verbose and na_count:
print('Filled %d NA values in column %s' % (na_count, str(c)))
return result
def _infer_types(self, values, na_values, try_num_bool=True):
"""
Infer types of values, possibly casting
Parameters
----------
values : ndarray
na_values : set
        try_num_bool : bool, default True
            try to cast values to numeric (first preference) or boolean
        Returns
        -------
converted : ndarray
na_count : int
"""
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = algorithms.isin(values, list(na_values))
na_count = mask.sum()
if na_count > 0:
if is_integer_dtype(values):
values = values.astype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
if try_num_bool:
try:
result = lib.maybe_convert_numeric(values, na_values, False)
na_count = isna(result).sum()
except Exception:
result = values
if values.dtype == np.object_:
na_count = lib.sanitize_objects(result, na_values, False)
else:
result = values
if values.dtype == np.object_:
na_count = lib.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
result = lib.maybe_convert_bool(values,
true_values=self.true_values,
false_values=self.false_values)
return result, na_count
def _cast_types(self, values, cast_type, column):
"""
Cast values to specified type
Parameters
----------
values : ndarray
cast_type : string or np.dtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray
"""
if is_categorical_dtype(cast_type):
known_cats = (isinstance(cast_type, CategoricalDtype) and
cast_type.categories is not None)
if not is_object_dtype(values) and not known_cats:
# XXX this is for consistency with
# c-parser which parses all categories
# as strings
values = astype_nansafe(values, str)
cats = Index(values).unique().dropna()
values = Categorical._from_inferred_categories(
cats, cats.get_indexer(values), cast_type
)
else:
try:
values = astype_nansafe(values, cast_type, copy=True)
except ValueError:
raise ValueError("Unable to convert column %s to "
"type %s" % (column, cast_type))
return values
def _do_date_conversions(self, names, data):
# returns data, columns
if self.parse_dates is not None:
data, names = _process_date_conversion(
data, self._date_conv, self.parse_dates, self.index_col,
self.index_names, names, keep_date_col=self.keep_date_col)
return names, data
class CParserWrapper(ParserBase):
"""
"""
def __init__(self, src, **kwds):
self.kwds = kwds
kwds = kwds.copy()
ParserBase.__init__(self, kwds)
if 'utf-16' in (kwds.get('encoding') or ''):
if isinstance(src, compat.string_types):
src = open(src, 'rb')
self.handles.append(src)
src = UTF8Recoder(src, kwds['encoding'])
kwds['encoding'] = 'utf-8'
# #2442
kwds['allow_leading_cols'] = self.index_col is not False
self._reader = parsers.TextReader(src, **kwds)
# XXX
self.usecols, self.usecols_dtype = _validate_usecols_arg(
self._reader.usecols)
passed_names = self.names is None
if self._reader.header is None:
self.names = None
else:
if len(self._reader.header) > 1:
# we have a multi index in the columns
self.names, self.index_names, self.col_names, passed_names = (
self._extract_multi_indexer_columns(
self._reader.header, self.index_names, self.col_names,
passed_names
)
)
else:
self.names = list(self._reader.header[0])
if self.names is None:
if self.prefix:
self.names = ['%s%d' % (self.prefix, i)
for i in range(self._reader.table_width)]
else:
self.names = lrange(self._reader.table_width)
# gh-9755
#
# need to set orig_names here first
# so that proper indexing can be done
# with _set_noconvert_columns
#
# once names has been filtered, we will
# then set orig_names again to names
self.orig_names = self.names[:]
if self.usecols:
usecols = _evaluate_usecols(self.usecols, self.orig_names)
# GH 14671
if (self.usecols_dtype == 'string' and
not set(usecols).issubset(self.orig_names)):
raise ValueError("Usecols do not match names.")
if len(self.names) > len(usecols):
self.names = [n for i, n in enumerate(self.names)
if (i in usecols or n in usecols)]
if len(self.names) < len(usecols):
raise ValueError("Usecols do not match names.")
self._set_noconvert_columns()
self.orig_names = self.names
if not self._has_complex_date_col:
if (self._reader.leading_cols == 0 and
_is_index_col(self.index_col)):
self._name_processed = True
(index_names, self.names,
self.index_col) = _clean_index_names(self.names,
self.index_col)
if self.index_names is None:
self.index_names = index_names
if self._reader.header is None and not passed_names:
self.index_names = [None] * len(self.index_names)
self._implicit_index = self._reader.leading_cols > 0
def close(self):
for f in self.handles:
f.close()
# close additional handles opened by C parser (for compression)
try:
self._reader.close()
except:
pass
def _set_noconvert_columns(self):
"""
Set the columns that should not undergo dtype conversions.
Currently, any column that is involved with date parsing will not
undergo such conversions.
"""
names = self.orig_names
if self.usecols_dtype == 'integer':
# A set of integers will be converted to a list in
# the correct order every single time.
usecols = list(self.usecols)
usecols.sort()
elif (callable(self.usecols) or
self.usecols_dtype not in ('empty', None)):
# The names attribute should have the correct columns
# in the proper order for indexing with parse_dates.
usecols = self.names[:]
else:
# Usecols is empty.
usecols = None
def _set(x):
if usecols is not None and is_integer(x):
x = usecols[x]
if not is_integer(x):
x = names.index(x)
self._reader.set_noconvert(x)
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
_set(k)
elif self.index_col is not None:
_set(self.index_col)
def set_error_bad_lines(self, status):
self._reader.set_error_bad_lines(int(status))
def read(self, nrows=None):
try:
data = self._reader.read(nrows)
except StopIteration:
if self._first_chunk:
self._first_chunk = False
names = self._maybe_dedup_names(self.orig_names)
index, columns, col_dict = _get_empty_meta(
names, self.index_col, self.index_names,
dtype=self.kwds.get('dtype'))
columns = self._maybe_make_multi_index_columns(
columns, self.col_names)
if self.usecols is not None:
columns = self._filter_usecols(columns)
col_dict = dict(filter(lambda item: item[0] in columns,
col_dict.items()))
return index, columns, col_dict
else:
raise
# Done with first read, next time raise StopIteration
self._first_chunk = False
if self.as_recarray:
# what to do if there are leading columns?
return data
names = self.names
if self._reader.leading_cols:
if self._has_complex_date_col:
raise NotImplementedError('file structure not yet supported')
# implicit index, no index names
arrays = []
for i in range(self._reader.leading_cols):
if self.index_col is None:
values = data.pop(i)
else:
values = data.pop(self.index_col[i])
values = self._maybe_parse_dates(values, i,
try_parse_dates=True)
arrays.append(values)
index = _ensure_index_from_sequences(arrays)
if self.usecols is not None:
names = self._filter_usecols(names)
names = self._maybe_dedup_names(names)
# rename dict keys
data = sorted(data.items())
data = dict((k, v) for k, (i, v) in zip(names, data))
names, data = self._do_date_conversions(names, data)
else:
# rename dict keys
data = sorted(data.items())
# ugh, mutation
names = list(self.orig_names)
names = self._maybe_dedup_names(names)
if self.usecols is not None:
names = self._filter_usecols(names)
# columns as list
alldata = [x[1] for x in data]
data = dict((k, v) for k, (i, v) in zip(names, data))
names, data = self._do_date_conversions(names, data)
index, names = self._make_index(data, alldata, names)
# maybe create a mi on the columns
names = self._maybe_make_multi_index_columns(names, self.col_names)
return index, names, data
def _filter_usecols(self, names):
# hackish
usecols = _evaluate_usecols(self.usecols, names)
if usecols is not None and len(names) != len(usecols):
names = [name for i, name in enumerate(names)
if i in usecols or name in usecols]
return names
def _get_index_names(self):
names = list(self._reader.header[0])
idx_names = None
if self._reader.leading_cols == 0 and self.index_col is not None:
(idx_names, names,
self.index_col) = _clean_index_names(names, self.index_col)
return names, idx_names
def _maybe_parse_dates(self, values, index, try_parse_dates=True):
if try_parse_dates and self._should_parse_dates(index):
values = self._date_conv(values)
return values
def TextParser(*args, **kwds):
"""
Converts lists of lists/tuples into DataFrames with proper type inference
and optional (e.g. string to datetime) conversion. Also enables iterating
lazily over chunks of large files
Parameters
----------
data : file-like object or list
delimiter : separator character to use
dialect : str or csv.Dialect instance, default None
Ignored if delimiter is longer than 1 character
    names : sequence, default None
header : int, default 0
Row to use to parse column labels. Defaults to the first row. Prior
rows will be discarded
index_col : int or list, default None
Column or columns to use as the (possibly hierarchical) index
    has_index_names : boolean, default False
True if the cols defined in index_col have an index name and are
not in the header
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN.
keep_default_na : bool, default True
thousands : str, default None
Thousands separator
comment : str, default None
Comment out remainder of line
parse_dates : boolean, default False
keep_date_col : boolean, default False
date_parser : function, default None
skiprows : list of integers
Row numbers to skip
skipfooter : int
        Number of lines at bottom of file to skip
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
encoding : string, default None
Encoding to use for UTF when reading/writing (ex. 'utf-8')
squeeze : boolean, default False
returns Series if only one column
    infer_datetime_format : boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
float_precision : string, default None
Specifies which converter the C engine should use for floating-point
values. The options are None for the ordinary converter,
'high' for the high-precision converter, and 'round_trip' for the
round-trip converter.
"""
kwds['engine'] = 'python'
return TextFileReader(*args, **kwds)
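# Illustrative sketch of TextParser on already-split rows (values are made up):
#
#     reader = TextParser([['a', 'b'], ['1', '2'], ['3', '4']], header=0)
#     df = reader.read()   # DataFrame with columns 'a' and 'b'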
def count_empty_vals(vals):
return sum([1 for v in vals if v == '' or v is None])
class PythonParser(ParserBase):
def __init__(self, f, **kwds):
"""
Workhorse function for processing nested list into DataFrame
Should be replaced by np.genfromtxt eventually?
"""
ParserBase.__init__(self, kwds)
self.data = None
self.buf = []
self.pos = 0
self.line_pos = 0
self.encoding = kwds['encoding']
self.compression = kwds['compression']
self.memory_map = kwds['memory_map']
self.skiprows = kwds['skiprows']
if callable(self.skiprows):
self.skipfunc = self.skiprows
else:
self.skipfunc = lambda x: x in self.skiprows
self.skipfooter = _validate_skipfooter_arg(kwds['skipfooter'])
self.delimiter = kwds['delimiter']
self.quotechar = kwds['quotechar']
if isinstance(self.quotechar, compat.text_type):
self.quotechar = str(self.quotechar)
self.escapechar = kwds['escapechar']
self.doublequote = kwds['doublequote']
self.skipinitialspace = kwds['skipinitialspace']
self.lineterminator = kwds['lineterminator']
self.quoting = kwds['quoting']
self.usecols, _ = _validate_usecols_arg(kwds['usecols'])
self.skip_blank_lines = kwds['skip_blank_lines']
self.warn_bad_lines = kwds['warn_bad_lines']
self.error_bad_lines = kwds['error_bad_lines']
self.names_passed = kwds['names'] or None
self.na_filter = kwds['na_filter']
self.has_index_names = False
if 'has_index_names' in kwds:
self.has_index_names = kwds['has_index_names']
self.verbose = kwds['verbose']
self.converters = kwds['converters']
self.dtype = kwds['dtype']
self.compact_ints = kwds['compact_ints']
self.use_unsigned = kwds['use_unsigned']
self.thousands = kwds['thousands']
self.decimal = kwds['decimal']
self.comment = kwds['comment']
self._comment_lines = []
mode = 'r' if PY3 else 'rb'
f, handles = _get_handle(f, mode, encoding=self.encoding,
compression=self.compression,
memory_map=self.memory_map)
self.handles.extend(handles)
# Set self.data to something that can read lines.
if hasattr(f, 'readline'):
self._make_reader(f)
else:
self.data = f
# Get columns in two steps: infer from data, then
        # infer column indices from self.usecols if it is specified.
self._col_indices = None
self.columns, self.num_original_columns = self._infer_columns()
# Now self.columns has the set of columns that we will process.
# The original set is stored in self.original_columns.
if len(self.columns) > 1:
# we are processing a multi index column
self.columns, self.index_names, self.col_names, _ = (
self._extract_multi_indexer_columns(
self.columns, self.index_names, self.col_names
)
)
# Update list of original names to include all indices.
self.num_original_columns = len(self.columns)
else:
self.columns = self.columns[0]
# get popped off for index
self.orig_names = list(self.columns)
# needs to be cleaned/refactored
# multiple date column thing turning into a real spaghetti factory
if not self._has_complex_date_col:
(index_names, self.orig_names, self.columns) = (
self._get_index_name(self.columns))
self._name_processed = True
if self.index_names is None:
self.index_names = index_names
if self.parse_dates:
self._no_thousands_columns = self._set_no_thousands_columns()
else:
self._no_thousands_columns = None
if len(self.decimal) != 1:
raise ValueError('Only length-1 decimal markers supported')
if self.thousands is None:
self.nonnum = re.compile('[^-^0-9^%s]+' % self.decimal)
else:
self.nonnum = re.compile('[^-^0-9^%s^%s]+' % (self.thousands,
self.decimal))
def _set_no_thousands_columns(self):
# Create a set of column ids that are not to be stripped of thousands
# operators.
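        # For example (hypothetical names), with parse_dates=['date'] and
        # columns ['date', 'amount'], only index 0 ('date') is added here,
        # so thousands separators in 'amount' are still stripped.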
noconvert_columns = set()
def _set(x):
if is_integer(x):
noconvert_columns.add(x)
else:
noconvert_columns.add(self.columns.index(x))
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
_set(k)
elif self.index_col is not None:
_set(self.index_col)
return noconvert_columns
def _make_reader(self, f):
sep = self.delimiter
if sep is None or len(sep) == 1:
if self.lineterminator:
raise ValueError('Custom line terminators not supported in '
'python parser (yet)')
class MyDialect(csv.Dialect):
delimiter = self.delimiter
quotechar = self.quotechar
escapechar = self.escapechar
doublequote = self.doublequote
skipinitialspace = self.skipinitialspace
quoting = self.quoting
lineterminator = '\n'
dia = MyDialect
sniff_sep = True
if sep is not None:
sniff_sep = False
dia.delimiter = sep
# attempt to sniff the delimiter
if sniff_sep:
line = f.readline()
while self.skipfunc(self.pos):
self.pos += 1
line = f.readline()
line = self._check_comments([line])[0]
self.pos += 1
self.line_pos += 1
sniffed = csv.Sniffer().sniff(line)
dia.delimiter = sniffed.delimiter
if self.encoding is not None:
self.buf.extend(list(
UnicodeReader(StringIO(line),
dialect=dia,
encoding=self.encoding)))
else:
self.buf.extend(list(csv.reader(StringIO(line),
dialect=dia)))
if self.encoding is not None:
reader = UnicodeReader(f, dialect=dia,
encoding=self.encoding,
strict=True)
else:
reader = csv.reader(f, dialect=dia,
strict=True)
else:
def _read():
line = f.readline()
if compat.PY2 and self.encoding:
line = line.decode(self.encoding)
pat = re.compile(sep)
yield pat.split(line.strip())
for line in f:
yield pat.split(line.strip())
reader = _read()
self.data = reader
def read(self, rows=None):
try:
content = self._get_lines(rows)
except StopIteration:
if self._first_chunk:
content = []
else:
raise
# done with first read, next time raise StopIteration
self._first_chunk = False
columns = list(self.orig_names)
if not len(content): # pragma: no cover
# DataFrame with the right metadata, even though it's length 0
names = self._maybe_dedup_names(self.orig_names)
index, columns, col_dict = _get_empty_meta(
names, self.index_col, self.index_names, self.dtype)
columns = self._maybe_make_multi_index_columns(
columns, self.col_names)
return index, columns, col_dict
# handle new style for names in index
count_empty_content_vals = count_empty_vals(content[0])
indexnamerow = None
if self.has_index_names and count_empty_content_vals == len(columns):
indexnamerow = content[0]
content = content[1:]
alldata = self._rows_to_cols(content)
data = self._exclude_implicit_index(alldata)
columns = self._maybe_dedup_names(self.columns)
columns, data = self._do_date_conversions(columns, data)
data = self._convert_data(data)
if self.as_recarray:
return self._to_recarray(data, columns)
index, columns = self._make_index(data, alldata, columns, indexnamerow)
return index, columns, data
def _exclude_implicit_index(self, alldata):
names = self._maybe_dedup_names(self.orig_names)
if self._implicit_index:
excl_indices = self.index_col
data = {}
offset = 0
for i, col in enumerate(names):
while i + offset in excl_indices:
offset += 1
data[col] = alldata[i + offset]
else:
data = dict((k, v) for k, v in zip(names, alldata))
return data
# legacy
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
return self.read(rows=size)
def _convert_data(self, data):
# apply converters
def _clean_mapping(mapping):
"converts col numbers to names"
clean = {}
for col, v in compat.iteritems(mapping):
if isinstance(col, int) and col not in self.orig_names:
col = self.orig_names[col]
clean[col] = v
return clean
clean_conv = _clean_mapping(self.converters)
if not isinstance(self.dtype, dict):
# handles single dtype applied to all columns
clean_dtypes = self.dtype
else:
clean_dtypes = _clean_mapping(self.dtype)
# Apply NA values.
clean_na_values = {}
clean_na_fvalues = {}
if isinstance(self.na_values, dict):
for col in self.na_values:
na_value = self.na_values[col]
na_fvalue = self.na_fvalues[col]
if isinstance(col, int) and col not in self.orig_names:
col = self.orig_names[col]
clean_na_values[col] = na_value
clean_na_fvalues[col] = na_fvalue
else:
clean_na_values = self.na_values
clean_na_fvalues = self.na_fvalues
return self._convert_to_ndarrays(data, clean_na_values,
clean_na_fvalues, self.verbose,
clean_conv, clean_dtypes)
def _to_recarray(self, data, columns):
dtypes = []
o = compat.OrderedDict()
# use the columns to "order" the keys
# in the unordered 'data' dictionary
for col in columns:
dtypes.append((str(col), data[col].dtype))
o[col] = data[col]
tuples = lzip(*o.values())
return np.array(tuples, dtypes)
def _infer_columns(self):
names = self.names
num_original_columns = 0
clear_buffer = True
if self.header is not None:
header = self.header
if isinstance(header, (list, tuple, np.ndarray)):
have_mi_columns = len(header) > 1
# we have a mi columns, so read an extra line
if have_mi_columns:
header = list(header) + [header[-1] + 1]
else:
have_mi_columns = False
header = [header]
columns = []
for level, hr in enumerate(header):
try:
line = self._buffered_line()
while self.line_pos <= hr:
line = self._next_line()
except StopIteration:
if self.line_pos < hr:
raise ValueError(
'Passed header=%s but only %d lines in file'
% (hr, self.line_pos + 1))
# We have an empty file, so check
# if columns are provided. That will
# serve as the 'line' for parsing
if have_mi_columns and hr > 0:
if clear_buffer:
self._clear_buffer()
columns.append([None] * len(columns[-1]))
return columns, num_original_columns
if not self.names:
raise EmptyDataError(
"No columns to parse from file")
line = self.names[:]
unnamed_count = 0
this_columns = []
for i, c in enumerate(line):
if c == '':
if have_mi_columns:
this_columns.append('Unnamed: %d_level_%d'
% (i, level))
else:
this_columns.append('Unnamed: %d' % i)
unnamed_count += 1
else:
this_columns.append(c)
if not have_mi_columns and self.mangle_dupe_cols:
counts = defaultdict(int)
for i, col in enumerate(this_columns):
cur_count = counts[col]
while cur_count > 0:
counts[col] = cur_count + 1
col = "%s.%d" % (col, cur_count)
cur_count = counts[col]
this_columns[i] = col
counts[col] = cur_count + 1
elif have_mi_columns:
                    # if we have grabbed an extra line, but it's not in our
                    # format, so save it in the buffer and create a blank extra
                    # line for the rest of the parsing code
if hr == header[-1]:
lc = len(this_columns)
ic = (len(self.index_col)
if self.index_col is not None else 0)
if lc != unnamed_count and lc - ic > unnamed_count:
clear_buffer = False
this_columns = [None] * lc
self.buf = [self.buf[-1]]
columns.append(this_columns)
if len(columns) == 1:
num_original_columns = len(this_columns)
if clear_buffer:
self._clear_buffer()
if names is not None:
if ((self.usecols is not None and
len(names) != len(self.usecols)) or
(self.usecols is None and
len(names) != len(columns[0]))):
raise ValueError('Number of passed names did not match '
'number of header fields in the file')
if len(columns) > 1:
raise TypeError('Cannot pass names with multi-index '
'columns')
if self.usecols is not None:
                    # Set _col_indices. We don't store columns because
                    # they are overwritten.
self._handle_usecols(columns, names)
else:
self._col_indices = None
num_original_columns = len(names)
columns = [names]
else:
columns = self._handle_usecols(columns, columns[0])
else:
try:
line = self._buffered_line()
except StopIteration:
if not names:
raise EmptyDataError(
"No columns to parse from file")
line = names[:]
ncols = len(line)
num_original_columns = ncols
if not names:
if self.prefix:
columns = [['%s%d' % (self.prefix, i)
for i in range(ncols)]]
else:
columns = [lrange(ncols)]
columns = self._handle_usecols(columns, columns[0])
else:
if self.usecols is None or len(names) >= num_original_columns:
columns = self._handle_usecols([names], names)
num_original_columns = len(names)
else:
if (not callable(self.usecols) and
len(names) != len(self.usecols)):
raise ValueError(
'Number of passed names did not match number of '
'header fields in the file'
)
# Ignore output but set used columns.
self._handle_usecols([names], names)
columns = [names]
num_original_columns = ncols
return columns, num_original_columns
def _handle_usecols(self, columns, usecols_key):
"""
Sets self._col_indices
usecols_key is used if there are string usecols.
"""
if self.usecols is not None:
if callable(self.usecols):
col_indices = _evaluate_usecols(self.usecols, usecols_key)
elif any([isinstance(u, string_types) for u in self.usecols]):
if len(columns) > 1:
raise ValueError("If using multiple headers, usecols must "
"be integers.")
col_indices = []
for col in self.usecols:
if isinstance(col, string_types):
col_indices.append(usecols_key.index(col))
else:
col_indices.append(col)
else:
col_indices = self.usecols
columns = [[n for i, n in enumerate(column) if i in col_indices]
for column in columns]
self._col_indices = col_indices
return columns
def _buffered_line(self):
"""
Return a line from buffer, filling buffer if required.
"""
if len(self.buf) > 0:
return self.buf[0]
else:
return self._next_line()
def _check_for_bom(self, first_row):
"""
Checks whether the file begins with the BOM character.
If it does, remove it. In addition, if there is quoting
in the field subsequent to the BOM, remove it as well
because it technically takes place at the beginning of
the name, not the middle of it.
"""
# first_row will be a list, so we need to check
# that that list is not empty before proceeding.
if not first_row:
return first_row
# The first element of this row is the one that could have the
# BOM that we want to remove. Check that the first element is a
# string before proceeding.
if not isinstance(first_row[0], compat.string_types):
return first_row
# Check that the string is not empty, as that would
# obviously not have a BOM at the start of it.
if not first_row[0]:
return first_row
# Since the string is non-empty, check that it does
# in fact begin with a BOM.
first_elt = first_row[0][0]
# This is to avoid warnings we get in Python 2.x if
# we find ourselves comparing with non-Unicode
if compat.PY2 and not isinstance(first_elt, unicode): # noqa
try:
first_elt = u(first_elt)
except UnicodeDecodeError:
return first_row
if first_elt != _BOM:
return first_row
first_row = first_row[0]
if len(first_row) > 1 and first_row[1] == self.quotechar:
start = 2
quote = first_row[1]
end = first_row[2:].index(quote) + 2
# Extract the data between the quotation marks
new_row = first_row[start:end]
# Extract any remaining data after the second
# quotation mark.
if len(first_row) > end + 1:
new_row += first_row[end + 1:]
return [new_row]
elif len(first_row) > 1:
return [first_row[1:]]
else:
# First row is just the BOM, so we
# return an empty string.
return [""]
def _is_line_empty(self, line):
"""
Check if a line is empty or not.
Parameters
----------
line : str, array-like
The line of data to check.
Returns
-------
boolean : Whether or not the line is empty.
"""
return not line or all(not x for x in line)
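# Illustrative examples: _is_line_empty([]) and _is_line_empty(['', ''])
# are True, while _is_line_empty(['a', '']) is False.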
def _next_line(self):
if isinstance(self.data, list):
while self.skipfunc(self.pos):
self.pos += 1
while True:
try:
line = self._check_comments([self.data[self.pos]])[0]
self.pos += 1
# either uncommented or blank to begin with
if (not self.skip_blank_lines and
(self._is_line_empty(
self.data[self.pos - 1]) or line)):
break
elif self.skip_blank_lines:
ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
except IndexError:
raise StopIteration
else:
while self.skipfunc(self.pos):
self.pos += 1
next(self.data)
while True:
orig_line = self._next_iter_line(row_num=self.pos + 1)
self.pos += 1
if orig_line is not None:
line = self._check_comments([orig_line])[0]
if self.skip_blank_lines:
ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
elif self._is_line_empty(orig_line) or line:
break
# This was the first line of the file,
# which could contain the BOM at the
# beginning of it.
if self.pos == 1:
line = self._check_for_bom(line)
self.line_pos += 1
self.buf.append(line)
return line
def _alert_malformed(self, msg, row_num):
"""
Alert a user about a malformed row.
If `self.error_bad_lines` is True, the alert will be `ParserError`.
If `self.warn_bad_lines` is True, the alert will be printed out.
Parameters
----------
msg : The error message to display.
row_num : The row number where the parsing error occurred.
Because this row number is displayed, we 1-index,
even though we 0-index internally.
"""
if self.error_bad_lines:
raise ParserError(msg)
elif self.warn_bad_lines:
base = 'Skipping line {row_num}: '.format(row_num=row_num)
sys.stderr.write(base + msg + '\n')
def _next_iter_line(self, row_num):
"""
Wrapper around iterating through `self.data` (CSV source).
When a CSV error is raised, we check for specific
error messages that allow us to customize the
error message displayed to the user.
Parameters
----------
row_num : The row number of the line being parsed.
"""
try:
return next(self.data)
except csv.Error as e:
if self.warn_bad_lines or self.error_bad_lines:
msg = str(e)
if 'NULL byte' in msg:
msg = ('NULL byte detected. This byte '
'cannot be processed in Python\'s '
'native csv library at the moment, '
'so please pass in engine=\'c\' instead')
elif 'newline inside string' in msg:
msg = ('EOF inside string starting with '
'line ' + str(row_num))
if self.skipfooter > 0:
reason = ('Error could possibly be due to '
'parsing errors in the skipped footer rows '
'(the skipfooter keyword is only applied '
'after Python\'s csv library has parsed '
'all rows).')
msg += '. ' + reason
self._alert_malformed(msg, row_num)
return None
def _check_comments(self, lines):
if self.comment is None:
return lines
ret = []
for l in lines:
rl = []
for x in l:
if (not isinstance(x, compat.string_types) or
self.comment not in x):
rl.append(x)
else:
x = x[:x.find(self.comment)]
if len(x) > 0:
rl.append(x)
break
ret.append(rl)
return ret
def _remove_empty_lines(self, lines):
"""
Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : array-like
The array of lines that we are to filter.
Returns
-------
filtered_lines : array-like
The same array of lines with the "empty" ones removed.
"""
ret = []
for l in lines:
# Remove empty lines and lines with only one whitespace value
if (len(l) > 1 or len(l) == 1 and
(not isinstance(l[0], compat.string_types) or
l[0].strip())):
ret.append(l)
return ret
def _check_thousands(self, lines):
if self.thousands is None:
return lines
return self._search_replace_num_columns(lines=lines,
search=self.thousands,
replace='')
def _search_replace_num_columns(self, lines, search, replace):
ret = []
for l in lines:
rl = []
for i, x in enumerate(l):
if (not isinstance(x, compat.string_types) or
search not in x or
(self._no_thousands_columns and
i in self._no_thousands_columns) or
self.nonnum.search(x.strip())):
rl.append(x)
else:
rl.append(x.replace(search, replace))
ret.append(rl)
return ret
def _check_decimal(self, lines):
if self.decimal == _parser_defaults['decimal']:
return lines
return self._search_replace_num_columns(lines=lines,
search=self.decimal,
replace='.')
def _clear_buffer(self):
self.buf = []
_implicit_index = False
def _get_index_name(self, columns):
"""
Try several cases to get lines:
0) There are headers on row 0 and row 1 and their
total summed lengths equals the length of the next line.
Treat row 0 as columns and row 1 as indices
1) Look for implicit index: there are more columns
on row 1 than row 0. If this is true, assume that row
1 lists index columns and row 0 lists normal columns.
2) Get index from the columns if it was listed.
"""
orig_names = list(columns)
columns = list(columns)
try:
line = self._next_line()
except StopIteration:
line = None
try:
next_line = self._next_line()
except StopIteration:
next_line = None
# implicitly index_col=0 b/c 1 fewer column names
implicit_first_cols = 0
if line is not None:
# leave it 0, #2442
# Case 1
if self.index_col is not False:
implicit_first_cols = len(line) - self.num_original_columns
# Case 0
if next_line is not None:
if len(next_line) == len(line) + self.num_original_columns:
# column and index names on diff rows
self.index_col = lrange(len(line))
self.buf = self.buf[1:]
for c in reversed(line):
columns.insert(0, c)
# Update list of original names to include all indices.
orig_names = list(columns)
self.num_original_columns = len(columns)
return line, orig_names, columns
if implicit_first_cols > 0:
# Case 1
self._implicit_index = True
if self.index_col is None:
self.index_col = lrange(implicit_first_cols)
index_name = None
else:
# Case 2
(index_name, columns_,
self.index_col) = _clean_index_names(columns, self.index_col)
return index_name, orig_names, columns
def _rows_to_cols(self, content):
col_len = self.num_original_columns
if self._implicit_index:
col_len += len(self.index_col)
max_len = max([len(row) for row in content])
# Check that there are no rows with too many
# elements in their row (rows with too few
# elements are padded with NaN).
if (max_len > col_len and
self.index_col is not False and
self.usecols is None):
footers = self.skipfooter if self.skipfooter else 0
bad_lines = []
iter_content = enumerate(content)
content_len = len(content)
content = []
for (i, l) in iter_content:
actual_len = len(l)
if actual_len > col_len:
if self.error_bad_lines or self.warn_bad_lines:
row_num = self.pos - (content_len - i + footers)
bad_lines.append((row_num, actual_len))
if self.error_bad_lines:
break
else:
content.append(l)
for row_num, actual_len in bad_lines:
msg = ('Expected %d fields in line %d, saw %d' %
(col_len, row_num + 1, actual_len))
if (self.delimiter and
len(self.delimiter) > 1 and
self.quoting != csv.QUOTE_NONE):
# see gh-13374
reason = ('Error could possibly be due to quotes being '
'ignored when a multi-char delimiter is used.')
msg += '. ' + reason
self._alert_malformed(msg, row_num + 1)
# see gh-13320
zipped_content = list(lib.to_object_array(
content, min_width=col_len).T)
if self.usecols:
if self._implicit_index:
zipped_content = [
a for i, a in enumerate(zipped_content)
if (i < len(self.index_col) or
i - len(self.index_col) in self._col_indices)]
else:
zipped_content = [a for i, a in enumerate(zipped_content)
if i in self._col_indices]
return zipped_content
def _get_lines(self, rows=None):
lines = self.buf
new_rows = None
# already fetched some number
if rows is not None:
# we already have the lines in the buffer
if len(self.buf) >= rows:
new_rows, self.buf = self.buf[:rows], self.buf[rows:]
# need some lines
else:
rows -= len(self.buf)
if new_rows is None:
if isinstance(self.data, list):
if self.pos > len(self.data):
raise StopIteration
if rows is None:
new_rows = self.data[self.pos:]
new_pos = len(self.data)
else:
new_rows = self.data[self.pos:self.pos + rows]
new_pos = self.pos + rows
# Check for stop rows. n.b.: self.skiprows is a set.
if self.skiprows:
new_rows = [row for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)]
lines.extend(new_rows)
self.pos = new_pos
else:
new_rows = []
try:
if rows is not None:
for _ in range(rows):
new_rows.append(next(self.data))
lines.extend(new_rows)
else:
rows = 0
while True:
new_row = self._next_iter_line(
row_num=self.pos + rows + 1)
rows += 1
if new_row is not None:
new_rows.append(new_row)
except StopIteration:
if self.skiprows:
new_rows = [row for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)]
lines.extend(new_rows)
if len(lines) == 0:
raise
self.pos += len(new_rows)
self.buf = []
else:
lines = new_rows
if self.skipfooter:
lines = lines[:-self.skipfooter]
lines = self._check_comments(lines)
if self.skip_blank_lines:
lines = self._remove_empty_lines(lines)
lines = self._check_thousands(lines)
return self._check_decimal(lines)
def _make_date_converter(date_parser=None, dayfirst=False,
infer_datetime_format=False):
def converter(*date_cols):
if date_parser is None:
strs = _concat_date_cols(date_cols)
try:
return tools.to_datetime(
_ensure_object(strs),
utc=None,
box=False,
dayfirst=dayfirst,
errors='ignore',
infer_datetime_format=infer_datetime_format
)
except:
return tools.to_datetime(
parsing.try_parse_dates(strs, dayfirst=dayfirst))
else:
try:
result = tools.to_datetime(
date_parser(*date_cols), errors='ignore')
if isinstance(result, datetime.datetime):
raise Exception('scalar parser')
return result
except Exception:
try:
return tools.to_datetime(
parsing.try_parse_dates(_concat_date_cols(date_cols),
parser=date_parser,
dayfirst=dayfirst),
errors='ignore')
except Exception:
return generic_parser(date_parser, *date_cols)
return converter
def _process_date_conversion(data_dict, converter, parse_spec,
index_col, index_names, columns,
keep_date_col=False):
def _isindex(colspec):
return ((isinstance(index_col, list) and
colspec in index_col) or
(isinstance(index_names, list) and
colspec in index_names))
new_cols = []
new_data = {}
orig_names = columns
columns = list(columns)
date_cols = set()
if parse_spec is None or isinstance(parse_spec, bool):
return data_dict, columns
if isinstance(parse_spec, list):
# list of column lists
for colspec in parse_spec:
if is_scalar(colspec):
if isinstance(colspec, int) and colspec not in data_dict:
colspec = orig_names[colspec]
if _isindex(colspec):
continue
data_dict[colspec] = converter(data_dict[colspec])
else:
new_name, col, old_names = _try_convert_dates(
converter, colspec, data_dict, orig_names)
if new_name in data_dict:
raise ValueError('New date column already in dict %s' %
new_name)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
elif isinstance(parse_spec, dict):
# dict of new name to column list
for new_name, colspec in compat.iteritems(parse_spec):
if new_name in data_dict:
raise ValueError('Date column %s already in dict' %
new_name)
_, col, old_names = _try_convert_dates(converter, colspec,
data_dict, orig_names)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
data_dict.update(new_data)
new_cols.extend(columns)
if not keep_date_col:
for c in list(date_cols):
data_dict.pop(c)
new_cols.remove(c)
return data_dict, new_cols
def _try_convert_dates(parser, colspec, data_dict, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int) and c not in columns:
colnames.append(columns[c])
else:
colnames.append(c)
new_name = '_'.join([str(x) for x in colnames])
to_parse = [data_dict[c] for c in colnames if c in data_dict]
new_col = parser(*to_parse)
return new_name, new_col, colnames
def _clean_na_values(na_values, keep_default_na=True):
if na_values is None:
if keep_default_na:
na_values = _NA_VALUES
else:
na_values = set()
na_fvalues = set()
elif isinstance(na_values, dict):
na_values = na_values.copy() # Prevent aliasing.
if keep_default_na:
for k, v in compat.iteritems(na_values):
if not is_list_like(v):
v = [v]
v = set(v) | _NA_VALUES
na_values[k] = v
na_fvalues = dict([
(k, _floatify_na_values(v)) for k, v in na_values.items() # noqa
])
else:
if not is_list_like(na_values):
na_values = [na_values]
na_values = _stringify_na_values(na_values)
if keep_default_na:
na_values = na_values | _NA_VALUES
na_fvalues = _floatify_na_values(na_values)
return na_values, na_fvalues
def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
return None, columns, index_col
columns = list(columns)
cp_cols = list(columns)
index_names = []
# don't mutate
index_col = list(index_col)
for i, c in enumerate(index_col):
if isinstance(c, compat.string_types):
index_names.append(c)
for j, name in enumerate(cp_cols):
if name == c:
index_col[i] = j
columns.remove(name)
break
else:
name = cp_cols[c]
columns.remove(name)
index_names.append(name)
# hack
if isinstance(index_names[0], compat.string_types)\
and 'Unnamed' in index_names[0]:
index_names[0] = None
return index_names, columns, index_col
def _get_empty_meta(columns, index_col, index_names, dtype=None):
columns = list(columns)
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
if not isinstance(dtype, dict):
# if dtype is None, the default will be np.object.
default_dtype = dtype or np.object
dtype = defaultdict(lambda: default_dtype)
else:
# Save a copy of the dictionary.
_dtype = dtype.copy()
dtype = defaultdict(lambda: np.object)
# Convert column indexes to column names.
for k, v in compat.iteritems(_dtype):
col = columns[k] if is_integer(k) else k
dtype[col] = v
if index_col is None or index_col is False:
index = Index([])
else:
data = [Series([], dtype=dtype[name]) for name in index_names]
index = _ensure_index_from_sequences(data, names=index_names)
index_col.sort()
for i, n in enumerate(index_col):
columns.pop(n - i)
col_dict = dict((col_name,
Series([], dtype=dtype[col_name]))
for col_name in columns)
return index, columns, col_dict
def _floatify_na_values(na_values):
# create float versions of the na_values
result = set()
for v in na_values:
try:
v = float(v)
if not np.isnan(v):
result.add(v)
except:
pass
return result
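# Illustrative example: _floatify_na_values({'', 'N/A', '-1.5'}) keeps
# only the value that parses as a finite float and returns {-1.5}.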
def _stringify_na_values(na_values):
""" return a stringified and numeric for these values """
result = []
for x in na_values:
result.append(str(x))
result.append(x)
try:
v = float(x)
# for an integer-like value such as 999, also add "999.0"
if v == int(v):
v = int(v)
result.append("%s.0" % v)
result.append(str(v))
result.append(v)
except:
pass
try:
result.append(int(x))
except:
pass
return set(result)
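# Illustrative example: _stringify_na_values([999]) returns
# {'999', '999.0', 999}, so both the string and numeric spellings
# of the value are matched during parsing.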
def _get_na_values(col, na_values, na_fvalues):
if isinstance(na_values, dict):
if col in na_values:
return na_values[col], na_fvalues[col]
else:
return _NA_VALUES, set()
else:
return na_values, na_fvalues
def _get_col_names(colspec, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int):
colnames.append(columns[c])
return colnames
def _concat_date_cols(date_cols):
if len(date_cols) == 1:
if compat.PY3:
return np.array([compat.text_type(x) for x in date_cols[0]],
dtype=object)
else:
return np.array([
str(x) if not isinstance(x, compat.string_types) else x
for x in date_cols[0]
], dtype=object)
rs = np.array([' '.join([compat.text_type(y) for y in x])
for x in zip(*date_cols)], dtype=object)
return rs
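# Illustrative example: _concat_date_cols((['2001'], ['01'], ['05']))
# joins the columns row-wise and returns
# np.array(['2001 01 05'], dtype=object).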
class FixedWidthReader(BaseIterator):
"""
A reader of fixed-width lines.
"""
def __init__(self, f, colspecs, delimiter, comment, skiprows=None):
self.f = f
self.buffer = None
self.delimiter = '\r\n' + delimiter if delimiter else '\n\r\t '
self.comment = comment
if colspecs == 'infer':
self.colspecs = self.detect_colspecs(skiprows=skiprows)
else:
self.colspecs = colspecs
if not isinstance(self.colspecs, (tuple, list)):
raise TypeError("column specifications must be a list or tuple, "
"input was a %r" % type(colspecs).__name__)
for colspec in self.colspecs:
if not (isinstance(colspec, (tuple, list)) and
len(colspec) == 2 and
isinstance(colspec[0], (int, np.integer, type(None))) and
isinstance(colspec[1], (int, np.integer, type(None)))):
raise TypeError('Each column specification must be '
'a 2-element tuple or list of integers')
def get_rows(self, n, skiprows=None):
"""
Read rows from self.f, skipping as specified.
We distinguish buffer_rows (the first <= n lines)
from the rows returned to detect_colspecs because
it's simpler to leave the other locations with
skiprows logic alone than to modify them to deal
with the fact we skipped some rows here as well.
Parameters
----------
n : int
Number of rows to read from self.f, not counting
rows that are skipped.
skiprows: set, optional
Indices of rows to skip.
Returns
-------
detect_rows : list of str
A list containing the rows to read.
"""
if skiprows is None:
skiprows = set()
buffer_rows = []
detect_rows = []
for i, row in enumerate(self.f):
if i not in skiprows:
detect_rows.append(row)
buffer_rows.append(row)
if len(detect_rows) >= n:
break
self.buffer = iter(buffer_rows)
return detect_rows
def detect_colspecs(self, n=100, skiprows=None):
# Regex escape the delimiters
delimiters = ''.join([r'\%s' % x for x in self.delimiter])
pattern = re.compile('([^%s]+)' % delimiters)
rows = self.get_rows(n, skiprows)
if not rows:
raise EmptyDataError("No rows from which to infer column width")
max_len = max(map(len, rows))
mask = np.zeros(max_len + 1, dtype=int)
if self.comment is not None:
rows = [row.partition(self.comment)[0] for row in rows]
for row in rows:
for m in pattern.finditer(row):
mask[m.start():m.end()] = 1
shifted = np.roll(mask, 1)
shifted[0] = 0
edges = np.where((mask ^ shifted) == 1)[0]
edge_pairs = list(zip(edges[::2], edges[1::2]))
return edge_pairs
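# Worked sketch of the edge detection above (illustrative rows,
# default whitespace delimiters): for the two rows
#   'aa  bbb'
#   'cc  ddd'
# the non-delimiter mask is 1100111 (plus a trailing 0), XOR-ing it
# with its shifted copy marks the transitions, and the resulting
# half-open column spans are [(0, 2), (4, 7)].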
def __next__(self):
if self.buffer is not None:
try:
line = next(self.buffer)
except StopIteration:
self.buffer = None
line = next(self.f)
else:
line = next(self.f)
# Note: 'colspecs' is a sequence of half-open intervals.
return [line[fromm:to].strip(self.delimiter)
for (fromm, to) in self.colspecs]
class FixedWidthFieldParser(PythonParser):
"""
Specialization that converts fixed-width fields into DataFrames.
See PythonParser for details.
"""
def __init__(self, f, **kwds):
# Support iterators, convert to a list.
self.colspecs = kwds.pop('colspecs')
PythonParser.__init__(self, f, **kwds)
def _make_reader(self, f):
self.data = FixedWidthReader(f, self.colspecs, self.delimiter,
self.comment, self.skiprows)
|
NixaSoftware/CVis
|
venv/lib/python2.7/site-packages/pandas/io/parsers.py
|
Python
|
apache-2.0
| 121,901
|
from honeybee.hbsurface import HBSurface as AnalysisSurface
import geometryoperation as go
import config
class HBSurface(AnalysisSurface):
"""Honeybee surface.
Args:
name: A unique string for surface name
sortedPoints: A list of 3 points or more as tuple or list with three items
(x, y, z). Points should be sorted. This class won't sort the points.
If the surface has multiple subsurfaces you can pass lists of point lists
to this function (e.g. ((0, 0, 0), (10, 0, 0), (0, 10, 0))).
surfaceType: Optional input for surface type. You can use any of the surface
types available from surfacetype libraries or use a float number to
indicate the type. If not indicated it will be assigned based on normal
angle of the surface which will be calculated from surface points.
0.0: Wall            0.5: UndergroundWall
1.0: Roof            1.5: UndergroundCeiling
2.0: Floor           2.25: UndergroundSlab
2.5: SlabOnGrade     2.75: ExposedFloor
3.0: Ceiling         4.0: AirWall
6.0: Context
isNameSetByUser: If you don't want the name to be changed by Honeybee in
any case, set isNameSetByUser to True. Default is set to False, which lets
Honeybee rename the surface in cases like creating a new HBZone.
radProperties: Radiance properties for this surface. If empty default
RADProperties will be assigned to surface by Honeybee.
epProperties: EnergyPlus properties for this surface. If empty default
epProperties will be assigned to surface by Honeybee.
"""
def __init__(self, name, sortedPoints, surfaceType=None,
isNameSetByUser=False, isTypeSetByUser=False,
radProperties=None, epProperties=None,
isCreatedFromGeometry=False):
"""Create a honeybee surface for Grasshopper."""
AnalysisSurface.__init__(
self, name, sortedPoints=sortedPoints, surfaceType=surfaceType,
isNameSetByUser=isNameSetByUser, isTypeSetByUser=isTypeSetByUser,
radProperties=radProperties, epProperties=epProperties)
self.isCreatedFromGeometry = isCreatedFromGeometry
@classmethod
def fromGeometry(cls, name, geometry, surfaceType=None,
isNameSetByUser=False, isTypeSetByUser=False,
radProperties=None, epProperties=None,
isCreatedFromGeometry=True):
"Create a honeybee surface from Grasshopper or Dynamo geometry."
_pts = go.extractSurfacePointsFromGeometry(geometry)
_srf = cls(name, _pts, surfaceType, isNameSetByUser, isTypeSetByUser,
radProperties, epProperties, isCreatedFromGeometry)
_srf.geometry = geometry
return _srf
@property
def geometry(self):
"""Return geometry."""
if self.isCreatedFromGeometry:
return self.__geometry
else:
return self.profile
@geometry.setter
def geometry(self, geo):
"""Set geometry."""
self.__geometry = geo
@property
def profile(self):
"""Get profile curve of this surface."""
return go.polygon(tuple(go.xyzToGeometricalPoints(self.absolutePoints)))
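# Hedged usage sketch (illustrative values; geometry inputs normally
# come from the host CAD environment via fromGeometry):
#   pts = ((0, 0, 0), (10, 0, 0), (10, 10, 0), (0, 10, 0))
#   srf = HBSurface('wall_01', pts, surfaceType=0.0)  # 0.0 -> Wall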
|
antonszilasi/honeybeex
|
honeybeex/hbsurface.py
|
Python
|
gpl-3.0
| 3,372
|
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Serializer for plugins tasks"""
from nailgun import consts
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.orchestrator.tasks_serializer import get_uids_for_roles
from nailgun.orchestrator.tasks_serializer import get_uids_for_tasks
import nailgun.orchestrator.tasks_templates as templates
from nailgun.plugins.manager import PluginManager
from nailgun.settings import settings
class BasePluginDeploymentHooksSerializer(object):
# TODO(dshulyak) refactor it to be consistent with task_serializer
def __init__(self, cluster, nodes):
self.cluster = cluster
self.nodes = nodes
def deployment_tasks(self, plugins, stage):
tasks = []
plugin_tasks = []
sorted_plugins = sorted(plugins, key=lambda p: p.plugin.name)
for plugin in sorted_plugins:
stage_tasks = filter(
lambda t: t['stage'].startswith(stage), plugin.tasks)
plugin_tasks.extend(self._set_tasks_defaults(plugin, stage_tasks))
sorted_tasks = self._sort_by_stage_postfix(plugin_tasks)
for task in sorted_tasks:
make_task = None
uids = get_uids_for_roles(self.nodes, task['role'])
if not uids:
continue
if task['type'] == 'shell':
make_task = templates.make_shell_task
elif task['type'] == 'puppet':
make_task = templates.make_puppet_task
elif task['type'] == 'reboot':
make_task = templates.make_reboot_task
else:
logger.warn('Task {0} is skipped, because its type is '
'not supported'.format(task))
if make_task:
tasks.append(self._serialize_task(make_task(uids, task), task))
return tasks
def _set_tasks_defaults(self, plugin, tasks):
for task in tasks:
self._set_task_defaults(plugin, task)
return tasks
def _set_task_defaults(self, plugin, task):
task['parameters'].setdefault('cwd', plugin.slaves_scripts_path)
task.setdefault('diagnostic_name', plugin.full_name)
task.setdefault('fail_on_error', True)
return task
def _serialize_task(self, task, default_task):
task.update({
'diagnostic_name': default_task['diagnostic_name'],
'fail_on_error': default_task['fail_on_error']})
return task
def serialize_task(self, plugin, task):
return self._serialize_task(
self._set_task_defaults(plugin, task), task)
def _sort_by_stage_postfix(self, tasks):
"""Sorts tasks in the correct order by task postfixes,
for example here are several tasks' stages:
stage: post_deployment/100
stage: post_deployment
stage: post_deployment/-100
The method returns tasks in the next order
stage: post_deployment/-100
stage: post_deployment # because by default postifx is 0
stage: post_deployment/100
"""
def postfix(task):
stage_list = task['stage'].split('/')
postfix = stage_list[-1] if len(stage_list) > 1 else 0
try:
postfix = float(postfix)
except ValueError:
logger.warn(
'Task %s has non numeric postfix "%s", set to 0',
task, postfix)
postfix = 0
return postfix
return sorted(tasks, key=postfix)
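# Illustrative example of the postfix extraction above:
#   postfix({'stage': 'post_deployment/-100'}) -> -100.0
#   postfix({'stage': 'post_deployment'})      -> 0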
class PluginsPreDeploymentHooksSerializer(BasePluginDeploymentHooksSerializer):
def serialize(self):
tasks = []
plugins = PluginManager.get_cluster_plugins_with_tasks(self.cluster)
tasks.extend(self.create_repositories(plugins))
tasks.extend(self.sync_scripts(plugins))
tasks.extend(self.deployment_tasks(plugins))
return tasks
def _get_node_uids_for_plugin_tasks(self, plugin):
# TODO(aroma): remove concatenation of tasks when unified way of
# processing will be introduced for deployment tasks and existing
# plugin tasks
tasks_to_process = plugin.tasks + plugin.deployment_tasks
uids = get_uids_for_tasks(self.nodes, tasks_to_process)
# NOTE(aroma): pre-deployment tasks should not be executed on
# the master node because in some cases this leads to errors:
# the commands that need to be run are not compatible with the
# master node OS (CentOS). An example of such a situation:
# creating a repository executes `apt-get update`, which fails
# on CentOS
if consts.MASTER_ROLE in uids:
uids.remove(consts.MASTER_ROLE)
return uids
def create_repositories(self, plugins):
operating_system = self.cluster.release.operating_system
repo_tasks = []
for plugin in plugins:
uids = self._get_node_uids_for_plugin_tasks(plugin)
# If there are no nodes for task execution
# or if there are no files in the repository
if not uids or not plugin.repo_files(self.cluster):
continue
if operating_system == consts.RELEASE_OS.centos:
repo = self.get_centos_repo(plugin)
repo_tasks.append(
self.serialize_task(
plugin,
templates.make_centos_repo_task(uids, repo)))
elif operating_system == consts.RELEASE_OS.ubuntu:
repo = self.get_ubuntu_repo(plugin)
repo_tasks.append(
self.serialize_task(
plugin,
templates.make_ubuntu_sources_task(uids, repo)))
# do not add preferences task to task list if we can't
# complete it (e.g. can't retrieve or parse Release file)
task = templates.make_ubuntu_preferences_task(uids, repo)
if task is not None:
repo_tasks.append(self.serialize_task(plugin, task))
# apt-get update is executed after every additional source.list
# to be able to understand which plugin's source.list caused an error
repo_tasks.append(
self.serialize_task(
plugin,
templates.make_apt_update_task(uids)))
else:
raise errors.InvalidOperatingSystem(
'Operating system {0} is invalid'.format(operating_system))
return repo_tasks
def sync_scripts(self, plugins):
tasks = []
for plugin in plugins:
uids = self._get_node_uids_for_plugin_tasks(plugin)
if not uids:
continue
tasks.append(
self.serialize_task(
plugin,
templates.make_sync_scripts_task(
uids,
plugin.master_scripts_path(self.cluster),
plugin.slaves_scripts_path)))
return tasks
def deployment_tasks(self, plugins):
return super(
PluginsPreDeploymentHooksSerializer, self).\
deployment_tasks(plugins, consts.STAGES.pre_deployment)
def get_centos_repo(self, plugin):
return {
'type': 'rpm',
'name': plugin.full_name,
'uri': plugin.repo_url(self.cluster),
'priority': settings.REPO_PRIORITIES['plugins']['centos']}
def get_ubuntu_repo(self, plugin):
return {
'type': 'deb',
'name': plugin.full_name,
'uri': plugin.repo_url(self.cluster),
'suite': '/',
'section': '',
'priority': settings.REPO_PRIORITIES['plugins']['ubuntu']}
class PluginsPostDeploymentHooksSerializer(
BasePluginDeploymentHooksSerializer):
def serialize(self):
tasks = []
plugins = PluginManager.get_cluster_plugins_with_tasks(self.cluster)
tasks.extend(self.deployment_tasks(plugins))
return tasks
def deployment_tasks(self, plugins):
return super(
PluginsPostDeploymentHooksSerializer, self).\
deployment_tasks(plugins, consts.STAGES.post_deployment)
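# Hedged usage sketch (cluster and nodes are nailgun ORM objects
# supplied by the caller; names are illustrative):
#   pre_tasks = PluginsPreDeploymentHooksSerializer(cluster, nodes).serialize()
#   post_tasks = PluginsPostDeploymentHooksSerializer(cluster, nodes).serialize()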
|
SmartInfrastructures/fuel-web-dev
|
nailgun/nailgun/orchestrator/plugins_serializers.py
|
Python
|
apache-2.0
| 8,920
|
# -*- coding: utf-8 -*-
#
# 2021-02-04 Timo Sturm <timo.sturm@netknights.it>
# Fix import of yubikeys from yubico
# 2020-11-11 Timo Sturm <timo.sturm@netknights.it>
# Select how to validate PSKC imports
# 2018-05-10 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add fileversion to OATH CSV
# 2017-11-24 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Generate the encryption key for PSKC export
# in the HSM
# 2017-10-17 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Allow export to pskc file
# 2017-01-23 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Avoid XML bombs
# 2016-07-17 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add GPG encrpyted import
# 2016-01-16 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add PSKC import with pre shared key
# 2015-05-28 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add PSKC import
# 2014-12-11 Cornelius Kölbel <cornelius@privacyidea.org>
# code cleanup during flask migration
# 2014-10-27 Cornelius Kölbel <cornelius@privacyidea.org>
# add parsePSKCdata
# 2014-05-08 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''This file is part of the privacyidea service
It is used for importing SafeNet (former Aladdin)
XML files, that hold the OTP secrets for eToken PASS.
'''
import hmac, hashlib
import defusedxml.ElementTree as etree
import re
import binascii
import base64
import html
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from privacyidea.lib.utils import (modhex_decode, modhex_encode,
hexlify_and_unicode, to_unicode, to_utf8,
b64encode_and_unicode)
from privacyidea.lib.config import get_token_class
from privacyidea.lib.log import log_with
from privacyidea.lib.crypto import (aes_decrypt_b64, aes_encrypt_b64, geturandom)
from bs4 import BeautifulSoup
import traceback
from passlib.crypto.digest import pbkdf2_hmac
import gnupg
from os import path
import logging
log = logging.getLogger(__name__)
def _create_static_password(key_hex):
'''
According to yubikey manual 5.5.5 the static-ticket is the same
algorithm with no moving factors.
The msg_hex that is encoded with the AES key is
'000000000000ffffffffffffffff0f2e'
'''
msg_hex = "000000000000ffffffffffffffff0f2e"
msg_bin = binascii.unhexlify(msg_hex)
cipher = Cipher(algorithms.AES(binascii.unhexlify(key_hex)),
modes.ECB(), default_backend())
encryptor = cipher.encryptor()
password_bin = encryptor.update(msg_bin) + encryptor.finalize()
password = modhex_encode(password_bin)
return password
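# Hedged usage sketch (the AES key below is a made-up example value):
#   pw = _create_static_password('00' * 16)
# returns the modhex-encoded AES-ECB encryption of the fixed message,
# i.e. the static password the device would emit for that key.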
class ImportException(Exception):
def __init__(self, description):
self.description = description
def __str__(self):
return ('{0!s}'.format(self.description))
def getTagName(elem):
match = re.match("^({.*?})(.*)$", elem.tag)
if match:
return match.group(2)
else:
return elem.tag
@log_with(log)
def parseOATHcsv(csv):
'''
(#653)
This function parses CSV data for oath token.
The file format is
for HOTP
serial, key, hotp, [6|8], [counter]
for TOTP
serial, key, totp, [6|8], [30|60]
for OCRA
serial, key, ocra, [ocra-suite]
for TAN
serial, key, tan, tan1 tan2 tan3 tan4
It imports SHA1 HOTP or TOTP tokens.
It can also import OCRA tokens.
The default type is hotp.
If totp is set, the default time step is 30 seconds.
If ocra is set, an ocra-suite is required; otherwise the default
ocra-suite is used.
It returns a dictionary:
{
serial: { 'type' : xxxx,
'otpkey' : xxxx,
'timeStep' : xxxx,
'otplen' : xxx,
'ocrasuite' : xxx }
}
'''
TOKENS = {}
version = 0
csv_array = csv.split('\n')
m = re.match(r"^#\s*version:\s*(\d+)", csv_array[0])
if m:
version = m.group(1)
log.debug("the file is version {0}.".format(version))
log.debug("the file contains {0:d} lines.".format(len(csv_array)))
for line in csv_array:
# Do not parse comment lines
if line.startswith("#"):
continue
l = line.split(',')
# Do not parse empty lines; they could be [] or ['']
if len(l) <= 1:
continue
# Import the user
user = {}
if version == "2":
# extract the user from the first three columns
user["username"] = l.pop(0).strip()
user["resolver"] = l.pop(0).strip()
user["realm"] = l.pop(0).strip()
# check for empty serial
serial = l[0].strip()
if len(serial) > 0:
if len(l) < 2:
log.error("the line {0!s} did not contain a hotp key".format(line))
continue
# ttype
if len(l) == 2:
# No tokentype, take the default "hotp"
l.append("hotp")
ttype = l[2].strip().lower()
tok_class = get_token_class(ttype)
params = tok_class.get_import_csv(l)
log.debug("read the line {0!s}".format(params))
params["user"] = user
TOKENS[serial] = params
return TOKENS
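# Hedged usage sketch (illustrative version-2 file with one HOTP
# token; the exact params come from the token class's get_import_csv):
#   data = ("# version: 2\n"
#           "hans,resolver1,realm1,tok001,3132333435363738,hotp,6\n")
#   tokens = parseOATHcsv(data)  # -> {'tok001': {...}}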
@log_with(log)
def parseYubicoCSV(csv):
'''
This function reads the CSV data as created by the Yubico personalization
GUI.
Traditional Format:
Yubico OTP,12/11/2013 11:10,1,vvgutbiedkvi,
ab86c04de6a3,d26a7c0f85fdda28bd816e406342b214,,,0,0,0,0,0,0,0,0,0,0
OATH-HOTP,11.12.13 18:55,1,cccccccccccc,,
916821d3a138bf855e70069605559a206ba854cd,,,0,0,0,6,0,0,0,0,0,0
Static Password,11.12.13 19:08,1,,d5a3d50327dc,
0e8e37b0e38b314a56748c030f58d21d,,,0,0,0,0,0,0,0,0,0,0
Yubico Format:
# OATH mode
508326,,0,69cfb9202438ca68964ec3244bfa4843d073a43b,,2013-12-12T08:41:07,
1382042,,0,bf7efc1c8b6f23604930a9ce693bdd6c3265be00,,2013-12-12T08:41:17,
# Yubico mode
508326,cccccccccccc,83cebdfb7b93,a47c5bf9c152202f577be6721c0113af,,
2013-12-12T08:43:17,
# static mode
508326,,,9e2fd386224a7f77e9b5aee775464033,,2013-12-12T08:44:34,
column 0: serial
column 1: public ID in yubico mode
column 2: private ID in yubico mode, 0 in OATH mode, blank in static mode
column 3: AES key
BUMMER: The Yubico Format does not contain the information
which slot of the token was written.
If no public ID or serial is given, we can not import the token, as the
returned dictionary needs the token serial as a key.
It returns a dictionary with the new tokens to be created:
{
serial: { 'type' : yubico,
'otpkey' : xxxx,
'otplen' : xxx,
'description' : xxx
}
}
'''
TOKENS = {}
csv_array = csv.split('\n')
log.debug("the file contains {0:d} tokens.".format(len(csv_array)))
for line in csv_array:
l = line.split(',')
serial = ""
key = ""
otplen = 32
public_id = ""
slot = ""
if len(l) >= 6:
first_column = l[0].strip()
if first_column.lower() in ["yubico otp",
"oath-hotp",
"static password"]:
# traditional format
typ = l[0].strip()
slot = l[2].strip()
public_id = l[3].strip()
key = l[5].strip()
if public_id == "":
# Usually a "static password" does not have a public ID!
# So we would bail out here for static passwords.
log.warning("No public ID in line {0!r}".format(line))
continue
serial_int = int(binascii.hexlify(modhex_decode(public_id)),
16)
if typ.lower() == "yubico otp":
ttype = "yubikey"
otplen = 32 + len(public_id)
serial = "UBAM{0:08d}_{1!s}".format(serial_int, slot)
TOKENS[serial] = {'type': ttype,
'otpkey': key,
'otplen': otplen,
'description': public_id
}
elif typ.lower() == "oath-hotp":
'''
WARNING: this does not work out at the moment, since the
Yubico GUI either
1. creates a serial in the CSV, but then the serial is
always prefixed! We can not authenticate with this!
2. if it does not prefix the serial there is no serial in
the CSV! We can not import and assign the token!
'''
ttype = "hotp"
otplen = 6
serial = "UBOM{0:08d}_{1!s}".format(serial_int, slot)
TOKENS[serial] = {'type': ttype,
'otpkey': key,
'otplen': otplen,
'description': public_id
}
else:
log.warning("at the moment we do only support Yubico OTP"
" and HOTP: %r" % line)
continue
elif first_column.isdigit():
# first column is a number (the serial number), so we are
# in the yubico format
serial = first_column
# the yubico format does not specify a slot
slot = "X"
key = l[3].strip()
if l[2].strip() == "0":
# HOTP
typ = "hotp"
serial = "UBOM{0!s}_{1!s}".format(serial, slot)
otplen = 6
elif l[2].strip() == "":
# Static
typ = "pw"
serial = "UBSM{0!s}_{1!s}".format(serial, slot)
key = _create_static_password(key)
otplen = len(key)
log.warning("We can not enroll a static mode, since we do"
" not know the private identify and so we do"
" not know the static password.")
continue
else:
# Yubico
typ = "yubikey"
serial = "UBAM{0!s}_{1!s}".format(serial, slot)
public_id = l[1].strip()
otplen = 32 + len(public_id)
TOKENS[serial] = {'type': typ,
'otpkey': key,
'otplen': otplen,
'description': public_id
}
else:
log.warning("the line {0!r} did not contain a enough values".format(line))
continue
return TOKENS
@log_with(log)
def parseSafeNetXML(xml):
"""
This function parses XML data of an Aladdin/SafeNet XML
file for eToken PASS
It returns a dictionary of
serial : { otpkey , counter, type }
"""
TOKENS = {}
try:
elem_tokencontainer = etree.fromstring(xml)
except etree.ParseError as e:
log.debug(traceback.format_exc())
raise ImportException('Could not parse XML data: {0!s}'.format(e))
if getTagName(elem_tokencontainer) != "Tokens":
raise ImportException("No toplevel element Tokens")
for elem_token in list(elem_tokencontainer):
SERIAL = None
COUNTER = None
HMAC = None
DESCRIPTION = None
if getTagName(elem_token) == "Token":
SERIAL = elem_token.get("serial")
log.debug("Found token with serial {0!s}".format(SERIAL))
for elem_tdata in list(elem_token):
tag = getTagName(elem_tdata)
if "ProductName" == tag:
DESCRIPTION = elem_tdata.text
log.debug("The Token with the serial %s has the "
"productname %s" % (SERIAL, DESCRIPTION))
if "Applications" == tag:
for elem_apps in elem_tdata:
if getTagName(elem_apps) == "Application":
for elem_app in elem_apps:
tag = getTagName(elem_app)
if "Seed" == tag:
HMAC = elem_app.text
if "MovingFactor" == tag:
COUNTER = elem_app.text
if not SERIAL:
log.error("Found token without a serial")
else:
if HMAC:
hashlib = "sha1"
if len(HMAC) == 64:
hashlib = "sha256"
TOKENS[SERIAL] = {'otpkey': HMAC,
'counter': COUNTER,
'type': 'hotp',
'hashlib': hashlib
}
else:
log.error("Found token {0!s} without a element 'Seed'".format(
SERIAL))
return TOKENS
def strip_prefix_from_soup(xml_soup):
"""
We strip prefixes from the XML tags.
<pskc:encryption>
</pskc:encryption>
results in:
<encryption>
</encryption>
:param xml_soup: Beautiful Soup XML with tags with prefixes
:type xml_soup: Beautiful Soup object
:return: Beautiful Soup without prefixes in the tags
"""
# strip the prefixes from the tags!
for tag in xml_soup.findAll():
if tag.name.find(":") >= 1:
prefix, name = tag.name.split(":")
tag.name = name
return xml_soup
def derive_key(xml, password):
"""
Derive the encryption key from the password with the parameters given
in the XML soup.
:param xml: The XML
:param password: the password
:return: The derived key, hexlified
"""
if not password:
raise ImportException("The XML KeyContainer specifies a derived "
"encryption key, but no password given!")
keymeth = xml.keycontainer.encryptionkey.derivedkey.keyderivationmethod
derivation_algo = keymeth["algorithm"].split("#")[-1]
if derivation_algo.lower() != "pbkdf2":
raise ImportException("We only support PBKDF2 as Key derivation "
"function!")
salt = keymeth.find("salt").text.strip()
keylength = keymeth.find("keylength").text.strip()
rounds = keymeth.find("iterationcount").text.strip()
r = pbkdf2_hmac('sha1', to_utf8(password), base64.b64decode(salt),
rounds=int(rounds), keylen=int(keylength))
return binascii.hexlify(r)
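# Minimal sketch of the derivation above (values illustrative): with
# salt, keylength=16 and iterationcount=1000 read from the XML, the
# result is hexlify(PBKDF2-HMAC-SHA1(password, b64decode(salt),
# rounds=1000, keylen=16)).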
@log_with(log)
def parsePSKCdata(xml_data,
preshared_key_hex=None,
password=None,
validate_mac='check_fail_hard',
do_checkserial=False):
"""
This function parses XML data of a PSKC file, (RFC6030)
It can read
* AES-128-CBC encrypted (preshared_key_bin) data
* password based encrypted data
* plain text data
:param xml_data: The XML data
:type xml_data: basestring
:param preshared_key_hex: The preshared key, hexlified
:param password: The password that encrypted the keys
:param do_checkserial: Check if the serial numbers conform to the OATH
specification (not yet implemented)
:param validate_mac: Operation mode of hmac validation. Possible values:
- 'check_fail_hard' : If an invalid hmac is encountered no token gets parsed.
- 'check_fail_soft' : Skip tokens with invalid MAC.
- 'no_check' : Hmac of tokens are not checked, every token is parsed.
:return: tuple of a dictionary of token dictionaries and a list of serial of not imported tokens
{ serial : { otpkey , counter, .... }}, [serial, serial, ...]
"""
abort = False
not_imported_serials = []
tokens = {}
xml = strip_prefix_from_soup(BeautifulSoup(xml_data, "lxml"))
if not xml.keycontainer:
raise ImportException("No KeyContainer found in PSKC data. Could not "
"import any tokens.")
if xml.keycontainer.encryptionkey and \
xml.keycontainer.encryptionkey.derivedkey:
# If we have a password we also need a tag EncryptionKey in the
# KeyContainer
preshared_key_hex = derive_key(xml, password)
key_packages = xml.keycontainer.findAll("keypackage")
for key_package in key_packages:
token = {}
key = key_package.key
try:
token["description"] = key_package.deviceinfo.manufacturer.string
except Exception as exx:
log.debug("Can not get manufacturer string {0!s}".format(exx))
algo = key["algorithm"]
serial = key["id"]
# Special treatment for pskc files exported from Yubico
yubi_mapping = {"http://www.yubico.com/#yubikey-aes": ("yubikey", "UBAM"),
"urn:ietf:params:xml:ns:keyprov:pskc:hotp": ("hotp", "UBOM")}
if algo in yubi_mapping.keys() and re.match(r"\d+:\d+",
serial): # check if the serial fits the pattern "<SerialNo>:<Slot>"
t_type = yubi_mapping[algo][0]
serial_split = serial.split(":")
serial_no = serial_split[0]
slot = serial_split[1]
serial = "{!s}{!s}_{!s}".format(yubi_mapping[algo][1], serial_no, slot)
else:
try:
serial = key_package.deviceinfo.serialno.string.strip()
except Exception as exx:
log.debug("Can not get serial string from device info {0!s}".format(exx))
t_type = algo.split(":")[-1].lower()
token["type"] = t_type
parameters = key.algorithmparameters
token["otplen"] = parameters.responseformat["length"] or 6
# token["hashlib"] = parameters.suite or "sha1"
hash_lib = "sha1"
# Check if hashlib is explicitly set in file
if parameters.suite and parameters.suite.string:
hash_lib = parameters.suite.string.lower()
else:
log.warning("No hashlib defined, falling back to default {}.".format(hash_lib))
token["hashlib"] = hash_lib
try:
if key.data.secret.plainvalue:
secret = key.data.secret.plainvalue.string
token["otpkey"] = hexlify_and_unicode(base64.b64decode(secret))
elif key.data.secret.encryptedvalue:
encryptionmethod = key.data.secret.encryptedvalue.encryptionmethod
enc_algorithm = encryptionmethod["algorithm"].split("#")[-1]
if enc_algorithm.lower() != "aes128-cbc":
raise ImportException("We only import PSKC files with "
"AES128-CBC.")
enc_data = key.data.secret.encryptedvalue.ciphervalue.text
enc_data = enc_data.strip()
preshared_key = binascii.unhexlify(preshared_key_hex)
secret = aes_decrypt_b64(preshared_key, enc_data)
if token["type"].lower() in ["hotp", "totp"]:
token["otpkey"] = hexlify_and_unicode(secret)
elif token["type"].lower() in ["pw"]:
token["otpkey"] = to_unicode(secret)
else:
token["otpkey"] = to_unicode(secret)
if validate_mac != 'no_check':
# Validate MAC:
encrypted_mac_key = xml.keycontainer.find("mackey").text
mac_key = aes_decrypt_b64(preshared_key, encrypted_mac_key)
enc_data_bin = base64.b64decode(enc_data)
hm = hmac.new(key=mac_key, msg=enc_data_bin, digestmod=hashlib.sha1)
mac_value_calculated = b64encode_and_unicode(hm.digest())
mac_value_xml = key.data.find('valuemac').text.strip()
is_invalid = not hmac.compare_digest(mac_value_xml, mac_value_calculated)
if is_invalid and validate_mac == 'check_fail_hard':
abort = True
elif is_invalid and validate_mac == 'check_fail_soft':
not_imported_serials.append(serial)
continue
except Exception as exx:
log.error("Failed to import tokendata: {0!s}".format(exx))
log.debug(traceback.format_exc())
raise ImportException("Failed to import tokendata. Wrong "
"encryption key? %s" % exx)
if token["type"] in ["hotp", "totp"] and key.data.counter:
token["counter"] = key.data.counter.text.strip()
if token["type"] == "totp":
if key.data.timeinterval:
token["timeStep"] = key.data.timeinterval.text.strip()
if key.data.timedrift:
token["timeShift"] = key.data.timedrift.text.strip()
tokens[serial] = token
if abort:
not_imported_serials = tokens.keys()
tokens = {} # reset tokens
return tokens, not_imported_serials
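# Hedged usage sketch (the key below is a made-up example value):
#   tokens, skipped = parsePSKCdata(xml_data,
#                                   preshared_key_hex='12345678901234567890123456789012',
#                                   validate_mac='check_fail_soft')
# 'skipped' then lists the serials whose MAC did not validate.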
class GPGImport(object):
"""
This class is used to decrypt GPG encrypted import files.
The decrypt method returns the unencrypted files.
Create the keypair like this:
GNUPGHOME=/etc/privacyidea/gpg gpg --gen-key
"""
def __init__(self, config=None):
self.config = config or {}
self.gnupg_home = self.config.get("PI_GNUPG_HOME",
"/etc/privacyidea/gpg")
if path.isdir(self.gnupg_home):
self.gpg = gnupg.GPG(gnupghome=self.gnupg_home)
self.private_keys = self.gpg.list_keys(True)
else:
log.warning(u"Directory {} does not exists!".format(self.gnupg_home))
def get_publickeys(self):
"""
This returns the public GPG key to be displayed in the Import Dialog.
The administrator can send this public key to his token vendor and
the token vendor can use this public key to encrypt the token import
file.
:return: a dictionary of public keys with fingerprint
"""
public_keys = {}
if path.isdir(self.gnupg_home):
keys = self.gpg.list_keys(secret=True)
else:
keys = []
log.warning(u"Directory {} does not exists!".format(self.gnupg_home))
for key in keys:
ascii_armored_public_key = self.gpg.export_keys(key.get("keyid"))
public_keys[key.get("keyid")] = {"armor": ascii_armored_public_key,
"fingerprint": key.get(
"fingerprint")}
return public_keys
def decrypt(self, input_data):
"""
Decrypts the input data with one of the private keys.
Since this functionality is only used for decrypting import lists, the
decrypted data is assumed to be of type text and thus converted to unicode.
:param input_data: The data to decrypt
:type input_data: str or bytes
:return: The decrypted input_data
:rtype: str
"""
decrypted = self.gpg.decrypt(message=input_data)
if not decrypted.ok:
log.error(u"Decrpytion failed: {0!s}. {1!s}".format(
decrypted.status, decrypted.stderr))
raise Exception(decrypted.stderr)
return to_unicode(decrypted.data)
def export_pskc(tokenobj_list, psk=None):
"""
Take a list of token objects and create a beautifulsoup xml object.
If no preshared key is given, we create one and return it.
:param tokenobj_list: list of token objects
:param psk: pre-shared-key for AES-128-CBC in hex format
:return: tuple of (psk, number of tokens, beautifulsoup)
"""
if psk:
psk = binascii.unhexlify(psk)
else:
psk = geturandom(16)
mackey = geturandom(20)
encrypted_mackey = aes_encrypt_b64(psk, mackey)
number_of_exported_tokens = 0
# define the header
soup = BeautifulSoup("""<KeyContainer Version="1.0"
xmlns="urn:ietf:params:xml:ns:keyprov:pskc"
xmlns:ds="http://www.w3.org/2000/09/xmldsig#"
xmlns:xenc="http://www.w3.org/2001/04/xmlenc#">
<EncryptionKey>
<ds:KeyName>Pre-shared-key</ds:KeyName>
</EncryptionKey>
<MACMethod Algorithm="http://www.w3.org/2000/09/xmldsig#hmac-sha1">
<MACKey>
<xenc:EncryptionMethod
Algorithm="http://www.w3.org/2001/04/xmlenc#aes128-cbc"/>
<xenc:CipherData>
<xenc:CipherValue>{encrypted_mackey}</xenc:CipherValue>
</xenc:CipherData>
</MACKey>
</MACMethod>
""".format(encrypted_mackey=encrypted_mackey), "html.parser")
for tokenobj in tokenobj_list:
if tokenobj.type.lower() not in ["totp", "hotp", "pw"]:
continue
type = tokenobj.type.lower()
issuer = "privacyIDEA"
try:
manufacturer = tokenobj.token.description.encode("ascii", "replace")
manufacturer = to_unicode(manufacturer)
except UnicodeEncodeError:
manufacturer = "deleted during export"
serial = tokenobj.token.serial
otplen = tokenobj.token.otplen
counter = tokenobj.token.count
suite = tokenobj.get_tokeninfo("hashlib", default="sha1")
if type == "totp":
timestep = tokenobj.get_tokeninfo("timeStep")
timedrift = tokenobj.get_tokeninfo("timeShift")
else:
timestep = 0
timedrift = 0
otpkey = tokenobj.token.get_otpkey().getKey()
try:
if tokenobj.type.lower() in ["totp", "hotp"]:
encrypted_otpkey = aes_encrypt_b64(psk, binascii.unhexlify(otpkey))
elif tokenobj.type.lower() in ["pw"]:
encrypted_otpkey = aes_encrypt_b64(psk, otpkey)
else:
encrypted_otpkey = aes_encrypt_b64(psk, otpkey)
hm = hmac.new(key=mackey, msg=base64.b64decode(encrypted_otpkey), digestmod=hashlib.sha1)
mac_value = b64encode_and_unicode(hm.digest())
except TypeError:
# Some keys might have an odd string length
continue
try:
kp2 = BeautifulSoup("""<KeyPackage>
<DeviceInfo>
<Manufacturer>{manufacturer}</Manufacturer>
<SerialNo>{serial}</SerialNo>
</DeviceInfo>
<Key Id="{serial}"
Algorithm="urn:ietf:params:xml:ns:keyprov:pskc:{type}">
<Issuer>{issuer}</Issuer>
<AlgorithmParameters>
<ResponseFormat Length="{otplen}" Encoding="DECIMAL"/>
<Suite hashalgo="{suite}" />
</AlgorithmParameters>
<Data>
<Secret>
<EncryptedValue>
<xenc:EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#aes128-cbc"/>
<xenc:CipherData>
<xenc:CipherValue>{encrypted_otpkey}</xenc:CipherValue>
</xenc:CipherData>
</EncryptedValue>
<ValueMAC>{value_mac}</ValueMAC>
</Secret>
<Time>
<PlainValue>0</PlainValue>
</Time>
<TimeInterval>
<PlainValue>{timestep}</PlainValue>
</TimeInterval>
<Counter>
<PlainValue>{counter}</PlainValue>
</Counter>
<TimeDrift>
<PlainValue>{timedrift}</PlainValue>
</TimeDrift>
</Data>
</Key>
</KeyPackage>""".format(serial=html.escape(serial), type=html.escape(type), otplen=otplen,
issuer=html.escape(issuer), manufacturer=html.escape(manufacturer),
counter=counter, timestep=timestep, encrypted_otpkey=encrypted_otpkey,
timedrift=timedrift, value_mac=mac_value,
suite=html.escape(suite)), "html.parser")
soup.macmethod.insert_after(kp2)
number_of_exported_tokens += 1
except Exception as e:
log.warning(u"Failed to export the token {0!s}: {1!s}".format(serial, e))
tb = traceback.format_exc()
log.debug(tb)
return hexlify_and_unicode(psk), number_of_exported_tokens, soup
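# Hedged usage sketch (tokenobj_list comes from the surrounding
# application; the file name is illustrative):
#   psk_hex, count, soup = export_pskc(tokenobj_list)
#   with open('export.pskc', 'w') as f:
#       f.write(str(soup))
# The returned psk_hex must be handed to the importing side, e.g. as
# preshared_key_hex for parsePSKCdata().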
|
privacyidea/privacyidea
|
privacyidea/lib/importotp.py
|
Python
|
agpl-3.0
| 30,394
|
import chinese
|
yueranyuan/vector_edu
|
learntools/kt/__init__.py
|
Python
|
mit
| 15
|
"""Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Unit tests for big_query_client.
Temp tables from tests are created in the dataset specified by TEMP_DATASET_ID,
by default 'samples_mart_temp'. They are expected to be cleaned up, either
during the test, or in the tearDown. If samples_mart_temp is filling up with
tables, this may be indicative of customer-facing service failures. If tests
are not running, you can use the BQ UI to delete and re-create the dataset.
"""
__author__ = 'joemu@google.com (Joe Allan Muharsky)'
import hashlib
import logging
import random
import time
import unittest
from apiclient.errors import HttpError
import httplib2
import mox
import pytest
from perfkit import test_util
from perfkit.common import big_query_client
from perfkit.common import credentials_lib
from perfkit.common import data_source_config
TEMP_DATASET_ID = big_query_client.TEMP_DATASET_ID
def BuildHttpError(status):
"""Build a HttpError with the specified failure status."""
response = httplib2.Response(
{'status': status, 'content-type': 'application/json'})
return HttpError(response, '{}')
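# Illustrative example: BuildHttpError(500) yields an HttpError whose
# response reports status 500 and whose body is an empty JSON object.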
class MockRequest(object):
"""Mock request, used to test _ExecuteRequestWithRetries."""
def __init__(self):
self.request_count = 0
self.errors = []
# Override default execute() behavior. If an error is on the stack, pop and
# raise it.
def execute(self):
self.request_count += 1
if self.errors:
raise self.errors.pop()
class BigQueryClientTest(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
test_util.SetConfigPaths()
self.client = big_query_client.BigQueryClient(
credentials_lib.DEFAULT_CREDENTIALS,
data_source_config.Environments.TESTING)
self.temp_tables = set()
def tearDown(self):
self.mox.UnsetStubs()
# For now, if any tables were left around due to failed tests, delete them.
# If these tables turn out to be useful for debugging purposes, this
# may be updated to move them to a different dataset.
for table_ref in self.temp_tables:
dataset_name = table_ref[0]
table_name = table_ref[1]
logging.error('Unexpected temp table remaining at test end: %s.%s',
dataset_name, table_name)
self.client.DeleteTable(dataset_name=dataset_name, table_name=table_name)
# TODO: Move to a utility class for managing and deleting temp tables.
def AddTempTableRef(self):
"""Adds a reference to a temp table to a tracking dictionary.
This is used to maintain a list of tables created during testing, so that
they can be cleaned up at the end of the run in the event of a test failure.
TEMP_DATASET_ID will be used as the dataset.
Returns:
A string that is a unique name for a table.
"""
table_name = self.client.GetRandomTableName()
# table_name should be guaranteed random.
self.assertFalse((TEMP_DATASET_ID, table_name) in self.temp_tables)
self.temp_tables.add((TEMP_DATASET_ID, table_name))
return table_name
# TODO: Move to a utility class for managing and deleting temp tables.
def RemoveTempTableRef(self, table_name):
"""Removes a reference from a temp table from the tracking dictionary.
This method should be called when a test has verified or expects that the
temp table has been cleaned up. It verifies that the referenced table
was recorded in the first place (via AddTempTableRef()), and that it does
not exist in BigQuery. If either of these conditions fails, it is an
indicator of a test or product issue. Temp tables are expected to be
stored in the dataset specified by TEMP_DATASET_ID.
Args:
table_name: The name of the temp table to delete.
"""
self.assertTrue((TEMP_DATASET_ID, table_name) in self.temp_tables)
self.assertFalse(
self.client.TableExists(dataset_name=TEMP_DATASET_ID,
table_name=table_name),
'Expected table {dataset}.{table} to be deleted. Table found.'.format(
dataset=TEMP_DATASET_ID,
table=table_name))
self.temp_tables.discard((TEMP_DATASET_ID, table_name))
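  # Typical lifecycle of the two helpers above, as exercised by the
  # integration tests below (illustrative outline, not an executed test):
  #   table_name = self.AddTempTableRef()   # reserve and track a random name
  #   ...create or copy data into TEMP_DATASET_ID.<table_name>...
  #   self.client.DeleteTable(dataset_name=TEMP_DATASET_ID,
  #                           table_name=table_name)
  #   self.RemoveTempTableRef(table_name)   # verify deletion and untrack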
@pytest.mark.integration
def testQuery(self):
query = ('SELECT number, letter, symbol '
'FROM unit_test_data.query_test '
'WHERE number <> 1')
rows = self.client.Query(query, max_results_per_page=100)['rows']
expected_rows = [{u'f': [{u'v': 2}, {u'v': u'b'}, {u'v': u'@'}]},
{u'f': [{u'v': 3}, {u'v': u'c'}, {u'v': u'#'}]}]
self.assertEquals(expected_rows, rows)
@pytest.mark.integration
def testQueryMultiPage(self):
query = ('SELECT number, letter, symbol '
'FROM unit_test_data.query_test '
'WHERE number <> 1')
rows = self.client.Query(query, max_results_per_page=1)['rows']
expected_rows = [{u'f': [{u'v': 2}, {u'v': u'b'}, {u'v': u'@'}]},
{u'f': [{u'v': 3}, {u'v': u'c'}, {u'v': u'#'}]}]
self.assertEquals(expected_rows, rows)
@pytest.mark.integration
def testCopyTable(self):
table_name = self.AddTempTableRef()
self.client.CopyTable(source_dataset='unit_test_data',
source_table='query_test',
destination_dataset=TEMP_DATASET_ID,
destination_table=table_name)
def _CallbackHandler(reply):
rows = reply['rows']
expected_rows = [{u'f': [{u'v': '1'}, {u'v': u'a'}, {u'v': u'!'}]},
{u'f': [{u'v': '2'}, {u'v': u'b'}, {u'v': u'@'}]},
{u'f': [{u'v': '3'}, {u'v': u'c'}, {u'v': u'#'}]}]
self.assertEquals(expected_rows, rows)
self.client.ListTableData(dataset_name=TEMP_DATASET_ID,
table_name=table_name,
page_callback=_CallbackHandler)
self.client.DeleteTable(dataset_name=TEMP_DATASET_ID,
table_name=table_name)
time.sleep(30)
self.RemoveTempTableRef(table_name=table_name)
@pytest.mark.integration
def testCopyTableMissingSource(self):
destination_table = self.client.GetRandomTableName()
source_table = self.client.GetRandomTableName()
self.assertFalse(
self.client.TableExists(dataset_name='unit_test_data',
table_name=source_table),
'The source table was found, and should not exist for this test.')
self.assertRaises(
big_query_client.NoTableError,
self.client.CopyTable,
source_dataset='unit_test_data',
source_table=source_table,
destination_dataset=TEMP_DATASET_ID,
destination_table=destination_table)
@pytest.mark.integration
def testDeleteTable(self):
table_name = self.AddTempTableRef()
# Initially, the table should not exist.
self.assertFalse(self.client.TableExists(dataset_name=TEMP_DATASET_ID,
table_name=table_name))
# Delete should gracefully return false in this case.
self.assertFalse(self.client.DeleteTable(dataset_name=TEMP_DATASET_ID,
table_name=table_name))
# Make a copy of the results table, and store it as the target table.
self.client.CopyTable(source_table='results',
source_dataset='samples_mart_testdata',
destination_table=table_name,
destination_dataset=TEMP_DATASET_ID)
# Delete the table.
self.assertTrue(self.client.DeleteTable(dataset_name=TEMP_DATASET_ID,
table_name=table_name))
self.assertFalse(self.client.TableExists(dataset_name=TEMP_DATASET_ID,
table_name=table_name))
self.RemoveTempTableRef(table_name)
@pytest.mark.integration
def testTableExists(self):
table_name = self.AddTempTableRef()
# Initially, the table should not exist.
self.assertFalse(self.client.TableExists(dataset_name=TEMP_DATASET_ID,
table_name=table_name))
# Make a copy of the results table, and store it as the target table.
self.client.CopyTable(source_table='results',
source_dataset='samples_mart_testdata',
destination_table=table_name,
destination_dataset=TEMP_DATASET_ID)
# Verify the table exists.
self.assertTrue(self.client.TableExists(dataset_name=TEMP_DATASET_ID,
table_name=table_name))
# Delete the table.
self.client.DeleteTable(dataset_name=TEMP_DATASET_ID,
table_name=table_name)
# Verify the table no longer exists.
self.assertFalse(self.client.TableExists(dataset_name=TEMP_DATASET_ID,
table_name=table_name))
@pytest.mark.integration
def testListTableData(self):
def VerifyListTable(reply):
self.assertEquals(len(reply['rows']), 3)
# Verify the first and last row/column values.
self.assertEquals(reply['rows'][0]['f'][0]['v'], '1')
self.assertEquals(reply['rows'][2]['f'][2]['v'], '#')
self.client.ListTableData(dataset_name='unit_test_data',
table_name='query_test',
page_callback=VerifyListTable)
@pytest.mark.integration
def testListTableDataMultiPage(self):
self.page_counter = 0
def VerifyListTable(reply):
expected_reply_rows = [
[{u'f': [{u'v': '1'}, {u'v': u'a'}, {u'v': u'!'}]},
{u'f': [{u'v': '2'}, {u'v': u'b'}, {u'v': u'@'}]}],
[{u'f': [{u'v': '3'}, {u'v': u'c'}, {u'v': u'#'}]}]]
self.assertTrue(self.page_counter <= len(expected_reply_rows),
'Extra pages encountered.')
expected_rows = expected_reply_rows[self.page_counter]
self.page_counter += 1
self.assertEquals(expected_rows, reply['rows'])
self.client.ListTableData(dataset_name='unit_test_data',
table_name='query_test',
page_callback=VerifyListTable,
max_results_per_page=2)
self.assertEquals(self.page_counter, 2)
@pytest.mark.integration
def testQueryLargeResults(self):
def _CallbackHandler(reply):
self.assertEquals(len(reply['rows']), 2)
# Verify the first and last row/column values.
self.assertEquals(reply['rows'][0]['f'][0]['v'], '2')
self.assertEquals(reply['rows'][-1]['f'][-1]['v'], '#')
query = ('SELECT number, letter, symbol '
'FROM unit_test_data.query_test '
'WHERE number <> 1')
self.client.QueryLargeResults(query=query,
page_callback=_CallbackHandler)
@pytest.mark.integration
def testQueryLargeResultsExplicitTable(self):
"""Verifies that the BigQuery works with a specific table name.
This also verifies that the table is cleaned up after the query is done.
"""
query = ('SELECT number, letter, symbol '
'FROM unit_test_data.query_test')
temp_table_name = self.AddTempTableRef()
self.assertFalse(self.client.TableExists(
dataset_name=TEMP_DATASET_ID,
table_name=temp_table_name))
def _CallbackHandler(unused_reply):
# Assert to suppress unused parameter lint warning.
self.assertIsNotNone(unused_reply)
# Verify that the temp table exists.
self.assertTrue(self.client.TableExists(
dataset_name=TEMP_DATASET_ID,
table_name=temp_table_name))
self.client.QueryLargeResults(query=query,
page_callback=_CallbackHandler,
temp_dataset_name=TEMP_DATASET_ID,
temp_table_name=temp_table_name)
self.assertFalse(self.client.TableExists(dataset_name=TEMP_DATASET_ID,
table_name=temp_table_name))
self.RemoveTempTableRef(table_name=temp_table_name)
@pytest.mark.integration
def testQueryInto(self):
query = 'SELECT number, letter, symbol FROM unit_test_data.query_test'
table_name = self.AddTempTableRef()
self.client.QueryInto(query,
destination_dataset=TEMP_DATASET_ID,
destination_table=table_name,
write_disposition='WRITE_TRUNCATE')
def _CallbackHandler(reply):
rows = reply['rows']
expected_rows = [{u'f': [{u'v': '1'}, {u'v': u'a'}, {u'v': u'!'}]},
{u'f': [{u'v': '2'}, {u'v': u'b'}, {u'v': u'@'}]},
{u'f': [{u'v': '3'}, {u'v': u'c'}, {u'v': u'#'}]}]
self.assertEquals(expected_rows, rows)
self.client.ListTableData(dataset_name=TEMP_DATASET_ID,
table_name=table_name,
page_callback=_CallbackHandler)
self.client.DeleteTable(dataset_name=TEMP_DATASET_ID,
table_name=table_name)
self.RemoveTempTableRef(table_name=table_name)
@pytest.mark.query
def testSampleQueryResultsMaxErrors(self):
query_results = {'totalRows': 50, 'rows': [0] * 100}
self.assertRaises(big_query_client.SamplingError,
self.client.SampleQueryResultsMax, query_results, 50)
query_results = {'totalRows': 100, 'rows': [0] * 100}
self.assertRaises(big_query_client.SamplingError,
self.client.SampleQueryResultsMax, query_results, -1)
self.assertRaises(big_query_client.SamplingError,
self.client.SampleQueryResultsMax, query_results, 200)
@pytest.mark.query
def testSampleQueryResultsMaxBelow(self):
query_results = {'totalRows': 5, 'rows': range(5)}
sampled_results = self.client.SampleQueryResultsMax(query_results, 5)
self.assertEquals(query_results, sampled_results)
@pytest.mark.query
def testSampleQueryResultsMax(self):
self.mox.StubOutWithMock(random, 'sample')
query_results = {'totalRows': 5, 'rows': range(5)}
random.sample(query_results['rows'], 3).AndReturn([1, 3, 4])
self.mox.ReplayAll()
results = self.client.SampleQueryResultsMax(query_results, 3)
self.mox.VerifyAll()
self.assertEquals({'totalRows': 3, 'rows': [1, 3, 4]}, results)
@pytest.mark.query
def testSampleQueryResultsMaxCount(self):
query_results = {'totalRows': 100, 'rows': range(100)}
results = self.client.SampleQueryResultsMax(query_results, 10)
self.assertEquals(10, len(results['rows']))
self.assertEquals(10, results['totalRows'])
@pytest.mark.query
def testSampleQueryResultsMaxRepeatable(self):
query_results = {'totalRows': 100, 'rows': range(100)}
first_results = self.client.SampleQueryResultsMax(query_results, 10)
second_results = self.client.SampleQueryResultsMax(query_results, 10)
self.assertEquals(first_results, second_results)
@pytest.mark.query
def testSampleQueryResultsFractionErrors(self):
query_results = {'totalRows': 100, 'rows': [0] * 100}
self.assertRaises(big_query_client.SamplingError,
self.client.SampleQueryResultsFraction, query_results,
1.1)
self.assertRaises(big_query_client.SamplingError,
self.client.SampleQueryResultsFraction, query_results,
-1)
@pytest.mark.query
def testSampleQueryResultsFraction(self):
self.mox.StubOutWithMock(big_query_client.BigQueryClient,
'SampleQueryResultsMax')
query_results = {'totalRows': 5, 'rows': range(5)}
expected_results = {'totalRows': 2, 'rows': [2, 3]}
big_query_client.BigQueryClient.SampleQueryResultsMax(
query_results, 3).AndReturn(expected_results)
self.mox.ReplayAll()
results = self.client.SampleQueryResultsFraction(query_results, .6)
self.mox.VerifyAll()
self.assertEquals(expected_results, results)
@pytest.mark.jobs
def testBuildJobId(self):
files = ['gs://perfkit_test_data/0000000099995',
'gs://perfkit_test_data/0000000099996',
'gs://perfkit_test_data/0000000099997']
hasher = hashlib.sha1()
hasher.update(''.join(files))
hashed_file_names = hasher.hexdigest()
expected_id = ('load_job_0_0_gs-perfkit_test_data-0000000099995_'
'gs-perfkit_test_data-0000000099997_' + hashed_file_names)
actual_id = big_query_client.BigQueryClient.BuildJobIdString(files, 0, 0)
self.assertEquals(expected_id, actual_id)
files = ['gs://perfkit_test_data/0000000099995']
hasher = hashlib.sha1()
hasher.update(''.join(files))
hashed_file_names = hasher.hexdigest()
expected_id = ('load_job_2_3_gs-perfkit_test_data-0000000099995_'
'gs-perfkit_test_data-0000000099995_' + hashed_file_names)
actual_id = big_query_client.BigQueryClient.BuildJobIdString(files, 2, 3)
self.assertEquals(expected_id, actual_id)
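  # Job id format implied by the expected values above (inferred from the
  # fixtures, not authoritative):
  #   load_job_<arg2>_<arg3>_<first file>_<last file>_<sha1 of joined names>
  # where the GCS separators ('://' and '/') appear collapsed to '-'.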
@pytest.mark.formatting
def testFormatDataForTemplates(self):
source_data = {
'schema': {
'fields': [{'name': 'col1'},
{'name': 'col2'}]},
'rows': [{'f': [{'v': 'foo'},
{'v': 'bar'}]}]}
expected_data = {
'schema': {
'fields': [{'name': 'col1'},
{'name': 'col2'}]},
'rows': [{'col1': 'foo', 'col2': 'bar'}]}
big_query_client.BigQueryClient.FormatDataForTemplates(source_data)
self.assertEqual(source_data, expected_data)
@pytest.mark.integration
def testGetByJobId(self):
job = self.client.GetJobByID('job_95d2c143954e4975a7c9c0731203a91a')
self.assertEquals('1354925437485', job['statistics']['endTime'])
@pytest.mark.execute
def testExecuteRequestWithRetriesPass(self):
request = MockRequest()
self.client._ExecuteRequestWithRetries(request, num_tries=1)
self.assertEquals(1, request.request_count)
@pytest.mark.execute
def testExecuteRequestWithRetriesPassAfterRetry(self):
request = MockRequest()
request.errors = [HttpError(
{'status': big_query_client.BigQueryClient.RETRYABLE_ERRORS[0]},
'message')]
self.client._ExecuteRequestWithRetries(request, num_tries=2)
self.assertEquals(2, request.request_count)
@pytest.mark.execute
def testExecuteRequestWithRetriesErrorAndNoRetries(self):
request = MockRequest()
request.errors = [HttpError(
{'status': big_query_client.BigQueryClient.RETRYABLE_ERRORS[0]},
'message')]
self.assertRaises(
HttpError, self.client._ExecuteRequestWithRetries, request,
num_tries=1)
@pytest.mark.execute
def testExecuteRequestWithRetriesNonRetryableError(self):
request = MockRequest()
request.errors = [HttpError({'status': 999}, 'message')]
self.assertRaises(
HttpError, self.client._ExecuteRequestWithRetries, request,
num_tries=2)
@pytest.mark.formatting
def testFormatQueryAllOptionalArgs(self):
select_args = ['test', 'metric as other_thing']
from_args = ['samples_mart.results']
where_args = ['target<>""', 'owner="jeffsl"']
group_args = ['test', 'other_thing']
order_args = ['test', 'other_sort']
row_limit = 1000
expected_query = (
'SELECT\n\ttest,\n\tmetric as other_thing\n'
'FROM samples_mart.results\n'
'WHERE\n\ttarget<>"" AND\n\towner="jeffsl"\n'
'GROUP BY\n\ttest,\n\tother_thing\n'
'ORDER BY\n\ttest,\n\tother_sort\n'
'LIMIT 1000')
query = big_query_client.BigQueryClient.FormatQuery(
select_args=select_args,
from_args=from_args,
where_args=where_args,
group_args=group_args,
order_args=order_args,
row_limit=row_limit)
self.assertEquals(expected_query, query)
@pytest.mark.formatting
def testFormatQueryNoOptionalArgs(self):
select_args = ['test', 'metric as other_thing']
from_args = ['samples_mart.results']
expected_query = (
'SELECT\n\ttest,\n\tmetric as other_thing\n'
'FROM samples_mart.results')
query = big_query_client.BigQueryClient.FormatQuery(select_args, from_args)
self.assertEquals(expected_query, query)
if __name__ == '__main__':
unittest.main()
|
dq922/PerfKitExplorer
|
server/perfkit/common/big_query_client_test.py
|
Python
|
apache-2.0
| 20,931
|
import struct, os, subprocess, sys, tempfile, time
from subprocess import check_call
from bup import git
from bup.helpers import *
from wvtest import *
top_dir = os.path.realpath('../../..')
bup_exe = top_dir + '/bup'
bup_tmp = top_dir + '/t/tmp'
def exc(*cmd):
cmd_str = ' '.join(cmd)
print >> sys.stderr, cmd_str
check_call(cmd)
def exo(*cmd):
cmd_str = ' '.join(cmd)
print >> sys.stderr, cmd_str
return readpipe(cmd)
@wvtest
def testmangle():
afile = 0100644
afile2 = 0100770
alink = 0120000
adir = 0040000
adir2 = 0040777
WVPASSEQ(git.mangle_name("a", adir2, adir), "a")
WVPASSEQ(git.mangle_name(".bup", adir2, adir), ".bup.bupl")
WVPASSEQ(git.mangle_name("a.bupa", adir2, adir), "a.bupa.bupl")
WVPASSEQ(git.mangle_name("b.bup", alink, alink), "b.bup.bupl")
WVPASSEQ(git.mangle_name("b.bu", alink, alink), "b.bu")
WVPASSEQ(git.mangle_name("f", afile, afile2), "f")
WVPASSEQ(git.mangle_name("f.bup", afile, afile2), "f.bup.bupl")
WVPASSEQ(git.mangle_name("f.bup", afile, adir), "f.bup.bup")
WVPASSEQ(git.mangle_name("f", afile, adir), "f.bup")
WVPASSEQ(git.demangle_name("f.bup", afile), ("f", git.BUP_CHUNKED))
WVPASSEQ(git.demangle_name("f.bupl", afile), ("f", git.BUP_NORMAL))
WVPASSEQ(git.demangle_name("f.bup.bupl", afile), ("f.bup", git.BUP_NORMAL))
WVPASSEQ(git.demangle_name(".bupm", afile), ('', git.BUP_NORMAL))
WVPASSEQ(git.demangle_name(".bupm", adir), ('', git.BUP_CHUNKED))
# for safety, we ignore .bup? suffixes we don't recognize. Future
# versions might implement a .bup[a-z] extension as something other
# than BUP_NORMAL.
WVPASSEQ(git.demangle_name("f.bupa", afile), ("f.bupa", git.BUP_NORMAL))
@wvtest
def testencode():
s = 'hello world'
looseb = ''.join(git._encode_looseobj('blob', s))
looset = ''.join(git._encode_looseobj('tree', s))
loosec = ''.join(git._encode_looseobj('commit', s))
packb = ''.join(git._encode_packobj('blob', s))
packt = ''.join(git._encode_packobj('tree', s))
packc = ''.join(git._encode_packobj('commit', s))
WVPASSEQ(git._decode_looseobj(looseb), ('blob', s))
WVPASSEQ(git._decode_looseobj(looset), ('tree', s))
WVPASSEQ(git._decode_looseobj(loosec), ('commit', s))
WVPASSEQ(git._decode_packobj(packb), ('blob', s))
WVPASSEQ(git._decode_packobj(packt), ('tree', s))
WVPASSEQ(git._decode_packobj(packc), ('commit', s))
@wvtest
def testpacks():
initial_failures = wvfailure_count()
tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tgit-')
os.environ['BUP_MAIN_EXE'] = bup_exe
os.environ['BUP_DIR'] = bupdir = tmpdir + "/bup"
git.init_repo(bupdir)
git.verbose = 1
w = git.PackWriter()
w.new_blob(os.urandom(100))
w.new_blob(os.urandom(100))
w.abort()
w = git.PackWriter()
hashes = []
nobj = 1000
for i in range(nobj):
hashes.append(w.new_blob(str(i)))
log('\n')
nameprefix = w.close()
print repr(nameprefix)
WVPASS(os.path.exists(nameprefix + '.pack'))
WVPASS(os.path.exists(nameprefix + '.idx'))
r = git.open_idx(nameprefix + '.idx')
print repr(r.fanout)
for i in range(nobj):
WVPASS(r.find_offset(hashes[i]) > 0)
WVPASS(r.exists(hashes[99]))
WVFAIL(r.exists('\0'*20))
pi = iter(r)
for h in sorted(hashes):
WVPASSEQ(str(pi.next()).encode('hex'), h.encode('hex'))
WVFAIL(r.find_offset('\0'*20))
r = git.PackIdxList(bupdir + '/objects/pack')
WVPASS(r.exists(hashes[5]))
WVPASS(r.exists(hashes[6]))
WVFAIL(r.exists('\0'*20))
if wvfailure_count() == initial_failures:
subprocess.call(['rm', '-rf', tmpdir])
@wvtest
def test_pack_name_lookup():
initial_failures = wvfailure_count()
tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tgit-')
os.environ['BUP_MAIN_EXE'] = bup_exe
os.environ['BUP_DIR'] = bupdir = tmpdir + "/bup"
git.init_repo(bupdir)
git.verbose = 1
packdir = git.repo('objects/pack')
idxnames = []
hashes = []
for start in range(0,28,2):
w = git.PackWriter()
for i in range(start, start+2):
hashes.append(w.new_blob(str(i)))
log('\n')
idxnames.append(os.path.basename(w.close() + '.idx'))
r = git.PackIdxList(packdir)
WVPASSEQ(len(r.packs), 2)
for e,idxname in enumerate(idxnames):
for i in range(e*2, (e+1)*2):
WVPASSEQ(r.exists(hashes[i], want_source=True), idxname)
if wvfailure_count() == initial_failures:
subprocess.call(['rm', '-rf', tmpdir])
@wvtest
def test_long_index():
initial_failures = wvfailure_count()
tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tgit-')
os.environ['BUP_MAIN_EXE'] = bup_exe
os.environ['BUP_DIR'] = bupdir = tmpdir + "/bup"
git.init_repo(bupdir)
w = git.PackWriter()
obj_bin = struct.pack('!IIIII',
0x00112233, 0x44556677, 0x88990011, 0x22334455, 0x66778899)
obj2_bin = struct.pack('!IIIII',
0x11223344, 0x55667788, 0x99001122, 0x33445566, 0x77889900)
obj3_bin = struct.pack('!IIIII',
0x22334455, 0x66778899, 0x00112233, 0x44556677, 0x88990011)
pack_bin = struct.pack('!IIIII',
0x99887766, 0x55443322, 0x11009988, 0x77665544, 0x33221100)
idx = list(list() for i in xrange(256))
idx[0].append((obj_bin, 1, 0xfffffffff))
idx[0x11].append((obj2_bin, 2, 0xffffffffff))
idx[0x22].append((obj3_bin, 3, 0xff))
(fd,name) = tempfile.mkstemp(suffix='.idx', dir=git.repo('objects'))
os.close(fd)
w.count = 3
r = w._write_pack_idx_v2(name, idx, pack_bin)
i = git.PackIdxV2(name, open(name, 'rb'))
WVPASSEQ(i.find_offset(obj_bin), 0xfffffffff)
WVPASSEQ(i.find_offset(obj2_bin), 0xffffffffff)
WVPASSEQ(i.find_offset(obj3_bin), 0xff)
if wvfailure_count() == initial_failures:
os.remove(name)
subprocess.call(['rm', '-rf', tmpdir])
@wvtest
def test_check_repo_or_die():
initial_failures = wvfailure_count()
orig_cwd = os.getcwd()
tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tgit-')
os.environ['BUP_DIR'] = bupdir = tmpdir + "/bup"
try:
os.chdir(tmpdir)
git.init_repo(bupdir)
git.check_repo_or_die()
WVPASS('check_repo_or_die') # if we reach this point the call above passed
os.rename(bupdir + '/objects/pack', bupdir + '/objects/pack.tmp')
open(bupdir + '/objects/pack', 'w').close()
try:
git.check_repo_or_die()
except SystemExit as e:
WVPASSEQ(e.code, 14)
else:
WVFAIL()
os.unlink(bupdir + '/objects/pack')
os.rename(bupdir + '/objects/pack.tmp', bupdir + '/objects/pack')
try:
git.check_repo_or_die('nonexistantbup.tmp')
except SystemExit as e:
WVPASSEQ(e.code, 15)
else:
WVFAIL()
finally:
os.chdir(orig_cwd)
if wvfailure_count() == initial_failures:
subprocess.call(['rm', '-rf', tmpdir])
@wvtest
def test_commit_parsing():
def restore_env_var(name, val):
if val is None:
del os.environ[name]
else:
os.environ[name] = val
def showval(commit, val):
return readpipe(['git', 'show', '-s',
'--pretty=format:%s' % val, commit]).strip()
initial_failures = wvfailure_count()
orig_cwd = os.getcwd()
tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tgit-')
workdir = tmpdir + "/work"
repodir = workdir + '/.git'
orig_author_name = os.environ.get('GIT_AUTHOR_NAME')
orig_author_email = os.environ.get('GIT_AUTHOR_EMAIL')
orig_committer_name = os.environ.get('GIT_COMMITTER_NAME')
orig_committer_email = os.environ.get('GIT_COMMITTER_EMAIL')
os.environ['GIT_AUTHOR_NAME'] = 'bup test'
os.environ['GIT_COMMITTER_NAME'] = os.environ['GIT_AUTHOR_NAME']
os.environ['GIT_AUTHOR_EMAIL'] = 'bup@a425bc70a02811e49bdf73ee56450e6f'
os.environ['GIT_COMMITTER_EMAIL'] = os.environ['GIT_AUTHOR_EMAIL']
try:
readpipe(['git', 'init', workdir])
os.environ['GIT_DIR'] = os.environ['BUP_DIR'] = repodir
git.check_repo_or_die(repodir)
os.chdir(workdir)
with open('foo', 'w') as f:
print >> f, 'bar'
readpipe(['git', 'add', '.'])
readpipe(['git', 'commit', '-am', 'Do something',
'--author', 'Someone <someone@somewhere>',
'--date', 'Sat Oct 3 19:48:49 2009 -0400'])
commit = readpipe(['git', 'show-ref', '-s', 'master']).strip()
parents = showval(commit, '%P')
tree = showval(commit, '%T')
cname = showval(commit, '%cn')
cmail = showval(commit, '%ce')
cdate = showval(commit, '%ct')
coffs = showval(commit, '%ci')
coffs = coffs[-5:]
coff = (int(coffs[-4:-2]) * 60 * 60) + (int(coffs[-2:]) * 60)
if coffs[-5] == '-':
coff = - coff
commit_items = git.get_commit_items(commit, git.cp())
WVPASSEQ(commit_items.parents, [])
WVPASSEQ(commit_items.tree, tree)
WVPASSEQ(commit_items.author_name, 'Someone')
WVPASSEQ(commit_items.author_mail, 'someone@somewhere')
WVPASSEQ(commit_items.author_sec, 1254613729)
WVPASSEQ(commit_items.author_offset, -(4 * 60 * 60))
WVPASSEQ(commit_items.committer_name, cname)
WVPASSEQ(commit_items.committer_mail, cmail)
WVPASSEQ(commit_items.committer_sec, int(cdate))
WVPASSEQ(commit_items.committer_offset, coff)
WVPASSEQ(commit_items.message, 'Do something\n')
with open('bar', 'w') as f:
print >> f, 'baz'
readpipe(['git', 'add', '.'])
readpipe(['git', 'commit', '-am', 'Do something else'])
child = readpipe(['git', 'show-ref', '-s', 'master']).strip()
parents = showval(child, '%P')
commit_items = git.get_commit_items(child, git.cp())
WVPASSEQ(commit_items.parents, [commit])
finally:
os.chdir(orig_cwd)
restore_env_var('GIT_AUTHOR_NAME', orig_author_name)
restore_env_var('GIT_AUTHOR_EMAIL', orig_author_email)
restore_env_var('GIT_COMMITTER_NAME', orig_committer_name)
restore_env_var('GIT_COMMITTER_EMAIL', orig_committer_email)
if wvfailure_count() == initial_failures:
subprocess.call(['rm', '-rf', tmpdir])
@wvtest
def test_list_refs():
initial_failures = wvfailure_count()
tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tgit-')
os.environ['BUP_MAIN_EXE'] = bup_exe
os.environ['BUP_DIR'] = bupdir = tmpdir + "/bup"
src = tmpdir + '/src'
mkdirp(src)
    with open(src + '/1', 'w+') as f:
        print >> f, 'something'
    with open(src + '/2', 'w+') as f:
        print >> f, 'something else'
git.init_repo(bupdir)
emptyset = frozenset()
WVPASSEQ(frozenset(git.list_refs()), emptyset)
WVPASSEQ(frozenset(git.list_refs(limit_to_tags=True)), emptyset)
WVPASSEQ(frozenset(git.list_refs(limit_to_heads=True)), emptyset)
exc(bup_exe, 'index', src)
exc(bup_exe, 'save', '-n', 'src', '--strip', src)
src_hash = exo('git', '--git-dir', bupdir,
'rev-parse', 'src').strip().split('\n')
assert(len(src_hash) == 1)
src_hash = src_hash[0].decode('hex')
tree_hash = exo('git', '--git-dir', bupdir,
'rev-parse', 'src:').strip().split('\n')[0].decode('hex')
blob_hash = exo('git', '--git-dir', bupdir,
'rev-parse', 'src:1').strip().split('\n')[0].decode('hex')
WVPASSEQ(frozenset(git.list_refs()),
frozenset([('refs/heads/src', src_hash)]))
WVPASSEQ(frozenset(git.list_refs(limit_to_tags=True)), emptyset)
WVPASSEQ(frozenset(git.list_refs(limit_to_heads=True)),
frozenset([('refs/heads/src', src_hash)]))
exc('git', '--git-dir', bupdir, 'tag', 'commit-tag', 'src')
WVPASSEQ(frozenset(git.list_refs()),
frozenset([('refs/heads/src', src_hash),
('refs/tags/commit-tag', src_hash)]))
WVPASSEQ(frozenset(git.list_refs(limit_to_tags=True)),
frozenset([('refs/tags/commit-tag', src_hash)]))
WVPASSEQ(frozenset(git.list_refs(limit_to_heads=True)),
frozenset([('refs/heads/src', src_hash)]))
exc('git', '--git-dir', bupdir, 'tag', 'tree-tag', 'src:')
exc('git', '--git-dir', bupdir, 'tag', 'blob-tag', 'src:1')
os.unlink(bupdir + '/refs/heads/src')
expected_tags = frozenset([('refs/tags/commit-tag', src_hash),
('refs/tags/tree-tag', tree_hash),
('refs/tags/blob-tag', blob_hash)])
WVPASSEQ(frozenset(git.list_refs()), expected_tags)
WVPASSEQ(frozenset(git.list_refs(limit_to_heads=True)), frozenset([]))
WVPASSEQ(frozenset(git.list_refs(limit_to_tags=True)), expected_tags)
if wvfailure_count() == initial_failures:
subprocess.call(['rm', '-rf', tmpdir])
|
tjanez/bup
|
lib/bup/t/tgit.py
|
Python
|
lgpl-2.1
| 13,004
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from flask import json
from sqlalchemy import sql
from dci.api.v1 import api
from dci.api.v1 import base
from dci import auth
from dci.common import exceptions as dci_exc
from dci.common.schemas import clean_json_with_schema, update_current_user_schema
from dci.common import utils
from dci.db import models2
from dci import decorators
# TODO: replace this properly with JSONEncoder
def _encode_dict(_dict):
res = {}
for d in _dict:
_values = {}
for i in _dict[d]:
_values[str(i)] = _dict[d][i]
res[str(d)] = _values
return res
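# One possible shape for the TODO above (a hedged sketch, not this project's
# actual implementation): let json.dumps walk the nesting and stringify
# unknown objects through a JSONEncoder subclass.
#
#     class _FallbackEncoder(json.JSONEncoder):
#         def default(self, o):
#             # json calls default() only for values it cannot serialize
#             return str(o)
#
#     json.dumps({"teams": identity.teams}, cls=_FallbackEncoder)
#
# Caveat: default() is never invoked for dict *keys*, so non-primitive keys
# would still need converting beforehand, as _encode_dict does today.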
@api.route("/identity", methods=["GET"])
@decorators.login_required
def get_identity(identity):
"""Returns some information about the currently authenticated identity"""
return flask.Response(
json.dumps(
{
"identity": {
"id": identity.id,
"etag": identity.etag,
"name": identity.name,
"fullname": identity.fullname,
"email": identity.email,
"timezone": identity.timezone,
"teams": _encode_dict(identity.teams),
}
}
),
200,
headers={"ETag": identity.etag},
content_type="application/json",
)
@api.route("/identity", methods=["PUT"])
@decorators.login_required
def put_identity(user):
if_match_etag = utils.check_and_get_etag(flask.request.headers)
values = clean_json_with_schema(update_current_user_schema, flask.request.json)
if user.is_not_read_only_user():
current_password = values["current_password"]
encrypted_password = user.password
if not auth.check_passwords_equal(current_password, encrypted_password):
raise dci_exc.DCIException("current_password invalid")
new_values = {}
new_password = values.get("new_password")
if new_password:
encrypted_password = auth.hash_password(new_password)
new_values["password"] = encrypted_password
etag = utils.gen_etag()
new_values.update(
{
"etag": etag,
"fullname": values.get("fullname") or user.fullname,
"email": values.get("email") or user.email,
"timezone": values.get("timezone") or user.timezone,
}
)
try:
flask.g.session.query(models2.User).filter(
sql.and_(models2.User.id == user.id, models2.User.etag == if_match_etag)
).update(new_values)
flask.g.session.commit()
except Exception as e:
flask.g.session.rollback()
raise dci_exc.DCIException(message=str(e), status_code=409)
user = base.get_resource_orm(models2.User, user.id)
user_serialized = user.serialize()
return flask.Response(
json.dumps({"user": user_serialized}),
200,
headers={"ETag": etag},
content_type="application/json",
)
|
redhat-cip/dci-control-server
|
dci/api/v1/identity.py
|
Python
|
apache-2.0
| 3,559
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import gc
import IECore
import Gaffer
import GafferTest
class ArrayPlugTest( GafferTest.TestCase ) :
def test( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" not in n["in"] )
self.assertEqual( len( n["in"] ), 1 )
self.assertTrue( n["in"]["e1"].isSame( n["in"][0] ) )
n["in"][0].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 2 )
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" in n["in"] )
n["in"][0].setInput( None )
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" not in n["in"] )
self.assertEqual( len( n["in"] ), 1 )
def testConnectionGaps( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
n["in"][0].setInput( a["sum"] )
n["in"][1].setInput( a["sum"] )
n["in"][2].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 4 )
self.assertTrue( n["in"]["e1"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e2"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e3"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e4"].getInput() is None )
n["in"][1].setInput( None )
self.assertEqual( len( n["in"] ), 4 )
self.assertTrue( n["in"]["e1"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e2"].getInput() is None )
self.assertTrue( n["in"]["e3"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e4"].getInput() is None )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"][0].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( s["a"]["sum"] )
s["n"]["in"][2].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( None )
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].isSame( s["n"]["in"][0] ) )
self.assertTrue( s["n"]["in"]["e2"].isSame( s["n"]["in"][1] ) )
self.assertTrue( s["n"]["in"]["e3"].isSame( s["n"]["in"][2] ) )
self.assertTrue( s["n"]["in"]["e4"].isSame( s["n"]["in"][3] ) )
self.assertTrue( s["n"]["in"]["e1"].getInput(), s["a"]["sum"] )
self.assertTrue( s["n"]["in"]["e2"].getInput() is None )
self.assertTrue( s["n"]["in"]["e3"].getInput(), s["a"]["sum"] )
self.assertTrue( s["n"]["in"]["e4"].getInput() is None )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( len( s2["n"]["in"] ), 4 )
self.assertTrue( s2["n"]["in"]["e1"].isSame( s2["n"]["in"][0] ) )
self.assertTrue( s2["n"]["in"]["e2"].isSame( s2["n"]["in"][1] ) )
self.assertTrue( s2["n"]["in"]["e3"].isSame( s2["n"]["in"][2] ) )
self.assertTrue( s2["n"]["in"]["e4"].isSame( s2["n"]["in"][3] ) )
self.assertTrue( s2["n"]["in"]["e1"].getInput(), s2["a"]["sum"] )
self.assertTrue( s2["n"]["in"]["e2"].getInput() is None )
self.assertTrue( s2["n"]["in"]["e3"].getInput(), s2["a"]["sum"] )
self.assertTrue( s2["n"]["in"]["e4"].getInput() is None )
def testMaximumInputs( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
# connect all inputs
for i in range( 0, 6 ) :
n["in"][i].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 6 )
for i in range( 0, 6 ) :
self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )
# check that removing the one before the last
# leaves the last in place.
n["in"][4].setInput( None )
self.assertEqual( len( n["in"] ), 6 )
for i in range( 0, 6 ) :
if i != 4 :
self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )
else :
self.assertTrue( n["in"][i].getInput() is None )
def testMakeConnectionAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
with Gaffer.UndoContext( s ) :
s["n"]["in"][0].setInput( s["a"]["sum"] )
self.assertEqual( len( s["n"]["in"] ), 2 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 1 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
s.redo()
self.assertEqual( len( s["n"]["in"] ), 2 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 1 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( "in" in s["n"] )
self.assertFalse( "in1" in s["n"] )
def testMinimumInputs( self ) :
a = GafferTest.AddNode()
n = Gaffer.Node()
n["in"] = Gaffer.ArrayPlug( "in", element = Gaffer.IntPlug( "e1" ), minSize=3 )
self.assertEqual( len( n["in"] ), 3 )
# connecting to the middle input shouldn't create
# any new inputs, because there is still one free on the end
n["in"]["e2"].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 3 )
# connecting to the last input should create a new
# one - there should always be one free input on the
# end (until the maximum is reached).
n["in"]["e3"].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 4 )
n["in"]["e3"].setInput( None )
self.assertEqual( len( n["in"] ), 3 )
def testDeleteAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"]["e1"].setInput( s["a"]["sum"] )
s["n"]["in"]["e2"].setInput( s["a"]["sum"] )
s["n"]["in"]["e3"].setInput( s["a"]["sum"] )
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
with Gaffer.UndoContext( s ) :
s.deleteNodes( s, Gaffer.StandardSet( [ s["n"] ] ) )
self.assertFalse( "n" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
s.redo()
self.assertFalse( "n" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
def testDeleteInputNodeAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"][0].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( s["a"]["sum"] )
s["n"]["in"][2].setInput( s["a"]["sum"] )
n = s["n"]
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
with Gaffer.UndoContext( s ) :
s.deleteNodes( s, Gaffer.StandardSet( [ s["a"] ] ) )
self.assertFalse( "a" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
s.redo()
self.assertFalse( "a" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
def testFixedLengthDynamic( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = Gaffer.Node()
s["n"]["a"] = Gaffer.ArrayPlug( "a", element = Gaffer.IntPlug(), minSize = 4, maxSize = 4, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["a"][1].setInput( s["a"]["sum"] )
s["n"]["a"][2].setInput( s["a"]["sum"] )
self.assertEqual( s["n"]["a"].minSize(), 4 )
self.assertEqual( s["n"]["a"].maxSize(), 4 )
self.assertEqual( len( s["n"]["a"] ), 4 )
self.assertTrue( s["n"]["a"][0].getInput() is None )
self.assertTrue( s["n"]["a"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["a"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["a"][3].getInput() is None )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["n"]["a"].minSize(), 4 )
self.assertEqual( s2["n"]["a"].maxSize(), 4 )
self.assertEqual( len( s2["n"]["a"] ), 4 )
self.assertTrue( s2["n"]["a"][0].getInput() is None )
self.assertTrue( s2["n"]["a"][1].getInput().isSame( s2["a"]["sum"] ) )
self.assertTrue( s2["n"]["a"][1].getInput().isSame( s2["a"]["sum"] ) )
self.assertTrue( s2["n"]["a"][3].getInput() is None )
def testPythonElement( self ) :
class PythonElement( Gaffer.Plug ) :
def __init__( self, name = "PythonElement", direction = Gaffer.Plug.Direction.In, flags = Gaffer.Plug.Flags.Default ) :
Gaffer.Plug.__init__( self, name, direction, flags )
def createCounterpart( self, name, direction ) :
return PythonElement( name, direction, self.getFlags() )
n = Gaffer.Node()
n["a"] = Gaffer.ArrayPlug( element = PythonElement() )
self.assertEqual( len( n["a"] ), 1 )
self.assertTrue( isinstance( n["a"][0], PythonElement ) )
p = PythonElement()
n["a"][0].setInput( p )
self.assertEqual( len( n["a"] ), 2 )
self.assertTrue( isinstance( n["a"][1], PythonElement ) )
def tearDown( self ) :
# some bugs in the InputGenerator only showed themselves when
# the ScriptNode was deleted during garbage collection, often
# in totally unrelated tests. so we run the garbage collector
# here to localise any problems to this test, making them
# easier to diagnose and fix.
while gc.collect() :
pass
IECore.RefCounted.collectGarbage()
if __name__ == "__main__":
unittest.main()
|
goddardl/gaffer
|
python/GafferTest/ArrayPlugTest.py
|
Python
|
bsd-3-clause
| 11,868
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import amulet
class TestDeploy(unittest.TestCase):
"""
Trivial deployment test for Giraph.
"""
@classmethod
def setUpClass(cls):
cls.d = amulet.Deployment(series='xenial')
cls.d.add('giraph', 'cs:xenial/giraph')
cls.d.setup(timeout=900)
cls.d.sentry.wait(timeout=1800)
if __name__ == '__main__':
unittest.main()
|
panagiotisl/bigtop
|
bigtop-packages/src/charm/giraph/layer-giraph/tests/01-basic-deployment.py
|
Python
|
apache-2.0
| 1,194
|
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
import weblab.comm.server as RFS
import weblab.core.comm.user_manager as UPFM
try:
import weblab.core.comm.generated.weblabdeusto_interface as weblabdeusto_interface
except ImportError:
ZSI_AVAILABLE = False
else:
ZSI_AVAILABLE = True
USER_PROCESSING_FACADE_ZSI_LISTEN = 'core_facade_soap_bind'
DEFAULT_USER_PROCESSING_FACADE_ZSI_LISTEN = ''
USER_PROCESSING_FACADE_ZSI_PORT = 'core_facade_soap_port'
USER_PROCESSING_FACADE_ZSI_SERVICE_NAME = 'core_facade_soap_service_name'
DEFAULT_USER_PROCESSING_FACADE_ZSI_SERVICE_NAME = '/weblab/soap/'
USER_PROCESSING_FACADE_ZSI_PUBLIC_SERVER_HOST = 'core_facade_soap_public_server_host'
DEFAULT_USER_PROCESSING_FACADE_ZSI_PUBLIC_SERVER_HOST = 'www.weblab.deusto.es'
USER_PROCESSING_FACADE_ZSI_PUBLIC_SERVER_PORT = 'core_facade_soap_public_server_port'
DEFAULT_USER_PROCESSING_FACADE_ZSI_PUBLIC_SERVER_PORT = 80
USER_PROCESSING_FACADE_JSON_LISTEN = 'core_facade_json_bind'
DEFAULT_USER_PROCESSING_FACADE_JSON_LISTEN = ''
USER_PROCESSING_FACADE_JSON_PORT = 'core_facade_json_port'
USER_PROCESSING_FACADE_XMLRPC_LISTEN = 'core_facade_xmlrpc_bind'
DEFAULT_USER_PROCESSING_FACADE_XMLRPC_LISTEN = ''
USER_PROCESSING_FACADE_XMLRPC_PORT = 'core_facade_xmlrpc_port'
USER_PROCESSING_FACADE_SERVER_ROUTE = 'core_facade_server_route'
DEFAULT_USER_PROCESSING_SERVER_ROUTE = 'default-route-to-server'
class UserProcessingRemoteFacadeServer(RFS.AbstractRemoteFacadeServer):
if ZSI_AVAILABLE:
class RemoteFacadeServerZSI(RFS.AbstractRemoteFacadeServerZSI):
WebLabDeusto = weblabdeusto_interface.weblabdeusto
SERVERS = RFS.AbstractRemoteFacadeServer.SERVERS + (RemoteFacadeServerZSI,)
FACADE_ZSI_LISTEN = USER_PROCESSING_FACADE_ZSI_LISTEN
DEFAULT_FACADE_ZSI_LISTEN = DEFAULT_USER_PROCESSING_FACADE_ZSI_LISTEN
FACADE_ZSI_PORT = USER_PROCESSING_FACADE_ZSI_PORT
FACADE_ZSI_SERVICE_NAME = USER_PROCESSING_FACADE_ZSI_SERVICE_NAME
DEFAULT_FACADE_ZSI_SERVICE_NAME = DEFAULT_USER_PROCESSING_FACADE_ZSI_SERVICE_NAME
FACADE_ZSI_PUBLIC_SERVER_HOST = USER_PROCESSING_FACADE_ZSI_PUBLIC_SERVER_HOST
DEFAULT_FACADE_ZSI_PUBLIC_SERVER_HOST = DEFAULT_USER_PROCESSING_FACADE_ZSI_PUBLIC_SERVER_HOST
FACADE_ZSI_PUBLIC_SERVER_PORT = USER_PROCESSING_FACADE_ZSI_PUBLIC_SERVER_PORT
DEFAULT_FACADE_ZSI_PUBLIC_SERVER_PORT = DEFAULT_USER_PROCESSING_FACADE_ZSI_PUBLIC_SERVER_PORT
FACADE_JSON_LISTEN = USER_PROCESSING_FACADE_JSON_LISTEN
DEFAULT_FACADE_JSON_LISTEN = DEFAULT_USER_PROCESSING_FACADE_JSON_LISTEN
FACADE_JSON_PORT = USER_PROCESSING_FACADE_JSON_PORT
FACADE_XMLRPC_LISTEN = USER_PROCESSING_FACADE_XMLRPC_LISTEN
DEFAULT_FACADE_XMLRPC_LISTEN = DEFAULT_USER_PROCESSING_FACADE_XMLRPC_LISTEN
FACADE_XMLRPC_PORT = USER_PROCESSING_FACADE_XMLRPC_PORT
FACADE_SERVER_ROUTE = USER_PROCESSING_FACADE_SERVER_ROUTE
DEFAULT_SERVER_ROUTE = DEFAULT_USER_PROCESSING_SERVER_ROUTE
if ZSI_AVAILABLE:
def _create_zsi_remote_facade_manager(self, server, configuration_manager):
return UPFM.UserProcessingRemoteFacadeManagerZSI( configuration_manager, server)
def _create_xmlrpc_remote_facade_manager(self, server, configuration_manager):
return UPFM.UserProcessingRemoteFacadeManagerXMLRPC( configuration_manager, server)
def _create_json_remote_facade_manager(self, server, configuration_manager):
return UPFM.UserProcessingRemoteFacadeManagerJSON( configuration_manager, server)
|
ganeshgore/myremolab
|
server/src/weblab/core/comm/user_server.py
|
Python
|
bsd-2-clause
| 4,389
|
# Django
from django import template
# Django-DataTables
from core.datatables.utils import lookupattr
register = template.Library()
register.filter('lookupattr', lookupattr)
|
stavrik/test
|
core/datatables/templatetags/lookupattr.py
|
Python
|
apache-2.0
| 176
|
# -*- coding: utf-8 -*-
'''
Pupil Player Third Party Plugins by cpicanco
Copyright (C) 2016 Rafael Picanço.
The present file is distributed under the terms of the GNU General Public License (GPL v3.0).
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from os import path
from ast import literal_eval
from pyglui.cygl.utils import RGBA,draw_points,draw_polyline
from OpenGL.GL import *
from OpenGL.GLU import gluOrtho2D
from glfw import glfwGetWindowSize, glfwGetCurrentContext, GLFW_KEY_V, GLFW_KEY_COMMA
from pyglui import ui
import numpy as np
from plugin import Plugin
import logging
import zmq
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
class TrialContainer(object):
def __init__(self):
self.Angle = None
self.ExpectedResponse = None
self.TimeEvents = []
self.Timestamps = []
self.Distance = []
# self.GazePoints = []
# self.CirPoints = []
# self.CirRanking = []
# self.CirAPoints = []
# self.CirBPoints = []
class Segmentation(Plugin):
"""
The user can manually create events by pressing
    the keyboard key "v".
This plugin will display vertical bars at the bottom seek bar
based on those events.
One should be able to send those events
as sections to the trim_marks plugin (auto-trim).
The auto-trim functionality includes two options:
    - example events = [1, 50, 100, 150]
    - chain
      - would return the following sections [(1, 50), (51, 100), (101, 150)]
    - in out pairs
      - would return the following sections [(1, 50), (100, 150)]
Todo:
- import events from Pupil like timestamps
- selector to manage multiple saved sections
"""
def __init__(self, g_pool, custom_events=[], mode='chain', keep_create_order=True,
expected_response='NONE',filter_by_expresp=True,
angle='NONE',filter_by_angle=True,
distance='NONE',filter_by_distance=True,
offset=0.0,onset=0.0,
color='red'):
super(Segmentation, self).__init__(g_pool)
Trim_Marks_Extended_Exist = False
for p in g_pool.plugins:
if p.class_name == 'Trim_Marks_Extended':
Trim_Marks_Extended_Exist = True
break
if not Trim_Marks_Extended_Exist:
from trim_marks_patch import Trim_Marks_Extended
g_pool.plugins.add(Trim_Marks_Extended)
del Trim_Marks_Extended
# Pupil Player system configs
self.trim_marks = g_pool.trim_marks
self.order = .8
self.uniqueness = "by_class"
# Pupil Player data
self.capture = g_pool.capture
#self.current_frame_index = self.capture.get_frame_index()
self.frame_count = self.capture.get_frame_count()
self.frame_index = None
# self.timestamps = g_pool.timestamps
# display layout
self.padding = 30. #in screen pixel
# initialize empty menu and local variables
self.menu = None
self.mode = mode
self.keep_create_order = keep_create_order
# persistence
self.custom_events_path = path.join(self.g_pool.rec_dir,'custom_events.npy')
try:
self.custom_events = list(np.load(self.custom_events_path))
if not self.custom_events:
logger.warning("List is empty. Loading from cache.")
self.custom_events = custom_events
else:
logger.info("Custom events loaded: "+ self.custom_events_path)
except:
logger.warning("No custom events at: "+ self.custom_events_path)
self.custom_events = custom_events
if not self.custom_events:
logger.warning("No chached events were found.")
else:
logger.warning("Using chached events. Please, save them if necessary.")
# stimulus control application data
self.scapp_output = None
try:
self.load_scapp_output()
except:
logger.warning("scapp_output.timestamps error")
self.scapp_report = None
self.load_scapp_report()
self.scapp_output_npy = None
self.load_scapp_output_npy()
        # todo: find a way to load these properties dynamically based on column names
self.expected_response = expected_response
self.filter_by_expresp = filter_by_expresp
self.angle = angle
self.filter_by_angle = filter_by_angle
self.distance = distance
self.filter_by_distance = filter_by_distance
self.offset = offset
self.onset = onset
self.color = color
def load_scapp_output_npy(self):
scapp_output_path = path.join(self.g_pool.rec_dir,"scapp_output.npy")
if not path.isfile(scapp_output_path):
logger.warning("File not found: "+ scapp_output_path)
return
scapp_output = np.load(scapp_output_path)
self.scapp_output_npy = [[]]
for line in scapp_output:
trial_no = line[0]
timestamp = line[1]
event = line[2]
i = int(trial_no)
if i > len(self.scapp_output_npy):
self.scapp_output_npy.append([])
self.scapp_output_npy[i - 1].append((timestamp, event))
def load_scapp_output(self): # timestamps of scapp events
"""
__________________________________________________________
        dependency: a valid self.g_pool.rec_dir + '/scapp_output' path
__________________________________________________________
        - scapp is an acronym for Stimulus Control Application
- scapp_output has the following format:
(Trial_No, timestamp, event:session_time)
where:
- 'Trial_No' is the trial number as in scapp_report
            - 'timestamp' is the timestamp sent by Pupil Server and received by the scapp Client
            - 'event' is a scapp event; there are currently four types:
- S : Starter onset | Trial onset | ITI ending
- *R : first response after S | Starter ending
- R : responses after *R
- C : Consequence onset | Trial ending | ITI onset
            - session_time is the event occurrence time in ms from the session onset.
> examples:
> scapp_output
('1', '232.5674', 'S:029367')
('1', '232.5675', '*R:029368')
('1', '232.5676', 'C:029369')
('2', '232.5684', 'S:029377')
('2', '232.5685', '*R:029378')
('2', '232.5686', 'R:029379')
('2', '232.5687', 'C:029380')
> scapp_output loaded
[ [ ('232.5674', 'S:029367'), ('232.5675', '*R:029368'), ('232.5676', 'C:029369') ],
[ ('232.5684', 'S:029377'), ('232.5685', '*R:029378'), ('232.5686', 'R:029379'), ('232.5687', 'C:029380') ] ]
"""
scapp_output_path = path.join(self.g_pool.rec_dir,'scapp_output.timestamps')
if path.isfile(scapp_output_path):
self.scapp_output = [[]]
with open(scapp_output_path, 'r') as scapp_output:
for line in scapp_output:
(trial_no, timestamp, event) = literal_eval(line)
i = int(trial_no)
if i > len(self.scapp_output):
self.scapp_output.append([])
self.scapp_output[i - 1].append((timestamp, event))
else:
logger.warning("File not found: "+ scapp_output_path)
def load_scapp_report(self):
"""
__________________________________________________________
        dependency: a valid self.g_pool.rec_dir + '/scapp_report' path
report_type: string | 'fpe', 'eos', 'vlh'
__________________________________________________________
Source Header Names for Feature Positive Effect (fpe) trials:
Trial_No : Trial increment number (unique). (INT)
Trial_Id : Trial identification number (can repeat). (INT)
TrialNam : Trial String Name. (STR)
ITIBegin : Consequence / Inter Trial Interval onset. (TIME)
__ITIEnd : Starter begin / End of Inter Trial Interval (TIME)
StartLat : Latency of the starter response. (TIME)
StmBegin : Trial stimulus/stimuli onset. (TIME)
_Latency : Latency. (TIME)
__StmEnd : End of the stimulus/stimuli removal. (TIME)
ExpcResp : Expected response / Contingency. (STR)
Positiva
Negativa
            __Result : Type of the Response emitted. (STR)
MISS
HIT
NONE
            RespFreq : Number of responses emitted (INT)
Source Header Names for Eye Orientation Study (eos) trials:
Trial_No : idem
Trial_Id : idem
TrialNam : idem
ITIBegin : idem
__ITIEnd : idem
StmBegin : idem
_Latency : idem
__StmEnd : idem
___Angle : Angle (STR)
0, 45, 90, 135
______X1 : left 1
______Y1 : top 1
______X2 : left 2
______Y2 : top 2
ExpcResp : Expected response
0 = no gap/false
1 = gap/true
RespFreq : idem
Source Header Names for Variable Limited Hold Study (vlh) trials:
Trial_No : idem
Trial_Id : idem
TrialNam : idem
ITIBegin : idem
__ITIEnd : idem
StmBegin : idem
_Latency : idem
___Cycle :
__Timer2 :
_Version :
____Mode :
RespFreq :
        All time variables are in milliseconds. Counting started
at the beginning of the session.
"""
scapp_report_path = path.join(self.g_pool.rec_dir,'scapp_report.data')
if path.isfile(scapp_report_path):
try:
self.scapp_report = np.genfromtxt(scapp_report_path,
delimiter="\t", missing_values=["NA"], skip_header=6, skip_footer=1,
filling_values=None, names=True, deletechars='_', autostrip=True,
dtype=None)
except ValueError:
logger.warning("genfromtxt error")
else:
logger.warning("File not found: "+ scapp_report_path)
def event_undo(self, arg):
if self.custom_events:
self.custom_events.pop()
if not self.keep_create_order:
self.custom_events = sorted(self.custom_events, key=int)
def create_custom_event(self, arg):
if self.frame_index:
if self.frame_index not in self.custom_events:
self.custom_events.append(self.frame_index)
if not self.keep_create_order:
self.custom_events = sorted(self.custom_events, key=int)
def save_custom_events(self):
np.save(self.custom_events_path,np.asarray(self.custom_events))
def auto_trim(self):
# create sections and pass them to the trim_marks
sections = []
events = sorted(self.custom_events, key=int)
size = len(events)
if size > 1:
i = 0
while True:
if self.mode == 'chain':
if i == 0:
sections.append([events[i],events[i+1]])
elif (i > 0) and (i < (size-1)):
sections.append([events[i]+1,events[i+1]])
i += 1
elif self.mode == 'in out pairs':
if i < (size-1):
sections.append([events[i],events[i+1]])
i += 2
if i > (size-1):
break
self.trim_marks.sections = sections
self.trim_marks.focus = 0
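    # Worked example of the two trim modes for events = [1, 50, 100, 150]
    # (mirrors the class docstring):
    #   'chain'        -> [[1, 50], [51, 100], [101, 150]]
    #   'in out pairs' -> [[1, 50], [100, 150]]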
def auto_trim_first_last(self):
# create sections and pass them to the trim_marks
sections = []
events = sorted(self.custom_events, key=int)
if len(events) > 0:
sections.append([events[0],events[-1]])
self.trim_marks.sections = sections
self.trim_marks.focus = 0
else:
logger.info("No custom events found, add some.")
def init_gui(self):
# print('zmq_version:',zmq.zmq_version())
# initialize the menu
self.menu = ui.Scrolling_Menu('Segmentation')
# add ui elements to the menu
self.menu.append(ui.Button('Close', self.unset_alive))
        self.menu.append(ui.Info_Text('You can create custom events by pressing "v". To undo press "," (comma). Remember to save them when you are done.'))
self.menu.append(ui.Switch('keep_create_order',self,label="Keep Creation Order"))
# maybe thumbs instead keyboard keys?
self.menu.append(ui.Hot_Key('create_event',setter=self.create_custom_event,getter=lambda:True,label='V',hotkey=GLFW_KEY_V))
self.menu.append(ui.Hot_Key('event_undo',setter=self.event_undo,getter=lambda:True,label=',',hotkey=GLFW_KEY_COMMA))
self.menu.append(ui.Button('Save Events',self.save_custom_events))
self.menu.append(ui.Button('Clean All Events',self.clean_custom_events))
self.menu.append(ui.Info_Text('You can auto-trim to get all sections based on available events. Choose the Trim Mode that fit your needs.'))
self.menu.append(ui.Selector('mode',self,label='Trim Mode',selection=['chain','in out pairs'] ))
self.menu.append(ui.Button('Auto-trim',self.auto_trim))
self.menu.append(ui.Info_Text('You can auto-trim to get a one section based on the first and last events.'))
self.menu.append(ui.Button('Auto-trim',self.auto_trim_first_last))
        # todo: for each data column, load filters dynamically based on filtered lines (after removing repetition)
        # first guess is to use switches
        # in order to make it easy to add/remove a filter, but a list of switches that is too long would not be good
# todo: not all reports will fit into this... Need a more abstract way
        if self.scapp_report is not None:
s_menu = ui.Growing_Menu("Filters")
s_menu.collapsed=False
unique_items = sorted(set(self.scapp_report['Angle']))
s_menu.append(ui.Switch('filter_by_angle',self,label="by Angle"))
selection = ['NONE']
[selection.append(str(i)) for i in unique_items]
s_menu.append(ui.Selector('angle',self,label='Angles',selection=selection))
unique_items = sorted(set(self.scapp_report['ExpcResp']))
selection = ['NONE']
            selection.extend(str(i) for i in unique_items)
s_menu.append(ui.Switch('filter_by_expresp',self,label="by Expected Response"))
s_menu.append(ui.Selector('expected_response',self,label='Expected Response',selection=selection))
unique_items = sorted(set(zip(self.scapp_report['Angle'],self.scapp_report['X1'],self.scapp_report['Y1'])))
selection = ['NONE']
            selection.extend(str(i) for i in unique_items)
s_menu.append(ui.Switch('filter_by_distance',self,label="by Distance"))
s_menu.append(ui.Selector('distance',self,label='Distance',selection=selection))
s_menu.append(ui.Slider('onset',self,min=0.00,step=0.1,max=2.0,label='onset'))
s_menu.append(ui.Slider('offset',self,min=0.00,step=0.1,max=2.0,label='offset'))
s_menu.append(ui.Button('Add Events',self.add_filtered_events))
s_menu.append(ui.Button('Clean, Add, Trim',self.clean_add_trim))
self.menu.append(s_menu)
s_menu = ui.Growing_Menu("Filters 2")
s_menu.collapsed=False
s_menu.append(ui.Selector('color',self,label='Color',selection=['red', 'blue'] ))
#s_menu.append(ui.Switch('filter_by_expresp',self,label="by Expected Response"))
#s_menu.append(ui.Selector('expected_response',self,label='Expected Response',selection=['0', '1'] ))
s_menu.append(ui.Button('Add Events',self.add_filtered_events_npy))
s_menu.append(ui.Button('Clean, Add, Trim',self.clean_add_trim_2))
# self.menu.append(ui.Info_Text('Dispersion'))
# 0, 1, 2, 3, 4 .. n
self.menu.append(s_menu)
# add menu to the window
self.g_pool.gui.append(self.menu)
self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext()))
def clean_add_trim(self):
self.clean_custom_events()
self.add_filtered_events()
self.auto_trim()
def clean_add_trim_2(self):
self.clean_custom_events()
self.add_filtered_events_npy()
self.auto_trim()
def trial_from_timestamp(self, timestamp):
"""
        timestamp: float
        Return the index of the trial whose time span contains the given
        timestamp (None if no trial matches).
"""
for i, trial in enumerate(self.scapp_output):
trial_begin = float(trial[0][0])
trial_end = float(trial[-1][0])
if np.logical_and(trial_end >= timestamp, timestamp >= trial_begin):
#if trial_end >= timestamp >= trial_begin:
return i
def clean_custom_events(self):
self.custom_events = []
def add_filtered_events_npy(self):
def check_last_even():
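            # Ensure an even number of events so they can be paired later;
            # if the count is odd, append the frame nearest to the last
            # trial's final timestamp (note inferred from the code below).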
            if len(self.custom_events) % 2 != 0:
begin = np.abs(self.g_pool.timestamps - np.float64(Trials[len(Trials)-1].TimeEvents[-1][0])).argmin()
self.custom_events.append(begin)
if self.scapp_output_npy:
Trials = [TrialContainer() for _ in self.scapp_output_npy]
# fill it with data
if self.color == 'red':
for n, trial in enumerate(Trials):
trial.TimeEvents = self.scapp_output_npy[n]
begin = np.abs(self.g_pool.timestamps - np.float64(Trials[n].TimeEvents[0][0])).argmin()
self.custom_events.append(begin)
check_last_even()
if self.color == 'blue':
for n, trial in enumerate(Trials):
if n > 0:
trial.TimeEvents = self.scapp_output_npy[n]
begin = np.abs(self.g_pool.timestamps - np.float64(Trials[n].TimeEvents[0][0])).argmin()
self.custom_events.append(begin)
check_last_even()
else:
logger.error("The scapp_output_npy data was not loaded.")
def add_filtered_events(self):
# create a container with the size of the total trials
Trials = [TrialContainer() for _ in self.scapp_output]
# fill it with some data
for n, trial in enumerate(Trials):
trial.ExpectedResponse = self.scapp_report[n]['ExpcResp']
trial.Angle = str(self.scapp_report[n]['Angle'])
trial.Distance = (self.scapp_report[n]['Angle'],self.scapp_report[n]['X1'],self.scapp_report[n]['Y1'])
trial.TimeEvents = self.scapp_output[n]
            # find the frame of the corresponding event (firstResponse, starter onset...)
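            # np.abs(timestamps - t).argmin() picks the frame whose timestamp
            # is nearest to t; onset and offset (seconds) shift the search
            # target around the event (note inferred from the lines below).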
firstResponse = np.abs(self.g_pool.timestamps - np.float64(Trials[n].TimeEvents[1][0])-self.onset).argmin()
endLimitedHold = np.abs(self.g_pool.timestamps - np.float64(Trials[n].TimeEvents[-1][0])+self.offset).argmin()
# conditions
filtering_conditions = []
if self.filter_by_expresp:
filtering_conditions.append(str(trial.ExpectedResponse) == self.expected_response)
if self.filter_by_angle:
filtering_conditions.append(trial.Angle == self.angle)
if self.filter_by_distance:
filtering_conditions.append(str(trial.Distance) == self.distance)
# add frames to the custom events if all conditions are true
            if filtering_conditions:
if all(filtering_conditions):
self.custom_events.append(firstResponse)
self.custom_events.append(endLimitedHold)
else:
logger.warning("Check at least one filter condition before adding events.")
# 2 seconds interval
# frameInterval = range(firstResponse, endLimitedHold)
# print(firstResponse, endLimitedHold)
def on_window_resize(self,window,w,h):
self.window_size = w,h
self.h_pad = self.padding * self.frame_count/float(w)
self.v_pad = self.padding * 1./h
def recent_events(self,events):
frame = events['frame']
if frame is not None:
if self.frame_index != frame.index:
self.frame_index = frame.index
def gl_display(self):
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
gluOrtho2D(-self.h_pad, (self.frame_count)+self.h_pad, -self.v_pad, 1+self.v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
# custom events
for e in self.custom_events:
draw_polyline([(e,.06),(e,.005)], color = RGBA(.8, .8, .8, .8))
size = len(self.custom_events)
if size > 1:
for i, e in enumerate(self.custom_events):
draw_points([(e, .03)], size = 5, color = RGBA(.1, .5, .5, 1.))
i = 0
while True:
if i == 0:
draw_polyline([(self.custom_events[i],.03),(self.custom_events[i+1],0.03)], color = RGBA(.8, .8, .8, .8))
elif (i > 0) and (i < (size-1)):
draw_polyline([(self.custom_events[i] +1,.03),(self.custom_events[i+1],0.03)], color = RGBA(.8, .8, .8, .8))
if 'chain' in self.mode:
i += 1
elif 'in out pairs' in self.mode:
i += 2
if i > (size-1):
break
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
def deinit_gui(self):
if self.menu:
self.g_pool.gui.remove(self.menu)
self.menu = None
def cleanup(self):
""" called when the plugin gets terminated.
This happens either voluntarily or forced.
if you have a GUI or glfw window destroy it here.
"""
self.deinit_gui()
def unset_alive(self):
self.alive = False
def get_init_dict(self):
return {'custom_events':self.custom_events,
'mode':self.mode,
'keep_create_order':self.keep_create_order,
'expected_response':self.expected_response,
'filter_by_expresp':self.filter_by_expresp,
'angle':self.angle,
'filter_by_angle':self.filter_by_angle,
'distance':self.distance,
'filter_by_distance':self.filter_by_distance,
'offset':self.offset,
'onset':self.onset,
'color':self.color}
|
cpicanco/player_plugins
|
segmentation.py
|
Python
|
gpl-3.0
| 23,777
|
"""Models for WLED."""
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
from .coordinator import WLEDDataUpdateCoordinator
class WLEDEntity(CoordinatorEntity):
"""Defines a base WLED entity."""
coordinator: WLEDDataUpdateCoordinator
@property
def device_info(self) -> DeviceInfo:
"""Return device information about this WLED device."""
return DeviceInfo(
identifiers={(DOMAIN, self.coordinator.data.info.mac_address)},
name=self.coordinator.data.info.name,
manufacturer=self.coordinator.data.info.brand,
model=self.coordinator.data.info.product,
sw_version=str(self.coordinator.data.info.version),
configuration_url=f"http://{self.coordinator.wled.host}",
)
|
jawilson/home-assistant
|
homeassistant/components/wled/models.py
|
Python
|
apache-2.0
| 875
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="x", parent_name="contour", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
implied_edits=kwargs.pop("implied_edits", {"xtype": "array"}),
role=kwargs.pop("role", "data"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/contour/_x.py
|
Python
|
mit
| 516
|
x = bool(input("Type something or leave it blank...? "))
if x: # no need to compare with True because that is what if expects
    print("You typed something")
else:
    print("You didn't type anything")
#https://pt.stackoverflow.com/q/170784/101
|
bigown/SOpt
|
Python/Algorithm/TrueFalse4.py
|
Python
|
mit
| 247
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/current/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
import os  # used below to build the dataset file path
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htmpredictionmodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'f': {
'clipInput': True,
'fieldname': u'f',
'n': 100,
'name': u'f',
'minval': 0,
'maxval': 5,
'type': 'ScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.05,
'synPermInactiveDec': 0.008,
# boostStrength controls the strength of boosting. It should be a
# a number greater or equal than 0.0. No boosting is applied if
# boostStrength=0.0. Boosting encourages efficient usage of columns.
'boostStrength': 0.0,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable': True,
'tmParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
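# Illustrative example (assuming aggregationDivide returns the ratio of the
# two periods): with a predictAheadTime of 1 hour and an aggregation period
# of 15 minutes, predictionSteps becomes 4 and 'steps' becomes "4".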
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'cerebro_dummy',
u'streams': [ { u'columns': [u'*'],
u'info': u'hotGym.csv',
u'source': u'file://'+os.path.join(os.path.dirname(__file__), 'data.csv')}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'f', u'predictionSteps': [1]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'f', metric='passThruPrediction', inferenceElement='anomalyScore', params={'window': 1000}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
|
marionleborgne/nupic
|
examples/opf/experiments/anomaly/temporal/simple/description.py
|
Python
|
agpl-3.0
| 14,436
|
#! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from sys import version_info
install_requires = []
if version_info[:2] <= (2, 5):
install_requires.append('simplejson >= 2.0.9')
setup(
name = 'avro',
version = '@AVRO_VERSION@',
packages = ['avro',],
package_dir = {'avro': 'src/avro'},
scripts = ["./scripts/avro"],
#include_package_data=True,
package_data={'avro': ['LICENSE', 'NOTICE']},
# Project uses simplejson, so ensure that it gets installed or upgraded
# on the target machine
install_requires = install_requires,
# metadata for upload to PyPI
author = 'Apache Avro',
author_email = 'dev@avro.apache.org',
description = 'Avro is a serialization and RPC framework.',
license = 'Apache License 2.0',
keywords = 'avro serialization rpc',
url = 'http://avro.apache.org/',
extras_require = {
'snappy': ['python-snappy'],
},
)
|
relateiq/avro
|
lang/py/setup.py
|
Python
|
apache-2.0
| 1,729
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import urlparse
def encode_unicode_dict(unicodedict, encoding="utf-8"):
bytedict = {}
for key in unicodedict:
if isinstance(unicodedict[key], unicode):
bytedict[key] = unicodedict[key].encode(encoding)
elif isinstance(unicodedict[key], dict):
bytedict[key] = encode_unicode_dict(unicodedict[key])
else:
bytedict[key] = unicodedict[key]
return bytedict
def decode_bytestring_dict(bytedict, encoding="utf-8"):
unicodedict = {}
for key in bytedict:
if isinstance(bytedict[key], str):
unicodedict[key] = bytedict[key].decode(encoding)
elif isinstance(bytedict[key], dict):
unicodedict[key] = decode_bytestring_dict(bytedict[key])
else:
unicodedict[key] = bytedict[key]
return unicodedict
def encode_values(values):
"""Returns a string with encode the values in the given dictionary.
:values: dictionary with key values pairs
:returns: String key1:value1,key2:value2...
"""
# Because urlencode can not handle unicode strings we encode the
# whole dictionary into utf8 bytestrings first.
return urllib.urlencode(encode_unicode_dict(values))
def decode_values(encoded):
"""Returns a dictionay with decoded values in the string. See
encode_values function.
:encoded : String key1:value1,key2:value2...
:returns: Dictionary with key values pairs
"""
# We convert the encoded querystring into a bystring to enforce that
# parse_pq returns a dictionary which can be later decoded using
# decode_bystring_dict. If we use the encoded string directly the
# returned dicionary would contain bytestring as unicode. e.g
# u'M\xc3\xbcller' which can't be decoded later.
encoded = str(encoded)
# Now convert the query string into a dictionary with UTF-8 encoded
# bytestring values.
values = urlparse.parse_qs(encoded)
for key in values:
values[key] = values[key][0]
# Finally convert this dictionary back into a unicode dictionary
return decode_bytestring_dict(values)
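# Illustrative round trip (hypothetical values):
#   encoded = encode_values({u'name': u'Müller'})   # -> 'name=M%C3%BCller'
#   decode_values(encoded)                          # -> {u'name': u'Müller'}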
|
ThomasJunk/ringo
|
ringo/lib/request/helpers.py
|
Python
|
gpl-2.0
| 2,171
|
import unittest, os
from nose.plugins import PluginTester, Plugin
from nose.tools import eq_
from cStringIO import StringIO
class StubPlugin(Plugin):
def options(self, parser, env=os.environ):
super(StubPlugin, self).options(parser, env=env)
def configure(self, options, conf):
pass
class SomePluginTestCase(PluginTester):
activate = None # set this to --with-yourplugin, etc
plugins = [] # list of plugin instances
def makeSuite(self):
class SomeTest(unittest.TestCase):
def runTest(self):
raise ValueError("Now do something, plugin!")
return unittest.TestSuite([SomeTest()])
class TestPluginTester(unittest.TestCase):
def _runPluginTest(self, test_case):
loader = unittest.TestLoader()
suite = loader.loadTestsFromTestCase(test_case)
res = unittest.TestResult()
suite(res)
return res
def testPluginTesterExecsPlugin(self):
called = []
class MockExecPlugin(StubPlugin):
def configure(self, options, conf):
called.append('configure')
class MockExecTestCase(SomePluginTestCase, unittest.TestCase):
activate = '--with-mockexec'
plugins = [MockExecPlugin()]
def test_something_anything(self):
# here is where the test case would test
# that the plugin interacted with stub tests
pass
res = self._runPluginTest(MockExecTestCase)
eq_(res.testsRun, 1)
eq_(called[0], 'configure')
if __name__ == '__main__':
unittest.main()
|
DESHRAJ/fjord
|
vendor/packages/nose/functional_tests/test_plugintest.py
|
Python
|
bsd-3-clause
| 1,678
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from unittest import mock
import snapcraft
from snapcraft import tests
from snapcraft.plugins import make
class MakePluginTestCase(tests.TestCase):
def setUp(self):
super().setUp()
class Options:
makefile = None
make_parameters = []
make_install_var = 'DESTDIR'
disable_parallel = False
artifacts = []
self.options = Options()
self.project_options = snapcraft.ProjectOptions()
patcher = mock.patch('snapcraft.repo.Ubuntu')
self.ubuntu_mock = patcher.start()
self.addCleanup(patcher.stop)
def test_schema(self):
schema = make.MakePlugin.schema()
properties = schema['properties']
self.assertTrue('makefile' in properties,
'Expected "makefile" to be included in properties')
self.assertTrue(
'make-parameters' in properties,
'Expected "make-parameters" to be included in properties')
self.assertTrue(
'make-install-var' in properties,
'Expected "make-install-var" to be included in properties')
makefile = properties['makefile']
self.assertTrue('type' in makefile,
'Expected "type" to be included in "makefile"')
makefile_type = makefile['type']
self.assertEqual(makefile_type, 'string',
'Expected "makefile" "type" to be "string", but it '
'was "{}"'.format(makefile_type))
make_parameters = properties['make-parameters']
self.assertTrue('type' in make_parameters,
'Expected "type" to be included in "make-parameters"')
make_parameters_type = make_parameters['type']
self.assertEqual(
make_parameters_type, 'array',
'Expected "make-parameters" "type" to be "array", but it '
'was "{}"'.format(make_parameters_type))
make_install_var = properties['make-install-var']
self.assertTrue('type' in make_install_var,
'Expected "type" to be included in "make-install-var"')
make_install_var_type = make_install_var['type']
self.assertEqual(
make_install_var_type, 'string',
'Expected "make-install-var" "type" to be "string", but it '
'was "{}"'.format(make_install_var_type))
make_install_var_default = make_install_var['default']
self.assertEqual(
make_install_var_default, 'DESTDIR',
'Expected "make-install-var" "default" to be "DESTDIR", but it '
'was "{}"'.format(make_install_var_default))
build_properties = schema['build-properties']
self.assertEqual(3, len(build_properties))
self.assertTrue('makefile' in build_properties)
self.assertTrue('make-parameters' in build_properties)
self.assertTrue('make-install-var' in build_properties)
@mock.patch.object(make.MakePlugin, 'run')
def test_build(self, run_mock):
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
plugin.build()
self.assertEqual(2, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-j2'], env=None),
mock.call(['make', 'install',
'DESTDIR={}'.format(plugin.installdir)], env=None)
])
@mock.patch.object(make.MakePlugin, 'run')
def test_build_disable_parallel(self, run_mock):
self.options.disable_parallel = True
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
plugin.build()
self.assertEqual(2, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-j1'], env=None),
mock.call(['make', 'install',
'DESTDIR={}'.format(plugin.installdir)], env=None)
])
@mock.patch.object(make.MakePlugin, 'run')
def test_build_makefile(self, run_mock):
self.options.makefile = 'makefile.linux'
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
plugin.build()
self.assertEqual(2, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-f', 'makefile.linux', '-j2'], env=None),
mock.call(['make', '-f', 'makefile.linux', 'install',
'DESTDIR={}'.format(plugin.installdir)], env=None)
])
@mock.patch.object(make.MakePlugin, 'run')
def test_build_install_var(self, run_mock):
self.options.make_install_var = 'PREFIX'
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
plugin.build()
self.assertEqual(2, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-j2'], env=None),
mock.call(['make', 'install',
'PREFIX={}'.format(plugin.installdir)], env=None)
])
@mock.patch.object(make.MakePlugin, 'run')
@mock.patch('snapcraft.file_utils.link_or_copy_tree')
@mock.patch('snapcraft.file_utils.link_or_copy')
def test_build_artifacts(self, link_or_copy_mock,
link_or_copy_tree_mock, run_mock):
self.options.artifacts = ['dir_artifact', 'file_artifact']
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(os.path.join(plugin.builddir, 'dir_artifact'))
plugin.build()
self.assertEqual(1, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-j2'], env=None),
])
self.assertEqual(1, link_or_copy_mock.call_count)
link_or_copy_mock.assert_has_calls([
mock.call(
os.path.join(plugin.builddir, 'file_artifact'),
os.path.join(plugin.installdir, 'file_artifact'),
)])
self.assertEqual(1, link_or_copy_tree_mock.call_count)
link_or_copy_tree_mock.assert_has_calls([
mock.call(
os.path.join(plugin.builddir, 'dir_artifact'),
os.path.join(plugin.installdir, 'dir_artifact'),
)])
@mock.patch.object(make.MakePlugin, 'run')
def test_make_with_env(self, run_mock):
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
env = {'foo': 'bar'}
plugin.make(env=env)
self.assertEqual(2, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-j2'], env=env),
mock.call(['make', 'install',
'DESTDIR={}'.format(plugin.installdir)], env=env)
])
|
tsdgeos/snapcraft
|
snapcraft/tests/test_plugin_make.py
|
Python
|
gpl-3.0
| 7,789
|
# Slicing code
# Courtesy: Stackoverflow and many other sites
from glob import iglob
import shutil
import os
import sys
print "\nHere we go"
print "*************\n"
""" FUNCTION DEFINITIONS """
############################
"""Find a string between two strings"""
def find_between(s, first, last):
try:
start = s.index(first)+ len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
#print "error"
return ""
'''
# Testing function find_between
s = "123vineeshabc"
a = find_between(s, "123", "abc")
print a
'''
"""Print the contents of a file"""
def print_file(file_name):
f_1 = open (file_name)
for line in f_1:
print line
'''Remove spaces in a list of strings'''
def rem_space(list_name):
while True:
try:
list_name.remove("")
except ValueError:
#print "No blank space in the string"
break
pass
""" Find out the modules which gives certain signals as output """
def locate_modules(verilog_file, output_signal):
lines = open(verilog_file, 'rt').read()
modules = []
k=0
i=0
l=0
    with open(verilog_file) as f:
for line in f:
if i==1:
"""Inside the module instantiation"""
m=line.split()[1]
#print "The module name is %s" %(m)
#print "output_signal is %s" %(output_signal)
i=0
k=1
#modules.append(line.split()[1])
#print ("inside module")
if k==1:
#print "k==1"
"""Extracting the module name"""
if l==1:
#print "l==1"
print find_between(line,"(",")")
if(find_between(line,"(",")")==output_signal):
#rem_space(modules.append(m))
modules.append(m)
print"The module list %s" %(modules)
i=0
k=0
elif "End of outputs" in line:
#print "End of outputs"
l=0
k=0
#print "l=0"
elif "Outputs" in line:
l=1
#print "Outputs detected"
else:
#print"Neither outputs not End of outputs"
#print line
l=0
k=1
if "Module instantiation" in line:
i = 1
k=0
l=0
#print "Module instantiation"
elif ");" in line:
i = 0
k=0
l=0
#print "Module ends"
return modules
"""Function to return the inputs and outputs of a module
Inputs: The name of MUT (Add the verilog file name later)
Outputs: Two lists inputs_test_module and outputs_test_module"""
def find_inputs_of_module(verilog_file, module_name):
lines = open(verilog_file, 'rt').read()
""" Extracting inputs and outputs of module to be tested """
inputs_test_module = []
outputs_test_module = []
k = 0
i = 0
    with open(verilog_file) as f:
for line in f:
if module_name in line:
i = 1
elif ");" in line:
i = 0
if i == 1:
if "Inputs" in line:
k = 1
elif "Outputs" in line:
k = 2
if k == 1:
#m = find_between (line, "(", ")")
#print m
inputs_test_module.append (find_between (line, "(", ")"))
if "End of inputs" in line:
k = 0
elif k == 2:
#m = find_between (line, "(", ")")
#print m
outputs_test_module.append (find_between (line, "(", ")"))
if "End of outputs" in line:
k = 0
""" Removing spaces in the strings 'inputs_test_module' and 'outputs_test_module """
rem_space (inputs_test_module)
#print inputs_test_module
rem_space (outputs_test_module)
#print outputs_test_module
return inputs_test_module, outputs_test_module
""" MAIN()"""
##############
module = raw_input ("Give the name of the module to be tested:")
"""Get the inputs and outputs of the MUT"""
print find_inputs_of_module("full_adder.v", module)
inputs_test_module,outputs_test_module=find_inputs_of_module("full_adder.v", module)
#locate_modules("full_adder.v","s3")
modules=locate_modules("full_adder.v",inputs_test_module[0])
print modules
|
vineeshvs/research
|
slice_backup_00_13_26_05_15_Functions_to_get_the_modulesAndInputQueueAreDefined_NeedToRemoveDuplicates .py
|
Python
|
gpl-3.0
| 4,581
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from designate.openstack.common import log as logging
from designate import schema
from designate.central import rpcapi as central_rpcapi
from designate.api.v1 import load_values
LOG = logging.getLogger(__name__)
central_api = central_rpcapi.CentralAPI()
blueprint = flask.Blueprint('domains', __name__)
domain_schema = schema.Schema('v1', 'domain')
domains_schema = schema.Schema('v1', 'domains')
servers_schema = schema.Schema('v1', 'servers')
@blueprint.route('/schemas/domain', methods=['GET'])
def get_domain_schema():
return flask.jsonify(domain_schema.raw)
@blueprint.route('/schemas/domains', methods=['GET'])
def get_domains_schema():
return flask.jsonify(domains_schema.raw)
@blueprint.route('/domains', methods=['POST'])
def create_domain():
valid_attributes = ['name', 'email', 'ttl', 'description']
context = flask.request.environ.get('context')
values = load_values(flask.request, valid_attributes)
domain_schema.validate(values)
domain = central_api.create_domain(context, values)
response = flask.jsonify(domain_schema.filter(domain))
response.status_int = 201
response.location = flask.url_for('.get_domain', domain_id=domain['id'])
return response
@blueprint.route('/domains', methods=['GET'])
def get_domains():
context = flask.request.environ.get('context')
    # Modified this line to link properly to the needed function.
domains = central_api.find_domains_custom(context)
return flask.jsonify(domains_schema.filter({'domains': domains}))
@blueprint.route('/domains/<uuid:domain_id>', methods=['GET'])
def get_domain(domain_id):
context = flask.request.environ.get('context')
domain = central_api.get_domain(context, domain_id)
return flask.jsonify(domain_schema.filter(domain))
@blueprint.route('/domains/<uuid:domain_id>', methods=['PUT'])
def update_domain(domain_id):
context = flask.request.environ.get('context')
values = flask.request.json
domain = central_api.get_domain(context, domain_id)
domain = domain_schema.filter(domain)
domain.update(values)
domain_schema.validate(domain)
domain = central_api.update_domain(context, domain_id, values)
return flask.jsonify(domain_schema.filter(domain))
@blueprint.route('/domains/<uuid:domain_id>', methods=['DELETE'])
def delete_domain(domain_id):
context = flask.request.environ.get('context')
central_api.delete_domain(context, domain_id)
return flask.Response(status=200)
@blueprint.route('/domains/<uuid:domain_id>/servers', methods=['GET'])
def get_domain_servers(domain_id):
context = flask.request.environ.get('context')
servers = central_api.get_domain_servers(context, domain_id)
return flask.jsonify(servers_schema.filter({'servers': servers}))
|
muraliselva10/designate
|
designate/api/v1/domains.py
|
Python
|
apache-2.0
| 3,421
|
"""Default Update.suggestion to unspecified
Revision ID: 18cad09c8ab6
Revises: 387fda7a1ff0
Create Date: 2013-10-15 17:44:04.526374
"""
# revision identifiers, used by Alembic.
revision = '18cad09c8ab6'
down_revision = '387fda7a1ff0'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import scoped_session, sessionmaker
from zope.sqlalchemy import ZopeTransactionExtension
import transaction
from bodhi.models import Base, Update, UpdateSuggestion
def upgrade():
engine = op.get_bind()
Session = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Session.configure(bind=engine)
db = Session()
Base.metadata.bind = engine
with transaction.manager:
updates = db.query(Update)
for u in updates:
if u.suggest is None:
u.suggest = UpdateSuggestion.unspecified
def downgrade():
# There's really nothing to downgrade here
pass
|
farhaanbukhsh/bodhi
|
alembic/versions/18cad09c8ab6_default_update_sugge.py
|
Python
|
gpl-2.0
| 941
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-27 19:39
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('studies', '0029_auto_20170825_1505'),
('studies', '0024_merge_20170823_1352'),
]
operations = [
]
|
pattisdr/lookit-api
|
studies/migrations/0030_merge_20170827_1539.py
|
Python
|
mit
| 339
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-29 13:51
from __future__ import unicode_literals
from django.db import migrations
from multiselectfield import MultiSelectField
class CommaSeparatedCharField(MultiSelectField):
pass
class Migration(migrations.Migration):
dependencies = [
('ideascube', '0013_auto_20161028_1044'),
]
operations = [
migrations.AddField(
model_name='user',
name='disabilities',
field=CommaSeparatedCharField(blank=True, choices=[('visual', 'Visual'), ('auditive', 'Auditive'), ('physical', 'Physical'), ('cognitive', 'Cognitive'), ('mental', 'Mental')], max_length=128, verbose_name='Disabilities'),
),
]
|
ideascube/ideascube
|
ideascube/migrations/0014_user_disabilities.py
|
Python
|
agpl-3.0
| 737
|
"""Provide the functionality to group entities."""
import asyncio
import logging
from typing import Any, Iterable, List, Optional, cast
import voluptuous as vol
from homeassistant import core as ha
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_NAME,
CONF_ICON,
CONF_NAME,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
SERVICE_RELOAD,
STATE_CLOSED,
STATE_HOME,
STATE_LOCKED,
STATE_NOT_HOME,
STATE_OFF,
STATE_OK,
STATE_ON,
STATE_OPEN,
STATE_PROBLEM,
STATE_UNKNOWN,
STATE_UNLOCKED,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.loader import bind_hass
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
DOMAIN = "group"
GROUP_ORDER = "group_order"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CONF_ENTITIES = "entities"
CONF_ALL = "all"
ATTR_ADD_ENTITIES = "add_entities"
ATTR_AUTO = "auto"
ATTR_ENTITIES = "entities"
ATTR_OBJECT_ID = "object_id"
ATTR_ORDER = "order"
ATTR_ALL = "all"
SERVICE_SET = "set"
SERVICE_REMOVE = "remove"
_LOGGER = logging.getLogger(__name__)
def _conf_preprocess(value):
"""Preprocess alternative configuration formats."""
if not isinstance(value, dict):
value = {CONF_ENTITIES: value}
return value
GROUP_SCHEMA = vol.All(
vol.Schema(
{
vol.Optional(CONF_ENTITIES): vol.Any(cv.entity_ids, None),
CONF_NAME: cv.string,
CONF_ICON: cv.icon,
CONF_ALL: cv.boolean,
}
)
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({cv.match_all: vol.All(_conf_preprocess, GROUP_SCHEMA)})},
extra=vol.ALLOW_EXTRA,
)
# List of ON/OFF state tuples for groupable states
_GROUP_TYPES = [
(STATE_ON, STATE_OFF),
(STATE_HOME, STATE_NOT_HOME),
(STATE_OPEN, STATE_CLOSED),
(STATE_LOCKED, STATE_UNLOCKED),
(STATE_PROBLEM, STATE_OK),
]
def _get_group_on_off(state):
"""Determine the group on/off states based on a state."""
for states in _GROUP_TYPES:
if state in states:
return states
return None, None
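# Illustrative example: _get_group_on_off(STATE_HOME) returns
# (STATE_HOME, STATE_NOT_HOME); an unrecognized state returns (None, None).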
@bind_hass
def is_on(hass, entity_id):
"""Test if the group state is in its ON-state."""
state = hass.states.get(entity_id)
if state:
group_on, _ = _get_group_on_off(state.state)
# If we found a group_type, compare to ON-state
return group_on is not None and state.state == group_on
return False
@bind_hass
def expand_entity_ids(hass: HomeAssistantType, entity_ids: Iterable[Any]) -> List[str]:
"""Return entity_ids with group entity ids replaced by their members.
Async friendly.
"""
found_ids: List[str] = []
for entity_id in entity_ids:
if not isinstance(entity_id, str) or entity_id in (
ENTITY_MATCH_NONE,
ENTITY_MATCH_ALL,
):
continue
entity_id = entity_id.lower()
try:
# If entity_id points at a group, expand it
domain, _ = ha.split_entity_id(entity_id)
if domain == DOMAIN:
child_entities = get_entity_ids(hass, entity_id)
if entity_id in child_entities:
child_entities = list(child_entities)
child_entities.remove(entity_id)
found_ids.extend(
ent_id
for ent_id in expand_entity_ids(hass, child_entities)
if ent_id not in found_ids
)
else:
if entity_id not in found_ids:
found_ids.append(entity_id)
except AttributeError:
# Raised by split_entity_id if entity_id is not a string
pass
return found_ids
@bind_hass
def get_entity_ids(
hass: HomeAssistantType, entity_id: str, domain_filter: Optional[str] = None
) -> List[str]:
"""Get members of this group.
Async friendly.
"""
group = hass.states.get(entity_id)
if not group or ATTR_ENTITY_ID not in group.attributes:
return []
entity_ids = group.attributes[ATTR_ENTITY_ID]
if not domain_filter:
return cast(List[str], entity_ids)
domain_filter = f"{domain_filter.lower()}."
return [ent_id for ent_id in entity_ids if ent_id.startswith(domain_filter)]
@bind_hass
def groups_with_entity(hass: HomeAssistantType, entity_id: str) -> List[str]:
"""Get all groups that contain this entity.
Async friendly.
"""
if DOMAIN not in hass.data:
return []
groups = []
for group in hass.data[DOMAIN].entities:
if entity_id in group.tracking:
groups.append(group.entity_id)
return groups
async def async_setup(hass, config):
"""Set up all groups found defined in the configuration."""
component = hass.data.get(DOMAIN)
if component is None:
component = hass.data[DOMAIN] = EntityComponent(_LOGGER, DOMAIN, hass)
await _async_process_config(hass, config, component)
async def reload_service_handler(service):
"""Remove all user-defined groups and load new ones from config."""
auto = list(filter(lambda e: not e.user_defined, component.entities))
conf = await component.async_prepare_reload()
if conf is None:
return
await _async_process_config(hass, conf, component)
await component.async_add_entities(auto)
hass.services.async_register(
DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({})
)
service_lock = asyncio.Lock()
async def locked_service_handler(service):
"""Handle a service with an async lock."""
async with service_lock:
await groups_service_handler(service)
async def groups_service_handler(service):
"""Handle dynamic group service functions."""
object_id = service.data[ATTR_OBJECT_ID]
entity_id = f"{DOMAIN}.{object_id}"
group = component.get_entity(entity_id)
# new group
if service.service == SERVICE_SET and group is None:
entity_ids = (
service.data.get(ATTR_ENTITIES)
or service.data.get(ATTR_ADD_ENTITIES)
or None
)
extra_arg = {
attr: service.data[attr]
for attr in (ATTR_ICON,)
if service.data.get(attr) is not None
}
await Group.async_create_group(
hass,
service.data.get(ATTR_NAME, object_id),
object_id=object_id,
entity_ids=entity_ids,
user_defined=False,
mode=service.data.get(ATTR_ALL),
**extra_arg,
)
return
if group is None:
_LOGGER.warning("%s:Group '%s' doesn't exist!", service.service, object_id)
return
# update group
if service.service == SERVICE_SET:
need_update = False
if ATTR_ADD_ENTITIES in service.data:
delta = service.data[ATTR_ADD_ENTITIES]
entity_ids = set(group.tracking) | set(delta)
await group.async_update_tracked_entity_ids(entity_ids)
if ATTR_ENTITIES in service.data:
entity_ids = service.data[ATTR_ENTITIES]
await group.async_update_tracked_entity_ids(entity_ids)
if ATTR_NAME in service.data:
group.name = service.data[ATTR_NAME]
need_update = True
if ATTR_ICON in service.data:
group.icon = service.data[ATTR_ICON]
need_update = True
if ATTR_ALL in service.data:
group.mode = all if service.data[ATTR_ALL] else any
need_update = True
if need_update:
group.async_write_ha_state()
return
# remove group
if service.service == SERVICE_REMOVE:
await component.async_remove_entity(entity_id)
hass.services.async_register(
DOMAIN,
SERVICE_SET,
locked_service_handler,
schema=vol.All(
vol.Schema(
{
vol.Required(ATTR_OBJECT_ID): cv.slug,
vol.Optional(ATTR_NAME): cv.string,
vol.Optional(ATTR_ICON): cv.string,
vol.Optional(ATTR_ALL): cv.boolean,
vol.Exclusive(ATTR_ENTITIES, "entities"): cv.entity_ids,
vol.Exclusive(ATTR_ADD_ENTITIES, "entities"): cv.entity_ids,
}
)
),
)
hass.services.async_register(
DOMAIN,
SERVICE_REMOVE,
groups_service_handler,
schema=vol.Schema({vol.Required(ATTR_OBJECT_ID): cv.slug}),
)
return True
async def _async_process_config(hass, config, component):
"""Process group configuration."""
for object_id, conf in config.get(DOMAIN, {}).items():
name = conf.get(CONF_NAME, object_id)
entity_ids = conf.get(CONF_ENTITIES) or []
icon = conf.get(CONF_ICON)
mode = conf.get(CONF_ALL)
# Don't create tasks and await them all. The order is important as
# groups get a number based on creation order.
await Group.async_create_group(
hass, name, entity_ids, icon=icon, object_id=object_id, mode=mode
)
class Group(Entity):
"""Track a group of entity ids."""
def __init__(
self,
hass,
name,
order=None,
icon=None,
user_defined=True,
entity_ids=None,
mode=None,
):
"""Initialize a group.
This Object has factory function for creation.
"""
self.hass = hass
self._name = name
self._state = STATE_UNKNOWN
self._icon = icon
if entity_ids:
self.tracking = tuple(ent_id.lower() for ent_id in entity_ids)
else:
self.tracking = ()
self.group_on = None
self.group_off = None
self.user_defined = user_defined
self.mode = any
if mode:
self.mode = all
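        # 'mode' decides how member states combine: all() requires every
        # member to be in the ON-state for the group to be ON, while the
        # default any() needs only a single ON member.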
self._order = order
self._assumed_state = False
self._async_unsub_state_changed = None
@staticmethod
def create_group(
hass,
name,
entity_ids=None,
user_defined=True,
icon=None,
object_id=None,
mode=None,
):
"""Initialize a group."""
return asyncio.run_coroutine_threadsafe(
Group.async_create_group(
hass, name, entity_ids, user_defined, icon, object_id, mode
),
hass.loop,
).result()
@staticmethod
async def async_create_group(
hass,
name,
entity_ids=None,
user_defined=True,
icon=None,
object_id=None,
mode=None,
):
"""Initialize a group.
This method must be run in the event loop.
"""
hass.data.setdefault(GROUP_ORDER, 0)
group = Group(
hass,
name,
order=hass.data[GROUP_ORDER],
icon=icon,
user_defined=user_defined,
entity_ids=entity_ids,
mode=mode,
)
# Keep track of the group order without iterating
# every state in the state machine every time
# we setup a new group
hass.data[GROUP_ORDER] += 1
group.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, object_id or name, hass=hass
)
# If called before the platform async_setup is called (test cases)
component = hass.data.get(DOMAIN)
if component is None:
component = hass.data[DOMAIN] = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([group], True)
return group
@property
def should_poll(self):
"""No need to poll because groups will update themselves."""
return False
@property
def name(self):
"""Return the name of the group."""
return self._name
@name.setter
def name(self, value):
"""Set Group name."""
self._name = value
@property
def state(self):
"""Return the state of the group."""
return self._state
@property
def icon(self):
"""Return the icon of the group."""
return self._icon
@icon.setter
def icon(self, value):
"""Set Icon for group."""
self._icon = value
@property
def state_attributes(self):
"""Return the state attributes for the group."""
data = {ATTR_ENTITY_ID: self.tracking, ATTR_ORDER: self._order}
if not self.user_defined:
data[ATTR_AUTO] = True
return data
@property
def assumed_state(self):
"""Test if any member has an assumed state."""
return self._assumed_state
def update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs."""
asyncio.run_coroutine_threadsafe(
self.async_update_tracked_entity_ids(entity_ids), self.hass.loop
).result()
async def async_update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs.
This method must be run in the event loop.
"""
await self.async_stop()
self.tracking = tuple(ent_id.lower() for ent_id in entity_ids)
self.group_on, self.group_off = None, None
await self.async_update_ha_state(True)
self.async_start()
@callback
def async_start(self):
"""Start tracking members.
This method must be run in the event loop.
"""
if self._async_unsub_state_changed is None:
self._async_unsub_state_changed = async_track_state_change_event(
self.hass, self.tracking, self._async_state_changed_listener
)
async def async_stop(self):
"""Unregister the group from Home Assistant.
This method must be run in the event loop.
"""
if self._async_unsub_state_changed:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
async def async_update(self):
"""Query all members and determine current group state."""
self._state = STATE_UNKNOWN
self._async_update_group_state()
async def async_added_to_hass(self):
"""Handle addition to Home Assistant."""
if self.tracking:
self.async_start()
async def async_will_remove_from_hass(self):
"""Handle removal from Home Assistant."""
if self._async_unsub_state_changed:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
async def _async_state_changed_listener(self, event):
"""Respond to a member state changing.
This method must be run in the event loop.
"""
# removed
if self._async_unsub_state_changed is None:
return
self._async_update_group_state(event.data.get("new_state"))
self.async_write_ha_state()
@property
def _tracking_states(self):
"""Return the states that the group is tracking."""
states = []
for entity_id in self.tracking:
state = self.hass.states.get(entity_id)
if state is not None:
states.append(state)
return states
@callback
def _async_update_group_state(self, tr_state=None):
"""Update group state.
Optionally you can provide the only state changed since last update
allowing this method to take shortcuts.
This method must be run in the event loop.
"""
# To store current states of group entities. Might not be needed.
states = None
gr_state = self._state
gr_on = self.group_on
gr_off = self.group_off
# We have not determined type of group yet
if gr_on is None:
if tr_state is None:
states = self._tracking_states
for state in states:
gr_on, gr_off = _get_group_on_off(state.state)
if gr_on is not None:
break
else:
gr_on, gr_off = _get_group_on_off(tr_state.state)
if gr_on is not None:
self.group_on, self.group_off = gr_on, gr_off
# We cannot determine state of the group
if gr_on is None:
return
if tr_state is None or (
(gr_state == gr_on and tr_state.state == gr_off)
or (gr_state == gr_off and tr_state.state == gr_on)
or tr_state.state not in (gr_on, gr_off)
):
if states is None:
states = self._tracking_states
if self.mode(state.state == gr_on for state in states):
self._state = gr_on
else:
self._state = gr_off
elif tr_state.state in (gr_on, gr_off):
self._state = tr_state.state
if (
tr_state is None
or self._assumed_state
and not tr_state.attributes.get(ATTR_ASSUMED_STATE)
):
if states is None:
states = self._tracking_states
self._assumed_state = self.mode(
state.attributes.get(ATTR_ASSUMED_STATE) for state in states
)
elif tr_state.attributes.get(ATTR_ASSUMED_STATE):
self._assumed_state = True
|
mKeRix/home-assistant
|
homeassistant/components/group/__init__.py
|
Python
|
mit
| 17,952
|
class FailedAssumption(Exception):
    '''Raised when an assumption (assume) fails.'''
class MissingStrategyError(Exception):
pass
class InvalidPartials(AssertionError):
def __init__(self, s, e):
super().__init__('{{{}}}: {}'.format(s, e))
|
BenSimner/speccer
|
speccer/_errors.py
|
Python
|
mit
| 233
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnGatewaysOperations:
"""VpnGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
gateway_name: str,
**kwargs: Any
) -> "_models.VpnGateway":
"""Retrieves the details of a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.VpnGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.VpnGateway",
**kwargs: Any
) -> "_models.VpnGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.VpnGateway",
**kwargs: Any
) -> AsyncLROPoller["_models.VpnGateway"]:
"""Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
        :param vpn_gateway_parameters: Parameters supplied to create or update a virtual wan vpn
gateway.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2020_03_01.models.VpnGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
vpn_gateway_parameters=vpn_gateway_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
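    # --- Hedged usage note (added): `begin_create_or_update` returns an
    # AsyncLROPoller, so a caller typically awaits the poller's result:
    #
    #     poller = await client.vpn_gateways.begin_create_or_update(
    #         "my-rg", "my-gateway", gateway_params)
    #     gateway = await poller.result()   # resolves to a VpnGateway model
    #
    # `client` and `gateway_params` are hypothetical names.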
async def update_tags(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.VpnGateway":
"""Updates virtual wan vpn gateway tags.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.VpnGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
gateway_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def _reset_initial(
self,
resource_group_name: str,
gateway_name: str,
**kwargs: Any
) -> Optional["_models.VpnGateway"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VpnGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._reset_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'} # type: ignore
async def begin_reset(
self,
resource_group_name: str,
gateway_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.VpnGateway"]:
"""Resets the primary of the vpn gateway in the specified resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ListVpnGatewaysResult"]:
"""Lists all the VpnGateways in a resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ListVpnGatewaysResult"]:
"""Lists all the VpnGateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'} # type: ignore
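# --- Hedged usage sketch (added; not generated code). It assumes the usual
# pairing with azure-identity's async credential and the aio
# NetworkManagementClient; the subscription id and resource names are
# placeholders.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient
    async def _demo() -> None:
        async with DefaultAzureCredential() as credential:
            async with NetworkManagementClient(credential, "<subscription-id>") as client:
                # Plain GET of a single gateway.
                gateway = await client.vpn_gateways.get("my-rg", "my-gateway")
                print(gateway.name)
                # Paged listing across the subscription.
                async for gw in client.vpn_gateways.list():
                    print(gw.name)
    asyncio.run(_demo())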
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_vpn_gateways_operations.py
|
Python
|
mit
| 32,520
|
#!/usr/bin/env python3
"""unit tests for landlab.io.obj module"""
import pathlib
import pytest
from landlab import HexModelGrid, RasterModelGrid
from landlab.io import write_obj
LITTLE_HEX_OBJ = """# landlabgrid
#
g landlabgrid
v 1.0 0.0 0.0
v 3.0 0.0 0.0
v 0.0 1.732051 0.0
v 2.0 1.732051 1.0
v 4.0 1.732051 0.0
v 1.0 3.464102 0.0
v 3.0 3.464102 0.0
f 4// 1// 2//
f 4// 3// 1//
f 5// 4// 2//
f 6// 3// 4//
f 7// 4// 5//
f 7// 6// 4//
"""
LITTLE_RAST_OBJ = """# landlabgrid
#
g landlabgrid
v 0.0 0.0 0.0
v 2.0 0.0 0.0
v 4.0 0.0 0.0
v 0.0 2.0 0.0
v 2.0 2.0 1.0
v 4.0 2.0 0.0
v 0.0 4.0 0.0
v 2.0 4.0 0.0
v 4.0 4.0 0.0
f 5// 4// 1//
f 1// 2// 5//
f 6// 5// 2//
f 2// 3// 6//
f 8// 7// 4//
f 4// 5// 8//
f 9// 8// 5//
f 5// 6// 9//
"""
def test_write_to_filelike(tmpdir):
grid = RasterModelGrid((3, 3), xy_spacing=2.0)
z = grid.add_zeros("topographic__elevation", at="node")
z[4] = 1.0
with tmpdir.as_cwd():
with open("test_quad.obj", "w") as fp:
write_obj(fp, grid)
with open("test_quad.obj", "r") as fp:
assert fp.read() == LITTLE_RAST_OBJ
@pytest.mark.parametrize("fname", (pathlib.Path("test_hex.obj"), "test_hex.obj"))
def test_write_hex_to_path(tmpdir, fname):
grid = HexModelGrid((3, 2), spacing=2.0)
z = grid.add_zeros("topographic__elevation", at="node")
z[3] = 1.0
with tmpdir.as_cwd():
actual = write_obj(fname, grid)
assert actual == fname
with open(fname, "r") as fp:
assert fp.read() == LITTLE_HEX_OBJ
def test_write_raster(tmpdir):
grid = RasterModelGrid((3, 3), xy_spacing=2.0)
z = grid.add_zeros("topographic__elevation", at="node")
z[4] = 1.0
with tmpdir.as_cwd():
write_obj("test_quad.obj", grid)
with open("test_quad.obj", "r") as fp:
assert fp.read() == LITTLE_RAST_OBJ
def test_field_name(tmpdir):
grid = RasterModelGrid((3, 3), xy_spacing=2.0)
z = grid.add_zeros("z", at="node")
z[4] = 1.0
with tmpdir.as_cwd():
write_obj("test_quad.obj", grid, field_for_z="z")
with open("test_quad.obj", "r") as fp:
assert fp.read() == LITTLE_RAST_OBJ
def test_clobber(tmpdir):
grid = RasterModelGrid((3, 3), xy_spacing=2.0)
z = grid.add_zeros("topographic__elevation", at="node")
z[4] = 1.0
with tmpdir.as_cwd():
with open("test_quad.obj", "w") as fp:
pass
with pytest.raises(ValueError):
write_obj("test_quad.obj", grid)
write_obj("test_quad.obj", grid, clobber=True)
with open("test_quad.obj", "r") as fp:
assert fp.read() == LITTLE_RAST_OBJ
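# --- Hedged sketch (added): `write_obj` accepts file-like objects (see
# test_write_to_filelike above), so an in-memory buffer should work too. ---
def test_write_to_stringio():
    import io
    grid = RasterModelGrid((3, 3), xy_spacing=2.0)
    z = grid.add_zeros("topographic__elevation", at="node")
    z[4] = 1.0
    buffer = io.StringIO()
    write_obj(buffer, grid)
    assert buffer.getvalue() == LITTLE_RAST_OBJ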
|
cmshobe/landlab
|
tests/io/test_write_obj.py
|
Python
|
mit
| 2,651
|
# Copyright 2014 Tom SF Haines
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import xml.sax.saxutils as saxutils
from .login import app as login
def app(rfam, response):
# If the user is not logged in show them the login page...
    if response.user is None or response.project is None:
login(rfam, response)
return
# Access the project...
db = rfam.proj(response.project)
if 'project.json' in db:
p = db['project.json'].read()
else:
p = None
    if p is None:
        p = {
            'title': rfam.getLanguage(response.user)['default_title'],
            'description': '',
            'roles': {},
            'ext_assets': {},
            'license': rfam.getLanguage(response.user)['default_license'],
            'default': rfam.getDefault(),
        }
db.get_root().new('project.json', p)
# Prepare the defaults part of the page...
choice_default = rfam.defaultChoice(p['default'])
# Prepare the add roles part of the page...
choice_people = rfam.userChoice(response.project)
# Prepare the roles part of the page...
roles = []
for key, value in sorted(p['roles'].items(), key = lambda p: p[1]['order']):
user_choice = rfam.userChoice(response.project, value['user'])
payload = {'ident' : key, 'role' : saxutils.escape(value['role']), 'user' : user_choice}
roles.append(rfam.template('role', payload, response))
roles = '\n'.join(roles)
# Prepare the external assets part of the page...
assets = []
for key, value in sorted(p['ext_assets'].items(), key=lambda p: p[1]['description']):
payload = {'ident' : key, 'description' : saxutils.escape(value['description']), 'license' : saxutils.escape(value['license']), 'origin' : saxutils.escape(value['origin'])}
assets.append(rfam.template('asset.ext', payload, response))
assets = '\n'.join(assets)
# Prepare the automatic credits button...
credits_button = rfam.template('button.credits', {}, response)
# Show the project page...
head = '<link rel="stylesheet" href="/stylesheets/project.css"/>\n<script src="/javascript/project.js"></script>'
    payload = {
        'title': rfam.getLanguage(response.user)['project'],
        'head': head,
        'project_title': saxutils.escape(p['title']),
        'project_description': saxutils.escape(p['description']),
        'project_license': saxutils.escape(p['license']),
        'roles': roles,
        'choice_people': choice_people,
        'ext_assets': assets,
        'choice_default': choice_default,
        'header_extra': credits_button,
    }
html = rfam.template('project', payload, response)
response.append(html)
response.setHTML()
|
thaines/rfam
|
bin/project.py
|
Python
|
gpl-3.0
| 3,105
|
#!/usr/bin/env cctools_python
# CCTOOLS_PYTHON_VERSION 2.7 2.6
import os
import sys
import math
import tempfile
import warnings
import subprocess
def make_path(path):
try:
os.makedirs(path)
except:
pass
def find_gnuplot_version():
return float(os.popen("gnuplot --version | awk '{print $2}'").read())
def source_exists(path):
if not os.path.isdir(path):
print "source directory does not exist"
sys.exit(1)
def usage():
print "\nUsage: " + __file__ + " data_path destination_path workflow_name"
print "\nWhere:"
print "\tdata_path\t\tThe path to the data recorded by the resource_monitor."
print "\tdestination_path\tThe path in which to store the visualization."
print "\tworkflow_name\t\tThe name of the workflow being visualized."
print ""
def get_args():
if len(sys.argv) == 2 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
usage()
sys.exit(0)
if len(sys.argv) != 4:
usage()
sys.exit(1)
source = sys.argv[1]
destination = sys.argv[2]
name = sys.argv[3]
return source, destination, name
def find_summary_paths(source):
summary_paths = []
for r, d, f in os.walk(source):
for files in f:
if files.endswith(".summary"):
summary_paths.append(os.path.join(r, files))
return summary_paths
def load_summaries_by_group(paths):
groups = { '(all)' : [] }
for sp in paths:
data_stream = open(sp, 'r')
summary = {}
for line in data_stream:
data = line.strip().split(':', 2)
data = [x.strip() for x in data]
key = data[0]
value = data[1]
if key == 'bytes_written' or key == 'bytes_read':
value = str(scale_value(data[1], 'MB')) + ' MB'
summary[key] = value
summary['filename'] = os.path.basename(sp)
group_name = summary.get('command').split(' ')[0]
while group_name[0] == '.' or group_name[0] == '/':
group_name = group_name[1:]
        if groups.get(group_name) is None:
groups[group_name] = [summary]
else:
groups[group_name].append(summary)
groups['(all)'].append(summary)
data_stream.close()
return groups
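# --- Illustrative note (added): expected ".summary" format. Each line is
# "key: value"; for example a file containing
#     command: ./simulate --fast
#     wall_time: 12 s
#     bytes_read: 1048576 B
# is grouped under "simulate" (the leading "./" is stripped from the command)
# with bytes_read rescaled to "1.0 MB".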
def gnuplot(commands):
child = subprocess.Popen("gnuplot", stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
child.stdin.write("%s\n" % commands)
child.stdin.write("quit\n")
child.stdin.flush()
child.wait()
def gnuplot_to_file(name, commands):
f = file(name, 'w')
f.write(commands)
f.close()
def fill_histogram_table_template(max_sorted, table_path, binwidth, data_path):
n = len(max_sorted)
min_x = max_sorted[0]
max_x = max_sorted[-1]
nbins = math.floor((max_x - min_x)/binwidth) - 2
result = "set table \"" + table_path + "\"\n"
result += "binwidth=" + str(binwidth) + "\n"
if(n < 4 or nbins < 4):
result += "binc(x,w)=x\n"
else:
result += "binc(x,w)=(w*floor(x/w))\n"
result += "plot \"" + data_path + "\" using (binc($1,binwidth)):(1.0) smooth freq\n"
result += "unset table\n\n"
return result
def fill_histogram_template(max_sorted, width, height, image_path, binwidth, resource_name, unit, data_path, table_path, axis_legends):
n = len(max_sorted)
min_x = max_sorted[0]
max_x = max_sorted[-1]
nbins = math.floor((max_x - min_x)/binwidth) - 2
if nbins < 2:
nbins = 1
result = "set terminal png size " + str(width) + "," + str(height) + "\n"
result += "plot \"" + table_path + "\" using 1:(stringcolumn(3) eq \"i\"? $2:1/0) w boxes\n"
result += "min_x = GPVAL_DATA_X_MIN\n"
result += "max_x = GPVAL_DATA_X_MAX\n"
result += "min_y = GPVAL_DATA_Y_MIN\n"
result += "max_y = GPVAL_DATA_Y_MAX\n"
result += "reset\n"
result += "set terminal png size " + str(width) + "," + str(height) + "\n"
result += "plot \"" + table_path + "\" using 1:((stringcolumn(3) eq \"i\") && ($2 == max_y) ? $2:1/0) w boxes\n"
result += "max_pos_x = GPVAL_DATA_X_MAX\n"
result += "reset\n"
result += "set terminal png transparent size " + str(width) + "," + str(height) + " enhanced font 'Times,10'\n"
result += "unset key\n"
result += "unset border\n"
result += "set style line 1 lc 16\n"
result += "set border 1 lc 16\n"
result += "set output \"" + image_path + "\"\n"
result += "binwidth=" + str(binwidth) + "\n"
result += "set style fill solid noborder 0.45\n"
result += "set boxwidth binwidth*0.8 absolute\n"
result += "set yrange [0:(max_y + max_y*0.1)]\n"
result += "unset tics\n"
#handle corner cases for xrange
if nbins == 1:
result += "set xrange [(min_x - 1):(max_x + 2)]\n"
else:
cent = (max_x - min_x)/5.0
quatr = (max_x - min_x + cent)/3
result += "set xrange [" + str(min_x - cent) + ":" + str(max_x + cent) + "]\n"
result += "set xtics out nomirror scale 1\n"
result += "set xtics (floor(min_x), ceil(max_x))\n"
#result += "set xtics ("
#result += ', '.join(['"" %.1f' % x for x in max_sorted])
#result += ")\n"
result += "set xlabel \"" + resource_name.replace('_', ' ')
if unit != " ":
result += " (" + unit + ")"
result += "\"\n"
#result += 'set label gprintf("%%g", %.1f) at first %f, graph -0.2 center front nopoint\n' % (int(min_x), int(min_x))
#result += 'set label gprintf("%%g", %.1f) at first %f, graph -0.2 center front nopoint\n' % (int(max_x), int(max_x))
result += 'set label gprintf("%g", max_y) at max_pos_x,graph 1.0 tc ls 1 center front nopoint\n'
result += 'if (max_y < (100*min_y))'
result += 'set ylabel "Frequency\"; '
result += 'else '
result += 'set ylabel "Frequency (log)"\n'
if not axis_legends:
result += 'set xlabel\n'
result += 'set ylabel\n'
result += "if (max_y < (100*min_y))"
result += "plot \"" + table_path + "\" using 1:(stringcolumn(3) eq \"i\"? $2:1/0) w boxes;"
result += "else "
result += "set yrange [-0.04:(1.1*log10(max_y))];"
result += "plot \"" + table_path + "\" using 1:((stringcolumn(3) eq \"i\" && $2 > 0) ? log10($2):1/0) w boxes\n"
return result
def rule_id_for_task(task):
rule_id = task.get('filename').split('.')
rule_id = rule_id[0].split('-')[-1]
return rule_id
def resource_group_page(name, group_name, resource, width, height, tasks, out_path):
page = "<!doctype html>\n"
page += "<meta name=\"viewport\" content=\"initial-scale=1.0, width=device-width\" />\n"
page += '<link rel="stylesheet" type="text/css" media="screen, projection" href="../../css/style.css" />' + "\n"
page += "<title>" + name + "</title>\n"
page += "<div class=\"content\">\n"
page += "<p> Showing <tt>" + resource.replace('_', ' ') + "</tt> for executable <tt>" + group_name + "</tt> in workflow <tt><a href=\"../../index.html\">" + name + "</a></tt>\n"
page += "<img src=\"../" + resource + "_" + str(width) + "x" + str(height) + "_hist.png\" class=\"center\" />\n"
page += "<table>\n"
columns = min(4, len(tasks))
header = "<th>id</th><th>" + resource.replace('_', ' ') + "</th>"
page += "<tr>"
page += header * columns
page += "</tr>\n"
comp = lambda x,y: cmp(float(x.get(resource).split(' ')[0]), float(y.get(resource).split(' ')[0]))
sorted_tasks = sorted(tasks, comp, reverse=True)
count = 0
for d in sorted_tasks:
if(count % columns == 0):
page += "<tr>"
rule_id = rule_id_for_task(d)
page += "<td><a href=\"../" + rule_id + ".html\">" + rule_id + "</a></td><td>" + str(d.get(resource)) + "</td>"
if(count % columns == (columns - 1)):
page += "</tr>\n"
count += 1
page += "</table>\n"
page += "</div>\n"
index_path = out_path + "/" + resource
make_path(index_path)
index_path += "/" + "index.html"
f = open(index_path, "w")
f.write("%s\n" % page)
f.close()
def compute_binwidth(maximum_value):
if maximum_value > 40:
binwidth = maximum_value/40.0
else:
binwidth = 1
return binwidth
def compute_binwidth_iqr(maximums):
n = len(maximums) - 1
q1 = maximums[int(math.ceil(n / 4.0))]
q3 = maximums[int(math.floor(3*n / 4.0))]
if(q1 >= q3):
return 1
else:
return 2*(q3 - q1)*math.pow(float(n), float(-1)/3)
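# --- Worked example (added): Freedman-Diaconis-style bin width. For the nine
# sorted maximums 1..9: n = 8, q1 = 3, q3 = 7, so the width is
# 2 * (7 - 3) * 8^(-1/3), which is approximately 4.0.
assert abs(compute_binwidth_iqr(range(1, 10)) - 4.0) < 1e-9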
def find_maximums(tasks, resource):
maximums = []
for d in tasks:
maximums.append(d.get(resource))
return maximums
def write_maximums(maximums, resource, group_name, base_directory):
directory = base_directory + "/" + group_name
make_path(directory)
data_path = directory + "/" + resource
f = open(data_path, "w")
for m in maximums:
f.write("%d\n" % m)
f.close()
return data_path
def scale_maximums(maximums, unit):
result = []
for m in maximums:
m = scale_value(m, unit)
result.append(m)
return result
def task_has_timeseries(task, source_directory):
base_name = task.get('filename').split('.')[0]
timeseries_name = base_name + '.series'
try:
f = open(source_directory + "/" + timeseries_name)
f.close()
except:
return None
return timeseries_name
def fill_in_time_series_format(resource, unit, data_path, column, out_path, width=1250, height=500):
if unit != " ":
unit = ' (' + unit + ')'
    commands = 'set terminal png transparent size ' + str(width) + ',' + str(height) + " enhanced font 'Times,10'\n"
commands += "set bmargin 4\n"
commands += "unset key\n"
commands += 'set xlabel "Time (seconds)" offset 0,-2 character' + "\n"
commands += 'set ylabel "' + resource.replace('_', ' ') + unit + '" offset 0,-2 character' + "\n"
commands += 'set output "' + out_path + '"' + "\n"
commands += "set yrange [0:*]\n"
commands += "set xrange [0:*]\n"
commands += "set xtics right rotate by -45\n"
commands += "set bmargin 7\n"
commands += 'plot"' + data_path + '" using 1:' + str(column) + ' w lines lw 5 lc rgb"#465510"' + "\n"
return commands
def generate_time_series_plot(resource, unit, data_path, column, out_path, width, height):
commands = fill_in_time_series_format(resource, unit, data_path, column, out_path, width, height)
gnuplot(commands)
def scale_time_series(source_directory, data_file, units, aggregate_data, workspace):
start = -1
out_file_path = os.path.join(workspace, data_file + '.scaled')
out_stream = open(out_file_path, 'w')
data_stream = open(source_directory + '/' + data_file, 'r')
for line in data_stream:
if line[0] == '#':
continue
data = line.split()
if start < 0:
start = data[0]
data[6] = str(scale_value(data[6] + ' B', 'GB'))
data[7] = str(scale_value(data[7] + ' B', 'GB'))
# store in aggregate_data
key = round(int(data[0])/1000000)
previous_values = aggregate_data.get(key, [0,0,0,0,0,0,0,0,0])
for x in range(0,8):
previous_values[x] = previous_values[x] + float(data[x+1])
aggregate_data[key] = previous_values
data[0] = str((float(data[0]) - float(start))/10e5)
out_stream.write("%s\n" % str.join(' ', data))
data_stream.close()
out_stream.close()
return out_file_path, aggregate_data
def create_individual_pages(groups, destination_directory, name, resources, units, source_directory, workspace):
aggregate_data = {}
for group_name in groups:
for task in groups[group_name]:
timeseries_file = task_has_timeseries(task, source_directory)
has_timeseries = False
            if timeseries_file is not None:
has_timeseries = True
data_path, aggregate_data = scale_time_series(source_directory, timeseries_file, units, aggregate_data, workspace)
column = 1
for r in resources:
out_path = destination_directory + '/' + group_name + '/' + r + '/' + rule_id_for_task(task) + '.png'
if column > 1:
generate_time_series_plot(r, units.get(r), data_path, column, out_path, 600, 300)
column += 1
page = "<html>\n"
page += '<link rel="stylesheet" type="text/css" media="screen, projection" href="../css/style.css" />' + "\n"
page += "<p> Showing task <tt>" + rule_id_for_task(task) + "</tt> in workflow <tt><a href=\"../index.html\">" + name + "</a></tt><br><br>\n"
page += "<table>\n"
page += "<tr><td>command</td><td></td><td>" + task.get('command') + "</td></tr>\n"
for r in resources:
page += "<tr><td><a href=\"" + r + "/index.html\">" + r + "</a></td><td>" + task.get(r) + "</td>"
if has_timeseries and r != 'wall_time':
image_path = r + '/' + rule_id_for_task(task) + '.png'
page += '<td><img src="' + image_path +'" /></td>'
else:
page += '<td></td>'
page += "</tr>\n"
page += "</html>\n"
f = open(destination_directory + "/" + group_name + "/" + rule_id_for_task(task) + ".html", "w")
f.write("%s\n" % page)
f.close()
return aggregate_data
def write_aggregate_data(data, resources, work_directory):
sorted_keys = sorted(data.keys(), key=lambda x: float(x))
start_time = float(sorted_keys[0])
files = []
for index, r in enumerate(resources):
if index != 0:
f = open(work_directory + '/' + r + '.aggregate', 'w')
files.append(f)
for k in sorted_keys:
for index, f in enumerate(files):
f.write("%s %d\n" % ((k-start_time), data.get(k)[index]))
for f in files:
f.close()
def create_aggregate_plots(resources, units, work_directory, destination_directory):
for r in resources:
unit = units.get(r)
data_path = work_directory + '/' + r + '.aggregate'
column = 2
out_path = destination_directory + '/' + r + '_aggregate.png'
commands = fill_in_time_series_format(r, unit, data_path, column, out_path, 1250, 500)
gnuplot(commands)
def create_main_page(group_names, name, resources, resource_units, destination, hist_width=600, hist_height=600, timeseries_height=1250, timeseries_width=500, has_timeseries=False):
out_path = destination + "/index.html"
f = open(out_path, "w")
content = "<!doctype html>\n"
content += "<meta charset=\"UTF-8\">\n"
content += '<meta name="viewport" content="initial-scale=1.0, width=device-width" />' + "\n"
content += '<link rel="stylesheet" type="text/css" media="screen, projection" href="css/style.css" />' + "\n"
    content += '<title>' + name + " Workflow</title>\n"
content += '<div class="content">' + "\n"
    content += '<h1>' + name + " Workflow</h1>\n"
if has_timeseries:
content += '<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js"></script>' + "\n"
content += '<script src="js/slides.min.jquery.js"></script>' + "\n"
content += '<script>' + "\n" + ' $(function(){' + "\n" + " $('#slides').slides({ preload: true, });\n });\n</script>\n"
content += '<!-- javascript and some images licensed under Apache-2.0 by Nathan Searles (http://nathansearles.com/) -->' + "\n"
content += '<section class="summary">' + "\n"
content += ' <div id="slides">' + "\n"
content += ' <div class="slides_container">' + "\n"
for index, r in enumerate(resources):
if index != 0:
content += '<div class="slide"><div class="item"><img src = "' + r + '_aggregate.png" /></div></div>' + "\n"
content += "</div>\n"
content += '<a href="#" class="prev"><img src="img/arrow-prev.png" width="24" height="43" alt="Arrow Prev"></a>' + "\n"
content += '<a href="#" class="next"><img src="img/arrow-next.png" width="24" height="43" alt="Arrow Next"></a>' + "\n"
content += " </div>\n</section>\n"
content += '<table>\n'
content += '<tr>'
content += '<td></td>'
for g in group_names:
content += '<td>' + g + '</td>'
content += '</tr>\n'
for r in resources:
unit = resource_units.get(r)
content += '<tr>\n'
content += '<td>'
content += r.replace('_', ' ')
if unit != " ":
content += '(' + unit + ')'
content += '</td>'
for g in group_names:
content += '<td><a href="' + g + '/' + r + '/index.html"><img src="' + g + "/" + r + "_" + str(hist_width) + "x" + str(hist_height) + '_hist.png" /></a></td>\n'
content += '</tr>\n'
content += "</div>\n"
f.write("%s\n" % content)
f.close()
def to_base(value, unit):
prefix = unit[0]
if prefix == "K":
value *= 1024
elif prefix == "M":
value *= 1024**2
elif prefix == "G":
value *= 1024**3
elif prefix == "T":
value *= 1024**4
return value
def to_target(value, target):
prefix = target[0]
if prefix == "K":
value /= 1024
elif prefix == "M":
value /= 1024**2
elif prefix == "G":
value /= 1024**3
elif prefix == "T":
value /= 1024**4
return value
def scale_value(initial, target_unit=" "):
value, unit = initial.split(' ', 2)
value = float(value)
unit = unit.strip()
v = to_target(to_base(value, unit), target_unit)
return v
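# --- Worked example (added): "2048 KB" scales up to bytes (2048 * 1024) and
# then down to the target unit, so the round trip yields exactly 2.0 MB.
assert scale_value('2048 KB', 'MB') == 2.0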
def main():
warnings.filterwarnings("ignore", category=DeprecationWarning)
GNUPLOT_VERSION = find_gnuplot_version()
visualizer_home = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
(source_directory,
destination_directory,
name) = get_args()
source_exists(source_directory)
make_path(destination_directory)
workspace = tempfile.mkdtemp('rmv')
try:
summary_paths = find_summary_paths(source_directory)
resource_units = {"wall_time": "s",
"cpu_time": "s",
"max_concurrent_processes": " ",
"virtual_memory": "MB",
"resident_memory": "MB",
"swap_memory": "MB",
"bytes_read": "MB",
"bytes_written": "MB",
"workdir_num_files": " ",
"workdir_footprint": "MB"
}
resources = [ "wall_time",
"cpu_time",
"max_concurrent_processes",
"virtual_memory",
"resident_memory",
"swap_memory",
"bytes_read",
"bytes_written",
"workdir_num_files",
"workdir_footprint"
]
print "Reading summaries..."
groups = load_summaries_by_group(summary_paths)
print "Plotting summaries histograms..."
hist_large_height = 600
hist_large_width = int(1.5 * hist_large_height)
hist_small_height = 125
hist_small_width = int(1.5 * hist_small_height)
for r in resources:
unit = resource_units.get(r)
for group_name in groups:
maximums = find_maximums(groups[group_name], r)
maximums = scale_maximums(maximums, unit)
data_path = write_maximums(maximums, r, group_name, workspace)
out_path = destination_directory + "/" + group_name
make_path(out_path)
max_sorted = sorted(maximums)
binwidth = compute_binwidth_iqr(max_sorted)
table_path = out_path + "/" + r + "_hist.dat"
gnuplot_format = fill_histogram_table_template(max_sorted, table_path, binwidth, data_path)
gnuplot_to_file(table_path + ".gnuplot", gnuplot_format)
gnuplot(gnuplot_format)
image_path = out_path + "/" + r + "_" + str(hist_large_width) + "x" + str(hist_large_height) + "_hist.png"
gnuplot_format = fill_histogram_template(max_sorted, hist_large_width, hist_large_height, image_path, binwidth, r, unit, data_path, table_path, True)
gnuplot_to_file(image_path + ".gnuplot", gnuplot_format)
gnuplot(gnuplot_format)
image_path = out_path + "/" + r + "_" + str(hist_small_width) + "x" + str(hist_small_height) + "_hist.png"
gnuplot_format = fill_histogram_template(max_sorted, hist_small_width, hist_small_height, image_path, binwidth, r, unit, data_path, table_path, False)
gnuplot_to_file(image_path + ".gnuplot", gnuplot_format)
gnuplot(gnuplot_format)
resource_group_page(name, group_name, r, hist_large_width, hist_large_height, groups[group_name], out_path)
aggregate_height = 500
aggregate_width = int(1.5 * aggregate_height)
aggregate_data = create_individual_pages(groups, destination_directory, name, resources, resource_units, source_directory, workspace)
time_series_exist = False
if aggregate_data != {}:
print "Aggregating time series..."
time_series_exist = True
write_aggregate_data(aggregate_data, resources, workspace)
print "Plotting time series..."
create_aggregate_plots(resources, resource_units, workspace, destination_directory)
create_main_page(groups.keys(), name, resources, resource_units, destination_directory, hist_small_width, hist_small_height, aggregate_height, aggregate_width, time_series_exist)
lib_static_home = os.path.normpath(os.path.join(visualizer_home, 'lib/resource_monitor_visualizer_static'))
os.system("cp -r " + lib_static_home + "/* " + destination_directory)
finally:
print "Cleaning up..."
os.system("rm -rf " + workspace)
if __name__ == "__main__":
main()
|
isanwong/cctools
|
resource_monitor/src/resource_monitor_visualizer.py
|
Python
|
gpl-2.0
| 20,770
|
default_app_config = 'rds.apps.RdsConfig'
|
hyperwd/hwcram
|
rds/__init__.py
|
Python
|
mit
| 42
|
"""
Backports the ``register.assignment_tag`` functionality from Django 1.4 to
Django 1.3.
This code is almost entirely reproduced from
https://code.djangoproject.com/browser/django/trunk/django/template/base.py
and is the work of Django's authors:
https://code.djangoproject.com/browser/django/trunk/AUTHORS
It is licensed under Django's BSD license, available here:
https://code.djangoproject.com/browser/django/trunk/LICENSE
To use, simply import this code in your project prior to using the
``register.assignment_tag``. In general, the top of your template tag
python file would be a good place for the import.
"""
import re
from inspect import getargspec
from django.template import TemplateSyntaxError
from django.template.base import Node, Library
if not hasattr(Library, 'assignment_tag'):
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
"""
A utility method for parsing token keyword arguments.
:param bits: A list containing remainder of the token (split by spaces)
that is to be checked for arguments. Valid arguments will be
removed from this list.
        :param support_legacy: If set to ``True``, the legacy format
``1 as foo`` will be accepted. Otherwise, only the standard
``foo=1`` format is allowed.
:returns: A dictionary of the arguments retrieved from the ``bits``
token list.
There is no requirement for all remaining token ``bits`` to be keyword
arguments, so the dictionary will be returned as soon as an invalid
argument format is reached.
"""
if not bits:
return {}
match = kwarg_re.match(bits[0])
kwarg_format = match and match.group(1)
if not kwarg_format:
if not support_legacy:
return {}
if len(bits) < 3 or bits[1] != 'as':
return {}
kwargs = {}
while bits:
if kwarg_format:
match = kwarg_re.match(bits[0])
if not match or not match.group(1):
return kwargs
key, value = match.groups()
del bits[:1]
else:
if len(bits) < 3 or bits[1] != 'as':
return kwargs
key, value = bits[2], bits[0]
del bits[:3]
kwargs[key] = parser.compile_filter(value)
if bits and not kwarg_format:
if bits[0] != 'and':
return kwargs
del bits[:1]
return kwargs
def parse_bits(parser, bits, params, varargs, varkw, defaults,
takes_context, name):
"""
        Parses bits for template tag helpers (simple_tag, inclusion_tag and
assignment_tag), in particular by detecting syntax errors and by
extracting positional and keyword arguments.
"""
if takes_context:
if params[0] == 'context':
params = params[1:]
else:
raise TemplateSyntaxError(
"'%s' is decorated with takes_context=True so it must "
"have a first argument of 'context'" % name)
args = []
kwargs = {}
unhandled_params = list(params)
for bit in bits:
# First we try to extract a potential kwarg from the bit
kwarg = token_kwargs([bit], parser)
if kwarg:
# The kwarg was successfully extracted
param, value = kwarg.items()[0]
if param not in params and varkw is None:
# An unexpected keyword argument was supplied
raise TemplateSyntaxError(
"'%s' received unexpected keyword argument '%s'" %
(name, param))
elif param in kwargs:
# The keyword argument has already been supplied once
raise TemplateSyntaxError(
"'%s' received multiple values for keyword argument "
"'%s'" % (name, param))
else:
# All good, record the keyword argument
kwargs[str(param)] = value
if param in unhandled_params:
# If using the keyword syntax for a positional arg,
# then consume it.
unhandled_params.remove(param)
else:
if kwargs:
raise TemplateSyntaxError(
"'%s' received some positional argument(s) after some "
"keyword argument(s)" % name)
else:
# Record the positional argument
args.append(parser.compile_filter(bit))
try:
# Consume from the expected positional arguments
unhandled_params.pop(0)
except IndexError:
if varargs is None:
raise TemplateSyntaxError(
"'%s' received too many positional arguments" %
name)
if defaults is not None:
# Consider the last n params handled, where n is the
# number of defaults.
unhandled_params = unhandled_params[:-len(defaults)]
if unhandled_params:
# Some positional arguments were not supplied
raise TemplateSyntaxError(
u"'%s' did not receive value(s) for the argument(s): %s" %
(name, u", ".join([u"'%s'" % p for p in unhandled_params])))
return args, kwargs
class TagHelperNode(Node):
"""
Base class for tag helper nodes such as SimpleNode, InclusionNode and
AssignmentNode. Manages the positional and keyword arguments to be
passed to the decorated function.
"""
def __init__(self, takes_context, args, kwargs):
self.takes_context = takes_context
self.args = args
self.kwargs = kwargs
def get_resolved_arguments(self, context):
resolved_args = [var.resolve(context) for var in self.args]
if self.takes_context:
resolved_args = [context] + resolved_args
resolved_kwargs = dict((k, v.resolve(context))
for k, v in self.kwargs.items())
return resolved_args, resolved_kwargs
def assignment_tag(self, func=None, takes_context=None, name=None):
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
class AssignmentNode(TagHelperNode):
def __init__(self, takes_context, args, kwargs, target_var):
super(AssignmentNode, self) \
.__init__(takes_context, args, kwargs)
self.target_var = target_var
def render(self, context):
resolved_args, resolved_kwargs = \
self.get_resolved_arguments(context)
context[self.target_var] = func(*resolved_args,
**resolved_kwargs)
return ''
function_name = (name or
getattr(func, '_decorated_function', func).__name__)
def compile_func(parser, token):
bits = token.split_contents()[1:]
if len(bits) < 2 or bits[-2] != 'as':
raise TemplateSyntaxError(
"'%s' tag takes at least 2 arguments and the "
"second last argument must be 'as'" % function_name)
target_var = bits[-1]
bits = bits[:-2]
args, kwargs = parse_bits(parser, bits, params,
varargs, varkw, defaults, takes_context, function_name)
return AssignmentNode(takes_context, args, kwargs, target_var)
compile_func.__doc__ = func.__doc__
self.tag(function_name, compile_func)
return func
if func is None:
# @register.assignment_tag(...)
return dec
elif callable(func):
# @register.assignment_tag
return dec(func)
else:
raise TemplateSyntaxError("Invalid arguments provided to "
"assignment_tag")
Library.assignment_tag = assignment_tag
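# --- Hedged usage sketch (added): a template tag module using the backport;
# `register` follows Django convention and `get_recent_items` is hypothetical.
#
#     from django.template import Library
#     import myapp.assignment_tag_backport  # this module, for its side effect
#
#     register = Library()
#
#     @register.assignment_tag
#     def get_recent_items(count=5):
#         return range(count)
#
# Template usage:  {% get_recent_items 3 as recent %}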
|
rcbops/horizon-buildpackage
|
horizon/utils/assignment_tag.py
|
Python
|
apache-2.0
| 8,684
|
from __future__ import nested_scopes
import sys, string, os, glob, re, math
from types import *
graph_counter = 1
# returns a list of lines in the file that matches the pattern
def grep(filename, pattern):
result = [];
file = open(filename,'r')
for line in file.readlines():
if re.match(pattern, line):
result.append(string.strip(line))
return result
# returns a list of lines in the file that DO NOT match the pattern
def inverse_grep(filename, pattern):
result = [];
file = open(filename,'r')
for line in file.readlines():
if not re.match(pattern, line):
result.append(string.strip(line))
return result
def mean(list):
length = len(list)
if length == 0:
return 0
else:
total = 0.0
for i in list:
total += i
return total/length
# returns the median of the list
def median(list):
sorted = list
sorted.sort()
length = len(list)
if length == 0:
return 0
elif length == 1:
return list[0]
elif length % 2 == 0:
# even
return (list[length/2]+list[(length/2)-1])/2.0
else:
# odd
return list[length/2]
# returns the Nth percentile element of the list
def nth_percentile(list, n):
sorted = list
sorted.sort()
length = len(list)
if length == 0:
return 0
elif length == 1:
return list[0]
else:
return list[length*n/100]
# returns the maximum element of the list
def maximum(list):
sorted = list
sorted.sort()
length = len(list)
if length == 0:
return 0
elif length == 1:
return list[0]
else:
return list[length-1]
# returns the minimum of the list
def minimum(list):
sorted = list
sorted.sort()
length = len(list)
if length == 0:
return 0
else:
return list[0]
# returns the average of the list
def average(list):
if (type(list) is TupleType) or (type(list) is ListType):
sum = reduce(lambda x, y: x+y, list, 0);
length = len(list)
if length == 0:
return 0
else:
return 1.0*sum/length
else:
return list
# returns the average of the list without the two extremes
def averageWithoutExtremes(list):
av = average(list)
length = len(list)
max = maximum(list)
min = minimum(list)
if length < 3:
return av
else:
return ((av*length-min-max)/(length-2))
# returns the standard deviation
def stddev(list):
if len(list) == 1:
print "Warning: standard deviation of a one element list"
return 0
    if len(list) == 0:
        print "Warning: standard deviation of a zero element list"
        return 0
sum = 0.0;
sumsq = 0.0;
for num in list:
sum = sum + num
sumsq = sumsq + pow(num, 2)
size = float(len(list))
average = sum / size
variance = (sumsq - pow(sum, 2)/size)/(size-1.0)
if variance < 0.0:
print "Warning: negative variance"
variance = 0.0
stddev = math.sqrt(variance)
return stddev
#normalizes an associative array
def normalize(assoc_array, normal_key):
if not assoc_array.has_key(normal_key):
raise RuntimeError, "Normalization key missing"
base = assoc_array[normal_key]
if (type(base) is TupleType) or (type(base) is ListType):
base = base[0]
if base == 0:
raise RuntimeError, "Normalizing with zero base"
for key in assoc_array.keys():
item = assoc_array[key]
# act differently if we are a list or not
if (type(item) is TupleType) or (type(item) is ListType):
for index in range(len(item)):
item[index] = item[index]/base
else:
item = item/base
assoc_array[key] = item
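# Worked example (added; keys are illustrative): normalize() divides every
# entry by the value stored under normal_key, in place:
#     d = {"base": 2.0, "fast": 1.0, "slow": 6.0}
#     normalize(d, "base")   # d becomes {"base": 1.0, "fast": 0.5, "slow": 3.0}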
def normalize_list(lines, config):
normal_map = {}
for line in lines:
if line[0] == config:
line = line[1:] # skip label
for pair in line:
normal_map[pair[0]] = pair[1]
counter = 0
for line in lines:
new_line = [line[0]]
line = line[1:] # strip off label
for pair in line:
x_value = pair[0]
new_pair = [x_value]
if normal_map.has_key(x_value):
for index in range(1, len(pair)):
new_pair.append(normal_map[x_value]/pair[index])
new_line.append(new_pair)
lines[counter] = new_line
counter += 1
# find the minimum value and inverted normalize to it
def make_relative(lines):
lst = []
for line in lines:
line = line[1:] # skip label
for pair in line:
lst.append(pair[1]) # the y-value
minimum = min(lst)
for line in lines:
line = line[1:] # strip off label
for pair in line:
for index in range(1, len(pair)):
pair[index] = minimum/pair[index]
# generates multiple jgraphs on the same page.
def multi_graph(data_list, # list of data, one element per graph
param_list = [], # list of parameters to the gen_graph function unique to each graph
cols = 2, # number of columns
row_space = 3,
col_space = 2.5,
**other_params # default parameters used by all the graphs generated by this function
):
num = 0
jgraph_input = []
for (data, params) in map(None, data_list, param_list):
new_params = other_params
if params:
new_params.update(params)
jgraph_input.append(gen_graph(data,
x_translate = (num % cols) * col_space,
y_translate = (num / cols) * -row_space,
**new_params
))
num += 1
return "\n".join(jgraph_input)
def stacked_bar_graph(stacks,
**params):
return gen_graph(stacks,
graph_type = "bar",
**params)
def line_graph(stacks,
**params):
return gen_graph(stacks,
graph_type = "line",
**params)
def gen_graph(data,
graph_type = "line", # should be either "bar" or "line"
# bar chart specific items
bar_space = 1.1,
bar_segment_labels = [],
bar_name_font_size = "12",
bar_name_rotate = 0.0,
stack_space = 1.0,
stack_name_font_size = "12",
stack_name_location = 5.0,
patterns = ["solid", "stripe -45", "solid", "stripe 45"],
yhash = "",
ymhash = "",
# line graph specific items
linetype = ["solid", "dotted", "longdash", "dotdash", "dashed"],
marktype = ["none"],
mrotate = None,
marksize = None,
line_thickness = 1.0,
hash_marks = [],
fills = ["0 0 0", "1 1 1", "0.5 0.5 0.5"],
# title
title = "",
title_fontsize = "24",
title_font = "Times-Roman",
title_y = 115,
# labels
xlabel = "",
ylabel = "",
label_fontsize = "14",
label_font = "Times-Roman",
ylabel_location = [],
xlabel_location = [],
# legends
legend = "on",
legend_fontsize = "14",
legend_font = "Times-Roman",
legend_x = "",
legend_y = "",
legend_default = "",
legend_hack = "",
# presentation
xsize = 7.0,
ysize = 7.0,
x_translate = 0.0,
y_translate = 0.0,
xmax = "",
ymax = "",
xmin = "",
ymin = 0,
xlog = "",
ylog = "",
clip = 0,
colors = ["1 0 0", "0 0 1", "0 .5 0", "1 0 1"],
):
# Figure out which graph number we are
global graph_counter
data_graph = graph_counter # used to plot data
position_graph = graph_counter + 1 # use to position titles, etc
graph_counter += 2
counter = 1.0
output = ""
output += "graph %d x_translate %f y_translate %f\n" % (data_graph, x_translate, y_translate)
if clip:
output += "clip\n"
output += "graph %d clip x_translate %f y_translate %f\n" % (position_graph, x_translate, y_translate)
if (title != ""):
output += "graph %d title y %d font %s fontsize %s : %s\n" % (position_graph, title_y, title_font, title_fontsize, title)
# place legends (using the position graph)
output += "graph %d\n" % position_graph
if legend != "on":
legend = "off"
output += "legend %s defaults %s font %s fontsize %s" % (legend, legend_default, legend_font, legend_fontsize)
if (legend_x != ""):
output += " x %s" % legend_x
if (legend_y != ""):
output += " y %s" % legend_y
output += "\n"
if len(bar_segment_labels) > 1: # bar legend - only if there is more than one type of bar
for bar_num in range(len(bar_segment_labels)):
output += "newcurve pts 0 0 marktype xbar marksize 1 5 pattern %s cfill %s label : %s\n" % \
(patterns[bar_num%len(patterns)], colors[bar_num%len(colors)], bar_segment_labels[bar_num])
if graph_type == "bar":
for stack in data:
stack_name = stack[0]
bars = stack[1:]
x_begin = counter
x_end = counter
bar_num = 0 # should this be this way
for bar in bars:
bar_name = bar[0]
components = bar[1:]
comp_num = 0
for component in components:
# if it is a list, take the first element
if (type(component) is TupleType) or (type(component) is ListType):
value = component[0]
output += "graph %d newcurve marksize 1 1 marktype xbar pattern %s cfill %s pts %f %f\n" % \
(data_graph, patterns[bar_num%len(patterns)], colors[bar_num%len(colors)], counter, value)
# error bars
output += "graph %d newcurve marksize 0.5 1 marktype none y_epts %f %f %f %f\n" % \
(data_graph, counter, value, component[1], component[2])
else:
value = component
output += "graph %d newcurve marksize 1 1 marktype xbar pattern %s cfill %s pts %f %f\n" % \
(data_graph, patterns[comp_num%len(patterns)], colors[comp_num%len(colors)], counter, value)
comp_num += 1
bar_num += 1
# bar name
if bar_name_rotate == 0:
output += "graph %d newstring fontsize %s x %f y -2.5 hjc vjt : %s\n" % (position_graph, bar_name_font_size, counter, bar_name)
else:
output += "graph %d newstring fontsize %s x %f y -2.5 hjr vjc rotate %f : %s\n" % (position_graph, bar_name_font_size, counter, bar_name_rotate, bar_name)
x_end = counter
counter += bar_space
counter = counter + stack_space
average = (x_end+x_begin) / 2.0
output += "graph %d newstring fontsize %s hjc vjt x %f y %f : %s\n" % (position_graph, stack_name_font_size, average, -stack_name_location, stack_name)
if graph_type == "line":
counter = 100
# add error bars first
line_num = 0
for line in data:
if len(line) < 1:
continue
line_title = line[0]
line = line[1:]
error_bars = ""
for point in line:
if len(point) == 4:
error_bars += "%f %f %f %f\n" % (point[0], point[1], point[2], point[3])
if error_bars != "":
output += "graph %d newline\n" % (data_graph)
output += "y_epts %s\n" % error_bars
output += "color %s\n" % colors[line_num%len(colors)]
output += "linetype none\n"
line_num += 1
# add lines and marks
line_num = 0
for line in data:
if len(line) < 1:
continue
# line style
style = "color %s\n" % colors[line_num%len(colors)]
style += "cfill %s\n" % fills[line_num%len(fills)]
style += "marktype %s\n" % (marktype[line_num%len(marktype)])
if marksize:
style += "marksize %f\n" % (marksize[line_num%len(marksize)])
if mrotate:
style += "mrotate %f\n" % (mrotate[line_num%len(mrotate)])
style += "linetype %s\n" % linetype[line_num%len(linetype)]
style += "linethickness %f\n" % line_thickness
line_title = line[0]
line = line[1:]
output += "graph %d newline\n" % (data_graph)
output += "pts\n"
for point in line:
output += "%f %f\n" % (point[0], point[1])
output += style
# we plot a point to get it to show up in the label (position graph)
output += "graph %d newline pts -10 -10 %s\n" % (position_graph, style)
if line_title != "":
output += "label : %s\n" % (line_title)
line_num += 1
# x- and y-axis: sizes and ranges
output += "graph %d\n" % (position_graph)
output += "xaxis size %f min 0 nodraw max %f\n" % (xsize, counter)
output += "yaxis size %f min 0 nodraw max 100\n" % (ysize)
output += "graph %d\n" % (data_graph)
output += "xaxis size %f\n" % (xsize)
output += "yaxis size %f\n" % (ysize)
if graph_type == "bar":
xmin = 0
output += "xaxis no_auto_hash_labels no_draw_hash_marks max %f\n" % (counter)
output += "yaxis grid_lines\n"
if (ymax != ""):
output += "yaxis max %f\n" % ymax
if (xmax != ""):
output += "xaxis max %f\n" % xmax
if (ymin != ""):
output += "yaxis min %f\n" % ymin
if (xmin != ""):
output += "xaxis min %f\n" % xmin
if (xlog != ""):
output += "xaxis log log_base %d\n" % (xlog) # xlog is the base
if (ylog != ""):
output += "yaxis log log_base %d\n" % (ylog) # ylog is the base
# hashes (always on the data graph)
output += "graph %d\n" % (data_graph)
if len(hash_marks) > 1:
output += "xaxis no_auto_hash_marks\n"
for mark in hash_marks:
output += "xaxis hash_at %s hash_label at %s : %s\n" % (mark, mark, mark)
if yhash != "":
output += "yaxis hash %f\n" % (yhash)
if ymhash != "":
output += "yaxis mhash %f\n" % (ymhash)
output += "xaxis hash_labels font %s fontsize %s\n" % (label_font, label_fontsize);
output += "yaxis hash_labels font %s fontsize %s\n" % (label_font, label_fontsize);
# axis labels (position graph)
output += "graph %d\n" % (position_graph)
if xlabel_location != []:
output += "graph %d\n" % (position_graph) # put us in the data independent scale
label_pos = "y -%f " % (xlabel_location) # no newline
else:
output += "graph %d\n" % (data_graph)
label_pos = ""
output += "xaxis draw_axis_label label %s font %s fontsize %s : %s\n" % (label_pos, label_font, label_fontsize, xlabel)
if ylabel_location != []:
output += "graph %d\n" % (position_graph) # put us in the data independent scale
label_pos += " x -%f " % (ylabel_location) # no newline
else:
output += "graph %d\n" % (data_graph)
label_pos = ""
output += "yaxis draw_axis_label label %s font %s fontsize %s : %s\n" % (label_pos, label_font, label_fontsize, ylabel)
output += "graph %d\n" % (data_graph)
return output
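# Hedged usage sketch (added; labels and values are illustrative, the format
# is inferred from the loops above): each line is a list whose first element
# is the legend label and whose remaining elements are [x, y] points (a
# 4-element point [x, y, low, high] also draws error bars):
#
#     lines = [["baseline", [1, 10.0], [2, 12.5], [4, 20.0]],
#              ["improved", [1,  8.0], [2,  9.5], [4, 14.0]]]
#     jgr = line_graph(lines, title="Latency", xlabel="Threads", ylabel="ms")
#     run_jgraph(jgr, "latency")   # writes latency.jgr/.eps/.ps/.pdf (see below)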
# converts a list of values to the 'stacked' equivalent
def stack_bars(list):
data = list
sum = 0.0
lst = range(len(data))
lst.reverse()
for num in lst:
data[num] += sum
sum = data[num]
return data
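# Worked example (added): stack_bars() turns per-segment values into the
# cumulative totals that stacked bars are drawn from, mutating the list:
#     stack_bars([1.0, 2.0, 3.0])   # -> [6.0, 5.0, 3.0]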
# generate .eps, .ps, and .pdf
def run_jgraph(input_str, base_filename):
jgr_file = open("%s.jgr" % base_filename, "w")
jgr_file.write(input_str)
jgr_file.close()
# generate .eps (ghostview-able)
(in_file, out_file) = os.popen2("jgraph")
in_file.write(input_str)
in_file.close()
eps_file = open("%s.eps" % base_filename, "w")
eps_file.writelines(out_file.readlines())
eps_file.close()
# generate .ps (lpr-able)
(in_file, out_file) = os.popen2("jgraph -P")
in_file.write(input_str)
in_file.close()
ps_file = open("%s.ps" % base_filename, "w")
ps_file.writelines(out_file.readlines())
ps_file.close()
# generate .pdf
os.system("ps2pdf %s.ps" % base_filename)
# generate .gif
# (in_file, out_file) = os.popen2("/p/multifacet/scripts/jgrtogif 1")
# in_file.write(input_str)
# in_file.close()
# eps_file = open("%s.gif" % base_filename, "w")
# eps_file.writelines(out_file.readlines())
# eps_file.close()
# generate .epsi
# os.system("ps2epsi %s.ps" % base_filename)
# generates a .eps file from a .jgr file
def run_jgraph_from_file(input_filename, base_filename):
input_str = ""
jgr_file = open("%s.jgr" % input_filename, "r")
for line in jgr_file.readlines():
input_str += line
jgr_file.close()
# generate .eps (ghostview-able)
(in_file, out_file) = os.popen2("jgraph")
in_file.write(input_str)
in_file.close()
eps_file = open("%s.eps" % base_filename, "w")
eps_file.writelines(out_file.readlines())
eps_file.close()
##############################
# Note: The linreg() function for linear regression was taken from a
# web page "Simple Recipes in Python" by William Park
# http://www.python.org/topics/scicomp/recipes_in_python.html
"""
Returns coefficients to the regression line 'y=ax+b' from x[] and
y[]. Basically, it solves
Sxx a + Sx b = Sxy
Sx a + N b = Sy
where Sxy = \sum_i x_i y_i, Sx = \sum_i x_i, and Sy = \sum_i y_i. The
solution is
a = (Sxy N - Sy Sx)/det
b = (Sxx Sy - Sx Sxy)/det
where det = Sxx N - Sx^2. In addition,
Var|a| = s^2 |Sxx Sx|^-1 = s^2 | N -Sx| / det
|b| |Sx N | |-Sx Sxx|
s^2 = {\sum_i (y_i - \hat{y_i})^2 \over N-2}
= {\sum_i (y_i - ax_i - b)^2 \over N-2}
= residual / (N-2)
R^2 = 1 - {\sum_i (y_i - \hat{y_i})^2 \over \sum_i (y_i - \mean{y})^2}
= 1 - residual/meanerror
It also prints a few other values to <stdout>,
N, a, b, R^2, s^2,
which are useful in assessing the confidence of estimation.
"""
def linreg(X, Y):
from math import sqrt
if len(X) != len(Y): raise ValueError, 'unequal length'
N = len(X)
Sx = Sy = Sxx = Syy = Sxy = 0.0
for x, y in map(None, X, Y):
Sx = Sx + x
Sy = Sy + y
Sxx = Sxx + x*x
Syy = Syy + y*y
Sxy = Sxy + x*y
det = Sxx * N - Sx * Sx
a, b = (Sxy * N - Sy * Sx)/det, (Sxx * Sy - Sx * Sxy)/det
meanerror = residual = 0.0
for x, y in map(None, X, Y):
meanerror = meanerror + (y - Sy/N)**2
residual = residual + (y - a * x - b)**2
# RR = 1 - residual/meanerror
ss = residual / (N-2)
Var_a, Var_b = ss * N / det, ss * Sxx / det
# print "y=ax+b"
# print "N= %d" % N
# print "a= %g \\pm t_{%d;\\alpha/2} %g" % (a, N-2, sqrt(Var_a))
# print "b= %g \\pm t_{%d;\\alpha/2} %g" % (b, N-2, sqrt(Var_b))
# print "R^2= %g" % RR
# print "s^2= %g" % ss
return a, b
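# Quick check (added): for points lying exactly on y = 2x + 1,
#     linreg([0, 1, 2, 3], [1, 3, 5, 7])   # -> (2.0, 1.0)
# i.e. slope a = 2 and intercept b = 1, matching the formulas quoted above.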
##############################
# Code to calculate exponential regressions using a transformation and
# a linear regression
def exp_regress(X, Y):
# map into logarithmic space
Y_prime = map(math.log, Y)
# perform a linear regression in log space
a, b = linreg(X, Y_prime)
# Calculate the rate of growth. # The continuously compounding
# rate equation for r:
# F = P*e^(Tr) ---> r = ln(F/P) / T
# where F is the final value, P is the starting value, T is time,
# and r is the rate
# Note: a, the slope of the fit, is the rate of growth of the
# exponential curve. This is only true because we're using the
# natural logarithm as the base of our transformation
rate = a
# calculate the smooth line in log space
Y_fit_prime = map(lambda x:(a*x)+b, X)
# translate the log space back into original coordinates
Y_fit = map(lambda x:math.pow(math.e, x), Y_fit_prime)
return Y_fit, rate
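# Quick check (added): for data that doubles at every step,
#     Y_fit, rate = exp_regress([0, 1, 2, 3], [1.0, 2.0, 4.0, 8.0])
# the fit is exact in log space, so Y_fit reproduces the input and
# rate == math.log(2) (about 0.693), the continuous growth rate.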
#### Function for calculating confidence intervals
def t_distribution(v):
if v > len(t_distribution_95_percent_lookup_table):
return 1.96 # this is the value in the limit
else:
return t_distribution_95_percent_lookup_table[v-1]
def confidence_interval_95_percent(lst):
n = len(lst)
sd = stddev(lst) # standard deviation
se = sd / math.sqrt(n) # standard error
confidence_interval_95p = se * t_distribution(n-1)
return confidence_interval_95p
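# Quick check (added): for [1.0, 2.0, 3.0, 4.0, 5.0] the standard deviation is
# about 1.581, the standard error 1.581/sqrt(5) ~= 0.707, and with
# t_distribution(4) = 2.77645 the 95% interval is roughly +/- 1.96.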
# Note: for n < 5, the confidence interval is actually larger than the
# standard deviation. At about n=6 is where the two are about the
# same, and around n=18 is where the 95% confidence interval is about
# half the standard deviation. At n=60 the 95% confidence interval is
# about 1/4th the standard deviation. The above data can be found by
# using the following code:
#for n in range(2, 100):
# sd = 1
# se = sd / math.sqrt(n) # standard error
# confidence_interval_95p = se * t_distribution(n-1)
# print n, confidence_interval_95p
# T-distribution table used in calculating 95% confidence intervals.
# The alpha for the table is 0.025 which corresponds to a 95%
# confidence interval. (Note: a C-language stats package was used to
# generate this table, but it can also be calculated using Microsoft
# Excel's TINV() function.)
t_distribution_95_percent_lookup_table = (
12.7062,
4.30265,
3.18245,
2.77645,
2.57058,
2.44691,
2.36462,
2.306,
2.26216,
2.22814,
2.20099,
2.17881,
2.16037,
2.14479,
2.13145,
2.11991,
2.10982,
2.10092,
2.09302,
2.08596,
2.07961,
2.07387,
2.06866,
2.0639,
2.05954,
2.05553,
2.05183,
2.04841,
2.04523,
2.04227,
2.03951,
2.03693,
2.03452,
2.03224,
2.03011,
2.02809,
2.02619,
2.02439,
2.02269,
2.02108,
2.01954,
2.01808,
2.01669,
2.01537,
2.0141,
2.0129,
2.01174,
2.01063,
2.00958,
2.00856,
2.00758,
2.00665,
2.00575,
2.00488,
2.00404,
2.00324,
2.00247,
2.00172,
2.001,
2.0003,
1.99962,
1.99897,
1.99834,
1.99773,
1.99714,
1.99656,
1.99601,
1.99547,
1.99495,
1.99444,
1.99394,
1.99346,
1.993,
1.99254,
1.9921,
1.99167,
1.99125,
1.99085,
1.99045,
1.99006,
1.98969,
1.98932,
1.98896,
1.98861,
1.98827,
1.98793,
1.98761,
1.98729,
1.98698,
1.98667,
1.98638,
1.98609,
1.9858,
1.98552,
1.98525,
1.98498,
1.98472,
1.98447,
1.98422,
)
def make_eps(input_str, base_filename, eps_dir):
filename = "%s/%s" % (eps_dir, base_filename)
print "making eps file: %s" % filename
jgr_file = open("%s.jgr" % filename, "w")
jgr_file.write(input_str)
jgr_file.close()
# generate .eps (ghostview-able)
(in_file, out_file) = os.popen2("jgraph")
in_file.write(input_str)
in_file.close()
eps_file = open("%s.eps" % filename, "w")
eps_file.writelines(out_file.readlines())
eps_file.close()
def cdf(count, input):
data = []
total = 0.0
for tuple in input:
total += float(tuple[1])/float(count)
data.append([tuple[0], total])
return data
def pdf(count, input):
data = []
for tuple in input:
p = float(tuple[1])/float(count)
data.append([tuple[0], p])
return data
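# Worked example (added; counts are illustrative): with count = 10 and
# input = [[1, 2], [2, 3], [3, 5]] (pairs of value and occurrences):
#     pdf(10, input)   # -> [[1, 0.2], [2, 0.3], [3, 0.5]]
#     cdf(10, input)   # -> [[1, 0.2], [2, 0.5], [3, 1.0]]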
############################################
# Merge Data:
#
# This is a utility to merge a list of
# data points from several runs into a
# single graph. Points are collected into
# bins by their x value. The resulting
# set has tuples with 4 entries, the x
# value, the average, and the average plus
# and minus 1 std dev.
############################################
def merge_data(group_title, data):
map = {}
for tuple in data:
key = tuple[0]
if map.has_key(key):
map[key].append(tuple[1])
else:
map[key] = []
map[key].append(tuple[1])
points = []
for key in map.keys():
avg = average(map[key])
dev = stddev(map[key])
points.append([key, avg, avg+dev, avg-dev])
points.sort()
graph_lines = [group_title]
graph_lines += points
return graph_lines
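# Worked example (added; numbers are illustrative): points sharing an x value
# are binned, averaged, and bracketed by one standard deviation:
#     merge_data("runs", [[1, 10.0], [1, 12.0], [2, 20.0], [2, 24.0]])
#     # -> ["runs", [1, 11.0, 12.41..., 9.58...], [2, 22.0, 24.82..., 19.17...]]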
############################################
# Excel Line Graphs
#
# This function generates a text file
# that can be imported, and (through a
# somewhat ugly macro) easily plotted
# as a line graph.
############################################
excel_dir = "excel_files"
def make_excel_line(name, data):
filename = "%s/%s.txt" % (excel_dir, name)
print "making excel text file: %s" % filename
excel_file = open(filename, "w")
for set in data:
is_first = 1
for point in set:
if is_first == 1:
excel_file.write(set[0])
is_first = 0
continue
else:
excel_file.write("\t")
print point
x_val = point[0]
avg = point[1]
# tuples are expected to be formed
# with "mfgraph.merge_data" and in the
# form [x, avg, avg+stdd, avg-stdd]
if len(point) >= 3:
std_dev = point[2] - point[1]
excel_file.write("%f\t%f\t%f\n" % (x_val, avg, std_dev))
else:
excel_file.write("%f\t%f\n" % (x_val, avg))
#excel_file.write("%f\t%f\n" % (x_val, avg))
#excel_file.write("\t\t%f\n" % (std_dev))
excel_file.close()
############################################
# Excel Bar Graphs
#
# This function generates a text file
# that can be imported, and (hopefully)
# easily plotted as a stacked bar graph.
############################################
def make_excel_stacked_bar(name, fields, data):
filename = "%s/%s.txt" % (excel_dir, name)
print "making excel text file: %s" % filename
excel_file = open(filename, "w")
fields.reverse()
excel_file.write("\t")
for f in fields:
excel_file.write("\t%s" % f)
excel_file.write("\n")
for set in data:
is_first = 1
for tuple in set:
if is_first == 1:
excel_file.write(tuple) # name of the set
is_first = 0
continue
else:
excel_file.write("\t")
excel_file.write("%f" % tuple[0])
values = tuple[1:]
values.reverse()
for value in values:
excel_file.write("\t%f" % (value))
excel_file.write("\n")
excel_file.close()
############################################
# Excel Bar Graphs
#
# This function generates a text file
# that can be imported, and (hopefully)
# easily plotted as a bar graph.
############################################
def make_excel_bar(name, data):
filename = "%s/%s.txt" % (excel_dir, name)
print "making excel text file: %s" % filename
excel_file = open(filename, "w")
for set in data:
is_first = 1
for tuple in set:
if is_first == 1:
excel_file.write(tuple) # name of the set
is_first = 0
#continue
else:
print "tuple:"
print tuple
excel_file.write("\t")
excel_file.write(tuple[0]) # name of the set
if len(tuple) == 2:
values = tuple[1]
else:
values = tuple[1:]
for value in values:
excel_file.write("\t%f" % (value))
excel_file.write("\n")
excel_file.close()
|
dberc/tpzsimul.gems
|
jgraph/mfgraph.py
|
Python
|
gpl-2.0
| 28,880
|
from django.conf.urls.defaults import *
from lifeflow.feeds import *
from lifeflow.models import *
from lifeflow.sitemaps import ProjectSitemap
from django.contrib.sitemaps import GenericSitemap
from django.views.decorators.cache import cache_page
from django.contrib.syndication.views import feed
# Cache
def cache(type):
return cache_page(type, 60*30)
handler500 = 'lifeflow.views.server_error'
flows = Flow.objects.all()
projects = Project.objects.all()
tags = Tag.objects.all()
languages = Language.objects.all()
authors = Author.objects.all()
feeds = {
'author': AuthorFeed,
'all' : AllFeed,
'flow' : FlowFeed,
'tag' : TagFeed,
'series' : SeriesFeed,
'translations' : TranslationFeed,
'projects' : ProjectFeed,
'comment' : CommentFeed,
'entry_comment' : EntryCommentFeed,
'language' : LanguageFeed,
}
all_dict = {
'queryset' : Entry.objects.all(),
'date_field' : 'pub_date',
}
sitemaps = {
'projects' : ProjectSitemap,
'entries' : GenericSitemap(all_dict, priority=0.6),
}
urlpatterns = patterns(
'',
url(r'^$', 'lifeflow.views.front'),
url(r'^sitemap.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
# comments
url(r'^comments/create/$', 'lifeflow.views.comments'),
url(r'^comments/create/(?P<entry_id>\d+)/$', 'lifeflow.views.comments'),
url(r'^comments/create/(?P<entry_id>\d+)/(?P<parent_id>\d+)/$', 'lifeflow.views.comments'),
# feeds and rss views
url(r'^feeds/(?P<url>.*)/$', cache(feed), {'feed_dict': feeds}),
url(r'^meta/rss/$', 'lifeflow.views.rss'),
# date based generic views
url(r'^entry/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/(?P<slug>[-\w]+)/$', 'django.views.generic.date_based.object_detail', dict(all_dict, slug_field='slug')),
url(r'^entry/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/$', 'django.views.generic.date_based.archive_day', all_dict),
url(r'^entry/(?P<year>\d{4})/(?P<month>[a-z]{3})/$', 'django.views.generic.date_based.archive_month', all_dict),
url(r'^entry/(?P<year>\d{4})/$', 'django.views.generic.date_based.archive_year', all_dict),
url(r'^entry/$', 'django.views.generic.date_based.archive_index', all_dict),
# tag generic views
url(r'^tags/$', 'django.views.generic.list_detail.object_list', dict(queryset=tags)),
url(r'^tags/(?P<slug>[-\w]+)/$', 'django.views.generic.list_detail.object_detail', dict(queryset=tags, slug_field='slug')),
# language generic views
url(r'^language/$', 'django.views.generic.list_detail.object_list', dict(queryset=languages)),
url(r'^language/(?P<slug>[-\w]+)/$', 'django.views.generic.list_detail.object_detail', dict(queryset=languages, slug_field='slug')),
# author generic views
url(r'^author/(?P<slug>[-\w]+)/$', 'django.views.generic.list_detail.object_detail', dict(queryset=authors, slug_field='slug')),
url(r'^author/$', 'django.views.generic.list_detail.object_list', dict(queryset=authors)),
# articles views (custom view)
url(r'^articles/$', 'lifeflow.views.articles'),
# projects views
url(r'^projects/$', 'django.views.generic.list_detail.object_list', dict(queryset=projects)),
url(r'^projects/(?P<slug>[-\w]+)/$', 'django.views.generic.list_detail.object_detail', dict(queryset=projects, slug_field='slug')),
# editor
url(r'^editor/', include('lifeflow.editor.urls')),
# flows
url(r'^(?P<slug>[-\w]+)/$', 'lifeflow.views.flow'),
)
|
lethain/lifeflow
|
urls.py
|
Python
|
mit
| 3,487
|
#!/usr/bin/python
# *
# * Copyright (C) 2012-2013 Garrett Brown
# * Copyright (C) 2010 j48antialias
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with XBMC; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# * Based on code by tknorris:
# * https://offshoregit.com/tknorris/tknorris-release-repo/raw/master/addons_xml_generator2.py
# * Based on code by j48antialias:
# * https://anarchintosh-projects.googlecode.com/files/addons_xml_generator.py
# *
# * Changes since v2:
# * - (assumed) zips reside in folder "download"
# * - md5 checksum creation added for zips
# * - Skip moving files and zip creation if zip file for the same version already exists
# * - alphabetical sorting
""" addons.xml generator """
import os
import sys
import time
import re
import xml.etree.ElementTree as ET
try:
import shutil, zipfile
except Exception as e:
print('An error occurred importing module!\n%s\n' % e)
# Compatibility with 3.0, 3.1 and 3.2 not supporting u"" literals
print(sys.version)
if sys.version < '3':
import codecs
def u(x):
return codecs.unicode_escape_decode(x)[0]
else:
def u(x):
return x
class Generator:
"""
Generates a new addons.xml file from each addons addon.xml file
and a new addons.xml.md5 hash file. Must be run from the root of
the checked-out repo. Only handles single depth folder structure.
"""
def __init__(self):
# generate files
self._generate_addons_file()
self._generate_md5_file()
# notify user
print("Finished updating addons xml and md5 files\n")
def _generate_addons_file(self):
# addon list
addons = sorted(os.listdir("."))
# final addons text
addons_xml = u("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<addons>\n")
# loop thru and add each addons addon.xml file
for addon in addons:
try:
# skip any file or .svn folder or .git folder
if (not os.path.isdir(addon) or addon == ".svn" or addon == ".git" or addon == ".github" or addon == "download"): continue
# create path
_path = os.path.join(addon, "addon.xml")
# split lines for stripping
xml_lines = open(_path, "r").read().splitlines()
# new addon
addon_xml = ""
# loop thru cleaning each line
for line in xml_lines:
# skip encoding format line
if (line.find("<?xml") >= 0): continue
# add line
if sys.version < '3':
addon_xml += unicode(line.rstrip() + "\n", "UTF-8")
else:
addon_xml += line.rstrip() + "\n"
# we succeeded so add to our final addons.xml text
addons_xml += addon_xml.rstrip() + "\n\n"
except Exception as e:
# missing or poorly formatted addon.xml
print("Excluding %s for %s" % (_path, e))
# clean and add closing tag
addons_xml = addons_xml.strip() + u("\n</addons>\n")
# save file
self._save_file(addons_xml.encode("UTF-8"), file="addons.xml")
def _generate_md5_file(self):
# create a new md5 hash
try:
import md5
m = md5.new(open("addons.xml", "r").read()).hexdigest()
except ImportError:
import hashlib
m = hashlib.md5(open("addons.xml", "r", encoding="UTF-8").read().encode("UTF-8")).hexdigest()
# save file
try:
self._save_file(m.encode("UTF-8"), file="addons.xml.md5")
except Exception as e:
# oops
print("An error occurred creating addons.xml.md5 file!\n%s" % e)
def _save_file(self, data, file):
try:
# write data to the file (use b for Python 3)
open(file, "wb").write(data)
except Exception as e:
# oops
print("An error occurred saving %s file!\n%s" % (file, e))
def zipfolder(foldername, target_dir, zips_dir, addon_dir):
zipobj = zipfile.ZipFile(zips_dir + foldername, 'w', zipfile.ZIP_DEFLATED)
rootlen = len(target_dir) + 1
for base, dirs, files in os.walk(target_dir):
for f in files:
fn = os.path.join(base, f)
zipobj.write(fn, os.path.join(addon_dir, fn[rootlen:]))
zipobj.close()
if (__name__ == "__main__"):
# start
Generator()
# rezip files and move
try:
print('Starting zip file creation...')
rootdir = sys.path[0]
zipsdir = rootdir + os.sep + 'download'
filesinrootdir = sorted(os.listdir(rootdir))
for x in filesinrootdir:
if re.search("^(context|plugin|script|service|skin|repository|docker)" , x) and not re.search('.zip', x):
zipfilename = x + '.zip'
zipfilenamefirstpart = zipfilename[:-4]
zipfilenamelastpart = zipfilename[len(zipfilename) - 4:]
zipsfolder = os.path.normpath(os.path.join('download', x)) + os.sep
foldertozip = rootdir + os.sep + x
filesinfoldertozip = sorted(os.listdir(foldertozip))
# #check if download folder exists
if not os.path.exists(zipsfolder):
os.makedirs(zipsfolder)
print('Directory doesn\'t exist, creating: ' + zipsfolder)
# #get addon version number
if "addon.xml" in filesinfoldertozip:
tree = ET.parse(os.path.join(rootdir, x, "addon.xml"))
root = tree.getroot()
for elem in root.iter('addon'):
print('%s %s version: %s' % (x, elem.tag, elem.attrib['version']))
version = '-' + elem.attrib['version']
# # #check for existing zips
if not os.path.exists(zipsfolder + x + version + '.zip'):
# #check if and move addon, changelog, fanart and icon to zipdir
for y in filesinfoldertozip:
# print('processing file: ' + os.path.join(rootdir,x,y))
if re.search("addon|changelog|icon|fanart", y):
shutil.copyfile(os.path.join(rootdir, x, y), os.path.join(zipsfolder, y))
print('Copying %s to %s' % (y, zipsfolder))
# #check for and zip the folders
print('Zipping %s and moving to %s\n' % (x, zipsfolder))
try:
zipfolder(zipfilenamefirstpart + version + zipfilenamelastpart, foldertozip, zipsfolder, x)
print('zipped with zipfolder')
# # #create md5 checksum for zips
import hashlib
try:
m = hashlib.md5(open("%s" % (zipsfolder + x + version + '.zip'), "rb").read()).hexdigest()
open("%s" % (zipsfolder + x + version + '.zip.md5'), "wb").write(m.encode("UTF-8"))
print("zip.md5 file created\n")
except Exception as e:
print("An error occurred creating zip.md5 file!\n%s" % e)
except:
if os.path.exists(zipsfolder + x + version + '.zip'):
os.remove(zipsfolder + x + version + '.zip')
print('trying shutil')
try:
shutil.move(shutil.make_archive(foldertozip + version, 'zip', rootdir, x), zipsfolder)
print('zipped with shutil\n')
except Exception as e:
print('Cannot create zip file\nshutil %s\n' % e)
else:
print('Zip file for %s version %s already exists, skipping moving files and zip creation.\n' % (x, version))
except Exception as e:
print('Cannot create or move the needed files\n%s' % e)
print('Done')
|
dknlght/dkodi
|
src/addons_xml_generator3.py
|
Python
|
gpl-2.0
| 8,998
|