| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
import numpy as np
import theano
import theano.tensor as T
from utils import *
import operator
import os
import sys
class RNNEMBEDTheano:
def __init__(self, word_dim, hidden_dim=100, embed_dim=100, bptt_truncate=4):
# Assign instance variables
self.word_dim = word_dim
self.hidden_dim = hidden_dim
self.embed_dim = embed_dim
self.bptt_truncate = bptt_truncate
# Randomly initialize the network parameters
# U sends signals from input to embed layer
U = np.random.uniform(-np.sqrt(1./word_dim),
np.sqrt(1./word_dim),
(embed_dim, word_dim))
# V sends signals from hidden to output layer
V = np.random.uniform(-np.sqrt(1./hidden_dim),
np.sqrt(1./hidden_dim),
(word_dim, hidden_dim))
        # W sends signals from the previous hidden state to the hidden layer, which makes the network recurrent
W = np.random.uniform(-np.sqrt(1./hidden_dim),
np.sqrt(1./hidden_dim),
(hidden_dim, hidden_dim))
# E sends signals from embed to hidden layer
E = np.random.uniform(-np.sqrt(1./embed_dim),
np.sqrt(1./embed_dim),
(hidden_dim, embed_dim))
        # Theano: create shared variables
self.U = theano.shared(name='U', value=U.astype(theano.config.floatX))
self.V = theano.shared(name='V', value=V.astype(theano.config.floatX))
self.W = theano.shared(name='W', value=W.astype(theano.config.floatX))
self.E = theano.shared(name='E', value=E.astype(theano.config.floatX))
# We store the Theano graph here
self.theano = {}
self.__theano_build__()
def __theano_build__(self):
U, V, W, E = self.U, self.V, self.W, self.E
x = T.ivector('x')
y = T.ivector('y')
def forward_prop_step(x_t, s_t_prev, U, V, W, E):
x1 = T.tanh(U[:,x_t])
x2 = T.tanh(E.dot(x1))
s_t = T.tanh(x2 + W.dot(s_t_prev))
#s_t = T.tanh(U[:x_t] + W.dot(s_t_prev))
o_t = T.nnet.softmax(V.dot(s_t))
return [o_t[0], s_t]
[o,s], updates = theano.scan(
forward_prop_step,
sequences=x,
outputs_info=[None, dict(initial=T.zeros(self.hidden_dim))],
non_sequences=[U, V, W, E],
truncate_gradient=self.bptt_truncate,
strict=True)
prediction = T.argmax(o, axis=1)
o_error = T.sum(T.nnet.categorical_crossentropy(o, y))
# Gradients
dU = T.grad(o_error, U)
dV = T.grad(o_error, V)
dW = T.grad(o_error, W)
dE = T.grad(o_error, E)
# Assign functions
self.forward_propagation = theano.function([x], o)
self.predict = theano.function([x], prediction)
self.ce_error = theano.function([x, y], o_error)
self.bptt = theano.function([x, y], [dU, dV, dW, dE])
# SGD
learning_rate = T.scalar('learning_rate')
self.sgd_step = theano.function([x,y,learning_rate], [],
updates=[(self.U, self.U - learning_rate * dU),
(self.V, self.V - learning_rate * dV),
(self.W, self.W - learning_rate * dW),
(self.E, self.E - learning_rate * dE)])
def calculate_total_loss(self, X, Y):
return np.sum([self.ce_error(x,y) for x,y in zip(X,Y)])
def calculate_loss(self, X, Y):
        # Divide the total loss by the number of words
num_words = np.sum([len(y) for y in Y])
return self.calculate_total_loss(X,Y)/float(num_words)
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
# Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
model.bptt_truncate = 1000
# Calculate the gradients using backprop
bptt_gradients = model.bptt(x, y)
    # List of all parameters we want to check.
model_parameters = ['U', 'V', 'W', 'E']
# Gradient check for each parameter
for pidx, pname in enumerate(model_parameters):
        # Get the actual parameter value from the model, e.g. model.W
parameter_T = operator.attrgetter(pname)(model)
parameter = parameter_T.get_value()
print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
# Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# Save the original value so we can reset it later
original_value = parameter[ix]
# Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
parameter[ix] = original_value + h
parameter_T.set_value(parameter)
gradplus = model.calculate_total_loss([x],[y])
parameter[ix] = original_value - h
parameter_T.set_value(parameter)
gradminus = model.calculate_total_loss([x],[y])
estimated_gradient = (gradplus - gradminus)/(2*h)
parameter[ix] = original_value
parameter_T.set_value(parameter)
# The gradient for this parameter calculated using backpropagation
backprop_gradient = bptt_gradients[pidx][ix]
            # Calculate the relative error: (|x - y|/(|x| + |y|))
relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
            # If the error is too large, fail the gradient check
if relative_error > error_threshold:
print "Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix)
print "+h Loss: %f" % gradplus
print "-h Loss: %f" % gradminus
print "Estimated_gradient: %f" % estimated_gradient
print "Backpropagation gradient: %f" % backprop_gradient
print "Relative Error: %f" % relative_error
return
it.iternext()
print "Gradient check for parameter %s passed." % (pname)
| asapypy/theano_rnn_embed | rnn_theano_embed.py | Python | apache-2.0 | 6,256 |
#!/usr/bin/env python
# coding=utf-8
'''
This problem is tedious: pure board manipulation plus recursion. I somehow missed that the matrix need not be square, and spent quite a while double-checking because of it.
'''
class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
if board[click[0]][click[1]] == 'M': self.revealMine(board,click)
if board[click[0]][click[1]] == 'E': self.revealEmptySquare(board,click)
return board
def revealMine(self, board, click):
board[click[0]][click[1]] = 'X'
return
def revealEmptySquare(self, board, click):
xl = len(board)
yl = len(board[0])
adjList = []
count = 0
idir = [-1,-1,-1,0,1,1,1,0]
jdir = [-1,0,1,1,1,0,-1,-1]
for i in range(8):
x, y = click[0]+idir[i], click[1]+jdir[i]
if x>=0 and x<xl and y>=0 and y<yl:
if board[x][y] == 'M':
count += 1
elif board[x][y] == 'E':
adjList.append([x,y])
if count:
board[click[0]][click[1]] = str(count)
else:
board[click[0]][click[1]] = 'B'
            for c in adjList:
                # Skip cells a sibling recursive call has already revealed
                if board[c[0]][c[1]] == 'E':
                    self.revealEmptySquare(board, c)
return
if __name__ == '__main__':
s = Solution()
print s.updateBoard([['E', 'E', 'E', 'E', 'E'],
['E', 'E', 'M', 'E', 'E'],
['E', 'E', 'E', 'E', 'E'],
['E', 'E', 'E', 'E', 'E']],[3,0])
| xijunlee/leetcode | 529.py | Python | mit | 1,422 |
# Copyright (c) 2014 Olli Wang. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from svg2nvg.parser import SVGParser
parser = argparse.ArgumentParser(
description='Convert SVG files to NVG source code')
parser.add_argument('svg_path', help='path to an SVG file')
parser.add_argument('-c', '--context', default='context',
help='the variable name of nanovg context')
parser.add_argument('-d', '--dest', default=os.curdir,
help='the directory to keep generated files')
parser.add_argument('--header_file', action='store_true',
help='generate header file')
parser.add_argument('-i', '--include_path', default='',
help='the include path for generated headers')
parser.add_argument('-vg', '--nanovg_include_path', default='nanovg.h',
help='the include path for nanovg')
parser.add_argument('--source_file', action='store_true',
help='generate source file')
parser.add_argument('-ns', '--namespace', action='store_true',
dest='uses_namespace',
help='add C++ namespace to header file')
def execute_from_command_line():
if len(sys.argv) == 1:
parser.print_help()
return
args = parser.parse_args()
svg_parser = SVGParser(args.context)
svg_parser.parse_file(args.svg_path)
basename = os.path.splitext(os.path.basename(args.svg_path))[0]
dest_path = os.path.join(os.path.abspath(args.dest), basename)
if args.source_file:
result = svg_parser.get_header_file_content(basename,
args.nanovg_include_path,
args.uses_namespace,
prototype_only=True)
if args.dest is not None:
header_file = open('%s.h' % dest_path, 'w')
header_file.write(result)
header_file.close()
result = svg_parser.get_source_file_content(basename,
args.nanovg_include_path,
args.uses_namespace,
args.include_path)
if args.dest is not None:
source_file = open('%s.cc' % dest_path, 'w')
source_file.write(result)
source_file.close()
elif args.header_file:
result = svg_parser.get_header_file_content(args.svg_path,
args.nanovg_include_path,
args.uses_namespace,
prototype_only=False)
if args.dest is not None:
header_file = open('%s.h' % dest_path, 'w')
header_file.write(result)
header_file.close()
else:
result = svg_parser.get_content()
print(result)
| ollix/svg2nvg | svg2nvg/command.py | Python | apache-2.0 | 3,567 |
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo_config import cfg
from oslotest import base as test_base
from oslotest import moxstubout
import six
if six.PY3:
@contextlib.contextmanager
def nested(*contexts):
with contextlib.ExitStack() as stack:
yield [stack.enter_context(c) for c in contexts]
else:
nested = contextlib.nested
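# Illustrative note (added; not in the original module): the nested() shim
# above lets a single with-statement manage several context managers on both
# Python 2 and Python 3, e.g.:
#
#     with nested(open('a.txt'), open('b.txt')) as (a, b):
#         ...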
class BaseTestCase(test_base.BaseTestCase):
def setUp(self, conf=cfg.CONF):
super(BaseTestCase, self).setUp()
moxfixture = self.useFixture(moxstubout.MoxStubout())
self.mox = moxfixture.mox
self.stubs = moxfixture.stubs
self.conf = conf
self.addCleanup(self.conf.reset)
| magic0704/oslo.db | oslo_db/tests/utils.py | Python | apache-2.0 | 1,310 |
# Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections.abc import Mapping, Iterable
from operator import itemgetter
from numbers import Number
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, tosequence
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(TransformerMixin, BaseEstimator):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
If a feature value is a sequence or set of strings, this transformer
will iterate over the values and will count the occurrences of each string
value.
However, note that this transformer will only do a binary one-hot encoding
when feature values are of type string. If categorical features are
represented as numeric values such as int or iterables of strings, the
DictVectorizer can be followed by
:class:`~sklearn.preprocessing.OneHotEncoder` to complete
binary one-hot encoding.
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : dtype, default=np.float64
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator : str, default="="
Separator string used when constructing new features for one-hot
coding.
sparse : bool, default=True
Whether transform should produce scipy.sparse matrices.
sort : bool, default=True
Whether ``feature_names_`` and ``vocabulary_`` should be
sorted when fitting.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[2., 0., 1.],
[0., 1., 3.]])
>>> v.inverse_transform(X) == [{'bar': 2.0, 'foo': 1.0},
... {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[0., 0., 4.]])
See Also
--------
FeatureHasher : Performs vectorization using only a hash function.
sklearn.preprocessing.OrdinalEncoder : Handles nominal/categorical
features encoded as columns of arbitrary data types.
"""
def __init__(self, *, dtype=np.float64, separator="=", sparse=True, sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def _add_iterable_element(
self,
f,
v,
feature_names,
vocab,
*,
fitting=True,
transforming=False,
indices=None,
values=None,
):
"""Add feature names for iterable of strings"""
for vv in v:
if isinstance(vv, str):
feature_name = "%s%s%s" % (f, self.separator, vv)
vv = 1
else:
raise TypeError(
f"Unsupported type {type(vv)} in iterable "
"value. Only iterables of string are "
"supported."
)
if fitting and feature_name not in vocab:
vocab[feature_name] = len(feature_names)
feature_names.append(feature_name)
if transforming and feature_name in vocab:
indices.append(vocab[feature_name])
values.append(self.dtype(vv))
return
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
.. versionchanged:: 0.24
Accepts multiple string values for one categorical feature.
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in x.items():
if isinstance(v, str):
feature_name = "%s%s%s" % (f, self.separator, v)
v = 1
elif isinstance(v, Number) or (v is None):
feature_name = f
elif isinstance(v, Mapping):
raise TypeError(
f"Unsupported value type {type(v)} "
f"for {f}: {v}.\n"
"Mapping objects are not supported."
)
elif isinstance(v, Iterable):
feature_name = None
self._add_iterable_element(f, v, feature_names, vocab)
if feature_name is not None:
if feature_name not in vocab:
vocab[feature_name] = len(feature_names)
feature_names.append(feature_name)
if self.sort:
feature_names.sort()
vocab = {f: i for i, f in enumerate(feature_names)}
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report"
)
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
transforming = True
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = [0]
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in x.items():
if isinstance(v, str):
feature_name = "%s%s%s" % (f, self.separator, v)
v = 1
elif isinstance(v, Number) or (v is None):
feature_name = f
elif not isinstance(v, Mapping) and isinstance(v, Iterable):
feature_name = None
self._add_iterable_element(
f,
v,
feature_names,
vocab,
fitting=fitting,
transforming=transforming,
indices=indices,
values=values,
)
else:
raise TypeError(
f"Unsupported value Type {type(v)} "
f"for {f}: {v}.\n"
f"{type(v)} objects are not supported."
)
if feature_name is not None:
if fitting and feature_name not in vocab:
vocab[feature_name] = len(feature_names)
feature_names.append(feature_name)
if feature_name in vocab:
indices.append(vocab[feature_name])
values.append(self.dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = np.frombuffer(indices, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix(
(values, indices, indptr), shape=shape, dtype=dtype
)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
.. versionchanged:: 0.24
Accepts multiple string values for one categorical feature.
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample matrix.
dict_type : type, default=dict
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects of shape (n_samples,)
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=["csr", "csc"])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in range(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings of shape (n_samples,)
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=False)
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : bool, default=False
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support())
DictVectorizer()
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [
f for f, i in sorted(new_vocab.items(), key=itemgetter(1))
]
return self
def _more_tags(self):
return {"X_types": ["dict"]}
| shyamalschandra/scikit-learn | sklearn/feature_extraction/_dict_vectorizer.py | Python | bsd-3-clause | 14,656 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import mox
import unittest
from quantum.openstack.common import uuidutils
from quantum.plugins.nec.common import ofc_client
from quantum.plugins.nec.db import models as nmodels
from quantum.plugins.nec import drivers
class TestConfig(object):
"""Configuration for this test"""
host = '127.0.0.1'
port = 8888
class TremaDriverTestBase():
driver_name = "trema"
def setUp(self):
self.mox = mox.Mox()
self.driver = drivers.get_driver(self.driver_name)(TestConfig)
self.mox.StubOutWithMock(ofc_client.OFCClient, 'do_request')
def tearDown(self):
self.mox.UnsetStubs()
def get_ofc_item_random_params(self):
"""create random parameters for ofc_item test"""
tenant_id = uuidutils.generate_uuid()
network_id = uuidutils.generate_uuid()
port_id = uuidutils.generate_uuid()
portinfo = nmodels.PortInfo(id=port_id, datapath_id="0x123456789",
port_no=1234, vlan_id=321,
mac="11:22:33:44:55:66")
return tenant_id, network_id, portinfo
class TremaDriverNetworkTestBase(TremaDriverTestBase):
def testa_create_network(self):
t, n, p = self.get_ofc_item_random_params()
description = "desc of %s" % n
body = {'id': n, 'description': description}
ofc_client.OFCClient.do_request("POST", "/networks", body=body)
self.mox.ReplayAll()
self.driver.create_network(t, description, n)
self.mox.VerifyAll()
def testb_update_network(self):
t, n, p = self.get_ofc_item_random_params()
description = "desc of %s" % n
body = {'description': description}
ofc_client.OFCClient.do_request("PUT", "/networks/%s" % n, body=body)
self.mox.ReplayAll()
self.driver.update_network(t, n, description)
self.mox.VerifyAll()
def testc_delete_network(self):
t, n, p = self.get_ofc_item_random_params()
ofc_client.OFCClient.do_request("DELETE", "/networks/%s" % n)
self.mox.ReplayAll()
self.driver.delete_network(t, n)
self.mox.VerifyAll()
class TremaPortBaseDriverTest(TremaDriverNetworkTestBase, unittest.TestCase):
driver_name = "trema_port"
def testd_create_port(self):
t, n, p = self.get_ofc_item_random_params()
body = {'id': p.id,
'datapath_id': p.datapath_id,
'port': str(p.port_no),
'vid': str(p.vlan_id)}
ofc_client.OFCClient.do_request("POST",
"/networks/%s/ports" % n, body=body)
self.mox.ReplayAll()
self.driver.create_port(t, n, p, p.id)
self.mox.VerifyAll()
def testd_delete_port(self):
t, n, p = self.get_ofc_item_random_params()
ofc_client.OFCClient.do_request("DELETE",
"/networks/%s/ports/%s" % (n, p.id))
self.mox.ReplayAll()
self.driver.delete_port(t, n, p.id)
self.mox.VerifyAll()
class TremaPortMACBaseDriverTest(TremaDriverNetworkTestBase,
unittest.TestCase):
driver_name = "trema_portmac"
def testd_create_port(self):
t, n, p = self.get_ofc_item_random_params()
dummy_port = "dummy-%s" % p.id
path_1 = "/networks/%s/ports" % n
body_1 = {'id': dummy_port,
'datapath_id': p.datapath_id,
'port': str(p.port_no),
'vid': str(p.vlan_id)}
ofc_client.OFCClient.do_request("POST", path_1, body=body_1)
path_2 = "/networks/%s/ports/%s/attachments" % (n, dummy_port)
body_2 = {'id': p.id, 'mac': p.mac}
ofc_client.OFCClient.do_request("POST", path_2, body=body_2)
path_3 = "/networks/%s/ports/%s" % (n, dummy_port)
ofc_client.OFCClient.do_request("DELETE", path_3)
self.mox.ReplayAll()
self.driver.create_port(t, n, p, p.id)
self.mox.VerifyAll()
def testd_delete_port(self):
t, n, p = self.get_ofc_item_random_params()
dummy_port = "dummy-%s" % p.id
path = "/networks/%s/ports/%s/attachments/%s" % (n, dummy_port, p.id)
ofc_client.OFCClient.do_request("DELETE", path)
self.mox.ReplayAll()
self.driver.delete_port(t, n, p.id)
self.mox.VerifyAll()
class TremaMACBaseDriverTest(TremaDriverNetworkTestBase, unittest.TestCase):
driver_name = "trema_mac"
def testd_create_port(self):
t, n, p = self.get_ofc_item_random_params()
path = "/networks/%s/attachments" % n
body = {'id': p.id, 'mac': p.mac}
ofc_client.OFCClient.do_request("POST", path, body=body)
self.mox.ReplayAll()
self.driver.create_port(t, n, p, p.id)
self.mox.VerifyAll()
def testd_delete_port(self):
t, n, p = self.get_ofc_item_random_params()
path = "/networks/%s/attachments/%s" % (n, p.id)
ofc_client.OFCClient.do_request("DELETE", path)
self.mox.ReplayAll()
self.driver.delete_port(t, n, p.id)
self.mox.VerifyAll()
class TremaFilterDriverTest(TremaDriverTestBase, unittest.TestCase):
def get_ofc_item_random_params(self):
"""create random parameters for ofc_item test"""
t, n, p = (super(TremaFilterDriverTest, self).
get_ofc_item_random_params())
filter_id = uuidutils.generate_uuid()
filter_dict = {'tenant_id': t,
'id': filter_id,
'network_id': n,
'priority': 123,
'action': "ACCEPT",
'in_port': p.id,
'src_mac': p.mac,
'dst_mac': "",
'eth_type': 0,
'src_cidr': "",
'dst_cidr': "",
'src_port': 0,
'dst_port': 0,
'protocol': "TCP",
'admin_state_up': True,
'status': "ACTIVE"}
filter_item = nmodels.PacketFilter(**filter_dict)
return t, n, p, filter_item
def testa_create_filter(self):
t, n, p, f = self.get_ofc_item_random_params()
ofp_wildcards = 'dl_vlan,dl_vlan_pcp,nw_tos,dl_dst,' + \
'nw_src:32,nw_dst:32,tp_src,tp_dst'
body = {'id': f.id,
'action': 'ALLOW',
'priority': 123,
'slice': n,
'in_datapath_id': '0x123456789',
'in_port': 1234,
'nw_proto': '0x6',
'dl_type': '0x800',
'dl_src': p.mac,
'ofp_wildcards': ofp_wildcards}
ofc_client.OFCClient.do_request("POST", "/filters", body=body)
self.mox.ReplayAll()
self.driver.create_filter(t, n, f, p, f.id)
self.mox.VerifyAll()
def testb_delete_filter(self):
t, n, p, f = self.get_ofc_item_random_params()
ofc_client.OFCClient.do_request("DELETE", "/filters/%s" % f.id)
self.mox.ReplayAll()
self.driver.delete_filter(t, n, f.id)
self.mox.VerifyAll()
| aristanetworks/arista-ovs-quantum | quantum/tests/unit/nec/test_trema_driver.py | Python | apache-2.0 | 7,926 |
"""
Demo for survival analysis (regression) using Accelerated Failure Time (AFT) model, using Optuna
to tune hyperparameters
"""
from sklearn.model_selection import ShuffleSplit
import pandas as pd
import numpy as np
import xgboost as xgb
import optuna
# The Veterans' Administration Lung Cancer Trial
# The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980)
df = pd.read_csv('../data/veterans_lung_cancer.csv')
print('Training data:')
print(df)
# Split features and labels
y_lower_bound = df['Survival_label_lower_bound']
y_upper_bound = df['Survival_label_upper_bound']
X = df.drop(['Survival_label_lower_bound', 'Survival_label_upper_bound'], axis=1)
# Split data into training and validation sets
rs = ShuffleSplit(n_splits=2, test_size=.7, random_state=0)
train_index, valid_index = next(rs.split(X))
dtrain = xgb.DMatrix(X.values[train_index, :])
dtrain.set_float_info('label_lower_bound', y_lower_bound[train_index])
dtrain.set_float_info('label_upper_bound', y_upper_bound[train_index])
dvalid = xgb.DMatrix(X.values[valid_index, :])
dvalid.set_float_info('label_lower_bound', y_lower_bound[valid_index])
dvalid.set_float_info('label_upper_bound', y_upper_bound[valid_index])
# Define hyperparameter search space
base_params = {'verbosity': 0,
'objective': 'survival:aft',
'eval_metric': 'aft-nloglik',
'tree_method': 'hist'} # Hyperparameters common to all trials
def objective(trial):
params = {'learning_rate': trial.suggest_loguniform('learning_rate', 0.01, 1.0),
'aft_loss_distribution': trial.suggest_categorical('aft_loss_distribution',
['normal', 'logistic', 'extreme']),
'aft_loss_distribution_scale': trial.suggest_loguniform('aft_loss_distribution_scale', 0.1, 10.0),
'max_depth': trial.suggest_int('max_depth', 3, 8),
'lambda': trial.suggest_loguniform('lambda', 1e-8, 1.0),
'alpha': trial.suggest_loguniform('alpha', 1e-8, 1.0)} # Search space
params.update(base_params)
pruning_callback = optuna.integration.XGBoostPruningCallback(trial, 'valid-aft-nloglik')
bst = xgb.train(params, dtrain, num_boost_round=10000,
evals=[(dtrain, 'train'), (dvalid, 'valid')],
early_stopping_rounds=50, verbose_eval=False, callbacks=[pruning_callback])
if bst.best_iteration >= 25:
return bst.best_score
else:
return np.inf # Reject models with < 25 trees
# Run hyperparameter search
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=200)
print('Completed hyperparameter tuning with best aft-nloglik = {}.'.format(study.best_trial.value))
params = {}
params.update(base_params)
params.update(study.best_trial.params)
# Re-run training with the best hyperparameter combination
print('Re-running the best trial... params = {}'.format(params))
bst = xgb.train(params, dtrain, num_boost_round=10000,
evals=[(dtrain, 'train'), (dvalid, 'valid')],
early_stopping_rounds=50)
# Run prediction on the validation set
df = pd.DataFrame({'Label (lower bound)': y_lower_bound[valid_index],
'Label (upper bound)': y_upper_bound[valid_index],
'Predicted label': bst.predict(dvalid)})
print(df)
# Show only data points with right-censored labels
print(df[np.isinf(df['Label (upper bound)'])])
# Save trained model
bst.save_model('aft_best_model.json')
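# Illustrative follow-up (added; not part of the original demo): the saved
# model can be reloaded later for scoring without retraining.
bst2 = xgb.Booster()
bst2.load_model('aft_best_model.json')
print('Reloaded model predictions:', bst2.predict(dvalid)[:5])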
| dmlc/xgboost | demo/aft_survival/aft_survival_demo_with_optuna.py | Python | apache-2.0 | 3,548 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example downloads an adgroup performance report for all child accounts.
To get report fields, run get_report_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import multiprocessing
import os
from Queue import Empty
import time
import googleads.adwords
import googleads.errors
# Timeout between retries in seconds.
BACKOFF_FACTOR = 5
# Maximum number of processes to spawn.
MAX_PROCESSES = multiprocessing.cpu_count()
# Maximum number of retries for 500 errors.
MAX_RETRIES = 5
# Maximum number of items to be sent in a single API response.
PAGE_SIZE = 100
# Directory to download the reports to.
REPORT_DOWNLOAD_DIRECTORY = 'INSERT_REPORT_DOWNLOAD_DIRECTORY'
def _DownloadReport(process_id, report_download_directory, customer_id,
report_definition):
"""Helper function used by ReportWorker to download customer report.
Note that multiprocessing differs between Windows / Unix environments. A
Process or its subclasses in Windows must be serializable with pickle, but
that is not possible for AdWordsClient or ReportDownloader. This top-level
function is used as a work-around for Windows support.
Args:
process_id: The PID of the process downloading the report.
report_download_directory: A string indicating the directory where you
would like to download the reports.
customer_id: A str AdWords customer ID for which the report is being
downloaded.
report_definition: A dict containing the report definition to be used.
Returns:
A tuple indicating a boolean success/failure status, and dict request
context.
"""
report_downloader = (googleads.adwords.AdWordsClient.LoadFromStorage()
.GetReportDownloader())
filepath = os.path.join(report_download_directory,
'adgroup_%d.csv' % customer_id)
retry_count = 0
while True:
print('[%d/%d] Loading report for customer ID "%s" into "%s"...'
% (process_id, retry_count, customer_id, filepath))
try:
with open(filepath, 'wb') as handler:
report_downloader.DownloadReport(
report_definition, output=handler,
client_customer_id=customer_id)
return (True, {'customerId': customer_id})
except googleads.errors.AdWordsReportError as e:
      if e.code == 500 and retry_count < MAX_RETRIES:
        retry_count += 1
        time.sleep(retry_count * BACKOFF_FACTOR)
else:
print('Report failed for customer ID "%s" with code "%d" after "%d" '
'retries.' % (customer_id, e.code, retry_count+1))
return (False, {'customerId': customer_id, 'code': e.code,
'message': e.message})
class ReportWorker(multiprocessing.Process):
"""A worker Process used to download reports for a set of customer IDs."""
def __init__(self, report_download_directory, report_definition,
input_queue, success_queue, failure_queue):
"""Initializes a ReportWorker.
Args:
report_download_directory: A string indicating the directory where you
would like to download the reports.
report_definition: A dict containing the report definition that you would
like to run against all customer IDs in the input_queue.
input_queue: A Queue instance containing all of the customer IDs that
the report_definition will be run against.
success_queue: A Queue instance that the details of successful report
downloads will be saved to.
failure_queue: A Queue instance that the details of failed report
downloads will be saved to.
"""
super(ReportWorker, self).__init__()
self.report_download_directory = report_download_directory
self.report_definition = report_definition
self.input_queue = input_queue
self.success_queue = success_queue
self.failure_queue = failure_queue
def run(self):
while True:
try:
customer_id = self.input_queue.get(timeout=0.01)
except Empty:
break
result = _DownloadReport(self.ident, self.report_download_directory,
customer_id, self.report_definition)
(self.success_queue if result[0] else self.failure_queue).put(result[1])
def GetCustomerIDs(client):
"""Retrieves all CustomerIds in the account hierarchy.
Note that your configuration file must specify a client_customer_id belonging
to an AdWords manager account.
Args:
client: an AdWordsClient instance.
Raises:
Exception: if no CustomerIds could be found.
Returns:
A Queue instance containing all CustomerIds in the account hierarchy.
"""
# For this example, we will use ManagedCustomerService to get all IDs in
# hierarchy that do not belong to MCC accounts.
managed_customer_service = client.GetService('ManagedCustomerService',
version='v201809')
offset = 0
# Get the account hierarchy for this account.
selector = {
'fields': ['CustomerId'],
'predicates': [{
'field': 'CanManageClients',
'operator': 'EQUALS',
'values': [False]
}],
'paging': {
'startIndex': str(offset),
'numberResults': str(PAGE_SIZE)
}
}
# Using Queue to balance load between processes.
queue = multiprocessing.Queue()
more_pages = True
while more_pages:
page = managed_customer_service.get(selector)
if page and 'entries' in page and page['entries']:
for entry in page['entries']:
queue.put(entry['customerId'])
else:
raise Exception('Can\'t retrieve any customer ID.')
offset += PAGE_SIZE
selector['paging']['startIndex'] = str(offset)
more_pages = offset < int(page['totalNumEntries'])
return queue
def main(client, report_download_directory):
# Determine list of customer IDs to retrieve report for.
input_queue = GetCustomerIDs(client)
reports_succeeded = multiprocessing.Queue()
reports_failed = multiprocessing.Queue()
# Create report definition.
report_definition = {
'reportName': 'Custom ADGROUP_PERFORMANCE_REPORT',
'dateRangeType': 'LAST_7_DAYS',
'reportType': 'ADGROUP_PERFORMANCE_REPORT',
'downloadFormat': 'CSV',
'selector': {
'fields': ['CampaignId', 'AdGroupId', 'Impressions', 'Clicks',
'Cost'],
# Predicates are optional.
'predicates': {
'field': 'AdGroupStatus',
'operator': 'IN',
'values': ['ENABLED', 'PAUSED']
}
},
}
queue_size = input_queue.qsize()
num_processes = min(queue_size, MAX_PROCESSES)
print('Retrieving %d reports with %d processes:' % (queue_size, num_processes))
# Start all the processes.
processes = [ReportWorker(report_download_directory,
report_definition, input_queue, reports_succeeded,
reports_failed)
for _ in range(num_processes)]
for process in processes:
process.start()
for process in processes:
process.join()
print('Finished downloading reports with the following results:')
while True:
try:
success = reports_succeeded.get(timeout=0.01)
except Empty:
break
print('\tReport for CustomerId "%d" succeeded.' % success['customerId'])
while True:
try:
failure = reports_failed.get(timeout=0.01)
except Empty:
break
print('\tReport for CustomerId "%d" failed with error code "%s" and '
'message: %s.' % (failure['customerId'], failure['code'],
failure['message']))
if __name__ == '__main__':
adwords_client = googleads.adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, REPORT_DOWNLOAD_DIRECTORY)
| googleads/googleads-python-lib | examples/adwords/v201809/reporting/parallel_report_download.py | Python | apache-2.0 | 8,577 |
# WeatherDialog.py
# Copyright 2010 Ben Sampson (pigeonfeather@cerium.org)
# This file is part of Pigeon Feather (code.google.com/p/pigeonfeather)
#
# Pigeon Feather is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or any later
# version.
#
# Pigeon Feather is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License
# along with Pigeon Feather. If not, see <http://www.gnu.org/licenses/>.
"""Class to display a weather report dialog"""
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import ui_weather
class WeatherDialog(QDialog, ui_weather.Ui_Weather):
"""Displays a weather dialog, inherits class generated from pyuic"""
def __init__(self, parent=None):
super(WeatherDialog, self).__init__(parent)
# Set up the application
self.setup()
def setup(self):
"""Setup and start the application"""
super(WeatherDialog, self).setupUi(self)
if __name__ == "__main__":
app = QApplication(sys.argv)
main = WeatherDialog()
main.show()
app.exec_()
| billyrayvalentine/python-pigeonfeather | WeatherDialog.py | Python | gpl-3.0 | 1,412 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes Spearman's rho with respect to human judgements.
Given a set of row (and potentially column) embeddings, this computes Spearman's
rho between the rank ordering of predicted word similarity and human judgements.
Usage:
wordsim.py --embeddings=<binvecs> --vocab=<vocab> eval1.tab eval2.tab ...
Options:
--embeddings=<filename>: the vectors to test
--vocab=<filename>: the vocabulary file
Evaluation files are assumed to be tab-separated files with exactly three
columns. The first two columns contain the words, and the third column contains
the scored human judgement.
"""
import scipy.stats
import sys
from getopt import GetoptError, getopt
from vecs import Vecs
try:
opts, args = getopt(sys.argv[1:], '', ['embeddings=', 'vocab='])
except GetoptError, e:
print >> sys.stderr, e
sys.exit(2)
opt_embeddings = None
opt_vocab = None
for o, a in opts:
if o == '--embeddings':
opt_embeddings = a
if o == '--vocab':
opt_vocab = a
if not opt_vocab:
print >> sys.stderr, 'please specify a vocabulary file with "--vocab"'
sys.exit(2)
if not opt_embeddings:
print >> sys.stderr, 'please specify the embeddings with "--embeddings"'
sys.exit(2)
try:
vecs = Vecs(opt_vocab, opt_embeddings)
except IOError, e:
print >> sys.stderr, e
sys.exit(1)
def evaluate(lines):
  acts, preds = [], []
  for line in lines:
    w1, w2, act = line.strip().split('\t')
    pred = vecs.similarity(w1, w2)
    if pred is None:
      continue
    acts.append(float(act))
    preds.append(pred)
  rho, _ = scipy.stats.spearmanr(acts, preds)
  return rho
for filename in args:
with open(filename, 'r') as lines:
print '%0.3f %s' % (evaluate(lines), filename)
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/swivel/wordsim.py | Python | bsd-2-clause | 2,372 |
# -*- coding: utf-8 -*-
# setup.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
setup file for leap.common
"""
import re
from setuptools import setup, find_packages
from setuptools import Command
import versioneer
from pkg import utils
requirements = utils.parse_requirements()
dependency_links = [requirement for requirement
in requirements if requirement.startswith('http')]
requirements = [requirement for requirement
in requirements if requirement not in dependency_links]
tests_requirements = [
'mock',
]
trove_classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
("License :: OSI Approved :: GNU General "
"Public License v3 or later (GPLv3+)"),
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Communications",
"Topic :: Security",
"Topic :: Utilities"
]
DOWNLOAD_BASE = ('https://github.com/leapcode/bitmask-dev/'
'archive/%s.tar.gz')
_versions = versioneer.get_versions()
VERSION = _versions['version']
VERSION_REVISION = _versions['full-revisionid']
DOWNLOAD_URL = ""
# get the short version for the download url
_version_short = re.findall(r'\d+\.\d+\.\d+', VERSION)
if len(_version_short) > 0:
VERSION_SHORT = _version_short[0]
DOWNLOAD_URL = DOWNLOAD_BASE % VERSION_SHORT
class freeze_debianver(Command):
"""
Freezes the version in a debian branch.
To be used after merging the development branch onto the debian one.
"""
user_options = []
template = r"""
# This file was generated by the `freeze_debianver` command in setup.py
# Using 'versioneer.py' (0.16) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
{
"dirty": false,
"error": null,
"full-revisionid": "FULL_REVISIONID",
"version": "VERSION_STRING"
}
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
proceed = str(raw_input(
"This will overwrite the file _version.py. Continue? [y/N] "))
if proceed != "y":
print("He. You scared. Aborting.")
return
subst_template = self.template.replace(
'VERSION_STRING', VERSION_SHORT).replace(
'FULL_REVISIONID', VERSION_REVISION)
versioneer_cfg = versioneer.get_config_from_root('.')
with open(versioneer_cfg.versionfile_source, 'w') as f:
f.write(subst_template)
try:
long_description = open('README.rst').read() + '\n\n\n' + \
open('CHANGELOG').read()
except Exception:
long_description = ""
cmdclass = versioneer.get_cmdclass()
cmdclass["freeze_debianver"] = freeze_debianver
setup(
name='leap.common',
version=VERSION,
cmdclass=cmdclass,
url='https://leap.se/',
download_url=DOWNLOAD_URL,
license='GPLv3+',
author='The LEAP Encryption Access Project',
author_email='info@leap.se',
maintainer='Kali Kaneko',
maintainer_email='kali@leap.se',
description='Common files used by the LEAP project.',
long_description=long_description,
classifiers=trove_classifiers,
namespace_packages=["leap"],
package_dir={'': 'src'},
package_data={'': ['*.pem']},
# For now, we do not exclude tests because of the circular dependency
# between leap.common and leap.soledad.
# packages=find_packages('src', exclude=['leap.common.tests']),
packages=find_packages('src'),
test_suite='leap.common.tests',
install_requires=requirements,
dependency_links=dependency_links,
tests_require=tests_requirements,
include_package_data=True,
zip_safe=False,
extras_require={
# needed for leap.common.http
        # service_identity needed for proper hostname identification,
# see http://twistedmatrix.com/documents/current/core/howto/ssl.html
'http': [
"Twisted>=14.0.2",
"service_identity", "zope.interface",
"certifi"]
},
)
| leapcode/leap_pycommon | setup.py | Python | gpl-3.0 | 4,945 |
"""
Approach: scan for links, keeping those that contain
a month name AND do NOT contain 'archive/...'.
Then set(links) to deduplicate, and
for each link, fetch the page and grab all films found on that page.
http://www.wesleyan.edu/filmseries/index.html
"""
import urllib2
import BeautifulSoup
import datetime
import re
def scrape_film_series():
    try:
        raw_week_pages = get_week_pages()
        processed_week_pages = process_week_pages(raw_week_pages)
        return processed_week_pages
    except Exception:
        print "FILM: Unable to scrape film series."
        return None
def get_week_pages():
url = "http://www.wesleyan.edu/filmseries/index.html"
text = urllib2.urlopen(url).read()
soup = BeautifulSoup.BeautifulSoup(text)
#gather links
all_links = soup.findAll('a')
months = ["January","February","March","April","May","June","July","August","September","October","November","December"]
week_pages= set()
for link in all_links:
link_text = link.get('href')
if not link_text:
print "FILM: NO LINK TEXT??",link
continue
for month in months:
if month in link_text and "archive" not in link_text:
week_pages.add(link_text)
return week_pages
def process_week_pages(week_pages):
base_url = "http://www.wesleyan.edu/filmseries/"
all_processed = []
for week in week_pages:
url = base_url + urllib2.quote(week)
text = urllib2.urlopen(url).read()
soup = BeautifulSoup.BeautifulSoup(text)
#There seem to be TWO different formats
#One with paragraphs, one with divs.
#Style 1
style1 = soup.findAll('p',{'class':'ParagraphStyle1'})
        if style1:
            for i in style1:
                all_processed.append(process_p_style(i))
        else:
            #Style 2
            style2 = soup.findAll('div',{'class':'movie'})
            if style2:
                for i in style2:
                    all_processed.append(process_div_style(i))
            else:
                return False
return all_processed
def remove_date_end(s):
return re.sub(r'(\d)(st|nd|rd|th)', r'\1', s)
def process_date(date):
"""
Takes in a date like:
September 22
and outputs a Python datetime obj with
the current year.
"""
curr_year = str(datetime.date.today().year)
date_with_year = curr_year+" "+remove_date_end(date).strip()
return datetime.datetime.strptime(date_with_year,"%Y %B %d")
def process_p_style(soup):
time = process_date(soup.next)
movie_span = soup.find('span',{"class":"LargeText"})
movie_title = movie_span.text
short_description = movie_span.next.next.next
#Catch italic tag cases for this.. hacky :(
long_description = movie_span.next.next.next.next.next
if type(long_description) not in [BeautifulSoup.NavigableString,unicode]:
long_description = long_description.text + long_description.nextSibling
movie_obj = {"time":time,"title":movie_title,
"short_description":short_description,
"long_description":long_description}
return movie_obj
def process_div_style(soup):
time = process_date(soup.next)
movie_h3 = soup.find('h3',{'class':'movietitle'})
movie_title = movie_h3.next
short_description = movie_h3.next.next
long_description = soup.find('p').text
#Catch italic
if type(long_description) not in [BeautifulSoup.NavigableString,unicode]:
long_description = long_description.next + long_description.next.nextSibling
movie_obj = {"time":time,"title":movie_title,
"short_description":short_description,
"long_description":long_description}
return movie_obj
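# Illustrative entry point (added; not in the original file): run the scraper
# directly and print what it found. Assumes the film series site is reachable.
if __name__ == '__main__':
    movies = scrape_film_series()
    for movie in movies or []:
        print movie['time'], movie['title']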
| WesApps/wes_api | lib/scraping/filmSeries/film_series.py | Python | mit | 3,407 |
# -*- coding: utf-8 -*-
"""Variables controller"""
import collections
import datetime
from openfisca_core import periods, simulations
from .. import contexts, conv, environment, model, wsgihelpers
@wsgihelpers.wsgify
def api1_variables(req):
ctx = contexts.Ctx(req)
headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
assert req.method == 'GET', req.method
params = req.GET
inputs = dict(
names = params.getall('name'),
)
tax_benefit_system = model.tax_benefit_system
tax_benefit_system_variables_name = tax_benefit_system.column_by_name.keys()
data, errors = conv.pipe(
conv.struct(
dict(
names = conv.pipe(
conv.uniform_sequence(
conv.pipe(
conv.empty_to_none,
conv.test_in(tax_benefit_system_variables_name, error = u'Variable does not exist'),
),
drop_none_items = True,
),
conv.empty_to_none,
),
),
default = 'drop',
),
)(inputs, state = ctx)
if errors is not None:
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
error = collections.OrderedDict(sorted(dict(
code = 400, # Bad Request
errors = [conv.jsonify_value(errors)],
message = ctx._(u'Bad parameters in request'),
).iteritems())),
method = req.script_name,
params = inputs,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
simulation = None
variables_json = []
for variable_name in data['names'] or tax_benefit_system_variables_name:
column = tax_benefit_system.column_by_name[variable_name]
variable_json = column.to_json()
label = variable_json.get('label')
if label is not None and label == variable_name:
del variable_json['label']
if not column.is_input_variable():
if simulation is None:
simulation = simulations.Simulation(
period = periods.period(datetime.date.today().year),
tax_benefit_system = model.tax_benefit_system,
)
holder = simulation.get_or_new_holder(variable_name)
variable_json['formula'] = holder.formula.to_json(
get_input_variables_and_parameters = model.get_cached_input_variables_and_parameters,
)
variables_json.append(variable_json)
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
country_package_git_head_sha = environment.country_package_git_head_sha,
method = req.script_name,
url = req.url.decode('utf-8'),
variables = variables_json,
).iteritems())),
headers = headers,
)
| sgmap/openfisca-web-api | openfisca_web_api/controllers/variables.py | Python | agpl-3.0 | 3,187 |
import _plotly_utils.basevalidators
class LabelformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="labelformat", parent_name="contour.contours", **kwargs
):
super(LabelformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/contour/contours/_labelformat.py | Python | mit | 431 |
import unittest
"""
Given a string, find the longest substring which is a palindrome.
Input: forgeeksskeegfor
Output: geeksskeeg
"""
def longest_palindromic_substring(string):
n = len(string)
# table[i][j] is the length of palindromic substring starting at str[i] and ending at str[j].
# The max value in the table is the final result.
table = [[0] * n for _ in range(n)]
for i in range(n):
table[i][i] = 1
for l in range(2, n+1):
for i in range(n-l+1):
j = i+l-1
            if string[i] == string[j] and l == 2:
                table[i][j] = 2
            elif string[i] == string[j] and table[i+1][j-1] != 0:
                # Only extend when the inner substring is itself a palindrome;
                # otherwise table[i+1][j-1] is 0 and str[i..j] is not one either.
                table[i][j] = 2 + table[i+1][j-1]
start = 0
max_length = 0
for i in range(n):
for j in range(n):
if max_length < table[i][j]:
max_length = table[i][j]
start = i
return string[start:start+max_length]
class TestLongestPalindromic(unittest.TestCase):
def test_longest_palindromic(self):
string = 'forgeeksskeegfor'
self.assertEqual(longest_palindromic_substring(string), 'geeksskeeg')
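# Extra sanity checks (added for illustration; not in the original file):
# an odd-length palindrome flanked by noise, and a whole-string palindrome.
class TestLongestPalindromicExtra(unittest.TestCase):
    def test_odd_length(self):
        self.assertEqual(longest_palindromic_substring('abacdfgdcaba'), 'aba')
    def test_whole_string(self):
        self.assertEqual(longest_palindromic_substring('racecar'), 'racecar')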
| prathamtandon/g4gproblems | DP/longest_palindromic_substring.py | Python | mit | 1,133 |
from django.conf import settings
from django.http import HttpResponseRedirect
class LocaleRedirectionMiddleware(object):
"""Remove the /en-US/ locale part from the URL.
The sugardough based version of the app doesn't not enable the
locale middleware for simplicity since the site is not localized.
To avoid breaking old links that include /en-US/ locale
identification string, this middleware will automatically convert
them.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if not request.path.startswith('/en-US/'):
return self.get_response(request)
        url = request.get_full_path()[6:]  # strip the leading '/en-US'
return HttpResponseRedirect(url)
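# Illustrative sketch (added; not in the original module): exercising the
# redirect with Django's test RequestFactory. Assumes Django settings are
# configured, as in a normal test run; nothing here executes on import.
def _demo_locale_redirect():
    from django.test import RequestFactory
    middleware = LocaleRedirectionMiddleware(lambda request: None)
    response = middleware(RequestFactory().get('/en-US/listings/?page=2'))
    assert response.status_code == 302
    assert response['Location'] == '/listings/?page=2'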
class HostnameMiddleware(object):
def __init__(self, get_response):
values = [getattr(settings, x) for x in ['HOSTNAME', 'DEIS_APP', 'DEIS_DOMAIN']]
self.backend_server = '.'.join(x for x in values if x)
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
response['X-Backend-Server'] = self.backend_server
return response
| mozilla/lumbergh | careers/base/middleware.py | Python | mpl-2.0 | 1,186 |
from django.contrib.auth.models import User, Permission
class ModelBackend(object):
"""
Authenticates against django.contrib.auth.models.User.
"""
# TODO: Model, login attribute name and password attribute name should be
# configurable.
def authenticate(self, username=None, password=None):
try:
user = User.objects.get(username=username)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
def get_group_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
if user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_group_perm_cache'):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
perms = Permission.objects.filter(group__user=user_obj)
perms = perms.values_list('content_type__app_label', 'codename').order_by()
user_obj._group_perm_cache = set(["%s.%s" % (ct, name) for ct, name in perms])
return user_obj._group_perm_cache
def get_all_permissions(self, user_obj, obj=None):
if user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set([u"%s.%s" % (p.content_type.app_label, p.codename) for p in user_obj.user_permissions.select_related()])
user_obj._perm_cache.update(self.get_group_permissions(user_obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
if not user_obj.is_active:
return False
return perm in self.get_all_permissions(user_obj, obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
"""
if not user_obj.is_active:
return False
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
class RemoteUserBackend(ModelBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, remote_user):
"""
The username passed as ``remote_user`` is considered trusted. This
method simply returns the ``User`` object with the given username,
creating a new ``User`` object if ``create_unknown_user`` is ``True``.
Returns None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
user = None
username = self.clean_username(remote_user)
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = User.objects.get_or_create(username=username)
if created:
user = self.configure_user(user)
else:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
pass
return user
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
return username
def configure_user(self, user):
"""
Configures a user after creation and returns the updated user.
By default, returns the user unmodified.
"""
return user
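# Illustrative use (hypothetical settings fragment): backends are selected via
# AUTHENTICATION_BACKENDS; RemoteUserBackend pairs with RemoteUserMiddleware,
# which trusts e.g. the REMOTE_USER variable set by the web server:
#
#   AUTHENTICATION_BACKENDS = (
#       'django.contrib.auth.backends.RemoteUserBackend',
#   )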
|
lzw120/django
|
django/contrib/auth/backends.py
|
Python
|
bsd-3-clause
| 4,574
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
### @brief creates swagger json files from doc headers of rest files
###
### find files in
### arangod/RestHandler/*.cpp
### js/actions/api-*.js
###
### @usage generateSwagger.py < RestXXXX.cpp > restSwagger.json
###
### @file
###
### DISCLAIMER
###
### Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
###
### Copyright holder is triAGENS GmbH, Cologne, Germany
###
### @author Dr. Frank Celler
### @author Thomas Richter
### @author Copyright 2014, triAGENS GmbH, Cologne, Germany
################################################################################
from __future__ import print_function # py2 compat
from __future__ import unicode_literals
import sys
import re
import json
import operator
import os
import os.path
import io
# optional pretty-printing helpers (disabled):
#from pygments import highlight
#from pygments.lexers import YamlLexer
#from pygments.formatters import TerminalFormatter
#import ruamel.yaml as yaml
rc = re.compile
MS = re.M | re.S
################################################################################
### @brief swagger
################################################################################
swagger = {
"swagger": "2.0",
"info": {
"description": "ArangoDB REST API Interface",
"version": "1.0", # placeholder
"title": "ArangoDB",
"license": {
"name": "Apache License, Version 2.0"
}
},
"basePath": "/",
"definitions": {
"ARANGO_ERROR": {
"description": "An ArangoDB Error code",
"type": "integer"
},
"ArangoError": {
"description": "the arangodb error type",
"properties": {
"code": {
"description": "the HTTP Status code",
"type": "integer"
},
"error": {
"description": "boolean flag to indicate whether an error occurred (*true* in this case)",
"type": "boolean"
},
"errorMessage": {
"description": "a descriptive error message describing what happened, may contain additional information",
"type": "string"
},
"errorNum": {
"description": "the ARANGO_ERROR code",
"type": "integer"
}
}
}
},
"paths" : {}
}
################################################################################
### @brief native swagger types
################################################################################
swaggerBaseTypes = [
'object',
'array',
'number',
'integer',
'long',
'float',
'double',
'string',
'byte',
'binary',
'boolean',
'date',
'dateTime',
'password'
]
swaggerFormats = {
"number": ["", "float", "double"],
"integer": ["", "int32", "int64"]
}
################################################################################
### @brief length of the swagger definition namespace
################################################################################
defLen = len('#/definitions/')
################################################################################
### @brief operation
################################################################################
httpPath = ''
################################################################################
### @brief operation
################################################################################
method = ''
################################################################################
### @brief operation
################################################################################
operation = {}
################################################################################
### @brief current filename
################################################################################
fn = ''
################################################################################
### @brief current section
################################################################################
currentTag = ''
################################################################################
### @brief current docublock
################################################################################
currentDocuBlock = None
lastDocuBlock = None
################################################################################
### @brief index of example block we're reading
################################################################################
currentExample = 0
################################################################################
### @brief the current returncode we're working on
################################################################################
currentReturnCode = 0
################################################################################
### @brief collect json body parameter definitions:
################################################################################
restBodyParam = None
restReplyBodyParam = None
restSubBodyParam = None
operationIDs = []
################################################################################
### @brief DEBUG
################################################################################
DEBUG = False  # set to True for verbose state-machine tracing
################################################################################
### @brief facility to remove leading and trailing html-linebreaks
################################################################################
removeTrailingBR = re.compile("<br>$")
removeLeadingBR = re.compile("^<br>")
def brTrim(text):
return removeLeadingBR.sub("", removeTrailingBR.sub("", text.strip(' ')))
################################################################################
### @brief check for token to be right
################################################################################
reqOpt = ["required", "optional"]
def CheckReqOpt(token):
if token not in reqOpt:
print("This is supposed to be required or optional!", file=sys.stderr)
raise Exception("invalid value '%s'" % token)
################################################################################
### @brief trim_text
################################################################################
def trim_text(txt):
r = rc(r"""[ \t]+$""")
txt = r.sub("", txt)
return txt
################################################################################
### @brief parameters
###
### find the first {
### find the last }
### return everything in between
################################################################################
def parameters(line):
(_l, _c, line) = line.partition('{')
(line, _c, _r) = line.rpartition('}')
line = BackTicks(line, wordboundary=['{', '}'])
return line
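# Example (illustrative input):
#   parameters('@RESTHEADER{GET /_api/version, returns the version}')
# -> 'GET /_api/version, returns the version'
# i.e. everything between the first '{' and the last '}'.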
################################################################################
### @brief BackTicks
###
### `word` -> <em>word</em> (with the default wordboundary)
################################################################################
def BackTicks(txt, wordboundary=['<em>', '</em>']):
r = rc(r"""([\(\s'/">]|^|.)\`(.*?)\`([<\s\.\),:;'"?!/-]|$)""", MS)
subpattern = '\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3'
return r.sub(subpattern, txt)
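# Example (illustrative): BackTicks("see `_key` here")
# -> "see <em>_key</em> here"; the wordboundary pair controls the emitted tags.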
################################################################################
### @brief AsteriskItalic
###
### *word* -> <strong>word</strong> (with the default wordboundary)
################################################################################
def AsteriskItalic(txt, wordboundary=['<strong>', '</strong>']):
r = rc(r"""([\(\s'/">]|^|.)\*(.*?)\*([<\s\.\),:;'"?!/-]|$)""", MS)
subpattern = '\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3'
return r.sub(subpattern, txt)
################################################################################
### @brief AsteriskBold
###
### **word** -> <strong>word</strong> (with the default wordboundary)
################################################################################
def AsteriskBold(txt, wordboundary=['<strong>', '</strong>']):
r = rc(r"""([\(\s'/">]|^|.)\*\*(.*?)\*\*([<\s\.\),:;'"?!/-]|$)""", MS)
subpattern = '\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3'
return r.sub(subpattern, txt)
################################################################################
### @brief FA
###
### @FA{word} -> <b>word</b>
################################################################################
def FA(txt, wordboundary=['<b>', '</b>']):
r = rc(r"""([\(\s'/">]|^|.)@FA\{(.*?)\}([<\s\.\),:;'"?!/-]|$)""", MS)
subpattern = '\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3'
return r.sub(subpattern, txt)
################################################################################
### @brief FN
###
### @FN{word} -> <b>word</b>
################################################################################
def FN(txt, wordboundary=['<b>', '</b>']):
r = rc(r"""([\(\s'/">]|^|.)@FN\{(.*?)\}([<\s\.\),:;'"?!/-])""", MS)
subpattern = '\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3'
return r.sub(subpattern, txt)
################################################################################
### @brief LIT
###
### @LIT{word} -> <b>word</b>
################################################################################
def LIT(txt, wordboundary=['<b>', '</b>']):
r = rc(r"""([\(\s'/">]|^)@LIT\{(.*?)\}([<\s\.\),:;'"?!/-])""", MS)
subpattern = '\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3'
return r.sub(subpattern, txt)
################################################################################
### @brief BACKSLASH
###
### \ -> needs to become \\ so \n's in the text can be differentiated.
################################################################################
def BACKSLASH(txt):
return txt.replace('\\', '\\\\\\')
################################################################################
### @brief Typography
################################################################################
def Typography(txt):
txt = txt[0:-1]
# txt = BackTicks(txt)
# txt = AsteriskBold(txt)
# txt = AsteriskItalic(txt)
# txt = FN(txt)
# txt = LIT(txt)
# txt = FA(txt)
#
# no way to find out the correct link for Swagger,
# so replace all @ref elements with just "the manual"
r = rc(r"""@ref [a-zA-Z0-9]+""", MS)
txt = r.sub("the manual", txt)
txt = re.sub(r"@endDocuBlock", "", txt)
txt = BACKSLASH(txt)
return txt
################################################################################
### @brief InitializationError
################################################################################
class InitializationError(Exception):
pass
################################################################################
### @brief StateMachine
################################################################################
class StateMachine:
def __init__(self):
self.handlers = []
self.startState = None
self.endStates = []
self.file = ''
self.fn = ''
def add_state(self, handler, end_state=0):
self.handlers.append(handler)
if end_state:
self.endStates.append(handler)
def set_fn(self, filename):
self.fn = filename
global fn
fn = filename
def set_start(self, handler):
self.startState = handler
def run(self, cargo=None):
if not self.startState:
raise InitializationError("must call .set_start() before .run()")
if not self.endStates:
raise InitializationError("at least one state must be an end_state")
handler = self.startState
try:
while 1:
(newState, cargo) = handler(cargo)
if newState in self.endStates:
newState(cargo)
break
elif newState not in self.handlers:
raise RuntimeError("Invalid target %s" % newState)
else:
handler = newState
except Exception as x:
print("while parsing '" + self.fn + "'", file=sys.stderr)
print("trying to use handler '" + handler.__name__ + "'", file=sys.stderr)
raise x
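# Minimal usage sketch (mirrors the `automat` setup further below): handlers
# take (fp, last_line) as cargo and return (next_handler, new_cargo); states
# registered with end_state=1 terminate the run.
#
#   m = StateMachine()
#   m.add_state(comment)
#   m.add_state(eof, end_state=1)
#   m.set_start(comment)
#   m.run((some_open_file, ''))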
################################################################################
### @brief Regexen
################################################################################
class Regexen:
def __init__(self):
self.DESCRIPTION_LI = re.compile(r'^-\s.*$')
self.DESCRIPTION_SP = re.compile(r'^\s\s.*$')
self.DESCRIPTION_BL = re.compile(r'^\s*$')
self.EMPTY_LINE = re.compile(r'^\s*$')
self.START_DOCUBLOCK = re.compile('.*@startDocuBlock ')
self.HINTS = re.compile('.*@HINTS')
self.END_EXAMPLE_ARANGOSH_RUN = re.compile('.*@END_EXAMPLE_ARANGOSH_RUN')
self.EXAMPLES = re.compile('.*@EXAMPLES')
self.EXAMPLE_ARANGOSH_RUN = re.compile('.*@EXAMPLE_ARANGOSH_RUN{')
self.RESTBODYPARAM = re.compile('.*@RESTBODYPARAM')
self.RESTSTRUCT = re.compile('.*@RESTSTRUCT')
self.RESTALLBODYPARAM = re.compile('.*@RESTALLBODYPARAM')
self.RESTDESCRIPTION = re.compile('.*@RESTDESCRIPTION')
self.RESTDONE = re.compile('.*@RESTDONE')
self.RESTHEADER = re.compile('.*@RESTHEADER{')
self.RESTHEADERPARAM = re.compile('.*@RESTHEADERPARAM{')
self.RESTHEADERPARAMETERS = re.compile('.*@RESTHEADERPARAMETERS')
self.RESTQUERYPARAM = re.compile('.*@RESTQUERYPARAM{')
self.RESTQUERYPARAMETERS = re.compile('.*@RESTQUERYPARAMETERS')
self.RESTREPLYBODY = re.compile('.*@RESTREPLYBODY')
self.RESTRETURNCODE = re.compile('.*@RESTRETURNCODE{')
self.RESTRETURNCODES = re.compile('.*@RESTRETURNCODES')
self.RESTURLPARAM = re.compile('.*@RESTURLPARAM{')
self.RESTURLPARAMETERS = re.compile('.*@RESTURLPARAMETERS')
self.RESTPARAM = re.compile('.*@RESTPARAM')
self.RESTQUERYPARAMS = re.compile('.*@RESTQUERYPARAMS')
self.TRIPLENEWLINEATSTART = re.compile(r'^\n\n\n')
################################################################################
### @brief checks for end of comment
################################################################################
def check_end_of_comment(line, r):
return r.RESTDONE.match(line)
################################################################################
### @brief next_step
################################################################################
def next_step(fp, line, r):
global operation
if not line:
return eof, (fp, line)
elif check_end_of_comment(line, r):
return skip_code, (fp, line)
elif r.START_DOCUBLOCK.match(line):
return start_docublock, (fp, line)
elif r.HINTS.match(line):
return hints, (fp, line)
elif r.EXAMPLE_ARANGOSH_RUN.match(line):
return example_arangosh_run, (fp, line)
elif r.RESTBODYPARAM.match(line):
return restbodyparam, (fp, line)
elif r.RESTSTRUCT.match(line):
return reststruct, (fp, line)
elif r.RESTALLBODYPARAM.match(line):
return restallbodyparam, (fp, line)
elif r.RESTDESCRIPTION.match(line):
return restdescription, (fp, line)
elif r.RESTHEADER.match(line):
return restheader, (fp, line)
elif r.RESTHEADERPARAM.match(line):
return restheaderparam, (fp, line)
elif r.RESTHEADERPARAMETERS.match(line):
return restheaderparameters, (fp, line)
elif r.RESTQUERYPARAM.match(line):
return restqueryparam, (fp, line)
elif r.RESTQUERYPARAMETERS.match(line):
return restqueryparameters, (fp, line)
elif r.RESTREPLYBODY.match(line):
return restreplybody, (fp, line)
elif r.RESTRETURNCODE.match(line):
return restreturncode, (fp, line)
elif r.RESTRETURNCODES.match(line):
return restreturncodes, (fp, line)
elif r.RESTURLPARAM.match(line):
return resturlparam, (fp, line)
elif r.RESTURLPARAMETERS.match(line):
return resturlparameters, (fp, line)
elif r.RESTPARAM.match(line):
return restparam, (fp, line)
elif r.RESTQUERYPARAMS.match(line):
return restqueryparams, (fp, line)
elif r.EXAMPLES.match(line):
return examples, (fp, line)
return None, None
################################################################################
### @brief generic handler
################################################################################
def generic_handler(cargo, r, message):
global DEBUG
if DEBUG:
print(message, file=sys.stderr)
    (fp, dummy) = cargo
while 1:
(next, c) = next_step(fp, fp.readline(), r)
if next:
return next, c
################################################################################
### @brief generic handler with description
### @param cargo the file we're working on
### @param r the regex that matched
### @param message debug message printed when DEBUG is set
### @param op key in operation to append para to (may be None)
### @param para the object we should write to
### @param name the key in para we should write to
################################################################################
def generic_handler_desc(cargo, r, message, op, para, name):
global DEBUG, operation
if DEBUG:
print(message, file=sys.stderr)
(fp, dummy) = cargo
while 1:
line = fp.readline()
(next, c) = next_step(fp, line, r)
if next:
para[name] = trim_text(para[name])
if op:
try:
operation[op].append(para)
except AttributeError as x:
print("trying to set '%s' on operations - failed. '%s'" % (op, para), file=sys.stderr)
raise x
return next, c
line = Typography(line)
para[name] += line + '\n'
def start_docublock(cargo, r=Regexen()):
global currentDocuBlock
(dummy, last) = cargo
try:
# TODO remove when all /// are removed from the docublocks
if last.startswith('/// '):
currentDocuBlock = last.split(' ')[2].rstrip()
else:
currentDocuBlock = last.split(' ')[1].rstrip()
except Exception as x:
print("failed to fetch docublock in '" + last + "': " + str(x), file=sys.stderr)
raise x
return generic_handler(cargo, r, 'start_docublock')
def setRequired(where, which):
if not 'required' in where:
where['required'] = []
where['required'].append(which)
################################################################################
### @brief restparam - deprecated - abort.
################################################################################
def restparam(cargo, r=Regexen()):
global swagger, operation, httpPath, method, restBodyParam, fn, currentExample, currentReturnCode, currentDocuBlock, lastDocuBlock, restReplyBodyParam
print("deprecated RESTPARAM declaration detected:", file=sys.stderr)
print(json.dumps(
swagger['paths'][httpPath],
indent=4,
separators=(', ', ': '),
sort_keys=True), file=sys.stderr)
raise Exception("RESTPARAM not supported anymore.")
################################################################################
### @brief restparam - deprecated - abort.
################################################################################
def restqueryparams(cargo, r=Regexen()):
global swagger, operation, httpPath, method, restBodyParam, fn, currentExample, currentReturnCode, currentDocuBlock, lastDocuBlock, restReplyBodyParam
print("deprecated RESTQUERYPARAMS declaration detected:", file=sys.stderr)
print(json.dumps(
swagger['paths'][httpPath],
indent=4,
separators=(', ', ': '),
sort_keys=True), file=sys.stderr)
raise Exception("RESTQUERYPARAMS not supported anymore. Use RESTQUERYPARAMETERS instead.")
################################################################################
### @brief restheader
################################################################################
def restheader(cargo, r=Regexen()):
global swagger, operation, httpPath, method, restBodyParam, fn, currentExample, currentReturnCode, currentDocuBlock, lastDocuBlock, restReplyBodyParam
currentReturnCode = 0
currentExample = 0
restReplyBodyParam = None
restBodyParam = None
(dummy, last) = cargo
    if parameters(last) == "":
        raise Exception("Invalid restheader value. got empty string. Maybe missing closing bracket? " + last)
    temp = parameters(last).split(',')
(ucmethod, path) = temp[0].split()
    #TODO: check here whether the previous block already had everything (responses)
summary = temp[1]
summaryList = summary.split()
method = ucmethod.lower()
nickname = summaryList[0] + ''.join([word.capitalize() for word in summaryList[1:]])
httpPath = FA(path, wordboundary=['{', '}'])
if not httpPath in swagger['paths']:
swagger['paths'][httpPath] = {}
if method in swagger['paths'][httpPath]:
print("duplicate route detected:", file=sys.stderr)
print("There already is a route [" + ucmethod + " " + httpPath + "]: ", file=sys.stderr)
print(json.dumps(
swagger['paths'][httpPath],
indent=4,
separators=(', ', ': '),
sort_keys=True), file=sys.stderr)
raise Exception("Duplicate route")
if currentDocuBlock == None:
raise Exception("No docublock started for this restheader: " + ucmethod + " " + path)
if lastDocuBlock != None and currentDocuBlock == lastDocuBlock:
raise Exception("No new docublock started for this restheader: " + ucmethod + " " + path + ' : ' + currentDocuBlock)
operationId = nickname
if len(temp) > 2:
operationId = temp[2]
if operationId in operationIDs:
print(operationIDs)
raise Exception("duplicate operation ID! " + operationId)
lastDocuBlock = currentDocuBlock
swagger['paths'][httpPath][method] = {
'operationId': operationId.strip(),
'x-filename': fn,
'x-hints': '',
'x-examples': [],
'tags': [currentTag],
'summary': summary.strip(),
'description': '',
'parameters' : [],
}
operation = swagger['paths'][httpPath][method]
return generic_handler(cargo, r, "resturlparameters")
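# Illustrative docublock header (hypothetical values):
#   @RESTHEADER{GET /_api/version, Return server version, getVersion}
# parses to method='get', httpPath='/_api/version', summary='Return server version'
# and operationId='getVersion'; without a third argument the operationId falls
# back to a camel-cased nickname derived from the summary.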
################################################################################
### @brief resturlparameters
################################################################################
def resturlparameters(cargo, r=Regexen()):
return generic_handler(cargo, r, "resturlparameters")
################################################################################
### @brief resturlparam
################################################################################
def resturlparam(cargo, r=Regexen()):
global swagger, operation, httpPath, method
(dummy, last) = cargo
name = ""
pformat = ""
required = ""
try:
(name, pformat, required) = parameters(last).split(',')
    except Exception as x:
print("RESTURLPARAM: 3 arguments required. You gave me: " + parameters(last), file=sys.stderr)
raise x
if required.strip() != 'required':
print("only required is supported in RESTURLPARAM", file=sys.stderr)
raise Exception("invalid url parameter")
para = {
'name': name.strip(),
'in': 'path',
'format': pformat.strip(),
'description': '',
'type': pformat.strip().lower(),
'required': True
}
swagger['paths'][httpPath][method]['parameters'].append(para)
return generic_handler_desc(cargo, r, "resturlparam", None, para, 'description')
################################################################################
### @brief restqueryparameters
################################################################################
def restqueryparameters(cargo, r=Regexen()):
return generic_handler(cargo, r, "restqueryparameters")
################################################################################
### @brief restheaderparameters
################################################################################
def restheaderparameters(cargo, r=Regexen()):
return generic_handler(cargo, r, "restheaderparameters")
################################################################################
### @brief restheaderparameters
################################################################################
def restheaderparam(cargo, r=Regexen()):
global swagger, operation, httpPath, method
(dummy, last) = cargo
parametersList = parameters(last).split(',')
para = {
'in': 'header',
'type': parametersList[1].lower(),
'name': parametersList[0],
'description': ''
}
swagger['paths'][httpPath][method]['parameters'].append(para)
return generic_handler_desc(cargo, r, "restheaderparam", None, para, 'description')
################################################################################
### @brief restbodyparam
################################################################################
def restbodyparam(cargo, r=Regexen()):
global swagger, operation, httpPath, method, restBodyParam, fn, currentDocuBlock
(dummy, last) = cargo
try:
(name, ptype, required, ptype2) = parameters(last).split(',')
except Exception:
print("RESTBODYPARAM: 4 arguments required. You gave me: " + parameters(last), file=sys.stderr)
print("In this docublock: " + currentDocuBlock, file=sys.stderr)
raise Exception("Argument count error")
CheckReqOpt(required)
if required == 'required':
required = True
else:
required = False
if restBodyParam == None:
# https://github.com/swagger-api/swagger-ui/issues/1430
# once this is solved we can skip this:
operation['description'] += "\n**A JSON object with these properties is required:**\n"
restBodyParam = {
'name': 'Json Request Body',
'x-description-offset': len(swagger['paths'][httpPath][method]['description']),
'in': 'body',
'required': True,
'schema': {
'$ref': '#/definitions/' + currentDocuBlock
}
}
swagger['paths'][httpPath][method]['parameters'].append(restBodyParam)
if not currentDocuBlock in swagger['definitions']:
swagger['definitions'][currentDocuBlock] = {
'x-filename': fn,
'type' : 'object',
'properties': {},
}
swagger['definitions'][currentDocuBlock]['properties'][name] = {
'type': ptype,
'description': ''
}
if ptype == 'object' and len(ptype2) > 0:
swagger['definitions'][currentDocuBlock]['properties'][name] = {
'$ref': '#/definitions/' + ptype2
}
if not ptype2 in swagger['definitions']:
swagger['definitions'][ptype2] = {
'x-filename': fn,
'type': 'object',
'properties' : {},
'description': ''
}
if required:
setRequired(swagger['definitions'][ptype2], name)
return generic_handler_desc(cargo, r, "restbodyparam", None,
swagger['definitions'][ptype2],
'description')
if ptype == 'array':
if ptype2 not in swaggerBaseTypes:
swagger['definitions'][currentDocuBlock]['properties'][name]['items'] = {
'$ref': '#/definitions/' + ptype2
}
else:
swagger['definitions'][currentDocuBlock]['properties'][name]['items'] = {
'type': ptype2
}
if ptype2 == 'object':
swagger['definitions'][currentDocuBlock]['properties'][name]['items']['additionalProperties'] = {}
elif ptype == 'object':
swagger['definitions'][currentDocuBlock]['properties'][name]['additionalProperties'] = {}
elif ptype != 'string':
if ptype in swaggerFormats and ptype2 not in swaggerFormats[ptype]:
print("RESTSTRUCT: ptype2 (format)[" + ptype2 + "] not valid: " + parameters(last), file=sys.stderr)
raise Exception("'%s' is not one of %s!" % (ptype2, str(swaggerFormats)))
swagger['definitions'][currentDocuBlock]['properties'][name]['format'] = ptype2
if required:
setRequired(swagger['definitions'][currentDocuBlock], name)
return generic_handler_desc(cargo, r, "restbodyparam", None,
swagger['definitions'][currentDocuBlock]['properties'][name],
'description')
################################################################################
### @brief restallbodyparam
################################################################################
def restallbodyparam(cargo, r=Regexen()):
global swagger, operation, httpPath, method, restBodyParam
(dummy, last) = cargo
try:
(_name, _ptype, required) = parameters(last).split(',')
    except Exception:
        print("RESTALLBODYPARAM: 3 arguments required. You gave me: " + parameters(last), file=sys.stderr)
        raise Exception("Argument count error")
CheckReqOpt(required)
if required == 'required':
required = True
else:
required = False
if restBodyParam != None:
raise Exception("May only have one 'ALLBODY'")
restBodyParam = {
'name': 'Json Request Body',
'description': '',
'in': 'body',
'x-description-offset': len(swagger['paths'][httpPath][method]['description']),
'required': required,
'schema': {
'type': 'object',
'additionalProperties': {}
}
}
if _ptype != "json" and _ptype != "object":
if _ptype == "string":
restBodyParam['name'] = "plain text body"
restBodyParam['schema'] = {
'type': 'string',
'additionalProperties': {}
}
else:
raise Exception("Unknown body type " + _ptype + " - supported are json, object and string")
swagger['paths'][httpPath][method]['parameters'].append(restBodyParam)
return generic_handler_desc(cargo, r, "restbodyparam", None,
restBodyParam,
'description')
################################################################################
### @brief reststruct
################################################################################
def reststruct(cargo, r=Regexen()):
global swagger, operation, httpPath, method, restBodyParam, restSubBodyParam, fn
(dummy, last) = cargo
try:
(name, className, ptype, required, ptype2) = parameters(last).split(',')
except Exception:
print("RESTSTRUCT: 5 arguments required (name, className, ptype, required, ptype2). You gave me: " + parameters(last), file=sys.stderr)
raise Exception("Argument count error")
CheckReqOpt(required)
if required == 'required':
required = True
else:
required = False
if className not in swagger['definitions']:
swagger['definitions'][className] = {
'type': 'object',
'properties' : {},
'description': '',
'x-filename': fn
}
swagger['definitions'][className]['properties'][name] = {
'type': ptype,
'description': ''
}
if ptype == 'array':
if ptype2 not in swaggerBaseTypes:
swagger['definitions'][className]['properties'][name]['items'] = {
'$ref': '#/definitions/' + ptype2
}
else:
swagger['definitions'][className]['properties'][name]['items'] = {
'type': ptype2
}
if ptype == 'object' and len(ptype2) > 0:
if not ptype2 in swagger['definitions']:
swagger['definitions'][ptype2] = {
'x-filename': fn,
'type': 'object',
'properties' : {},
'description': ''
}
swagger['definitions'][className]['properties'][name] = {
'$ref': '#/definitions/' + ptype2
}
if required:
setRequired(swagger['definitions'][className], name)
return generic_handler_desc(cargo, r, "reststruct", None,
swagger['definitions'][ptype2],
'description')
elif ptype != 'string' and ptype != 'boolean':
if ptype in swaggerFormats and ptype2 not in swaggerFormats[ptype]:
print("RESTSTRUCT: ptype2 (format)[" + ptype2 + "] not valid: " + parameters(last), file=sys.stderr)
raise Exception("'%s' is not one of %s!" % (ptype2, str(swaggerFormats)))
swagger['definitions'][className]['properties'][name]['format'] = ptype2
return generic_handler_desc(cargo, r, "restbodyparam", None,
swagger['definitions'][className]['properties'][name],
'description')
################################################################################
### @brief restqueryparam
################################################################################
def restqueryparam(cargo, r=Regexen()):
global swagger, operation, httpPath, method, swaggerBaseTypes
(dummy, last) = cargo
parametersList = parameters(last).split(',')
CheckReqOpt(parametersList[2])
if parametersList[2] == 'required':
required = True
else:
required = False
swaggerType = parametersList[1].lower()
if swaggerType not in swaggerBaseTypes:
print("RESTQUERYPARAM is supposed to be a swagger type.", file=sys.stderr)
raise Exception("'%s' is not one of %s!" % (swaggerType, str(swaggerBaseTypes)))
para = {
'name': parametersList[0],
'in': 'query',
'description': '',
'type': swaggerType,
'required': required
}
swagger['paths'][httpPath][method]['parameters'].append(para)
return generic_handler_desc(cargo, r, "restqueryparam", None, para, 'description')
################################################################################
### @brief hints
################################################################################
def hints(cargo, r=Regexen()):
global swagger, operation, httpPath, method
ret = generic_handler_desc(cargo, r, "hints", None,
swagger['paths'][httpPath][method], 'x-hints')
if r.TRIPLENEWLINEATSTART.match(swagger['paths'][httpPath][method]['x-hints']):
(fp, dummy) = cargo
print('remove newline after @HINTS in file %s' % (fp.name), file=sys.stderr)
exit(1)
return ret
################################################################################
### @brief restdescription
################################################################################
def restdescription(cargo, r=Regexen()):
global swagger, operation, httpPath, method
swagger['paths'][httpPath][method]['description'] += '\n\n'
ret = generic_handler_desc(cargo, r, "restdescription", None,
swagger['paths'][httpPath][method],
'description')
if r.TRIPLENEWLINEATSTART.match(swagger['paths'][httpPath][method]['description']):
(fp, dummy) = cargo
print('remove newline after @RESTDESCRIPTION in file %s' % (fp.name), file=sys.stderr)
exit(1)
return ret
################################################################################
### @brief restreplybody
################################################################################
def restreplybody(cargo, r=Regexen()):
global swagger, operation, httpPath, method, restReplyBodyParam, fn
(dummy, last) = cargo
try:
(name, ptype, required, ptype2) = parameters(last).split(',')
    except Exception as x:
print("RESTREPLYBODY: 4 arguments required. You gave me: " + parameters(last), file=sys.stderr)
raise x
CheckReqOpt(required)
if required == 'required':
required = True
else:
required = False
if currentReturnCode == 0:
raise Exception("failed to add text to response body: (have to specify the HTTP-code first) " + parameters(last))
rcBlock = ''
if name == '':
if ptype == 'object':
rcBlock = ptype2
elif ptype == 'array':
rcBlock = currentDocuBlock + '_rc_' + currentReturnCode
else:
rcBlock = currentDocuBlock + '_rc_' + currentReturnCode
#if currentReturnCode:
if restReplyBodyParam == None:
# https://github.com/swagger-api/swagger-ui/issues/1430
# once this is solved we can skip this:
operation['description'] += '\n**HTTP ' + currentReturnCode + '**\n'
operation['description'] += "*A json document with these Properties is returned:*\n"
operation['responses'][currentReturnCode]['x-description-offset'] = len(operation['description'])
operation['responses'][currentReturnCode]['schema'] = {
'$ref': '#/definitions/' + rcBlock
}
swagger['paths'][httpPath][method]['produces'] = [
"application/json"
]
restReplyBodyParam = ''
if not rcBlock in swagger['definitions']:
swagger['definitions'][rcBlock] = {
'x-filename': fn,
'type' : 'object',
'properties': {},
}
if len(name) > 0:
swagger['definitions'][rcBlock]['properties'][name] = {
'type': ptype,
'description': ''
}
if ptype == 'object' and len(ptype2) > 0:
if len(name) > 0:
swagger['definitions'][rcBlock]['properties'][name] = {
'$ref': '#/definitions/' + ptype2
}
if not ptype2 in swagger['definitions']:
swagger['definitions'][ptype2] = {
'x-filename': fn,
'type': 'object',
'properties' : {},
'description': ''
}
if required:
setRequired(swagger['definitions'][ptype2], name)
return generic_handler_desc(cargo, r, "restbodyparam", None,
swagger['definitions'][ptype2],
'description')
if ptype == 'array':
if len(name) == 0:
swagger['definitions'][rcBlock] = {
'type': ptype,
'description': ''
}
swagger['definitions'][rcBlock]['items'] = {
'$ref': '#/definitions/' + ptype2
}
return generic_handler_desc(cargo, r, "restreplybody", None,
swagger['definitions'][rcBlock],
'description')
else:
if len(ptype2) == 0:
swagger['definitions'][rcBlock]['properties'][name]['items'] = {
}
elif ptype2 not in swaggerBaseTypes:
swagger['definitions'][rcBlock]['properties'][name]['items'] = {
'$ref': '#/definitions/' + ptype2
}
else:
swagger['definitions'][rcBlock]['properties'][name]['items'] = {
'type': ptype2
}
if ptype2 == 'object':
swagger['definitions'][rcBlock]['properties']\
[name]['items']['additionalProperties'] = {}
elif ptype == 'object':
if len(name) > 0:
swagger['definitions'][rcBlock]['properties'][name]['additionalProperties'] = {}
elif ptype != 'string':
swagger['definitions'][rcBlock]['properties'][name]['format'] = ptype2
if len(name) > 0 & required:
setRequired(swagger['definitions'][rcBlock], name)
if len(name) > 0:
if 'description' not in swagger['definitions'][rcBlock]['properties']:
swagger['definitions'][rcBlock]['properties'][name]['description'] = ''
return generic_handler_desc(
cargo, r, "restreplybody", None,
swagger['definitions'][rcBlock]['properties'][name],
'description')
else:
swagger['definitions'][rcBlock]['description'] = ''
return generic_handler_desc(cargo, r, "restreplybody", None,
swagger['definitions'][rcBlock],
'description')
################################################################################
### @brief restreturncodes
################################################################################
def restreturncodes(cargo, r=Regexen()):
return generic_handler(cargo, r, "restreturncodes")
################################################################################
### @brief restreturncode
################################################################################
def restreturncode(cargo, r=Regexen()):
global currentReturnCode, restReplyBodyParam
(dummy, last) = cargo
restReplyBodyParam = None
currentReturnCode = parameters(last)
if not 'responses' in swagger['paths'][httpPath][method]:
swagger['paths'][httpPath][method]['responses'] = {}
swagger['paths'][httpPath][method]['responses'][currentReturnCode] = {
#'code': parameters(last),
'description': ''
}
return generic_handler_desc(
cargo, r, "restreturncode", None,
swagger['paths'][httpPath][method]['responses'][parameters(last)],
'description')
################################################################################
### @brief examples
################################################################################
def examples(cargo, r=Regexen()):
global currentExample
operation['x-examples'].append('')
return generic_handler_desc(cargo, r, "x-examples", None, operation['x-examples'], currentExample)
################################################################################
### @brief example_arangosh_run
################################################################################
def example_arangosh_run(cargo, r=Regexen()):
global currentExample, DEBUG
if DEBUG:
print("example_arangosh_run", file=sys.stderr)
(fp, last) = cargo
exampleHeader = brTrim(operation['x-examples'][currentExample]).strip()
    # new examples code. TODO: each example should get its own object in the json file
fn = os.path.join(os.path.dirname(__file__), '../Documentation/Examples/' + parameters(last) + '.generated')
try:
examplefile = io.open(fn, encoding='utf-8', newline=None)
except:
print("Failed to open example file:\n '%s'" % fn, file=sys.stderr)
raise Exception("failed to open example file:" + fn)
operation['x-examples'][currentExample] = '\n\n**Example:**\n ' + exampleHeader.strip('\n ') + '\n\n<pre>'
for line in examplefile.readlines():
operation['x-examples'][currentExample] += '<code>' + line + '</code>'
examplefile.close()
operation['x-examples'][currentExample] += '</pre>\n\n\n'
line = ""
while not r.END_EXAMPLE_ARANGOSH_RUN.match(line):
line = fp.readline()
if not line:
return eof, (fp, line)
currentExample += 1
return examples, (fp, line)
################################################################################
### @brief eof
################################################################################
def eof(cargo):
global DEBUG
if DEBUG:
print("eof", file=sys.stderr)
################################################################################
### @brief error
################################################################################
def error(cargo):
global DEBUG
if DEBUG:
print("error", file=sys.stderr)
sys.stderr.write('Unidentifiable line:\n' + cargo)
################################################################################
### @brief comment
################################################################################
def comment(cargo, r=Regexen()):
global DEBUG
if DEBUG:
print("comment", file=sys.stderr)
(fp, dummy) = cargo
while 1:
line = fp.readline()
if not line:
return eof, (fp, line)
next, c = next_step(fp, line, r)
if next:
return next, c
################################################################################
### @brief skip_code
###
### skip all non comment lines
################################################################################
def skip_code(cargo, r=Regexen()):
global DEBUG
if DEBUG:
print("skip_code", file=sys.stderr)
(fp, last) = cargo
return comment((fp, last), r)
################################################################################
### @brief main
################################################################################
automat = StateMachine()
automat.add_state(comment)
automat.add_state(eof, end_state=1)
automat.add_state(error, end_state=1)
automat.add_state(start_docublock)
automat.add_state(hints)
automat.add_state(example_arangosh_run)
automat.add_state(examples)
automat.add_state(skip_code)
automat.add_state(restbodyparam)
automat.add_state(reststruct)
automat.add_state(restallbodyparam)
automat.add_state(restdescription)
automat.add_state(restheader)
automat.add_state(restheaderparam)
automat.add_state(restheaderparameters)
automat.add_state(restqueryparam)
automat.add_state(restqueryparameters)
automat.add_state(restreturncode)
automat.add_state(restreturncodes)
automat.add_state(restreplybody)
automat.add_state(resturlparam)
automat.add_state(resturlparameters)
automat.add_state(restparam)
automat.add_state(restqueryparam)
def getOneApi(infile, filename, thisFn):
automat.set_start(skip_code)
automat.set_fn(thisFn)
automat.run((infile, ''))
################################################################################
### Swagger Markdown rendering
################################################################################
def getReference(name, source, verb):
try:
ref = name['$ref'][defLen:]
except Exception as ex:
print("No reference in: ", file=sys.stderr)
print(name, file=sys.stderr)
raise Exception("No reference in: " + str(name)) from ex
if not ref in swagger['definitions']:
fn = ''
if verb:
fn = swagger['paths'][route][verb]['x-filename']
else:
fn = swagger['definitions'][source]['x-filename']
print(json.dumps(
swagger['definitions'],
indent=4,
separators=(', ', ': '),
sort_keys=True), file=sys.stderr)
raise Exception("invalid reference: " + ref + " in " + fn)
return ref
removeDoubleLF = re.compile("\n\n")
removeLF = re.compile("\n")
def TrimThisParam(text, indent):
text = text.rstrip('\n').lstrip('\n')
text = removeDoubleLF.sub("\n", text)
if indent > 0:
indent = (indent + 4) # align the text right of the list...
return removeLF.sub("\n" + ' ' * indent, text)
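# Example (illustrative): TrimThisParam("a\n\nb", 4) collapses the blank line
# and indents continuation lines by indent+4 spaces: 'a\n        b'.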
def unwrapPostJson(reference, layer):
swaggerDataTypes = ["number", "integer", "string", "boolean", "array", "object"]
####
# print >>sys.stderr, "xx" * layer + reference
global swagger
rc = ''
if not 'properties' in swagger['definitions'][reference]:
if 'items' in swagger['definitions'][reference]:
if swagger['definitions'][reference]['type'] == 'array':
rc += '[\n'
subStructRef = getReference(swagger['definitions'][reference]['items'], reference, None)
rc += unwrapPostJson(subStructRef, layer + 1)
if swagger['definitions'][reference]['type'] == 'array':
rc += ']\n'
else:
for param in swagger['definitions'][reference]['properties'].keys():
thisParam = swagger['definitions'][reference]['properties'][param]
#required = ('required' in swagger['definitions'][reference] and
# param in swagger['definitions'][reference]['required'])
# print >> sys.stderr, thisParam
if '$ref' in thisParam:
subStructRef = getReference(thisParam, reference, None)
rc += ' ' * layer + "- **" + param + "**:\n"
####
# print >>sys.stderr, "yy" * layer + param
rc += unwrapPostJson(subStructRef, layer + 1)
elif thisParam['type'] == 'object':
rc += ' ' * layer + "- **" + param + "**: " + TrimThisParam(brTrim(thisParam['description']), layer) + "\n"
elif thisParam['type'] == 'array':
rc += ' ' * layer + "- **" + param + "**"
trySubStruct = False
lf = ""
####
# print >>sys.stderr, "zz" * layer + param
if 'type' in thisParam['items']:
rc += " (" + thisParam['items']['type'] + ")"
lf = "\n"
else:
if len(thisParam['items']) == 0:
rc += " (anonymous json object)"
lf = "\n"
else:
trySubStruct = True
rc += ": " + TrimThisParam(brTrim(thisParam['description']), layer) + lf
if trySubStruct:
try:
subStructRef = getReference(thisParam['items'], reference, None)
except:
print("while analyzing: " + param, file=sys.stderr)
print(thisParam, file=sys.stderr)
rc += "\n" + unwrapPostJson(subStructRef, layer + 1)
else:
if thisParam['type'] not in swaggerDataTypes:
print("while analyzing: " + param, file=sys.stderr)
print(thisParam['type'] + " is not a valid swagger datatype; supported ones: " + str(swaggerDataTypes), file=sys.stderr)
raise Exception("invalid swagger type")
rc += ' ' * layer + "- **" + param + "**: " + TrimThisParam(thisParam['description'], layer) + '\n'
return rc
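# Illustrative result (hypothetical definition 'user_body' with one string
# property 'name' described as 'name of the user'):
#   unwrapPostJson('user_body', 0) -> '- **name**: name of the user\n'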
# four positional arguments are mandatory (argv[1]..argv[4]); the filter is optional
if len(sys.argv) < 5:
print("usage: " + sys.argv[0] + " <scriptDir> <outDir> <relDir> <docublockdir> <optional: filter>", file=sys.stderr)
sys.exit(1)
scriptDir = sys.argv[1]
if not scriptDir.endswith("/"):
scriptDir += "/"
outDir = sys.argv[2]
if not outDir.endswith("/"):
outDir += "/"
relDir = sys.argv[3]
if not relDir.endswith("/"):
relDir += "/"
fileFilter = ""
if len(sys.argv) > 5:
fileFilter = sys.argv[5]
print("Filtering for: [" + fileFilter + "]", file=sys.stderr)
# read ArangoDB version and use it as API version
f = io.open(scriptDir + "ARANGO-VERSION", encoding="utf-8", newline=None)
version = f.read().strip()
f.close()
swagger['info']['version'] = version
paths = {}
topdir = sys.argv[4]
files = {}
for chapter in os.listdir(topdir):
if not os.path.isdir(os.path.join(topdir, chapter)) or chapter[0] == ".":
continue
files[chapter] = []
curPath = os.path.join(topdir, chapter)
for oneFile in os.listdir(curPath):
if fileFilter != "" and oneFile != fileFilter:
print("Skipping: [" + oneFile + "]", file=sys.stderr)
continue
curPath2 = os.path.join(curPath, oneFile)
if os.path.isfile(curPath2) and oneFile[0] != "." and oneFile.endswith(".md"):
files[chapter].append(os.path.join(topdir, chapter, oneFile))
for name, filenames in sorted(files.items(), key=operator.itemgetter(0)):
currentTag = name
for fn in filenames:
thisfn = fn
infile = io.open(fn, encoding='utf-8', newline=None)
try:
getOneApi(infile, name + " - " + ', '.join(filenames), fn)
except Exception as x:
print("\nwhile parsing file: '%s' error: %s" % (thisfn, x), file=sys.stderr)
raise Exception("while parsing file '%s' error: %s" %(thisfn, x))
infile.close()
currentDocuBlock = None
lastDocuBlock = None
# Sort arrays by offset helper:
def descOffsetGet(value):
return value["descOffset"]
for route in swagger['paths'].keys():
for verb in swagger['paths'][route].keys():
offsetPlus = 0
thisVerb = swagger['paths'][route][verb]
if not thisVerb['description']:
print("Description of Route empty; @RESTDESCRIPTION missing?", file=sys.stderr)
print("in :" + verb + " " + route, file=sys.stderr)
#raise TODO
# insert the post json description into the place we extracted it:
# Collect the blocks we want to work on, sort them by replacement place:
sortVec = []
for nParam in range(0, len(thisVerb['parameters'])):
if thisVerb['parameters'][nParam]['in'] == 'body':
sortVec.append({
"nParam": nParam,
"descOffset": thisVerb['parameters'][nParam]['x-description-offset']
})
sortVec.sort(key=descOffsetGet)
for oneItem in sortVec:
nParam = oneItem["nParam"]
descOffset = thisVerb['parameters'][nParam]['x-description-offset']
addText = ''
postText = ''
paramDesc = thisVerb['description'][:(descOffset+offsetPlus)]
if paramDesc:
postText += paramDesc
if 'additionalProperties' not in thisVerb['parameters'][nParam]['schema']:
addText = "\n" + unwrapPostJson(
getReference(thisVerb['parameters'][nParam]['schema'],
route,
verb),
1) + "\n\n"
postText += addText
postText += thisVerb['description'][(offsetPlus+descOffset):]
offsetPlus += len(addText)
thisVerb['description'] = postText
# insert the reply json description into the place we extracted it:
if 'responses' in thisVerb:
# Collect the blocks we want to work on, sort them by replacement place:
sortVec = []
for nRC in thisVerb['responses']:
if 'x-description-offset' in thisVerb['responses'][nRC]:
sortVec.append({
"nParam": nRC,
"descOffset": thisVerb['responses'][nRC]['x-description-offset']
})
sortVec.sort(key=descOffsetGet)
for oneItem in sortVec:
nRC = oneItem["nParam"]
descOffset = thisVerb['responses'][nRC]['x-description-offset']
#print descOffset
#print offsetPlus
descOffset += offsetPlus
addText = ''
#print thisVerb['responses'][nRC]['description']
postText = thisVerb['description'][:descOffset]
#print postText
replyDescription = TrimThisParam(thisVerb['responses'][nRC]['description'], 0)
if replyDescription:
addText += '\n' + replyDescription + '\n'
if 'additionalProperties' not in thisVerb['responses'][nRC]['schema']:
addText += "\n" + unwrapPostJson(
getReference(thisVerb['responses'][nRC]['schema'],
route,
verb),
0) + '\n'
# print addText
postText += addText
postText += thisVerb['description'][descOffset:]
offsetPlus += len(addText)
thisVerb['description'] = postText
#print '-'*80
#print thisVerb['description']
# Simplify hint box code to something that works in Swagger UI
# Append the result to the description field
# Place invisible markers, so that hints can be removed again
if 'x-hints' in thisVerb and thisVerb['x-hints']:
thisVerb['description'] += '\n<!-- Hints Start -->'
tmp = re.sub("{% hint '([^']+?)' %}",
lambda match: "\n\n**{}:** ".format(match.group(1).title()),
thisVerb['x-hints'])
tmp = re.sub('{%[^%]*?%}', '', tmp)
thisVerb['description'] += tmp
thisVerb['description'] += '\n<!-- Hints End -->'
# Append the examples to the description:
if 'x-examples' in thisVerb and thisVerb['x-examples']:
thisVerb['description'] += '\n'
for nExample in range(0, len(thisVerb['x-examples'])):
thisVerb['description'] += thisVerb['x-examples'][nExample]
            thisVerb['x-examples'] = []  # TODO: unset!
#print highlight(yaml.dump(swagger, Dumper=yaml.RoundTripDumper), YamlLexer(), TerminalFormatter())
#print yaml.dump(swagger, Dumper=yaml.RoundTripDumper)
print(json.dumps(swagger, indent=4, separators=(', ', ': '), sort_keys=True))
#print json.dumps(swagger['paths'], indent=4, separators=(', ',': '), sort_keys=True)
#print highlight(yaml.dump(swagger, Dumper=yaml.RoundTripDumper), YamlLexer(), TerminalFormatter())
## -----------------------------------------------------------------------------
## --SECTION-- END-OF-FILE
## -----------------------------------------------------------------------------
## Local Variables:
## mode: outline-minor
## outline-regexp: "^\\(### @brief\\|## --SECTION--\\|# -\\*- \\)"
## End:
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
arangodb/arangodb
|
utils/generateSwagger.py
|
Python
|
apache-2.0
| 59,104
|
# -*- coding: utf-8 -*-
"""
Package entry point.
"""
__name__ = 'mp3sum'
__description__ = 'An integrity-checking tool for LAME-encoded MP3s'
__url__ = 'https://github.com/okdana/mp3sum/'
__author__ = 'dana'
__author_email__ = 'dana@dana.is'
__version__ = '1.0.1'
|
okdana/mp3civ
|
mp3sum/__init__.py
|
Python
|
mit
| 295
|
var1 = True
def func1():
pass
|
stencila/stencila
|
fixtures/projects/daggy/module2.py
|
Python
|
apache-2.0
| 35
|
# NOTE: the original read "import selectingDataSet.py" etc., which is not valid
# import syntax; assuming each helper module defines the screen class used in
# build() below (class names inferred from their usage):
from selectingDataSet import SelectingDataSet
from pca import RunPCA
from train import train
import scrollBar
from kivy.uix.screenmanager import ScreenManager, Screen
import kivy
kivy.require('1.8.0')
from kivy.app import App
from kivy.lang import Builder
class TestApp(App):
def build(self):
my_screenmanager = ScreenManager()
screen1 = SelectingDataSet(name='screen1')
screen2 = RunPCA(name='screen2')
screen3 = train(name='screen3')
my_screenmanager.add_widget(screen1)
my_screenmanager.add_widget(screen2)
my_screenmanager.add_widget(screen3)
return my_screenmanager
if __name__ == '__main__':
TestApp().run()
|
ttsuchi/neural-network-demo
|
demos/fullDemo.py
|
Python
|
mit
| 656
|
import sys
import seq
import os
from logger import Logger
"""
right now this just chooses the longest
BEWARE, this writes over the file
"""
if __name__ == "__main__":
if len(sys.argv) != 4 and len(sys.argv) != 5:
print("python "+sys.argv[0]+" table clusterdir fending [logfile]")
sys.exit(0)
fend = sys.argv[3]
LOGFILE = "pyphlawd.log"
if len(sys.argv) == 5:
LOGFILE = sys.argv[4]
log = Logger(LOGFILE)
log.a()
tab = open(sys.argv[1],"r")
idn = {}
for i in tab:
spls = i.strip().split("\t")
idn[spls[3]] = spls[4]
tab.close()
dirr = sys.argv[2]
for o in os.listdir(dirr):
        if fend is not None:
if fend not in o:
continue
seqs = {}
for i in seq.read_fasta_file_iter(dirr+"/"+o):
if idn[i.name] not in seqs:
seqs[idn[i.name]] = []
seqs[idn[i.name]].append(i)
for i in seqs:
if len(seqs[i]) > 1:
longest = None
longestn = 0
for j in seqs[i]:
if len(j.seq) > longestn:
longest = j
longestn = len(j.seq)
seqs[i] = [longest]
fn = open(dirr+"/"+o,"w")
for i in seqs:
for j in seqs[i]:
fn.write(j.get_fasta())
fn.close()
log.c()
|
FePhyFoFum/PyPHLAWD
|
src/choose_one_species_cluster.py
|
Python
|
gpl-2.0
| 1,407
|
# coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import octoprint.plugin
import flask
import os
import threading
import time
import hashlib
from . import version_checks, updaters, exceptions, util
from octoprint.server.util.flask import restricted_access
from octoprint.server import admin_permission
from octoprint.util import dict_merge
import octoprint.settings
##~~ Plugin
class SoftwareUpdatePlugin(octoprint.plugin.BlueprintPlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.TemplatePlugin):
def __init__(self):
self._update_in_progress = False
self._configured_checks_mutex = threading.Lock()
self._configured_checks = None
self._refresh_configured_checks = False
self._version_cache = dict()
self._version_cache_ttl = 0
self._version_cache_path = None
self._version_cache_dirty = False
def initialize(self):
self._version_cache_ttl = self._settings.get_int(["cache_ttl"]) * 60
self._version_cache_path = os.path.join(self.get_plugin_data_folder(), "versioncache.yaml")
self._load_version_cache()
def refresh_checks(name, plugin):
self._refresh_configured_checks = True
self._send_client_message("update_versions")
self._plugin_lifecycle_manager.add_callback("enabled", refresh_checks)
self._plugin_lifecycle_manager.add_callback("disabled", refresh_checks)
def _get_configured_checks(self):
with self._configured_checks_mutex:
if self._refresh_configured_checks or self._configured_checks is None:
self._refresh_configured_checks = False
self._configured_checks = self._settings.get(["checks"], merged=True)
update_check_hooks = self._plugin_manager.get_hooks("octoprint.plugin.softwareupdate.check_config")
for name, hook in update_check_hooks.items():
try:
hook_checks = hook()
except:
self._logger.exception("Error while retrieving update information from plugin {name}".format(**locals()))
else:
for key, data in hook_checks.items():
if key in self._configured_checks:
data = dict_merge(data, self._configured_checks[key])
self._configured_checks[key] = data
return self._configured_checks
def _load_version_cache(self):
if not os.path.isfile(self._version_cache_path):
return
import yaml
try:
with open(self._version_cache_path) as f:
data = yaml.safe_load(f)
except:
self._logger.exception("Error while loading version cache from disk")
else:
try:
if not isinstance(data, dict):
self._logger.info("Version cache was created in a different format, not using it")
return
if "__version" in data:
data_version = data["__version"]
else:
self._logger.info("Can't determine version of OctoPrint version cache was created for, not using it")
return
from octoprint._version import get_versions
octoprint_version = get_versions()["version"]
if data_version != octoprint_version:
self._logger.info("Version cache was created for another version of OctoPrint, not using it")
return
self._version_cache = data
self._version_cache_dirty = False
self._logger.info("Loaded version cache from disk")
except:
self._logger.exception("Error parsing in version cache data")
def _save_version_cache(self):
import yaml
from octoprint.util import atomic_write
from octoprint._version import get_versions
octoprint_version = get_versions()["version"]
self._version_cache["__version"] = octoprint_version
with atomic_write(self._version_cache_path) as file_obj:
yaml.safe_dump(self._version_cache, stream=file_obj, default_flow_style=False, indent=" ", allow_unicode=True)
self._version_cache_dirty = False
self._logger.info("Saved version cache to disk")
#~~ SettingsPlugin API
def get_settings_defaults(self):
return {
"checks": {
"octoprint": {
"checkout_folder": "/home/pi/OctoPrint",
"type": "github_commit",
"repo": "OctoPrint",
"user": "mrbeam",
"branch": "stable-1.2.2",
"update_script": "{{python}} \"{update_script}\" --python=\"{{python}}\" \"{{folder}}\" {{target}}".format(update_script=os.path.join(self._basefolder, "scripts", "update-octoprint.py")),
"restart": "octoprint",
"current": "Unknown"
},
"svgtogcode": {
"checkout_folder": "/home/pi/mrbeam-inkscape-ext",
"type": "github_commit",
"repo": "mrbeam-inkscape-ext",
"user": "mrbeam",
"branch": "stable-1.2.2",
"update_script": "{{python}} \"{update_script}\" \"{{folder}}\" {{target}}".format(update_script=os.path.join(self._basefolder, "scripts", "git-pull.py")),
"restart": "octoprint",
"current": "Unknown"
},
"lcd": {
"checkout_folder": "/home/pi/lcd",
"type": "github_commit",
"repo": "lcd",
"user": "mrbeam",
"branch": "stable-1.2.2",
"update_script": "{{python}} \"{update_script}\" \"{{folder}}\" {{target}}".format(update_script=os.path.join(self._basefolder, "scripts", "git-pull.py")),
"restart": "environment",
"current": "Unknown"
},
"netconnectd": {
"checkout_folder": "/home/pi/netconnectd",
"type": "github_commit",
"repo": "netconnectd",
"user": "mrbeam",
"branch": "stable-1.2.2",
"update_script": "{{python}} \"{update_script}\" \"{{folder}}\" {{target}}".format(update_script=os.path.join(self._basefolder, "scripts", "git-pull.py")),
"restart": "environment",
"current": "Unknown"
},
},
"octoprint_restart_command": "sudo service octoprint restart",
"environment_restart_command": "sudo shutdown -r now",
"pip_command": None,
"cache_ttl": 12 * 60,
}
def on_settings_load(self):
data = dict(octoprint.plugin.SettingsPlugin.on_settings_load(self))
if "checks" in data:
del data["checks"]
checks = self._get_configured_checks()
if "octoprint" in checks:
if "checkout_folder" in checks["octoprint"]:
data["octoprint_checkout_folder"] = checks["octoprint"]["checkout_folder"]
elif "update_folder" in checks["octoprint"]:
data["octoprint_checkout_folder"] = checks["octoprint"]["update_folder"]
else:
data["octoprint_checkout_folder"] = None
data["octoprint_type"] = checks["octoprint"].get("type", None)
else:
data["octoprint_checkout_folder"] = None
data["octoprint_type"] = None
return data
def on_settings_save(self, data):
for key in self.get_settings_defaults():
if key == "checks" or key == "cache_ttl" or key == "octoprint_checkout_folder" or key == "octoprint_type":
continue
if key in data:
self._settings.set([key], data[key])
if "cache_ttl" in data:
self._settings.set_int(["cache_ttl"], data["cache_ttl"])
self._version_cache_ttl = self._settings.get_int(["cache_ttl"]) * 60
checks = self._get_configured_checks()
if "octoprint" in checks:
check = checks["octoprint"]
update_type = check.get("type", None)
checkout_folder = check.get("checkout_folder", None)
update_folder = check.get("update_folder", None)
defaults = dict(
plugins=dict(softwareupdate=dict(
checks=dict(
octoprint=dict(
type=update_type,
checkout_folder=checkout_folder,
update_folder=update_folder
)
)
))
)
if "octoprint_checkout_folder" in data:
self._settings.set(["checks", "octoprint", "checkout_folder"], data["octoprint_checkout_folder"], defaults=defaults, force=True)
if update_folder and data["octoprint_checkout_folder"]:
self._settings.set(["checks", "octoprint", "update_folder"], None, defaults=defaults, force=True)
self._refresh_configured_checks = True
if "octoprint_type" in data and data["octoprint_type"] in ("github_release", "git_commit"):
self._settings.set(["checks", "octoprint", "type"], data["octoprint_type"], defaults=defaults, force=True)
self._refresh_configured_checks = True
def get_settings_version(self):
return 3
def on_settings_migrate(self, target, current=None):
if current is None or current == 2:
# there might be some left over data from the time we still persisted everything to settings,
# even the stuff that shouldn't be persisted but always provided by the hook - let's
# clean up
configured_checks = self._settings.get(["checks"], incl_defaults=False)
if configured_checks is None:
configured_checks = dict()
check_keys = configured_checks.keys()
# take care of the octoprint entry
if "octoprint" in configured_checks:
octoprint_check = dict(configured_checks["octoprint"])
if "type" not in octoprint_check or octoprint_check["type"] != "github_commit":
deletables=["current", "displayName", "displayVersion"]
else:
deletables=[]
octoprint_check = self._clean_settings_check("octoprint", octoprint_check, self.get_settings_defaults()["checks"]["octoprint"], delete=deletables, save=False)
check_keys.remove("octoprint")
# and the hooks
update_check_hooks = self._plugin_manager.get_hooks("octoprint.plugin.softwareupdate.check_config")
for name, hook in update_check_hooks.items():
try:
hook_checks = hook()
except:
self._logger.exception("Error while retrieving update information from plugin {name}".format(**locals()))
else:
for key, data in hook_checks.items():
if key in configured_checks:
settings_check = dict(configured_checks[key])
merged = dict_merge(data, settings_check)
if "type" not in merged or merged["type"] != "github_commit":
deletables = ["current", "displayVersion"]
else:
deletables = []
self._clean_settings_check(key, settings_check, data, delete=deletables, save=False)
check_keys.remove(key)
# and anything that's left over we'll just remove now
for key in check_keys:
dummy_defaults = dict(plugins=dict())
dummy_defaults["plugins"][self._identifier] = dict(checks=dict())
dummy_defaults["plugins"][self._identifier]["checks"][key] = None
self._settings.set(["checks", key], None, defaults=dummy_defaults)
elif current == 1:
configured_checks = self._settings.get(["checks"], incl_defaults=False)
if configured_checks is None:
return
if "octoprint" in configured_checks and "octoprint" in configured_checks["octoprint"]:
# that's a circular reference, back to defaults
dummy_defaults = dict(plugins=dict())
dummy_defaults["plugins"][self._identifier] = dict(checks=dict())
dummy_defaults["plugins"][self._identifier]["checks"]["octoprint"] = None
self._settings.set(["checks", "octoprint"], None, defaults=dummy_defaults)
self._settings.save()
def _clean_settings_check(self, key, data, defaults, delete=None, save=True):
if delete is None:
delete = []
for k, v in data.items():
if k in defaults and defaults[k] == data[k]:
del data[k]
for k in delete:
if k in data:
del data[k]
dummy_defaults = dict(plugins=dict())
dummy_defaults["plugins"][self._identifier] = dict(checks=dict())
dummy_defaults["plugins"][self._identifier]["checks"][key] = defaults
if len(data):
self._settings.set(["checks", key], data, defaults=dummy_defaults)
else:
self._settings.set(["checks", key], None, defaults=dummy_defaults)
if save:
self._settings.save()
return data
#~~ BluePrint API
@octoprint.plugin.BlueprintPlugin.route("/check", methods=["GET"])
@restricted_access
def check_for_update(self):
if "check" in flask.request.values:
check_targets = map(lambda x: x.strip(), flask.request.values["check"].split(","))
else:
check_targets = None
if "force" in flask.request.values and flask.request.values["force"] in octoprint.settings.valid_boolean_trues:
force = True
else:
force = False
try:
information, update_available, update_possible = self.get_current_versions(check_targets=check_targets, force=force)
return flask.jsonify(dict(status="updatePossible" if update_available and update_possible else "updateAvailable" if update_available else "current", information=information))
except exceptions.ConfigurationInvalid as e:
return flask.make_response("Update not properly configured, can't proceed: %s" % e.message, 500)
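# Hypothetical request against this endpoint (assuming the blueprint is mounted
# at the plugin default prefix, /plugin/softwareupdate):
#   curl "http://octopi.local/plugin/softwareupdate/check?check=octoprint&force=true"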
@octoprint.plugin.BlueprintPlugin.route("/update", methods=["POST"])
@restricted_access
@admin_permission.require(403)
def perform_update(self):
if self._printer.is_printing() or self._printer.is_paused():
# do not update while a print job is running
return flask.make_response("Printer is currently printing or paused", 409)
if not "application/json" in flask.request.headers["Content-Type"]:
return flask.make_response("Expected content-type JSON", 400)
json_data = flask.request.json
if "check" in json_data:
check_targets = map(lambda x: x.strip(), json_data["check"])
else:
check_targets = None
if "force" in json_data and json_data["force"] in octoprint.settings.valid_boolean_trues:
force = True
else:
force = False
to_be_checked, checks = self.perform_updates(check_targets=check_targets, force=force)
return flask.jsonify(dict(order=to_be_checked, checks=checks))
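# Hypothetical request, mirroring the JSON keys read above (host and prefix assumed):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"check": ["octoprint"], "force": true}' \
#        http://octopi.local/plugin/softwareupdate/update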
#~~ Asset API
def get_assets(self):
return dict(
css=["css/softwareupdate.css"],
js=["js/softwareupdate.js"],
less=["less/softwareupdate.less"]
)
##~~ TemplatePlugin API
def get_template_configs(self):
return [
dict(type="settings", name="Software Update")
]
#~~ Updater
def get_current_versions(self, check_targets=None, force=False):
"""
Retrieves the current version information for all defined check_targets. Will retrieve information for all
available targets by default.
:param check_targets: an iterable defining the targets to check, if not supplied defaults to all targets
"""
checks = self._get_configured_checks()
if check_targets is None:
check_targets = checks.keys()
update_available = False
update_possible = False
information = dict()
for target, check in checks.items():
if not target in check_targets:
continue
populated_check = self._populated_check(target, check)
try:
target_information, target_update_available, target_update_possible = self._get_current_version(target, populated_check, force=force)
if target_information is None:
target_information = dict()
except exceptions.UnknownCheckType:
self._logger.warn("Unknown update check type for %s" % target)
continue
target_information = dict_merge(dict(local=dict(name="unknown", value="unknown"), remote=dict(name="unknown", value="unknown")), target_information)
update_available = update_available or target_update_available
update_possible = update_possible or (target_update_possible and target_update_available)
from octoprint._version import get_versions
octoprint_version = get_versions()["version"]
local_name = target_information["local"]["name"]
local_value = target_information["local"]["value"]
information[target] = dict(updateAvailable=target_update_available,
updatePossible=target_update_possible,
information=target_information,
displayName=populated_check["displayName"],
displayVersion=populated_check["displayVersion"].format(octoprint_version=octoprint_version, local_name=local_name, local_value=local_value),
check=populated_check)
if self._version_cache_dirty:
self._save_version_cache()
return information, update_available, update_possible
def _get_check_hash(self, check):
check_hash = hashlib.md5()
check_hash.update(repr(check))
return check_hash.hexdigest()
def _get_current_version(self, target, check, force=False):
"""
Determines the current version information for one target based on its check configuration.
"""
current_hash = self._get_check_hash(check)
if target in self._version_cache and not force:
data = self._version_cache[target]
if data["hash"] == current_hash and data["timestamp"] + self._version_cache_ttl >= time.time() > data["timestamp"]:
# we also check that timestamp < now to not get confused too much by clock changes
return data["information"], data["available"], data["possible"]
information = dict()
update_available = False
try:
version_checker = self._get_version_checker(target, check)
information, is_current = version_checker.get_latest(target, check)
self._logger.info("Update plugin: %s/%s/%s/%s, current: %s, remote: %s, local: %s" % (check['type'], check['user'], check['repo'], check['branch'], check['current'], information['remote']['value'], information['local']['value']))
if information is not None and not is_current:
update_available = True
except exceptions.UnknownCheckType:
self._logger.warn("Unknown check type %s for %s" % (check["type"], target))
update_possible = False
except:
self._logger.exception("Could not check %s for updates" % target)
update_possible = False
else:
try:
updater = self._get_updater(target, check)
update_possible = updater.can_perform_update(target, check)
except:
update_possible = False
self._version_cache[target] = dict(timestamp=time.time(),
hash=current_hash,
information=information,
available=update_available,
possible=update_possible)
self._version_cache_dirty = True
return information, update_available, update_possible
def _send_client_message(self, message_type, data=None):
self._plugin_manager.send_plugin_message("softwareupdate", dict(type=message_type, data=data))
def perform_updates(self, check_targets=None, force=False):
"""
Performs the updates for the given check_targets. Will update all possible targets by default.
:param check_targets: an iterable defining the targets to update, if not supplied defaults to all targets
"""
checks = self._get_configured_checks()
if check_targets is None:
check_targets = checks.keys()
to_be_updated = sorted(set(check_targets) & set(checks.keys()))
if "octoprint" in to_be_updated:
to_be_updated.remove("octoprint")
tmp = ["octoprint"] + to_be_updated
to_be_updated = tmp
updater_thread = threading.Thread(target=self._update_worker, args=(checks, to_be_updated, force))
updater_thread.daemon = False
updater_thread.start()
return to_be_updated, dict((key, check["displayName"] if "displayName" in check else key) for key, check in checks.items() if key in to_be_updated)
def _update_worker(self, checks, check_targets, force):
restart_type = None
try:
self._update_in_progress = True
target_results = dict()
error = False
### iterate over all configured targets
for target in check_targets:
if not target in checks:
continue
check = checks[target]
if "enabled" in check and not check["enabled"]:
continue
if not target in check_targets:
continue
target_error, target_result = self._perform_update(target, check, force)
error = error or target_error
if target_result is not None:
target_results[target] = target_result
if "restart" in check:
target_restart_type = check["restart"]
elif "pip" in check:
target_restart_type = "octoprint"
# if our update requires a restart we have to determine which type
if restart_type is None or (restart_type == "octoprint" and target_restart_type == "environment"):
restart_type = target_restart_type
finally:
# we might have needed to update the config, so we'll save that now
self._settings.save()
# also, we are no longer updating
self._update_in_progress = False
if error:
# if there was an unignorable error, we just return error
self._send_client_message("error", dict(results=target_results))
else:
self._save_version_cache()
# otherwise the update process was a success, but we might still have to restart
if restart_type is not None and restart_type in ("octoprint", "environment"):
# one of our updates requires a restart of either type "octoprint" or "environment". Let's see if
# we can actually perform that
restart_command = self._settings.get(["%s_restart_command" % restart_type])
if restart_command is not None:
self._send_client_message("restarting", dict(restart_type=restart_type, results=target_results))
try:
self._perform_restart(restart_command)
except exceptions.RestartFailed:
self._send_client_message("restart_failed", dict(restart_type=restart_type, results=target_results))
else:
# we don't have this restart type configured, we'll have to display a message that a manual
# restart is needed
self._send_client_message("restart_manually", dict(restart_type=restart_type, results=target_results))
else:
self._send_client_message("success", dict(results=target_results))
def _perform_update(self, target, check, force):
information, update_available, update_possible = self._get_current_version(target, check)
if not update_available and not force:
return False, None
if not update_possible:
self._logger.warn("Cannot perform update for %s, update type is not fully configured" % target)
return False, None
# determine the target version to update to
target_version = information["remote"]["value"]
target_error = False
### The actual update procedure starts here...
try:
self._logger.info("Starting update of %s to %s..." % (target, target_version))
self._send_client_message("updating", dict(target=target, version=target_version))
updater = self._get_updater(target, check)
if updater is None:
raise exceptions.UnknownUpdateType()
update_result = updater.perform_update(target, check, target_version)
target_result = ("success", update_result)
self._logger.info("Update of %s to %s successful!" % (target, target_version))
except exceptions.UnknownUpdateType:
self._logger.warn("Update of %s can not be performed, unknown update type" % target)
self._send_client_message("update_failed", dict(target=target, version=target_version, reason="Unknown update type"))
return False, None
except Exception as e:
self._logger.exception("Update of %s can not be performed" % target)
if not "ignorable" in check or not check["ignorable"]:
target_error = True
if isinstance(e, exceptions.UpdateError):
target_result = ("failed", e.data)
self._send_client_message("update_failed", dict(target=target, version=target_version, reason=e.data))
else:
target_result = ("failed", None)
self._send_client_message("update_failed", dict(target=target, version=target_version, reason="unknown"))
else:
# make sure that any external changes to config.yaml are loaded into the system
self._settings.load()
# persist the new version if necessary for check type
if check["type"] == "github_commit":
dummy_default = dict(plugins=dict())
dummy_default["plugins"][self._identifier] = dict(checks=dict())
dummy_default["plugins"][self._identifier]["checks"][target] = dict(current=None)
self._settings.set(["checks", target, "current"], target_version, defaults=dummy_default)
# we have to save here (even though that makes us save quite often) since otherwise the next
# load will overwrite our changes we just made
self._settings.save()
del self._version_cache[target]
self._version_cache_dirty = True
return target_error, target_result
def _perform_restart(self, restart_command):
"""
Performs a restart using the supplied restart_command.
"""
self._logger.info("Restarting...")
try:
util.execute(restart_command)
except exceptions.ScriptError as e:
self._logger.exception("Error while restarting")
self._logger.warn("Restart stdout:\n%s" % e.stdout)
self._logger.warn("Restart stderr:\n%s" % e.stderr)
raise exceptions.RestartFailed()
def _populated_check(self, target, check):
result = dict(check)
if target == "octoprint":
from flask.ext.babel import gettext
result["displayName"] = check.get("displayName", gettext("OctoPrint"))
result["displayVersion"] = check.get("displayVersion", "{octoprint_version}")
from octoprint._version import get_versions
versions = get_versions()
if check["type"] == "github_commit":
result["current"] = versions.get("full-revisionid", versions.get("full", "unknown"))
else:
result["current"] = versions["version"]
else:
result["displayName"] = check.get("displayName", target)
result["displayVersion"] = check.get("displayVersion", check.get("current", "unknown"))
if check["type"] in ("github_commit"):
result["current"] = check.get("current", None)
else:
result["current"] = check.get("current", check.get("displayVersion", None))
return result
def _get_version_checker(self, target, check):
"""
Retrieves the version checker to use for given target and check configuration. Will raise an UnknownCheckType
if version checker cannot be determined.
"""
if not "type" in check:
raise exceptions.ConfigurationInvalid("no check type defined")
check_type = check["type"]
if check_type == "github_release":
return version_checks.github_release
elif check_type == "github_commit":
return version_checks.github_commit
elif check_type == "git_commit":
return version_checks.git_commit
elif check_type == "commandline":
return version_checks.commandline
elif check_type == "python_checker":
return version_checks.python_checker
else:
raise exceptions.UnknownCheckType()
def _get_updater(self, target, check):
"""
Retrieves the updater for the given target and check configuration. Will raise an UnknownUpdateType if updater
cannot be determined.
"""
if "update_script" in check:
return updaters.update_script
elif "pip" in check:
if not "pip_command" in check and self._settings.get(["pip_command"]) is not None:
check["pip_command"] = self._settings.get(["pip_command"])
return updaters.pip
elif "python_updater" in check:
return updaters.python_updater
else:
raise exceptions.UnknownUpdateType()
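# Sketch of a check dict as both dispatchers above would resolve it (values are
# illustrative): type "github_commit" selects version_checks.github_commit in
# _get_version_checker, and the presence of "update_script" makes _get_updater
# return updaters.update_script:
#   check = dict(type="github_commit", user="mrbeam", repo="lcd",
#                branch="stable-1.2.2", update_script="...", restart="environment")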
__plugin_name__ = "Software Update"
__plugin_author__ = "Gina Häußge"
__plugin_url__ = "https://github.com/foosel/OctoPrint/wiki/Plugin:-Software-Update"
__plugin_description__ = "Allows receiving update notifications and performing updates of OctoPrint and plugins"
__plugin_license__ = "AGPLv3"
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = SoftwareUpdatePlugin()
global __plugin_helpers__
__plugin_helpers__ = dict(
version_checks=version_checks,
updaters=updaters,
exceptions=exceptions,
util=util
)
|
DueLaser/due_rasp
|
src/octoprint/plugins/softwareupdate/__init__.py
|
Python
|
agpl-3.0
| 27,079
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Avencall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
common_globals = {}
execfile_('common.py', common_globals)
MODELS = [
u'VVX300',
u'VVX310',
u'VVX400',
u'VVX410',
u'VVX500',
u'VVX600',
u'VVX1500',
]
VERSION = u'5.3.0'
class PolycomPlugin(common_globals['BasePolycomPlugin']):
IS_PLUGIN = True
pg_associator = common_globals['BasePolycomPgAssociator'](MODELS, VERSION)
|
alafarcinade/xivo-provd-plugins
|
plugins/xivo-polycom/5.3.0/entry.py
|
Python
|
gpl-3.0
| 1,050
|
# Assignment:
#########
#
# Write a function convert_num_to_month that takes a month's ordinal
# number in the year as a parameter and returns its name.
#
# Write a function convert_month_to_num that takes a month's name
# as a parameter and returns its ordinal number in the year.
# NOTE: January is number 1, December is 12
#
# Use these functions in a program that reads a string; if the first character
# is a letter, it prints the month's number, and if the first character is a
# digit, it prints the month's name.
#
# Hint: you can use the following variable in the program:
# month=['leden','unor','brezen','duben','kveten','cerven','cervenec','srpen','zari','rijen','listopad','prosinec']
###############################################################################
month=['leden','unor','brezen','duben','kveten','cerven','cervenec','srpen','zari','rijen','listopad','prosinec']
def convert_num_to_month( number ):
if number < 1 or number > 12:
return None
return month[number - 1]
def convert_month_to_num( name ):
if name not in month:
return None
return month.index(name)+1
# Example of invalid input
print( convert_month_to_num("bramborník"), convert_num_to_month(14) )
# Example of valid input:
print( convert_month_to_num("duben"), convert_num_to_month(10) )
|
malja/cvut-python
|
cviceni03/06_prevod_mesicu.py
|
Python
|
mit
| 1,341
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import pathlib
import shutil
import tempfile
import unittest
import unittest.mock
from qiime2.core.path import OwnedPath, OutPath
class TestOwnedPath(unittest.TestCase):
def setUp(self):
self.from_dir = tempfile.mkdtemp()
(pathlib.Path(self.from_dir) / 'foo.txt').touch()
self.to_dir = tempfile.mkdtemp()
# assume to_dir is empty for all tests
def test_move_or_copy_owned(self):
d = OwnedPath(self.from_dir)
# ensure that we are owned
d._user_owned = True
d._move_or_copy(self.to_dir)
# since from_dir is owned, _move_or_copy should copy, not move
self.assertTrue(os.path.exists(os.path.join(self.from_dir, 'foo.txt')))
self.assertTrue(os.path.exists(os.path.join(self.to_dir, 'foo.txt')))
shutil.rmtree(self.from_dir)
shutil.rmtree(self.to_dir)
def test_move_or_copy_not_owned_rename(self):
d = OwnedPath(self.from_dir)
# ensure that we are not owned
d._user_owned = False
d._move_or_copy(self.to_dir)
# since from_dir is not owned, _move_or_copy should move, not copy
self.assertFalse(os.path.exists(os.path.join(self.from_dir,
'foo.txt')))
self.assertTrue(os.path.exists(os.path.join(self.to_dir, 'foo.txt')))
with self.assertRaises(FileNotFoundError):
shutil.rmtree(self.from_dir)
shutil.rmtree(self.to_dir)
@unittest.mock.patch('pathlib.Path.rename', side_effect=FileExistsError)
def test_move_or_copy_not_owned_copy(self, _):
d = OwnedPath(self.from_dir)
# ensure that we are not owned
d._user_owned = False
d._move_or_copy(self.to_dir)
# since from_dir is not owned, but the network fs race condition crops
# up, _move_or_copy should copy, not move, but then we still ensure
# that the original path has been cleaned up
self.assertFalse(os.path.exists(os.path.join(self.from_dir,
'foo.txt')))
self.assertTrue(os.path.exists(os.path.join(self.to_dir, 'foo.txt')))
with self.assertRaises(FileNotFoundError):
shutil.rmtree(self.from_dir)
shutil.rmtree(self.to_dir)
class TestOutPath(unittest.TestCase):
def test_new_outpath(self):
f = OutPath()
self.assertIsInstance(f, OutPath)
self.assertTrue(f.is_file())
g = OutPath(dir=True)
self.assertIsInstance(g, OutPath)
self.assertTrue(g.is_dir())
def test_new_outpath_context_mgr(self):
with OutPath() as f:
path = str(f)
self.assertIsInstance(f, OutPath)
self.assertTrue(os.path.isfile(path))
self.assertFalse(os.path.isfile(path))
def test_destructor(self):
f = OutPath()
path = str(f)
self.assertTrue(os.path.isfile(path))
f._destructor()
self.assertFalse(os.path.isfile(path))
if __name__ == '__main__':
unittest.main()
|
qiime2/qiime2
|
qiime2/core/tests/test_path.py
|
Python
|
bsd-3-clause
| 3,406
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# TerminalRoastDB, released under GPLv3
# Get_Roaster_State
import Pyro4
roast_control = Pyro4.Proxy("PYRONAME:roaster.sr700")
print(roast_control.output_current_state()[0:3])
|
infinigrove/TerminalRoastDB
|
cmds/Get_Artisan_Temp.py
|
Python
|
gpl-3.0
| 226
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pyonscr_curses.py
#
# Copyright 2011 Mark Kolloros <uvthenfuv@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# onscr_interpreter went through big changes in the meantime
# it might not be compatible
from __future__ import division, print_function, unicode_literals
import sys
import curses
import onscr_interpreter
DEBUG = True
class CursesInterpreter(onscr_interpreter.VarKeeper):
def __init__(self, stdscr, filename):
super(CursesInterpreter, self).__init__(filename)
# done by curses.wrapper
self.stdscr = stdscr
if curses.has_colors():
curses.init_pair(1, curses.COLOR_BLUE, curses.COLOR_BLACK)
if DEBUG:
self.errorlog = []
self.running = False
self.clearwait = False
def run(self):
self.running = True
try:
while self.running:
super(CursesInterpreter, self).run_until_wait()
self.user_wait()
if self.clearwait:
self.clear()
self.clearwait = False
finally:
if DEBUG:
with open(b"PYONS_DEBUGLOG", b"w") as f:
for err in self.errorlog:
f.write(err)
f.write(b"\n")
def user_wait(self):
key = self.stdscr.getch()
if key == ord('q'):
self.running = False
self.waiting = False
def error(self, s):
if DEBUG:
self.errorlog.append(s)
def do_text(self, text):
text = text.replace("|", "...") # Tsukihime "…"
self.stdscr.addstr(text)
self.stdscr.refresh()
def clear(self):
self.stdscr.clear()
self.stdscr.refresh()
def do_EOP(self):
self.waiting = True
self.clearwait = True
def do_end(self):
self.running = False
def do_br(self):
# print an end-of-line
self.stdscr.addstr("\n")
self.stdscr.refresh()
def do_select(self, *args):
self.do_goto( self.selection(args) )
self.clear()
def do_selgosub(self, *args):
self.do_gosub( self.selection(args) )
self.clear()
def selection(self, args):
labels = []
for i in xrange( len(args)//2 ):
txt = args[(i*2)]
labels.append( args[(i*2)+1] )
txt = "{0}: {1}\n".format(i+1, txt)
self.spectext(txt)
# User choice
got = self.get_choice( range( 1, len(labels)+1 ) )
return labels[got-1]
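# Illustrative call: args alternate menu text and jump label, exactly as
# unpacked above, e.g.
#   self.do_select("Attack", "*attack_label", "Flee", "*flee_label")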
def do_btnwait(self, var):
# We emulate it, and don't even care about button definitions
self.spectext("Button wait mode. (Probably a menu.)\n")
self.spectext("Input a number between 0 and 9 please.\n")
self.spectext("9 will be interpreted as '-1', which represents a right-click.\n")
n = self.get_choice( range(10) )
if n == 9:
n = -1
self.do_mov(var, n)
def spectext(self, s):
s = s.replace("|", "...") # Tsukihime "…"
self.stdscr.addstr( s, curses.color_pair(1) )
self.stdscr.refresh()
def get_choice(self, accepted_numbers):
self.spectext( str(accepted_numbers) )
# Loop and a half
while True:
key = self.stdscr.getch()
n = key-ord('0')
#~ self.spectext( str(n) )
if n in accepted_numbers:
return n
#~ class WordBreaker(object):
#~ def __init__(self, fileobj, width, pos = 0):
#~ self.out = fileobj
#~ self.width = width
#~ self.pos = pos
#~
#~
#~ def write(self, text):
#~ if text == "\n":
#~
#~
#~
def main():
if len(sys.argv) != 2:
print("Usage: pyonscr_curses.py FILENAME")
exit(1)
curses.wrapper(run_interpreter, sys.argv[1])
def run_interpreter(stdscr, filename):
interpreter = CursesInterpreter(stdscr, filename)
interpreter.run()
if __name__ == '__main__':
main()
|
uvthenfuv/npynscr
|
pyonscr_curses.py
|
Python
|
gpl-2.0
| 5,379
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute API that proxies via Cells Service."""
from oslo import messaging
from nova import availability_zones
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import utils as cells_utils
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova import rpc
check_instance_state = compute_api.check_instance_state
wrap_check_policy = compute_api.wrap_check_policy
check_policy = compute_api.check_policy
check_instance_lock = compute_api.check_instance_lock
check_instance_cell = compute_api.check_instance_cell
class ComputeRPCAPIRedirect(object):
# NOTE(comstud): These are a list of methods where the cells_rpcapi
# and the compute_rpcapi methods have the same signatures. This
# is for transitioning to a common interface where we can just
# swap out the compute_rpcapi class with the cells_rpcapi class.
cells_compatible = ['start_instance', 'stop_instance',
'reboot_instance', 'suspend_instance',
'resume_instance', 'terminate_instance',
'soft_delete_instance', 'pause_instance',
'unpause_instance', 'revert_resize',
'confirm_resize', 'reset_network',
'inject_network_info',
'backup_instance', 'snapshot_instance',
'rebuild_instance']
def __init__(self, cells_rpcapi):
self.cells_rpcapi = cells_rpcapi
def __getattr__(self, key):
if key in self.cells_compatible:
return getattr(self.cells_rpcapi, key)
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
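# Usage sketch: with redirect = ComputeRPCAPIRedirect(cells_rpcapi),
# redirect.start_instance(...) forwards to cells_rpcapi.start_instance, while
# any attribute outside cells_compatible resolves to a no-op wrapper that
# returns None.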
class SchedulerRPCAPIRedirect(object):
def __init__(self, cells_rpcapi_obj):
self.cells_rpcapi = cells_rpcapi_obj
def __getattr__(self, key):
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
class ConductorTaskRPCAPIRedirect(object):
# NOTE(comstud): These are a list of methods where the cells_rpcapi
# and the compute_task_rpcapi methods have the same signatures. This
# is for transitioning to a common interface where we can just
# swap out the compute_task_rpcapi class with the cells_rpcapi class.
cells_compatible = ['build_instances', 'resize_instance',
'live_migrate_instance']
def __init__(self, cells_rpcapi_obj):
self.cells_rpcapi = cells_rpcapi_obj
def __getattr__(self, key):
if key in self.cells_compatible:
return getattr(self.cells_rpcapi, key)
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
class RPCClientCellsProxy(object):
def __init__(self, target, version_cap):
super(RPCClientCellsProxy, self).__init__()
self.target = target
self.version_cap = version_cap
self._server = None
self._version = None
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def prepare(self, **kwargs):
ret = type(self)(self.target, self.version_cap)
ret.cells_rpcapi = self.cells_rpcapi
server = kwargs.pop('server', None)
version = kwargs.pop('version', None)
if kwargs:
raise ValueError("Unsupported kwargs: %s" % kwargs.keys())
if server:
ret._server = server
if version:
ret._version = version
return ret
def _check_version_cap(self, version):
client = rpc.get_client(self.target, version_cap=self.version_cap)
if not client.can_send_version(version):
raise messaging.RPCVersionCapError(version=version,
version_cap=self.version_cap)
def _make_msg(self, method, **kwargs):
version = self._version if self._version else self.target.version
self._check_version_cap(version)
return {
'method': method,
'namespace': None,
'version': version,
'args': kwargs
}
def _get_topic(self):
if self._server is not None:
return '%s.%s' % (self.target.topic, self._server)
else:
return self.target.topic
def can_send_version(self, version):
client = rpc.get_client(self.target, version_cap=self.version_cap)
return client.can_send_version(version)
def cast(self, ctxt, method, **kwargs):
msg = self._make_msg(method, **kwargs)
topic = self._get_topic()
self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg, topic)
def call(self, ctxt, method, **kwargs):
msg = self._make_msg(method, **kwargs)
topic = self._get_topic()
return self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg,
topic, call=True)
class ComputeRPCProxyAPI(compute_rpcapi.ComputeAPI):
"""Class used to substitute Compute RPC API that will proxy
via the cells manager to a compute manager in a child cell.
"""
def get_client(self, target, version_cap, serializer):
return RPCClientCellsProxy(target, version_cap)
class ComputeCellsAPI(compute_api.API):
def __init__(self, *args, **kwargs):
super(ComputeCellsAPI, self).__init__(*args, **kwargs)
self.cells_rpcapi = cells_rpcapi.CellsAPI()
# Avoid casts/calls directly to compute
self.compute_rpcapi = ComputeRPCAPIRedirect(self.cells_rpcapi)
# Redirect scheduler run_instance to cells.
self.scheduler_rpcapi = SchedulerRPCAPIRedirect(self.cells_rpcapi)
# Redirect conductor build_instances to cells
self._compute_task_api = ConductorTaskRPCAPIRedirect(self.cells_rpcapi)
self._cell_type = 'api'
def _cast_to_cells(self, context, instance, method, *args, **kwargs):
instance_uuid = instance['uuid']
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
self.cells_rpcapi.cast_compute_api_method(context, cell_name,
method, instance_uuid, *args, **kwargs)
def _call_to_cells(self, context, instance, method, *args, **kwargs):
instance_uuid = instance['uuid']
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
return self.cells_rpcapi.call_compute_api_method(context, cell_name,
method, instance_uuid, *args, **kwargs)
def _check_requested_networks(self, context, requested_networks,
max_count):
"""Override compute API's checking of this. It'll happen in
child cell
"""
return max_count
def create(self, *args, **kwargs):
"""We can use the base functionality, but I left this here just
for completeness.
"""
return super(ComputeCellsAPI, self).create(*args, **kwargs)
def update(self, context, instance, **kwargs):
"""Update an instance."""
cell_name = instance['cell_name']
if cell_name and self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method='update')
rv = super(ComputeCellsAPI, self).update(context,
instance, **kwargs)
kwargs_copy = kwargs.copy()
# We need to skip vm_state/task_state updates as the child
# cell is authoritative for these. The admin API does
# support resetting state, but it has been converted to use
# Instance.save() with an appropriate kwarg.
kwargs_copy.pop('vm_state', None)
kwargs_copy.pop('task_state', None)
if kwargs_copy:
try:
self._cast_to_cells(context, instance, 'update',
**kwargs_copy)
except exception.InstanceUnknownCell:
pass
return rv
def soft_delete(self, context, instance):
self._handle_cell_delete(context, instance, 'soft_delete')
def delete(self, context, instance):
self._handle_cell_delete(context, instance, 'delete')
def _handle_cell_delete(self, context, instance, method_name):
if not instance['cell_name']:
delete_type = 'soft' if method_name == 'soft_delete' else 'hard'
self.cells_rpcapi.instance_delete_everywhere(context,
instance, delete_type)
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid']))
# NOTE(danms): If we try to delete an instance with no cell,
# there isn't anything to salvage, so we can hard-delete here.
super(ComputeCellsAPI, self)._local_delete(context, instance, bdms,
method_name,
self._do_delete)
return
method = getattr(super(ComputeCellsAPI, self), method_name)
method(context, instance)
@check_instance_cell
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
super(ComputeCellsAPI, self).restore(context, instance)
self._cast_to_cells(context, instance, 'restore')
@check_instance_cell
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
super(ComputeCellsAPI, self).force_delete(context, instance)
self._cast_to_cells(context, instance, 'force_delete')
@check_instance_cell
def evacuate(self, context, instance, *args, **kwargs):
"""Evacuate the given instance with the provided attributes."""
super(ComputeCellsAPI, self).evacuate(context, instance, *args,
**kwargs)
self._cast_to_cells(context, instance, 'evacuate', *args, **kwargs)
@check_instance_cell
def add_fixed_ip(self, context, instance, *args, **kwargs):
"""Add fixed_ip from specified network to given instance."""
super(ComputeCellsAPI, self).add_fixed_ip(context, instance,
*args, **kwargs)
self._cast_to_cells(context, instance, 'add_fixed_ip',
*args, **kwargs)
@check_instance_cell
def remove_fixed_ip(self, context, instance, *args, **kwargs):
"""Remove fixed_ip from specified network to given instance."""
super(ComputeCellsAPI, self).remove_fixed_ip(context, instance,
*args, **kwargs)
self._cast_to_cells(context, instance, 'remove_fixed_ip',
*args, **kwargs)
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
# FIXME(comstud): Cache this?
# Also: only calling super() to get state/policy checking
super(ComputeCellsAPI, self).get_diagnostics(context, instance)
return self._call_to_cells(context, instance, 'get_diagnostics')
@check_instance_cell
def rescue(self, context, instance, rescue_password=None,
rescue_image_ref=None):
"""Rescue the given instance."""
super(ComputeCellsAPI, self).rescue(context, instance,
rescue_password=rescue_password,
rescue_image_ref=rescue_image_ref)
self._cast_to_cells(context, instance, 'rescue',
rescue_password=rescue_password,
rescue_image_ref=rescue_image_ref)
@check_instance_cell
def unrescue(self, context, instance):
"""Unrescue the given instance."""
super(ComputeCellsAPI, self).unrescue(context, instance)
self._cast_to_cells(context, instance, 'unrescue')
@wrap_check_policy
@check_instance_cell
def shelve(self, context, instance):
"""Shelve the given instance."""
self._cast_to_cells(context, instance, 'shelve')
@wrap_check_policy
@check_instance_cell
def shelve_offload(self, context, instance):
"""Offload the shelved instance."""
super(ComputeCellsAPI, self).shelve_offload(context, instance)
self._cast_to_cells(context, instance, 'shelve_offload')
@wrap_check_policy
@check_instance_cell
def unshelve(self, context, instance):
"""Unshelve the given instance."""
super(ComputeCellsAPI, self).unshelve(context, instance)
self._cast_to_cells(context, instance, 'unshelve')
@check_instance_cell
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
super(ComputeCellsAPI, self).set_admin_password(context, instance,
password=password)
self._cast_to_cells(context, instance, 'set_admin_password',
password=password)
@wrap_check_policy
@check_instance_cell
def get_vnc_console(self, context, instance, console_type):
"""Get a url to a VNC Console."""
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self._call_to_cells(context, instance,
'get_vnc_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance['uuid'])
return {'url': connect_info['access_url']}
@wrap_check_policy
@check_instance_cell
def get_spice_console(self, context, instance, console_type):
"""Get a url to a SPICE Console."""
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self._call_to_cells(context, instance,
'get_spice_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance['uuid'])
return {'url': connect_info['access_url']}
@wrap_check_policy
@check_instance_cell
def get_rdp_console(self, context, instance, console_type):
"""Get a url to a RDP Console."""
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self._call_to_cells(context, instance,
'get_rdp_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_cell
def get_console_output(self, context, instance, *args, **kwargs):
"""Get console output for an an instance."""
# NOTE(comstud): Calling super() just to get policy check
super(ComputeCellsAPI, self).get_console_output(context, instance,
*args, **kwargs)
return self._call_to_cells(context, instance, 'get_console_output',
*args, **kwargs)
def lock(self, context, instance):
"""Lock the given instance."""
super(ComputeCellsAPI, self).lock(context, instance)
self._cast_to_cells(context, instance, 'lock')
def unlock(self, context, instance):
"""Unlock the given instance."""
super(ComputeCellsAPI, self).unlock(context, instance)
self._cast_to_cells(context, instance, 'unlock')
@check_instance_cell
def _attach_volume(self, context, instance, volume_id, device,
disk_bus, device_type):
"""Attach an existing volume to an existing instance."""
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
return self._call_to_cells(context, instance, 'attach_volume',
volume_id, device, disk_bus, device_type)
@check_instance_cell
def _detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
self.volume_api.check_detach(context, volume)
self._cast_to_cells(context, instance, 'detach_volume',
volume)
@wrap_check_policy
@check_instance_cell
def associate_floating_ip(self, context, instance, address):
"""Makes calls to network_api to associate_floating_ip.
:param address: is a string floating ip address
"""
self._cast_to_cells(context, instance, 'associate_floating_ip',
address)
@check_instance_cell
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
super(ComputeCellsAPI, self).delete_instance_metadata(context,
instance, key)
self._cast_to_cells(context, instance, 'delete_instance_metadata',
key)
@wrap_check_policy
@check_instance_cell
def update_instance_metadata(self, context, instance,
metadata, delete=False):
rv = super(ComputeCellsAPI, self).update_instance_metadata(context,
instance, metadata, delete=delete)
try:
self._cast_to_cells(context, instance,
'update_instance_metadata',
metadata, delete=delete)
except exception.InstanceUnknownCell:
pass
return rv
def get_migrations(self, context, filters):
return self.cells_rpcapi.get_migrations(context, filters)
class ServiceProxy(object):
def __init__(self, obj, cell_path):
self._obj = obj
self._cell_path = cell_path
@property
def id(self):
return cells_utils.cell_with_item(self._cell_path, self._obj.id)
def __getitem__(self, key):
if key == 'id':
return self.id
return getattr(self._obj, key)
def __getattr__(self, key):
return getattr(self._obj, key)
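# Sketch: wrapping a Service whose id is 42 with cell_path 'parent!child'
# makes proxy.id read as the combined 'parent!child@42' form (the same
# 'path!to!cell@item' convention that split_cell_and_item undoes below),
# while all other attributes are delegated to the wrapped object.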
class HostAPI(compute_api.HostAPI):
"""HostAPI() class for cells.
Implements host management related operations. Works by setting the
RPC API used by the base class to proxy via the cells manager to the
compute manager in the correct cell. Hosts specified with cells will
need to be of the format 'path!to!cell@host'.
DB methods in the base class are also overridden to proxy via the
cells manager.
"""
def __init__(self):
super(HostAPI, self).__init__(rpcapi=ComputeRPCProxyAPI())
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Cannot check this in API cell. This will be checked in the
target child cell.
"""
pass
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
return self.cells_rpcapi.get_host_uptime(context, host_name)
def service_get_all(self, context, filters=None, set_zones=False):
if filters is None:
filters = {}
if 'availability_zone' in filters:
zone_filter = filters.pop('availability_zone')
set_zones = True
else:
zone_filter = None
services = self.cells_rpcapi.service_get_all(context,
filters=filters)
if set_zones:
services = availability_zones.set_availability_zones(context,
services)
if zone_filter is not None:
services = [s for s in services
if s['availability_zone'] == zone_filter]
# NOTE(johannes): Cells adds the cell path as a prefix to the id
# to uniquely identify the service amongst all cells. Unfortunately
# the object model makes the id an integer. Use a proxy here to
# work around this particular problem.
# Split out the cell path first
cell_paths = []
for service in services:
cell_path, id = cells_utils.split_cell_and_item(service['id'])
service['id'] = id
cell_paths.append(cell_path)
# NOTE(danms): Currently cells does not support objects as
# return values, so just convert the db-formatted service objects
# to new-world objects here
services = obj_base.obj_make_list(context,
service_obj.ServiceList(),
service_obj.Service,
services)
# Now wrap it in the proxy with the original cell_path
services = [ServiceProxy(s, c) for s, c in zip(services, cell_paths)]
return services
def service_get_by_compute_host(self, context, host_name):
db_service = self.cells_rpcapi.service_get_by_compute_host(context,
host_name)
# NOTE(danms): Currently cells does not support objects as
# return values, so just convert the db-formatted service objects
# to new-world objects here
if db_service:
return service_obj.Service._from_db_object(context,
service_obj.Service(),
db_service)
def service_update(self, context, host_name, binary, params_to_update):
"""Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
:param host_name: the name of the host machine that the service is
running
:param binary: The name of the executable that the service runs as
:param params_to_update: eg. {'disabled': True}
"""
db_service = self.cells_rpcapi.service_update(
context, host_name, binary, params_to_update)
# NOTE(danms): Currently cells does not support objects as
# return values, so just convert the db-formatted service objects
# to new-world objects here
if db_service:
return service_obj.Service._from_db_object(context,
service_obj.Service(),
db_service)
def service_delete(self, context, service_id):
"""Deletes the specified service."""
self.cells_rpcapi.service_delete(context, service_id)
def instance_get_all_by_host(self, context, host_name):
"""Get all instances by host. Host might have a cell prepended
to it, so we'll need to strip it out. We don't need to proxy
this call to cells, as we have instance information here in
the API cell.
"""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
instances = super(HostAPI, self).instance_get_all_by_host(context,
host_name)
if cell_name:
instances = [i for i in instances
if i['cell_name'] == cell_name]
return instances
def task_log_get_all(self, context, task_name, beginning, ending,
host=None, state=None):
"""Return the task logs within a given range from cells,
optionally filtering by the host and/or state. For cells, the
host should be a path like 'path!to!cell@host'. If no @host
is given, only task logs from a particular cell will be returned.
"""
return self.cells_rpcapi.task_log_get_all(context,
task_name,
beginning,
ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Get a compute node from a particular cell by its integer ID.
compute_id should be in the format of 'path!to!cell@ID'.
"""
return self.cells_rpcapi.compute_node_get(context, compute_id)
def compute_node_get_all(self, context):
return self.cells_rpcapi.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.cells_rpcapi.compute_node_get_all(context,
hypervisor_match=hypervisor_match)
def compute_node_statistics(self, context):
return self.cells_rpcapi.compute_node_stats(context)
class InstanceActionAPI(compute_api.InstanceActionAPI):
"""InstanceActionAPI() class for cells."""
def __init__(self):
super(InstanceActionAPI, self).__init__()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def actions_get(self, context, instance):
return self.cells_rpcapi.actions_get(context, instance)
def action_get_by_request_id(self, context, instance, request_id):
return self.cells_rpcapi.action_get_by_request_id(context, instance,
request_id)
def action_events_get(self, context, instance, action_id):
return self.cells_rpcapi.action_events_get(context, instance,
action_id)
|
nkrinner/nova
|
nova/compute/cells_api.py
|
Python
|
apache-2.0
| 26,829
|
class classproperty(object):
# adds @classproperty decorator
def __init__(self, f):
self.f = f
def __get__(self, obj, owner):
return self.f(owner)
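# Usage sketch (hypothetical class):
#   class Page(object):
#       @classproperty
#       def table_name(cls):
#           return cls.__name__.lower()
#   Page.table_name  # -> 'page', available without an instance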
def uncamel(x):
"""
from: http://stackoverflow.com/a/19940888, by TehTris
"""
final = ''
for item in x:
if item.isupper():
final += " " + item
else:
final += item
if final[0] == "_":
final = final[1:]
return final
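# Examples: uncamel("SilverFlask") -> "Silver Flask";
# uncamel("_SilverFlask") -> "Silver Flask" (leading "_"/" " stripped)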
|
wolfv/SilverFlask
|
silverflask/helper.py
|
Python
|
bsd-2-clause
| 469
|
import sys, os
from django import template
from django.conf import settings
from nevede.vendors.clevercss import convert, ParserError, EvalException
register = template.Library()
@register.filter(name='clevercss')
def do_clevercss(fn):
'''
Create css from ccss and return its path
Requires settings.MEDIA_ROOT, settings.MEDIA_URL
'''
css_name = fn.rsplit('.', 1)[0]
css_url = os.path.join(settings.MEDIA_URL, 'css', css_name +'.css')
fn = os.path.join(settings.MEDIA_ROOT, 'css', fn)
target = fn.rsplit('.', 1)[0] + '.css'
if fn == target:
sys.stderr.write('Error: same name for source and target file'
' "%s".' % fn)
sys.exit(2)
# if the compiled css already exists and is newer than the ccss source
if os.path.exists(target) and os.path.getmtime(fn) < os.path.getmtime(target):
# do nothing
return css_url
src = file(fn)
try:
try:
converted = convert(src.read())
except (ParserError, EvalException), e:
sys.stderr.write('Error in file %s: %s\n' % (fn, e))
sys.exit(1)
dst = file(target, 'w')
try:
dst.write(converted)
finally:
dst.close()
finally:
src.close()
return css_url
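# Usage sketch in a Django template (hypothetical file name), assuming
# MEDIA_ROOT/css/style.ccss exists:
#   {% load clevercss_tags %}
#   <link rel="stylesheet" href="{{ "style.ccss"|clevercss }}" />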
|
vad/django-nevede
|
nevede/meetings/templatetags/clevercss_tags.py
|
Python
|
agpl-3.0
| 1,267
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from main import *
import time
import random
import urllib2
import json
#import os
def genToken(L):
CharLib = map(chr,range(97,123)+range(65,91)+range(48,58))
Str = []
for i in range(L):
Str += random.sample(CharLib,1)
return ''.join(Str)
# "encrypt" the UID by prefixing it with a static key
def encryptUID(id):
key = 'db884468559f4c432bf1c1775f3dc9da'
return key + str(id)
# "decrypt" the UID
def decryptUID(uStr):
return int(uStr.split('a')[1])
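# Round-trip sketch: encryptUID(42) -> 'db884468559f4c432bf1c1775f3dc9da42';
# decryptUID works because 'a' occurs only as the key's final character, so
# split('a')[1] recovers the digits-only id.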
# read a cookie
def getCookie(name):
ck = web.cookies()
if ck.get(name):
return ck.get(name)
else:
return None
# create the session
def genSession(SID,Username,ShowName,LastIP,LastLocation,LastDate,Token,Lstat,kpl):
LoginDate = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
Expiry = 86400
session = web.config._session
session.isLogin = True
session.SID = SID
session.Username = Username
session.ShowName = ShowName
session.LastLocation = LastLocation
# did the login come back as normal?
if Lstat == 'ok':
session.Lstat = 'normal'
elif Lstat == 'other':
session.Lstat = 'Your last login was from another computer or another browser'
else:
session.Lstat = 'unknown'
# collect client information
#print 'HTTP_ENV: '
#print web.ctx.environ # origin address
#print 'HTTP_REFERER: '
#print web.ctx.env.get('HTTP_REFERER', 'http://google.com')
#LoginHost = web.ctx.ip # both methods can obtain the client IP
LoginHost = web.ctx.environ['REMOTE_ADDR']
Agent = web.ctx.environ['HTTP_USER_AGENT']
# test lookup
#LoginHost = '119.122.181.82'
# determine the location of this login
Location = 'Localhost'
ip = LoginHost.split('.')
if ip[0]+ip[1] in ['17216','192168','1270'] or ip[0] == '10':
Location = 'local LAN'
else:
# resolve via a public lookup service here
url = "http://int.dpool.sina.com.cn/iplookup/iplookup.php?format=json&ip=" + LoginHost
response = urllib2.urlopen(url)
rt = json.load(response)
if rt['ret'] == 1 :
Location = rt['province'] + rt['city'] + ' [' + rt['isp'] + ']'
else:
Location = 'unknown'
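# shape of the lookup response, inferred from the fields used above (illustrative):
#   {"ret": 1, "province": "...", "city": "...", "isp": "..."}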
# write the login log to the database
if not Token:
# Token tells whether this login was verified by username/password or by an existing token
Token = genToken(32)
if kpl == 'no':
Expiry = 0 # don't remember the login: make the stored token's expiry equal to the login time
#db.query('''update users set loginhost="%s",logindate="%s" where id="%s"''' % (LoginHost,LoginDate,SID))
db.query('''insert into login_logs (uid,ip,location,agent,token,expiry) values ("%s","%s","%s","%s","%s",NOW() + INTERVAL %d SECOND)''' % (SID,LoginHost,Location,Agent,Token,Expiry))
db.query('''update users set loginfo=(select id from login_logs where uid="%s" and ip="%s" and token="%s" and status="yes" order by id desc limit 1) where id="%s"''' % (SID,LoginHost,Token,SID))
    # Write the token to the server-side session
session.Token = Token
    # Write uid and token to client-side cookies
    #web.setcookie('Username', Username, Expiry)
    # Store the uid disguised as Username in the cookie
web.setcookie('Username', encryptUID(SID), Expiry)
web.setcookie('Token', Token, Expiry)
    # Write the last login date and IP to the session
if LastDate:
        # Format the date for display; None means this is the user's first login
session.LastDate = time.strftime('%Y-%m-%d %H:%M:%S',time.strptime(str(LastDate),'%Y-%m-%d %H:%M:%S'))
else:
        session.LastDate = 'First login'
session.LastIP = LastIP
    # Design notes on persisting the current login date/IP:
    # 1. On successful login, read the previous login time/IP from the DB into
    #    the session, then immediately update the DB with this login's info.
    # 2. Alternatively, keep this login's time/IP only in the session and write
    #    it to the DB when the user logs out normally.
    # 3. The two approaches record opposite data. Option 2 fails whenever the
    #    user does not exit cleanly (the DB would never be updated), hence option 1.
    # By Luxiaok 2014-04-07 22:49:00
    # The DB operations on a successful login really ought to have error handling
    # return True
class Login:
def GET(self,*args):
        # The URL uses several regex patterns, so absorb surplus arguments; it is
        # still unclear why the url regex passes an extra argument into GET.
        # The surplus argument is just the matched URL suffix.
#print "Self =",self
#print "Args =",args
uid = getCookie('Username')
token = getCookie('Token')
sid = getCookie('xk_session')
HTTP_REFERER = getCookie('HTTP_REFERER')
#print 'Login referer from cookie: ',HTTP_REFERER
if uid and token:
#print 'uid =',uid
#print 'token =',token
#print 'sid =',sid
uid = decryptUID(uid)
try:
g = db.query('''
select U.id,U.username,U.nickname,U.loginfo,L.id as LID,L.ip,L.date from login_logs as L
left join users as U on L.uid=U.id
where U.id="%s" and L.token="%s" and L.status="yes" and L.expiry>now() and U.status="yes"''' % (uid,token))
except Exception,e:
print "MySQL Error: ",Exception,":",e
return "Database Error"
if g:
d = g[0]
Username = d.username
                Lstat = 'ok'  # feedback flag for abnormal logins
if not d.nickname:
ShowName = d.username
else:
ShowName = d.nickname
if d.loginfo != d.LID:
g2 = db.query('''select L.ip,L.date from users as U left join login_logs as L on U.loginfo=L.id where U.id="%s"''' % uid)
d = g2[0]
                    # Could also return a notice that the last login was from elsewhere
                    Lstat = "other"  # last login was on another browser, host or location
LastIP = d.ip
LastDate = d.date
                genSession(uid,Username,ShowName,LastIP,None,LastDate,token,Lstat,kpl='yes')  # no stored location on token logins; the original call omitted LastLocation and misaligned the arguments
if HTTP_REFERER:
web.setcookie('HTTP_REFERER', '88888888', -1000)
return web.seeother(HTTP_REFERER)
else:
return web.seeother("/dashboard")
else:
                # If the stored token status is "no" (the user logged out normally
                # and the session is invalid), clear the local cookies
web.setcookie('Username', '88888888', -1)
web.setcookie('Token', '88888888', -1)
if getLogin():
            # Already logged in
return web.seeother("/dashboard")
else:
            # Not logged in
return render.login()
def POST(self,*args):
getPost = web.input()
        #kpl = getPost.kpl  # "keep me logged in" flag
try:
getSQL = db.query('''select u.id,u.username,u.password,u.nickname,u.status,L.ip,L.location,L.date from users as u left join login_logs as L on u.loginfo=L.id where username="%s" and password=md5("%s")''' % (getPost.username,getPost.password))
except:
            # Server (database) error
            web.header('Content-Type', 'application/json')
            return json.dumps({'code':-1,'msg':'Database error'})
if getSQL:
            # Fetch the login data
getData = getSQL[0]
SID = getData['id']
Username = getData['username']
Status = getData['status']
ShowName = getData['nickname']
LastDate = getData['date']
LastIP = getData['ip']
LastLocation = getData['location']
if not ShowName:
ShowName = Username
if Status == 'yes':
                # Login requirements met: write the login data into the session
genSession(SID,Username,ShowName,LastIP,LastLocation,LastDate,False,Lstat='ok',kpl=getPost.kpl)
#HTTP_REFERER = getCookie('HTTP_REFERER')
#if HTTP_REFERER:
# web.setcookie('HTTP_REFERER', '88888888', -1000)
# return web.seeother(HTTP_REFERER)
#else:
# web.setcookie('HTTP_REFERER', '88888888', -1000)
# return web.seeother("/dashboard")
web.header('Content-Type', 'application/json')
                return json.dumps({'code':0,'msg':'Success'})  # login succeeded
else:
                # The user is disabled
                web.header('Content-Type', 'application/json')
                return json.dumps({'code':-2,'msg':'User is disabled'})
else:
            # Wrong username or password
            web.header('Content-Type', 'application/json')
            return json.dumps({'code':-3,'msg':'Wrong username or password'})
class Logout:
def GET(self):
uid = getCookie('Username')
token = getCookie('Token')
sidName = getCookie('xk_session')
if uid and token and sidName:
uid = decryptUID(uid)
#sfile = 'session/' + sidName
            # Delete the session file; the kill() method seems to remove the session ID file anyway
#try:
# os.remove(sfile)
#except Exception,e:
# print "Session File Error: ",Exception,":",e
            # Set the token's status to "no" in the database
try:
db.query('''update login_logs set status="no" where uid="%s" and token="%s"''' % (uid,token))
except Exception,e:
print "MySQL Error: ",Exception,":",e
web.setcookie('Username', '88888888', -1)
web.setcookie('Token', '88888888', -1)
web.config._session.kill()
raise web.seeother("/")
# Test page
class Test:
def GET(self):
if getLogin():
SID = getLogin()['SID']
ShowName = getLogin()['ShowName']
#print "ShowName: " + ShowName
return render.test(ShowName=ShowName,uid=SID)
else:
return web.seeother("/login")
|
heafod/SaltAdmin
|
view/index.py
|
Python
|
gpl-2.0
| 10,418
|
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.layers import Flatten, Reshape
from keras import regularizers
from plotly import offline as py
import plotly.graph_objs as go
from plotly import tools
py.init_notebook_mode()
# Loads the training and test data sets (ignoring class labels)
(x_train, _), (x_test, _) = mnist.load_data()
# Scales the training and test data to range between 0 and 1.
max_value = float(x_train.max())
x_train = x_train.astype('float32') / max_value
x_test = x_test.astype('float32') / max_value
# Reshape
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
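# Each 28x28 image is now a flat 784-dimensional vector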
# x_train.shape
# Autoencoder
input_dim = x_train.shape[1]
encoding_dim = 32
compression_factor = float(input_dim) / encoding_dim
print("Compression factor: %s" % compression_factor)
autoencoder = Sequential()
autoencoder.add(
Dense(encoding_dim, input_shape=(input_dim,), activation='relu')
)
autoencoder.add(
Dense(input_dim, activation='sigmoid')
)
autoencoder.summary()
input_img = Input(shape=(input_dim,))
encoder_layer = autoencoder.layers[0]
encoder = Model(input_img, encoder_layer(input_img))
encoder.summary()
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train,
epochs=50,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
num_images = 10
np.random.seed(42)
random_test_images = np.random.randint(x_test.shape[0], size=num_images)
encoded_imgs = encoder.predict(x_test)
decoded_imgs = autoencoder.predict(x_test)
encoded_imgs[0]  # bare expressions: these display output only in a notebook session
decoded_imgs[0]
fig = tools.make_subplots(rows=1, cols=3, print_grid=False)
t1 = go.Heatmap(z=x_test[random_test_images[0]].reshape(28, 28), showscale=False)
fig.append_trace(t1, 1, 1)
# fig.append_trace(trace2, 1, 2)
# fig.append_trace(trace3, 1, 3)
for i in map(str,range(1, 4)):
y = 'yaxis'+ i
x = 'xaxis' + i
fig['layout'][y].update(autorange='reversed',
showticklabels=False, ticks='', scaleanchor = 'x')
fig['layout'][x].update(showticklabels=False, ticks='')
fig['layout'].update(height=600)
py.iplot(fig)
|
Christoph/tag-connect
|
keyvis_add/ml.py
|
Python
|
mit
| 2,394
|
#!/usr/bin/env python
import sys, redis, json, re, struct, time, socket
zabbix_host = '127.0.0.1' # Zabbix Server IP
zabbix_port = 10051 # Zabbix Server Port
hostname = 'redis.srv.name' # Name of monitored server like it shows in zabbix web ui display
redis_port = 6379 # Redis Server port
class Metric(object):
def __init__(self, host, key, value, clock=None):
self.host = host
self.key = key
self.value = value
self.clock = clock
def __repr__(self):
if self.clock is None:
return 'Metric(%r, %r, %r)' % (self.host, self.key, self.value)
return 'Metric(%r, %r, %r, %r)' % (self.host, self.key, self.value, self.clock)
def send_to_zabbix(metrics, zabbix_host='127.0.0.1', zabbix_port=10051):
j = json.dumps
metrics_data = []
for m in metrics:
clock = m.clock or ('%d' % time.time())
metrics_data.append(('{"host":%s,"key":%s,"value":%s,"clock":%s}') % (j(m.host), j(m.key), j(m.value), j(clock)))
json_data = ('{"request":"sender data","data":[%s]}') % (','.join(metrics_data))
data_len = struct.pack('<Q', len(json_data))
packet = 'ZBXD\x01'+ data_len + json_data
#print packet
#print ':'.join(x.encode('hex') for x in packet)
try:
zabbix = socket.socket()
zabbix.connect((zabbix_host, zabbix_port))
zabbix.sendall(packet)
resp_hdr = _recv_all(zabbix, 13)
if not resp_hdr.startswith('ZBXD\x01') or len(resp_hdr) != 13:
print ('Wrong zabbix response')
return False
resp_body_len = struct.unpack('<Q', resp_hdr[5:])[0]
resp_body = zabbix.recv(resp_body_len)
zabbix.close()
resp = json.loads(resp_body)
#print resp
if resp.get('response') != 'success':
print ('Got error from Zabbix: %s' % resp)
return False
return True
except:
print ('Error while sending data to Zabbix')
return False
def _recv_all(sock, count):
buf = ''
while len(buf)<count:
chunk = sock.recv(count-len(buf))
if not chunk:
return buf
buf += chunk
return buf
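# Hedged usage sketch (not in the original script), assuming the Zabbix server
# has a trapper item 'redis[ping]' configured for this host:
#   send_to_zabbix([Metric('redis.srv.name', 'redis[ping]', '1')],
#                  zabbix_host, zabbix_port)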
if len(sys.argv) <= 2:
host = (len(sys.argv) >= 2) and sys.argv[1] or hostname
client = redis.StrictRedis(host=host, port=redis_port)
server_info = client.info()
a = []
for i in server_info:
a.append(Metric(host, ('redis[%s]' % i), server_info[i]))
keys = client.keys('*')
llensum = 0
for key in keys:
llensum += client.llen(key)
a.append(Metric(host, 'redis[llenall]', llensum))
send_to_zabbix(a, zabbix_host, zabbix_port)
else:
host = (len(sys.argv) >= 2) and sys.argv[1] or 'localhost'
metric = (len(sys.argv) >= 3) and sys.argv[2]
db = (len(sys.argv) >= 4) and sys.argv[3] or 'none'
client = redis.StrictRedis(host=host, port=redis_port)
server_info = client.info()
if metric:
if db and db in server_info.keys():
server_info['key_space_db_keys'] = server_info[db]['keys']
server_info['key_space_db_expires'] = server_info[db]['expires']
server_info['key_space_db_avg_ttl'] = server_info[db]['avg_ttl']
def llen():
print (client.llen(db))
def llensum():
keys = client.keys('*')
llensum = 0
for key in keys:
llensum += client.llen(key)
print (llensum)
def list_key_space_db():
if db in server_info:
print (db)
else:
print ('database_detect')
def default():
if metric in server_info.keys():
print (server_info[metric])
{
'llen': llen,
'llenall': llensum,
'list_key_space_db': list_key_space_db,
}.get(metric, default)()
else:
        print ('No metric selected')
|
physIQ/turnkey-riak
|
salt/states/profiles/logging/files/var/lib/zabbix/bin/zbx_redis_stats.py
|
Python
|
mit
| 3,657
|
import json
import falcon
from falcon.testing.srmock import StartResponseMock
from falcon.testing.helpers import create_environ
from optio.falcon.testing import app
from optio.falcon.helper import context_req_resp
from optio.falcon.helper import dump
from optio.falcon.helper import load
from optio.falcon.hack import get_context
'''
def create_environ(path='/', query_string='', protocol='HTTP/1.1',
scheme='http', host=DEFAULT_HOST, port=None,
headers=None, app='', body='', method='GET',
wsgierrors=None, file_wrapper=None):
'''
import inspect
import pdb
class HelperResource:
def on_get(self, req, resp, **params):
context = get_context(req)
assert context['request'] == {}
assert context['response'] == {}
def test_using_falcon_TestResource():
app = falcon.API()
app.add_route('/', falcon.testing.resource.TestResource())
r = {'query_string': '', 'headers': None, 'app': '', 'body': '', 'method': 'GET',}
env = create_environ(path='/', **r)
resp = StartResponseMock()
body = app(env, resp)
assert resp.status == falcon.HTTP_200
assert len(body) == 1 # TestResource generates random body
def test_context_basic_1():
routes = {'/' : HelperResource()}
before = [context_req_resp, load]
after = [dump]
app = falcon.API(before=before, after=after)
app.add_route('/', HelperResource())
r = {'query_string': '', 'headers': None, 'app': '', 'body': '{}', 'method': 'GET',}
env = create_environ(path='/', **r)
resp = StartResponseMock()
body = app(env, resp)
assert resp.status == falcon.HTTP_200
assert len(body) == 0
def test_context_basic_2():
routes = {'/' : HelperResource()}
before = [context_req_resp, load]
after = [dump]
app = falcon.API(before=before, after=after)
app.add_route('/', HelperResource())
r = {'query_string': '', 'headers': None, 'app': '', 'body': '', 'method': 'GET',}
env = create_environ(path='/', **r)
resp = StartResponseMock()
body = app(env, resp)
assert resp.status == falcon.HTTP_400
error = json.loads(body[0].decode('utf-8'))
assert error['title'] == 'Empty request body'
def test_context_basic_3():
routes = {'/' : HelperResource()}
before = [context_req_resp, load]
after = [dump]
app = falcon.API(before=before, after=after)
app.add_route('/', HelperResource())
r = {'query_string': '', 'headers': None, 'app': '', 'body': 'goober', 'method': 'GET',}
env = create_environ(path='/', **r)
resp = StartResponseMock()
body = app(env, resp)
assert resp.status == falcon.HTTP_753
error = json.loads(body[0].decode('utf-8'))
assert error['title'] == 'Malformed JSON'
def test_context_basic_4():
test_dict = {"keyx":"valuey"}
class TestResource:
def on_get(self, req, resp, **params):
context = get_context(req)
assert context['request'] == test_dict
context['response'] = context['request']
before = [context_req_resp, load]
after = [dump]
app = falcon.API(before=before, after=after)
app.add_route('/', TestResource())
r = {'query_string': '', 'headers': None, 'app': '', 'body': '{"keyx": "valuey"}', 'method': 'GET',}
env = create_environ(path='/', **r)
resp = StartResponseMock()
body = app(env, resp)
assert resp.status == falcon.HTTP_200
response_json = json.loads(body[0].decode('utf-8'))
assert response_json == test_dict
|
meantheory/optio
|
tests/test_falcon_helper.py
|
Python
|
mit
| 3,335
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import multiprocessing
import os
import sys
import mozlog
import grouping_formatter
here = os.path.split(__file__)[0]
servo_root = os.path.abspath(os.path.join(here, "..", ".."))
def wpt_path(*args):
return os.path.join(here, *args)
def servo_path(*args):
return os.path.join(servo_root, *args)
# Imports
sys.path.append(wpt_path("harness"))
from wptrunner import wptrunner, wptcommandline
def run_tests(paths=None, **kwargs):
if paths is None:
paths = {}
set_defaults(paths, kwargs)
mozlog.commandline.log_formatters["servo"] = \
(grouping_formatter.GroupingFormatter, "A grouping output formatter")
if len(kwargs["test_list"]) == 1:
wptrunner.setup_logging(kwargs, {"mach": sys.stdout})
else:
wptrunner.setup_logging(kwargs, {"servo": sys.stdout})
success = wptrunner.run_tests(**kwargs)
return 0 if success else 1
def set_defaults(paths, kwargs):
if kwargs["product"] is None:
kwargs["product"] = "servo"
if kwargs["config"] is None and "config" in paths:
kwargs["config"] = paths["config"]
if kwargs["include_manifest"] is None and "include_manifest" in paths:
kwargs["include_manifest"] = paths["include_manifest"]
if kwargs["binary"] is None:
bin_dir = "release" if kwargs["release"] else "debug"
bin_name = "servo"
if sys.platform == "win32":
bin_name += ".exe"
bin_path = servo_path("target", bin_dir, bin_name)
kwargs["binary"] = bin_path
if kwargs["processes"] is None:
kwargs["processes"] = multiprocessing.cpu_count()
kwargs["user_stylesheets"].append(servo_path("resources", "ahem.css"))
wptcommandline.check_args(kwargs)
def main(paths=None):
parser = wptcommandline.create_parser()
kwargs = vars(parser.parse_args())
return run_tests(paths, **kwargs)
|
hiei23/servo
|
tests/wpt/run.py
|
Python
|
mpl-2.0
| 2,080
|
# Copyright (c) 2019 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-IoticAgent/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from sys import exc_info as get_exc_info
from enum import Enum, unique
from threading import Event, Thread
import logging
logger = logging.getLogger(__name__)
DEBUG_ENABLED = logger.isEnabledFor(logging.DEBUG)
from IoticAgent.IOT import Client
from IoticAgent.IOT.Exceptions import LinkException, IOTSyncTimeout
from IoticAgent.Core.utils import validate_nonnegative_int
@unique
class RunContext(Enum):
"""Passed to `ThingRunner.on_exception`"""
ON_STARTUP = 'on_startup'
MAIN = 'main'
class ThingRunner(object):
"""Automates, starting, stopping and running of an Agent instance, either in the foreground (blocking) or
background. Create a subclass to use, e.g.:
::
class MyRunner(ThingRunner):
# only required if want to add own fields to class instance
def __init__(self, other, arguments, config=None):
                super(MyRunner, self).__init__(config=config)
# own class setup goes here
def main(self):
while True:
# do stuff here
# end on shutdown request
if self.wait_for_shutdown(1):
break
# runs in foreground, blocking
MyRunner('other', 'arguments', config='agent.ini').run()
Optionally implement `on_startup` or `on_shutdown` to perform one-off actions at the beginning/end of the agent's
run cycle.
"""
def __init__(self, config=None, retry_timeout=15):
"""
`config` (optional) IOT.Client config file to use (or None to try to use default location)
`retry_timeout` (int, optional): Number of seconds to wait before retrying. See also `on_exception`.
"""
self.__client = Client(config=config)
self.__shutdown = Event()
self.__bgthread = None
self.__retry_timeout = validate_nonnegative_int(retry_timeout, 'retry_timeout')
def run(self, background=False):
"""Runs `on_startup`, `main` and `on_shutdown`, blocking until finished, unless background is set."""
if self.__bgthread:
raise Exception('run has already been called (since last stop)')
self.__shutdown.clear()
if background:
self.__bgthread = Thread(target=self.__run, name=('bg_' + self.__client.agent_id))
self.__bgthread.daemon = True
self.__bgthread.start()
else:
self.__run()
def __run(self):
ctx = RunContext.ON_STARTUP
while True:
exc_occurred = False
try:
with self.__client:
if ctx == RunContext.ON_STARTUP:
logger.debug('Calling on_startup')
self.on_startup()
ctx = RunContext.MAIN
logger.debug('Calling main')
self.main()
except KeyboardInterrupt:
# Enable on_shutdown to run normally
pass
except:
exc_occurred = True
if self.__handle_exception(ctx):
logger.debug('Sleeping before retry')
if not self.wait_for_shutdown(self.__retry_timeout):
continue
# Normal run finished
break
self.__shutdown.set()
# Shutdown not applicable if on_startup did not finish
if not (exc_occurred or ctx == RunContext.ON_STARTUP):
logger.debug('Calling on_shutdown')
try:
self.on_shutdown(None)
except:
logger.exception('Exception in on_shutdown callback')
# Will re-raise exception, where appropriate and also call relevant callbacks. A True response indicates the run
# should be re-tried.
def __handle_exception(self, ctx):
exc_info = get_exc_info()
try:
if self.on_exception(ctx, exc_info):
logger.debug('Will retry %s', ctx.value)
return True
except KeyboardInterrupt:
# Exit immediately without running on_shutdown hook as no different to being caught inside on_shutdown.
raise
except:
logger.exception('Exception in on_exception callback')
self.__shutdown.set()
# Failure in on_startup should not result in shutdown callback
if ctx != RunContext.ON_STARTUP:
try:
if self.on_shutdown(exc_info):
return False
except KeyboardInterrupt:
raise
except:
logger.exception('Exception in on_shutdown callback')
raise # pylint: disable=misplaced-bare-raise
def stop(self, timeout=None):
"""Requests device to stop running, waiting at most the given timout in seconds (fractional). Has no effect if
`run()` was not called with background=True set.
Returns:
True if successfully stopped (or already not running).
"""
stopped = True
self.__shutdown.set()
if self.__bgthread:
logger.debug('Stopping bgthread')
self.__bgthread.join(timeout)
if self.__bgthread.is_alive():
logger.warning('bgthread did not finish within timeout')
stopped = False
self.__bgthread = None
return stopped
@property
def client(self):
""":doc:`IoticAgent.IOT.Client` instance in use by this runner"""
return self.__client
@property
def shutdown_requested(self):
"""Whether `stop()` has been called, an exception has occurred (which does not result in a retry) or the
implemented main loop has finished and thus the device should be shutting down."""
return self.__shutdown.is_set()
def wait_for_shutdown(self, timeout=None):
"""Blocks until shutdown has been requested (or the timeout has been reached, if specified). False is returned
for the latter, True otherwise."""
return self.__shutdown.wait(timeout)
def on_startup(self):
"""One-off tasks to perform straight **after** agent startup."""
def main(self): # pylint: disable=no-self-use
"""Application logic goes here. Should return (or raise exception) to end program run. Should check whether the
        `shutdown_requested` property is True and return if this is the case."""
def on_exception(self, ctx, exc_info): # pylint: disable=no-self-use,unused-argument
"""Called when an exception occurs within runner methods (or initialisation). If the return value evalutes to
True, the method in question will be re-tried (after `retry_timeout` seconds wait). Otherwise the exception will
be re-raised (the default). Note that KeyboardInterrupt will not result in this method being called and instead
cause a shutdown.
`ctx` One of `RunContext`. Indicates at what point exception occurred.
`exc_info` Tuple (as for `sys.exc_info()`) of the exception
"""
def on_shutdown(self, exc_info): # pylint: disable=no-self-use
"""One-off tasks to perform on just before agent shutdown. exc_info is a tuple (as for `sys.exc_info()`) of the
exception which caused the shutdown (from the `main()` function) or None if the shutdown was graceful. This is
useful if one only wants to perform certains tasks on success. This is not called if `on_startup()` was not
successful. It is possible that due to e.g. network problems the agent cannot be used at this point. If the
return value evalutes to False, the exception will be re-raised (the default). Note that KeyboardInterrupt will
not be passed to this method (but will result in this method being called).
"""
class RetryingThingRunner(ThingRunner):
"""Automatically re-tries on_startup & on_main on network & timeout related failures only."""
def on_exception(self, ctx, exc_info):
if issubclass(exc_info[0], (LinkException, IOTSyncTimeout)):
logger.warning('LinkException/IOTSyncTimeout caught, will retry %s', ctx.value,
exc_info=(exc_info if DEBUG_ENABLED else None))
return True
return False
|
Iotic-Labs/py-IoticAgent
|
src/IoticAgent/ThingRunner.py
|
Python
|
apache-2.0
| 8,996
|
# MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys, time
import tensorflow as tf
import tensorflow_models as tf_models
import tensorflow_datasets as tf_data
from tensorflow_models.trainers import BaseTrainer
class Trainer(BaseTrainer):
def finalize_hook(self):
print('Done training for {} epochs'.format(self.epoch()))
# Create the functions that perform learning and evaluation
def learning_hooks(self):
critic_steps = self._settings['critic_steps']
#discriminator_steps = self._settings['discriminator_steps']
start_avb = self._settings['start_avb']
elbo_train_op = tf_models.get_inference('elbo_like')
train_elbo_loss_op = tf_models.get_loss('train/elbo_like')
critic_train_op = tf_models.get_inference('critic')
train_critic_loss_op = tf_models.get_loss('train/critic')
discriminator_train_op = tf_models.get_inference('discriminator')
train_discriminator_loss_op = tf_models.get_loss('train/discriminator')
test_elbo_loss_op = tf_models.get_loss('test/elbo_like')
test_critic_loss_op = tf_models.get_loss('test/critic')
elbo_avb_train_op = tf_models.get_inference('elbo_avb')
train_elbo_avb_loss_op = tf_models.get_loss('train/elbo_avb')
test_elbo_avb_loss_op = tf_models.get_loss('test/elbo_avb')
test_discriminator_loss_op = tf_models.get_loss('test/discriminator')
x_train = tf_models.train_placeholder()
x_test = tf_models.test_placeholder()
next_train_batch = self._train_batch
next_test_batch = self._test_batch
def train(count_steps):
total_elbo = 0.
total_disc = 0.
# Decide whether to do EMVB or AVB
if start_avb is None or self.step < start_avb:
train_op = elbo_train_op
loss_op = train_elbo_loss_op
adv_train_op = critic_train_op
adv_loss_op = train_critic_loss_op
#print('Doing EMVB')
else:
train_op = elbo_avb_train_op
loss_op = train_elbo_avb_loss_op
adv_train_op = discriminator_train_op
adv_loss_op = train_discriminator_loss_op
#print('Doing AVB')
for idx in range(count_steps):
X_mb = next_train_batch()
_, this_elbo = self.sess.run([train_op, loss_op], feed_dict={x_train: X_mb})
for jdx in range(critic_steps):
X_mb = next_train_batch()
_, this_disc = self.sess.run([adv_train_op, adv_loss_op], feed_dict={x_train: X_mb})
total_elbo += this_elbo
total_disc += this_disc
return total_elbo / count_steps, total_disc / count_steps
def test():
total_elbo = 0.
total_disc = 0.
# Decide whether to do EMVB or AVB
if start_avb is None or self.step < start_avb:
loss_op = test_elbo_loss_op
adv_loss_op = test_critic_loss_op
else:
loss_op = test_elbo_avb_loss_op
adv_loss_op = test_discriminator_loss_op
for idx in range(self.test_batches):
X_mb = next_test_batch()
this_disc, this_elbo = self.sess.run([adv_loss_op, loss_op], feed_dict={x_test: X_mb})
total_elbo += this_elbo
total_disc += this_disc
return total_elbo/self.test_batches, total_disc/self.test_batches
return train, test
def initialize_hook(self):
# See where the test loss starts
if self._settings['resume_from'] is None:
# Do a test evaluation before any training happens
test_elbo, test_discriminator = self.test()
self.results['elbo_test'] += [test_elbo]
self.results['discriminator_test'] += [test_discriminator]
else:
test_elbo = self.results['elbo_test'][-1]
test_discriminator = self.results['discriminator_test'][-1]
print('epoch {:.3f}, test elbo = {:.2f}, test critic = {:.2f}'.format(self.epoch(), test_elbo, test_discriminator))
def step_hook(self):
with tf_models.timer.Timer() as train_timer:
train_elbo, train_discriminator = self.train(self._batches_per_step)
test_elbo, test_discriminator = self.test()
self.results['times_train'] += [train_timer.interval]
self.results['elbo_train'] += [train_elbo]
self.results['elbo_test'] += [test_elbo]
self.results['discriminator_test'] += [test_discriminator]
self.results['discriminator_train'] += [train_discriminator]
def before_step_hook(self):
pass
def after_step_hook(self):
train_time = self.results['times_train'][-1]
test_elbo = self.results['elbo_test'][-1]
test_discriminator = self.results['discriminator_test'][-1]
train_elbo = self.results['elbo_train'][-1]
#test_discriminator = self.results['discriminator_test'][-1]
examples_per_sec = self._settings['batch_size'] * self._batches_per_step / train_time
sec_per_batch = train_time / self._batches_per_step
print('epoch {:.3f}, test elbo = {:.2f}, test critic = {:.2f}, train elbo = {:.2f} ({:.1f} examples/sec)'.format(self.epoch(), test_elbo, test_discriminator, train_elbo, examples_per_sec))
def initialize_results_hook(self):
results = {}
results['elbo_train'] = []
results['times_train'] = []
results['elbo_test'] = []
results['discriminator_test'] = []
results['discriminator_train'] = []
return results
|
stefanwebb/tensorflow-models
|
tensorflow_models/trainers/emvb_np.py
|
Python
|
mit
| 6,138
|
#!/usr/bin/python3.4
import socket as sc
import sys
# AF_INET - Address family internet
# SOCK_STREAM - indicates TCP (connection-oriented)
try:
mysock = sc.socket(sc.AF_INET, sc.SOCK_STREAM)
except sc.error:
print("Failed to create socket")
sys.exit()
# Get IP address i.e. nslookup (DNS Query)
try:
host = sc.gethostbyname("www.google.com")
except sc.gaierror:
print("Failed to get host")
sys.exit()
# OR
# ainfo = sc.getaddrinfo("127.0.0.1", 1234)
# print (ainfo[0][4])
# mysock.connect(ainfo[0][4])
# mysock.sendall(b"Hello World")
print ("ip:", host)
mysock.connect((host, 80))
# HTTP GET request
#message = 'GET / HTTP/1.1\r\n\r\n'
# sendall either sends everything or raises an error
try:
mysock.sendall("GET / HTTP/1.1\r\n\r\n".encode('utf-8'))
except sc.error:
print("Failed to send")
sys.exit()
# Maximum number of bytes to receive
# Python will adjust buffer size according to data size - Smart boy ;)
data = mysock.recv(1000)
print ('Recv:', data)
# free up the port
mysock.close()
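# Note (not in the original): HTTP/1.1 requires a Host header, so many servers
# answer "400 Bad Request" to the bare GET above. A stricter request would be:
#   "GET / HTTP/1.1\r\nHost: www.google.com\r\nConnection: close\r\n\r\n"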
|
mehul-m-prajapati/mooc-solutions
|
coursera/iot-specialization/interfacing-with-the-R-Pi/client_socket.py
|
Python
|
gpl-3.0
| 986
|
# -*- encoding: utf-8 -*-
import pytest
from abjad import *
from abjad.tools.lilypondparsertools import LilyPondParser
def test_lilypondparsertools_LilyPondParser__spanners__PhrasingSlur_01():
r'''Successful slurs, showing single leaf overlap.
'''
target = Container(scoretools.make_notes([0] * 4, [(1, 4)]))
slur = spannertools.PhrasingSlur()
attach(slur, target[2:])
slur = spannertools.PhrasingSlur()
attach(slur, target[:3])
assert systemtools.TestManager.compare(
target,
r'''
{
c'4 \(
c'4
c'4 \) \(
c'4 \)
}
'''
)
parser = LilyPondParser()
result = parser(format(target))
assert format(target) == format(result) and target is not result
def test_lilypondparsertools_LilyPondParser__spanners__PhrasingSlur_02():
r'''Swapped start and stop.
'''
target = Container(scoretools.make_notes([0] * 4, [(1, 4)]))
slur = spannertools.PhrasingSlur()
attach(slur, target[2:])
slur = spannertools.PhrasingSlur()
attach(slur, target[:3])
assert systemtools.TestManager.compare(
target,
r'''
{
c'4 \(
c'4
c'4 \) \(
c'4 \)
}
'''
)
string = r"\relative c' { c \( c c \( \) c \) }"
parser = LilyPondParser()
result = parser(string)
assert format(target) == format(result) and target is not result
def test_lilypondparsertools_LilyPondParser__spanners__PhrasingSlur_03():
r'''Single leaf.
'''
string = '{ c \( \) c c c }'
assert pytest.raises(Exception, 'LilyPondParser()(string)')
def test_lilypondparsertools_LilyPondParser__spanners__PhrasingSlur_04():
r'''Unterminated.
'''
string = '{ c \( c c c }'
assert pytest.raises(Exception, 'LilyPondParser()(string)')
def test_lilypondparsertools_LilyPondParser__spanners__PhrasingSlur_05():
r'''Unstarted.
'''
string = '{ c c c c \) }'
assert pytest.raises(Exception, 'LilyPondParser()(string)')
def test_lilypondparsertools_LilyPondParser__spanners__PhrasingSlur_06():
r'''Nested.
'''
string = '{ c \( c \( c \) c \) }'
assert pytest.raises(Exception, 'LilyPondParser()(string)')
|
mscuthbert/abjad
|
abjad/tools/lilypondparsertools/test/test_lilypondparsertools_LilyPondParser__spanners__PhrasingSlur.py
|
Python
|
gpl-3.0
| 2,285
|
import tomodachi
from tomodachi.discovery.dummy_registry import DummyRegistry
from tomodachi.envelope.protobuf_base import ProtobufBase
@tomodachi.service
class DummyService(tomodachi.Service):
name = "test_dummy_protobuf"
discovery = [DummyRegistry]
message_envelope = ProtobufBase
options = {
"aws_sns_sqs": {
"region_name": "eu-west-1",
"aws_access_key_id": "XXXXXXXXX",
"aws_secret_access_key": "XXXXXXXXX",
},
"amqp": {"port": 54321, "login": "invalid", "password": "invalid"},
}
start = False
started = False
stop = False
async def _start_service(self) -> None:
self.start = True
async def _started_service(self) -> None:
self.started = True
async def _stop_service(self) -> None:
self.stop = True
|
kalaspuff/tomodachi
|
tests/services/dummy_protobuf_service.py
|
Python
|
mit
| 838
|
"""day15 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from app01 import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^login/', views.login),
url(r'^news/(\d+)', views.news),
url(r'^home/(\d+)/(\d+)', views.home),
url(r'^start/(?P<n1>\d+)/(?P<n2>\d+)', views.home),
url(r'^app01/', include("app01.urls")),
url(r'^app02/', include("app02.urls")),
]
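# Note (not in the original file): positional groups are passed to the view as
# positional args and named groups as keyword args, e.g. /start/3/7 calls
# views.home(request, n1='3', n2='7').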
|
dianshen/github
|
day15/day15/urls.py
|
Python
|
gpl-3.0
| 1,055
|
import parole
from parole.colornames import colors
from parole.display import interpolateRGB
import pygame, random
import sim_creatures, main
from util import *
description = \
"""
"No light; but rather darkness visible
Served only to discover sights of woe..."
A tiny demon composed entirely of negative energy, a shadow imp was once a
thrall to a practitioner of the dark arts who failed in an attempt to escape the
chains of its eternal servitude. Its punishment is perversely ironic: it was
released from bondage, but denied return to its original demonic plane of
existence. It now wanders the forgotten crevices of the world, stripped of any
hope of homecoming as well as the existential fulfillment of service. The tiny
red eyes that narrow as you approach burn with a hatred that has festered in the
darkness for eons.
"""
class MonsterClass(sim_creatures.Monster):
def __init__(self):
sim_creatures.Monster.__init__(
self,
'shadow imp', # name
parole.map.AsciiTile('I', (64, 64, 64)), # symbol, color
8, # str
10, # dex
10, # con
17, # per
17, # spd
2, # level
20, # xp value
False, # name starts with vowel? i.e., use "an" instead of "a"
[], # equipment slots
'torment', # unarmed attack verb
'die', # death verb
False, # leaves corpse?
1000, # unarmed attack energy
description,
bloodClass=sim_creatures.BlackBlood,
lightTolerance=0.8,
lightSensitivity=2,
feelsPain=False,
)
def die(self, dieVerb=None, corpsePossible=False):
pos = self.pos
tile = self.parentTile
sim_creatures.Monster.die(self, dieVerb=dieVerb, corpsePossible=False)
# special effect: shadow imps leave a permanent negative lightsource
# when they die
if visible(tile):
main.transient("An unholy darkness bursts forth from the slain "\
"imp's body, consuming it.", tile)
lightSource = parole.map.LightSource((255,255,255), -1.5)
lightSource.apply(tile.map, pos)
tile.map.update()
if main.mapFrame.fovObj and not main.player.dead:
main.mapFrame.touchFOV()
#========================================
thingClass = MonsterClass
|
tectronics/nyctos
|
src/data.res/scripts/monsters/shadowimp.py
|
Python
|
gpl-2.0
| 2,431
|
# Leap year python
def leap(year):
    if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):
        return "leap year"
else:
return "not a leap year"
|
amalshehu/exercism-python
|
leap/leap.py
|
Python
|
mit
| 191
|
# @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
class VButton(object):
ATTR_DISABLE_ON_CLICK = "dc"
|
rwl/muntjac
|
muntjac/terminal/gwt/client/ui/v_button.py
|
Python
|
apache-2.0
| 101
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction recuperer_valeur_dans_liste."""
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):
"""Renvoie la valeur spécifiée d'une liste."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.recuperer_valeur_dans_liste, "list", "Fraction")
@staticmethod
def recuperer_valeur_dans_liste(liste, indice):
"""Retourne l'élément spécifié dans la liste.
Paramètres à préciser :
* liste : la liste d'éléments
* indice : l'indice représentant la case de la liste
Voir les exemples d'utilisation pour voir comment marche cette fonction.
*NOTE* : les fonctions 'recuperer' et 'recupere_valeur_dans_liste'
font strictement la même chose. La seconde est conservée pour
des raisons de compatibilité. Préférez utiliser la première
('recuperer') dans vos scripts.
Exemples d'utilisation :
lettres = liste("a", "b", "c", "d", "e", "f")
valeur = recuperer(liste, 3)
# valeur contient maintenant "c", le troisième élément dans la liste
# L'indice peut également être négatif
valeur = recuperer(liste, -1)
# valeur contient à présent "f", la dernière lettre
"""
        if int(indice) != indice: # floating-point number
            raise ErreurExecution("the floating-point number {} was given " \
                    "as a list index. Only whole numbers " \
                    "(1, 2, 3, ...) are accepted".format(indice))
        if indice == 0:
            raise ErreurExecution("the given index must be positive or " \
                    "negative, not zero")
if indice < 0:
indice = int(indice)
else:
indice = int(indice) - 1
try:
return liste[indice]
except IndexError:
raise ErreurExecution("l'indice spécifié ({}) est invalide " \
"dans cette liste de taille {}".format(indice,
len(liste)))
|
vlegoff/tsunami
|
src/primaires/scripting/fonctions/recuperer_valeur_dans_liste.py
|
Python
|
bsd-3-clause
| 3,732
|
#!/usr/bin/env python
"""
A simple interactive program to compare the ids in a file with those in a
certificates file
This program will prompt the user for the name of a csv file containing
only user ids, and a csv of a certificates file, and see if there are any
ids in the first file that correspond to entries in the certificates file.
"""
import csv
import sys
from classData import certificates
import utils
if __name__ == '__main__':
if len(sys.argv) > 2:
f1name = sys.argv[1]
f2name = sys.argv[2]
else:
f1name = utils.getFileName('Enter csv file with ids : ')
f2name = utils.getFileName('Enter certificates csv file name : ')
f1 = csv.reader(open(f1name, 'r'))
f2 = csv.reader(open(f2name, 'r'))
certdict = certificates.builddict(f2)
    next(f1)  # skip the header row (csv.reader has no readrow method)
for [ident] in f1:
if ident in certdict:
print 'found new identifier ' + ident + ' in certificates file'
|
jimwaldo/HarvardX-Tools
|
src/main/python/checkData/getCertsFromId.py
|
Python
|
bsd-3-clause
| 951
|
from Crypto.Cipher import AES
import base64
import json
import struct
import logging
def base64urldecode(data):
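    # (2 - len(data) * 3) % 4 evaluates to 0, 1 or 2, slicing off exactly the
    # '=' padding needed for any valid urlsafe-base64 length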
data += '=='[(2 - len(data) * 3) % 4:]
for search, replace in (('-', '+'), ('_', '/'), (',', '')):
data = data.replace(search, replace)
return base64.b64decode(data)
def str_to_a32(b):
if len(b) % 4: # Add padding, we need a string with a length multiple of 4
b += '\0' * (4 - len(b) % 4)
return struct.unpack('>%dI' % (len(b) / 4), b)
def aes_cbc_decrypt(data, key):
decryptor = AES.new(key, AES.MODE_CBC, '\0' * 16)
return decryptor.decrypt(data)
def a32_to_str(a):
return struct.pack('>%dI' % len(a), *a)
def base64_to_a32(s):
return str_to_a32(base64urldecode(s))
def dec_attr(attr, key):
attr = aes_cbc_decrypt(attr, a32_to_str(key))
attr += "asd" # put some unwanted chars, just in case there is none
attr = '"}'.join(attr.split('"}')[:-1]) # remove unwanted chars "{...} asd"
attr += '"}'
return json.loads(attr[4:])
def get_chunks(size):
chunks = {}
p = pp = 0
i = 1
while i <= 8 and p < size - i * 0x20000:
chunks[p] = i * 0x20000
pp = p
p += chunks[p]
i += 1
while p < size:
chunks[p] = 0x100000
pp = p
p += chunks[p]
chunks[pp] = size - pp
if not chunks[pp]:
del chunks[pp]
return chunks
def aes_cbc_encrypt(data, key):
encryptor = AES.new(key, AES.MODE_CBC, '\0' * 16)
return encryptor.encrypt(data)
def aes_cbc_encrypt_a32(data, key):
return str_to_a32(aes_cbc_encrypt(a32_to_str(data), a32_to_str(key)))
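# Hedged self-test (not part of the original module); Python 2 semantics, like
# the rest of this file, since str is treated as bytes throughout.
if __name__ == '__main__':
    assert str_to_a32('abcd') == (0x61626364,)
    assert a32_to_str((0x61626364,)) == 'abcd'
    # chunk sizes ramp from 128KiB up to 1MiB and always sum to the file size
    assert sum(get_chunks(5 * 0x100000).values()) == 5 * 0x100000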
|
nitely/ochDownloader
|
addons/mega/crypto.py
|
Python
|
lgpl-3.0
| 1,635
|
import random
import json
import os.path
class Response:
names = ["bolton", "qbot"]
    def __init__(self, phrases, words, responses, use_hash, named, start, end):
        # Parameters match both the construction call in Emoji_master and the
        # attributes read by get_response; the original signature did neither.
        self.phrases = phrases
        self.words = words
        self.responses = responses
        self.use_hash = use_hash
        self.named = named
        self.start = start
        self.end = end
def get_response(self, message, tokens, user):
has_trigger = False
is_named = False
lower = message.lower()
for phrase in self.phrases:
if phrase in lower:
has_trigger = True
continue
if not has_trigger:
for word in self.words:
for token in tokens:
if word == token:
has_trigger = True
continue
for name in Response.names:
if name in lower:
is_named = True
result = ""
if has_trigger and (not self.named or is_named):
if self.use_hash:
result = self.start + self.hash(message) + self.end
else:
result = self.start + self.random() + self.end
result = result.replace("user_id", "<@" + user + ">")
return result
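    # hash() below is a simple deterministic rolling hash: the same message
    # always maps to the same canned response, whereas random() varies.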
def hash(self, text):
hashValue = 11
for character in text:
hashValue *= 47
hashValue += ord(character)
return self.responses[hashValue % len(self.responses)]
def random(self):
return random.choice(self.responses)
class Emoji_master:
def __init__(self, msg_writer):
try:
master_file = open(
os.path.join('./resources', 'emoji_event.txt'), 'r'
)
json_events = json.load(master_file)
self.events = []
for event in json_events["Events"]:
use_hash = "Hash" in event and event["Hash"]
named = "Named" in event and event["Named"]
start = ""
end = ""
if "Start" in event:
start = event["Start"]
if "End" in event:
end = event["End"]
phrases = []
words = []
responses = []
if "Words" in event["Triggers"]:
for w in event["Triggers"]["Words"]:
words.append(w)
if "Phrases" in event["Triggers"]:
for p in event["Triggers"]["Phrases"]:
phrases.append(p)
for r in event["Responses"]:
responses.append(r)
self.events.append(
Response(
phrases, words, responses, use_hash, named, start, end
)
)
except:
msg_writer.write_error("Error loading JSON file")
self.events = []
def get_response(self, message, user):
combined_responses = ""
tokens = message.lower().split()
for event in self.events:
current_response = event.get_response(message, tokens, user)
if current_response != "":
current_response += '\n'
combined_responses += current_response
return combined_responses
|
ianadmu/bolton_bot
|
bot/emoji_master.py
|
Python
|
mit
| 3,263
|
import datetime
import logging
from functools import reduce
from flask_babelpkg import lazy_gettext
from .filters import Filters
log = logging.getLogger(__name__)
class BaseInterface(object):
"""
Base class for all data model interfaces.
Sub class it to implement your own interface for some data engine.
"""
obj = None
filter_converter_class = None
""" when sub classing override with your own custom filter converter """
""" Messages to display on CRUD Events """
add_row_message = lazy_gettext('Added Row')
edit_row_message = lazy_gettext('Changed Row')
delete_row_message = lazy_gettext('Deleted Row')
delete_integrity_error_message = lazy_gettext('Associated data exists, please delete them first')
add_integrity_error_message = lazy_gettext('Integrity error, probably unique constraint')
edit_integrity_error_message = lazy_gettext('Integrity error, probably unique constraint')
general_error_message = lazy_gettext('General Error')
""" Tuple with message and text with severity type ex: ("Added Row", "info") """
message = ()
def __init__(self, obj):
self.obj = obj
def _get_attr_value(self, item, col):
if not hasattr(item, col):
# it's an inner obj attr
return reduce(getattr, col.split('.'), item)
if hasattr(getattr(item, col), '__call__'):
# its a function
return getattr(item, col)()
else:
# its attribute
return getattr(item, col)
def get_filters(self, search_columns=None):
search_columns = search_columns or []
return Filters(self.filter_converter_class, self, search_columns)
def get_values_item(self, item, show_columns):
return [self._get_attr_value(item, col) for col in show_columns]
def _get_values(self, lst, list_columns):
"""
Get Values: formats values for list template.
returns [{'col_name':'col_value',....},{'col_name':'col_value',....}]
:param lst:
The list of item objects from query
:param list_columns:
The list of columns to include
"""
retlst = []
for item in lst:
retdict = {}
for col in list_columns:
retdict[col] = self._get_attr_value(item, col)
retlst.append(retdict)
return retlst
def get_values(self, lst, list_columns):
"""
Get Values: formats values for list template.
returns [{'col_name':'col_value',....},{'col_name':'col_value',....}]
:param lst:
The list of item objects from query
:param list_columns:
The list of columns to include
"""
for item in lst:
retdict = {}
for col in list_columns:
retdict[col] = self._get_attr_value(item, col)
yield retdict
def get_values_json(self, lst, list_columns):
"""
Converts list of objects from query to JSON
"""
result = []
for item in self.get_values(lst, list_columns):
for key, value in list(item.items()):
if isinstance(value, datetime.datetime) or isinstance(value, datetime.date):
value = value.isoformat()
item[key] = value
if isinstance(value, list):
item[key] = [str(v) for v in value]
result.append(item)
return result
"""
    Returns the model's class name,
    useful for auto titles on views
"""
@property
def model_name(self):
return self.obj.__class__.__name__
"""
Next methods must be overridden
"""
def query(self, filters=None, order_column='', order_direction='',
page=None, page_size=None):
pass
def is_image(self, col_name):
return False
def is_file(self, col_name):
return False
def is_gridfs_file(self, col_name):
return False
def is_gridfs_image(self, col_name):
return False
def is_string(self, col_name):
return False
def is_text(self, col_name):
return False
def is_integer(self, col_name):
return False
def is_numeric(self, col_name):
return False
def is_float(self, col_name):
return False
def is_boolean(self, col_name):
return False
def is_date(self, col_name):
return False
def is_datetime(self, col_name):
return False
def is_relation(self, prop):
return False
def is_relation_col(self, col):
return False
def is_relation_many_to_one(self, prop):
return False
def is_relation_many_to_many(self, prop):
return False
def is_relation_one_to_one(self, prop):
return False
def is_relation_one_to_many(self, prop):
return False
def is_nullable(self, col_name):
return True
def is_unique(self, col_name):
return False
def is_pk(self, col_name):
return False
def is_fk(self, col_name):
return False
def get_max_length(self, col_name):
return -1
def get_min_length(self, col_name):
return -1
"""
-----------------------------------------
FUNCTIONS FOR CRUD OPERATIONS
-----------------------------------------
"""
def add(self, item):
"""
Adds object
"""
raise NotImplementedError
def edit(self, item):
"""
Edit (change) object
"""
raise NotImplementedError
def delete(self, item):
"""
Deletes object
"""
raise NotImplementedError
def get_col_default(self, col_name):
pass
def get_keys(self, lst):
"""
return a list of pk values from object list
"""
pk_name = self.get_pk_name()
return [getattr(item, pk_name) for item in lst]
    def get_pk_name(self):
"""
Returns the primary key name
"""
raise NotImplementedError
def get_pk_value(self, item):
return getattr(item, self.get_pk_name())
def get(self, pk):
"""
return the record from key
"""
pass
def get_related_model(self, prop):
raise NotImplementedError
def get_related_interface(self, col_name):
"""
Returns a BaseInterface for the related model
of column name.
:param col_name: Column name with relation
:return: BaseInterface
"""
raise NotImplementedError
def get_related_obj(self, col_name, value):
raise NotImplementedError
def get_related_fk(self, model):
raise NotImplementedError
def get_columns_list(self):
"""
Returns a list of all the columns names
"""
return []
def get_user_columns_list(self):
"""
Returns a list of user viewable columns names
"""
return self.get_columns_list()
def get_search_columns_list(self):
"""
Returns a list of searchable columns names
"""
return []
def get_order_columns_list(self, list_columns=None):
"""
Returns a list of order columns names
"""
return []
def get_relation_fk(self, prop):
pass
|
qpxu007/Flask-AppBuilder
|
flask_appbuilder/models/base.py
|
Python
|
bsd-3-clause
| 7,537
|
# Copyright © 2014-2018 Red Hat, Inc. and others.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from bodhi.server import schemas
class TestSchemas:
def test_schema_unflattening_for_comments(self):
expected = {
'text': 'this is an update comment',
'karma': -1,
'karma_critpath': 1,
'bug_feedback': [{'bug_id': 1, 'karma': 1}],
'testcase_feedback': [{'testcase_name': "wat", 'karma': -1}],
}
flat_structure = {
'text': 'this is an update comment',
'karma': -1,
'karma_critpath': 1,
'bug_feedback.0.bug_id': 1,
'bug_feedback.0.karma': 1,
'testcase_feedback.0.testcase_name': 'wat',
'testcase_feedback.0.karma': -1,
}
schema = schemas.SaveCommentSchema()
nested_structure = schema.unflatten(flat_structure)
assert nested_structure == expected
|
Conan-Kudo/bodhi
|
bodhi/tests/server/test_schemas.py
|
Python
|
gpl-2.0
| 1,642
|
"""Test module for Dashboard UI
@Requirement: Dashboard
@CaseAutomation: Automated
@CaseLevel: Acceptance
@CaseComponent: UI
@TestType: Functional
@CaseImportance: High
@Upstream: No
"""
from robottelo.decorators import stubbed, tier1, tier2
from robottelo.test import UITestCase
class DashboardTestCase(UITestCase):
"""Tests for Dashboard UI"""
@stubbed()
@tier1
def test_positive_save(self):
"""Save the Dashboard UI
@id: 0bd8560c-d612-49c7-83ee-558bbaa16bce
@Steps:
1.Navigate to Monitor -> Dashboard
2.Try to remove some widgets
3.Select the Manage Dropdown box
4.Save the Dashboard
@Assert: Dashboard is saved successfully
and the removed widgets does not appear.
@caseautomation: notautomated
"""
@stubbed()
@tier1
def test_positive_reset(self):
"""Reset the Dashboard to default UI
@id: 040c5910-a296-4cfc-ad1f-1b4fc9be8199
@Steps:
1.Navigate to Monitor -> Dashboard
2.Try to remove some widgets
3.Select the Manage Dropdown box
4.Save the Dashboard
5.Dashboard Widgets are saved successfully
6.Click Reset to default
@Assert: Widget positions successfully saved.
@caseautomation: notautomated
"""
@stubbed()
@tier1
def test_positive_add_widgets(self):
"""Add Widgets to the Dashboard UI
@id: ec57d051-83d9-4c11-84ff-4de292784fc1
@Steps:
1.Navigate to Monitor -> Dashboard
2.Select Manage Dropdown box
3.Add Widgets
@Assert: User is able to add widgets.
@caseautomation: notautomated
"""
@stubbed()
@tier1
def test_positive_search_bookmark(self):
"""Bookmark the search filter in Dashboard UI
@id: f9e6259e-2b97-46fc-b357-26ea5ea8d16c
@Steps:
1.Navigate to Monitor -> Dashboard
2.Add a filter to search box (eg. environment)
3.Bookmark the search filter
@Assert: User is able to list the Bookmark
@caseautomation: notautomated
"""
@stubbed()
@tier2
def test_positive_host_configuration_status(self):
"""Check if the Host Configuration Status
Widget links are working
@id: ffb0a6a1-2b65-4578-83c7-61492122d865
@Steps:
1.Navigate to Monitor -> Dashboard
2.Review the Host Configuration Status
3.Navigate to each of the links which has
search string associated with it.
@Assert: Each link shows the right info
@caseautomation: notautomated
@CaseLevel: Integration
"""
@stubbed()
@tier2
def test_positive_host_configuration_chart(self):
"""Check if the Host Configuration Chart
is working in the Dashboard UI
@id: b03314aa-4394-44e5-86da-c341c783003d
@Steps:
1.Navigate to Monitor -> Dashboard
2.Review the Host Configuration Chart widget
3.Navigate to each of the links which
has search string associated with it.
@Assert: Each link shows the right info
@caseautomation: notautomated
@CaseLevel: Integration
"""
@stubbed()
@tier2
def test_positive_task_status(self):
"""Check if the Task Status is
working in the Dashboard UI
@id: fb667d6a-7255-4341-9f79-2f03d19e8e0f
@Steps:
1.Navigate to Monitor -> Dashboard
2.Review the Task Status widget
3.Click each link
@Assert: Each link shows the right info
@caseautomation: notautomated
@CaseLevel: Integration
"""
@stubbed()
@tier2
def test_positive_latest_warning_error_tasks(self):
"""Check if the Latest Warning/Error
Tasks Status are working in the Dashboard UI
@id: c90df864-1472-4b7c-91e6-9ea9e98384a9
@Steps:
1.Navigate to Monitor -> Dashboard
2.Review the Latest Warning/Error Tasks widget.
@Assert: The links to all failed/warnings tasks are working
@caseautomation: notautomated
@CaseLevel: Integration
"""
@stubbed()
@tier2
def test_positive_content_view_history(self):
"""Check if the Content View History
are working in the Dashboard UI
@id: cb63a67d-7cca-4d2c-9abf-9f4f5e92c856
@Steps:
1.Navigate to Monitor -> Dashboard
2.Review the Content View History widget
@Assert: Each Content View link shows its current status
(the environment to which it is published)
@caseautomation: notautomated
@CaseLevel: Integration
"""
@stubbed()
@tier2
def test_positive_discovered_hosts(self):
"""Check if the user can access Discovered
Host Widget in the Dashboard UI
@id: 1e06af1b-c21f-42a9-a432-2ed18e0b225f
@Steps:
1.Navigate to Monitor -> Dashboard
2.Review the Discovered Hosts widget
3.Click on the list of Discovered Hosts
@Assert: It takes you to discovered hosts
@caseautomation: notautomated
@CaseLevel: Integration
"""
@stubbed()
@tier2
def test_positive_latest_events_widget(self):
"""Check if the Latest Events Widget
is working in the Dashboard UI
@id: 6ca2f113-bf15-406a-8b15-77c377048ac6
@Steps:
1.Navigate to Monitor -> Dashboard
2.Review the Latest Events widget
@Assert: The Widget is updated with
all the latest events
@caseautomation: notautomated
@CaseLevel: Integration
"""
@stubbed()
@tier1
def test_positive_sync_overview_widget(self):
"""Check if the Sync Overview Widget
is working in the Dashboard UI
@id: 515027f5-19e8-4f83-9042-1c347a63758f
@Steps:
1.Create a product
2.Add a repo and sync it
3.Navigate to Monitor -> Dashboard
4.Review the Sync Overview widget
for the above sync details
@Assert: Sync Overview widget is
updated with all sync processes
@caseautomation: notautomated
"""
@stubbed()
@tier2
def test_positive_content_host_subscription_status(self):
"""Check if the Content Host Subscription Status
is working in the Dashboard UI
@id: ce0d7b0c-ae6a-4361-8173-e50f6381194a
@Steps:
1.Register Content Host and subscribe it
2.Navigate Monitor -> Dashboard
3.Review the Content Host Subscription Status
4.Click each link :
a.Invalid Subscriptions
b.Insufficient Subscriptions
c.Current Subscriptions
@Assert: The widget is updated with all details for Current,
Invalid and Insufficient Subscriptions
@caseautomation: notautomated
@CaseLevel: Integration
"""
@stubbed()
@tier1
def test_positive_current_subscription_totals(self):
"""Check if the Current Subscriptions Totals widget
is working in the Dashboard UI
@id: 6d0f56ff-7007-4cdb-96f3-d9e8b6cc1701
@Steps:
1.Make sure sat6 has some active subscriptions
2.Navigate to Monitor -> Dashboard
3.Review the Current Subscription Total widget
        @Assert: The widget displays details for all active
        and expired subscriptions
@caseautomation: notautomated
"""
@stubbed()
@tier2
def test_positive_host_collections(self):
"""Check if the Host Collections widget
displays list of host collection in UI
@id: 1feae601-987d-4553-8644-4ceef5059e64
@Steps:
1.Make sure to have some hosts and host collections
2.Navigate Monitor -> Dashboard
3.Review the Host Collections Widget
        @Assert: The list of host collections along
        with content hosts is displayed in the widget
@caseautomation: notautomated
@CaseLevel: Integration
"""
@stubbed()
@tier2
def test_positive_run_distribution_widget(self):
"""Check if the Run distribution widget is
working in the Dashboard UI
@id: ed2205c6-9ba6-4b9a-895a-d6fa8157cb90
@Steps:
1.Navigate Monitor -> Dashboard
2.Review the Run Distribution
in the last 30 minutes widget
@Assert: The widget shows appropriate data
@caseautomation: notautomated
@CaseLevel: Integration
"""
@stubbed()
@tier2
def test_positive_latest_errata_widget(self):
"""Check if the Latest Errata widget is
working in Dashboard the UI
@id: 9012744f-9717-4d6e-a05c-bc7b4b1c1657
@Steps:
1.Make sure you have applied some errata to content host
2.Navigate Monitor -> Dashboard
3.Review the Latest Errata widget
@Assert: The widget is updated with
all errata related details
@caseautomation: notautomated
@CaseLevel: Integration
"""
@stubbed()
@tier1
def test_positive_remove_widget(self):
"""Check if the user is able to remove widget
in the Dashboard UI
@id: 25c6e9e8-a7b6-4aa4-96dd-0d303e0c3aa0
@Steps:
1.Navigate to Monitor -> Dashboard
2.Try to remove some widget
@Assert: Widget is removed
The widget is listed under Manage -> Add Widget
@caseautomation: notautomated
"""
@stubbed()
@tier1
def test_positive_add_removed_widget(self):
"""Check if the user is able to add removed
widget in the Dashboard UI
@id: 156f559f-bb23-480f-bdf0-5dd2ee545fa9
@Steps:
1.Navigate to Monitor -> Dashboard
2.Try to remove some widget
3.Widget is removed
4.The widget is listed under Manage -> Add Widget
5.Click to add the widget back
@Assert: The widget is added back to the Dashboard
@caseautomation: notautomated
"""
@stubbed()
@tier1
def test_positive_minimize_widget(self):
"""Check if the user is able to minimize the widget
in the Dashboard UI
@id: 21f10b30-b121-4347-807d-7b949a3f0e4f
@Steps:
1.Navigate to Monitor -> Dashboard
2.Try to minimize some widget
@Assert: Widget is minimized
The widget is listed under Manage -> Restore Widget
@caseautomation: notautomated
"""
@stubbed()
@tier1
def test_positive_restore_minimize_widget(self):
"""Check if the user is able to restoring the minimized
widget in the Dashboard UI
@id: f42fdcce-26fb-4c56-ac4e-1e00b077bd78
@Steps:
1.Navigate to Monitor -> Dashboard
2.Try to minimize some widget
3.Widget is minimized
4.The widget is listed
under Manage -> Restore Widget
5.Click to add the widget back
@Assert: The widget is added
back to the Dashboard
@caseautomation: notautomated
"""
@stubbed()
@tier1
def test_positive_toggle_auto_refresh(self):
"""Check if the user is able to Toggle
Auto refresh in the Dashboard UI
@id: 2cbb2f2c-ddf2-492a-bda1-904c30da0de3
@Steps:
1.Navigate to Monitor -> Dashboard
2.Click Auto Refresh ON/OFF
@Assert: The auto refresh functionality
works as per the set value.
@caseautomation: notautomated
"""
@stubbed()
@tier1
def test_positive_search(self):
"""Check if the search box is working
in the Dashboard UI
@id: 1545580c-1f0e-4991-a400-4a6224199452
@Steps:
1.Navigate to Monitor -> Dashboard
2.Add a filter to search box (eg. environment)
@Assert: Data displayed according to search box
@caseautomation: notautomated
"""
@stubbed()
@tier1
def test_positive_clear_search_box(self):
"""Check if the user is able to clear the
search box in the Dashboard UI
@id: 97335970-dc1a-485d-aeb2-de6ece2197c3
@Steps:
1.Navigate to Monitor -> Dashboard
2.Add a filter to search box (eg. environment)
3.Data displayed according to search box
        4.On the left side of the box click
        the Clear (cross) sign
@Assert: Search box is cleared
@caseautomation: notautomated
"""
|
Ichimonji10/robottelo
|
tests/foreman/ui/test_dashboard.py
|
Python
|
gpl-3.0
| 12,592
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_gtm_datacenter
short_description: Manage Datacenter configuration in BIG-IP
description:
- Manage BIG-IP data center configuration. A data center defines the location
where the physical network components reside, such as the server and link
objects that share the same subnet on the network. This module is able to
manipulate the data center definitions in a BIG-IP.
version_added: 2.2
options:
contact:
description:
- The name of the contact for the data center.
description:
description:
- The description of the data center.
location:
description:
- The location of the data center.
name:
description:
- The name of the data center.
required: True
state:
description:
      - The data center state. If C(absent), an attempt to delete the
        data center will be made. This will only succeed if the data
        center is not in use by other GTM objects. C(present) creates
        the data center and enables it. If C(enabled), enable the data
        center if it exists. If C(disabled), create the data center if
        needed, and set its state to C(disabled).
default: present
choices:
- present
- absent
- enabled
- disabled
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create data center "New York"
bigip_gtm_datacenter:
server: lb.mydomain.com
user: admin
password: secret
name: New York
location: 222 West 23rd
delegate_to: localhost
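# A hypothetical companion task (not part of the original docs) using the
# documented C(absent) state to remove the same data center:
- name: Remove data center "New York"
  bigip_gtm_datacenter:
    server: lb.mydomain.com
    user: admin
    password: secret
    name: New York
    state: absent
  delegate_to: localhost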
'''
RETURN = r'''
contact:
description: The contact that was set on the datacenter.
returned: changed
type: string
sample: admin@root.local
description:
description: The description that was set for the datacenter.
returned: changed
type: string
sample: Datacenter in NYC
enabled:
  description: Whether the datacenter is enabled or not.
returned: changed
type: bool
sample: true
disabled:
description: Whether the datacenter is disabled or not.
returned: changed
type: bool
sample: true
state:
description: State of the datacenter.
returned: changed
type: string
sample: disabled
location:
description: The location that is set for the datacenter.
returned: changed
type: string
sample: 222 West 23rd
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {}
updatables = [
'location', 'description', 'contact', 'state'
]
returnables = [
'location', 'description', 'contact', 'state', 'enabled', 'disabled'
]
api_attributes = [
'enabled', 'location', 'description', 'contact', 'disabled'
]
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if api_attribute in self.api_map:
result[api_attribute] = getattr(
self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ApiParameters(Parameters):
@property
def disabled(self):
if self._values['disabled'] is True:
return True
return None
@property
def enabled(self):
if self._values['enabled'] is True:
return True
return None
class ModuleParameters(Parameters):
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
else:
return None
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return True
return None
@property
def state(self):
if self.enabled and self._values['state'] != 'present':
return 'enabled'
elif self.disabled and self._values['state'] != 'present':
return 'disabled'
else:
return self._values['state']
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
elif self._values['state'] in ['enabled', 'present']:
return False
return None
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return True
elif self._values['state'] == 'disabled':
return False
return None
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def state(self):
if self.want.enabled != self.have.enabled:
return dict(
state=self.want.state,
enabled=self.want.enabled
)
if self.want.disabled != self.have.disabled:
return dict(
state=self.want.state,
disabled=self.want.disabled
)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.pop('module', None)
self.client = kwargs.pop('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def read_current_from_device(self):
resource = self.client.api.tm.gtm.datacenters.datacenter.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return ApiParameters(params=result)
def exists(self):
result = self.client.api.tm.gtm.datacenters.datacenter.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.gtm.datacenters.datacenter.load(
name=self.want.name,
partition=self.want.partition
)
resource.modify(**params)
def create(self):
self.have = ApiParameters()
self.should_update()
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the datacenter")
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.gtm.datacenters.datacenter.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the datacenter")
return True
def remove_from_device(self):
resource = self.client.api.tm.gtm.datacenters.datacenter.load(
name=self.want.name,
partition=self.want.partition
)
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
contact=dict(),
description=dict(),
location=dict(),
name=dict(required=True),
state=dict(
default='present',
choices=['present', 'absent', 'disabled', 'enabled']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
    # Create the client outside the try block so the except handler can
    # always reference it for cleanup.
    client = F5Client(**module.params)
    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as ex:
        cleanup_tokens(client)
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
mheap/ansible
|
lib/ansible/modules/network/f5/bigip_gtm_datacenter.py
|
Python
|
gpl-3.0
| 13,181
|
# Generated by Django 1.11.14 on 2019-01-14 13:29
from django.db import migrations
def attachment_infrastructure(apps, schema_editor):
AttachmentModel = apps.get_model('common', 'Attachment')
InfrastructureModel = apps.get_model('infrastructure', 'Infrastructure')
ContentTypeModel = apps.get_model("contenttypes", "ContentType")
infrastructure = ContentTypeModel.objects.get(app_label='infrastructure', model='infrastructure')
attachments = AttachmentModel.objects.filter(
content_type__model='baseinfrastructure',
object_id__in=InfrastructureModel.objects.all().values_list("pk", flat=True))
attachments.update(content_type=infrastructure)
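# Editorial note: RunPython below is registered without a reverse_code, so
# this data migration is irreversible; supplying
# migrations.RunPython(attachment_infrastructure, migrations.RunPython.noop)
# would let a backwards migrate pass over it as a no-op.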
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0012_remove_signagetype_rename_infrastructure_type'),
]
operations = [
migrations.RunPython(attachment_infrastructure),
]
|
GeotrekCE/Geotrek-admin
|
geotrek/infrastructure/migrations/0013_attachments_infrastructure.py
|
Python
|
bsd-2-clause
| 919
|
import re
import requests
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from allauth.exceptions import ImmediateHttpResponse
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import ShopifyProvider
class ShopifyOAuth2Adapter(OAuth2Adapter):
provider_id = ShopifyProvider.id
supports_state = False
scope_delimiter = ','
def _shop_domain(self):
shop = self.request.GET.get('shop', '')
if '.' not in shop:
shop = '{}.myshopify.com'.format(shop)
# Ensure the provided hostname parameter is a valid hostname,
# ends with myshopify.com, and does not contain characters
# other than letters (a-z), numbers (0-9), dots, and hyphens.
if not re.match(r'^[a-z0-9-]+\.myshopify\.com$', shop):
raise ImmediateHttpResponse(HttpResponseBadRequest(
'Invalid `shop` parameter'))
return shop
def _shop_url(self, path):
shop = self._shop_domain()
return 'https://{}{}'.format(shop, path)
@property
def access_token_url(self):
return self._shop_url('/admin/oauth/access_token')
@property
def authorize_url(self):
return self._shop_url('/admin/oauth/authorize')
@property
def profile_url(self):
return self._shop_url('/admin/shop.json')
def complete_login(self, request, app, token, **kwargs):
headers = {
'X-Shopify-Access-Token': '{token}'.format(token=token.token)}
response = requests.get(
self.profile_url,
headers=headers)
extra_data = response.json()
return self.get_provider().sociallogin_from_response(
request, extra_data)
class ShopifyOAuth2LoginView(OAuth2LoginView):
def dispatch(self, request):
response = super(ShopifyOAuth2LoginView, self).dispatch(request)
is_embedded = getattr(settings, 'SOCIALACCOUNT_PROVIDERS', {}).get(
'shopify', {}).get('IS_EMBEDDED', False)
if is_embedded:
"""
Shopify embedded apps (that run within an iFrame) require a JS
(not server) redirect for starting the oauth2 process.
See Also:
https://help.shopify.com/api/sdks/embedded-app-sdk/getting-started#oauth
"""
js = ''.join((
'<!DOCTYPE html><html><head>'
'<script type="text/javascript">',
'window.top.location.href = "{url}";'.format(url=response.url),
'</script></head><body></body></html>'
))
response = HttpResponse(content=js)
# Because this view will be within shopify's iframe
response.xframe_options_exempt = True
return response
oauth2_login = ShopifyOAuth2LoginView.adapter_view(ShopifyOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(ShopifyOAuth2Adapter)
|
spool/django-allauth
|
allauth/socialaccount/providers/shopify/views.py
|
Python
|
mit
| 3,024
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
import frappe
from frappe.utils import cstr, flt, nowdate, random_string
from erpnext.hr.doctype.employee.test_employee import make_employee
from erpnext.hr.doctype.vehicle_log.vehicle_log import make_expense_claim
class TestVehicleLog(unittest.TestCase):
def setUp(self):
employee_id = frappe.db.sql("""select name from `tabEmployee` where name='testdriver@example.com'""")
self.employee_id = employee_id[0][0] if employee_id else None
if not self.employee_id:
self.employee_id = make_employee("testdriver@example.com", company="_Test Company")
self.license_plate = get_vehicle(self.employee_id)
def tearDown(self):
frappe.delete_doc("Vehicle", self.license_plate, force=1)
frappe.delete_doc("Employee", self.employee_id, force=1)
def test_make_vehicle_log_and_syncing_of_odometer_value(self):
vehicle_log = make_vehicle_log(self.license_plate, self.employee_id)
#checking value of vehicle odometer value on submit.
vehicle = frappe.get_doc("Vehicle", self.license_plate)
self.assertEqual(vehicle.last_odometer, vehicle_log.odometer)
#checking value vehicle odometer on vehicle log cancellation.
last_odometer = vehicle_log.last_odometer
current_odometer = vehicle_log.odometer
distance_travelled = current_odometer - last_odometer
vehicle_log.cancel()
vehicle.reload()
self.assertEqual(vehicle.last_odometer, current_odometer - distance_travelled)
vehicle_log.delete()
def test_vehicle_log_fuel_expense(self):
vehicle_log = make_vehicle_log(self.license_plate, self.employee_id)
expense_claim = make_expense_claim(vehicle_log.name)
fuel_expense = expense_claim.expenses[0].amount
self.assertEqual(fuel_expense, 50*500)
vehicle_log.cancel()
frappe.delete_doc("Expense Claim", expense_claim.name)
frappe.delete_doc("Vehicle Log", vehicle_log.name)
def test_vehicle_log_with_service_expenses(self):
vehicle_log = make_vehicle_log(self.license_plate, self.employee_id, with_services=True)
expense_claim = make_expense_claim(vehicle_log.name)
expenses = expense_claim.expenses[0].amount
self.assertEqual(expenses, 27000)
vehicle_log.cancel()
frappe.delete_doc("Expense Claim", expense_claim.name)
frappe.delete_doc("Vehicle Log", vehicle_log.name)
def get_vehicle(employee_id):
license_plate=random_string(10).upper()
vehicle = frappe.get_doc({
"doctype": "Vehicle",
"license_plate": cstr(license_plate),
"make": "Maruti",
"model": "PCM",
"employee": employee_id,
"last_odometer": 5000,
"acquisition_date": nowdate(),
"location": "Mumbai",
"chassis_no": "1234ABCD",
"uom": "Litre",
"vehicle_value": flt(500000)
})
try:
vehicle.insert()
except frappe.DuplicateEntryError:
pass
return license_plate
def make_vehicle_log(license_plate, employee_id, with_services=False):
vehicle_log = frappe.get_doc({
"doctype": "Vehicle Log",
"license_plate": cstr(license_plate),
"employee": employee_id,
"date": nowdate(),
"odometer": 5010,
"fuel_qty": flt(50),
"price": flt(500)
})
if with_services:
vehicle_log.append("service_detail", {
"service_item": "Oil Change",
"type": "Inspection",
"frequency": "Mileage",
"expense_amount": flt(500)
})
vehicle_log.append("service_detail", {
"service_item": "Wheels",
"type": "Change",
"frequency": "Half Yearly",
"expense_amount": flt(1500)
})
vehicle_log.save()
vehicle_log.submit()
return vehicle_log
|
mhbu50/erpnext
|
erpnext/hr/doctype/vehicle_log/test_vehicle_log.py
|
Python
|
gpl-3.0
| 3,526
|
"""
Add material to support overhang or remove material at the overhang angle.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
globalExecutionOrder = 20
def getBevelPath( begin, center, close, end, radius ):
"Get bevel path."
beginComplex = begin.dropAxis()
centerComplex = center.dropAxis()
endComplex = end.dropAxis()
beginComplexSegmentLength = abs( centerComplex - beginComplex )
endComplexSegmentLength = abs( centerComplex - endComplex )
minimumRadius = lineation.getMinimumRadius( beginComplexSegmentLength, endComplexSegmentLength, radius )
if minimumRadius <= close:
return [ center ]
beginBevel = center + minimumRadius / beginComplexSegmentLength * ( begin - center )
endBevel = center + minimumRadius / endComplexSegmentLength * ( end - center )
if radius > 0.0:
return [ beginBevel, endBevel ]
midpointComplex = 0.5 * ( beginBevel.dropAxis() + endBevel.dropAxis() )
spikeComplex = centerComplex + centerComplex - midpointComplex
return [ beginBevel, Vector3( spikeComplex.real, spikeComplex.imag, center.z ), endBevel ]
def getManipulatedPaths(close, elementNode, loop, prefix, sideLength):
"Get bevel loop."
if len(loop) < 3:
return [loop]
radius = lineation.getRadiusByPrefix(elementNode, prefix, sideLength)
	if radius == 0.0:
		return [loop]
bevelLoop = []
for pointIndex in xrange(len(loop)):
begin = loop[(pointIndex + len(loop) - 1) % len(loop)]
center = loop[pointIndex]
end = loop[(pointIndex + 1) % len(loop)]
bevelLoop += getBevelPath( begin, center, close, end, radius )
return [ euclidean.getLoopWithoutCloseSequentialPoints( close, bevelLoop ) ]
def processElementNode(elementNode):
"Process the xml element."
lineation.processElementNodeByFunction(elementNode, getManipulatedPaths)
|
Pointedstick/ReplicatorG
|
skein_engines/skeinforge-44/fabmetheus_utilities/geometry/manipulation_paths/bevel.py
|
Python
|
gpl-2.0
| 2,387
|
#!/usr/bin/env python
'''used as webhook'''
import os
from flask import (
Flask,
request,
make_response,
jsonify
)
app = Flask(__name__)
log = app.logger
def index_getter(letter):
index = 0
index_list = []
for i in 'kitten'.upper():
if i == letter:
index_list.append(index)
index+=1
return index_list
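# Sanity-check sketch for index_getter (added illustration, safe at import
# time): 'KITTEN' carries 'T' at indices 2 and 3.
assert index_getter('T') == [2, 3]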
def reset_vars():
global guess_word
global img_links
guess_word = ['K', '_', '_', '_', '_', 'N']
img_links = [
'https://examples.api.ai/CatGame/cat-game-1.PNG',
'https://examples.api.ai/CatGame/cat-game-2.PNG',
"https://examples.api.ai/CatGame/cat-game-3.PNG"
]
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
action = req["result"].get("action")
if action == 'game.letter':
res = checkLetter(req)
elif action == 'game.word.correct':
res = correctWord(req)
elif action == 'confirmation.yes':
res = gameReset(req)
elif action == 'game.start':
res = gameStart(req)
elif action and action.startswith('smalltalk'):
res = smallTalk(req)
    else:
        log.error("Unexpected action.")
        res = {}  # fall back to an empty body so jsonify below cannot hit a NameError
    return make_response(jsonify(res))
def gameStart(req):
reset_vars()
speech = req["result"]["fulfillment"].get("speech")
return {
"speech": speech,
"displayText": speech
}
def smallTalk(req):
speech = req["result"]["fulfillment"].get("speech")
return {
"speech": speech,
"displayText": speech
}
def gameReset(req):
speech = req["result"]["fulfillment"].get("speech").replace(' _ _ _ _ ', '____')
reset_vars()
return {
"speech": speech,
"displayText": speech,
"contextOut": ['yes']
}
def checkLetter(req):
attachments = []
letter = req['result']['resolvedQuery'].upper()
letter_index = index_getter(letter)
letter_diff = [i for i in list('kitten'.upper()) if i not in guess_word]
if guess_word.count(letter) == 0 and letter in letter_diff:
for i in letter_index:
guess_word[i] = letter
if '_' in guess_word:
output = "That's right! " + ''.join(guess_word) + '. Guess the next one.'
if len(img_links) > 0:
link = img_links.pop(0)
attachments.append({"title": "IMAGE", "image_url": link})
else:
output = 'You are so smart! Fantastic! Here is your kitten.'
attachments.append({"title": "IMAGE", "image_url": "https://examples.api.ai/CatGame/cat-game-3.PNG"})
reset_vars()
elif letter in guess_word:
output = 'You have already guessed this letter. Try again.'
else:
output = 'Almost there. Try again!'
slack_message = {'text': output, "attachments":attachments}
return {
#"speech": output,
"displayText": output,
"contextOut": ['guessing'],
"data": {"slack": slack_message}
}
def correctWord(req):
global guess_word
speech = req["result"]["fulfillment"].get("speech")
slack_message = {
'text': speech, "attachments":
[
{"title": "IMAGE", "image_url": "https://examples.api.ai/CatGame/cat-game-3.PNG"}
]
}
reset_vars()
return {
"speech": speech,
"displayText": speech,
"contextOut": [],
"data": {"slack": slack_message}
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
app.run(
debug=True,
port=port,
host='0.0.0.0'
)
|
mrukhlov/gamesagent
|
app.py
|
Python
|
apache-2.0
| 3,157
|
#!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from math import *
class TestLowPass(TestCase):
def testRegression(self):
sr = 44100.
pi2 = 2*pi
signal = [.25*cos(t*pi2*5/sr) + \
.25*cos(t*pi2*50/sr) + \
.25*cos(t*pi2*500./sr) + \
.25*cos(t*pi2*5000./sr)
for t in range(44100)]
filteredSignal = LowPass(cutoffFrequency=1000)(signal)
s = Spectrum()(signal)
sf = Spectrum()(filteredSignal)
for i in range(1000):
if s[i] > 10:
self.assertTrue(sf[i] > 0.5*s[i])
for i in range(1001, len(sf)):
if s[i] > 10:
self.assertTrue(sf[i] < 0.5*s[i])
suite = allTests(TestLowPass)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
MTG/essentia
|
test/src/unittests/filters/test_lowpass.py
|
Python
|
agpl-3.0
| 1,617
|
import tensorflow as tf
import sklearn
from sklearn.utils import shuffle
from sklearn import preprocessing
import numpy
import random
import pandas as pd
d = pd.read_csv('santander/train.csv')
X_train_list=[]
Y_train_list=[]
X_validate_list=[]
Y_validate_list=[]
data_list=[]
for row in d.itertuples():
#X_train_list.append(list(row[1:371]))
#Y_train_list.append(list(row[372]))
data_list.append(list(row[1:]))
random.shuffle(data_list)
datasize=len(data_list)
training_data = data_list[:datasize*60/100]
validate_data = data_list[datasize*60/100:]
for innerlist in training_data:
#print len(innerlist)
X_train_list.append(innerlist[:369])
if innerlist[370]==0:
Y_train_list.append([1,0])
else:
Y_train_list.append([0,1])
for innerlist in validate_data:
X_validate_list.append(innerlist[:369])
if innerlist[370]==0:
Y_validate_list.append([1,0])
else:
Y_validate_list.append([0,1])
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train_list)
X_validate = scaler.fit_transform(X_validate_list)
print ("Scaled")
X_train_list=[]
X_validate_list=[]
X_train_list_2 = X_train.tolist()
X_validate_list_2 = X_validate.tolist()
x=tf.placeholder(tf.float32, [None, 369])
X_train=[]
X_validate=[]
data_list=[]
d=[]
training_data=[]
validate_data=[]
#W1 = tf.Variable(tf.zeros([13,5]))
#W2 = tf.Variable(tf.zeros([5,1]))
#W = tf.Variable(tf.random_normal([13,1], mean = 0.0,stddev=0.35))
#b1 = tf.Variable(tf.zeros([1]))
#b2 = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.zeros([369,2]))
b = tf.Variable(tf.zeros([2]))
#y = tf.nn.softmax(tf.matmul(x,W) + b)
#h1 = tf.nn.relu(tf.matmul(x,W1) + b1)
#y = tf.matmul(h1,W2) + b2
y = tf.nn.softmax(tf.matmul(x,W) + b)
y_ = tf.placeholder(tf.float32, [None, 2])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
#sse = tf.reduce_sum( tf.pow((y_ - y),2))
sse = tf.reduce_sum( (y_ - y)*(y_ - y))
mse = tf.reduce_mean( tf.pow((y_ - y),2))
train_step = tf.train.GradientDescentOptimizer(0.000000000000000001).minimize(cross_entropy)
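# Note (editorial): a learning rate of 1e-18 makes these updates effectively
# zero; for softmax + cross-entropy a starting value around 1e-2 to 1e-4 is
# the usual range if the loss is actually meant to move.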
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
correct_predition = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_predition, "float"))
index_list = range(len(X_train_list_2))
for i in range(50):
#1000000 3489.25
#batch_xs, batch_ys = shuffle(X_train_list, Y_train_list, random_state=i)
#for (x,y) in zip(batch_xs, batch_ys):
#sess.run(train_step, feed_dict={x: x, y_: y})
random.shuffle(index_list)
tempx=[]
tempy=[]
##print index_list
    count=0
    for j in index_list:
        if count > 500:
            break
        count += 1  # bug fix: count was never incremented, so the 500-sample batch cap never applied
        tempx.append(X_train_list_2[j])
        tempy.append(Y_train_list[j])
sess.run(train_step, feed_dict={x: tempx, y_: tempy})
if i%10 == 0:
print sess.run(accuracy, feed_dict={x: tempx, y_: tempy})
#print sess.run(sse, feed_dict={x: tempx, y_: tempy})
#sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
#sess.run(train_step, feed_dict={x: X_train[i], y_: Y_train2[i]})
print "Done training"
#print sess.run(sse, feed_dict={x: X_test, y_: Y_test2})
print "W:"
print sess.run(W)
print "b:"
print sess.run(b)
print sess.run(accuracy, feed_dict={x: X_validate_list_2, y_: Y_validate_list})
print "y:"
print sess.run(y, feed_dict={x: X_validate_list_2})
print "y_:"
print sess.run(y_, feed_dict={y_: Y_validate_list})
#print sess.run(y, feed_dict={x: X_validate_list})
#print Y_validate_list
#test= pd.read_csv('santander/test.csv')
#print sess.run(y, feed_dict={x: X_validate_list})
|
anishgt/DeepCustomerSatisfaction
|
linear.py
|
Python
|
apache-2.0
| 3,571
|
from minheap import MinHeap
class SimplePriorityQueue(MinHeap):
'''
Priority queue built with a min-heap
'''
def __init__(self, values):
super().__init__(values)
def extract_min(self):
return super().delete_min()
    def decrease_key(self, index, key):
        '''
        Decreases the key at the given index to `key`, then bubbles the
        entry up until the min-heap property is restored
        '''
        if (self.array[index] < key):
            raise ValueError("New key is greater than key at index: " + str(index))
        self.array[index] = key
        while index > 0:
            parent_index = self._find_parent(index)
            # stop once we reach the root or a parent that is already smaller
            if parent_index < 0 or self.array[parent_index] <= self.array[index]:
                return
            self.array[parent_index], self.array[index] = \
                self.array[index], self.array[parent_index]
            index = parent_index
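# Usage sketch (an illustration; assumes MinHeap(values) heapifies `values`
# into self.array and delete_min pops the smallest element, as the methods
# above already rely on):
if __name__ == '__main__':
    pq = SimplePriorityQueue([5, 3, 8, 1])
    pq.decrease_key(2, 0)    # lower whichever key sits at index 2 to 0
    print(pq.extract_min())  # -> 0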
|
jackys-95/coding-practice
|
algorithms/linear collections/simple_priority_queue.py
|
Python
|
mit
| 828
|
#!/usr/bin/env python3
from collections import namedtuple
from pdfrw import PdfName, PdfDict, PdfObject, PdfString
PageLabelTuple = namedtuple("PageLabelScheme",
"startpage style prefix firstpagenum")
defaults = {"style": "arabic", "prefix": '', "firstpagenum": 1}
styles = {"arabic": PdfName('D'),
"roman lowercase": PdfName('r'),
"roman uppercase": PdfName('R'),
"letters lowercase": PdfName('a'),
"letters uppercase": PdfName('A')}
stylecodes = {v: a for a, v in styles.items()}
class PageLabelScheme(PageLabelTuple):
"""Represents a page numbering scheme.
startpage : the index in the pdf (starting from 0) of the
first page the scheme will be applied to.
style : page numbering style (arabic, roman [lowercase|uppercase], letters [lowercase|uppercase])
prefix: a prefix to be prepended to all page labels
firstpagenum : where to start numbering
"""
__slots__ = tuple()
def __new__(cls, startpage,
style=defaults["style"],
prefix=defaults["prefix"],
firstpagenum=defaults["firstpagenum"]):
if style not in styles:
raise ValueError("PageLabel style must be one of %s" % cls.styles())
return super().__new__(cls, int(startpage), style, str(prefix), int(firstpagenum))
@classmethod
def from_pdf(cls, pagenum, opts):
"""Returns a new PageLabel using options from a pdfrw object"""
return cls(pagenum,
style=stylecodes.get(opts.S, defaults["style"]),
prefix=(opts.P and opts.P.decode() or defaults["prefix"]),
firstpagenum=(opts.St or defaults["firstpagenum"]))
@staticmethod
def styles():
"""List of the allowed styles"""
return styles.keys()
def pdfobjs(self):
"""Returns a tuple of two elements to insert in the PageLabels.Nums
entry of a pdf"""
page_num = PdfObject(self.startpage)
opts = PdfDict(S=styles[self.style])
if self.prefix != defaults["prefix"]:
opts.P = PdfString.encode(self.prefix)
if self.firstpagenum != defaults["firstpagenum"]:
opts.St = PdfObject(self.firstpagenum)
return page_num, opts
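# Illustrative use of the scheme (a sketch with hypothetical values):
if __name__ == '__main__':
    scheme = PageLabelScheme(0, style="roman lowercase", prefix="p-")
    num, opts = scheme.pdfobjs()
    print(num, opts)  # PdfObject for page 0 plus a PdfDict carrying /S and /P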
|
lovasoa/pagelabels-py
|
pagelabels/pagelabelscheme.py
|
Python
|
gpl-3.0
| 2,320
|
from cfn_sphere.cli import get_first_account_alias_or_account_id
from cfn_sphere.exceptions import CfnSphereException
try:
from unittest2 import TestCase
from mock import patch, Mock
except ImportError:
from unittest import TestCase
from mock import patch, Mock
class CliTests(TestCase):
@patch("boto3.client")
def test_get_first_account_alias_or_account_id_returns_first_account_alias(self, boto_mock):
boto_mock.return_value.list_account_aliases.return_value = {"AccountAliases": ["a", "b", "c"]}
result = get_first_account_alias_or_account_id()
self.assertEqual("a", result)
@patch("boto3.client")
def test_get_first_account_alias_or_account_id_returns_account_id_if_no_alias_found(self, boto_mock):
boto_mock.return_value.list_account_aliases.return_value = {"AccountAliases": []}
boto_mock.return_value.get_caller_identity.return_value = {"Arn": "arn:aws:iam::ACCOUNT_ID:user/USERNAME"}
result = get_first_account_alias_or_account_id()
self.assertEqual("ACCOUNT_ID", result)
|
marco-hoyer/cfn-sphere
|
src/unittest/python/cli_tests.py
|
Python
|
apache-2.0
| 1,074
|
import sys
import time  # needed by trickleHTTPRequest's time.sleep calls below
import socket
import queue
import statistics
import threading
import json
from .stats import OLSRegression
def trickleHTTPRequest(ip,port,hostname):
my_port = None
try:
sock = socket.create_connection((ip, port))
my_port = sock.getsockname()[1]
#print('.')
sock.sendall(b'GET / HTTP/1.1\r\n')
time.sleep(0.5)
rest = b'''Host: '''+hostname.encode('utf-8')+b'''\r\nUser-Agent: Secret Agent Man\r\nX-Extra: extra read all about it!\r\nConnection: close\r\n'''
for r in rest:
sock.sendall(bytearray([r]))
time.sleep(0.05)
time.sleep(0.5)
        sock.sendall(b'\r\n')
r = None
while r != b'':
r = sock.recv(16)
sock.close()
except Exception as e:
pass
return my_port
def runTimestampProbes(host_ip, port, hostname, num_trials, concurrency=4):
#XXX: can we use WorkerThreads for this parallel stuff?
myq = queue.Queue()
def threadWrapper(*args):
try:
myq.put(trickleHTTPRequest(*args))
except Exception as e:
sys.stderr.write("ERROR from trickleHTTPRequest: %s\n" % repr(e))
myq.put(None)
threads = []
ports = []
for i in range(num_trials):
if len(threads) >= concurrency:
ports.append(myq.get())
t = threading.Thread(target=threadWrapper, args=(host_ip, port, hostname))
t.start()
threads.append(t)
for t in threads:
t.join()
while myq.qsize() > 0:
ports.append(myq.get())
return ports
def computeTimestampPrecision(sniffer_fp, ports):
rcvd = []
for line in sniffer_fp:
p = json.loads(line)
if p['sent']==0:
rcvd.append((p['observed'],p['tsval'],int(p['local_port'])))
slopes = []
for port in ports:
trcvd = [tr for tr in rcvd if tr[2]==port and tr[1]!=0]
if len(trcvd) < 2:
sys.stderr.write("WARN: Inadequate data points.\n")
continue
if trcvd[0][1] > trcvd[-1][1]:
sys.stderr.write("WARN: TSval wrap.\n")
continue
x = [tr[1] for tr in trcvd]
y = [tr[0] for tr in trcvd]
slope,intercept = OLSRegression(x, y)
slopes.append(slope)
if len(slopes) == 0:
return None,None,None
m = statistics.mean(slopes)
if len(slopes) == 1:
return (m, None, slopes)
else:
return (m, statistics.stdev(slopes), slopes)
|
ecbftw/nanown
|
trunk/lib/nanownlib/tcpts.py
|
Python
|
gpl-3.0
| 2,528
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
from flask.ext.script import Manager, Shell, Server
from flask.ext.migrate import MigrateCommand
from foobar.app import create_app
from foobar.user.models import User
from foobar.settings import DevConfig, ProdConfig
from foobar.database import db
if os.environ.get("FOOBAR_ENV") == 'prod':
app = create_app(ProdConfig)
else:
app = create_app(DevConfig)
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
manager = Manager(app)
def _make_context():
"""Return context dict for a shell session so you can access
app, db, and the User model by default.
"""
return {'app': app, 'db': db, 'User': User}
@manager.command
def test():
"""Run the tests."""
import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
return exit_code
manager.add_command('server', Server())
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
ghofranehr/foobar
|
manage.py
|
Python
|
bsd-3-clause
| 1,092
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2022 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from decimal import Decimal
from ddd.logic.preparation_programme_annuel_etudiant.domain.service.i_catalogue_formations import \
ICatalogueFormationsTranslator
from ddd.logic.preparation_programme_annuel_etudiant.dtos import FormationDTO, ContenuGroupementCatalogueDTO, \
GroupementDTO, GroupementCatalogueDTO, UniteEnseignementCatalogueDTO
from program_management.ddd.domain.program_tree_version import STANDARD
ANNEE = 2021
def _cas_nominal_formation_version_standard():
SIGLE = 'ECGE1BA'
return FormationDTO(
racine=ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule=SIGLE,
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Bachelier en sciences économiques et de gestion',
code='LECGE100B'
),
contenu_ordonne_catalogue=[
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Contenu :',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Contenu :',
code='LECGE100T',
),
contenu_ordonne_catalogue=[
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Programme de base',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Programme de base',
code='LECGE900R',
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=1,
code='LESPO1113',
intitule_complet='Sociologie et anthropologie des mondes contemporains',
quadrimestre='Q1or2',
quadrimestre_texte='Q1 ou Q2',
credits_absolus=Decimal(5),
credits_relatifs=None,
volume_annuel_pm=40,
volume_annuel_pp=0,
obligatoire=True,
session_derogation='',
)
]),
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Formation pluridisciplinaire en sciences humaines',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Formation pluridisciplinaire en sciences humaines',
code='LECGE100R',
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=3,
code='LESPO1321',
intitule_complet='Economic, Political and Social Ethics',
quadrimestre='Q2',
quadrimestre_texte='Q2',
credits_absolus=Decimal(3),
credits_relatifs=None,
volume_annuel_pm=30,
volume_annuel_pp=0,
obligatoire=True,
session_derogation='',
)
]),
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Cours au choix',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Cours au choix',
code='LECGE860R',
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=1,
code='LCOPS1124',
intitule_complet='Philosophie',
quadrimestre='Q2',
quadrimestre_texte='Q2',
credits_absolus=Decimal(5),
credits_relatifs=None,
volume_annuel_pm=30,
volume_annuel_pp=0,
obligatoire=True,
session_derogation='',
)
]),
])
],
),
annee=ANNEE,
sigle=SIGLE,
version=STANDARD,
transition_name='',
intitule_formation='Bachelier en sciences économiques et de gestion'
)
def _cas_formation_version_particuliere():
SIGLE = 'CORP2MS/CS'
return FormationDTO(
racine=ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule=SIGLE,
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Master [120] en communication[ Double diplôme UCLouvain - uSherbrooke ]',
code='LCORP203S'
),
contenu_ordonne_catalogue=[
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Tronc commun',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Tronc commun',
code='LCORP114T'
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=2,
code='LCOMU2904B',
intitule_complet="Séminaire d'accompagnement au mémoire : méthodologie",
quadrimestre='Q2',
credits_absolus=Decimal(20),
volume_annuel_pm=0,
volume_annuel_pp=0,
obligatoire=False,
credits_relatifs=None,
session_derogation='',
quadrimestre_texte='Q2'
)
]
),
],
),
annee=ANNEE,
sigle=SIGLE,
version='DDSHERBROOKE',
transition_name='',
intitule_formation='Master [120] en communication[ Double diplôme UCLouvain - uSherbrooke ]',
)
def _cas_formation_version_transition():
SIGLE = 'DATI2MS/G'
INTITULE = "Master [120] en science des données, orientation technologies de l'information, à finalité " \
"spécialisée[ Version 2020 ]"
return FormationDTO(
racine=ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule=SIGLE,
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet=INTITULE,
code='LDATI200S'
),
contenu_ordonne_catalogue=[
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Contenu:',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Contenu:',
code='TDATI101T'
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=2,
code='LINFO2369',
intitule_complet="Artificial intelligence and machine learning seminar",
quadrimestre='Q1',
credits_absolus=Decimal(3),
volume_annuel_pm=30,
volume_annuel_pp=0,
obligatoire=False,
credits_relatifs=None,
session_derogation='',
quadrimestre_texte='Q1'
)
]
),
],
),
annee=ANNEE,
sigle=SIGLE,
version='Version 2020',
transition_name='',
intitule_formation=INTITULE,
)
def _cas_formation_version_particuliere_transition():
SIGLE = 'CORP2MS/CS'
VERSION = 'Version 2020'
INTITULE = "{}[ {} ]".format(
"Master [120] en communication , à finalité spécialisée: communication stratégique des organisations",
VERSION)
return FormationDTO(
racine=ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule=SIGLE,
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet=INTITULE,
code='LCORP201S'
),
contenu_ordonne_catalogue=[
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Contenu:',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Contenu:',
code='TCORP102T'
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=2,
code='LCOMU9870',
intitule_complet="Séminaire d'intégration en communication stratégique (Sherbrooke)",
quadrimestre=None,
quadrimestre_texte=None,
credits_absolus=Decimal(5),
credits_relatifs=None,
volume_annuel_pm=0,
volume_annuel_pp=0,
obligatoire=False,
session_derogation='',
)
]
),
],
),
annee=ANNEE,
sigle=SIGLE,
version=VERSION,
transition_name='',
intitule_formation=INTITULE,
)
def _cas_formation_version_standard_annee_moins_1():
SIGLE = 'ECGE1BA'
INTITULE = 'Bachelier en sciences économiques et de gestion'
return FormationDTO(
racine=ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule=SIGLE,
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet=INTITULE,
code='LECGE100B'
),
contenu_ordonne_catalogue=[
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Content:',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Content:',
code='LECGE100T',
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=1,
code='LESPO1113',
intitule_complet='Sociologie et anthropologie des mondes contemporains',
quadrimestre='Q1or2',
quadrimestre_texte='Q1 ou Q2',
credits_absolus=Decimal(5),
credits_relatifs=None,
volume_annuel_pm=40,
volume_annuel_pp=0,
obligatoire=True,
session_derogation='',
)
]),
],
),
annee=ANNEE-1,
sigle=SIGLE,
version=STANDARD,
transition_name='',
intitule_formation=INTITULE
)
def _cas_mini_formation_version_standard():
SIGLE = 'MINADROI'
INTITULE = "Mineure en droit (accès)"
return FormationDTO(
racine=ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule=SIGLE,
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet=INTITULE,
code='LADRT100I'
),
contenu_ordonne_catalogue=[
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Contenu:',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Contenu:',
code='LADRT100T',
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=3,
code='LDROI1225',
intitule_complet='Droit de la procédure pénale',
quadrimestre='Q2',
quadrimestre_texte='Q2',
credits_absolus=Decimal(4),
credits_relatifs=None,
volume_annuel_pm=45,
volume_annuel_pp=10,
obligatoire=True,
session_derogation='',
)
]),
],
),
annee=ANNEE,
sigle=SIGLE,
version=STANDARD,
transition_name='',
intitule_formation=INTITULE
)
def _cas_mini_formation_version_particuliere():
SIGLE = 'MINADROI'
VERSION = "Pour les bacheliers en sciences économiques et de gestion"
INTITULE = "{}[{}]".format("Mineure en droit (accès)", VERSION)
return FormationDTO(
racine=ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule=SIGLE,
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet=INTITULE,
code='LADRT100S'
),
contenu_ordonne_catalogue=[
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Contenu:',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Contenu:',
code='LADRT101T',
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=2,
code='LDROI1222',
intitule_complet="Droit constitutionnel",
quadrimestre='Q1and2',
quadrimestre_texte='Q1 et Q2',
credits_absolus=Decimal(8),
credits_relatifs=None,
volume_annuel_pm=90,
volume_annuel_pp=14,
obligatoire=True,
session_derogation='',
)
]),
],
),
annee=ANNEE,
sigle=SIGLE,
version=VERSION,
transition_name='',
intitule_formation=INTITULE
)
def _cas_mini_formation_version_transition():
SIGLE = 'MINADROI'
VERSION = "Version 2020 "
INTITULE = "{}[{}]".format("Mineure en droit (accès)", VERSION)
return FormationDTO(
racine=ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule=SIGLE,
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet=INTITULE,
code='LADRT111S'
),
contenu_ordonne_catalogue=[
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Contenu:',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Contenu:',
code='TADRT100T',
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=2,
code='LDROI1223',
intitule_complet="Droit des obligations",
quadrimestre='Q3',
quadrimestre_texte='Q3',
credits_absolus=Decimal(11.5),
credits_relatifs=None,
volume_annuel_pm=0,
volume_annuel_pp=50,
obligatoire=True,
session_derogation='',
)
]),
],
),
annee=ANNEE,
sigle=SIGLE,
version=VERSION,
transition_name='',
intitule_formation=INTITULE
)
def _cas_mini_formation_version_particuliere_transition():
SIGLE = 'MINADROI'
VERSION = "Version 2020 "
INTITULE = "{}[{}]".format("Mineure en droit (accès)", VERSION)
return FormationDTO(
racine=ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule=SIGLE,
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet=INTITULE,
code='LADRT101S'
),
contenu_ordonne_catalogue=[
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Contenu:',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Contenu:',
code='TADRT101T',
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=2,
code='LDROI1225',
intitule_complet="Droit de la procédure pénale",
quadrimestre='Q2',
quadrimestre_texte='Q2',
credits_absolus=Decimal(4),
credits_relatifs=None,
volume_annuel_pm=45,
volume_annuel_pp=10,
obligatoire=True,
session_derogation='',
)
]),
],
),
annee=ANNEE,
sigle=SIGLE,
version=VERSION,
transition_name='',
intitule_formation=INTITULE
)
def _cas_mini_formation_version_standard_annee_moins_1():
SIGLE = 'MINADROI'
VERSION = STANDARD
INTITULE = "Mineure en droit (accès)"
return FormationDTO(
racine=ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule=SIGLE,
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet=INTITULE,
code='LADRT121S'
),
contenu_ordonne_catalogue=[
ContenuGroupementCatalogueDTO(
groupement_contenant=GroupementCatalogueDTO(
intitule='Contenu:',
obligatoire=True,
remarque='Remarque',
credits=Decimal(10),
intitule_complet='Contenu:',
code='LADRT100T',
),
contenu_ordonne_catalogue=[
UniteEnseignementCatalogueDTO(
bloc=3,
code='LDROI1225',
intitule_complet="Droit de la procédure pénale",
quadrimestre='Q2',
quadrimestre_texte='Q2',
credits_absolus=Decimal(4),
credits_relatifs=None,
volume_annuel_pm=45,
volume_annuel_pp=10,
obligatoire=True,
session_derogation='',
)
]),
],
),
annee=ANNEE-1,
sigle=SIGLE,
version=VERSION,
transition_name='',
intitule_formation=INTITULE
)
CAS_NOMINAL_FORMATION_STANDARD = _cas_nominal_formation_version_standard()
CAS_FORMATION_VERSION_PARTICULIERE = _cas_formation_version_particuliere()
CAS_FORMATION_VERSION_TRANSITION = _cas_formation_version_transition()
CAS_FORMATION_VERSION_PARTICULIERE_TRANSITION = _cas_formation_version_particuliere_transition()
CAS_FORMATION_STANDARD_ANNEE_MOINS_1 = _cas_formation_version_standard_annee_moins_1()
CAS_MINI_FORMATION_VERSION_STANDARD = _cas_mini_formation_version_standard()
CAS_MINI_FORMATION_VERSION_PARTICULIERE = _cas_mini_formation_version_particuliere()
CAS_MINI_FORMATION_VERSION_TRANSITION = _cas_mini_formation_version_transition()
CAS_MINI_FORMATION_VERSION_PARTICULIERE_TRANSITION = _cas_mini_formation_version_particuliere_transition()
CAS_MINI_FORMATION_VERSION_STANDARD_ANNEE_MOINS_1 = _cas_mini_formation_version_standard_annee_moins_1()
class CatalogueFormationsTranslatorInMemory(ICatalogueFormationsTranslator):
dtos = [
CAS_NOMINAL_FORMATION_STANDARD,
CAS_FORMATION_VERSION_PARTICULIERE,
CAS_FORMATION_VERSION_TRANSITION,
CAS_FORMATION_VERSION_PARTICULIERE_TRANSITION,
CAS_FORMATION_STANDARD_ANNEE_MOINS_1,
CAS_MINI_FORMATION_VERSION_STANDARD,
CAS_MINI_FORMATION_VERSION_PARTICULIERE,
CAS_MINI_FORMATION_VERSION_TRANSITION,
CAS_MINI_FORMATION_VERSION_PARTICULIERE_TRANSITION,
CAS_MINI_FORMATION_VERSION_STANDARD_ANNEE_MOINS_1,
]
@classmethod
def get_formation(cls, code_programme: str, annee: int) -> 'FormationDTO':
return next(
dto for dto in cls.dtos
if dto.racine.groupement_contenant.code == code_programme and dto.annee == annee
)
@classmethod
def get_groupement(
cls,
sigle_formation: str,
annee: int,
version_formation: str,
code_groupement: str
) -> 'GroupementDTO':
raise NotImplementedError()
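# Minimal demonstration of get_formation against the in-memory fixtures
# above (an illustrative sketch, not part of the original service):
if __name__ == '__main__':
    dto = CatalogueFormationsTranslatorInMemory.get_formation('LECGE100B', ANNEE)
    assert dto is CAS_NOMINAL_FORMATION_STANDARD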
|
uclouvain/osis
|
infrastructure/preparation_programme_annuel_etudiant/domain/service/in_memory/catalogue_formations.py
|
Python
|
agpl-3.0
| 25,979
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/responses/get_player_response.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.data import player_data_pb2 as pogoprotos_dot_data_dot_player__data__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/responses/get_player_response.proto',
package='pogoprotos.networking.responses',
syntax='proto3',
serialized_pb=_b('\n9pogoprotos/networking/responses/get_player_response.proto\x12\x1fpogoprotos.networking.responses\x1a!pogoprotos/data/player_data.proto\"t\n\x11GetPlayerResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x30\n\x0bplayer_data\x18\x02 \x01(\x0b\x32\x1b.pogoprotos.data.PlayerData\x12\x0e\n\x06\x62\x61nned\x18\x03 \x01(\x08\x12\x0c\n\x04warn\x18\x04 \x01(\x08\x62\x06proto3')
,
dependencies=[pogoprotos_dot_data_dot_player__data__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETPLAYERRESPONSE = _descriptor.Descriptor(
name='GetPlayerResponse',
full_name='pogoprotos.networking.responses.GetPlayerResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='success', full_name='pogoprotos.networking.responses.GetPlayerResponse.success', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_data', full_name='pogoprotos.networking.responses.GetPlayerResponse.player_data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='banned', full_name='pogoprotos.networking.responses.GetPlayerResponse.banned', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='warn', full_name='pogoprotos.networking.responses.GetPlayerResponse.warn', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=129,
serialized_end=245,
)
_GETPLAYERRESPONSE.fields_by_name['player_data'].message_type = pogoprotos_dot_data_dot_player__data__pb2._PLAYERDATA
DESCRIPTOR.message_types_by_name['GetPlayerResponse'] = _GETPLAYERRESPONSE
GetPlayerResponse = _reflection.GeneratedProtocolMessageType('GetPlayerResponse', (_message.Message,), dict(
DESCRIPTOR = _GETPLAYERRESPONSE,
__module__ = 'pogoprotos.networking.responses.get_player_response_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.responses.GetPlayerResponse)
))
_sym_db.RegisterMessage(GetPlayerResponse)
# @@protoc_insertion_point(module_scope)
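# Usage sketch (illustrative; not part of the generated file, which must not
# be edited): the generated class behaves like any protobuf message.
#
#     resp = GetPlayerResponse()
#     resp.success = True
#     data = resp.SerializeToString()
#     parsed = GetPlayerResponse.FromString(data)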
|
bellowsj/aiopogo
|
aiopogo/pogoprotos/networking/responses/get_player_response_pb2.py
|
Python
|
mit
| 3,807
|
print("hello world")
|
erocs/2017Challenges
|
challenge_0/python/dsyost/src/helloworld.py
|
Python
|
mit
| 22
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.website_event.tests.common import TestEventOnlineCommon
class TestEventExhibitorCommon(TestEventOnlineCommon):
@classmethod
def setUpClass(cls):
super(TestEventExhibitorCommon, cls).setUpClass()
# Sponsorship data
cls.sponsor_type_0 = cls.env['event.sponsor.type'].create({
'name': 'GigaTop',
'sequence': 1,
})
cls.sponsor_0_partner = cls.env['res.partner'].create({
'name': 'EventSponsor',
'country_id': cls.env.ref('base.be').id,
'email': 'event.sponsor@example.com',
'phone': '04856112233',
})
cls.sponsor_0 = cls.env['event.sponsor'].create({
'partner_id': cls.sponsor_0_partner.id,
'event_id': cls.event_0.id,
'sponsor_type_id': cls.sponsor_type_0.id,
'hour_from': 8.0,
'hour_to': 18.0,
})
|
jeremiahyan/odoo
|
addons/website_event_exhibitor/tests/common.py
|
Python
|
gpl-3.0
| 1,021
|
from setuptools import setup, find_packages
from io import open
setup(
name='django-sage-api',
version='0.1',
description='Django module for Sage 200 / Sage 200 Extra API',
long_description=open('README.md', encoding='utf-8').read(),
author='Nelson Monteiro',
author_email='nelson.reis.monteiro@gmail.com',
url='https://github.com/nelsonmonteiro/django-sage-api',
#download_url='https://pypi.python.org/pypi/django-sage-api',
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'django>=1.5.4',
        'uuid>=1.30',  # note: uuid ships in the stdlib; this PyPI backport only matters on very old Pythons
'pytz>=2017.2',
],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
nelsonmonteiro/django-sage-api
|
setup.py
|
Python
|
mit
| 1,399
|
# Date: Friday 30 June 2017 05:59:07 PM IST
# Email: nrupatunga@whodat.com
# Name: Nrupatunga
# Description: Image processing functions
import math
import numpy as np
from ..helper.BoundingBox import BoundingBox
def cropPadImage(bbox_tight, image):
"""TODO: Docstring for cropPadImage.
:returns: TODO
"""
pad_image_location = computeCropPadImageLocation(bbox_tight, image)
roi_left = min(pad_image_location.x1, (image.shape[1] - 1))
roi_bottom = min(pad_image_location.y1, (image.shape[0] - 1))
roi_width = min(image.shape[1], max(1.0, math.ceil(pad_image_location.x2 - pad_image_location.x1)))
roi_height = min(image.shape[0], max(1.0, math.ceil(pad_image_location.y2 - pad_image_location.y1)))
err = 0.000000001 # To take care of floating point arithmetic errors
cropped_image = image[int(roi_bottom + err):int(roi_bottom + roi_height), int(roi_left + err):int(roi_left + roi_width)]
output_width = max(math.ceil(bbox_tight.compute_output_width()), roi_width)
output_height = max(math.ceil(bbox_tight.compute_output_height()), roi_height)
if image.ndim > 2:
output_image = np.zeros((int(output_height), int(output_width), image.shape[2]), dtype=image.dtype)
else:
output_image = np.zeros((int(output_height), int(output_width)), dtype=image.dtype)
edge_spacing_x = min(bbox_tight.edge_spacing_x(), (image.shape[1] - 1))
edge_spacing_y = min(bbox_tight.edge_spacing_y(), (image.shape[0] - 1))
# if output_image[int(edge_spacing_y):int(edge_spacing_y) + cropped_image.shape[0], int(edge_spacing_x):int(edge_spacing_x) + cropped_image.shape[1]].shape != cropped_image.shape :
# import pdb
# pdb.set_trace()
# print('debug')
# rounding should be done to match the width and height
output_image[int(edge_spacing_y):int(edge_spacing_y) + cropped_image.shape[0], int(edge_spacing_x):int(edge_spacing_x) + cropped_image.shape[1]] = cropped_image
return output_image, pad_image_location, edge_spacing_x, edge_spacing_y
def computeCropPadImageLocation(bbox_tight, image):
"""TODO: Docstring for computeCropPadImageLocation.
:returns: TODO
"""
# Center of the bounding box
bbox_center_x = bbox_tight.get_center_x()
bbox_center_y = bbox_tight.get_center_y()
image_height = image.shape[0]
image_width = image.shape[1]
# Padded output width and height
output_width = bbox_tight.compute_output_width()
output_height = bbox_tight.compute_output_height()
roi_left = max(0.0, bbox_center_x - (output_width / 2.))
roi_bottom = max(0.0, bbox_center_y - (output_height / 2.))
# Padded roi width
left_half = min(output_width / 2., bbox_center_x)
right_half = min(output_width / 2., image_width - bbox_center_x)
roi_width = max(1.0, left_half + right_half)
# Padded roi height
top_half = min(output_height / 2., bbox_center_y)
bottom_half = min(output_height / 2., image_height - bbox_center_y)
roi_height = max(1.0, top_half + bottom_half)
# Padded image location in the original image
objPadImageLocation = BoundingBox(roi_left, roi_bottom, roi_left + roi_width, roi_bottom + roi_height)
return objPadImageLocation
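# Usage sketch (illustrative; the BoundingBox coordinates are hypothetical):
#
#     import numpy as np
#     image = np.zeros((480, 640, 3), dtype=np.uint8)
#     bbox = BoundingBox(100, 100, 200, 180)   # x1, y1, x2, y2
#     output, pad_loc, edge_x, edge_y = cropPadImage(bbox, image)
#
# ``output`` is the padded crop, ``pad_loc`` the crop region in the original
# image, and ``edge_x``/``edge_y`` the offsets of the crop inside ``output``.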
|
nrupatunga/PY-GOTURN
|
goturn/helper/image_proc.py
|
Python
|
mit
| 3,220
|
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import webob
from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import exception
from cinder.i18n import _LI
from cinder import volume
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('snapshot', 'snapshot_unmanage')
class SnapshotUnmanageController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SnapshotUnmanageController, self).__init__(*args, **kwargs)
self.volume_api = volume.API()
@wsgi.response(202)
@wsgi.action('os-unmanage')
def unmanage(self, req, id, body):
"""Stop managing a snapshot.
This action is very much like a delete, except that a different
method (unmanage) is called on the Cinder driver. This has the effect
of removing the snapshot from Cinder management without actually
removing the backend storage object associated with it.
There are no required parameters.
A Not Found error is returned if the specified snapshot does not exist.
"""
context = req.environ['cinder.context']
authorize(context)
LOG.info(_LI("Unmanage snapshot with id: %s"), id, context=context)
try:
snapshot = self.volume_api.get_snapshot(context, id)
self.volume_api.delete_snapshot(context, snapshot,
unmanage_only=True)
except exception.SnapshotNotFound as ex:
raise exc.HTTPNotFound(explanation=ex.msg)
except exception.InvalidSnapshot as ex:
raise exc.HTTPBadRequest(explanation=ex.msg)
return webob.Response(status_int=202)
class Snapshot_unmanage(extensions.ExtensionDescriptor):
"""Enable volume unmanage operation."""
name = "SnapshotUnmanage"
alias = "os-snapshot-unmanage"
updated = "2014-12-31T00:00:00+00:00"
def get_controller_extensions(self):
controller = SnapshotUnmanageController()
extension = extensions.ControllerExtension(self, 'snapshots',
controller)
return [extension]
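# Example request (illustrative; the IDs are placeholders): the extension adds
# an ``os-unmanage`` action on the snapshots resource, so a client would POST:
#
#     POST /v2/{project_id}/snapshots/{snapshot_id}/action
#     {"os-unmanage": null}
#
# which returns 202 Accepted and removes the snapshot from Cinder management
# without deleting the backend storage object.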
|
bswartz/cinder
|
cinder/api/contrib/snapshot_unmanage.py
|
Python
|
apache-2.0
| 2,794
|
import discord
import asyncio
from dateparser import parse
from datetime import datetime
import db
@asyncio.coroutine
def task(client, config):
yield from client.wait_until_ready()
    now = datetime.now().timestamp()
    c = db.cursor()
    c.execute("SELECT target, time, message FROM alerts WHERE time > {}".format(now))
for alert in c.fetchall():
yield from asyncio.sleep(alert[1] - datetime.now().timestamp())
yield from client.send_message(discord.Object(id = config['main_channel']), alert[0] + ' ' + alert[2])
@asyncio.coroutine
def queue(target, time, message):
when = parse(time, locales=['en-AU'])
if when < datetime.now():
when = parse('in ' + time, locales=['en-AU'])
db.insert('alerts', {'target': target, 'time': when.timestamp(), 'message': message})
delay = (when - datetime.now()).total_seconds()
yield from asyncio.sleep(delay)
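# Usage sketch (illustrative; the mention string and wording are placeholders,
# called from within another coroutine):
#
#     yield from queue('<@123456789>', 'in 10 minutes', 'stand-up time')
#
# ``queue`` parses the time with dateparser (retrying with an "in " prefix for
# bare durations), stores the alert in the db, then sleeps until it is due.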
|
flukiluke/eris
|
alert.py
|
Python
|
mit
| 928
|
# coding: utf-8
#
# Copyright © 2012-2014 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
from localization import N_
from outputable import Outputable
from changes import FileDiff
import comment
import filtering
import interval
import re
import subprocess
__metric_eloc__ = {"java": 500, "c": 500, "cpp": 500, "h": 300, "hpp": 300, "php": 500, "py": 500, "glsl": 1000,
"rb": 500, "js": 500, "sql": 1000, "xml": 1000}
__metric_cc_tokens__ = [[["java", "js", "c", "cc", "cpp"], ["else", "for\s+\(.*\)", "if\s+\(.*\)", "case\s+\w+:",
"default:", "while\s+\(.*\)"],
["assert", "break", "continue", "return"]],
[["py"], ["^\s+elif .*:$", "^\s+else:$", "^\s+for .*:", "^\s+if .*:$", "^\s+while .*:$"],
["^\s+assert", "break", "continue", "return"]]]
METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD = 50
METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD = 0.75
class MetricsLogic:
def __init__(self):
self.eloc = {}
self.cyclomatic_complexity = {}
self.cyclomatic_complexity_density = {}
ls_tree_r = subprocess.Popen("git ls-tree --name-only -r " + interval.get_ref(), shell=True, bufsize=1,
stdout=subprocess.PIPE).stdout
for i in ls_tree_r.readlines():
i = i.strip().decode("unicode_escape", "ignore")
i = i.encode("latin-1", "replace")
i = i.decode("utf-8", "replace").strip("\"").strip("'").strip()
if FileDiff.is_valid_extension(i) and not filtering.set_filtered(FileDiff.get_filename(i)):
file_r = subprocess.Popen("git show " + interval.get_ref() + ":\"{0}\"".format(i.strip()),
shell=True, bufsize=1, stdout=subprocess.PIPE).stdout.readlines()
extension = FileDiff.get_extension(i)
lines = MetricsLogic.get_eloc(file_r, extension)
cycc = MetricsLogic.get_cyclomatic_complexity(file_r, extension)
				if __metric_eloc__.get(extension, None) is not None and __metric_eloc__[extension] < lines:
self.eloc[i.strip()] = lines
if METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD < cycc:
self.cyclomatic_complexity[i.strip()] = cycc
if lines > 0 and METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD < cycc / float(lines):
self.cyclomatic_complexity_density[i.strip()] = cycc / float(lines)
@staticmethod
def get_cyclomatic_complexity(file_r, extension):
is_inside_comment = False
cc_counter = 0
entry_tokens = None
exit_tokens = None
for i in __metric_cc_tokens__:
if extension in i[0]:
entry_tokens = i[1]
exit_tokens = i[2]
if entry_tokens or exit_tokens:
for i in file_r:
i = i.decode("utf-8", "replace")
(_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i)
if not is_inside_comment and not comment.is_comment(extension, i):
for j in entry_tokens:
if re.search(j, i, re.DOTALL):
cc_counter += 2
for j in exit_tokens:
if re.search(j, i, re.DOTALL):
cc_counter += 1
return cc_counter
return -1
@staticmethod
def get_eloc(file_r, extension):
is_inside_comment = False
eloc_counter = 0
for i in file_r:
i = i.decode("utf-8", "replace")
(_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i)
if not is_inside_comment and not comment.is_comment(extension, i):
eloc_counter += 1
return eloc_counter
ELOC_INFO_TEXT = N_("The following files are suspiciously big (in order of severity)")
CYCLOMATIC_COMPLEXITY_TEXT = N_("The following files have an elevated cyclomatic complexity (in order of severity)")
CYCLOMATIC_COMPLEXITY_DENSITY_TEXT = N_("The following files have an elevated cyclomatic complexity density " \
"(in order of severity)")
METRICS_MISSING_INFO_TEXT = N_("No metrics violations were found in the repository")
METRICS_VIOLATION_SCORES = [[1.0, "minimal"], [1.25, "minor"], [1.5, "medium"], [2.0, "bad"], [3.0, "severe"]]
def __get_metrics_score__(ceiling, value):
for i in reversed(METRICS_VIOLATION_SCORES):
if value > ceiling * i[0]:
return i[1]
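# Worked example (illustrative, not part of the original module):
#     __get_metrics_score__(500, 800)
# walks METRICS_VIOLATION_SCORES from "severe" down: 800 > 500*3.0 and
# 800 > 500*2.0 are false, 800 > 500*1.5 (750) is true, so it returns
# "medium". A value at or below the ceiling falls through every comparison
# and the function returns None.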
class Metrics(Outputable):
def output_text(self):
metrics_logic = MetricsLogic()
if not metrics_logic.eloc and not metrics_logic.cyclomatic_complexity and not metrics_logic.cyclomatic_complexity_density:
print("\n" + _(METRICS_MISSING_INFO_TEXT) + ".")
if metrics_logic.eloc:
print("\n" + _(ELOC_INFO_TEXT) + ":")
for i in sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True):
print(_("{0} ({1} estimated lines of code)").format(i[1], str(i[0])))
if metrics_logic.cyclomatic_complexity:
print("\n" + _(CYCLOMATIC_COMPLEXITY_TEXT) + ":")
for i in sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity.items()]), reverse = True):
print(_("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0])))
if metrics_logic.cyclomatic_complexity_density:
print("\n" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + ":")
for i in sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity_density.items()]), reverse = True):
print(_("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0]))
def output_html(self):
metrics_logic = MetricsLogic()
metrics_xml = "<div><div class=\"box\" id=\"metrics\">"
if not metrics_logic.eloc and not metrics_logic.cyclomatic_complexity and not metrics_logic.cyclomatic_complexity_density:
metrics_xml += "<p>" + _(METRICS_MISSING_INFO_TEXT) + ".</p>"
if metrics_logic.eloc:
metrics_xml += "<div><h4>" + _(ELOC_INFO_TEXT) + ".</h4>"
for num, i in enumerate(sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True)):
metrics_xml += "<div class=\"" + __get_metrics_score__(__metric_eloc__[FileDiff.get_extension(i[1])], i[0]) + \
(" odd\">" if num % 2 == 1 else "\">") + \
_("{0} ({1} estimated lines of code)").format(i[1], str(i[0])) + "</div>"
metrics_xml += "</div>"
if metrics_logic.cyclomatic_complexity:
metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_TEXT) + "</h4>"
for num, i in enumerate(sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity.items()]), reverse = True)):
metrics_xml += "<div class=\"" + __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD, i[0]) + \
(" odd\">" if num % 2 == 1 else "\">") + \
_("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0])) + "</div>"
metrics_xml += "</div>"
if metrics_logic.cyclomatic_complexity_density:
metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + "</h4>"
for num, i in enumerate(sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity_density.items()]), reverse = True)):
metrics_xml += "<div class=\"" + __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD, i[0]) + \
(" odd\">" if num % 2 == 1 else "\">") + \
_("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0]) + "</div>"
metrics_xml += "</div>"
metrics_xml += "</div></div>"
print(metrics_xml)
def output_xml(self):
metrics_logic = MetricsLogic()
if not metrics_logic.eloc and not metrics_logic.cyclomatic_complexity and not metrics_logic.cyclomatic_complexity_density:
print("\t<metrics>\n\t\t<message>" + _(METRICS_MISSING_INFO_TEXT) + "</message>\n\t</metrics>")
else:
eloc_xml = ""
if metrics_logic.eloc:
for i in sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True):
eloc_xml += "\t\t\t<estimated-lines-of-code>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n"
eloc_xml += "\t\t\t</estimated-lines-of-code>\n"
if metrics_logic.cyclomatic_complexity:
for i in sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity.items()]), reverse = True):
eloc_xml += "\t\t\t<cyclomatic-complexity>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n"
eloc_xml += "\t\t\t</cyclomatic-complexity>\n"
if metrics_logic.cyclomatic_complexity_density:
for i in sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity_density.items()]), reverse = True):
eloc_xml += "\t\t\t<cyclomatic-complexity-density>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>{0:.3f}</value>\n".format(i[0])
eloc_xml += "\t\t\t</cyclomatic-complexity-density>\n"
print("\t<metrics>\n\t\t<violations>\n" + eloc_xml + "\t\t</violations>\n\t</metrics>")
|
hehaifengcn/gitinspector
|
gitinspector/metrics.py
|
Python
|
gpl-3.0
| 9,526
|
# ##### BEGIN GPL LICENSE BLOCK #####
# animation_o3de_manual_utils.py
#
# Blender addon with a toolbar to facilitate
# the creation of Open Source Hardware assembly manuals
#
# Copyright (C) 2014 Morris Winkler <m.winkler@open3dengineering.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ##### END GPL LICENSE BLOCK #####
import bpy
#############################
# helper functions
#############################
# set keyframes
def set_keyframes(context, ob):
o3de = context.scene.o3de
try:
if o3de.set_location:
ob.keyframe_insert(data_path='location')
if o3de.set_rotation:
ob.keyframe_insert(data_path='rotation_euler')
if o3de.set_hide:
ob.keyframe_insert(data_path='hide')
if o3de.set_hide_render:
ob.keyframe_insert(data_path='hide_render')
except RuntimeError:
pass
# unset keyframes
def unset_keyframes(context, ob):
o3de = context.scene.o3de
try:
if o3de.set_location:
ob.keyframe_delete(data_path='location')
if o3de.set_rotation:
ob.keyframe_delete(data_path='rotation_euler')
if o3de.set_hide:
ob.keyframe_delete(data_path='hide')
if o3de.set_hide_render:
ob.keyframe_delete(data_path='hide_render')
except RuntimeError:
pass
# set keys on Mesh/Curve objects
def set_meshcurve(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'MESH' or ob.type == 'CURVE') and context.scene in (x for x in ob.users_scene):
set_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# unset keys on Mesh/Curve objects
def unset_meshcurve(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'MESH' or ob.type == 'CURVE') and context.scene in (x for x in ob.users_scene):
unset_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# set keys on visible Mesh/Curve objects
def set_visible_meshcurve(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'MESH' or ob.type == 'CURVE') and ob.hide == False and context.scene in (x for x in ob.users_scene):
set_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# unset keys on visible Mesh/Curve objects
def unset_visible_meshcurve(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'MESH' or ob.type == 'CURVE') and ob.hide == False and context.scene in (x for x in ob.users_scene):
unset_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# set keys on invisible Mesh/Curve objects
def set_invisible_meshcurve(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'MESH' or ob.type == 'CURVE') and ob.hide == True and context.scene in (x for x in ob.users_scene):
set_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# unset keys on invisible Mesh/Curve objects
def unset_invisible_meshcurve(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'MESH' or ob.type == 'CURVE') and ob.hide == True and context.scene in (x for x in ob.users_scene):
unset_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# set keys on Camera/Lamp objects
def set_cameralamp(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'CAMERA' or ob.type == 'LAMP') and context.scene in (x for x in ob.users_scene):
set_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# unset keys on Camera/Lamp objects
def unset_cameralamp(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'CAMERA' or ob.type == 'LAMP') and context.scene in (x for x in ob.users_scene):
unset_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# set keys on selected objects
def set_selected(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'CAMERA' or ob.type == 'LAMP' or ob.type == 'MESH' or ob.type == 'CURVE') and ob.select == True and context.scene in (x for x in ob.users_scene):
set_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# unset keys on selected objects
def unset_selected(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'CAMERA' or ob.type == 'LAMP' or ob.type == 'MESH' or ob.type == 'CURVE') and ob.select == True and context.scene in (x for x in ob.users_scene):
unset_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# set keys on all objects
def set_all(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'CAMERA' or ob.type == 'LAMP' or ob.type == 'MESH' or ob.type == 'CURVE') and context.scene in (x for x in ob.users_scene):
set_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# unset keys on all objects
def unset_all(self, context):
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'CAMERA' or ob.type == 'LAMP' or ob.type == 'MESH' or ob.type == 'CURVE') and context.scene in (x for x in ob.users_scene):
unset_keyframes(context, ob)
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
# toggle hide/hide_render on all selected objects
def toggle_hide_selected(self, context):
o3de = context.scene.o3de
_objects = context.scene.objects
for ob in _objects:
if (ob.type == 'CAMERA' or ob.type == 'LAMP' or ob.type == 'MESH' or ob.type == 'CURVE') and ob.select == True and context.scene in (x for x in ob.users_scene):
try:
                if o3de.toggle_hide:
                    ob.hide = not ob.hide
                if o3de.toggle_hide_render:
                    ob.hide_render = not ob.hide_render
except RuntimeError:
pass
def insert_frame(self, context):
o3de = context.scene.o3de
last_frame = 0
_objects = context.scene.objects
for ob in _objects:
if ob.animation_data and context.scene in (x for x in ob.users_scene):
try:
for fcurve in ob.animation_data.action.fcurves:
for keyframe_point in fcurve.keyframe_points:
if keyframe_point.co.x >= context.scene.frame_current:
keyframe_point.co.x +=1
if keyframe_point.co.x > last_frame:
last_frame = keyframe_point.co.x
            except AttributeError:
                # Skip objects whose animation_data has no action to walk.
                pass
if o3de.add_lastframe:
context.scene.frame_end = last_frame
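# Usage sketch (illustrative): these helpers use the (self, context) callback
# signature, so the addon presumably wires them to toolbar buttons or property
# updates; they can also be called directly since ``self`` is unused, e.g.:
#
#     import bpy
#     set_selected(None, bpy.context)          # key all selected objects
#     toggle_hide_selected(None, bpy.context)  # flip hide/hide_render flags
#
# Exactly how the toolbar invokes them is an assumption here.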
|
open3dengineering/animation_o3de_manual_utils
|
helpers.py
|
Python
|
gpl-2.0
| 8,071
|
"""
Test scenarios for the review xblock.
"""
import ddt
import unittest
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr
from lms.djangoapps.courseware.tests.factories import GlobalStaffFactory
from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from review import get_review_ids
import crum
class TestReviewXBlock(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Create the test environment with the review xblock.
"""
STUDENTS = [
{'email': 'learner@test.com', 'password': 'foo'},
]
XBLOCK_NAMES = ['review']
URL_BEGINNING = settings.LMS_ROOT_URL + \
'/xblock/block-v1:DillonX/DAD101x_review/3T2017+type@'
@classmethod
def setUpClass(cls):
# Nose runs setUpClass methods even if a class decorator says to skip
# the class: https://github.com/nose-devs/nose/issues/946
# So, skip the test class here if we are not in the LMS.
if settings.ROOT_URLCONF != 'lms.urls':
raise unittest.SkipTest('Test only valid in lms')
super(TestReviewXBlock, cls).setUpClass()
# Set up for the actual course
cls.course_actual = CourseFactory.create(
display_name='Review_Test_Course_ACTUAL',
org='DillonX',
number='DAD101x',
run='3T2017'
)
# There are multiple sections so the learner can load different
# problems, but should only be shown review problems from what they have loaded
with cls.store.bulk_operations(cls.course_actual.id, emit_signals=False):
cls.chapter_actual = ItemFactory.create(
parent=cls.course_actual, display_name='Overview'
)
cls.section1_actual = ItemFactory.create(
parent=cls.chapter_actual, display_name='Section 1'
)
cls.unit1_actual = ItemFactory.create(
parent=cls.section1_actual, display_name='New Unit 1'
)
cls.xblock1_actual = ItemFactory.create(
parent=cls.unit1_actual,
category='problem',
display_name='Problem 1'
)
cls.xblock2_actual = ItemFactory.create(
parent=cls.unit1_actual,
category='problem',
display_name='Problem 2'
)
cls.xblock3_actual = ItemFactory.create(
parent=cls.unit1_actual,
category='problem',
display_name='Problem 3'
)
cls.xblock4_actual = ItemFactory.create(
parent=cls.unit1_actual,
category='problem',
display_name='Problem 4'
)
cls.section2_actual = ItemFactory.create(
parent=cls.chapter_actual, display_name='Section 2'
)
cls.unit2_actual = ItemFactory.create(
parent=cls.section2_actual, display_name='New Unit 2'
)
cls.xblock5_actual = ItemFactory.create(
parent=cls.unit2_actual,
category='problem',
display_name='Problem 5'
)
cls.section3_actual = ItemFactory.create(
parent=cls.chapter_actual, display_name='Section 3'
)
cls.unit3_actual = ItemFactory.create(
parent=cls.section3_actual, display_name='New Unit 3'
)
cls.xblock6_actual = ItemFactory.create(
parent=cls.unit3_actual,
category='problem',
display_name='Problem 6'
)
cls.course_actual_url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(cls.course_actual.id),
'chapter': 'Overview',
'section': 'Welcome',
}
)
# Set up for the review course where the review problems are hosted
cls.course_review = CourseFactory.create(
display_name='Review_Test_Course_REVIEW',
org='DillonX',
number='DAD101x_review',
run='3T2017'
)
with cls.store.bulk_operations(cls.course_review.id, emit_signals=True):
cls.chapter_review = ItemFactory.create(
parent=cls.course_review, display_name='Overview'
)
cls.section_review = ItemFactory.create(
parent=cls.chapter_review, display_name='Welcome'
)
cls.unit1_review = ItemFactory.create(
parent=cls.section_review, display_name='New Unit 1'
)
cls.xblock1_review = ItemFactory.create(
parent=cls.unit1_review,
category='problem',
display_name='Problem 1'
)
cls.xblock2_review = ItemFactory.create(
parent=cls.unit1_review,
category='problem',
display_name='Problem 2'
)
cls.xblock3_review = ItemFactory.create(
parent=cls.unit1_review,
category='problem',
display_name='Problem 3'
)
cls.xblock4_review = ItemFactory.create(
parent=cls.unit1_review,
category='problem',
display_name='Problem 4'
)
cls.unit2_review = ItemFactory.create(
parent=cls.section_review, display_name='New Unit 2'
)
cls.xblock5_review = ItemFactory.create(
parent=cls.unit2_review,
category='problem',
display_name='Problem 5'
)
cls.unit3_review = ItemFactory.create(
parent=cls.section_review, display_name='New Unit 3'
)
cls.xblock6_review = ItemFactory.create(
parent=cls.unit3_review,
category='problem',
display_name='Problem 6'
)
cls.course_review_url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(cls.course_review.id),
'chapter': 'Overview',
'section': 'Welcome',
}
)
def setUp(self):
super(TestReviewXBlock, self).setUp()
for idx, student in enumerate(self.STUDENTS):
username = 'u{}'.format(idx)
self.create_account(username, student['email'], student['password'])
self.activate_user(student['email'])
self.staff_user = GlobalStaffFactory()
def enroll_student(self, email, password, course):
"""
Student login and enroll for the course
"""
self.login(email, password)
self.enroll(course, verify=True)
@attr(shard=1)
@ddt.ddt
class TestReviewFunctions(TestReviewXBlock):
"""
Check that the essential functions of the Review xBlock work as expected.
    Tests cover loading review problems, handling cases with too few
    problems, and validating the URLs returned for review content.
"""
def test_no_review_problems(self):
"""
        If a user has not seen any problems, they should receive a response
        telling them to go try more problems so they have material to review.
"""
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)
with self.store.bulk_operations(self.course_actual.id, emit_signals=False):
review_section_actual = ItemFactory.create(
parent=self.chapter_actual, display_name='Review Subsection'
)
review_unit_actual = ItemFactory.create(
parent=review_section_actual, display_name='Review Unit'
)
review_xblock_actual = ItemFactory.create( # pylint: disable=unused-variable
parent=review_unit_actual,
category='review',
display_name='Review Tool'
)
# Loading the review section
response = self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': review_section_actual.location.name,
}
))
expected_h2 = 'Nothing to review'
self.assertIn(expected_h2, response.content)
@ddt.data(5, 7)
def test_too_few_review_problems(self, num_desired):
"""
        If a user does not have enough problems to review, they should
        receive a response telling them to go try more problems so they
        have material to review.
        This loads 4 problems while asking for 5, and then loads every
        problem while asking for more than the total available.
"""
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)
# Want to load fewer problems than num_desired
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section1_actual.location.name,
}
))
if num_desired > 6:
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section2_actual.location.name,
}
))
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section3_actual.location.name,
}
))
with self.store.bulk_operations(self.course_actual.id, emit_signals=False):
review_section_actual = ItemFactory.create(
parent=self.chapter_actual, display_name='Review Subsection'
)
review_unit_actual = ItemFactory.create(
parent=review_section_actual, display_name='Review Unit'
)
review_xblock_actual = ItemFactory.create( # pylint: disable=unused-variable
parent=review_unit_actual,
category='review',
display_name='Review Tool',
num_desired=num_desired
)
# Loading the review section
response = self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': review_section_actual.location.name,
}
))
expected_h2 = 'Nothing to review'
self.assertIn(expected_h2, response.content)
@ddt.data(2, 6)
def test_review_problems(self, num_desired):
"""
If a user has enough problems to review, they should
receive a response where there are review problems for them to try.
"""
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)
# Loading problems so the learner has enough problems in the CSM
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section1_actual.location.name,
}
))
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section2_actual.location.name,
}
))
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section3_actual.location.name,
}
))
with self.store.bulk_operations(self.course_actual.id, emit_signals=False):
review_section_actual = ItemFactory.create(
parent=self.chapter_actual, display_name='Review Subsection'
)
review_unit_actual = ItemFactory.create(
parent=review_section_actual, display_name='Review Unit'
)
review_xblock_actual = ItemFactory.create( # pylint: disable=unused-variable
parent=review_unit_actual,
category='review',
display_name='Review Tool',
num_desired=num_desired
)
# Loading the review section
response = self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': review_section_actual.location.name,
}
))
expected_header_text = 'Review Problems'
# The problems are defaulted to correct upon load
# This happens because the problems "raw_possible" field is 0 and the
# "raw_earned" field is also 0.
expected_correctness_text = 'correct'
expected_problems = ['Review Problem 1', 'Review Problem 2', 'Review Problem 3',
'Review Problem 4', 'Review Problem 5', 'Review Problem 6']
self.assertIn(expected_header_text, response.content)
self.assertEqual(response.content.count(expected_correctness_text), num_desired)
# Since the problems are randomly selected, we have to check
# the correct number of problems are returned.
count = 0
for problem in expected_problems:
if problem in response.content:
count += 1
self.assertEqual(count, num_desired)
self.assertEqual(response.content.count(self.URL_BEGINNING), num_desired)
@ddt.data(2, 6)
def test_review_problem_urls(self, num_desired):
"""
Verify that the URLs returned from the Review xBlock are valid and
correct URLs for the problems the learner has seen.
"""
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)
# Loading problems so the learner has enough problems in the CSM
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section1_actual.location.name,
}
))
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section2_actual.location.name,
}
))
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section3_actual.location.name,
}
))
user = User.objects.get(email=self.STUDENTS[0]['email'])
crum.set_current_user(user)
result_urls = get_review_ids.get_problems(num_desired, self.course_actual.id)
expected_urls = [
(self.URL_BEGINNING + 'problem+block@Problem_1', True, 0),
(self.URL_BEGINNING + 'problem+block@Problem_2', True, 0),
(self.URL_BEGINNING + 'problem+block@Problem_3', True, 0),
(self.URL_BEGINNING + 'problem+block@Problem_4', True, 0),
(self.URL_BEGINNING + 'problem+block@Problem_5', True, 0),
(self.URL_BEGINNING + 'problem+block@Problem_6', True, 0)
]
# Since the problems are randomly selected, we have to check
# the correct number of urls are returned.
count = 0
for url in expected_urls:
if url in result_urls:
count += 1
self.assertEqual(count, num_desired)
@ddt.data(2, 5)
def test_review_problem_urls_unique_problem(self, num_desired):
"""
Verify that the URLs returned from the Review xBlock are valid and
        correct URLs for the problems the learner has seen. This test gives
        a unique problem to a learner and verifies that only that learner sees
        it as a review. It also ensures that a problem the learner has not
        loaded never shows up as a review problem.
        """
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)
# Loading problems so the learner has enough problems in the CSM
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section1_actual.location.name,
}
))
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section3_actual.location.name,
}
))
user = User.objects.get(email=self.STUDENTS[0]['email'])
crum.set_current_user(user)
result_urls = get_review_ids.get_problems(num_desired, self.course_actual.id)
expected_urls = [
(self.URL_BEGINNING + 'problem+block@Problem_1', True, 0),
(self.URL_BEGINNING + 'problem+block@Problem_2', True, 0),
(self.URL_BEGINNING + 'problem+block@Problem_3', True, 0),
(self.URL_BEGINNING + 'problem+block@Problem_4', True, 0),
# This is the unique problem when num_desired == 5
(self.URL_BEGINNING + 'problem+block@Problem_6', True, 0)
]
expected_not_loaded_problem = (self.URL_BEGINNING + 'problem+block@Problem_5', True, 0)
# Since the problems are randomly selected, we have to check
# the correct number of urls are returned.
count = 0
for url in expected_urls:
if url in result_urls:
count += 1
self.assertEqual(count, num_desired)
self.assertNotIn(expected_not_loaded_problem, result_urls)
# NOTE: This test is failing because when I grab the problem from the CSM,
# it is unable to find its parents. This is some issue with the BlockStructure
# and it not being populated the way we want. For now, this is being left out
# since the first course I'm working with does not use this function.
# TODO: Fix get_vertical from get_review_ids to have the block structure for this test
# or fix something in this file to make sure it populates the block structure for the CSM
@unittest.skip
def test_review_vertical_url(self):
"""
Verify that the URL returned from the Review xBlock is a valid and
correct URL for the vertical the learner has seen.
"""
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)
# Loading problems so the learner has problems and thus a vertical in the CSM
self.client.get(reverse(
'courseware_section',
kwargs={
'course_id': self.course_actual.id,
'chapter': self.chapter_actual.location.name,
'section': self.section1_actual.location.name,
}
))
user = User.objects.get(email=self.STUDENTS[0]['email'])
crum.set_current_user(user)
result_url = get_review_ids.get_vertical(self.course_actual.id)
expected_url = self.URL_BEGINNING + 'vertical+block@New_Unit_1'
self.assertEqual(result_url, expected_url)
|
lduarte1991/edx-platform
|
openedx/tests/xblock_integration/test_review_xblock.py
|
Python
|
agpl-3.0
| 21,237
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class UserRatingsData(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
UserRatingsData - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'links': 'Links',
'data': 'list[UserRatings]'
}
self.attribute_map = {
'links': 'links',
'data': 'data'
}
self._links = None
self._data = None
@property
def links(self):
"""
Gets the links of this UserRatingsData.
:return: The links of this UserRatingsData.
:rtype: Links
"""
return self._links
@links.setter
def links(self, links):
"""
Sets the links of this UserRatingsData.
:param links: The links of this UserRatingsData.
:type: Links
"""
self._links = links
@property
def data(self):
"""
Gets the data of this UserRatingsData.
:return: The data of this UserRatingsData.
:rtype: list[UserRatings]
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this UserRatingsData.
:param data: The data of this UserRatingsData.
:type: list[UserRatings]
"""
self._data = data
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
FireBladeNooT/Medusa_1_6
|
lib/tvdbapiv2/models/user_ratings_data.py
|
Python
|
gpl-3.0
| 3,530
|
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name='popy',
description='Parser for GNU Po files',
long_description=open('README.rst').read(),
version='0.3.0',
packages=['popy'],
author='Murat Aydos',
author_email='murataydos@yandex.com',
url='https://github.com/murataydos/popy',
license='MIT',
zip_safe=False,
include_package_data=True
)
|
murataydos/popy
|
setup.py
|
Python
|
gpl-2.0
| 402
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
if sys.version_info[0] == 2:
reload(sys).setdefaultencoding("utf-8")
import os
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
mavriq/djangoskel
|
src/manage.py
|
Python
|
gpl-3.0
| 382
|
import bpy
op = bpy.context.active_operator
op.radius = 0.5
op.arc_div = 8
op.lin_div = 0
op.size = (0.0, 0.0, 3.0)
op.div_type = 'CORNERS'
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/Python/bin/2.78/scripts/addons/presets/operator/mesh.primitive_round_cube_add/Capsule.py
|
Python
|
gpl-3.0
| 141
|
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
try:
any
except:
# Implement 'any' for python 2.4 and previous
def any(iterable):
for element in iterable:
if element:
return True
return False
import sys
import inputstream
import tokenizer
import treebuilders
from treebuilders._base import Marker
from treebuilders import simpletree
import utils
from constants import contentModelFlags, spaceCharacters, asciiUpper2Lower
from constants import scopingElements, formattingElements, specialElements
from constants import headingElements, tableInsertModeElements
from constants import cdataElements, rcdataElements, voidElements
from constants import tokenTypes, ReparseException, namespaces
def parse(doc, treebuilder="simpletree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="simpletree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
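# Usage sketch (illustrative): the module-level helpers build a parser with
# the default "simpletree" treebuilder and return the resulting tree.
#
#     doc = parse("<p>Hello")                # full document tree
#     frag = parseFragment("<b>hi</b>")      # fragment, container "div"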
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree = simpletree.TreeBuilder,
tokenizer = tokenizer.HTMLTokenizer, strict = False,
namespaceHTMLElements = True):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
This may be replaced for e.g. a sanitizer which converts some tags to
text
"""
# Raise an exception on the first error encountered
self.strict = strict
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = {
"initial": InitialPhase(self, self.tree),
"beforeHtml": BeforeHtmlPhase(self, self.tree),
"beforeHead": BeforeHeadPhase(self, self.tree),
"inHead": InHeadPhase(self, self.tree),
# XXX "inHeadNoscript": InHeadNoScriptPhase(self, self.tree),
"afterHead": AfterHeadPhase(self, self.tree),
"inBody": InBodyPhase(self, self.tree),
"inCDataRCData": InCDataRCDataPhase(self, self.tree),
"inTable": InTablePhase(self, self.tree),
"inTableText": InTableTextPhase(self, self.tree),
"inCaption": InCaptionPhase(self, self.tree),
"inColumnGroup": InColumnGroupPhase(self, self.tree),
"inTableBody": InTableBodyPhase(self, self.tree),
"inRow": InRowPhase(self, self.tree),
"inCell": InCellPhase(self, self.tree),
"inSelect": InSelectPhase(self, self.tree),
"inSelectInTable": InSelectInTablePhase(self, self.tree),
"inForeignContent": InForeignContentPhase(self, self.tree),
"afterBody": AfterBodyPhase(self, self.tree),
"inFrameset": InFramesetPhase(self, self.tree),
"afterFrameset": AfterFramesetPhase(self, self.tree),
"afterAfterBody": AfterAfterBodyPhase(self, self.tree),
"afterAfterFrameset": AfterAfterFramesetPhase(self, self.tree),
# XXX after after frameset
}
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
            except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.contentModelFlag = tokenizer.contentModelFlags["RCDATA"]
elif self.innerHTML in rcdataElements:
self.tokenizer.contentModelFlag = tokenizer.contentModelFlags["CDATA"]
elif self.innerHTML == 'plaintext':
self.tokenizer.contentModelFlag = tokenizer.contentModelFlags["PLAINTEXT"]
else:
# contentModelFlag already is PCDATA
#self.tokenizer.contentModelFlag = tokenizer.contentModelFlags["PCDATA"]
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.secondaryPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
def mainLoop(self):
(CharactersToken,
SpaceCharactersToken,
StartTagToken,
EndTagToken,
CommentToken,
DoctypeToken) = (tokenTypes["Characters"],
tokenTypes["SpaceCharacters"],
tokenTypes["StartTag"],
tokenTypes["EndTag"],
tokenTypes["Comment"],
tokenTypes["Doctype"])
for token in self.normalizedTokens():
type = token["type"]
if type == CharactersToken:
self.phase.processCharacters(token)
elif type == SpaceCharactersToken:
self.phase.processSpaceCharacters(token)
elif type == StartTagToken:
self.selfClosingAcknowledged = False
self.phase.processStartTag(token)
if (token["selfClosing"]
and not self.selfClosingAcknowledged):
self.parseError("non-void-element-with-trailing-solidus",
{"name":token["name"]})
elif type == EndTagToken:
self.phase.processEndTag(token)
elif type == CommentToken:
self.phase.processComment(token)
elif type == DoctypeToken:
self.phase.processDoctype(token)
else:
self.parseError(token["data"], token.get("datavars", {}))
# When the loop finishes it's EOF
self.phase.processEOF()
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars={}):
# XXX The idea is to make errorcode mandatory.
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl":"definitionURL"}
for k,v in replacements.iteritems():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename" : "attributeName",
"attributetype" : "attributeType",
"basefrequency" : "baseFrequency",
"baseprofile" : "baseProfile",
"calcmode" : "calcMode",
"clippathunits" : "clipPathUnits",
"contentscripttype" : "contentScriptType",
"contentstyletype" : "contentStyleType",
"diffuseconstant" : "diffuseConstant",
"edgemode" : "edgeMode",
"externalresourcesrequired" : "externalResourcesRequired",
"filterres" : "filterRes",
"filterunits" : "filterUnits",
"glyphref" : "glyphRef",
"gradienttransform" : "gradientTransform",
"gradientunits" : "gradientUnits",
"kernelmatrix" : "kernelMatrix",
"kernelunitlength" : "kernelUnitLength",
"keypoints" : "keyPoints",
"keysplines" : "keySplines",
"keytimes" : "keyTimes",
"lengthadjust" : "lengthAdjust",
"limitingconeangle" : "limitingConeAngle",
"markerheight" : "markerHeight",
"markerunits" : "markerUnits",
"markerwidth" : "markerWidth",
"maskcontentunits" : "maskContentUnits",
"maskunits" : "maskUnits",
"numoctaves" : "numOctaves",
"pathlength" : "pathLength",
"patterncontentunits" : "patternContentUnits",
"patterntransform" : "patternTransform",
"patternunits" : "patternUnits",
"pointsatx" : "pointsAtX",
"pointsaty" : "pointsAtY",
"pointsatz" : "pointsAtZ",
"preservealpha" : "preserveAlpha",
"preserveaspectratio" : "preserveAspectRatio",
"primitiveunits" : "primitiveUnits",
"refx" : "refX",
"refy" : "refY",
"repeatcount" : "repeatCount",
"repeatdur" : "repeatDur",
"requiredextensions" : "requiredExtensions",
"requiredfeatures" : "requiredFeatures",
"specularconstant" : "specularConstant",
"specularexponent" : "specularExponent",
"spreadmethod" : "spreadMethod",
"startoffset" : "startOffset",
"stddeviation" : "stdDeviation",
"stitchtiles" : "stitchTiles",
"surfacescale" : "surfaceScale",
"systemlanguage" : "systemLanguage",
"tablevalues" : "tableValues",
"targetx" : "targetX",
"targety" : "targetY",
"textlength" : "textLength",
"viewbox" : "viewBox",
"viewtarget" : "viewTarget",
"xchannelselector" : "xChannelSelector",
"ychannelselector" : "yChannelSelector",
"zoomandpan" : "zoomAndPan"
}
for originalName in token["data"].keys():
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = {
"xlink:actuate":("xlink", "actuate", namespaces["xlink"]),
"xlink:arcrole":("xlink", "arcrole", namespaces["xlink"]),
"xlink:href":("xlink", "href", namespaces["xlink"]),
"xlink:role":("xlink", "role", namespaces["xlink"]),
"xlink:show":("xlink", "show", namespaces["xlink"]),
"xlink:title":("xlink", "title", namespaces["xlink"]),
"xlink:type":("xlink", "type", namespaces["xlink"]),
"xml:base":("xml", "base", namespaces["xml"]),
"xml:lang":("xml", "lang", namespaces["xml"]),
"xml:space":("xml", "space", namespaces["xml"]),
"xmlns":(None, "xmlns", namespaces["xmlns"]),
"xmlns:xlink":("xmlns", "xlink", namespaces["xmlns"])
}
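        # Example: xlink:href="#x" becomes the namespaced triple
        # ("xlink", "href", namespaces["xlink"]) so tree builders can
        # store it as a properly namespaced attribute.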
for originalName in token["data"].iterkeys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select":"inSelect",
"td":"inCell",
"th":"inCell",
"tr":"inRow",
"tbody":"inTableBody",
"thead":"inTableBody",
"tfoot":"inTableBody",
"caption":"inCaption",
"colgroup":"inColumnGroup",
"table":"inTable",
"head":"inBody",
"body":"inBody",
"frameset":"inFrameset"
}
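        # Example: when parsing the innerHTML of a <td>, the deepest
        # matching open element is the cell itself, so the walk below
        # resets the insertion mode to "inCell".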
for node in self.tree.openElements[::-1]:
nodeName = node.name
if node == self.tree.openElements[0]:
last = True
if nodeName not in ['td', 'th']:
# XXX
assert self.innerHTML
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "frameset"):
# XXX
assert self.innerHTML
if nodeName in newModes:
self.phase = self.phases[newModes[nodeName]]
break
elif node.namespace in (namespaces["mathml"], namespaces["svg"]):
self.phase = self.phases["inForeignContent"]
self.secondaryPhase = self.phases["inBody"]
break
elif nodeName == "html":
if self.tree.headPointer is None:
self.phase = self.phases["beforeHead"]
else:
self.phase = self.phases["afterHead"]
break
elif last:
self.phase = self.phases["inBody"]
break
def parseRCDataCData(self, token, contentType):
"""Generic (R)CDATA Parsing algorithm
contentType - RCDATA or CDATA
"""
assert contentType in ("CDATA", "RCDATA")
element = self.tree.insertElement(token)
self.tokenizer.contentModelFlag = contentModelFlags[contentType]
self.originalPhase = self.phase
self.phase = self.phases["inCDataRCData"]
class Phase(object):
"""Base class for helper object that implements each phase of processing
"""
# Order should be (they can be omitted):
# * EOF
# * Comment
# * Doctype
# * SpaceCharacters
# * Characters
# * StartTag
# - startTag* methods
# * EndTag
# - endTag* methods
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if self.parser.firstStartTag == False and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].iteritems():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
# This phase deals with error handling as well which is currently not
# covered in the specification. The error handling is typically known as
# "quirks mode". It is expected that a future version of HTML5 will defin
# this.
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
self.parser.phase.processEOF()
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId != None or
systemId != None):
self.parser.parseError("unknown-doctype")
        if publicId is None:
            publicId = ""
        # systemId is deliberately left as None so the checks below can
        # distinguish "no system identifier" from an empty one.
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or publicId in
("+//silmaril//dtd html pro v0r11 19970101//en",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//en",
"-//as//dtd html 3.0 aswedit + extensions//en",
"-//ietf//dtd html 2.0 level 1//en",
"-//ietf//dtd html 2.0 level 2//en",
"-//ietf//dtd html 2.0 strict level 1//en",
"-//ietf//dtd html 2.0 strict level 2//en",
"-//ietf//dtd html 2.0 strict//en",
"-//ietf//dtd html 2.0//en",
"-//ietf//dtd html 2.1e//en",
"-//ietf//dtd html 3.0//en",
"-//ietf//dtd html 3.0//en//",
"-//ietf//dtd html 3.2 final//en",
"-//ietf//dtd html 3.2//en",
"-//ietf//dtd html 3//en",
"-//ietf//dtd html level 0//en",
"-//ietf//dtd html level 0//en//2.0",
"-//ietf//dtd html level 1//en",
"-//ietf//dtd html level 1//en//2.0",
"-//ietf//dtd html level 2//en",
"-//ietf//dtd html level 2//en//2.0",
"-//ietf//dtd html level 3//en",
"-//ietf//dtd html level 3//en//3.0",
"-//ietf//dtd html strict level 0//en",
"-//ietf//dtd html strict level 0//en//2.0",
"-//ietf//dtd html strict level 1//en",
"-//ietf//dtd html strict level 1//en//2.0",
"-//ietf//dtd html strict level 2//en",
"-//ietf//dtd html strict level 2//en//2.0",
"-//ietf//dtd html strict level 3//en",
"-//ietf//dtd html strict level 3//en//3.0",
"-//ietf//dtd html strict//en",
"-//ietf//dtd html strict//en//2.0",
"-//ietf//dtd html strict//en//3.0",
"-//ietf//dtd html//en",
"-//ietf//dtd html//en//2.0",
"-//ietf//dtd html//en//3.0",
"-//metrius//dtd metrius presentational//en",
"-//microsoft//dtd internet explorer 2.0 html strict//en",
"-//microsoft//dtd internet explorer 2.0 html//en",
"-//microsoft//dtd internet explorer 2.0 tables//en",
"-//microsoft//dtd internet explorer 3.0 html strict//en",
"-//microsoft//dtd internet explorer 3.0 html//en",
"-//microsoft//dtd internet explorer 3.0 tables//en",
"-//netscape comm. corp.//dtd html//en",
"-//netscape comm. corp.//dtd strict html//en",
"-//o'reilly and associates//dtd html 2.0//en",
"-//o'reilly and associates//dtd html extended 1.0//en",
"-//o'reilly and associates//dtd html extended relaxed 1.0//en",
"-//spyglass//dtd html 2.0 extended//en",
"-//sq//dtd html 2.0 hotmetal + extensions//en",
"-//sun microsystems corp.//dtd hotjava html//en",
"-//sun microsystems corp.//dtd hotjava strict html//en",
"-//w3c//dtd html 3 1995-03-24//en",
"-//w3c//dtd html 3.2 draft//en",
"-//w3c//dtd html 3.2 final//en",
"-//w3c//dtd html 3.2//en",
"-//w3c//dtd html 3.2s draft//en",
"-//w3c//dtd html 4.0 frameset//en",
"-//w3c//dtd html 4.0 transitional//en",
"-//w3c//dtd html experimental 19960712//en",
"-//w3c//dtd html experimental 970421//en",
"-//w3c//dtd w3 html//en",
"-//w3o//dtd w3 html 3.0//en",
"-//w3o//dtd w3 html 3.0//en//",
"-//w3o//dtd w3 html strict 3.0//en//",
"-//webtechs//dtd mozilla html 2.0//en",
"-//webtechs//dtd mozilla html//en",
"-/w3c/dtd html 4.0 transitional/en",
"html")
              or (publicId in
                  ("-//w3c//dtd html 4.01 frameset//en",
                   "-//w3c//dtd html 4.01 transitional//en") and
                  systemId is None)
              or (systemId is not None and
                  systemId == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd")):
self.parser.compatMode = "quirks"
        elif (publicId in
              ("-//w3c//dtd xhtml 1.0 frameset//en",
               "-//w3c//dtd xhtml 1.0 transitional//en")
              or (publicId in
                  ("-//w3c//dtd html 4.01 frameset//en",
                   "-//w3c//dtd html 4.01 transitional//en") and
                  systemId is None)):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
self.parser.phase.processCharacters(token)
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
self.parser.phase.processStartTag(token)
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
self.parser.phase.processEndTag(token)
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
self.parser.phase.processEOF()
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
self.parser.phase.processCharacters(token)
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
self.parser.phase.processStartTag(token)
def processEndTag(self, token):
self.insertHtmlElement()
self.parser.phase.processEndTag(token)
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
self.parser.phase.processEOF()
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
self.parser.phase.processCharacters(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
self.parser.phase.processStartTag(token)
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
self.parser.phase.processEndTag(token)
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "link", "command", "eventsource"),
self.startTagBaseLinkCommandEventsource),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# helper
def appendToHead(self, element):
if self.tree.headPointer is not None:
self.tree.headPointer.appendChild(element)
else:
assert self.parser.innerHTML
            self.tree.openElements[-1].appendChild(element)
# the real thing
    def processEOF(self):
self.anythingElse()
self.parser.phase.processEOF()
def processCharacters(self, token):
self.anythingElse()
self.parser.phase.processCharacters(token)
def startTagHtml(self, token):
self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommandEventsource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif "content" in attributes:
data = inputstream.EncodingBytes(
attributes["content"].encode(self.parser.tokenizer.stream.charEncoding[0]))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataCData(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
#Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataCData(token, "CDATA")
def startTagScript(self, token):
#I think this is equivalent to the CDATA stuff since we don't execute script
#self.tree.insertElement(token)
self.parser.parseRCDataCData(token, "CDATA")
def startTagOther(self, token):
self.anythingElse()
self.parser.phase.processStartTag(token)
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s"%node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
self.parser.phase.processEndTag(token)
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "link", "meta", "noframes", "script", "style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
self.parser.phase.processEOF()
def processCharacters(self, token):
self.anythingElse()
self.parser.phase.processCharacters(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name":token["name"]})
def startTagOther(self, token):
self.anythingElse()
self.parser.phase.processStartTag(token)
def endTagHtmlBodyBr(self, token):
#This is not currently in the spec
self.anythingElse()
self.parser.phase.processEndTag(token)
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name":token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-body
# the crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
#Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "link", "meta", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "datagrid",
"details", "dialog", "dir", "div", "dl", "fieldset", "figure",
"footer", "h1", "h2", "h3", "h4", "h5", "h6", "header", "listing",
"menu", "nav", "ol", "p", "pre", "section", "ul"),
self.startTagCloseP),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext",self.startTagPlaintext),
(headingElements, self.startTagHeading),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"),self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "basefont", "bgsound", "br", "embed", "img", "input",
"keygen", "param", "spacer", "wbr"), self.startTagVoidFormatting),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagCdata),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced),
(("event-source", "command"), self.startTagNew)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body",self.endTagBody),
("html",self.endTagHtml),
(("address", "article", "aside", "blockquote", "center", "datagrid",
"details", "dialog", "dir", "div", "dl", "fieldset", "figure",
"footer", "header", "listing", "menu", "nav", "ol", "pre", "section",
"ul"), self.endTagBlock),
("form", self.endTagForm),
("p",self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "button", "marquee", "object"), self.endTagAppletButtonMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(
self.tree.openElements[-1])
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
#Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
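        # Example: "<pre>\nfoo" drops the newline right after <pre>, so
        # the element's text content starts at "foo"; later newlines are
        # kept because processSpaceCharacters is restored below.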
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
# XXX The specification says to do this for every character at the
# moment, but apparently that doesn't match the real world so we don't
# do it for space characters.
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
        self.parser.framesetOK = False
#This matches the current spec but may not match the real world
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
for attr, value in token["data"].iteritems():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
if token["name"] in ("pre", "listing"):
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError(u"unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p"):
self.endTagP("p")
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li":["li"],
"dt":["dt", "dd"],
"dd":["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in (scopingElements | specialElements) and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.contentModelFlag = contentModelFlags["PLAINTEXT"]
def startTagHeading(self, token):
if self.tree.elementInScope("p"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
# Uncomment the following for IE7 behavior:
#
#for item in headingElements:
# if self.tree.elementInScope(item):
# self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
# item = self.tree.openElements.pop()
# while item.name not in headingElements:
# item = self.tree.openElements.pop()
# break
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
self.parser.phase.processStartTag(token)
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataCData(token, "CDATA")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagHr(self, token):
if self.tree.elementInScope("p"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Insert your search keywords here: "
self.processCharacters(
{"type":tokenTypes["Characters"], "data":prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes = attributes,
selfClosing =
token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
# XXX Form element pointer checking here as well...
self.tree.insertElement(token)
self.parser.tokenizer.contentModelFlag = contentModelFlags["RCDATA"]
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagCdata(token)
def startTagCdata(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataCData(token, "CDATA")
def startTagOpt(self, token):
if self.tree.elementInScope("option"):
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
while self.tree.openElements[-1].name != "ruby":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
#Need to get the parse error right for the case where the token
#has a namespace not equal to the xmlns attribute
if self.parser.phase != self.parser.phases["inForeignContent"]:
self.parser.secondaryPhase = self.parser.phase
self.parser.phase = self.parser.phases["inForeignContent"]
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
#Need to get the parse error right for the case where the token
#has a namespace not equal to the xmlns attribute
if self.parser.phase != self.parser.phases["inForeignContent"]:
self.parser.secondaryPhase = self.parser.phase
self.parser.phase = self.parser.phases["inForeignContent"]
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagNew(self, token):
"""New HTML5 elements, "event-source", "section", "nav",
"article", "aside", "header", "footer", "datagrid", "command"
"""
#2007-08-30 - MAP - commenting out this write to sys.stderr because
# it's really annoying me when I run the validator tests
#sys.stderr.write("Warning: Undefined behaviour for start tag %s"%name)
self.startTagOther(token)
#raise NotImplementedError
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if self.tree.elementInScope("p"):
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
if self.tree.elementInScope("p"):
while self.tree.elementInScope("p"):
self.tree.openElements.pop()
else:
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.endTagP(impliedTagToken("p"))
def endTagBody(self, token):
# XXX Need to take open <p> tags into account here. We shouldn't imply
# </p> but we should not throw a parse error either. Specification is
# likely to be updated.
if (len(self.tree.openElements) == 1 or
self.tree.openElements[1].name != "body"):
# innerHTML case
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "p",
"tbody", "td", "tfoot",
"th", "thead", "tr")):
#Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
self.endTagBody(impliedTagToken("body"))
if not self.parser.innerHTML:
self.parser.phase.processEndTag(token)
def endTagBlock(self, token):
#Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(token["name"]):
self.parser.parseError("unexpected-end-tag",
{"name":"form"})
else:
self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude = token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://www.whatwg.org/specs/web-apps/current-work/#adoptionAgency
# XXX Better parseError messages appreciated.
name = token["name"]
while True:
# Step 1 paragraph 1
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if not formattingElement or (formattingElement in
self.tree.openElements and
not self.tree.elementInScope(
formattingElement.name)):
self.parser.parseError("adoption-agency-1.1", {"name": token["name"]})
return
# Step 1 paragraph 2
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Step 1 paragraph 3
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 2
# Start of the adoption agency algorithm proper
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if (element.nameTuple in
specialElements | scopingElements):
furthestBlock = element
break
# Step 3
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
commonAncestor = self.tree.openElements[afeIndex-1]
            # (Detaching the furthest block from its parent is not
            # needed in this implementation:)
            #if furthestBlock.parent:
            #    furthestBlock.parent.removeChild(furthestBlock)
            # Step 5
            # The bookmark is supposed to help us identify where to reinsert
            # nodes in step 12. We have to ensure that we reinsert nodes after
            # the node before the active formatting element. Note the bookmark
            # can move in step 6.4
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 6
lastNode = node = furthestBlock
while True:
# AT replace this with a function and recursion?
# Node is element before node in open elements
node = self.tree.openElements[
self.tree.openElements.index(node)-1]
while node not in self.tree.activeFormattingElements:
tmpNode = node
node = self.tree.openElements[
self.tree.openElements.index(node)-1]
self.tree.openElements.remove(tmpNode)
# Step 6.3
if node == formattingElement:
break
# Step 6.4
if lastNode == furthestBlock:
bookmark = (self.tree.activeFormattingElements.index(node)
+ 1)
# Step 6.5
#cite = node.parent
#if node.hasContent():
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 6.6
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
                # Step 6.7
lastNode = node
# End of inner loop
# Step 7
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster parent the
# lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
commonAncestor.appendChild(lastNode)
# Step 8
clone = formattingElement.cloneNode()
# Step 9
furthestBlock.reparentChildren(clone)
# Step 10
furthestBlock.appendChild(clone)
# Step 11
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 12
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
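            # Control returns to the top of the outer loop. Worked
            # example: for "<b><p>X</b>Y" the formatting element is <b>
            # and the furthest block is <p>; a clone of <b> adopts the
            # "X" text and is appended to <p>, the next pass pops the
            # clone and returns, and "Y" then lands in <p>, yielding
            # <b></b><p><b>X</b>Y</p>.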
def endTagAppletButtonMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if (node.nameTuple in
specialElements | scopingElements):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class InCDataRCDataPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
self.tree.openElements[-1].name)
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
self.parser.phase.processEOF()
def startTagOther(self, token):
assert False, "Tried to process start tag %s in (R)CDATA mode"%name
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
#The rest of this method is all stuff that only happens if
#document.write works
def endTagOther(self, token):
node = self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
#self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
def getCurrentTable(self):
i = -1
while -i <= len(self.tree.openElements) and self.tree.openElements[i].name != "table":
i -= 1
if -i > len(self.tree.openElements):
return self.tree.openElements[0]
else:
return self.tree.openElements[i]
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
#Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.characterTokens.append(token)
def processCharacters(self, token):
#If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
self.parser.phase.processStartTag(token)
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
self.parser.phase.processStartTag(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
self.parser.phase.processStartTag(token)
def startTagStyleScript(self, token):
self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
if "tainted" not in self.getCurrentTable()._flags:
self.getCurrentTable()._flags.append("tainted")
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
if "tainted" not in self.getCurrentTable()._flags:
self.getCurrentTable()._flags.append("tainted")
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type":tokenTypes["Characters"], "data":data}
self.originalPhase.processCharacters(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
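        # Example: inside <table>, a buffered run of "  \n  " is inserted
        # into the table as-is, but "  x  " contains a non-space character,
        # so the whole run is replayed through the original phase and ends
        # up foster-parented out of the table.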
    def processComment(self, token):
        self.flushCharacters()
        self.parser.phase = self.originalPhase
        self.parser.phase.processComment(token)
    def processEOF(self):
        self.flushCharacters()
        self.parser.phase = self.originalPhase
        self.parser.phase.processEOF()
def processCharacters(self, token):
self.characterTokens.append(token)
    def processSpaceCharacters(self, token):
        # Space-only runs that arrive while buffering are collected too;
        # flushCharacters() decides later whether they stay in the table.
        self.characterTokens.append(token)
    def processStartTag(self, token):
        self.flushCharacters()
        self.parser.phase = self.originalPhase
        self.parser.phase.processStartTag(token)
    def processEndTag(self, token):
        self.flushCharacters()
        self.parser.phase = self.originalPhase
        self.parser.phase.processEndTag(token)
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
#XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
self.parser.phase.processStartTag(token)
def startTagOther(self, token):
self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
self.parser.phase.processEndTag(token)
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup("colgroup")
if not ignoreEndTag:
self.parser.phase.processEOF()
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
self.parser.phase.processCharacters(token)
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup("colgroup")
if not ignoreEndTag:
self.parser.phase.processStartTag(token)
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup("colgroup")
if not ignoreEndTag:
self.parser.phase.processEndTag(token)
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
#self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
self.parser.phase.processStartTag(token)
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
self.parser.phase.processStartTag(token)
else:
# innerHTML case
self.parser.parseError()
def startTagOther(self, token):
self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
self.parser.phase.processEndTag(token)
else:
# innerHTML case
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr("tr")
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
self.parser.phase.processStartTag(token)
def startTagOther(self, token):
self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr("tr")
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
self.parser.phase.processEndTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr("tr")
self.parser.phase.processEndTag(token)
else:
# innerHTML case
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
self.parser.phase.processStartTag(token)
else:
# innerHTML case
self.parser.parseError()
def startTagOther(self, token):
self.parser.phases["inBody"].processStartTag(token)
# Optimize this for subsequent invocations. Can't do this initially
# because self.phases doesn't really exist at that point.
self.startTagHandler.default =\
self.parser.phases["inBody"].processStartTag
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
self.parser.phase.processEndTag(token)
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
self.parser.phases["inBody"].processEndTag(token)
# Optimize this for subsequent invocations. Can't do this initially
# because self.phases doesn't really exist at that point.
self.endTagHandler.default = self.parser.phases["inBody"].processEndTag
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect),
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td",
"th"), self.endTagTableElements)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect("select")
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="table"):
self.endTagSelect("select")
self.parser.phase.processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
# It also closes </optgroup>
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="table"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
self.parser.parseError()
def endTagTableElements(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagSelect("select")
self.parser.phase.processEndTag(token)
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
self.parser.phase.processStartTag(token)
def startTagOther(self, token):
self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
self.parser.phase.processEndTag(token)
def endTagOther(self, token):
self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "font", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def nonHTMLElementInScope(self):
for element in self.tree.openElements[::-1]:
if element.namespace == self.tree.defaultNamespace:
return self.tree.elementInScope(element)
        assert False  # unreachable: the stack always contains an html element
def adjustSVGTagNames(self, token):
replacements = {"altglyph":"altGlyph",
"altglyphdef":"altGlyphDef",
"altglyphitem":"altGlyphItem",
"animatecolor":"animateColor",
"animatemotion":"animateMotion",
"animatetransform":"animateTransform",
"clippath":"clipPath",
"feblend":"feBlend",
"fecolormatrix":"feColorMatrix",
"fecomponenttransfer":"feComponentTransfer",
"fecomposite":"feComposite",
"feconvolvematrix":"feConvolveMatrix",
"fediffuselighting":"feDiffuseLighting",
"fedisplacementmap":"feDisplacementMap",
"fedistantlight":"feDistantLight",
"feflood":"feFlood",
"fefunca":"feFuncA",
"fefuncb":"feFuncB",
"fefuncg":"feFuncG",
"fefuncr":"feFuncR",
"fegaussianblur":"feGaussianBlur",
"feimage":"feImage",
"femerge":"feMerge",
"femergenode":"feMergeNode",
"femorphology":"feMorphology",
"feoffset":"feOffset",
"fepointlight":"fePointLight",
"fespecularlighting":"feSpecularLighting",
"fespotlight":"feSpotLight",
"fetile":"feTile",
"feturbulence":"feTurbulence",
"foreignobject":"foreignObject",
"glyphref":"glyphRef",
"lineargradient":"linearGradient",
"radialgradient":"radialGradient",
"textpath":"textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processEOF(self):
pass
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (currentNode.namespace == self.tree.defaultNamespace or
(currentNode.namespace == namespaces["mathml"] and
token["name"] not in frozenset(["mglyph", "malignmark"]) and
currentNode.name in frozenset(["mi", "mo", "mn",
"ms", "mtext"])) or
(currentNode.namespace == namespaces["mathml"] and
currentNode.name == "annotation-xml" and
token["name"] == "svg") or
(currentNode.namespace == namespaces["svg"] and
currentNode.name in frozenset(["foreignObject",
"desc", "title"])
)):
assert self.parser.secondaryPhase != self
self.parser.secondaryPhase.processStartTag(token)
if self.parser.phase == self and self.nonHTMLElementInScope():
self.parser.phase = self.parser.secondaryPhase
elif token["name"] in self.breakoutElements:
self.parser.parseError("unexpected-html-element-in-foreign-content",
token["name"])
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace):
self.tree.openElements.pop()
self.parser.phase = self.parser.secondaryPhase
self.parser.phase.processStartTag(token)
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
self.adjustSVGTagNames(token)
self.parser.secondaryPhase.processEndTag(token)
if self.parser.phase == self and self.nonHTMLElementInScope():
self.parser.phase = self.parser.secondaryPhase
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
#Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
self.parser.phase.processCharacters(token)
def startTagHtml(self, token):
self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
self.parser.phase.processStartTag(token)
    def endTagHtml(self, token):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
self.parser.phase.processEndTag(token)
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset),
("noframes", self.endTagNoframes)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
            # If we're not in innerHTML mode and the current node is not a
            # "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagNoframes(self, token):
self.parser.phases["inBody"].processEndTag(token)
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
#Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
self.parser.phase.processCharacters(token)
def startTagHtml(self, token):
self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
self.parser.phase.processStartTag(token)
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
self.parser.phase.processEndTag(token)
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
self.parser.phase.processCharacters(token)
def startTagHtml(self, token):
self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
self.parser.phase.processStartTag(token)
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
self.parser.phase.processEndTag(token)
def impliedTagToken(name, type="EndTag", attributes = None,
selfClosing = False):
if attributes is None:
attributes = {}
return {"type":tokenTypes[type], "name":name, "data":attributes,
"selfClosing":selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
| naokits/adminkun_viewer_old | Server/gaeo/html5lib/html5parser.py | Python | mit | 106,815 |
import webapp2, datetime
from models.dailymail import DailyMail
class SendMailHandler(webapp2.RequestHandler):
def get(self):
force = self.request.get('force', '0') == '1'
date = self.request.get('date', None)
        if date:
            try:
                y, m, d = date.split('-')
                date = datetime.datetime(int(y), int(m), int(d)).date()
            except ValueError:
                self.response.out.write('Invalid date, ignored')
                date = None  # actually ignore the malformed value
        DailyMail().send(False, force, date)
self.response.out.write('Ran daily mail at ' + str(datetime.datetime.now()))
| einaregilsson/MyLife | handlers/sendmail.py | Python | mit | 506 |
import plotly.plotly as py
import plotly.graph_objs as go
# Get data
with open('Real_Final_database_02.csv') as datafile:
    listdata = [line.strip().split(',') for line in datafile]
# Separate information
year = []
affect = []
damage = []
death =[]
for j in listdata:
if j[0] == 'Myanmar' and j[2] == 'Earthquake':
year.append(int(j[1]))
affect.append(int(j[3]))
damage.append(int(j[4]))
death.append(int(j[5]))
# Create and style traces
trace0 = go.Scatter(
x=year,
y=affect,
mode='markers',
name='Total Affected',
marker=dict(
symbol='circle',
sizemode='diameter',
sizeref=0.85,
size=[29.810746602820924, 18.197149567147044, 14.675557544415877,
6.610603004351287, 19.543385335458176, 14.956442130894114,
21.72077890062975, 10.792626698654045, 16.52185943835442,
4.353683242838546, 41.50240100063496, 10.066092062338873,
21.91453196050797, 3.6377994860079204, 46.258986486204044,
3.8334450569607683, 11.437310410545528, 45.16465542353964,
6.227961099314154, 6.709136738617642, 24.694430700391482,
16.285386604676816, 6.264612285824508, 30.812100863425822,
7.325179403286266, 9.227791164226492, 12.68649752933601,
22.60573984618565, 18.849582296257626, 17.910159625556144,
9.337109185582111, 5.774872714286052, 29.999726284159046,
23.063420581238734, 7.40199199438875, 18.54140518159347, 60,
4.612764339536968, 15.369704446995708, 2.3067029222366395,
18.084735199216812, 12.79910818701753, 15.592022291528775,
34.24915519732991, 33.57902844158756, 5.496191404660524,
31.887651824471956, 12.329112567064463, 16.55196774082315,
27.887232791984047, 17.696194784090615, 18.11688103909921],
line=dict(
width=2
),
)
)
trace1 = go.Scatter(
x=year,
y=damage,
    mode='markers',
    name='Total Damage \'000 US',
    yaxis='y2',  # bind this trace to the overlaying axis "yaxis2" below
    marker=dict(
sizemode='diameter',
sizeref=0.85,
size=[21.94976988499517, 10.441052822396196, 47.66021903725089,
19.979112486875845, 13.95267548575408, 22.993945975228556,
7.029852430522167, 11.682689085146487, 10.555193870118702,
12.823544926991564, 9.108293955789053, 12.259853478972317,
10.082039742103595, 9.458604761285072, 5.765006135966166,
36.048202790993614, 8.23689670992972, 6.22565654446431,
8.927648460491556, 18.514711052673302, 6.865187781408511,
3.5540539239313094, 60, 6.41976234423909, 17.658738378883186],
line=dict(
width=2
),
)
)
trace2 = go.Scatter(
x=year,
y=death,
    mode='markers',
    name='Total Death',
    yaxis='y3',  # bind this trace to the overlaying axis "yaxis3" below
    marker=dict(
sizemode='diameter',
sizeref=0.85,
size=[9.330561207739747, 1.390827697025556, 20.266312242166443,
6.211273648937339, 60, 4.3653750211924, 55.05795036085951,
24.703896200017994, 13.769821732555231, 8.664520214956125,
4.188652530719761, 18.654412200415056, 4.0651192623762835,
7.975814912067495, 11.57117523159306, 3.271861016562374,
8.231768913808876, 2.8011347940934943, 11.418845373343052,
8.882667412223675, 2.9579312056937046, 21.49670117903256,
15.768343552577761, 8.680479951148044, 3.525577657243318,
7.4587209016354095, 7.261486641287726, 7.95397619750268,
13.3280083790662, 15.256667990032932, 3.312103798885452,
7.787039017632765],
line=dict(
width=2
),
)
)
data = [trace0, trace1, trace2]
layout = go.Layout(
title='Earthquake in Myanmar',
xaxis=dict(
title='Years'),
yaxis=dict(
title='Total affected',
        titlefont=dict(
            color='#3333FF'
        ),
        tickfont=dict(
            color='#3333FF'
        )
),
yaxis2=dict(
title='Total Damage \'000 US',
        titlefont=dict(
            color='#FF6633'
        ),
        tickfont=dict(
            color='#FF6633'
        ),
anchor='free',
overlaying='y',
side='left',
position=0.15
),
yaxis3=dict(
title='Total Death',
        titlefont=dict(
            color='#33CC33'
        ),
        tickfont=dict(
            color='#33CC33'
        ),
anchor='x',
overlaying='y',
side='right'
),
paper_bgcolor='rgb(243, 243, 243)',
plot_bgcolor='rgb(243, 243, 243)'
)
fig = go.Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='bennyy')
| pdeesawat/PSIT58_test_01 | Test_Python_code/final_code/Myanmar/earthquake.py | Python | apache-2.0 | 4,754 |
"""Allows the creation of a sensor that breaks out state_attributes."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.sensor import (
CONF_STATE_CLASS,
DEVICE_CLASSES_SCHEMA,
DOMAIN as SENSOR_DOMAIN,
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
STATE_CLASSES_SCHEMA,
SensorEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_CLASS,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_FRIENDLY_NAME,
CONF_FRIENDLY_NAME_TEMPLATE,
CONF_ICON,
CONF_ICON_TEMPLATE,
CONF_NAME,
CONF_SENSORS,
CONF_STATE,
CONF_UNIQUE_ID,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.entity import async_generate_entity_id
from .const import (
CONF_ATTRIBUTE_TEMPLATES,
CONF_ATTRIBUTES,
CONF_AVAILABILITY,
CONF_AVAILABILITY_TEMPLATE,
CONF_OBJECT_ID,
CONF_PICTURE,
CONF_TRIGGER,
)
from .template_entity import TemplateEntity
from .trigger_entity import TriggerEntity
LEGACY_FIELDS = {
CONF_ICON_TEMPLATE: CONF_ICON,
CONF_ENTITY_PICTURE_TEMPLATE: CONF_PICTURE,
CONF_AVAILABILITY_TEMPLATE: CONF_AVAILABILITY,
CONF_ATTRIBUTE_TEMPLATES: CONF_ATTRIBUTES,
CONF_FRIENDLY_NAME_TEMPLATE: CONF_NAME,
CONF_FRIENDLY_NAME: CONF_NAME,
CONF_VALUE_TEMPLATE: CONF_STATE,
}
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.template,
vol.Required(CONF_STATE): cv.template,
vol.Optional(CONF_ICON): cv.template,
vol.Optional(CONF_PICTURE): cv.template,
vol.Optional(CONF_AVAILABILITY): cv.template,
vol.Optional(CONF_ATTRIBUTES): vol.Schema({cv.string: cv.template}),
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_STATE_CLASS): STATE_CLASSES_SCHEMA,
}
)
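# A modern-style entry satisfying SENSOR_SCHEMA might look like this
# (YAML sketch with illustrative values; only `state` is required):
#
#   - name: "Outside temperature"
#     state: "{{ states('sensor.raw_temp') | float }}"
#     unit_of_measurement: "°C"
#     device_class: temperature
#     state_class: measurement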
LEGACY_SENSOR_SCHEMA = vol.All(
cv.deprecated(ATTR_ENTITY_ID),
vol.Schema(
{
vol.Required(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_FRIENDLY_NAME_TEMPLATE): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Optional(CONF_ATTRIBUTE_TEMPLATES, default={}): vol.Schema(
{cv.string: cv.template}
),
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
),
)
def extra_validation_checks(val):
"""Run extra validation checks."""
if CONF_TRIGGER in val:
raise vol.Invalid(
"You can only add triggers to template entities if they are defined under `template:`. "
"See the template documentation for more information: https://www.home-assistant.io/integrations/template/"
)
if CONF_SENSORS not in val and SENSOR_DOMAIN not in val:
raise vol.Invalid(f"Required key {SENSOR_DOMAIN} not defined")
return val
def rewrite_legacy_to_modern_conf(cfg: dict[str, dict]) -> list[dict]:
"""Rewrite a legacy sensor definitions to modern ones."""
sensors = []
for object_id, entity_cfg in cfg.items():
entity_cfg = {**entity_cfg, CONF_OBJECT_ID: object_id}
for from_key, to_key in LEGACY_FIELDS.items():
if from_key not in entity_cfg or to_key in entity_cfg:
continue
val = entity_cfg.pop(from_key)
if isinstance(val, str):
val = template.Template(val)
entity_cfg[to_key] = val
if CONF_NAME not in entity_cfg:
entity_cfg[CONF_NAME] = template.Template(object_id)
sensors.append(entity_cfg)
return sensors
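# For example (illustrative values), a legacy definition such as
#   {"my_sensor": {"value_template": "{{ 21.5 }}", "friendly_name": "My Sensor"}}
# is rewritten to roughly
#   [{"state": Template("{{ 21.5 }}"), "name": Template("My Sensor"),
#     "object_id": "my_sensor"}]
# i.e. each LEGACY_FIELDS key is renamed and bare strings become Templates.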
PLATFORM_SCHEMA = vol.All(
PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_TRIGGER): cv.match_all, # to raise custom warning
vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(LEGACY_SENSOR_SCHEMA),
}
),
extra_validation_checks,
)
@callback
def _async_create_template_tracking_entities(
async_add_entities, hass, definitions: list[dict], unique_id_prefix: str | None
):
"""Create the template sensors."""
sensors = []
for entity_conf in definitions:
# Still available on legacy
object_id = entity_conf.get(CONF_OBJECT_ID)
state_template = entity_conf[CONF_STATE]
icon_template = entity_conf.get(CONF_ICON)
entity_picture_template = entity_conf.get(CONF_PICTURE)
availability_template = entity_conf.get(CONF_AVAILABILITY)
friendly_name_template = entity_conf.get(CONF_NAME)
unit_of_measurement = entity_conf.get(CONF_UNIT_OF_MEASUREMENT)
device_class = entity_conf.get(CONF_DEVICE_CLASS)
attribute_templates = entity_conf.get(CONF_ATTRIBUTES, {})
unique_id = entity_conf.get(CONF_UNIQUE_ID)
state_class = entity_conf.get(CONF_STATE_CLASS)
if unique_id and unique_id_prefix:
unique_id = f"{unique_id_prefix}-{unique_id}"
sensors.append(
SensorTemplate(
hass,
object_id,
friendly_name_template,
unit_of_measurement,
state_template,
icon_template,
entity_picture_template,
availability_template,
device_class,
attribute_templates,
unique_id,
state_class,
)
)
async_add_entities(sensors)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template sensors."""
if discovery_info is None:
_async_create_template_tracking_entities(
async_add_entities,
hass,
rewrite_legacy_to_modern_conf(config[CONF_SENSORS]),
None,
)
return
if "coordinator" in discovery_info:
async_add_entities(
TriggerSensorEntity(hass, discovery_info["coordinator"], config)
for config in discovery_info["entities"]
)
return
_async_create_template_tracking_entities(
async_add_entities,
hass,
discovery_info["entities"],
discovery_info["unique_id"],
)
class SensorTemplate(TemplateEntity, SensorEntity):
"""Representation of a Template Sensor."""
def __init__(
self,
hass: HomeAssistant,
object_id: str | None,
friendly_name_template: template.Template | None,
unit_of_measurement: str | None,
state_template: template.Template,
icon_template: template.Template | None,
entity_picture_template: template.Template | None,
availability_template: template.Template | None,
device_class: str | None,
attribute_templates: dict[str, template.Template],
unique_id: str | None,
state_class: str | None,
) -> None:
"""Initialize the sensor."""
super().__init__(
attribute_templates=attribute_templates,
availability_template=availability_template,
icon_template=icon_template,
entity_picture_template=entity_picture_template,
)
if object_id is not None:
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, object_id, hass=hass
)
self._friendly_name_template = friendly_name_template
self._attr_name = None
# Try to render the name as it can influence the entity ID
if friendly_name_template:
friendly_name_template.hass = hass
try:
self._attr_name = friendly_name_template.async_render(
parse_result=False
)
except template.TemplateError:
pass
self._attr_native_unit_of_measurement = unit_of_measurement
self._template = state_template
self._attr_device_class = device_class
self._attr_state_class = state_class
self._attr_unique_id = unique_id
async def async_added_to_hass(self):
"""Register callbacks."""
self.add_template_attribute(
"_attr_native_value", self._template, None, self._update_state
)
if self._friendly_name_template and not self._friendly_name_template.is_static:
self.add_template_attribute("_attr_name", self._friendly_name_template)
await super().async_added_to_hass()
@callback
def _update_state(self, result):
super()._update_state(result)
self._attr_native_value = None if isinstance(result, TemplateError) else result
class TriggerSensorEntity(TriggerEntity, SensorEntity):
"""Sensor entity based on trigger data."""
domain = SENSOR_DOMAIN
extra_template_keys = (CONF_STATE,)
@property
def native_value(self) -> str | None:
"""Return state of the sensor."""
return self._rendered.get(CONF_STATE)
@property
def state_class(self) -> str | None:
"""Sensor state class."""
return self._config.get(CONF_STATE_CLASS)
| aronsky/home-assistant | homeassistant/components/template/sensor.py | Python | apache-2.0 | 9,651 |
from django import forms
from django.contrib.auth.models import User
from .models import Booking, Food, Order
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput)
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email']
class BookingForm(forms.ModelForm):
class Meta:
model = Booking
fields = ['dateBooked', 'timeBooked', 'persons']
class OrderForm(forms.ModelForm):
class Meta:
model = Order
fields = '__all__'
| GeoCSBI/UTH_DB | mysite/uth_db/forms.py | Python | gpl-3.0 | 513 |
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from oscar.core.loading import get_model
from oscar.core.loading import get_class
Order = get_model('order', 'Order')
CommunicationEventType = get_model('customer', 'CommunicationEventType')
Dispatcher = get_class('customer.utils', 'Dispatcher')
class Command(BaseCommand):
args = '<communication_event_type> <order number>'
help = 'For testing the content of order emails'
def handle(self, *args, **options):
if len(args) != 2:
raise CommandError("Please select a event type and order number")
try:
order = Order.objects.get(number=args[1])
except Order.DoesNotExist:
raise CommandError("No order found with number %s" % args[1])
ctx = {
'order': order,
'lines': order.lines.all(),
}
messages = CommunicationEventType.objects.get_and_render(
args[0], ctx)
print("Subject: %s\nBody:\n\n%s\nBody HTML:\n\n%s" % (
messages['subject'], messages['body'], messages['html']))
| marcoantoniooliveira/labweb | oscar/management/commands/oscar_generate_email_content.py | Python | bsd-3-clause | 1,132 |
import os
from fluidity_tools import stat_parser
meshtemplate='''
Point(1) = {0.0,0.0,0,0.1};
Extrude {1,0,0} {
Point{1}; Layers{<layers>};
}
Extrude {0,1,0} {
Line{1}; Layers{<layers>};
}
Extrude {0,0,1} {
Surface{5}; Layers{<layers>};
}
Physical Surface(28) = {5,14,26,22,27,18};
Physical Volume(29) = {1};
'''
def generate_meshfile(name,layers):
geo = meshtemplate.replace('<layers>',str(layers))
file(name+".geo",'w').write(geo)
os.system("gmsh -3 "+name+".geo")
os.system("../../scripts/gmsh2triangle "+name+".msh")
def run_test(layers, binary):
'''run_test(layers, binary)
Run a single test of the channel problem. Layers is the number of mesh
points in the cross-channel direction. The mesh is unstructured and
isotropic. binary is a string containing the fluidity command to run.
The return value is the error in u and p at the end of the simulation.'''
generate_meshfile("channel",layers)
os.system(binary+" channel_viscous.flml")
s=stat_parser("channel-flow-dg.stat")
return (s["Water"]['AnalyticUVelocitySolutionError']['l2norm'][-1],
s["Water"]['AnalyticPressureSolutionError']['l2norm'][-1])
| FluidityProject/multifluids | tests/mms_tracer_P1dg_cdg_diff_steady_3d_cjc/cdg3d.py | Python | lgpl-2.1 | 1,233 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for the 'database'."""
import os
import unittest
# pylint:disable=import-error
from chesttimer.db.rooster import Rooster
from chesttimer.db.rooster import Character
# pylint:disable=too-many-public-methods
class RoosterTest(unittest.TestCase):
"""Tests for the rooster/database."""
ROOSTER = './rooster.json'
def setUp(self):
"""Set up the global database.
Just make sure the database doesn't exist.
"""
super(RoosterTest, self).setUp()
self._kill_db()
return
def tearDown(self):
"""Tear down the global database."""
super(RoosterTest, self).tearDown()
self._kill_db()
return
def test_add(self):
"""Add a new character to the rooster."""
char = Character('test', 80,
Character.Races.charr,
Character.Sex.male,
Character.Professions.guardian,
{}, None)
rooster = Rooster(self.ROOSTER)
rooster.add(char)
self.assertEqual(len(rooster), 1)
return
def test_save_load(self):
"""Test if saving the rooster actually saves the file."""
char = Character('test', 80,
Character.Races.charr,
Character.Sex.male,
Character.Professions.guardian,
{Character.Disciplines.armorsmith: 500,
Character.Disciplines.weaponsmith: 415},
Character.Orders.durmand_priori)
rooster = Rooster(self.ROOSTER)
rooster.add(char)
rooster.save()
self.assertTrue(os.path.isfile(self.ROOSTER))
# try to load it back
new_rooster = Rooster(self.ROOSTER)
self.assertEqual(len(new_rooster), 1)
self.assertEqual(new_rooster[0].name, 'test')
return
def test_group_by_level(self):
"""Request the list of characters grouped by level."""
rooster = self._demo_rooster()
levels = rooster.group_by(Rooster.Fields.level)
self.assertEqual(len(levels), 2) # 2 groups, 80 & 25
self.assertEqual(levels[0]['group'], 25) # first group is 25
self.assertEqual(levels[1]['group'], 80) # second group is 80
self.assertEqual(len(levels[0]['characters']), 1) # only sgt
self.assertEqual(len(levels[1]['characters']), 2) # thor & buzz
return
def test_group_by_race(self):
"""Request the list of characters grouped by race."""
rooster = self._demo_rooster()
races = rooster.group_by(Rooster.Fields.race)
self.assertEqual(len(races), 2) # humans and charr
return
def test_group_by_profession(self):
"""Request the list of characters grouped by profession."""
rooster = self._demo_rooster()
professions = rooster.group_by(Rooster.Fields.profession)
self.assertEqual(len(professions), 3) # guard, warr and engi
return
def test_group_by_order(self):
"""Request the lsit of characters grouped by order."""
rooster = self._demo_rooster()
orders = rooster.group_by(Rooster.Fields.order)
self.assertEqual(len(orders), 2) # priori & none
return
def test_group_by_discipline(self):
"""Request the list of characters grouped by discipline."""
rooster = self._demo_rooster()
disciplines = rooster.group_by(Rooster.Fields.discipline)
# hunts, armor, weapon, leather & None
self.assertEqual(len(disciplines), 5)
return
def test_find_slug(self):
"""Find a character by slug."""
rooster = self._demo_rooster()
self.assertEqual(0, rooster.find('thorianar'))
return
def test_find_full_character(self):
"""Find a character position by full charaacter info."""
rooster = self._demo_rooster()
self.assertEqual(0, rooster.find(self._thorianar()))
return
def test_remove_slug(self):
"""Remove a character by slug."""
rooster = self._demo_rooster()
rooster.remove('thorianar')
self.assertIsNone(rooster.find('thorianar'))
return
def test_remove_character(self):
"""Remove a character by character object."""
rooster = self._demo_rooster()
rooster.remove(self._thorianar())
self.assertIsNone(rooster.find(self._thorianar()))
return
def _kill_db(self):
"""Destroy the database."""
if os.path.isfile(self.ROOSTER):
os.remove(self.ROOSTER)
return
# pylint:disable=no-self-use
def _thorianar(self):
"""Return the test character "Thorianar"."""
return Character('Thorianar', 80,
Character.Races.charr,
Character.Sex.male,
Character.Professions.guardian,
{Character.Disciplines.armorsmith: 500,
Character.Disciplines.weaponsmith: 415},
Character.Orders.durmand_priori)
def _demo_rooster(self):
"""Return a rooster with a couple of characters for group testing."""
rooster = Rooster(self.ROOSTER)
thorianar = self._thorianar()
buzzkill = Character('Commander Buzzkill', 80,
Character.Races.charr,
Character.Sex.male,
Character.Professions.engineer,
{Character.Disciplines.leatherworker: 500,
Character.Disciplines.huntsman: 400},
Character.Orders.durmand_priori)
sgt_buzzkill = Character('Sgt Buzzkill', 25,
Character.Races.human,
Character.Sex.female,
Character.Professions.warrior,
{},
None)
rooster.add(thorianar)
rooster.add(buzzkill)
rooster.add(sgt_buzzkill)
return rooster
if __name__ == '__main__':
unittest.main()
| jbiason/chesttimer | api/tests/db_tests.py | Python | gpl-3.0 | 6,280 |
# This file is part of Boomer Core.
#
# Boomer Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Boomer Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Boomer Core. If not, see <http://www.gnu.org/licenses/>.
#
# Forked from Mycroft Core on 2017-07-29
import json
__author__ = 'seanfitz'
class Message(object):
    def __init__(self, message_type, data=None, context=None):
        self.message_type = message_type
        # avoid a shared mutable default argument
        self.data = data if data is not None else {}
        self.context = context
def serialize(self):
return json.dumps({
'message_type': self.message_type,
'data': self.data,
'context': self.context
})
@staticmethod
def deserialize(json_str):
json_message = json.loads(json_str)
return Message(json_message.get('message_type'),
data=json_message.get('data'),
context=json_message.get('context'))
    def reply(self, message_type, data, context=None):
        if context is None:
            context = {}
        # copy so the original message's context is never mutated
        new_context = self.context.copy() if self.context else {}
        for key in context:
            new_context[key] = context[key]
        if 'target' in data:
            new_context['target'] = data['target']
        elif 'client_name' in context:
            new_context['target'] = context['client_name']
return Message(message_type, data, context=new_context)
    def publish(self, message_type, data, context=None):
        if context is None:
            context = {}
new_context = self.context.copy() if self.context else {}
for key in context:
new_context[key] = context[key]
if 'target' in new_context:
del new_context['target']
return Message(message_type, data, context=new_context)
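if __name__ == '__main__':
    # Minimal round-trip sketch (illustrative only): serialize a message,
    # restore it, then derive a reply whose context picks up a 'target'
    # from the supplied 'client_name' (see reply() above).
    msg = Message('speak', data={'utterance': 'hello'})
    restored = Message.deserialize(msg.serialize())
    reply = restored.reply('speak.reply', {'ok': True},
                           context={'client_name': 'cli'})
    print(reply.serialize())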
| clusterfudge/boomer | boomer/messagebus/message.py | Python | gpl-3.0 | 2,232 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from hotness.domain.package import Package
class Validator:
"""
Abstract class for validators used by the-new-hotness to validate the package.
This class must be inherited by every external validator.
"""
def validate(self, package: Package) -> dict:
"""
Validation method that should be implemented by every child class.
Params:
package: Package to validate.
Returns:
Output of validation in form of dictionary.
"""
raise NotImplementedError
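# A minimal concrete subclass sketch (hypothetical, for illustration only;
# it assumes Package exposes a `name` attribute):
#
#     class NonEmptyNameValidator(Validator):
#         def validate(self, package: Package) -> dict:
#             return {"passed": bool(package.name)}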
| fedora-infra/the-new-hotness | hotness/validators/validator.py | Python | lgpl-2.1 | 1,309 |
"""Various implementations of the Lomb-Scargle Periodogram"""
from .main import lombscargle, available_methods
from .chi2_impl import lombscargle_chi2
from .scipy_impl import lombscargle_scipy
from .slow_impl import lombscargle_slow
from .fast_impl import lombscargle_fast
from .fastchi2_impl import lombscargle_fastchi2
| pllim/astropy | astropy/timeseries/periodograms/lombscargle/implementations/__init__.py | Python | bsd-3-clause | 322 |
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, g, abort, request
from dataviva import db
from dataviva.apps.general.views import get_locale
from dataviva.api.attrs.services import Location as LocationService, LocationGdpRankings, \
LocationGdpPerCapitaRankings, LocationPopRankings, LocationAreaRankings, LocationMunicipalityRankings, Bra
from dataviva.api.secex.models import Ymb
from dataviva.api.secex.services import Location as LocationBodyService, LocationWld, LocationEciRankings
from dataviva.api.rais.services import LocationIndustry, LocationOccupation, \
LocationJobs, LocationDistance, LocationOppGain
from dataviva.api.hedu.services import LocationUniversity, LocationMajor
from dataviva.api.sc.services import LocationSchool, LocationBasicCourse
from dataviva.api.attrs.services import All
from dataviva.api.secex.services import Product
from dataviva.api.rais.services import Industry
from dataviva.api.rais.services import Occupation
from dataviva.api.hedu.services import University
from dataviva.api.sc.services import Basic_course
from dataviva.api.hedu.services import Major
from dataviva.api.sc.services import AllScholar
from dataviva.api.sc.services import AllBasicCourse
from dataviva.api.attrs.models import Wld
from sqlalchemy import desc, func
from random import randint
from decimal import *
import sys
reload(sys)
sys.setdefaultencoding('utf8')
mod = Blueprint('location', __name__,
template_folder='templates',
url_prefix='/<lang_code>/location',
static_folder='static')
education = [
'higher-education-university-tree_map',
'new-api-higher-education-university-tree_map',
'education-course-tree_map',
'new-api-education-course-tree_map',
'professional-education-school-tree_map',
'new-api-professional-education-school-tree_map',
'professional-education-course-tree_map',
'new-api-professional-education-course-tree_map',
'basic-education-administrative-dependencie-tree_map',
'new-api-basic-education-administrative-dependencie-tree_map',
'basic-education-level-tree_map',
'new-api-basic-education-level-tree_map',
'basic-education-municipality-tree_map',
'new-api-basic-education-municipality-tree_map',
'basic-education-municipality-tree_map',
]
tabs = {
'general': [],
'opportunities': [
'product-space-scatter',
'activities-space-network',
'activities-space-scatter',
],
'wages': [
'jobs-industry-tree_map',
'new-api-jobs-industry-tree_map',
'jobs-industry-stacked',
'new-api-jobs-industry-stacked',
'jobs-occupation-tree_map',
'new-api-jobs-occupation-tree_map',
'jobs-occupation-stacked',
'new-api-jobs-occupation-stacked',
'wage-industry-tree_map',
'new-api-wage-industry-tree_map',
'wage-industry-stacked',
'new-api-wage-industry-stacked',
'wage-occupation-tree_map',
'new-api-wage-occupation-tree_map',
'wage-occupation-stacked',
'new-api-wage-occupation-stacked'
],
'trade-partner': [
'trade-balance-location-line',
'new-api-trade-balance-location-line',
'exports-products-tree_map',
'new-api-exports-products-tree_map',
'exports-products-stacked',
'new-api-exports-products-stacked',
'exports-destination-tree_map',
'new-api-exports-destination-tree_map',
'exports-destination-stacked',
'new-api-exports-destination-stacked',
'imports-products-tree_map',
'new-api-imports-products-tree_map',
'imports-products-stacked',
'new-api-imports-products-stacked',
'imports-origin-tree_map',
'new-api-imports-origin-tree_map',
'imports-origin-stacked',
'new-api-imports-origin-stacked',
'new-api-exports-port-tree_map',
'new-api-imports-port-tree_map',
'new-api-exports-port-line',
'new-api-imports-port-line'
],
'education': education,
'basic-education': education,
'health': [
'equipments-municipality-map',
'equipments-municipality-tree_map',
'equipments-municipality-stacked',
'equipments-type-tree_map',
'equipments-type-bar',
'equipments-type-stacked',
'equipments-sus-bond-bar',
'establishments-municipality-map',
'establishments-municipality-tree_map',
'establishments-municipality-stacked',
'establishments-unit-type-tree_map',
'establishments-unit-type-stacked',
'establishments-facilities-bar',
'beds-municipality-map',
'beds-municipality-tree_map',
'beds-municipality-stacked',
'beds-bed-type-tree_map',
'beds-bed-type-stacked',
'beds-bed-type-bar',
'beds-sus-bond-bar',
'professionals-municipality-map',
'professionals-municipality-tree_map',
'professionals-municipality-stacked',
'professionals-provider-unit-tree_map',
'professionals-provider-unit-stacked',
'professionals-occupation-tree_map',
'professionals-occupation-stacked',
]
}
@mod.before_request
def before_request():
g.page_type = 'category'
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
def location_depth(bra_id):
locations = {
1: "region",
3: "state",
5: "mesoregion",
7: "microregion",
9: "municipality"
}
return locations[len(bra_id)]
def handle_region_bra_id(bra_id):
return {
"1": "1",
"2": "2",
"3": "5",
"4": "3",
"5": "4"
}[bra_id]
def _location_service(depth, location):
if depth == 'region':
return handle_region_bra_id(location.id)
if depth == 'mesoregion':
return str(location.id_ibge)[:2] + str(location.id_ibge)[-2:]
if depth == 'microregion':
return str(location.id_ibge)[:2] + str(location.id_ibge)[-3:]
else:
return location.id_ibge
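# Illustrative summary of the id scheme used above: depth comes purely from
# the length of bra_id (1 -> region, 3 -> state, 5 -> mesoregion,
# 7 -> microregion, 9 -> municipality); regions map through
# handle_region_bra_id, meso/microregions recompose the IBGE code from its
# first two digits plus its trailing two/three digits, and everything else
# uses id_ibge unchanged.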
@mod.route('/<bra_id>/graphs/<tab>', methods=['POST'])
def graphs(bra_id, tab):
if bra_id == 'all':
location = Wld.query.filter_by(id='sabra').first()
location.id = 'all'
depth = None
id_ibge = None
is_municipality = False
else:
location = Bra.query.filter_by(id=bra_id).first()
depth = location_depth(bra_id)
id_ibge = _location_service(depth, location)
        is_municipality = (depth == 'municipality')
return render_template('location/graphs-' + tab + '.html', location=location, depth=depth, id_ibge=id_ibge, graph=None, is_municipality=is_municipality)
@mod.route('/all', defaults={'tab': 'general'})
@mod.route('/all/<tab>')
def all(tab):
location_service_brazil = All()
product_service = Product(product_id=None)
industry_service = Industry(cnae_id=None)
occupation_service = Occupation(occupation_id=None)
university_service = University(university_id=None)
basic_course_service = Basic_course(course_sc_id=None)
major_service = Major(course_hedu_id=None, bra_id=None)
scholar_service = AllScholar()
basic_course_service = AllBasicCourse()
location = Wld.query.filter_by(id='sabra').first_or_404()
location.id = 'all'
is_municipality = False
menu = request.args.get('menu')
url = request.args.get('url')
graph = {}
if menu:
graph['menu'] = menu
if url:
        url_prefix = menu.split('-')[-1] + '/' if (menu and menu.startswith('new-api-')) or tab == 'health' else 'embed/'
graph['url'] = url_prefix + url
profile = {}
header = {
'bg_class_image': 'bg-all',
'gdp': location_service_brazil.gdp(),
'population': location_service_brazil.population(),
'gdp_per_capita': location_service_brazil.gdp_per_capita(),
'eci': 0.151,
'year_yb': location_service_brazil.year_yb(),
'year_ybs': location_service_brazil.year_ybs()
}
body = {
'product_year': product_service.year(),
'total_imports': product_service.all_imported(),
'total_exports': product_service.all_exported(),
'all_trade_balance': product_service.all_trade_balance(),
'industry_year': industry_service.get_year(),
'main_industry_by_num_jobs_name': industry_service.main_industry_by_num_jobs_name(),
'total_jobs': industry_service.total_jobs(),
'university_year': university_service.year(),
}
    # use .get(): not all of these keys are populated for this view
    if body.get('total_exports') is None and body.get('total_imports') is None and body.get('total_jobs') is None and \
            body.get('highest_enrolled_by_university') is None and body.get('highest_enrolled_by_basic_course') is None and \
            body.get('highest_enrolled_by_major') is None:
abort(404)
if tab not in tabs:
abort(404)
if menu and menu not in tabs[tab]:
abort(404)
else:
return render_template('location/index.html',
header=header, body=body, profile=profile, location=location, is_municipality=is_municipality, tab=tab, graph=graph)
@mod.route('/<bra_id>', defaults={'tab': 'general'})
@mod.route('/<bra_id>/<tab>')
def index(bra_id, tab):
location = Bra.query.filter_by(id=bra_id).first_or_404()
is_municipality = location and len(location.id) == 9
menu = request.args.get('menu')
url = request.args.get('url')
if bra_id == 'all':
depth = None
id_ibge = None
else:
depth = location_depth(bra_id)
id_ibge = _location_service(depth, location)
if depth == 'municipality':
is_municipality = True
if location:
location_id = location.id
else:
location_id = None
graph = {}
if menu:
graph['menu'] = menu
if url:
        url_prefix = menu.split('-')[-1] + '/' if (menu and menu.startswith('new-api-')) or tab == 'health' else 'embed/'
graph['url'] = url_prefix + url
if not is_municipality:
tabs['wages'] += [
'jobs-municipality-tree_map',
'new-api-jobs-municipality-tree_map',
'jobs-municipality-stacked',
'new-api-jobs-municipality-stacked',
'wages-municipality-tree_map',
'new-api-wages-municipality-tree_map',
'wages-municipality-stacked',
'new-api-wages-municipality-stacked'
]
tabs['trade-partner'] += [
'exports-municipality-tree_map',
'new-api-exports-municipality-tree_map',
'exports-municipality-stacked',
'new-api-exports-municipality-stacked',
'imports-municipality-tree_map',
'new-api-imports-municipality-tree_map',
'imports-municipality-stacked',
'new-api-imports-municipality-stacked',
]
tabs['education'] += [
'education-municipality-tree_map',
'new-api-education-municipality-tree_map',
'basic-education-municipality-tree_map',
'new-api-basic-education-municipality-tree_map',
]
location_service = LocationService(bra_id=bra_id)
location_gdp_rankings_service = LocationGdpRankings(
bra_id=bra_id, stat_id='gdp')
location_gdp_pc_rankings_service = LocationGdpPerCapitaRankings(
bra_id=bra_id)
location_pop_rankings_service = LocationPopRankings(bra_id=bra_id)
location_eci_rankings_service = LocationEciRankings(bra_id=bra_id)
location_area_rankings_service = LocationAreaRankings(bra_id=bra_id)
location_municipality_rankings_service = LocationMunicipalityRankings(bra_id=bra_id)
location_wld_service = LocationWld(bra_id=bra_id)
location_secex_service = LocationBodyService(bra_id=bra_id)
location_industry_service = LocationIndustry(bra_id=bra_id)
location_occupation_service = LocationOccupation(bra_id=bra_id)
location_jobs_service = LocationJobs(bra_id=bra_id)
location_distance_service = LocationDistance(bra_id=bra_id)
location_opp_gain_service = LocationOppGain(bra_id=bra_id)
location_university_service = LocationUniversity(bra_id=bra_id)
location_major_service = LocationMajor(bra_id=bra_id)
location_school_service = LocationSchool(bra_id=bra_id)
location_basic_course_service = LocationBasicCourse(bra_id=bra_id)
    ''' Basic SECEX query '''
max_year_query = db.session.query(
func.max(Ymb.year)).filter_by(bra_id=bra_id, month=12)
eci = Ymb.query.filter(
Ymb.bra_id == bra_id,
Ymb.month == 0,
Ymb.year == max_year_query) \
.order_by(desc(Ymb.year)).limit(1).first()
''' Background Image'''
if len(bra_id) == 1:
countys = Bra.query.filter(Bra.id.like(bra_id + '%'), func.length(Bra.id) == 3).all()
background_image = "bg-" + str(countys[randint(0, len(countys) - 1)].id) + "_" + str(randint(1, 2))
else:
background_image = "bg-" + location.id[:3] + "_" + str(randint(1, 2))
if len(bra_id) != 9 and len(bra_id) != 3:
header = {
'name': location_service.name(),
'gdp': location_service.gdp(),
'population': location_service.population(),
'gdp_per_capita': location_service.gdp() / location_service.population(),
'bg_class_image': background_image,
'year': location_service.year()
}
else:
header = {
'name': location_service.name(),
'gdp': location_service.gdp(),
'life_expectation': location_service.life_expectation(),
'population': location_service.population(),
'gdp_per_capita': location_service.gdp_per_capita(),
'hdi': location_service.hdi(),
'bg_class_image': background_image,
'year': location_service.year(),
'year_dhs': location_service.year_dhs()
}
if eci is not None:
header['eci'] = eci.eci
header['eci_year'] = eci.year
body = {
'product_year': location_secex_service.year(),
'main_product_by_export_value_name': location_secex_service.main_product_by_export_value_name(),
'total_exports': location_secex_service.total_exports(),
'less_distance_by_product': location_secex_service.less_distance_by_product(),
'less_distance_by_product_name': location_secex_service.less_distance_by_product_name(),
'opportunity_gain_by_product': location_secex_service.opportunity_gain_by_product(),
'opportunity_gain_by_product_name': location_secex_service.opportunity_gain_by_product_name(),
'secex_year': location_secex_service.year(),
'industry_year': location_industry_service.year(),
'rais_year': location_jobs_service.year(),
'less_distance_by_occupation': location_distance_service.less_distance_by_occupation(),
'less_distance_by_occupation_name': location_distance_service.less_distance_by_occupation_name(),
'opportunity_gain_by_occupation': location_opp_gain_service.opportunity_gain_by_occupation(),
'opportunity_gain_by_occupation_name': location_opp_gain_service.opportunity_gain_by_occupation_name(),
'university_year': location_university_service.year(),
'basic_course_year': location_basic_course_service.year()
}
if len(bra_id) == 9:
profile = {
'number_of_municipalities': location_service.number_of_locations(len(bra_id)),
'bra_id': bra_id,
'state_name': location_service.location_name(3),
'mesoregion_name': location_service.location_name(5),
'gdp_rank': location_gdp_rankings_service.gdp_rank(),
'area': Decimal(location_service.area())
}
elif len(bra_id) == 7:
profile = {
'number_of_microregions': location_service.number_of_locations(len(bra_id)),
'bra_id': bra_id,
'state_name': location_service.location_name(3),
'mesoregion_name': location_service.location_name(5),
'number_of_municipalities': location_service.number_of_municipalities()
}
elif len(bra_id) == 5:
profile = {
'number_of_mesoregions': location_service.number_of_locations(len(bra_id)),
'bra_id': bra_id,
'state_name': location_service.location_name(3),
'eci_rank': location_eci_rankings_service.eci_rank()
}
elif len(bra_id) == 1:
profile = {
'number_of_regions': location_service.number_of_locations(len(bra_id)),
'bra_id': bra_id,
'gdp_pc_rank': location_gdp_pc_rankings_service.gdp_pc_rank(),
'pop_rank': location_pop_rankings_service.pop_rank(),
'region_states': location_service.states_in_a_region()
}
else:
profile = {
'number_of_states': location_service.number_of_locations(len(bra_id)),
'region_name': location_service.location_name(1),
'number_of_municipalities': location_service.number_of_locations(9),
'pop_rank': location_pop_rankings_service.pop_rank(),
'area_rank': location_area_rankings_service.area_rank(),
'neighbors': location_service.neighbors(),
'municipality_rank': location_municipality_rankings_service.municipality_rank()
}
    # use .get(): not all of these keys are populated for this view
    if body.get('total_exports') is None and body.get('total_imports') is None and body.get('total_jobs') is None and \
            body.get('highest_enrolled_by_university') is None and body.get('highest_enrolled_by_basic_course') is None and \
            body.get('highest_enrolled_by_major') is None:
abort(404)
if tab not in tabs:
abort(404)
if menu and menu not in tabs[tab]:
abort(404)
else:
return render_template('location/index.html',
header=header, body=body, profile=profile, location=location, is_municipality=is_municipality, tab=tab, graph=graph, id_ibge=id_ibge)
| DataViva/dataviva-site | dataviva/apps/location/views.py | Python | mit | 18,908 |
def compute_distance_extremes(X, a, b, M):
"""
Usage:
from compute_distance_extremes import compute_distance_extremes
(l, u) = compute_distance_extremes(X, a, b, M)
    Computes a sample histogram of the distances between rows of X and returns
    the values of these distances at the a^th and b^th percentiles. This
    method is used to determine the upper and lower bounds for
    similarity / dissimilarity constraints.
Args:
X: (n x m) data matrix
a: lower bound percentile between 1 and 100
b: upper bound percentile between 1 and 100
M: Mahalanobis matrix to compute distances
Returns:
l: distance corresponding to a^th percentile
        u: distance corresponding to the b^th percentile
"""
import numpy as np
import random
random.seed(0)
if (a < 1) or (a > 100):
raise Exception('a must be between 1 and 100')
if (b < 1) or (b > 100):
raise Exception('b must be between 1 and 100')
n = X.shape[0]
    num_trials = min(100, n * (n - 1) // 2)
# sample with replacement
dists = np.zeros((num_trials, 1))
    for i in xrange(num_trials):
        j1 = int(np.floor(random.uniform(0, n)))
        j2 = int(np.floor(random.uniform(0, n)))
        dists[i] = np.dot(np.dot((X[j1, :] - X[j2, :]), M), (X[j1, :] - X[j2, :]).T)
    # return frequencies and bin extremities
    (f, ext) = np.histogram(dists, bins=100)  # 100 bins, so bin index ~ percentile
# get bin centers
c = [(ext[i]+float(ext[i+1])) / 2 for i in xrange(len(ext) - 1)]
# get values at percentiles
    l = c[int(np.floor(a)) - 1]  # bin-center distance at the a^th percentile
    u = c[int(np.floor(b)) - 1]  # bin-center distance at the b^th percentile
return l, u
def get_constraints(y, num_constraints, l, u):
"""
get_constraints(y, num_constraints, l, u)
Get ITML constraint matrix from true labels.
"""
import numpy as np
import random
random.seed(0)
# Make quartets for pairs of indices [index1, index2, 1 or -1, l or u]
# Note that l always goes with 1 and u always goes with -1
m = len(y)
C = np.zeros((num_constraints, 4))
for k in xrange(num_constraints):
        i = int(np.floor(random.uniform(0, m)))
        j = int(np.floor(random.uniform(0, m)))
if y[i] == y[j]:
C[k, :] = (i, j, 1, l)
else:
C[k, :] = (i, j, -1, u)
return np.array(C)
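# Illustrative usage sketch (hypothetical demo helper, not referenced
# elsewhere): drives the two utilities above with synthetic data and an
# identity Mahalanobis matrix; the percentile bounds (5, 95) and the
# constraint count (20) are arbitrary choices for demonstration.
def _itml_utils_demo():
    import numpy as np
    X = np.random.rand(50, 4)
    y = np.random.randint(0, 3, size=50)
    (l, u) = compute_distance_extremes(X, 5, 95, np.eye(4))
    C = get_constraints(y, 20, l, u)
    assert C.shape == (20, 4)
    return C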
|
johncollins/metric-learn
|
metric_learn/itml/utils.py
|
Python
|
bsd-2-clause
| 2,482
|
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is based on work under the following copyright and permission notice:
# https://github.com/test262-utils/test262-harness-py
# test262.py, _monkeyYaml.py, parseTestRecord.py
# license of test262.py:
# Copyright 2009 the Sputnik authors. All rights reserved.
# This code is governed by the BSD license found in the LICENSE file.
# This is derived from sputnik.py, the Sputnik console test runner,
# with elements from packager.py, which is separately
# copyrighted. TODO: Refactor so there is less duplication between
# test262.py and packager.py.
# license of _packager.py:
# Copyright (c) 2012 Ecma International. All rights reserved.
# This code is governed by the BSD license found in the LICENSE file.
# license of _monkeyYaml.py:
# Copyright 2014 by Sam Mikes. All rights reserved.
# This code is governed by the BSD license found in the LICENSE file.
# license of parseTestRecord.py:
# Copyright 2011 by Google, Inc. All rights reserved.
# This code is governed by the BSD license found in the LICENSE file.
from __future__ import print_function
import logging
import optparse
import os
from os import path
import platform
import re
import subprocess
import sys
import tempfile
import xml.dom.minidom
from collections import Counter
import signal
import threading
import multiprocessing
#######################################################################
# based on _monkeyYaml.py
#######################################################################
M_YAML_LIST_PATTERN = re.compile(r"^\[(.*)\]$")
M_YAML_MULTILINE_LIST = re.compile(r"^ *- (.*)$")
# The timeout of each test case
TEST262_CASE_TIMEOUT = 180
def yaml_load(string):
return my_read_dict(string.splitlines())[1]
def my_read_dict(lines, indent=""):
dictionary = {}
key = None
empty_lines = 0
while lines:
if not lines[0].startswith(indent):
break
line = lines.pop(0)
if my_is_all_spaces(line):
empty_lines += 1
continue
result = re.match(r"(.*?):(.*)", line)
if result:
if not dictionary:
dictionary = {}
key = result.group(1).strip()
value = result.group(2).strip()
(lines, value) = my_read_value(lines, value, indent)
dictionary[key] = value
else:
if dictionary and key and key in dictionary:
char = " " if empty_lines == 0 else "\n" * empty_lines
dictionary[key] += char + line.strip()
else:
raise Exception("monkeyYaml is confused at " + line)
empty_lines = 0
if not dictionary:
dictionary = None
return lines, dictionary
def my_read_value(lines, value, indent):
if value == ">" or value == "|":
(lines, value) = my_multiline(lines, value == "|")
value = value + "\n"
return (lines, value)
if lines and not value:
if my_maybe_list(lines[0]):
return my_multiline_list(lines, value)
indent_match = re.match("(" + indent + r"\s+)", lines[0])
if indent_match:
if ":" in lines[0]:
return my_read_dict(lines, indent_match.group(1))
return my_multiline(lines, False)
return lines, my_read_one_line(value)
def my_maybe_list(value):
return M_YAML_MULTILINE_LIST.match(value)
def my_multiline_list(lines, value):
    # assume no explicit indentation indicator (otherwise we would have to parse the value)
value = []
indent = 0
while lines:
line = lines.pop(0)
leading = my_leading_spaces(line)
if my_is_all_spaces(line):
pass
elif leading < indent:
lines.insert(0, line)
break
else:
indent = indent or leading
value += [my_read_one_line(my_remove_list_header(indent, line))]
return (lines, value)
def my_remove_list_header(indent, line):
line = line[indent:]
return M_YAML_MULTILINE_LIST.match(line).group(1)
def my_read_one_line(value):
if M_YAML_LIST_PATTERN.match(value):
return my_flow_list(value)
elif re.match(r"^[-0-9]*$", value):
try:
value = int(value)
except ValueError:
pass
elif re.match(r"^[-.0-9eE]*$", value):
try:
value = float(value)
except ValueError:
pass
elif re.match(r"^('|\").*\1$", value):
value = value[1:-1]
return value
def my_flow_list(value):
result = M_YAML_LIST_PATTERN.match(value)
values = result.group(1).split(",")
return [my_read_one_line(v.strip()) for v in values]
def my_multiline(lines, preserve_newlines=False):
    # assume no explicit indentation indicator (otherwise we would have to parse the value)
value = ""
indent = my_leading_spaces(lines[0])
was_empty = None
while lines:
line = lines.pop(0)
is_empty = my_is_all_spaces(line)
if is_empty:
if preserve_newlines:
value += "\n"
elif my_leading_spaces(line) < indent:
lines.insert(0, line)
break
else:
if preserve_newlines:
                if was_empty is not None:
value += "\n"
else:
if was_empty:
value += "\n"
elif was_empty is False:
value += " "
value += line[(indent):]
was_empty = is_empty
return (lines, value)
def my_is_all_spaces(line):
return len(line.strip()) == 0
def my_leading_spaces(line):
return len(line) - len(line.lstrip(' '))
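# Illustrative sketch (hypothetical demo helper; the frontmatter keys below
# are made up): the mini-parser above handles the small YAML subset used by
# test262 frontmatter -- plain key/value pairs, flow lists and block lists.
def _yaml_load_demo():
    sample = "\n".join([
        "description: sample test",
        "flags: [onlyStrict, async]",
        "includes:",
        "  - assert.js",
        "  - sta.js",
    ])
    parsed = yaml_load(sample)
    assert parsed == {"description": "sample test",
                      "flags": ["onlyStrict", "async"],
                      "includes": ["assert.js", "sta.js"]}
    return parsed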
#######################################################################
# based on parseTestRecord.py
#######################################################################
# Matches trailing whitespace and any following blank lines.
_BLANK_LINES = r"([ \t]*[\r\n]{1,2})*"
# Matches the YAML frontmatter block.
# It must be non-greedy because test262-es2015/built-ins/Object/assign/Override.js contains a comment that resembles the YAML pattern
_YAML_PATTERN = re.compile(r"/\*---(.*?)---\*/" + _BLANK_LINES, re.DOTALL)
# Matches all known variants for the license block.
# https://github.com/tc39/test262/blob/705d78299cf786c84fa4df473eff98374de7135a/tools/lint/lib/checks/license.py
_LICENSE_PATTERN = re.compile(
r'// Copyright( \([C]\))? (\w+) .+\. {1,2}All rights reserved\.[\r\n]{1,2}' +
r'(' +
r'// This code is governed by the( BSD)? license found in the LICENSE file\.' +
r'|' +
r'// See LICENSE for details.' +
r'|' +
r'// Use of this source code is governed by a BSD-style license that can be[\r\n]{1,2}' +
r'// found in the LICENSE file\.' +
r'|' +
r'// See LICENSE or https://github\.com/tc39/test262/blob/master/LICENSE' +
r')' + _BLANK_LINES, re.IGNORECASE)
def yaml_attr_parser(test_record, attrs, name, onerror=print):
parsed = yaml_load(attrs)
if parsed is None:
onerror("Failed to parse yaml in name %s" % name)
return
for key in parsed:
value = parsed[key]
if key == "info":
key = "commentary"
test_record[key] = value
if 'flags' in test_record:
for flag in test_record['flags']:
test_record[flag] = ""
def find_license(src):
match = _LICENSE_PATTERN.search(src)
if not match:
return None
return match.group(0)
def find_attrs(src):
match = _YAML_PATTERN.search(src)
if not match:
return (None, None)
return (match.group(0), match.group(1).strip())
def parse_test_record(src, name, onerror=print):
# Find the license block.
header = find_license(src)
# Find the YAML frontmatter.
(frontmatter, attrs) = find_attrs(src)
# YAML frontmatter is required for all tests.
if frontmatter is None:
onerror("Missing frontmatter: %s" % name)
    # The license should be placed before the frontmatter and there shouldn't be
# any extra content between the license and the frontmatter.
if header is not None and frontmatter is not None:
header_idx = src.index(header)
frontmatter_idx = src.index(frontmatter)
if header_idx > frontmatter_idx:
onerror("Unexpected license after frontmatter: %s" % name)
# Search for any extra test content, but ignore whitespace only or comment lines.
extra = src[header_idx + len(header): frontmatter_idx]
if extra and any(line.strip() and not line.lstrip().startswith("//") for line in extra.split("\n")):
onerror(
"Unexpected test content between license and frontmatter: %s" % name)
# Remove the license and YAML parts from the actual test content.
test = src
if frontmatter is not None:
test = test.replace(frontmatter, '')
if header is not None:
test = test.replace(header, '')
test_record = {}
test_record['header'] = header.strip() if header else ''
test_record['test'] = test
if attrs:
yaml_attr_parser(test_record, attrs, name, onerror)
# Report if the license block is missing in non-generated tests.
if header is None and "generated" not in test_record and "hashbang" not in name:
onerror("No license found in: %s" % name)
return test_record
#######################################################################
# based on test262.py
#######################################################################
class Test262Error(Exception):
def __init__(self, message):
Exception.__init__(self)
self.message = message
def report_error(error_string):
raise Test262Error(error_string)
def build_options():
result = optparse.OptionParser()
result.add_option("--command", default=None,
help="The command-line to run")
result.add_option("--tests", default=path.abspath('.'),
help="Path to the tests")
result.add_option("--exclude-list", default=None,
help="Path to the excludelist.xml file")
result.add_option("--cat", default=False, action="store_true",
help="Print packaged test code that would be run")
result.add_option("--summary", default=False, action="store_true",
help="Print summary after running tests")
result.add_option("--full-summary", default=False, action="store_true",
help="Print summary and test output after running tests")
result.add_option("--strict_only", default=False, action="store_true",
help="Test only strict mode")
result.add_option("--non_strict_only", default=False, action="store_true",
help="Test only non-strict mode")
result.add_option("--unmarked_default", default="both",
help="default mode for tests of unspecified strictness")
result.add_option("-j", "--job-count", default=None, action="store", type=int,
                      help="Number of parallel test jobs to run. If '0', the CPU count is used.")
result.add_option("--logname", help="Filename to save stdout to")
result.add_option("--loglevel", default="warning",
help="sets log level to debug, info, warning, error, or critical")
result.add_option("--print-handle", default="print",
help="Command to print from console")
result.add_option("--list-includes", default=False, action="store_true",
help="List includes required by tests")
result.add_option("--module-flag", default="-m",
                      help="Engine flag used to run a test file as a module")
return result
def validate_options(options):
if not options.command:
report_error("A --command must be specified.")
if not path.exists(options.tests):
report_error("Couldn't find test path '%s'" % options.tests)
def is_windows():
actual_platform = platform.system()
return (actual_platform == 'Windows') or (actual_platform == 'Microsoft')
class TempFile(object):
def __init__(self, suffix="", prefix="tmp", text=False):
self.suffix = suffix
self.prefix = prefix
self.text = text
self.file_desc = None
self.name = None
self.is_closed = False
self.open_file()
def open_file(self):
(self.file_desc, self.name) = tempfile.mkstemp(
suffix=self.suffix,
prefix=self.prefix,
text=self.text)
def write(self, string):
os.write(self.file_desc, string)
def read(self):
        file_desc = open(self.name)
result = file_desc.read()
file_desc.close()
return result
def close(self):
if not self.is_closed:
self.is_closed = True
os.close(self.file_desc)
def dispose(self):
try:
self.close()
os.unlink(self.name)
except OSError as exception:
logging.error("Error disposing temp file: %s", str(exception))
class TestResult(object):
def __init__(self, exit_code, stdout, stderr, case):
self.exit_code = exit_code
self.stdout = stdout
self.stderr = stderr
self.case = case
def report_outcome(self, long_format):
name = self.case.get_name()
mode = self.case.get_mode()
if self.exit_code != 0 and self.exit_code != 1:
sys.stderr.write(u"===%s failed in %s with negative:%s===\n"
% (name, mode, self.case.get_negative_type()))
self.write_output(sys.stderr)
if self.has_unexpected_outcome():
if self.case.is_negative():
print("=== %s passed in %s, but was expected to fail ===" % (name, mode))
print("--- expected error: %s ---\n" % self.case.get_negative_type())
else:
if long_format:
print("=== %s failed in %s ===" % (name, mode))
else:
print("%s in %s: " % (name, mode))
self.write_output(sys.stdout)
if long_format:
print("===")
elif self.case.is_negative():
print("%s failed in %s as expected" % (name, mode))
else:
print("%s passed in %s" % (name, mode))
def write_output(self, target):
out = self.stdout.strip()
if out:
target.write("--- output --- \n %s" % out)
error = self.stderr.strip()
if error:
target.write("--- errors --- \n %s" % error)
target.write("\n--- exit code: %d ---\n" % self.exit_code)
def has_failed(self):
return self.exit_code != 0
def async_has_failed(self):
return 'Test262:AsyncTestComplete' not in self.stdout
def has_unexpected_outcome(self):
if self.case.is_negative():
return not (self.has_failed() and self.case.negative_match(self.get_error_output()))
if self.case.is_async_test():
return self.async_has_failed() or self.has_failed()
return self.has_failed()
def get_error_output(self):
if self.stderr:
return self.stderr
return self.stdout
class TestCase(object):
def __init__(self, suite, name, full_path, strict_mode, command_template, module_flag):
self.suite = suite
self.name = name
self.full_path = full_path
self.strict_mode = strict_mode
with open(self.full_path, "rb") as file_desc:
self.contents = file_desc.read()
test_record = parse_test_record(self.contents, name)
self.test = test_record["test"]
del test_record["test"]
del test_record["header"]
test_record.pop("commentary", None) # do not throw if missing
self.test_record = test_record
self.command_template = command_template
self.module_flag = module_flag
self.validate()
def negative_match(self, stderr):
neg = re.compile(self.get_negative_type())
return re.search(neg, stderr)
def get_negative(self):
if not self.is_negative():
return None
return self.test_record["negative"]
def get_negative_type(self):
negative = self.get_negative()
if isinstance(negative, dict) and "type" in negative:
return negative["type"]
return negative
def get_negative_phase(self):
negative = self.get_negative()
return negative and "phase" in negative and negative["phase"]
def get_name(self):
return path.join(*self.name)
def get_mode(self):
if self.strict_mode:
return "strict mode"
return "non-strict mode"
def get_path(self):
return self.name
def is_negative(self):
return 'negative' in self.test_record
def is_only_strict(self):
return 'onlyStrict' in self.test_record
def is_no_strict(self):
return 'noStrict' in self.test_record or self.is_raw()
def is_raw(self):
return 'raw' in self.test_record
def is_async_test(self):
return 'async' in self.test_record or '$DONE' in self.test
def is_module(self):
return 'module' in self.test_record
def get_include_list(self):
if self.test_record.get('includes'):
return self.test_record['includes']
return []
def get_additional_includes(self):
return '\n'.join([self.suite.get_include(include) for include in self.get_include_list()])
def get_source(self):
if self.is_raw():
return self.test
source = self.suite.get_include("sta.js") + \
self.suite.get_include("assert.js")
if self.is_async_test():
source = source + \
self.suite.get_include("timer.js") + \
self.suite.get_include("doneprintHandle.js").replace(
'print', self.suite.print_handle)
source = source + \
self.get_additional_includes() + \
self.test + '\n'
if self.get_negative_phase() == "early":
source = ("throw 'Expected an early error, but code was executed.';\n" +
source)
if self.strict_mode:
source = '"use strict";\nvar strict_mode = true;\n' + source
else:
# add comment line so line numbers match in both strict and non-strict version
source = '//"no strict";\nvar strict_mode = false;\n' + source
return source
@staticmethod
def instantiate_template(template, params):
def get_parameter(match):
key = match.group(1)
return params.get(key, match.group(0))
return re.sub(r"\{\{(\w+)\}\}", get_parameter, template)
@staticmethod
def execute(command):
if is_windows():
args = '%s' % command
else:
args = command.split(" ")
stdout = TempFile(prefix="test262-out-")
stderr = TempFile(prefix="test262-err-")
try:
logging.info("exec: %s", str(args))
process = subprocess.Popen(
args,
shell=False,
stdout=stdout.file_desc,
stderr=stderr.file_desc
)
timer = threading.Timer(TEST262_CASE_TIMEOUT, process.kill)
timer.start()
code = process.wait()
timer.cancel()
out = stdout.read()
err = stderr.read()
finally:
stdout.dispose()
stderr.dispose()
return (code, out, err)
def run_test_in(self, tmp):
tmp.write(self.get_source())
tmp.close()
if self.is_module():
arg = self.module_flag + ' ' + tmp.name
else:
arg = tmp.name
command = TestCase.instantiate_template(self.command_template, {
'path': arg
})
(code, out, err) = TestCase.execute(command)
return TestResult(code, out, err, self)
def run(self):
tmp = TempFile(suffix=".js", prefix="test262-")
try:
result = self.run_test_in(tmp)
finally:
tmp.dispose()
return result
def print_source(self):
print(self.get_source())
def validate(self):
flags = self.test_record.get("flags")
phase = self.get_negative_phase()
if phase not in [None, False, "parse", "early", "runtime", "resolution"]:
raise TypeError("Invalid value for negative phase: " + phase)
if not flags:
return
if 'raw' in flags:
if 'noStrict' in flags:
raise TypeError("The `raw` flag implies the `noStrict` flag")
elif 'onlyStrict' in flags:
raise TypeError(
"The `raw` flag is incompatible with the `onlyStrict` flag")
elif self.get_include_list():
raise TypeError(
"The `raw` flag is incompatible with the `includes` tag")
def pool_init():
"""Ignore CTRL+C in the worker process."""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def test_case_run_process(case):
return case.run()
class ProgressIndicator(object):
def __init__(self, count):
self.count = count
self.succeeded = 0
self.failed = 0
self.failed_tests = []
def has_run(self, result):
result.report_outcome(True)
if result.has_unexpected_outcome():
self.failed += 1
self.failed_tests.append(result)
else:
self.succeeded += 1
def make_plural(num):
if num == 1:
return (num, "")
return (num, "s")
def percent_format(partial, total):
return "%i test%s (%.1f%%)" % (make_plural(partial) +
((100.0 * partial)/total,))
class TestSuite(object):
def __init__(self, options):
self.test_root = path.join(options.tests, 'test')
self.lib_root = path.join(options.tests, 'harness')
self.strict_only = options.strict_only
self.non_strict_only = options.non_strict_only
self.unmarked_default = options.unmarked_default
self.print_handle = options.print_handle
self.include_cache = {}
self.exclude_list_path = options.exclude_list
self.module_flag = options.module_flag
self.logf = None
def _load_excludes(self):
if self.exclude_list_path and os.path.exists(self.exclude_list_path):
xml_document = xml.dom.minidom.parse(self.exclude_list_path)
xml_tests = xml_document.getElementsByTagName("test")
return {x.getAttribute("id") for x in xml_tests}
return set()
def validate(self):
if not path.exists(self.test_root):
report_error("No test repository found")
if not path.exists(self.lib_root):
report_error("No test library found")
@staticmethod
def is_hidden(test_path):
return test_path.startswith('.') or test_path == 'CVS'
@staticmethod
def is_test_case(test_path):
return test_path.endswith('.js') and not test_path.endswith('_FIXTURE.js')
@staticmethod
def should_run(rel_path, tests):
if not tests:
return True
for test in tests:
if test in rel_path:
return True
return False
def get_include(self, name):
        if name not in self.include_cache:
static = path.join(self.lib_root, name)
if path.exists(static):
with open(static) as file_desc:
contents = file_desc.read()
contents = re.sub(r'\r\n', '\n', contents)
self.include_cache[name] = contents + "\n"
else:
report_error("Can't find: " + static)
return self.include_cache[name]
def enumerate_tests(self, tests, command_template):
exclude_list = self._load_excludes()
logging.info("Listing tests in %s", self.test_root)
cases = []
for root, dirs, files in os.walk(self.test_root):
for hidden_dir in [x for x in dirs if self.is_hidden(x)]:
dirs.remove(hidden_dir)
dirs.sort()
for test_path in filter(TestSuite.is_test_case, sorted(files)):
full_path = path.join(root, test_path)
if full_path.startswith(self.test_root):
rel_path = full_path[len(self.test_root)+1:]
else:
logging.warning("Unexpected path %s", full_path)
rel_path = full_path
if self.should_run(rel_path, tests):
basename = path.basename(full_path)[:-3]
name = rel_path.split(path.sep)[:-1] + [basename]
if rel_path in exclude_list:
print('Excluded: ' + rel_path)
else:
if not self.non_strict_only:
strict_case = TestCase(self, name, full_path, True, command_template, self.module_flag)
if not strict_case.is_no_strict():
if strict_case.is_only_strict() or self.unmarked_default in ['both', 'strict']:
cases.append(strict_case)
if not self.strict_only:
non_strict_case = TestCase(self, name, full_path, False, command_template, self.module_flag)
if not non_strict_case.is_only_strict():
if non_strict_case.is_no_strict() or self.unmarked_default in ['both', 'non_strict']:
cases.append(non_strict_case)
logging.info("Done listing tests")
return cases
def print_summary(self, progress, logfile):
def write(string):
if logfile:
self.logf.write(string + "\n")
print(string)
print("")
write("=== Summary ===")
count = progress.count
succeeded = progress.succeeded
failed = progress.failed
write(" - Ran %i test%s" % make_plural(count))
if progress.failed == 0:
write(" - All tests succeeded")
else:
write(" - Passed " + percent_format(succeeded, count))
write(" - Failed " + percent_format(failed, count))
positive = [c for c in progress.failed_tests if not c.case.is_negative()]
negative = [c for c in progress.failed_tests if c.case.is_negative()]
if positive:
print("")
write("Failed Tests")
for result in positive:
write(" %s in %s" % (result.case.get_name(), result.case.get_mode()))
if negative:
print("")
write("Expected to fail but passed ---")
for result in negative:
write(" %s in %s" % (result.case.get_name(), result.case.get_mode()))
def print_failure_output(self, progress, logfile):
for result in progress.failed_tests:
if logfile:
self.write_log(result)
print("")
result.report_outcome(False)
def run(self, command_template, tests, print_summary, full_summary, logname, job_count=1):
        if "{{path}}" not in command_template:
command_template += " {{path}}"
cases = self.enumerate_tests(tests, command_template)
if not cases:
report_error("No tests to run")
progress = ProgressIndicator(len(cases))
if logname:
self.logf = open(logname, "w")
if job_count == 1:
for case in cases:
result = case.run()
if logname:
self.write_log(result)
progress.has_run(result)
else:
if job_count == 0:
job_count = None # uses multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=job_count, initializer=pool_init)
try:
for result in pool.imap(test_case_run_process, cases):
if logname:
self.write_log(result)
progress.has_run(result)
except KeyboardInterrupt:
pool.terminate()
pool.join()
if print_summary:
self.print_summary(progress, logname)
if full_summary:
self.print_failure_output(progress, logname)
else:
print("")
print("Use --full-summary to see output from failed tests")
print("")
return progress.failed
def write_log(self, result):
name = result.case.get_name()
mode = result.case.get_mode()
if result.has_unexpected_outcome():
if result.case.is_negative():
self.logf.write(
"=== %s passed in %s, but was expected to fail === \n" % (name, mode))
                self.logf.write("--- expected error: %s ---\n" % result.case.get_negative_type())
result.write_output(self.logf)
else:
self.logf.write("=== %s failed in %s === \n" % (name, mode))
result.write_output(self.logf)
self.logf.write("===\n")
elif result.case.is_negative():
self.logf.write("%s failed in %s as expected \n" % (name, mode))
else:
self.logf.write("%s passed in %s \n" % (name, mode))
def print_source(self, tests):
cases = self.enumerate_tests(tests, "")
if cases:
cases[0].print_source()
def list_includes(self, tests):
cases = self.enumerate_tests(tests, "")
includes_dict = Counter()
for case in cases:
includes = case.get_include_list()
includes_dict.update(includes)
print(includes_dict)
def main():
code = 0
parser = build_options()
(options, args) = parser.parse_args()
validate_options(options)
test_suite = TestSuite(options)
test_suite.validate()
if options.loglevel == 'debug':
logging.basicConfig(level=logging.DEBUG)
elif options.loglevel == 'info':
logging.basicConfig(level=logging.INFO)
elif options.loglevel == 'warning':
logging.basicConfig(level=logging.WARNING)
elif options.loglevel == 'error':
logging.basicConfig(level=logging.ERROR)
elif options.loglevel == 'critical':
logging.basicConfig(level=logging.CRITICAL)
if options.cat:
test_suite.print_source(args)
elif options.list_includes:
test_suite.list_includes(args)
else:
code = test_suite.run(options.command, args,
options.summary or options.full_summary,
options.full_summary,
options.logname,
options.job_count)
return code
if __name__ == '__main__':
try:
sys.exit(main())
except Test262Error as exception:
print("Error: %s" % exception.message)
sys.exit(1)
|
jerryscript-project/jerryscript
|
tools/runners/test262-harness.py
|
Python
|
apache-2.0
| 31,970
|
#This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
# -*- coding: utf-8 -*-
"""
Miscellaneous tools used by tryton
"""
import os
import sys
import subprocess
from threading import local
import smtplib
import dis
from decimal import Decimal
from trytond.config import CONFIG
from trytond.const import OPERATORS
def find_in_path(name):
if os.name == "nt":
sep = ';'
else:
sep = ':'
path = [directory for directory in os.environ['PATH'].split(sep)
if os.path.isdir(directory)]
for directory in path:
val = os.path.join(directory, name)
if os.path.isfile(val) or os.path.islink(val):
return val
return name
def find_pg_tool(name):
if CONFIG['pg_path'] and CONFIG['pg_path'] != 'None':
return os.path.join(CONFIG['pg_path'], name)
else:
return find_in_path(name)
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
args2 = (os.path.basename(prog),) + args
return os.spawnv(os.P_WAIT, prog, args2)
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
if os.name == "nt":
cmd = '"' + prog + '" ' + ' '.join(args)
else:
cmd = prog + ' ' + ' '.join(args)
# if db_password is set in configuration we should pass
# an environment variable PGPASSWORD to our subprocess
    # see libpq documentation
child_env = dict(os.environ)
if CONFIG['db_password']:
child_env['PGPASSWORD'] = CONFIG['db_password']
pipe = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, env=child_env)
return pipe
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
if os.name == "nt":
cmd = '"' + prog + '" ' + ' '.join(args)
else:
cmd = prog + ' ' + ' '.join(args)
return os.popen2(cmd, 'b')
def file_open(name, mode="r", subdir='modules'):
"""Open a file from the root dir, using a subdir folder."""
from trytond.modules import EGG_MODULES
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
egg_name = False
if subdir == 'modules':
module_name = name.split(os.sep)[0]
if module_name in EGG_MODULES:
epoint = EGG_MODULES[module_name]
mod_path = os.path.join(epoint.dist.location,
*epoint.module_name.split('.')[:-1])
egg_name = os.path.join(mod_path, name)
if not os.path.isfile(egg_name):
# Find module in path
for path in sys.path:
mod_path = os.path.join(path,
*epoint.module_name.split('.')[:-1])
egg_name = os.path.join(mod_path, name)
if os.path.isfile(egg_name):
break
if not os.path.isfile(egg_name):
# When testing modules from setuptools location is the
# module directory
egg_name = os.path.join(
os.path.dirname(epoint.dist.location), name)
if subdir:
if (subdir == 'modules'
and (name.startswith('ir' + os.sep)
or name.startswith('res' + os.sep)
or name.startswith('webdav' + os.sep)
or name.startswith('test' + os.sep))):
name = os.path.join(root_path, name)
else:
name = os.path.join(root_path, subdir, name)
else:
name = os.path.join(root_path, name)
for i in (name, egg_name):
if i and os.path.isfile(i):
return open(i, mode)
raise IOError('File not found : %s ' % name)
def get_smtp_server():
"""
    Instantiate, configure and return an SMTP or SMTP_SSL instance from
    smtplib.
    :return: An SMTP instance. The quit() method must be called when all
        the calls to sendmail() have been made.
"""
if CONFIG['smtp_ssl']:
smtp_server = smtplib.SMTP_SSL(CONFIG['smtp_server'],
CONFIG['smtp_port'])
else:
smtp_server = smtplib.SMTP(CONFIG['smtp_server'], CONFIG['smtp_port'])
if CONFIG['smtp_tls']:
smtp_server.starttls()
if CONFIG['smtp_user'] and CONFIG['smtp_password']:
smtp_server.login(CONFIG['smtp_user'], CONFIG['smtp_password'])
return smtp_server
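# Illustrative usage sketch (hypothetical demo helper; assumes the smtp_*
# options are set in CONFIG and uses placeholder addresses):
def _send_test_mail():
    from email.mime.text import MIMEText
    msg = MIMEText('test body')
    msg['Subject'] = 'test'
    msg['From'] = 'noreply@example.com'
    msg['To'] = 'user@example.com'
    smtp_server = get_smtp_server()
    try:
        smtp_server.sendmail(msg['From'], [msg['To']], msg.as_string())
    finally:
        smtp_server.quit()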
def memoize(maxsize):
"""
Decorator to 'memoize' a function - caching its results with a
near LRU implementation.
    The cache keeps a list of keys logically separated into 4 segments:
    segment 1 | ... | segment 4
    [k,k,k,k,k,k,k, .. ,k,k,k,k,k,k,k]
    Each segment has a pointer that loops over it. When a key is
    accessed from the cache it is promoted to the first segment (at the
    pointer position of segment one), the key under the pointer is
    moved to the next segment, the pointer is then incremented, and so
    on. A key that is pushed out of the last segment is removed from
    the cache.
    :param maxsize: the size of the cache (must be greater than or
        equal to 4)
"""
assert maxsize >= 4, "Memoize cannot work if maxsize is less than 4"
def wrap(fct):
cache = {}
keys = [None for i in xrange(maxsize)]
seg_size = maxsize // 4
pointers = [i * seg_size for i in xrange(4)]
max_pointers = [(i + 1) * seg_size for i in xrange(3)] + [maxsize]
def wrapper(*args):
key = repr(args)
res = cache.get(key)
if res:
pos, res = res
keys[pos] = None
else:
res = fct(*args)
value = res
for segment, pointer in enumerate(pointers):
newkey = keys[pointer]
keys[pointer] = key
cache[key] = (pointer, value)
pointers[segment] = pointer + 1
if pointers[segment] == max_pointers[segment]:
pointers[segment] = segment * seg_size
if newkey is None:
break
segment, value = cache.pop(newkey)
key = newkey
return res
wrapper.__doc__ = fct.__doc__
wrapper.__name__ = fct.__name__
return wrapper
return wrap
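# Illustrative usage sketch (hypothetical demo helper): with maxsize=8 the
# cache holds four two-slot segments; a repeated call is served from the
# cache without re-running the wrapped function.
def _memoize_demo():
    calls = []
    @memoize(8)
    def square(x):
        calls.append(x)
        return x * x
    square(3)
    square(3)
    assert calls == [3]  # the wrapped body only ran once for x=3
    assert square(4) == 16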
def mod10r(number):
"""
Recursive mod10
:param number: a number
:return: the same number completed with the recursive modulo base 10
"""
codec = [0, 9, 4, 6, 8, 2, 7, 1, 3, 5]
report = 0
result = ""
for digit in number:
result += digit
if digit.isdigit():
report = codec[(int(digit) + report) % 10]
return result + str((10 - report) % 10)
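# Worked example (hypothetical demo helper): stepping the codec table over
# '9424' yields the running reports 5, 5, 1, 2, so the appended check digit
# is (10 - 2) % 10 = 8.
def _mod10r_demo():
    assert mod10r('9424') == '94248'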
class LocalDict(local):
def __init__(self):
super(LocalDict, self).__init__()
self._dict = {}
def __str__(self):
return str(self._dict)
def __repr__(self):
return str(self._dict)
def clear(self):
return self._dict.clear()
def keys(self):
return self._dict.keys()
def __setitem__(self, i, y):
self._dict.__setitem__(i, y)
def __getitem__(self, i):
return self._dict.__getitem__(i)
def copy(self):
return self._dict.copy()
def iteritems(self):
return self._dict.iteritems()
def iterkeys(self):
return self._dict.iterkeys()
def itervalues(self):
return self._dict.itervalues()
def pop(self, k, d=None):
return self._dict.pop(k, d)
def popitem(self):
return self._dict.popitem()
def setdefault(self, k, d=None):
return self._dict.setdefault(k, d)
def update(self, E, **F):
        return self._dict.update(E, **F)
def values(self):
return self._dict.values()
def get(self, k, d=None):
return self._dict.get(k, d)
def has_key(self, k):
return k in self._dict
def items(self):
return self._dict.items()
def __cmp__(self, y):
return self._dict.__cmp__(y)
def __contains__(self, k):
return self._dict.__contains__(k)
def __delitem__(self, y):
return self._dict.__delitem__(y)
def __eq__(self, y):
return self._dict.__eq__(y)
def __ge__(self, y):
return self._dict.__ge__(y)
def __gt__(self, y):
return self._dict.__gt__(y)
def __hash__(self):
return self._dict.__hash__()
def __iter__(self):
return self._dict.__iter__()
def __le__(self, y):
return self._dict.__le__(y)
def __len__(self):
return self._dict.__len__()
def __lt__(self, y):
return self._dict.__lt__(y)
def __ne__(self, y):
return self._dict.__ne__(y)
def reduce_ids(field, ids):
'''
Return a small SQL clause for ids
:param field: the field of the clause
:param ids: the list of ids
:return: sql string and sql param
'''
if not ids:
return '(%s)', [False]
assert all(x.is_integer() for x in ids if isinstance(x, float)), \
'ids must be integer'
ids = map(int, ids)
ids.sort()
prev = ids.pop(0)
continue_list = [prev, prev]
discontinue_list = []
sql = []
args = []
for i in ids:
if i == prev:
continue
if i != prev + 1:
if continue_list[-1] - continue_list[0] < 5:
discontinue_list.extend([continue_list[0] + x for x in
range(continue_list[-1] - continue_list[0] + 1)])
else:
sql.append('((' + field + ' >= %s) AND (' + field + ' <= %s))')
args.append(continue_list[0])
args.append(continue_list[-1])
continue_list = []
continue_list.append(i)
prev = i
if continue_list[-1] - continue_list[0] < 5:
discontinue_list.extend([continue_list[0] + x for x in
range(continue_list[-1] - continue_list[0] + 1)])
else:
sql.append('((' + field + ' >= %s) AND (' + field + ' <= %s))')
args.append(continue_list[0])
args.append(continue_list[-1])
if discontinue_list:
sql.append('(' + field + ' IN (' +
','.join(('%s',) * len(discontinue_list)) + '))')
args.extend(discontinue_list)
return '(' + ' OR '.join(sql) + ')', args
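# Illustrative sketch (hypothetical demo helper): a contiguous run spanning
# at least 5 ids is compressed into a range clause, while stragglers fall
# back to an IN list.
def _reduce_ids_demo():
    sql, args = reduce_ids('id', [1, 2, 3, 4, 5, 6, 20])
    assert sql == '(((id >= %s) AND (id <= %s)) OR (id IN (%s)))'
    assert args == [1, 6, 20]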
_ALLOWED_CODES = set(dis.opmap[x] for x in [
'POP_TOP', 'ROT_TWO', 'ROT_THREE', 'ROT_FOUR', 'DUP_TOP', 'BUILD_LIST',
'BUILD_MAP', 'BUILD_TUPLE', 'LOAD_CONST', 'RETURN_VALUE',
'STORE_SUBSCR', 'UNARY_POSITIVE', 'UNARY_NEGATIVE', 'UNARY_NOT',
'UNARY_INVERT', 'BINARY_POWER', 'BINARY_MULTIPLY', 'BINARY_DIVIDE',
'BINARY_FLOOR_DIVIDE', 'BINARY_TRUE_DIVIDE', 'BINARY_MODULO',
'BINARY_ADD', 'BINARY_SUBTRACT', 'BINARY_LSHIFT', 'BINARY_RSHIFT',
'BINARY_AND', 'BINARY_XOR', 'BINARY_OR', 'STORE_MAP', 'LOAD_NAME',
'CALL_FUNCTION', 'COMPARE_OP', 'LOAD_ATTR', 'STORE_NAME', 'GET_ITER',
'FOR_ITER', 'LIST_APPEND', 'JUMP_ABSOLUTE', 'DELETE_NAME',
'JUMP_IF_TRUE', 'JUMP_IF_FALSE', 'JUMP_IF_FALSE_OR_POP',
'JUMP_IF_TRUE_OR_POP', 'POP_JUMP_IF_FALSE', 'POP_JUMP_IF_TRUE',
'BINARY_SUBSCR', 'JUMP_FORWARD',
] if x in dis.opmap)
@memoize(1000)
def _compile_source(source):
comp = compile(source, '', 'eval')
codes = []
co_code = comp.co_code
i = 0
while i < len(co_code):
code = ord(co_code[i])
codes.append(code)
if code >= dis.HAVE_ARGUMENT:
i += 3
else:
i += 1
for code in codes:
if code not in _ALLOWED_CODES:
raise ValueError('opcode %s not allowed' % dis.opname[code])
return comp
def safe_eval(source, data=None):
if '__subclasses__' in source:
raise ValueError('__subclasses__ not allowed')
comp = _compile_source(source)
return eval(comp, {'__builtins__': {
'True': True,
'False': False,
'str': str,
'globals': locals,
'locals': locals,
'bool': bool,
'dict': dict,
'round': round,
'Decimal': Decimal,
}}, data)
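# Illustrative usage sketch (hypothetical demo helper): only whitelisted
# opcodes and builtins are available, so plain expressions evaluate while
# the '__subclasses__' escape hatch is rejected outright.
def _safe_eval_demo():
    assert safe_eval('1 + 2 * 3') == 7
    assert safe_eval('round(x / y, 2)', {'x': 1.0, 'y': 3.0}) == 0.33
    try:
        safe_eval('().__class__.__subclasses__()')
        raise AssertionError('expected ValueError')
    except ValueError:
        pass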
def reduce_domain(domain):
'''
Reduce domain
'''
if not domain:
return []
operator = 'AND'
if isinstance(domain[0], basestring):
operator = domain[0]
domain = domain[1:]
result = [operator]
for arg in domain:
if (isinstance(arg, tuple) or
(isinstance(arg, list) and
len(arg) > 2 and
arg[1] in OPERATORS)):
#clause
result.append(arg)
elif isinstance(arg, list) and arg:
#sub-domain
sub_domain = reduce_domain(arg)
sub_operator = sub_domain[0]
if sub_operator == operator:
result.extend(sub_domain[1:])
else:
result.append(sub_domain)
else:
result.append(arg)
return result
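# Illustrative sketch (hypothetical demo helper): a nested sub-domain that
# shares its parent's operator is flattened into the parent.
def _reduce_domain_demo():
    domain = [('a', '=', 1), ['AND', ('b', '=', 2), ('c', '=', 3)]]
    assert reduce_domain(domain) == \
        ['AND', ('a', '=', 1), ('b', '=', 2), ('c', '=', 3)]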
|
openlabs/trytond
|
trytond/tools/misc.py
|
Python
|
gpl-3.0
| 13,242
|
## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
import qibuild.profile
from qisrc.sync import compute_profile_updates
def make_profiles(*args):
res = dict()
for (name, flags) in args:
profile = qibuild.profile.Profile(name)
profile.cmake_flags = flags
res[profile.name] = profile
return res
def test_remote_added():
local = make_profiles()
remote = make_profiles(
("foo", [("WITH_FOO", "ON")]),
)
new, updated = compute_profile_updates(local, remote)
assert not updated
assert len(new) == 1
assert new[0] == remote["foo"]
def test_remote_updated():
    local = make_profiles(
        ("eggs", [("WITH_EGGS", "ON")]),
("foo", [("WITH_FOO", "ON"), ("WITH_BAR", "OFF")]),
)
    remote = make_profiles(
        ("eggs", [("WITH_EGGS", "ON")]),
("foo", [("WITH_FOO", "ON")]),
)
new, updated = compute_profile_updates(local, remote)
assert not new
assert len(updated) == 1
assert updated[0] == remote["foo"]
def test_same_remote():
    local = make_profiles(
        ("eggs", [("WITH_EGGS", "ON")]),
("foo", [("WITH_FOO", "ON")]),
)
    remote = make_profiles(
        ("eggs", [("WITH_EGGS", "ON")]),
("foo", [("WITH_FOO", "ON")]),
)
new, updated = compute_profile_updates(local, remote)
assert not new
assert not updated
|
dmerejkowsky/qibuild
|
python/qisrc/test/test_sync_compute_profile_update.py
|
Python
|
bsd-3-clause
| 1,495
|
"""
Tools for n-dimensional linear algebra
Vectors are just numpy arrays, as are dense matrices. Sparse matrices
are CSR matrices. Parallel vector and matrix are built on top of those
representations using PETSc.
.. inheritance-diagram:: proteus.LinearAlgebraTools
:parts: 1
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
import numpy
import math
import sys
from . import superluWrappers
from . import Comm
from .Comm import globalSum, globalMax
from .superluWrappers import *
from .Profiling import logEvent
from petsc4py import PETSc as p4pyPETSc
# PETSc Matrix Functions
def _petsc_view(obj, filename):
"""Saves petsc object to disk using a PETSc binary viewer.
Parameters
----------
obj : PETSc obj
PETSc4py object to be saved (e.g. vector, matrix, etc)
filename : str
String with PETSc filename
"""
viewer = p4pyPETSc.Viewer().createBinary(filename, 'w')
viewer(obj)
viewer2 = p4pyPETSc.Viewer().createASCII(filename+".m", 'w')
viewer2.pushFormat(1)
viewer2(obj)
viewer2.popFormat()
def petsc_load_matrix(filename):
""" This function loads a PETSc matrix from a binary format.
(Eg. what is saved using the petsc_view function).
Parameters
----------
filename : str
This is the name of the binary with the file stored.
Returns
-------
matrix : petsc4py matrix
The matrix that is stored in the binary file.
"""
try:
viewer = p4pyPETSc.Viewer().createBinary(filename,'r')
output = p4pyPETSc.Mat().load(viewer)
    except Exception:
logEvent("Either you've entered an invalid file name or your object is not a matrix (try petsc_load_vector).")
output = None
return output
def petsc_load_vector(filename):
""" This function loads a PETSc vector from a binary format.
(Eg. what is saved using the petsc_view function).
Parameters
----------
filename : str
This is the name of the binary with the file stored.
Returns
-------
    vector : petsc4py vector
        The vector that is stored in the binary file.
"""
try:
viewer = p4pyPETSc.Viewer().createBinary(filename,'r')
output = p4pyPETSc.Vec().load(viewer)
    except Exception:
logEvent("Either you've entered an invalid file name or your object is not a vector (try petsc_load_matrix).")
output = None
return output
def petsc_load_IS(filename):
""" This function loads a PETSc index-set from a binary format.
(Eg. what is saved using the petsc_view function).
Parameters
----------
filename : str
This is the name of the binary with the file stored.
Returns
-------
    index_set : petsc4py IS
        The index-set that is stored in the binary file.
"""
try:
viewer = p4pyPETSc.Viewer().createBinary(filename,'r')
output = p4pyPETSc.IS().load(viewer)
    except Exception:
logEvent("Either you've entered an invalid file name or your object is not an index set.")
output = None
return output
def csr_2_petsc(size,csr):
""" Create an petsc4py matrix from size and CSR information.
Parameters:
----------
size : tuple
A 2-tuple with the number of matrix rows and columns.
csr : tuple
A 3-tuple with the sparse matrix csr information.
Returns:
--------
matrix : PETSc4py aij matrix
"""
mat = p4pyPETSc.Mat().create()
mat.setSizes(size = size)
mat.setType('aij')
mat.setUp()
mat.assemblyBegin()
mat.setValuesCSR(csr[0],csr[1],csr[2])
mat.assemblyEnd()
return mat
def _pythonCSR_2_dense(rowptr,colptr,data,nr,nc,output=False):
""" Takes python CSR datatypes and makes a dense matrix """
dense_matrix = numpy.zeros(shape = (nr,nc), dtype='float')
for idx in range(len(rowptr)-1):
row_vals = data[rowptr[idx]:rowptr[idx+1]]
for val_idx,j in enumerate(colptr[rowptr[idx]:rowptr[idx+1]]):
dense_matrix[idx][j] = row_vals[val_idx]
if output is not False:
numpy.save(output,dense_matrix)
return dense_matrix
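# Illustrative check (hypothetical demo helper): the CSR triple below
# encodes the 2x2 matrix [[1, 0], [0, 2]]; rowptr marks where each row's
# entries start and end in the data/column arrays.
def _csr_demo():
    rowptr = [0, 1, 2]
    colind = [0, 1]
    data = [1.0, 2.0]
    dense = _pythonCSR_2_dense(rowptr, colind, data, 2, 2)
    assert (dense == numpy.array([[1.0, 0.0], [0.0, 2.0]])).all()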
def superlu_get_rank(sparse_matrix):
""" Returns the rank of a superluWrapper sparse matrix.
Parameters
----------
sparse_matrix : :class:`proteus.superluWrappers.SparseMatrix`
Returns
-------
matrix_rank : int
The rank of the sparse_matrix
Notes
-----
This function is a tool for debugging and should only be used
for small matrices.
"""
A = superlu_sparse_2_dense(sparse_matrix)
return numpy.linalg.matrix_rank(A)
def petsc4py_get_rank(sparse_matrix):
    """ Returns the rank of a petsc4py sparse matrix.
Parameters
----------
sparse_matrix : :class:`p4pyPETSc.Mat`
Returns
-------
matrix_rank : int
The rank of the sparse_matrix
Notes
-----
This function is a debugging tool and should only be used
for small matrices.
"""
A = petsc4py_sparse_2_dense(sparse_matrix)
return numpy.linalg.matrix_rank(A)
def superlu_has_pressure_null_space(sparse_matrix):
"""
Checks whether a superluWrapper sparse matrix has a constant
pressure null space.
Parameters
----------
sparse_matrix : :class:`proteus.superluWrappers.SparseMatrix`
Returns
-------
does : bool
Boolean variable indicating whether the pressure term
creates a null space.
Notes
-----
Assumes interwoven dof.
This function was written mainly for debugging purposes and may be
slow for large matrices.
"""
A = superlu_2_petsc4py(sparse_matrix)
return petsc4py_mat_has_pressure_null_space(A)
def petsc4py_mat_has_pressure_null_space(A):
"""
Checks whether a PETSc4Py sparse matrix has a constant
pressure null space.
Parameters
----------
A : :class:`p4pyPETSc.Mat`
Returns
-------
does : bool
Boolean variable indicating whether the pressure term
creates a null space.
Notes
-----
Assumes interwoven dof.
This function was written mainly for debugging purposes and may be
slow for large matrices.
"""
x = numpy.zeros(A.getSize()[1])
y = numpy.zeros(A.getSize()[1])
x[::3] = 1
x_petsc = p4pyPETSc.Vec().createWithArray(x)
y_petsc = p4pyPETSc.Vec().createWithArray(y)
A.mult(x_petsc,y_petsc)
if y_petsc.norm() < 1e-15:
return True
else:
return False
def superlu_sparse_2_dense(sparse_matrix,output=False):
""" Converts a sparse superluWrapper into a dense matrix.
Parameters
----------
sparse_matrix :
output : str
Out file name to store the matrix.
Returns
-------
dense_matrix : numpy array
A numpy array storing the dense matrix.
Notes
-----
This function should not be used for large matrices.
"""
rowptr = sparse_matrix.getCSRrepresentation()[0]
colptr = sparse_matrix.getCSRrepresentation()[1]
data = sparse_matrix.getCSRrepresentation()[2]
nr = sparse_matrix.shape[0]
nc = sparse_matrix.shape[1]
return _pythonCSR_2_dense(rowptr,colptr,data,nr,nc,output)
def petsc4py_sparse_2_dense(sparse_matrix,output=False):
    """ Converts a PETSc4Py matrix to a dense numpy array.
Parameters
----------
sparse_matrix : PETSc4py matrix
output : str
Output file name to store the matrix.
Returns
-------
dense_matrix : numpy array
A numpy array with the dense matrix.
Notes
-----
This function is very inefficient for large matrices.
"""
rowptr = sparse_matrix.getValuesCSR()[0]
colptr = sparse_matrix.getValuesCSR()[1]
data = sparse_matrix.getValuesCSR()[2]
nr = sparse_matrix.getSize()[0]
nc = sparse_matrix.getSize()[1]
return _pythonCSR_2_dense(rowptr,colptr,data,nr,nc,output)
def superlu_2_petsc4py(sparse_superlu):
""" Copy a sparse superlu matrix to a sparse petsc4py matrix
Parameters
----------
sparse_superlu : :class:`proteus.superluWrappers.SparseMatrix`
Returns
-------
sparse_matrix : :class: `p4pyPETSc.Mat`
"""
comm = Comm.get()
if comm.size() > 1:
rowptr,colind,nzval = sparse_superlu.getCSRrepresentation()
A_petsc4py = ParMat_petsc4py.create_ParMat_from_OperatorConstructor(sparse_superlu)
else:
rowptr, colind, nzval = sparse_superlu.getCSRrepresentation()
A_rowptr = rowptr.copy()
A_colind = colind.copy()
A_nzval = nzval.copy()
nr = sparse_superlu.shape[0]
nc = sparse_superlu.shape[1]
A_petsc4py = p4pyPETSc.Mat().createAIJWithArrays((nr,nc),
(A_rowptr,
A_colind,
A_nzval))
return A_petsc4py
def petsc_create_diagonal_inv_matrix(sparse_petsc):
""" Create an inverse diagonal petsc4py matrix from input matrix.
Parameters
----------
sparse_petsc : :class:`p4pyPETSc.Mat`
Returns
-------
sparse_matrix : :class:`p4pyPETSc.Mat`
"""
diag_inv = p4pyPETSc.Mat().create()
diag_inv.setSizes(sparse_petsc.getSizes())
diag_inv.setType('aij')
diag_inv.setUp()
diag_inv.setDiagonal(old_div(1.,sparse_petsc.getDiagonal()))
return diag_inv
def dense_numpy_2_petsc4py(dense_numpy, eps = 1.e-12):
""" Create a sparse petsc4py matrix from a dense numpy matrix.
Note - This routine has been built mainly to support testing.
It would be rare for this routine to be useful for most applications.
Parameters
----------
dense_numpy :
eps : float
Tolerance for non-zero values.
Returns
-------
sparse_matrix : PETSc4py matrix
"""
vals = []
colptr = []
rowptr = [0]
rowptr_track = 0
for i,row in enumerate(dense_numpy):
for j,val in enumerate(row):
if abs(val) > eps:
vals.append(val)
colptr.append(j)
rowptr_track += 1
rowptr.append(rowptr_track)
return p4pyPETSc.Mat().createAIJ(size=dense_numpy.shape,
csr = (rowptr, colptr, vals))
def csr_2_petsc_mpiaij(size,csr):
""" Create an MPIaij petsc4py matrix from size and CSR information.
Parameters:
----------
size : tuple
        Two entries: (num_rows, num_cols)
csr : tuple
(row_idx, col_idx, vals)
Returns:
--------
matrix : PETSc4py MPIaij matrix
"""
mat = p4pyPETSc.Mat().create()
mat.setSizes(size = size)
mat.setType('mpiaij')
mat.setUp()
mat.assemblyBegin()
mat.setValuesCSR(csr[0],csr[1],csr[2])
mat.assemblyEnd()
return mat
def split_PETSc_Mat(mat):
""" Decompose a PETSc matrix into a symmetric and skew-symmetric
matrix
Parameters:
----------
mat : :class: `PETSc4py Matrix`
Returns:
--------
H : :class: `PETSc4py Matrix`
Symmetric (or Hermitian) component of mat
S : :class: `PETSc4py Matrix`
Skew-Symmetric (or skew-Hermitian) component of mat
"""
H = mat.copy()
H.zeroEntries()
H.axpy(1.0,mat)
H.axpy(1.0,mat.transpose())
H.scale(0.5)
S = mat.copy()
S.zeroEntries()
S.axpy(1.0,mat)
S.aypx(-1.0,mat.transpose())
S.scale(0.5)
return H, S
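# Illustrative usage sketch (hypothetical demo helper): split a small AIJ
# matrix built with dense_numpy_2_petsc4py and verify on the dense side
# that the symmetric and skew-symmetric parts sum back to the original.
def _split_petsc_mat_demo():
    A = dense_numpy_2_petsc4py(numpy.array([[1.0, 2.0],
                                            [0.0, 3.0]]))
    H, S = split_PETSc_Mat(A)
    total = petsc4py_sparse_2_dense(H) + petsc4py_sparse_2_dense(S)
    assert numpy.allclose(total, [[1.0, 2.0], [0.0, 3.0]])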
class ParVec_petsc4py(p4pyPETSc.Vec):
"""
Parallel vector using petsc4py's wrappers for PETSc
Parameters
----------
array : numpy_array
A numpy array with size equal to the number of locally
owned unknowns plus the number of local ghost cells.
bs : int
Block size.
n : int
The number of locally owned unknowns
N : int
The number of unknowns in the global system
nghosts : int
The number of ghost nodes for the process.
    subdomain2global : numpy array
        Map from the process unknowns to the global
        unknowns.
    blockVecType : str
    ghosts : numpy array
        A numpy array with the local process unknowns that are
        ghost nodes.
    proteus2petsc_subdomain : numpy array
        A numpy array that serves as a map from the proteus
        unknown ordering to the petsc unknown ordering.
    petsc2proteus_subdomain : numpy array
        A numpy array that serves as a map from the petsc unknown
        ordering to the proteus unknown ordering.
"""
def __init__(self,array=None,bs=None,n=None,N=None,nghosts=None,subdomain2global=None,blockVecType="simple",ghosts=None,
proteus2petsc_subdomain=None,
petsc2proteus_subdomain=None):
p4pyPETSc.Vec.__init__(self)
if array is None:
return#when duplicating for petsc usage
self.proteus2petsc_subdomain=proteus2petsc_subdomain
self.petsc2proteus_subdomain=petsc2proteus_subdomain
blockSize = max(1,bs)
self.dim_proc = n*blockSize
self.nghosts = nghosts
self.blockVecType = blockVecType
assert self.blockVecType == "simple", "petsc4py wrappers require self.blockVecType=simple"
self.proteus_array = array
if nghosts is None:
if blockVecType == "simple":
self.createWithArray(array,size=(blockSize*n,blockSize*N),bsize=1)
else:
self.createWithArray(array,size=(blockSize*n,blockSize*N),bsize=blockSize)
self.subdomain2global=subdomain2global
self.petsc_l2g = None
self.setUp()
else:
            assert nghosts >= 0, "The number of ghost nodes must be non-negative"
assert subdomain2global.shape[0] == (n+nghosts), ("The subdomain2global map is the wrong length n=%i,nghosts=%i,shape=%i \n" % (n,n+nghosts,subdomain2global.shape[0]))
assert len(array.flat) == (n+nghosts)*blockSize, "%i != (%i+%i)*%i \n" % (len(array.flat), n,nghosts,blockSize)
if blockVecType == "simple":
if ghosts is None:
ghosts = numpy.zeros((blockSize*nghosts),'i')
for j in range(blockSize):
ghosts[j::blockSize]=subdomain2global[n:]*blockSize+j
self.createGhostWithArray(ghosts,array,size=(blockSize*n,blockSize*N),bsize=1)
if blockSize > 1: #have to build in block dofs
subdomain2globalTotal = numpy.zeros((blockSize*subdomain2global.shape[0],),'i')
for j in range(blockSize):
subdomain2globalTotal[j::blockSize]=subdomain2global*blockSize+j
self.subdomain2global=subdomain2globalTotal
else:
self.subdomain2global=subdomain2global
else:
#TODO need to debug
ghosts = subdomain2global[n:]
self.createGhostWithArray(ghosts,array,size=(blockSize*n,blockSize*N),bsize=blockSize)
self.subdomain2global = subdomain2global
self.setUp()
#self.petsc_l2g = p4pyPETSc.LGMap()
#self.petsc_l2g.create(self.subdomain2global)
#self.setLGMap(self.petsc_l2g)
self.setFromOptions()
def scatter_forward_insert(self):
if self.proteus2petsc_subdomain is not None:
self.proteus_array[:] = self.proteus_array[self.petsc2proteus_subdomain]
self.ghostUpdateBegin(p4pyPETSc.InsertMode.INSERT,p4pyPETSc.ScatterMode.FORWARD)
self.ghostUpdateEnd(p4pyPETSc.InsertMode.INSERT,p4pyPETSc.ScatterMode.FORWARD)
if self.proteus2petsc_subdomain is not None:
self.proteus_array[:] = self.proteus_array[self.proteus2petsc_subdomain]
def scatter_reverse_add(self):
if self.proteus2petsc_subdomain is not None:
self.proteus_array[:] = self.proteus_array[self.petsc2proteus_subdomain]
self.ghostUpdateBegin(p4pyPETSc.InsertMode.ADD_VALUES,p4pyPETSc.ScatterMode.REVERSE)
self.ghostUpdateEnd(p4pyPETSc.InsertMode.ADD_VALUES,p4pyPETSc.ScatterMode.REVERSE)
if self.proteus2petsc_subdomain is not None:
self.proteus_array[:] = self.proteus_array[self.proteus2petsc_subdomain]
def save(self, filename):
"""Saves to disk using a PETSc binary viewer."""
_petsc_view(self, filename)
class ParInfo_petsc4py(object):
"""
    ARB - this class is experimental. The idea is to store the
    information needed to construct parallel vectors and matrices
    here as static class values. Then ParVec and ParMat can
    use these values to create parallel objects later.
"""
def __init__(self):
self.par_bs = None
self.par_n = None
self.par_n_lst = None
self.par_N = None
self.par_nghost = None
self.par_nghost_lst = None
self.petsc_subdomain2global_petsc = None
self.subdomain2global = None
self.proteus2petsc_subdomain = None
self.petsc2proteus_subdomain = None
self.nzval_proteus2petsc = None
self.dim = None
self.mixed = False
def print_info(cls):
from . import Comm
comm = Comm.get()
logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_bs = ' + repr(cls.par_bs))
logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_n = ' + repr(cls.par_n))
logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_n_lst = ' + repr(cls.par_n_lst))
logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_N = ' + repr(cls.par_N))
logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_nghost = ' + repr(cls.par_nghost))
logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_nghost_lst = ' + repr(cls.par_nghost_lst))
logEvent('comm.rank() = ' + repr(comm.rank()) + ' petsc_subdomain2global_petsc = ' + repr(cls.petsc_subdomain2global_petsc))
logEvent('comm.rank() = ' + repr(comm.rank()) + ' subdomain2global = ' + repr(cls.subdomain2global))
logEvent('comm.rank() = ' + repr(comm.rank()) + ' proteus2petsc_subdomain = ' + repr(cls.proteus2petsc_subdomain))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' petsc2proteus_subdomain = ' + repr(cls.petsc2proteus_subdomain))
logEvent('comm.rank() = ' + repr(comm.rank()) + ' dim = ' + repr(cls.dim))
logEvent('comm.rank() = ' + repr(comm.rank()) + ' nzval_proteus2petsc = ' + repr(cls.nzval_proteus2petsc))
class ParMat_petsc4py(p4pyPETSc.Mat):
    """ Parallel matrix based on petsc4py's wrappers for PETSc.
    Parameters
    ----------
    ghosted_csr_mat : :class:`proteus.superluWrappers.SparseMatrix`
Primary CSR information for the ParMat.
par_bs : int
The block size.
par_n : int
The number of locally owned unknowns.
par_N : int
The number of global unknowns.
par_nghost : int
The number of locally owned ghost unknowns.
subdomain2global : :class:`numpy.ndarray`
A map from the local unknown to the global unknown.
blockVecType : str
pde : :class:`proteus.Transport.OneLevelTransport`
The Transport class defining the problem.
par_nc : int
par_Nc : int
proteus_jacobian : :class:`proteus.superluWrappers.SparseMatrix`
Jacobian generated by Transport class's initializeJacobian.
nzval_proteus2petsc : :class:`numpy.ndarray`
Array with index permutations for mapping between
proteus and petsc degrees of freedom.
"""
def __init__(self,
ghosted_csr_mat=None,
par_bs=None,
par_n=None,
par_N=None,
par_nghost=None,
subdomain2global=None,
blockVecType="simple",
pde=None,
par_nc=None,
par_Nc=None,
proteus_jacobian=None,
nzval_proteus2petsc=None):
p4pyPETSc.Mat.__init__(self)
if ghosted_csr_mat is None:
return#when duplicating for petsc usage
self.pde = pde
if par_nc is None:
par_nc = par_n
if par_Nc is None:
par_Nc = par_N
self.proteus_jacobian=proteus_jacobian
self.nzval_proteus2petsc = nzval_proteus2petsc
self.ghosted_csr_mat=ghosted_csr_mat
self.blockVecType = blockVecType
assert self.blockVecType == "simple", "petsc4py wrappers require self.blockVecType=simple"
self.create(p4pyPETSc.COMM_WORLD)
self.blockSize = max(1,par_bs)
if self.blockSize > 1 and blockVecType != "simple":
## \todo fix block aij in ParMat_petsc4py
self.setType('mpibaij')
self.setSizes([[self.blockSize*par_n,self.blockSize*par_N],[self.blockSize*par_nc,self.blockSize*par_Nc]],bsize=self.blockSize)
self.setBlockSize(self.blockSize)
self.subdomain2global = subdomain2global #no need to include extra block dofs?
else:
self.setType('aij')
self.setSizes([[par_n*self.blockSize,par_N*self.blockSize],[par_nc*self.blockSize,par_Nc*self.blockSize]],bsize=1)
if self.blockSize > 1: #have to build in block dofs
subdomain2globalTotal = numpy.zeros((self.blockSize*subdomain2global.shape[0],),'i')
for j in range(self.blockSize):
subdomain2globalTotal[j::self.blockSize]=subdomain2global*self.blockSize+j
self.subdomain2global=subdomain2globalTotal
else:
self.subdomain2global=subdomain2global
from proteus import Comm
comm = Comm.get()
logEvent("ParMat_petsc4py comm.rank= %s blockSize = %s par_n= %s par_N=%s par_nghost=%s par_jacobian.getSizes()= %s "
% (comm.rank(),self.blockSize,par_n,par_N,par_nghost,self.getSizes()))
self.csr_rep = ghosted_csr_mat.getCSRrepresentation()
if self.proteus_jacobian is not None:
self.proteus_csr_rep = self.proteus_jacobian.getCSRrepresentation()
if self.blockSize > 1:
blockOwned = self.blockSize*par_n
self.csr_rep_local = ghosted_csr_mat.getSubMatCSRrepresentation(0,blockOwned)
else:
self.csr_rep_local = ghosted_csr_mat.getSubMatCSRrepresentation(0,par_n)
self.petsc_l2g = p4pyPETSc.LGMap()
self.petsc_l2g.create(self.subdomain2global)
self.setUp()
self.setLGMap(self.petsc_l2g)
#
self.colind_global = self.petsc_l2g.apply(self.csr_rep_local[1]) #prealloc needs global indices
self.setPreallocationCSR([self.csr_rep_local[0],self.colind_global,self.csr_rep_local[2]])
self.setFromOptions()
@classmethod
def create_ParMat_from_OperatorConstructor(cls,
operator):
""" Build a ParMat consistent with the problem from an Operator
constructor matrix.
Arguments
---------
operator : :class:`proteus.superluWrappers.SparseMatrix`
Matrix to be turned into a parallel petsc matrix.
"""
par_bs = ParInfo_petsc4py.par_bs
par_n = ParInfo_petsc4py.par_n
par_N = ParInfo_petsc4py.par_N
par_nghost = ParInfo_petsc4py.par_nghost
petsc_subdomain2global_petsc = ParInfo_petsc4py.petsc_subdomain2global_petsc
subdomain2global = ParInfo_petsc4py.subdomain2global
petsc2proteus_subdomain = ParInfo_petsc4py.petsc2proteus_subdomain
proteus2petsc_subdomain = ParInfo_petsc4py.proteus2petsc_subdomain
dim = ParInfo_petsc4py.dim
# ARB - this is largely copied from Transport.py,
        # a refactor should be done to eliminate this duplication
rowptr, colind, nzval = operator.getCSRrepresentation()
rowptr_petsc = rowptr.copy()
colind_petsc = colind.copy()
nzval_petsc = nzval.copy()
nzval_proteus2petsc = colind.copy()
nzval_petsc2proteus = colind.copy()
rowptr_petsc[0] = 0
for i in range(par_n+par_nghost):
start_proteus = rowptr[petsc2proteus_subdomain[i]]
end_proteus = rowptr[petsc2proteus_subdomain[i]+1]
nzrow = end_proteus - start_proteus
rowptr_petsc[i+1] = rowptr_petsc[i] + nzrow
start_petsc = rowptr_petsc[i]
end_petsc = rowptr_petsc[i+1]
petsc_cols_i = proteus2petsc_subdomain[colind[start_proteus:end_proteus]]
j_sorted = petsc_cols_i.argsort()
colind_petsc[start_petsc:end_petsc] = petsc_cols_i[j_sorted]
nzval_petsc[start_petsc:end_petsc] = nzval[start_proteus:end_proteus][j_sorted]
for j_petsc, j_proteus in zip(numpy.arange(start_petsc,end_petsc),
numpy.arange(start_proteus,end_proteus)[j_sorted]):
nzval_petsc2proteus[j_petsc] = j_proteus
nzval_proteus2petsc[j_proteus] = j_petsc
proteus_a = {}
petsc_a = {}
for i in range(dim):
for j,k in zip(colind[rowptr[i]:rowptr[i+1]],list(range(rowptr[i],rowptr[i+1]))):
proteus_a[i,j] = nzval[k]
petsc_a[proteus2petsc_subdomain[i],proteus2petsc_subdomain[j]] = nzval[k]
for i in range(dim):
for j,k in zip(colind_petsc[rowptr_petsc[i]:rowptr_petsc[i+1]],list(range(rowptr_petsc[i],rowptr_petsc[i+1]))):
nzval_petsc[k] = petsc_a[i,j]
#additional stuff needed for petsc par mat
petsc_jacobian = SparseMat(dim,dim,nzval_petsc.shape[0], nzval_petsc, colind_petsc, rowptr_petsc)
return cls(petsc_jacobian,
par_bs,
par_n,
par_N,
par_nghost,
petsc_subdomain2global_petsc,
proteus_jacobian = operator,
nzval_proteus2petsc=nzval_proteus2petsc)
def save(self, filename):
"""Saves to disk using a PETSc binary viewer. """
_petsc_view(self, filename)
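# A minimal usage sketch (hypothetical sizes and arrays, not taken from this
# module) of assembling a ParMat_petsc4py from an existing ghosted CSR matrix;
# the parallel layout arguments normally come from the mesh partitioning:
#
#   par_A = ParMat_petsc4py(ghosted_csr_mat=jacobian,
#                           par_bs=1,
#                           par_n=n_owned, par_N=n_global,
#                           par_nghost=n_ghost,
#                           subdomain2global=local2global)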
def Vec(n):
"""
Build a vector of length n (using numpy)
For example::
>>> Vec(3)
array([ 0., 0., 0.])
"""
return numpy.zeros((n,),'d')
def Mat(m,n):
"""
Build an m x n matrix (using numpy)
For example::
>>> Mat(2,3)
array([[ 0., 0., 0.],
[ 0., 0., 0.]])
"""
return numpy.zeros((m,n),'d')
def SparseMatFromDict(nr,nc,aDict):
    """
    Build a nr x nc sparse matrix from a dictionary representation.

    aDict maps (row, column) index tuples to the non-zero values.
    """
    from . import superluWrappers
    indices = list(aDict.keys())
    indices.sort()
    nnz = len(indices)
    nzval = numpy.zeros((nnz,),'d')
    rowptr = numpy.zeros((nr+1,),'i')
    colind = numpy.zeros((nnz,),'i')
    i=0
    k=0
    rowptr[i]=0
    for ij in indices:
        nzval[k] = aDict[ij]
        colind[k] = ij[1]
        # advance the row pointer, allowing for empty rows
        while ij[0] > i:
            i += 1
            rowptr[i]=k
        k+=1
    # close out the final row (and any trailing empty rows)
    rowptr[i+1:] = k
    return (SparseMat(nr,nc,nnz,nzval,colind,rowptr),nzval)
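# A quick worked example (hypothetical values): a 2 x 2 matrix with entries at
# (0,0), (0,1) and (1,1) can be built as
#
#   A, nzval = SparseMatFromDict(2, 2, {(0, 0): 1.0, (0, 1): 2.0, (1, 1): 3.0})
#
# nzval aliases the matrix storage, so modifying it updates A in place.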
def SparseMat(nr,nc,nnz,nzval,colind,rowptr):
""" Build a nr x nc sparse matrix from the CSR data structures
Parameters
----------
nr : int
The number of rows.
nc : int
The number of columns.
nnz : int
The number of non-zero matrix entries.
nzval : numpy array
Array with non-zero matrix entries.
colind : numpy array of 32bit integers
CSR column array.
rowptr : numpy array of 32bit integers
CSR row pointer.
Returns
-------
sparse_matrix : :class:`proteus.superluWrappers.SparseMatrix`
superlu sparse matrix in CSR format.
Note
----
For the superluWrapper, both the colind and rowptr should use
32-bit integer data types.
"""
if (colind.dtype != 'int32' or rowptr.dtype != 'int32'):
print('ERROR - colind and rowptr must be "int32" numpy arrays for ' \
'superluWrappers')
sys.exit(1)
return superluWrappers.SparseMatrix(nr,nc,nnz,nzval,colind,rowptr)
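# A minimal usage sketch (hypothetical 2 x 2 matrix with 3 non-zeros); note the
# 32-bit integer index arrays required by the superlu wrappers:
#
#   rowptr = numpy.array([0, 2, 3], 'i')
#   colind = numpy.array([0, 1, 1], 'i')
#   nzval = numpy.array([1.0, 2.0, 3.0], 'd')
#   A = SparseMat(2, 2, 3, nzval, colind, rowptr)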
class SparseMatShell(object):
""" Build a parallel matrix shell from CSR data structures.
Parameters
----------
ghosted_csr_mat: :class: `proteus.superluWrappers.SparseMatrix`
"""
def __init__(self,ghosted_csr_mat):
self.ghosted_csr_mat=ghosted_csr_mat
self.par_b = None
self.xGhosted = None
self.yGhosted = None
def create(self, A):
pass
def mult(self, A, x, y):
assert self.par_b is not None, "The parallel RHS vector par_b must be " \
"initialized before using the mult function"
logEvent("Using SparseMatShell in LinearSolver matrix multiply")
if self.xGhosted is None:
self.xGhosted = self.par_b.duplicate()
self.yGhosted = self.par_b.duplicate()
self.xGhosted.setArray(x.getArray())
self.xGhosted.ghostUpdateBegin(p4pyPETSc.InsertMode.INSERT,p4pyPETSc.ScatterMode.FORWARD)
self.xGhosted.ghostUpdateEnd(p4pyPETSc.InsertMode.INSERT,p4pyPETSc.ScatterMode.FORWARD)
self.yGhosted.zeroEntries()
with self.xGhosted.localForm() as xlf, self.yGhosted.localForm() as ylf:
self.ghosted_csr_mat.matvec(xlf.getArray(),ylf.getArray())
y.setArray(self.yGhosted.getArray())
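# A minimal sketch (hypothetical vectors and sizes) of wrapping SparseMatShell
# in a PETSc "python" shell matrix so that Krylov solvers can call its mult:
#
#   shell = SparseMatShell(ghosted_csr_mat)
#   shell.par_b = par_b  # ghosted template vector used to build work vectors
#   A = p4pyPETSc.Mat().createPython(((par_n, par_N), (par_n, par_N)),
#                                    comm=p4pyPETSc.COMM_WORLD)
#   A.setPythonContext(shell)
#   A.setUp()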
class OperatorShell(object):
""" A base class for operator shells """
def __init__(self):
pass
def create(self,A):
pass
def getSize(self):
"""
Return the number of degrees of freedom for the operator.
"""
raise NotImplementedError('You need to define a getSize ' \
'method for your shell')
class ProductOperatorShell(OperatorShell):
""" A base class for shell operators that apply multiplcation.
Operators derived from this class should have working multiplication
functions.
"""
def __init__(self):
pass
def mult(self, A, x, y):
        raise NotImplementedError('You need to define a multiply ' \
                                  'function for your shell')
class InvOperatorShell(OperatorShell):
""" A base class for inverse operator shells
Operators derived from this class should have working apply
functions.
"""
def __init__(self):
pass
@staticmethod
def _create_tmp_vec(size):
""" Creates an empty vector of given size.
Arguments
---------
size : int
Size of the temporary vector.
Returns
-------
vec : PETSc vector
"""
tmp = p4pyPETSc.Vec().create()
tmp.setType('mpi')
tmp.setSizes(size)
return tmp
@staticmethod
def _create_copy_vec(vec):
""" Creates a copy of a petsc4py vector.
Parameters
----------
vec : :class:`petsc4py.Vec`
Returns
-------
tmp : :class:`petsc4py.Vec`
"""
        # vec.copy() allocates the duplicate, so no separate create/setType is needed
        tmp = vec.copy()
        return tmp
def apply(self, A, x, y):
        raise NotImplementedError('You need to define an apply ' \
                                  'method for your shell')
def getSize(self):
""" Returns the size of InvOperatorShell.
Notes
-----
        This acts as a virtual method and must be implemented for
all inherited classes.
"""
raise NotImplementedError()
def create_petsc_ksp_obj(self,
petsc_option_prefix,
matrix_operator,
constant_null_space = False):
""" Create a PETSc4py KSP object.
Arguments
---------
petsc_option_prefix : str
PETSc commandline option prefix.
matrix_operator : mat
PETSc matrix object for the ksp class.
        constant_null_space : bool
True if the KSP object has a constant null space.
Returns
-------
ksp_obj : PETSc ksp
"""
ksp_obj = p4pyPETSc.KSP().create()
ksp_obj.setOperators(matrix_operator,
matrix_operator)
ksp_obj.setOptionsPrefix(petsc_option_prefix)
if constant_null_space:
const_nullspace_str = ''.join([petsc_option_prefix,
'ksp_constant_null_space'])
self.options.setValue(const_nullspace_str,'')
matrix_operator.setNullSpace(self.const_null_space)
ksp_obj.setFromOptions()
ksp_obj.setUp()
return ksp_obj
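    # A minimal usage sketch (hypothetical prefix and operator): subclasses
    # typically build their inner solvers as
    #
    #   self.ksp_inner = self.create_petsc_ksp_obj('innerMyOp_', my_mat)
    #
    # and can then control them from the PETSc options database, e.g. with
    # -innerMyOp_ksp_type cg -innerMyOp_pc_type jacobi on the command line.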
def _create_constant_nullspace(self):
"""Initialize a constant null space. """
self.const_null_space = p4pyPETSc.NullSpace().create(comm=p4pyPETSc.COMM_WORLD,
vectors = (),
constant = True)
def _set_dirichlet_idx_set(self):
"""
Initialize an index set of non-Dirichlet degrees of freedom.
        When the values of some degrees of freedom are known in
        advance, it can be helpful to remove these degrees of
        freedom from the inverse operator. This function
        creates a PETSc4py index set of the unknown degrees of freedom.
"""
comm = Comm.get()
# Assign number of unknowns
num_dof = self.getSize()
        try:
            self.strong_dirichlet_DOF = [i for i in self.strong_dirichlet_DOF if i < num_dof]
            num_known_dof = len(self.strong_dirichlet_DOF)
        except AttributeError:
            print("ERROR - strong_dirichlet_DOF has not been " \
                  "assigned for this inverse operator object.")
            exit()
num_unknown_dof = num_dof - num_known_dof
# Use boolean mask to collect unknown DOF indices
self.dof_indices = numpy.arange(num_dof,
dtype = 'int32')
known_dof_mask = numpy.ones(num_dof,
dtype = bool)
known_dof_mask[self.strong_dirichlet_DOF] = False
self.unknown_dof_indices = self.dof_indices[known_dof_mask]
self.known_dof_indices = self.dof_indices[~known_dof_mask]
if comm.size() == 1:
# Create PETSc4py index set of unknown DOF
self.known_dof_is = p4pyPETSc.IS()
self.known_dof_is.createGeneral(self.known_dof_indices,
comm=p4pyPETSc.COMM_WORLD)
self.unknown_dof_is = p4pyPETSc.IS()
self.unknown_dof_is.createGeneral(self.unknown_dof_indices,
comm=p4pyPETSc.COMM_WORLD)
elif comm.size() > 1:
self.global_known_dof_indices = [self.par_info.subdomain2global[i] for i in self.known_dof_indices]
self.global_unknown_dof_indices = [self.par_info.subdomain2global[i] for i in self.unknown_dof_indices]
self.known_dof_is = p4pyPETSc.IS()
self.known_dof_is.createGeneral(self.global_known_dof_indices,
comm=p4pyPETSc.COMM_WORLD)
self.unknown_dof_is = p4pyPETSc.IS()
self.unknown_dof_is.createGeneral(self.global_unknown_dof_indices,
comm=p4pyPETSc.COMM_WORLD)
def _converged_trueRes(self,ksp,its,rnorm):
""" Function handle to feed to ksp's setConvergenceTest """
ksp.buildResidual(self.r_work)
truenorm = self.r_work.norm()
if its == 0:
self.rnorm0 = truenorm
# ARB - Leaving these log events in for future debugging purposes.
# logEvent("NumericalAnalytics KSP_LSC_LaplaceResidual: %12.5e" %(truenorm) )
# logEvent("NumericalAnalytics KSP_LSC_LaplaceResidual(relative): %12.5e" %(truenorm / self.rnorm0) )
# logEvent(" KSP it %i norm(r) = %e norm(r)/|b| = %e ; atol=%e rtol=%e " % (its,
# truenorm,
# (truenorm/ self.rnorm0),
# ksp.atol,
# ksp.rtol))
return False
else:
# ARB - Leaving these log events in for future debugging purposes.
# logEvent("NumericalAnalytics KSP_LSC_LaplaceResidual: %12.5e" %(truenorm) )
# logEvent("NumericalAnalytics KSP_LSC_LaplaceResidual(relative): %12.5e" %(truenorm / self.rnorm0) )
# logEvent(" KSP it %i norm(r) = %e norm(r)/|b| = %e ; atol=%e rtol=%e " % (its,
# truenorm,
# (truenorm/ self.rnorm0),
# ksp.atol,
# ksp.rtol))
if truenorm < self.rnorm0*ksp.rtol:
return p4pyPETSc.KSP.ConvergedReason.CONVERGED_RTOL
if truenorm < ksp.atol:
return p4pyPETSc.KSP.ConvergedReason.CONVERGED_ATOL
return False
class LSCInv_shell(InvOperatorShell):
""" Shell class for the LSC Inverse Preconditioner
This class creates a shell for the least-squares commutator (LSC)
preconditioner, where
:math:`M_{s}= (B \hat{Q^{-1}_{v}} B^{'}) (B \hat{Q^{-1}_{v}} F
\hat{Q^{-1}_{v}} B^{'})^{-1} (B \hat{Q^{-1}_{v}} B^{'})`
is used to approximate the Schur complement.
"""
def __init__(self, Qv, B, Bt, F):
"""Initializes the LSC inverse operator.
Parameters
----------
Qv : petsc4py matrix object
The diagonal elements of the velocity mass matrix.
B : petsc4py matrix object
The discrete divergence operator.
Bt : petsc4py matrix object
The discrete gradient operator.
F : petsc4py matrix object
The A-block of the linear system.
"""
# TODO - Find a good way to assert that Qv is diagonal
self.Qv = Qv
self.B = B
self.Bt = Bt
self.F = F
self._constructBQinvBt()
self._options = p4pyPETSc.Options()
if self._options.hasName('innerLSCsolver_BTinvBt_ksp_constant_null_space'):
self._create_constant_nullspace()
self.BQinvBt.setNullSpace(self.const_null_space)
self.kspBQinvBt = p4pyPETSc.KSP().create()
self.kspBQinvBt.setOperators(self.BQinvBt,self.BQinvBt)
self.kspBQinvBt.setOptionsPrefix('innerLSCsolver_BTinvBt_')
self.kspBQinvBt.pc.setUp()
self.kspBQinvBt.setFromOptions()
self.kspBQinvBt.setUp()
# initialize solver for Qv
self.kspQv = p4pyPETSc.KSP().create()
self.kspQv.setOperators(self.Qv,self.Qv)
self.kspQv.setOptionsPrefix('innerLSCsolver_T_')
self.kspQv.setFromOptions()
convergenceTest = 'r-true'
if convergenceTest == 'r-true':
self.r_work = self.BQinvBt.getVecLeft()
self.rnorm0 = None
self.kspBQinvBt.setConvergenceTest(self._converged_trueRes)
else:
self.r_work = None
self.kspBQinvBt.setUp()
def apply(self,A,x,y):
""" Apply the LSC inverse operator
Parameters
----------
A : NULL
            A placeholder for internal PETSc functions.
x : :class:`p4pyPETSc.Vec`
Vector which LSC operator is being applied to.
Returns
--------
y : :class:`p4pyPETSc.Vec`
Result of LSC acting on x.
"""
# create temporary vectors
B_sizes = self.B.getSizes()
        x_tmp = x.copy()
tmp1 = self._create_tmp_vec(B_sizes[0])
tmp2 = self._create_tmp_vec(B_sizes[1])
tmp3 = self._create_tmp_vec(B_sizes[1])
if self._options.hasName('innerLSCsolver_BTinvBt_ksp_constant_null_space'):
self.const_null_space.remove(x_tmp)
self.kspBQinvBt.solve(x_tmp,tmp1)
self.B.multTranspose(tmp1,tmp2)
self.kspQv.solve(tmp2,tmp3)
self.F.mult(tmp3,tmp2)
self.kspQv.solve(tmp2,tmp3)
self.B.mult(tmp3,tmp1)
if self._options.hasName('innerLSCsolver_BTinvBt_ksp_constant_null_space'):
self.const_null_space.remove(x_tmp)
self.kspBQinvBt.solve(tmp1,y)
        assert not numpy.isnan(y.norm()), "Applying the Schur complement " \
            "resulted in not-a-number."
def _constructBQinvBt(self):
""" Private method repsonsible for building BQinvBt """
self.Qv_inv = petsc_create_diagonal_inv_matrix(self.Qv)
QinvBt = self.Qv_inv.matMult(self.Bt)
self.BQinvBt = self.B.matMult(QinvBt)
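# A minimal sketch (hypothetical operators) of attaching LSCInv_shell as a
# PETSc "python" preconditioner for the Schur-complement block:
#
#   lsc = LSCInv_shell(Qv, B, Bt, F)
#   pc = p4pyPETSc.PC().create()
#   pc.setType('python')
#   pc.setPythonContext(lsc)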
class MatrixShell(ProductOperatorShell):
""" A shell class for a matrix. """
def __init__(self,A):
"""
Specifies a basic matrix shell.
Parameters
----------
A : matrix
A petsc4py matrix object
"""
self.A = A
def mult(self,A,x,y):
"""
Multiply the matrix and x.
Parameters
----------
A : matrix
Dummy place holder for PETSc compatibility
x : vector
Returns
-------
y : vector
"""
self.A.mult(x,y)
class MatrixInvShell(InvOperatorShell):
""" A PETSc shell class for a inverse operator. """
def __init__(self, A):
""" Initializes operators and solvers for inverse operator.
Parameters
----------
A : PETSc matrix
This is the matrix object used to construct the inverse.
"""
self.A = A
self.ksp = p4pyPETSc.KSP().create()
self.ksp.setOperators(self.A,self.A)
self.ksp.setType('preonly')
self.ksp.pc.setType('lu')
self.ksp.pc.setFactorSolverType('superlu_dist')
self.ksp.setUp()
def apply(self,A,x,y):
""" Apply the inverse pressure mass matrix.
Parameters
----------
A : matrix
Dummy place holder for PETSc compatibility
x : vector
Returns
-------
y : vector
"""
self.ksp.solve(x,y)
class SpInv_shell(InvOperatorShell):
r""" Shell class for the SIMPLE preconditioner which applies the
following action:
.. math::
        \hat{S}^{-1} = (A_{11} - A_{10} \text{diag}(A_{00})^{-1} A_{01})^{-1}
where :math:`A_{ij}` are sub-blocks of the global saddle point system.
Parameters
----------
A00: :class:`p4pyPETSc.Mat`
The A00 block of the global saddle point system.
A01: :class:`p4pyPETSc.Mat`
The A01 block of the global saddle point system.
A10: :class:`p4pyPETSc.Mat`
The A10 block of the global saddle point system.
A11: :class:`p4pyPETSc.Mat`
The A11 block of the global saddle point system.
use_constant_null_space: bool
Indicates whether a constant null space should be used. See
note below.
Notes
-----
For Stokes or Navier-Stokes systems, the :math:`S` operator
    resembles a Laplacian matrix on the pressure. In cases where the
global saddle point system uses pure Dirichlet boundary
conditions, the :math:`S^{-1}` operator has a constant null
space. Since most saddle-point simulations of interest do not
have pure Dirichlet conditions, the `constNullSpace` flag defaults
to false. Having the null space set to false when the global
problem uses pure Dirichlet boundary conditions will likely result
in poor solver performance or failure.
"""
def __init__(self, A00, A11, A01, A10, constNullSpace):
self.A00 = A00
self.A11 = A11
self.A01 = A01
self.A10 = A10
self.constNullSpace = constNullSpace
self._create_Sp()
self._options = p4pyPETSc.Options()
self.kspSp = p4pyPETSc.KSP().create()
self.kspSp.setOperators(self.Sp,self.Sp)
self.kspSp.setOptionsPrefix('innerSpsolver_')
self.kspSp.setFromOptions()
if self.constNullSpace:
self._create_constant_nullspace()
self.Sp.setNullSpace(self.const_null_space)
self.kspSp.setUp()
def apply(self,A,x,y):
""" Applies the :math:`S_{p}` operator
Parameters
----------
A : None
Dummy argument for PETSc interface
x : :class:`p4pyPETSc.Vec`
Vector to which :math:`S` is applied
Returns
-------
y : :class:`p4pyPETSc.Vec`
Result of :math:`S^{-1}x`
"""
        tmp1 = x.copy()
if self.constNullSpace:
self.const_null_space.remove(tmp1)
self.kspSp.solve(tmp1,y)
        assert not numpy.isnan(y.norm()), "Applying the Schur complement " \
            "resulted in not-a-number."
def _create_Sp(self):
self.A00_inv = petsc_create_diagonal_inv_matrix(self.A00)
A00_invBt = self.A00_inv.matMult(self.A01)
self.Sp = self.A10.matMult(A00_invBt)
self.Sp.aypx(-1.,self.A11)
class TwoPhase_PCDInv_shell(InvOperatorShell):
r""" Shell class for the two-phase PCD preconditioner. The
two-phase PCD_inverse shell applies the following operator.
.. math::
\hat{S}^{-1} = (Q^{(1 / \mu)})^{-1} + (A_{p}^{(1 / \rho)})^{-1}
(N_{p}^{(\rho)} + \dfrac{\alpha}{\Delta t} Q^{(\rho)} )
(Q^{(\rho)})^{-1}
where :math:`Q^{(1 / \mu)}` and :math:`Q^{(\rho)}` denote the pressure
mass matrix scaled by the inverse dynamic viscosity and density
respectively, :math:`(A_{p}^{(1 / \rho)})^{-1}`
denotes the pressure Laplacian scaled by inverse density, and
:math:`N_{p}^{(\rho)}` denotes the pressure advection operator scaled by
    the density, and :math:`\alpha` is a binary parameter indicating
whether the problem is temporal or steady state.
"""
def __init__(self,
Qp_visc,
Qp_dens,
Ap_rho,
Np_rho,
alpha = False,
delta_t = 0,
num_chebyshev_its = 0,
strong_dirichlet_DOF = [],
laplace_null_space = False,
par_info=None):
""" Initialize the two-phase PCD inverse operator.
Parameters
----------
        Qp_visc : petsc4py matrix
            The pressure mass matrix scaled with inverse dynamic
            viscosity.
        Qp_dens : petsc4py matrix
            The pressure mass matrix with density scaling.
        Ap_rho : petsc4py matrix
            The pressure Laplacian scaled with inverse density.
        Np_rho : petsc4py matrix
            The pressure advection operator scaled with density.
        alpha : bool
True if problem is temporal, False if problem is steady
state.
delta_t : float
Time step parameter.
        num_chebyshev_its : int
            Number of Chebyshev iteration steps to take (0 indicates
            the Chebyshev semi-iteration is not used).
        strong_dirichlet_DOF : list
List of DOF with known, strongly enforced values.
        laplace_null_space : bool
Indicates whether the pressure Laplace matrix has a
null space or not.
par_info : ParInfoClass
Provides parallel info.
"""
from . import LinearSolvers as LS
# Set attributes
self.Qp_visc = Qp_visc
self.Qp_dens = Qp_dens
self.Ap_rho = Ap_rho
self.Np_rho = Np_rho
self.alpha = alpha
self.delta_t = delta_t
self.num_chebyshev_its = num_chebyshev_its
self.strong_dirichlet_DOF = strong_dirichlet_DOF
self.laplace_null_space = laplace_null_space
self.par_info = par_info
self.options = p4pyPETSc.Options()
self._create_constant_nullspace()
self._set_dirichlet_idx_set()
self.kspAp_rho = self.create_petsc_ksp_obj('innerTPPCDsolver_Ap_rho_',
self.Ap_rho,
self.laplace_null_space)
self.kspAp_rho.getOperators()[0].zeroRows(self.known_dof_is)
if self.num_chebyshev_its:
self.Qp_visc = LS.ChebyshevSemiIteration(self.Qp_visc,
0.5,
2.0)
self.Qp_dens = LS.ChebyshevSemiIteration(self.Qp_dens,
0.5,
2.0)
else:
pass
# Using ksp objects for the lumped mass matrices is much
# slower than pointwise division.
# self.kspQp_visc = self.create_petsc_ksp_obj('innerTPPCDsolver_Qp_visc_',
# self.Qp_visc)
# self.kspQp_dens = self.create_petsc_ksp_obj('innerTPPCDsolver_Qp_dens_',
# self.Qp_dens)
def getSize(self):
""" Return the total number of DOF for the shell problem. """
return self.Ap_rho.getSizes()[0][0]
def apply(self,A,x,y):
"""
Applies the two-phase pressure-convection-diffusion
Schur complement approximation.
Parameters
----------
A : None
            Dummy variable needed to interface with PETSc
x : petsc4py vector
Vector to which operator is applied
Returns
-------
y : petsc4py vector
Result of operator acting on x.
Notes
-----
When strong Dirichlet conditions are enforced on the pressure,
the PCD operator is applied to the set of unknowns that do not
have Dirichlet boundary conditions. At the end, the solution
is then loaded into the original y-vector.
"""
comm = Comm.get()
x_tmp = self._create_copy_vec(x)
tmp1 = self._create_copy_vec(x_tmp)
tmp2 = self._create_copy_vec(x_tmp)
if self.num_chebyshev_its:
self.Qp_visc.apply(x_tmp,
y,
self.num_chebyshev_its)
self.Qp_dens.apply(x_tmp,
tmp1,
self.num_chebyshev_its)
else:
y.pointwiseDivide(x_tmp,self.Qp_visc.getDiagonal())
tmp1.pointwiseDivide(x_tmp,self.Qp_dens.getDiagonal())
# Pointwise divide appears to be much faster than ksp.
# self.kspQp_visc.solve(x_tmp,y)
# self.kspQp_dens.solve(x_tmp,tmp1)
self.Np_rho.mult(tmp1,tmp2)
        if self.alpha:
tmp2.axpy(old_div(1.,self.delta_t),x_tmp)
if self.options.hasName('innerTPPCDsolver_Ap_rho_ksp_constant_null_space'):
self.const_null_space.remove(tmp2)
zero_array = numpy.zeros(len(self.known_dof_is.getIndices()))
tmp2.setValues(self.known_dof_is.getIndices(),zero_array)
tmp2.assemblyBegin()
tmp2.assemblyEnd()
self.kspAp_rho.solve(tmp2, tmp1)
y.axpy(1.,tmp1)
y.setValues(self.known_dof_is.getIndices(),zero_array)
y.assemblyBegin()
y.assemblyEnd()
        assert not numpy.isnan(y.norm()), "Applying the Schur complement " \
            "resulted in not-a-number."
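# A minimal sketch (hypothetical operators, serial run) of building the
# two-phase PCD shell for a time-dependent problem:
#
#   tppcd = TwoPhase_PCDInv_shell(Qp_visc, Qp_dens, Ap_rho, Np_rho,
#                                 alpha=True, delta_t=dt,
#                                 strong_dirichlet_DOF=[0])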
def l2Norm(x):
"""
Compute the parallel :math:`l_2` norm
"""
return math.sqrt(globalSum(numpy.dot(x,x)))
def l1Norm(x):
"""
Compute the parallel :math:`l_1` norm
The :math:`l_1` norm of a vector :math:`\mathbf{x} \in
\mathbb{R}^n` is
.. math::
        \| \mathbf{x} \|_{1} = \sum_{i=0}^{n-1} |x_i|
If Python is running in parallel, then the sum is over all
dimensions on all processors so that the input must not contain
"ghost" entries.
    This implementation works for a distributed array with no ghost
components (each component must be on a single processor).
:param x: numpy array of length n
:return: float
"""
return globalSum(numpy.sum(numpy.abs(x)))
def lInfNorm(x):
"""
Compute the parallel :math:`l_{\infty}` norm
The :math:`l_{\infty}` norm of a vector :math:`\mathbf{x} \in
\mathbb{R}^n` is
.. math::
\|x\|_{\infty} = \max_i |x_i|
    This implementation works for a distributed array with no ghost
components (each component must be on a single processor).
:param x: numpy array of length n
:return: float
"""
return globalMax(numpy.linalg.norm(x,numpy.inf))
def wDot(x,y,h):
"""
Compute the parallel weighted dot product of vectors x and y using
weight vector h.
The weighted dot product is defined for a weight vector
:math:`\mathbf{h}` as
.. math::
(\mathbf{x},\mathbf{y})_h = \sum_{i} h_{i} x_{i} y_{i}
All weight vector components should be positive.
:param x,y,h: numpy arrays for vectors and weight
:return: the weighted dot product
"""
return globalSum(numpy.sum(x*y*h))
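# A quick worked example (serial, hypothetical values): for x = [1, 2],
# y = [3, 4] and h = [0.5, 0.5], wDot returns 0.5*1*3 + 0.5*2*4 = 5.5.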
def wl2Norm(x,h):
    """
    Compute the parallel weighted l_2 norm with weight h
    """
    # wDot already performs the global sum, so no further reduction is needed
    return math.sqrt(wDot(x,x,h))
def wl1Norm(x,h):
"""
Compute the parallel weighted l_1 norm with weight h
"""
return globalSum(numpy.sum(numpy.abs(h*x)))
def wlInfNorm(x,h):
"""
Compute the parallel weighted l_{\infty} norm with weight h
"""
return globalMax(numpy.linalg.norm(h*x,numpy.inf))
def energyDot(x,y,A):
"""
Compute the "energy" dot product x^t A y (not parallel)
"""
return numpy.dot(numpy.dot(x,A),y)
def energyNorm(x,A):
"""
Compute the "energy" norm x^t A x (not parallel)
"""
return math.sqrt(energyDot(x,x,A))
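# Note that for A equal to the identity, energyDot(x, x, A) reduces to
# numpy.dot(x, x), so energyNorm coincides with the serial l_2 norm.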
def l2NormAvg(x):
"""
Compute the arithmetic averaged l_2 norm (root mean squared norm)
"""
scale = old_div(1.0,globalSum(len(x.flat)))
return math.sqrt(scale*globalSum(numpy.dot(x,x)))
rmsNorm = l2NormAvg
def l2Norm_local(x):
"""
Compute the l_2 norm for just local (processor) system (not parallel)
"""
return math.sqrt(numpy.dot(x,x))
class WeightedNorm(object):
"""
Compute the weighted norm for time step control (not currently parallel)
"""
def __init__(self,shape,atol,rtol):
self.shape = shape
self.dim = sum(self.shape)
self.atol= atol
self.rtol= rtol
self.weight = numpy.ones(shape,'d')
self.tmp = numpy.ones(shape,'d')
def setWeight(self,y):
self.weight[:] = numpy.absolute(y)
self.weight *= self.rtol
self.weight += self.atol
def norm(self,y,type):
self.tmp[:] = y
self.tmp /= self.weight
value = numpy.linalg.norm(self.tmp.flat,type)
return old_div(value,self.dim)
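# A minimal sketch (hypothetical arrays) of using WeightedNorm for time step
# error control: weights are set from the current solution and the scaled norm
# of an error estimate is compared against 1 to accept or reject the step.
#
#   wn = WeightedNorm((10,), atol=1.0e-6, rtol=1.0e-3)
#   wn.setWeight(y_current)
#   accept_step = wn.norm(error_estimate, 2) <= 1.0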
if __name__ == '__main__':
import doctest
doctest.testmod()
# def test_MGV():
# n=2**8 + 1
# h =1.0/(n-1.0)
# freq=10
# u = numpy.random.uniform(0,1,(n))
# u[0]=0.0
# u[n-1]=0.0
# x = numpy.arange(0,1.0+h,h)
# AList=[]
# N=n
# pList=[]
# rList=[]
# resList=[]
# while N >= 3:
# resList.append(Vec(N-2))
# A = dict()#SparseMat(N-2,N-2,3*(N-2),sym=True)
# H = 1.0/(N-1.0)
# #beginAssembly(A)
# for i in range(N-2):
# A[(i,i)] = 2.0/H**2
# if i > 0:
# A[(i,i-1)] = -1.0/H**2
# if i < N-3:
# A[(i,i+1)] = -1.0/H**2
# #endAssembly(A)
# AList.append(SparseMatFromDict(N-2,N-2,A)[0])
# cN = (N - 1)/2 + 1
# r = dict()#SparseMat(cN-2,N-2,3*(N-2))
# p = dict()#SparseMat(N-2,cN-2,3*(N-2))
# for i in range(cN-2):
# r[(i,2*i)] = 1.0/4.0
# r[(i,2*i+1)] = 2.0/4.0
# r[(i,2*i+2)] = 1.0/4.0
# p[(2*i,i)] = 1.0/2.0
# p[(2*i+1,i)]= 2.0/2.0
# p[(2*i+2,i)]= 1.0/2.0
# #r.to_csr()
# print cN-2,N-2,r.keys()
# if cN-2 > 0:
# rList.append(SparseMatFromDict(cN-2,N-2,r)[0])
# else:
# rList.append(None)
# #p.to_csr()
# pList.append(SparseMatFromDict(N-2,cN-2,p)[0])
# N = cN
# class Jacobi:
# def __init__(self,A):
# self.A=A
# self.n=A.shape[0]
# self.M=Vec(self.n)
# for i in range(self.n):
# self.M[i]=1.0/A[i,i]
# self.res=Vec(self.n)
# self.dx=Vec(self.n)
# def apply(self,w,jits,b,x):
# self.A.matvec(x,self.res)
# self.res-=b
# for it in range(jits):
# self.dx[:] = self.M*self.res
# self.dx*=w
# x -= self.dx
# self.A.matvec(x,self.res)
# self.res -= b
# jacobiList=[]
# for A in AList:
# jacobiList.append(Jacobi(A))
# jits = 3
# w = 2.0/3.0
# class MGV:
# def __init__(self,smootherList,AList,pList,rList,resList):
# self.AList = AList
# self.pList = pList
# self.rList = rList
# self.resList = resList
# self.xList=[]
# self.vList=[]
# self.bList=[]
# self.gpList=[]
# for res in resList:
# self.xList.append(Vec(len(res)))
# self.vList.append(Vec(len(res)))
# self.bList.append(Vec(len(res)))
# self.smootherList = smootherList
# def apply(self,w,nsPre,nsPost,level,b,x):
# logEvent("Level = "+`level`)
# if level == len(self.AList)-1:
# self.smootherList[level].apply(1.0,1,b,x)
# else:
# #smooth
# self.smootherList[level].apply(w,nsPre,b,x)
# #restrict the defect
# self.rList[level].matvec(self.smootherList[level].res,self.bList[level+1])
# #V-cycle on the error equation
# self.xList[level+1][:]=0.0
# self.apply(w,nsPre,nsPost,level+1,self.bList[level+1],self.xList[level+1])
# #prolong
# self.pList[level].matvec(self.xList[level+1],self.vList[level])
# #correct
# x-=self.vList[level]
# #smooth
# self.smootherList[level].apply(w,nsPost,b,x)
# self.resList[level][:]=self.smootherList[level].res
# mgv = MGV(jacobiList,AList,pList,rList,resList)
# rnorm=1.0
# mgits = 0
# while rnorm > 1.0e-10 and mgits < 20:
# mgits +=1
# mgv.apply(w,jits,jits,0,f[1:n-1],u[1:n-1])
# rnorm = l2Norm(resList[0])
|
erdc/proteus
|
proteus/LinearAlgebraTools.py
|
Python
|
mit
| 59,753
|
import paramiko,socket
# ssh function
def sshConnect(ip, username, password, command):
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(ip, username=username, password=password)
stdin, stdout, stderr = client.exec_command(command)
#print stderr
result_before = stdout.read()
#print result_before
result = result_before.splitlines()
print "Success!! connection",
except paramiko.AuthenticationException:
print "Authentication problem"
result = None
except socket.error, e:
print "Comunication problem "
result = None
client.close()
return result
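# A minimal variant sketch (hypothetical key path): paramiko's connect() also
# accepts key-based authentication in place of a password, e.g.
#
#   client.connect(ip, username=username, key_filename="/home/user/.ssh/id_rsa")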
# main function
if __name__ == "__main__":
ip = "10.100.0.100"
username = "junos"
password = "junos123"
command = "show chassis alarm"
result = sshConnect(ip, username, password, command)
print result
|
trjones841/pynet_class
|
Exercises/Week4/juniper_paramiko.py
|
Python
|
apache-2.0
| 986
|
from wiki.conf import settings
###############################
# TARGET PERMISSION HANDLING #
###############################
#
# All functions are:
# can_something(target, user)
# => True/False
#
# All functions can be replaced by pointing their relevant
# settings variable in wiki.conf.settings to a callable(target, user)
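# A minimal sketch (hypothetical callable, assuming the setting can be pointed
# at it as described above) of such a replacement, granting read access to
# staff users only:
#
#   def staff_can_read(target, user):
#       return user.is_staff
#
#   settings.CAN_READ = staff_can_read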
def can_read(target, user):
if callable(settings.CAN_READ):
return settings.CAN_READ(target, user)
else:
# Deny reading access to deleted entities if user has no delete access
is_deleted = target.current_revision and target.deleted
if is_deleted and not target.can_delete(user):
return False
# Check access for other users...
if user.is_anonymous() and not settings.ANONYMOUS:
return False
elif target.other_read:
return True
elif user.is_anonymous():
return False
if user == target.owner:
return True
if target.group_read:
if target.group and user.groups.filter(id=target.group.id).exists():
return True
if target.can_moderate(user):
return True
return False
def can_write(target, user):
if callable(settings.CAN_WRITE):
return settings.CAN_WRITE(target, user)
# Check access for other users...
if user.is_anonymous() and not settings.ANONYMOUS_WRITE:
return False
elif target.other_write:
return True
elif user.is_anonymous():
return False
if user == target.owner:
return True
if target.group_write:
if target.group and user and user.groups.filter(id=target.group.id).exists():
return True
if target.can_moderate(user):
return True
return False
def can_assign(target, user):
if callable(settings.CAN_ASSIGN):
return settings.CAN_ASSIGN(target, user)
return not user.is_anonymous() and user.has_perm('wiki.assign')
def can_assign_owner(target, user):
if callable(settings.CAN_ASSIGN_OWNER):
return settings.CAN_ASSIGN_OWNER(target, user)
return False
def can_change_permissions(target, user):
if callable(settings.CAN_CHANGE_PERMISSIONS):
return settings.CAN_CHANGE_PERMISSIONS(target, user)
return (
not user.is_anonymous() and (
target.owner == user or
user.has_perm('wiki.assign')
)
)
def can_delete(target, user):
if callable(settings.CAN_DELETE):
return settings.CAN_DELETE(target, user)
return not user.is_anonymous() and target.can_write(user)
def can_moderate(target, user):
if callable(settings.CAN_MODERATE):
return settings.CAN_MODERATE(target, user)
return not user.is_anonymous() and user.has_perm('wiki.moderate')
def can_admin(target, user):
if callable(settings.CAN_ADMIN):
return settings.CAN_ADMIN(target, user)
return not user.is_anonymous() and user.has_perm('wiki.admin')
|
skakri/django-unstructured
|
wiki/core/permissions.py
|
Python
|
gpl-3.0
| 3,004
|
#
# Evy - a concurrent networking library for Python
#
# Unless otherwise noted, the files in Evy are under the following MIT license:
#
# Copyright (c) 2012, Alvaro Saurin
# Copyright (c) 2008-2010, Eventlet Contributors (see AUTHORS)
# Copyright (c) 2007-2010, Linden Research, Inc.
# Copyright (c) 2005-2006, Bob Ippolito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import time
import traceback
from tests import skip_unless, get_database_auth, LimitedTestCase
from evy import event
from evy.green.threads import spawn, sleep
try:
from evy.patched import MySQLdb
except ImportError:
MySQLdb = False
def mysql_requirement (_f):
"""We want to skip tests if using pyevent, MySQLdb is not installed, or if
there is no database running on the localhost that the auth file grants
us access to.
This errs on the side of skipping tests if everything is not right, but
it's better than a million tests failing when you don't care about mysql
support."""
if MySQLdb is False:
print "Skipping mysql tests, MySQLdb not importable"
return False
try:
auth = get_database_auth()['MySQLdb'].copy()
MySQLdb.connect(**auth)
return True
except MySQLdb.OperationalError:
print "Skipping mysql tests, error when connecting:"
traceback.print_exc()
return False
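# A minimal sketch (assumed layout, not taken from the test helpers) of the
# mapping get_database_auth() is expected to return for these tests:
#
#   {'MySQLdb': {'host': 'localhost', 'user': 'root', 'passwd': ''}}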
class TestMySQLdb(LimitedTestCase):
def setUp (self):
self._auth = get_database_auth()['MySQLdb']
self.create_db()
self.connection = None
self.connection = MySQLdb.connect(**self._auth)
cursor = self.connection.cursor()
cursor.execute("""CREATE TABLE gargleblatz
(
a INTEGER
);""")
self.connection.commit()
cursor.close()
def tearDown (self):
if self.connection:
self.connection.close()
self.drop_db()
@skip_unless(mysql_requirement)
def create_db (self):
auth = self._auth.copy()
try:
self.drop_db()
except Exception:
pass
dbname = 'test_%d_%d' % (os.getpid(), int(time.time() * 1000))
db = MySQLdb.connect(**auth).cursor()
db.execute("create database " + dbname)
db.close()
self._auth['db'] = dbname
del db
def drop_db (self):
db = MySQLdb.connect(**self._auth).cursor()
db.execute("drop database " + self._auth['db'])
db.close()
del db
def set_up_dummy_table (self, connection = None):
close_connection = False
if connection is None:
close_connection = True
if self.connection is None:
connection = MySQLdb.connect(**self._auth)
else:
connection = self.connection
cursor = connection.cursor()
cursor.execute(self.dummy_table_sql)
connection.commit()
cursor.close()
if close_connection:
connection.close()
dummy_table_sql = """CREATE TEMPORARY TABLE test_table
(
row_id INTEGER PRIMARY KEY AUTO_INCREMENT,
value_int INTEGER,
value_float FLOAT,
value_string VARCHAR(200),
value_uuid CHAR(36),
value_binary BLOB,
value_binary_string VARCHAR(200) BINARY,
value_enum ENUM('Y','N'),
created TIMESTAMP
) ENGINE=InnoDB;"""
def assert_cursor_yields (self, curs):
counter = [0]
def tick ():
while True:
counter[0] += 1
sleep()
gt = spawn(tick)
curs.execute("select 1")
rows = curs.fetchall()
self.assertEqual(rows, ((1L,),))
self.assert_(counter[0] > 0, counter[0])
gt.kill()
def assert_cursor_works (self, cursor):
cursor.execute("select 1")
rows = cursor.fetchall()
self.assertEqual(rows, ((1L,),))
self.assert_cursor_yields(cursor)
def assert_connection_works (self, conn):
curs = conn.cursor()
self.assert_cursor_works(curs)
def test_module_attributes (self):
import MySQLdb as orig
for key in dir(orig):
if key not in ('__author__', '__path__', '__revision__',
'__version__', '__loader__'):
self.assert_(hasattr(MySQLdb, key), "%s %s" % (key, getattr(orig, key)))
def test_connecting (self):
self.assert_(self.connection is not None)
def test_connecting_annoyingly (self):
self.assert_connection_works(MySQLdb.Connect(**self._auth))
self.assert_connection_works(MySQLdb.Connection(**self._auth))
self.assert_connection_works(MySQLdb.connections.Connection(**self._auth))
def test_create_cursor (self):
cursor = self.connection.cursor()
cursor.close()
def test_run_query (self):
cursor = self.connection.cursor()
self.assert_cursor_works(cursor)
cursor.close()
def test_run_bad_query (self):
cursor = self.connection.cursor()
try:
cursor.execute("garbage blah blah")
self.assert_(False)
except AssertionError:
raise
except Exception:
pass
cursor.close()
def fill_up_table (self, conn):
curs = conn.cursor()
for i in range(1000):
curs.execute('insert into test_table (value_int) values (%s)' % i)
conn.commit()
def test_yields (self):
conn = self.connection
self.set_up_dummy_table(conn)
self.fill_up_table(conn)
curs = conn.cursor()
results = []
SHORT_QUERY = "select * from test_table"
evt = event.Event()
def a_query ():
self.assert_cursor_works(curs)
curs.execute(SHORT_QUERY)
results.append(2)
evt.send()
spawn(a_query)
results.append(1)
self.assertEqual([1], results)
evt.wait()
self.assertEqual([1, 2], results)
def test_visibility_from_other_connections (self):
conn = MySQLdb.connect(**self._auth)
conn2 = MySQLdb.connect(**self._auth)
curs = conn.cursor()
try:
curs2 = conn2.cursor()
curs2.execute("insert into gargleblatz (a) values (%s)" % (314159))
self.assertEqual(curs2.rowcount, 1)
conn2.commit()
selection_query = "select * from gargleblatz"
curs2.execute(selection_query)
self.assertEqual(curs2.rowcount, 1)
del curs2, conn2
# create a new connection, it should see the addition
conn3 = MySQLdb.connect(**self._auth)
curs3 = conn3.cursor()
curs3.execute(selection_query)
self.assertEqual(curs3.rowcount, 1)
# now, does the already-open connection see it?
curs.execute(selection_query)
self.assertEqual(curs.rowcount, 1)
del curs3, conn3
finally:
# clean up my litter
curs.execute("delete from gargleblatz where a=314159")
conn.commit()
from tests import test_patcher
class TestMySQLMonkeyPatch(test_patcher.ProcessBase):
@skip_unless(mysql_requirement)
def test_monkey_patching (self):
output, lines = self.run_script("""
from evy import patcher
import MySQLdb as m
from evy.patched import MySQLdb as gm
patcher.monkey_patch(all=True, MySQLdb=True)
print "mysqltest", ",".join(sorted(patcher.already_patched.keys()))
print "connect", m.connect == gm.connect
""")
self.assertEqual(len(lines), 3)
self.assertEqual(lines[0].replace("psycopg,", ""),
'mysqltest MySQLdb,os,select,socket,thread,time')
self.assertEqual(lines[1], "connect True")
|
inercia/evy
|
tests/test_patcher_mysqldb.py
|
Python
|
mit
| 8,833
|