repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
sidnarayanan/BAdNet | train/pf/adv/models/train_panda_0.py | 1 | 9018 | #!/usr/local/bin/python2.7
from sys import exit, stdout, argv
from os import environ, system
environ['KERAS_BACKEND'] = 'tensorflow'
import numpy as np
import utils
import signal
from keras.layers import Input, Dense, Dropout, concatenate, LSTM, BatchNormalization, Conv1D, concatenate
from keras.models import Model
from keras.callbacks import ModelCheckpoint, LambdaCallback, TensorBoard
from keras.optimizers import Adam, SGD
from keras.utils import np_utils
from keras import backend as K
K.set_image_data_format('channels_last')
from adversarial import Adversary
import obj
import config
#config.DEBUG = True
#config.n_truth = 5
#config.truth = 'resonanceType'
#config.adversary_mask = 0
'''
some global definitions
'''
# Which scalar kinematics are fed to the network as auxiliary inputs.
LEARNMASS = True
LEARNRHO = True
LEARNPT = True
# Which kinematics the adversary decorrelates the classifier against.
DECORRMASS = True
DECORRRHO = False
DECORRPT = False
# Loss weights for the pivoter: [classifier loss, adversary loss].
adv_loss_weights = [0.0001, 50]
# Training mode: ADV > 0 runs adversarial training; even ADV also runs the
# classifier-only stage (ADV == 2 does both).
ADV = 2
NEPOCH = 2
APOSTLE = 'panda_0'
# Snapshot this script next to the models it produces, for provenance.
system('cp %s models/train_%s.py'%(argv[0], APOSTLE))
'''
instantiate data loaders
'''
def make_coll(fpath):
    # Build a PFSVCollection reading the 'singletons' and 'pf' categories
    # from the given file pattern (PARTITION/CATEGORY are template tokens).
    coll = obj.PFSVCollection()
    coll.add_categories(['singletons', 'pf'], fpath)
    return coll
top = make_coll('/fastscratch/snarayan/pandaarrays/v1//PARTITION/ZpTT_*_CATEGORY.npy')
qcd = make_coll('/fastscratch/snarayan/pandaarrays/v1//PARTITION/QCD_*_CATEGORY.npy')
data = [top, qcd]
# preload some data just to get the dimensions
data[0].objects['train']['pf'].load(memory=False)
dims = data[0].objects['train']['pf'].data.data.shape
obj.limit = 20  # cap the number of PF candidates kept per jet
dims = (None, obj.limit, 9) # override
'''
first build the classifier!
'''
# set up data
opts = {'learn_mass':LEARNMASS,
        'learn_pt':LEARNPT,
        'learn_rho':LEARNRHO}
classifier_train_gen = obj.generatePF(data, partition='train', batch=1000, normalize=False, **opts)
classifier_validation_gen = obj.generatePF(data, partition='validate', batch=10000, **opts)
classifier_test_gen = obj.generatePF(data, partition='test', batch=2, **opts)
test_i, test_o, test_w = next(classifier_test_gen)
#print test_i
# Inputs: the PF-candidate sequence plus optional per-jet scalar kinematics.
inputs = Input(shape=(dims[1], dims[2]), name='input')
mass_inputs = Input(shape=(1,), name='mass_input')
rho_inputs = Input(shape=(1,), name='rho_input')
pt_inputs = Input(shape=(1,), name='pt_input')
# Conv -> LSTM -> dense stack, batch-normalized after every layer.
norm = BatchNormalization(momentum=0.6, name='input_bnorm') (inputs)
conv = Conv1D(32, 2, activation='relu', name='conv0', kernel_initializer='lecun_uniform', padding='same')(norm)
norm = BatchNormalization(momentum=0.6, name='conv0_bnorm') (conv)
conv = Conv1D(16, 4, activation='relu', name='conv1', kernel_initializer='lecun_uniform', padding='same')(norm)
norm = BatchNormalization(momentum=0.6, name='conv1_bnorm') (conv)
lstm = LSTM(100, go_backwards=True, implementation=2, name='lstm') (norm)
norm = BatchNormalization(momentum=0.6, name='lstm_norm') (lstm)
dense = Dense(100, activation='relu',name='lstmdense',kernel_initializer='lecun_uniform') (norm)
norm = BatchNormalization(momentum=0.6,name='lstmdense_norm') (dense)
for i in xrange(1,5):
    dense = Dense(50, activation='relu',name='dense%i'%i)(norm)
    norm = BatchNormalization(momentum=0.6,name='dense%i_norm'%i)(dense)
if LEARNMASS or LEARNPT or LEARNRHO:
    # Concatenate the requested scalar kinematics onto the learned features.
    to_merge = [norm]
    if LEARNMASS:
        to_merge.append(mass_inputs)
    if LEARNRHO:
        to_merge.append(rho_inputs)
    if LEARNPT:
        to_merge.append(pt_inputs)
    merge = concatenate(to_merge)
    dense = Dense(50, activation='tanh', name='dense5a')(merge)
    norm = BatchNormalization(momentum=0.6,name='dense5a_norm')(dense)
    # dense = Dense(50, activation='tanh', name='dense5')(norm)
    # norm = BatchNormalization(momentum=0.6,name='dense5_norm')(dense)
else:
    dense = Dense(50, activation='tanh',name='dense5')(norm)
    norm = BatchNormalization(momentum=0.6,name='dense5_norm')(dense)
y_hat = Dense(config.n_truth, activation='softmax') (norm)
# Assemble the model input list in a fixed order (must match the generator).
i = [inputs]
if LEARNMASS:
    i.append(mass_inputs)
if LEARNRHO:
    i.append(rho_inputs)
if LEARNPT:
    i.append(pt_inputs)
classifier = Model(inputs=i, outputs=y_hat)
classifier.compile(optimizer=Adam(lr=0.0001),
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
# print '########### CLASSIFIER ############'
# classifier.summary()
# print '###################################'
pred = classifier.predict(test_i)
# ctrl+C now triggers a graceful exit
def save_classifier(name='classifier_conv', model=classifier):
    # Persist the model; APOSTLE tags the file with this training run.
    model.save('models/%s_%s.h5'%(name, APOSTLE))
def save_and_exit(signal=None, frame=None, name='classifier_conv', model=classifier):
    # NOTE(review): flog is only opened later inside the training branches;
    # a SIGINT arriving before then would raise NameError here.
    save_classifier(name, model)
    flog.close()
    exit(1)
signal.signal(signal.SIGINT, save_and_exit)
'''
now build the adversarial setup
'''
# set up data
opts = {'decorr_mass':DECORRMASS,
        'decorr_rho':DECORRRHO,
        'decorr_pt':DECORRPT,
        'learn_mass':LEARNMASS,
        'learn_pt':LEARNPT,
        'learn_rho':LEARNRHO}
train_gen = obj.generatePF(data, partition='train', batch=1000, normalize=False, **opts)
validation_gen = obj.generatePF(data, partition='validate', batch=100, **opts)
test_gen = obj.generatePF(data, partition='test', batch=1, **opts)
# build the model
# One adversarial output head per decorrelation target.
kin_hats = Adversary(config.n_decorr_bins, n_outputs=(int(DECORRMASS)+int(DECORRPT)+int(DECORRRHO)), scale=0.0001)(y_hat)
# kin_hats = Adversary(config.n_decorr_bins, n_outputs=2, scale=0.01)(y_hat)
i = [inputs]
if LEARNMASS:
    i.append(mass_inputs)
if LEARNRHO:
    i.append(rho_inputs)
if LEARNPT:
    i.append(pt_inputs)
pivoter = Model(inputs=i,
                outputs=[y_hat]+kin_hats)
pivoter.compile(optimizer=Adam(lr=0.001),
                loss=['categorical_crossentropy'] + ['categorical_crossentropy' for _ in kin_hats],
                loss_weights=adv_loss_weights)
print '############# ARCHITECTURE #############'
pivoter.summary()
print '###################################'
'''
Now we train both models
'''
if ADV > 0:
    print 'TRAINING ADVERSARIAL NETWORK'
    system('mv logs/train_conv_adv.log logs/train_conv_adv.log.old')
    flog = open('logs/train_conv_adv.log','w')
    # NOTE(review): the 'dense_6_loss'/'dense_7_loss'/'dense_1_loss' keys
    # depend on Keras' auto-generated layer names and are version-fragile.
    callback = LambdaCallback(
        on_batch_end=lambda batch, logs: flog.write('%i,%f,%f,%f,%f\n'%(batch,logs['loss'],logs['dense_6_loss'],logs['dense_7_loss'],logs['dense_1_loss'])),
        on_epoch_end=lambda epoch, logs: save_classifier(name='regularized_conv')
    )
    tb = TensorBoard(
        log_dir = './logs/conv_logs',
        write_graph = True,
        write_images = True
    )
    print ' -Pre-training the classifier'
    # bit of pre-training to get the classifer in the right place
    classifier.fit_generator(classifier_train_gen,
                             steps_per_epoch=1000,
                             epochs=2)
    save_classifier(name='pretrained_conv')
    # np.set_printoptions(threshold='nan')
    # print test_o
    # print classifier.predict(test_i)
    def save_and_exit(signal=None, frame=None, name='regularized_conv', model=classifier):
        # Rebind SIGINT so an interrupt now saves the regularized model.
        save_classifier(name, model)
        flog.close()
        exit(1)
    signal.signal(signal.SIGINT, save_and_exit)
    print ' -Training the adversarial stack'
    # now train the model for real
    pivoter.fit_generator(train_gen,
                          steps_per_epoch=5000,
                          epochs=NEPOCH*2,
                          # callbacks=[callback],
                          # validation_data=validation_gen,
                          # validation_steps=100
                          )
    save_classifier(name='regularized_conv')
    save_classifier(name='pivoter_conv', model=pivoter)
    flog.close()
if ADV % 2 == 0:
    print 'TRAINING CLASSIFIER ONLY'
    system('mv logs/train_conv.log logs/train_conv.log.old')
    flog = open('logs/train_conv.log','w')
    callback = LambdaCallback(
        on_batch_end=lambda batch, logs: flog.write('%i,%f\n'%(batch,logs['loss'])),
        on_epoch_end=lambda epoch, logs: save_classifier(name='classifier_conv')
    )
    tb = TensorBoard(
        log_dir = './logs/lstmnoreg_logs',
        write_graph = True,
        write_images = True
    )
    n_epochs = 1 if (ADV == 2) else 2 # fewer epochs if network is pretrained
    n_epochs *= NEPOCH
    def save_and_exit(signal=None, frame=None, name='classifier_conv', model=classifier):
        save_classifier(name, model)
        flog.close()
        exit(1)
    signal.signal(signal.SIGINT, save_and_exit)
    classifier.fit_generator(classifier_train_gen,
                             steps_per_epoch=5000,
                             epochs=n_epochs,
                             # callbacks=[callback],
                             # validation_data=classifier_validation_gen,
                             # validation_steps=100
                             )
    save_classifier(name='classifier_conv')
| mit |
mattkretz/root | interpreter/llvm/src/tools/clang/bindings/python/tests/cindex/util.py | 88 | 2293 | # This file provides common utility functions for the test suite.
from clang.cindex import Cursor
from clang.cindex import TranslationUnit
def get_tu(source, lang='c', all_warnings=False, flags=None):
    """Obtain a translation unit from source and language.

    By default, the translation unit is created from source file "t.<ext>"
    where <ext> is the default file extension for the specified language. By
    default it is C, so "t.c" is the default file name.

    Supported languages are {c, cpp, objc}.

    all_warnings is a convenience argument to enable all compiler warnings.

    flags is an optional list of extra compiler flags; it is copied, never
    mutated. Raises ValueError for an unknown language.
    """
    # Default to a fresh list instead of a shared mutable default argument.
    args = list(flags) if flags is not None else []
    name = 't.c'
    if lang == 'cpp':
        name = 't.cpp'
        args.append('-std=c++11')
    elif lang == 'objc':
        name = 't.m'
    elif lang != 'c':
        # ValueError (a subclass of Exception) keeps existing callers that
        # catch Exception working, while being the idiomatic type here.
        raise ValueError('Unknown language: %s' % lang)
    if all_warnings:
        args += ['-Wall', '-Wextra']
    return TranslationUnit.from_source(name, args, unsaved_files=[(name,
                                                                   source)])
def get_cursor(source, spelling):
    """Return the first cursor in *source* with the given spelling.

    *source* may be either a TranslationUnit or a Cursor; the tree is
    walked in preorder. Returns None when no cursor matches.
    """
    # Convenience for calling on a TU.
    root = source if isinstance(source, Cursor) else source.cursor
    return next(
        (node for node in root.walk_preorder() if node.spelling == spelling),
        None)
def get_cursors(source, spelling):
    """Return all cursors in *source* with the given spelling, in preorder.

    *source* may be either a TranslationUnit or a Cursor. An empty list is
    returned when nothing matches.
    """
    # Convenience for calling on a TU.
    root = source if isinstance(source, Cursor) else source.cursor
    return [node for node in root.walk_preorder() if node.spelling == spelling]
# Public helpers exported by this test-utility module.
__all__ = [
    'get_cursor',
    'get_cursors',
    'get_tu',
]
| lgpl-2.1 |
kordless/wisdom | lib/pyotp/otp.py | 11 | 1820 | import base64
import hashlib
import hmac
class OTP(object):
    """Base one-time-password generator (the HOTP/TOTP building block)."""

    def __init__(self, s, digits=6, digest=hashlib.sha1):
        """
        @param [String] secret in the form of base32
        @option options digits [Integer] (6)
            Number of integers in the OTP
            Google Authenticate only supports 6 currently
        @option options digest [Callable] (hashlib.sha1)
            Digest used in the HMAC
            Google Authenticate only supports 'sha1' currently
        @returns [OTP] OTP instantiation
        """
        self.digits = digits
        self.digest = digest
        self.secret = s

    def generate_otp(self, input):
        """
        @param [Integer] input the number used seed the HMAC
            Usually either the counter, or the computed integer
            based on the Unix timestamp
        """
        # bytearray indexes to ints on both Python 2 and 3; the previous
        # ord()-based indexing broke under Python 3, where bytes elements
        # are already ints.
        hmac_hash = bytearray(hmac.new(
            self.byte_secret(),
            self.int_to_bytestring(input),
            self.digest,
        ).digest())
        # RFC 4226 dynamic truncation: the low nibble of the *last* digest
        # byte selects the offset (the old hard-coded index 19 only worked
        # for 20-byte SHA-1 digests).
        offset = hmac_hash[-1] & 0xf
        code = ((hmac_hash[offset] & 0x7f) << 24 |
                (hmac_hash[offset + 1] & 0xff) << 16 |
                (hmac_hash[offset + 2] & 0xff) << 8 |
                (hmac_hash[offset + 3] & 0xff))
        return code % 10 ** self.digits

    def byte_secret(self):
        # The secret is stored base32-encoded; decode case-insensitively.
        return base64.b32decode(self.secret, casefold=True)

    def int_to_bytestring(self, int, padding=8):
        """
        Turns an integer to the OATH specified
        bytestring, which is fed to the HMAC
        along with the secret
        """
        result = bytearray()
        while int != 0:
            result.append(int & 0xFF)
            int >>= 8
        # Big-endian, left-padded with NUL bytes to `padding` length.
        result.reverse()
        return bytes(result).rjust(padding, b'\0')
| mit |
aguestuser/social-network-analysis-sandbox | lib/python3.5/site-packages/pkg_resources/_vendor/packaging/markers.py | 228 | 8248 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pkg_resources.extern.pyparsing import Literal as L # noqa
from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier
# Public API of the markers module.
__all__ = [
    "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
    "Marker", "default_environment",
]
class InvalidMarker(ValueError):
    """
    An invalid marker was found, users should refer to PEP 508.

    Raised by Marker() when the marker string fails to parse.
    """
class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.

    Raised during marker evaluation when no handler exists for an operator.
    """
class UndefinedEnvironmentName(ValueError):
    """
    A name was attempted to be used that does not exist inside of the
    environment.
    """
class Node(object):
    """A single token (variable, operator or value) of a parsed marker."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return "<%s(%r)>" % (self.__class__.__name__, str(self))

    def serialize(self):
        # Subclasses decide how to render themselves back to marker syntax.
        raise NotImplementedError
class Variable(Node):
    """A marker variable, e.g. ``python_version``."""

    def serialize(self):
        # Variables render as their bare (normalized) name.
        return "%s" % (self,)
class Value(Node):
    """A literal string value from a marker expression."""

    def serialize(self):
        # Values are re-quoted with double quotes.
        return '"%s"' % (self,)
class Op(Node):
    """A comparison or containment operator in a marker expression."""

    def serialize(self):
        return "%s" % (self,)
# Grammar for PEP 508 environment markers, built from pyparsing combinators.
# Alternatives are ordered longest-first where one literal is a prefix of
# another (e.g. "===" before "==") so the longest operator matches.
VARIABLE = (
    L("implementation_version") |
    L("platform_python_implementation") |
    L("implementation_name") |
    L("python_full_version") |
    L("platform_release") |
    L("platform_version") |
    L("platform_machine") |
    L("platform_system") |
    L("python_version") |
    L("sys_platform") |
    L("os_name") |
    L("os.name") |  # PEP-345
    L("sys.platform") |  # PEP-345
    L("platform.version") |  # PEP-345
    L("platform.machine") |  # PEP-345
    L("platform.python_implementation") |  # PEP-345
    L("python_implementation") |  # undocumented setuptools legacy
    L("extra")
)
# Legacy (PEP 345 / setuptools) spellings normalized to PEP 508 names.
ALIASES = {
    'os.name': 'os_name',
    'sys.platform': 'sys_platform',
    'platform.version': 'platform_version',
    'platform.machine': 'platform_machine',
    'platform.python_implementation': 'platform_python_implementation',
    'python_implementation': 'platform_python_implementation'
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
    L("===") |
    L("==") |
    L(">=") |
    L("<=") |
    L("!=") |
    L("~=") |
    L(">") |
    L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
# Forward declaration lets parenthesized sub-expressions nest recursively.
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
    """Recursively convert a pyparsing ParseResults tree into plain lists."""
    if not isinstance(results, ParseResults):
        return results
    return [_coerce_parse_result(child) for child in results]
def _format_marker(marker, first=True):
    """Render a coerced marker structure back to marker-expression syntax.

    *first* is True only at the outermost level, where the surrounding
    parentheses are omitted.
    """
    assert isinstance(marker, (list, tuple, string_types))

    # A [[...]] single-item nesting is unwrapped so we don't emit
    # extraneous parentheses around the whole expression.
    if (isinstance(marker, list) and len(marker) == 1
            and isinstance(marker[0], (list, tuple))):
        return _format_marker(marker[0])

    if isinstance(marker, list):
        parts = [_format_marker(m, first=False) for m in marker]
        joined = " ".join(parts)
        return joined if first else "(" + joined + ")"
    if isinstance(marker, tuple):
        return " ".join(item.serialize() for item in marker)
    # Bare "and"/"or" connective.
    return marker
# Marker comparison operator -> evaluation function. "in"/"not in" have no
# operator-module equivalent with this operand order, hence the lambdas.
_operators = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _eval_op(lhs, op, rhs):
    """Evaluate one ``lhs <op> rhs`` marker comparison.

    Raises UndefinedComparison when the operator has no handler.
    """
    # Prefer PEP 440 version-specifier semantics when the operator plus the
    # right-hand side form a valid specifier.
    try:
        spec = Specifier(op.serialize() + rhs)
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)

    handler = _operators.get(op.serialize())
    if handler is None:
        raise UndefinedComparison(
            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
        )
    return handler(lhs, rhs)
_undefined = object()
def _get_env(environment, name):
value = environment.get(name, _undefined)
if value is _undefined:
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
    """Evaluate a coerced marker structure against *environment*.

    Clauses are accumulated into AND-groups which are then OR'd together,
    so "and" binds tighter than "or".
    """
    groups = [[]]
    for marker in markers:
        assert isinstance(marker, (list, tuple, string_types))
        if isinstance(marker, list):
            # Parenthesized sub-expression: evaluate recursively.
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker
            # Exactly one side is a Variable; resolve it in the environment.
            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)
            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            if marker == "or":
                # Start a new AND-group; groups are OR'd at the end.
                groups.append([])
    return any(all(item) for item in groups)
def format_full_version(info):
    """Format a sys.implementation.version-like struct as a version string.

    E.g. (3, 9, 1, 'final', 0) -> "3.9.1"; a non-final release level is
    appended as its first letter plus the serial ("3.9.1b2").
    """
    version = '{0.major}.{0.minor}.{0.micro}'.format(info)
    kind = info.releaselevel
    if kind != 'final':
        version += kind[0] + str(info.serial)
    return version


def default_environment():
    """Return the PEP 508 marker environment for the running interpreter."""
    if hasattr(sys, 'implementation'):
        iver = format_full_version(sys.implementation.version)
        implementation_name = sys.implementation.name
    else:
        # Python 2 has no sys.implementation.
        iver = '0'
        implementation_name = ''
    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        # PEP 508 defines python_version as "major.minor". The previous
        # platform.python_version()[:3] broke on two-digit minor versions
        # (Python 3.10 became "3.1").
        "python_version": '.'.join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }
class Marker(object):
    """A parsed PEP 508 environment marker."""

    def __init__(self, marker):
        try:
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as exc:
            snippet = marker[exc.loc:exc.loc + 8]
            raise InvalidMarker(
                "Invalid marker: {0!r}, parse error at {1!r}".format(
                    marker, snippet))

    def __str__(self):
        return _format_marker(self._markers)

    def __repr__(self):
        return "<Marker({0!r})>".format(str(self))

    def evaluate(self, environment=None):
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        env = default_environment()
        if environment is not None:
            env.update(environment)
        return _evaluate_markers(self._markers, env)
| gpl-3.0 |
vitor-alves/pixel-canvas-bot | packages/idna/intranges.py | 293 | 1749 | """
Given a list of integers, made up of (hopefully) a small number of long runs
of consecutive integers, compute a representation of the form
((start1, end1), (start2, end2) ...). Then answer the question "was x present
in the original list?" in time O(log(# runs)).
"""
import bisect
def intranges_from_list(list_):
"""Represent a list of integers as a sequence of ranges:
((start_0, end_0), (start_1, end_1), ...), such that the original
integers are exactly those x such that start_i <= x < end_i for some i.
Ranges are encoded as single integers (start << 32 | end), not as tuples.
"""
sorted_list = sorted(list_)
ranges = []
last_write = -1
for i in range(len(sorted_list)):
if i+1 < len(sorted_list):
if sorted_list[i] == sorted_list[i+1]-1:
continue
current_range = sorted_list[last_write+1:i+1]
ranges.append(_encode_range(current_range[0], current_range[-1] + 1))
last_write = i
return tuple(ranges)
def _encode_range(start, end):
return (start << 32) | end
def _decode_range(r):
return (r >> 32), (r & ((1 << 32) - 1))
def intranges_contain(int_, ranges):
"""Determine if `int_` falls into one of the ranges in `ranges`."""
tuple_ = _encode_range(int_, 0)
pos = bisect.bisect_left(ranges, tuple_)
# we could be immediately ahead of a tuple (start, end)
# with start < int_ <= end
if pos > 0:
left, right = _decode_range(ranges[pos-1])
if left <= int_ < right:
return True
# or we could be immediately behind a tuple (int_, end)
if pos < len(ranges):
left, _ = _decode_range(ranges[pos])
if left == int_:
return True
return False
| gpl-3.0 |
sdvillal/manysources | chemdeco/integration/smartsviewer_utils.py | 1 | 5954 | # coding=utf-8
"""Thin python wrapper over SmartsViewer."""
import os.path as op
import subprocess
from joblib import Parallel, delayed, cpu_count
# for joblib pickling...
def _depict(svr, smarts, dest_file):
    # Module-level shim so joblib's Parallel can pickle the work item.
    return svr.depict(smarts, dest_file)
def as_pil(paths):
    # Open each depiction path as a PIL Image (map returns a list on py2,
    # which this script targets).
    from PIL import Image
    return map(Image.open, paths)
class SmartsViewerRunner(object):
    """Pythonic interface to smartsviewer:
      http://www.smartsviewer.de/
      http://www.biosolveit.de/SMARTStools/

    Parameters mirror the command-line options below.

    Output of running "SmartsViewer -h" for version 0.9.0
    -----------------------------------------------------
    SYNOPSIS:
            smartsviewer [OPTIONS]
    OPTIONS:
      -h           : Print this help text.
      -s <smarts>  : The input smarts for visualization. Either -s or -f have to be given.
      -f <file>    : A file containing the smarts. Either -s or -f have to be given.
      -o <outfile> : Prints the diagram to <outfile>
                     possible file formats: .pdf, .ps, .svg
      -d <w> <h>   : Dimension of the .svg output file. (100 <= w|h <= 1000)
      -p           : Set default parameter.
                   : Eight values have to be given, range and defaults:
                   : 1. Display options: 0-3 <0>
                   :    (0=Complete Visualization, 1= IDs, 2= Element symbols, 3=Structure Diagram-like)
                   : 2. Default bond options: 0-1 <0>
                   :    (0=Single bond, 1=Single or aromatic bond
                   : 3. Show Userlabels?: 0-1 <0>
                   :    (0=No, 1=Yes)
                   : 4. Trim-errorcheck?: 0-1 <0>
                   :    (0=Yes, 1=No)
                   : 5. Trim-simplification?: 0-1 <0>
                   :    (0=Yes, 1=No)
                   : 6. Trim-interpretation?: 0-1 <0>
                   :    (0=Yes, 1=No)
                   : 7. Show Legend?: 0-3 <0>
                   :    (0=No, 1=Dynamic legend, 2=Static Legend 3=Both)
                   : 8. Print SMARTS string into picture?: 0-1 <0>
                   :    (0=YES, 1=NO)
    """

    def __init__(self,
                 sv_root=None,
                 w=500, h=500,  # Dimension of the .svg output file. (100 <= w|h <= 1000)
                 display_style=0,  # (0=Complete Visualization,
                                   #  1= IDs, 2= Element symbols, 3=Structure Diagram-like)
                 also_aromatic_bonds=False,  # (0=Single bond, 1=Single or aromatic bond)
                 user_labels=False,  # (0=No, 1=Yes)
                 trim_errorcheck=True,  # (0=Yes, 1=No)
                 trim_simplification=True,  # (0=Yes, 1=No)
                 trim_interpretation=True,  # (0=Yes, 1=No)
                 legend_style=0,  # (0=No, 1=Dynamic legend, 2=Static Legend 3=Both)
                 show_smarts=True):  # (0=YES, 1=NO)
        super(SmartsViewerRunner, self).__init__()
        # Default root: a "thirdparty/smartsviewer" directory next to this file.
        self.sv_root = sv_root if sv_root is not None else \
            op.abspath(op.join(op.dirname(__file__), 'thirdparty', 'smartsviewer'))
        self.w = w
        self.h = h
        self.display_style = display_style
        self.also_aromatic = also_aromatic_bonds
        self.user_labels = user_labels
        self.trim_errorcheck = trim_errorcheck
        self.trim_simplification = trim_simplification
        self.trim_interpretation = trim_interpretation
        self.legend_style = legend_style
        self.show_smarts = show_smarts
        # Pre-built command-line prefix; depict() appends -s/-o per call.
        self.cl = op.join(self.sv_root, 'SmartsViewer -d %d %d -p %d %d %d %d %d %d %d %d' %
                          (self.w, self.h, self.display_style,
                           0 if not self.also_aromatic else 1,
                           0 if not self.user_labels else 1,
                           0 if self.trim_errorcheck else 1,
                           0 if self.trim_simplification else 1,
                           0 if self.trim_interpretation else 1,
                           self.legend_style,
                           0 if self.show_smarts else 1))

    def depict(self, smarts, dest_file):
        """
        Generates the image file for a smarts string using the object configuration.

        Parameters
        ----------
        smarts : string
            The smiles or smarts to depict

        dest_file : string
            The path to the file where the depiction will happen
            (.pdf, .ps and .svg are supported in v. 0.9.0)

        Returns
        -------
        dest_file, the path that was rendered to. (The subprocess return
        code and output are currently discarded.)
        """
        # NOTE(review): shell=True with interpolated strings is vulnerable to
        # shell injection if smarts/dest_file ever come from untrusted input.
        cl = self.cl + ' -s \"%s\" -o \"%s\"' % (smarts, dest_file)
        subprocess.call(cl, shell=True)  # TODO: eat the output
        return dest_file

    def depict_all(self, smartss, root, ext='.png', n_jobs=1):
        # Render each smarts to "<root>/<index><ext>"; n_jobs=None uses all CPUs.
        if n_jobs is None:
            n_jobs = cpu_count()
        # TODO: block all in n_jobs blocks and only create once the pool
        return Parallel(n_jobs=n_jobs)(delayed(_depict)(self, smarts, op.join(root, '%d%s' % (i, ext)))
                                       for i, smarts in enumerate(smartss))
if __name__ == '__main__':
    # Smoke test: render one SMARTS to the home directory, then the same
    # SMARTS twenty times in parallel and open the results with PIL.
    svr = SmartsViewerRunner(w=500, h=500, legend_style=3, show_smarts=False)
    svr.depict('CCCC#CC1=CC(=CC(=C1)C#CC2=CC(=C(C=C2C#CC(C)(C)C)C3OCCO3)C#CC(C)(C)C)C#CCCC',
               op.join(op.expanduser('~'), 'example-smartsviewer.png'))
    smartss = ['CCCC#CC1=CC(=CC(=C1)C#CC2=CC(=C(C=C2C#CC(C)(C)C)C3OCCO3)C#CC(C)(C)C)C#CCCC'] * 20
    print as_pil(svr.depict_all(smartss, op.expanduser('~'), n_jobs=20))
| bsd-3-clause |
kartikdhar/djangotest | virt1/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
    """Multi-byte charset prober for the EUC-KR (Korean) encoding."""

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # The state machine validates EUC-KR byte sequences; the distribution
        # analyser scores character-frequency statistics for Korean text.
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()

    def get_charset_name(self):
        return "EUC-KR"
| mit |
jamesleesaunders/python-xbee | xbee/tests/test_base.py | 1 | 5818 | #! /usr/bin/python
"""
test_base.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
Tests the XBeeBase superclass module for XBee API conformance.
"""
import unittest
from xbee.base import XBeeBase
from xbee.tests.Fake import Serial
class TestWriteToDevice(unittest.TestCase):
    """
    XBeeBase class should properly _write binary data in a valid API
    frame to a given serial device.
    """

    def test_write(self):
        """
        _write method should write the expected data to the serial
        device
        """
        device = Serial()
        xbee = XBeeBase(device)
        xbee._write(b'\x00')
        # Check resulting state of fake device
        result_frame = device.get_data_written()
        expected_frame = b'\x7E\x00\x01\x00\xFF'
        self.assertEqual(result_frame, expected_frame)

    def test_write_again(self):
        """
        _write method should write the expected data to the serial
        device
        """
        device = Serial()
        xbee = XBeeBase(device)
        xbee._write(b'\x00\x01\x02')
        # Check resulting state of fake device
        expected_frame = b'\x7E\x00\x03\x00\x01\x02\xFC'
        result_frame = device.get_data_written()
        self.assertEqual(result_frame, expected_frame)

    def test_write_escaped(self):
        """
        _write method should write the expected data to the serial
        device, escaping API-mode-2 control bytes
        """
        device = Serial()
        xbee = XBeeBase(device,escaped=True)
        xbee._write(b'\x7E\x01\x7D\x11\x13')
        # Check resulting state of fake device
        expected_frame = b'\x7E\x00\x05\x7D\x5E\x01\x7D\x5D\x7D\x31\x7D\x33\xDF'
        result_frame = device.get_data_written()
        self.assertEqual(result_frame, expected_frame)
class TestReadFromDevice(unittest.TestCase):
    """
    XBeeBase class should properly read and extract data from a valid
    API frame
    """

    def test_read(self):
        """
        _wait_for_frame should properly read a frame of data
        """
        device = Serial()
        device.set_read_data(b'\x7E\x00\x01\x00\xFF')
        xbee = XBeeBase(device)
        frame = xbee._wait_for_frame()
        self.assertEqual(frame.data, b'\x00')

    def test_read_invalid_followed_by_valid(self):
        """
        _wait_for_frame should skip invalid data
        """
        device = Serial()
        # First frame ends in \xFA (not the valid checksum \xFF seen in
        # test_read); only the second frame should be returned.
        device.set_read_data(b'\x7E\x00\x01\x00\xFA' + b'\x7E\x00\x01\x05\xFA')
        xbee = XBeeBase(device)
        frame = xbee._wait_for_frame()
        self.assertEqual(frame.data, b'\x05')

    def test_read_escaped(self):
        """
        _wait_for_frame should properly read a frame of data

        Verify that API mode 2 escaped bytes are read correctly
        """
        device = Serial()
        device.set_read_data(b'\x7E\x00\x04\x7D\x5E\x7D\x5D\x7D\x31\x7D\x33\xE0')
        xbee = XBeeBase(device,escaped=True)
        frame = xbee._wait_for_frame()
        self.assertEqual(frame.data, b'\x7E\x7D\x11\x13')
class TestNotImplementedFeatures(unittest.TestCase):
    """
    In order to properly use the XBeeBase class for most situations,
    it must be subclassed with the proper attributes defined. If
    this is not the case, then a NotImplementedError exception should be
    raised as appropriate.
    """

    def setUp(self):
        """
        Set up a base class XBeeBase object which does not have
        api_commands or api_responses defined
        """
        self.xbee = XBeeBase(None)

    def test_build_command(self):
        """
        _build_command should raise NotImplementedError
        """
        self.assertRaises(NotImplementedError, self.xbee._build_command, "at")

    def test_split_response(self):
        """
        _split_response should raise NotImplementedError
        """
        self.assertRaises(NotImplementedError, self.xbee._split_response, b"\x00")

    def test_shorthand(self):
        """
        Shorthand calls should raise NotImplementedError
        """
        try:
            self.xbee.at
        except NotImplementedError:
            pass
        else:
            self.fail("Shorthand call on XBeeBase base class should raise NotImplementedError")
class TestAsyncCallback(unittest.TestCase):
    """
    XBeeBase constructor should accept an optional callback function
    argument. When provided, this will put the module into a threaded
    mode, in which it will call the provided function with any API
    frame data received.

    As it would be very difficult to sanely test an asynchronous callback
    routine with a synchronous test process, proper callback behavior
    is not tested automatically at this time. Theoretically, the
    callback implementation logic is simple, but use it at your own risk.
    """

    def setUp(self):
        self.xbee = None
        self.serial = Serial()
        self.callback = lambda data: None
        self.error_callback = lambda data: None

    def tearDown(self):
        # Ensure proper thread shutdown before continuing
        self.xbee.halt()

    def test_provide_callback(self):
        """
        XBeeBase constructor should accept a callback function
        """
        self.xbee = XBeeBase(self.serial,
                             callback=self.callback,
                             error_callback=self.error_callback)
class TestInitialization(unittest.TestCase):
    """
    Ensures that XBeeBase objects are properly constructed
    """

    def setUp(self):
        self.base = XBeeBase(None)

    def test_thread_always_initialized(self):
        """
        Even when a callback method is not supplied to the XBeeBase
        constructor, it must be properly initialized as a
        threading.Thread object
        """
        self.assertFalse(self.base.is_alive())


if __name__ == '__main__':
    unittest.main()
| mit |
ishay2b/tensorflow | tensorflow/contrib/linalg/python/kernel_tests/linear_operator_test.py | 41 | 7997 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(123)
class LinearOperatorShape(linalg.LinearOperator):
  """LinearOperator that implements the methods ._shape and _shape_tensor."""

  def __init__(self,
               shape,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None):
    # Stub operator: only shape information is real; _matmul is deliberately
    # unimplemented since these tests never apply the operator.
    self._stored_shape = shape
    super(LinearOperatorShape, self).__init__(
        dtype=dtypes.float32,
        graph_parents=None,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square)

  def _shape(self):
    # Static shape, known at graph-construction time.
    return tensor_shape.TensorShape(self._stored_shape)

  def _shape_tensor(self):
    # Dynamic shape as an int32 Tensor, for tests of the *_tensor() methods.
    return constant_op.constant(self._stored_shape, dtype=dtypes.int32)

  def _matmul(self):
    raise NotImplementedError("Not needed for this test.")
class LinearOperatorMatmulSolve(linalg.LinearOperator):
  """LinearOperator that wraps a [batch] matrix and implements matmul/solve."""

  def __init__(self,
               matrix,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None):
    self._matrix = ops.convert_to_tensor(matrix, name="matrix")
    super(LinearOperatorMatmulSolve, self).__init__(
        dtype=self._matrix.dtype,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square)

  def _shape(self):
    # Delegate both static and dynamic shape to the wrapped matrix.
    return self._matrix.get_shape()

  def _shape_tensor(self):
    return array_ops.shape(self._matrix)

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    x = ops.convert_to_tensor(x, name="x")
    return math_ops.matmul(
        self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    rhs = ops.convert_to_tensor(rhs, name="rhs")
    # adjoint_arg is accepted for interface compatibility only.
    assert not adjoint_arg, "Not implemented for this test class."
    return linalg_ops.matrix_solve(self._matrix, rhs, adjoint=adjoint)
class LinearOperatorTest(test.TestCase):
  """Exercises shape plumbing, hint validation and default matvec/solvevec
  of the LinearOperator base class, via the two stub operators above."""

  def test_all_shape_properties_defined_by_the_one_property_shape(self):
    shape = (1, 2, 3, 4)
    operator = LinearOperatorShape(shape)
    # All derived shape properties must be consistent with `shape`.
    self.assertAllEqual(shape, operator.shape)
    self.assertAllEqual(4, operator.tensor_rank)
    self.assertAllEqual((1, 2), operator.batch_shape)
    self.assertAllEqual(4, operator.domain_dimension)
    self.assertAllEqual(3, operator.range_dimension)

  def test_all_shape_methods_defined_by_the_one_method_shape(self):
    with self.test_session():
      shape = (1, 2, 3, 4)
      operator = LinearOperatorShape(shape)
      # Same checks as above, but via the dynamic *_tensor() methods.
      self.assertAllEqual(shape, operator.shape_tensor().eval())
      self.assertAllEqual(4, operator.tensor_rank_tensor().eval())
      self.assertAllEqual((1, 2), operator.batch_shape_tensor().eval())
      self.assertAllEqual(4, operator.domain_dimension_tensor().eval())
      self.assertAllEqual(3, operator.range_dimension_tensor().eval())

  def test_is_x_properties(self):
    # Hints passed to the constructor must round-trip unchanged.
    operator = LinearOperatorShape(
        shape=(2, 2),
        is_non_singular=False,
        is_self_adjoint=True,
        is_positive_definite=False)
    self.assertFalse(operator.is_non_singular)
    self.assertTrue(operator.is_self_adjoint)
    self.assertFalse(operator.is_positive_definite)

  def test_generic_to_dense_method_non_square_matrix_static(self):
    matrix = rng.randn(2, 3, 4)
    operator = LinearOperatorMatmulSolve(matrix)
    with self.test_session():
      operator_dense = operator.to_dense()
      self.assertAllEqual((2, 3, 4), operator_dense.get_shape())
      self.assertAllClose(matrix, operator_dense.eval())

  def test_generic_to_dense_method_non_square_matrix_tensor(self):
    # Same as above but with a fully-unknown (placeholder) shape.
    matrix = rng.randn(2, 3, 4)
    matrix_ph = array_ops.placeholder(dtypes.float64)
    operator = LinearOperatorMatmulSolve(matrix_ph)
    with self.test_session():
      operator_dense = operator.to_dense()
      self.assertAllClose(
          matrix, operator_dense.eval(feed_dict={matrix_ph: matrix}))

  def test_matvec(self):
    matrix = [[1., 0], [0., 2.]]
    operator = LinearOperatorMatmulSolve(matrix)
    x = [1., 1.]
    with self.test_session():
      y = operator.matvec(x)
      self.assertAllEqual((2,), y.get_shape())
      self.assertAllClose([1., 2.], y.eval())

  def test_solvevec(self):
    matrix = [[1., 0], [0., 2.]]
    operator = LinearOperatorMatmulSolve(matrix)
    y = [1., 1.]
    with self.test_session():
      x = operator.solvevec(y)
      self.assertAllEqual((2,), x.get_shape())
      self.assertAllClose([1., 1 / 2.], x.eval())

  def test_is_square_set_to_true_for_square_static_shapes(self):
    operator = LinearOperatorShape(shape=(2, 4, 4))
    self.assertTrue(operator.is_square)

  def test_is_square_set_to_false_for_square_static_shapes(self):
    # NOTE(review): the shape (2, 3, 4) is NON-square, so this test name
    # looks misnamed ("..._for_non_square_..." was probably intended).
    operator = LinearOperatorShape(shape=(2, 3, 4))
    self.assertFalse(operator.is_square)

  def test_is_square_set_incorrectly_to_false_raises(self):
    with self.assertRaisesRegexp(ValueError, "but.*was square"):
      _ = LinearOperatorShape(shape=(2, 4, 4), is_square=False).is_square

  def test_is_square_set_inconsistent_with_other_hints_raises(self):
    # is_non_singular / is_positive_definite both imply squareness.
    with self.assertRaisesRegexp(ValueError, "is always square"):
      matrix = array_ops.placeholder(dtypes.float32)
      LinearOperatorMatmulSolve(matrix, is_non_singular=True, is_square=False)
    with self.assertRaisesRegexp(ValueError, "is always square"):
      matrix = array_ops.placeholder(dtypes.float32)
      LinearOperatorMatmulSolve(
          matrix, is_positive_definite=True, is_square=False)

  def test_non_square_operators_raise_on_determinant_and_solve(self):
    operator = LinearOperatorShape((2, 3))
    with self.assertRaisesRegexp(NotImplementedError, "not be square"):
      operator.determinant()
    with self.assertRaisesRegexp(NotImplementedError, "not be square"):
      operator.log_abs_determinant()
    with self.assertRaisesRegexp(NotImplementedError, "not be square"):
      operator.solve(rng.rand(2, 2))
    # NOTE(review): the block below duplicates the last check of
    # test_is_square_set_inconsistent_with_other_hints_raises and is
    # unrelated to this test's name -- likely a copy/paste leftover.
    with self.assertRaisesRegexp(ValueError, "is always square"):
      matrix = array_ops.placeholder(dtypes.float32)
      LinearOperatorMatmulSolve(
          matrix, is_positive_definite=True, is_square=False)

  def test_is_square_manual_set_works(self):
    matrix = array_ops.placeholder(dtypes.float32)
    # Default is None.
    operator = LinearOperatorMatmulSolve(matrix)
    self.assertEqual(None, operator.is_square)
    # Set to True
    operator = LinearOperatorMatmulSolve(matrix, is_square=True)
    self.assertTrue(operator.is_square)
if __name__ == "__main__":
test.main()
| apache-2.0 |
pando85/gourmet | gourmet/timer.py | 6 | 7647 | import gtk, gobject, time, gglobals, os
import xml.sax.saxutils
from sound import Player
from gtk_extras import cb_extras as cb
from gettext import gettext as _
class TimeSpinnerUI:
    """Drive three gtk spin buttons (hours/minutes/seconds) as a countdown.

    The countdown is polled once a second via gobject.timeout_add(); when
    it reaches zero, every hook registered with connect_timer_hook() runs.
    """
    def __init__ (self, hoursSpin, minutesSpin, secondsSpin):
        self.timer_hooks = []   # callables fired when the countdown finishes
        self.running = False    # False, or time.time() at (re)start
        self.hoursSpin = hoursSpin
        self.minutesSpin = minutesSpin
        self.secondsSpin = secondsSpin
        for s in [self.hoursSpin,self.minutesSpin,self.secondsSpin]:
            # This is set up to assure 2 digit entries... 00:00:00, etc.
            s.connect('changed',self.val_changed_cb)
            s.val_change_is_changing_entry = False
            s.set_width_chars(2)
            # Nudge the value so the initial display gets zero-padded.
            s.set_value(1)
            s.set_value(0)
    def set_time (self, s):
        """Display s (seconds, clamped at 0) across the three spinners."""
        s = int(s)
        if s < 0: s = 0
        # Floor division keeps integer semantics on both Python 2 and 3
        # (the old "/" relied on Python 2 integer division).
        self.hoursSpin.set_value(s // 3600)
        s = s % 3600
        self.minutesSpin.set_value(s // 60)
        self.secondsSpin.set_value(s % 60)
    def val_changed_cb (self, widg):
        # Rewrite the entry zero-padded; the per-widget guard flag stops
        # the set_text() below from re-entering this callback forever.
        if not widg.val_change_is_changing_entry:
            widg.val_change_is_changing_entry = True
            widg.set_text(self.pad_n(int(widg.get_value())))
            widg.val_change_is_changing_entry = False
    def pad_n (self, n):
        """Return n as a string left-padded with '0' to at least 2 digits.

        (Parameter renamed from ``int``, which shadowed the builtin.)
        """
        return str(n).zfill(2)
    def get_time (self):
        """Return the displayed time in seconds (a float, per gtk)."""
        return self.hoursSpin.get_value()*3600 + self.minutesSpin.get_value()*60 + self.secondsSpin.get_value()
    # Methods to run the timer...
    def tick (self):
        """Once-a-second poll; returning False cancels the gobject timeout."""
        if self.running:
            elapsed = time.time() - self.running
            t = self.start_time - elapsed
            self.set_time(t)
            if t<=0:
                self.finish_timer()
                return False
            else:
                return True
        else:
            return False
    def start_cb (self,*args):
        """Start counting down from the currently displayed time."""
        if not self.running and self.get_time():
            self.running = time.time()
            self.orig_time = self.start_time = self.get_time()
            gobject.timeout_add(1000,self.tick)
    def pause_cb (self, *args):
        """Toggle between paused and running."""
        if self.running:
            self.running = False
        else:
            self.running = time.time()
            self.start_time = self.get_time()
        if self.running: gobject.timeout_add(1000,self.tick)
    def reset_cb (self, *args):
        """Stop the countdown and restore the last started time.

        NOTE(review): orig_time is only set by start_cb, so resetting a
        timer that was never started raises AttributeError.
        """
        self.running = False
        self.set_time(self.orig_time)
    def connect_timer_hook (self, h, prepend=False):
        """Register callable h to run when the countdown finishes."""
        if prepend:
            self.timer_hooks = [h] + self.timer_hooks
        else:
            self.timer_hooks.append(h)
    def finish_timer (self):
        """Stop the countdown and fire all registered hooks in order."""
        self.running = False
        for h in self.timer_hooks: h()
from gtk_extras import dialog_extras as de
class TimerDialog:
    """Glade/GtkBuilder dialog wrapping a TimeSpinnerUI countdown, with an
    audible alarm (optionally repeating) when the timer finishes."""
    # Class-level defaults; instances shadow keep_annoying when the
    # repeating alarm is switched on.
    keep_annoying = False
    # Map of translated sound names (combo-box entries) to wav files.
    sounds_and_files = {
        _('Ringing Sound'):'phone.wav',
        _('Warning Sound'):'warning.wav',
        _('Error Sound'):'error.wav',
        }
    def __init__ (self):
        self.init_player()
        self.ui = gtk.Builder()
        self.ui.add_from_file(os.path.join(gglobals.uibase,'timerDialog.ui'))
        self.timer = TimeSpinnerUI(
            self.ui.get_object('hoursSpinButton'),
            self.ui.get_object('minutesSpinButton'),
            self.ui.get_object('secondsSpinButton')
            )
        self.timer.connect_timer_hook(self.timer_done_cb)
        # Expose frequently used widgets as attributes of the same name.
        for w in ['timerDialog','mainLabel',
                  'soundComboBox','repeatCheckButton',
                  'noteEntry','expander1','timerBox','resetTimerButton',
                  'timerFinishedLabel','keepAnnoyingLabel'
                  ]:
            setattr(self,w,self.ui.get_object(w))
        cb.set_model_from_list(self.soundComboBox,self.sounds_and_files.keys())
        cb.cb_set_active_text(self.soundComboBox,_('Ringing Sound'))
        self.ui.connect_signals(
            {'reset_cb':self.timer.reset_cb,
             'pause_cb':self.timer.pause_cb,
             'start_cb':self.timer.start_cb,
             'note_changed_cb':self.note_changed_cb,
             }
            )
        self.timerDialog.connect('response',self.response_cb)
        self.timerDialog.connect('close',self.close_cb)
        self.timerDialog.set_modal(False)
        self.note = ''
    def set_time (self, s):
        # Forward to the spinner UI (seconds).
        self.timer.set_time(s)
    def note_changed_cb (self, entry):
        # Mirror the note into the window title and header label.
        txt = entry.get_text()
        self.note = txt
        if txt: txt = _('Timer')+': '+txt
        else: txt = _('Timer')
        self.timerDialog.set_title(txt)
        self.mainLabel.set_markup('<span weight="bold" size="larger">' + xml.sax.saxutils.escape(txt) + '</span>')
    def init_player (self):
        self.player = Player()
    def play_tune (self):
        # Resolve the selected combo entry to its wav file and play it.
        sound_file = self.sounds_and_files[cb.cb_get_active_text(self.soundComboBox)]
        sound_file = os.path.join(gglobals.data_dir,'sound',sound_file)
        self.player.play_file(sound_file)
    def annoy_user (self):
        # gobject timeout callback: returning True repeats; the implicit
        # None return once keep_annoying is cleared cancels the timeout.
        if self.keep_annoying:
            self.play_tune()
            return True
    def timer_done_cb (self):
        # Countdown finished: flag urgency, sound the alarm (repeating if
        # requested) and swap the dialog into its "finished" layout.
        if hasattr(self.timerDialog,'set_urgency_hint'): self.timerDialog.set_urgency_hint(True)
        self.play_tune()
        if self.repeatCheckButton.get_active():
            self.keep_annoying = True
            gobject.timeout_add(3000,self.annoy_user)
        self.timerBox.hide()
        self.expander1.hide()
        self.timerFinishedLabel.show()
        self.resetTimerButton.show()
        if self.keep_annoying: self.keepAnnoyingLabel.show()
    def stop_annoying (self):
        self.keep_annoying = False
        if hasattr(self.timerDialog,'set_urgency_hint'): self.timerDialog.set_urgency_hint(False)
    def refresh (self, *args):
        # Return the dialog from the "finished" layout to the timer layout.
        self.stop_annoying()
        self.timer.reset_cb()
        self.timerFinishedLabel.hide()
        self.keepAnnoyingLabel.hide()
        self.timerBox.show()
        self.resetTimerButton.hide()
        self.expander1.show()
    def response_cb (self, dialog, resp):
        if resp == gtk.RESPONSE_APPLY:
            self.refresh()
        else:
            self.close_cb()
    def close_cb (self,*args):
        # If a timer is running, ask before killing it; otherwise (or on
        # confirmation) destroy the dialog. If the user keeps timing, the
        # dialog is hidden and re-shown when the timer fires.
        self.stop_annoying()
        if (not self.timer.running) or de.getBoolean(label=_('Stop timer?'),
                             sublabel=_("You've requested to close a window with an active timer. You can stop the timer, or you can just close the window. If you close the window, it will reappear when your timer goes off."),
                             custom_yes=_('Stop _timer'),custom_no=_('_Keep timing')
                             ):
            self.timer.running = False
            self.timerDialog.hide()
            self.timerDialog.destroy()
        else:
            self.timer.connect_timer_hook(self.timerDialog.show,1)
            self.timerDialog.hide()
    def run (self): self.timerDialog.run()
    def show (self): self.timerDialog.show()
def show_timer (time=600,
                note=''):
    """Pop up a non-blocking TimerDialog preset to `time` seconds.

    A non-empty `note` is placed in the dialog's note entry.
    """
    dialog = TimerDialog()
    dialog.set_time(time)
    if note:
        dialog.noteEntry.set_text(note)
    dialog.show()
if __name__ == '__main__':
w = gtk.Window()
b = gtk.Button('Show timer')
b.connect('clicked',lambda *args: show_timer())
w.add(b)
w.connect('delete-event',lambda *args: gtk.main_quit())
w.show_all()
gtk.main()
| gpl-2.0 |
karelin/polymode | Polymode/TriDiagonalSolver.py | 4 | 17195 | # _*_ coding=utf-8 _*_
#
#---------------------------------------------------------------------------------
#Copyright © 2009 Andrew Docherty
#
#This program is part of Polymode.
#Polymode is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------------
"""
TriDiagonalSolver.py
===========
Implements a dense block-tridiagonal solver
"""
from __future__ import division
import logging, datetime
from numpy import inf, dtype, array, zeros, append, conj
# Names below are used by the solver classes further down but were never
# imported (this module has no numpy star-import).
from numpy import absolute, asarray, linalg, ones, real, size
from . import Material, Waveguide, Equation, Modes
from .Solver import Solve
from .mathlink import blockarray,eigensolver,timer
from .difflounge import finitedifference, boundary
#Import eigensolver
use_arpack=True
if use_arpack:
#ARPACK interface - this keeps changing in scipy
from scipy.sparse.linalg.eigen.arpack import eigen
from scipy.sparse.linalg.interface import LinearOperator
else:
from .mathlink.eigensolver import eigs
#Try importing external block LU solver
try:
from .mathlink import ublocklu
except ImportError:
logging.debug("Warning: Couldn't find C++ block lu solver, using python version")
ublocklu = None
#**************************************************************************************
# Linear operator routines for eigensolver
#**************************************************************************************
LinearOperator = object
class ShiftInvertPyBlock(LinearOperator):
    """Shift-invert operator (A - s*I)^-1 backed by the pure-python
    block tridiagonal LU factorization in mathlink.blockarray."""
    def __init__(self, overwrite=False):
        self.overwrite = overwrite
        self.transpose = False
        self.lu = blockarray.TriBlockLU(overwrite=overwrite)
    def set_shift(self, Ain, shift=0.0):
        """Factorize A - shift*I, caching shape/dtype for the eigensolver."""
        self.lu(Ain, shift)
        self.shift = shift
        self.shape = (Ain.shape[0], Ain.shape[0])
        self.dtype = Ain.dtype
    def eigenvalue_transform(self, hvals):
        """Map eigenvalues of (A - sI)^-1 back to eigenvalues of A."""
        return 1./hvals + self.shift
    def update(self, Aupdate, uprows=1):
        """Refresh the factorization from uprows rows of Aupdate
        (see TriBlockLU.update for the exact semantics)."""
        self.lu.update(Aupdate, uprows=uprows)
    def matvec(self, x):
        """Apply (A - sI)^-1 to x."""
        return self.lu.solve(x)
    def rmatvec(self, x):
        """Apply the conjugate transpose of (A - sI)^-1 to x."""
        return conj(self.lu.solve_transpose(conj(x)))
#**************************************************************************************
class ShiftInvertCBlock(LinearOperator):
    """Shift-invert operator (A - s*I)^-1 backed by the compiled
    ublocklu block LU solver."""
    def __init__(self, overwrite=False):
        self.lu = ublocklu.cblocklu()
        self.lumatrix = None
        # If overwrite is set, set_shift factorizes Ain in place.
        self.overwrite = overwrite
    def set_shift(self, Ain, shift=0.0):
        "Setup LU factorization A-sI = LU with a shift s"
        self.yshape = (Ain.mshape[0], Ain.blockshape[0])
        self.shift = shift
        self.shape = (Ain.shape[0], Ain.shape[0])
        self.dtype = Ain.dtype
        if self.overwrite:
            self.lu(Ain, Ain.blockshape[0], shift)
        else:
            # Drop any previous copy before allocating a new one, then
            # factorize the copy so the caller's matrix is untouched.
            self.lumatrix = None
            self.lumatrix = Ain.copy()
            self.lu(self.lumatrix, Ain.blockshape[0], shift)
    def eigenvalue_transform(self, hvals):
        # Map eigenvalues of (A - sI)^-1 back to eigenvalues of A.
        return 1./hvals + self.shift
    def update(self, Aupdate, uprows=1):
        "Update LU factorization A-sI = LU using last uprows of Aupdate"
        self.lu.update(Aupdate, uprows)
    def matvec(self, x):
        # cblocklu solves in place, so work on a copy of x.
        y=x.copy()
        self.lu.solve(y)
        return y
    def rmatvec(self, x):
        # Conjugate-transpose solve: conj, transpose-solve in place, conj.
        y=conj(x)
        self.lu.solve_transpose(y)
        return conj(y)
if ublocklu is None:
ShiftInvertBlock = ShiftInvertPyBlock
else:
ShiftInvertBlock = ShiftInvertCBlock
#**************************************************************************************
class TriDiBlockSolve(Solve):
    '''
    Main solve class for VWE/SWE. Construct a solver object with
    Solve(shape_in, wg, store=True, compress_to_size=None, discard_vectors=False,
        mode_calculations=False, label={}, dtype=complex128):
    Where:
    shape (Nr,Naz): The number of radial and Azimuthal nodes to use
    wg: The waveguide
    compress_to_size: size to compress to or None to not store modes
    '''
    overwrite = False #Overwrite matrix with LU decomp
    stop_test = None #Give a function that enables a premature stop
    def setup(self, tolerance=1e-10, bandwidth=3, bcwidth=3, xbc=0, \
        fast_convolve=0, eqtype='vector', add_unconverged=False, force_electric=True):
        '''
        Create new Vector Wave Equation with custom parameters:
        bandwidth: Finite difference matrix bandwidth
        bcwidth: Width of stencil used for the boundary conditions
                        (must - currently - be less than matrix bandwidth)
        xbc: location of expansion for boundary condition stencil
        eqtype: either vector or scalar
        '''
        coord = self.wg.get_coord(self.base_shape, border=0)
        #Boundary conditions are Neumann for m+m0==0 and Dirichlet otherwise
        if coord.rmin==0:
            bcl = boundary.BC_Switch(coord.rv, bandwidth=bcwidth, xbc=xbc, dtype=self.dtype)
        else:
            bcl = boundary.BC_Mixed(coord.rv, bandwidth=bcwidth, xbc=xbc, dtype=self.dtype)
        bcr = boundary.BC_Mixed(coord.rv, bandwidth=bcwidth, xbc=xbc, dtype=self.dtype)
        #Setup default finite differences
        fd_diff = finitedifference.DifferenceMatrix(2, bandwidth=bandwidth, \
            X=coord.rv, bcr=bcr, bcl=bcl, dtype=self.dtype)
        fd_diff_ip = finitedifference.DifferenceMatrix(2, bandwidth=bandwidth, \
            X=coord.rv, bcr=bcr, bcl=bcl, dtype=self.dtype)
        fd_jac = finitedifference.JacobianMatrix(2, bandwidth=bandwidth, \
            X=coord.rv, bcr=bcr, bcl=bcl, dtype=self.dtype)
        if eqtype=='vector':
            # NOTE(review): 'dtype' here passes numpy's `dtype` *class*
            # (imported at module level), not self.dtype -- confirm which
            # was intended before changing.
            eqkwargs = {'fast_convolve':fast_convolve, 'dtype':dtype}
            self.equation = Equation.VectorWaveEquation(fd_diff, **eqkwargs)
            self.equationip = Equation.VectorWaveEquation(fd_diff_ip, **eqkwargs)
            self.jacobian = Equation.VectorWaveJacobian(fd_jac, **eqkwargs)
            self.mode_class = Modes.VectorMode
        elif eqtype=='scalar':
            # Call form of raise: valid on both Python 2 and 3 (the old
            # "raise X, msg" comma form is a SyntaxError under Python 3).
            raise NotImplementedError("Scalar Wave Equation not implemented")
        else:
            logging.error("Equation type not recognised")
        #Misc solver parameters
        self.tolerance = tolerance
        #Save unconverged modes if true, else discard them
        self.add_if_unconverged = add_unconverged
        self.force_electric_calculation = force_electric
    # +-----------------------------------------------------------------------+
    # | Pickling marshalling functions
    # | We don't pickle the matrix data to give compact storage
    # +-----------------------------------------------------------------------+
    def __getstate__(self):
        "Pickle all needed data, ignore cached data"
        state = self.__dict__.copy()
        # matrix and si (shift-invert LU) are large and reconstructible.
        ignore_list = ["matrix", "si"]
        for ignore in ignore_list:
            if ignore in state:
                state[ignore] = None
        return state
    def __setstate__(self,state):
        "Restore pickled data"
        self.__dict__.update(state)
    # +-----------------------------------------------------------------------+
    # | Matrix creation functions
    # | These are unnessesary for matrix free methods ..
    # +-----------------------------------------------------------------------+
    def generate(self, leftbc=0, rightbc=0):
        '''
        Generate VWE Matrix
        leftbc, rightbc: only update the specified boundary conditions
        '''
        Nr, Naz = self.base_shape
        bw = self.equation.diff.bandwidth
        pmax = self.equation.pmax
        M = self.matrix.blockview()
        #Extents over which the boundary condition changes the lines
        bcstart,bcend = self.equation.diff.bc_extents()
        #Only update or full construction
        if not leftbc and not rightbc:
            rows = range(Nr)
        else:
            # Only regenerate rows touched by the requested boundaries.
            rows=array([])
            if leftbc:
                rows = append(rows, range(bcstart))
            if rightbc:
                rows = append(rows, range(Nr-bcend, Nr))
        # Block construction of matrix: probe the equation operator with
        # unit vectors, one (azimuthal, polarization) component at a time.
        vec_row = zeros((1,Naz,pmax), dtype=self.dtype)
        for ii in rows:
            blockstart = max(bw//2 - ii,0)
            for kk in range(pmax*Naz):
                vec_row.flat[kk] = 1
                y = self.equation.construct(vec_row, ii)
                M[ii,blockstart:blockstart+y.shape[0],:,kk] = \
                    y.reshape((y.shape[0], 2*Naz))
                vec_row.flat[kk] = 0
    def create(self, generate=True):
        '''
        Create the initial equation matrix
        '''
        t = timer.timer(); t.start()
        Nr, Naz = self.base_shape
        bw = self.equation.diff.bandwidth
        blockshape = (self.equation.pmax*Naz,)*2
        self.matrix = blockarray.BlockArray((Nr,bw), blockshape=blockshape, dtype=self.dtype)
        #Final setup of the equation objects
        self.equation.setup(self.base_shape,self.wg,self.m0,self.wl)
        self.equationip.setup(self.base_shape,self.wg,self.m0,self.wl)
        self.jacobian.setup(self.base_shape,self.wg,self.m0,self.wl)
        #This is a nasty hack!
        self.equationip.diff.generate(leftbc=1, rightbc=1, nodefault=1)
        if generate: self.generate()
        logging.debug("Matrix generation done in %.4gs" % t.lap())
    def update_lambda(self, ev, update=1):
        "Set the boundary eigenvalue and update those rows affected"
        self.equation.set_lambda(ev)
        self.jacobian.set_lambda(ev)
        self.generate(leftbc=update, rightbc=update)
    def residue(self, x, l=None):
        """Maximum normalized residual of mode x at eigenvalue l,
        over whichever of the right/left eigenvectors are present."""
        if l is None: l = x.evalue
        res = resl = 0
        self.equation.set_lambda(l)
        if x.right is not None:
            res = absolute(self.equation.matvec(x.right)-l*x.right).max() \
                /linalg.norm(x.right)
        if x.left is not None:
            resl = absolute(self.equation.rmatvec(x.left)-conj(l)*x.left).max() \
                /linalg.norm(x.left)
        return max(res, resl)
    def get_data(self):
        return self.modes
    def clear_data(self):
        self.modes = []
    def _clean_up_temporary_data(self):
        """
        Remove temporary matrices
        """
        self.matrix = None
        self.si = None
class FixedPointSolver(TriDiBlockSolve):
    """
    Find modes with the real part of the effective index in the given range
    Run with the following parameters:
    m0, wavelength, neffbracket, number=10
    """
    def __init__(self, *args, **kwargs):
        Solve.__init__(self, *args, **kwargs)
    def setup(self, **kwargs):
        # Defaults for the fixed-point (ABC) iteration.
        tolerance = 1e-8
        iterations = 10
        if 'tolerance' in kwargs:
            # NOTE(review): 'tolerance' is read but NOT popped, so unlike
            # 'iterations' it is also forwarded to Solve.setup -- confirm
            # whether that is intentional.
            tolerance = kwargs['tolerance']
        if 'iterations' in kwargs:
            iterations = kwargs.pop('iterations')
        #Iterative parameters
        self.abc_iterations = iterations
        self.abc_convergence = tolerance
        Solve.setup(self, **kwargs)
    def calculate_one(self, evsearch, searchnum):
        """
        Calculate single mode using fixed point iterations
        calculate_one(evsearch, searchnum)
        """
        m0 = self.m0; k0 = self.k0
        wl = self.wl
        bcstart, bcend = self.equation.diff.bc_extents()
        coord = self.wg.get_coord(self.base_shape, border=1)
        #Create new mode
        mode = self.mode_class(coord=coord, symmetry=self.wg.symmetry, m0=m0, wl=wl, evalue=evsearch, wg=self.wg)
        mode.right = ones(mode.shape, dtype=self.dtype)
        mode.discard_vectors = self.discard_vectors
        residue = inf; numit=0; evtol=1e-10
        # Fixed-point loop: refactorize at the current eigenvalue, solve
        # the linearized eigenproblem, track the closest eigenvalue.
        while residue>self.abc_convergence and numit<self.abc_iterations:
            numit+=1
            #Update equation with new BCs and update LU decomp
            self.update_lambda(mode.evalue)
            if self.equation.coord.rmin==0:
                # Only boundary rows changed; cheap LU update suffices.
                self.si.update(self.matrix, uprows=bcend)
            else:
                self.si.set_shift(self.matrix, complex(mode.evalue))
            #Solve linear eigenproblem
            if use_arpack:
                evals, revecs = eigen(self.si, k=searchnum, \
                        which='LM', return_eigenvectors=True)
                evals = self.si.eigenvalue_transform(evals)
            else:
                evals, revecs = eigs(self.si, searchnum, tol=evtol)
                revecs = revecs.T
            #Locate closest eigenvalue
            itrack = absolute(evals-mode.evalue).argmin()
            mode.evalue = evals[itrack]
            mode.right = asarray(revecs)[:,itrack]
            #Residue and convergence
            evconverge = abs(mode.evalue - evals[itrack])
            residue = mode.residue = self.residue(mode)
            mode.convergence += [ residue ]
            mode.track += [ mode.evalue ]
            logging.debug( "[%d] neff: %s conv: %.3g, res:%.3g" % (numit, mode.neff, evconverge, mode.residue) )
        mode.iterations = numit
        #Discard vectors at this stage, to free up memory if we can
        mode.compress()
        return mode, evals
class CenterSolver(FixedPointSolver):
    """
    Find modes with the real part of the effective index in the given range
    Note overwrite must be false
    Run with the following parameters:
    m0, wl, neffbracket, number=10
    """
    #Unlike all other solvers overwrite should _default_ to false
    #as the creation of the matrix currently takes so long
    overwrite = False
    def initialize(self, m0, wl, neffrange=None, number=10):
        """
        Setup a solver run with paramters:
        m0, wl, neffs, searchnumber = 1
        """
        FixedPointSolver.initialize(self, m0, wl, neffrange, number)
        # Only filter eigenvalues by bracket when an interval was given.
        self.ignore_outside_interval = size(self.bracket)>1
        #Solver specific parameters
        self.searchnumber = 3
        self.numbersolved = 0
    def isfinished(self):
        "Check if the solver has found enough modes or is outside the bracket"
        if self.is_finalized: return True
        number_test = (len(self.modes)>0)
        #custom_test = self.user_stop_test(self.modes)
        return number_test
    def calculate(self, number=inf):
        # Two-phase solve: one coarse shift-invert eigensolve around the
        # approximate eigenvalue, then fixed-point refinement of each hit.
        m0=self.m0; k0=self.k0
        number = min(number, self.totalnumber)
        logging.info( "Center solver. Finding modes, m0=%d, wl=%.3g" % (m0, self.wl) )
        #Create new matrix if there is no existing one
        if self.si is None:
            self.si = ShiftInvertBlock(overwrite=self.overwrite)
            self.create()
        #Time mode solve
        tick = timer.timer()
        tick.start()
        #Array to store solved modes
        evrange = array(self.bracket)**2*self.k0**2
        #Update equation with new BCs and update LU decomp
        self.update_lambda(self.evapprox, not self.overwrite)
        self.si.set_shift(self.matrix, self.evapprox+0j)
        #Find modes of linearized system
        if use_arpack:
            cev = eigen(self.si, k=self.totalnumber, which='LM', return_eigenvectors=False)
            cev = self.si.eigenvalue_transform(cev)
        else:
            cev, cvecs = eigs(self.si, self.totalnumber, tol=1e-6)
        #Filter cev within range
        if self.ignore_outside_interval:
            # NOTE(review): under Python 3, filter() returns an iterator and
            # the len()/indexing below would fail -- this code is Python 2.
            cev = filter(lambda x: min(evrange)<=real(x)<=max(evrange), cev)
        #Refine modes
        for ii in range(len(cev)):
            evsearch = cev[ii]
            #If the matrix is overwritten we must recreate it
            if self.overwrite: self.generate()
            self.si.set_shift(self.matrix, evsearch+0j)
            #Calculate next mode
            mode, evals = self.calculate_one(evsearch, self.searchnumber)
            #Add mode to list
            if (mode.residue<self.abc_convergence) or self.add_if_unconverged:
                self.modes += [ mode ]
            avtime = tick.lap()/(ii+1)
            logging.info( "Mode #%d [%d/%.3gs], neff=%s, res: %.2e" % \
                (self.numbersolved, mode.iterations, avtime, mode.neff, mode.residue) )
        self.add_time(tick.lap())
        #Clean up if calculation is finished!
        if self.isfinished(): self.finalize()
        return self.modes
DefaultSolver = CenterSolver
| gpl-3.0 |
boada/planckClusters | MOSAICpipe/bpz-1.99.3/bpz.py | 1 | 52171 | """
bpz: Bayesian Photo-Z estimation
Reference: Benitez 2000, ApJ, 536, p.571
Usage:
python bpz.py catalog.cat
Needs a catalog.columns file which describes the contents of catalog.cat
"""
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import map
from builtins import input
from builtins import range
from past.utils import old_div
from useful import *
rolex = watch()
rolex.set()
#from Numeric import *
from numpy import *
from bpz_tools import *
from string import *
import os, glob, sys
import time
import pickle
import shelve
from coetools import pause, params_cl
class Printer():
    """Write a value to stdout in place, overwriting the current line."""
    def __init__(self, data):
        # "\r" returns the cursor to column 0; the ANSI sequence "\x1b[K"
        # erases to end of line, so successive Printer() calls overwrite.
        line = "\r\x1b[K" + data.__str__()
        sys.stdout.write(line)
        sys.stdout.flush()
def seglist(vals, mask=None):
    """Split vals into lists of consecutive runs where mask is true.

    When mask is None it defaults to elementwise ``val > 0``. The original
    compared ``mask == None``, which is both unidiomatic and hazardous if
    mask is a numpy array (elementwise comparison); ``is None`` is used
    instead. Behavior is otherwise unchanged.
    """
    if mask is None:
        mask = [v > 0 for v in vals]
    lists = []
    current = []
    # Accumulate a run while mask holds; flush it whenever mask drops.
    for i in range(len(vals)):
        if mask[i]:
            current.append(vals[i])
        elif current:
            lists.append(current)
            current = []
    if current:
        lists.append(current)
    return lists
# Initialization and definitions#
#Current directory
homedir = os.getcwd()
#Parameter definition
pars = params()
pars.d = {
'SPECTRA': 'CWWSB4.list', # template list
#'PRIOR': 'hdfn_SB', # prior name
'PRIOR': 'hdfn_gen', # prior name
'NTYPES':
None, # Number of Elliptical, Spiral, and Starburst/Irregular templates Default: 1,2,n-3
'DZ': 0.01, # redshift resolution
'ZMIN': 0.01, # minimum redshift
'ZMAX': 10., # maximum redshift
'MAG': 'yes', # Data in magnitudes?
'MIN_MAGERR': 0.001, # minimum magnitude uncertainty --DC
'ODDS': 0.95, # Odds threshold: affects confidence limits definition
'INTERP':
0, # Number of interpolated templates between each of the original ones
'EXCLUDE': 'none', # Filters to be excluded from the estimation
'NEW_AB': 'no', # If yes, generate new AB files even if they already exist
'CHECK':
'yes', # Perform some checks, compare observed colors with templates, etc.
'VERBOSE': 'yes', # Print estimated redshifts to the standard output
'PROBS':
'no', # Save all the galaxy probability distributions (it will create a very large file)
'PROBS2':
'no', # Save all the galaxy probability distributions P(z,t) (but not priors) -- Compact
'PROBS_LITE': 'yes', # Save only the final probability distribution
'GET_Z': 'yes', # Actually obtain photo-z
'ONLY_TYPE': 'no', # Use spectroscopic redshifts instead of photo-z
'MADAU': 'yes', #Apply Madau correction to spectra
'Z_THR': 0, #Integrate probability for z>z_thr
'COLOR': 'no', #Use colors instead of fluxes
'PLOTS': 'no', #Don't produce plots
'INTERACTIVE': 'yes', #Don't query the user
'PHOTO_ERRORS':
'no', #Define the confidence interval using only the photometric errors
'MIN_RMS':
0.05, #"Intrinsic" photo-z rms in dz /(1+z) (Change to 0.05 for templates from Benitez et al. 2004
'N_PEAKS': 1,
'MERGE_PEAKS': 'no',
'CONVOLVE_P': 'yes',
'P_MIN': 1e-2,
'SED_DIR': sed_dir,
'AB_DIR': ab_dir,
'FILTER_DIR': fil_dir,
'DELTA_M_0': 0.,
'ZP_OFFSETS': 0.,
'ZC': None,
'FC': None,
"ADD_SPEC_PROB": None,
"ADD_CONTINUOUS_PROB": None,
"NMAX": None # Useful for testing
}
if pars.d['PLOTS'] == 'no': plots = 0
if plots:
# If pylab installed show plots
plots = 'pylab'
try:
import matplotlib
matplotlib.use('TkAgg')
from pylab import *
# from coeplot2a import *
plot([1])
title('KILL THIS WINDOW!')
show()
ioff()
except:
try:
from biggles import *
plots = 'biggles'
except:
plots = 0
#Define the default values of the parameters
pars.d['INPUT'] = sys.argv[1] # catalog with the photometry
obs_file = pars.d['INPUT']
root = os.path.splitext(pars.d['INPUT'])[0]
pars.d[
'COLUMNS'] = root + '.columns' # column information for the input catalog
pars.d['OUTPUT'] = root + '.bpz' # output
nargs = len(sys.argv)
ipar = 2
if nargs > 2: #Check for parameter file and update parameters
if sys.argv[2] == '-P':
pars.fromfile(sys.argv[3])
ipar = 4
# Update the parameters using command line additions
#pars.fromcommandline(sys.argv[ipar:])
#for key in pars.d:
# print key, pars.d[key]
#pause()
pars.d.update(
params_cl()) # allows for flag only (no value after), e.g., -CHECK
def updateblank(var, ext):
    """Default pars.d[var] to '<root>.<ext>' when it is None or 'yes'."""
    global pars
    current = pars.d[var]
    if current is None or current == 'yes':
        pars.d[var] = root + '.' + ext
# Derive default names for the optional diagnostic/probability outputs.
updateblank('CHECK', 'flux_comparison')
updateblank('PROBS_LITE', 'probs')
updateblank('PROBS', 'full_probs')
updateblank('PROBS2', 'chisq')
#if pars.d['CHECK'] in [None, 'yes']:
#    pars.d['CHECK'] = root+'.flux_comparison'
#This allows to change the auxiliary directories used by BPZ
if pars.d['SED_DIR'] != sed_dir:
    print("Changing sed_dir to ", pars.d['SED_DIR'])
    sed_dir = pars.d['SED_DIR']
    if sed_dir[-1] != '/': sed_dir += '/'
if pars.d['AB_DIR'] != ab_dir:
    print("Changing ab_dir to ", pars.d['AB_DIR'])
    ab_dir = pars.d['AB_DIR']
    if ab_dir[-1] != '/': ab_dir += '/'
if pars.d['FILTER_DIR'] != fil_dir:
    print("Changing fil_dir to ", pars.d['FILTER_DIR'])
    fil_dir = pars.d['FILTER_DIR']
    if fil_dir[-1] != '/': fil_dir += '/'
#Better safe than sorry
# Refuse to run if any output name would overwrite the input catalog
# or the .columns description file.
if pars.d['OUTPUT'] == obs_file or pars.d['PROBS'] == obs_file or pars.d[
        'PROBS2'] == obs_file or pars.d['PROBS_LITE'] == obs_file:
    print("This would delete the input file!")
    sys.exit()
if pars.d['OUTPUT'] == pars.d['COLUMNS'] or pars.d['PROBS_LITE'] == pars.d[
        'COLUMNS'] or pars.d['PROBS'] == pars.d['COLUMNS']:
    print("This would delete the .columns file!")
    sys.exit()
#Assign the intrinsin rms
if pars.d['SPECTRA'] == 'CWWSB.list':
    print('Setting the intrinsic rms to 0.067(1+z)')
    pars.d['MIN_RMS'] = 0.067
pars.d['MIN_RMS'] = float(pars.d['MIN_RMS'])
pars.d['MIN_MAGERR'] = float(pars.d['MIN_MAGERR'])
if pars.d['INTERACTIVE'] == 'no': interactive = 0
else: interactive = 1
# Optionally echo the full parameter set before running.
if pars.d['VERBOSE'] == 'yes':
    print("Current parameters")
    view_keys(pars.d)
pars.d['N_PEAKS'] = int(pars.d['N_PEAKS'])
# ADD_SPEC_PROB: optional file with external (spectroscopic) redshift
# probabilities; the first ns/2 columns are redshifts, the last ns/2 the
# associated probabilities, multiplied into the BPZ p(z) per object.
if pars.d["ADD_SPEC_PROB"] is not None:
    specprob = 1
    specfile = pars.d["ADD_SPEC_PROB"]
    spec = get_2Darray(specfile)
    ns = spec.shape[1]
    if old_div(ns, 2) != (old_div(ns, 2.)):
        print("Number of columns in SPEC_PROB is odd")
        sys.exit()
    z_spec = spec[:, :old_div(ns, 2)]
    p_spec = spec[:, old_div(ns, 2):]
    # Write output file header
    header = "#ID "
    # BUGFIX: use floor division -- under Python 3 true division
    # ns / 2 is a float, and str * float raises TypeError.
    header += ns // 2 * " z_spec%i"
    header += ns // 2 * " p_spec%i"
    header += "\n"
    header = header % tuple(list(range(old_div(ns, 2))) + list(range(old_div(
        ns, 2))))
    specout = open(specfile.split()[0] + ".p_spec", "w")
    specout.write(header)
else:
    specprob = 0
pars.d['DELTA_M_0'] = float(pars.d['DELTA_M_0'])
#Some misc. initialization info useful for the .columns file
#nofilters=['M_0','OTHER','ID','Z_S','X','Y']
# Column names in the .columns file that are NOT photometric filters.
nofilters = ['M_0', 'OTHER', 'ID', 'Z_S']
#Numerical codes for nondetection, etc. in the photometric catalog
unobs = -99.  #Objects not observed
undet = 99.  #Objects not detected
#Define the z-grid on which likelihoods/priors are evaluated
zmin = float(pars.d['ZMIN'])
zmax = float(pars.d['ZMAX'])
# BUGFIX: raising a bare string is a TypeError in Python 3; raise a
# proper exception when the redshift limits are inverted.
if zmin > zmax: raise ValueError('zmin < zmax !')
dz = float(pars.d['DZ'])
linear = 1
if linear:
    # Uniform grid in z with step dz (inclusive of zmax).
    z = arange(zmin, zmax + dz, dz)
else:
    # Alternative (disabled) grid with step growing as dz*(1+z).
    if zmax != 0.:
        zi = zmin
        z = []
        while zi <= zmax:
            z.append(zi)
            zi = zi + dz * (1. + zi)
        z = array(z)
    else:
        z = array([0.])
#Now check the contents of the FILTERS,SED and A diBrectories
#Get the filters in stock
# Strip directory and the '.res' extension to get bare filter names.
filters_db = []
filters_db = glob.glob(fil_dir + '*.res')
for i in range(len(filters_db)):
    filters_db[i] = os.path.basename(filters_db[i])
    filters_db[i] = filters_db[i][:-4]
#Get the SEDs in stock
sed_db = []
sed_db = glob.glob(sed_dir + '*.sed')
for i in range(len(sed_db)):
    sed_db[i] = os.path.basename(sed_db[i])
    sed_db[i] = sed_db[i][:-4]
#Get the ABflux files in stock
ab_db = []
ab_db = glob.glob(ab_dir + '*.AB')
for i in range(len(ab_db)):
    ab_db[i] = os.path.basename(ab_db[i])
    ab_db[i] = ab_db[i][:-3]
#Get a list with the filter names and check whether they are in stock
col_file = pars.d['COLUMNS']
filters = get_str(col_file, 0)
# Drop non-filter columns (IDs, magnitudes, spec-z) and user exclusions.
for cosa in nofilters:
    if filters.count(cosa): filters.remove(cosa)
if pars.d['EXCLUDE'] != 'none':
    if type(pars.d['EXCLUDE']) == type(' '):
        pars.d['EXCLUDE'] = [pars.d['EXCLUDE']]
    for cosa in pars.d['EXCLUDE']:
        if filters.count(cosa): filters.remove(cosa)
# Abort on the first filter missing from the filter database.
for filter in filters:
    if filter[-4:] == '.res': filter = filter[:-4]
    if filter not in filters_db:
        print('filter ', filter, 'not in database at', fil_dir, ':')
        if ask('Print filters in database?'):
            for line in filters_db:
                print(line)
        sys.exit()
#Get a list with the spectrum names and check whether they're in stock
#Look for the list in the home directory first,
#if it's not there, look in the SED directory
spectra_file = os.path.join(homedir, pars.d['SPECTRA'])
if not os.path.exists(spectra_file):
    spectra_file = os.path.join(sed_dir, pars.d['SPECTRA'])
spectra = get_str(spectra_file, 0)
for i in range(len(spectra)):
    if spectra[i][-4:] == '.sed': spectra[i] = spectra[i][:-4]
# Problem dimensions: nf filters, nt templates, nz redshift bins.
nf = len(filters)
nt = len(spectra)
nz = len(z)
#Get the model fluxes
# f_mod[z, template, filter]: model AB flux of each template observed
# through each filter at each grid redshift.
f_mod = zeros((nz, nt, nf)) * 0.
abfiles = []
for it in range(nt):
    for jf in range(nf):
        if filters[jf][-4:] == '.res': filtro = filters[jf][:-4]
        else: filtro = filters[jf]
        #model = join([spectra[it], filtro, 'AB'], '.')
        model = '.'.join([spectra[it], filtro, 'AB'])
        model_path = os.path.join(ab_dir, model)
        abfiles.append(model)
        #Generate new ABflux files if not present
        # or if new_ab flag on
        if pars.d['NEW_AB'] == 'yes' or model[:-3] not in ab_db:
            if spectra[it] not in sed_db:
                print('SED ', spectra[it], 'not in database at', sed_dir)
                #         for line in sed_db:
                #            print line
                sys.exit()
            #print spectra[it],filters[jf]
            print('     Generating ', model, '....')
            ABflux(spectra[it], filtro, madau=pars.d['MADAU'])
            #z_ab=arange(0.,zmax_ab,dz_ab) #zmax_ab and dz_ab are def. in bpz_tools
            #    abflux=f_z_sed(spectra[it],filters[jf], z_ab,units='nu',madau=pars.d['MADAU'])
            #    abflux=clip(abflux,0.,1e400)
            #    buffer=join(['#',spectra[it],filters[jf], 'AB','\n'])
            #for i in range(len(z_ab)):
            #    buffer=buffer+join([`z_ab[i]`,`abflux[i]`,'\n'])
            #open(model_path,'w').write(buffer)
            #zo=z_ab
            #f_mod_0=abflux
            #else:
        #Read the data
        zo, f_mod_0 = get_data(model_path, (0, 1))
        #Rebin the data to the required redshift resolution
        f_mod[:, it, jf] = match_resol(zo, f_mod_0, z)
        #if sometrue(less(f_mod[:,it,jf],0.)):
        if less(f_mod[:, it, jf], 0.).any():
            print('Warning: some values of the model AB fluxes are <0')
            print('due to the interpolation ')
            print('Clipping them to f>=0 values')
            #To avoid rounding errors in the calculation of the likelihood
            f_mod[:, it, jf] = clip(f_mod[:, it, jf], 0., 1e300)
#We forbid f_mod to take values in the (0,1e-100) interval
#f_mod[:,it,jf]=where(less(f_mod[:,it,jf],1e-100)*greater(f_mod[:,it,jf],0.),0.,f_mod[:,it,jf])
#Here goes the interpolacion between the colors
# INTERP inserts `ninterp` interpolated templates between each adjacent
# pair; NTYPES partitions the template list into ell/sp/sb classes for
# the prior.
ninterp = int(pars.d['INTERP'])
ntypes = pars.d['NTYPES']
if ntypes is None:
    nt0 = nt
else:
    nt0 = list(ntypes)
    for i, nt1 in enumerate(nt0):
        print(i, nt1)
        nt0[i] = int(nt1)
    if (len(nt0) != 3) or (sum(nt0) != nt):
        print()
        # BUGFIX: the third class is starbursts (nsb), not a second
        # count of ellipticals -- see the USAGE lines below.
        print('%d ellipticals + %d spirals + %d starbursts' % tuple(nt0))
        print('does not add up to %d templates' % nt)
        print('USAGE: -NTYPES nell,nsp,nsb')
        print('nell = # of elliptical templates')
        print('nsp = # of spiral templates')
        print('nsb = # of starburst templates')
        print(
            'These must add up to the number of templates in the SPECTRA list')
        print('Quitting BPZ.')
        sys.exit()
if ninterp:
    # Interpolate model fluxes template-wise at each (z, filter) to build
    # the enlarged template set of nti = nt + (nt-1)*ninterp entries.
    nti = nt + (nt - 1) * ninterp
    buffer = zeros((nz, nti, nf)) * 1.
    tipos = arange(0., float(nti), float(ninterp) + 1.)
    xtipos = arange(float(nti))
    for iz in arange(nz):
        for jf in range(nf):
            buffer[iz, :, jf] = match_resol(tipos, f_mod[iz, :, jf], xtipos)
    nt = nti
    f_mod = buffer
#for j in range(nf):
#    plot=FramedPlot()
#    for i in range(nt): plot.add(Curve(z,log(f_mod[:,i,j]+1e-40)))
#    plot.show()
#    ask('More?')
#Load all the parameters in the columns file to a dictionary
col_pars = params()
col_pars.fromfile(col_file)
# Read which filters are in which columns
# Each filter entry supplies: flux column, flux-error column, calibration
# ('AB' or 'Vega'), zero-point error, zero-point offset (1-based columns).
flux_cols = []
eflux_cols = []
cals = []
zp_errors = []
zp_offsets = []
for filter in filters:
    datos = col_pars.d[filter]
    flux_cols.append(int(datos[0]) - 1)
    eflux_cols.append(int(datos[1]) - 1)
    cals.append(datos[2])
    zp_errors.append(datos[3])
    zp_offsets.append(datos[4])
zp_offsets = array(list(map(float, zp_offsets)))
# Optional global ZP_OFFSETS parameter adds to the per-filter offsets.
if pars.d['ZP_OFFSETS']:
    zp_offsets += array(list(map(float, pars.d['ZP_OFFSETS'])))
flux_cols = tuple(flux_cols)
eflux_cols = tuple(eflux_cols)
#READ the flux and errors from obs_file
f_obs = get_2Darray(obs_file, flux_cols)
ef_obs = get_2Darray(obs_file, eflux_cols)
#Convert them to arbitrary fluxes if they are in magnitudes
if pars.d['MAG'] == 'yes':
    # Classify entries by the sentinel magnitudes: detected (0<m<99),
    # not detected (m==99), not observed (m==-99).
    seen = greater(f_obs, 0.) * less(f_obs, undet)
    no_seen = equal(f_obs, undet)
    no_observed = equal(f_obs, unobs)
    todo = seen + no_seen + no_observed
    #The minimum photometric error is 0.01
    #ef_obs=ef_obs+seen*equal(ef_obs,0.)*0.001
    ef_obs = where(
        greater_equal(ef_obs, 0.), clip(ef_obs, pars.d['MIN_MAGERR'], 1e10),
        ef_obs)
    # Every entry must fall in exactly one class; otherwise report and quit.
    if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
        print('Objects with unexpected magnitudes!')
        print("""Allowed values for magnitudes are
        0<m<""" + repr(undet) + " m=" + repr(undet) + "(non detection), m=" + repr(
            unobs) + "(not observed)")
        for i in range(len(todo)):
            if not alltrue(todo[i, :]):
                print(i + 1, f_obs[i, :], ef_obs[i, :])
        sys.exit()
    #Detected objects: magnitudes -> arbitrary fluxes
    try:
        f_obs = where(seen, 10.**(-.4 * f_obs), f_obs)
    except OverflowError:
        print(
            'Some of the input magnitudes have values which are >700 or <-700')
        print('Purge the input photometric catalog')
        print('Minimum value', min(f_obs))
        print('Maximum value', max(f_obs))
        print('Indexes for minimum values', argmin(f_obs, 0.))
        print('Indexes for maximum values', argmax(f_obs, 0.))
        print('Bye.')
        sys.exit()
    try:
        ef_obs = where(seen, (10.**(.4 * ef_obs) - 1.) * f_obs, ef_obs)
    except OverflowError:
        print(
            'Some of the input magnitude errors have values which are >700 or <-700')
        print('Purge the input photometric catalog')
        print('Minimum value', min(ef_obs))
        print('Maximum value', max(ef_obs))
        print('Indexes for minimum values', argmin(ef_obs, 0.))
        print('Indexes for maximum values', argmax(ef_obs, 0.))
        print('Bye.')
        sys.exit()
    #print 'ef', ef_obs[0,:nf]
    #print 'f', f_obs[1,:nf]
    #print 'ef', ef_obs[1,:nf]
    #Looked at, but not detected objects (mag=99.)
    #We take the flux equal to zero, and the error in the flux equal to the 1-sigma detection error.
    #If m=99, the corresponding error magnitude column in supposed to be dm=m_1sigma, to avoid errors
    #with the sign we take the absolute value of dm
    f_obs = where(no_seen, 0., f_obs)
    ef_obs = where(no_seen, 10.**(-.4 * abs(ef_obs)), ef_obs)
    #Objects not looked at (mag=-99.)
    f_obs = where(no_observed, 0., f_obs)
    ef_obs = where(no_observed, 0., ef_obs)
#Flux codes:
# If f>0 and ef>0 : normal objects
# If f==0 and ef>0 :object not detected
# If f==0 and ef==0: object not observed
#Everything else will crash the program
#Check that the observed error fluxes are reasonable
#if sometrue(less(ef_obs,0.)): raise 'Negative input flux errors'
if less(ef_obs, 0.).any():
    raise ValueError('Negative input flux errors')
f_obs = where(less(f_obs, 0.), 0., f_obs)  #Put non-detections to 0
# NOTE(review): the condition below tests f_obs AFTER the clip above, so
# less(f_obs, 0.) can never be true here and this statement appears to be
# a no-op -- presumably it was meant to run on the pre-clip fluxes; confirm.
ef_obs = where(
    less(f_obs, 0.), maximum(1e-100, f_obs + ef_obs),
    ef_obs)  # Error equivalent to 1 sigma upper limit
#if sometrue(less(f_obs,0.)) : raise 'Negative input fluxes'
# Reclassify from the flux codes above.
seen = greater(f_obs, 0.) * greater(ef_obs, 0.)
no_seen = equal(f_obs, 0.) * greater(ef_obs, 0.)
no_observed = equal(f_obs, 0.) * equal(ef_obs, 0.)
todo = seen + no_seen + no_observed
if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
    print('Objects with unexpected fluxes/errors')
#Convert (internally) objects with zero flux and zero error(non observed)
#to objects with almost infinite (~1e108) error and still zero flux
#This will yield reasonable likelihoods (flat ones) for these objects
ef_obs = where(no_observed, 1e108, ef_obs)
#Include the zero point errors
zp_errors = array(list(map(float, zp_errors)))
zp_frac = e_mag2frac(zp_errors)
#zp_frac=10.**(.4*zp_errors)-1.
# Add zero-point uncertainty in quadrature to the flux errors.
ef_obs = where(seen, sqrt(ef_obs * ef_obs + (zp_frac * f_obs)**2), ef_obs)
ef_obs = where(no_seen,
               sqrt(ef_obs * ef_obs + (zp_frac * (old_div(ef_obs, 2.)))**2),
               ef_obs)
#Add the zero-points offset
#The offsets are defined as m_new-m_old
zp_offsets = array(list(map(float, zp_offsets)))
zp_offsets = where(not_equal(zp_offsets, 0.), 10.**(-.4 * zp_offsets), 1.)
f_obs = f_obs * zp_offsets
ef_obs = ef_obs * zp_offsets
#Convert fluxes to AB if needed
for i in range(f_obs.shape[1]):
    if cals[i] == 'Vega':
        const = mag2flux(VegatoAB(0., filters[i]))
        f_obs[:, i] = f_obs[:, i] * const
        ef_obs[:, i] = ef_obs[:, i] * const
    elif cals[i] == 'AB':
        continue
    else:
        print('AB or Vega?. Check ' + col_file + ' file')
        sys.exit()
#Get m_0 (if present)
# m_0 is the reference magnitude driving the empirical prior.
if 'M_0' in col_pars.d:
    m_0_col = int(col_pars.d['M_0']) - 1
    m_0 = get_data(obs_file, m_0_col)
    m_0 += pars.d['DELTA_M_0']
#Get the objects ID (as a string)
if 'ID' in col_pars.d:
    #    print col_pars.d['ID']
    id_col = int(col_pars.d['ID']) - 1
    id = get_str(obs_file, id_col)
else:
    # No ID column: number objects 1..ng as strings.
    id = list(map(str, list(range(1, len(f_obs[:, 0]) + 1))))
#Get spectroscopic redshifts (if present)
if 'Z_S' in col_pars.d:
    z_s_col = int(col_pars.d['Z_S']) - 1
    z_s = get_data(obs_file, z_s_col)
#Get the X,Y coordinates (used by the 'lensing' prior)
if 'X' in col_pars.d:
    datos = col_pars.d['X']
    if len(datos) == 1:  # OTHERWISE IT'S A FILTER!
        x_col = int(col_pars.d['X']) - 1
        x = get_data(obs_file, x_col)
if 'Y' in col_pars.d:
    datos = col_pars.d['Y']
    if len(datos) == 1:  # OTHERWISE IT'S A FILTER!
        y_col = int(datos) - 1
        y = get_data(obs_file, y_col)
#If 'check' on, initialize some variables
check = pars.d['CHECK']
# This generates a file with m,z,T and observed/expected colors
#if check=='yes': pars.d['FLUX_COMPARISON']=root+'.flux_comparison'
checkSED = check != 'no'
ng = f_obs.shape[0]
if checkSED:
    # PHOTOMETRIC CALIBRATION CHECK
    #r=zeros((ng,nf),float)+1.
    #dm=zeros((ng,nf),float)+1.
    #w=r*0.
    # Defaults: r=1, dm=1, w=0
    # frat: per-object flux ratio f_obs/f_model; fw: S/N-based weight.
    frat = ones((ng, nf), float)
    dmag = ones((ng, nf), float)
    fw = zeros((ng, nf), float)
#Visualize the colors of the galaxies and the templates
#When there are spectroscopic redshifts available
# Interactive diagnostic: observed colors (blue points) vs template
# color tracks (red curves) as a function of redshift, one panel per
# adjacent filter pair.
if interactive and 'Z_S' in col_pars.d and plots and checkSED and ask(
        'Plot colors vs spectroscopic redshifts?'):
    color_m = zeros((nz, nt, nf - 1)) * 1.
    if plots == 'pylab':
        figure(1)
    nrows = 2
    ncols = old_div((nf - 1), nrows)
    if (nf - 1) % nrows: ncols += 1
    for i in range(nf - 1):
        ##plot=FramedPlot()
        # Check for overflows
        fmu = f_obs[:, i + 1]
        fml = f_obs[:, i]
        good = greater(fml, 1e-100) * greater(fmu, 1e-100)
        zz, fmu, fml = multicompress(good, (z_s, fmu, fml))
        colour = old_div(fmu, fml)
        colour = clip(colour, 1e-5, 1e5)
        colour = 2.5 * log10(colour)
        if plots == 'pylab':
            subplot(nrows, ncols, i + 1)
            plot(zz, colour, "bo")
        elif plots == 'biggles':
            d = Points(zz, colour, color='blue')
            plot.add(d)
        # Overlay the model color track of every template.
        for it in range(nt):
            #Prevent overflows
            fmu = f_mod[:, it, i + 1]
            fml = f_mod[:, it, i]
            good = greater(fml, 1e-100)
            zz, fmu, fml = multicompress(good, (z, fmu, fml))
            colour = old_div(fmu, fml)
            colour = clip(colour, 1e-5, 1e5)
            colour = 2.5 * log10(colour)
            if plots == 'pylab':
                plot(zz, colour, "r")
            elif plots == 'biggles':
                d = Curve(zz, colour, color='red')
                plot.add(d)
        if plots == 'pylab':
            xlabel(r'$z$')
            ylabel('%s - %s' % (filters[i], filters[i + 1]))
        elif plots == 'biggles':
            plot.xlabel = r'$z$'
            plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
            plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
            plot.show()
    if plots == 'pylab':
        show()
    inp = eval(input('Hit Enter to continue.'))
#Get other information which will go in the output file (as strings)
# OTHER may name specific columns or 'all' to carry every input column
# through to the output unchanged.
if 'OTHER' in col_pars.d:
    if col_pars.d['OTHER'] != 'all':
        other_cols = col_pars.d['OTHER']
        if type(other_cols) == type((2, )):
            other_cols = tuple(map(int, other_cols))
        else:
            other_cols = (int(other_cols), )
        other_cols = [x - 1 for x in other_cols]
        n_other = len(other_cols)
    else:
        n_other = get_2Darray(obs_file, cols='all', nrows=1).shape[1]
        other_cols = list(range(n_other))
    others = get_str(obs_file, other_cols)
    # Multiple columns: transpose the per-column lists into one string
    # per object.
    if len(other_cols) > 1:
        other = []
        for j in range(len(others[0])):
            lista = []
            for i in range(len(others)):
                lista.append(others[i][j])
            other.append(join(lista))
    else:
        other = others
if pars.d['GET_Z'] == 'no': get_z = 0
else: get_z = 1
#Prepare the output file
out_name = pars.d['OUTPUT']
if get_z:
    # Back up any pre-existing output before overwriting it.
    if os.path.exists(out_name):
        os.system('cp %s %s.bak' % (out_name, out_name))
        print("File %s exists. Copying it to %s.bak" % (out_name, out_name))
    output = open(out_name, 'w')
if pars.d['PROBS_LITE'] == 'no': save_probs = 0
else: save_probs = 1
if pars.d['PROBS'] == 'no': save_full_probs = 0
else: save_full_probs = 1
if pars.d['PROBS2'] == 'no': save_probs2 = 0
else: save_probs2 = 1
#Include some header information
# File name and the date...
time_stamp = time.ctime(time.time())
if get_z: output.write('## File ' + out_name + ' ' + time_stamp + '\n')
#and also the parameters used to run bpz...
if get_z: output.write("""##
##Parameters used to run BPZ:
##
""")
# Dump the full (sorted) parameter dictionary as '##KEY=value' lines.
claves = list(pars.d.keys())
claves.sort()
for key in claves:
    if type(pars.d[key]) == type((1, )):
        cosa = join(list(pars.d[key]), ',')
    else:
        cosa = str(pars.d[key])
    if get_z: output.write('##' + key.upper() + '=' + cosa + '\n')
if save_full_probs:
    #Shelve some info on the run
    full_probs = shelve.open(pars.d['PROBS'])
    full_probs['TIME'] = time_stamp
    full_probs['PARS'] = pars.d
if save_probs:
    probs = open(pars.d['PROBS_LITE'], 'w')
    probs.write('# ID p_bayes(z) where z=arange(%.4f,%.4f,%.4f) \n' %
                (zmin, zmax + dz, dz))
if save_probs2:
    probs2 = open(pars.d['PROBS2'], 'w')
    probs2.write(
        '# id t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
    #probs2.write('# ID\n')
    #probs2.write('# t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
#Use a empirical prior?
# The magnitude-dependent empirical prior needs an M_0 column and a
# PRIOR name other than 'none'/'flat'.
tipo_prior = pars.d['PRIOR']
useprior = 0
if 'M_0' in col_pars.d:
    has_mags = 1
else:
    has_mags = 0
if has_mags and tipo_prior != 'none' and tipo_prior != 'flat':
    useprior = 1
#Add cluster 'spikes' to the prior?
# ZC lists cluster redshifts; FC the fraction of galaxies expected in
# each cluster (fractions must sum to <= 1).
cluster_prior = 0.
if pars.d['ZC']:
    cluster_prior = 1
    if type(pars.d['ZC']) == type(""): zc = array([float(pars.d['ZC'])])
    else: zc = array(list(map(float, pars.d['ZC'])))
    if type(pars.d['FC']) == type(""): fc = array([float(pars.d['FC'])])
    else: fc = array(list(map(float, pars.d['FC'])))
    fcc = add.reduce(fc)
    if fcc > 1.:
        # BUGFIX: 'ftc' was an undefined name (NameError) -- report the
        # offending total fraction 'fcc', and raise a real exception
        # (raising a string is a TypeError in Python 3).
        print(fcc)
        raise ValueError('Too many galaxies in clusters!')
    pi_c = zeros((nz, nt)) * 1.
    #Go over the different cluster spikes
    for i in range(len(zc)):
        #We define the cluster within dz=0.01 limits
        cluster_range = less_equal(abs(z - zc[i]), .01) * 1.
        #Clip values to avoid overflow
        exponente = clip(-(z - zc[i])**2 / 2. / (0.00333)**2, -700., 0.)
        #Outside the cluster range g is 0
        g = exp(exponente) * cluster_range
        norm = add.reduce(g)
        pi_c[:, 0] = pi_c[:, 0] + g / norm * fc[i]
    #Go over the different types
    print('We only apply the cluster prior to the early type galaxies')
    for i in range(1, 3 + 2 * ninterp):
        pi_c[:, i] = pi_c[:, i] + pi_c[:, 0]
#Output format
# Build the printf-style row format and the matching '# n NAME' header.
format = '%' + repr(maximum(5, len(id[0]))) + 's'  #ID format
format = format + pars.d[
    'N_PEAKS'] * ' %.3f %.3f %.3f %.3f %.5f' + ' %.3f %.3f %10.3f'
#Add header with variable names to the output file
sxhdr = """##
##Column information
##
# 1 ID"""
k = 1
# With N_PEAKS>1 each peak contributes five columns; otherwise the single
# best peak does.
if pars.d['N_PEAKS'] > 1:
    for j in range(pars.d['N_PEAKS']):
        sxhdr += """
# %i Z_B_%i
# %i Z_B_MIN_%i
# %i Z_B_MAX_%i
# %i T_B_%i
# %i ODDS_%i""" % (k + 1, j + 1, k + 2, j + 1, k + 3, j + 1, k + 4, j + 1,
                   k + 5, j + 1)
        k += 5
else:
    sxhdr += """
# %i Z_B
# %i Z_B_MIN
# %i Z_B_MAX
# %i T_B
# %i ODDS""" % (k + 1, k + 2, k + 3, k + 4, k + 5)
    k += 5
sxhdr += """
# %i Z_ML
# %i T_ML
# %i CHI-SQUARED\n""" % (k + 1, k + 2, k + 3)
nh = k + 4
# Optional trailing columns: spec-z, reference magnitude, OTHER payload.
if 'Z_S' in col_pars.d:
    sxhdr = sxhdr + '# %i Z_S\n' % nh
    format = format + ' %.3f'
    nh += 1
if has_mags:
    format = format + ' %.3f'
    sxhdr = sxhdr + '# %i M_0\n' % nh
    nh += 1
if 'OTHER' in col_pars.d:
    sxhdr = sxhdr + '# %i OTHER\n' % nh
    format = format + ' %s'
    nh += n_other
#print sxhdr
if get_z: output.write(sxhdr + '##\n')
odds_i = float(pars.d['ODDS'])
# oi: number of sigmas corresponding to the requested ODDS confidence.
oi = inv_gauss_int(odds_i)
print(odds_i, oi)
#Proceed to redshift estimation
if checkSED: buffer_flux_comparison = ""
if pars.d['CONVOLVE_P'] == 'yes':
    # Will Convolve with a dz=0.03 gaussian to make probabilities smoother
    # This is necessary; if not there are too many close peaks
    sigma_g = 0.03
    x = arange(-3. * sigma_g, 3. * sigma_g + old_div(dz, 10.),
               dz)  # made symmetric --DC
    gaus = exp(-(old_div(x, sigma_g))**2)
# NMAX limits the run to the first NMAX catalog objects.
if pars.d["NMAX"] != None: ng = int(pars.d["NMAX"])
# Main per-galaxy loop: build the likelihood over (z, template), apply
# the prior, reduce to p_bayes(z), locate peak(s), and write all
# requested output rows/files.
for ig in range(ng):
    currentPercent = ig / ng * 100
    status = "{:.3f}% of {} completed.".format(currentPercent, ng)
    Printer(status)
    #Don't run BPZ on galaxies with have z_s > z_max
    #if col_pars.d.has_key('Z_S'):
    #    if z_s[ig]<9.9 and z_s[ig]>zmax : continue
    if not get_z: continue
    # Likelihood over the (z, template) grid from the observed fluxes.
    if pars.d['COLOR'] == 'yes':
        likelihood = p_c_z_t_color(f_obs[ig, :nf], ef_obs[ig, :nf],
                                   f_mod[:nz, :nt, :nf])
    else:
        likelihood = p_c_z_t(f_obs[ig, :nf], ef_obs[ig, :nf],
                             f_mod[:nz, :nt, :nf])
    if 0:
        print(f_obs[ig, :nf])
        print(ef_obs[ig, :nf])
    iz_ml = likelihood.i_z_ml
    t_ml = likelihood.i_t_ml
    red_chi2 = old_div(likelihood.min_chi2, float(nf - 1.))
    #p=likelihood.Bayes_likelihood
    #likelihood.various_plots()
    #print 'FULL BAYESAIN LIKELIHOOD'
    p = likelihood.likelihood
    if not ig:
        print('ML * prior -- NOT QUITE BAYESIAN')
    # Build the prior p_i(z, t).
    if pars.d[
            'ONLY_TYPE'] == 'yes':  #Use only the redshift information, no priors
        p_i = zeros((nz, nt)) * 1.
        j = searchsorted(z, z_s[ig])
        #print j,nt,z_s[ig]
        try:
            p_i[j, :] = old_div(1., float(nt))
        except IndexError:
            pass
    else:
        if useprior:
            if pars.d['PRIOR'] == 'lensing':
                p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp, x[ig], y[ig])
            else:
                p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp)
        else:
            p_i = old_div(ones((nz, nt), float), float(nz * nt))
        if cluster_prior: p_i = (1. - fcc) * p_i + pi_c
    if save_full_probs:
        full_probs[id[ig]] = [z, p_i[:nz, :nt], p[:nz, :nt], red_chi2]
    #Multiply the prior by the likelihood to find the final probability
    pb = p_i[:nz, :nt] * p[:nz, :nt]
    #plo=FramedPlot()
    #for i in range(p.shape[1]):
    #    plo.add(Curve(z,p_i[:nz,i]/sum(sum(p_i[:nz,:]))))
    #for i in range(p.shape[1]):
    #    plo.add(Curve(z,p[:nz,i]/sum(sum(p[:nz,:])),color='red'))
    #plo.add(Curve(z,pb[:nz,-1]/sum(pb[:nz,-1]),color='blue'))
    #plo.show()
    #ask('More?')
    #Convolve with a gaussian of width \sigma(1+z) to take into
    #accout the intrinsic scatter in the redshift estimation 0.06*(1+z)
    #(to be done)
    #Estimate the bayesian quantities
    # Marginalize over templates to get p_bayes(z).
    p_bayes = add.reduce(pb[:nz, :nt], -1)
    #print p_bayes.shape
    #print argmax(p_bayes)
    #print p_bayes[300:310]
    #Convolve with a gaussian
    if pars.d['CONVOLVE_P'] == 'yes' and pars.d['ONLY_TYPE'] == 'no':
        #print 'GAUSS CONV'
        p_bayes = convolve(p_bayes, gaus, 1)
        #print 'gaus', gaus
        #print p_bayes.shape
        #print argmax(p_bayes)
        #print p_bayes[300:310]
    # Eliminate all low level features in the prob. distribution
    pmax = max(p_bayes)
    p_bayes = where(
        greater(p_bayes, pmax * float(pars.d['P_MIN'])), p_bayes, 0.)
    norm = add.reduce(p_bayes)
    p_bayes = old_div(p_bayes, norm)
    # Fold in external spectroscopic probabilities, if supplied.
    if specprob:
        p_spec[ig, :] = match_resol(z, p_bayes, z_spec[ig, :]) * p_spec[ig, :]
        norma = add.reduce(p_spec[ig, :])
        if norma == 0.: norma = 1.
        p_spec[ig, :] /= norma
        #vyjod=tuple([id[ig]]+list(z_spec[ig,:])+list(p_spec[ig,:])+[z_s[ig],
        #    int(float(other[ig]))])
        vyjod = tuple([id[ig]] + list(z_spec[ig, :]) + list(p_spec[ig, :]))
        formato = "%s " + 5 * " %.4f"
        formato += 5 * " %.3f"
        #formato+=" %4f %i"
        formato += "\n"
        print(formato % vyjod)
        specout.write(formato % vyjod)
    # Multi-peak analysis: find local maxima/minima of p_bayes(z).
    if pars.d['N_PEAKS'] > 1:
        # Identify maxima and minima in the final probability
        g_max = less(p_bayes[2:], p_bayes[1:-1]) * less(p_bayes[:-2],
                                                        p_bayes[1:-1])
        g_min = greater(p_bayes[2:], p_bayes[1:-1]) * greater(p_bayes[:-2],
                                                              p_bayes[1:-1])
        g_min += equal(p_bayes[1:-1], 0.) * greater(p_bayes[2:], 0.)
        g_min += equal(p_bayes[1:-1], 0.) * greater(p_bayes[:-2], 0.)
        i_max = compress(g_max, arange(nz - 2)) + 1
        i_min = compress(g_min, arange(nz - 2)) + 1
        # Check that the first point and the last one are not minima or maxima,
        # if they are, add them to the index arrays
        if p_bayes[0] > p_bayes[1]:
            i_max = concatenate([[0], i_max])
            i_min = concatenate([[0], i_min])
        if p_bayes[-1] > p_bayes[-2]:
            i_max = concatenate([i_max, [nz - 1]])
            i_min = concatenate([i_min, [nz - 1]])
        if p_bayes[0] < p_bayes[1]:
            i_min = concatenate([[0], i_min])
        if p_bayes[-1] < p_bayes[-2]:
            i_min = concatenate([i_min, [nz - 1]])
        p_max = take(p_bayes, i_max)
        #p_min=take(p_bayes,i_min)
        p_tot = []
        z_peaks = []
        t_peaks = []
        # Sort them by probability values
        p_max, i_max = multisort(old_div(1., p_max), (p_max, i_max))
        # For each maximum, define the minima which sandwich it
        # Assign minima to each maximum
        jm = searchsorted(i_min, i_max)
        p_max = list(p_max)
        for i in range(len(i_max)):
            z_peaks.append([z[i_max[i]], z[i_min[jm[i] - 1]], z[i_min[jm[i]]]])
            t_peaks.append(argmax(pb[i_max[i], :nt]))
            p_tot.append(sum(p_bayes[i_min[jm[i] - 1]:i_min[jm[i]]]))
            # print z_peaks[-1][0],f_mod[i_max[i],t_peaks[-1]-1,:nf]
        if ninterp:
            t_peaks = list(old_div(array(t_peaks), (1. + ninterp)))
        if pars.d['MERGE_PEAKS'] == 'yes':
            # Merge peaks which are very close 0.03(1+z)
            merged = []
            for k in range(len(z_peaks)):
                for j in range(len(z_peaks)):
                    if j > k and k not in merged and j not in merged:
                        if abs(z_peaks[k][0] - z_peaks[j][0]) < 0.06 * (
                                1. + z_peaks[j][0]):
                            # Modify the element which receives the accretion
                            z_peaks[k][1] = minimum(z_peaks[k][1],
                                                    z_peaks[j][1])
                            z_peaks[k][2] = maximum(z_peaks[k][2],
                                                    z_peaks[j][2])
                            p_tot[k] += p_tot[j]
                            # Put the merged element in the list
                            merged.append(j)
            #print merged
            # Clean up
            copia = p_tot[:]
            for j in merged:
                p_tot.remove(copia[j])
            copia = z_peaks[:]
            for j in merged:
                z_peaks.remove(copia[j])
            copia = t_peaks[:]
            for j in merged:
                t_peaks.remove(copia[j])
            copia = p_max[:]
            for j in merged:
                p_max.remove(copia[j])
        if sum(array(p_tot)) != 1.:
            p_tot = old_div(array(p_tot), sum(array(p_tot)))
    # Define the peak
    iz_b = argmax(p_bayes)
    zb = z[iz_b]
    # OKAY, NOW THAT GAUSSIAN CONVOLUTION BUG IS FIXED
    # if pars.d['ONLY_TYPE']=='yes': zb=zb-dz/2. #This corrects a small bias
    # else: zb=zb-dz #This corrects another small bias --DC
    #Integrate within a ~ oi*sigma interval to estimate
    # the odds. (based on a sigma=pars.d['MIN_RMS']*(1+z))
    #Look for the number of sigma corresponding
    #to the odds_i confidence limit
    zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
    zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
    if pars.d['Z_THR'] > 0:
        zo1 = float(pars.d['Z_THR'])
        zo2 = float(pars.d['ZMAX'])
    o = odds(p_bayes[:nz], z, zo1, zo2)
    # Integrate within the same odds interval to find the type
    # izo1=maximum(0,searchsorted(z,zo1)-1)
    # izo2=minimum(nz,searchsorted(z,zo2))
    # t_b=argmax(add.reduce(p[izo1:izo2,:nt],0))
    # Best template at the best redshift (1-based t_b for output).
    it_b = argmax(pb[iz_b, :nt])
    t_b = it_b + 1
    if ninterp:
        tt_b = old_div(float(it_b), (1. + ninterp))
        tt_ml = old_div(float(t_ml), (1. + ninterp))
    else:
        tt_b = it_b
        tt_ml = t_ml
    if max(pb[iz_b, :]) < 1e-300:
        print('NO CLEAR BEST t_b; ALL PROBABILITIES ZERO')
        t_b = -1.
        tt_b = -1.
    #print it_b, t_b, tt_b, pb.shape
    if 0:
        print(f_mod[iz_b, it_b, :nf])
        print(min(ravel(p_i)), max(ravel(p_i)))
        print(min(ravel(p)), max(ravel(p)))
        print(p_i[iz_b, :])
        print(p[iz_b, :])
        print(p_i[iz_b, it_b])  # prior
        print(p[iz_b, it_b])  # chisq
        print(likelihood.likelihood[iz_b, it_b])
        print(likelihood.chi2[iz_b, it_b])
        print(likelihood.ftt[iz_b, it_b])
        print(likelihood.foo)
        print()
        print('t_b', t_b)
        print('iz_b', iz_b)
        print('nt', nt)
        print(max(ravel(pb)))
        impb = argmax(ravel(pb))
        impbz = old_div(impb, nt)
        impbt = impb % nt
        print(impb, impbz, impbt)
        print(ravel(pb)[impb])
        print(pb.shape, (nz, nt))
        print(pb[impbz, impbt])
        print(pb[iz_b, it_b])
        print('z, t', z[impbz], t_b)
        print(t_b)
    # Redshift confidence limits
    z1, z2 = interval(p_bayes[:nz], z, odds_i)
    if pars.d['PHOTO_ERRORS'] == 'no':
        zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
        zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
        if zo1 < z1: z1 = maximum(0., zo1)
        if zo2 > z2: z2 = zo2
    # Print output
    if pars.d['N_PEAKS'] == 1:
        salida = [id[ig], zb, z1, z2, tt_b + 1, o, z[iz_ml], tt_ml + 1,
                  red_chi2]
    else:
        salida = [id[ig]]
        # Pad missing peaks with -1 so every row has N_PEAKS*5 columns.
        for k in range(pars.d['N_PEAKS']):
            if k <= len(p_tot) - 1:
                salida = salida + list(z_peaks[k]) + [t_peaks[k] + 1, p_tot[k]]
            else:
                salida += [-1., -1., -1., -1., -1.]
        salida += [z[iz_ml], tt_ml + 1, red_chi2]
    if 'Z_S' in col_pars.d: salida.append(z_s[ig])
    if has_mags: salida.append(m_0[ig] - pars.d['DELTA_M_0'])
    if 'OTHER' in col_pars.d: salida.append(other[ig])
    if get_z: output.write(format % tuple(salida) + '\n')
    if pars.d['VERBOSE'] == 'yes': print(format % tuple(salida))
    #try:
    #    if sometrue(greater(z_peaks,7.5)):
    #        connect(z,p_bayes)
    #        ask('More?')
    #except:
    #    pass
    odd_check = odds_i
    if checkSED:
        # Compare observed fluxes to the best-fit template's model fluxes
        # (rescaled by the least-squares amplitude am).
        ft = f_mod[iz_b, it_b, :]
        fo = f_obs[ig, :]
        efo = ef_obs[ig, :]
        dfosq = (old_div((ft - fo), efo))**2
        if 0:
            print(ft)
            print(fo)
            print(efo)
            print(dfosq)
            pause()
        factor = ft / efo / efo
        ftt = add.reduce(ft * factor)
        fot = add.reduce(fo * factor)
        am = old_div(fot, ftt)
        ft = ft * am
        if 0:
            print(factor)
            print(ftt)
            print(fot)
            print(am)
            print(ft)
            print()
            pause()
        flux_comparison = [id[ig], m_0[ig], z[iz_b], t_b, am] + list(
            concatenate([ft, fo, efo]))
        nfc = len(flux_comparison)
        format_fc = '%s %.2f %.2f %i' + (nfc - 4) * ' %.3e' + '\n'
        buffer_flux_comparison = buffer_flux_comparison + format_fc % tuple(
            flux_comparison)
        if o >= odd_check:
            # PHOTOMETRIC CALIBRATION CHECK
            # Calculate flux ratios, but only for objects with ODDS >= odd_check
            # (odd_check = 0.95 by default)
            # otherwise, leave weight w = 0 by default
            eps = 1e-10
            frat[ig, :] = divsafe(fo, ft, inf=eps, nan=eps)
            #fw[ig,:] = greater(fo, 0)
            fw[ig, :] = divsafe(fo, efo, inf=1e8, nan=0)
            fw[ig, :] = clip(fw[ig, :], 0, 100)
            #print fw[ig,:]
            #print
        # Dead (if 0) legacy version of the calibration check.
        if 0:
            bad = less_equal(ft, 0.)
            #Avoid overflow by setting r to 0.
            fo = where(bad, 0., fo)
            ft = where(bad, 1., ft)
            r[ig, :] = old_div(fo, ft)
            try:
                dm[ig, :] = -flux2mag(old_div(fo, ft))
            except:
                dm[ig, :] = -100
            # Clip ratio between 0.01 & 100
            r[ig, :] = where(greater(r[ig, :], 100.), 100., r[ig, :])
            r[ig, :] = where(less_equal(r[ig, :], 0.), 0.01, r[ig, :])
            #Weight by flux
            w[ig, :] = where(greater(fo, 0.), 1, 0.)
            #w[ig,:]=where(greater(fo,0.),fo,0.)
            #print fo
            #print r[ig,:]
            #print
            # This is no good becasue r is always > 0 (has been clipped that way)
            #w[ig,:]=where(greater(r[ig,:],0.),fo,0.)
            # The is bad because it would include non-detections:
            #w[ig,:]=where(greater(r[ig,:],0.),1.,0.)
    if save_probs:
        texto = '%s ' % str(id[ig])
        texto += len(p_bayes) * '%.3e ' + '\n'
        probs.write(texto % tuple(p_bayes))
    # pb[z,t] -> p_bayes[z]
    # 1. tb are summed over
    # 2. convolved with Gaussian if CONVOLVE_P
    # 3. Clipped above P_MIN * max(P), where P_MIN = 0.01 by default
    # 4. normalized such that sum(P(z)) = 1
    if save_probs2:  # P = exp(-chisq / 2)
        #probs2.write('%s\n' % id[ig])
        pmin = pmax * float(pars.d['P_MIN'])
        #pb = where(less(pb,pmin), 0, pb)
        chisq = -2 * log(pb)
        for itb in range(nt):
            chisqtb = chisq[:, itb]
            pqual = greater(pb[:, itb], pmin)
            # Write only the contiguous z segments where P exceeds pmin.
            chisqlists = seglist(chisqtb, pqual)
            if len(chisqlists) == 0:
                continue
            #print pb[:,itb]
            #print chisqlists
            zz = arange(zmin, zmax + dz, dz)
            zlists = seglist(zz, pqual)
            for i in range(len(zlists)):
                probs2.write('%s %2d %.3f ' %
                             (id[ig], itb + 1, zlists[i][0]))
                fmt = len(chisqlists[i]) * '%4.2f ' + '\n'
                probs2.write(fmt % tuple(chisqlists[i]))
        #fmt = len(chisqtb) * '%4.2f '+'\n'
        #probs2.write('%d ' % itb)
        #probs2.write(fmt % tuple(chisqtb))
#if checkSED: open(pars.d['FLUX_COMPARISON'],'w').write(buffer_flux_comparison)
# Flush the accumulated flux-comparison table and close the main output.
if checkSED: open(pars.d['CHECK'], 'w').write(buffer_flux_comparison)
if get_z: output.close()
#if checkSED and get_z:
# Summarize the photometric calibration: weighted mean f_obs/f_model and
# the corresponding zero-point (dmag) shift per filter.
if checkSED:
    #try:
    if 1:
        if interactive:
            print("")
            print("")
            print("PHOTOMETRIC CALIBRATION TESTS")
            # See PHOTOMETRIC CALIBRATION CHECK above
            #ratios=add.reduce(w*r,0)/add.reduce(w,0)
            #print "Average, weighted by flux ratios f_obs/f_model for objects with odds >= %g" % odd_check
            #print len(filters)*' %s' % tuple(filters)
            #print nf*' % 7.3f ' % tuple(ratios)
            #print "Corresponding zero point shifts"
            #print nf*' % 7.3f ' % tuple(-flux2mag(ratios))
            #print
            fratavg = old_div(sum(fw * frat, axis=0), sum(fw, axis=0))
            dmavg = -flux2mag(fratavg)
            fnobj = sum(greater(fw, 0), axis=0)
            #print 'fratavg', fratavg
            #print 'dmavg', dmavg
            #print 'fnobj', fnobj
            #fnobj = sum(greater(w[:,i],0))
            print(
                "If the dmag are large, add them to the .columns file (zp_offset), then re-run BPZ.")
            print(
                "(For better results, first re-run with -ONLY_TYPE yes to fit SEDs to known spec-z.)")
            print()
            print(' fo/ft dmag nobj filter')
            #print nf
            for i in range(nf):
                print('% 7.3f % 7.3f %5d %s'\
                    % (fratavg[i], dmavg[i], fnobj[i], filters[i]))
            #% (ratios[i], -flux2mag(ratios)[i], sum(greater(w[:,i],0)), filters[i])
            #print ' fo/ft dmag filter'
            #for i in range(nf):
            #    print '% 7.3f % 7.3f %s' % (ratios[i], -flux2mag(ratios)[i], filters[i])
            print(
                "fo/ft = Average f_obs/f_model weighted by f_obs/ef_obs for objects with ODDS >= %g"
                % odd_check)
            print(
                "dmag = magnitude offset which should be applied (added) to the photometry (zp_offset)")
            print(
                "nobj = # of galaxies considered in that filter (detected and high ODDS >= %g)"
                % odd_check)
            # print r
            # print w
            #print
            #print "Number of galaxies considered (with ODDS >= %g):" % odd_check
            #print ' ', sum(greater(w,0)) / float(nf)
            #print '(Note a galaxy detected in only 5 / 6 filters counts as 5/6 = 0.833)'
            #print sum(greater(w,0))
            #This part is experimental and may not work in the general case
            #print "Median color offsets for objects with odds > "+`odd_check`+" (not weighted)"
            #print len(filters)*' %s' % tuple(filters)
            #r=flux2mag(r)
            #print nf*' %.3f ' % tuple(-median(r))
            #print nf*' %.3f ' % tuple(median(dm))
            #rms=[]
            #efobs=[]
            #for j in range(nf):
            #    ee=where(greater(f_obs[:,j],0.),f_obs[:,j],2.)
            #    zz=e_frac2mag(ef_obs[:,j]/ee)
            #
            #    xer=arange(0.,1.,.02)
            #    hr=hist(abs(r[:,j]),xer)
            #    hee=hist(zz,xer)
            #    rms.append(std_log(compress(less_equal(r[:,j],1.),r[:,j])))
            #    zz=compress(less_equal(zz,1.),zz)
            #    efobs.append(sqrt(mean(zz*zz)))
            #print nf*' %.3f ' % tuple(rms)
            #print nf*' %.3f ' % tuple(efobs)
            #print nf*' %.3f ' % tuple(sqrt(abs(array(rms)**2-array(efobs)**2)))
    #except: pass
# Close any auxiliary probability outputs that were opened.
if save_full_probs: full_probs.close()
if save_probs: probs.close()
if save_probs2: probs2.close()
if plots and checkSED:
zb, zm, zb1, zb2, o, tb = get_data(out_name, (1, 6, 2, 3, 5, 4))
#Plot the comparison between z_spec and z_B
if 'Z_S' in col_pars.d:
if not interactive or ask('Compare z_B vs z_spec?'):
good = less(z_s, 9.99)
print(
'Total initial number of objects with spectroscopic redshifts= ',
sum(good))
od_th = 0.
if ask('Select for galaxy characteristics?\n'):
od_th = eval(input('Odds threshold?\n'))
good *= greater_equal(o, od_th)
t_min = eval(input('Minimum spectral type\n'))
t_max = eval(input('Maximum spectral type\n'))
good *= less_equal(tb, t_max) * greater_equal(tb, t_min)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * less_equal(m_0, mg_max) * greater_equal(
m_0, mg_min)
zmo, zso, zbo, zb1o, zb2o, tb = multicompress(good, (zm, z_s, zb,
zb1, zb2, tb))
print('Number of objects with odds > %.2f= %i ' %
(od_th, len(zbo)))
deltaz = old_div((zso - zbo), (1. + zso))
sz = stat_robust(deltaz, 3., 3)
sz.run()
outliers = greater_equal(abs(deltaz), 3. * sz.rms)
print('Number of outliers [dz >%.2f*(1+z)]=%i' %
(3. * sz.rms, add.reduce(outliers)))
catastrophic = greater_equal(deltaz * (1. + zso), 1.)
n_catast = sum(catastrophic)
print('Number of catastrophic outliers [dz >1]=', n_catast)
print('Delta z/(1+z) = %.4f +- %.4f' % (sz.median, sz.rms))
if interactive and plots:
if plots == 'pylab':
figure(2)
subplot(211)
plot(
arange(
min(zso), max(zso) + 0.01, 0.01), arange(
min(zso), max(zso) + 0.01, 0.01), "r")
errorbar(zso,
zbo, [abs(zbo - zb1o), abs(zb2o - zbo)],
fmt="bo")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{bpz}$')
subplot(212)
plot(zso, zmo, "go", zso, zso, "r")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{ML}$')
show()
elif plots == 'biggles':
plot = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot.add(Points(zso, zbo, symboltype=symbol, color='blue'))
plot.add(Curve(zso, zso, linewidth=2., color='red'))
plot.add(ErrorBarsY(zso, zb1o, zb2o))
plot.xlabel = r'$z_{spec}$'
plot.ylabel = r'$z_{bpz}$'
# plot.xrange=0.,1.5
# plot.yrange=0.,1.5
plot.show()
#
plot_ml = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot_ml.add(Points(
zso, zmo, symboltype=symbol,
color='blue'))
plot_ml.add(Curve(zso, zso, linewidth=2., color='red'))
plot_ml.xlabel = r"$z_{spec}$"
plot_ml.ylabel = r"$z_{ML}$"
plot_ml.show()
if interactive and plots and ask('Plot Bayesian photo-z histogram?'):
if plots == 'biggles':
dz = eval(input('Redshift interval?\n'))
od_th = eval(input('Odds threshold?\n'))
good = greater_equal(o, od_th)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * less_equal(m_0, mg_max) * greater_equal(m_0,
mg_min)
z = compress(good, zb)
xz = arange(zmin, zmax, dz)
hz = hist(z, xz)
plot = FramedPlot()
h = Histogram(hz, 0., dz, color='blue')
plot.add(h)
plot.xlabel = r'$z_{bpz}$'
plot.ylabel = r'$N(z_{bpz})$'
plot.show()
if ask('Want to save plot as eps file?'):
file = eval(input('File name?\n'))
if file[-2:] != 'ps': file = file + '.eps'
plot.save_as_eps(file)
if interactive and plots and ask(
'Compare colors with photometric redshifts?'):
if plots == 'biggles':
color_m = zeros((nz, nt, nf - 1)) * 1.
for i in range(nf - 1):
plot = FramedPlot()
# Check for overflows
fmu = f_obs[:, i + 1]
fml = f_obs[:, i]
good = greater(fml, 1e-100) * greater(fmu, 1e-100)
zz, fmu, fml = multicompress(good, (zb, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Points(zz, colour, color='blue')
plot.add(d)
for it in range(nt):
#Prevent overflows
fmu = f_mod[:, it, i + 1]
fml = f_mod[:, it, i]
good = greater(fml, 1e-100)
zz, fmu, fml = multicompress(good, (z, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Curve(zz, colour, color='red')
plot.add(d)
plot.xlabel = r'$z$'
plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
plot.show()
rolex.check()
| mit |
xupingmao/xnote | handlers/search/note.py | 1 | 1364 | # -*- coding:utf-8 -*-
# Created by xupingmao on 2017/06/11
# @modified 2020/02/18 00:20:12
"""搜索知识库文件"""
import re
import sys
import six
import xutils
import xauth
import xmanager
import xconfig
import xtables
from xutils import textutil
from xutils import SearchResult, text_contains
NOTE_DAO = xutils.DAO("note")
def to_sqlite_obj(text):
    """Render a Python value as a SQLite literal.

    ``None`` maps to ``NULL``, strings are single-quoted with embedded
    quotes doubled, and any other value is rendered via ``repr``.
    """
    if text is None:
        return "NULL"
    if isinstance(text, six.string_types):
        escaped = text.replace("'", "''")
        return "'" + escaped + "'"
    return repr(text)
def filter_symbols(words):
    """Strip ideographic full stops (U+3002) from each word and drop
    words that become empty as a result."""
    cleaned = []
    for raw_word in words:
        stripped = raw_word.replace("。", "")
        if stripped:
            cleaned.append(stripped)
    return cleaned
def search(ctx, expression=None):
    """Search the current user's notes for the words carried by *ctx*.

    Depending on the context flags this queries note content and/or note
    names, tags every hit with category 'note', and orders group notes
    ahead of plain notes.
    """
    keywords = filter_symbols(ctx.words)
    if not keywords:
        return []
    hits = []
    if ctx.search_note_content:
        hits += NOTE_DAO.search_content(keywords, xauth.current_name())
    if ctx.search_note:
        hits += NOTE_DAO.search_name(keywords, xauth.current_name())
    for item in hits:
        item.category = 'note'
    # Groups are listed before regular notes.
    groups = [x for x in hits if x.type == "group"]
    text_files = [x for x in hits if x.type != "group"]
    return groups + text_files
| gpl-3.0 |
jimbydamonk/ansible-modules-core | cloud/google/gce_pd.py | 130 | 9532 | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def main():
    """Entry point for the gce_pd Ansible module.

    Creates, attaches, detaches and deletes GCE persistent disks based on
    the module parameters, then terminates the process through
    ``module.exit_json`` / ``module.fail_json``.
    """
    # NOTE(review): documented choices for 'state' and 'disk_type' are not
    # enforced here via choices=[...] -- confirm whether that is intended.
    module = AnsibleModule(
        argument_spec = dict(
            detach_only = dict(type='bool'),
            instance_name = dict(),
            mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
            name = dict(required=True),
            size_gb = dict(default=10),
            disk_type = dict(default='pd-standard'),
            image = dict(),
            snapshot = dict(),
            state = dict(default='present'),
            zone = dict(default='us-central1-b'),
            service_account_email = dict(),
            pem_file = dict(),
            project_id = dict(),
        )
    )
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.13.3+) is required for this module')
    gce = gce_connect(module)
    # Pull all parameters into locals for readability below.
    detach_only = module.params.get('detach_only')
    instance_name = module.params.get('instance_name')
    mode = module.params.get('mode')
    name = module.params.get('name')
    size_gb = module.params.get('size_gb')
    disk_type = module.params.get('disk_type')
    image = module.params.get('image')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    zone = module.params.get('zone')
    if detach_only and not instance_name:
        module.fail_json(
            msg='Must specify an instance name when detaching a disk',
            changed=False)
    disk = inst = None
    changed = is_attached = False
    json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
    if detach_only:
        json_output['detach_only'] = True
        json_output['detached_from_instance'] = instance_name
    if instance_name:
        # user wants to attach/detach from an existing instance
        try:
            inst = gce.ex_get_node(instance_name, zone)
            # is the disk attached?
            for d in inst.extra['disks']:
                if d['deviceName'] == name:
                    is_attached = True
                    json_output['attached_mode'] = d['mode']
                    json_output['attached_to_instance'] = inst.name
        except:
            # NOTE(review): bare except silently treats any lookup error as
            # "instance does not exist"; a later check reports a clearer
            # failure, but unexpected libcloud errors are hidden here.
            pass
    # find disk if it already exists
    try:
        disk = gce.ex_get_volume(name)
        json_output['size_gb'] = int(disk.size)
    except ResourceNotFoundError:
        # Not an error: the disk may be created below.
        pass
    except Exception, e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
    # user wants a disk to exist. If "instance_name" is supplied the user
    # also wants it attached
    if state in ['active', 'present']:
        if not size_gb:
            module.fail_json(msg="Must supply a size_gb", changed=False)
        try:
            # Round to a whole number of GB; anything below 1 GB is invalid.
            size_gb = int(round(float(size_gb)))
            if size_gb < 1:
                raise Exception
        except:
            module.fail_json(msg="Must supply a size_gb larger than 1 GB",
                    changed=False)
        if instance_name and inst is None:
            module.fail_json(msg='Instance %s does not exist in zone %s' % (
                instance_name, zone), changed=False)
        if not disk:
            # image and snapshot are mutually exclusive disk sources.
            if image is not None and snapshot is not None:
                module.fail_json(
                    msg='Cannot give both image (%s) and snapshot (%s)' % (
                        image, snapshot), changed=False)
            lc_image = None
            lc_snapshot = None
            if image is not None:
                lc_image = gce.ex_get_image(image)
            elif snapshot is not None:
                lc_snapshot = gce.ex_get_snapshot(snapshot)
            try:
                disk = gce.create_volume(
                    size_gb, name, location=zone, image=lc_image,
                    snapshot=lc_snapshot, ex_disk_type=disk_type)
            except ResourceExistsError:
                # Benign race: another process created the disk first.
                pass
            except QuotaExceededError:
                module.fail_json(msg='Requested disk size exceeds quota',
                        changed=False)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['size_gb'] = size_gb
            if image is not None:
                json_output['image'] = image
            if snapshot is not None:
                json_output['snapshot'] = snapshot
            changed = True
        if inst and not is_attached:
            try:
                gce.attach_volume(inst, disk, device=name, ex_mode=mode)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['attached_to_instance'] = inst.name
            json_output['attached_mode'] = mode
            changed = True
    # user wants to delete a disk (or perhaps just detach it).
    if state in ['absent', 'deleted'] and disk:
        if inst and is_attached:
            try:
                gce.detach_volume(disk, ex_node=inst)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True
        if not detach_only:
            try:
                gce.destroy_volume(disk)
            except ResourceInUseError, e:
                module.fail_json(msg=str(e.value), changed=False)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True
    json_output['changed'] = changed
    module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| gpl-3.0 |
kslundberg/pants | src/python/pants/reporting/reporting_utils.py | 10 | 1233 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
def items_to_report_element(items, item_type):
  """Convert an iterable of items to a (message, detail) pair.

  - items: a list of items (e.g., Target instances) that can be str()-ed.
  - item_type: a string describing the type of item (e.g., 'target').

  Returns (message, detail) where message is the count of items
  (e.g., '26 targets') and detail is the text representation of the list
  of items, one per line. When there are no items, only the message
  string is returned. The return value can be used as an argument to
  Report.log(), letting the user click the count to see the items.
  """
  def _plural(noun):
    # 'target' -> 'targets', 'class' -> 'classes'.
    return noun + ('es' if noun.endswith('s') else 's')

  rendered = [str(item) for item in items]
  count = len(rendered)
  label = item_type if count == 1 else _plural(item_type)
  summary = '{} {}'.format(count, label)
  if not rendered:
    return summary
  return summary, '\n'.join(rendered)
| apache-2.0 |
nitzmahone/ansible-modules-core | network/dellos10/dellos10_facts.py | 11 | 13932 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: dellos10_facts
version_added: "2.2"
author: "Senthil Kumar Ganesan (@skg-net)"
short_description: Collect facts from remote devices running Dell OS10
description:
- Collects a base set of device facts from a remote device that
is running Dell OS10. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: dellos10
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
        to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial M(!) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Collect all facts from the device
- dellos10_facts:
gather_subset: all
# Collect only the config and default facts
- dellos10_facts:
gather_subset:
- config
# Do not collect hardware facts
- dellos10_facts:
gather_subset:
- "!hardware"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_name:
description: The name of the OS which is running
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_servicetag:
description: The service tag number of the remote device
returned: always
type: str
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
# hardware
ansible_net_cpu_arch:
description: Cpu Architecture of the remote device
returned: when hardware is configured
type: str
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner
from ansible.module_utils.network import NetworkModule
import ansible.module_utils.dellos10
try:
from lxml import etree as ET
except ImportError:
import xml.etree.ElementTree as ET
class FactsBase(object):
    """Base class for fact collectors.

    Subclasses implement ``commands`` (queueing CLI commands on the
    runner) and ``populate`` (parsing responses into ``self.facts``).
    """

    def __init__(self, runner):
        self.runner = runner
        self.facts = {}
        # Queue this collector's commands immediately on construction.
        self.commands()
class Default(FactsBase):
    """Collects the always-gathered facts: OS name/version, service tag,
    model and hostname."""

    def commands(self):
        for cmd in ('show version | display-xml',
                    'show system | display-xml',
                    'show running-configuration | grep hostname'):
            self.runner.add_command(cmd)

    def populate(self):
        version_xml = ET.fromstring(
            self.runner.get_command('show version | display-xml'))
        self.facts['name'] = self.parse_name(version_xml)
        self.facts['version'] = self.parse_version(version_xml)
        system_xml = ET.fromstring(
            self.runner.get_command('show system | display-xml'))
        self.facts['servicetag'] = self.parse_serialnum(system_xml)
        self.facts['model'] = self.parse_model(system_xml)
        hostname_out = self.runner.get_command(
            'show running-configuration | grep hostname')
        self.facts['hostname'] = self.parse_hostname(hostname_out)

    def _find_text(self, data, path):
        # Element text at *path*, or "" when the element is absent.
        node = data.find(path)
        return node.text if node is not None else ""

    def parse_name(self, data):
        return self._find_text(data, './data/system-sw-state/sw-version/sw-name')

    def parse_version(self, data):
        return self._find_text(data, './data/system-sw-state/sw-version/sw-version')

    def parse_hostname(self, data):
        # Unlike the XML parsers this returns None (not "") on no match.
        match = re.search(r'hostname\s+(\S+)', data, re.M)
        if match:
            return match.group(1)

    def parse_model(self, data):
        return self._find_text(data, './data/system/node/mfg-info/product-name')

    def parse_serialnum(self, data):
        return self._find_text(data, './data/system/node/unit/mfg-info/service-tag')
class Hardware(FactsBase):
    """Collects CPU architecture and memory figures.

    NOTE(review): ``populate`` reads 'show version | display-xml', which
    this class never queues itself; it relies on the Default collector
    (always run) having registered that command on the shared runner --
    confirm this coupling is intended.
    """

    def commands(self):
        self.runner.add_command('show processes memory | grep Total')

    def populate(self):
        version_xml = ET.fromstring(
            self.runner.get_command('show version | display-xml'))
        self.facts['cpu_arch'] = self.parse_cpu_arch(version_xml)
        memory_out = self.runner.get_command('show processes memory | grep Total')
        figures = self.parse_memory(memory_out)
        if figures:
            # presumably kB values at positions 0 (total) and 2 (free) --
            # TODO confirm against actual device output.
            self.facts['memtotal_mb'] = int(figures[0]) / 1024
            self.facts['memfree_mb'] = int(figures[2]) / 1024

    def parse_cpu_arch(self, data):
        node = data.find('./data/system-sw-state/sw-version/cpu-arch')
        return node.text if node is not None else ""

    def parse_memory(self, data):
        # Every number that follows a colon, in line order.
        return re.findall(r'\:\s*(\d+)', data, re.M)
class Config(FactsBase):
    """Captures the full running configuration as the 'config' fact."""

    def commands(self):
        self.runner.add_command('show running-config')

    def populate(self):
        self.facts['config'] = self.runner.get_command('show running-config')
class Interfaces(FactsBase):
    """Collects per-interface facts, flat IPv4/IPv6 address lists and
    LLDP neighbor information from 'show interface | display-xml'."""
    def commands(self):
        self.runner.add_command('show interface | display-xml')
    def populate(self):
        # Flat address lists are filled as a side effect of the
        # per-interface parsers invoked from populate_interfaces().
        self.facts['all_ipv4_addresses'] = list()
        self.facts['all_ipv6_addresses'] = list()
        data = self.runner.get_command('show interface | display-xml')
        xml_data = ET.fromstring(data)
        self.facts['interfaces'] = self.populate_interfaces(xml_data)
        self.facts['neighbors'] = self.populate_neighbors(xml_data)
    def populate_interfaces(self, interfaces):
        """Build {interface name: facts dict} by merging three XML
        sections: configuration, operational state, and port state."""
        int_facts = dict()
        # Pass 1: configured attributes keyed by interface name.
        for interface in interfaces.findall('./data/interfaces/interface'):
            intf = dict()
            name = self.parse_item(interface, 'name')
            intf['description'] = self.parse_item(interface, 'description')
            intf['duplex'] = self.parse_item(interface, 'duplex')
            intf['primary_ipv4'] = self.parse_primary_ipv4(interface)
            intf['secondary_ipv4'] = self.parse_secondary_ipv4(interface)
            intf['ipv6'] = self.parse_ipv6_address(interface)
            intf['mtu'] = self.parse_item(interface, 'mtu')
            intf['type'] = self.parse_item(interface, 'type')
            int_facts[name] = intf
        # Pass 2: operational state merged into the same dicts.
        # NOTE(review): assumes every state entry has a matching
        # configuration entry, otherwise int_facts[name] raises KeyError.
        for interface in interfaces.findall('./data/interfaces-state/interface'):
            name = self.parse_item(interface, 'name')
            intf = int_facts[name]
            intf['bandwidth'] = self.parse_item(interface, 'speed')
            intf['adminstatus'] = self.parse_item(interface, 'admin-status')
            intf['operstatus'] = self.parse_item(interface, 'oper-status')
            intf['macaddress'] = self.parse_item(interface, 'phys-address')
        # Pass 3: media type from port state; port names look like
        # '<type>-eth<slot/port>' and map onto 'ethernet<slot/port>'.
        for interface in interfaces.findall('./data/ports/ports-state/port'):
            name = self.parse_item(interface, 'name')
            fanout = self.parse_item(interface, 'fanout-state')
            mediatype = self.parse_item(interface, 'media-type')
            typ, sname = name.split('-eth')
            if fanout == "BREAKOUT_1x1":
                name = "ethernet" + sname
                intf = int_facts[name]
                intf['mediatype'] = mediatype
            else:
                # presumably any other breakout mode yields exactly 4
                # subports (':1'..':4') -- TODO confirm; note xrange is
                # Python-2 only.
                #TODO: Loop for the exact subport
                for subport in xrange(1, 5):
                    name = "ethernet" + sname + ":" + str(subport)
                    intf = int_facts[name]
                    intf['mediatype'] = mediatype
        return int_facts
    def add_ip_address(self, address, family):
        # Append to the module-wide flat address lists set up in populate().
        if family == 'ipv4':
            self.facts['all_ipv4_addresses'].append(address)
        else:
            self.facts['all_ipv6_addresses'].append(address)
    def parse_item(self, interface, item):
        # Child element text, or "" when the child is absent.
        elem = interface.find(item)
        if elem is not None:
            return elem.text
        else:
            return ""
    def parse_primary_ipv4(self, interface):
        """Return the primary IPv4 address ('' if none); also records it
        in the flat all_ipv4_addresses list."""
        ipv4 = interface.find('ipv4')
        ip_address = ""
        if ipv4 is not None:
            prim_ipaddr = ipv4.find('./address/primary-addr')
            if prim_ipaddr is not None:
                ip_address = prim_ipaddr.text
                self.add_ip_address(ip_address, 'ipv4')
        return ip_address
    def parse_secondary_ipv4(self, interface):
        """Return the secondary IPv4 address ('' if none); also records
        it in the flat all_ipv4_addresses list."""
        ipv4 = interface.find('ipv4')
        ip_address = ""
        if ipv4 is not None:
            sec_ipaddr = ipv4.find('./address/secondary-addr')
            if sec_ipaddr is not None:
                ip_address = sec_ipaddr.text
                self.add_ip_address(ip_address, 'ipv4')
        return ip_address
    def parse_ipv6_address(self, interface):
        """Return the IPv6 address ('' if none); also records it in the
        flat all_ipv6_addresses list."""
        ipv6 = interface.find('ipv6')
        ip_address = ""
        if ipv6 is not None:
            ipv6_addr = ipv6.find('./address/ipv6-address')
            if ipv6_addr is not None:
                ip_address = ipv6_addr.text
                self.add_ip_address(ip_address, 'ipv6')
        return ip_address
    def populate_neighbors(self, interfaces):
        """Build {local interface: [{'host': ..., 'port': ...}]} from the
        LLDP remote-neighbor info in the interface state section."""
        lldp_facts = dict()
        for interface in interfaces.findall('./data/interfaces-state/interface'):
            name = interface.find('name').text
            rem_sys_name = interface.find('./lldp-rem-neighbor-info/info/rem-system-name')
            if rem_sys_name is not None:
                lldp_facts[name] = list()
                fact = dict()
                fact['host'] = rem_sys_name.text
                rem_sys_port = interface.find('./lldp-rem-neighbor-info/info/rem-lldp-port-id')
                fact['port'] = rem_sys_port.text
                lldp_facts[name].append(fact)
        return lldp_facts
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """Module entry point: resolve the requested fact subsets, run the
    collectors against the device and exit with ``ansible_net_*`` facts."""
    spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )
    module = NetworkModule(argument_spec=spec, supports_check_mode=True)
    gather_subset = module.params['gather_subset']
    runable_subsets = set()
    exclude_subsets = set()
    # Expand the 'all' / '!subset' notation into include/exclude sets.
    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue
        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False
        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')
        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)
    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)
    runable_subsets.difference_update(exclude_subsets)
    # The default facts are always collected, even when excluded.
    runable_subsets.add('default')
    facts = dict()
    facts['gather_subset'] = list(runable_subsets)
    # All collectors share one runner so commands are batched together.
    runner = CommandRunner(module)
    instances = list()
    for key in runable_subsets:
        runs = FACT_SUBSETS[key](runner)
        instances.append(runs)
    runner.run()
    try:
        for inst in instances:
            inst.populate()
            facts.update(inst.facts)
    except Exception:
        # NOTE(review): any parse failure exits "successfully" with the
        # raw command output instead of failing the task -- confirm that
        # this fallback is intended.
        module.exit_json(out=module.from_json(runner.items))
    # Prefix every fact key per Ansible convention (iteritems: Python 2).
    ansible_facts = dict()
    for key, value in facts.iteritems():
        key = 'ansible_net_%s' % key
        ansible_facts[key] = value
    module.exit_json(ansible_facts=ansible_facts)
if __name__ == '__main__':
main()
| gpl-3.0 |
yokose-ks/edx-platform | common/djangoapps/track/tracker.py | 239 | 2374 | """
Module that tracks analytics events by sending them to different
configurable backends.
The backends can be configured using Django settings as the example
below::
TRACKING_BACKENDS = {
'tracker_name': {
'ENGINE': 'class.name.for.backend',
'OPTIONS': {
'host': ... ,
'port': ... ,
...
}
}
}
"""
import inspect
from importlib import import_module
from dogapi import dog_stats_api
from django.conf import settings
from track.backends import BaseBackend
__all__ = ['send']
backends = {}
def _initialize_backends_from_django_settings():
    """Rebuild the module-level ``backends`` registry from the
    ``TRACKING_BACKENDS`` Django setting, discarding any backends that
    were previously configured."""
    backends.clear()
    tracking_config = getattr(settings, 'TRACKING_BACKENDS', {})
    for backend_name, backend_conf in tracking_config.iteritems():
        # A falsy entry (None/{}) turns that default backend off.
        if not backend_conf:
            continue
        backends[backend_name] = _instantiate_backend_from_name(
            backend_conf['ENGINE'], backend_conf.get('OPTIONS', {}))
def _instantiate_backend_from_name(name, options):
    """Instantiate an event-tracker backend from the dotted path *name*,
    passing *options* as constructor keyword arguments.

    Raises ValueError when the path cannot be resolved or does not name
    a BaseBackend subclass.
    """
    # Split "pkg.module.Class" into a module path and a class name.
    try:
        parts = name.split('.')
        module_name = '.'.join(parts[:-1])
        class_name = parts[-1]
    except IndexError:
        raise ValueError('Invalid event track backend %s' % name)

    # Import the module and verify the attribute really is a backend class.
    try:
        backend_module = import_module(module_name)
        backend_cls = getattr(backend_module, class_name)
        if not inspect.isclass(backend_cls) or not issubclass(backend_cls, BaseBackend):
            raise TypeError
    except (ValueError, AttributeError, TypeError, ImportError):
        raise ValueError('Cannot find event track backend %s' % name)

    return backend_cls(**options)
@dog_stats_api.timed('track.send')
def send(event):
    """Fan *event* out to every initialized tracker backend, recording
    a count plus per-backend timing metrics."""
    dog_stats_api.increment('track.send.count')
    for backend_name, backend in backends.iteritems():
        timer_name = 'track.send.backend.{0}'.format(backend_name)
        with dog_stats_api.timer(timer_name):
            backend.send(event)
# Build the backend registry at import time so send() is usable immediately.
_initialize_backends_from_django_settings()
| agpl-3.0 |
michaelBenin/sqlalchemy | lib/sqlalchemy/testing/plugin/noseplugin.py | 1 | 2735 | # plugin/noseplugin.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Enhance nose with extra options and behaviors for running SQLAlchemy tests.
Must be run via ./sqla_nose.py so that it is imported in the expected
way (e.g. as a package-less import).
"""
import os
import sys
from nose.plugins import Plugin
# Set lazily in NoseSQLAlchemy.configure(), after coverage has started.
fixtures = None
# no package imports yet! this prevents us from tripping coverage
# too soon.
# Load plugin_base by file path rather than by package import; the
# loader API differs between Python >= 3.3 (importlib.machinery) and
# older interpreters (imp).
path = os.path.join(os.path.dirname(__file__), "plugin_base.py")
if sys.version_info >= (3,3):
    from importlib import machinery
    plugin_base = machinery.SourceFileLoader("plugin_base", path).load_module()
else:
    import imp
    plugin_base = imp.load_source("plugin_base", path)
class NoseSQLAlchemy(Plugin):
    """Nose plugin wiring SQLAlchemy's test harness into nose's
    lifecycle; the real work is delegated to the path-loaded
    ``plugin_base`` module."""
    enabled = True
    name = 'sqla_testing'  # plugin identifier used by nose
    score = 100
    def options(self, parser, env=os.environ):
        """Register SQLAlchemy's extra command-line options and read the
        test configuration."""
        Plugin.options(self, parser, env)
        opt = parser.add_option
        def make_option(name, **kw):
            # Adapt plugin_base's (opt_str, value, parser) callbacks to
            # optparse's (option, opt_str, value, parser) signature.
            callback_ = kw.pop("callback", None)
            if callback_:
                def wrap_(option, opt_str, value, parser):
                    callback_(opt_str, value, parser)
                kw["callback"] = wrap_
            opt(name, **kw)
        plugin_base.setup_options(make_option)
        plugin_base.read_config()
    def configure(self, options, conf):
        """Apply parsed options; only now is it safe to import from
        sqlalchemy.testing (see the coverage note at module top)."""
        super(NoseSQLAlchemy, self).configure(options, conf)
        plugin_base.pre_begin(options)
        plugin_base.set_coverage_flag(options.enable_plugin_coverage)
        global fixtures
        from sqlalchemy.testing import fixtures
    def begin(self):
        plugin_base.post_begin()
    def describeTest(self, test):
        # Suppress nose's default per-test description output.
        return ""
    def wantFunction(self, fn):
        # Explicit False rejects; falling through returns None, which
        # leaves the decision to nose's other plugins/defaults.
        if fn.__module__ is None:
            return False
        if fn.__module__.startswith('sqlalchemy.testing'):
            return False
    def wantClass(self, cls):
        return plugin_base.want_class(cls)
    def beforeTest(self, test):
        plugin_base.before_test(test,
            test.test.cls.__module__,
            test.test.cls, test.test.method.__name__)
    def afterTest(self, test):
        plugin_base.after_test(test)
    def startContext(self, ctx):
        # Only TestBase subclasses get class-level setup hooks.
        if not isinstance(ctx, type) \
                or not issubclass(ctx, fixtures.TestBase):
            return
        plugin_base.start_test_class(ctx)
    def stopContext(self, ctx):
        if not isinstance(ctx, type) \
                or not issubclass(ctx, fixtures.TestBase):
            return
        plugin_base.stop_test_class(ctx)
| mit |
jazzmes/ryu | ryu/app/rest_quantum.py | 22 | 4625 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides a set of REST API dedicated to OpenStack Ryu plug-in.
- Interface (uuid in ovsdb) registration
- Maintain interface association to a network
Used by OpenStack Ryu plug-in.
"""
import json
from webob import Response
from ryu.base import app_manager
from ryu.app.wsgi import (ControllerBase,
WSGIApplication)
from ryu.lib import quantum_ifaces
# REST API for openstack quantum
# get the list of iface-ids
# GET /v1.0/quantum/ports/
#
# register the iface_id
# POST /v1.0/quantum/ports/{iface_id}
#
# unregister iface_id
# DELETE /v1.0/quantum/ports/{iface_id}
#
# associate network_id with iface_id
# GET /v1.0/quantum/ports/{iface_id}/network_id
#
# associate network_id with iface_id
# POST /v1.0/quantum/ports/{iface_id}/network_id/{network_id}
#
# update network_id with iface_id
# PUT /v1.0/quantum/ports/{iface_id}/network_id/{network_id}
class QuantumController(ControllerBase):
    """REST controller backing the OpenStack Quantum Ryu plug-in API.

    Routes (see the comment map above) are bound to these handlers by
    QuantumIfaceAPI.  Every handler operates on the shared QuantumIfaces
    registry and returns a webob Response, JSON-encoded where a body is
    present.
    """

    def __init__(self, req, link, data, **config):
        super(QuantumController, self).__init__(req, link, data, **config)
        # 'data' is the QuantumIfaces instance registered under this
        # controller's name by QuantumIfaceAPI.
        self.ifaces = data

    def list_ifaces(self, _req, **_kwargs):
        """GET /v1.0/quantum/ports -- JSON list of registered iface ids."""
        # NOTE(review): .keys() is passed straight to json.dumps; that
        # only works on Python 2 where keys() returns a list.
        body = json.dumps(self.ifaces.keys())
        return Response(content_type='application/json', body=body)

    def delete_iface(self, _req, iface_id, **_kwargs):
        """DELETE /v1.0/quantum/ports/{iface_id} -- unregister an iface."""
        self.ifaces.unregister(iface_id)
        return Response(status=200)

    def list_keys(self, _req, iface_id, **_kwargs):
        """GET .../ports/{iface_id}/keys -- keys known for the iface."""
        try:
            keys = self.ifaces.list_keys(iface_id)
        except KeyError:
            # unknown iface_id
            return Response(status=404)
        body = json.dumps(keys)
        return Response(content_type='application/json', body=body)

    def get_key(self, _req, iface_id, key, **_kwargs):
        """GET .../ports/{iface_id}/keys/{key} -- value stored for key."""
        try:
            value = self.ifaces.get_key(iface_id, key)
        except KeyError:
            return Response(status=404)
        body = json.dumps(value)
        return Response(content_type='application/json', body=body)

    def create_value(self, _req, iface_id, key, value, **_kwargs):
        """POST .../keys/{key}/{value} -- associate value with key."""
        try:
            self.ifaces.set_key(iface_id, key, value)
        except ValueError:
            # set_key signals an invalid association with ValueError
            return Response(status=404)
        return Response(status=200)

    def update_value(self, _req, iface_id, key, value, **_kwargs):
        """PUT .../keys/{key}/{value} -- update the value stored for key."""
        try:
            self.ifaces.update_key(iface_id, key, value)
        except ValueError:
            return Response(status=404)
        return Response(status=200)
class QuantumIfaceAPI(app_manager.RyuApp):
    """Ryu application that wires the REST routes above to
    QuantumController via the shared WSGI mapper."""

    _CONTEXTS = {
        'quantum_ifaces': quantum_ifaces.QuantumIfaces,
        'wsgi': WSGIApplication,
    }

    def __init__(self, *args, **kwargs):
        super(QuantumIfaceAPI, self).__init__(*args, **kwargs)
        self.ifaces = kwargs['quantum_ifaces']
        wsgi = kwargs['wsgi']
        mapper = wsgi.mapper

        controller = QuantumController
        # 'registory' is ryu's own (sic) spelling; the registered object
        # is handed to the controller constructor as its 'data' argument.
        wsgi.registory[controller.__name__] = self.ifaces
        route_name = 'quantum_ifaces'
        uri = '/v1.0/quantum'

        ports_uri = uri + '/ports'
        s = mapper.submapper(controller=controller)
        # GET /v1.0/quantum/ports
        s.connect(route_name, ports_uri, action='list_ifaces',
                  conditions=dict(method=['GET', 'HEAD']))

        iface_uri = ports_uri + '/{iface_id}'
        # DELETE /v1.0/quantum/ports/{iface_id}
        s.connect(route_name, iface_uri, action='delete_iface',
                  conditions=dict(method=['DELETE']))

        keys_uri = iface_uri + '/keys'
        # GET /v1.0/quantum/ports/{iface_id}/keys
        s.connect(route_name, keys_uri, action='list_keys',
                  conditions=dict(method=['GET', 'HEAD']))

        key_uri = keys_uri + '/{key}'
        # GET /v1.0/quantum/ports/{iface_id}/keys/{key}
        s.connect(route_name, key_uri, action='get_key',
                  conditions=dict(method=['GET', 'HEAD']))

        value_uri = keys_uri + '/{key}/{value}'
        # POST creates the key/value association, PUT updates it
        s.connect(route_name, value_uri, action='create_value',
                  conditions=dict(method=['POST']))
        s.connect(route_name, value_uri, action='update_value',
                  conditions=dict(method=['PUT']))
| apache-2.0 |
pasqualguerrero/django | django/middleware/http.py | 247 | 2129 | from django.utils.http import http_date, parse_http_date_safe
class ConditionalGetMiddleware(object):
    """
    Handles conditional GET operations. If the response has an ETag or
    Last-Modified header, and the request has If-None-Match or
    If-Modified-Since, the response is replaced by an HttpNotModified.
    Also sets the Date and Content-Length response-headers.
    """
    def process_response(self, request, response):
        """Stamp Date/Content-Length, then downgrade to 304 when the
        client's conditional headers show its cached copy is current."""
        # Date is set unconditionally on every response.
        response['Date'] = http_date()
        if not response.streaming and not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))

        # If-None-Match must be ignored if original result would be anything
        # other than a 2XX or 304 status. 304 status would result in no change.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26
        if 200 <= response.status_code < 300 and response.has_header('ETag'):
            if_none_match = request.META.get('HTTP_IF_NONE_MATCH')
            if if_none_match == response['ETag']:
                # Setting the status is enough here. The response handling path
                # automatically removes content for this status code (in
                # http.conditional_content_removal()).
                response.status_code = 304

        # If-Modified-Since must be ignored if the original result was not a 200.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25
        if response.status_code == 200 and response.has_header('Last-Modified'):
            if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
            if if_modified_since is not None:
                # parse_http_date_safe returns None for a malformed date,
                # in which case the header is simply ignored.
                if_modified_since = parse_http_date_safe(if_modified_since)
            if if_modified_since is not None:
                last_modified = parse_http_date_safe(response['Last-Modified'])
                if last_modified is not None and last_modified <= if_modified_since:
                    # Setting the status code is enough here (same reasons as
                    # above).
                    response.status_code = 304

        return response
| bsd-3-clause |
dkubiak789/OpenUpgrade | addons/sales_team/__openerp__.py | 52 | 1792 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest for the Sales Team module.
{
    'name': 'Sale Team',
    'version': '1.0',
    'author': 'OpenERP SA',
    'category': 'Sales Management',
    'summary': 'Sales Team',
    'description': """
Using this application you can manage Sales Team with CRM and/or Sales
=======================================================================
""",
    'website': 'http://www.openerp.com',
    # web_kanban_sparkline provides the kanban dashboard graphs
    'depends': ['base','mail','web_kanban_sparkline',],
    'data': ['security/sales_team_security.xml',
             'security/ir.model.access.csv',
             'res_config_view.xml',
             'sales_team_data.xml',
             'sales_team.xml',],
    'demo': ['sales_team_demo.xml'],
    'css': ['static/src/css/sales_team.css'],
    'installable': True,
    # auto_install: installed automatically once all dependencies are present
    'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
def delete_named_urlpattern(urlpatterns, name):
    """Delete the named urlpattern.

    Recurses into included pattern lists; returns True as soon as one
    pattern named ``name`` has been removed, False otherwise.
    """
    index = 0
    while index < len(urlpatterns):
        entry = urlpatterns[index]
        if hasattr(entry, 'url_patterns'):
            # an include(): search its nested patterns
            if delete_named_urlpattern(entry.url_patterns, name):
                return True
        elif name and hasattr(entry, 'name') and entry.name == name:
            del urlpatterns[index]
            return True
        index += 1
    return False
def remove_duplicate_urls(urls, names):
    """Remove any URLs whose names are already in use.

    Mutates ``urls`` in place and appends newly-seen names to ``names``.

    Bug fix: the original iterated over ``urls`` directly while calling
    ``urls.remove()`` on it, which makes the iterator skip the element
    following each removal, leaving duplicates behind.  Iterating over a
    snapshot avoids that.
    """
    for pattern in list(urls):
        if hasattr(pattern, 'url_patterns'):
            # an include(): recurse into its nested patterns
            remove_duplicate_urls(pattern.url_patterns, names)
        elif hasattr(pattern, 'name') and pattern.name:
            if pattern.name in names:
                urls.remove(pattern)
            else:
                names.append(pattern.name)
def replace_urlpattern(urlpatterns, replacement):
    """Delete the old urlpattern and append the replacement.

    parameters:
        urlpatterns: list
        replacement: a ``django.conf.urls.url`` object.

    The replacement is matched against existing patterns by name when it
    carries a truthy ``name``, otherwise by its regex pattern string.
    Recurses into included pattern lists; returns True on success.

    example:
        replacement = url(r'^accounts/login/', 'my.site.login_signup', {}, name='auth_login')
        replace_urlpattern(urlpatterns, replacement)
    """
    if hasattr(replacement, 'name') and replacement.name:
        match_name = replacement.name
        match_regex = None
    else:
        match_name = None
        match_regex = replacement.regex.pattern

    index = 0
    while index < len(urlpatterns):
        entry = urlpatterns[index]
        if hasattr(entry, 'url_patterns'):
            # an include(): try to replace inside its nested patterns
            if replace_urlpattern(entry.url_patterns, replacement):
                return True
            index += 1
            continue
        if match_name and hasattr(entry, 'name'):
            if entry.name != match_name:
                index += 1
                continue
        elif entry.regex.pattern != match_regex:
            index += 1
            continue
        # matched: drop the old pattern and append the new one
        del urlpatterns[index]
        urlpatterns.append(replacement)
        return True
    return False
def replace_urlpatterns(urlpatterns, replacelist):
    """Apply replace_urlpattern() for every replacement in replacelist."""
    for replacement in replacelist:
        replace_urlpattern(urlpatterns, replacement)
def reverse_admin_url(model, action, args=None, kwargs=None):
    """Reverse a Django admin URL for the given model and action.

    ``action`` is the admin view name, e.g. 'change', 'add', 'delete'.
    NOTE(review): relies on ``meta.module_name``, which exists only on
    older Django versions (renamed ``model_name`` in 1.6+) -- confirm
    the supported Django version.
    """
    from django.core.urlresolvers import reverse
    meta = model._meta
    name = 'admin:%s_%s_%s' % (meta.app_label, meta.module_name, action)
    return reverse(name, args=args, kwargs=kwargs)
| bsd-3-clause |
TraderZed/greygardens | node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user file (.user) writer.

  Fixes over the original: the mutable default argument
  ``environment = {}`` is replaced with ``None`` (same falsy behavior in
  the body, no shared-state pitfall), and the AddDebugSettings docstring
  now documents the actual parameters.
  """

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  def AddDebugSettings(self, config_name, command, environment=None,
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      config_name: Name of the configuration to attach the settings to.
      command: command line to run.  First element in the list is the
        executable.  All elements of the command will be quoted if
        necessary.
      environment: dict of extra environment variables. (optional)
      working_directory: working directory for the debuggee. (optional)
    """
    command = _QuoteWin32CommandLineArgs(command)

    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      env_list = ['%s="%s"' % (key, val)
                  for (key, val) in environment.iteritems()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it.  We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    for config, spec in sorted(self.configurations.iteritems()):
      configs.append(spec)

    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
| mit |
alikins/ansible | lib/ansible/modules/inventory/add_host.py | 66 | 2701 | # -*- mode: python -*-
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: add_host
short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory
description:
- Use variables to create new hosts and groups in inventory for use in later plays of the same playbook.
Takes variables so you can define the new hosts more fully.
- This module is also supported for Windows targets.
version_added: "0.9"
options:
name:
aliases: [ 'hostname', 'host' ]
description:
- The hostname/ip of the host to add to the inventory, can include a colon and a port number.
required: true
groups:
aliases: [ 'groupname', 'group' ]
description:
- The groups to add the hostname to, comma separated.
required: false
notes:
- This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it
to iterate use a with\_ directive.
- Windows targets are supported by this module.
- The alias 'host' of the parameter 'name' is only available on >=2.4
- Since Ansible version 2.4, the ``inventory_dir`` variable is now set to ``None`` instead of the 'global inventory source',
because you can now have multiple sources. An example was added that shows how to partially restore the previous behaviour.
author:
- "Ansible Core Team"
- "Seth Vidal"
'''
EXAMPLES = '''
- name: add host to group 'just_created' with variable foo=42
add_host:
name: "{{ ip_from_ec2 }}"
groups: just_created
foo: 42
- name: add host to multiple groups
add_host:
hostname: "{{ new_ip }}"
groups:
- group1
- group2
- name: add a host with a non-standard port local to your machines
add_host:
name: "{{ new_ip }}:{{ new_port }}"
- name: add a host alias that we reach through a tunnel (Ansible <= 1.9)
add_host:
hostname: "{{ new_ip }}"
ansible_ssh_host: "{{ inventory_hostname }}"
ansible_ssh_port: "{{ new_port }}"
- name: add a host alias that we reach through a tunnel (Ansible >= 2.0)
add_host:
hostname: "{{ new_ip }}"
ansible_host: "{{ inventory_hostname }}"
ansible_port: "{{ new_port }}"
- name: Ensure inventory vars are set to the same value as the inventory_hostname has (close to pre 2.4 behaviour)
add_host:
hostname: charlie
inventory_dir: "{{inventory_dir}}"
'''
| gpl-3.0 |
rkJun/libhangul | doc/gen_hangulkeyboards.py | 4 | 14748 | #!/usr/bin/env python
# coding=utf-8
import sys
# Parse hangulkeyboard.h: collect each hangul_keyboard_table_<name>
# C array into table[<name>] as a list of integer key codes.
table = {}
current_name = ''
current_table = []
src = open('../hangul/hangulkeyboard.h', 'r')
for i in src.readlines():
    # Array header line: "... hangul_keyboard_table_<name>[] = {"
    pos = i.find('hangul_keyboard_table_')
    if pos >= 0:
        end = i.find('[')
        current_name = i[pos + 22:end];
        continue
    # A closing brace terminates the current array.
    pos = i.find('}')
    if pos >= 0:
        if len(current_name) > 0:
            table[current_name] = current_table
            current_name = ''
            current_table = []
        continue
    # Inside an array: the first token is a hex code such as "0x3131,".
    if len(current_name) > 0:
        code = int(i.split()[0].replace(',', ''), 16)
        current_table.append(code)
def jamo_to_compat_jamo(code):
    """Map a U+11xx conjoining jamo to its U+31xx compatibility jamo.

    Codes outside U+1100..U+11FF, and jamo that have no compatibility
    equivalent (table entry 0), are returned unchanged.
    """
    # Indexed by (code - 0x1100); 0x0000 marks "no compatibility jamo".
    compat = [
        0x3131, 0x3132, 0x3134, 0x3137,
        0x3138, 0x3139, 0x3141, 0x3142,
        0x3143, 0x3145, 0x3146, 0x3147,
        0x3148, 0x3149, 0x314a, 0x314b,
        0x314c, 0x314d, 0x314e, 0x0000,
        0x3165, 0x3166, 0x0000, 0x0000,
        0x0000, 0x0000, 0x3140, 0x0000,
        0x316e, 0x3171, 0x3172, 0x0000,
        0x3173, 0x3144, 0x3174, 0x3175,
        0x0000, 0x0000, 0x0000, 0x3176,
        0x0000, 0x3177, 0x0000, 0x3178,
        0x3179, 0x317a, 0x317b, 0x317c,
        0x0000, 0x0000, 0x317d, 0x0000,
        0x0000, 0x0000, 0x317e, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x317f, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x3180,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x3181, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x3184,
        0x3185, 0x3186, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x3164, 0x314f, 0x3150, 0x3151,
        0x3152, 0x3153, 0x3154, 0x3155,
        0x3156, 0x3157, 0x3158, 0x3159,
        0x315a, 0x315b, 0x315c, 0x315d,
        0x315e, 0x315f, 0x3160, 0x3161,
        0x3162, 0x3163, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x3187, 0x3188, 0x0000, 0x0000,
        0x3189, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x0000, 0x318a, 0x318b, 0x0000,
        0x318c, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x318d, 0x0000,
        0x0000, 0x318e, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x3131, 0x3132, 0x3133, 0x3134,
        0x3135, 0x3136, 0x3137, 0x3139,
        0x313a, 0x313b, 0x313c, 0x313d,
        0x313e, 0x313f, 0x3140, 0x3141,
        0x3142, 0x3144, 0x3145, 0x3146,
        0x3147, 0x3148, 0x314a, 0x314b,
        0x314c, 0x314d, 0x314e, 0x0000,
        0x0000, 0x0000, 0x0000, 0x3167,
        0x3168, 0x0000, 0x0000, 0x0000,
        0x3169, 0x0000, 0x316a, 0x0000,
        0x0000, 0x0000, 0x0000, 0x316b,
        0x0000, 0x0000, 0x0000, 0x316c,
        0x0000, 0x316d, 0x0000, 0x0000,
        0x0000, 0x316f, 0x0000, 0x3170,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x0000, 0x3182, 0x3183, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0000,
    ]
    if 0x1100 <= code <= 0x11ff:
        mapped = compat[code - 0x1100]
        if mapped != 0:
            return mapped
    return code
def is_choseong(code):
    """True if code is a conjoining leading consonant (U+1100..U+115F)."""
    return 0x1100 <= code <= 0x115f
def is_jungseong(code):
    """True if code is a conjoining vowel (U+1160..U+11A7)."""
    return 0x1160 <= code <= 0x11a7
def is_jongseong(code):
    """True if code is a conjoining trailing consonant (U+11A8..U+11FF)."""
    return 0x11a8 <= code <= 0x11ff
def to_html_entity(code):
    """Return HTML numeric entities rendering the jamo as a syllable.

    A lone jamo is padded with the appropriate filler characters
    (U+115F choseong filler, U+1160 jungseong filler) so that browsers
    display it in the correct syllable position; non-jamo codes become
    a single entity.
    """
    if is_choseong(code):
        parts = (code, 0x1160)
    elif is_jungseong(code):
        parts = (0x115f, code)
    elif is_jongseong(code):
        parts = (0x115f, 0x1160, code)
    else:
        parts = (code,)
    return ''.join('&#%d;' % c for c in parts)
def _jamo_span(css_class, text):
    """Shared formatter: wrap text in a span with the given CSS class.

    Factors out the format string that was duplicated across the four
    to_*_label helpers below.
    """
    return '<span class="%s">%s</span>' % (css_class, text)

def to_choseong_label(s):
    """Mark up s as a leading-consonant (choseong) key label."""
    return _jamo_span('choseong', s)

def to_jungseong_label(s):
    """Mark up s as a vowel (jungseong) key label."""
    return _jamo_span('jungseong', s)

def to_jongseong_label(s):
    """Mark up s as a trailing-consonant (jongseong) key label."""
    return _jamo_span('jongseong', s)

def to_regular_label(s):
    """Mark up s as a non-hangul key label."""
    return _jamo_span('other', s)
def make_label(code):
    """Return the colored HTML label for one key value.

    The jamo is first mapped to its compatibility form for display,
    then wrapped in a span whose CSS class reflects its syllable role
    (choseong/jungseong/jongseong), or 'other' for non-jamo keys.
    """
    c = jamo_to_compat_jamo(code)
    s = to_html_entity(c);
    if is_choseong(code):
        return to_choseong_label(s)
    elif is_jungseong(code):
        return to_jungseong_label(s)
    elif is_jongseong(code):
        return to_jongseong_label(s)
    else:
        return to_regular_label(s)
def print_empty_cell():
    # Spacer cell used to establish the key grid's column widths.
    print '<td class="emptycell"></td>'
def print_cell_with_label(label, span):
    # Gray non-character key (Shift, Enter, ...) spanning 'span' grid columns.
    print '<td class="graykeycell" colspan="%d">%s</td>' % (span, label)
def print_cell_with_table(table, upper_index, lower_index, span = 4):
    """Print one character key: shifted value on top, plain value below.

    'table' maps ASCII key codes to jamo; upper_index/lower_index are
    the shifted and unshifted ASCII codes of the physical key.
    """
    upper = table[upper_index];
    lower = table[lower_index];
    if upper == lower:
        # Shift makes no difference for this key: blank the upper label.
        upper = ord(' ')
    print '<td class="keycell" colspan="%d">%s<br/>%s</td>' % (span, make_label(upper), make_label(lower))
def print_keyboard_table(table):
    """Emit the doxygen/HTML markup for one keyboard layout.

    'table' maps ASCII key codes to the jamo the layout produces; the
    physical US keyboard is drawn row by row, each key showing its
    shifted value above its plain value.
    """
    print ''
    print '@htmlonly'
    print ''
    print '<table class="keyboard">'
    # A row of 60 empty spacer cells fixes the grid's column widths.
    print '<tr>'
    for i in range(0, 60):
        print_empty_cell()
    print '</tr>'
    # 1st row
    print '<tr>'
    print_cell_with_table(table, 0x7e, 0x60)
    print_cell_with_table(table, 0x21, 0x31)
    print_cell_with_table(table, 0x40, 0x32)
    print_cell_with_table(table, 0x23, 0x33)
    print_cell_with_table(table, 0x24, 0x34)
    print_cell_with_table(table, 0x25, 0x35)
    print_cell_with_table(table, 0x5e, 0x36)
    print_cell_with_table(table, 0x26, 0x37)
    print_cell_with_table(table, 0x2a, 0x38)
    print_cell_with_table(table, 0x28, 0x39)
    print_cell_with_table(table, 0x29, 0x30)
    print_cell_with_table(table, 0x5f, 0x2d)
    print_cell_with_table(table, 0x2b, 0x3d)
    print_cell_with_label('Backspace', 8)
    print '</tr>'
    # 2nd row
    print '<tr>'
    print_cell_with_label('Tab', 6)
    print_cell_with_table(table, 0x51, 0x71)
    print_cell_with_table(table, 0x57, 0x77)
    print_cell_with_table(table, 0x45, 0x65)
    print_cell_with_table(table, 0x52, 0x72)
    print_cell_with_table(table, 0x54, 0x74)
    print_cell_with_table(table, 0x59, 0x79)
    print_cell_with_table(table, 0x55, 0x75)
    print_cell_with_table(table, 0x49, 0x69)
    print_cell_with_table(table, 0x4f, 0x6f)
    print_cell_with_table(table, 0x50, 0x70)
    print_cell_with_table(table, 0x7b, 0x5b)
    print_cell_with_table(table, 0x7d, 0x5d)
    print_cell_with_table(table, 0x7c, 0x5c, 6)
    print '</tr>'
    # 3rd row
    print '<tr>'
    print_cell_with_label('Caps Lock', 8)
    print_cell_with_table(table, 0x41, 0x61)
    print_cell_with_table(table, 0x53, 0x73)
    print_cell_with_table(table, 0x44, 0x64)
    print_cell_with_table(table, 0x46, 0x66)
    print_cell_with_table(table, 0x47, 0x67)
    print_cell_with_table(table, 0x48, 0x68)
    print_cell_with_table(table, 0x4a, 0x6a)
    print_cell_with_table(table, 0x4b, 0x6b)
    print_cell_with_table(table, 0x4c, 0x6c)
    print_cell_with_table(table, 0x3a, 0x3b)
    print_cell_with_table(table, 0x22, 0x27)
    print_cell_with_label('Enter', 8)
    print '</tr>'
    # 4th row
    print '<tr>'
    print_cell_with_label('Shift', 10)
    print_cell_with_table(table, 0x5a, 0x7a)
    print_cell_with_table(table, 0x58, 0x78)
    print_cell_with_table(table, 0x43, 0x63)
    print_cell_with_table(table, 0x56, 0x76)
    print_cell_with_table(table, 0x42, 0x62)
    print_cell_with_table(table, 0x4e, 0x6e)
    print_cell_with_table(table, 0x4d, 0x6d)
    print_cell_with_table(table, 0x3c, 0x2c)
    print_cell_with_table(table, 0x3e, 0x2e)
    print_cell_with_table(table, 0x3f, 0x2f)
    print_cell_with_label('Shift', 10)
    print '</tr>'
    # 5th row -- modifier row, kept but commented out in the HTML output
    print '<!--'
    print '<tr>'
    print_cell_with_label('Ctrl', 5)
    print_cell_with_label('Super', 4)
    print_cell_with_label('Alt', 5)
    print_cell_with_label('한자', 4)
    print_cell_with_label(' <br/> ', 20)
    print_cell_with_label('한/영', 4)
    print_cell_with_label('Alt', 5)
    print_cell_with_label('Super', 4)
    print_cell_with_label('Menu', 4)
    print_cell_with_label('Ctrl', 5)
    print '</tr>'
    print '-->'
    print '</table>'
    print ''
    print '@endhtmlonly'
    print ''
print '''/**
@defgroup hangulkeyboards 한글 자판
libhangul에서 builtin으로 지원하는 자판은 @ref layout_2, @ref layout_2y,
@ref layout_3f, @ref layout_390, @ref layout_3s,
@ref layout_3y, @ref layout_ahn, @ref layout_ro 자판 이다.
아래의 자판 배열은 libhangul에서 지원하는 자판을 알아보기 쉽게 그림형태로
나타낸 것이다.
@li 각 네모에 표시된 글자중 위에 있는 글자는 shift와 함께 눌렀을때
입력되는 글자고 아래에 있는 글자는 그냥 눌렀을때 입력되는 글자다.
@li 초성, 중성, 종성은 각각 붉은색, 녹색, 파란색(RGB 순서)으로 표시하였다.
@li 한글이 아닌 것은 검은색으로 표시하였다.
@htmlonly
<style type="text/css">
.choseong {
color: darkred;
}
.jungseong {
color: darkgreen;
}
.jongseong {
color: darkblue;
}
.other {
color: black;
}
table.keyboard {
font-family: sans-serif;
font-size: 120%;
border: 0px solid darkgray;
padding: 3px;
border-collapse: collapse;
border-collapse: separate;
}
td.emptycell {
width: 8px;
height: -3px;
padding: 0px;
margin: 0px;
border-style: none;
}
td.keycell {
text-align: center;
line-height: 110%;
background: whitesmoke;
padding: 2px;
margin: 1px;
border: 1px solid dimgray;
//border-width: 0px 1px 1px 0px;
}
td.graykeycell {
font-size: small;
text-align: center;
line-height: 110%;
color: lightgray;
background-color: white;
padding: 2px;
margin: 0px;
border: 2px solid lightgray;
border-width: 1px 1px 1px 1px;
}
</style>
@endhtmlonly
'''
print '''@section layout_2 두벌식
이 자판은 표준 두벌식을 구현한 것이다. 그러나 표준 자판과 조금 다른 동작이
있다. 된소리 ㅃㅉㄸㄲㅆ들은 shift와 함께 누르는 방법 이외에도 반복해서
누르면 입력된다.
'''
print_keyboard_table(table['2'])
print ''
print '''@section layout_2y 두벌식 옛글
이 자판은 옛한글을 입력하기 위한 두벌식 자판이다. 아래한글의 두벌식 옛글 자판과
같은 배열을 가지도록 만들었다. @ref layout_2 자판과 마찬가지로 된소리를 두번
누르는 방법으로 입력할 수 있다.
'''
print_keyboard_table(table['2y'])
print ''
print '''@section layout_3f 세벌식 최종
이 자판은 세벌식 최종 자판을 구현한 것이다. 원래 방식대로라면 왼쪽의 ㅗㅜ는
다른 모음과 조합되지 않아야 하지만, libhangul에서는 좌우의 ㅗㅜ를 구분하지
않고 다른 모음과 조합된다.
'''
print_keyboard_table(table['3final'])
print ''
print '''@section layout_390 세벌식 390
이 자판은 세벌식 390 자판을 구현한 것이다. 원래 방식대로라면 왼쪽의 ㅗㅜ는
다른 모음과 조합되지 않아야 하지만, libhangul에서는 좌우의 ㅗㅜ를 구분하지
않고 다른 모음과 조합된다.
'''
print_keyboard_table(table['390'])
print ''
print '''@section layout_3s 세벌식 순아래
이 자판은 세벌식 순아래 자판을 구현한 것이다.
'''
print_keyboard_table(table['3sun'])
print ''
print '''@section layout_3y 세벌식 옛글
이 자판은 세벌식 옛글 자판을 구현한 것이다. 자판 배열은 아래한글의 세벌식
옛글 자판과 동일하다.
'''
print_keyboard_table(table['3yet'])
print ''
print '''@section layout_32 세벌식 두벌배열
이 자판은 두벌식 자판의 배열을 그대로 사용하면서 shift와 함께 자음을 누르면
종성으로 입력되도록 만든 자판이다. 일반 사용을 위해 만들어진 것이 아니고
두벌식 사용자가 손쉽게 세벌식 자판의 테스트를 할 수 있도록 하기 위해서
만든 자판이다.
'''
print_keyboard_table(table['32'])
print ''
print '''@section layout_ahn 안마태
이 자판은 안마태 자판을 구현한 것이다. libhangul의 안마태 자판 구현은
안마태 자판의 규격을 충실하게 구현한 것은 아니고 적당한 수준에서 사용가능하게
만든 것이다. 키입력이 동시에 일어났는지 판단하여 입력을 하는 방식이 아니고,
키를 순서대로 입력하여도 자모를 조합하도록 구현하여, 안마태 자판과 유사한
동작을 하도록 구현하였다.
'''
print_keyboard_table(table['ahn'])
print ''
print '''@section layout_ro 로마자
이 자판은 현재 자판 배열의 라틴 문자를 음차하여 한글로 입력되도록 만든 자판이다.
예를 들면 'an'을 치면 '안'이 입력되는 방식이다.
로마자 자판은 다른 한글자판과 달리 <a href="http://en.wikipedia.org/wiki/Transliteration">Transliteration</a> 방식으로 작동하는
자판이므로 별도의 레이아웃이 없다. 이 입력 자판에서 사용한 알파벳과 한글
자모의 대응은 기본적으로 한글 로마자 표기법과 유사하지만
동일하지는 않다. 로마자 자판은 한글을 입력하기 위한 것이므로 한글을 그럴듯
하게 표기하기 위한 로마자 표기법과는 목적이 다르다. 주로 한글 자모를 모두
입력할 수 있도록 하는 것을 우선적으로 고려하였다. 따라서 아래 자판의 대응대로
한글을 로마자로 표기하는 것은 로마자 표기법으로는 적절하지 않다.
한글 자모와 알파벳의 대응은 아래와 같다.
@li ㄱ : g
@li ㄴ : n
@li ㄷ : d
@li ㄹ : r, l
@li ㅁ : m
@li ㅂ : b, v
@li ㅅ : s
@li ㅇ : ng
@li ㅈ : j
@li ㅊ : c, ch
@li ㅋ : k, q
@li ㅌ : t
@li ㅍ : f, p
@li ㅎ : h
@li ㄲ : gg
@li ㄸ : dd
@li ㅃ : bb
@li ㅆ : ss
@li ㅉ : jj
@li ㅊ : ch
@li ㅏ : a
@li ㅐ : ae, ai
@li ㅑ : ya, ia
@li ㅒ : yae, yai, iae, iai
@li ㅓ : eo
@li ㅔ : e
@li ㅕ : yeo, ieo
@li ㅖ : ye, ie
@li ㅗ : o
@li ㅘ : oa, wa
@li ㅙ : oae, oai
@li ㅚ : oi
@li ㅛ : yo, io
@li ㅜ : u, w
@li ㅝ : ueo, weo, uo
@li ㅞ : ue
@li ㅟ : ui, wi
@li ㅠ : yu, iu
@li ㅡ : eu
@li ㅢ : eui
@li ㅣ : i, y
그 외에 다음과 같은 조합 규칙이 있다.
@li 자음없이 모음만 먼저 입력하면 초성 ㅇ이 추가된다.
예를 들어 'a'만 눌러도 '아'로 입력된다.
@li ng 는 종성 ㅇ으로 입력된다.
@li x는 음절의 시작에서는 초성 ㅈ, 끝에서는 종성 ㄳ으로 입력된다.
@li 종성의 자음군은 앞음절의 글자로 입력된다.
예를 들어 'banga'는 '방아'로 입력된다.
@li 대문자를 입력하면 음절의 시작으로 인식한다.
예를 들어 'banga'는 '방아'로, 'banGa'는 '반가'로 입력된다.
'beos'는 '벗'으로 'beOs'는 '베옷'으로 입력된다.
@li 자음군으로 음절이 시작되면 ㅡ를 자동으로 추가한다.
예를 들어 'string'을 입력하면 s와 t에 ㅡ가 붙게 되어 '스트링'으로 입력된다.
@sa
다음 위키 페이지를 참조하라. @n
http://en.wikipedia.org/wiki/Korean_romanization @n
http://en.wikipedia.org/wiki/Revised_Romanization_of_Korean
'''
print '*/'
| lgpl-2.1 |
gammu/wammu | Wammu/Logger.py | 1 | 5021 | # -*- coding: UTF-8 -*-
#
# Copyright © 2003 - 2018 Michal Čihař <michal@cihar.com>
#
# This file is part of Wammu <https://wammu.eu/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Wammu - Phone manager
Logging window and thread for log reading
'''
import threading
import wx
import os
import sys
import time
import Wammu.Events
from Wammu.Locales import ugettext as _
class LoggerDebug(threading.Thread):
    '''
    Thread which reads defined files and prints it to stderr.
    '''
    def __init__(self, filename):
        '''
        Initializes reader on filename, text will be printed to stderr.
        '''
        threading.Thread.__init__(self)
        self.file_descriptor = open(filename, 'r')
        self.filename = filename
        self.canceled = False

    def run(self):
        """
        This is basically tail -f reimplementation: poll the file for
        new lines and copy them to stderr until canceled.
        """
        while not self.canceled:
            where = self.file_descriptor.tell()
            txt = self.file_descriptor.readlines()
            if txt:
                sys.stderr.write(''.join(txt))
                continue
            fd_results = os.fstat(self.file_descriptor.fileno())
            try:
                st_results = os.stat(self.filename)
            except OSError:
                # File vanished (e.g. mid-rotation); treat as unchanged
                # and retry on the next poll.
                st_results = fd_results
            if st_results[1] == fd_results[1] or sys.platform == 'win32':
                # Same inode (or Windows, where st_ino is meaningless):
                # no new data yet, wait and rewind to where we stopped.
                time.sleep(1)
                self.file_descriptor.seek(where)
            else:
                # The file was replaced under us (log rotation): close the
                # stale descriptor before reopening -- the original code
                # leaked a file descriptor here on every rotation.
                self.file_descriptor.close()
                self.file_descriptor = open(self.filename, 'r')
        self.file_descriptor.close()
class Logger(threading.Thread):
    '''
    Thread which reads defined files and posts events on change.
    '''
    def __init__(self, win, filename):
        '''
        Initializes reader on filename, events will be sent to win.
        '''
        threading.Thread.__init__(self)
        self.win = win
        self.file_descriptor = open(filename, 'r')
        self.filename = filename
        self.canceled = False

    def run(self):
        """
        This is basically tail -f reimplementation: poll the file for
        new lines and post them to self.win as LogEvents until canceled.
        """
        while not self.canceled:
            where = self.file_descriptor.tell()
            txt = self.file_descriptor.readlines()
            if txt:
                evt = Wammu.Events.LogEvent(txt=''.join(txt))
                wx.PostEvent(self.win, evt)
                continue
            fd_results = os.fstat(self.file_descriptor.fileno())
            try:
                st_results = os.stat(self.filename)
            except OSError:
                # File vanished (e.g. mid-rotation); treat as unchanged
                # and retry on the next poll.
                st_results = fd_results
            if st_results[1] == fd_results[1] or sys.platform == 'win32':
                # Same inode (or Windows, where st_ino is meaningless):
                # no new data yet, wait and rewind to where we stopped.
                time.sleep(1)
                self.file_descriptor.seek(where)
            else:
                # The file was replaced under us (log rotation): close the
                # stale descriptor before reopening -- the original code
                # leaked a file descriptor here on every rotation.
                self.file_descriptor.close()
                self.file_descriptor = open(self.filename, 'r')
        self.file_descriptor.close()
class LogFrame(wx.Frame):
    '''
    Window with debug log.
    '''
    def __init__(self, parent, cfg):
        '''
        Creates window and initializes event handlers.
        '''
        self.cfg = cfg
        # Restore the last saved window position if one was stored,
        # otherwise let wx pick a default position.
        if cfg.HasEntry('/Debug/X') and cfg.HasEntry('/Debug/Y'):
            pos = wx.Point(
                cfg.ReadInt('/Debug/X'),
                cfg.ReadInt('/Debug/Y'))
        else:
            pos = wx.DefaultPosition
        size = wx.Size(
            cfg.ReadInt('/Debug/Width'),
            cfg.ReadInt('/Debug/Height')
        )
        wx.Frame.__init__(
            self,
            parent,
            -1,
            _('Wammu debug log'),
            pos,
            size,
            wx.DEFAULT_FRAME_STYLE | wx.RESIZE_BORDER
        )
        self.txt = wx.TextCtrl(
            self,
            -1,
            _('Here will appear debug messages from Gammu…\n'),
            style=wx.TE_MULTILINE | wx.TE_READONLY
        )
        # Monospace font so gammu's log columns line up.
        self.txt.SetFont(wx.Font(9, wx.MODERN, wx.NORMAL, wx.NORMAL))
        Wammu.Events.EVT_LOG(self, self.OnLog)
        wx.EVT_SIZE(self, self.OnSize)
        # Lay out the text control to fill the frame immediately.
        self.OnSize(None)

    def OnLog(self, evt):
        '''
        Event handler for text events from Logger.
        '''
        self.txt.AppendText(evt.txt)

    def OnSize(self, evt):
        '''
        Resize handler to correctly resize text area.
        '''
        width, height = self.GetClientSizeTuple()
        self.txt.SetDimensions(0, 0, width, height)
giuliov/ansible | lib/ansible/inventory/group.py | 122 | 4627 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.utils.debug import debug
class Group:
    ''' A group of ansible hosts. '''

    #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]

    def __init__(self, name=None):
        self.depth = 0
        self.name = name
        self.hosts = []
        self.vars = {}
        self.child_groups = []
        self.parent_groups = []
        # Cached flattened host list; invalidated by clear_hosts_cache().
        self._hosts_cache = None

        #self.clear_hosts_cache()
        #if self.name is None:
        #    raise Exception("group name is required")

    def __repr__(self):
        return self.get_name()

    def __getstate__(self):
        return self.serialize()

    def __setstate__(self, data):
        return self.deserialize(data)

    def serialize(self):
        '''
        Return a plain dict describing this group (used for pickling).
        Hosts and child groups are intentionally not serialized.
        '''
        parent_groups = []
        for parent in self.parent_groups:
            parent_groups.append(parent.serialize())

        result = dict(
            name=self.name,
            vars=self.vars.copy(),
            parent_groups=parent_groups,
            depth=self.depth,
        )

        return result

    def deserialize(self, data):
        '''
        Restore this group's state from a dict created by serialize().
        '''
        self.__init__()
        self.name = data.get('name')
        self.vars = data.get('vars', dict())
        # Bug fix: serialize() stores the depth, but it was previously
        # dropped here, silently resetting every deserialized group to
        # depth 0. Default to 0 for data produced by older versions.
        self.depth = data.get('depth', 0)

        parent_groups = data.get('parent_groups', [])
        for parent_data in parent_groups:
            g = Group()
            g.deserialize(parent_data)
            self.parent_groups.append(g)

    def get_name(self):
        return self.name

    def add_child_group(self, group):
        '''
        Add a child group, keeping depths and parent back-references
        consistent. Adding the same child twice is a no-op.
        '''
        if self == group:
            raise Exception("can't add group to itself")

        # don't add if it's already there
        if group not in self.child_groups:
            self.child_groups.append(group)

            # update the depth of the child
            group.depth = max([self.depth + 1, group.depth])

            # update the depth of the grandchildren
            group._check_children_depth()

            # now add self to child's parent_groups list, but only if there
            # isn't already a group with the same name
            if self.name not in [g.name for g in group.parent_groups]:
                group.parent_groups.append(self)

            self.clear_hosts_cache()

    def _check_children_depth(self):
        '''
        Propagate this group's depth down the child tree. A RuntimeError
        (recursion limit) here means the group graph contains a cycle.
        '''
        try:
            for group in self.child_groups:
                group.depth = max([self.depth + 1, group.depth])
                group._check_children_depth()
        except RuntimeError:
            raise AnsibleError("The group named '%s' has a recursive dependency loop." % self.name)

    def add_host(self, host):
        self.hosts.append(host)
        host.add_group(self)
        self.clear_hosts_cache()

    def set_variable(self, key, value):
        self.vars[key] = value

    def clear_hosts_cache(self):
        # Invalidate our cache and every ancestor's, since their flattened
        # host lists include ours.
        self._hosts_cache = None
        for g in self.parent_groups:
            g.clear_hosts_cache()

    def get_hosts(self):
        '''
        Return all hosts of this group and its children, without duplicates.
        '''
        if self._hosts_cache is None:
            self._hosts_cache = self._get_hosts()
        return self._hosts_cache

    def _get_hosts(self):
        # Children's hosts first, then our own; 'seen' preserves first
        # occurrence and removes duplicates.
        hosts = []
        seen = {}
        for kid in self.child_groups:
            kid_hosts = kid.get_hosts()
            for kk in kid_hosts:
                if kk not in seen:
                    seen[kk] = 1
                    hosts.append(kk)
        for mine in self.hosts:
            if mine not in seen:
                seen[mine] = 1
                hosts.append(mine)
        return hosts

    def get_vars(self):
        return self.vars.copy()

    def _get_ancestors(self):
        results = {}
        for g in self.parent_groups:
            results[g.name] = g
            results.update(g._get_ancestors())
        return results

    def get_ancestors(self):
        return self._get_ancestors().values()
| gpl-3.0 |
inveniosoftware/invenio-migrator | tests/legacy/test_access_dump.py | 1 | 2580 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016, 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from click.testing import CliRunner
from invenio_migrator.legacy.cli import dump as dump_cli
from invenio_migrator.legacy.access import (dump as dump_access, get as
get_access)
def test_dump_action():
    """Test single action dump.

    Dumps the first ``viewrestrcoll`` action row and checks the dumped
    name, role count and first role against the legacy test fixture.
    """
    action = get_access('viewrestrcoll')
    action_dump = dump_access(action[1][0])
    assert action_dump
    assert action_dump['name'] == 'viewrestrcoll'
    assert len(action_dump['roles']) == 5
    roles = action_dump['roles']
    # NOTE: the ``16L`` long literal means this module targets Python 2
    # (legacy Invenio); do not rewrite it to ``16`` while Py2 is supported.
    assert roles[0] == {
        'users': {'jekyll@cds.cern.ch'},
        'parameters': {'collection': {'Theses', 'Drafts'}, },
        'name': 'thesesviewer',
        'firerole_def': 'allow group "Theses and Drafts viewers"',
        'id': 16L,
        'description': 'Theses and Drafts viewer'
    }
def test_get_action():
    """Test get action.

    Exercises exact-name, full-wildcard and prefix-wildcard lookups of
    access actions against the legacy fixture database.
    """
    # Exact-name lookup returns a single row.
    action = get_access('viewrestrcoll')
    assert action[0] == 1
    # NOTE: ``38L`` is a Python 2 long literal (legacy codebase).
    assert action[1] == [
        {'allowedkeywords': 'view restricted collection',
         'optional': 'collection',
         'id': 38L,
         'name': 'viewrestrcoll'}
    ]
    # '%' behaves as an SQL-style wildcard matching every action.
    actions = get_access('%')
    assert actions[0] == 63
    assert actions[0] == len(actions[1])
    # Prefix wildcard narrows to the three viewrestr* actions.
    actions = get_access('viewrestr%')
    assert actions[0] == 3
    assert [a['name'] for a in actions[1]
            ] == ['viewrestrcoll', 'viewrestrcomment', 'viewrestrdoc']
def test_cli():
    """Invoke ``dump access`` through the CLI and expect a clean exit."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(dump_cli, ['access', '-q', '%'])
        assert outcome.exit_code == 0
| gpl-2.0 |
skudriashev/incubator-airflow | airflow/migrations/versions/bbc73705a13e_add_notification_sent_column_to_sla_miss.py | 62 | 1063 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add notification_sent column to sla_miss
Revision ID: bbc73705a13e
Revises: 4446e08588
Create Date: 2016-01-14 18:05:54.871682
"""
# revision identifiers, used by Alembic.
revision = 'bbc73705a13e'
down_revision = '4446e08588'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the ``notification_sent`` boolean column to ``sla_miss``."""
    notification_sent = sa.Column('notification_sent', sa.Boolean, default=False)
    op.add_column('sla_miss', notification_sent)
def downgrade():
    """Drop the ``notification_sent`` column from ``sla_miss`` again."""
    op.drop_column('sla_miss', 'notification_sent')
| apache-2.0 |
MediaSapiens/wavesf | django/db/models/deletion.py | 12 | 10108 | from operator import attrgetter
from django.db import connections, transaction, IntegrityError
from django.db.models import signals, sql
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.utils.datastructures import SortedDict
from django.utils.functional import wraps
def CASCADE(collector, field, sub_objs, using):
    """on_delete handler: collect related objects for cascaded deletion."""
    related_model = field.rel.to
    collector.collect(sub_objs, source=related_model,
                      source_attr=field.name, nullable=field.null)
    if field.null:
        # On backends that cannot defer constraint checks, null out the
        # foreign key before the rows themselves are deleted.
        if not connections[using].features.can_defer_constraint_checks:
            collector.add_field_update(field, None, sub_objs)
def PROTECT(collector, field, sub_objs, using):
    """on_delete handler: refuse deletion while protected references exist."""
    message = ("Cannot delete some instances of model '%s' because "
               "they are referenced through a protected foreign key: '%s.%s'")
    referencing_model = sub_objs[0].__class__.__name__
    raise IntegrityError(
        message % (field.rel.to.__name__, referencing_model, field.name))
def SET(value):
    """Return an on_delete handler that rewrites the foreign key to ``value``.

    If ``value`` is callable it is invoked at deletion time to produce the
    replacement value; otherwise it is used as-is.
    """
    if callable(value):
        def set_on_delete(collector, field, sub_objs, using):
            # Call lazily so each deletion gets a freshly computed value.
            collector.add_field_update(field, value(), sub_objs)
    else:
        def set_on_delete(collector, field, sub_objs, using):
            collector.add_field_update(field, value, sub_objs)
    return set_on_delete

# Handler that nulls out the foreign key on deletion.
SET_NULL = SET(None)
def SET_DEFAULT(collector, field, sub_objs, using):
    """on_delete handler: rewrite the FK to the field's declared default."""
    default_value = field.get_default()
    collector.add_field_update(field, default_value, sub_objs)
def DO_NOTHING(collector, field, sub_objs, using):
    """on_delete handler that deliberately leaves related rows untouched."""
def force_managed(func):
    # Decorator: ensure ``func`` runs inside a managed transaction on
    # ``self.using``. If transaction management was not already active it is
    # entered here, committed on success, and always left again in
    # ``finally`` — even when ``func`` raises.
    @wraps(func)
    def decorated(self, *args, **kwargs):
        if not transaction.is_managed(using=self.using):
            transaction.enter_transaction_management(using=self.using)
            forced_managed = True
        else:
            forced_managed = False
        try:
            func(self, *args, **kwargs)
            # Commit only on success; an exception skips the commit and
            # propagates after the cleanup below runs.
            if forced_managed:
                transaction.commit(using=self.using)
            else:
                transaction.commit_unless_managed(using=self.using)
        finally:
            if forced_managed:
                transaction.leave_transaction_management(using=self.using)
    return decorated
class Collector(object):
    """Plans and executes the deletion of a set of model instances.

    Instances are gathered via collect(), which follows relations and
    applies each field's on_delete handler; delete() then runs the whole
    plan (field updates, batch deletes, row deletes and signals) inside a
    single managed transaction.
    """
    def __init__(self, using):
        # Database alias all queries run against.
        self.using = using
        self.data = {}  # {model: [instances]}
        self.batches = {}  # {model: {field: set([instances])}}
        self.field_updates = {}  # {model: {(field, value): set([instances])}}
        self.dependencies = {}  # {model: set([models])}

    def add(self, objs, source=None, nullable=False):
        """
        Adds 'objs' to the collection of objects to be deleted. If the call is
        the result of a cascade, 'source' should be the model that caused it
        and 'nullable' should be set to True, if the relation can be null.

        Returns a list of all objects that were not already collected.
        """
        if not objs:
            return []
        new_objs = []
        model = objs[0].__class__
        instances = self.data.setdefault(model, [])
        for obj in objs:
            if obj not in instances:
                new_objs.append(obj)
        instances.extend(new_objs)
        # Nullable relationships can be ignored -- they are nulled out before
        # deleting, and therefore do not affect the order in which objects have
        # to be deleted.
        if new_objs and source is not None and not nullable:
            self.dependencies.setdefault(source, set()).add(model)
        return new_objs

    def add_batch(self, model, field, objs):
        """
        Schedules a batch delete. Every instance of 'model' that is related to
        an instance of 'obj' through 'field' will be deleted.
        """
        self.batches.setdefault(model, {}).setdefault(field, set()).update(objs)

    def add_field_update(self, field, value, objs):
        """
        Schedules a field update. 'objs' must be a homogenous iterable
        collection of model instances (e.g. a QuerySet).
        """
        if not objs:
            return
        model = objs[0].__class__
        self.field_updates.setdefault(
            model, {}).setdefault(
            (field, value), set()).update(objs)

    def collect(self, objs, source=None, nullable=False, collect_related=True,
            source_attr=None):
        """
        Adds 'objs' to the collection of objects to be deleted as well as all
        parent instances.  'objs' must be a homogenous iterable collection of
        model instances (e.g. a QuerySet).  If 'collect_related' is True,
        related objects will be handled by their respective on_delete handler.

        If the call is the result of a cascade, 'source' should be the model
        that caused it and 'nullable' should be set to True, if the relation
        can be null.
        """
        # Some backends delete related rows themselves (e.g. via ON DELETE
        # CASCADE in the database), so there is nothing to collect here.
        if not connections[self.using].features.supports_deleting_related_objects:
            collect_related = False

        new_objs = self.add(objs, source, nullable)
        if not new_objs:
            return
        model = new_objs[0].__class__

        # Recursively collect parent models, but not their related objects.
        # These will be found by meta.get_all_related_objects()
        for parent_model, ptr in model._meta.parents.iteritems():
            if ptr:
                parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
                self.collect(parent_objs, source=model,
                             source_attr=ptr.rel.related_name,
                             collect_related=False)

        if collect_related:
            for related in model._meta.get_all_related_objects(include_hidden=True):
                field = related.field
                if related.model._meta.auto_created:
                    # Auto-created through tables can be deleted in bulk.
                    self.add_batch(related.model, field, new_objs)
                else:
                    sub_objs = self.related_objects(related, new_objs)
                    if not sub_objs:
                        continue
                    field.rel.on_delete(self, field, sub_objs, self.using)

            # TODO This entire block is only needed as a special case to
            # support cascade-deletes for GenericRelation. It should be
            # removed/fixed when the ORM gains a proper abstraction for virtual
            # or composite fields, and GFKs are reworked to fit into that.
            for relation in model._meta.many_to_many:
                if not relation.rel.through:
                    sub_objs = relation.bulk_related_objects(new_objs, self.using)
                    self.collect(sub_objs,
                                 source=model,
                                 source_attr=relation.rel.related_name,
                                 nullable=True)

    def related_objects(self, related, objs):
        """
        Gets a QuerySet of objects related to ``objs`` via the relation ``related``.
        """
        return related.model._base_manager.using(self.using).filter(
            **{"%s__in" % related.field.name: objs}
        )

    def instances_with_model(self):
        """Yield (model, instance) pairs for every collected instance."""
        for model, instances in self.data.iteritems():
            for obj in instances:
                yield model, obj

    def sort(self):
        """Reorder self.data so each model comes after the models recorded
        as its dependencies. If a cycle prevents a full ordering, return
        early and leave the original order unchanged."""
        sorted_models = []
        models = self.data.keys()
        while len(sorted_models) < len(models):
            found = False
            for model in models:
                if model in sorted_models:
                    continue
                dependencies = self.dependencies.get(model)
                if not (dependencies and dependencies.difference(sorted_models)):
                    sorted_models.append(model)
                    found = True
            if not found:
                return
        self.data = SortedDict([(model, self.data[model])
                                for model in sorted_models])

    @force_managed
    def delete(self):
        """Execute the collected deletion plan inside one transaction:
        pre_delete signals, field updates, batch deletes, row deletes,
        post_delete signals, then reset in-memory instance state."""
        # sort instance collections
        for instances in self.data.itervalues():
            instances.sort(key=attrgetter("pk"))

        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer contraint checks until the
        # end of a transaction.
        self.sort()

        # send pre_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.pre_delete.send(
                    sender=model, instance=obj, using=self.using
                )

        # update fields
        for model, instances_for_fieldvalues in self.field_updates.iteritems():
            query = sql.UpdateQuery(model)
            for (field, value), instances in instances_for_fieldvalues.iteritems():
                query.update_batch([obj.pk for obj in instances],
                                   {field.name: value}, self.using)

        # reverse instance collections
        for instances in self.data.itervalues():
            instances.reverse()

        # delete batches
        for model, batches in self.batches.iteritems():
            query = sql.DeleteQuery(model)
            for field, instances in batches.iteritems():
                query.delete_batch([obj.pk for obj in instances], self.using, field)

        # delete instances
        for model, instances in self.data.iteritems():
            query = sql.DeleteQuery(model)
            pk_list = [obj.pk for obj in instances]
            query.delete_batch(pk_list, self.using)

        # send post_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.post_delete.send(
                    sender=model, instance=obj, using=self.using
                )

        # update collected instances
        for model, instances_for_fieldvalues in self.field_updates.iteritems():
            for (field, value), instances in instances_for_fieldvalues.iteritems():
                for obj in instances:
                    setattr(obj, field.attname, value)
        for model, instances in self.data.iteritems():
            for instance in instances:
                setattr(instance, model._meta.pk.attname, None)
| bsd-3-clause |
CiscoSystems/vespa | neutron/tests/unit/_test_extension_portbindings.py | 5 | 11690 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 NEC Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Akihiro Motoki, NEC Corporation
#
import contextlib
from oslo.config import cfg
from webob import exc
from neutron import context
from neutron.extensions import portbindings
from neutron.manager import NeutronManager
from neutron.tests.unit import test_db_plugin
class PortBindingsTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    """Shared tests for the port binding extension.

    Checks that binding attributes (vif_type, capabilities, profile) are
    visible to admins and hidden from / rejected for non-admin tenants.
    """

    # VIF_TYPE must be overridden according to plugin vif_type
    VIF_TYPE = portbindings.VIF_TYPE_OTHER
    # The plugin supports the port security feature such as
    # security groups and anti spoofing.
    HAS_PORT_FILTER = False

    def _check_response_portbindings(self, port):
        # Admin view: binding attributes must be present and correct.
        self.assertEqual(port['binding:vif_type'], self.VIF_TYPE)
        port_cap = port[portbindings.CAPABILITIES]
        self.assertEqual(port_cap[portbindings.CAP_PORT_FILTER],
                         self.HAS_PORT_FILTER)

    def _check_response_no_portbindings(self, port):
        # Non-admin view: binding attributes must be filtered out.
        self.assertIn('status', port)
        self.assertNotIn(portbindings.VIF_TYPE, port)
        self.assertNotIn(portbindings.CAPABILITIES, port)

    def _get_non_admin_context(self):
        return context.Context(user_id=None,
                               tenant_id=self._tenant_id,
                               is_admin=False,
                               read_deleted="no")

    def test_port_vif_details(self):
        with self.port(name='name') as port:
            port_id = port['port']['id']
            # Check a response of create_port
            self._check_response_portbindings(port['port'])
            # Check a response of get_port
            ctx = context.get_admin_context()
            port = self._show('ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbindings(port)
            # By default user is admin - now test non admin user
            ctx = self._get_non_admin_context()
            non_admin_port = self._show(
                'ports', port_id, neutron_context=ctx)['port']
            self._check_response_no_portbindings(non_admin_port)

    def test_ports_vif_details(self):
        plugin = NeutronManager.get_plugin()
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with contextlib.nested(self.port(), self.port()):
            ctx = context.get_admin_context()
            ports = plugin.get_ports(ctx)
            self.assertEqual(len(ports), 2)
            for port in ports:
                self._check_response_portbindings(port)
            # By default user is admin - now test non admin user
            ctx = self._get_non_admin_context()
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(len(ports), 2)
            for non_admin_port in ports:
                self._check_response_no_portbindings(non_admin_port)

    def _check_default_port_binding_profile(self, port):
        # For plugins which does not use binding:profile attr
        # we just check an operation for the port succeed.
        self.assertIn('id', port)

    def _test_create_port_binding_profile(self, profile):
        profile_arg = {portbindings.PROFILE: profile}
        with self.port(arg_list=(portbindings.PROFILE,),
                       **profile_arg) as port:
            self._check_default_port_binding_profile(port['port'])

    def test_create_port_binding_profile_none(self):
        self._test_create_port_binding_profile(None)

    def test_create_port_binding_profile_with_empty_dict(self):
        self._test_create_port_binding_profile({})

    def _test_update_port_binding_profile(self, profile):
        profile_arg = {portbindings.PROFILE: profile}
        with self.port() as port:
            self._check_default_port_binding_profile(port['port'])
            port_id = port['port']['id']
            ctx = context.get_admin_context()
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self._check_default_port_binding_profile(port)

    def test_update_port_binding_profile_none(self):
        self._test_update_port_binding_profile(None)

    def test_update_port_binding_profile_with_empty_dict(self):
        self._test_update_port_binding_profile({})

    def test_port_create_portinfo_non_admin(self):
        profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}}
        with self.network(set_context=True, tenant_id='test') as net1:
            with self.subnet(network=net1) as subnet1:
                # succeed without binding:profile
                with self.port(subnet=subnet1,
                               set_context=True, tenant_id='test'):
                    pass
                # fail with binding:profile
                try:
                    with self.port(subnet=subnet1,
                                   expected_res_status=403,
                                   arg_list=(portbindings.PROFILE,),
                                   set_context=True, tenant_id='test',
                                   **profile_arg):
                        pass
                except exc.HTTPClientError:
                    pass

    def test_port_update_portinfo_non_admin(self):
        profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}}
        with self.network() as net1:
            with self.subnet(network=net1) as subnet1:
                with self.port(subnet=subnet1) as port:
                    # By default user is admin - now test non admin user
                    # Note that 404 is returned when prohibit by policy.
                    # See comment for PolicyNotAuthorized except clause
                    # in update() in neutron.api.v2.base.Controller.
                    port_id = port['port']['id']
                    ctx = self._get_non_admin_context()
                    port = self._update('ports', port_id,
                                        {'port': profile_arg},
                                        expected_code=404,
                                        neutron_context=ctx)
class PortBindingsHostTestCaseMixin(object):
    """Mixin tests for the binding:host_id attribute.

    Verifies that host_id is settable/visible for admins only, and that
    list/update operations behave consistently for both user kinds.
    """

    fmt = 'json'
    hostname = 'testhost'

    def _check_response_portbindings_host(self, port):
        self.assertEqual(port[portbindings.HOST_ID], self.hostname)

    def _check_response_no_portbindings_host(self, port):
        # host_id must be hidden from non-admin responses.
        self.assertIn('status', port)
        self.assertNotIn(portbindings.HOST_ID, port)

    def test_port_vif_non_admin(self):
        # Non-admin tenants may not set binding:host_id on create (403).
        with self.network(set_context=True,
                          tenant_id='test') as net1:
            with self.subnet(network=net1) as subnet1:
                host_arg = {portbindings.HOST_ID: self.hostname}
                try:
                    with self.port(subnet=subnet1,
                                   expected_res_status=403,
                                   arg_list=(portbindings.HOST_ID,),
                                   set_context=True,
                                   tenant_id='test',
                                   **host_arg):
                        pass
                except exc.HTTPClientError:
                    pass

    def test_port_vif_host(self):
        host_arg = {portbindings.HOST_ID: self.hostname}
        with self.port(name='name', arg_list=(portbindings.HOST_ID,),
                       **host_arg) as port:
            port_id = port['port']['id']
            # Check a response of create_port
            self._check_response_portbindings_host(port['port'])
            # Check a response of get_port
            ctx = context.get_admin_context()
            port = self._show('ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbindings_host(port)
            # By default user is admin - now test non admin user
            ctx = context.Context(user_id=None,
                                  tenant_id=self._tenant_id,
                                  is_admin=False,
                                  read_deleted="no")
            non_admin_port = self._show(
                'ports', port_id, neutron_context=ctx)['port']
            self._check_response_no_portbindings_host(non_admin_port)

    def test_ports_vif_host(self):
        cfg.CONF.set_default('allow_overlapping_ips', True)
        host_arg = {portbindings.HOST_ID: self.hostname}
        with contextlib.nested(
            self.port(name='name1',
                      arg_list=(portbindings.HOST_ID,),
                      **host_arg),
            self.port(name='name2')):
            ctx = context.get_admin_context()
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for port in ports:
                if port['name'] == 'name1':
                    self._check_response_portbindings_host(port)
                else:
                    # Port created without host_id must not report one.
                    self.assertFalse(port[portbindings.HOST_ID])
            # By default user is admin - now test non admin user
            ctx = context.Context(user_id=None,
                                  tenant_id=self._tenant_id,
                                  is_admin=False,
                                  read_deleted="no")
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for non_admin_port in ports:
                self._check_response_no_portbindings_host(non_admin_port)

    def test_ports_vif_host_update(self):
        cfg.CONF.set_default('allow_overlapping_ips', True)
        host_arg = {portbindings.HOST_ID: self.hostname}
        with contextlib.nested(
            self.port(name='name1',
                      arg_list=(portbindings.HOST_ID,),
                      **host_arg),
            self.port(name='name2')) as (port1, port2):
            # Update both ports to the same new host and verify via list.
            data = {'port': {portbindings.HOST_ID: 'testhosttemp'}}
            req = self.new_update_request('ports', data, port1['port']['id'])
            req.get_response(self.api)
            req = self.new_update_request('ports', data, port2['port']['id'])
            ctx = context.get_admin_context()
            req.get_response(self.api)
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for port in ports:
                self.assertEqual('testhosttemp', port[portbindings.HOST_ID])

    def test_ports_vif_host_list(self):
        # Filtering by host_id must return only the matching ports.
        cfg.CONF.set_default('allow_overlapping_ips', True)
        host_arg = {portbindings.HOST_ID: self.hostname}
        with contextlib.nested(
            self.port(name='name1',
                      arg_list=(portbindings.HOST_ID,),
                      **host_arg),
            self.port(name='name2'),
            self.port(name='name3',
                      arg_list=(portbindings.HOST_ID,),
                      **host_arg),) as (port1, _port2, port3):
            self._test_list_resources(
                'port', (port1, port3),
                query_params='%s=%s' % (portbindings.HOST_ID, self.hostname))
| apache-2.0 |
dhalleine/tensorflow | tensorflow/python/training/adam_test.py | 8 | 8216 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def adam_update_numpy(param, g_t, t, m, v, alpha=0.001, beta1=0.9, beta2=0.999,
                      epsilon=1e-8):
  """Reference NumPy implementation of a single Adam update step.

  Mirrors the update rule of tf.train.AdamOptimizer so the tests below can
  compare the TF result against it.

  Returns:
    A (param_t, m_t, v_t) tuple with the updated parameters and the new
    first/second moment estimates for step ``t`` (1-based).
  """
  # Biased first and second moment estimates.
  m_t = beta1 * m + (1 - beta1) * g_t
  v_t = beta2 * v + (1 - beta2) * g_t * g_t
  # Bias-corrected effective step size for step t.
  alpha_t = alpha * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
  param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
  return param_t, m_t, v_t
class AdamOptimizerTest(tf.test.TestCase):
  """Checks tf.train.AdamOptimizer against the NumPy reference above.

  Each test runs three update steps and also verifies the beta-power
  accumulators decay as beta**t before every step.
  """

  def testSparse(self):
    # Gradients supplied as IndexedSlices (sparse path).
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0_np_indices = np.array([0, 1], dtype=np.int32)
        grads0 = tf.IndexedSlices(tf.constant(grads0_np),
                                  tf.constant(grads0_np_indices),
                                  tf.constant([2]))
        grads1_np_indices = np.array([0, 1], dtype=np.int32)
        grads1 = tf.IndexedSlices(tf.constant(grads1_np),
                                  tf.constant(grads1_np_indices),
                                  tf.constant([2]))
        opt = tf.train.AdamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
          update.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testBasic(self):
    # Dense gradients, default hyperparameters.
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)
        opt = tf.train.AdamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
          update.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testTensorLearningRate(self):
    # Learning rate passed as a tensor instead of a Python float.
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)
        opt = tf.train.AdamOptimizer(tf.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
          update.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testSharing(self):
    # Two update ops sharing one optimizer must share its slot variables.
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)
        opt = tf.train.AdamOptimizer()
        update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        # Run 3 steps of intertwined Adam1 and Adam2.
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
          if t % 2 == 0:
            update1.run()
          else:
            update2.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
sebMathieu/dsima | simulator/src/agent/example.py | 1 | 2631 | ##@package Example
# Example of agent-based system based on a supply and a demand curve
#@author Sebastien MATHIEU
from .agent import Agent
from .abstractSystem import AbstractSystem
from .layer import Layer
from .data import Data
import math
## Supply with the target supply function \f$\pi = 0.1 q^2 +2\f$
class Supply(Agent):
    ## Initialize the supply agent.
    # @param data Shared data object; sets the starting price 'pi'.
    def initialize(self, data):
        data.general['pi'] = 10.0  # Set a starting price

    ## Act: move the price halfway towards the target supply price.
    # The target supply function is pi = 0.1 q^2 + 2.
    # @param data Shared data object holding 'q' (quantity) and 'pi' (price).
    # @param layer Layer the agent belongs to (unused here).
    def act(self, data, layer):
        # Compute the target price from the supply function
        targetPrice = 0.1 * (data.general['q'] ** 2) + 2
        # Take the mean between the last price and the target price.
        data.general['pi'] = (data.general['pi'] + targetPrice) / 2
        # Bug fix: the original print was missing its closing parenthesis,
        # which made this module a SyntaxError.
        print("\tSupply propose the price " + str(data.general['pi']))
## Demand with the inverse demand function \f$\pi = 40 - 0.05 q^2\f$
class Demand(Agent):
    ## Initialize the demand agent: no quantity bought yet.
    def initialize(self, data):
        data.general['q'] = 0

    ## Act: derive the purchased quantity from the current price using the
    # inverse demand function pi = 40 - 0.05 q^2.
    def act(self, data, layer):
        price = data.general['pi']
        if price > 40.0:
            # Price too high: no demand at all.
            quantity = 0
        else:
            quantity = math.sqrt((40.0 - price) / 0.05)
        data.general['q'] = quantity
        print("\tDemand buy the quantity " + str(quantity))
## Agent based system definition
class System(AbstractSystem):
    ## Constructor: build the two-layer supply/demand system.
    def __init__(self):
        AbstractSystem.__init__(self)
        # Price seen at the previous convergence check (None on first call).
        self._lastPrice=None
        self.generate()

    ## Generate the example system.
    def generate(self):
        # Create actors
        supply=Supply()
        demand=Demand()

        # Create two layers with one actor in each.
        layerSupply=Layer([supply])
        layerDemand=Layer([demand])

        # Add the layers to the layer list
        self.layerList.append(layerDemand) # First the system call the demand side
        self.layerList.append(layerSupply) # After the system call the supply side

    ## Convergence test: the system has converged once the price stops moving.
    # @return A message string when converged, otherwise None.
    def hasConverged(self):
        oldPrice=self._lastPrice
        self._lastPrice=self.data.general['pi']
        if oldPrice == None:
            # First iteration: nothing to compare against yet.
            return None
        elif abs(oldPrice - self._lastPrice) < 0.001: # Convergence if the price does not change.
            return "System has converged."
        else:
            return None
# Starting point from python #
if __name__ == "__main__":
    # Build the demand/supply system and iterate until the price converges.
    system = System()
    print("Starting the agent-based simulation...")  # fixed typo: "Staring"
    convergence = system.run()
    print("\nNumber of iterations : " + str(system.iterations))
    print(convergence)
| bsd-3-clause |
arsenetar/dupeguru | hscommon/tests/table_test.py | 2 | 9762 | # Created By: Virgil Dupras
# Created On: 2008-08-12
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from ..testutil import CallLogger, eq_
from ..gui.table import Table, GUITable, Row
class TestRow(Row):
    # Prevent pytest from collecting this helper class as a test case.
    __test__ = False

    def __init__(self, table, index, is_new=False):
        Row.__init__(self, table)
        self.is_new = is_new  # Mimics a freshly added, not-yet-saved row
        self._index = index

    def load(self):
        # Nothing to load for the in-memory test double.
        pass

    def save(self):
        # Saving a new row turns it into a "persisted" one.
        self.is_new = False

    @property
    def index(self):
        return self._index
class TestGUITable(GUITable):
    # Prevent pytest from collecting this helper class as a test case.
    __test__ = False

    def __init__(self, rowcount, viewclass=CallLogger):
        GUITable.__init__(self)
        self.view = viewclass()
        self.view.model = self
        self.rowcount = rowcount  # Number of rows _fill() generates
        self.updated_rows = None  # Recorded by _update_selection() for assertions

    def _do_add(self):
        # New rows are appended at the end of the table.
        return TestRow(self, len(self), is_new=True), len(self)

    def _is_edited_new(self):
        return self.edited is not None and self.edited.is_new

    def _fill(self):
        for i in range(self.rowcount):
            self.append(TestRow(self, i))

    def _update_selection(self):
        # Snapshot the selected rows so tests can verify selection updates.
        self.updated_rows = self.selected_rows[:]
def table_with_footer():
    """Return a one-row table together with the row installed as its footer."""
    tbl = Table()
    tbl.append(TestRow(tbl, 0))
    foot = TestRow(tbl, 1)
    tbl.footer = foot
    return tbl, foot
def table_with_header():
    """Return a one-row table together with the row installed as its header."""
    tbl = Table()
    tbl.append(TestRow(tbl, 1))
    head = TestRow(tbl, 0)
    tbl.header = head
    return tbl, head
# --- Tests
def test_allow_edit_when_attr_is_property_with_fset():
    # A cell backed by a property is editable exactly when that property has
    # a setter; read-only properties and missing attributes are not editable.
    class PropertyRow(Row):
        @property
        def foo(self):
            pass

        @property
        def bar(self):
            pass

        @bar.setter
        def bar(self, value):
            pass

    row = PropertyRow(Table())
    assert row.can_edit_cell("bar")
    assert not row.can_edit_cell("foo")
    assert not row.can_edit_cell("baz")  # doesn't exist, can't edit
def test_can_edit_prop_has_priority_over_fset_checks():
    # A can_edit_* attribute on the row overrides the fset-based editability
    # check: even though 'bar' has a setter, can_edit_bar = False wins.
    class OverridingRow(Row):
        @property
        def bar(self):
            pass

        @bar.setter
        def bar(self, value):
            pass

        can_edit_bar = False

    row = OverridingRow(Table())
    assert not row.can_edit_cell("bar")
def test_in():
    # Membership of a Table in a list is identity-based: a distinct Table
    # instance is not "in" the list even if both compare equal as lists.
    tbl = Table()
    container = [tbl]
    assert Table() not in container
def test_footer_del_all():
    # Clearing every row must not crash the footer bookkeeping.
    tbl, _footer = table_with_footer()
    del tbl[:]
    assert tbl.footer is None
def test_footer_del_row():
    # Deleting the last row (the footer) resets the footer to None.
    tbl, _footer = table_with_footer()
    del tbl[-1]
    assert tbl.footer is None
    eq_(len(tbl), 1)
def test_footer_is_appened_to_table():
    # Setting a footer appends it as the table's bottom row.
    tbl, foot = table_with_footer()
    eq_(len(tbl), 2)
    assert tbl[1] is foot
def test_footer_remove():
    # remove()-ing the footer row resets the footer to None.
    tbl, foot = table_with_footer()
    tbl.remove(foot)
    assert tbl.footer is None
def test_footer_replaces_old_footer():
    # Assigning a new footer swaps out the previous one in place.
    tbl, _old = table_with_footer()
    replacement = Row(tbl)
    tbl.footer = replacement
    assert tbl.footer is replacement
    eq_(len(tbl), 2)
    assert tbl[1] is replacement
def test_footer_rows_and_row_count():
    # row_count and rows exclude the footer row.
    tbl, _footer = table_with_footer()
    eq_(tbl.row_count, 1)
    eq_(tbl.rows, tbl[:-1])
def test_footer_setting_to_none_removes_old_one():
    # Clearing the footer attribute also removes the footer row itself.
    tbl, _footer = table_with_footer()
    tbl.footer = None
    assert tbl.footer is None
    eq_(len(tbl), 1)
def test_footer_stays_there_on_append():
    # Appending a regular row keeps the footer pinned at the bottom.
    tbl, foot = table_with_footer()
    tbl.append(Row(tbl))
    eq_(len(tbl), 3)
    assert tbl[2] is foot
def test_footer_stays_there_on_insert():
    # Inserting another row puts it above the footer
    # NOTE(review): index 3 is past the end of the 2-row table; presumably
    # Table.insert clamps it so the new row still lands above the footer —
    # confirm against the Table implementation.
    table, footer = table_with_footer()
    table.insert(3, Row(table))
    eq_(len(table), 3)
    assert table[2] is footer
def test_header_del_all():
    # Clearing every row must not crash the header bookkeeping.
    tbl, _header = table_with_header()
    del tbl[:]
    assert tbl.header is None
def test_header_del_row():
    # Deleting the first row (the header) resets the header to None.
    tbl, _header = table_with_header()
    del tbl[0]
    assert tbl.header is None
    eq_(len(tbl), 1)
def test_header_is_inserted_in_table():
    # Setting a header inserts it as the table's top row.
    tbl, head = table_with_header()
    eq_(len(tbl), 2)
    assert tbl[0] is head
def test_header_remove():
    # remove()-ing the header row resets the header to None.
    tbl, head = table_with_header()
    tbl.remove(head)
    assert tbl.header is None
def test_header_replaces_old_header():
    # Assigning a new header swaps out the previous one in place.
    tbl, _old = table_with_header()
    replacement = Row(tbl)
    tbl.header = replacement
    assert tbl.header is replacement
    eq_(len(tbl), 2)
    assert tbl[0] is replacement
def test_header_rows_and_row_count():
    # row_count and rows exclude the header row.
    tbl, _header = table_with_header()
    eq_(tbl.row_count, 1)
    eq_(tbl.rows, tbl[1:])
def test_header_setting_to_none_removes_old_one():
    # Clearing the header attribute also removes the header row itself.
    tbl, _header = table_with_header()
    tbl.header = None
    assert tbl.header is None
    eq_(len(tbl), 1)
def test_header_stays_there_on_insert():
    # Inserting a regular row at index 0 lands below the header.
    tbl, head = table_with_header()
    tbl.insert(0, Row(tbl))
    eq_(len(tbl), 3)
    assert tbl[0] is head
def test_refresh_view_on_refresh():
    # refresh() notifies the view unless refresh_view=False is passed.
    tbl = TestGUITable(1)
    tbl.refresh()
    tbl.view.check_gui_calls(["refresh"])
    tbl.view.clear_calls()
    tbl.refresh(refresh_view=False)
    tbl.view.check_gui_calls([])
def test_restore_selection():
    # With no previous selection, a refresh selects the last row.
    tbl = TestGUITable(10)
    tbl.refresh()
    eq_(tbl.selected_indexes, [9])
def test_restore_selection_after_cancel_edits():
    # cancel_edits() goes through _restore_selection(), not only
    # _update_selection(), so custom restore logic is honored.
    class RestoringTable(TestGUITable):
        def _restore_selection(self, previous_selection):
            self.selected_indexes = [6]

    tbl = RestoringTable(10)
    tbl.refresh()
    tbl.add()
    tbl.cancel_edits()
    eq_(tbl.selected_indexes, [6])
def test_restore_selection_with_previous_selection():
    # A refresh puts back the selection that existed before it.
    tbl = TestGUITable(10)
    tbl.refresh()
    tbl.selected_indexes = [2, 4]
    tbl.refresh()
    eq_(tbl.selected_indexes, [2, 4])
def test_restore_selection_custom():
    # Subclasses can override _restore_selection() to customize which rows
    # end up selected after a refresh triggered by _fill().
    class RestoringTable(TestGUITable):
        def _restore_selection(self, previous_selection):
            self.selected_indexes = [6]

    tbl = RestoringTable(10)
    tbl.refresh()
    eq_(tbl.selected_indexes, [6])
def test_row_cell_value():
    # get_cell_value()/set_cell_value() mangle attribute names that are
    # Python reserved words ("from" is stored as "from_").
    row = Row(Table())
    row.from_ = "foo"
    eq_(row.get_cell_value("from"), "foo")
    row.set_cell_value("from", "bar")
    eq_(row.get_cell_value("from"), "bar")
def test_sort_table_also_tries_attributes_without_underscores():
    # When determining a sort key, after having unsuccessfully tried the attribute with the
    # underscore, try the one without one.
    table = Table()
    row1 = Row(table)
    row1._foo = "a" # underscored attr must be checked first
    row1.foo = "b"
    row1.bar = "c"
    row2 = Row(table)
    row2._foo = "b"
    row2.foo = "a"
    row2.bar = "b"
    table.append(row1)
    table.append(row2)
    # Sorting by "foo" must use the _foo values ("a" < "b" -> row1 first).
    table.sort_by("foo")
    assert table[0] is row1
    assert table[1] is row2
    # No _bar exists, so sorting falls back to bar ("b" < "c" -> row2 first).
    table.sort_by("bar")
    assert table[0] is row2
    assert table[1] is row1
def test_sort_table_updates_selection():
    # Sorting keeps the same rows selected, so the selection bookkeeping is
    # refreshed with the rows' new positions.
    tbl = TestGUITable(10)
    tbl.refresh()
    tbl.select([2, 4])
    tbl.sort_by("index", desc=True)
    # After the descending sort, the updated rows should be 7 and 5.
    eq_(len(tbl.updated_rows), 2)
    first, second = tbl.updated_rows
    eq_(first.index, 7)
    eq_(second.index, 5)
def test_sort_table_with_footer():
    # A footer stays pinned at the bottom through a sort.
    tbl, foot = table_with_footer()
    tbl.sort_by("index", desc=True)
    assert tbl[-1] is foot
def test_sort_table_with_header():
    # A header stays pinned at the top through a sort.
    tbl, head = table_with_header()
    tbl.sort_by("index", desc=True)
    assert tbl[0] is head
def test_add_with_view_that_saves_during_refresh():
    # A view that calls save_edits() while being refreshed by add() is
    # ignored: the table must stay in edit mode.
    class SavingView(CallLogger):
        def refresh(self):
            self.model.save_edits()

    tbl = TestGUITable(10, viewclass=SavingView)
    tbl.add()
    assert tbl.edited is not None  # still in edit mode
| gpl-3.0 |
OpenSourceActivismTech/call-power | alembic/versions/38f01b0893b8_add_call_in_campaign_id_to_.py | 3 | 1414 | """Add call_in_campaign_id to TwilioPhoneNumber
Revision ID: 38f01b0893b8
Revises: 3c34cfd19bf8
Create Date: 2016-10-21 18:59:13.190060
"""
# revision identifiers, used by Alembic.
revision = '38f01b0893b8'
down_revision = '3c34cfd19bf8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add campaign_phone.call_in_campaign_id and backfill it from the old
    call_in_allowed flag via the campaign_phone_numbers join table."""
    # batch_alter_table is required so the ALTER also works on SQLite.
    with op.batch_alter_table('campaign_phone') as batch_op:
        batch_op.add_column(sa.Column('call_in_campaign_id',
                                      sa.Integer(),
                                      sa.ForeignKey('campaign_campaign.id'),
                                      nullable=True))
    # Data migration: find every phone number marked call_in_allowed and the
    # campaign it belongs to, then point the phone at that campaign.
    connection = op.get_bind()
    campaign_call_in_numbers = connection.execute(
        """SELECT campaign_phone_numbers.campaign_id, campaign_phone_numbers.phone_id
        FROM campaign_phone_numbers
        INNER JOIN campaign_phone ON campaign_phone_numbers.phone_id = campaign_phone.id
        WHERE campaign_phone.call_in_allowed"""
    )
    for (campaign_id, phone_id) in campaign_call_in_numbers:
        # NOTE(review): SQL is built by string concatenation; safe only
        # because both values are integers read from the database itself.
        connection.execute("""UPDATE campaign_phone
            SET call_in_campaign_id = """+str(campaign_id)+"""
            WHERE campaign_phone.id = """+str(phone_id))
def downgrade():
    """Drop the column added by upgrade(); backfilled data is lost."""
    # batch_alter_table is required so the ALTER also works on SQLite.
    with op.batch_alter_table('campaign_phone') as batch_op:
        batch_op.drop_column('call_in_campaign_id')
| agpl-3.0 |
DARKPOP/external_chromium_org | tools/deep_memory_profiler/lib/symbol.py | 99 | 7171 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
_BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
_FIND_RUNTIME_SYMBOLS_PATH = os.path.join(_BASE_PATH,
os.pardir,
'find_runtime_symbols')
_TOOLS_LINUX_PATH = os.path.join(_BASE_PATH,
os.pardir,
'linux')
sys.path.append(_FIND_RUNTIME_SYMBOLS_PATH)
sys.path.append(_TOOLS_LINUX_PATH)
import find_runtime_symbols
import prepare_symbol_info
import procfs # pylint: disable=W0611,F0401
LOGGER = logging.getLogger('dmprof')
FUNCTION_SYMBOLS = find_runtime_symbols.FUNCTION_SYMBOLS
SOURCEFILE_SYMBOLS = find_runtime_symbols.SOURCEFILE_SYMBOLS
TYPEINFO_SYMBOLS = find_runtime_symbols.TYPEINFO_SYMBOLS
class SymbolDataSources(object):
  """Manages symbol data sources in a process.

  The symbol data sources consist of maps (/proc/<pid>/maps), nm, readelf and
  so on. They are collected into a directory '|prefix|.symmap' from the binary
  files by 'prepare()' with tools/find_runtime_symbols/prepare_symbol_info.py.

  Binaries are not mandatory to profile. The prepared data sources work in
  place of the binary even if the binary has been overwritten with another
  binary.

  Note that loading the symbol data sources takes a long time. They are often
  very big. So, the 'dmprof' profiler is designed to use 'SymbolMappingCache'
  which caches actually used symbols.
  """
  def __init__(self, prefix, alternative_dirs=None):
    # Path prefix of the dump; '<prefix>.maps' is read and '<prefix>.symmap'
    # is written by prepare().
    self._prefix = prefix
    # Output directory of prepare(); None until prepare() succeeds.
    self._prepared_symbol_data_sources_path = None
    # Loaded RuntimeSymbolsInProcess object; lazily created by get().
    self._loaded_symbol_data_sources = None
    self._alternative_dirs = alternative_dirs or {}

  def prepare(self):
    """Prepares symbol data sources by extracting mapping from a binary.

    The prepared symbol data sources are stored in a directory. The directory
    name is stored in |self._prepared_symbol_data_sources_path|.

    Returns:
      True if succeeded.
    """
    LOGGER.info('Preparing symbol mapping...')
    self._prepared_symbol_data_sources_path, used_tempdir = (
        prepare_symbol_info.prepare_symbol_info(
            self._prefix + '.maps',
            output_dir_path=self._prefix + '.symmap',
            alternative_dirs=self._alternative_dirs,
            use_tempdir=True,
            use_source_file_name=True))
    if self._prepared_symbol_data_sources_path:
      LOGGER.info('  Prepared symbol mapping.')
      if used_tempdir:
        LOGGER.warn('  Using a temporary directory for symbol mapping.')
        LOGGER.warn('  Delete it by yourself.')
        LOGGER.warn('  Or, move the directory by yourself to use it later.')
      return True
    else:
      LOGGER.warn('  Failed to prepare symbol mapping.')
      return False

  def get(self):
    """Returns the prepared symbol data sources.

    Returns:
      The prepared symbol data sources. None if failed.
    """
    # Prepare on first use if it has not been done yet.
    if not self._prepared_symbol_data_sources_path and not self.prepare():
      return None
    if not self._loaded_symbol_data_sources:
      LOGGER.info('Loading symbol mapping...')
      self._loaded_symbol_data_sources = (
          find_runtime_symbols.RuntimeSymbolsInProcess.load(
              self._prepared_symbol_data_sources_path))
    return self._loaded_symbol_data_sources

  def path(self):
    """Returns the path of the prepared symbol data sources if possible."""
    if not self._prepared_symbol_data_sources_path and not self.prepare():
      return None
    return self._prepared_symbol_data_sources_path
class SymbolFinder(object):
  """Finds corresponding symbols from addresses.

  Thin, mockable wrapper: its only operation, 'find()', resolves a list of
  addresses against the symbol data sources it was constructed with.
  """
  def __init__(self, symbol_type, symbol_data_sources):
    self._symbol_type = symbol_type
    self._symbol_data_sources = symbol_data_sources

  def find(self, address_list):
    sources = self._symbol_data_sources.get()
    return find_runtime_symbols.find_runtime_symbols(
        self._symbol_type, sources, address_list)
class SymbolMappingCache(object):
  """Caches mapping from actually used addresses to symbols.

  'update()' updates the cache from the original symbol data sources via
  'SymbolFinder'. Symbols can be looked up by the method 'lookup()'.
  """
  def __init__(self):
    # One address -> symbol dict per symbol type.
    self._symbol_mapping_caches = {
        FUNCTION_SYMBOLS: {},
        SOURCEFILE_SYMBOLS: {},
        TYPEINFO_SYMBOLS: {},
    }

  def update(self, symbol_type, bucket_set, symbol_finder, cache_f):
    """Updates symbol mapping cache on memory and in a symbol cache file.

    It reads cached symbol mapping from a symbol cache file |cache_f| if it
    exists.  Unresolved addresses are then resolved and added to the cache
    both on memory and in the symbol cache file with using 'SymbolFinder'.

    A cache file is formatted as follows:
      <Address> <Symbol>
      <Address> <Symbol>
      <Address> <Symbol>
      ...

    Args:
        symbol_type: A type of symbols to update.  It should be one of
            FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS and TYPEINFO_SYMBOLS.
        bucket_set: A BucketSet object.
        symbol_finder: A SymbolFinder object to find symbols.
        cache_f: A readable and writable IO object of the symbol cache file.
    """
    # Load any previously cached entries from the start of the file.
    cache_f.seek(0, os.SEEK_SET)
    self._load(cache_f, symbol_type)
    unresolved_addresses = sorted(
        address for address in bucket_set.iter_addresses(symbol_type)
        if address not in self._symbol_mapping_caches[symbol_type])
    if not unresolved_addresses:
      LOGGER.info('No need to resolve any more addresses.')
      return
    # Append newly resolved entries at the end of the cache file.
    cache_f.seek(0, os.SEEK_END)
    LOGGER.info('Loading %d unresolved addresses.' %
                len(unresolved_addresses))
    symbol_dict = symbol_finder.find(unresolved_addresses)
    # NOTE: iteritems() means this module is Python 2 only.
    for address, symbol in symbol_dict.iteritems():
      stripped_symbol = symbol.strip() or '?'  # '?' marks an unknown symbol
      self._symbol_mapping_caches[symbol_type][address] = stripped_symbol
      cache_f.write('%x %s\n' % (address, stripped_symbol))

  def lookup(self, symbol_type, address):
    """Looks up a symbol for a given |address|.

    Args:
        symbol_type: A type of symbols to update.  It should be one of
            FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS and TYPEINFO_SYMBOLS.
        address: An integer that represents an address.

    Returns:
        A string that represents a symbol.  None if not cached.
    """
    return self._symbol_mapping_caches[symbol_type].get(address)

  def _load(self, cache_f, symbol_type):
    # Parses '<hex address> <symbol>' lines from the cache file.
    try:
      for line in cache_f:
        items = line.rstrip().split(None, 1)
        if len(items) == 1:
          items.append('??')  # Placeholder for a line with no symbol text
        self._symbol_mapping_caches[symbol_type][int(items[0], 16)] = items[1]
      LOGGER.info('Loaded %d entries from symbol cache.' %
                  len(self._symbol_mapping_caches[symbol_type]))
    except IOError as e:
      # A missing/unreadable cache is not fatal; it will be rebuilt.
      LOGGER.info('The symbol cache file is invalid: %s' % e)
| bsd-3-clause |
hryamzik/ansible | lib/ansible/modules/cloud/ovirt/ovirt_external_provider.py | 12 | 14901 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_external_provider
short_description: Module to manage external providers in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage external providers in oVirt/RHV"
options:
name:
description:
- "Name of the external provider to manage."
state:
description:
- "Should the external be present or absent"
- "When you are using absent for I(os_volume), you need to make
sure that SD is not attached to the data center!"
choices: ['present', 'absent']
default: present
description:
description:
- "Description of the external provider."
type:
description:
- "Type of the external provider."
choices: ['os_image', 'network', 'os_volume', 'foreman']
url:
description:
- "URL where external provider is hosted."
- "Applicable for those types: I(os_image), I(os_volume), I(network) and I(foreman)."
username:
description:
- "Username to be used for login to external provider."
- "Applicable for all types."
password:
description:
- "Password of the user specified in C(username) parameter."
- "Applicable for all types."
tenant_name:
description:
- "Name of the tenant."
- "Applicable for those types: I(os_image), I(os_volume) and I(network)."
aliases: ['tenant']
authentication_url:
description:
- "Keystone authentication URL of the openstack provider."
- "Applicable for those types: I(os_image), I(os_volume) and I(network)."
aliases: ['auth_url']
data_center:
description:
- "Name of the data center where provider should be attached."
- "Applicable for those type: I(os_volume)."
read_only:
description:
- "Specify if the network should be read only."
- "Applicable if C(type) is I(network)."
network_type:
description:
- "Type of the external network provider either external (for example OVN) or neutron."
- "Applicable if C(type) is I(network)."
choices: ['external', 'neutron']
default: ['external']
authentication_keys:
description:
- "List of authentication keys. Each key is represented by dict
like {'uuid': 'our-uuid', 'value': 'YourSecretValue=='}"
- "When you will not pass these keys and there are already some
of them defined in the system they will be removed."
- "Applicable for I(os_volume)."
default: []
version_added: "2.6"
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add image external provider:
- ovirt_external_provider:
name: image_provider
type: os_image
url: http://1.2.3.4:9292
username: admin
password: 123456
tenant: admin
auth_url: http://1.2.3.4:35357/v2.0
# Add volume external provider:
- ovirt_external_provider:
name: image_provider
type: os_volume
url: http://1.2.3.4:9292
username: admin
password: 123456
tenant: admin
auth_url: http://1.2.3.4:5000/v2.0
authentication_keys:
-
uuid: "1234567-a1234-12a3-a234-123abc45678"
value: "ABCD00000000111111222333445w=="
# Add foreman provider:
- ovirt_external_provider:
name: foreman_provider
type: foreman
url: https://foreman.example.com
username: admin
password: 123456
# Add external network provider for OVN:
- ovirt_external_provider:
name: ovn_provider
type: network
network_type: external
url: http://1.2.3.4:9696
# Remove image external provider:
- ovirt_external_provider:
state: absent
name: image_provider
type: os_image
'''
RETURN = '''
id:
description: ID of the external provider which is managed
returned: On success if external provider is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
external_host_provider:
description: "Dictionary of all the external_host_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
returned: "On success and if parameter 'type: foreman' is used."
type: dictionary
openstack_image_provider:
description: "Dictionary of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
returned: "On success and if parameter 'type: os_image' is used."
type: dictionary
openstack_volume_provider:
description: "Dictionary of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
returned: "On success and if parameter 'type: os_volume' is used."
type: dictionary
openstack_network_provider:
description: "Dictionary of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
returned: "On success and if parameter 'type: network' is used."
type: dictionary
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_params,
check_sdk,
create_connection,
equal,
ovirt_full_argument_spec,
)
OS_VOLUME = 'os_volume'
OS_IMAGE = 'os_image'
NETWORK = 'network'
FOREMAN = 'foreman'
class ExternalProviderModule(BaseModule):
    """BaseModule subclass handling create/update of oVirt external providers."""

    # Module parameters that are not attributes of the SDK provider type and
    # so must not be copied onto the entity in build_entity().
    non_provider_params = ['type', 'authentication_keys', 'data_center']

    def provider_type(self, provider_type):
        # Setter-style method: remember which SDK class to instantiate.
        self._provider_type = provider_type

    def provider_module_params(self):
        """Return (name, value) pairs of params to copy onto the provider."""
        provider_params = [
            (key, value) for key, value in self._module.params.items() if key
            not in self.non_provider_params
        ]
        # data_center needs resolving from a name to an SDK object.
        provider_params.append(('data_center', self.get_data_center()))
        return provider_params

    def get_data_center(self):
        """Resolve the data_center parameter to an SDK object, or pass None through."""
        dc_name = self._module.params.get("data_center", None)
        if dc_name:
            system_service = self._connection.system_service()
            data_centers_service = system_service.data_centers_service()
            return data_centers_service.list(
                search='name=%s' % dc_name,
            )[0]
        return dc_name

    def build_entity(self):
        """Build the SDK provider entity from the module parameters."""
        provider_type = self._provider_type(
            requires_authentication=self._module.params.get('username') is not None,
        )
        # Network providers carry an extra 'type' (external vs neutron).
        if self._module.params.pop('type') == NETWORK:
            setattr(
                provider_type,
                'type',
                otypes.OpenStackNetworkProviderType(self._module.params.pop('network_type'))
            )
        # Copy every parameter that the SDK type actually supports.
        for key, value in self.provider_module_params():
            if hasattr(provider_type, key):
                setattr(provider_type, key, value)
        return provider_type

    def update_check(self, entity):
        """Return True when the existing entity already matches the params."""
        return (
            equal(self._module.params.get('description'), entity.description) and
            equal(self._module.params.get('url'), entity.url) and
            equal(self._module.params.get('authentication_url'), entity.authentication_url) and
            # getattr: not every provider type has tenant_name (e.g. foreman).
            equal(self._module.params.get('tenant_name'), getattr(entity, 'tenant_name', None)) and
            equal(self._module.params.get('username'), entity.username)
        )

    def update_volume_provider_auth_keys(
        self, provider, providers_service, keys
    ):
        """
        Update auth keys for volume provider, if not exist add them or remove
        if they are not specified and there are already defined in the external
        volume provider.

        Args:
            provider (dict): Volume provider details.
            providers_service (openstack_volume_providers_service): Provider
                service.
            keys (list): Keys to be updated/added to volume provider, each key
                is represented as dict with keys: uuid, value.
        """
        provider_service = providers_service.provider_service(provider['id'])
        auth_keys_service = provider_service.authentication_keys_service()
        provider_keys = auth_keys_service.list()
        # removing keys which are not defined
        for key in [
            k.id for k in provider_keys if k.uuid not in [
                defined_key['uuid'] for defined_key in keys
            ]
        ]:
            self.changed = True
            if not self._module.check_mode:  # check mode: report, don't act
                auth_keys_service.key_service(key).remove()
        if not (provider_keys or keys):
            # Nothing need to do when both are empty.
            return
        for key in keys:
            key_id_for_update = None
            for existing_key in provider_keys:
                if key['uuid'] == existing_key.uuid:
                    key_id_for_update = existing_key.id
            auth_key_usage_type = (
                otypes.OpenstackVolumeAuthenticationKeyUsageType("ceph")
            )
            auth_key = otypes.OpenstackVolumeAuthenticationKey(
                usage_type=auth_key_usage_type,
                uuid=key['uuid'],
                value=key['value'],
            )
            if not key_id_for_update:
                self.changed = True
                if not self._module.check_mode:
                    auth_keys_service.add(auth_key)
            else:
                # We cannot really distinguish here if it was really updated cause
                # we cannot take key value to check if it was changed or not. So
                # for sure we update here always.
                self.changed = True
                if not self._module.check_mode:
                    auth_key_service = (
                        auth_keys_service.key_service(key_id_for_update)
                    )
                    auth_key_service.update(auth_key)
def _external_provider_service(provider_type, system_service):
    """Resolve a provider type name to its (SDK type, engine service) pair.

    Returns None for an unknown name; callers pass validated choices only.
    Lambdas keep evaluation lazy so only the matched branch touches the SDK.
    """
    factories = {
        OS_IMAGE: lambda: (otypes.OpenStackImageProvider,
                           system_service.openstack_image_providers_service()),
        NETWORK: lambda: (otypes.OpenStackNetworkProvider,
                          system_service.openstack_network_providers_service()),
        OS_VOLUME: lambda: (otypes.OpenStackVolumeProvider,
                            system_service.openstack_volume_providers_service()),
        FOREMAN: lambda: (otypes.ExternalHostProvider,
                          system_service.external_host_providers_service()),
    }
    factory = factories.get(provider_type)
    return factory() if factory is not None else None
def main():
    """Module entry point: parse parameters, then create/remove the provider."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None),
        description=dict(default=None),
        type=dict(
            default=None,
            required=True,
            choices=[
                OS_IMAGE, NETWORK, OS_VOLUME, FOREMAN,
            ],
            aliases=['provider'],
        ),
        url=dict(default=None),
        username=dict(default=None),
        password=dict(default=None, no_log=True),
        tenant_name=dict(default=None, aliases=['tenant']),
        authentication_url=dict(default=None, aliases=['auth_url']),
        data_center=dict(default=None),
        read_only=dict(default=None, type='bool'),
        network_type=dict(
            default='external',
            choices=['external', 'neutron'],
        ),
        authentication_keys=dict(
            default=[], aliases=['auth_keys'], type='list', no_log=True,
        ),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    # Warn users still invoking the module under its deprecated plural name.
    if module._name == 'ovirt_external_providers':
        module.deprecate("The 'ovirt_external_providers' module is being renamed 'ovirt_external_provider'", version=2.8)
    check_sdk(module)
    check_params(module)
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        provider_type_param = module.params.get('type')
        # Pick the SDK type and the matching engine service for this provider.
        provider_type, external_providers_service = _external_provider_service(
            provider_type=provider_type_param,
            system_service=connection.system_service(),
        )
        external_providers_module = ExternalProviderModule(
            connection=connection,
            module=module,
            service=external_providers_service,
        )
        external_providers_module.provider_type(provider_type)
        state = module.params.pop('state')
        if state == 'absent':
            ret = external_providers_module.remove()
        elif state == 'present':
            ret = external_providers_module.create()
            openstack_volume_provider_id = ret.get('id')
            # Volume providers additionally carry Ceph authentication keys,
            # managed through a sub-service after the provider exists.
            if (
                provider_type_param == OS_VOLUME and
                openstack_volume_provider_id
            ):
                external_providers_module.update_volume_provider_auth_keys(
                    ret, external_providers_service,
                    module.params.get('authentication_keys'),
                )
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Log out only if this run created the session (no pre-existing token).
        connection.close(logout=auth.get('token') is None)
| gpl-3.0 |
Athrun29/horizon | openstack_dashboard/test/integration_tests/pages/project/compute/imagespage.py | 3 | 4987 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.common import exceptions
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
class ImagesPage(basepage.BaseNavigationPage):
DEFAULT_IMAGE_SOURCE = 'url'
DEFAULT_IMAGE_FORMAT = 'qcow2'
DEFAULT_ACCESSIBILITY = False
DEFAULT_PROTECTION = False
IMAGES_TABLE_NAME_COLUMN_INDEX = 0
IMAGES_TABLE_STATUS_COLUMN_INDEX = 2
_images_table_locator = (by.By.ID, 'images')
IMAGES_TABLE_NAME = "images"
IMAGES_TABLE_ACTIONS = ("create", "delete")
IMAGES_TABLE_ROW_ACTIONS = {
tables.ComplexActionRowRegion.PRIMARY_ACTION: "launch",
tables.ComplexActionRowRegion.SECONDARY_ACTIONS: ("create_volume",)
}
CREATE_IMAGE_FORM_FIELDS = (
"name", "description", "source_type", "image_url",
"image_file", "kernel", "ramdisk",
"disk_format", "architecture", "minimum_disk",
"minimum_ram", "is_public", "protected"
)
def __init__(self, driver, conf):
super(ImagesPage, self).__init__(driver, conf)
self._page_title = "Images"
def _get_row_with_image_name(self, name):
return self.images_table.get_row(
self.IMAGES_TABLE_NAME_COLUMN_INDEX, name)
@property
def images_table(self):
src_elem = self._get_element(*self._images_table_locator)
return tables.ComplexActionTableRegion(self.driver,
self.conf, src_elem,
self.IMAGES_TABLE_NAME,
self.IMAGES_TABLE_ACTIONS,
self.IMAGES_TABLE_ROW_ACTIONS
)
@property
def create_image_form(self):
return forms.FormRegion(self.driver, self.conf, None,
self.CREATE_IMAGE_FORM_FIELDS)
@property
def confirm_delete_images_form(self):
return forms.BaseFormRegion(self.driver, self.conf, None)
def create_image(self, name, description=None,
image_source_type=DEFAULT_IMAGE_SOURCE,
location=None, image_file=None,
image_format=DEFAULT_IMAGE_FORMAT,
is_public=DEFAULT_ACCESSIBILITY,
is_protected=DEFAULT_PROTECTION):
self.images_table.create.click()
self.create_image_form.name.text = name
if description is not None:
self.create_image_form.description.text = description
self.create_image_form.source_type.value = image_source_type
if image_source_type == 'url':
if location is None:
self.create_image_form.image_url.text = \
self.conf.image.http_image
else:
self.create_image_form.image_url.text = location
else:
self.create_image_form.image_file.choose(image_file)
self.create_image_form.disk_format.value = image_format
if is_public:
self.create_image_form.is_public.mark()
if is_protected:
self.create_image_form.protected.mark()
self.create_image_form.submit.click()
self.wait_till_popups_disappear()
def delete_image(self, name):
    """Delete the named image via the table's delete action and confirm.

    Blocks until the confirmation popups disappear.
    """
    row = self._get_row_with_image_name(name)
    row.mark()
    self.images_table.delete.click()
    self.confirm_delete_images_form.submit.click()
    self.wait_till_popups_disappear()
def is_image_present(self, name):
    """Return True when a row with the given image name exists."""
    row = self._get_row_with_image_name(name)
    return bool(row)
def is_image_active(self, name):
    """Return True once the image's status cell reads 'Active'.

    Returns False if the text does not appear before the wait helper
    times out.
    """
    row = self._get_row_with_image_name(name)

    # NOTE(tsufiev): better to wrap getting image status cell in a lambda
    # to avoid problems with cell being replaced with totally different
    # element by Javascript
    def cell_getter():
        return row.cells[self.IMAGES_TABLE_STATUS_COLUMN_INDEX]

    try:
        self._wait_till_text_present_in_element(cell_getter, 'Active')
    except exceptions.TimeoutException:
        return False
    return True
def wait_until_image_active(self, name):
    """Poll until the named image reports 'Active' status."""
    self._wait_until(lambda x: self.is_image_active(name))
| apache-2.0 |
rschnapka/odoo | addons/l10n_fr/report/__init__.py | 424 | 1475 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import base_report
import bilan_report
import compute_resultant_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tangledupinblue/pi-node-terminal | cardReader/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 1788 | 1435 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
    """Return the string form of *codePoint*.

    BMP code points (U+0000..U+FFFF) yield the character itself;
    supplementary-plane code points (U+10000..U+10FFFF) yield a UTF-16
    surrogate pair; anything outside the Unicode range yields 'Error'.

    https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
    http://stackoverflow.com/a/13436167/96656
    """
    # `unichr` only exists on Python 2; fall back to `chr` so the script
    # also runs on Python 3 (where `chr` accepts the full range, lone
    # surrogates included).
    try:
        _chr = unichr
    except NameError:
        _chr = chr
    if codePoint >= 0x0000 and codePoint <= 0xFFFF:
        return _chr(codePoint)
    elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
        # Split the astral code point into its UTF-16 surrogate pair.
        highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
        lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
        return _chr(highSurrogate) + _chr(lowSurrogate)
    else:
        return 'Error'
def hexify(codePoint):
    """Format a code point as a zero-padded 'U+XXXXXX' string."""
    digits = hex(codePoint)[2:].upper()
    return 'U+' + digits.zfill(6)
def writeFile(filename, contents):
    """Write *contents* (stripped, plus a trailing newline) to *filename*,
    echoing the file name to stdout."""
    # Parenthesized so the script also runs on Python 3, where the
    # Python-2-only `print filename` statement form is a syntax error.
    print(filename)
    with open(filename, 'w') as f:
        f.write(contents.strip() + '\n')
# Build one record per Unicode scalar value: the code point, its string
# form ("decoded"), and its UTF-8 bytes reinterpreted as latin1
# ("encoded") so each byte maps back to exactly one character.
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
    # Skip non-scalar values.
    if codePoint >= 0xD800 and codePoint <= 0xDFFF:
        continue
    symbol = unisymbol(codePoint)
    # http://stackoverflow.com/a/17199950/96656
    # NOTE(review): the name `bytes` shadows the builtin for the rest of
    # this script.
    bytes = symbol.encode('utf8').decode('latin1')
    data.append({
        'codePoint': codePoint,
        'decoded': symbol,
        'encoded': bytes
    });
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
    r'\\u([a-fA-F0-9]{4})',
    lambda match: r'\u{}'.format(match.group(1).upper()),
    jsonData
)
writeFile('data.json', jsonData)
| mit |
foss-transportationmodeling/rettina-server | .env/lib/python2.7/site-packages/sqlalchemy/orm/deprecated_interfaces.py | 21 | 21941 | # orm/deprecated_interfaces.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import event, util
from .interfaces import EXT_CONTINUE
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class MapperExtension(object):
    """Base implementation for :class:`.Mapper` event hooks.

    .. note::

       :class:`.MapperExtension` is deprecated. Please
       refer to :func:`.event.listen` as well as
       :class:`.MapperEvents`.

    New extension classes subclass :class:`.MapperExtension` and are specified
    using the ``extension`` mapper() argument, which is a single
    :class:`.MapperExtension` or a list of such::

        from sqlalchemy.orm.interfaces import MapperExtension

        class MyExtension(MapperExtension):
            def before_insert(self, mapper, connection, instance):
                print "instance %s before insert !" % instance

        m = mapper(User, users_table, extension=MyExtension())

    A single mapper can maintain a chain of ``MapperExtension``
    objects. When a particular mapping event occurs, the
    corresponding method on each ``MapperExtension`` is invoked
    serially, and each method has the ability to halt the chain
    from proceeding further::

        m = mapper(User, users_table, extension=[ext1, ext2, ext3])

    Each ``MapperExtension`` method returns the symbol
    EXT_CONTINUE by default. This symbol generally means "move
    to the next ``MapperExtension`` for processing". For methods
    that return objects like translated rows or new object
    instances, EXT_CONTINUE means the result of the method
    should be ignored. In some cases it's required for a
    default mapper activity to be performed, such as adding a
    new instance to a result list.

    The symbol EXT_STOP has significance within a chain
    of ``MapperExtension`` objects that the chain will be stopped
    when this symbol is returned. Like EXT_CONTINUE, it also
    has additional significance in some cases that a default
    mapper activity will not be performed.

    """

    @classmethod
    def _adapt_instrument_class(cls, self, listener):
        # Route the single class-instrumentation hook through the same
        # event-based adaptation used for the other hooks.
        cls._adapt_listener_methods(self, listener, ('instrument_class',))

    @classmethod
    def _adapt_listener(cls, self, listener):
        cls._adapt_listener_methods(
            self, listener,
            (
                'init_instance',
                'init_failed',
                'translate_row',
                'create_instance',
                'append_result',
                'populate_instance',
                'reconstruct_instance',
                'before_insert',
                'after_insert',
                'before_update',
                'after_update',
                'before_delete',
                'after_delete'
            ))

    @classmethod
    def _adapt_listener_methods(cls, self, listener, methods):
        # For each hook that the listener actually overrides (i.e. its
        # method differs from the no-op default on MapperExtension),
        # register an equivalent listener with the modern event system.
        # The `go(ls_meth)` factories below exist to bind the current
        # `ls_meth` into each closure.
        for meth in methods:
            me_meth = getattr(MapperExtension, meth)
            ls_meth = getattr(listener, meth)

            if not util.methods_equivalent(me_meth, ls_meth):
                if meth == 'reconstruct_instance':
                    def go(ls_meth):
                        def reconstruct(instance, ctx):
                            ls_meth(self, instance)
                        return reconstruct
                    event.listen(self.class_manager, 'load',
                                 go(ls_meth), raw=False, propagate=True)
                elif meth == 'init_instance':
                    def go(ls_meth):
                        def init_instance(instance, args, kwargs):
                            ls_meth(self, self.class_,
                                    self.class_manager.original_init,
                                    instance, args, kwargs)
                        return init_instance
                    event.listen(self.class_manager, 'init',
                                 go(ls_meth), raw=False, propagate=True)
                elif meth == 'init_failed':
                    def go(ls_meth):
                        def init_failed(instance, args, kwargs):
                            util.warn_exception(
                                ls_meth, self, self.class_,
                                self.class_manager.original_init,
                                instance, args, kwargs)
                        return init_failed
                    event.listen(self.class_manager, 'init_failure',
                                 go(ls_meth), raw=False, propagate=True)
                else:
                    event.listen(self, "%s" % meth, ls_meth,
                                 raw=False, retval=True, propagate=True)

    def instrument_class(self, mapper, class_):
        """Receive a class when the mapper is first constructed, and has
        applied instrumentation to the mapped class.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
        """Receive an instance when its constructor is called.

        This method is only called during a userland construction of
        an object. It is not called when an object is loaded from the
        database.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
        """Receive an instance when its constructor has been called,
        and raised an exception.

        This method is only called during a userland construction of
        an object. It is not called when an object is loaded from the
        database.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def translate_row(self, mapper, context, row):
        """Perform pre-processing on the given result row and return a
        new row instance.

        This is called when the mapper first receives a row, before
        the object identity or the instance itself has been derived
        from that row. The given row may or may not be a
        ``RowProxy`` object - it will always be a dictionary-like
        object which contains mapped columns as keys. The
        returned object should also be a dictionary-like object
        which recognizes mapped columns as keys.

        If the ultimate return value is EXT_CONTINUE, the row
        is not translated.

        """
        return EXT_CONTINUE

    def create_instance(self, mapper, selectcontext, row, class_):
        """Receive a row when a new object instance is about to be
        created from that row.

        The method can choose to create the instance itself, or it can return
        EXT_CONTINUE to indicate normal object creation should take place.

        mapper
          The mapper doing the operation

        selectcontext
          The QueryContext generated from the Query.

        row
          The result row from the database

        class\_
          The class we are mapping.

        return value
          A new object instance, or EXT_CONTINUE

        """
        return EXT_CONTINUE

    def append_result(self, mapper, selectcontext, row, instance,
                      result, **flags):
        """Receive an object instance before that instance is appended
        to a result list.

        If this method returns EXT_CONTINUE, result appending will proceed
        normally. if this method returns any other value or None,
        result appending will not proceed for this instance, giving
        this extension an opportunity to do the appending itself, if
        desired.

        mapper
          The mapper doing the operation.

        selectcontext
          The QueryContext generated from the Query.

        row
          The result row from the database.

        instance
          The object instance to be appended to the result.

        result
          List to which results are being appended.

        \**flags
          extra information about the row, same as criterion in
          ``create_row_processor()`` method of
          :class:`~sqlalchemy.orm.interfaces.MapperProperty`

        """
        return EXT_CONTINUE

    def populate_instance(self, mapper, selectcontext, row,
                          instance, **flags):
        """Receive an instance before that instance has
        its attributes populated.

        This usually corresponds to a newly loaded instance but may
        also correspond to an already-loaded instance which has
        unloaded attributes to be populated. The method may be called
        many times for a single instance, as multiple result rows are
        used to populate eagerly loaded collections.

        If this method returns EXT_CONTINUE, instance population will
        proceed normally. If any other value or None is returned,
        instance population will not proceed, giving this extension an
        opportunity to populate the instance itself, if desired.

        .. deprecated:: 0.5
            Most usages of this hook are obsolete. For a
            generic "object has been newly created from a row" hook, use
            ``reconstruct_instance()``, or the ``@orm.reconstructor``
            decorator.

        """
        return EXT_CONTINUE

    def reconstruct_instance(self, mapper, instance):
        """Receive an object instance after it has been created via
        ``__new__``, and after initial attribute population has
        occurred.

        This typically occurs when the instance is created based on
        incoming result rows, and is only called once for that
        instance's lifetime.

        Note that during a result-row load, this method is called upon
        the first row received for this instance. Note that some
        attributes and collections may or may not be loaded or even
        initialized, depending on what's present in the result rows.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def before_insert(self, mapper, connection, instance):
        """Receive an object instance before that instance is inserted
        into its table.

        This is a good place to set up primary key values and such
        that aren't handled otherwise.

        Column-based attributes can be modified within this method
        which will result in the new value being inserted. However
        *no* changes to the overall flush plan can be made, and
        manipulation of the ``Session`` will not have the desired effect.
        To manipulate the ``Session`` within an extension, use
        ``SessionExtension``.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def after_insert(self, mapper, connection, instance):
        """Receive an object instance after that instance is inserted.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def before_update(self, mapper, connection, instance):
        """Receive an object instance before that instance is updated.

        Note that this method is called for all instances that are marked as
        "dirty", even those which have no net changes to their column-based
        attributes. An object is marked as dirty when any of its column-based
        attributes have a "set attribute" operation called or when any of its
        collections are modified. If, at update time, no column-based
        attributes have any net changes, no UPDATE statement will be issued.
        This means that an instance being sent to before_update is *not* a
        guarantee that an UPDATE statement will be issued (although you can
        affect the outcome here).

        To detect if the column-based attributes on the object have net
        changes, and will therefore generate an UPDATE statement, use
        ``object_session(instance).is_modified(instance,
        include_collections=False)``.

        Column-based attributes can be modified within this method
        which will result in the new value being updated. However
        *no* changes to the overall flush plan can be made, and
        manipulation of the ``Session`` will not have the desired effect.
        To manipulate the ``Session`` within an extension, use
        ``SessionExtension``.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def after_update(self, mapper, connection, instance):
        """Receive an object instance after that instance is updated.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def before_delete(self, mapper, connection, instance):
        """Receive an object instance before that instance is deleted.

        Note that *no* changes to the overall flush plan can be made
        here; and manipulation of the ``Session`` will not have the
        desired effect. To manipulate the ``Session`` within an
        extension, use ``SessionExtension``.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def after_delete(self, mapper, connection, instance):
        """Receive an object instance after that instance is deleted.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class SessionExtension(object):

    """Base implementation for :class:`.Session` event hooks.

    .. note::

       :class:`.SessionExtension` is deprecated. Please
       refer to :func:`.event.listen` as well as
       :class:`.SessionEvents`.

    Subclasses may be installed into a :class:`.Session` (or
    :class:`.sessionmaker`) using the ``extension`` keyword
    argument::

        from sqlalchemy.orm.interfaces import SessionExtension

        class MySessionExtension(SessionExtension):
            def before_commit(self, session):
                print "before commit!"

        Session = sessionmaker(extension=MySessionExtension())

    The same :class:`.SessionExtension` instance can be used
    with any number of sessions.

    """

    @classmethod
    def _adapt_listener(cls, self, listener):
        # Register an event listener for each hook the legacy listener
        # actually overrides (i.e. differs from the no-op default).
        for meth in [
            'before_commit',
            'after_commit',
            'after_rollback',
            'before_flush',
            'after_flush',
            'after_flush_postexec',
            'after_begin',
            'after_attach',
            'after_bulk_update',
            'after_bulk_delete',
        ]:
            me_meth = getattr(SessionExtension, meth)
            ls_meth = getattr(listener, meth)

            if not util.methods_equivalent(me_meth, ls_meth):
                event.listen(self, meth, getattr(listener, meth))

    def before_commit(self, session):
        """Execute right before commit is called.

        Note that this may not be per-flush if a longer running
        transaction is ongoing."""

    def after_commit(self, session):
        """Execute after a commit has occurred.

        Note that this may not be per-flush if a longer running
        transaction is ongoing."""

    def after_rollback(self, session):
        """Execute after a rollback has occurred.

        Note that this may not be per-flush if a longer running
        transaction is ongoing."""

    def before_flush(self, session, flush_context, instances):
        """Execute before flush process has started.

        `instances` is an optional list of objects which were passed to
        the ``flush()`` method. """

    def after_flush(self, session, flush_context):
        """Execute after flush has completed, but before commit has been
        called.

        Note that the session's state is still in pre-flush, i.e. 'new',
        'dirty', and 'deleted' lists still show pre-flush state as well
        as the history settings on instance attributes."""

    def after_flush_postexec(self, session, flush_context):
        """Execute after flush has completed, and after the post-exec
        state occurs.

        This will be when the 'new', 'dirty', and 'deleted' lists are in
        their final state. An actual commit() may or may not have
        occurred, depending on whether or not the flush started its own
        transaction or participated in a larger transaction. """

    def after_begin(self, session, transaction, connection):
        """Execute after a transaction is begun on a connection

        `transaction` is the SessionTransaction. This method is called
        after an engine level transaction is begun on a connection. """

    def after_attach(self, session, instance):
        """Execute after an instance is attached to a session.

        This is called after an add, delete or merge. """

    def after_bulk_update(self, session, query, query_context, result):
        """Execute after a bulk update operation to the session.

        This is called after a session.query(...).update()

        `query` is the query object that this update operation was
        called on. `query_context` was the query context object.
        `result` is the result object returned from the bulk operation.
        """

    def after_bulk_delete(self, session, query, query_context, result):
        """Execute after a bulk delete operation to the session.

        This is called after a session.query(...).delete()

        `query` is the query object that this delete operation was
        called on. `query_context` was the query context object.
        `result` is the result object returned from the bulk operation.
        """
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class AttributeExtension(object):
    """Base implementation for :class:`.AttributeImpl` event hooks, events
    that fire upon attribute mutations in user code.

    .. note::

       :class:`.AttributeExtension` is deprecated. Please
       refer to :func:`.event.listen` as well as
       :class:`.AttributeEvents`.

    :class:`.AttributeExtension` is used to listen for set,
    remove, and append events on individual mapped attributes.
    It is established on an individual mapped attribute using
    the `extension` argument, available on
    :func:`.column_property`, :func:`.relationship`, and
    others::

        from sqlalchemy.orm.interfaces import AttributeExtension
        from sqlalchemy.orm import mapper, relationship, column_property

        class MyAttrExt(AttributeExtension):
            def append(self, state, value, initiator):
                print "append event !"
                return value

            def set(self, state, value, oldvalue, initiator):
                print "set event !"
                return value

        mapper(SomeClass, sometable, properties={
            'foo':column_property(sometable.c.foo, extension=MyAttrExt()),
            'bar':relationship(Bar, extension=MyAttrExt())
        })

    Note that the :class:`.AttributeExtension` methods
    :meth:`~.AttributeExtension.append` and
    :meth:`~.AttributeExtension.set` need to return the
    ``value`` parameter. The returned value is used as the
    effective value, and allows the extension to change what is
    ultimately persisted.

    AttributeExtension is assembled within the descriptors associated
    with a mapped class.

    """

    active_history = True
    """indicates that the set() method would like to receive the 'old' value,
    even if it means firing lazy callables.

    Note that ``active_history`` can also be set directly via
    :func:`.column_property` and :func:`.relationship`.

    """

    @classmethod
    def _adapt_listener(cls, self, listener):
        # Bridge the three legacy attribute hooks onto the event system;
        # retval=True makes the listener's return value the effective one.
        event.listen(self, 'append', listener.append,
                     active_history=listener.active_history,
                     raw=True, retval=True)
        event.listen(self, 'remove', listener.remove,
                     active_history=listener.active_history,
                     raw=True, retval=True)
        event.listen(self, 'set', listener.set,
                     active_history=listener.active_history,
                     raw=True, retval=True)

    def append(self, state, value, initiator):
        """Receive a collection append event.

        The returned value will be used as the actual value to be
        appended.

        """
        return value

    def remove(self, state, value, initiator):
        """Receive a remove event.

        No return value is defined.

        """
        pass

    def set(self, state, value, oldvalue, initiator):
        """Receive a set event.

        The returned value will be used as the actual value to be
        set.

        """
        return value
| apache-2.0 |
odoo-arg/odoo_l10n_ar | l10n_ar_invoice_presentation/models/presentation_header.py | 1 | 2454 | # - coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.exceptions import Warning
from openerp import models
import l10n_ar_api.presentations.presentation as presentation_builder
class AccountInvoicePresentation(models.Model):
    """Adds header-file generation for the AFIP sales/purchases
    ("ventasCompras") presentation."""

    _inherit = 'account.invoice.presentation'

    def validate_header(self):
        """
        Validate that the company has the data required for the header.

        :raises Warning: if the company's partner has no CUIT
            (Argentinian tax id).
        """
        # NOTE(review): validation checks the *partner's* vat, while
        # generate_header_file writes self.company_id.vat into the line --
        # presumably these resolve to the same value; confirm.
        if not self.company_id.partner_id.vat:
            raise Warning(
                "ERROR\nLa presentacion no pudo ser generada porque la compania no tiene CUIT\n"
            )

    def generate_header_file(self):
        """
        Generate the header file. Uses the presentations API and tools to
        create the files and format the data.

        :return: API object (generator) with the presentation lines
            created.
        """
        self.validate_header()

        cabecera = presentation_builder.Presentation("ventasCompras", "cabecera")

        line = cabecera.create_line()
        line.cuit = self.company_id.vat
        line.periodo = self.get_period()
        line.secuencia = self.sequence
        line.sinMovimiento = 'S'
        # Prorate flag drives which global tax-credit computation code
        # ('1' or '2') is reported.
        if self.with_prorate:
            line.prorratearCFC = 'S'
            line.cFCGlobal = '1'
        else:
            line.prorratearCFC = 'N'
            line.cFCGlobal = '2'
        # Amount fields are always reported as zero in this header.
        line.importeCFCG = 0
        line.importeCFCAD = 0
        line.importeCFCP = 0
        line.importeCFnCG = 0
        line.cFCSSyOC = 0
        line.cFCCSSyOC = 0

        return cabecera
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gdit-cnd/RAPID | core/tasks.py | 2 | 1287 | import os
import gzip
import urllib.request
from celery import shared_task
from django.core.mail import EmailMessage
from celery.task import periodic_task
from celery.schedules import crontab
@shared_task(name='deliver_email')
def deliver_email(subject=None, body=None, recipients=None):
    """Send *body* under *subject* to each address in *recipients*."""
    # A falsy recipients value (None or empty) means nothing to send.
    for recipient in (recipients or ()):
        message = EmailMessage(subject, body, to=[recipient])
        message.send()
@periodic_task(bind=True, run_every=crontab(0, 0, day_of_month='7'))
def update_geolocation(self):
    """Refresh the GeoLite2 city database (scheduled for the 7th of each
    month).

    Downloads the gzipped database from MaxMind next to this module,
    decompresses it to GeoLite2-City.mmdb, and removes the archive.
    """
    import shutil

    # Establish desired paths and directories
    current_directory = os.path.dirname(__file__)
    compressed_filepath = os.path.join(current_directory, 'GeoLite2-City.mmdb.gz')
    uncompressed_filepath = os.path.join(current_directory, 'GeoLite2-City.mmdb')

    # Pull down current database file
    url = "http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz"
    urllib.request.urlretrieve(url, compressed_filepath)

    try:
        # Stream-decompress to the final location. Context managers
        # guarantee both handles are closed even on error (the original
        # code leaked them on failure), and copyfileobj avoids loading
        # the whole database into memory.
        with gzip.open(compressed_filepath, "rb") as zipped, \
                open(uncompressed_filepath, "wb") as uncompressed:
            shutil.copyfileobj(zipped, uncompressed)
    finally:
        # Remove zipped file, even if decompression failed.
        os.remove(compressed_filepath)
pylbert/upm | examples/python/ehr.py | 6 | 2666 | #!/usr/bin/env python
# Author: Zion Orent <zorent@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_ehr as upmehr
def main():
    """Continuously sample an ear-clip heart rate sensor and print the
    elapsed time, beat count, and computed heart rate once per second.

    Runs until interrupted; Ctrl-C stops the beat counter and exits
    cleanly via the registered handlers.
    """
    # Instantiate a Ear-clip Heart Rate sensor on digital pin D2
    myHeartRateSensor = upmehr.EHR(2)

    ## Exit handlers ##
    # This stops python from printing a stacktrace when you hit control-C
    def SIGINTHandler(signum, frame):
        raise SystemExit

    # This lets you run code on exit,
    # including functions from myHeartRateSensor
    def exitHandler():
        myHeartRateSensor.stopBeatCounter()
        print("Exiting")
        sys.exit(0)

    # Register exit handlers
    atexit.register(exitHandler)
    signal.signal(signal.SIGINT, SIGINTHandler)

    # set the beat counter to 0, init the clock and start counting beats
    myHeartRateSensor.clearBeatCounter()
    myHeartRateSensor.initClock()
    myHeartRateSensor.startBeatCounter()

    while(1):
        # we grab these (millis and flowCount) just for display
        # purposes in this example
        millis = myHeartRateSensor.getMillis()
        beats = myHeartRateSensor.beatCounter()

        # heartRate() requires that at least 5 seconds pass before
        # returning anything other than 0
        fr = myHeartRateSensor.heartRate()

        # output milliseconds passed, beat count, and computed heart rate
        outputStr = "Millis: {0} Beats: {1} Heart Rate: {2}".format(
            millis, beats, fr)
        print(outputStr)
        time.sleep(1)


if __name__ == '__main__':
    main()
| mit |
tdtrask/ansible | lib/ansible/parsing/yaml/constructor.py | 96 | 6286 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from yaml.constructor import SafeConstructor, ConstructorError
from yaml.nodes import MappingNode
from ansible.module_utils._text import to_bytes
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.utils.unsafe_proxy import wrap_var
from ansible.parsing.vault import VaultLib
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class AnsibleConstructor(SafeConstructor):
    """YAML constructor producing Ansible's position-aware mapping,
    sequence and string objects, plus vault-decrypting and unsafe-marked
    strings for the !vault / !unsafe tags."""

    def __init__(self, file_name=None, vault_secrets=None):
        # Remember the originating file name so node positions can point
        # back at it (falls back to the YAML mark's name otherwise).
        self._ansible_file_name = file_name
        super(AnsibleConstructor, self).__init__()
        self._vaults = {}
        self.vault_secrets = vault_secrets or []
        self._vaults['default'] = VaultLib(secrets=self.vault_secrets)

    def construct_yaml_map(self, node):
        # Generator-style constructor: yield the (empty) mapping first so
        # PyYAML can resolve recursive references, then fill it in.
        data = AnsibleMapping()
        yield data
        value = self.construct_mapping(node)
        data.update(value)
        data.ansible_pos = self._node_position_info(node)

    def construct_mapping(self, node, deep=False):
        # Most of this is from yaml.constructor.SafeConstructor. We replicate
        # it here so that we can warn users when they have duplicate dict keys
        # (pyyaml silently allows overwriting keys)
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                                   "expected a mapping node, but found %s" % node.id,
                                   node.start_mark)
        self.flatten_mapping(node)
        mapping = AnsibleMapping()

        # Add our extra information to the returned value
        mapping.ansible_pos = self._node_position_info(node)

        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            try:
                hash(key)
            except TypeError as exc:
                raise ConstructorError("while constructing a mapping", node.start_mark,
                                       "found unacceptable key (%s)" % exc, key_node.start_mark)

            if key in mapping:
                display.warning(u'While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}).'
                                u' Using last defined value only.'.format(key, *mapping.ansible_pos))

            value = self.construct_object(value_node, deep=deep)

            mapping[key] = value

        return mapping

    def construct_yaml_str(self, node, unsafe=False):
        # Override the default string handling function
        # to always return unicode objects
        value = self.construct_scalar(node)
        ret = AnsibleUnicode(value)

        ret.ansible_pos = self._node_position_info(node)

        if unsafe:
            ret = wrap_var(ret)

        return ret

    def construct_vault_encrypted_unicode(self, node):
        """Build a lazily-decrypted string from a !vault-tagged scalar."""
        value = self.construct_scalar(node)
        b_ciphertext_data = to_bytes(value)
        # could pass in a key id here to choose the vault to associate with
        # TODO/FIXME: plugin vault selector
        vault = self._vaults['default']
        if vault.secrets is None:
            raise ConstructorError(context=None, context_mark=None,
                                   problem="found !vault but no vault password provided",
                                   problem_mark=node.start_mark,
                                   note=None)
        ret = AnsibleVaultEncryptedUnicode(b_ciphertext_data)
        ret.vault = vault
        return ret

    def construct_yaml_seq(self, node):
        # Generator-style constructor, same pattern as construct_yaml_map.
        data = AnsibleSequence()
        yield data
        data.extend(self.construct_sequence(node))
        data.ansible_pos = self._node_position_info(node)

    def construct_yaml_unsafe(self, node):
        # !unsafe strings are wrapped so they are never templated.
        return self.construct_yaml_str(node, unsafe=True)

    def _node_position_info(self, node):
        """Return a (datasource, line, column) triple for *node*."""
        # the line number where the previous token has ended (plus empty lines)

        # Add one so that the first line is line 1 rather than line 0
        column = node.start_mark.column + 1
        line = node.start_mark.line + 1

        # in some cases, we may have pre-read the data and then
        # passed it to the load() call for YAML, in which case we
        # want to override the default datasource (which would be
        # '<string>') to the actual filename we read in
        datasource = self._ansible_file_name or node.start_mark.name

        return (datasource, line, column)
# Register the Ansible-aware constructors: standard YAML tags are routed to
# the position-tracking map/str/seq builders above, and the custom !unsafe
# and !vault tags get their dedicated handlers.
AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:map',
    AnsibleConstructor.construct_yaml_map)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:python/dict',
    AnsibleConstructor.construct_yaml_map)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:str',
    AnsibleConstructor.construct_yaml_str)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:python/unicode',
    AnsibleConstructor.construct_yaml_str)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:seq',
    AnsibleConstructor.construct_yaml_seq)

AnsibleConstructor.add_constructor(
    u'!unsafe',
    AnsibleConstructor.construct_yaml_unsafe)

AnsibleConstructor.add_constructor(
    u'!vault',
    AnsibleConstructor.construct_vault_encrypted_unicode)

# legacy alias for the !vault tag
AnsibleConstructor.add_constructor(u'!vault-encrypted', AnsibleConstructor.construct_vault_encrypted_unicode)
| gpl-3.0 |
ninadpdr/python-saml | setup.py | 1 | 1286 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, OneLogin, Inc.
# All rights reserved.
from setuptools import setup
# Packaging metadata for the OneLogin python-saml toolkit (sources under src/).
setup(
    name='python-saml',
    version='2.1.5',
    description='Onelogin Python Toolkit. Add SAML support to your Python software using this library',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
    ],
    author='OneLogin',
    author_email='support@onelogin.com',
    license='MIT',
    url='https://github.com/onelogin/python-saml',
    # NOTE(review): setuptools package names are conventionally dot-separated
    # ('onelogin.saml2'); the slash form happens to work on POSIX builds but
    # should be confirmed before changing.
    packages=['onelogin','onelogin/saml2'],
    include_package_data=True,
    package_data = {
        'onelogin/saml2/schemas': ['*.xsd'],
    },
    # all importable code lives under src/
    package_dir={
        '': 'src',
    },
    test_suite='tests',
    install_requires=[
        'dm.xmlsec.binding==1.3.2',
        'isodate==0.5.0',
        'defusedxml==0.4.1',
    ],
    # extra tools only needed when running the test/lint suite
    extras_require={
        'test': (
            'coverage==3.7.1',
            'pylint==1.3.1',
            'pep8==1.5.7',
            'pyflakes==0.8.1',
            'coveralls==0.4.4',
        ),
    },
    keywords='saml saml2 xmlsec django flask',
)
| bsd-3-clause |
roadmapper/ansible | test/units/modules/network/fortios/test_fortios_webfilter_override.py | 20 | 10787 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_webfilter_override
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Automatically patch the module's Connection class for every test.

    Keeps the FortiOSHandler under test fully offline: no real device
    connection is ever opened.
    """
    patched_connection = mocker.patch('ansible.modules.network.fortios.fortios_webfilter_override.Connection')
    return patched_connection
# Shared handler under test; the autouse fixture above mocks its Connection.
fos_instance = FortiOSHandler(connection_mock)
def test_webfilter_override_creation(mocker):
    """With state=present and a 200 reply, the module must POST the payload
    (underscored arg names mapped to hyphenated API keys) and report a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)

    override = {
        'expires': 'test_value_3',
        'id': '4',
        'initiator': 'test_value_5',
        'ip': 'test_value_6',
        'ip6': 'test_value_7',
        'new_profile': 'test_value_8',
        'old_profile': 'test_value_9',
        'scope': 'user',
        'status': 'enable',
        'user': 'test_value_12',
        'user_group': 'test_value_13'
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'webfilter_override': override,
        'vdom': 'root'}
    # The API payload uses hyphens where the Ansible argument names use underscores.
    expected_data = dict((key.replace('_', '-'), value) for key, value in override.items())

    is_error, changed, response = fortios_webfilter_override.fortios_webfilter(input_data, fos_instance)

    set_mock.assert_called_with('webfilter', 'override', data=expected_data, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_webfilter_override_creation_fails(mocker):
    """An HTTP 500 on creation must surface as is_error with no change reported."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)

    override = {
        'expires': 'test_value_3',
        'id': '4',
        'initiator': 'test_value_5',
        'ip': 'test_value_6',
        'ip6': 'test_value_7',
        'new_profile': 'test_value_8',
        'old_profile': 'test_value_9',
        'scope': 'user',
        'status': 'enable',
        'user': 'test_value_12',
        'user_group': 'test_value_13'
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'webfilter_override': override,
        'vdom': 'root'}
    # Underscored argument keys become hyphenated API keys.
    expected_data = dict((key.replace('_', '-'), value) for key, value in override.items())

    is_error, changed, response = fortios_webfilter_override.fortios_webfilter(input_data, fos_instance)

    set_mock.assert_called_with('webfilter', 'override', data=expected_data, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_webfilter_override_removal(mocker):
    """With state=absent and a 200 reply, the module must DELETE and report a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'webfilter_override': {
            'expires': 'test_value_3',
            'id': '4',
            'initiator': 'test_value_5',
            'ip': 'test_value_6',
            'ip6': 'test_value_7',
            'new_profile': 'test_value_8',
            'old_profile': 'test_value_9',
            'scope': 'user',
            'status': 'enable',
            'user': 'test_value_12',
            'user_group': 'test_value_13'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_webfilter_override.fortios_webfilter(input_data, fos_instance)

    # The mkey is derived internally from the payload, so only its presence matters.
    delete_mock.assert_called_with('webfilter', 'override', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_webfilter_override_deletion_fails(mocker):
    """An HTTP 500 on deletion must surface as is_error with no change reported."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'webfilter_override': {
            'expires': 'test_value_3',
            'id': '4',
            'initiator': 'test_value_5',
            'ip': 'test_value_6',
            'ip6': 'test_value_7',
            'new_profile': 'test_value_8',
            'old_profile': 'test_value_9',
            'scope': 'user',
            'status': 'enable',
            'user': 'test_value_12',
            'user_group': 'test_value_13'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_webfilter_override.fortios_webfilter(input_data, fos_instance)

    # The mkey is derived internally from the payload, so only its presence matters.
    delete_mock.assert_called_with('webfilter', 'override', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_webfilter_override_idempotent(mocker):
    """A 404 'error' reply means the object is already in the desired state:
    the module must report neither an error nor a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)

    override = {
        'expires': 'test_value_3',
        'id': '4',
        'initiator': 'test_value_5',
        'ip': 'test_value_6',
        'ip6': 'test_value_7',
        'new_profile': 'test_value_8',
        'old_profile': 'test_value_9',
        'scope': 'user',
        'status': 'enable',
        'user': 'test_value_12',
        'user_group': 'test_value_13'
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'webfilter_override': override,
        'vdom': 'root'}
    # Underscored argument keys become hyphenated API keys.
    expected_data = dict((key.replace('_', '-'), value) for key, value in override.items())

    is_error, changed, response = fortios_webfilter_override.fortios_webfilter(input_data, fos_instance)

    set_mock.assert_called_with('webfilter', 'override', data=expected_data, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_webfilter_override_filter_foreign_attributes(mocker):
    """Attributes not in the module schema must be stripped from the API payload."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)

    override = {
        'random_attribute_not_valid': 'tag',
        'expires': 'test_value_3',
        'id': '4',
        'initiator': 'test_value_5',
        'ip': 'test_value_6',
        'ip6': 'test_value_7',
        'new_profile': 'test_value_8',
        'old_profile': 'test_value_9',
        'scope': 'user',
        'status': 'enable',
        'user': 'test_value_12',
        'user_group': 'test_value_13'
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'webfilter_override': override,
        'vdom': 'root'}
    # The foreign attribute must NOT reach the API; all valid keys are
    # forwarded with underscores mapped to hyphens.
    expected_data = dict((key.replace('_', '-'), value)
                         for key, value in override.items()
                         if key != 'random_attribute_not_valid')

    is_error, changed, response = fortios_webfilter_override.fortios_webfilter(input_data, fos_instance)

    set_mock.assert_called_with('webfilter', 'override', data=expected_data, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
alphafoobar/intellij-community | python/helpers/pydev/pydev_runfiles.py | 52 | 30194 | from __future__ import nested_scopes
import fnmatch
import os.path
from pydev_runfiles_coverage import StartCoverageSupport
from pydevd_constants import * #@UnusedWildImport
import re
import time
#=======================================================================================================================
# Configuration
#=======================================================================================================================
class Configuration:
    """Value object holding every option accepted by the PyDev test runner.

    Built by parse_cmdline() below and consumed by PydevTestRunner; all
    attributes are plain data, no behavior beyond __str__ for debugging.
    """

    def __init__(
        self,
        files_or_dirs='',          # files/directories to search for tests
        verbosity=2,               # 0..N: higher prints more diagnostics
        include_tests=None,        # fnmatch patterns of test names to include
        tests=None,                # explicit 'Class' / 'Class.method' names to run
        port=None,                 # xml-rpc port used to report results to the IDE
        files_to_tests=None,       # dict file -> [test names] (overrides discovery)
        jobs=1,                    # number of parallel jobs
        split_jobs='tests',        # 'tests' or 'module': how work is split across jobs
        coverage_output_dir=None,
        coverage_include=None,
        coverage_output_file=None,
        exclude_files=None,        # fnmatch patterns of file names to skip
        exclude_tests=None,        # fnmatch patterns of test names to skip
        include_files=None,        # fnmatch patterns of file names to keep
        django=False,              # run through the Django test-suite runner
        ):
        self.files_or_dirs = files_or_dirs
        self.verbosity = verbosity
        self.include_tests = include_tests
        self.tests = tests
        self.port = port
        self.files_to_tests = files_to_tests
        self.jobs = jobs
        self.split_jobs = split_jobs
        self.django = django

        # These must be sequences (not bare strings) so the fnmatch loops work.
        if include_tests:
            assert isinstance(include_tests, (list, tuple))

        if exclude_files:
            assert isinstance(exclude_files, (list, tuple))

        if exclude_tests:
            assert isinstance(exclude_tests, (list, tuple))

        self.exclude_files = exclude_files
        self.include_files = include_files
        self.exclude_tests = exclude_tests

        self.coverage_output_dir = coverage_output_dir
        self.coverage_include = coverage_include
        self.coverage_output_file = coverage_output_file

    def __str__(self):
        # NOTE: the 'coverage_include_dir' label below reports self.coverage_include.
        return '''Configuration
 - files_or_dirs: %s
 - verbosity: %s
 - tests: %s
 - port: %s
 - files_to_tests: %s
 - jobs: %s
 - split_jobs: %s

 - include_files: %s
 - include_tests: %s

 - exclude_files: %s
 - exclude_tests: %s

 - coverage_output_dir: %s
 - coverage_include_dir: %s
 - coverage_output_file: %s

 - django: %s
''' % (
            self.files_or_dirs,
            self.verbosity,
            self.tests,
            self.port,
            self.files_to_tests,
            self.jobs,
            self.split_jobs,

            self.include_files,
            self.include_tests,

            self.exclude_files,
            self.exclude_tests,

            self.coverage_output_dir,
            self.coverage_include,
            self.coverage_output_file,

            self.django,
        )
#=======================================================================================================================
# parse_cmdline
#=======================================================================================================================
def parse_cmdline(argv=None):
    """
    Parses command line and returns test directories, verbosity, test filter and test suites

    usage:
        runfiles.py  -v|--verbosity <level>  -t|--tests <Test.test1,Test2>  dirs|files

    Multiprocessing options:
    jobs=number (with the number of jobs to be used to run the tests)
    split_jobs='module'|'tests'
        if == module, a given job will always receive all the tests from a module
        if == tests, the tests will be split independently of their originating module (default)

    --exclude_files  = comma-separated list of patterns with files to exclude (fnmatch style)
    --include_files = comma-separated list of patterns with files to include (fnmatch style)
    --exclude_tests = comma-separated list of patterns with test names to exclude (fnmatch style)

    Note: if --tests is given, --exclude_files, --include_files and --exclude_tests are ignored!
    """
    if argv is None:
        argv = sys.argv

    # Defaults (see Configuration for the meaning of each option).
    verbosity = 2
    include_tests = None
    tests = None
    port = None
    jobs = 1
    split_jobs = 'tests'
    files_to_tests = {}
    coverage_output_dir = None
    coverage_include = None
    exclude_files = None
    exclude_tests = None
    include_files = None
    django = False

    from _pydev_getopt import gnu_getopt
    # NOTE: the short-options string is empty, so only the long forms below
    # are actually recognized by getopt.
    optlist, dirs = gnu_getopt(
        argv[1:], "",
        [
            "verbosity=",
            "tests=",

            "port=",
            "config_file=",

            "jobs=",
            "split_jobs=",

            "include_tests=",
            "include_files=",

            "exclude_files=",
            "exclude_tests=",

            "coverage_output_dir=",
            "coverage_include=",

            "django="
        ]
    )

    for opt, value in optlist:
        if opt in ("-v", "--verbosity"):
            verbosity = value

        elif opt in ("-p", "--port"):
            port = int(value)

        elif opt in ("-j", "--jobs"):
            jobs = int(value)

        elif opt in ("-s", "--split_jobs"):
            split_jobs = value
            if split_jobs not in ('module', 'tests'):
                raise AssertionError('Expected split to be either "module" or "tests". Was :%s' % (split_jobs,))

        elif opt in ("-d", "--coverage_output_dir",):
            coverage_output_dir = value.strip()

        elif opt in ("-i", "--coverage_include",):
            coverage_include = value.strip()

        elif opt in ("-I", "--include_tests"):
            include_tests = value.split(',')

        elif opt in ("-E", "--exclude_files"):
            exclude_files = value.split(',')

        elif opt in ("-F", "--include_files"):
            include_files = value.split(',')

        elif opt in ("-e", "--exclude_tests"):
            exclude_tests = value.split(',')

        elif opt in ("-t", "--tests"):
            tests = value.split(',')

        elif opt in ("--django",):
            django = value.strip() in ['true', 'True', '1']

        elif opt in ("-c", "--config_file"):
            # Config file format: one 'file|test' pair per line, mapping a file
            # to the specific tests that should be run from it.
            config_file = value.strip()
            if os.path.exists(config_file):
                f = open(config_file, 'rU')
                try:
                    config_file_contents = f.read()
                finally:
                    f.close()

                if config_file_contents:
                    config_file_contents = config_file_contents.strip()

                if config_file_contents:
                    for line in config_file_contents.splitlines():
                        file_and_test = line.split('|')
                        if len(file_and_test) == 2:
                            file, test = file_and_test
                            if DictContains(files_to_tests, file):
                                files_to_tests[file].append(test)
                            else:
                                files_to_tests[file] = [test]

            else:
                sys.stderr.write('Could not find config file: %s\n' % (config_file,))

    if type([]) != type(dirs):
        dirs = [dirs]

    ret_dirs = []
    for d in dirs:
        if '|' in d:
            #paths may come from the ide separated by |
            ret_dirs.extend(d.split('|'))
        else:
            ret_dirs.append(d)

    verbosity = int(verbosity)

    if tests:
        # Explicit test names take precedence over pattern-based filtering.
        if verbosity > 4:
            sys.stdout.write('--tests provided. Ignoring --exclude_files, --exclude_tests and --include_files\n')
        exclude_files = exclude_tests = include_files = None

    config = Configuration(
        ret_dirs,
        verbosity,
        include_tests,
        tests,
        port,
        files_to_tests,
        jobs,
        split_jobs,
        coverage_output_dir,
        coverage_include,
        exclude_files=exclude_files,
        exclude_tests=exclude_tests,
        include_files=include_files,
        django=django,
    )

    if verbosity > 5:
        sys.stdout.write(str(config) + '\n')

    return config
#=======================================================================================================================
# PydevTestRunner
#=======================================================================================================================
class PydevTestRunner(object):
    """Finds and runs a file or directory of files as a unit test.

    Fixes applied in this revision:
    - run_tests(): `coverage_files` was referenced by the parallel branch even
      when `handle_coverage` was False, causing a NameError; both coverage
      locals are now always initialized.
    - filter_tests(): the verbosity>3 'did not match any include_tests'
      message had two %s placeholders but only one argument, raising a
      TypeError instead of logging; the skipped test name is now supplied.
    """

    __py_extensions = ["*.py", "*.pyw"]
    __exclude_files = ["__init__.*"]

    #Just to check that only this attributes will be written to this file
    __slots__ = [
        'verbosity',  #Always used

        'files_to_tests',  #If this one is given, the ones below are not used

        'files_or_dirs',  #Files or directories received in the command line
        'include_tests',  #The filter used to collect the tests
        'tests',  #Strings with the tests to be run

        'jobs',  #Integer with the number of jobs that should be used to run the test cases
        'split_jobs',  #String with 'tests' or 'module' (how should the jobs be split)

        'configuration',
        'coverage',
    ]

    def __init__(self, configuration):
        """Copy the relevant options out of *configuration* and fix sys.path."""
        self.verbosity = configuration.verbosity

        self.jobs = configuration.jobs
        self.split_jobs = configuration.split_jobs

        files_to_tests = configuration.files_to_tests
        if files_to_tests:
            # Explicit file->tests mapping: discovery filters are bypassed.
            self.files_to_tests = files_to_tests
            self.files_or_dirs = list(files_to_tests.keys())
            self.tests = None
        else:
            self.files_to_tests = {}
            self.files_or_dirs = configuration.files_or_dirs
            self.tests = configuration.tests

        self.configuration = configuration
        self.__adjust_path()

    def __adjust_path(self):
        """ add the current file or directory to the python path """
        path_to_append = None
        for n in xrange(len(self.files_or_dirs)):
            dir_name = self.__unixify(self.files_or_dirs[n])
            if os.path.isdir(dir_name):
                if not dir_name.endswith("/"):
                    self.files_or_dirs[n] = dir_name + "/"
                path_to_append = os.path.normpath(dir_name)
            elif os.path.isfile(dir_name):
                path_to_append = os.path.dirname(dir_name)
            else:
                if not os.path.exists(dir_name):
                    block_line = '*' * 120
                    sys.stderr.write('\n%s\n* PyDev test runner error: %s does not exist.\n%s\n' % (block_line, dir_name, block_line))
                    return
                msg = ("unknown type. \n%s\nshould be file or a directory.\n" % (dir_name))
                raise RuntimeError(msg)

        if path_to_append is not None:
            #Add it as the last one (so, first things are resolved against the default dirs and
            #if none resolves, then we try a relative import).
            sys.path.append(path_to_append)

    def __is_valid_py_file(self, fname):
        """ tests that a particular file contains the proper file extension
            and is not in the list of files to exclude """
        is_valid_fname = 0
        for invalid_fname in self.__class__.__exclude_files:
            is_valid_fname += int(not fnmatch.fnmatch(fname, invalid_fname))
        if_valid_ext = 0
        for ext in self.__class__.__py_extensions:
            if_valid_ext += int(fnmatch.fnmatch(fname, ext))
        return is_valid_fname > 0 and if_valid_ext > 0

    def __unixify(self, s):
        """ stupid windows. converts the backslash to forwardslash for consistency """
        return os.path.normpath(s).replace(os.sep, "/")

    def __importify(self, s, dir=False):
        """ turns directory separators into dots and removes the ".py*" extension
            so the string can be used as import statement """
        if not dir:
            dirname, fname = os.path.split(s)

            if fname.count('.') > 1:
                #if there's a file named xxx.xx.py, it is not a valid module, so, let's not load it...
                return

            imp_stmt_pieces = [dirname.replace("\\", "/").replace("/", "."), os.path.splitext(fname)[0]]

            if len(imp_stmt_pieces[0]) == 0:
                imp_stmt_pieces = imp_stmt_pieces[1:]

            return ".".join(imp_stmt_pieces)

        else:  #handle dir
            return s.replace("\\", "/").replace("/", ".")

    def __add_files(self, pyfiles, root, files):
        """ if files match, appends them to pyfiles. used by os.path.walk fcn """
        for fname in files:
            if self.__is_valid_py_file(fname):
                name_without_base_dir = self.__unixify(os.path.join(root, fname))
                pyfiles.append(name_without_base_dir)

    def find_import_files(self):
        """ return a list of files to import """
        if self.files_to_tests:
            pyfiles = self.files_to_tests.keys()
        else:
            pyfiles = []

            for base_dir in self.files_or_dirs:
                if os.path.isdir(base_dir):
                    if hasattr(os, 'walk'):
                        for root, dirs, files in os.walk(base_dir):
                            #Note: handling directories that should be excluded from the search because
                            #they don't have __init__.py
                            exclude = {}
                            for d in dirs:
                                for init in ['__init__.py', '__init__.pyo', '__init__.pyc', '__init__.pyw']:
                                    if os.path.exists(os.path.join(root, d, init).replace('\\', '/')):
                                        break
                                else:
                                    exclude[d] = 1

                            if exclude:
                                new = []
                                for d in dirs:
                                    if d not in exclude:
                                        new.append(d)
                                # Prune in place so os.walk does not descend into them.
                                dirs[:] = new

                            self.__add_files(pyfiles, root, files)
                    else:
                        # jython2.1 is too old for os.walk!
                        os.path.walk(base_dir, self.__add_files, pyfiles)

                elif os.path.isfile(base_dir):
                    pyfiles.append(base_dir)

        if self.configuration.exclude_files or self.configuration.include_files:
            ret = []
            for f in pyfiles:
                add = True
                basename = os.path.basename(f)
                if self.configuration.include_files:
                    add = False

                    for pat in self.configuration.include_files:
                        if fnmatch.fnmatchcase(basename, pat):
                            add = True
                            break

                if not add:
                    if self.verbosity > 3:
                        sys.stdout.write('Skipped file: %s (did not match any include_files pattern: %s)\n' % (f, self.configuration.include_files))

                elif self.configuration.exclude_files:
                    for pat in self.configuration.exclude_files:
                        if fnmatch.fnmatchcase(basename, pat):
                            if self.verbosity > 3:
                                sys.stdout.write('Skipped file: %s (matched exclude_files pattern: %s)\n' % (f, pat))

                            elif self.verbosity > 2:
                                sys.stdout.write('Skipped file: %s\n' % (f,))

                            add = False
                            break

                if add:
                    if self.verbosity > 3:
                        sys.stdout.write('Adding file: %s for test discovery.\n' % (f,))
                    ret.append(f)

            pyfiles = ret

        return pyfiles

    def __get_module_from_str(self, modname, print_exception, pyfile):
        """ Import the module in the given import path.
            * Returns the "final" module, so importing "coilib40.subject.visu"
            returns the "visu" module, not the "coilib40" as returned by __import__ """
        try:
            mod = __import__(modname)
            for part in modname.split('.')[1:]:
                mod = getattr(mod, part)
            return mod
        except:
            if print_exception:
                # Capture the import traceback and forward it to the IDE as a test error.
                import pydev_runfiles_xml_rpc
                import pydevd_io
                buf_err = pydevd_io.StartRedirect(keep_original_redirection=True, std='stderr')
                buf_out = pydevd_io.StartRedirect(keep_original_redirection=True, std='stdout')
                try:
                    import traceback;traceback.print_exc()
                    sys.stderr.write('ERROR: Module: %s could not be imported (file: %s).\n' % (modname, pyfile))
                finally:
                    pydevd_io.EndRedirect('stderr')
                    pydevd_io.EndRedirect('stdout')

                pydev_runfiles_xml_rpc.notifyTest(
                    'error', buf_out.getvalue(), buf_err.getvalue(), pyfile, modname, 0)

            return None

    def find_modules_from_files(self, pyfiles):
        """ returns a list of modules given a list of files """
        #let's make sure that the paths we want are in the pythonpath...
        imports = [(s, self.__importify(s)) for s in pyfiles]

        system_paths = []
        for s in sys.path:
            system_paths.append(self.__importify(s, True))

        ret = []
        for pyfile, imp in imports:
            if imp is None:
                continue  #can happen if a file is not a valid module
            choices = []
            for s in system_paths:
                if imp.startswith(s):
                    add = imp[len(s) + 1:]
                    if add:
                        choices.append(add)
                    #sys.stdout.write(' ' + add + ' ')

            if not choices:
                sys.stdout.write('PYTHONPATH not found for file: %s\n' % imp)
            else:
                for i, import_str in enumerate(choices):
                    # Only print the import failure for the last candidate tried.
                    print_exception = i == len(choices) - 1
                    mod = self.__get_module_from_str(import_str, print_exception, pyfile)
                    if mod is not None:
                        ret.append((pyfile, mod, import_str))
                        break

        return ret

    #===================================================================================================================
    # GetTestCaseNames
    #===================================================================================================================
    class GetTestCaseNames:
        """Yes, we need a class for that (cannot use outer context on jython 2.1)"""

        def __init__(self, accepted_classes, accepted_methods):
            self.accepted_classes = accepted_classes
            self.accepted_methods = accepted_methods

        def __call__(self, testCaseClass):
            """Return a sorted sequence of method names found within testCaseClass"""
            testFnNames = []
            className = testCaseClass.__name__

            if DictContains(self.accepted_classes, className):
                for attrname in dir(testCaseClass):
                    #If a class is chosen, we select all the 'test' methods'
                    if attrname.startswith('test') and hasattr(getattr(testCaseClass, attrname), '__call__'):
                        testFnNames.append(attrname)

            else:
                for attrname in dir(testCaseClass):
                    #If we have the class+method name, we must do a full check and have an exact match.
                    if DictContains(self.accepted_methods, className + '.' + attrname):
                        if hasattr(getattr(testCaseClass, attrname), '__call__'):
                            testFnNames.append(attrname)

            #sorted() is not available in jython 2.1
            testFnNames.sort()
            return testFnNames

    def _decorate_test_suite(self, suite, pyfile, module_name):
        """Tag every test in *suite* with its source file/module.

        Returns True if the suite (recursively) contains at least one TestCase.
        """
        import unittest
        if isinstance(suite, unittest.TestSuite):
            add = False
            suite.__pydev_pyfile__ = pyfile
            suite.__pydev_module_name__ = module_name

            for t in suite._tests:
                t.__pydev_pyfile__ = pyfile
                t.__pydev_module_name__ = module_name
                if self._decorate_test_suite(t, pyfile, module_name):
                    add = True

            return add

        elif isinstance(suite, unittest.TestCase):
            return True

        else:
            return False

    def find_tests_from_modules(self, file_and_modules_and_module_name):
        """ returns the unittests given a list of modules """
        #Use our own suite!
        import pydev_runfiles_unittest
        import unittest
        unittest.TestLoader.suiteClass = pydev_runfiles_unittest.PydevTestSuite
        loader = unittest.TestLoader()

        ret = []
        if self.files_to_tests:
            for pyfile, m, module_name in file_and_modules_and_module_name:
                accepted_classes = {}
                accepted_methods = {}
                tests = self.files_to_tests[pyfile]
                for t in tests:
                    accepted_methods[t] = t

                loader.getTestCaseNames = self.GetTestCaseNames(accepted_classes, accepted_methods)

                suite = loader.loadTestsFromModule(m)
                if self._decorate_test_suite(suite, pyfile, module_name):
                    ret.append(suite)

            return ret

        if self.tests:
            accepted_classes = {}
            accepted_methods = {}

            for t in self.tests:
                splitted = t.split('.')
                if len(splitted) == 1:
                    accepted_classes[t] = t

                elif len(splitted) == 2:
                    accepted_methods[t] = t

            loader.getTestCaseNames = self.GetTestCaseNames(accepted_classes, accepted_methods)

        for pyfile, m, module_name in file_and_modules_and_module_name:
            suite = loader.loadTestsFromModule(m)
            if self._decorate_test_suite(suite, pyfile, module_name):
                ret.append(suite)

        return ret

    def filter_tests(self, test_objs, internal_call=False):
        """ based on a filter name, only return those tests that have
            the test case names that match """
        import unittest
        if not internal_call:
            if not self.configuration.include_tests and not self.tests and not self.configuration.exclude_tests:
                #No need to filter if we have nothing to filter!
                return test_objs

            if self.verbosity > 1:
                if self.configuration.include_tests:
                    sys.stdout.write('Tests to include: %s\n' % (self.configuration.include_tests,))

                if self.tests:
                    sys.stdout.write('Tests to run: %s\n' % (self.tests,))

                if self.configuration.exclude_tests:
                    sys.stdout.write('Tests to exclude: %s\n' % (self.configuration.exclude_tests,))

        test_suite = []
        for test_obj in test_objs:

            if isinstance(test_obj, unittest.TestSuite):
                #Note: keep the suites as they are and just 'fix' the tests (so, don't use the iter_tests).
                if test_obj._tests:
                    test_obj._tests = self.filter_tests(test_obj._tests, True)
                    if test_obj._tests:  #Only add the suite if we still have tests there.
                        test_suite.append(test_obj)

            elif isinstance(test_obj, unittest.TestCase):
                try:
                    testMethodName = test_obj._TestCase__testMethodName
                except AttributeError:
                    #changed in python 2.5
                    testMethodName = test_obj._testMethodName

                add = True
                if self.configuration.exclude_tests:
                    for pat in self.configuration.exclude_tests:
                        if fnmatch.fnmatchcase(testMethodName, pat):
                            if self.verbosity > 3:
                                sys.stdout.write('Skipped test: %s (matched exclude_tests pattern: %s)\n' % (testMethodName, pat))

                            elif self.verbosity > 2:
                                sys.stdout.write('Skipped test: %s\n' % (testMethodName,))

                            add = False
                            break

                if add:
                    if self.__match_tests(self.tests, test_obj, testMethodName):
                        include = True
                        if self.configuration.include_tests:
                            include = False
                            for pat in self.configuration.include_tests:
                                if fnmatch.fnmatchcase(testMethodName, pat):
                                    include = True
                                    break
                        if include:
                            test_suite.append(test_obj)
                        else:
                            if self.verbosity > 3:
                                # Fix: the original passed a single argument for two %s
                                # placeholders, raising TypeError instead of logging.
                                sys.stdout.write('Skipped test: %s (did not match any include_tests pattern %s)\n' % (testMethodName, self.configuration.include_tests))
        return test_suite

    def iter_tests(self, test_objs):
        """Flatten suites into a plain list of TestCases."""
        #Note: not using yield because of Jython 2.1.
        import unittest
        tests = []
        for test_obj in test_objs:
            if isinstance(test_obj, unittest.TestSuite):
                tests.extend(self.iter_tests(test_obj._tests))

            elif isinstance(test_obj, unittest.TestCase):
                tests.append(test_obj)
        return tests

    def list_test_names(self, test_objs):
        """Return the method names of every TestCase found in *test_objs*."""
        names = []
        for tc in self.iter_tests(test_objs):
            try:
                testMethodName = tc._TestCase__testMethodName
            except AttributeError:
                #changed in python 2.5
                testMethodName = tc._testMethodName
            names.append(testMethodName)
        return names

    def __match_tests(self, tests, test_case, test_method_name):
        """Return 1 if *test_case*/*test_method_name* matches the 'Class' or
        'Class.method' entries in *tests* (or if no filter was given)."""
        if not tests:
            return 1

        for t in tests:
            class_and_method = t.split('.')
            if len(class_and_method) == 1:
                #only class name
                if class_and_method[0] == test_case.__class__.__name__:
                    return 1

            elif len(class_and_method) == 2:
                if class_and_method[0] == test_case.__class__.__name__ and class_and_method[1] == test_method_name:
                    return 1

        return 0

    def __match(self, filter_list, name):
        """ returns whether a test name matches the test filter """
        if filter_list is None:
            return 1
        for f in filter_list:
            if re.match(f, name):
                return 1
        return 0

    def run_tests(self, handle_coverage=True):
        """ runs all tests """
        sys.stdout.write("Finding files... ")
        files = self.find_import_files()
        if self.verbosity > 3:
            sys.stdout.write('%s ... done.\n' % (self.files_or_dirs))
        else:
            sys.stdout.write('done.\n')
        sys.stdout.write("Importing test modules ... ")

        # Fix: always bind both names so the parallel branch below cannot hit
        # a NameError when coverage support is disabled.
        coverage_files, coverage = None, None
        if handle_coverage:
            coverage_files, coverage = StartCoverageSupport(self.configuration)

        file_and_modules_and_module_name = self.find_modules_from_files(files)
        sys.stdout.write("done.\n")

        all_tests = self.find_tests_from_modules(file_and_modules_and_module_name)
        all_tests = self.filter_tests(all_tests)

        import pydev_runfiles_unittest
        test_suite = pydev_runfiles_unittest.PydevTestSuite(all_tests)
        import pydev_runfiles_xml_rpc
        pydev_runfiles_xml_rpc.notifyTestsCollected(test_suite.countTestCases())

        start_time = time.time()

        def run_tests():
            executed_in_parallel = False
            if self.jobs > 1:
                import pydev_runfiles_parallel

                #What may happen is that the number of jobs needed is lower than the number of jobs requested
                #(e.g.: 2 jobs were requested for running 1 test) -- in which case ExecuteTestsInParallel will
                #return False and won't run any tests.
                executed_in_parallel = pydev_runfiles_parallel.ExecuteTestsInParallel(
                    all_tests, self.jobs, self.split_jobs, self.verbosity, coverage_files, self.configuration.coverage_include)

            if not executed_in_parallel:
                #If in coverage, we don't need to pass anything here (coverage is already enabled for this execution).
                runner = pydev_runfiles_unittest.PydevTextTestRunner(stream=sys.stdout, descriptions=1, verbosity=self.verbosity)
                sys.stdout.write('\n')
                runner.run(test_suite)

        if self.configuration.django:
            MyDjangoTestSuiteRunner(run_tests).run_tests([])
        else:
            run_tests()

        if handle_coverage:
            coverage.stop()
            coverage.save()

        total_time = 'Finished in: %.2f secs.' % (time.time() - start_time,)
        pydev_runfiles_xml_rpc.notifyTestRunFinished(total_time)
# Optional Django integration: use Django's real test-suite runner when it
# is importable, otherwise install a stub that fails loudly if anyone
# actually tries to use it.
try:
    from django.test.simple import DjangoTestSuiteRunner
except:
    # Bare except on purpose: any failure importing Django (missing package,
    # unconfigured settings, ...) should fall back to the stub.
    class DjangoTestSuiteRunner:
        def __init__(self):
            pass
        def run_tests(self, *args, **kwargs):
            raise AssertionError("Unable to run suite with DjangoTestSuiteRunner because it couldn't be imported.")
class MyDjangoTestSuiteRunner(DjangoTestSuiteRunner):
    """Django test-suite runner that delegates the actual run to a callback.

    Suite building and result collection are neutralized (the pydev
    machinery has already collected the tests); ``run_suite`` simply
    invokes the callable supplied at construction time.
    """

    def __init__(self, on_run_suite):
        DjangoTestSuiteRunner.__init__(self)
        self.on_run_suite = on_run_suite

    def build_suite(self, *args, **kwargs):
        # The suite is prebuilt elsewhere; nothing to do here.
        pass

    def suite_result(self, *args, **kwargs):
        # Result reporting is handled by the pydev runner, not Django.
        pass

    def run_suite(self, *args, **kwargs):
        # Hand control back to the callable given in __init__.
        self.on_run_suite()
#=======================================================================================================================
# main
#=======================================================================================================================
def main(configuration):
    """Entry point: build a PydevTestRunner for *configuration* and run all tests."""
    PydevTestRunner(configuration).run_tests()
| apache-2.0 |
itsjeyd/edx-platform | common/test/acceptance/fixtures/__init__.py | 15 | 1026 | import os
# Base URLs of the services exercised by the acceptance tests.  Each one can
# be overridden through the environment variable named in the first argument;
# the defaults point at the local test instances.

# Studio (CMS) instance under test
STUDIO_BASE_URL = os.getenv('studio_url', 'http://localhost:8031')

# LMS instance under test
LMS_BASE_URL = os.getenv('lms_url', 'http://localhost:8003')

# XQueue stub used in the tests
XQUEUE_STUB_URL = os.getenv('xqueue_url', 'http://localhost:8040')

# ORA stub used in the tests
ORA_STUB_URL = os.getenv('ora_url', 'http://localhost:8041')

# Comments-service stub used in the tests
COMMENTS_STUB_URL = os.getenv('comments_url', 'http://localhost:4567')

# EdxNotes service stub used in the tests
EDXNOTES_STUB_URL = os.getenv('edxnotes_url', 'http://localhost:8042')

# Programs service stub used in the tests
PROGRAMS_STUB_URL = os.getenv('programs_url', 'http://localhost:8090')

# Catalog service stub used in the tests
CATALOG_STUB_URL = os.getenv('catalog_url', 'http://localhost:8091')
| agpl-3.0 |
peter-jang/ansible-modules-core | cloud/rackspace/rax.py | 30 | 32929 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax
short_description: create / delete an instance in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud instance and optionally
waits for it to be 'running'.
version_added: "1.2"
options:
auto_increment:
description:
- Whether or not to increment a single number with the name of the
created servers. Only applicable when used with the I(group) attribute
or meta key.
default: yes
choices:
- "yes"
- "no"
version_added: 1.5
boot_from_volume:
description:
- Whether or not to boot the instance from a Cloud Block Storage volume.
If C(yes) and I(image) is specified a new volume will be created at
boot time. I(boot_volume_size) is required with I(image) to create a
new volume at boot time.
default: "no"
choices:
- "yes"
- "no"
version_added: 1.9
boot_volume:
description:
- Cloud Block Storage ID or Name to use as the boot volume of the
instance
version_added: 1.9
boot_volume_size:
description:
- Size of the volume to create in Gigabytes. This is only required with
I(image) and I(boot_from_volume).
default: 100
version_added: 1.9
boot_volume_terminate:
description:
- Whether the I(boot_volume) or newly created volume from I(image) will
be terminated when the server is terminated
default: false
version_added: 1.9
config_drive:
description:
- Attach read-only configuration drive to server as label config-2
default: no
choices:
- "yes"
- "no"
version_added: 1.7
count:
description:
- number of instances to launch
default: 1
version_added: 1.4
count_offset:
description:
- number count to start at
default: 1
version_added: 1.4
disk_config:
description:
- Disk partitioning strategy
choices:
- auto
- manual
version_added: '1.4'
default: auto
exact_count:
description:
- Explicitly ensure an exact count of instances, used with
state=active/present. If specified as C(yes) and I(count) is less than
the servers matched, servers will be deleted to match the count. If
the number of matched servers is fewer than specified in I(count)
additional servers will be added.
default: no
choices:
- "yes"
- "no"
version_added: 1.4
extra_client_args:
description:
- A hash of key/value pairs to be used when creating the cloudservers
client. This is considered an advanced option, use it wisely and
with caution.
version_added: 1.6
extra_create_args:
description:
- A hash of key/value pairs to be used when creating a new server.
This is considered an advanced option, use it wisely and with caution.
version_added: 1.6
files:
description:
- Files to insert into the instance. remotefilename:localcontent
default: null
flavor:
description:
- flavor to use for the instance
default: null
group:
description:
- host group to assign to server, is also used for idempotent operations
to ensure a specific number of instances
version_added: 1.4
image:
description:
- image to use for the instance. Can be an C(id), C(human_id) or C(name).
With I(boot_from_volume), a Cloud Block Storage volume will be created
with this image
default: null
instance_ids:
description:
- list of instance ids, currently only used when state='absent' to
remove instances
version_added: 1.4
key_name:
description:
- key pair to use on the instance
default: null
aliases:
- keypair
meta:
description:
- A hash of metadata to associate with the instance
default: null
name:
description:
- Name to give the instance
default: null
networks:
description:
- The network to attach to the instances. If specified, you must include
ALL networks including the public and private interfaces. Can be C(id)
or C(label).
default:
- public
- private
version_added: 1.4
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
user_data:
description:
- Data to be uploaded to the servers config drive. This option implies
I(config_drive). Can be a file path or a string
version_added: 1.7
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Jesse Keating (@j2sol)"
- "Matt Martz (@sivel)"
notes:
- I(exact_count) can be "destructive" if the number of running servers in
the I(group) is larger than that specified in I(count). In such a case, the
I(state) is effectively set to C(absent) and the extra servers are deleted.
In the case of deletion, the returned data structure will have C(action)
set to C(delete), and the oldest servers in the group will be deleted.
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Cloud Server
gather_facts: False
tasks:
- name: Server build request
local_action:
module: rax
credentials: ~/.raxpub
name: rax-test1
flavor: 5
image: b11d9567-e412-4255-96b9-bd63ab23bcfe
key_name: my_rackspace_key
files:
/root/test.txt: /home/localuser/test.txt
wait: yes
state: present
networks:
- private
- public
register: rax
- name: Build an exact count of cloud servers with incremented names
hosts: local
gather_facts: False
tasks:
- name: Server build requests
local_action:
module: rax
credentials: ~/.raxpub
name: test%03d.example.org
flavor: performance1-1
image: ubuntu-1204-lts-precise-pangolin
state: present
count: 10
count_offset: 10
exact_count: yes
group: test
wait: yes
register: rax
'''
# pyrax (the Rackspace cloud SDK) is an optional dependency; remember
# whether it imported so main() can fail with a helpful message.
try:
    import pyrax
except ImportError:
    HAS_PYRAX = False
else:
    HAS_PYRAX = True
def rax_find_server_image(module, server, image, boot_volume):
    """Return ``server.image`` when *server* matches the requested image.

    Handles boot-from-volume servers, whose ``image`` attribute is empty:
    in that case the image id is recovered from the bootable volume's
    metadata and written back onto ``server.image``.  Returns None when the
    server does not match the requested *image*.
    """
    # Caller asked for a boot volume (no image): try to backfill
    # server.image from the bootable volume's metadata, best-effort.
    if not image and boot_volume:
        vol = rax_find_bootable_volume(module, pyrax, server,
                                       exit=False)
        if not vol:
            return None
        volume_image_metadata = vol.volume_image_metadata
        vol_image_id = volume_image_metadata.get('image_id')
        if vol_image_id:
            server_image = rax_find_image(module, pyrax,
                                          vol_image_id, exit=False)
            if server_image:
                server.image = dict(id=server_image)

    # Match image IDs taking care of boot from volume
    if image and not server.image:
        vol = rax_find_bootable_volume(module, pyrax, server)
        volume_image_metadata = vol.volume_image_metadata
        vol_image_id = volume_image_metadata.get('image_id')
        if not vol_image_id:
            return None
        server_image = rax_find_image(module, pyrax,
                                      vol_image_id, exit=False)
        if image != server_image:
            return None
        server.image = dict(id=server_image)
    elif image and server.image['id'] != image:
        # Ordinary (non boot-from-volume) server with a different image.
        return None

    return server.image
def create(module, names=None, flavor=None, image=None, meta=None, key_name=None,
           files=None, wait=True, wait_timeout=300, disk_config=None,
           group=None, nics=None, extra_create_args=None, user_data=None,
           config_drive=False, existing=None, block_device_mapping_v2=None):
    """Create one cloud server per entry in *names* and exit the module.

    On success calls ``module.exit_json`` with the created servers (plus any
    *existing* untouched ones); on build failure or timeout calls
    ``module.fail_json``.  Never returns normally.
    """
    # Avoid shared mutable default arguments.
    names = [] if names is None else names
    meta = {} if meta is None else meta
    files = {} if files is None else files
    nics = [] if nics is None else nics
    extra_create_args = {} if extra_create_args is None else extra_create_args
    existing = [] if existing is None else existing
    block_device_mapping_v2 = ([] if block_device_mapping_v2 is None
                               else block_device_mapping_v2)

    cs = pyrax.cloudservers
    changed = False

    # Supplying user_data implies attaching a config drive.
    if user_data:
        config_drive = True

    if user_data and os.path.isfile(os.path.expanduser(user_data)):
        try:
            # Expand the *path* held in user_data, then read its contents.
            # (Previously the literal string 'user_data' was expanded, so
            # the file contents were never actually loaded.)
            user_data = os.path.expanduser(user_data)
            f = open(user_data)
            user_data = f.read()
            f.close()
        except Exception as e:
            module.fail_json(msg='Failed to load %s' % user_data)

    # Handle the file contents: values arrive as local paths and are
    # replaced in place with the file data to inject into the server.
    for rpath in files.keys():
        lpath = os.path.expanduser(files[rpath])
        try:
            fileobj = open(lpath, 'r')
            files[rpath] = fileobj.read()
            fileobj.close()
        except Exception as e:
            module.fail_json(msg='Failed to load %s' % lpath)

    try:
        servers = []
        bdmv2 = block_device_mapping_v2
        for name in names:
            servers.append(cs.servers.create(name=name, image=image,
                                             flavor=flavor, meta=meta,
                                             key_name=key_name,
                                             files=files, nics=nics,
                                             disk_config=disk_config,
                                             config_drive=config_drive,
                                             userdata=user_data,
                                             block_device_mapping_v2=bdmv2,
                                             **extra_create_args))
    except Exception as e:
        if e.message:
            msg = str(e.message)
        else:
            msg = repr(e)
        module.fail_json(msg=msg)
    else:
        changed = True

    if wait:
        # Poll until every server reaches a final status or we time out.
        # wait_timeout == 0 means wait forever.
        end_time = time.time() + wait_timeout
        infinite = wait_timeout == 0
        while infinite or time.time() < end_time:
            for server in servers:
                try:
                    server.get()
                except:
                    # Mark unreachable servers as errored (was a no-op
                    # '==' comparison instead of an assignment before).
                    server.status = 'ERROR'
            if not filter(lambda s: s.status not in FINAL_STATUSES,
                          servers):
                break
            time.sleep(5)

    # Classify every server into success / error / timeout buckets.
    success = []
    error = []
    timeout = []
    for server in servers:
        try:
            server.get()
        except:
            server.status = 'ERROR'
        instance = rax_to_dict(server, 'server')
        if server.status == 'ACTIVE' or not wait:
            success.append(instance)
        elif server.status == 'ERROR':
            error.append(instance)
        elif wait:
            timeout.append(instance)

    # Pre-existing matched servers are reported alongside the new ones.
    untouched = [rax_to_dict(s, 'server') for s in existing]
    instances = success + untouched

    results = {
        'changed': changed,
        'action': 'create',
        'instances': instances,
        'success': success,
        'error': error,
        'timeout': timeout,
        'instance_ids': {
            'instances': [i['id'] for i in instances],
            'success': [i['id'] for i in success],
            'error': [i['id'] for i in error],
            'timeout': [i['id'] for i in timeout]
        }
    }

    if timeout:
        results['msg'] = 'Timeout waiting for all servers to build'
    elif error:
        results['msg'] = 'Failed to build all servers'

    if 'msg' in results:
        module.fail_json(**results)
    else:
        module.exit_json(**results)
def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None):
    """Delete the servers in *instance_ids*, optionally waiting for removal.

    Exits the module via ``module.exit_json`` (or ``fail_json`` on error or
    timeout), reporting deleted servers and the *kept* servers that remain.
    Never returns normally.
    """
    # Avoid shared mutable default arguments.
    instance_ids = [] if instance_ids is None else instance_ids
    kept = [] if kept is None else kept

    cs = pyrax.cloudservers
    changed = False
    instances = {}
    servers = []

    for instance_id in instance_ids:
        servers.append(cs.servers.get(instance_id))

    for server in servers:
        try:
            server.delete()
        except Exception as e:
            module.fail_json(msg=e.message)
        else:
            changed = True

        instance = rax_to_dict(server, 'server')
        instances[instance['id']] = instance

    # If requested, wait for server deletion
    if wait:
        end_time = time.time() + wait_timeout
        infinite = wait_timeout == 0
        while infinite or time.time() < end_time:
            for server in servers:
                instance_id = server.id
                try:
                    server.get()
                except:
                    # A failed GET means the server is gone.
                    instances[instance_id]['status'] = 'DELETED'
                    instances[instance_id]['rax_status'] = 'DELETED'

            if not filter(lambda s: s['status'] not in ('', 'DELETED',
                                                        'ERROR'),
                          instances.values()):
                break
            time.sleep(5)

    timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'),
                     instances.values())
    # ('ERROR',) must be a one-tuple: the previous ('ERROR') was a plain
    # string, so 'in' performed substring matching and mis-classified an
    # empty status as an error.
    error = filter(lambda s: s['status'] in ('ERROR',),
                   instances.values())
    success = filter(lambda s: s['status'] in ('', 'DELETED'),
                     instances.values())

    # Report the surviving (kept) servers as the module's instance list.
    instances = [rax_to_dict(s, 'server') for s in kept]

    results = {
        'changed': changed,
        'action': 'delete',
        'instances': instances,
        'success': success,
        'error': error,
        'timeout': timeout,
        'instance_ids': {
            'instances': [i['id'] for i in instances],
            'success': [i['id'] for i in success],
            'error': [i['id'] for i in error],
            'timeout': [i['id'] for i in timeout]
        }
    }

    if timeout:
        results['msg'] = 'Timeout waiting for all servers to delete'
    elif error:
        results['msg'] = 'Failed to delete all servers'

    if 'msg' in results:
        module.fail_json(**results)
    else:
        module.exit_json(**results)
def cloudservers(module, state=None, name=None, flavor=None, image=None,
                 meta={}, key_name=None, files={}, wait=True, wait_timeout=300,
                 disk_config=None, count=1, group=None, instance_ids=[],
                 exact_count=False, networks=[], count_offset=0,
                 auto_increment=False, extra_create_args={}, user_data=None,
                 config_drive=False, boot_from_volume=False,
                 boot_volume=None, boot_volume_size=None,
                 boot_volume_terminate=False):
    """Reconcile the requested server state and delegate to create()/delete().

    Validates arguments, matches existing servers (by group metadata or by
    name/flavor/image/metadata), then creates or deletes servers so the
    cloud matches the requested *state*/*count*.  Always exits the module
    through create(), delete() or module.exit_json/fail_json.

    NOTE(review): the mutable default arguments (meta={}, files={}, ...)
    are shared across calls -- harmless under Ansible's one-shot module
    execution, but worth fixing.
    """
    cs = pyrax.cloudservers
    cnw = pyrax.cloud_networks
    if not cnw:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    # Argument validation only applies when we may have to build servers.
    if state == 'present' or (state == 'absent' and instance_ids is None):
        if not boot_from_volume and not boot_volume and not image:
            module.fail_json(msg='image is required for the "rax" module')

        for arg, value in dict(name=name, flavor=flavor).iteritems():
            if not value:
                module.fail_json(msg='%s is required for the "rax" module' %
                                     arg)

        if boot_from_volume and not image and not boot_volume:
            module.fail_json(msg='image or boot_volume are required for the '
                                 '"rax" with boot_from_volume')

        if boot_from_volume and image and not boot_volume_size:
            module.fail_json(msg='boot_volume_size is required for the "rax" '
                                 'module with boot_from_volume and image')

        if boot_from_volume and image and boot_volume:
            # An explicit boot volume wins over an image.
            image = None

    servers = []

    # Add the group meta key
    if group and 'group' not in meta:
        meta['group'] = group
    elif 'group' in meta and group is None:
        group = meta['group']

    # Normalize and ensure all metadata values are strings
    for k, v in meta.items():
        if isinstance(v, list):
            meta[k] = ','.join(['%s' % i for i in v])
        elif isinstance(v, dict):
            meta[k] = json.dumps(v)
        elif not isinstance(v, basestring):
            meta[k] = '%s' % v

    # When using state=absent with group, the absent block won't match the
    # names properly. Use the exact_count functionality to decrease the count
    # to the desired level
    was_absent = False
    if group is not None and state == 'absent':
        exact_count = True
        state = 'present'
        was_absent = True

    if image:
        # Resolve human-friendly image name/human_id to an image id.
        image = rax_find_image(module, pyrax, image)

    nics = []
    if networks:
        for network in networks:
            nics.extend(rax_find_network(module, pyrax, network))

    # act on the state
    if state == 'present':
        # Idempotent ensurance of a specific count of servers
        if exact_count is not False:
            # See if we can find servers that match our options
            if group is None:
                module.fail_json(msg='"group" must be provided when using '
                                     '"exact_count"')

            if auto_increment:
                numbers = set()

                # See if the name is a printf like string, if not append
                # %d to the end
                try:
                    name % 0
                except TypeError as e:
                    if e.message.startswith('not all'):
                        name = '%s%%d' % name
                    else:
                        module.fail_json(msg=e.message)

                # regex pattern to match printf formatting
                pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                for server in cs.servers.list():
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue
                    if server.metadata.get('group') == group:
                        servers.append(server)
                    match = re.search(pattern, server.name)
                    if match:
                        number = int(match.group(1))
                        numbers.add(number)

                number_range = xrange(count_offset, count_offset + count)
                available_numbers = list(set(number_range)
                                         .difference(numbers))
            else:  # Not auto incrementing
                for server in cs.servers.list():
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue
                    if server.metadata.get('group') == group:
                        servers.append(server)
                # available_numbers not needed here, we inspect auto_increment
                # again later

            # If state was absent but the count was changed,
            # assume we only wanted to remove that number of instances
            if was_absent:
                diff = len(servers) - count
                if diff < 0:
                    count = 0
                else:
                    count = diff

            if len(servers) > count:
                # We have more servers than we need, set state='absent'
                # and delete the extras, this should delete the oldest
                state = 'absent'
                kept = servers[:count]
                del servers[:count]
                instance_ids = []
                for server in servers:
                    instance_ids.append(server.id)
                delete(module, instance_ids=instance_ids, wait=wait,
                       wait_timeout=wait_timeout, kept=kept)
            elif len(servers) < count:
                # we have fewer servers than we need
                if auto_increment:
                    # auto incrementing server numbers
                    names = []
                    name_slice = count - len(servers)
                    numbers_to_use = available_numbers[:name_slice]
                    for number in numbers_to_use:
                        names.append(name % number)
                else:
                    # We are not auto incrementing server numbers,
                    # create a list of 'name' that matches how many we need
                    names = [name] * (count - len(servers))
            else:
                # we have the right number of servers, just return info
                # about all of the matched servers
                instances = []
                instance_ids = []
                for server in servers:
                    instances.append(rax_to_dict(server, 'server'))
                    instance_ids.append(server.id)
                module.exit_json(changed=False, action=None,
                                 instances=instances,
                                 success=[], error=[], timeout=[],
                                 instance_ids={'instances': instance_ids,
                                               'success': [], 'error': [],
                                               'timeout': []})
        else:  # not called with exact_count=True
            if group is not None:
                if auto_increment:
                    # we are auto incrementing server numbers, but not with
                    # exact_count
                    numbers = set()

                    # See if the name is a printf like string, if not append
                    # %d to the end
                    try:
                        name % 0
                    except TypeError as e:
                        if e.message.startswith('not all'):
                            name = '%s%%d' % name
                        else:
                            module.fail_json(msg=e.message)

                    # regex pattern to match printf formatting
                    pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                    for server in cs.servers.list():
                        # Ignore DELETED servers
                        if server.status == 'DELETED':
                            continue
                        if server.metadata.get('group') == group:
                            servers.append(server)
                        match = re.search(pattern, server.name)
                        if match:
                            number = int(match.group(1))
                            numbers.add(number)

                    number_range = xrange(count_offset,
                                          count_offset + count + len(numbers))
                    available_numbers = list(set(number_range)
                                             .difference(numbers))
                    names = []
                    numbers_to_use = available_numbers[:count]
                    for number in numbers_to_use:
                        names.append(name % number)
                else:
                    # Not auto incrementing
                    names = [name] * count
            else:
                # No group was specified, and not using exact_count
                # Perform more simplistic matching
                search_opts = {
                    'name': '^%s$' % name,
                    'flavor': flavor
                }
                servers = []
                for server in cs.servers.list(search_opts=search_opts):
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue

                    if not rax_find_server_image(module, server, image,
                                                 boot_volume):
                        continue

                    # Ignore servers with non matching metadata
                    if server.metadata != meta:
                        continue
                    servers.append(server)

                if len(servers) >= count:
                    # We have more servers than were requested, don't do
                    # anything. Not running with exact_count=True, so we assume
                    # more is OK
                    instances = []
                    for server in servers:
                        instances.append(rax_to_dict(server, 'server'))

                    instance_ids = [i['id'] for i in instances]
                    module.exit_json(changed=False, action=None,
                                     instances=instances, success=[], error=[],
                                     timeout=[],
                                     instance_ids={'instances': instance_ids,
                                                   'success': [], 'error': [],
                                                   'timeout': []})

                # We need more servers to reach out target, create names for
                # them, we aren't performing auto_increment here
                names = [name] * (count - len(servers))

        # Translate the boot-from-volume options into a block device mapping
        # understood by the compute API.
        block_device_mapping_v2 = []
        if boot_from_volume:
            mapping = {
                'boot_index': '0',
                'delete_on_termination': boot_volume_terminate,
                'destination_type': 'volume',
            }
            if image:
                mapping.update({
                    'uuid': image,
                    'source_type': 'image',
                    'volume_size': boot_volume_size,
                })
                image = None
            elif boot_volume:
                volume = rax_find_volume(module, pyrax, boot_volume)
                mapping.update({
                    'uuid': pyrax.utils.get_id(volume),
                    'source_type': 'volume',
                })
            block_device_mapping_v2.append(mapping)

        create(module, names=names, flavor=flavor, image=image,
               meta=meta, key_name=key_name, files=files, wait=wait,
               wait_timeout=wait_timeout, disk_config=disk_config, group=group,
               nics=nics, extra_create_args=extra_create_args,
               user_data=user_data, config_drive=config_drive,
               existing=servers,
               block_device_mapping_v2=block_device_mapping_v2)

    elif state == 'absent':
        if instance_ids is None:
            # We weren't given an explicit list of server IDs to delete
            # Let's match instead
            search_opts = {
                'name': '^%s$' % name,
                'flavor': flavor
            }
            for server in cs.servers.list(search_opts=search_opts):
                # Ignore DELETED servers
                if server.status == 'DELETED':
                    continue

                if not rax_find_server_image(module, server, image,
                                             boot_volume):
                    continue

                # Ignore servers with non matching metadata
                if meta != server.metadata:
                    continue

                servers.append(server)

            # Build a list of server IDs to delete
            instance_ids = []
            for server in servers:
                if len(instance_ids) < count:
                    instance_ids.append(server.id)
                else:
                    break

        if not instance_ids:
            # No server IDs were matched for deletion, or no IDs were
            # explicitly provided, just exit and don't do anything
            module.exit_json(changed=False, action=None, instances=[],
                             success=[], error=[], timeout=[],
                             instance_ids={'instances': [],
                                           'success': [], 'error': [],
                                           'timeout': []})

        delete(module, instance_ids=instance_ids, wait=wait,
               wait_timeout=wait_timeout)
def main():
    """Ansible entry point: parse module arguments and dispatch to cloudservers()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            auto_increment=dict(default=True, type='bool'),
            boot_from_volume=dict(default=False, type='bool'),
            boot_volume=dict(type='str'),
            boot_volume_size=dict(type='int', default=100),
            boot_volume_terminate=dict(type='bool', default=False),
            config_drive=dict(default=False, type='bool'),
            count=dict(default=1, type='int'),
            count_offset=dict(default=1, type='int'),
            disk_config=dict(choices=['auto', 'manual']),
            exact_count=dict(default=False, type='bool'),
            extra_client_args=dict(type='dict', default={}),
            extra_create_args=dict(type='dict', default={}),
            files=dict(type='dict', default={}),
            flavor=dict(),
            group=dict(),
            image=dict(),
            instance_ids=dict(type='list'),
            key_name=dict(aliases=['keypair']),
            meta=dict(type='dict', default={}),
            name=dict(),
            networks=dict(type='list', default=['public', 'private']),
            # Deprecated; only accepted so we can emit a helpful error below.
            service=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            user_data=dict(no_log=True),
            wait=dict(default=False, type='bool'),
            wait_timeout=dict(default=300),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    service = module.params.get('service')

    if service is not None:
        module.fail_json(msg='The "service" attribute has been deprecated, '
                             'please remove "service: cloudservers" from your '
                             'playbook pertaining to the "rax" module')

    auto_increment = module.params.get('auto_increment')
    boot_from_volume = module.params.get('boot_from_volume')
    boot_volume = module.params.get('boot_volume')
    boot_volume_size = module.params.get('boot_volume_size')
    boot_volume_terminate = module.params.get('boot_volume_terminate')
    config_drive = module.params.get('config_drive')
    count = module.params.get('count')
    count_offset = module.params.get('count_offset')
    disk_config = module.params.get('disk_config')
    if disk_config:
        # The API expects AUTO/MANUAL in upper case.
        disk_config = disk_config.upper()
    exact_count = module.params.get('exact_count', False)
    extra_client_args = module.params.get('extra_client_args')
    extra_create_args = module.params.get('extra_create_args')
    files = module.params.get('files')
    flavor = module.params.get('flavor')
    group = module.params.get('group')
    image = module.params.get('image')
    instance_ids = module.params.get('instance_ids')
    key_name = module.params.get('key_name')
    meta = module.params.get('meta')
    name = module.params.get('name')
    networks = module.params.get('networks')
    state = module.params.get('state')
    user_data = module.params.get('user_data')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    setup_rax_module(module, pyrax)

    if extra_client_args:
        # Rebuild the cloudservers client with the caller-supplied extras
        # (e.g. bypass_url for a custom endpoint).
        pyrax.cloudservers = pyrax.connect_to_cloudservers(
            region=pyrax.cloudservers.client.region_name,
            **extra_client_args)
        client = pyrax.cloudservers.client
        if 'bypass_url' in extra_client_args:
            client.management_url = extra_client_args['bypass_url']

    if pyrax.cloudservers is None:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    cloudservers(module, state=state, name=name, flavor=flavor,
                 image=image, meta=meta, key_name=key_name, files=files,
                 wait=wait, wait_timeout=wait_timeout, disk_config=disk_config,
                 count=count, group=group, instance_ids=instance_ids,
                 exact_count=exact_count, networks=networks,
                 count_offset=count_offset, auto_increment=auto_increment,
                 extra_create_args=extra_create_args, user_data=user_data,
                 config_drive=config_drive, boot_from_volume=boot_from_volume,
                 boot_volume=boot_volume, boot_volume_size=boot_volume_size,
                 boot_volume_terminate=boot_volume_terminate)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *

# Invoke the module only when executed as a script (which is how Ansible
# runs modules); guarding keeps plain imports of this file side-effect free.
if __name__ == '__main__':
    main()
| gpl-3.0 |
marcore/edx-platform | lms/djangoapps/instructor/tests/views/test_instructor_dashboard.py | 8 | 16630 | """
Unit tests for instructor_dashboard.py.
"""
import ddt
import datetime
from mock import patch
from nose.plugins.attrib import attr
from pytz import UTC
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.test.utils import override_settings
from edxmako.shortcuts import render_to_response
from courseware.tabs import get_course_tab_list
from courseware.tests.factories import UserFactory, StudentModuleFactory
from courseware.tests.helpers import LoginEnrollmentTestCase
from instructor.views.gradebook_api import calculate_page_info
from common.test.utils import XssTestMixin
from student.tests.factories import AdminFactory, CourseEnrollmentFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, TEST_DATA_SPLIT_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
from shoppingcart.models import PaidCourseRegistration, Order, CourseRegCodeItem
from course_modes.models import CourseMode
from student.roles import CourseFinanceAdminRole
from student.models import CourseEnrollment
def intercept_renderer(path, context):
    """
    Intercept calls to `render_to_response` and attach the context dict to the
    response for examination in unit tests.
    """
    # I think Django already does this for you in their TestClient, except
    # we're bypassing that by using edxmako. Probably edxmako should be
    # integrated better with Django's rendering and event system.
    response = render_to_response(path, context)
    # Stash the rendered template path and its context on the response so
    # tests can assert against what was rendered, not just the final HTML.
    response.mako_context = context
    response.mako_template = path
    return response
@attr('shard_3')
@ddt.ddt
class TestInstructorDashboard(ModuleStoreTestCase, LoginEnrollmentTestCase, XssTestMixin):
"""
Tests for the instructor dashboard (not legacy).
"""
def setUp(self):
    """
    Set up tests: a course (with an XSS-probing display name), a paid
    course mode, a logged-in instructor, and the dashboard URL.
    """
    super(TestInstructorDashboard, self).setUp()
    # display_name deliberately contains a script tag so XSS escaping
    # can be asserted in test_course_name_xss.
    self.course = CourseFactory.create(
        grading_policy={"GRADE_CUTOFFS": {"A": 0.75, "B": 0.63, "C": 0.57, "D": 0.5}},
        display_name='<script>alert("XSS")</script>'
    )

    # A priced default mode so e-commerce sections render on the dashboard.
    self.course_mode = CourseMode(
        course_id=self.course.id,
        mode_slug=CourseMode.DEFAULT_MODE_SLUG,
        mode_display_name=CourseMode.DEFAULT_MODE.name,
        min_price=40
    )
    self.course_mode.save()
    # Create instructor account
    self.instructor = AdminFactory.create()
    self.client.login(username=self.instructor.username, password="test")

    # URL for instructor dash
    self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
def get_dashboard_enrollment_message(self):
    """
    Build the enrollment message (with its Insights link) that the
    dashboard is expected to display for this course.
    """
    template = (
        'Enrollment data is now available in '
        '<a href="http://example.com/courses/{}" target="_blank">Example</a>.'
    )
    return template.format(unicode(self.course.id))
def get_dashboard_analytics_message(self):
    """
    Build the demographics/analytics message (with its Insights link) that
    the dashboard is expected to display for this course.
    """
    template = (
        'For analytics about your course, go to '
        '<a href="http://example.com/courses/{}" target="_blank">Example</a>.'
    )
    return template.format(unicode(self.course.id))
def test_instructor_tab(self):
    """
    Verify that the instructor tab appears for staff only.
    """
    def has_instructor_tab(user, course):
        """Returns true if the "Instructor" tab is shown."""
        request = RequestFactory().request()
        request.user = user
        tabs = get_course_tab_list(request, course)
        return len([tab for tab in tabs if tab.name == 'Instructor']) == 1

    # The admin user created in setUp should see the tab ...
    self.assertTrue(has_instructor_tab(self.instructor, self.course))

    # ... while an ordinary (non-staff) user should not.
    student = UserFactory.create()
    self.assertFalse(has_instructor_tab(student, self.course))
def test_default_currency_in_the_html_response(self):
    """
    Test that checks the default currency_symbol ($) in the response
    """
    # The finance-admin role is required for the dashboard to render the
    # total-sales section containing the currency symbol.
    CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
    total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
    response = self.client.get(self.url)
    self.assertTrue('${amount}'.format(amount=total_amount) in response.content)
def test_course_name_xss(self):
    """Test that the instructor dashboard correctly escapes course names
    with script tags.
    """
    # The course display name set in setUp contains this exact payload;
    # assert_no_xss verifies it does not appear unescaped in the page.
    response = self.client.get(self.url)
    self.assert_no_xss(response, '<script>alert("XSS")</script>')
@override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
def test_override_currency_settings_in_the_html_response(self):
    """
    Test that an overridden currency symbol ('Rs', set via
    PAID_COURSE_REGISTRATION_CURRENCY) is used in the response instead of
    the default '$'.
    """
    CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
    total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
    response = self.client.get(self.url)
    self.assertIn('{currency}{amount}'.format(currency='Rs', amount=total_amount), response.content)
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': False})
@override_settings(ANALYTICS_DASHBOARD_URL='')
def test_no_enrollments(self):
    """
    Test enrollment section is hidden.
    """
    # With both the feature flag off and no analytics dashboard URL,
    # the enrollment section should not be rendered at all.
    response = self.client.get(self.url)
    # no enrollment information should be visible
    self.assertNotIn('<h3 class="hd hd-3">Enrollment Information</h3>', response.content)
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': True})
@override_settings(ANALYTICS_DASHBOARD_URL='')
def test_show_enrollments_data(self):
"""
Test enrollment data is shown.
"""
response = self.client.get(self.url)
# enrollment information visible
self.assertIn('<h3 class="hd hd-3">Enrollment Information</h3>', response.content)
self.assertTrue('<td>Verified</td>' in response.content)
self.assertTrue('<td>Audit</td>' in response.content)
self.assertTrue('<td>Honor</td>' in response.content)
self.assertTrue('<td>Professional</td>' in response.content)
# dashboard link hidden
self.assertFalse(self.get_dashboard_enrollment_message() in response.content)
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': True})
@override_settings(ANALYTICS_DASHBOARD_URL='')
def test_show_enrollment_data_for_prof_ed(self):
# Create both "professional" (meaning professional + verification)
# and "no-id-professional" (meaning professional without verification)
# These should be aggregated for display purposes.
users = [UserFactory() for _ in range(2)]
CourseEnrollment.enroll(users[0], self.course.id, mode="professional")
CourseEnrollment.enroll(users[1], self.course.id, mode="no-id-professional")
response = self.client.get(self.url)
# Check that the number of professional enrollments is two
self.assertContains(response, "<td>Professional</td><td>2</td>")
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': False})
@override_settings(ANALYTICS_DASHBOARD_URL='http://example.com')
@override_settings(ANALYTICS_DASHBOARD_NAME='Example')
def test_show_dashboard_enrollment_message(self):
"""
Test enrollment dashboard message is shown and data is hidden.
"""
response = self.client.get(self.url)
# enrollment information hidden
self.assertFalse('<td>Verified</td>' in response.content)
self.assertFalse('<td>Audit</td>' in response.content)
self.assertFalse('<td>Honor</td>' in response.content)
self.assertFalse('<td>Professional</td>' in response.content)
# link to dashboard shown
expected_message = self.get_dashboard_enrollment_message()
self.assertTrue(expected_message in response.content)
@override_settings(ANALYTICS_DASHBOARD_URL='')
@override_settings(ANALYTICS_DASHBOARD_NAME='')
def test_dashboard_analytics_tab_not_shown(self):
"""
Test dashboard analytics tab isn't shown if insights isn't configured.
"""
response = self.client.get(self.url)
analytics_section = '<li class="nav-item"><a href="" data-section="instructor_analytics">Analytics</a></li>'
self.assertFalse(analytics_section in response.content)
@override_settings(ANALYTICS_DASHBOARD_URL='http://example.com')
@override_settings(ANALYTICS_DASHBOARD_NAME='Example')
def test_dashboard_analytics_points_at_insights(self):
"""
Test analytics dashboard message is shown
"""
response = self.client.get(self.url)
analytics_section = '<li class="nav-item"><a href="" data-section="instructor_analytics">Analytics</a></li>'
self.assertTrue(analytics_section in response.content)
# link to dashboard shown
expected_message = self.get_dashboard_analytics_message()
self.assertTrue(expected_message in response.content)
def add_course_to_user_cart(self, cart, course_key):
"""
adding course to user cart
"""
reg_item = PaidCourseRegistration.add_to_order(cart, course_key)
return reg_item
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_total_credit_cart_sales_amount(self):
"""
Test to check the total amount for all the credit card purchases.
"""
student = UserFactory.create()
self.client.login(username=student.username, password="test")
student_cart = Order.get_cart_for_user(student)
item = self.add_course_to_user_cart(student_cart, self.course.id)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': 4})
self.assertEqual(resp.status_code, 200)
student_cart.purchase()
self.client.login(username=self.instructor.username, password="test")
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(self.course.id)
total_amount = single_purchase_total + bulk_purchase_total
response = self.client.get(self.url)
self.assertIn('{currency}{amount}'.format(currency='$', amount=total_amount), response.content)
@ddt.data(
(True, True, True),
(True, False, False),
(True, None, False),
(False, True, False),
(False, False, False),
(False, None, False),
)
@ddt.unpack
def test_ccx_coaches_option_on_admin_list_management_instructor(
self, ccx_feature_flag, enable_ccx, expected_result
):
"""
Test whether the "CCX Coaches" option is visible or hidden depending on the value of course.enable_ccx.
"""
with patch.dict(settings.FEATURES, {'CUSTOM_COURSES_EDX': ccx_feature_flag}):
self.course.enable_ccx = enable_ccx
self.store.update_item(self.course, self.instructor.id)
response = self.client.get(self.url)
self.assertEquals(
expected_result,
'CCX Coaches are able to create their own Custom Courses based on this course' in response.content
)
def test_grade_cutoffs(self):
"""
Verify that grade cutoffs are displayed in the correct order.
"""
response = self.client.get(self.url)
self.assertIn('D: 0.5, C: 0.57, B: 0.63, A: 0.75', response.content)
@patch('instructor.views.gradebook_api.MAX_STUDENTS_PER_PAGE_GRADE_BOOK', 2)
def test_calculate_page_info(self):
page = calculate_page_info(offset=0, total_students=2)
self.assertEqual(page["offset"], 0)
self.assertEqual(page["page_num"], 1)
self.assertEqual(page["next_offset"], None)
self.assertEqual(page["previous_offset"], None)
self.assertEqual(page["total_pages"], 1)
@patch('instructor.views.gradebook_api.render_to_response', intercept_renderer)
@patch('instructor.views.gradebook_api.MAX_STUDENTS_PER_PAGE_GRADE_BOOK', 1)
def test_spoc_gradebook_pages(self):
for i in xrange(2):
username = "user_%d" % i
student = UserFactory.create(username=username)
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
url = reverse(
'spoc_gradebook',
kwargs={'course_id': self.course.id}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# Max number of student per page is one. Patched setting MAX_STUDENTS_PER_PAGE_GRADE_BOOK = 1
self.assertEqual(len(response.mako_context['students']), 1) # pylint: disable=no-member
@ddt.ddt
class TestInstructorDashboardPerformance(ModuleStoreTestCase, LoginEnrollmentTestCase, XssTestMixin):
    """
    Tests for the instructor dashboard from the performance point of view.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
    def setUp(self):
        """
        Set up tests: create a split-modulestore course with a known grading
        policy and paid mode, then log in as an instructor.
        """
        super(TestInstructorDashboardPerformance, self).setUp()
        # display_name embeds a script tag so XSS escaping can be asserted
        # elsewhere in this suite.
        self.course = CourseFactory.create(
            grading_policy={"GRADE_CUTOFFS": {"A": 0.75, "B": 0.63, "C": 0.57, "D": 0.5}},
            display_name='<script>alert("XSS")</script>',
            default_store=ModuleStoreEnum.Type.split
        )
        self.course_mode = CourseMode(
            course_id=self.course.id,
            mode_slug=CourseMode.DEFAULT_MODE_SLUG,
            mode_display_name=CourseMode.DEFAULT_MODE.name,
            min_price=40
        )
        self.course_mode.save()
        # Create instructor account
        self.instructor = AdminFactory.create()
        self.client.login(username=self.instructor.username, password="test")
    def test_spoc_gradebook_mongo_calls(self):
        """
        Test that the MongoDB cache is used in API to return grades
        """
        # prepare course structure
        course = ItemFactory.create(
            parent_location=self.course.location,
            category="course",
            display_name="Test course",
        )
        # Twenty enrolled students give the gradebook realistic load.
        students = []
        for i in xrange(20):
            username = "user_%d" % i
            student = UserFactory.create(username=username)
            CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
            students.append(student)
        chapter = ItemFactory.create(
            parent=course,
            category='chapter',
            display_name="Chapter",
            publish_item=True,
            start=datetime.datetime(2015, 3, 1, tzinfo=UTC),
        )
        sequential = ItemFactory.create(
            parent=chapter,
            category='sequential',
            display_name="Lesson",
            publish_item=True,
            start=datetime.datetime(2015, 3, 1, tzinfo=UTC),
            metadata={'graded': True, 'format': 'Homework'},
        )
        vertical = ItemFactory.create(
            parent=sequential,
            category='vertical',
            display_name='Subsection',
            publish_item=True,
            start=datetime.datetime(2015, 4, 1, tzinfo=UTC),
        )
        # Ten problems, each with a stored score per student, so grade
        # aggregation has real state to walk.
        for i in xrange(10):
            problem = ItemFactory.create(
                category="problem",
                parent=vertical,
                display_name="A Problem Block %d" % i,
                weight=1,
                publish_item=False,
                metadata={'rerandomize': 'always'},
            )
            for j in students:
                grade = i % 2
                StudentModuleFactory.create(
                    grade=grade,
                    max_grade=1,
                    student=j,
                    course_id=self.course.id,
                    module_state_key=problem.location
                )
        # check MongoDB calls count
        # The gradebook view must stay within a fixed Mongo query budget;
        # exceeding it indicates the modulestore cache is being bypassed.
        url = reverse('spoc_gradebook', kwargs={'course_id': self.course.id})
        with check_mongo_calls(7):
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
| agpl-3.0 |
GbalsaC/bitnamiP | lms/djangoapps/django_comment_client/migrations/0001_initial.py | 188 | 8982 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """
    South schema migration creating the Role and Permission tables for
    django_comment_client, together with their many-to-many join tables
    (role<->user and permission<->role).
    """
    def forwards(self, orm):
        # Adding model 'Role'
        db.create_table('django_comment_client_role', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=30)),
            ('course_id', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, blank=True)),
        ))
        db.send_create_signal('django_comment_client', ['Role'])
        # Adding M2M table for field users on 'Role'
        db.create_table('django_comment_client_role_users', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('role', models.ForeignKey(orm['django_comment_client.role'], null=False)),
            ('user', models.ForeignKey(orm['auth.user'], null=False))
        ))
        db.create_unique('django_comment_client_role_users', ['role_id', 'user_id'])
        # Adding model 'Permission'
        db.create_table('django_comment_client_permission', (
            ('name', self.gf('django.db.models.fields.CharField')(max_length=30, primary_key=True)),
        ))
        db.send_create_signal('django_comment_client', ['Permission'])
        # Adding M2M table for field roles on 'Permission'
        db.create_table('django_comment_client_permission_roles', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('permission', models.ForeignKey(orm['django_comment_client.permission'], null=False)),
            ('role', models.ForeignKey(orm['django_comment_client.role'], null=False))
        ))
        db.create_unique('django_comment_client_permission_roles', ['permission_id', 'role_id'])
    def backwards(self, orm):
        # Deleting model 'Role'
        db.delete_table('django_comment_client_role')
        # Removing M2M table for field users on 'Role'
        db.delete_table('django_comment_client_role_users')
        # Deleting model 'Permission'
        db.delete_table('django_comment_client_permission')
        # Removing M2M table for field roles on 'Permission'
        db.delete_table('django_comment_client_permission_roles')
    # Frozen ORM snapshot used by South at migration time.  This is
    # generated code; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
            'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'django_comment_client.permission': {
            'Meta': {'object_name': 'Permission'},
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'}),
            'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'permissions'", 'symmetrical': 'False', 'to': "orm['django_comment_client.Role']"})
        },
        'django_comment_client.role': {
            'Meta': {'object_name': 'Role'},
            'course_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'roles'", 'symmetrical': 'False', 'to': "orm['auth.User']"})
        }
    }
    complete_apps = ['django_comment_client']
| agpl-3.0 |
noxora/flask-base | flask/lib/python3.4/site-packages/sqlalchemy/testing/suite/test_results.py | 35 | 11736 | from .. import fixtures, config
from ..config import requirements
from .. import exclusions
from ..assertions import eq_
from .. import engines
from ... import testing
from sqlalchemy import Integer, String, select, util, sql, DateTime, text, func
import datetime
from ..schema import Table, Column
class RowFetchTest(fixtures.TablesTest):
    """Backend-level tests that row values can be fetched by string key,
    integer position, and Column object, plus duplicate-name and
    scalar-select edge cases."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # Fixture tables: a simple pk/data table and one with a DateTime
        # column for the scalar-select round trip.
        Table('plain_pk', metadata,
              Column('id', Integer, primary_key=True),
              Column('data', String(50))
              )
        Table('has_dates', metadata,
              Column('id', Integer, primary_key=True),
              Column('today', DateTime)
              )
    @classmethod
    def insert_data(cls):
        # Seed three known rows plus a single timestamp row.
        config.db.execute(
            cls.tables.plain_pk.insert(),
            [
                {"id": 1, "data": "d1"},
                {"id": 2, "data": "d2"},
                {"id": 3, "data": "d3"},
            ]
        )
        config.db.execute(
            cls.tables.has_dates.insert(),
            [
                {"id": 1, "today": datetime.datetime(2006, 5, 12, 12, 0, 0)}
            ]
        )
    def test_via_string(self):
        """Row values are accessible by column-name string."""
        row = config.db.execute(
            self.tables.plain_pk.select().
            order_by(self.tables.plain_pk.c.id)
        ).first()
        eq_(
            row['id'], 1
        )
        eq_(
            row['data'], "d1"
        )
    def test_via_int(self):
        """Row values are accessible by integer position."""
        row = config.db.execute(
            self.tables.plain_pk.select().
            order_by(self.tables.plain_pk.c.id)
        ).first()
        eq_(
            row[0], 1
        )
        eq_(
            row[1], "d1"
        )
    def test_via_col_object(self):
        """Row values are accessible using the Column object as key."""
        row = config.db.execute(
            self.tables.plain_pk.select().
            order_by(self.tables.plain_pk.c.id)
        ).first()
        eq_(
            row[self.tables.plain_pk.c.id], 1
        )
        eq_(
            row[self.tables.plain_pk.c.data], "d1"
        )
    @requirements.duplicate_names_in_cursor_description
    def test_row_with_dupe_names(self):
        """Duplicate column labels appear twice in keys() and the row."""
        result = config.db.execute(
            select([self.tables.plain_pk.c.data,
                    self.tables.plain_pk.c.data.label('data')]).
            order_by(self.tables.plain_pk.c.id)
        )
        row = result.first()
        eq_(result.keys(), ['data', 'data'])
        eq_(row, ('d1', 'd1'))
    def test_row_w_scalar_select(self):
        """test that a scalar select as a column is returned as such
        and that type conversion works OK.
        (this is half a SQLAlchemy Core test and half to catch database
        backends that may have unusual behavior with scalar selects.)
        """
        datetable = self.tables.has_dates
        s = select([datetable.alias('x').c.today]).as_scalar()
        s2 = select([datetable.c.id, s.label('somelabel')])
        row = config.db.execute(s2).first()
        eq_(row['somelabel'], datetime.datetime(2006, 5, 12, 12, 0, 0))
class PercentSchemaNamesTest(fixtures.TablesTest):
    """tests using percent signs, spaces in table and column names.
    This is a very fringe use case, doesn't work for MySQL
    or PostgreSQL. the requirement, "percent_schema_names",
    is marked "skip" by default.
    """
    __requires__ = ('percent_schema_names', )
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # Both a full Table and a "lightweight" sql.table() construct are
        # exercised, since quoting is applied differently to each.
        cls.tables.percent_table = Table('percent%table', metadata,
                                         Column("percent%", Integer),
                                         Column(
                                             "spaces % more spaces", Integer),
                                         )
        cls.tables.lightweight_percent_table = sql.table(
            'percent%table', sql.column("percent%"),
            sql.column("spaces % more spaces")
        )
    def test_single_roundtrip(self):
        """Insert the rows one statement at a time, then verify."""
        percent_table = self.tables.percent_table
        for params in [
            {'percent%': 5, 'spaces % more spaces': 12},
            {'percent%': 7, 'spaces % more spaces': 11},
            {'percent%': 9, 'spaces % more spaces': 10},
            {'percent%': 11, 'spaces % more spaces': 9}
        ]:
            config.db.execute(percent_table.insert(), params)
        self._assert_table()
    def test_executemany_roundtrip(self):
        """Insert via executemany (list of param dicts), then verify."""
        percent_table = self.tables.percent_table
        config.db.execute(
            percent_table.insert(),
            {'percent%': 5, 'spaces % more spaces': 12}
        )
        config.db.execute(
            percent_table.insert(),
            [{'percent%': 7, 'spaces % more spaces': 11},
             {'percent%': 9, 'spaces % more spaces': 10},
             {'percent%': 11, 'spaces % more spaces': 9}]
        )
        self._assert_table()
    def _assert_table(self):
        # Shared verification: select, filter, row access by odd column
        # names, and an UPDATE, across both table constructs and aliases.
        percent_table = self.tables.percent_table
        lightweight_percent_table = self.tables.lightweight_percent_table
        for table in (
                percent_table,
                percent_table.alias(),
                lightweight_percent_table,
                lightweight_percent_table.alias()):
            eq_(
                list(
                    config.db.execute(
                        table.select().order_by(table.c['percent%'])
                    )
                ),
                [
                    (5, 12),
                    (7, 11),
                    (9, 10),
                    (11, 9)
                ]
            )
            eq_(
                list(
                    config.db.execute(
                        table.select().
                        where(table.c['spaces % more spaces'].in_([9, 10])).
                        order_by(table.c['percent%']),
                    )
                ),
                [
                    (9, 10),
                    (11, 9)
                ]
            )
            row = config.db.execute(table.select().
                                    order_by(table.c['percent%'])).first()
            eq_(row['percent%'], 5)
            eq_(row['spaces % more spaces'], 12)
            eq_(row[table.c['percent%']], 5)
            eq_(row[table.c['spaces % more spaces']], 12)
        # UPDATE through the percent-named column must also round-trip.
        config.db.execute(
            percent_table.update().values(
                {percent_table.c['spaces % more spaces']: 15}
            )
        )
        eq_(
            list(
                config.db.execute(
                    percent_table.
                    select().
                    order_by(percent_table.c['percent%'])
                )
            ),
            [(5, 15), (7, 15), (9, 15), (11, 15)]
        )
class ServerSideCursorsTest(fixtures.TestBase, testing.AssertsExecutionResults):
    """Tests that the ``server_side_cursors`` engine flag and the
    ``stream_results`` execution option enable/disable server-side
    cursors at the engine, connection, and statement levels."""
    __requires__ = ('server_side_cursors', )
    __backend__ = True
    def _is_server_side(self, cursor):
        # Driver-specific introspection: each DBAPI exposes server-side
        # cursors differently (named cursor on psycopg2, SSCursor class
        # on the MySQL drivers).
        if self.engine.url.drivername == 'postgresql':
            return cursor.name
        elif self.engine.url.drivername == 'mysql':
            sscursor = __import__('MySQLdb.cursors').cursors.SSCursor
            return isinstance(cursor, sscursor)
        elif self.engine.url.drivername == 'mysql+pymysql':
            sscursor = __import__('pymysql.cursors').cursors.SSCursor
            return isinstance(cursor, sscursor)
        else:
            return False
    def _fixture(self, server_side_cursors):
        # Build a throwaway engine with the flag under test.
        self.engine = engines.testing_engine(
            options={'server_side_cursors': server_side_cursors}
        )
        return self.engine
    def tearDown(self):
        engines.testing_reaper.close_all()
        self.engine.dispose()
    def test_global_string(self):
        """Engine-level flag applies to plain string execution."""
        engine = self._fixture(True)
        result = engine.execute('select 1')
        assert self._is_server_side(result.cursor)
    def test_global_text(self):
        """Engine-level flag applies to text() constructs."""
        engine = self._fixture(True)
        result = engine.execute(text('select 1'))
        assert self._is_server_side(result.cursor)
    def test_global_expr(self):
        """Engine-level flag applies to SQL expression constructs."""
        engine = self._fixture(True)
        result = engine.execute(select([1]))
        assert self._is_server_side(result.cursor)
    def test_global_off_explicit(self):
        engine = self._fixture(False)
        result = engine.execute(text('select 1'))
        # It should be off globally ...
        assert not self._is_server_side(result.cursor)
    def test_stmt_option(self):
        engine = self._fixture(False)
        s = select([1]).execution_options(stream_results=True)
        result = engine.execute(s)
        # ... but enabled for this one.
        assert self._is_server_side(result.cursor)
    def test_conn_option(self):
        engine = self._fixture(False)
        # and this one
        result = \
            engine.connect().execution_options(stream_results=True).\
            execute('select 1'
                    )
        assert self._is_server_side(result.cursor)
    def test_stmt_enabled_conn_option_disabled(self):
        # Connection-level stream_results=False wins over the statement.
        engine = self._fixture(False)
        s = select([1]).execution_options(stream_results=True)
        # not this one
        result = \
            engine.connect().execution_options(stream_results=False).\
            execute(s)
        assert not self._is_server_side(result.cursor)
    def test_stmt_option_disabled(self):
        engine = self._fixture(True)
        s = select([1]).execution_options(stream_results=False)
        result = engine.execute(s)
        assert not self._is_server_side(result.cursor)
    def test_aliases_and_ss(self):
        engine = self._fixture(False)
        s1 = select([1]).execution_options(stream_results=True).alias()
        result = engine.execute(s1)
        assert self._is_server_side(result.cursor)
        # s1's options shouldn't affect s2 when s2 is used as a
        # from_obj.
        s2 = select([1], from_obj=s1)
        result = engine.execute(s2)
        assert not self._is_server_side(result.cursor)
    def test_for_update_expr(self):
        engine = self._fixture(True)
        s1 = select([1], for_update=True)
        result = engine.execute(s1)
        assert self._is_server_side(result.cursor)
    def test_for_update_string(self):
        engine = self._fixture(True)
        result = engine.execute('SELECT 1 FOR UPDATE')
        assert self._is_server_side(result.cursor)
    def test_text_no_ss(self):
        engine = self._fixture(False)
        s = text('select 42')
        result = engine.execute(s)
        assert not self._is_server_side(result.cursor)
    def test_text_ss_option(self):
        engine = self._fixture(False)
        s = text('select 42').execution_options(stream_results=True)
        result = engine.execute(s)
        assert self._is_server_side(result.cursor)
    @testing.provide_metadata
    def test_roundtrip(self):
        """Full CRUD cycle through a server-side-cursor engine."""
        md = self.metadata
        engine = self._fixture(True)
        test_table = Table('test_table', md,
                           Column('id', Integer, primary_key=True),
                           Column('data', String(50)))
        test_table.create(checkfirst=True)
        test_table.insert().execute(data='data1')
        test_table.insert().execute(data='data2')
        eq_(test_table.select().execute().fetchall(), [(1, 'data1'
                                                        ), (2, 'data2')])
        test_table.update().where(
            test_table.c.id == 2).values(
            data=test_table.c.data +
            ' updated').execute()
        eq_(test_table.select().execute().fetchall(),
            [(1, 'data1'), (2, 'data2 updated')])
        test_table.delete().execute()
        eq_(select([func.count('*')]).select_from(test_table).scalar(), 0)
| mit |
mifl/android_kernel_pantech_presto | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
	"""perf callback: start the background display-refresh thread
	before event processing begins."""
	thread.start_new_thread(print_syscall_totals, (interval,))
	pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	"""perf callback for the raw_syscalls:sys_enter tracepoint.

	Counts one hit per syscall id, optionally restricted to the
	[comm] given on the command line."""
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# First hit: autodict returned a nested dict, so seed the count.
		syscalls[id] = 1
def print_syscall_totals(interval):
	"""Display loop: every *interval* seconds clear the terminal, print
	the syscall counts gathered so far (highest count first), then reset
	the counters for the next window."""
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",
		print "%-40s %10s\n" % ("event", "count"),
		print "%-40s %10s\n" % ("----------------------------------------", \
					 "----------"),
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
					  reverse = True):
			try:
				print "%-40s %10d\n" % (syscall_name(id), val),
			except TypeError:
				# syscall_name can fail on unknown ids; skip the row.
				pass
		syscalls.clear()
		time.sleep(interval)
| gpl-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/numpy/core/_methods.py | 5 | 8399 | """
Array methods which are called by both the C-code for the method
and the Python code for the NumPy-namespace function
"""
from __future__ import division, absolute_import, print_function
import warnings
from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core._asarray import asanyarray
from numpy.core import numerictypes as nt
from numpy.core import _exceptions
from numpy._globals import _NoValue
from numpy.compat import pickle, os_fspath, contextlib_nullcontext
# save those O(100) nanoseconds!
# Bind the bound `reduce` methods of the arithmetic/logical ufuncs to
# module-level names so the helpers below skip an attribute lookup per call.
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Backing implementation of ndarray.max: maximum.reduce, positional
    # args only (keyword parsing is measurably slower here).
    return umr_maximum(a, axis, None, out, keepdims, initial, where)
def _amin(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Backing implementation of ndarray.min via minimum.reduce.
    return umr_minimum(a, axis, None, out, keepdims, initial, where)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
         initial=_NoValue, where=True):
    # Backing implementation of ndarray.sum via add.reduce.
    return umr_sum(a, axis, dtype, out, keepdims, initial, where)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Backing implementation of ndarray.prod via multiply.reduce.
    return umr_prod(a, axis, dtype, out, keepdims, initial, where)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
    # Backing implementation of ndarray.any via logical_or.reduce.
    return umr_any(a, axis, dtype, out, keepdims)
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
    # Backing implementation of ndarray.all via logical_and.reduce.
    return umr_all(a, axis, dtype, out, keepdims)
def _count_reduce_items(arr, axis):
    """Return how many elements a reduction of *arr* over *axis* combines.

    ``axis`` may be None (all axes), a single integer, or a tuple of
    integers; the result is the product of the sizes of the reduced axes.
    """
    if axis is None:
        axis = tuple(range(arr.ndim))
    elif not isinstance(axis, tuple):
        axis = (axis,)
    count = 1
    for dim in axis:
        count *= arr.shape[dim]
    return count
# Numpy 1.17.0, 2019-02-24
# Various clip behavior deprecations, marked with _clip_dep as a prefix.
def _clip_dep_is_scalar_nan(a):
    """Return True when *a* is a zero-dimensional NaN value.

    Non-numeric inputs (for which isnan raises TypeError) and anything
    with ndim > 0 report False.
    """
    # guarded to protect circular imports
    from numpy.core.fromnumeric import ndim
    if ndim(a) == 0:
        try:
            return um.isnan(a)
        except TypeError:
            return False
    return False
def _clip_dep_is_byte_swapped(a):
    """Return True for an ndarray whose dtype is in non-native byte order.

    Anything that is not an ndarray reports False.
    """
    if not isinstance(a, mu.ndarray):
        return False
    return not a.dtype.isnative
def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs):
    """Invoke *ufunc*, retrying with casting='unsafe' (plus a deprecation
    warning) when the output casting rules reject the first attempt."""
    # normal path
    if casting is not None:
        return ufunc(*args, out=out, casting=casting, **kwargs)
    # try to deal with broken casting rules
    try:
        return ufunc(*args, out=out, **kwargs)
    except _exceptions._UFuncOutputCastingError as e:
        # Numpy 1.17.0, 2019-02-24
        warnings.warn(
            "Converting the output of clip from {!r} to {!r} is deprecated. "
            "Pass `casting=\"unsafe\"` explicitly to silence this warning, or "
            "correct the type of the variables.".format(e.from_, e.to),
            DeprecationWarning,
            stacklevel=2
        )
        return ufunc(*args, out=out, casting="unsafe", **kwargs)
def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs):
    """Backing implementation of np.clip / ndarray.clip.

    Dispatches to um.minimum / um.maximum when only one bound is given,
    and to um.clip when both are present.  Scalar-NaN bounds are
    deprecated and mapped to -inf/+inf with a warning.
    """
    if min is None and max is None:
        raise ValueError("One of max or min must be given")
    # Numpy 1.17.0, 2019-02-24
    # This deprecation probably incurs a substantial slowdown for small arrays,
    # it will be good to get rid of it.
    if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out):
        using_deprecated_nan = False
        if _clip_dep_is_scalar_nan(min):
            min = -float('inf')
            using_deprecated_nan = True
        if _clip_dep_is_scalar_nan(max):
            max = float('inf')
            using_deprecated_nan = True
        if using_deprecated_nan:
            warnings.warn(
                "Passing `np.nan` to mean no clipping in np.clip has always "
                "been unreliable, and is now deprecated. "
                "In future, this will always return nan, like it already does "
                "when min or max are arrays that contain nan. "
                "To skip a bound, pass either None or an np.inf of an "
                "appropriate sign.",
                DeprecationWarning,
                stacklevel=2
            )
    if min is None:
        # Only an upper bound: clip is just an elementwise minimum.
        return _clip_dep_invoke_with_casting(
            um.minimum, a, max, out=out, casting=casting, **kwargs)
    elif max is None:
        # Only a lower bound: clip is just an elementwise maximum.
        return _clip_dep_invoke_with_casting(
            um.maximum, a, min, out=out, casting=casting, **kwargs)
    else:
        return _clip_dep_invoke_with_casting(
            um.clip, a, min, max, out=out, casting=casting, **kwargs)
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
    """Arithmetic mean of *a* along *axis* (backend of ``np.mean``).

    Integer/bool input accumulates in float64; float16 accumulates in
    float32 and is cast back to float16 at the end.
    """
    arr = asanyarray(a)

    is_float16_result = False
    rcount = _count_reduce_items(arr, axis)
    # Make this warning show up first
    if rcount == 0:
        warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)

    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None:
        if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
            dtype = mu.dtype('f8')
        elif issubclass(arr.dtype.type, nt.float16):
            # Accumulate float16 in float32 for accuracy; cast back below.
            dtype = mu.dtype('f4')
            is_float16_result = True

    ret = umr_sum(arr, axis, dtype, out, keepdims)
    if isinstance(ret, mu.ndarray):
        # In-place divide; 'unsafe' casting keeps integer `out` arrays legal.
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
        if is_float16_result and out is None:
            ret = arr.dtype.type(ret)
    elif hasattr(ret, 'dtype'):
        # numpy scalar result: divide and re-wrap in the proper scalar type.
        if is_float16_result:
            ret = arr.dtype.type(ret / rcount)
        else:
            ret = ret.dtype.type(ret / rcount)
    else:
        # plain Python scalar
        ret = ret / rcount

    return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Variance of *a* along *axis* with *ddof* delta degrees of freedom
    (backend of ``np.var``).
    """
    arr = asanyarray(a)

    rcount = _count_reduce_items(arr, axis)
    # Make this warning show up on top.
    if ddof >= rcount:
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
                      stacklevel=2)

    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
        dtype = mu.dtype('f8')

    # Compute the mean.
    # Note that if dtype is not of inexact type then arraymean will
    # not be either.
    arrmean = umr_sum(arr, axis, dtype, keepdims=True)
    if isinstance(arrmean, mu.ndarray):
        arrmean = um.true_divide(
                arrmean, rcount, out=arrmean, casting='unsafe', subok=False)
    else:
        arrmean = arrmean.dtype.type(arrmean / rcount)

    # Compute sum of squared deviations from mean
    # Note that x may not be inexact and that we need it to be an array,
    # not a scalar.
    x = asanyarray(arr - arrmean)
    if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
        x = um.multiply(x, x, out=x)
    else:
        # complex (or other) input: |x|^2 computed as x * conj(x)
        x = um.multiply(x, um.conjugate(x), out=x).real

    ret = umr_sum(x, axis, dtype, out, keepdims)

    # Compute degrees of freedom and make sure it is not negative.
    rcount = max([rcount - ddof, 0])

    # divide by degrees of freedom
    if isinstance(ret, mu.ndarray):
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
    elif hasattr(ret, 'dtype'):
        ret = ret.dtype.type(ret / rcount)
    else:
        ret = ret / rcount

    return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Standard deviation: the square root of `_var`, taken in place when
    the variance came back as an ndarray."""
    variance = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                    keepdims=keepdims)

    if isinstance(variance, mu.ndarray):
        # ndarray result: take the root in place to avoid a copy.
        return um.sqrt(variance, out=variance)
    if hasattr(variance, 'dtype'):
        # numpy scalar: preserve the variance's scalar dtype.
        return variance.dtype.type(um.sqrt(variance))
    # plain Python scalar
    return um.sqrt(variance)
def _ptp(a, axis=None, out=None, keepdims=False):
    """Peak-to-peak: max - min along *axis*, written into *out* if given."""
    highest = umr_maximum(a, axis, None, out, keepdims)
    lowest = umr_minimum(a, axis, None, None, keepdims)
    # `out` already holds the maximum; subtract reuses it for the result.
    return um.subtract(highest, lowest, out)
def _dump(self, file, protocol=2):
    """Pickle *self* to *file*, an already-open file object or a path-like."""
    # Wrap an existing file object so the caller keeps ownership (it is not
    # closed on exit); otherwise open -- and later close -- the named path.
    opener = (contextlib_nullcontext(file) if hasattr(file, 'write')
              else open(os_fspath(file), "wb"))
    with opener as stream:
        pickle.dump(self, stream, protocol=protocol)
def _dumps(self, protocol=2):
    """Return *self* pickled to a bytes object using *protocol*."""
    return pickle.dumps(self, protocol=protocol)
| apache-2.0 |
google/assetMG | app/backend/helpers.py | 1 | 1488 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to be used by the backend."""
from google.ads.googleads.client import GoogleAdsClient
def populate_adgroup_details(client, account, ag_id):
    """Gets an adgroup ID and returns an adgroup object including
    adgroup id, adgroup name and campaign name.

    Args:
      client: an initialized GoogleAdsClient.
      account: customer id of the account the ad group belongs to.
      ag_id: id of the ad group to look up.

    Returns:
      A dict with 'adgroup_id', 'adgroup_name' and 'campaign_name' for the
      first matching row, or None when the ad group is not found.
    """
    ga_service = client.get_service('GoogleAdsService', version='v7')
    # GAQL has no bound parameters, so the id is interpolated into the query
    # text.  Coerce it to int first so an arbitrary string cannot alter the
    # query (ad group ids are numeric).
    query = '''
    SELECT
      campaign.name,
      ad_group.name,
      ad_group.id
    FROM
      ad_group
    WHERE
      ad_group.id = %d
    ''' % (int(ag_id))
    request = client.get_type("SearchGoogleAdsStreamRequest")
    request.customer_id = account
    request.query = query
    response = ga_service.search_stream(request=request)
    # Return on the first result row; the id filter yields at most one.
    for batch in response:
        for row in batch.results:
            return {
                'adgroup_id': row.ad_group.id,
                'adgroup_name': row.ad_group.name,
                'campaign_name': row.campaign.name
            }
    return None
aaltinisik/OCBAltinkaya | addons/email_template/email_template.py | 1 | 31022 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import datetime
import dateutil.relativedelta as relativedelta
import logging
import lxml
import urlparse
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp import tools, api
from openerp.tools.translate import _
from urllib import urlencode, quote as quote
_logger = logging.getLogger(__name__)
def format_tz(pool, cr, uid, dt, tz=False, format=False, context=None):
    """Format the server-format datetime string *dt* for display.

    :param dt: datetime string in the server's datetime format
    :param tz: optional timezone name used for the conversion
    :param format: optional strftime pattern; when absent, the user
        language's date/time formats (or babel's 'medium' format when
        ``context['use_babel']`` is set) are used
    :return: formatted string, suffixed with " (tz)" in the fallback path
    """
    context = dict(context or {})
    if tz:
        # NOTE(review): the `or` fallbacks below are dead code --- this branch
        # only runs when `tz` is already truthy; confirm before cleaning up.
        context['tz'] = tz or pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz'] or "UTC"
    timestamp = datetime.datetime.strptime(dt, tools.DEFAULT_SERVER_DATETIME_FORMAT)
    ts = fields.datetime.context_timestamp(cr, uid, timestamp, context)

    # Babel allows to format datetime in a specific language without change locale
    # So month 1 = January in English, and janvier in French
    # Be aware that the default value for format is 'medium', instead of 'short'
    #     medium:  Jan 5, 2016, 10:20:31 PM |   5 janv. 2016 22:20:31
    #     short:   1/5/16, 10:20 PM         |   5/01/16 22:20
    if context.get('use_babel'):
        # Formatting available here : http://babel.pocoo.org/en/latest/dates.html#date-fields
        from babel.dates import format_datetime
        return format_datetime(ts, format or 'medium', locale=context.get("lang") or 'en_US')

    if format:
        return ts.strftime(format)
    else:
        # Fall back to the formats declared on the user's res.lang record.
        lang = context.get("lang")
        lang_params = {}
        if lang:
            res_lang = pool.get('res.lang')
            ids = res_lang.search(cr, uid, [("code", "=", lang)])
            if ids:
                lang_params = res_lang.read(cr, uid, ids[0], ["date_format", "time_format"])
        format_date = lang_params.get("date_format", '%B-%d-%Y')
        format_time = lang_params.get("time_format", '%I-%M %p')

        fdate = ts.strftime(format_date).decode('utf-8')
        ftime = ts.strftime(format_time).decode('utf-8')
        return "%s %s%s" % (fdate, ftime, (' (%s)' % tz) if tz else '')
try:
    # We use a jinja2 sandboxed environment to render mako templates.
    # Note that the rendering does not cover all the mako syntax, in particular
    # arbitrary Python statements are not accepted, and not all expressions are
    # allowed: only "public" attributes (not starting with '_') of objects may
    # be accessed.
    # This is done on purpose: it prevents incidental or malicious execution of
    # Python code that may break the security of the server.
    from jinja2.sandbox import SandboxedEnvironment
    # Delimiters are configured to mimic mako syntax for backward
    # compatibility with existing templates.
    mako_template_env = SandboxedEnvironment(
        block_start_string="<%",
        block_end_string="%>",
        variable_start_string="${",
        variable_end_string="}",
        comment_start_string="<%doc>",
        comment_end_string="</%doc>",
        line_statement_prefix="%",
        line_comment_prefix="##",
        trim_blocks=True,               # do not output newline after blocks
        autoescape=True,                # XML/HTML automatic escaping
    )
    # Whitelist of names usable inside template expressions.
    mako_template_env.globals.update({
        'str': str,
        'quote': quote,
        'urlencode': urlencode,
        'datetime': tools.wrap_module(datetime, []),
        'len': len,
        'abs': abs,
        'min': min,
        'max': max,
        'sum': sum,
        'filter': filter,
        'reduce': reduce,
        'map': map,
        'round': round,

        # dateutil.relativedelta is an old-style class and cannot be directly
        # instantiated within a jinja2 expression, so a lambda "proxy" is
        # needed, apparently.
        'relativedelta': lambda *a, **kw : relativedelta.relativedelta(*a, **kw),
    })
except ImportError:
    _logger.warning("jinja2 not available, templating features will not work!")
class email_template(osv.osv):
    """Templates for sending email.

    An ``email.template`` stores a reusable e-mail definition (subject,
    body, recipients, optional report attachment) whose fields may contain
    ``${...}`` placeholders rendered with the sandboxed jinja2 environment
    above against a target record of ``model_id``.
    """
    _name = "email.template"
    _description = 'Email Templates'
    _order = 'name'

    def default_get(self, cr, uid, fields, context=None):
        # Translate a plain 'model' default (technical name) into the
        # model_id many2one expected by the form.
        res = super(email_template, self).default_get(cr, uid, fields, context)
        if res.get('model'):
            res['model_id'] = self.pool['ir.model'].search(cr, uid, [('model', '=', res.pop('model'))], context=context)[0]
        return res

    def _replace_local_links(self, cr, uid, html, context=None):
        """ Post-processing of html content to replace local links to absolute
        links, using web.base.url as base url. """
        if not html:
            return html

        # form a tree
        root = lxml.html.fromstring(html)
        if not len(root) and root.text is None and root.tail is None:
            html = '<div>%s</div>' % html
            root = lxml.html.fromstring(html)

        base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        (base_scheme, base_netloc, bpath, bparams, bquery, bfragment) = urlparse.urlparse(base_url)

        def _process_link(url):
            # Prefix scheme-less, host-less URLs with the instance base URL.
            new_url = url
            (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
            if not scheme and not netloc:
                new_url = urlparse.urlunparse((base_scheme, base_netloc, path, params, query, fragment))
            return new_url

        # check all nodes, replace :
        # - img src -> check URL
        # - a href -> check URL
        for node in root.iter():
            if node.tag == 'a' and node.get('href'):
                node.set('href', _process_link(node.get('href')))
            elif node.tag == 'img' and not node.get('src', 'data').startswith('data'):
                node.set('src', _process_link(node.get('src')))

        html = lxml.html.tostring(root, pretty_print=False, method='html',encoding='unicode')
        # this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
        if html.startswith('<div>') and html.endswith('</div>'):
            html = html[5:-6]
        return html

    def render_post_process(self, cr, uid, html, context=None):
        # Make relative URLs absolute once the body has been rendered.
        html = self._replace_local_links(cr, uid, html, context=context)
        return html

    def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
        """Render the given template text, replace mako expressions ``${expr}``
        with the result of evaluating these expressions with
        an evaluation context containing:

            * ``user``: browse_record of the current user
            * ``object``: browse_record of the document record this mail is
              related to
            * ``context``: the context passed to the mail composition wizard

        :param str template: the template text to render
        :param str model: model name of the document record this mail is related to.
        :param int res_ids: list of ids of document records those mails are related to.
        """
        if context is None:
            context = {}
        res_ids = filter(None, res_ids)         # to avoid browsing [None] below
        results = dict.fromkeys(res_ids, u"")

        # try to load the template
        try:
            template = mako_template_env.from_string(tools.ustr(template))
        except Exception:
            _logger.exception("Failed to load template %r", template)
            return results

        # prepare template variables
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        records = self.pool[model].browse(cr, uid, res_ids, context=context) or [None]
        variables = {
            'format_tz': lambda dt, tz=False, format=False, context=context: format_tz(self.pool, cr, uid, dt, tz, format, context),
            'user': user,
            'ctx': context,  # context kw would clash with mako internals
        }
        for record in records:
            res_id = record.id if record else None
            variables['object'] = record
            try:
                render_result = template.render(variables)
            except Exception:
                _logger.exception("Failed to render template %r using values %r" % (template, variables))
                render_result = u""
            # A template that evaluates to the literal string "False" is
            # treated as empty.
            if render_result == u"False":
                render_result = u""
            results[res_id] = render_result

        if post_process:
            for res_id, result in results.iteritems():
                results[res_id] = self.render_post_process(cr, uid, result, context=context)
        return results

    def get_email_template_batch(self, cr, uid, template_id=False, res_ids=None, context=None):
        """Return {res_id: template browse_record}, each browsed with the
        language obtained by rendering the template's `lang` expression for
        that record (so translated template fields come back correctly)."""
        if context is None:
            context = {}
        if res_ids is None:
            res_ids = [None]
        results = dict.fromkeys(res_ids, False)

        if not template_id:
            return results
        template = self.browse(cr, uid, template_id, context)
        langs = self.render_template_batch(cr, uid, template.lang, template.model, res_ids, context)
        for res_id, lang in langs.iteritems():
            if lang:
                # Use translated template if necessary
                ctx = context.copy()
                ctx['lang'] = lang
                template = self.browse(cr, uid, template.id, ctx)
            else:
                template = self.browse(cr, uid, int(template_id), context)
            results[res_id] = template
        return results

    def onchange_model_id(self, cr, uid, ids, model_id, context=None):
        # Keep the related technical model name in sync with model_id.
        mod_name = False
        if model_id:
            mod_name = self.pool.get('ir.model').browse(cr, uid, model_id, context).model
        return {'value': {'model': mod_name}}

    _columns = {
        'name': fields.char('Name'),
        'model_id': fields.many2one('ir.model', 'Applies to', help="The kind of document with with this template can be used"),
        'model': fields.related('model_id', 'model', type='char', string='Related Document Model',
                                select=True, store=True, readonly=True),
        'lang': fields.char('Language',
                            help="Optional translation language (ISO code) to select when sending out an email. "
                                 "If not set, the english version will be used. "
                                 "This should usually be a placeholder expression "
                                 "that provides the appropriate language, e.g. "
                                 "${object.partner_id.lang}.",
                            placeholder="${object.partner_id.lang}"),
        'user_signature': fields.boolean('Add Signature',
                                         help="If checked, the user's signature will be appended to the text version "
                                              "of the message"),
        'subject': fields.char('Subject', translate=True, help="Subject (placeholders may be used here)",),
        'email_from': fields.char('From',
                                  help="Sender address (placeholders may be used here). If not set, the default "
                                       "value will be the author's email alias if configured, or email address."),
        'use_default_to': fields.boolean(
            'Default recipients',
            help="Default recipients of the record:\n"
                 "- partner (using id on a partner or the partner_id field) OR\n"
                 "- email (using email_from or email field)"),
        'email_to': fields.char('To (Emails)', help="Comma-separated recipient addresses (placeholders may be used here)"),
        'partner_to': fields.char('To (Partners)',
                                  help="Comma-separated ids of recipient partners (placeholders may be used here)",
                                  oldname='email_recipients'),
        'email_cc': fields.char('Cc', help="Carbon copy recipients (placeholders may be used here)"),
        'reply_to': fields.char('Reply-To', help="Preferred response address (placeholders may be used here)"),
        'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing Mail Server', readonly=False,
                                          help="Optional preferred server for outgoing mails. If not set, the highest "
                                               "priority one will be used."),
        'body_html': fields.html('Body', translate=True, sanitize=False, help="Rich-text/HTML version of the message (placeholders may be used here)"),
        'report_name': fields.char('Report Filename', translate=True,
                                   help="Name to use for the generated report file (may contain placeholders)\n"
                                        "The extension can be omitted and will then come from the report type."),
        'report_template': fields.many2one('ir.actions.report.xml', 'Optional report to print and attach'),
        'ref_ir_act_window': fields.many2one('ir.actions.act_window', 'Sidebar action', readonly=True, copy=False,
                                             help="Sidebar action to make this template available on records "
                                                  "of the related document model"),
        'ref_ir_value': fields.many2one('ir.values', 'Sidebar Button', readonly=True, copy=False,
                                        help="Sidebar button to open the sidebar action"),
        'attachment_ids': fields.many2many('ir.attachment', 'email_template_attachment_rel', 'email_template_id',
                                           'attachment_id', 'Attachments',
                                           help="You may attach files to this template, to be added to all "
                                                "emails created from this template"),
        'auto_delete': fields.boolean('Auto Delete', help="Permanently delete this email after sending it, to save space"),

        # Fake fields used to implement the placeholder assistant
        'model_object_field': fields.many2one('ir.model.fields', string="Field",
                                              help="Select target field from the related document model.\n"
                                                   "If it is a relationship field you will be able to select "
                                                   "a target field at the destination of the relationship."),
        'sub_object': fields.many2one('ir.model', 'Sub-model', readonly=True,
                                      help="When a relationship field is selected as first field, "
                                           "this field shows the document model the relationship goes to."),
        'sub_model_object_field': fields.many2one('ir.model.fields', 'Sub-field',
                                                  help="When a relationship field is selected as first field, "
                                                       "this field lets you select the target field within the "
                                                       "destination document model (sub-model)."),
        'null_value': fields.char('Default Value', help="Optional value to use if the target field is empty"),
        'copyvalue': fields.char('Placeholder Expression', help="Final placeholder expression, to be copy-pasted in the desired template field."),
    }

    _defaults = {
        'auto_delete': True,
    }

    def create_action(self, cr, uid, ids, context=None):
        """Create the sidebar action and ir.values button that expose this
        template on records of its target document model."""
        action_obj = self.pool.get('ir.actions.act_window')
        data_obj = self.pool.get('ir.model.data')
        for template in self.browse(cr, uid, ids, context=context):
            src_obj = template.model_id.model
            model_data_id = data_obj._get_id(cr, uid, 'mail', 'email_compose_message_wizard_form')
            res_id = data_obj.browse(cr, uid, model_data_id, context=context).res_id
            button_name = _('Send Mail (%s)') % template.name
            act_id = action_obj.create(cr, uid, {
                 'name': button_name,
                 'type': 'ir.actions.act_window',
                 'res_model': 'mail.compose.message',
                 'src_model': src_obj,
                 'view_type': 'form',
                 'context': "{'default_composition_mode': 'mass_mail', 'default_template_id' : %d, 'default_use_template': True}" % (template.id),
                 'view_mode':'form,tree',
                 'view_id': res_id,
                 'target': 'new',
                 'auto_refresh':1
            }, context)
            ir_values_id = self.pool.get('ir.values').create(cr, uid, {
                 'name': button_name,
                 'model': src_obj,
                 'key2': 'client_action_multi',
                 'value': "ir.actions.act_window,%s" % act_id,
                 'object': True,
             }, context)
            # Remember both records so they can be removed in unlink_action.
            template.write({
                'ref_ir_act_window': act_id,
                'ref_ir_value': ir_values_id,
            })
        return True

    def unlink_action(self, cr, uid, ids, context=None):
        """Remove the sidebar action/button created by create_action."""
        for template in self.browse(cr, uid, ids, context=context):
            try:
                if template.ref_ir_act_window:
                    self.pool.get('ir.actions.act_window').unlink(cr, uid, template.ref_ir_act_window.id, context)
                if template.ref_ir_value:
                    ir_values_obj = self.pool.get('ir.values')
                    ir_values_obj.unlink(cr, uid, template.ref_ir_value.id, context)
            except Exception:
                raise osv.except_osv(_("Warning"), _("Deletion of the action record failed."))
        return True

    def unlink(self, cr, uid, ids, context=None):
        # Clean up the sidebar action before deleting the template itself.
        self.unlink_action(cr, uid, ids, context=context)
        return super(email_template, self).unlink(cr, uid, ids, context=context)

    def copy(self, cr, uid, id, default=None, context=None):
        # Suffix the name so the duplicate is distinguishable.
        template = self.browse(cr, uid, id, context=context)
        default = dict(default or {},
                       name=_("%s (copy)") % template.name)
        return super(email_template, self).copy(cr, uid, id, default, context)

    def build_expression(self, field_name, sub_field_name, null_value):
        """Returns a placeholder expression for use in a template field,
        based on the values provided in the placeholder assistant.

        :param field_name: main field name
        :param sub_field_name: sub field name (M2O)
        :param null_value: default value if the target value is empty
        :return: final placeholder expression
        """
        expression = ''
        if field_name:
            expression = "${object." + field_name
            if sub_field_name:
                expression += "." + sub_field_name
            if null_value:
                expression += " or '''%s'''" % null_value
            expression += "}"
        return expression

    def onchange_sub_model_object_value_field(self, cr, uid, ids, model_object_field, sub_model_object_field=False, null_value=None, context=None):
        # Placeholder assistant: recompute the sub-model and the final
        # copy-pastable expression whenever a field selection changes.
        result = {
            'sub_object': False,
            'copyvalue': False,
            'sub_model_object_field': False,
            'null_value': False
            }
        if model_object_field:
            fields_obj = self.pool.get('ir.model.fields')
            field_value = fields_obj.browse(cr, uid, model_object_field, context)
            if field_value.ttype in ['many2one', 'one2many', 'many2many']:
                res_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', field_value.relation)], context=context)
                sub_field_value = False
                if sub_model_object_field:
                    sub_field_value = fields_obj.browse(cr, uid, sub_model_object_field, context)
                if res_ids:
                    result.update({
                        'sub_object': res_ids[0],
                        'copyvalue': self.build_expression(field_value.name, sub_field_value and sub_field_value.name or False, null_value or False),
                        'sub_model_object_field': sub_model_object_field or False,
                        'null_value': null_value or False
                        })
            else:
                result.update({
                        'copyvalue': self.build_expression(field_value.name, False, null_value or False),
                        'null_value': null_value or False
                        })
        return {'value': result}

    def generate_recipients_batch(self, cr, uid, results, template_id, res_ids, context=None):
        """Generates the recipients of the template. Default values can ben generated
        instead of the template values if requested by template or context.
        Emails (email_to, email_cc) can be transformed into partners if requested
        in the context. """
        if context is None:
            context = {}
        template = self.browse(cr, uid, template_id, context=context)

        if template.use_default_to or context.get('tpl_force_default_to'):
            ctx = dict(context, thread_model=template.model)
            default_recipients = self.pool['mail.thread'].message_get_default_recipients(cr, uid, res_ids, context=ctx)
            for res_id, recipients in default_recipients.iteritems():
                results[res_id].pop('partner_to', None)
                results[res_id].update(recipients)

        for res_id, values in results.iteritems():
            partner_ids = values.get('partner_ids', list())
            if context and context.get('tpl_partners_only'):
                # Convert raw e-mail addresses into res.partner records.
                mails = tools.email_split(values.pop('email_to', '')) + tools.email_split(values.pop('email_cc', ''))
                for mail in mails:
                    partner_id = self.pool.get('res.partner').find_or_create(cr, uid, mail, context=context)
                    partner_ids.append(partner_id)
            partner_to = values.pop('partner_to', '')
            if partner_to:
                # placeholders could generate '', 3, 2 due to some empty field values
                tpl_partner_ids = [int(pid) for pid in partner_to.split(',') if pid]
                partner_ids += self.pool['res.partner'].exists(cr, SUPERUSER_ID, tpl_partner_ids, context=context)
            results[res_id]['partner_ids'] = partner_ids
        return results

    def generate_email_batch(self, cr, uid, template_id, res_ids, context=None, fields=None):
        """Generates an email from the template for given the given model based on
        records given by res_ids.

        :param template_id: id of the template to render.
        :param res_id: id of the record to use for rendering the template (model
                       is taken from template definition)
        :returns: a dict containing all relevant fields for creating a new
                  mail.mail entry, with one extra key ``attachments``, in the
                  format [(report_name, data)] where data is base64 encoded.
        """
        if context is None:
            context = {}
        if fields is None:
            fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to']

        report_xml_pool = self.pool.get('ir.actions.report.xml')
        res_ids_to_templates = self.get_email_template_batch(cr, uid, template_id, res_ids, context)

        # templates: res_id -> template; template -> res_ids
        templates_to_res_ids = {}
        for res_id, template in res_ids_to_templates.iteritems():
            templates_to_res_ids.setdefault(template, []).append(res_id)

        results = dict()
        for template, template_res_ids in templates_to_res_ids.iteritems():
            # generate fields value for all res_ids linked to the current template
            ctx = context.copy()
            if template.lang:
                ctx['lang'] = template._context.get('lang')
            for field in fields:
                generated_field_values = self.render_template_batch(
                    cr, uid, getattr(template, field), template.model, template_res_ids,
                    post_process=(field == 'body_html'),
                    context=ctx)
                for res_id, field_value in generated_field_values.iteritems():
                    results.setdefault(res_id, dict())[field] = field_value
            # compute recipients
            results = self.generate_recipients_batch(cr, uid, results, template.id, template_res_ids, context=context)
            # update values for all res_ids
            for res_id in template_res_ids:
                values = results[res_id]
                # body: add user signature, sanitize
                if 'body_html' in fields and template.user_signature:
                    signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
                    if signature:
                        values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)
                if values.get('body_html'):
                    values['body'] = tools.html_sanitize(values['body_html'])
                # technical settings
                values.update(
                    mail_server_id=template.mail_server_id.id or False,
                    auto_delete=template.auto_delete,
                    model=template.model,
                    res_id=res_id or False,
                    attachment_ids=[attach.id for attach in template.attachment_ids],
                )

            # Add report in attachments: generate once for all template_res_ids
            if template.report_template:
                # Fix : Force report to use res ids and not active_ids
                if ctx and 'active_ids' in ctx:
                    del ctx['active_ids']
                for res_id in template_res_ids:
                    attachments = []
                    report_name = self.render_template(cr, uid, template.report_name, template.model, res_id, context=ctx)
                    report = report_xml_pool.browse(cr, uid, template.report_template.id, context)
                    report_service = report.report_name

                    if report.report_type in ['qweb-html', 'qweb-pdf']:
                        result, format = self.pool['report'].get_pdf(cr, uid, [res_id], report_service, context=ctx), 'pdf'
                    else:
                        result, format = openerp.report.render_report(cr, uid, [res_id], report_service, {'model': template.model}, ctx)
            
                    # TODO in trunk, change return format to binary to match message_post expected format
                    result = base64.b64encode(result)
                    if not report_name:
                        report_name = 'report.' + report_service
                    ext = "." + format
                    if not report_name.endswith(ext):
                        report_name += ext
                    attachments.append((report_name, result))
                    results[res_id]['attachments'] = attachments

        return results

    @api.cr_uid_id_context
    def send_mail(self, cr, uid, template_id, res_id, force_send=False, raise_exception=False, context=None):
        """Generates a new mail message for the given template and record,
        and schedules it for delivery through the ``mail`` module's scheduler.

        :param int template_id: id of the template to render
        :param int res_id: id of the record to render the template with
                           (model is taken from the template)
        :param bool force_send: if True, the generated mail.message is
            immediately sent after being created, as if the scheduler
            was executed for this message only.
        :returns: id of the mail.message that was created
        """
        if context is None:
            context = {}
        mail_mail = self.pool.get('mail.mail')
        ir_attachment = self.pool.get('ir.attachment')

        # create a mail_mail based on values, without attachments
        values = self.generate_email(cr, uid, template_id, res_id, context=context)
        if not values.get('email_from'):
            raise osv.except_osv(_('Warning!'), _("Sender email is missing or empty after template rendering. Specify one to deliver your message"))
        # Transform partner_ids into the one2many command expected on mail.mail.
        values['recipient_ids'] = [(4, pid) for pid in values.get('partner_ids', list())]
        attachment_ids = values.pop('attachment_ids', [])
        attachments = values.pop('attachments', [])
        msg_id = mail_mail.create(cr, uid, values, context=context)
        mail = mail_mail.browse(cr, uid, msg_id, context=context)

        # manage attachments
        for attachment in attachments:
            attachment_data = {
                'name': attachment[0],
                'datas_fname': attachment[0],
                'datas': attachment[1],
                'res_model': 'mail.message',
                'res_id': mail.mail_message_id.id,
            }
            context = dict(context)
            context.pop('default_type', None)
            attachment_ids.append(ir_attachment.create(cr, uid, attachment_data, context=context))
        if attachment_ids:
            values['attachment_ids'] = [(6, 0, attachment_ids)]
            mail_mail.write(cr, uid, msg_id, {'attachment_ids': [(6, 0, attachment_ids)]}, context=context)

        if force_send:
            mail_mail.send(cr, uid, [msg_id], raise_exception=raise_exception, context=context)
        return msg_id

    # Compatibility method
    def render_template(self, cr, uid, template, model, res_id, context=None):
        return self.render_template_batch(cr, uid, template, model, [res_id], context)[res_id]

    def get_email_template(self, cr, uid, template_id=False, record_id=None, context=None):
        return self.get_email_template_batch(cr, uid, template_id, [record_id], context)[record_id]

    def generate_email(self, cr, uid, template_id, res_id, context=None):
        return self.generate_email_batch(cr, uid, template_id, [res_id], context=context)[res_id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kenorb/BitTorrent | twisted/conch/scripts/cftp.py | 4 | 27273 | # -*- test-case-name: twisted.conch.test.test_cftp -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# $Id: cftp.py,v 1.65 2004/03/11 00:29:14 z3p Exp $
#""" Implementation module for the `cftp` command.
#"""
from twisted.conch.client import agent, connect, default, options
from twisted.conch.error import ConchError
from twisted.conch.ssh import connection, common
from twisted.conch.ssh import channel, filetransfer
from twisted.protocols import basic
from twisted.internet import reactor, stdio, defer, utils
from twisted.python import log, usage, failure
import os, sys, getpass, struct, tty, fcntl, base64, signal, stat, errno
import fnmatch, pwd, time, glob
class ClientOptions(options.ConchOptions):
    """Command-line option parser for the cftp client (host/path handling)."""

    synopsis = """Usage: cftp [options] [user@]host
       cftp [options] [user@]host[:dir[/]]
       cftp [options] [user@]host[:file [localfile]]
"""

    optParameters = [
                    ['buffersize', 'B', 32768, 'Size of the buffer to use for sending/receiving.'],
                    ['batchfile', 'b', None, 'File to read commands from, or \'-\' for stdin.'],
                    ['requests', 'R', 5, 'Number of requests to make before waiting for a reply.'],
                    ['subsystem', 's', 'sftp', 'Subsystem/server program to connect to.']]
    zsh_altArgDescr = {"buffersize":"Size of send/receive buffer (default: 32768)"}
    #zsh_multiUse = ["foo", "bar"]
    #zsh_mutuallyExclusive = [("foo", "bar"), ("bar", "baz")]
    #zsh_actions = {"foo":'_files -g "*.foo"', "bar":"(one two three)"}
    #zsh_actionDescr = {"logfile":"log file name", "random":"random seed"}
    zsh_extras = ['2::localfile:{if [[ $words[1] == *:* ]]; then; _files; fi}']

    def parseArgs(self, host, localPath=None):
        """Split an optional ':remotePath' suffix off *host* and store host,
        remotePath and localPath in the option dictionary."""
        self['remotePath'] = ''
        if ':' in host:
            host, self['remotePath'] = host.split(':', 1)
            # Bug fix: str.rstrip returns a NEW string -- the previous code
            # called it and discarded the result, so a trailing '/' was never
            # actually removed from the remote path.
            self['remotePath'] = self['remotePath'].rstrip('/')
        self['host'] = host
        self['localPath'] = localPath
def run():
    """Parse command-line arguments, set up logging, open the connection
    and run the reactor until the session ends."""
#    import hotshot
#    prof = hotshot.Profile('cftp.prof')
#    prof.start()
    args = sys.argv[1:]
    if '-l' in args: # cvs is an idiot
        # Reorder a misplaced '-l <user>' pair to the front of the args.
        i = args.index('-l')
        args = args[i:i+2]+args
        del args[i+2:i+4]
    options = ClientOptions()
    try:
        options.parseOptions(args)
    except usage.UsageError, u:
        print 'ERROR: %s' % u
        sys.exit(1)
    if options['log']:
        # Keep stdout clean for user interaction; log to stderr instead.
        realout = sys.stdout
        log.startLogging(sys.stderr)
        sys.stdout = realout
    else:
        log.discardLogs()
    doConnect(options)
    reactor.run()
#    prof.stop()
#    prof.close()
def handleError():
    """Top-level error trap: record exit status 2, stop the reactor,
    log the currently-handled failure, then re-raise it."""
    from twisted.python import failure
    global exitStatus
    exitStatus = 2
    try:
        reactor.stop()
    except: pass
    log.err(failure.Failure())
    raise
def doConnect(options):
    """Split an optional user@ prefix off the host, apply defaults for user
    and port, then open the SSH connection with host-key verification and
    user authentication; errors terminate via _ebExit."""
#    log.deferr = handleError # HACK
    if '@' in options['host']:
        options['user'], options['host'] = options['host'].split('@',1)
    host = options['host']
    if not options['user']:
        options['user'] = getpass.getuser()
    if not options['port']:
        options['port'] = 22
    else:
        options['port'] = int(options['port'])
    host = options['host']
    port = options['port']
    conn = SSHConnection()
    conn.options = options
    vhk = default.verifyHostKey
    uao = default.SSHUserAuthClient(options['user'], options, conn)
    connect.connect(host, port, options, vhk, uao).addErrback(_ebExit)
def _ebExit(f):
    """Errback of last resort: print the failure's message and stop the
    reactor so the process exits."""
    #global exitStatus
    if hasattr(f.value, 'value'):
        # Prefer the wrapped exception's own message when present.
        s = f.value.value
    else:
        s = str(f)
    print s
    #exitStatus = "conch: exiting with error %s" % f
    try:
        reactor.stop()
    except: pass  # reactor may already be stopped
def _ignore(*args): pass
class FileWrapper:
    """Wrap a file object for progress reporting.

    On construction the file's size is learned by seeking to EOF, and
    ``total`` tracks how many bytes have been transferred so far.  Any
    attribute not defined here is delegated to the wrapped file.
    """

    def __init__(self, f):
        self.f = f
        self.total = 0.0
        # Learn the size: seek to the end and read back the offset.
        f.seek(0, 2)
        self.size = f.tell()

    def __getattr__(self, name):
        # Delegate everything else to the underlying file object.
        return getattr(self.f, name)
class StdioClient(basic.LineReceiver):
ps = 'cftp> '
delimiter = '\n'
def __init__(self, client, f = None):
self.client = client
self.currentDirectory = ''
self.file = f
self.useProgressBar = (not f and 1) or 0
def connectionMade(self):
self.client.realPath('').addCallback(self._cbSetCurDir)
def _cbSetCurDir(self, path):
self.currentDirectory = path
self._newLine()
def lineReceived(self, line):
    """Handle one line of user (or batch-file) input.

    '!cmd' runs a local shell command via cmd_EXEC; anything else is
    dispatched to the matching cmd_<NAME> method.  Unknown commands go
    through the errback path, which prints the help text.
    """
    if self.client.transport.localClosed:
        return
    log.msg('got line %s' % repr(line))
    line = line.lstrip()
    if not line:
        self._newLine()
        return
    if self.file and line.startswith('-'):
        # In batch mode a leading '-' means "ignore errors for this command".
        self.ignoreErrors = 1
        line = line[1:]
    else:
        self.ignoreErrors = 0
    if ' ' in line:
        command, rest = line.split(' ', 1)
        rest = rest.lstrip()
    else:
        command, rest = line, ''
    if command.startswith('!'): # local shell command
        f = self.cmd_EXEC
        rest = (command[1:] + ' ' + rest).strip()
    else:
        command = command.upper()
        log.msg('looking up cmd %s' % command)
        f = getattr(self, 'cmd_%s' % command, None)
    if f is not None:
        # maybeDeferred lets cmd_* return either a value or a Deferred.
        d = defer.maybeDeferred(f, rest)
        d.addCallback(self._cbCommand)
        d.addErrback(self._ebCommand)
    else:
        self._ebCommand(failure.Failure(NotImplementedError(
            "No command called `%s'" % command)))
        self._newLine()
def _printFailure(self, f):
log.msg(f)
e = f.trap(NotImplementedError, filetransfer.SFTPError, OSError, IOError)
if e == NotImplementedError:
self.transport.write(self.cmd_HELP(''))
elif e == filetransfer.SFTPError:
self.transport.write("remote error %i: %s\n" %
(f.value.code, f.value.message))
elif e in (OSError, IOError):
self.transport.write("local error %i: %s\n" %
(f.value.errno, f.value.strerror))
def _newLine(self):
    """Print the prompt; in batch mode echo and execute the next file line
    instead of waiting for interactive input."""
    if self.client.transport.localClosed:
        return
    self.transport.write(self.ps)
    self.ignoreErrors = 0
    if self.file:
        l = self.file.readline()
        if not l:
            # Batch file exhausted: end the session.
            self.client.transport.loseConnection()
        else:
            self.transport.write(l)
            self.lineReceived(l.strip())
def _cbCommand(self, result):
if result is not None:
self.transport.write(result)
if not result.endswith('\n'):
self.transport.write('\n')
self._newLine()
def _ebCommand(self, f):
self._printFailure(f)
if self.file and not self.ignoreErrors:
self.client.transport.loseConnection()
self._newLine()
def cmd_CD(self, path):
path, rest = self._getFilename(path)
if not path.endswith('/'):
path += '/'
newPath = path and os.path.join(self.currentDirectory, path) or ''
d = self.client.openDirectory(newPath)
d.addCallback(self._cbCd)
d.addErrback(self._ebCommand)
return d
def _cbCd(self, directory):
directory.close()
d = self.client.realPath(directory.name)
d.addCallback(self._cbCurDir)
return d
def _cbCurDir(self, path):
self.currentDirectory = path
def cmd_CHGRP(self, rest):
grp, rest = rest.split(None, 1)
path, rest = self._getFilename(rest)
grp = int(grp)
d = self.client.getAttrs(path)
d.addCallback(self._cbSetUsrGrp, path, grp=grp)
return d
def cmd_CHMOD(self, rest):
mod, rest = rest.split(None, 1)
path, rest = self._getFilename(rest)
mod = int(mod, 8)
d = self.client.setAttrs(path, {'permissions':mod})
d.addCallback(_ignore)
return d
def cmd_CHOWN(self, rest):
usr, rest = rest.split(None, 1)
path, rest = self._getFilename(rest)
usr = int(usr)
d = self.client.getAttrs(path)
d.addCallback(self._cbSetUsrGrp, path, usr=usr)
return d
def _cbSetUsrGrp(self, attrs, path, usr=None, grp=None):
new = {}
new['uid'] = (usr is not None) and usr or attrs['uid']
new['gid'] = (grp is not None) and grp or attrs['gid']
d = self.client.setAttrs(path, new)
d.addCallback(_ignore)
return d
def cmd_GET(self, rest):
remote, rest = self._getFilename(rest)
if '*' in remote or '?' in remote: # wildcard
if rest:
local, rest = self._getFilename(rest)
if not os.path.isdir(local):
return "Wildcard get with non-directory target."
else:
local = ''
d = self._remoteGlob(remote)
d.addCallback(self._cbGetMultiple, local)
return d
if rest:
local, rest = self._getFilename(rest)
else:
local = os.path.split(remote)[1]
log.msg((remote, local))
lf = file(local, 'w', 0)
path = os.path.join(self.currentDirectory, remote)
d = self.client.openFile(path, filetransfer.FXF_READ, {})
d.addCallback(self._cbGetOpenFile, lf)
d.addErrback(self._ebCloseLf, lf)
return d
def _cbGetMultiple(self, files, local):
#if self._useProgressBar: # one at a time
# XXX this can be optimized for times w/o progress bar
return self._cbGetMultipleNext(None, files, local)
def _cbGetMultipleNext(self, res, files, local):
if isinstance(res, failure.Failure):
self._printFailure(res)
elif res:
self.transport.write(res)
if not res.endswith('\n'):
self.transport.write('\n')
if not files:
return
f = files.pop(0)[0]
lf = file(os.path.join(local, os.path.split(f)[1]), 'w', 0)
path = os.path.join(self.currentDirectory, f)
d = self.client.openFile(path, filetransfer.FXF_READ, {})
d.addCallback(self._cbGetOpenFile, lf)
d.addErrback(self._ebCloseLf, lf)
d.addBoth(self._cbGetMultipleNext, files, local)
return d
def _ebCloseLf(self, f, lf):
lf.close()
return f
def _cbGetOpenFile(self, rf, lf):
return rf.getAttrs().addCallback(self._cbGetFileSize, rf, lf)
def _cbGetFileSize(self, attrs, rf, lf):
    """With the remote file's attributes known, start the download.

    Refuses anything that is not a regular file.  Issues 'requests'
    overlapping reads of 'buffersize' bytes each; every _cbGetRead keeps
    requesting further chunks until EOF, and the DeferredList fires when
    all pipelines have drained.
    """
    if not stat.S_ISREG(attrs['permissions']):
        rf.close()
        lf.close()
        return "Can't get non-regular file: %s" % rf.name
    rf.size = attrs['size']
    bufferSize = self.client.transport.conn.options['buffersize']
    numRequests = self.client.transport.conn.options['requests']
    rf.total = 0.0
    dList = []
    chunks = []
    startTime = time.time()
    for i in range(numRequests):
        # Prime each pipeline with an empty "previous read".
        d = self._cbGetRead('', rf, lf, chunks, 0, bufferSize, startTime)
        dList.append(d)
    dl = defer.DeferredList(dList, fireOnOneErrback=1)
    dl.addCallback(self._cbGetDone, rf, lf)
    return dl
def _getNextChunk(self, chunks):
    """Pick the next (start, size) byte range to request.

    *chunks* is an ordered list of (start, end) ranges already requested.
    Gaps left by short reads are re-requested first; otherwise a fresh
    buffersize-sized range is appended after the last one.  Returns None
    once an 'eof' marker (set by _cbGetRead) is reached.
    """
    end = 0
    for chunk in chunks:
        if end == 'eof':
            return # nothing more to get
        if end != chunk[0]:
            # A gap before this chunk: claim exactly the gap.
            i = chunks.index(chunk)
            chunks.insert(i, (end, chunk[0]))
            return (end, chunk[0] - end)
        end = chunk[1]
    bufSize = int(self.client.transport.conn.options['buffersize'])
    chunks.append((end, end + bufSize))
    return (end, bufSize)
def _cbGetRead(self, data, rf, lf, chunks, start, size, startTime):
    """Handle one completed chunk read (data, EOF or failure), then chain
    the next readChunk request for this pipeline.

    Writes received data at its offset in the local file; short reads
    shrink the recorded range so the remainder is re-requested as a gap;
    EOF marks the chunk list so _getNextChunk stops issuing requests.
    """
    if data and isinstance(data, failure.Failure):
        # Server signalled EOF for this range.
        log.msg('get read err: %s' % data)
        reason = data
        reason.trap(EOFError)
        i = chunks.index((start, start + size))
        del chunks[i]
        chunks.insert(i, (start, 'eof'))
    elif data:
        log.msg('get read data: %i' % len(data))
        lf.seek(start)
        lf.write(data)
        if len(data) != size:
            # Short read: shrink our claim; _getNextChunk re-requests the rest.
            log.msg('got less than we asked for: %i < %i' %
                    (len(data), size))
            i = chunks.index((start, start + size))
            del chunks[i]
            chunks.insert(i, (start, start + len(data)))
        rf.total += len(data)
    if self.useProgressBar:
        self._printProgessBar(rf, startTime)
    chunk = self._getNextChunk(chunks)
    if not chunk:
        return
    else:
        start, length = chunk
    log.msg('asking for %i -> %i' % (start, start+length))
    d = rf.readChunk(start, length)
    d.addBoth(self._cbGetRead, rf, lf, chunks, start, length, startTime)
    return d
def _cbGetDone(self, ignored, rf, lf):
log.msg('get done')
rf.close()
lf.close()
if self.useProgressBar:
self.transport.write('\n')
return "Transferred %s to %s" % (rf.name, lf.name)
def cmd_PUT(self, rest):
    """Upload a local file — or a wildcard set of files — to the server."""
    local, rest = self._getFilename(rest)
    if '*' in local or '?' in local: # wildcard
        if rest:
            # Explicit target given: it must be a remote directory, so
            # fetch its attributes first and continue in _cbPutTargetAttrs.
            remote, rest = self._getFilename(rest)
            path = os.path.join(self.currentDirectory, remote)
            d = self.client.getAttrs(path)
            d.addCallback(self._cbPutTargetAttrs, remote, local)
            return d
        else:
            # No target: upload the matches into the current directory.
            remote = ''
            files = glob.glob(local)
            return self._cbPutMultipleNext(None, files, remote)
    if rest:
        remote, rest = self._getFilename(rest)
    else:
        remote = os.path.split(local)[1]
    lf = file(local, 'r')
    path = os.path.join(self.currentDirectory, remote)
    d = self.client.openFile(path, filetransfer.FXF_WRITE|filetransfer.FXF_CREAT, {})
    d.addCallback(self._cbPutOpenFile, lf)
    d.addErrback(self._ebCloseLf, lf)
    return d
def _cbPutTargetAttrs(self, attrs, path, local):
    """Callback with the attributes of a wildcard put's explicit target.

    The target must be a remote directory; expand the local wildcard and
    upload the matches one at a time.
    """
    if not stat.S_ISDIR(attrs['permissions']):
        return "Wildcard put with non-directory target."
    # Bug fix: the original referenced an undefined name 'files' here and
    # raised NameError; expand the local glob exactly as cmd_PUT's
    # no-target branch does.
    files = glob.glob(local)
    return self._cbPutMultipleNext(None, files, path)
def _cbPutMultipleNext(self, res, files, path):
if isinstance(res, failure.Failure):
self._printFailure(res)
elif res:
self.transport.write(res)
if not res.endswith('\n'):
self.transport.write('\n')
f = None
while files and not f:
try:
f = files.pop(0)
lf = file(f, 'r')
except:
self._printFailure(failure.Failure())
f = None
if not f:
return
name = os.path.split(f)[1]
remote = os.path.join(self.currentDirectory, path, name)
log.msg((name, remote, path))
d = self.client.openFile(remote, filetransfer.FXF_WRITE|filetransfer.FXF_CREAT, {})
d.addCallback(self._cbPutOpenFile, lf)
d.addErrback(self._ebCloseLf, lf)
d.addBoth(self._cbPutMultipleNext, files, path)
return d
def _cbPutOpenFile(self, rf, lf):
numRequests = self.client.transport.conn.options['requests']
if self.useProgressBar:
lf = FileWrapper(lf)
dList = []
chunks = []
startTime = time.time()
for i in range(numRequests):
d = self._cbPutWrite(None, rf, lf, chunks, startTime)
if d:
dList.append(d)
dl = defer.DeferredList(dList, fireOnOneErrback=1)
dl.addCallback(self._cbPutDone, rf, lf)
return dl
def _cbPutWrite(self, ignored, rf, lf, chunks, startTime):
chunk = self._getNextChunk(chunks)
start, size = chunk
lf.seek(start)
data = lf.read(size)
if self.useProgressBar:
lf.total += len(data)
self._printProgessBar(lf, startTime)
if data:
d = rf.writeChunk(start, data)
d.addCallback(self._cbPutWrite, rf, lf, chunks, startTime)
return d
else:
return
def _cbPutDone(self, ignored, rf, lf):
lf.close()
rf.close()
if self.useProgressBar:
self.transport.write('\n')
return 'Transferred %s to %s' % (lf.name, rf.name)
def cmd_LCD(self, path):
os.chdir(path)
def cmd_LN(self, rest):
linkpath, rest = self._getFilename(rest)
targetpath, rest = self._getFilename(rest)
linkpath, targetpath = map(
lambda x: os.path.join(self.currentDirectory, x),
(linkpath, targetpath))
return self.client.makeLink(linkpath, targetpath).addCallback(_ignore)
def cmd_LS(self, rest):
# possible lines:
# ls current directory
# ls name_of_file that file
# ls name_of_directory that directory
# ls some_glob_string current directory, globbed for that string
options = []
rest = rest.split()
while rest and rest[0] and rest[0][0] == '-':
opts = rest.pop(0)[1:]
for o in opts:
if o == 'l':
options.append('verbose')
elif o == 'a':
options.append('all')
rest = ' '.join(rest)
path, rest = self._getFilename(rest)
if not path:
fullPath = self.currentDirectory + '/'
else:
fullPath = os.path.join(self.currentDirectory, path)
d = self._remoteGlob(fullPath)
d.addCallback(self._cbDisplayFiles, options)
return d
def _cbDisplayFiles(self, files, options):
    """Format a directory listing for display.

    *files* is a list of (shortname, longname, ...) entries; it is sorted
    in place.  Hidden (dot) files are skipped unless 'all' is in
    *options*; 'verbose' selects the long (ls -l style) lines.  Returns
    the joined listing, or None when there is nothing to show.
    """
    files.sort()
    if 'all' not in options:
        files = [entry for entry in files if not entry[0].startswith('.')]
    # Column 0 holds the short name, column 1 the verbose line.
    column = 1 if 'verbose' in options else 0
    lines = [entry[column] for entry in files]
    return '\n'.join(lines) if lines else None
def cmd_MKDIR(self, path):
path, rest = self._getFilename(path)
path = os.path.join(self.currentDirectory, path)
return self.client.makeDirectory(path, {}).addCallback(_ignore)
def cmd_RMDIR(self, path):
path, rest = self._getFilename(path)
path = os.path.join(self.currentDirectory, path)
return self.client.removeDirectory(path).addCallback(_ignore)
def cmd_LMKDIR(self, path):
os.system("mkdir %s" % path)
def cmd_RM(self, path):
path, rest = self._getFilename(path)
path = os.path.join(self.currentDirectory, path)
return self.client.removeFile(path).addCallback(_ignore)
def cmd_LLS(self, rest):
os.system("ls %s" % rest)
def cmd_RENAME(self, rest):
oldpath, rest = self._getFilename(rest)
newpath, rest = self._getFilename(rest)
oldpath, newpath = map (
lambda x: os.path.join(self.currentDirectory, x),
(oldpath, newpath))
return self.client.renameFile(oldpath, newpath).addCallback(_ignore)
def cmd_EXIT(self, ignored):
self.client.transport.loseConnection()
cmd_QUIT = cmd_EXIT
def cmd_VERSION(self, ignored):
return "SFTP version %i" % self.client.version
def cmd_HELP(self, ignored):
return """Available commands:
cd path Change remote directory to 'path'.
chgrp gid path Change gid of 'path' to 'gid'.
chmod mode path Change mode of 'path' to 'mode'.
chown uid path Change uid of 'path' to 'uid'.
exit Disconnect from the server.
get remote-path [local-path] Get remote file.
help Get a list of available commands.
lcd path Change local directory to 'path'.
lls [ls-options] [path] Display local directory listing.
lmkdir path Create local directory.
ln linkpath targetpath Symlink remote file.
lpwd Print the local working directory.
ls [-l] [path] Display remote directory listing.
mkdir path Create remote directory.
progress Toggle progress bar.
put local-path [remote-path] Put local file.
pwd Print the remote working directory.
quit Disconnect from the server.
rename oldpath newpath Rename remote file.
rmdir path Remove remote directory.
rm path Remove remote file.
version Print the SFTP version.
? Synonym for 'help'.
"""
def cmd_PWD(self, ignored):
return self.currentDirectory
def cmd_LPWD(self, ignored):
return os.getcwd()
def cmd_PROGRESS(self, ignored):
self.useProgressBar = not self.useProgressBar
return "%ssing progess bar." % (self.useProgressBar and "U" or "Not u")
def cmd_EXEC(self, rest):
shell = pwd.getpwnam(getpass.getuser())[6]
print repr(rest)
if rest:
cmds = ['-c', rest]
return utils.getProcessOutput(shell, cmds, errortoo=1)
else:
os.system(shell)
# accessory functions
def _remoteGlob(self, fullPath):
log.msg('looking up %s' % fullPath)
head, tail = os.path.split(fullPath)
if '*' in tail or '?' in tail:
glob = 1
else:
glob = 0
if tail and not glob: # could be file or directory
# try directory first
d = self.client.openDirectory(fullPath)
d.addCallback(self._cbOpenList, '')
d.addErrback(self._ebNotADirectory, head, tail)
else:
d = self.client.openDirectory(head)
d.addCallback(self._cbOpenList, tail)
return d
def _cbOpenList(self, directory, glob):
files = []
d = directory.read()
d.addBoth(self._cbReadFile, files, directory, glob)
return d
def _ebNotADirectory(self, reason, path, glob):
d = self.client.openDirectory(path)
d.addCallback(self._cbOpenList, glob)
return d
def _cbReadFile(self, files, l, directory, glob):
    """Accumulate directory entries into *l* until the server reports EOF.

    When *glob* is a non-empty pattern, only entries whose short name
    matches it are kept.  Reads the directory repeatedly, chaining itself
    as both callback and errback; on EOFError the directory is closed and
    the accumulated list is returned.
    """
    if not isinstance(files, failure.Failure):
        if glob:
            l.extend([f for f in files if fnmatch.fnmatch(f[0], glob)])
        else:
            l.extend(files)
        d = directory.read()
        d.addBoth(self._cbReadFile, l, directory, glob)
        return d
    else:
        reason = files
        reason.trap(EOFError)
        directory.close()
        return l
def _abbrevSize(self, size):
    """Return *size* (in bytes) as a short human-readable string,
    e.g. '2.0kb' or '1.5MB'.

    Adapted from
    http://mail.python.org/pipermail/python-list/1999-December/018395.html
    """
    _abbrevs = [
        (1 << 50, 'PB'),
        (1 << 40, 'TB'),
        (1 << 30, 'GB'),
        (1 << 20, 'MB'),
        (1 << 10, 'kb'),
        (1, '')
    ]
    for factor, suffix in _abbrevs:
        if size > factor:
            break
    # Bug fix: force true division — under Python 2 the original int/int
    # division truncated, reporting e.g. 1536 bytes as '1.0kb'.  The 'L'
    # long-literal suffixes are also dropped (same values, Py3-compatible).
    return '%.1f' % (float(size) / factor) + suffix
def _abbrevTime(self, t):
    """Render a duration of *t* seconds as 'H:MM:SS', or 'MM:SS' when it
    is an hour or less."""
    secs = int(t)
    if t > 3600: # 1 hour
        hours, secs = divmod(secs, 3600)
        mins, secs = divmod(secs, 60)
        return "%i:%02i:%02i" % (hours, mins, secs)
    mins, secs = divmod(secs, 60)
    return "%02i:%02i" % (mins, secs)
def _printProgessBar(self, f, startTime):
    """Redraw the one-line transfer progress display (name, percentage,
    bytes moved, speed, estimated time left).

    Note: the 'Progess' typo is kept because callers use this name.
    """
    diff = time.time() - startTime
    total = f.total
    try:
        # Ask the terminal for its size; fall back to 80 columns.
        winSize = struct.unpack('4H',
                                fcntl.ioctl(0, tty.TIOCGWINSZ, '12345679'))
    except IOError:
        winSize = [None, 80]
    speed = total/diff
    if speed:
        timeLeft = (f.size - total) / speed
    else:
        timeLeft = 0
    front = f.name
    # NOTE(review): under Python 2, total/f.size is integer division, so
    # the percentage reads 0% until the transfer completes; diff can also
    # be near zero on the first call — confirm before trusting the figures.
    back = '%3i%% %s %sps %s ' % ((total/f.size)*100, self._abbrevSize(total),
                                  self._abbrevSize(total/diff), self._abbrevTime(timeLeft))
    spaces = (winSize[1] - (len(front) + len(back) + 1)) * ' '
    self.transport.write('\r%s%s%s' % (front, spaces, back))
def _getFilename(self, line):
    """Pop the first (possibly quoted) filename off *line*.

    Returns a (filename, rest-of-line) tuple, or (None, '') when the line
    is empty.  Quoted names may contain backslash-escaped quote or
    backslash characters.  Raises IndexError on a bad escape or an
    unterminated quote.
    """
    # Bug fix: str.lstrip() returns a new string; the original discarded
    # the result, so a quoted name after leading spaces was mis-parsed.
    line = line.lstrip()
    if not line:
        return None, ''
    if line[0] in '\'"':
        ret = []
        line = list(line)
        try:
            for i in range(1, len(line)):
                c = line[i]
                if c == line[0]:
                    # Closing quote: the name is complete.
                    return ''.join(ret), ''.join(line[i + 1:]).lstrip()
                elif c == '\\': # quoted character
                    del line[i]
                    if line[i] not in '\'"\\':
                        raise IndexError("bad quote: \\%s" % line[i])
                    ret.append(line[i])
                else:
                    ret.append(line[i])
        except IndexError:
            raise IndexError("unterminated quote")
        # Bug fix: an unterminated quote with no escapes fell through to
        # list.split() below and crashed with AttributeError.
        raise IndexError("unterminated quote")
    ret = line.split(None, 1)
    if len(ret) == 1:
        return ret[0], ''
    else:
        # Return a tuple for a consistent return type (was a list).
        return tuple(ret)
StdioClient.__dict__['cmd_?'] = StdioClient.cmd_HELP
class SSHConnection(connection.SSHConnection):
    """SSH connection that opens a single sftp session once the
    connection service is ready."""
    def serviceStarted(self):
        self.openChannel(SSHSession())
class SSHSession(channel.SSHChannel):
    """Session channel that starts the sftp subsystem on the server and
    bridges it to the local stdio client."""

    name = 'session'

    def channelOpen(self, foo):
        """Request the sftp subsystem (or exec an explicit server path)."""
        log.msg('session %s open' % self.id)
        if self.conn.options['subsystem'].startswith('/'):
            # An absolute path means "run this server binary" rather than
            # the named subsystem.
            request = 'exec'
        else:
            request = 'subsystem'
        d = self.conn.sendRequest(self, request,
            common.NS(self.conn.options['subsystem']), wantReply=1)
        d.addCallback(self._cbSubsystem)
        d.addErrback(_ebExit)

    def _cbSubsystem(self, result):
        """Subsystem started: attach the file-transfer protocol and the
        stdio user interface (interactive, or batch when a file is given)."""
        self.client = filetransfer.FileTransferClient()
        self.client.makeConnection(self)
        # Route incoming channel data straight into the protocol parser.
        self.dataReceived = self.client.dataReceived
        f = None
        if self.conn.options['batchfile']:
            fn = self.conn.options['batchfile']
            if fn != '-':
                f = file(fn)
        self.stdio = stdio.StandardIO(StdioClient(self.client, f))

    def extReceived(self, t, data):
        """Forward the server's stderr stream to our stderr."""
        if t==connection.EXTENDED_DATA_STDERR:
            log.msg('got %s stderr data' % len(data))
            sys.stderr.write(data)
            sys.stderr.flush()

    def eofReceived(self):
        log.msg('got eof')
        self.stdio.closeStdin()

    def closeReceived(self):
        log.msg('remote side closed %s' % self)
        self.conn.sendClose(self)

    def closed(self):
        # Session over: stop the reactor (ignore "already stopped").
        try:
            reactor.stop()
        except:
            pass

    def stopWriting(self):
        self.stdio.pauseProducing()

    def startWriting(self):
        self.stdio.resumeProducing()
if __name__ == '__main__':
run()
| gpl-3.0 |
NSLS-II/filestore | filestore/test/test_write.py | 4 | 5524 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import logging
import numpy as np
from itertools import chain, repeat
import filestore.api as fsa
import filestore.file_writers as fs_write
import filestore.handlers as fs_read
from .utils import fs_setup, fs_teardown
import pytest
import uuid
from numpy.testing import assert_array_equal
import tempfile
import shutil
import os
logger = logging.getLogger(__name__)
# Module-level fixtures for the write tests.
CLEAN_FILES = True   # remove BASE_PATH during teardown when True
BASE_PATH = None     # temp directory, created in setup_module
# Random database names so concurrent test runs cannot collide.
# (Bug fix: the original assigned db_name twice; the redundant duplicate
# assignment is removed.)
db_name = str(uuid.uuid4())
dummy_db_name = str(uuid.uuid4())
conn = None
def setup_module(module):
    """Create the filestore test database and a temp directory for files."""
    fs_setup()
    global BASE_PATH
    BASE_PATH = tempfile.mkdtemp()
def teardown_module(module):
    """Tear down the test database and, unless disabled, delete the
    temporary file directory."""
    fs_teardown()
    if CLEAN_FILES:
        shutil.rmtree(BASE_PATH)
def _npsave_helper(dd, base_path):
    """Round-trip array *dd* through filestore's ndarray writer and assert
    the retrieved copy is identical."""
    eid = fs_write.save_ndarray(dd, base_path)
    with fsa.handler_context({'npy': fs_read.NpyHandler}):
        ret = fsa.retrieve(eid)
    assert_array_equal(dd, ret)
def test_np_save():
    """Save/retrieve round trips for int, float and 1-D arrays, with and
    without an explicit base path.

    NOTE(review): this is a nose-style 'yield test'; modern pytest no
    longer collects these — confirm the test runner in use.
    """
    shape = (7, 5)
    dd = np.arange(np.prod(shape)).reshape(*shape)
    for d, bp in zip([dd, dd.astype('float'), np.ones(15)],
                     chain([None, ], repeat(BASE_PATH))):
        yield _npsave_helper, d, bp
def test_file_exist_fail():
test_path = os.path.join(BASE_PATH, str(uuid.uuid4()) + '.npy')
test_w = fs_write.NpyWriter(test_path)
test_w.add_data(5)
with pytest.raises(IOError):
fs_write.NpyWriter(test_path)
def test_multi_write_fail():
test_path = os.path.join(BASE_PATH, str(uuid.uuid4()) + '.npy')
test_w = fs_write.NpyWriter(test_path)
test_w.add_data(5)
with pytest.raises(RuntimeError):
test_w.add_data(6)
def test_event_custom_fail():
test_path = os.path.join(BASE_PATH, str(uuid.uuid4()) + '.npy')
test_w = fs_write.NpyWriter(test_path)
with pytest.raises(ValueError):
test_w.add_data(6, None, {'aardvark': 3.14})
def test_file_custom_fail():
test_path = os.path.join(BASE_PATH, str(uuid.uuid4()) + '.npy')
with pytest.raises(ValueError):
fs_write.NpyWriter(test_path, {'aardvark': 3.14})
def test_sneaky_write_fail():
test_path = os.path.join(BASE_PATH, str(uuid.uuid4()) + '.npy')
test_w = fs_write.NpyWriter(test_path)
with open(test_path, 'w') as fout:
fout.write('aardvark')
with pytest.raises(IOError):
test_w.add_data(5)
def test_give_uid():
test_path = os.path.join(BASE_PATH, str(uuid.uuid4()) + '.npy')
uid = str(uuid.uuid4())
with fs_write.NpyWriter(test_path) as fout:
eid = fout.add_data([1, 2, 3], uid)
assert uid == eid
def test_custom():
test_path = os.path.join(BASE_PATH, str(uuid.uuid4()) + '.npy')
dd = np.random.rand(500, 500)
with fs_write.NpyWriter(test_path, resource_kwargs={'mmap_mode': 'r'}) as f:
eid = f.add_data(dd)
with fsa.handler_context({'npy': fs_read.NpyHandler}):
ret = fsa.retrieve(eid)
assert_array_equal(dd, ret)
| bsd-3-clause |
pboechat/OperatorGraph | auto_tuner/Configuration.py | 1 | 9246 | import xml.sax
import ParseUtils
from Enums import *
####################################################################################################
class BaseConfiguration(xml.sax.ContentHandler):
    """Base class for XML-backed configuration objects; subclasses
    populate their attributes in startElement()."""
    def loadFromDisk(self, filename):
        # Parse the file using this handler instance to fill in fields.
        xml.sax.parse(filename, self)
####################################################################################################
class Pipeline(BaseConfiguration):
def __init__(self):
#self.globalSeed = -1
self.deviceId = 0
self.pgaBasePath = ""
self.analyzerBin = ""
self.partitionBin = ""
self.binTemplatePath = ""
self.workspaceTemplatePath = ""
self.databaseScript = ""
self.compileScript = ""
self.compileDependenciesScript = ""
self.cachePath = ""
self.generatePartitions = False
self.prepareWorkspaces = False
self.compile = False
self.findMaxNumAxioms = False
self.run = False
self.processResults = False
self.saveToDatabase = False
self.decorateDotFiles = False
self.analyzerExecutionTimeout = 300
self.dependenciesCompilationTimeout = 600
self.partitionCompilationTimeout = 3600
self.partitionExecutionTimeout = 600
self.logLevel = LogLevel.DEBUG
self.keepTemp = False
self.rebuild = False
self.compileDependencies = False
self.useMultiprocess = False
self.automaticPoolSize = False
self.poolSize = 0
self.cudaComputeCapability = None
def startElement(self, name, attributes):
if name == "Pipeline":
#self.globalSeed = ParseUtils.try_to_parse_int(attributes.getValue("globalSeed"), -1)
self.deviceId = ParseUtils.try_to_parse_int(attributes.getValue("deviceId"))
self.pgaBasePath = attributes.getValue("pgaBasePath")
self.analyzerBin = attributes.getValue("analyzerBin")
self.partitionBin = attributes.getValue("partitionBin")
self.binTemplatePath = attributes.getValue("binTemplatePath")
self.workspaceTemplatePath = attributes.getValue("workspaceTemplatePath")
self.databaseScript = attributes.getValue("databaseScript")
self.compileScript = attributes.getValue("compileScript")
self.compileDependenciesScript = attributes.getValue("compileDependenciesScript")
self.cachePath = attributes.getValue("cachePath")
self.generatePartitions = bool(ParseUtils.try_to_parse_int(attributes.getValue("generatePartitions")))
self.prepareWorkspaces = bool(ParseUtils.try_to_parse_int(attributes.getValue("prepareWorkspaces")))
self.compile = bool(ParseUtils.try_to_parse_int(attributes.getValue("compile")))
self.findMaxNumAxioms = bool(ParseUtils.try_to_parse_int(attributes.getValue("findMaxNumAxioms")))
self.run = bool(ParseUtils.try_to_parse_int(attributes.getValue("run")))
self.processResults = bool(ParseUtils.try_to_parse_int(attributes.getValue("processResults")))
self.saveToDatabase = bool(ParseUtils.try_to_parse_int(attributes.getValue("saveToDatabase")))
self.decorateDotFiles = bool(ParseUtils.try_to_parse_int(attributes.getValue("decorateDotFiles")))
self.analyzerExecutionTimeout = ParseUtils.try_to_parse_int(attributes.getValue("analyzerExecutionTimeout"))
self.dependenciesCompilationTimeout = ParseUtils.try_to_parse_int(attributes.getValue("dependenciesCompilationTimeout"))
self.partitionCompilationTimeout = ParseUtils.try_to_parse_int(attributes.getValue("partitionCompilationTimeout"))
self.partitionExecutionTimeout = ParseUtils.try_to_parse_int(attributes.getValue("partitionExecutionTimeout"))
# NOTE: if a valid log level is not found, the default value (LogLevel.DEBUG) is used
if "logLevel" in attributes.keys():
log_level_name = attributes.getValue("logLevel")
for log_level, log_level_name_ in LogLevel.NAMES.items():
if log_level_name_ == log_level_name:
self.logLevel = log_level
self.keepTemp = bool(ParseUtils.try_to_parse_int(attributes.getValue("keepTemp")))
self.rebuild = bool(ParseUtils.try_to_parse_int(attributes.getValue("rebuild")))
self.compileDependencies = bool(ParseUtils.try_to_parse_int(attributes.getValue("compileDependencies")))
self.useMultiprocess = bool(ParseUtils.try_to_parse_int(attributes.getValue("useMultiprocess")))
self.automaticPoolSize = bool(ParseUtils.try_to_parse_int(attributes.getValue("automaticPoolSize")))
self.poolSize = ParseUtils.try_to_parse_int(attributes.getValue("poolSize"))
self.cudaComputeCapability = attributes.getValue("cudaComputeCapability")
####################################################################################################
class Scene(BaseConfiguration):
def __init__(self):
self.runSeed = -1
self.grammarFile = ""
self.templateFile = ""
self.outputPath = ""
self.gridX = 0
self.gridY = 0
self.itemSize = 0
self.maxNumVertices = 0
self.maxNumIndices = 0
self.maxNumElements = 0
self.queuesMem = 0
self.numRuns = 0
self.minNumAxioms = 0
self.maxNumAxioms = 0
self.axiomStep = 0
self.globalMinNumAxioms = -1
self.optimization = 0
self.instrumentation = False
self.whippletreeTechnique = WhippletreeTechnique.MEGAKERNEL
self.H1TO1 = False
self.HRR = False
self.HSTO = -1
self.HTSPECS = ""
self.RSAMPL = 0
self.analyzerSeed = -1
def startElement(self, name, attributes):
if name == "Scene":
if "runSeed" in attributes.keys():
self.runSeed = ParseUtils.try_to_parse_int(attributes.getValue("runSeed"), -1)
self.grammarFile = attributes.getValue("grammarFile")
self.templateFile = attributes.getValue("templateFile")
self.outputPath = attributes.getValue("outputPath")
self.gridX = ParseUtils.try_to_parse_int(attributes.getValue("gridX"))
self.gridY = ParseUtils.try_to_parse_int(attributes.getValue("gridY"))
self.itemSize = ParseUtils.try_to_parse_int(attributes.getValue("itemSize"))
if "maxNumVertices" in attributes.keys():
self.maxNumVertices = ParseUtils.try_to_parse_int(attributes.getValue("maxNumVertices"))
if "maxNumIndices" in attributes.keys():
self.maxNumIndices = ParseUtils.try_to_parse_int(attributes.getValue("maxNumIndices"))
if "maxNumElements" in attributes.keys():
self.maxNumElements = ParseUtils.try_to_parse_int(attributes.getValue("maxNumElements"))
self.queuesMem = ParseUtils.try_to_parse_int(attributes.getValue("queuesMem"))
self.numRuns = ParseUtils.try_to_parse_int(attributes.getValue("numRuns"))
self.minNumAxioms = ParseUtils.try_to_parse_int(attributes.getValue("minNumAxioms"))
self.maxNumAxioms = ParseUtils.try_to_parse_int(attributes.getValue("maxNumAxioms"))
self.axiomStep = ParseUtils.try_to_parse_int(attributes.getValue("axiomStep"))
if "globalMinNumAxioms" in attributes.keys():
self.globalMinNumAxioms = ParseUtils.try_to_parse_int(attributes.getValue("globalMinNumAxioms"))
if "optimization" in attributes.keys():
self.optimization = ParseUtils.try_to_parse_int(attributes.getValue("optimization"))
if "instrumentation" in attributes.keys():
self.instrumentation = ParseUtils.try_to_parse_bool(attributes.getValue("instrumentation"))
# NOTE: if a valid whippletree technique is not found, the default value (WhippletreeTechnique.MEGAKERNELS) is used
if "whippletreeTechnique" in attributes.keys():
whippletree_technique_name = attributes.getValue("whippletreeTechnique")
for whippletree_technique, whippletree_technique_name_ in WhippletreeTechnique.NAMES.items():
if whippletree_technique_name_ == whippletree_technique_name:
self.whippletreeTechnique = whippletree_technique
if "H1TO1" in attributes.keys():
self.H1TO1 = ParseUtils.try_to_parse_int(attributes.getValue("H1TO1"))
if "HRR" in attributes.keys():
self.HRR = ParseUtils.try_to_parse_int(attributes.getValue("HRR"))
if "HSTO" in attributes.keys():
self.HSTO = ParseUtils.try_to_parse_int(attributes.getValue("HSTO"))
if "HTSPECS" in attributes.keys():
self.HTSPECS = attributes.getValue("HTSPECS")
if "RSAMPL" in attributes.keys():
self.RSAMPL = ParseUtils.try_to_parse_int(attributes.getValue("RSAMPL"))
if "analyzerSeed" in attributes.keys():
self.analyzerSeed = ParseUtils.try_to_parse_int(attributes.getValue("analyzerSeed"))
| mit |
jcoady9/python-for-android | python3-alpha/extra_modules/gdata/tlslite/utils/OpenSSL_TripleDES.py | 48 | 1668 | """OpenSSL/M2Crypto 3DES implementation."""
from .cryptomath import *
from .TripleDES import *
if m2cryptoLoaded:
def new(key, mode, IV):
return OpenSSL_TripleDES(key, mode, IV)
class OpenSSL_TripleDES(TripleDES):
def __init__(self, key, mode, IV):
TripleDES.__init__(self, key, mode, IV, "openssl")
self.key = key
self.IV = IV
def _createContext(self, encrypt):
    """Create a fresh OpenSSL cipher context for 3DES-EDE-CBC, keyed with
    this object's key and current IV.

    *encrypt* selects the direction: 1 to encrypt, 0 to decrypt.
    """
    context = m2.cipher_ctx_new()
    cipherType = m2.des_ede3_cbc()
    m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
    return context
def encrypt(self, plaintext):
    """Encrypt *plaintext* in CBC mode and return the ciphertext.

    The last ciphertext block is saved as the IV so consecutive calls
    chain as one continuous CBC stream.
    """
    TripleDES.encrypt(self, plaintext)
    context = self._createContext(1)
    ciphertext = m2.cipher_update(context, plaintext)
    m2.cipher_ctx_free(context)
    self.IV = ciphertext[-self.block_size:]
    return ciphertext
def decrypt(self, ciphertext):
TripleDES.decrypt(self, ciphertext)
context = self._createContext(0)
#I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
#To work around this, we append sixteen zeros to the string, below:
plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
#If this bug is ever fixed, then plaintext will end up having a garbage
#plaintext block on the end. That's okay - the below code will ignore it.
plaintext = plaintext[:len(ciphertext)]
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return plaintext | apache-2.0 |
daisymax/nvda | source/core.py | 2 | 13078 | #core.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""NVDA core"""
# Do this first to initialise comtypes.client.gen_dir and the comtypes.gen search path.
import comtypes.client
# Append our comInterfaces directory to the comtypes.gen search path.
import comtypes.gen
import comInterfaces
comtypes.gen.__path__.append(comInterfaces.__path__[0])
#Apply several monky patches to comtypes
import comtypesMonkeyPatches
import sys
import nvwave
import os
import time
import logHandler
import globalVars
from logHandler import log
import addonHandler
def doStartupDialogs():
    """Show the dialogs that may be required when NVDA starts:
    the welcome dialog, a notice about a broken configuration file,
    a warning about errors in the user's gesture map, and a one-time
    alert about the new laptop keyboard layout.
    """
    import config
    import gui
    if config.conf["general"]["showWelcomeDialogAtStartup"]:
        gui.WelcomeDialog.run()
    if globalVars.configFileError:
        # The configuration file failed to load earlier; tell the user.
        gui.ConfigFileErrorDialog.run()
    import inputCore
    if inputCore.manager.userGestureMap.lastUpdateContainedError:
        import wx
        gui.messageBox(_("Your gesture map file contains errors.\n"
            "More details about the errors can be found in the log file."),
            _("gesture map File Error"), wx.OK|wx.ICON_EXCLAMATION)
    if not config.conf["upgrade"]["newLaptopKeyboardLayout"]:
        # Shown once per upgrade; the dialog presumably flips this flag itself
        # (it is not set here) -- confirm in gui.upgradeAlerts.
        from gui import upgradeAlerts
        upgradeAlerts.NewLaptopKeyboardLayout.run()
def restart():
    """Restarts NVDA by starting a new copy with -r."""
    if globalVars.appArgs.launcher:
        # Still running from the launcher: exit with a special code instead of
        # spawning a second copy; the launcher handles the actual restart.
        import wx
        globalVars.exitCode=2
        wx.GetApp().ExitMainLoop()
        return
    import subprocess
    import shellapi
    # Relaunch this executable with the original arguments plus -r.
    # (Python 2 strings are decoded via the ANSI code page for ShellExecute.)
    shellapi.ShellExecute(None, None,
        sys.executable.decode("mbcs"),
        subprocess.list2cmdline(sys.argv + ["-r"]).decode("mbcs"),
        None, 0)
def resetConfiguration(factoryDefaults=False):
    """Loads the configuration, installs the correct language support and initialises audio so that it will use the configured synth and speech settings.

    @param factoryDefaults: passed through to config.load; presumably resets
        the configuration to factory defaults instead of reloading from disk
        -- confirm in the config module.
    """
    import config
    import braille
    import speech
    import languageHandler
    import inputCore
    # Terminate braille, speech and add-ons before reloading the config so
    # they can be re-initialised below with the new settings.
    log.debug("Terminating braille")
    braille.terminate()
    log.debug("terminating speech")
    speech.terminate()
    log.debug("terminating addonHandler")
    addonHandler.terminate()
    log.debug("Reloading config")
    config.load(factoryDefaults=factoryDefaults)
    logHandler.setLogLevelFromConfig()
    #Language
    lang = config.conf["general"]["language"]
    log.debug("setting language to %s"%lang)
    languageHandler.setLanguage(lang)
    # Addons
    addonHandler.initialize()
    #Speech
    log.debug("initializing speech")
    speech.initialize()
    #braille
    log.debug("Initializing braille")
    braille.initialize()
    log.debug("Reloading user and locale input gesture maps")
    inputCore.manager.loadUserGestureMap()
    inputCore.manager.loadLocaleGestureMap()
    log.info("Reverted to saved configuration")
def _setInitialFocus():
    """Sets the initial focus if no focus event was received at startup.
    """
    import eventHandler
    import api
    if eventHandler.lastQueuedFocusObject:
        # The focus has already been set or a focus event is pending.
        return
    try:
        focus = api.getDesktopObject().objectWithFocus()
        if focus:
            eventHandler.queueEvent('gainFocus', focus)
    except:
        # Deliberately broad: failing to fetch the initial focus must never
        # abort startup; the error is only logged.
        log.exception("Error retrieving initial focus")
def main():
    """NVDA's core main loop.
    This initializes all modules such as audio, IAccessible, keyboard, mouse, and GUI. Then it initialises the wx application object and installs the core pump timer, which checks the queues and executes functions every 1 ms. Finally, it starts the wx main loop.
    On exit from the main loop, all subsystems are torn down in roughly the
    reverse order of initialization.
    """
    log.debug("Core starting")
    # --- Configuration, logging and language ---
    import config
    if not globalVars.appArgs.configPath:
        globalVars.appArgs.configPath=config.getUserDefaultConfigPath(useInstalledPathIfExists=globalVars.appArgs.launcher)
    #Initialize the config path (make sure it exists)
    config.initConfigPath()
    log.info("Config dir: %s"%os.path.abspath(globalVars.appArgs.configPath))
    log.debug("loading config")
    import config
    config.load()
    if not globalVars.appArgs.minimal:
        try:
            nvwave.playWaveFile("waves\\start.wav")
        except:
            # Start sound is best effort only.
            pass
    logHandler.setLogLevelFromConfig()
    try:
        lang = config.conf["general"]["language"]
        import languageHandler
        log.debug("setting language to %s"%lang)
        languageHandler.setLanguage(lang)
    except:
        log.warning("Could not set language to %s"%lang)
    import versionInfo
    log.info("NVDA version %s" % versionInfo.version)
    log.info("Using Windows version %r" % (sys.getwindowsversion(),))
    log.info("Using Python version %s"%sys.version)
    log.info("Using comtypes version %s"%comtypes.__version__)
    # Set a reasonable timeout for any socket connections NVDA makes.
    import socket
    socket.setdefaulttimeout(10)
    # --- Core subsystems: add-ons, app modules, helper, speech ---
    log.debug("Initializing addons system.")
    addonHandler.initialize()
    import appModuleHandler
    log.debug("Initializing appModule Handler")
    appModuleHandler.initialize()
    import NVDAHelper
    log.debug("Initializing NVDAHelper")
    NVDAHelper.initialize()
    import speechDictHandler
    log.debug("Speech Dictionary processing")
    speechDictHandler.initialize()
    import speech
    log.debug("Initializing speech")
    speech.initialize()
    if not globalVars.appArgs.minimal and (time.time()-globalVars.startTime)>5:
        log.debugWarning("Slow starting core (%.2f sec)" % (time.time()-globalVars.startTime))
        # Translators: This is spoken when NVDA is starting.
        speech.speakMessage(_("Loading NVDA. Please wait..."))
    # --- wx application object ---
    import wx
    log.info("Using wx version %s"%wx.version())
    app = wx.App(redirect=False)
    # HACK: wx currently raises spurious assertion failures when a timer is stopped but there is already an event in the queue for that timer.
    # Unfortunately, these assertion exceptions are raised in the middle of other code, which causes problems.
    # Therefore, disable assertions for now.
    app.SetAssertMode(wx.PYAPP_ASSERT_SUPPRESS)
    # We do support QueryEndSession events, but we don't want to do anything for them.
    app.Bind(wx.EVT_QUERY_END_SESSION, lambda evt: None)
    def onEndSession(evt):
        # NVDA will be terminated as soon as this function returns, so save configuration if appropriate.
        config.saveOnExit()
        speech.cancelSpeech()
        if not globalVars.appArgs.minimal:
            try:
                nvwave.playWaveFile("waves\\exit.wav",async=False)
            except:
                pass
        log.info("Windows session ending")
    app.Bind(wx.EVT_END_SESSION, onEndSession)
    # --- Output and display subsystems ---
    import braille
    log.debug("Initializing braille")
    braille.initialize()
    log.debug("Initializing braille input")
    import brailleInput
    brailleInput.initialize()
    import displayModel
    log.debug("Initializing displayModel")
    displayModel.initialize()
    log.debug("Initializing GUI")
    import gui
    gui.initialize()
    # initialize wxpython localization support
    locale = wx.Locale()
    lang=languageHandler.getLanguage()
    if '_' in lang:
        # wx only wants the bare language code, not the country suffix.
        wxLang=lang.split('_')[0]
    else:
        wxLang=lang
    if hasattr(sys,'frozen'):
        # Running as a frozen binary: catalogs live next to the executable.
        locale.AddCatalogLookupPathPrefix(os.path.join(os.getcwdu(),"locale"))
    try:
        locale.Init(lang,wxLang)
    except:
        pass
    # --- API objects: everything starts out focused on the desktop ---
    import api
    import winUser
    import NVDAObjects.window
    desktopObject=NVDAObjects.window.Window(windowHandle=winUser.getDesktopWindow())
    api.setDesktopObject(desktopObject)
    api.setFocusObject(desktopObject)
    api.setNavigatorObject(desktopObject)
    api.setMouseObject(desktopObject)
    # --- Accessibility API handlers ---
    import JABHandler
    log.debug("initializing Java Access Bridge support")
    try:
        JABHandler.initialize()
    except NotImplementedError:
        log.warning("Java Access Bridge not available")
    except:
        log.error("Error initializing Java Access Bridge support", exc_info=True)
    import winConsoleHandler
    log.debug("Initializing winConsole support")
    winConsoleHandler.initialize()
    import UIAHandler
    log.debug("Initializing UIA support")
    try:
        UIAHandler.initialize()
    except NotImplementedError:
        log.warning("UIA not available")
    except:
        log.error("Error initializing UIA support", exc_info=True)
    import IAccessibleHandler
    log.debug("Initializing IAccessible support")
    IAccessibleHandler.initialize()
    # --- Input subsystems ---
    log.debug("Initializing input core")
    import inputCore
    inputCore.initialize()
    import keyboardHandler
    log.debug("Initializing keyboard handler")
    keyboardHandler.initialize()
    import mouseHandler
    log.debug("initializing mouse handler")
    mouseHandler.initialize()
    import touchHandler
    log.debug("Initializing touchHandler")
    try:
        touchHandler.initialize()
    except NotImplementedError:
        # No touch support on this system; that is fine.
        pass
    import globalPluginHandler
    log.debug("Initializing global plugin handler")
    globalPluginHandler.initialize()
    # --- Startup dialogs / silent install ---
    if globalVars.appArgs.install:
        import wx
        import gui.installerGui
        wx.CallAfter(gui.installerGui.doSilentInstall)
    elif not globalVars.appArgs.minimal:
        try:
            # Translators: This is shown on a braille display (if one is connected) when NVDA starts.
            braille.handler.message(_("NVDA started"))
        except:
            log.error("", exc_info=True)
        if globalVars.appArgs.launcher:
            gui.LauncherDialog.run()
            # LauncherDialog will call doStartupDialogs() afterwards if required.
        else:
            wx.CallAfter(doStartupDialogs)
    import queueHandler
    # Queue the handling of initial focus,
    # as API handlers might need to be pumped to get the first focus event.
    queueHandler.queueFunction(queueHandler.eventQueue, _setInitialFocus)
    import watchdog
    import baseObject
    # --- The core pump: a 1 ms wx timer that drives all event queues ---
    class CorePump(wx.Timer):
        "Checks the queues and executes functions."
        def __init__(self,*args,**kwargs):
            log.debug("Core pump starting")
            super(CorePump,self).__init__(*args,**kwargs)
        def Notify(self):
            try:
                JABHandler.pumpAll()
                IAccessibleHandler.pumpAll()
                queueHandler.pumpAll()
                mouseHandler.pumpAll()
                braille.pumpAll()
            except:
                log.exception("errors in this core pump cycle")
            baseObject.AutoPropertyObject.invalidateCaches()
            watchdog.alive()
    log.debug("starting core pump")
    pump = CorePump()
    pump.Start(1)
    log.debug("Initializing watchdog")
    watchdog.initialize()
    try:
        import updateCheck
    except RuntimeError:
        updateCheck=None
        log.debug("Update checking not supported")
    else:
        log.debug("initializing updateCheck")
        updateCheck.initialize()
    log.info("NVDA initialized")
    log.debug("entering wx application main loop")
    app.MainLoop()
    # --- Shutdown: tear everything down in reverse order ---
    log.info("Exiting")
    if updateCheck:
        log.debug("Terminating updateCheck")
        updateCheck.terminate()
    log.debug("Terminating watchdog")
    watchdog.terminate()
    log.debug("Terminating global plugin handler")
    globalPluginHandler.terminate()
    log.debug("Terminating GUI")
    gui.terminate()
    config.saveOnExit()
    try:
        if globalVars.focusObject and hasattr(globalVars.focusObject,"event_loseFocus"):
            log.debug("calling lose focus on object with focus")
            globalVars.focusObject.event_loseFocus()
    except:
        log.error("Lose focus error",exc_info=True)
    try:
        speech.cancelSpeech()
    except:
        pass
    log.debug("Cleaning up running treeInterceptors")
    try:
        import treeInterceptorHandler
        treeInterceptorHandler.terminate()
    except:
        log.error("Error cleaning up treeInterceptors",exc_info=True)
    log.debug("Terminating IAccessible support")
    try:
        IAccessibleHandler.terminate()
    except:
        log.error("Error terminating IAccessible support",exc_info=True)
    log.debug("Terminating UIA support")
    try:
        UIAHandler.terminate()
    except:
        log.error("Error terminating UIA support",exc_info=True)
    log.debug("Terminating winConsole support")
    try:
        winConsoleHandler.terminate()
    except:
        log.error("Error terminating winConsole support",exc_info=True)
    log.debug("Terminating Java Access Bridge support")
    try:
        JABHandler.terminate()
    except:
        log.error("Error terminating Java Access Bridge support",exc_info=True)
    log.debug("Terminating app module handler")
    appModuleHandler.terminate()
    log.debug("Terminating NVDAHelper")
    try:
        NVDAHelper.terminate()
    except:
        log.error("Error terminating NVDAHelper",exc_info=True)
    log.debug("Terminating touchHandler")
    try:
        touchHandler.terminate()
    except:
        log.error("Error terminating touchHandler")
    log.debug("Terminating keyboard handler")
    try:
        keyboardHandler.terminate()
    except:
        log.error("Error terminating keyboard handler")
    log.debug("Terminating mouse handler")
    try:
        mouseHandler.terminate()
    except:
        log.error("error terminating mouse handler",exc_info=True)
    log.debug("Terminating input core")
    inputCore.terminate()
    log.debug("Terminating brailleInput")
    brailleInput.terminate()
    log.debug("Terminating braille")
    try:
        braille.terminate()
    except:
        log.error("Error terminating braille",exc_info=True)
    log.debug("Terminating speech")
    try:
        speech.terminate()
    except:
        log.error("Error terminating speech",exc_info=True)
    try:
        addonHandler.terminate()
    except:
        log.error("Error terminating addonHandler",exc_info=True)
    if not globalVars.appArgs.minimal:
        try:
            nvwave.playWaveFile("waves\\exit.wav",async=False)
        except:
            pass
    log.debug("core done")
| gpl-2.0 |
sharifulgeo/networkx | networkx/algorithms/tests/test_cluster.py | 89 | 7321 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestTriangles:
    """Exercise nx.triangles on empty, path, cubical and complete graphs."""

    def test_empty(self):
        graph = nx.Graph()
        assert_equal(list(nx.triangles(graph).values()), [])

    def test_path(self):
        # A path contains no triangles at all.
        graph = nx.path_graph(10)
        assert_equal(list(nx.triangles(graph).values()), [0] * 10)
        assert_equal(nx.triangles(graph), dict.fromkeys(range(10), 0))

    def test_cubical(self):
        # The cube graph is triangle-free as well.
        graph = nx.cubical_graph()
        assert_equal(list(nx.triangles(graph).values()), [0] * 8)
        assert_equal(nx.triangles(graph, 1), 0)
        assert_equal(list(nx.triangles(graph, [1, 2]).values()), [0, 0])
        assert_equal(nx.triangles(graph, 1), 0)
        assert_equal(nx.triangles(graph, [1, 2]), {1: 0, 2: 0})

    def test_k5(self):
        # K5: every node sits in C(4, 2) = 6 triangles, 10 triangles total.
        graph = nx.complete_graph(5)
        assert_equal(list(nx.triangles(graph).values()), [6] * 5)
        assert_equal(sum(nx.triangles(graph).values()) / 3.0, 10)
        assert_equal(nx.triangles(graph, 1), 6)
        graph.remove_edge(1, 2)
        assert_equal(list(nx.triangles(graph).values()), [5, 3, 3, 5, 5])
        assert_equal(nx.triangles(graph, 1), 3)
class TestWeightedClustering:
    """Weighted clustering coefficient (weight='weight') on small graphs."""

    def test_clustering(self):
        graph = nx.Graph()
        assert_equal(list(nx.clustering(graph, weight='weight').values()), [])
        assert_equal(nx.clustering(graph), {})

    def test_path(self):
        graph = nx.path_graph(10)
        assert_equal(list(nx.clustering(graph, weight='weight').values()),
                     [0.0] * 10)
        assert_equal(nx.clustering(graph, weight='weight'),
                     dict.fromkeys(range(10), 0.0))

    def test_cubical(self):
        graph = nx.cubical_graph()
        assert_equal(list(nx.clustering(graph, weight='weight').values()),
                     [0] * 8)
        assert_equal(nx.clustering(graph, 1), 0)
        assert_equal(list(nx.clustering(graph, [1, 2], weight='weight').values()),
                     [0, 0])
        assert_equal(nx.clustering(graph, 1, weight='weight'), 0)
        assert_equal(nx.clustering(graph, [1, 2], weight='weight'), {1: 0, 2: 0})

    def test_k5(self):
        graph = nx.complete_graph(5)
        assert_equal(list(nx.clustering(graph, weight='weight').values()),
                     [1] * 5)
        assert_equal(nx.average_clustering(graph, weight='weight'), 1)
        graph.remove_edge(1, 2)
        assert_equal(list(nx.clustering(graph, weight='weight').values()),
                     [5. / 6., 1.0, 1.0, 5. / 6., 5. / 6.])
        assert_equal(nx.clustering(graph, [1, 4], weight='weight'),
                     {1: 1.0, 4: 0.83333333333333337})

    def test_triangle_and_edge(self):
        # One triangle plus a heavier pendant edge on node 0.
        graph = nx.Graph()
        graph.add_cycle([0, 1, 2])
        graph.add_edge(0, 4, weight=2)
        assert_equal(nx.clustering(graph)[0], 1.0 / 3.0)
        assert_equal(nx.clustering(graph, weight='weight')[0], 1.0 / 6.0)
class TestClustering:
    """Unweighted clustering coefficient on small graphs."""

    def test_clustering(self):
        graph = nx.Graph()
        assert_equal(list(nx.clustering(graph).values()), [])
        assert_equal(nx.clustering(graph), {})

    def test_path(self):
        graph = nx.path_graph(10)
        assert_equal(list(nx.clustering(graph).values()), [0.0] * 10)
        assert_equal(nx.clustering(graph), dict.fromkeys(range(10), 0.0))

    def test_cubical(self):
        graph = nx.cubical_graph()
        assert_equal(list(nx.clustering(graph).values()), [0] * 8)
        assert_equal(nx.clustering(graph, 1), 0)
        assert_equal(list(nx.clustering(graph, [1, 2]).values()), [0, 0])
        assert_equal(nx.clustering(graph, 1), 0)
        assert_equal(nx.clustering(graph, [1, 2]), {1: 0, 2: 0})

    def test_k5(self):
        graph = nx.complete_graph(5)
        assert_equal(list(nx.clustering(graph).values()), [1] * 5)
        assert_equal(nx.average_clustering(graph), 1)
        graph.remove_edge(1, 2)
        assert_equal(list(nx.clustering(graph).values()),
                     [5. / 6., 1.0, 1.0, 5. / 6., 5. / 6.])
        assert_equal(nx.clustering(graph, [1, 4]), {1: 1.0, 4: 0.83333333333333337})
class TestTransitivity:
    """Global transitivity (fraction of closed triads)."""

    def test_transitivity(self):
        assert_equal(nx.transitivity(nx.Graph()), 0.0)

    def test_path(self):
        assert_equal(nx.transitivity(nx.path_graph(10)), 0.0)

    def test_cubical(self):
        assert_equal(nx.transitivity(nx.cubical_graph()), 0.0)

    def test_k5(self):
        graph = nx.complete_graph(5)
        assert_equal(nx.transitivity(graph), 1.0)
        graph.remove_edge(1, 2)
        assert_equal(nx.transitivity(graph), 0.875)

    # A commented-out check that the weight-averaged clustering equals the
    # transitivity was removed here; it relied on a clustering(weights=True)
    # API that no longer exists.
class TestSquareClustering:
    """Square (C4) clustering coefficient."""

    def test_clustering(self):
        graph = nx.Graph()
        assert_equal(list(nx.square_clustering(graph).values()), [])
        assert_equal(nx.square_clustering(graph), {})

    def test_path(self):
        graph = nx.path_graph(10)
        assert_equal(list(nx.square_clustering(graph).values()), [0.0] * 10)
        assert_equal(nx.square_clustering(graph), dict.fromkeys(range(10), 0.0))

    def test_cubical(self):
        graph = nx.cubical_graph()
        assert_equal(list(nx.square_clustering(graph).values()), [0.5] * 8)
        assert_equal(list(nx.square_clustering(graph, [1, 2]).values()), [0.5, 0.5])
        assert_equal(nx.square_clustering(graph, [1])[1], 0.5)
        assert_equal(nx.square_clustering(graph, [1, 2]), {1: 0.5, 2: 0.5})

    def test_k5(self):
        graph = nx.complete_graph(5)
        assert_equal(list(nx.square_clustering(graph).values()), [1] * 5)

    def test_bipartite_k5(self):
        graph = nx.complete_bipartite_graph(5, 5)
        assert_equal(list(nx.square_clustering(graph).values()), [1] * 10)

    def test_lind_square_clustering(self):
        """Test C4 for figure 1 Lind et al (2005)"""
        edges = [(1, 2), (1, 3), (1, 6), (1, 7), (2, 4), (2, 5),
                 (3, 4), (3, 5), (6, 7), (7, 8), (6, 8), (7, 9),
                 (7, 10), (6, 11), (6, 12), (2, 13), (2, 14), (3, 15), (3, 16)]
        graph = nx.Graph(edges)
        left = graph.subgraph([1, 2, 3, 4, 5, 13, 14, 15, 16])
        right = graph.subgraph([1, 6, 7, 8, 9, 10, 11, 12])
        assert_equal(nx.square_clustering(graph, [1])[1], 3 / 75.0)
        assert_equal(nx.square_clustering(left, [1])[1], 2 / 6.0)
        assert_equal(nx.square_clustering(right, [1])[1], 1 / 5.0)
def test_average_clustering():
    """Triangle plus a pendant edge: check count_zeros handling."""
    graph = nx.cycle_graph(3)
    graph.add_edge(2, 3)
    including_zeros = (1 + 1 + 1 / 3.0) / 4.0
    assert_equal(nx.average_clustering(graph), including_zeros)
    assert_equal(nx.average_clustering(graph, count_zeros=True), including_zeros)
    assert_equal(nx.average_clustering(graph, count_zeros=False),
                 (1 + 1 + 1 / 3.0) / 3.0)
| bsd-3-clause |
nysan/yocto-autobuilder | lib/python2.6/site-packages/Twisted-11.0.0-py2.6-linux-x86_64.egg/twisted/trial/test/packages.py | 25 | 3955 | import sys, os
from twisted.trial import unittest
testModule = """
from twisted.trial import unittest
class FooTest(unittest.TestCase):
def testFoo(self):
pass
"""
dosModule = testModule.replace('\n', '\r\n')
testSample = """
'''This module is used by test_loader to test the Trial test loading
functionality. Do NOT change the number of tests in this module.
Do NOT change the names the tests in this module.
'''
import unittest as pyunit
from twisted.trial import unittest
class FooTest(unittest.TestCase):
def test_foo(self):
pass
def test_bar(self):
pass
class PyunitTest(pyunit.TestCase):
def test_foo(self):
pass
def test_bar(self):
pass
class NotATest(object):
def test_foo(self):
pass
class AlphabetTest(unittest.TestCase):
def test_a(self):
pass
def test_b(self):
pass
def test_c(self):
pass
"""
testInheritanceSample = """
'''This module is used by test_loader to test the Trial test loading
functionality. Do NOT change the number of tests in this module.
Do NOT change the names the tests in this module.
'''
from twisted.trial import unittest
class X(object):
def test_foo(self):
pass
class A(unittest.TestCase, X):
pass
class B(unittest.TestCase, X):
pass
"""
class PackageTest(unittest.TestCase):
    """TestCase that materialises a tree of fixture packages on disk.

    ``files`` maps relative paths to file contents; some of the packages are
    deliberately broken (syntax errors, failing imports) so the loader tests
    can exercise error handling.
    """
    files = [
        ('badpackage/__init__.py', 'frotz\n'),
        ('badpackage/test_module.py', ''),
        ('package2/__init__.py', ''),
        ('package2/test_module.py', 'import frotz\n'),
        ('package/__init__.py', ''),
        ('package/frotz.py', 'frotz\n'),
        ('package/test_bad_module.py',
         'raise ZeroDivisionError("fake error")'),
        ('package/test_dos_module.py', dosModule),
        ('package/test_import_module.py', 'import frotz'),
        ('package/test_module.py', testModule),
        ('goodpackage/__init__.py', ''),
        ('goodpackage/test_sample.py', testSample),
        ('goodpackage/sub/__init__.py', ''),
        ('goodpackage/sub/test_sample.py', testSample),
        ('inheritancepackage/__init__.py', ''),
        ('inheritancepackage/test_x.py', testInheritanceSample),
        ]

    def _toModuleName(self, filename):
        """Map a relative source path to its dotted module name."""
        name = os.path.splitext(filename)[0]
        segs = name.split('/')
        if segs[-1] == '__init__':
            segs = segs[:-1]
        return '.'.join(segs)

    def getModules(self):
        """Return the dotted names of every module this fixture creates."""
        # List comprehension instead of map()/zip()[0]: returns a list on
        # both Python 2 and Python 3.
        return [self._toModuleName(filename) for filename, contents in self.files]

    def cleanUpModules(self):
        """Remove the fixture modules from sys.modules.

        Reverse-sorted so that submodules are removed before their packages.
        """
        for module in sorted(self.getModules(), reverse=True):
            try:
                del sys.modules[module]
            except KeyError:
                pass

    def createFiles(self, files, parentDir='.'):
        """Write the given (path, contents) pairs beneath parentDir.

        Bug fix: the original ignored the ``files`` argument and always
        wrote ``self.files``; the parameter is now honoured (all existing
        callers pass ``self.files``, so behaviour is unchanged for them).
        """
        for filename, contents in files:
            filename = os.path.join(parentDir, filename)
            self._createDirectory(filename)
            # with-block closes the handle even if the write fails.
            with open(filename, 'w') as fd:
                fd.write(contents)

    def _createDirectory(self, filename):
        """Ensure the parent directory of ``filename`` exists."""
        directory = os.path.dirname(filename)
        if not os.path.exists(directory):
            os.makedirs(directory)

    def setUp(self, parentDir=None):
        if parentDir is None:
            parentDir = self.mktemp()
        self.parent = parentDir
        self.createFiles(self.files, parentDir)

    def tearDown(self):
        self.cleanUpModules()
class SysPathManglingTest(PackageTest):
    """PackageTest variant that also puts the fixture tree on sys.path."""

    def setUp(self, parent=None):
        # Remember the pristine path so tearDown can restore it exactly.
        self.oldPath = list(sys.path)
        self.newPath = list(sys.path)
        if parent is None:
            parent = self.mktemp()
        PackageTest.setUp(self, parent)
        self.newPath.append(self.parent)
        self.mangleSysPath(self.newPath)

    def tearDown(self):
        PackageTest.tearDown(self)
        self.mangleSysPath(self.oldPath)

    def mangleSysPath(self, pathVar):
        # Replace the contents in place so existing references to sys.path
        # observe the change.
        sys.path[:] = pathVar
| gpl-2.0 |
mindw/numpy | numpy/distutils/line_endings.py | 256 | 2053 | """ Functions for converting from DOS to UNIX line endings
"""
from __future__ import division, absolute_import, print_function
import sys, re, os
def dos2unix(file):
    """Replace CRLF with LF in ``file``; return the path if it was changed.

    Directories and binary files (anything containing a NUL byte) are
    skipped with a diagnostic message.
    """
    if os.path.isdir(file):
        print(file, "Directory!")
        return
    # Read as binary and work on bytes so the conversion behaves identically
    # on Python 2 and 3 (the module already opts into py3 semantics via
    # __future__); the with-block guarantees the handle is closed.
    with open(file, "rb") as fp:
        data = fp.read()
    if b'\0' in data:
        print(file, "Binary!")
        return
    newdata = re.sub(b"\r\n", b"\n", data)
    if newdata != data:
        print('dos2unix:', file)
        with open(file, "wb") as f:
            f.write(newdata)
        return file
    else:
        print(file, 'ok')
def dos2unix_one_dir(modified_files, dir_name, file_names):
    """Directory-walk callback: convert every entry, recording changed files."""
    for fname in file_names:
        converted = dos2unix(os.path.join(dir_name, fname))
        if converted is not None:
            modified_files.append(converted)
def dos2unix_dir(dir_name):
    """Recursively convert CRLF to LF below dir_name; return the changed files.

    Uses os.walk rather than os.path.walk: the latter was removed in
    Python 3, and this module already opts into Python 3 semantics via its
    __future__ imports (os.walk exists on both major versions).
    """
    modified_files = []
    for dirpath, dirnames, filenames in os.walk(dir_name):
        # os.path.walk handed the callback every directory entry at once;
        # reproduce that by passing subdirectories and files together
        # (dos2unix itself skips directories).
        dos2unix_one_dir(modified_files, dirpath, dirnames + filenames)
    return modified_files
#----------------------------------
def unix2dos(file):
    """Replace LF with CRLF in ``file``; return the path if it was changed.

    Directories and binary files (anything containing a NUL byte) are
    skipped with a diagnostic message.
    """
    if os.path.isdir(file):
        print(file, "Directory!")
        return
    # Read as binary and work on bytes so the conversion behaves identically
    # on Python 2 and 3; the with-block guarantees the handle is closed.
    with open(file, "rb") as fp:
        data = fp.read()
    if b'\0' in data:
        print(file, "Binary!")
        return
    # Normalize existing CRLF to LF first so they are not doubled below.
    newdata = re.sub(b"\r\n", b"\n", data)
    newdata = re.sub(b"\n", b"\r\n", newdata)
    if newdata != data:
        print('unix2dos:', file)
        with open(file, "wb") as f:
            f.write(newdata)
        return file
    else:
        print(file, 'ok')
def unix2dos_one_dir(modified_files, dir_name, file_names):
    """Directory-walk callback: convert every entry, recording changed files.

    Bug fix: the original discarded unix2dos()'s return value and tested the
    loop variable instead (which is never None), so every bare filename was
    appended to modified_files whether or not it was actually modified.
    This now mirrors dos2unix_one_dir and appends only the full paths of
    files that really changed.
    """
    for fname in file_names:
        converted = unix2dos(os.path.join(dir_name, fname))
        if converted is not None:
            modified_files.append(converted)
def unix2dos_dir(dir_name):
    """Recursively convert LF to CRLF below dir_name; return the changed files.

    Uses os.walk rather than os.path.walk: the latter was removed in
    Python 3, and this module already opts into Python 3 semantics via its
    __future__ imports (os.walk exists on both major versions).
    """
    modified_files = []
    for dirpath, dirnames, filenames in os.walk(dir_name):
        # Pass subdirectories and files together, as os.path.walk did
        # (unix2dos itself skips directories).
        unix2dos_one_dir(modified_files, dirpath, dirnames + filenames)
    return modified_files
if __name__ == "__main__":
dos2unix_dir(sys.argv[1])
| bsd-3-clause |
cgarrard/osgeopy-code | Chapter4/listing4_2.py | 1 | 1083 | import os
from osgeo import ogr
def layers_to_feature_dataset(ds_name, gdb_fn, dataset_name):
    """Copy layers to a feature dataset in a file geodatabase."""
    # The source must exist and be readable before anything else.
    in_ds = ogr.Open(ds_name)
    if in_ds is None:
        raise RuntimeError('Could not open datasource')

    # Open the geodatabase for update, or create a fresh one if absent.
    gdb_driver = ogr.GetDriverByName('FileGDB')
    gdb_ds = (gdb_driver.Open(gdb_fn, 1) if os.path.exists(gdb_fn)
              else gdb_driver.CreateDataSource(gdb_fn))
    if gdb_ds is None:
        raise RuntimeError('Could not open file geodatabase')

    # FEATURE_DATASET tells the driver to place each copied layer inside
    # the named feature dataset rather than at the geodatabase root.
    options = ['FEATURE_DATASET=' + dataset_name]

    # Copy every layer from the source into the geodatabase.
    for index in range(in_ds.GetLayerCount()):
        layer = in_ds.GetLayer(index)
        layer_name = layer.GetName()
        print('Copying ' + layer_name + '...')
        gdb_ds.CopyLayer(layer, layer_name, options)
| mit |
ltilve/ChromiumGStreamerBackend | chrome/common/extensions/docs/server2/environment.py | 36 | 1521 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
import sys
from app_yaml_helper import AppYamlHelper
from third_party.json_schema_compiler.memoize import memoize
@memoize
def GetAppVersion():
  """Return the app version string, cached for the lifetime of the process."""
  return GetAppVersionNonMemoized()
# Unmemoized variant: tests call this directly, since memoization would
# leak state between test runs.
def GetAppVersionNonMemoized():
  """Return the short app version string, e.g. '2-0-25'."""
  version_id = os.environ.get('CURRENT_VERSION_ID')
  if version_id is not None:
    # The full id looks like 2-0-25.36712548 or 2-0-25.23/223; keep only
    # the part before the first '.' or '/'.
    return re.split('[./]', version_id)[0]
  # Not running on App Engine: read the version straight out of app.yaml.
  app_yaml_path = os.path.join(os.path.dirname(__file__), 'app.yaml')
  with open(app_yaml_path, 'r') as app_yaml:
    return AppYamlHelper.ExtractVersion(app_yaml.read())
def _IsServerSoftware(name):
  """Return True if the SERVER_SOFTWARE environment variable starts with
  |name|, i.e. we are running in that serving environment."""
  # startswith() is the idiomatic, intent-revealing form of find(...) == 0
  # and stops scanning after len(name) characters.
  return os.environ.get('SERVER_SOFTWARE', '').startswith(name)
def IsComputeEngine():
  """True when SERVER_SOFTWARE identifies Google Compute Engine."""
  return _IsServerSoftware('Compute Engine')


def IsDevServer():
  """True when running under the local App Engine development server."""
  return _IsServerSoftware('Development')


def IsReleaseServer():
  """True when running on production Google App Engine."""
  return _IsServerSoftware('Google App Engine')


def IsPreviewServer():
  """True when the process was launched via preview.py."""
  return sys.argv and os.path.basename(sys.argv[0]) == 'preview.py'


def IsAppEngine():
  """True on either the dev server or production App Engine."""
  return IsDevServer() or IsReleaseServer()


def IsTest():
  """True when the process was launched by a *_test.py script."""
  return sys.argv and os.path.basename(sys.argv[0]).endswith('_test.py')


class UnknownEnvironmentError(Exception):
  """Error type for an execution environment that cannot be identified."""
  pass
| bsd-3-clause |
spbguru/repo1 | tests/unit/py2/nupic/algorithms/anomaly_test.py | 2 | 4583 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests for anomaly-related algorithms."""
import unittest
from numpy import array
from nupic.algorithms import anomaly
class AnomalyTest(unittest.TestCase):
  """Tests for anomaly score functions and classes."""

  def _checkRawScore(self, active, predicted, expected):
    """Assert computeRawAnomalyScore(active, predicted) is ~expected."""
    score = anomaly.computeRawAnomalyScore(array(active), array(predicted))
    self.assertAlmostEqual(score, expected)

  def _checkInstanceScore(self, active, predicted, expected, **anomalyArgs):
    """Assert a fresh Anomaly instance scores (active, predicted) ~expected."""
    instance = anomaly.Anomaly(**anomalyArgs)
    score = instance.computeAnomalyScore(array(active), array(predicted))
    self.assertAlmostEqual(score, expected)

  def testComputeRawAnomalyScoreNoActiveOrPredicted(self):
    self._checkRawScore([], [], 0.0)

  def testComputeRawAnomalyScoreNoActive(self):
    self._checkRawScore([], [3, 5], 1.0)

  def testComputeRawAnomalyScorePerfectMatch(self):
    self._checkRawScore([3, 5, 7], [3, 5, 7], 0.0)

  def testComputeRawAnomalyScoreNoMatch(self):
    self._checkRawScore([2, 4, 6], [3, 5, 7], 1.0)

  def testComputeRawAnomalyScorePartialMatch(self):
    self._checkRawScore([2, 3, 6], [3, 5, 7], 2.0 / 3.0)

  def testComputeAnomalyScoreNoActiveOrPredicted(self):
    self._checkInstanceScore([], [], 0.0)

  def testComputeAnomalyScoreNoActive(self):
    self._checkInstanceScore([], [3, 5], 1.0)

  def testComputeAnomalyScorePerfectMatch(self):
    self._checkInstanceScore([3, 5, 7], [3, 5, 7], 0.0)

  def testComputeAnomalyScoreNoMatch(self):
    self._checkInstanceScore([2, 4, 6], [3, 5, 7], 1.0)

  def testComputeAnomalyScorePartialMatch(self):
    self._checkInstanceScore([2, 3, 6], [3, 5, 7], 2.0 / 3.0)

  def testAnomalyCumulative(self):
    """Test cumulative anomaly scores."""
    instance = anomaly.Anomaly(slidingWindowSize=3)
    # The prediction is constant; only the actual input varies per step.
    actualSequence = ([1, 2, 6], [1, 2, 6], [1, 4, 6],
                      [10, 11, 6], [10, 11, 12], [10, 11, 12],
                      [10, 11, 12], [1, 2, 6], [1, 2, 6])
    anomalyExpected = (0.0, 0.0, 1.0/9.0, 3.0/9.0, 2.0/3.0, 8.0/9.0, 1.0,
                      2.0/3.0, 1.0/3.0)
    for act, expected in zip(actualSequence, anomalyExpected):
      score = instance.computeAnomalyScore(array(act), array([1, 2, 6]))
      self.assertAlmostEqual(
          score, expected, places=5,
          msg="Anomaly score of %f doesn't match expected of %f" % (
              score, expected))

  def testComputeAnomalySelectModePure(self):
    self._checkInstanceScore([2, 3, 6], [3, 5, 7], 2.0 / 3.0,
                             mode=anomaly.Anomaly.MODE_PURE)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
pylada/pylada-light | src/pylada/config/vasp.py | 1 | 1440 | ###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" VASP parameters for pylada. """
# Global configuration knobs read by the VASP wrapper at run time.
vasp_program = None
""" Path of vasp binary executable (if launching as external program). """
vasp_has_nlep = False
""" Should be set to True if one wants to use NLEP. """
is_vasp_4 = False
""" Set to True to use vasp4-style POSCARS and INCARS. """
| gpl-3.0 |
kurtdawg24/robotframework | src/robot/model/message.py | 16 | 2493 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import html_escape, setter
from .itemlist import ItemList
from .modelobject import ModelObject
class Message(ModelObject):
    """A message outputted during the test execution.
    The message can be a log message triggered by a keyword, or a warning
    or an error occurred during the test execution.
    """
    __slots__ = ['message', 'level', 'html', 'timestamp', 'parent', '_sort_key']
    def __init__(self, message='', level='INFO', html=False, timestamp=None,
                 parent=None):
        #: The message content as a string.
        self.message = message
        #: Severity of the message. Either ``TRACE``, ``INFO``,
        #: ``WARN``, ``DEBUG`` or ``FAIL``/``ERROR``.
        self.level = level
        #: ``True`` if the content is in HTML, ``False`` otherwise.
        self.html = html
        #: Timestamp in format ``%Y%m%d %H:%M:%S.%f``.
        self.timestamp = timestamp
        # -1 marks "no parent assigned yet"; overwritten by the ``parent``
        # setter below when a parent with a ``_child_sort_key`` is attached.
        self._sort_key = -1
        #: The object this message was triggered by.
        self.parent = parent
    @setter
    def parent(self, parent):
        """Setter for ``parent`` (``@setter`` is a robot.utils property
        helper; the returned value becomes the stored attribute).
        When a new, different parent is assigned, adopt its child sort key
        so this message orders correctly among its siblings."""
        if parent and parent is not getattr(self, 'parent', None):
            self._sort_key = getattr(parent, '_child_sort_key', -1)
        return parent
    @property
    def html_message(self):
        """Returns the message content as HTML."""
        return self.message if self.html else html_escape(self.message)
    def visit(self, visitor):
        """Visitor-pattern hook: dispatches to ``visitor.visit_message``."""
        visitor.visit_message(self)
    def __unicode__(self):
        # Python 2 text conversion; returns the raw (unescaped) message.
        return self.message
class Messages(ItemList):
    """List-like container of :class:`Message` objects."""
    __slots__ = []
    def __init__(self, message_class=Message, parent=None, messages=None):
        ItemList.__init__(self, message_class, {'parent': parent}, messages)
    def __setitem__(self, index, item):
        # Carry over the sort key of the message being replaced so the new
        # item keeps its predecessor's position in sorted output.
        old = self[index]
        ItemList.__setitem__(self, index, item)
        self[index]._sort_key = old._sort_key
| apache-2.0 |
jennywoites/MUSSA | MUSSA_Flask/app/API_Rest/Services/DocenteServices/DocenteService.py | 1 | 5919 | from app.API_Rest.codes import *
from app.models.docentes_models import Docente
from app import db
from flask_user import roles_accepted
from app.API_Rest.Services.BaseService import BaseService
from app.models.generadorJSON.docentes_generadorJSON import generarJSON_docente
from app.models.docentes_models import CursosDocente
from app.models.horarios_models import Curso
class DocenteService(BaseService):
    """REST service exposing CRUD operations over Docente entities.

    Routes (see ``URLS_SERVICIOS`` below): ``get``/``post``/``delete``
    receive the docente id from the URL; ``put`` creates a new docente
    from the request payload. Deletions are logical: rows are flagged
    ``eliminado`` instead of being removed from the database.
    """

    def getNombreClaseServicio(self):
        """Human-readable service name used by BaseService logging."""
        return "Docente Service"

    ##########################################
    ##              Servicios               ##
    ##########################################

    def get(self, idDocente):
        """Return the docente identified by ``idDocente`` serialised as JSON."""
        return self.servicio_get_base(idDocente, "idDocente", Docente, generarJSON_docente)

    @roles_accepted('admin')
    def delete(self, idDocente):
        """Logically delete a docente and all of its curso assignments."""
        self.logg_parametros_recibidos()
        parametros_son_validos, msj, codigo = self.validar_parametros(dict([
            self.get_validaciones_entidad_basica("idDocente", idDocente, Docente)
        ]))
        if not parametros_son_validos:
            self.logg_error(msj)
            return {'Error': msj}, codigo
        # Borrado logico: flag the entity instead of physically deleting it.
        docente = Docente.query.get(idDocente)
        docente.eliminado = True
        db.session.commit()
        cursos = CursosDocente.query.filter_by(docente_id=docente.id).all()
        for curso in cursos:
            curso.eliminado = True
            db.session.commit()
        result = SUCCESS_NO_CONTENT
        self.logg_resultado(result)
        return result

    def _validaciones_datos_docente(self, apellido, nombre, l_ids_cursos):
        """Validation entries shared by ``put`` (create) and ``post`` (update).

        Returns a list of ``(name, spec)`` pairs consumable by
        ``self.validar_parametros`` after wrapping in ``dict``.
        """
        return [
            ("apellido", {
                self.PARAMETRO: apellido,
                self.ES_OBLIGATORIO: True,
                self.FUNCIONES_VALIDACION: [
                    (self.validar_contenido_y_longitud_texto, [3, 35])
                ]
            }),
            ("nombre", {
                self.PARAMETRO: nombre,
                self.ES_OBLIGATORIO: True,
                self.FUNCIONES_VALIDACION: [
                    (self.validar_contenido_y_longitud_texto, [0, 40])
                ]
            }),
            ("l_ids_cursos", {
                self.PARAMETRO: l_ids_cursos,
                self.ES_OBLIGATORIO: True,
                self.FUNCIONES_VALIDACION: [
                    (self.id_es_valido, []),
                    (self.existe_id, [Curso])
                ]
            })
        ]

    @roles_accepted('admin')
    def put(self):
        """Create a new docente and link it to the given cursos."""
        self.logg_parametros_recibidos()
        apellido = self.obtener_texto('apellido')
        nombre = self.obtener_texto('nombre')
        l_ids_cursos = self.obtener_lista('l_ids_cursos')
        # BUG FIX: the original code validated "idDocente" here, but ``put``
        # takes no id (it creates the entity), so the name was undefined and
        # every PUT request crashed with NameError. Only the payload fields
        # are validated for creation.
        parametros_son_validos, msj, codigo = self.validar_parametros(dict(
            self._validaciones_datos_docente(apellido, nombre, l_ids_cursos)
        ))
        if not parametros_son_validos:
            self.logg_error(msj)
            return {'Error': msj}, codigo
        docente = Docente(
            apellido=apellido,
            nombre=nombre
        )
        db.session.add(docente)
        db.session.commit()
        self.actualizar_cursos_que_dicta_el_docente(docente.id, l_ids_cursos)
        result = SUCCESS_OK
        self.logg_resultado(result)
        return result

    @roles_accepted('admin')
    def post(self, idDocente):
        """Update an existing docente's data and its curso assignments."""
        self.logg_parametros_recibidos()
        apellido = self.obtener_texto('apellido')
        nombre = self.obtener_texto('nombre')
        l_ids_cursos = self.obtener_lista('l_ids_cursos')
        validaciones = [self.get_validaciones_entidad_basica("idDocente", idDocente, Docente)]
        validaciones += self._validaciones_datos_docente(apellido, nombre, l_ids_cursos)
        parametros_son_validos, msj, codigo = self.validar_parametros(dict(validaciones))
        if not parametros_son_validos:
            self.logg_error(msj)
            return {'Error': msj}, codigo
        self.actualizar_datos_docente(idDocente, apellido, nombre)
        self.actualizar_cursos_que_dicta_el_docente(idDocente, l_ids_cursos)
        result = SUCCESS_OK
        self.logg_resultado(result)
        return result

    def actualizar_datos_docente(self, idDocente, apellido, nombre):
        """Persist new apellido/nombre for an existing docente."""
        docente = Docente.query.get(idDocente)
        docente.apellido = apellido
        docente.nombre = nombre
        db.session.commit()

    def actualizar_cursos_que_dicta_el_docente(self, idDocente, l_ids_cursos):
        """Synchronise CursosDocente rows with ``l_ids_cursos``.

        Rows whose curso is no longer listed are flagged ``eliminado``;
        listed cursos are created, or revived if a flagged row exists.
        """
        # Marcar como eliminados los que existen pero no estaban en l_ids
        for curso_docente in CursosDocente.query.filter_by(docente_id=idDocente).all():
            if not curso_docente.curso_id in l_ids_cursos:
                curso_docente.eliminado = True
                db.session.commit()
        for id_curso in l_ids_cursos:
            curso = CursosDocente.query.filter_by(docente_id=idDocente) \
                .filter_by(curso_id=id_curso).first()
            if not curso:
                curso = CursosDocente(docente_id=idDocente, curso_id=id_curso)
                db.session.add(curso)
            curso.eliminado = False
            db.session.commit()
#########################################
# Registration consumed by the application's service loader: the Resource
# class to mount and the URL rules it answers on.
# NOTE(review): only the id-parameterised rule is listed, yet ``put``
# (create) takes no id -- confirm how creation requests are routed.
CLASE = DocenteService
URLS_SERVICIOS = (
    '/api/docente/<int:idDocente>',
)
#########################################
| gpl-3.0 |
allenlavoie/tensorflow | tensorflow/python/training/learning_rate_decay_test.py | 6 | 18959 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for learning rate decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_state_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import learning_rate_decay
class LRDecayTest(test_util.TensorFlowTestCase):
  """Tests exponential_decay and piecewise_constant learning rate schedules."""
  def testContinuous(self):
    """Non-staircase exponential decay evaluated at a constant step."""
    with self.test_session():
      step = 5
      decayed_lr = learning_rate_decay.exponential_decay(0.05, step, 10, 0.96)
      # lr * decay_rate ** (step / decay_steps)
      expected = .05 * 0.96 ** (5.0 / 10.0)
      self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
  def testStaircase(self):
    """Staircase decay driven by a raw (gen_state_ops) step variable."""
    with self.test_session():
      step = gen_state_ops.variable(shape=[], dtype=dtypes.int32,
                                    name="step", container="", shared_name="")
      assign_100 = state_ops.assign(step, 100)
      assign_1 = state_ops.assign(step, 1)
      assign_2 = state_ops.assign(step, 2)
      decayed_lr = learning_rate_decay.exponential_decay(.1, step, 3, 0.96,
                                                         staircase=True)
      # No change to learning rate
      assign_1.op.run()
      self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
      assign_2.op.run()
      self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
      # Decayed learning rate
      assign_100.op.run()
      # Staircase: exponent is the integer quotient step // decay_steps.
      expected = .1 * 0.96 ** (100 // 3)
      self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
  def testVariables(self):
    """Same as testStaircase but with a high-level variables.Variable step."""
    with self.test_session():
      step = variables.Variable(1)
      assign_1 = step.assign(1)
      assign_2 = step.assign(2)
      assign_100 = step.assign(100)
      decayed_lr = learning_rate_decay.exponential_decay(.1, step, 3, 0.96,
                                                         staircase=True)
      variables.global_variables_initializer().run()
      # No change to learning rate
      assign_1.op.run()
      self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
      assign_2.op.run()
      self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
      # Decayed learning rate
      assign_100.op.run()
      expected = .1 * 0.96 ** (100 // 3)
      self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
  @test_util.run_in_graph_and_eager_modes()
  def testPiecewiseConstant(self):
    """piecewise_constant returns the value of the interval containing x."""
    x = resource_variable_ops.ResourceVariable(-999)
    def pc():
      return learning_rate_decay.piecewise_constant(x, [100, 110, 120],
                                                    [1.0, 0.1, 0.01, 0.001])
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(self.evaluate(pc()), 1.0, 1e-6)
    # Boundaries are inclusive on the left interval: x == 100 still gives 1.0.
    self.evaluate(x.assign(100))
    self.assertAllClose(self.evaluate(pc()), 1.0, 1e-6)
    self.evaluate(x.assign(105))
    self.assertAllClose(self.evaluate(pc()), 0.1, 1e-6)
    self.evaluate(x.assign(110))
    self.assertAllClose(self.evaluate(pc()), 0.1, 1e-6)
    self.evaluate(x.assign(120))
    self.assertAllClose(self.evaluate(pc()), 0.01, 1e-6)
    self.evaluate(x.assign(999))
    self.assertAllClose(self.evaluate(pc()), 0.001, 1e-6)
  @test_util.run_in_graph_and_eager_modes()
  def testPiecewiseConstantEdgeCases(self):
    """Dtype mismatches raise ValueError; ref types and int64 boundaries work."""
    x_int = resource_variable_ops.ResourceVariable(
        0, dtype=variables.dtypes.int32)
    boundaries, values = [-1.0, 1.0], [1, 2, 3]
    with self.assertRaises(ValueError):
      learning_rate_decay.piecewise_constant(x_int, boundaries, values)
    x = resource_variable_ops.ResourceVariable(0.0)
    boundaries, values = [-1.0, 1.0], [1.0, 2, 3]
    with self.assertRaises(ValueError):
      learning_rate_decay.piecewise_constant(x, boundaries, values)
    # Test that ref types are valid.
    if not context.executing_eagerly():
      x = variables.Variable(0.0)
      x_ref = x.op.outputs[0]  # float32_ref tensor should be accepted
      boundaries, values = [1.0, 2.0], [1, 2, 3]
      learning_rate_decay.piecewise_constant(x_ref, boundaries, values)
    # Test casting boundaries from int32 to int64.
    x_int64 = resource_variable_ops.ResourceVariable(
        0, dtype=variables.dtypes.int64)
    boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7]
    def pc():
      return learning_rate_decay.piecewise_constant(x_int64, boundaries, values)
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(self.evaluate(pc()), 0.4, 1e-6)
    self.evaluate(x_int64.assign(1))
    self.assertAllClose(self.evaluate(pc()), 0.4, 1e-6)
    self.evaluate(x_int64.assign(2))
    self.assertAllClose(self.evaluate(pc()), 0.5, 1e-6)
    self.evaluate(x_int64.assign(3))
    self.assertAllClose(self.evaluate(pc()), 0.6, 1e-6)
    self.evaluate(x_int64.assign(4))
    self.assertAllClose(self.evaluate(pc()), 0.7, 1e-6)
class LinearDecayTest(test_util.TensorFlowTestCase):
  """Tests polynomial_decay with the default power=1.0 (linear schedule)."""

  def _assert_decayed(self, step, end_lr, expected, cycle=False):
    # Builds the decay op (lr=0.05 over 10 decay steps) in a fresh session
    # and compares against the analytically computed expectation.
    with self.test_session():
      decayed_lr = learning_rate_decay.polynomial_decay(
          0.05, step, 10, end_lr, cycle=cycle)
      self.assertAllClose(decayed_lr.eval(), expected, 1e-6)

  def testHalfWay(self):
    # Halfway through the schedule the rate is at the midpoint.
    self._assert_decayed(step=5, end_lr=0.0, expected=0.05 * 0.5)

  def testEnd(self):
    # At decay_steps the rate equals end_lr exactly.
    self._assert_decayed(step=10, end_lr=0.001, expected=0.001)

  def testHalfWayWithEnd(self):
    self._assert_decayed(step=5, end_lr=0.001, expected=(0.05 + 0.001) * 0.5)

  def testBeyondEnd(self):
    # Past decay_steps the rate clamps at end_lr.
    self._assert_decayed(step=15, end_lr=0.001, expected=0.001)

  def testBeyondEndWithCycle(self):
    # With cycle=True the schedule restarts instead of clamping.
    self._assert_decayed(step=15, end_lr=0.001, cycle=True,
                         expected=(0.05 - 0.001) * 0.25 + 0.001)
class SqrtDecayTest(test_util.TensorFlowTestCase):
  """Tests polynomial_decay with power=0.5 (square-root schedule)."""

  def _assert_decayed(self, step, end_lr, expected, cycle=False):
    # Builds the sqrt-power decay op (lr=0.05 over 10 decay steps) and
    # compares it against the analytically computed expectation.
    with self.test_session():
      decayed_lr = learning_rate_decay.polynomial_decay(
          0.05, step, 10, end_lr, power=0.5, cycle=cycle)
      self.assertAllClose(decayed_lr.eval(), expected, 1e-6)

  def testHalfWay(self):
    self._assert_decayed(step=5, end_lr=0.0, expected=0.05 * 0.5 ** 0.5)

  def testEnd(self):
    self._assert_decayed(step=10, end_lr=0.001, expected=0.001)

  def testHalfWayWithEnd(self):
    self._assert_decayed(step=5, end_lr=0.001,
                         expected=(0.05 - 0.001) * 0.5 ** 0.5 + 0.001)

  def testBeyondEnd(self):
    # Past decay_steps the rate clamps at end_lr.
    self._assert_decayed(step=15, end_lr=0.001, expected=0.001)

  def testBeyondEndWithCycle(self):
    # With cycle=True the schedule restarts instead of clamping.
    self._assert_decayed(step=15, end_lr=0.001, cycle=True,
                         expected=(0.05 - 0.001) * 0.25 ** 0.5 + 0.001)
class PolynomialDecayTest(test_util.TensorFlowTestCase):
  """Covers a polynomial_decay edge case not hit by the linear/sqrt tests."""

  def testBeginWithCycle(self):
    """At step 0 with cycle=True the rate must equal the initial value."""
    initial_rate = 0.001
    with self.test_session():
      decayed_lr = learning_rate_decay.polynomial_decay(
          initial_rate, 0, 10, cycle=True)
      self.assertAllClose(decayed_lr.eval(), initial_rate, 1e-6)
class ExponentialDecayTest(test_util.TensorFlowTestCase):
  """Tests natural_exp_decay while stepping a mutable step variable."""
  def testDecay(self):
    """Continuous natural exponential decay over k+1 consecutive steps."""
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    # Raw variable op used as the global step counter.
    step = gen_state_ops.variable(
        shape=[], dtype=dtypes.int32, name="step", container="", shared_name="")
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr, step,
                                                       k, decay_rate)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        # Reference: lr * exp(-step / decay_steps * decay_rate).
        expected = initial_lr * math.exp(-i / k * decay_rate)
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run()
  def testStaircase(self):
    """Staircase mode: the exponent only changes every k steps."""
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    step = gen_state_ops.variable(
        shape=[], dtype=dtypes.int32, name="step", container="", shared_name="")
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr,
                                                       step,
                                                       k,
                                                       decay_rate,
                                                       staircase=True)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        # Integer quotient i // k drives the staircase exponent.
        expected = initial_lr * math.exp(-decay_rate * (i // k))
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run()
class InverseDecayTest(test_util.TensorFlowTestCase):
  """Tests inverse_time_decay while stepping a mutable step variable."""
  def testDecay(self):
    """Continuous inverse-time decay over k+1 consecutive steps."""
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    # Raw variable op used as the global step counter.
    step = gen_state_ops.variable(
        shape=[], dtype=dtypes.int32, name="step", container="", shared_name="")
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.inverse_time_decay(initial_lr,
                                                        step,
                                                        k,
                                                        decay_rate)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        # Reference: lr / (1 + step / decay_steps * decay_rate).
        expected = initial_lr / (1 + i / k * decay_rate)
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run()
  def testStaircase(self):
    """Staircase mode: the divisor only grows every k steps."""
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    step = gen_state_ops.variable(
        shape=[], dtype=dtypes.int32, name="step", container="", shared_name="")
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.inverse_time_decay(initial_lr,
                                                        step,
                                                        k,
                                                        decay_rate,
                                                        staircase=True)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        # Integer quotient i // k drives the staircase divisor.
        expected = initial_lr / (1 + decay_rate * (i // k))
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run()
class CosineDecayTest(test_util.TensorFlowTestCase):
  """Compares cosine_decay against a pure-Python reference implementation."""

  def np_cosine_decay(self, step, decay_steps, alpha=0.0):
    """Reference cosine decay: alpha + (1 - alpha) * 0.5 * (1 + cos(pi*t))."""
    clipped = min(step, decay_steps)
    fraction = clipped / decay_steps
    cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction))
    return (1.0 - alpha) * cosine_decayed + alpha

  def _compare(self, **kwargs):
    # Evaluates the TF op against the reference at steps before, at, and
    # beyond decay_steps.
    decay_steps = 1000
    for step in range(0, 1500, 250):
      with self.test_session():
        decayed_lr = learning_rate_decay.cosine_decay(
            1.0, step, decay_steps, **kwargs)
        expected = self.np_cosine_decay(step, decay_steps, **kwargs)
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)

  def testDecay(self):
    self._compare()

  def testAlpha(self):
    self._compare(alpha=0.1)
class CosineDecayRestartsTest(test_util.TensorFlowTestCase):
  """Compares cosine_decay_restarts (SGDR) against a reference implementation."""

  def np_cosine_decay_restarts(self, step, decay_steps, t_mul=2.0, m_mul=1.0,
                               alpha=0.0):
    """Reference SGDR: cosine decay whose amplitude restarts periodically."""
    amplitude = 1.0
    # Skip over completed restart periods: each period is t_mul times
    # longer than the previous and scales the amplitude by m_mul.
    while step >= decay_steps:
      step -= decay_steps
      decay_steps *= t_mul
      amplitude *= m_mul
    fraction = step / decay_steps
    decayed = amplitude * 0.5 * (1.0 + math.cos(math.pi * fraction))
    return (1.0 - alpha) * decayed + alpha

  def _compare(self, **kwargs):
    # Evaluates the TF op against the reference across the first restart.
    decay_steps = 1000
    for step in range(0, 1500, 250):
      with self.test_session():
        decayed_lr = learning_rate_decay.cosine_decay_restarts(
            1.0, step, decay_steps, **kwargs)
        expected = self.np_cosine_decay_restarts(step, decay_steps, **kwargs)
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)

  def testDecay(self):
    self._compare()

  def testAlpha(self):
    self._compare(alpha=0.1)

  def testMMul(self):
    self._compare(m_mul=0.9)

  def testTMul(self):
    self._compare(t_mul=1.0)
class LinearCosineDecayTest(test_util.TensorFlowTestCase):
  """Compares linear_cosine_decay against a reference implementation."""

  def np_linear_cosine_decay(self,
                             step,
                             decay_steps,
                             alpha=0.0,
                             beta=0.001,
                             num_periods=0.5):
    """Reference: (alpha + linear) * cosine + beta."""
    step = min(step, decay_steps)
    linear = float(decay_steps - step) / decay_steps
    fraction = 2.0 * num_periods * step / float(decay_steps)
    cosine = 0.5 * (1.0 + math.cos(math.pi * fraction))
    return (alpha + linear) * cosine + beta

  def _compare(self, **kwargs):
    # Evaluates the TF op against the reference at steps before, at, and
    # beyond decay_steps.
    decay_steps = 1000
    for step in range(0, 1500, 250):
      with self.test_session():
        decayed_lr = learning_rate_decay.linear_cosine_decay(
            1.0, step, decay_steps, **kwargs)
        expected = self.np_linear_cosine_decay(step, decay_steps, **kwargs)
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)

  def testDefaultDecay(self):
    self._compare()

  def testNonDefaultDecay(self):
    self._compare(alpha=0.1, beta=1e-4, num_periods=5)
class NoisyLinearCosineDecayTest(test_util.TensorFlowTestCase):
  """Smoke tests for noisy_linear_cosine_decay."""

  def _evaluate_at_steps(self, **kwargs):
    # Only verifies that the op builds and evaluates without error; the
    # injected noise makes an exact numerical comparison meaningless.
    for step in range(0, 1500, 250):
      with self.test_session():
        decayed_lr = learning_rate_decay.noisy_linear_cosine_decay(
            1.0, step, 1000, **kwargs)
        decayed_lr.eval()

  def testDefaultNoisyLinearCosine(self):
    self._evaluate_at_steps()

  def testNonDefaultNoisyLinearCosine(self):
    self._evaluate_at_steps(initial_variance=0.5, variance_decay=0.1,
                            alpha=0.1, beta=1e-4, num_periods=5)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
takeflight/django | tests/forms_tests/tests/test_extra.py | 16 | 35987 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import warnings
from django.forms import (
CharField, DateField, EmailField, FileField, Form, GenericIPAddressField,
HiddenInput, ImageField, IPAddressField, MultipleChoiceField,
MultiValueField, MultiWidget, PasswordInput, SelectMultiple, SlugField,
SplitDateTimeField, SplitDateTimeWidget, TextInput, URLField,
)
from django.forms.extras import SelectDateWidget
from django.forms.utils import ErrorList
from django.test import TestCase, override_settings
from django.utils import six
from django.utils import translation
from django.utils.dates import MONTHS_AP
from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible
from .test_error_messages import AssertFormErrorsMixin
class GetDate(Form):
    """Form with a single date field rendered via three <select> boxes."""
    mydate = DateField(widget=SelectDateWidget)
class GetDateShowHiddenInitial(Form):
    """Like GetDate, but also renders the initial value in a hidden input."""
    mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
class FormsExtraTestCase(TestCase, AssertFormErrorsMixin):
###############
# Extra stuff #
###############
# The forms library comes with some extra, higher-level Field and Widget
def test_selectdate(self):
self.maxDiff = None
w = SelectDateWidget(years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'))
# Rendering the default state.
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering the None or '' values should yield the same output.
self.assertHTMLEqual(w.render('mydate', None), w.render('mydate', ''))
# Rendering a string value.
self.assertHTMLEqual(w.render('mydate', '2010-04-15'), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected="selected">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected="selected">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering a datetime value.
self.assertHTMLEqual(w.render('mydate', datetime.date(2010, 4, 15)), w.render('mydate', '2010-04-15'))
# Invalid dates should still render the failed date.
self.assertHTMLEqual(w.render('mydate', '2010-02-31'), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2" selected="selected">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31" selected="selected">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering with a custom months dict.
w = SelectDateWidget(months=MONTHS_AP, years=('2013',))
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">Jan.</option>
<option value="2">Feb.</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">Aug.</option>
<option value="9">Sept.</option>
<option value="10">Oct.</option>
<option value="11">Nov.</option>
<option value="12">Dec.</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2013">2013</option>
</select>""")
a = GetDate({'mydate_month': '4', 'mydate_day': '1', 'mydate_year': '2008'})
self.assertTrue(a.is_valid())
self.assertEqual(a.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# As with any widget that implements get_value_from_datadict,
# we must be prepared to accept the input from the "as_hidden"
# rendering as well.
self.assertHTMLEqual(a['mydate'].as_hidden(), '<input type="hidden" name="mydate" value="2008-4-1" id="id_mydate" />')
b = GetDate({'mydate': '2008-4-1'})
self.assertTrue(b.is_valid())
self.assertEqual(b.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# Invalid dates shouldn't be allowed
c = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(c.is_valid())
self.assertEqual(c.errors, {'mydate': ['Enter a valid date.']})
# label tag is correctly associated with month dropdown
d = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_month">', d.as_p())
    def test_selectdate_empty_label(self):
        # empty_label replaces the default "---" placeholder option.  A single
        # string is shared by all three selects; a 3-tuple supplies one label
        # per select in (year, month, day) order.
        w = SelectDateWidget(years=('2014',), empty_label='empty_label')

        # Rendering the default state with empty_label set as a string:
        # the same placeholder must appear in each of the three selects.
        self.assertInHTML('<option value="0">empty_label</option>', w.render('mydate', ''), count=3)

        w = SelectDateWidget(years=('2014',), empty_label=('empty_year', 'empty_month', 'empty_day'))

        # Rendering the default state with an empty_label tuple.
        self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">empty_month</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">empty_day</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">empty_year</option>
<option value="2014">2014</option>
</select>""")

        # A tuple of any other length is rejected.
        self.assertRaisesMessage(ValueError, 'empty_label list/tuple must have 3 elements.',
            SelectDateWidget, years=('2014',), empty_label=('not enough', 'values'))
    def test_multiwidget(self):
        # MultiWidget and MultiValueField #####################################
        # MultiWidgets are widgets composed of other widgets. They are usually
        # combined with MultiValueFields - a field that is composed of other
        # fields. MultiWidgets can themselves be composed of other
        # MultiWidgets. SplitDateTimeWidget is one example of a MultiWidget.
        class ComplexMultiWidget(MultiWidget):
            def __init__(self, attrs=None):
                widgets = (
                    TextInput(),
                    SelectMultiple(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
                    SplitDateTimeWidget(),
                )
                super(ComplexMultiWidget, self).__init__(widgets, attrs)

            def decompress(self, value):
                # Split a "text,choices,datetime" string into one value per
                # sub-widget; None when there is no value to render.
                if value:
                    data = value.split(',')
                    return [data[0], list(data[1]), datetime.datetime.strptime(data[2], "%Y-%m-%d %H:%M:%S")]
                return [None, None, None]

            def format_output(self, rendered_widgets):
                return '\n'.join(rendered_widgets)

        w = ComplexMultiWidget()
        self.assertHTMLEqual(w.render('name', 'some text,JP,2007-04-25 06:24:00'), """<input type="text" name="name_0" value="some text" />
<select multiple="multiple" name="name_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="name_2_0" value="2007-04-25" /><input type="text" name="name_2_1" value="06:24:00" />""")

        class ComplexField(MultiValueField):
            def __init__(self, required=True, widget=None, label=None, initial=None):
                fields = (
                    CharField(),
                    MultipleChoiceField(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
                    SplitDateTimeField()
                )
                super(ComplexField, self).__init__(fields, required, widget, label, initial)

            def compress(self, data_list):
                # Inverse of ComplexMultiWidget.decompress: join the cleaned
                # per-field values back into a single comma-separated string.
                if data_list:
                    return '%s,%s,%s' % (data_list[0], ''.join(data_list[1]), data_list[2])
                return None

        f = ComplexField(widget=w)
        self.assertEqual(f.clean(['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]), 'some text,JP,2007-04-25 06:24:00')
        self.assertFormErrors(['Select a valid choice. X is not one of the available choices.'], f.clean, ['some text', ['X'], ['2007-04-25', '6:24:00']])

        # If insufficient data is provided, None is substituted.
        self.assertFormErrors(['This field is required.'], f.clean, ['some text', ['JP']])

        # has_changed with no initial data
        self.assertTrue(f.has_changed(None, ['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]))

        # has_changed when the data is the same as initial
        self.assertFalse(f.has_changed('some text,JP,2007-04-25 06:24:00',
            ['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]))

        # has_changed when the first widget's data has changed
        self.assertTrue(f.has_changed('some text,JP,2007-04-25 06:24:00',
            ['other text', ['J', 'P'], ['2007-04-25', '6:24:00']]))

        # has_changed when the last widget's data has changed. This ensures
        # that it is not short-circuiting while testing the widgets.
        self.assertTrue(f.has_changed('some text,JP,2007-04-25 06:24:00',
            ['some text', ['J', 'P'], ['2009-04-25', '11:44:00']]))

        class ComplexFieldForm(Form):
            field1 = ComplexField(widget=w)

        # Unbound form: no values rendered.
        f = ComplexFieldForm()
        self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" id="id_field1_2_0" /><input type="text" name="field1_2_1" id="id_field1_2_1" /></td></tr>""")

        # Bound form: submitted values are rendered and cleaned/compressed.
        f = ComplexFieldForm({'field1_0': 'some text', 'field1_1': ['J', 'P'], 'field1_2_0': '2007-04-25', 'field1_2_1': '06:24:00'})
        self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" value="some text" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" value="2007-04-25" id="id_field1_2_0" /><input type="text" name="field1_2_1" value="06:24:00" id="id_field1_2_1" /></td></tr>""")

        self.assertEqual(f.cleaned_data['field1'], 'some text,JP,2007-04-25 06:24:00')
def test_ipaddress(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
f = IPAddressField()
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1'), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
f = IPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean(' 127.0.0.1'), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')
def test_generic_ipaddress_invalid_arguments(self):
self.assertRaises(ValueError, GenericIPAddressField, protocol="hamster")
self.assertRaises(ValueError, GenericIPAddressField, protocol="ipv4", unpack_ipv4=True)
def test_generic_ipaddress_as_generic(self):
# The edge cases of the IPv6 validation code are not deeply tested
# here, they are covered in the tests for django.utils.ipv6
f = GenericIPAddressField()
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '12345:2:3:4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3::4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1:2')
def test_generic_ipaddress_as_ipv4_only(self):
f = GenericIPAddressField(protocol="IPv4")
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'fe80::223:6cff:fe8a:2e8a')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '2a02::223:6cff:fe8a:2e8a')
def test_generic_ipaddress_as_ipv6_only(self):
f = GenericIPAddressField(protocol="IPv6")
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '12345:2:3:4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3::4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1:2')
def test_generic_ipaddress_as_generic_not_required(self):
f = GenericIPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '12345:2:3:4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3::4')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1:2')
def test_generic_ipaddress_normalization(self):
# Test the normalizing code
f = GenericIPAddressField()
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' ::ffff:10.10.10.10 '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' 2001:000:a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
self.assertEqual(f.clean(' 2001::a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
f = GenericIPAddressField(unpack_ipv4=True)
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a'), '10.10.10.10')
def test_slugfield_normalization(self):
f = SlugField()
self.assertEqual(f.clean(' aa-bb-cc '), 'aa-bb-cc')
def test_urlfield_normalization(self):
f = URLField()
self.assertEqual(f.clean('http://example.com/ '), 'http://example.com/')
    def test_smart_text(self):
        # smart_text must produce text from objects whose __str__ returns
        # bytes (Python 2) or text (Python 3), from objects that define
        # __unicode__/__bytes__, and from plain non-string values.
        class Test:
            if six.PY3:
                def __str__(self):
                    return 'ŠĐĆŽćžšđ'
            else:
                def __str__(self):
                    # Python 2: __str__ returns UTF-8 encoded bytes.
                    return 'ŠĐĆŽćžšđ'.encode('utf-8')

        class TestU:
            if six.PY3:
                def __str__(self):
                    return 'ŠĐĆŽćžšđ'

                def __bytes__(self):
                    # __bytes__ must be ignored in favour of __str__ on py3.
                    return b'Foo'
            else:
                def __str__(self):
                    # __str__ must be ignored in favour of __unicode__ on py2.
                    return b'Foo'

                def __unicode__(self):
                    return '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111'

        self.assertEqual(smart_text(Test()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
        self.assertEqual(smart_text(TestU()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
        self.assertEqual(smart_text(1), '1')
        self.assertEqual(smart_text('foo'), 'foo')
def test_accessing_clean(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
data = self.cleaned_data
if not self.errors:
data['username'] = data['username'].lower()
return data
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_nothing_returned(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
self.cleaned_data['username'] = self.cleaned_data['username'].lower()
# don't return anything
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_in_clean(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
data = self.cleaned_data
# Return a different dict. We have not changed self.cleaned_data.
return {
'username': data['username'].lower(),
'password': 'this_is_not_a_secret',
}
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
    def test_overriding_errorlist(self):
        # A custom error_class replaces the default <ul class="errorlist">
        # rendering with arbitrary markup.
        @python_2_unicode_compatible
        class DivErrorList(ErrorList):
            def __str__(self):
                return self.as_divs()

            def as_divs(self):
                # Empty error lists render as nothing at all.
                if not self:
                    return ''
                return '<div class="errorlist">%s</div>' % ''.join('<div class="error">%s</div>' % force_text(e) for e in self)

        class CommentForm(Form):
            name = CharField(max_length=50, required=False)
            email = EmailField()
            comment = CharField()

        # 'email' is invalid and 'comment' is missing, so both should render
        # through the custom DivErrorList.
        data = dict(email='invalid')
        f = CommentForm(data, auto_id=False, error_class=DivErrorList)
        self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50" /></p>
<div class="errorlist"><div class="error">Enter a valid email address.</div></div>
<p>Email: <input type="email" name="email" value="invalid" /></p>
<div class="errorlist"><div class="error">This field is required.</div></div>
<p>Comment: <input type="text" name="comment" /></p>""")
def test_multipart_encoded_form(self):
class FormWithoutFile(Form):
username = CharField()
class FormWithFile(Form):
username = CharField()
file = FileField()
class FormWithImage(Form):
image = ImageField()
self.assertFalse(FormWithoutFile().is_multipart())
self.assertTrue(FormWithFile().is_multipart())
self.assertTrue(FormWithImage().is_multipart())
def test_selectdatewidget_required(self):
class GetNotRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=False)
class GetRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=True)
self.assertFalse(GetNotRequiredDate().fields['mydate'].widget.is_required)
self.assertTrue(GetRequiredDate().fields['mydate'].widget.is_required)
@override_settings(USE_L10N=True)
class FormsExtraL10NTestCase(TestCase):
    """Localized (Dutch locale) variants of the SelectDateWidget tests."""

    def setUp(self):
        super(FormsExtraL10NTestCase, self).setUp()
        translation.activate('nl')

    def tearDown(self):
        translation.deactivate()
        super(FormsExtraL10NTestCase, self).tearDown()

    def test_l10n(self):
        w = SelectDateWidget(years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'))
        # In the 'nl' locale the compact date format is day-month-year.
        self.assertEqual(w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-2010')

        # The selects are rendered in localized order (day first) with
        # localized month names.
        self.assertHTMLEqual(w.render('date', '13-08-2010'), """<select name="date_day" id="id_date_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13" selected="selected">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="date_month" id="id_date_month">
<option value="0">---</option>
<option value="1">januari</option>
<option value="2">februari</option>
<option value="3">maart</option>
<option value="4">april</option>
<option value="5">mei</option>
<option value="6">juni</option>
<option value="7">juli</option>
<option value="8" selected="selected">augustus</option>
<option value="9">september</option>
<option value="10">oktober</option>
<option value="11">november</option>
<option value="12">december</option>
</select>
<select name="date_year" id="id_date_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")

        # Years before 1900 work.
        w = SelectDateWidget(years=('1899',))
        self.assertEqual(w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-1899')

    def test_l10n_date_changed(self):
        """
        Ensure that DateField.has_changed() with SelectDateWidget works
        correctly with a localized date format.
        Refs #17165.
        """
        # With Field.show_hidden_initial=False -----------------------
        b = GetDate({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '1',
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())

        b = GetDate({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '2',
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertTrue(b.has_changed())

        # With Field.show_hidden_initial=True ------------------------
        # The comparison is made against the hidden initial value, not the
        # value passed via initial=.
        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '1',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())

        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertTrue(b.has_changed())

        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 22)})
        self.assertTrue(b.has_changed())

        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 22))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())

    def test_l10n_invalid_date_in(self):
        # Invalid dates shouldn't be allowed.
        a = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
        self.assertFalse(a.is_valid())
        # 'Geef een geldige datum op.' = 'Enter a valid date.'
        self.assertEqual(a.errors, {'mydate': ['Geef een geldige datum op.']})

    def test_form_label_association(self):
        # label tag is correctly associated with first rendered dropdown
        # (the day select comes first in the 'nl' locale).
        a = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
        self.assertIn('<label for="id_mydate_day">', a.as_p())
| bsd-3-clause |
darmaa/odoo | addons/account_followup/__openerp__.py | 64 | 2998 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest for the account_followup module.
{
    'name': 'Payment Follow-up Management',
    'version': '1.0',
    'category': 'Accounting & Finance',
    'description': """
Module to automate letters for unpaid invoices, with multi-level recalls.
=========================================================================
You can define your multiple levels of recall through the menu:
---------------------------------------------------------------
Configuration / Follow-up / Follow-up Levels
Once it is defined, you can automatically print recalls every day through simply clicking on the menu:
------------------------------------------------------------------------------------------------------
Payment Follow-Up / Send Email and letters
It will generate a PDF / send emails / set manual actions according to the the different levels
of recall defined. You can define different policies for different companies.
Note that if you want to check the follow-up level for a given partner/account entry, you can do from in the menu:
------------------------------------------------------------------------------------------------------------------
Reporting / Accounting / **Follow-ups Analysis
""",
    'author': 'OpenERP SA',
    'website': 'http://www.openerp.com',
    # Screenshots shown on the module description page.
    'images': ['images/follow_ups.jpeg', 'images/send_followups.jpeg'],
    # Modules that must be installed before this one.
    'depends': ['account_accountant', 'mail'],
    # XML/CSV resources loaded at install/update time (load order matters).
    'data': [
        'security/account_followup_security.xml',
        'security/ir.model.access.csv',
        'report/account_followup_report.xml',
        'account_followup_data.xml',
        'account_followup_view.xml',
        'account_followup_customers.xml',
        'wizard/account_followup_print_view.xml',
        'res_config_view.xml',
        'views/report_followup.xml',
        'account_followup_reports.xml'
    ],
    'demo': ['account_followup_demo.xml'],
    'test': [
        'test/account_followup.yml',
    ],
    'installable': True,
    # Not auto-installed when its dependencies happen to be present.
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kennedyshead/home-assistant | homeassistant/helpers/event.py | 2 | 48126 | """Helpers for listening to events."""
from __future__ import annotations
import asyncio
from collections.abc import Awaitable, Iterable
import copy
from dataclasses import dataclass
from datetime import datetime, timedelta
import functools as ft
import logging
import time
from typing import Any, Callable, List, cast
import attr
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NOW,
EVENT_CORE_CONFIG_UPDATE,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
MATCH_ALL,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.core import (
CALLBACK_TYPE,
Event,
HassJob,
HomeAssistant,
State,
callback,
split_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.entity_registry import EVENT_ENTITY_REGISTRY_UPDATED
from homeassistant.helpers.ratelimit import KeyedRateLimit
from homeassistant.helpers.sun import get_astral_event_next
from homeassistant.helpers.template import RenderInfo, Template, result_as_boolean
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
from homeassistant.util.async_ import run_callback_threadsafe
# hass.data keys: each tracking flavour stores a per-key callback index
# (*_CALLBACKS) plus the single shared bus listener (*_LISTENER) that
# dispatches events into that index.
TRACK_STATE_CHANGE_CALLBACKS = "track_state_change_callbacks"
TRACK_STATE_CHANGE_LISTENER = "track_state_change_listener"

TRACK_STATE_ADDED_DOMAIN_CALLBACKS = "track_state_added_domain_callbacks"
TRACK_STATE_ADDED_DOMAIN_LISTENER = "track_state_added_domain_listener"

TRACK_STATE_REMOVED_DOMAIN_CALLBACKS = "track_state_removed_domain_callbacks"
TRACK_STATE_REMOVED_DOMAIN_LISTENER = "track_state_removed_domain_listener"

TRACK_ENTITY_REGISTRY_UPDATED_CALLBACKS = "track_entity_registry_updated_callbacks"
TRACK_ENTITY_REGISTRY_UPDATED_LISTENER = "track_entity_registry_updated_listener"

# NOTE(review): grouping keys — presumably used by the template-tracking
# helpers further down in this module; confirm against the full file.
_ALL_LISTENER = "all"
_DOMAINS_LISTENER = "domains"
_ENTITIES_LISTENER = "entities"

_LOGGER = logging.getLogger(__name__)
@dataclass
class TrackStates:
    """Class for keeping track of states being tracked.

    all_states: All states on the system are being tracked
    entities: Entities to track
    domains: Domains to track
    """

    # When all_states is True the entities/domains sets are not consulted.
    all_states: bool
    entities: set  # presumably entity_id strings — confirm with callers
    domains: set  # presumably domain strings — confirm with callers
@dataclass
class TrackTemplate:
    """Class for keeping track of a template with variables.

    The template is template to calculate.
    The variables are variables to pass to the template.
    The rate_limit is a rate limit on how often the template is re-rendered.
    """

    template: Template
    variables: TemplateVarsType
    # None means no per-template override of the re-render rate limit.
    rate_limit: timedelta | None = None
@dataclass
class TrackTemplateResult:
    """Class for result of template tracking.

    template
        The template that has changed.
    last_result
        The output from the template on the last successful run, or None
        if no previous successful run.
    result
        Result from the template run. This will be a string or an
        TemplateError if the template resulted in an error.
    """

    template: Template
    last_result: Any
    result: Any
def threaded_listener_factory(
    async_factory: Callable[..., Any]
) -> Callable[..., CALLBACK_TYPE]:
    """Convert an async event helper to a threaded one.

    The wrapped helper must take a HomeAssistant instance as its first
    argument and return an unsubscribe callback.
    """

    @ft.wraps(async_factory)
    def factory(*args: Any, **kwargs: Any) -> CALLBACK_TYPE:
        """Call async event helper safely."""
        hass = args[0]
        if not isinstance(hass, HomeAssistant):
            raise TypeError("First parameter needs to be a hass instance")

        # Register on the event loop from this worker thread and block
        # until the unsubscribe callback comes back.
        register = ft.partial(async_factory, *args, **kwargs)
        unsubscribe = run_callback_threadsafe(hass.loop, register).result()

        def remove() -> None:
            """Threadsafe removal."""
            run_callback_threadsafe(hass.loop, unsubscribe).result()

        return remove

    return factory
@callback
@bind_hass
def async_track_state_change(
    hass: HomeAssistant,
    entity_ids: str | Iterable[str],
    action: Callable[[str, State, State], Awaitable[None] | None],
    from_state: None | str | Iterable[str] = None,
    to_state: None | str | Iterable[str] = None,
) -> CALLBACK_TYPE:
    """Track specific state changes.

    entity_ids, from_state and to_state can be string or list.
    Use list to match multiple.

    Returns a function that can be called to remove the listener.

    If entity_ids are not MATCH_ALL along with from_state and to_state
    being None, async_track_state_change_event should be used instead
    as it is slightly faster.

    Must be run within the event loop.
    """
    # Precompile the from/to matchers only when requested so the filter
    # below stays cheap in the common no-constraint case.
    if from_state is not None:
        match_from_state = process_state_match(from_state)
    if to_state is not None:
        match_to_state = process_state_match(to_state)

    # Ensure it is a lowercase tuple with entity ids we want to match on.
    if entity_ids == MATCH_ALL:
        pass
    elif isinstance(entity_ids, str):
        entity_ids = (entity_ids.lower(),)
    else:
        entity_ids = tuple(entity_id.lower() for entity_id in entity_ids)

    job = HassJob(action)

    @callback
    def state_change_filter(event: Event) -> bool:
        """Return True if the event satisfies the from/to constraints."""
        if from_state is not None:
            # old_state is None for newly added entities.
            old_state = event.data.get("old_state")
            if old_state is not None:
                old_state = old_state.state

            if not match_from_state(old_state):
                return False

        if to_state is not None:
            # new_state is None for removed entities.
            new_state = event.data.get("new_state")
            if new_state is not None:
                new_state = new_state.state

            if not match_to_state(new_state):
                return False

        return True

    @callback
    def state_change_dispatcher(event: Event) -> None:
        """Invoke the action with (entity_id, old_state, new_state)."""
        hass.async_run_hass_job(
            job,
            event.data.get("entity_id"),
            event.data.get("old_state"),
            event.data.get("new_state"),
        )

    @callback
    def state_change_listener(event: Event) -> None:
        """Filter then dispatch (used on the per-entity routing path)."""
        if not state_change_filter(event):
            return

        state_change_dispatcher(event)

    if entity_ids != MATCH_ALL:
        # If we have a list of entity ids we use
        # async_track_state_change_event to route by entity_id, to avoid
        # iterating through state change events and creating jobs where the
        # most common outcome is to return right away because the entity_id
        # does not match, since usually only one or two listeners want that
        # specific entity_id.
        return async_track_state_change_event(hass, entity_ids, state_change_listener)

    # MATCH_ALL: listen to every state change; the filter runs inside the
    # event bus before a job is created.
    return hass.bus.async_listen(
        EVENT_STATE_CHANGED, state_change_dispatcher, event_filter=state_change_filter
    )


# Thread-safe wrapper generated from the async implementation above.
track_state_change = threaded_listener_factory(async_track_state_change)
@bind_hass
def async_track_state_change_event(
    hass: HomeAssistant,
    entity_ids: str | Iterable[str],
    action: Callable[[Event], Any],
) -> Callable[[], None]:
    """Track specific state change events indexed by entity_id.

    Unlike async_track_state_change, async_track_state_change_event
    passes the full event to the callback.

    In order to avoid having to iterate a long list
    of EVENT_STATE_CHANGED and fire and create a job
    for each one, we keep a dict of entity ids that
    care about the state change events so we can
    do a fast dict lookup to route events.
    """
    entity_ids = _async_string_to_lower_list(entity_ids)
    if not entity_ids:
        # Nothing to track; hand back a callable so callers can still
        # unconditionally invoke the returned unsubscribe function.
        return _remove_empty_listener

    # entity_id -> list[HassJob], shared by all callers of this helper.
    entity_callbacks = hass.data.setdefault(TRACK_STATE_CHANGE_CALLBACKS, {})

    # Install the single shared bus listener lazily, on first use.
    if TRACK_STATE_CHANGE_LISTENER not in hass.data:

        @callback
        def _async_state_change_filter(event: Event) -> bool:
            """Filter state changes by entity_id."""
            return event.data.get("entity_id") in entity_callbacks

        @callback
        def _async_state_change_dispatcher(event: Event) -> None:
            """Dispatch state changes by entity_id."""
            entity_id = event.data.get("entity_id")

            if entity_id not in entity_callbacks:
                return

            # Iterate over a copy ([:]) — presumably so callbacks can
            # unsubscribe themselves during dispatch; confirm upstream.
            for job in entity_callbacks[entity_id][:]:
                try:
                    hass.async_run_hass_job(job, event)
                except Exception:  # pylint: disable=broad-except
                    # One failing listener must not break the others.
                    _LOGGER.exception(
                        "Error while processing state change for %s", entity_id
                    )

        hass.data[TRACK_STATE_CHANGE_LISTENER] = hass.bus.async_listen(
            EVENT_STATE_CHANGED,
            _async_state_change_dispatcher,
            event_filter=_async_state_change_filter,
        )

    job = HassJob(action)

    for entity_id in entity_ids:
        entity_callbacks.setdefault(entity_id, []).append(job)

    @callback
    def remove_listener() -> None:
        """Remove state change listener."""
        _async_remove_indexed_listeners(
            hass,
            TRACK_STATE_CHANGE_CALLBACKS,
            TRACK_STATE_CHANGE_LISTENER,
            entity_ids,
            job,
        )

    return remove_listener
@callback
def _remove_empty_listener() -> None:
    """Remove a listener that does nothing."""
    # Intentionally a no-op: returned as the "unsubscribe" callable when
    # there was nothing to subscribe in the first place.
@callback
def _async_remove_indexed_listeners(
    hass: HomeAssistant,
    data_key: str,
    listener_key: str,
    storage_keys: Iterable[str],
    job: HassJob,
) -> None:
    """Detach *job* from each storage key and tear down the shared listener.

    When the last job under a key is removed the key is dropped; when the
    index becomes empty the shared bus listener is cancelled and removed.
    """
    indexed = hass.data[data_key]

    for storage_key in storage_keys:
        jobs = indexed[storage_key]
        jobs.remove(job)
        if not jobs:
            del indexed[storage_key]

    if indexed:
        return

    # Last subscriber is gone: cancel and discard the shared bus listener.
    hass.data[listener_key]()
    del hass.data[listener_key]
@bind_hass
def async_track_entity_registry_updated_event(
    hass: HomeAssistant,
    entity_ids: str | Iterable[str],
    action: Callable[[Event], Any],
) -> Callable[[], None]:
    """Track specific entity registry updated events indexed by entity_id.

    Similar to async_track_state_change_event.

    Returns a callable that unsubscribes this listener.
    """
    entity_ids = _async_string_to_lower_list(entity_ids)
    if not entity_ids:
        # Nothing to track: hand back a no-op unsubscribe callable.
        return _remove_empty_listener

    # entity_id -> list[HassJob]; shared by every caller of this helper.
    entity_callbacks = hass.data.setdefault(TRACK_ENTITY_REGISTRY_UPDATED_CALLBACKS, {})

    if TRACK_ENTITY_REGISTRY_UPDATED_LISTENER not in hass.data:
        # First tracker: install the single shared bus listener.

        @callback
        def _async_entity_registry_updated_filter(event: Event) -> bool:
            """Filter entity registry updates by entity_id."""
            # A renamed entity is routed under its previous id when present.
            entity_id = event.data.get("old_entity_id", event.data["entity_id"])
            return entity_id in entity_callbacks

        @callback
        def _async_entity_registry_updated_dispatcher(event: Event) -> None:
            """Dispatch entity registry updates by entity_id."""
            entity_id = event.data.get("old_entity_id", event.data["entity_id"])

            if entity_id not in entity_callbacks:
                return

            # Iterate a copy so a job may unsubscribe itself while firing.
            for job in entity_callbacks[entity_id][:]:
                try:
                    hass.async_run_hass_job(job, event)
                except Exception:  # pylint: disable=broad-except
                    _LOGGER.exception(
                        "Error while processing entity registry update for %s",
                        entity_id,
                    )

        hass.data[TRACK_ENTITY_REGISTRY_UPDATED_LISTENER] = hass.bus.async_listen(
            EVENT_ENTITY_REGISTRY_UPDATED,
            _async_entity_registry_updated_dispatcher,
            event_filter=_async_entity_registry_updated_filter,
        )

    job = HassJob(action)

    for entity_id in entity_ids:
        entity_callbacks.setdefault(entity_id, []).append(job)

    @callback
    def remove_listener() -> None:
        """Remove state change listener."""
        _async_remove_indexed_listeners(
            hass,
            TRACK_ENTITY_REGISTRY_UPDATED_CALLBACKS,
            TRACK_ENTITY_REGISTRY_UPDATED_LISTENER,
            entity_ids,
            job,
        )

    return remove_listener
@callback
def _async_dispatch_domain_event(
    hass: HomeAssistant, event: Event, callbacks: dict[str, list]
) -> None:
    """Run the jobs registered for the event's domain (and MATCH_ALL)."""
    domain = split_entity_id(event.data["entity_id"])[0]

    if domain not in callbacks and MATCH_ALL not in callbacks:
        return

    # Jobs registered for this specific domain plus the wildcard listeners.
    listeners = callbacks.get(domain, []) + callbacks.get(MATCH_ALL, [])

    for job in listeners:
        try:
            hass.async_run_hass_job(job, event)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "Error while processing event %s for domain %s", event, domain
            )
@bind_hass
def async_track_state_added_domain(
    hass: HomeAssistant,
    domains: str | Iterable[str],
    action: Callable[[Event], Any],
) -> Callable[[], None]:
    """Track state change events when an entity is added to domains.

    Returns a callable that unsubscribes this listener.
    """
    domains = _async_string_to_lower_list(domains)
    if not domains:
        # Nothing to track: hand back a no-op unsubscribe callable.
        return _remove_empty_listener

    # domain -> list[HassJob]; shared by every caller of this helper.
    domain_callbacks = hass.data.setdefault(TRACK_STATE_ADDED_DOMAIN_CALLBACKS, {})

    if TRACK_STATE_ADDED_DOMAIN_LISTENER not in hass.data:
        # First tracker: install the single shared bus listener.

        @callback
        def _async_state_change_filter(event: Event) -> bool:
            """Filter state changes by entity_id."""
            # No old_state means the entity was just added.
            return event.data.get("old_state") is None

        @callback
        def _async_state_change_dispatcher(event: Event) -> None:
            """Dispatch state changes by entity_id."""
            if event.data.get("old_state") is not None:
                return

            _async_dispatch_domain_event(hass, event, domain_callbacks)

        hass.data[TRACK_STATE_ADDED_DOMAIN_LISTENER] = hass.bus.async_listen(
            EVENT_STATE_CHANGED,
            _async_state_change_dispatcher,
            event_filter=_async_state_change_filter,
        )

    job = HassJob(action)

    for domain in domains:
        domain_callbacks.setdefault(domain, []).append(job)

    @callback
    def remove_listener() -> None:
        """Remove state change listener."""
        _async_remove_indexed_listeners(
            hass,
            TRACK_STATE_ADDED_DOMAIN_CALLBACKS,
            TRACK_STATE_ADDED_DOMAIN_LISTENER,
            domains,
            job,
        )

    return remove_listener
@bind_hass
def async_track_state_removed_domain(
    hass: HomeAssistant,
    domains: str | Iterable[str],
    action: Callable[[Event], Any],
) -> Callable[[], None]:
    """Track state change events when an entity is removed from domains.

    Returns a callable that unsubscribes this listener.
    """
    domains = _async_string_to_lower_list(domains)
    if not domains:
        # Nothing to track: hand back a no-op unsubscribe callable.
        return _remove_empty_listener

    # domain -> list[HassJob]; shared by every caller of this helper.
    domain_callbacks = hass.data.setdefault(TRACK_STATE_REMOVED_DOMAIN_CALLBACKS, {})

    if TRACK_STATE_REMOVED_DOMAIN_LISTENER not in hass.data:
        # First tracker: install the single shared bus listener.

        @callback
        def _async_state_change_filter(event: Event) -> bool:
            """Filter state changes by entity_id."""
            # No new_state means the entity was just removed.
            return event.data.get("new_state") is None

        @callback
        def _async_state_change_dispatcher(event: Event) -> None:
            """Dispatch state changes by entity_id."""
            if event.data.get("new_state") is not None:
                return

            _async_dispatch_domain_event(hass, event, domain_callbacks)

        hass.data[TRACK_STATE_REMOVED_DOMAIN_LISTENER] = hass.bus.async_listen(
            EVENT_STATE_CHANGED,
            _async_state_change_dispatcher,
            event_filter=_async_state_change_filter,
        )

    job = HassJob(action)

    for domain in domains:
        domain_callbacks.setdefault(domain, []).append(job)

    @callback
    def remove_listener() -> None:
        """Remove state change listener."""
        _async_remove_indexed_listeners(
            hass,
            TRACK_STATE_REMOVED_DOMAIN_CALLBACKS,
            TRACK_STATE_REMOVED_DOMAIN_LISTENER,
            domains,
            job,
        )

    return remove_listener
@callback
def _async_string_to_lower_list(instr: str | Iterable[str]) -> list[str]:
    """Normalize a string or iterable of strings into a lowercase list."""
    # A bare string is treated as one item, not iterated character-wise.
    if isinstance(instr, str):
        return [instr.lower()]

    return list(map(str.lower, instr))
class _TrackStateChangeFiltered:
    """Handle removal / refresh of tracker.

    Maintains up to three underlying listeners (all-states, entities,
    domains) and swaps them as the tracked state set changes.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        track_states: TrackStates,
        action: Callable[[Event], Any],
    ) -> None:
        """Handle removal / refresh of tracker init."""
        self.hass = hass
        self._action = action
        # Active unsubscribe callables keyed by listener kind
        # (_ALL_LISTENER / _ENTITIES_LISTENER / _DOMAINS_LISTENER).
        self._listeners: dict[str, Callable] = {}
        self._last_track_states: TrackStates = track_states

    @callback
    def async_setup(self) -> None:
        """Create listeners to track states."""
        track_states = self._last_track_states

        if (
            not track_states.all_states
            and not track_states.domains
            and not track_states.entities
        ):
            # Nothing to watch.
            return

        if track_states.all_states:
            # An all-states listener supersedes entity/domain listeners.
            self._setup_all_listener()
            return

        self._setup_domains_listener(track_states.domains)
        self._setup_entities_listener(track_states.domains, track_states.entities)

    @property
    def listeners(self) -> dict:
        """State changes that will cause a re-render."""
        track_states = self._last_track_states
        return {
            _ALL_LISTENER: track_states.all_states,
            _ENTITIES_LISTENER: track_states.entities,
            _DOMAINS_LISTENER: track_states.domains,
        }

    @callback
    def async_update_listeners(self, new_track_states: TrackStates) -> None:
        """Update the listeners based on the new TrackStates."""
        last_track_states = self._last_track_states
        self._last_track_states = new_track_states

        had_all_listener = last_track_states.all_states

        if new_track_states.all_states:
            if had_all_listener:
                # Already tracking everything; nothing to change.
                return
            # Upgrading to all-states: drop the targeted listeners.
            self._cancel_listener(_DOMAINS_LISTENER)
            self._cancel_listener(_ENTITIES_LISTENER)
            self._setup_all_listener()
            return

        if had_all_listener:
            self._cancel_listener(_ALL_LISTENER)

        domains_changed = new_track_states.domains != last_track_states.domains

        if had_all_listener or domains_changed:
            domains_changed = True
            self._cancel_listener(_DOMAINS_LISTENER)
            self._setup_domains_listener(new_track_states.domains)

        if (
            had_all_listener
            or domains_changed
            or new_track_states.entities != last_track_states.entities
        ):
            # The entities listener also covers current members of tracked
            # domains, so it must be rebuilt whenever domains change.
            self._cancel_listener(_ENTITIES_LISTENER)
            self._setup_entities_listener(
                new_track_states.domains, new_track_states.entities
            )

    @callback
    def async_remove(self) -> None:
        """Cancel the listeners."""
        for key in list(self._listeners):
            self._listeners.pop(key)()

    @callback
    def _cancel_listener(self, listener_name: str) -> None:
        # Cancel one listener by kind, if it is currently active.
        if listener_name not in self._listeners:
            return

        self._listeners.pop(listener_name)()

    @callback
    def _setup_entities_listener(self, domains: set, entities: set) -> None:
        # Track explicit entities plus the current members of the domains.
        if domains:
            entities = entities.copy()
            entities.update(self.hass.states.async_entity_ids(domains))

        # Entities has changed to none
        if not entities:
            return

        self._listeners[_ENTITIES_LISTENER] = async_track_state_change_event(
            self.hass, entities, self._action
        )

    @callback
    def _setup_domains_listener(self, domains: set) -> None:
        if not domains:
            return

        self._listeners[_DOMAINS_LISTENER] = async_track_state_added_domain(
            self.hass, domains, self._action
        )

    @callback
    def _setup_all_listener(self) -> None:
        # Single bus listener for every state change.
        self._listeners[_ALL_LISTENER] = self.hass.bus.async_listen(
            EVENT_STATE_CHANGED, self._action
        )
@callback
@bind_hass
def async_track_state_change_filtered(
    hass: HomeAssistant,
    track_states: TrackStates,
    action: Callable[[Event], Any],
) -> _TrackStateChangeFiltered:
    """Track state changes with a TrackStates filter that can be updated.

    Parameters
    ----------
    hass
        Home assistant object.
    track_states
        A TrackStates data class.
    action
        Callable to call with results.

    Returns
    -------
    Object used to update the listeners (async_update_listeners) with a new TrackStates or
    cancel the tracking (async_remove).

    """
    tracker = _TrackStateChangeFiltered(hass, track_states, action)
    # Install the listeners immediately so no events are missed.
    tracker.async_setup()
    return tracker
@callback
@bind_hass
def async_track_template(
    hass: HomeAssistant,
    template: Template,
    action: Callable[[str, State | None, State | None], Awaitable[None] | None],
    variables: TemplateVarsType | None = None,
) -> Callable[[], None]:
    """Add a listener that fires when a template evaluates to 'true'.

    Listen for the result of the template becoming true, or a true-like
    string result, such as 'On', 'Open', or 'Yes'. If the template results
    in an error state when the value changes, this will be logged and not
    passed through.

    If the initial check of the template is invalid and results in an
    exception, the listener will still be registered but will only
    fire if the template result becomes true without an exception.

    Action arguments
    ----------------
    entity_id
        ID of the entity that triggered the state change.
    old_state
        The old state of the entity that changed.
    new_state
        New state of the entity that changed.

    Parameters
    ----------
    hass
        Home assistant object.
    template
        The template to calculate.
    action
        Callable to call with results. See above for arguments.
    variables
        Variables to pass to the template.

    Returns
    -------
    Callable to unregister the listener.

    """
    job = HassJob(action)

    @callback
    def _template_changed_listener(
        event: Event, updates: list[TrackTemplateResult]
    ) -> None:
        """Check if condition is correct and run action."""
        track_result = updates.pop()

        template = track_result.template
        last_result = track_result.last_result
        result = track_result.result

        if isinstance(result, TemplateError):
            _LOGGER.error(
                "Error while processing template: %s",
                template.template,
                exc_info=result,
            )
            return

        if (
            # Fire only on a false/error -> true transition: skip if the
            # previous render was already truthy, or the new one is not.
            not isinstance(last_result, TemplateError)
            and result_as_boolean(last_result)
            or not result_as_boolean(result)
        ):
            return

        hass.async_run_hass_job(
            job,
            event and event.data.get("entity_id"),
            event and event.data.get("old_state"),
            event and event.data.get("new_state"),
        )

    info = async_track_template_result(
        hass, [TrackTemplate(template, variables)], _template_changed_listener
    )

    return info.async_remove
# Non-async counterpart, built with threaded_listener_factory.
track_template = threaded_listener_factory(async_track_template)
class _TrackTemplateResultInfo:
    """Handle removal / refresh of tracker.

    Renders a group of templates, re-rendering on relevant state changes,
    time changes (for templates using now()/utcnow()), and explicit
    refreshes, honoring per-template rate limits.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        track_templates: Iterable[TrackTemplate],
        action: Callable,
    ) -> None:
        """Handle removal / refresh of tracker init."""
        self.hass = hass
        self._job = HassJob(action)

        for track_template_ in track_templates:
            track_template_.template.hass = hass
        self._track_templates = track_templates

        # Last rendered value (or error) per template; used for change
        # detection and passed back as TrackTemplateResult.last_result.
        self._last_result: dict[Template, str | TemplateError] = {}

        self._rate_limit = KeyedRateLimit(hass)
        # Latest RenderInfo per template; drives which states/time we track.
        self._info: dict[Template, RenderInfo] = {}
        self._track_state_changes: _TrackStateChangeFiltered | None = None
        self._time_listeners: dict[Template, Callable] = {}

    def async_setup(self, raise_on_template_error: bool, strict: bool = False) -> None:
        """Activation of template tracking."""
        for track_template_ in self._track_templates:
            template = track_template_.template
            variables = track_template_.variables
            self._info[template] = info = template.async_render_to_info(
                variables, strict=strict
            )

            if info.exception:
                if raise_on_template_error:
                    raise info.exception
                _LOGGER.error(
                    "Error while processing template: %s",
                    track_template_.template,
                    exc_info=info.exception,
                )

        self._track_state_changes = async_track_state_change_filtered(
            self.hass, _render_infos_to_track_states(self._info.values()), self._refresh
        )
        self._update_time_listeners()
        _LOGGER.debug(
            "Template group %s listens for %s",
            self._track_templates,
            self.listeners,
        )

    @property
    def listeners(self) -> dict:
        """State changes that will cause a re-render."""
        assert self._track_state_changes
        return {
            **self._track_state_changes.listeners,
            "time": bool(self._time_listeners),
        }

    @callback
    def _setup_time_listener(self, template: Template, has_time: bool) -> None:
        # Keep a per-template time listener in sync with whether the
        # template currently references the clock.
        if not has_time:
            if template in self._time_listeners:
                # now() or utcnow() has left the scope of the template
                self._time_listeners.pop(template)()
            return

        if template in self._time_listeners:
            return

        track_templates = [
            track_template_
            for track_template_ in self._track_templates
            if track_template_.template == template
        ]

        @callback
        def _refresh_from_time(now: datetime) -> None:
            self._refresh(None, track_templates=track_templates)

        # Re-render at the top of every minute (second=0).
        self._time_listeners[template] = async_track_utc_time_change(
            self.hass, _refresh_from_time, second=0
        )

    @callback
    def _update_time_listeners(self) -> None:
        for template, info in self._info.items():
            self._setup_time_listener(template, info.has_time)

    @callback
    def async_remove(self) -> None:
        """Cancel the listener."""
        assert self._track_state_changes
        self._track_state_changes.async_remove()
        self._rate_limit.async_remove()
        for template in list(self._time_listeners):
            self._time_listeners.pop(template)()

    @callback
    def async_refresh(self) -> None:
        """Force recalculate the template."""
        self._refresh(None)

    def _render_template_if_ready(
        self,
        track_template_: TrackTemplate,
        now: datetime,
        event: Event | None,
    ) -> bool | TrackTemplateResult:
        """Re-render the template if conditions match.

        Returns False if the template was not be re-rendered

        Returns True if the template re-rendered and did not
        change.

        Returns TrackTemplateResult if the template re-render
        generates a new result.
        """
        template = track_template_.template

        if event:
            info = self._info[template]

            if not _event_triggers_rerender(event, info):
                return False

            had_timer = self._rate_limit.async_has_timer(template)

            if self._rate_limit.async_schedule_action(
                template,
                _rate_limit_for_event(event, info, track_template_),
                now,
                self._refresh,
                event,
                (track_template_,),
                True,
            ):
                # Rate limited: the render was deferred to a timer. Report
                # "changed" only when a new timer was just created.
                return not had_timer

            _LOGGER.debug(
                "Template update %s triggered by event: %s",
                template.template,
                event,
            )

        self._rate_limit.async_triggered(template, now)
        self._info[template] = info = template.async_render_to_info(
            track_template_.variables
        )

        try:
            result: str | TemplateError = info.result()
        except TemplateError as ex:
            result = ex

        last_result = self._last_result.get(template)

        # Check to see if the result has changed
        if result == last_result:
            return True

        if isinstance(result, TemplateError) and isinstance(last_result, TemplateError):
            # Still erroring: only the first error is reported as an update.
            return True

        return TrackTemplateResult(template, last_result, result)

    @callback
    def _refresh(
        self,
        event: Event | None,
        track_templates: Iterable[TrackTemplate] | None = None,
        replayed: bool | None = False,
    ) -> None:
        """Refresh the template.

        The event is the state_changed event that caused the refresh
        to be considered.

        track_templates is an optional list of TrackTemplate objects
        to refresh. If not provided, all tracked templates will be
        considered.

        replayed is True if the event is being replayed because the
        rate limit was hit.
        """
        updates = []
        info_changed = False
        now = event.time_fired if not replayed and event else dt_util.utcnow()

        for track_template_ in track_templates or self._track_templates:
            update = self._render_template_if_ready(track_template_, now, event)
            if not update:
                continue

            template = track_template_.template
            self._setup_time_listener(template, self._info[template].has_time)

            info_changed = True

            if isinstance(update, TrackTemplateResult):
                updates.append(update)

        if info_changed:
            assert self._track_state_changes
            self._track_state_changes.async_update_listeners(
                _render_infos_to_track_states(
                    [
                        # While a template is rate limited, suppress its
                        # domain/all-states tracking so deferred renders do
                        # not keep retriggering.
                        _suppress_domain_all_in_render_info(self._info[template])
                        if self._rate_limit.async_has_timer(template)
                        else self._info[template]
                        for template in self._info
                    ]
                )
            )
            _LOGGER.debug(
                "Template group %s listens for %s",
                self._track_templates,
                self.listeners,
            )

        if not updates:
            return

        for track_result in updates:
            self._last_result[track_result.template] = track_result.result

        self.hass.async_run_hass_job(self._job, event, updates)
TrackTemplateResultListener = Callable[
[
Event,
List[TrackTemplateResult],
],
None,
]
"""Type for the listener for template results.
Action arguments
----------------
event
Event that caused the template to change output. None if not
triggered by an event.
updates
A list of TrackTemplateResult
"""
@callback
@bind_hass
def async_track_template_result(
    hass: HomeAssistant,
    track_templates: Iterable[TrackTemplate],
    action: TrackTemplateResultListener,
    raise_on_template_error: bool = False,
    strict: bool = False,
) -> _TrackTemplateResultInfo:
    """Add a listener that fires when the result of a template changes.

    The action will fire with the initial result from the template, and
    then whenever the output from the template changes. The template will
    be reevaluated if any states referenced in the last run of the
    template change, or if manually triggered. If the result of the
    evaluation is different from the previous run, the listener is passed
    the result.

    If the template results in an TemplateError, this will be returned to
    the listener the first time this happens but not for subsequent errors.
    Once the template returns to a non-error condition the result is sent
    to the action as usual.

    Parameters
    ----------
    hass
        Home assistant object.
    track_templates
        An iterable of TrackTemplate.
    action
        Callable to call with results.
    raise_on_template_error
        When set to True, if there is an exception
        processing the template during setup, the system
        will raise the exception instead of setting up
        tracking.
    strict
        When set to True, raise on undefined variables.

    Returns
    -------
    Info object used to unregister the listener, and refresh the template.

    """
    tracker = _TrackTemplateResultInfo(hass, track_templates, action)
    # Render each template once up front; may raise if requested.
    tracker.async_setup(raise_on_template_error, strict=strict)
    return tracker
@callback
@bind_hass
def async_track_same_state(
    hass: HomeAssistant,
    period: timedelta,
    action: Callable[..., Awaitable[None] | None],
    async_check_same_func: Callable[[str, State | None, State | None], bool],
    entity_ids: str | Iterable[str] = MATCH_ALL,
) -> CALLBACK_TYPE:
    """Track the state of entities for a period and run an action.

    If async_check_func is None it use the state of orig_value.
    Without entity_ids we track all state changes.

    Returns a callable that cancels both the timer and the state listener.
    """
    async_remove_state_for_cancel: CALLBACK_TYPE | None = None
    async_remove_state_for_listener: CALLBACK_TYPE | None = None

    job = HassJob(action)

    @callback
    def clear_listener() -> None:
        """Clear all unsub listener."""
        nonlocal async_remove_state_for_cancel, async_remove_state_for_listener

        if async_remove_state_for_listener is not None:
            async_remove_state_for_listener()
            async_remove_state_for_listener = None
        if async_remove_state_for_cancel is not None:
            async_remove_state_for_cancel()
            async_remove_state_for_cancel = None

    @callback
    def state_for_listener(now: Any) -> None:
        """Fire on state changes after a delay and calls action."""
        nonlocal async_remove_state_for_listener
        # The timer already fired; clear it first so clear_listener does
        # not try to cancel it again.
        async_remove_state_for_listener = None
        clear_listener()
        hass.async_run_hass_job(job)

    @callback
    def state_for_cancel_listener(event: Event) -> None:
        """Fire on changes and cancel for listener if changed."""
        entity: str = event.data["entity_id"]
        from_state: State | None = event.data.get("old_state")
        to_state: State | None = event.data.get("new_state")

        if not async_check_same_func(entity, from_state, to_state):
            # The condition broke during the hold period: abort everything.
            clear_listener()

    async_remove_state_for_listener = async_track_point_in_utc_time(
        hass, state_for_listener, dt_util.utcnow() + period
    )

    if entity_ids == MATCH_ALL:
        async_remove_state_for_cancel = hass.bus.async_listen(
            EVENT_STATE_CHANGED, state_for_cancel_listener
        )
    else:
        async_remove_state_for_cancel = async_track_state_change_event(
            hass,
            [entity_ids] if isinstance(entity_ids, str) else entity_ids,
            state_for_cancel_listener,
        )

    return clear_listener
# Non-async counterpart, built with threaded_listener_factory.
track_same_state = threaded_listener_factory(async_track_same_state)
@callback
@bind_hass
def async_track_point_in_time(
    hass: HomeAssistant,
    action: HassJob | Callable[..., Awaitable[None] | None],
    point_in_time: datetime,
) -> CALLBACK_TYPE:
    """Add a listener that fires once after a specific point in time."""
    if isinstance(action, HassJob):
        job = action
    else:
        job = HassJob(action)

    @callback
    def utc_converter(utc_now: datetime) -> None:
        """Run the job with the fire time converted to local time."""
        hass.async_run_hass_job(job, dt_util.as_local(utc_now))

    # Delegate scheduling to the UTC variant; only the timestamp handed
    # to the action differs (local time instead of UTC).
    return async_track_point_in_utc_time(hass, utc_converter, point_in_time)
# Non-async counterpart, built with threaded_listener_factory.
track_point_in_time = threaded_listener_factory(async_track_point_in_time)
@callback
@bind_hass
def async_track_point_in_utc_time(
    hass: HomeAssistant,
    action: HassJob | Callable[..., Awaitable[None] | None],
    point_in_time: datetime,
) -> CALLBACK_TYPE:
    """Add a listener that fires once after a specific point in UTC time.

    Returns a callable that cancels the pending timer.
    """
    # Ensure point_in_time is UTC
    utc_point_in_time = dt_util.as_utc(point_in_time)

    # Since this is called once, we accept a HassJob so we can avoid
    # having to figure out how to call the action every time its called.
    cancel_callback: asyncio.TimerHandle | None = None

    @callback
    def run_action(job: HassJob) -> None:
        """Call the action."""
        nonlocal cancel_callback

        now = time_tracker_utcnow()

        # Depending on the available clock support (including timer hardware
        # and the OS kernel) it can happen that we fire a little bit too early
        # as measured by utcnow(). That is bad when callbacks have assumptions
        # about the current time. Thus, we rearm the timer for the remaining
        # time.
        delta = (utc_point_in_time - now).total_seconds()
        if delta > 0:
            _LOGGER.debug("Called %f seconds too early, rearming", delta)

            cancel_callback = hass.loop.call_later(delta, run_action, job)
            return

        hass.async_run_hass_job(job, utc_point_in_time)

    job = action if isinstance(action, HassJob) else HassJob(action)
    delta = utc_point_in_time.timestamp() - time.time()
    cancel_callback = hass.loop.call_later(delta, run_action, job)

    @callback
    def unsub_point_in_time_listener() -> None:
        """Cancel the call_later."""
        assert cancel_callback is not None
        cancel_callback.cancel()

    return unsub_point_in_time_listener
# Non-async counterpart, built with threaded_listener_factory.
track_point_in_utc_time = threaded_listener_factory(async_track_point_in_utc_time)
@callback
@bind_hass
def async_call_later(
    hass: HomeAssistant,
    delay: float,
    action: HassJob | Callable[..., Awaitable[None] | None],
) -> CALLBACK_TYPE:
    """Add a listener that is called in <delay>."""
    # Convenience wrapper: translate the relative delay into an absolute
    # UTC timestamp and schedule a one-shot timer for it.
    fire_at = dt_util.utcnow() + timedelta(seconds=delay)
    return async_track_point_in_utc_time(hass, action, fire_at)
# Non-async counterpart, built with threaded_listener_factory.
call_later = threaded_listener_factory(async_call_later)
@callback
@bind_hass
def async_track_time_interval(
    hass: HomeAssistant,
    action: Callable[..., Awaitable[None] | None],
    interval: timedelta,
) -> CALLBACK_TYPE:
    """Add a listener that fires repetitively at every timedelta interval.

    Returns a callable that cancels the recurring timer.
    """
    remove = None
    interval_listener_job = None

    job = HassJob(action)

    def next_interval() -> datetime:
        """Return the next interval."""
        return dt_util.utcnow() + interval

    @callback
    def interval_listener(now: datetime) -> None:
        """Handle elapsed intervals."""
        nonlocal remove
        nonlocal interval_listener_job

        # Re-arm the timer before running the job so a slow job does not
        # push back the schedule.
        remove = async_track_point_in_utc_time(
            hass, interval_listener_job, next_interval()  # type: ignore
        )
        hass.async_run_hass_job(job, now)

    interval_listener_job = HassJob(interval_listener)
    remove = async_track_point_in_utc_time(hass, interval_listener_job, next_interval())

    def remove_listener() -> None:
        """Remove interval listener."""
        remove()  # type: ignore

    return remove_listener
# Non-async counterpart, built with threaded_listener_factory.
track_time_interval = threaded_listener_factory(async_track_time_interval)
@attr.s
class SunListener:
    """Helper class to help listen to sun events."""

    hass: HomeAssistant = attr.ib()
    job: HassJob = attr.ib()
    # One of the SUN_EVENT_* constants (sunrise / sunset).
    event: str = attr.ib()
    offset: timedelta | None = attr.ib()
    # Unsubscribe callables for the pending sun timer and the core
    # config-update listener; None while not attached.
    _unsub_sun: CALLBACK_TYPE | None = attr.ib(default=None)
    _unsub_config: CALLBACK_TYPE | None = attr.ib(default=None)

    @callback
    def async_attach(self) -> None:
        """Attach a sun listener."""
        assert self._unsub_config is None

        # Re-schedule whenever the core config changes (which can move
        # the computed sun event times).
        self._unsub_config = self.hass.bus.async_listen(
            EVENT_CORE_CONFIG_UPDATE, self._handle_config_event
        )

        self._listen_next_sun_event()

    @callback
    def async_detach(self) -> None:
        """Detach the sun listener."""
        assert self._unsub_sun is not None
        assert self._unsub_config is not None

        self._unsub_sun()
        self._unsub_sun = None
        self._unsub_config()
        self._unsub_config = None

    @callback
    def _listen_next_sun_event(self) -> None:
        """Set up the sun event listener."""
        assert self._unsub_sun is None

        self._unsub_sun = async_track_point_in_utc_time(
            self.hass,
            self._handle_sun_event,
            get_astral_event_next(self.hass, self.event, offset=self.offset),
        )

    @callback
    def _handle_sun_event(self, _now: Any) -> None:
        """Handle solar event."""
        self._unsub_sun = None
        # Chain the next occurrence before running the job.
        self._listen_next_sun_event()
        self.hass.async_run_hass_job(self.job)

    @callback
    def _handle_config_event(self, _event: Any) -> None:
        """Handle core config update."""
        assert self._unsub_sun is not None
        self._unsub_sun()
        self._unsub_sun = None
        self._listen_next_sun_event()
@callback
@bind_hass
def async_track_sunrise(
    hass: HomeAssistant, action: Callable[..., None], offset: timedelta | None = None
) -> CALLBACK_TYPE:
    """Add a listener that will fire a specified offset from sunrise daily."""
    sun_listener = SunListener(hass, HassJob(action), SUN_EVENT_SUNRISE, offset)
    sun_listener.async_attach()
    # Detaching the listener is how a caller unsubscribes.
    return sun_listener.async_detach
# Non-async counterpart, built with threaded_listener_factory.
track_sunrise = threaded_listener_factory(async_track_sunrise)
@callback
@bind_hass
def async_track_sunset(
    hass: HomeAssistant, action: Callable[..., None], offset: timedelta | None = None
) -> CALLBACK_TYPE:
    """Add a listener that will fire a specified offset from sunset daily."""
    sun_listener = SunListener(hass, HassJob(action), SUN_EVENT_SUNSET, offset)
    sun_listener.async_attach()
    # Detaching the listener is how a caller unsubscribes.
    return sun_listener.async_detach
# Non-async counterpart, built with threaded_listener_factory.
track_sunset = threaded_listener_factory(async_track_sunset)

# For targeted patching in tests (lets tests substitute a fake clock).
time_tracker_utcnow = dt_util.utcnow
@callback
@bind_hass
def async_track_utc_time_change(
    hass: HomeAssistant,
    action: Callable[..., Awaitable[None] | None],
    hour: Any | None = None,
    minute: Any | None = None,
    second: Any | None = None,
    local: bool = False,
) -> CALLBACK_TYPE:
    """Add a listener that will fire if time matches a pattern.

    With local=True the pattern is matched against local time instead of
    UTC. Returns a callable that cancels the listener.
    """
    job = HassJob(action)

    # We do not have to wrap the function with time pattern matching logic
    # if no pattern given
    if all(val is None for val in (hour, minute, second)):

        @callback
        def time_change_listener(event: Event) -> None:
            """Fire every time event that comes in."""
            hass.async_run_hass_job(job, event.data[ATTR_NOW])

        return hass.bus.async_listen(EVENT_TIME_CHANGED, time_change_listener)

    matching_seconds = dt_util.parse_time_expression(second, 0, 59)
    matching_minutes = dt_util.parse_time_expression(minute, 0, 59)
    matching_hours = dt_util.parse_time_expression(hour, 0, 23)

    def calculate_next(now: datetime) -> datetime:
        """Calculate and set the next time the trigger should fire."""
        localized_now = dt_util.as_local(now) if local else now
        return dt_util.find_next_time_expression_time(
            localized_now, matching_seconds, matching_minutes, matching_hours
        )

    time_listener: CALLBACK_TYPE | None = None

    @callback
    def pattern_time_change_listener(_: datetime) -> None:
        """Listen for matching time_changed events."""
        nonlocal time_listener

        now = time_tracker_utcnow()
        hass.async_run_hass_job(job, dt_util.as_local(now) if local else now)

        # Schedule the next match; the +1 second skips past the time that
        # just fired so it does not match again immediately.
        time_listener = async_track_point_in_utc_time(
            hass,
            pattern_time_change_listener,
            calculate_next(now + timedelta(seconds=1)),
        )

    time_listener = async_track_point_in_utc_time(
        hass, pattern_time_change_listener, calculate_next(dt_util.utcnow())
    )

    @callback
    def unsub_pattern_time_change_listener() -> None:
        """Cancel the time listener."""
        assert time_listener is not None
        time_listener()

    return unsub_pattern_time_change_listener
# Non-async counterpart, built with threaded_listener_factory.
track_utc_time_change = threaded_listener_factory(async_track_utc_time_change)
@callback
@bind_hass
def async_track_time_change(
    hass: HomeAssistant,
    action: Callable[..., None],
    hour: Any | None = None,
    minute: Any | None = None,
    second: Any | None = None,
) -> CALLBACK_TYPE:
    """Add a listener that will fire if local time matches a pattern.

    Thin wrapper around async_track_utc_time_change with local=True, so
    the pattern is matched against local time, not UTC (the previous
    docstring incorrectly said UTC).
    """
    return async_track_utc_time_change(hass, action, hour, minute, second, local=True)
# Non-async counterpart, built with threaded_listener_factory.
track_time_change = threaded_listener_factory(async_track_time_change)
def process_state_match(parameter: None | str | Iterable[str]) -> Callable[[str], bool]:
    """Convert parameter to function that matches input against parameter."""
    if parameter is None or parameter == MATCH_ALL:
        # Wildcard: every state matches.
        return lambda _: True

    if isinstance(parameter, str) or not hasattr(parameter, "__iter__"):
        # Single value: exact comparison.
        return lambda state: state == parameter

    # Collection of values: O(1) membership test against a frozen copy.
    acceptable_states = set(parameter)
    return lambda state: state in acceptable_states
@callback
def _entities_domains_from_render_infos(
    render_infos: Iterable[RenderInfo],
) -> tuple[set[str], set[str]]:
    """Combine from multiple RenderInfo."""
    entities: set[str] = set()
    domains: set[str] = set()

    for render_info in render_infos:
        # Guard each source: only merge collections that are non-empty.
        for entity_source in (render_info.entities,):
            if entity_source:
                entities.update(entity_source)
        # Both plain and lifecycle domains feed the same combined set.
        for domain_source in (render_info.domains, render_info.domains_lifecycle):
            if domain_source:
                domains.update(domain_source)

    return entities, domains
@callback
def _render_infos_needs_all_listener(render_infos: Iterable[RenderInfo]) -> bool:
    """Determine if an all listener is needed from RenderInfo."""
    # An all-states listener is required when any template tracks every
    # state (or state lifecycle), or when a previous render raised an
    # exception — in that case we do not know which states it references.
    return any(
        render_info.all_states
        or render_info.all_states_lifecycle
        or render_info.exception
        for render_info in render_infos
    )
@callback
def _render_infos_to_track_states(render_infos: Iterable[RenderInfo]) -> TrackStates:
    """Create a TrackStates dataclass from the latest RenderInfo."""
    # NOTE: render_infos is iterated up to twice (here and below), so
    # callers must pass a re-iterable collection, not a one-shot iterator.
    if _render_infos_needs_all_listener(render_infos):
        return TrackStates(True, set(), set())

    return TrackStates(False, *_entities_domains_from_render_infos(render_infos))
@callback
def _event_triggers_rerender(event: Event, info: RenderInfo) -> bool:
    """Determine if a template should be re-rendered from an event."""
    entity_id = cast(str, event.data.get(ATTR_ENTITY_ID))

    if info.filter(entity_id):
        # Entity is directly referenced by the template.
        return True

    if (
        event.data.get("new_state") is not None
        and event.data.get("old_state") is not None
    ):
        # A plain state update (not an add/remove) only matters when the
        # entity passed the filter above.
        return False

    # Entity was added or removed: consult the lifecycle filter too.
    return bool(info.filter_lifecycle(entity_id))
@callback
def _rate_limit_for_event(
    event: Event, info: RenderInfo, track_template_: TrackTemplate
) -> timedelta | None:
    """Determine the rate limit for an event.

    Returns None when the event must not be rate limited at all.
    """
    entity_id = event.data.get(ATTR_ENTITY_ID)

    # Specifically referenced entities are excluded
    # from the rate limit
    if entity_id in info.entities:
        return None

    if track_template_.rate_limit is not None:
        # A per-tracker override takes precedence over the template's own.
        return track_template_.rate_limit

    rate_limit: timedelta | None = info.rate_limit
    return rate_limit
def _suppress_domain_all_in_render_info(render_info: RenderInfo) -> RenderInfo:
    """Remove the domains and all_states from render info during a ratelimit."""
    # Work on a shallow copy so the original RenderInfo keeps its broad
    # listeners for when the rate limit expires.
    limited = copy.copy(render_info)
    limited.all_states = False
    limited.all_states_lifecycle = False
    limited.domains = set()
    limited.domains_lifecycle = set()
    return limited
| apache-2.0 |
lovelysystems/pyjamas | library/pyjamas/ui/HorizontalPanel.py | 1 | 2463 | # Copyright 2006 James Tauber and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas.ui.CellPanel import CellPanel
from pyjamas.ui import HasHorizontalAlignment
from pyjamas.ui import HasVerticalAlignment
class HorizontalPanel(CellPanel):
    """A panel that lays its child widgets out in a single horizontal row.

    Internally the children live in one table row (<tr>), one cell (<td>)
    per child.  Keyword arguments are forwarded to CellPanel; Spacing and
    Padding default to 0 when not supplied.
    """

    def __init__(self, **kwargs):
        # dict.has_key() is deprecated (and removed in Python 3); the
        # equivalent `in` test behaves identically on Python 2 and 3.
        if 'Spacing' not in kwargs: kwargs['Spacing'] = 0
        if 'Padding' not in kwargs: kwargs['Padding'] = 0
        # Alignment defaults applied to every cell added later.
        self.horzAlign = HasHorizontalAlignment.ALIGN_LEFT
        self.vertAlign = HasVerticalAlignment.ALIGN_TOP
        CellPanel.__init__(self, **kwargs)
        # The single row that holds all child cells.
        self.tableRow = DOM.createTR()
        DOM.appendChild(self.getBody(), self.tableRow)

    def add(self, widget):
        """Append a widget at the end of the row."""
        self.insert(widget, self.getWidgetCount())

    def getHorizontalAlignment(self):
        """Return the default horizontal alignment for new cells."""
        return self.horzAlign

    def getVerticalAlignment(self):
        """Return the default vertical alignment for new cells."""
        return self.vertAlign

    def getWidget(self, index):
        """Return the child widget at the given position."""
        return self.children[index]

    def getWidgetCount(self):
        """Return the number of child widgets."""
        return len(self.children)

    def getWidgetIndex(self, child):
        """Return the position of a child widget (raises if absent)."""
        return self.children.index(child)

    def insert(self, widget, beforeIndex):
        """Insert a widget, in its own table cell, before the given index."""
        widget.removeFromParent()
        td = DOM.createTD()
        DOM.insertChild(self.tableRow, td, beforeIndex)
        CellPanel.insert(self, widget, td, beforeIndex)
        self.setCellHorizontalAlignment(widget, self.horzAlign)
        self.setCellVerticalAlignment(widget, self.vertAlign)

    def remove(self, widget):
        """Remove a child widget (or the widget at an integer index).

        Returns False when the widget is not a child of this panel,
        True once the widget and its table cell have been removed.
        """
        if isinstance(widget, int):
            widget = self.getWidget(widget)
        if widget.getParent() != self:
            return False
        td = DOM.getParent(widget.getElement())
        DOM.removeChild(self.tableRow, td)
        CellPanel.remove(self, widget)
        return True

    def setHorizontalAlignment(self, align):
        """Set the default horizontal alignment for widgets added later."""
        self.horzAlign = align

    def setVerticalAlignment(self, align):
        """Set the default vertical alignment for widgets added later."""
        self.vertAlign = align
| apache-2.0 |
homme/ansible-modules-core | system/group.py | 81 | 13394 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: group
author: "Stephen Fromm (@sfromm)"
version_added: "0.0.2"
short_description: Add or remove groups
requirements: [ groupadd, groupdel, groupmod ]
description:
- Manage presence of groups on a host.
options:
name:
required: true
description:
- Name of the group to manage.
gid:
required: false
description:
- Optional I(GID) to set for the group.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the group should be present or not on the remote host.
system:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If I(yes), indicates that the group created is a system group.
'''
EXAMPLES = '''
# Example group command from Ansible Playbooks
- group: name=somegroup state=present
'''
import grp
import syslog
import platform
class Group(object):
    """
    This is a generic Group manipulation class that is subclassed
    based on platform.
    A subclass may wish to override the following action methods:-
    - group_del()
    - group_add()
    - group_mod()
    All subclasses MUST define platform and distribution (which may be None).
    """
    platform = 'Generic'
    distribution = None
    GROUPFILE = '/etc/group'
    def __new__(cls, *args, **kwargs):
        # Dispatch to the platform-specific subclass (SunOS, AIX, ...)
        # chosen by Ansible's load_platform_subclass helper.
        return load_platform_subclass(Group, args, kwargs)
    def __init__(self, module):
        # module: the AnsibleModule instance; all parameters are read from
        # module.params (state/name/gid/system as declared in main()).
        self.module = module
        self.state = module.params['state']
        self.name = module.params['name']
        self.gid = module.params['gid']
        self.system = module.params['system']
        self.syslogging = False
    def execute_command(self, cmd):
        # Run an external command via the module, optionally logging the
        # command line to syslog first.  Returns (rc, stdout, stderr).
        if self.syslogging:
            syslog.openlog('ansible-%s' % os.path.basename(__file__))
            syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
        return self.module.run_command(cmd)
    def group_del(self):
        # Delete the group with groupdel(8).
        cmd = [self.module.get_bin_path('groupdel', True), self.name]
        return self.execute_command(cmd)
    def group_add(self, **kwargs):
        # Create the group with groupadd(8); only 'gid' and 'system'
        # keyword arguments are honoured.
        cmd = [self.module.get_bin_path('groupadd', True)]
        for key in kwargs:
            if key == 'gid' and kwargs[key] is not None:
                cmd.append('-g')
                cmd.append(kwargs[key])
            elif key == 'system' and kwargs[key] == True:
                cmd.append('-r')
        cmd.append(self.name)
        return self.execute_command(cmd)
    def group_mod(self, **kwargs):
        # Modify the group's GID with groupmod(8).  Returns (None, '', '')
        # when nothing would change, and (0, '', '') in check mode.
        cmd = [self.module.get_bin_path('groupmod', True)]
        info = self.group_info()
        for key in kwargs:
            if key == 'gid':
                if kwargs[key] is not None and info[2] != int(kwargs[key]):
                    cmd.append('-g')
                    cmd.append(kwargs[key])
        if len(cmd) == 1:
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)
    def group_exists(self):
        # True if the group is present in the system database.
        # NOTE(review): falls off the end (returns None) if getgrnam
        # returns a falsy value, which callers treat as "absent".
        try:
            if grp.getgrnam(self.name):
                return True
        except KeyError:
            return False
    def group_info(self):
        # Return the grp entry as a list [name, passwd, gid, members],
        # or False when the group does not exist.
        if not self.group_exists():
            return False
        try:
            info = list(grp.getgrnam(self.name))
        except KeyError:
            return False
        return info
# ===========================================
class SunOS(Group):
    """
    SunOS group manipulation.  Solaris has no notion of a 'system'
    group, so only the GID option is honoured.

    Overrides from the generic class:
    - group_add()
    """
    platform = 'SunOS'
    distribution = None
    GROUPFILE = '/etc/group'
    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('groupadd', True)]
        gid = kwargs.get('gid')
        if gid is not None:
            cmd.extend(['-g', gid])
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class AIX(Group):
    """
    AIX group manipulation, built on the rmgroup/mkgroup/chgroup tools.

    Overrides from the generic class:
    - group_del()
    - group_add()
    - group_mod()
    """
    platform = 'AIX'
    distribution = None
    GROUPFILE = '/etc/group'
    def group_del(self):
        return self.execute_command(
            [self.module.get_bin_path('rmgroup', True), self.name])
    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('mkgroup', True)]
        # mkgroup takes attr=value pairs; '-a' marks an administrative group.
        for option, value in kwargs.items():
            if option == 'gid' and value is not None:
                cmd.append('id=' + value)
            elif option == 'system' and value == True:
                cmd.append('-a')
        cmd.append(self.name)
        return self.execute_command(cmd)
    def group_mod(self, **kwargs):
        cmd = [self.module.get_bin_path('chgroup', True)]
        info = self.group_info()
        gid = kwargs.get('gid')
        if gid is not None and info[2] != int(gid):
            cmd.append('id=' + gid)
        if len(cmd) == 1:
            # No attribute would change.
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class FreeBsdGroup(Group):
    """
    FreeBSD group manipulation via the pw(8) utility.

    Overrides from the generic class:
    - group_del()
    - group_add()
    - group_mod()
    """
    platform = 'FreeBSD'
    distribution = None
    GROUPFILE = '/etc/group'
    def group_del(self):
        return self.execute_command(
            [self.module.get_bin_path('pw', True), 'groupdel', self.name])
    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
        if self.gid is not None:
            cmd.extend(['-g', '%d' % int(self.gid)])
        return self.execute_command(cmd)
    def group_mod(self, **kwargs):
        cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name]
        base_len = len(cmd)
        info = self.group_info()
        if self.gid is not None and int(self.gid) != info[2]:
            cmd.extend(['-g', '%d' % int(self.gid)])
        if len(cmd) == base_len:
            # No option was added; nothing to do.
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')
        return self.execute_command(cmd)
# ===========================================
class DarwinGroup(Group):
    """
    This is a Mac OS X Darwin Group manipulation class.
    This overrides the following methods from the generic class:-
      - group_del()
      - group_add()
      - group_mod()
    group manipulation is done using dseditgroup(1).
    """
    platform = 'Darwin'
    distribution = None
    def group_add(self, **kwargs):
        # Create the group; '-i' sets the numeric GID when one was given.
        cmd = [self.module.get_bin_path('dseditgroup', True)]
        cmd += [ '-o', 'create' ]
        if self.gid is not None:
            cmd += [ '-i', self.gid ]
        cmd += [ '-L', self.name ]
        (rc, out, err) = self.execute_command(cmd)
        return (rc, out, err)
    def group_del(self):
        # Delete the group by name.
        cmd = [self.module.get_bin_path('dseditgroup', True)]
        cmd += [ '-o', 'delete' ]
        cmd += [ '-L', self.name ]
        (rc, out, err) = self.execute_command(cmd)
        return (rc, out, err)
    def group_mod(self, gid=None):
        # Change the group's GID when it differs from the requested one.
        # Returns (None, '', '') when there is nothing to change.
        info = self.group_info()
        if self.gid is not None and int(self.gid) != info[2]:
            # Bug fix: honour check mode like every other platform's
            # group_mod(); previously Darwin would really run dseditgroup
            # even when the module was invoked with --check.
            if self.module.check_mode:
                return (0, '', '')
            cmd = [self.module.get_bin_path('dseditgroup', True)]
            cmd += [ '-o', 'edit' ]
            if gid is not None:
                cmd += [ '-i', gid ]
            cmd += [ '-L', self.name ]
            (rc, out, err) = self.execute_command(cmd)
            return (rc, out, err)
        return (None, '', '')
class OpenBsdGroup(Group):
    """
    OpenBSD group manipulation.

    Overrides from the generic class:
    - group_del()
    - group_add()
    - group_mod()
    """
    platform = 'OpenBSD'
    distribution = None
    GROUPFILE = '/etc/group'
    def group_del(self):
        return self.execute_command(
            [self.module.get_bin_path('groupdel', True), self.name])
    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('groupadd', True)]
        if self.gid is not None:
            cmd.extend(['-g', '%d' % int(self.gid)])
        cmd.append(self.name)
        return self.execute_command(cmd)
    def group_mod(self, **kwargs):
        cmd = [self.module.get_bin_path('groupmod', True)]
        info = self.group_info()
        if self.gid is not None and int(self.gid) != info[2]:
            cmd.extend(['-g', '%d' % int(self.gid)])
        if len(cmd) == 1:
            # No option was added; the GID already matches.
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class NetBsdGroup(Group):
    """
    NetBSD group manipulation.

    Overrides from the generic class:
    - group_del()
    - group_add()
    - group_mod()
    """
    platform = 'NetBSD'
    distribution = None
    GROUPFILE = '/etc/group'
    def group_del(self):
        return self.execute_command(
            [self.module.get_bin_path('groupdel', True), self.name])
    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('groupadd', True)]
        if self.gid is not None:
            cmd.extend(['-g', '%d' % int(self.gid)])
        cmd.append(self.name)
        return self.execute_command(cmd)
    def group_mod(self, **kwargs):
        cmd = [self.module.get_bin_path('groupmod', True)]
        info = self.group_info()
        if self.gid is not None and int(self.gid) != info[2]:
            cmd.extend(['-g', '%d' % int(self.gid)])
        if len(cmd) == 1:
            # GID already correct; nothing to run.
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
def main():
    """Ansible entry point: ensure the requested group state on the host."""
    module = AnsibleModule(
        argument_spec = dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, type='str'),
            gid=dict(default=None, type='str'),
            system=dict(default=False, type='bool'),
        ),
        supports_check_mode=True
    )
    # Group.__new__ dispatches to the platform-specific subclass.
    group = Group(module)
    if group.syslogging:
        syslog.openlog('ansible-%s' % os.path.basename(__file__))
        syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - platform %s' % group.platform)
        # Bug fix: this previously read `user.distribution`, but no `user`
        # name exists in this module -- it raised NameError whenever
        # syslogging was enabled.
        if group.distribution:
            syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - distribution %s' % group.distribution)
    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = group.name
    result['state'] = group.state
    if group.state == 'absent':
        if group.group_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = group.group_del()
            if rc != 0:
                module.fail_json(name=group.name, msg=err)
    elif group.state == 'present':
        if not group.group_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = group.group_add(gid=group.gid, system=group.system)
        else:
            (rc, out, err) = group.group_mod(gid=group.gid)
        if rc is not None and rc != 0:
            module.fail_json(name=group.name, msg=err)
    # rc stays None when no command was run, i.e. nothing changed.
    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err
    if group.group_exists():
        info = group.group_info()
        result['system'] = group.system
        result['gid'] = info[2]
    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
fbradyirl/home-assistant | tests/components/persistent_notification/test_init.py | 4 | 7233 | """The tests for the persistent notification component."""
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.setup import setup_component, async_setup_component
import homeassistant.components.persistent_notification as pn
from tests.common import get_test_home_assistant
class TestPersistentNotification:
    """Test persistent notification component."""
    # NOTE: all tests share the component's notifications dict via
    # self.hass.data[pn.DOMAIN]["notifications"]; each test clears it
    # at the end so the next test starts from an empty state.
    def setup_method(self, method):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        setup_component(self.hass, pn.DOMAIN, {})
    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()
    def test_create(self):
        """Test creating notification without title or notification id."""
        notifications = self.hass.data[pn.DOMAIN]["notifications"]
        assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 0
        assert len(notifications) == 0
        # Message and title are template strings and should be rendered.
        pn.create(self.hass, "Hello World {{ 1 + 1 }}", title="{{ 1 + 1 }} beers")
        self.hass.block_till_done()
        entity_ids = self.hass.states.entity_ids(pn.DOMAIN)
        assert len(entity_ids) == 1
        assert len(notifications) == 1
        state = self.hass.states.get(entity_ids[0])
        assert state.state == pn.STATE
        assert state.attributes.get("message") == "Hello World 2"
        assert state.attributes.get("title") == "2 beers"
        notification = notifications.get(entity_ids[0])
        assert notification["status"] == pn.STATUS_UNREAD
        assert notification["message"] == "Hello World 2"
        assert notification["title"] == "2 beers"
        assert notification["created_at"] is not None
        notifications.clear()
    def test_create_notification_id(self):
        """Ensure overwrites existing notification with same id."""
        notifications = self.hass.data[pn.DOMAIN]["notifications"]
        assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 0
        assert len(notifications) == 0
        pn.create(self.hass, "test", notification_id="Beer 2")
        self.hass.block_till_done()
        assert len(self.hass.states.entity_ids()) == 1
        assert len(notifications) == 1
        # The notification id is slugified into the entity id.
        entity_id = "persistent_notification.beer_2"
        state = self.hass.states.get(entity_id)
        assert state.attributes.get("message") == "test"
        notification = notifications.get(entity_id)
        assert notification["message"] == "test"
        assert notification["title"] is None
        pn.create(self.hass, "test 2", notification_id="Beer 2")
        self.hass.block_till_done()
        # We should have overwritten old one
        assert len(self.hass.states.entity_ids()) == 1
        state = self.hass.states.get(entity_id)
        assert state.attributes.get("message") == "test 2"
        notification = notifications.get(entity_id)
        assert notification["message"] == "test 2"
        notifications.clear()
    def test_create_template_error(self):
        """Ensure we output templates if contain error."""
        notifications = self.hass.data[pn.DOMAIN]["notifications"]
        assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 0
        assert len(notifications) == 0
        # Undefined template variables: rendering fails, so the raw
        # template strings are stored verbatim.
        pn.create(self.hass, "{{ message + 1 }}", "{{ title + 1 }}")
        self.hass.block_till_done()
        entity_ids = self.hass.states.entity_ids(pn.DOMAIN)
        assert len(entity_ids) == 1
        assert len(notifications) == 1
        state = self.hass.states.get(entity_ids[0])
        assert state.attributes.get("message") == "{{ message + 1 }}"
        assert state.attributes.get("title") == "{{ title + 1 }}"
        notification = notifications.get(entity_ids[0])
        assert notification["message"] == "{{ message + 1 }}"
        assert notification["title"] == "{{ title + 1 }}"
        notifications.clear()
    def test_dismiss_notification(self):
        """Ensure removal of specific notification."""
        notifications = self.hass.data[pn.DOMAIN]["notifications"]
        assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 0
        assert len(notifications) == 0
        pn.create(self.hass, "test", notification_id="Beer 2")
        self.hass.block_till_done()
        assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 1
        assert len(notifications) == 1
        pn.dismiss(self.hass, notification_id="Beer 2")
        self.hass.block_till_done()
        assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 0
        assert len(notifications) == 0
        notifications.clear()
    def test_mark_read(self):
        """Ensure notification is marked as Read."""
        notifications = self.hass.data[pn.DOMAIN]["notifications"]
        assert len(notifications) == 0
        pn.create(self.hass, "test", notification_id="Beer 2")
        self.hass.block_till_done()
        entity_id = "persistent_notification.beer_2"
        assert len(notifications) == 1
        notification = notifications.get(entity_id)
        assert notification["status"] == pn.STATUS_UNREAD
        self.hass.services.call(
            pn.DOMAIN, pn.SERVICE_MARK_READ, {"notification_id": "Beer 2"}
        )
        self.hass.block_till_done()
        assert len(notifications) == 1
        notification = notifications.get(entity_id)
        assert notification["status"] == pn.STATUS_READ
        notifications.clear()
async def test_ws_get_notifications(hass, hass_ws_client):
    """Test websocket endpoint for retrieving persistent notifications."""
    await async_setup_component(hass, pn.DOMAIN, {})
    client = await hass_ws_client(hass)
    # Initially there are no notifications.
    await client.send_json({"id": 5, "type": "persistent_notification/get"})
    msg = await client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    notifications = msg["result"]
    assert len(notifications) == 0
    # Create
    hass.components.persistent_notification.async_create(
        "test", notification_id="Beer 2"
    )
    await client.send_json({"id": 6, "type": "persistent_notification/get"})
    msg = await client.receive_json()
    assert msg["id"] == 6
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    notifications = msg["result"]
    assert len(notifications) == 1
    notification = notifications[0]
    assert notification["notification_id"] == "Beer 2"
    assert notification["message"] == "test"
    assert notification["title"] is None
    assert notification["status"] == pn.STATUS_UNREAD
    assert notification["created_at"] is not None
    # Mark Read
    await hass.services.async_call(
        pn.DOMAIN, pn.SERVICE_MARK_READ, {"notification_id": "Beer 2"}
    )
    await client.send_json({"id": 7, "type": "persistent_notification/get"})
    msg = await client.receive_json()
    notifications = msg["result"]
    assert len(notifications) == 1
    assert notifications[0]["status"] == pn.STATUS_READ
    # Dismiss
    hass.components.persistent_notification.async_dismiss("Beer 2")
    await client.send_json({"id": 8, "type": "persistent_notification/get"})
    msg = await client.receive_json()
    notifications = msg["result"]
    assert len(notifications) == 0
| apache-2.0 |
jackrzhang/zulip | zerver/migrations/0130_text_choice_in_emojiset.py | 13 | 1745 | from django.db import migrations, models
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
# change emojiset to text if emoji_alt_code is true.
def change_emojiset(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """Switch every user who enabled emoji_alt_code over to the 'text' set."""
    UserProfile = apps.get_model("zerver", "UserProfile")
    text_mode_users = UserProfile.objects.filter(emoji_alt_code=True)
    for profile in text_mode_users:
        profile.emojiset = "text"
        profile.save(update_fields=["emojiset"])
def reverse_change_emojiset(apps: StateApps,
                            schema_editor: DatabaseSchemaEditor) -> None:
    """Best-effort undo: map 'text' users back onto the old flag + default set."""
    UserProfile = apps.get_model("zerver", "UserProfile")
    for profile in UserProfile.objects.filter(emojiset="text"):
        # Not an exact round trip, but nearly indistinguishable: while
        # emoji_alt_code is true the emojiset value shouldn't really
        # matter, so restoring the "google" default is fine.
        profile.emoji_alt_code = True
        profile.emojiset = "google"
        profile.save(update_fields=["emoji_alt_code", "emojiset"])
class Migration(migrations.Migration):
    """Fold the boolean emoji_alt_code flag into the emojiset choice field."""
    dependencies = [
        ('zerver', '0129_remove_userprofile_autoscroll_forever'),
    ]
    # Order matters: the 'text' choice must exist (AlterField) before the
    # data migration runs, and emoji_alt_code may only be dropped after
    # change_emojiset has read it.
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='emojiset',
            field=models.CharField(choices=[('google', 'Google'), ('apple', 'Apple'), ('twitter', 'Twitter'), ('emojione', 'EmojiOne'), ('text', 'Plain text')], default='google', max_length=20),
        ),
        migrations.RunPython(change_emojiset, reverse_change_emojiset),
        migrations.RemoveField(
            model_name='userprofile',
            name='emoji_alt_code',
        ),
    ]
| apache-2.0 |
benpatterson/edx-platform | common/djangoapps/django_locale/tests.py | 81 | 6516 | # pylint: disable=invalid-name, line-too-long, super-method-not-called
"""
Tests taken from Django upstream:
https://github.com/django/django/blob/e6b34193c5c7d117ededdab04bb16caf8864f07c/tests/regressiontests/i18n/tests.py
"""
from django.conf import settings
from django.test import TestCase, RequestFactory
from django_locale.trans_real import (
parse_accept_lang_header, get_language_from_request, LANGUAGE_SESSION_KEY
)
# Added to test middleware around dark lang
from django.contrib.auth.models import User
from django.test.utils import override_settings
from dark_lang.models import DarkLangConfig
# Adding to support test differences between Django and our own settings
@override_settings(LANGUAGES=[
    ('pt', 'Portuguese'),
    ('pt-br', 'Portuguese-Brasil'),
    ('es', 'Spanish'),
    ('es-ar', 'Spanish (Argentina)'),
    ('de', 'Deutch'),
    ('zh-cn', 'Chinese (China)'),
    ('ar-sa', 'Arabic (Saudi Arabia)'),
])
class MiscTests(TestCase):
    """
    Tests taken from Django upstream:
    https://github.com/django/django/blob/e6b34193c5c7d117ededdab04bb16caf8864f07c/tests/regressiontests/i18n/tests.py
    """
    def setUp(self):
        self.rf = RequestFactory()
        # Added to test middleware around dark lang
        # Dark lang gates language selection to the released_languages
        # list below; the config row requires a changed_by user.
        user = User()
        user.save()
        DarkLangConfig(
            released_languages='pt, pt-br, es, de, es-ar, zh-cn, ar-sa',
            changed_by=user,
            enabled=True
        ).save()
    def test_parse_spec_http_header(self):
        """
        Testing HTTP header parsing. First, we test that we can parse the
        values according to the spec (and that we extract all the pieces in
        the right order).
        """
        p = parse_accept_lang_header
        # Good headers.
        self.assertEqual([('de', 1.0)], p('de'))
        self.assertEqual([('en-AU', 1.0)], p('en-AU'))
        self.assertEqual([('es-419', 1.0)], p('es-419'))
        self.assertEqual([('*', 1.0)], p('*;q=1.00'))
        self.assertEqual([('en-AU', 0.123)], p('en-AU;q=0.123'))
        self.assertEqual([('en-au', 0.5)], p('en-au;q=0.5'))
        self.assertEqual([('en-au', 1.0)], p('en-au;q=1.0'))
        self.assertEqual([('da', 1.0), ('en', 0.5), ('en-gb', 0.25)], p('da, en-gb;q=0.25, en;q=0.5'))
        self.assertEqual([('en-au-xx', 1.0)], p('en-au-xx'))
        self.assertEqual([('de', 1.0), ('en-au', 0.75), ('en-us', 0.5), ('en', 0.25), ('es', 0.125), ('fa', 0.125)], p('de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125'))
        self.assertEqual([('*', 1.0)], p('*'))
        self.assertEqual([('de', 1.0)], p('de;q=0.'))
        self.assertEqual([('en', 1.0), ('*', 0.5)], p('en; q=1.0, * ; q=0.5'))
        self.assertEqual([], p(''))
        # Bad headers; should always return [].
        self.assertEqual([], p('en-gb;q=1.0000'))
        self.assertEqual([], p('en;q=0.1234'))
        self.assertEqual([], p('en;q=.2'))
        self.assertEqual([], p('abcdefghi-au'))
        self.assertEqual([], p('**'))
        self.assertEqual([], p('en,,gb'))
        self.assertEqual([], p('en-au;q=0.1.0'))
        self.assertEqual([], p('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZ,en'))
        self.assertEqual([], p('da, en-gb;q=0.8, en;q=0.7,#'))
        self.assertEqual([], p('de;q=2.0'))
        self.assertEqual([], p('de;q=0.a'))
        self.assertEqual([], p('12-345'))
        self.assertEqual([], p(''))
    def test_parse_literal_http_header(self):
        """
        Now test that we parse a literal HTTP header correctly.
        """
        g = get_language_from_request
        r = self.rf.get('/')
        r.COOKIES = {}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
        self.assertEqual('pt-br', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt'}
        self.assertEqual('pt', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'es,de'}
        self.assertEqual('es', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-ar,de'}
        self.assertEqual('es-ar', g(r))
        # This test assumes there won't be a Django translation to a US
        # variation of the Spanish language, a safe assumption. When the
        # user sets it as the preferred language, the main 'es'
        # translation should be selected instead.
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-us'}
        self.assertEqual(g(r), 'es')
        # This tests the following scenario: there isn't a main language (zh)
        # translation of Django but there is a translation to variation (zh_CN)
        # the user sets zh-cn as the preferred language, it should be selected
        # by Django without falling back nor ignoring it.
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-cn,de'}
        self.assertEqual(g(r), 'zh-cn')
    def test_logic_masked_by_darklang(self):
        """Unreleased language variants fall back to a released sibling."""
        g = get_language_from_request
        r = self.rf.get('/')
        r.COOKIES = {}
        # ar-qa is not in released_languages; the released ar-sa wins.
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'ar-qa'}
        self.assertEqual('ar-sa', g(r))
        # A session-stored language overrides the Accept-Language header.
        r.session = {LANGUAGE_SESSION_KEY: 'es'}
        self.assertEqual('es', g(r))
    def test_parse_language_cookie(self):
        """
        Now test that we parse language preferences stored in a cookie correctly.
        """
        g = get_language_from_request
        r = self.rf.get('/')
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt-br'}
        r.META = {}
        self.assertEqual('pt-br', g(r))
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt'}
        r.META = {}
        self.assertEqual('pt', g(r))
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es'}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
        self.assertEqual('es', g(r))
        # This test assumes there won't be a Django translation to a US
        # variation of the Spanish language, a safe assumption. When the
        # user sets it as the preferred language, the main 'es'
        # translation should be selected instead.
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es-us'}
        r.META = {}
        self.assertEqual(g(r), 'es')
        # This tests the following scenario: there isn't a main language (zh)
        # translation of Django but there is a translation to variation (zh_CN)
        # the user sets zh-cn as the preferred language, it should be selected
        # by Django without falling back nor ignoring it.
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'zh-cn'}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
        self.assertEqual(g(r), 'zh-cn')
| agpl-3.0 |
Eforcers/inbox-cleaner | src/lib/gae_mini_profiler/instrumented_profiler.py | 13 | 2781 | """CPU profiler that works by instrumenting all function calls (uses cProfile).
This profiler provides detailed function timings for all function calls
during a request.
This is just a simple wrapper for cProfile with result formatting. See
http://docs.python.org/2/library/profile.html for more.
PRO: since every function call is instrumented, you'll be sure to see
everything that goes on during a request. For code that doesn't have lots of
deeply nested function calls, this can be the easiest and most accurate way to
get an idea for which functions are taking lots of time.
CON: overhead is added to each function call due to this instrumentation. If
you're profiling code with deeply nested function calls or tight loops going
over lots of function calls, this perf overhead will add up.
"""
import cProfile
import pstats
import StringIO
import util
class Profile(object):
    """Profiler that wraps cProfile for programmatic access and reporting."""
    def __init__(self):
        # Underlying cProfile instance; populated by run().
        self.c_profile = cProfile.Profile()
    def results(self):
        """Return cProfile results in a dictionary for template context."""
        # Make sure nothing is printed to stdout
        output = StringIO.StringIO()
        # NOTE(review): this relies on pstats internals (stats.stats,
        # func_std_string, get_print_list) whose layout is undocumented
        # but stable across CPython 2.x releases.
        stats = pstats.Stats(self.c_profile, stream=output)
        stats.sort_stats("cumulative")
        results = {
            "total_call_count": stats.total_calls,
            "total_time": util.seconds_fmt(stats.total_tt),
            "calls": []
        }
        width, list_func_names = stats.get_print_list([80])
        for func_name in list_func_names:
            # Each stats entry is a 5-tuple:
            # (primitive calls, total calls, total time, cumulative time, callers).
            primitive_call_count, total_call_count, total_time, cumulative_time, callers = stats.stats[func_name]
            func_desc = pstats.func_std_string(func_name)
            callers_names = map(lambda func_name: pstats.func_std_string(func_name), callers.keys())
            callers_desc = map(
                lambda name: {"func_desc": name, "func_desc_short": util.short_method_fmt(name)},
                callers_names)
            results["calls"].append({
                "primitive_call_count": primitive_call_count,
                "total_call_count": total_call_count,
                "cumulative_time": util.seconds_fmt(cumulative_time, 2),
                # Guard against division by zero for zero-call entries.
                "per_call_cumulative": util.seconds_fmt(cumulative_time / primitive_call_count, 2) if primitive_call_count else "",
                "func_desc": func_desc,
                "func_desc_short": util.short_method_fmt(func_desc),
                "callers_desc": callers_desc,
            })
        output.close()
        return results
    def run(self, fxn):
        """Run function with cProfile enabled, saving results."""
        # runcall passes the extra (None, None) through to the lambda,
        # which accepts and ignores them before invoking fxn().
        return self.c_profile.runcall(lambda *args, **kwargs: fxn(), None, None)
| mit |
gechong/XlsxWriter | xlsxwriter/test/comparison/test_escapes01.py | 8 | 2872 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        self.maxDiff = None
        filename = 'escapes01.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        # Output written by the test vs. the Excel-generated reference.
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename
        self.ignore_files = ['xl/calcChain.xml',
                             '[Content_Types].xml',
                             'xl/_rels/workbook.xml.rels']
        self.ignore_elements = {}
    def test_create_file(self):
        """Test creation of a file with strings that require XML escaping."""
        workbook = Workbook(self.got_filename)
        # The sheet name itself contains a character needing escaping.
        worksheet = workbook.add_worksheet('5&4')
        worksheet.write_formula(0, 0, '=IF(1>2,0,1)', None, 1)
        worksheet.write_formula(1, 0, """=CONCATENATE("'","<>&")""", None, "'<>&")
        worksheet.write_formula(2, 0, '=1&"b"', None, '1b')
        worksheet.write_formula(3, 0, """="'\"""", None, "'")
        worksheet.write_formula(4, 0, '=""""', None, '"')
        worksheet.write_formula(5, 0, '="&" & "&"', None, '&&')
        worksheet.write_string(7, 0, '"&<>')
        workbook.close()
        self.assertExcelEqual()
    def test_create_file_write(self):
        """Test formulas with write() method."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet('5&4')
        worksheet.write(0, 0, '=IF(1>2,0,1)', None, 1)
        worksheet.write(1, 0, """=CONCATENATE("'","<>&")""", None, "'<>&")
        worksheet.write(2, 0, '=1&"b"', None, '1b')
        worksheet.write(3, 0, """="'\"""", None, "'")
        worksheet.write(4, 0, '=""""', None, '"')
        worksheet.write(5, 0, '="&" & "&"', None, '&&')
        worksheet.write_string(7, 0, '"&<>')
        workbook.close()
        self.assertExcelEqual()
    def test_create_file_A1(self):
        """Test formulas with A1 notation."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet('5&4')
        worksheet.write_formula('A1', '=IF(1>2,0,1)', None, 1)
        worksheet.write_formula('A2', """=CONCATENATE("'","<>&")""", None, "'<>&")
        worksheet.write_formula('A3', '=1&"b"', None, '1b')
        worksheet.write_formula('A4', """="'\"""", None, "'")
        worksheet.write_formula('A5', '=""""', None, '"')
        worksheet.write_formula('A6', '="&" & "&"', None, '&&')
        worksheet.write_string(7, 0, '"&<>')
        workbook.close()
        self.assertExcelEqual()
| bsd-2-clause |
seocam/mirror-test | colab/search/forms.py | 1 | 7726 | # -*- coding: utf-8 -*-
import unicodedata
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from haystack.forms import SearchForm
from haystack.inputs import AltParser
from haystack.inputs import AutoQuery
from colab.super_archives.models import MailingList
class ColabSearchForm(SearchForm):
    """Haystack search form for colab with per-type filter fields.

    Besides the free-text query ``q``, every optional field maps onto a
    field of the search index; ``search()`` applies each non-empty field
    as a filter on the SearchQuerySet.
    """

    q = forms.CharField(label=_('Search'), required=False)
    order = forms.CharField(widget=forms.HiddenInput(), required=False)
    type = forms.CharField(required=False, label=_(u'Type'))
    author = forms.CharField(required=False, label=_(u'Author'))
    modified_by = forms.CharField(required=False, label=_(u'Modified by'))
    # ticket status
    tag = forms.CharField(required=False, label=_(u'Status'))
    # mailinglist tag
    list = forms.MultipleChoiceField(
        required=False,
        label=_(u'Mailinglist'),
        choices=[(v, v) for v in MailingList.objects.values_list(
            'name', flat=True)]
    )
    milestone = forms.CharField(required=False, label=_(u'Milestone'))
    priority = forms.CharField(required=False, label=_(u'Priority'))
    # NOTE(review): 'component' is declared but never used as a filter in
    # search() below -- confirm whether that is intentional.
    component = forms.CharField(required=False, label=_(u'Component'))
    severity = forms.CharField(required=False, label=_(u'Severity'))
    reporter = forms.CharField(required=False, label=_(u'Reporter'))
    keywords = forms.CharField(required=False, label=_(u'Keywords'))
    collaborators = forms.CharField(required=False, label=_(u'Collaborators'))
    repository_name = forms.CharField(required=False, label=_(u'Repository'))
    username = forms.CharField(required=False, label=_(u'Username'))
    name = forms.CharField(required=False, label=_(u'Name'))
    institution = forms.CharField(required=False, label=_(u'Institution'))
    role = forms.CharField(required=False, label=_(u'Role'))
    since = forms.DateField(required=False, label=_(u'Since'))
    until = forms.DateField(required=False, label=_(u'Until'))
    filename = forms.CharField(required=False, label=_(u'Filename'))
    used_by = forms.CharField(required=False, label=_(u'Used by'))
    mimetype = forms.CharField(required=False, label=_(u'File type'))
    size = forms.CharField(required=False, label=_(u'Size'))

    def search(self):
        """Build and return the filtered SearchQuerySet.

        Returns ``self.no_query_found()`` when the form does not
        validate; otherwise applies mimetype/size OR-filters first and
        then ANDs every remaining populated field onto the queryset.
        """
        if not self.is_valid():
            return self.no_query_found()

        # filter_or goes here
        sqs = self.searchqueryset.all()

        mimetype = self.cleaned_data['mimetype']
        if mimetype:
            # Expand the selected file-type groups into concrete mimetypes.
            filter_mimetypes = {'mimetype__in': []}
            for type_, display, mimelist in settings.FILE_TYPE_GROUPINGS:
                if type_ in mimetype:
                    filter_mimetypes['mimetype__in'] += mimelist
                    # With no size constraint the mimetype filter can be
                    # applied directly; otherwise it is combined with the
                    # size filters below.
                    if not self.cleaned_data['size']:
                        sqs = sqs.filter_or(mimetype__in=mimelist)

        if self.cleaned_data['size']:
            # (1024 * 1024) / 2
            # (1024 * 1024) * 10
            filter_sizes = {}
            filter_sizes_exp = {}
            if '<500KB' in self.cleaned_data['size']:
                filter_sizes['size__lt'] = 524288
            if '500KB__10MB' in self.cleaned_data['size']:
                filter_sizes_exp['size__gte'] = 524288
                filter_sizes_exp['size__lte'] = 10485760
            if '>10MB' in self.cleaned_data['size']:
                filter_sizes['size__gt'] = 10485760

            if self.cleaned_data['mimetype']:
                # Add the mimetypes filters to this dict and filter it
                if filter_sizes_exp:
                    filter_sizes_exp.update(filter_mimetypes)
                    sqs = sqs.filter_or(**filter_sizes_exp)
                for filter_or in filter_sizes.items():
                    filter_or = dict((filter_or, ))
                    filter_or.update(filter_mimetypes)
                    sqs = sqs.filter_or(**filter_or)
            else:
                for filter_or in filter_sizes.items():
                    filter_or = dict((filter_or, ))
                    sqs = sqs.filter_or(**filter_or)
                sqs = sqs.filter_or(**filter_sizes_exp)

        if self.cleaned_data['used_by']:
            sqs = sqs.filter_or(used_by__in=self.cleaned_data['used_by']
                                .split())

        if self.cleaned_data['q']:
            # Strip accents so the query matches ASCII-folded index terms.
            q = unicodedata.normalize(
                'NFKD', self.cleaned_data.get('q')
            ).encode('ascii', 'ignore')
            dismax_opts = {
                # NOTE(review): Solr's catch-all fallback query is usually
                # spelled '*:*'; confirm '*.*' is intended here.
                'q.alt': '*.*',
                'pf': 'title^2.1 author^1.9 description^1.7',
                'mm': '2<70%',
                # Date boosting:
                # http://wiki.apache.org/solr/FunctionQuery#Date_Boosting
                'bf': 'recip(ms(NOW/HOUR,modified),3.16e-11,1,1)^10',
            }
            # Whoosh has no edismax parser, so fall back to AutoQuery there.
            hayString = 'haystack.backends.whoosh_backend.WhooshEngine'
            if settings.HAYSTACK_CONNECTIONS['default']['ENGINE'] != hayString:
                sqs = sqs.filter(content=AltParser(
                    'edismax', q, **dismax_opts))
            else:
                sqs = sqs.filter(content=AutoQuery(q))

        if self.cleaned_data['type']:
            sqs = sqs.filter(type=self.cleaned_data['type'])
        if self.cleaned_data['order']:
            for option, dict_order in settings.ORDERING_DATA.items():
                if self.cleaned_data['order'] == option:
                    if dict_order['fields']:
                        sqs = sqs.order_by(*dict_order['fields'])
        if self.cleaned_data['author']:
            sqs = sqs.filter(
                fullname_and_username__contains=self.cleaned_data['author']
            )
        if self.cleaned_data['modified_by']:
            # BUGFIX: this previously read self.cleaned_date (a typo),
            # which raised AttributeError whenever the filter was used.
            modified_by_data = self.cleaned_data['modified_by']
            sqs = sqs.filter(
                fullname_and_username__contains=modified_by_data
            )
        if self.cleaned_data['milestone']:
            sqs = sqs.filter(milestone=self.cleaned_data['milestone'])
        if self.cleaned_data['priority']:
            sqs = sqs.filter(priority=self.cleaned_data['priority'])
        if self.cleaned_data['severity']:
            sqs = sqs.filter(severity=self.cleaned_data['severity'])
        if self.cleaned_data['reporter']:
            sqs = sqs.filter(reporter=self.cleaned_data['reporter'])
        if self.cleaned_data['keywords']:
            sqs = sqs.filter(keywords=self.cleaned_data['keywords'])
        if self.cleaned_data['collaborators']:
            sqs = sqs.filter(collaborators=self.cleaned_data['collaborators'])
        if self.cleaned_data['repository_name']:
            sqs = sqs.filter(
                repository_name=self.cleaned_data['repository_name']
            )
        if self.cleaned_data['username']:
            sqs = sqs.filter(username=self.cleaned_data['username'])
        if self.cleaned_data['name']:
            sqs = sqs.filter(name=self.cleaned_data['name'])
        if self.cleaned_data['institution']:
            sqs = sqs.filter(institution=self.cleaned_data['institution'])
        if self.cleaned_data['role']:
            sqs = sqs.filter(role=self.cleaned_data['role'])
        if self.cleaned_data['tag']:
            sqs = sqs.filter(tag=self.cleaned_data['tag'])
        if self.cleaned_data['list']:
            sqs = sqs.filter(tag__in=self.cleaned_data['list'])
        if self.cleaned_data['since']:
            sqs = sqs.filter(modified__gte=self.cleaned_data['since'])
        if self.cleaned_data['until']:
            sqs = sqs.filter(modified__lte=self.cleaned_data['until'])
        if self.cleaned_data['filename']:
            sqs = sqs.filter(filename=self.cleaned_data['filename'])

        return sqs
| gpl-2.0 |
mitsuhiko/sqlalchemy | lib/sqlalchemy/dialects/mysql/oursql.py | 5 | 8694 | # mysql/oursql.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+oursql
:name: OurSQL
:dbapi: oursql
:connectstring: mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://packages.python.org/oursql/
Unicode
-------
oursql defaults to using ``utf8`` as the connection charset, but other
encodings may be used instead. Like the MySQL-Python driver, unicode support
can be completely disabled::
# oursql sets the connection charset to utf8 automatically; all strings come
# back as utf8 str
create_engine('mysql+oursql:///mydb?use_unicode=0')
To not automatically use ``utf8`` and instead use whatever the connection
defaults to, there is a separate parameter::
# use the default connection charset; all strings come back as unicode
create_engine('mysql+oursql:///mydb?default_charset=1')
# use latin1 as the connection charset; all strings come back as unicode
create_engine('mysql+oursql:///mydb?charset=latin1')
"""
import re
from .base import (BIT, MySQLDialect, MySQLExecutionContext)
from ... import types as sqltypes, util
class _oursqlBIT(BIT):
    """BIT subtype for oursql; conversion is handled by the driver."""

    def result_processor(self, dialect, coltype):
        """oursql already converts mysql bits, so."""
        # Returning None tells SQLAlchemy no post-processing is needed.
        return None
class MySQLExecutionContext_oursql(MySQLExecutionContext):
    """Execution context that exposes oursql's plain-query flag."""

    @property
    def plain_query(self):
        # True when the statement was flagged to bypass the
        # parameterized-query API via execution_options.
        opts = self.execution_options
        return opts.get('_oursql_plain_query', False)
class MySQLDialect_oursql(MySQLDialect):
    """MySQL dialect backed by the oursql DBAPI driver."""

    driver = 'oursql'
    if util.py2k:
        supports_unicode_binds = True
        supports_unicode_statements = True

    supports_native_decimal = True

    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True
    execution_ctx_cls = MySQLExecutionContext_oursql

    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
            sqltypes.Time: sqltypes.Time,
            BIT: _oursqlBIT,
        }
    )

    @classmethod
    def dbapi(cls):
        """Import and return the oursql DBAPI module."""
        return __import__('oursql')

    def do_execute(self, cursor, statement, parameters, context=None):
        """Provide an implementation of *cursor.execute(statement, parameters)*."""
        # Statements flagged via execution_options(_oursql_plain_query=True)
        # skip oursql's parameterized-query API entirely.
        if context and context.plain_query:
            cursor.execute(statement, plain_query=True)
        else:
            cursor.execute(statement, parameters)

    def do_begin(self, connection):
        """Begin a transaction using a plain (non-parameterized) query."""
        connection.cursor().execute('BEGIN', plain_query=True)

    def _xa_query(self, connection, query, xid):
        # Escape and quote the XA transaction id, then interpolate it into
        # the XA statement and run it as a plain query.
        if util.py2k:
            arg = connection.connection._escape_string(xid)
        else:
            charset = self._connection_charset
            arg = connection.connection._escape_string(xid.encode(charset)).decode(charset)
        arg = "'%s'" % arg
        connection.execution_options(_oursql_plain_query=True).execute(query % arg)

    # Because mysql is bad, these methods have to be
    # reimplemented to use _PlainQuery. Basically, some queries
    # refuse to return any data if they're run through
    # the parameterized query API, or refuse to be parameterized
    # in the first place.
    def do_begin_twophase(self, connection, xid):
        """Begin a two-phase (XA) transaction."""
        self._xa_query(connection, 'XA BEGIN %s', xid)

    def do_prepare_twophase(self, connection, xid):
        """End and prepare a two-phase (XA) transaction."""
        self._xa_query(connection, 'XA END %s', xid)
        self._xa_query(connection, 'XA PREPARE %s', xid)

    def do_rollback_twophase(self, connection, xid, is_prepared=True,
                             recover=False):
        """Roll back a two-phase (XA) transaction."""
        if not is_prepared:
            self._xa_query(connection, 'XA END %s', xid)
        self._xa_query(connection, 'XA ROLLBACK %s', xid)

    def do_commit_twophase(self, connection, xid, is_prepared=True,
                           recover=False):
        """Commit a two-phase (XA) transaction, preparing it first if needed."""
        if not is_prepared:
            self.do_prepare_twophase(connection, xid)
        self._xa_query(connection, 'XA COMMIT %s', xid)

    # Q: why didn't we need all these "plain_query" overrides earlier ?
    # am i on a newer/older version of OurSQL ?
    def has_table(self, connection, table_name, schema=None):
        # Delegate to the base dialect, but on a connection flagged for
        # plain (non-parameterized) queries.
        return MySQLDialect.has_table(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            table_name,
            schema
        )

    def get_table_options(self, connection, table_name, schema=None, **kw):
        # Plain-query variant of the base implementation.
        return MySQLDialect.get_table_options(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            table_name,
            schema=schema,
            **kw
        )

    def get_columns(self, connection, table_name, schema=None, **kw):
        # Plain-query variant of the base implementation.
        return MySQLDialect.get_columns(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            table_name,
            schema=schema,
            **kw
        )

    def get_view_names(self, connection, schema=None, **kw):
        # Plain-query variant of the base implementation.
        return MySQLDialect.get_view_names(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            schema=schema,
            **kw
        )

    def get_table_names(self, connection, schema=None, **kw):
        # Plain-query variant of the base implementation.
        return MySQLDialect.get_table_names(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            schema
        )

    def get_schema_names(self, connection, **kw):
        # Plain-query variant of the base implementation.
        return MySQLDialect.get_schema_names(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            **kw
        )

    def initialize(self, connection):
        """Run dialect initialization with plain queries enabled."""
        return MySQLDialect.initialize(
            self,
            connection.execution_options(_oursql_plain_query=True)
        )

    def _show_create_table(self, connection, table, charset=None,
                           full_name=None):
        # SHOW CREATE TABLE must also bypass the parameterized-query API.
        return MySQLDialect._show_create_table(
            self,
            connection.contextual_connect(close_with_result=True).
            execution_options(_oursql_plain_query=True),
            table, charset, full_name
        )

    def is_disconnect(self, e, connection, cursor):
        """Return True if the given exception indicates a dropped connection."""
        if isinstance(e, self.dbapi.ProgrammingError):
            return e.errno is None and 'cursor' not in e.args[1] and e.args[1].endswith('closed')
        else:
            # Known MySQL server-gone-away / lost-connection error codes.
            return e.errno in (2006, 2013, 2014, 2045, 2055)

    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into oursql connect() args."""
        opts = url.translate_connect_args(database='db', username='user',
                                          password='passwd')
        opts.update(url.query)

        util.coerce_kw_type(opts, 'port', int)
        util.coerce_kw_type(opts, 'compress', bool)
        util.coerce_kw_type(opts, 'autoping', bool)
        util.coerce_kw_type(opts, 'raise_on_warnings', bool)

        util.coerce_kw_type(opts, 'default_charset', bool)
        if opts.pop('default_charset', False):
            opts['charset'] = None
        else:
            util.coerce_kw_type(opts, 'charset', str)
        opts['use_unicode'] = opts.get('use_unicode', True)
        util.coerce_kw_type(opts, 'use_unicode', bool)

        # FOUND_ROWS must be set in CLIENT_FLAGS to enable
        # supports_sane_rowcount.
        opts.setdefault('found_rows', True)

        # Collect ssl_* query options into the nested 'ssl' dict oursql
        # expects, stripping the 'ssl_' prefix.
        ssl = {}
        for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
                    'ssl_capath', 'ssl_cipher']:
            if key in opts:
                ssl[key[4:]] = opts[key]
                util.coerce_kw_type(ssl, key[4:], str)
                del opts[key]
        if ssl:
            opts['ssl'] = ssl

        return [[], opts]

    def _get_server_version_info(self, connection):
        # Parse the server_info string (e.g. "5.5.8-log") into a tuple of
        # ints and trailing string fragments.
        dbapi_con = connection.connection
        version = []
        r = re.compile('[.\-]')
        for n in r.split(dbapi_con.server_info):
            try:
                version.append(int(n))
            except ValueError:
                version.append(n)
        return tuple(version)

    def _extract_error_code(self, exception):
        """Return the MySQL error number from a DBAPI exception."""
        return exception.errno

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        return connection.connection.charset

    def _compat_fetchall(self, rp, charset=None):
        """oursql isn't super-broken like MySQLdb, yaaay."""
        return rp.fetchall()

    def _compat_fetchone(self, rp, charset=None):
        """oursql isn't super-broken like MySQLdb, yaaay."""
        return rp.fetchone()

    def _compat_first(self, rp, charset=None):
        """Return the first row of the result proxy."""
        return rp.first()
dialect = MySQLDialect_oursql
| mit |
aidan-/ansible-modules-extras | source_control/gitlab_user.py | 22 | 12534 | #!/usr/bin/python
# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gitlab_user
short_description: Creates/updates/deletes Gitlab Users
description:
- When the user does not exists in Gitlab, it will be created.
- When the user does exists and state=absent, the user will be deleted.
- When changes are made to user, the user will be updated.
version_added: "2.1"
author: "Werner Dijkerman (@dj-wasabi)"
requirements:
- pyapi-gitlab python module
options:
server_url:
description:
- Url of Gitlab server, with protocol (http or https).
required: true
validate_certs:
description:
- When using https if SSL certificate needs to be verified.
required: false
default: true
aliases:
- verify_ssl
login_user:
description:
- Gitlab user name.
required: false
default: null
login_password:
description:
- Gitlab password for login_user
required: false
default: null
login_token:
description:
- Gitlab token for logging in.
required: false
default: null
name:
description:
- Name of the user you want to create
required: true
username:
description:
- The username of the user.
required: true
password:
description:
- The password of the user.
required: true
email:
description:
- The email that belongs to the user.
required: true
sshkey_name:
description:
- The name of the sshkey
required: false
default: null
sshkey_file:
description:
- The ssh key itself.
required: false
default: null
group:
description:
- Add user as an member to this group.
required: false
default: null
access_level:
description:
- The access level to the group. One of the following can be used.
- guest
- reporter
- developer
- master
- owner
required: false
default: null
state:
description:
- create or delete group.
- Possible values are present and absent.
required: false
default: present
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: "Delete Gitlab User"
local_action: gitlab_user
server_url="http://gitlab.dj-wasabi.local"
validate_certs=false
login_token="WnUzDsxjy8230-Dy_k"
username=myusername
state=absent
- name: "Create Gitlab User"
local_action: gitlab_user
server_url="https://gitlab.dj-wasabi.local"
validate_certs=true
login_user=dj-wasabi
login_password="MySecretPassword"
name=My Name
username=myusername
password=mysecretpassword
email=me@home.com
sshkey_name=MySSH
sshkey_file=ssh-rsa AAAAB3NzaC1yc...
state=present
'''
RETURN = '''# '''
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except:
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
class GitLabUser(object):
    """Wrapper around a pyapi-gitlab connection that creates, updates and
    deletes GitLab users and reports results via the Ansible module
    (most methods terminate the run with ``exit_json``)."""

    def __init__(self, module, git):
        # module: the AnsibleModule instance (for exit_json/check_mode).
        # git: an authenticated gitlab.Gitlab client.
        self._module = module
        self._gitlab = git

    def addToGroup(self, group_id, user_id, access_level):
        """Add a user to a group with the given named access level.

        Maps the human-readable level to GitLab's numeric access values.
        ``access_level`` is restricted to these five names by the module's
        argument_spec choices.
        """
        if access_level == "guest":
            level = 10
        elif access_level == "reporter":
            level = 20
        elif access_level == "developer":
            level = 30
        elif access_level == "master":
            level = 40
        elif access_level == "owner":
            level = 50
        return self._gitlab.addgroupmember(group_id, user_id, level)

    def createOrUpdateUser(self, user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level):
        """Create the user if missing, otherwise update it in place."""
        group_id = ''
        arguments = {"name": user_name,
                     "username": user_username,
                     "email": user_email}

        # Resolve the target group's id up front (if a group was given).
        if group_name is not None:
            if self.existsGroup(group_name):
                group_id = self.getGroupId(group_name)

        if self.existsUser(user_username):
            self.updateUser(group_id, user_sshkey_name, user_sshkey_file, access_level, arguments)
        else:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self.createUser(group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments)

    def createUser(self, group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments):
        """Create a new user, attach the ssh key and optional group membership,
        then exit the module with the change result."""
        user_changed = False

        # Create the user
        user_username = arguments['username']
        user_name = arguments['name']
        user_email = arguments['email']
        if self._gitlab.createuser(password=user_password, **arguments):
            user_id = self.getUserId(user_username)
            if self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
                user_changed = True
            # Add the user to the group if group_id is not empty
            if group_id != '':
                if self.addToGroup(group_id, user_id, access_level):
                    user_changed = True
            user_changed = True

        # Exit with change to true or false
        if user_changed:
            self._module.exit_json(changed=True, result="Created the user")
        else:
            self._module.exit_json(changed=False)

    def deleteUser(self, user_username):
        """Delete the named user and exit the module with the result."""
        user_id = self.getUserId(user_username)

        if self._gitlab.deleteuser(user_id):
            self._module.exit_json(changed=True, result="Successfully deleted user %s" % user_username)
        else:
            self._module.exit_json(changed=False, result="User %s already deleted or something went wrong" % user_username)

    def existsGroup(self, group_name):
        """Return True if a group with the given name exists."""
        for group in self._gitlab.getall(self._gitlab.getgroups):
            if group['name'] == group_name:
                return True
        return False

    def existsUser(self, username):
        """Return True if the user search yields any result.

        NOTE(review): getusers(search=...) is a substring search; this
        returns True on the first hit with a non-empty id -- verify that
        it cannot match a different user with a similar name.
        """
        found_user = self._gitlab.getusers(search=username)
        for user in found_user:
            if user['id'] != '':
                return True
        return False

    def getGroupId(self, group_name):
        """Return the id of the named group, or None if not found."""
        for group in self._gitlab.getall(self._gitlab.getgroups):
            if group['name'] == group_name:
                return group['id']

    def getUserId(self, username):
        """Return the id of the first user matching the search term."""
        found_user = self._gitlab.getusers(search=username)
        for user in found_user:
            if user['id'] != '':
                return user['id']

    def updateUser(self, group_id, user_sshkey_name, user_sshkey_file, access_level, arguments):
        """Reconcile an existing user with the desired attributes and exit
        the module reporting whether anything changed."""
        user_changed = False
        user_username = arguments['username']
        user_id = self.getUserId(user_username)
        user_data = self._gitlab.getuser(user_id=user_id)

        # Lets check if we need to update the user
        for arg_key, arg_value in arguments.items():
            if user_data[arg_key] != arg_value:
                user_changed = True

        if user_changed:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._gitlab.edituser(user_id=user_id, **arguments)
            user_changed = True
        # In check mode the ssh-key/group calls are skipped but still
        # reported as a change.
        if self._module.check_mode or self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
            user_changed = True
        if group_id != '':
            if self._module.check_mode or self.addToGroup(group_id, user_id, access_level):
                user_changed = True

        if user_changed:
            self._module.exit_json(changed=True, result="The user %s is updated" % user_username)
        else:
            self._module.exit_json(changed=False, result="The user %s is already up2date" % user_username)
def main():
    """Entry point of the gitlab_user Ansible module.

    Parses module arguments, connects to the GitLab server (either with
    user/password credentials or an API token) and delegates the
    create/update/delete work to GitLabUser.
    """
    # NOTE: the original declared ``global user_id`` here, but user_id is
    # never read or assigned at module scope -- the dead declaration has
    # been removed.
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True),
            validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
            login_user=dict(required=False, no_log=True),
            login_password=dict(required=False, no_log=True),
            login_token=dict(required=False, no_log=True),
            name=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            email=dict(required=True),
            sshkey_name=dict(required=False),
            sshkey_file=dict(required=False),
            group=dict(required=False),
            access_level=dict(required=False, choices=["guest", "reporter", "developer", "master", "owner"]),
            state=dict(default="present", choices=["present", "absent"]),
        ),
        supports_check_mode=True
    )

    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab")

    server_url = module.params['server_url']
    verify_ssl = module.params['validate_certs']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_token = module.params['login_token']
    user_name = module.params['name']
    user_username = module.params['username']
    user_password = module.params['password']
    user_email = module.params['email']
    user_sshkey_name = module.params['sshkey_name']
    user_sshkey_file = module.params['sshkey_file']
    group_name = module.params['group']
    access_level = module.params['access_level']
    state = module.params['state']

    # We need both login_user and login_password or login_token, otherwise we fail.
    if login_user is not None and login_password is not None:
        use_credentials = True
    elif login_token is not None:
        use_credentials = False
    else:
        module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")

    # Check if vars are none
    if user_sshkey_file is not None and user_sshkey_name is not None:
        use_sshkey = True
    else:
        use_sshkey = False

    if group_name is not None and access_level is not None:
        add_to_group = True
        group_name = group_name.lower()
    else:
        add_to_group = False

    user_username = user_username.lower()

    # Lets make an connection to the Gitlab server_url, with either login_user and login_password
    # or with login_token
    try:
        if use_credentials:
            git = gitlab.Gitlab(host=server_url)
            git.login(user=login_user, password=login_password)
        else:
            git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
    except Exception:
        e = get_exception()
        module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)

    # Validate if group exists and take action based on "state"
    user = GitLabUser(module, git)

    # Check if user exists, if not exists and state = absent, we exit nicely.
    if not user.existsUser(user_username) and state == "absent":
        module.exit_json(changed=False, result="User already deleted or does not exists")
    else:
        # User exists,
        if state == "absent":
            user.deleteUser(user_username)
        else:
            user.createOrUpdateUser(user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level)


if __name__ == '__main__':
    main()
| gpl-3.0 |
fluxw42/youtube-dl | youtube_dl/extractor/abcotvs.py | 15 | 3903 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
)
class ABCOTVSIE(InfoExtractor):
    """Extractor for video pages on ABC Owned Television Station sites
    (abc7news.com, abc7ny.com, 6abc.com, etc.)."""

    IE_NAME = 'abcotvs'
    IE_DESC = 'ABC Owned Television Stations'
    _VALID_URL = r'https?://(?:abc(?:7(?:news|ny|chicago)?|11|13|30)|6abc)\.com(?:/[^/]+/(?P<display_id>[^/]+))?/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://abc7news.com/entertainment/east-bay-museum-celebrates-vintage-synthesizers/472581/',
            'info_dict': {
                'id': '472581',
                'display_id': 'east-bay-museum-celebrates-vintage-synthesizers',
                'ext': 'mp4',
                'title': 'East Bay museum celebrates vintage synthesizers',
                'description': 'md5:a4f10fb2f2a02565c1749d4adbab4b10',
                'thumbnail': r're:^https?://.*\.jpg$',
                'timestamp': 1421123075,
                'upload_date': '20150113',
                'uploader': 'Jonathan Bloom',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'http://abc7news.com/472581',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        """Scrape the video page's metadata and HLS manifest URL."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        # The slug is optional in the URL; fall back to the numeric id.
        display_id = mobj.group('display_id') or video_id

        webpage = self._download_webpage(url, display_id)

        # The HLS manifest lives in a <meta contentURL> tag; strip the
        # query string before handing it to the m3u8 parser.
        m3u8 = self._html_search_meta(
            'contentURL', webpage, 'm3u8 url', fatal=True).split('?')[0]

        formats = self._extract_m3u8_formats(m3u8, display_id, 'mp4')
        self._sort_formats(formats)

        title = self._og_search_title(webpage).strip()
        description = self._og_search_description(webpage).strip()
        thumbnail = self._og_search_thumbnail(webpage)
        # Upload date comes from the visible timestamp widget's datetime attr.
        timestamp = parse_iso8601(self._search_regex(
            r'<div class="meta">\s*<time class="timeago" datetime="([^"]+)">',
            webpage, 'upload date', fatal=False))
        uploader = self._search_regex(
            r'rel="author">([^<]+)</a>',
            webpage, 'uploader', default=None)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader': uploader,
            'formats': formats,
        }
class ABCOTVSClipsIE(InfoExtractor):
    """Extractor for clips.abcotvs.com, which exposes clip metadata
    through a JSON lookup endpoint."""

    IE_NAME = 'abcotvs:clips'
    _VALID_URL = r'https?://clips\.abcotvs\.com/(?:[^/]+/)*video/(?P<id>\d+)'
    _TEST = {
        'url': 'https://clips.abcotvs.com/kabc/video/214814',
        'info_dict': {
            'id': '214814',
            'ext': 'mp4',
            'title': 'SpaceX launch pad explosion destroys rocket, satellite',
            'description': 'md5:9f186e5ad8f490f65409965ee9c7be1b',
            'upload_date': '20160901',
            'timestamp': 1472756695,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        clip_id = self._match_id(url)

        # Metadata comes from a JSON API keyed by the numeric clip id.
        clip = self._download_json(
            'https://clips.abcotvs.com/vogo/video/getByIds?ids=' + clip_id,
            clip_id)['results'][0]
        clip_title = clip['title']

        # Strip the query string off the manifest URL before parsing.
        manifest_url = clip['videoURL'].split('?')[0]
        formats = self._extract_m3u8_formats(manifest_url, clip_id, 'mp4')
        self._sort_formats(formats)

        info = {'id': clip_id, 'title': clip_title, 'formats': formats}
        info['description'] = clip.get('description')
        info['thumbnail'] = clip.get('thumbnailURL')
        info['duration'] = int_or_none(clip.get('duration'))
        info['timestamp'] = int_or_none(clip.get('pubDate'))
        return info
| unlicense |
MartinHjelmare/home-assistant | homeassistant/components/lifx/__init__.py | 5 | 1836 | """Support for LIFX."""
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant import config_entries
from homeassistant.const import CONF_PORT
from homeassistant.helpers import config_entry_flow
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
# Integration domain and configuration keys.
DOMAIN = 'lifx'

CONF_SERVER = 'server'
CONF_BROADCAST = 'broadcast'

# Schema for a single network interface entry under the light platform.
INTERFACE_SCHEMA = vol.Schema({
    vol.Optional(CONF_SERVER): cv.string,
    vol.Optional(CONF_PORT): cv.port,
    vol.Optional(CONF_BROADCAST): cv.string,
})

# Top-level YAML schema: lifx -> light -> list of interface entries.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: {
        LIGHT_DOMAIN:
            vol.Schema(vol.All(cv.ensure_list, [INTERFACE_SCHEMA])),
    }
}, extra=vol.ALLOW_EXTRA)

# hass.data key under which the light platform stores its manager object.
DATA_LIFX_MANAGER = 'lifx_manager'
async def async_setup(hass, config):
    """Set up the LIFX component."""
    domain_config = config.get(DOMAIN)
    hass.data[DOMAIN] = domain_config or {}

    # YAML configuration present: trigger an import-flow so a config
    # entry gets created from it.
    if domain_config is not None:
        hass.async_create_task(hass.config_entries.flow.async_init(
            DOMAIN, context={'source': config_entries.SOURCE_IMPORT}))

    return True
async def async_setup_entry(hass, entry):
    """Set up LIFX from a config entry."""
    # Forward the entry to the light platform, which does the real work.
    forward = hass.config_entries.async_forward_entry_setup(
        entry, LIGHT_DOMAIN)
    hass.async_create_task(forward)
    return True
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    # Tear down the manager stored by the light platform, then unload it.
    manager = hass.data.pop(DATA_LIFX_MANAGER)
    manager.cleanup()
    await hass.config_entries.async_forward_entry_unload(entry, LIGHT_DOMAIN)
    return True
async def _async_has_devices(hass):
    """Return if there are devices that can be discovered."""
    import aiolifx

    # Scan the local network for LIFX bulbs; any hit means devices exist.
    discovered_addresses = await aiolifx.LifxScan(hass.loop).scan()
    return len(discovered_addresses) > 0
# Register a discovery-based config flow that uses _async_has_devices to
# decide whether a LIFX config entry can be created.
config_entry_flow.register_discovery_flow(
    DOMAIN, 'LIFX', _async_has_devices, config_entries.CONN_CLASS_LOCAL_POLL)
| apache-2.0 |
cherusk/ansible | lib/ansible/modules/network/ios/ios_config.py | 19 | 13988 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: ios_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage Cisco IOS configuration sections
description:
- Cisco IOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with IOS configuration sections in
a deterministic way.
extends_documentation_fragment: ios
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines).
required: false
default: null
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block']
multiline_delimiter:
description:
- This argument is used when pushing a multiline configuration
element to the IOS device. It specifies the character to use
as the delimiting character. This only applies to the
configuration action.
required: false
default: "@"
version_added: "2.3"
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: ["true", "false"]
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
config:
description:
- The C(config) argument allows the playbook designer to supply
the base configuration to be used to validate configuration
changes necessary. If this argument is provided, the module
will not download the running-config from the remote node.
required: false
default: null
version_added: "2.2"
defaults:
description:
- This argument specifies whether or not to collect all defaults
when getting the remote device running config. When enabled,
the module will get the current config by issuing the command
C(show running-config all).
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
"""
EXAMPLES = """
- name: configure top level configuration
ios_config:
lines: hostname {{ inventory_hostname }}
- name: configure interface settings
ios_config:
lines:
- description test interface
- ip address 172.31.1.1 255.255.255.0
parents: interface Ethernet1
- name: load new acl into device
ios_config:
lines:
- 10 permit ip host 1.1.1.1 any log
- 20 permit ip host 2.2.2.2 any log
- 30 permit ip host 3.3.3.3 any log
- 40 permit ip host 4.4.4.4 any log
- 50 permit ip host 5.5.5.5 any log
parents: ip access-list extended test
before: no ip access-list extended test
match: exact
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/ios_config.2016-07-16@22:28:34
"""
import re
import time
from ansible.module_utils.ios import run_commands, get_config, load_config
from ansible.module_utils.ios import get_defaults_flag
from ansible.module_utils.ios import ios_argument_spec
from ansible.module_utils.ios import check_args as ios_check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcli import Conditional
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.six import iteritems
def check_args(module, warnings):
    """Run ios_config-specific argument validation on top of the shared checks.

    Appends a deprecation warning for ``force`` and fails the module when
    ``multiline_delimiter`` is not exactly one character.
    """
    ios_check_args(module, warnings)

    delimiter = module.params['multiline_delimiter']
    if delimiter and len(delimiter) != 1:
        module.fail_json(msg='multiline_delimiter value can only be a '
                             'single character')

    if module.params['force']:
        warnings.append('The force argument is deprecated as of Ansible 2.2, '
                        'please use match=none instead. This argument will '
                        'be removed in the future')
def extract_banners(config):
    """Strip banner blocks out of an IOS config string.

    Returns a ``(config, banners)`` tuple where ``banners`` maps
    'banner <type>' keys to the (stripped) banner body and ``config`` is the
    input with each banner body removed and the emptied 'banner X ^C^C'
    stubs replaced by '!! banner removed'.

    Fix: the original version ran a second, identical findall/search pass
    over a string it was mutating as it went, doing the regex work twice
    and risking removal of the wrong text when one banner's body occurred
    inside another banner's block.  We now match each banner exactly once
    and remove the recorded bodies afterwards.
    """
    banners = {}
    bodies = []
    for cmd in re.findall(r'^banner (\w+)', config, re.M):
        match = re.search(r'banner %s \^C(.+?)(?=\^C)' % cmd, config, re.S)
        if match:
            banners['banner %s' % cmd] = match.group(1).strip()
            # Remember the exact (unstripped) body so we can excise it below.
            bodies.append(str(match.group(1)))
    for body in bodies:
        config = config.replace(body, '')
    config = re.sub(r'banner \w+ \^C\^C', '!! banner removed', config)
    return (config, banners)
def diff_banners(want, have):
    """Return the subset of *want* whose values differ from *have*.

    A banner is included when it is missing from *have* or its body text
    does not match.  Rewritten as a dict comprehension using ``.items()``,
    which behaves identically on Python 2 and 3 and removes the need for
    the ``six.iteritems`` indirection.
    """
    return {key: value for key, value in want.items()
            if value != have.get(key)}
def load_banners(module, banners):
    """Push banner blocks to the device, one command at a time.

    Each banner is sent as the sequence: 'config terminal', the banner
    command suffixed with the multiline delimiter, the banner body, the
    closing delimiter, and 'end', with a short pause between commands so
    the device's parser keeps up.  A trailing newline flushes the session.

    Fix: removed a dead local (``obj = {'command': ..., 'sendonly': True}``)
    that was built every iteration but never used.
    """
    delimiter = module.params['multiline_delimiter']
    for key, value in banners.items():
        key += ' %s' % delimiter
        for cmd in ['config terminal', key, value, delimiter, 'end']:
            run_commands(module, [cmd])
            # Brief pause so the device config parser can process each line.
            time.sleep(0.1)
    run_commands(module, ['\n'])
def get_running_config(module):
    """Return (NetworkConfig, banners) for the device's base configuration.

    Uses the user-supplied ``config`` parameter when present; otherwise
    fetches the running-config from the device (optionally with defaults).
    Banners are split out so they can be diffed separately.
    """
    text = module.params['config']
    if not text:
        flags = [get_defaults_flag(module)] if module.params['defaults'] else []
        text = get_config(module, flags=flags)
    text, banners = extract_banners(text)
    return NetworkConfig(indent=1, contents=text), banners
def get_candidate(module):
    """Assemble the candidate configuration the user wants applied.

    Returns (candidate NetworkConfig, banners dict).  Banners are only
    extracted from ``src``; ``lines`` are loaded beneath ``parents`` when
    given.
    """
    candidate = NetworkConfig(indent=1)
    banners = {}
    params = module.params
    if params['src']:
        src, banners = extract_banners(params['src'])
        candidate.load(src)
    elif params['lines']:
        candidate.add(params['lines'], parents=params['parents'] or list())
    return candidate, banners
def main():
    """Main entry point for module execution.

    Builds the argument spec, diffs the candidate configuration
    (``lines``/``src``) against the device's running-config, and pushes
    any required commands and banners unless running in check mode.
    Also handles the optional backup and save steps.
    """
    argument_spec = dict(
        src=dict(type='path'),
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block']),
        multiline_delimiter=dict(default='@'),
        # this argument is deprecated (2.2) in favor of setting match: none
        # it will be removed in a future version
        force=dict(default=False, type='bool'),
        config=dict(),
        defaults=dict(type='bool', default=False),
        backup=dict(type='bool', default=False),
        save=dict(type='bool', default=False),
    )
    argument_spec.update(ios_argument_spec)
    mutually_exclusive = [('lines', 'src')]
    required_if = [('match', 'strict', ['lines']),
                   ('match', 'exact', ['lines']),
                   ('replace', 'block', ['lines'])]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           required_if=required_if,
                           supports_check_mode=True)
    # Deprecated force=true is shorthand for match=none (skip the diff).
    if module.params['force'] is True:
        module.params['match'] = 'none'
    result = {'changed': False}
    warnings = list()
    check_args(module, warnings)
    result['warnings'] = warnings
    # Only compute a diff when the user actually supplied configuration.
    if any((module.params['lines'], module.params['src'])):
        match = module.params['match']
        replace = module.params['replace']
        path = module.params['parents']
        candidate, want_banners = get_candidate(module)
        if match != 'none':
            config, have_banners = get_running_config(module)
            path = module.params['parents']
            configobjs = candidate.difference(config, path=path, match=match,
                                              replace=replace)
        else:
            # match=none: push the full candidate without comparing.
            configobjs = candidate.items
            have_banners = {}
        banners = diff_banners(want_banners, have_banners)
        if configobjs or banners:
            commands = dumps(configobjs, 'commands').split('\n')
            # before/after wrappers only apply to user-supplied lines.
            if module.params['lines']:
                if module.params['before']:
                    commands[:0] = module.params['before']
                if module.params['after']:
                    commands.extend(module.params['after'])
            result['commands'] = commands
            result['updates'] = commands
            result['banners'] = banners
            # send the configuration commands to the device and merge
            # them with the current running config
            if not module.check_mode:
                if commands:
                    load_config(module, commands)
                if banners:
                    load_banners(module, banners)
            result['changed'] = True
    if module.params['backup']:
        result['__backup__'] = get_config(module=module)
    if module.params['save']:
        if not module.check_mode:
            run_commands(module, ['copy running-config startup-config\r'])
        # Saving counts as a change even when no commands were pushed.
        result['changed'] = True
    module.exit_json(**result)
# Standard module entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| gpl-3.0 |
ekasitk/sahara | sahara/service/edp/oozie/workflow_creator/shell_workflow.py | 9 | 1685 | # Copyright (c) 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service.edp.oozie.workflow_creator import base_workflow
from sahara.utils import xmlutils as x
class ShellWorkflowCreator(base_workflow.OozieWorkflowCreator):
    """Builds Oozie workflow XML documents for shell actions."""

    # XML namespace attribute identifying the Oozie shell-action schema.
    SHELL_XMLNS = {"xmlns": "uri:oozie:shell-action:0.1"}

    def __init__(self):
        super(ShellWorkflowCreator, self).__init__('shell')

    def build_workflow_xml(self, script_name, prepare=None,
                           job_xml=None, configuration=None, env_vars=None,
                           arguments=None, files=None):
        """Populate the workflow document for running *script_name*.

        Fix: the mutable default arguments ({} and []) were replaced with
        None sentinels to avoid cross-call sharing; callers passing values
        explicitly are unaffected.

        NOTE(review): job_xml is accepted but unused here, matching the
        original signature — confirm against the base-class contract.
        """
        prepare = {} if prepare is None else prepare
        env_vars = {} if env_vars is None else env_vars
        arguments = [] if arguments is None else arguments
        files = [] if files is None else files

        x.add_attributes_to_element(self.doc, self.tag_name, self.SHELL_XMLNS)

        # Prepare (delete/mkdir) steps, emitted in sorted key order.
        for k in sorted(prepare):
            self._add_to_prepare_element(k, prepare[k])

        self._add_configuration_elements(configuration)

        x.add_text_element_to_tag(self.doc, self.tag_name, 'exec', script_name)

        for arg in arguments:
            x.add_text_element_to_tag(self.doc, self.tag_name, 'argument', arg)

        x.add_equal_separated_dict(self.doc, self.tag_name,
                                   'env-var', env_vars)

        # The script itself must ship with the job's files.
        self._add_files_and_archives(files + [script_name], [])
| apache-2.0 |
BartVB/ansible-modules-core | cloud/amazon/ec2_metric_alarm.py | 43 | 10680 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms
- Metrics you wish to alarm on must already exist
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
options: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
options: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
- The number of times in which the metric is evaluated before final calculation
required: false
unit:
description:
- The threshold's unit of measurement
required: false
options: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes to what the alarm is applied
required: false
alarm_actions:
description:
- A list of the names action(s) taken when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
import sys
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_metric_alarm(connection, module):
    """Create a new CloudWatch metric alarm, or update an existing one.

    If no alarm with the requested name exists, a MetricAlarm is created
    from the module parameters.  Otherwise the existing alarm's attributes
    are compared field by field and re-created only when something changed.
    Exits the module via exit_json/fail_json in all paths.
    """
    name = module.params.get('name')
    metric = module.params.get('metric')
    namespace = module.params.get('namespace')
    statistic = module.params.get('statistic')
    comparison = module.params.get('comparison')
    threshold = module.params.get('threshold')
    period = module.params.get('period')
    evaluation_periods = module.params.get('evaluation_periods')
    unit = module.params.get('unit')
    description = module.params.get('description')
    dimensions = module.params.get('dimensions')
    alarm_actions = module.params.get('alarm_actions')
    insufficient_data_actions = module.params.get('insufficient_data_actions')
    ok_actions = module.params.get('ok_actions')
    alarms = connection.describe_alarms(alarm_names=[name])
    if not alarms:
        # No alarm by this name yet: build and create it from scratch.
        alm = MetricAlarm(
            name=name,
            metric=metric,
            namespace=namespace,
            statistic=statistic,
            comparison=comparison,
            threshold=threshold,
            period=period,
            evaluation_periods=evaluation_periods,
            unit=unit,
            description=description,
            dimensions=dimensions,
            alarm_actions=alarm_actions,
            insufficient_data_actions=insufficient_data_actions,
            ok_actions=ok_actions
        )
        try:
            connection.create_alarm(alm)
            changed = True
            # Re-fetch so the exit_json payload reflects server-side state.
            alarms = connection.describe_alarms(alarm_names=[name])
        except BotoServerError, e:
            module.fail_json(msg=str(e))
    else:
        # Alarm exists: copy over any scalar attributes that differ.
        alarm = alarms[0]
        changed = False
        for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
            if getattr(alarm, attr) != module.params.get(attr):
                changed = True
                setattr(alarm, attr, module.params.get(attr))
        #this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
        comparison = alarm.comparison
        comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
        alarm.comparison = comparisons[comparison]
        # Dimensions: normalize scalar values to single-element lists, then
        # compare key by key against what the alarm currently has.
        dim1 = module.params.get('dimensions')
        dim2 = alarm.dimensions
        for keys in dim1:
            if not isinstance(dim1[keys], list):
                dim1[keys] = [dim1[keys]]
            if keys not in dim2 or dim1[keys] != dim2[keys]:
                changed=True
                setattr(alarm, 'dimensions', dim1)
        # Action lists: treat a missing parameter as the empty list.
        for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
            action = module.params.get(attr) or []
            if getattr(alarm, attr) != action:
                changed = True
                setattr(alarm, attr, module.params.get(attr))
        try:
            # create_alarm on an existing name performs an update.
            if changed:
                connection.create_alarm(alarm)
        except BotoServerError, e:
            module.fail_json(msg=str(e))
    result = alarms[0]
    module.exit_json(changed=changed, name=result.name,
        actions_enabled=result.actions_enabled,
        alarm_actions=result.alarm_actions,
        alarm_arn=result.alarm_arn,
        comparison=result.comparison,
        description=result.description,
        dimensions=result.dimensions,
        evaluation_periods=result.evaluation_periods,
        insufficient_data_actions=result.insufficient_data_actions,
        last_updated=result.last_updated,
        metric=result.metric,
        namespace=result.namespace,
        ok_actions=result.ok_actions,
        period=result.period,
        state_reason=result.state_reason,
        state_value=result.state_value,
        statistic=result.statistic,
        threshold=result.threshold,
        unit=result.unit)
def delete_metric_alarm(connection, module):
    """Delete the named CloudWatch alarm if it exists.

    Exits with changed=True when an alarm was removed, changed=False when
    there was nothing to delete; fails the module on an AWS error.
    """
    name = module.params.get('name')
    alarms = connection.describe_alarms(alarm_names=[name])
    if alarms:
        try:
            connection.delete_alarms([name])
            module.exit_json(changed=True)
        except BotoServerError, e:
            module.fail_json(msg=str(e))
    else:
        module.exit_json(changed=False)
def main():
    """Module entry point: parse arguments, connect, and dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            metric=dict(type='str'),
            namespace=dict(type='str'),
            statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
            comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
            threshold=dict(type='float'),
            period=dict(type='int'),
            unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
            evaluation_periods=dict(type='int'),
            description=dict(type='str'),
            dimensions=dict(type='dict'),
            alarm_actions=dict(type='list'),
            insufficient_data_actions=dict(type='list'),
            ok_actions=dict(type='list'),
            state=dict(default='present', choices=['present', 'absent']),
            region=dict(aliases=['aws_region', 'ec2_region']),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    # boto is imported lazily at module load; bail out early if missing.
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    state = module.params.get('state')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    try:
        connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, StandardError), e:
        module.fail_json(msg=str(e))
    # Dispatch: present creates/updates, absent deletes.
    if state == 'present':
        create_metric_alarm(connection, module)
    elif state == 'absent':
        delete_metric_alarm(connection, module)
# Fix: guard the entry point so importing the module has no side effects;
# executing the file directly still runs main() as before.
if __name__ == '__main__':
    main()
| gpl-3.0 |
jeasoft/odoo | marcos_addons/marcos_ncf/__openerp__.py | 2 | 2410 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata (eneldo@marcos.do)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': "Marcos NCF",
    'summary': """
        Administracion de comprobantes fiscales
        """,
    # NOTE(review): the description text contains a duplicated "y" — it is a
    # runtime string shown in the Odoo UI, left as-is here.
    'description': """
        Este modulo agiliza y configura el proceso de asignacion y
        y asignacion de comprobantes fiscales por sucursales.
    """,
    'author': "Marcos Organizador de Negocios SRL",
    'website': "http://marcos.com",
    # Categories can be used to filter modules in modules listing
    # Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
    # for the full list
    'category': 'Extra Tools',
    'version': '0.1',
    # any module necessary for this one to work correctly
    'depends': ['base', 'web', 'account', 'marcos_l10n_do', 'debit_credit_note', 'account_voucher', 'account_check_writing', 'marcos_branding'],
    # always loaded
    # XML/CSV data files are loaded in order; security rules come first.
    'data': [
        'security/security.xml',
        'security/ir.model.access.csv',
        'account/shop_view.xml',
        'res_partner/res_partner_view.xml',
        'data.xml',
        'templates.xml',
        'account/account_invoice_view.xml',
        'res/res_users_view.xml',
        'dgii_compras/view.xml',
        'res/res_currency_view.xml',
        'account_voucher/account_voucher_view.xml',
        'wizard/invoice_credit_apply_view.xml'
    ],
    # 'js': ['static/src/js/ipf.js'],
    'qweb': [],
    # only loaded in demonstration mode
    # 'demo': [
    # ],
} | agpl-3.0 |
turtleloveshoes/kitsune | kitsune/forums/tests/test_feeds.py | 17 | 1665 | from datetime import datetime, timedelta
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.forums.feeds import ThreadsFeed, PostsFeed
from kitsune.forums.tests import ForumTestCase, forum, thread, post
from kitsune.sumo.tests import get
# Reference timestamp one day in the past, used to create "older" fixtures.
YESTERDAY = datetime.now() - timedelta(days=1)
class ForumTestFeedSorting(ForumTestCase):
    """Ordering and titling behavior of the forum Atom feeds."""

    def setUp(self):
        super(ForumTestFeedSorting, self).setUp()

    def test_threads_sort(self):
        """The threads feed lists the most recently active thread first."""
        containing_forum = forum(save=True)
        stale = thread(forum=containing_forum, created=YESTERDAY, save=True)
        post(thread=stale, created=YESTERDAY, save=True)
        fresh = thread(forum=containing_forum, save=True)
        post(thread=fresh, save=True)
        eq_(fresh.id, ThreadsFeed().items(containing_forum)[0].id)

    def test_posts_sort(self):
        """The posts feed lists the newest post first."""
        containing_thread = thread(save=True)
        post(thread=containing_thread, created=YESTERDAY, save=True)
        post(thread=containing_thread, created=YESTERDAY, save=True)
        newest = post(thread=containing_thread, save=True)
        eq_(newest.id, PostsFeed().items(containing_thread)[0].id)

    def test_multi_feed_titling(self):
        """The threads page advertises its feed with the feed's title."""
        containing_thread = thread(save=True)
        parent_forum = containing_thread.forum
        post(thread=containing_thread, save=True)

        response = get(self.client, 'forums.threads', args=[parent_forum.slug])
        doc = pq(response.content)
        eq_(ThreadsFeed().title(parent_forum),
            doc('link[type="application/atom+xml"]')[0].attrib['title'])
ewiseblatt/spinnaker | dev/buildtool/hal_support.py | 2 | 5890 | #!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class for administering halyard.
This module is a helper module for commands that need to interact with the
halyard runtime repositories (build and release info). It has nothing to
do with the static git source repositories.
This module encapsulates knowledge of how to administer halyard, and
more importantly to ensure that the locally running halyard is configured
as intended when publishing artifacts. This is a safety measure to ensure
that custom or test builds do not show up in the production repositories
unless the builds explicitly asked for the production repositories.
"""
import logging
import os
import yaml
try:
from urllib2 import urlopen, HTTPError
except ImportError:
from urllib.request import urlopen
from urllib.error import HTTPError
from buildtool import (
add_parser_argument,
check_subprocess,
raise_and_log_error,
ConfigError,
ResponseError)
class HalRunner(object):
  """Wraps the local 'hal' CLI and the running halyard daemon.

  Publishing commands use this to administer halyard's runtime
  repositories while verifying the daemon is configured as intended.
  """

  @staticmethod
  def add_parser_args(parser, defaults):
    """Register the command-line arguments used to locate and reach halyard."""
    if hasattr(parser, 'added_halrunner'):
      return  # Another command already registered these.
    parser.added_halrunner = True
    add_parser_argument(
        parser, 'hal_path', defaults, '/usr/local/bin/hal',
        help='Path to local Halyard "hal" CLI.')
    add_parser_argument(
        parser, 'halyard_daemon', defaults, 'localhost:8064',
        help='Network location for halyard server.')

  @property
  def options(self):
    """The bound command-line options."""
    return self.__options

  def __init__(self, options):
    """Bind options and snapshot the daemon's resolved configuration."""
    self.__options = options
    self.__hal_path = options.hal_path

    logging.debug('Retrieving halyard runtime configuration.')
    url = 'http://' + options.halyard_daemon + '/resolvedEnv'
    try:
      response = urlopen(url)
    except HTTPError as error:
      raise_and_log_error(
          ResponseError(
              '{url}: {code}\n{body}'.format(
                  url=url, code=error.code, body=error.read()),
              server='halyard'))
    self.__halyard_runtime_config = yaml.safe_load(response)

  def check_property(self, name, want):
    """Fail unless the daemon's resolved config value for *name* is *want*."""
    have = self.__halyard_runtime_config[name]
    if have != want:
      raise_and_log_error(
          ConfigError(
              'Halyard server is not configured to support this request.\n'
              'It is using {name}={have!r} rather than {want!r}.\n'
              'You will need to modify /opt/spinnaker/config/halyard-local.yml'
              ' and restart the halyard server.'.format(
                  name=name, have=have, want=want),
              cause='config/halyard'))
    logging.debug('Confirmed Halyard server is configured with %s="%s"',
                  name, have)

  def check_writer_enabled(self):
    """Ensure the daemon accepts writes before we publish anything."""
    self.check_property('spinnaker.config.input.writerEnabled', 'true')

  def check_run(self, command_line):
    """Run 'hal <command_line>' against the daemon; raise on failure."""
    flags = ' --color false --daemon-endpoint http://{daemon} '.format(
        daemon=self.__options.halyard_daemon)
    return check_subprocess(self.__hal_path + flags + command_line)

  def publish_profile(self, component, profile_path, bom_path):
    """Publish a component's halconfig profile for the given BOM."""
    logging.info('Publishing %s profile=%s for bom=%s',
                 component, profile_path, bom_path)
    self.check_run(
        'admin publish profile {component} --bom-path {bom}'
        ' --profile-path {profile}'.format(
            component=component, bom=bom_path, profile=profile_path))

  def publish_bom_path(self, path):
    """Publish the BOM file at *path* via halyard."""
    logging.info('Publishing bom from %s', path)
    bom_path = os.path.abspath(path)
    self.check_run('admin publish bom --bom-path ' + bom_path)

  def retrieve_bom_version(self, version):
    """Fetch the given BOM version and return it as a dict."""
    logging.info('Getting bom version %s', version)
    return yaml.safe_load(
        self.check_run('version bom ' + version + ' --quiet'))

  def publish_halyard_release(self, release_version):
    """Mark *release_version* as the latest available halyard version."""
    logging.info('Publishing latest halyard version "%s"', release_version)
    self.check_run('admin publish latest-halyard ' + release_version)

  def publish_spinnaker_release(
      self, release_version, alias_name, changelog_uri, min_halyard_version,
      latest=True):
    """Publish a spinnaker release, optionally marking it as latest."""
    logging.info('Publishing spinnaker version "%s" to halyard',
                 release_version)
    self.check_run('admin publish version --version "{version}"'
                   ' --alias "{alias}" --changelog {changelog}'
                   ' --minimum-halyard-version {halyard_version}'
                   .format(version=release_version, alias=alias_name,
                           changelog=changelog_uri,
                           halyard_version=min_halyard_version))
    if not latest:
      return
    logging.info(
        'Publishing spinnaker version "%s" as latest', release_version)
    self.check_run('admin publish latest "{version}"'.format(
        version=release_version))
| apache-2.0 |
hjanime/VisTrails | vistrails/gui/variable_dropbox.py | 1 | 22113 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This file contains widgets that can be used for dropping Constant class
variables. It will construct an input form for the value.
QVariableDropBox
QVerticalWidget
QVariableInputWidget
QVariableInputForm
QDragVariableLabel
QHoverVariableLabel
"""
from __future__ import division
from PyQt4 import QtCore, QtGui
from vistrails.core import debug
from vistrails.core.vistrail.module_function import ModuleFunction
from vistrails.core.vistrail.module_param import ModuleParam
from vistrails.core.modules import module_registry
from vistrails.core.modules.basic_modules import Constant
from vistrails.core.vistrail.vistrailvariable import VistrailVariable
from vistrails.gui.common_widgets import QPromptWidget
from vistrails.gui.modules.utils import get_widget_class
from vistrails.gui.modules.constant_configuration import StandardConstantWidget
from vistrails.gui.module_palette import QModuleTreeWidget
from vistrails.gui.theme import CurrentTheme
from vistrails.gui.utils import show_question, YES_BUTTON, NO_BUTTON
import uuid
################################################################################
class QVariableDropBox(QtGui.QScrollArea):
"""
QVariableDropBox is just a widget such that items that subclass
Constant from the module palette can be dropped into its client rect.
It then constructs an input form based on the type of handling widget
"""
    def __init__(self, parent=None):
        """ QVariableDropBox(parent: QWidget) -> QVariableDropBox
        Initialize the scroll area to accept drops from the module palette
        and host the vertical widget that displays one form per variable.

        """
        QtGui.QScrollArea.__init__(self, parent)
        self.setAcceptDrops(True)
        self.setWidgetResizable(True)
        # Vertical container holding one input form per vistrail variable.
        self.vWidget = QVerticalWidget()
        self.setWidget(self.vWidget)
        # When True, updateController() skips rebuilding the forms
        # (set while a drop is being processed).
        self.updateLocked = False
        # VistrailController bound later via updateController().
        self.controller = None
def dragEnterEvent(self, event):
""" dragEnterEvent(event: QDragEnterEvent) -> None
Set to accept drops from the module palette
"""
if isinstance(event.source(), QModuleTreeWidget):
data = event.mimeData()
if hasattr(data, 'items'):
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
""" dragMoveEvent(event: QDragMoveEvent) -> None
Set to accept drag move event from the module palette
"""
if isinstance(event.source(), QModuleTreeWidget):
data = event.mimeData()
if hasattr(data, 'items'):
event.accept()
def dropEvent(self, event):
""" dropEvent(event: QDragMoveEvent) -> None
Accept drop event to add a new variable
"""
if isinstance(event.source(), QModuleTreeWidget):
data = event.mimeData()
if hasattr(data, 'items'):
event.accept()
assert len(data.items) == 1
item = data.items[0]
if issubclass(item.descriptor.module, Constant):
if item.descriptor and self.controller:
self.lockUpdate()
(text, ok) = QtGui.QInputDialog.getText(self,
'Set Variable Name',
'Enter the variable name',
QtGui.QLineEdit.Normal,
'')
var_name = str(text).strip()
while ok and self.controller.check_vistrail_variable(var_name):
msg =" This variable name is already being used.\
Please enter a different variable name "
(text, ok) = QtGui.QInputDialog.getText(self,
'Set Variable Name',
msg,
QtGui.QLineEdit.Normal,
text)
var_name = str(text).strip()
if ok:
self.vWidget.addVariable(str(uuid.uuid1()), var_name, item.descriptor)
self.scrollContentsBy(0, self.viewport().height())
self.unlockUpdate()
#self.emit(QtCore.SIGNAL("paramsAreaChanged"))
def updateController(self, controller):
""" updateController(controller: VistrailController) -> None
Construct input forms for a controller's variables
"""
# we shouldn't do this whenver the controller changes...
if self.controller != controller:
self.controller = controller
if self.updateLocked: return
self.vWidget.clear()
if controller:
reg = module_registry.get_module_registry()
for var in [v for v in controller.vistrail.vistrail_vars]:
try:
descriptor = reg.get_descriptor_by_name(var.package,
var.module,
var.namespace)
except module_registry.ModuleRegistryException:
debug.critical("Missing Module Descriptor for vistrail"
" variable %s\nPackage: %s\nType: %s"
"\nNamespace: %s" % \
(var.name, var.package, var.module,
var.namespace))
continue
self.vWidget.addVariable(var.uuid, var.name, descriptor,
var.value)
self.vWidget.showPromptByChildren()
else:
self.vWidget.showPrompt(False)
def lockUpdate(self):
""" lockUpdate() -> None
Do not allow updateModule()
"""
self.updateLocked = True
def unlockUpdate(self):
""" unlockUpdate() -> None
Allow updateModule()
"""
self.updateLocked = False
class QVerticalWidget(QPromptWidget):
"""
QVerticalWidget is a widget holding other variable widgets
vertically
"""
def __init__(self, parent=None):
""" QVerticalWidget(parent: QWidget) -> QVerticalWidget
Initialize with a vertical layout
"""
QPromptWidget.__init__(self, parent)
self.setPromptText("Drag a constant from the Modules panel to create a variable")
self.setLayout(QtGui.QVBoxLayout())
self.layout().setMargin(0)
self.layout().setSpacing(5)
self.layout().setAlignment(QtCore.Qt.AlignTop)
self.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
self.setMinimumHeight(20)
self._variable_widgets = []
def addVariable(self, uuid, name, descriptor, value=''):
""" addVariable(uuid:str, name: str, descriptor: ModuleDescriptor, value: str) -> None
Add an input form for the variable
"""
inputForm = QVariableInputWidget(uuid, name, descriptor, value, self)
self.connect(inputForm, QtCore.SIGNAL('deleted(QWidget*)'),
self.delete_form)
self.layout().addWidget(inputForm)
inputForm.show()
self.setMinimumHeight(self.layout().minimumSize().height())
self.showPrompt(False)
self._variable_widgets.append(inputForm)
def clear(self):
""" clear() -> None
Clear and delete all widgets in the layout
"""
self.setEnabled(False)
for v in self._variable_widgets:
self.disconnect(v, QtCore.SIGNAL('deleted(QWidget*)'),
self.delete_form)
self.layout().removeWidget(v)
v.setParent(None)
v.deleteLater()
self._variable_widgets = []
self.setEnabled(True)
def delete_form(self, input_form):
self.disconnect(input_form, QtCore.SIGNAL('deleted(QWidget*)'),
self.delete_form)
var_name = input_form.var_name
variableBox = self.parent().parent()
self.layout().removeWidget(input_form)
self._variable_widgets.remove(input_form)
input_form.setParent(None)
input_form.deleteLater()
self.showPromptByChildren()
if variableBox.controller:
variableBox.lockUpdate()
variableBox.controller.set_vistrail_variable(var_name, None)
variableBox.unlockUpdate()
self.setMinimumHeight(self.layout().minimumSize().height())
class QVariableInputWidget(QtGui.QDockWidget):
def __init__(self, uuid, name, descriptor, value='', parent=None):
QtGui.QDockWidget.__init__(self, parent)
self.var_uuid = uuid
self.var_name = name
self.descriptor = descriptor
self.setFeatures(QtGui.QDockWidget.DockWidgetClosable)
# Create group and titlebar widgets for input widget
self.group_box = QVariableInputForm(descriptor, value, self)
self.setWidget(self.group_box)
title_widget = QtGui.QWidget()
title_layout = QtGui.QHBoxLayout()
self.closeButton = QtGui.QToolButton()
self.closeButton.setAutoRaise(True)
self.closeButton.setIcon(QtGui.QIcon(self.style().standardPixmap(QtGui.QStyle.SP_TitleBarCloseButton)))
self.closeButton.setIconSize(QtCore.QSize(13, 13))
self.closeButton.setFixedWidth(16)
self.label = QHoverVariableLabel(name)
title_layout.addWidget(self.label)
title_layout.addWidget(self.closeButton)
title_widget.setLayout(title_layout)
self.setTitleBarWidget(title_widget)
self.connect(self.closeButton, QtCore.SIGNAL('clicked()'), self.close)
def renameVariable(self, var_name):
# First delete old var entry
variableBox = self.parent().parent().parent()
if variableBox.controller:
variableBox.lockUpdate()
variableBox.controller.set_vistrail_variable(self.var_name, None, False)
variableBox.unlockUpdate()
# Create var entry with new name, but keeping the same uuid
self.var_name = var_name
self.label.setText(var_name)
self.group_box.updateMethod()
def closeEvent(self, event):
choice = show_question('Delete %s?'%self.var_name,
'Are you sure you want to permanently delete the VisTrail variable\
"%s"?\n\nNote: Any workflows using this variable will be left in an invalid state.'%self.var_name,
[NO_BUTTON,YES_BUTTON],
NO_BUTTON)
if choice == NO_BUTTON:
event.ignore()
return
self.emit(QtCore.SIGNAL('deleted(QWidget*)'), self)
def keyPressEvent(self, e):
if e.key() in [QtCore.Qt.Key_Delete, QtCore.Qt.Key_Backspace]:
self.close()
else:
QtGui.QDockWidget.keyPressEvent(self, e)
def check_variable(self, name):
""" check_variable(name: str) -> Boolean
Returns True if the vistrail already has the variable name
"""
variableBox = self.parent().parent().parent()
if variableBox.controller:
return variableBox.controller.check_vistrail_variable(name)
return False
class QVariableInputForm(QtGui.QGroupBox):
"""
QVariableInputForm is a widget with multiple input lines depends on
the method signature
"""
def __init__(self, descriptor, var_strValue="", parent=None):
""" QVariableInputForm(descriptor: ModuleDescriptor, var_strValue: str,
parent: QWidget) -> QVariableInputForm
Initialize with a vertical layout
"""
QtGui.QGroupBox.__init__(self, parent)
self.setLayout(QtGui.QGridLayout())
self.layout().setMargin(5)
self.layout().setSpacing(5)
self.setFocusPolicy(QtCore.Qt.ClickFocus)
self.setSizePolicy(QtGui.QSizePolicy.Preferred,
QtGui.QSizePolicy.Fixed)
self.palette().setColor(QtGui.QPalette.Window,
CurrentTheme.METHOD_SELECT_COLOR)
# Create widget for editing variable
p = ModuleParam(type=descriptor.name, identifier=descriptor.identifier,
namespace=descriptor.namespace)
p.strValue = var_strValue
widget_type = get_widget_class(descriptor)
self.widget = widget_type(p, self)
self.label = QDragVariableLabel(p.type)
self.layout().addWidget(self.label, 0, 0)
self.layout().addWidget(self.widget, 0, 1)
self.updateMethod()
def focusInEvent(self, event):
""" gotFocus() -> None
Make sure the form painted as selected
"""
self.setAutoFillBackground(True)
def focusOutEvent(self, event):
""" lostFocus() -> None
Make sure the form painted as non-selected and then
perform a parameter changes
"""
self.setAutoFillBackground(False)
def updateMethod(self):
""" updateMethod() -> None
Update the variable values in vistrail controller
"""
inputWidget = self.parent()
variableBox = inputWidget.parent().parent().parent()
if variableBox.controller:
variableBox.lockUpdate()
descriptor = inputWidget.descriptor
var = VistrailVariable(inputWidget.var_name, inputWidget.var_uuid,
descriptor.identifier, descriptor.name,
descriptor.namespace, str(self.widget.contents()))
variableBox.controller.set_vistrail_variable(inputWidget.var_name, var)
variableBox.unlockUpdate()
class QDragVariableLabel(QtGui.QLabel):
"""
QDragVariableLabel is a QLabel that can be dragged to connect
to an input port
"""
def __init__(self, var_type='', parent=None):
""" QDragVariableLabel(var_type:str,
parent: QWidget) -> QDragVariableLabel
Initialize the label with a variable type
"""
QtGui.QLabel.__init__(self, parent)
self.var_type = var_type
self.setText(var_type)
self.setAttribute(QtCore.Qt.WA_Hover)
self.setCursor(CurrentTheme.OPEN_HAND_CURSOR)
self.setToolTip('Drag to an input port')
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_DEFAULT_COLOR)
def event(self, event):
""" event(event: QEvent) -> Event Result
Override to handle hover enter and leave events for hot links
"""
if event.type()==QtCore.QEvent.HoverEnter:
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_SELECT_COLOR)
if event.type()==QtCore.QEvent.HoverLeave:
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_DEFAULT_COLOR)
return QtGui.QLabel.event(self, event)
def mousePressEvent(self, event):
""" mousePressEvent(event: QMouseEvent) -> None
If mouse click on the label, show up a dialog to change/add
the variable name
"""
if event.button()==QtCore.Qt.LeftButton:
inputWidget = self.parent().parent()
var_name = inputWidget.var_name
var_uuid = inputWidget.var_uuid
# Create pixmap from variable name and type
drag_str = var_name + ' : ' + self.var_type
drag_label = QDragVariableLabel(drag_str)
drag_label.adjustSize()
painter = QtGui.QPainter()
font = QtGui.QFont()
size = drag_label.size()
image = QtGui.QImage(size.width()+4, size.height()+4, QtGui.QImage.Format_ARGB32_Premultiplied)
image.fill(0)
painter.begin(image)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(self.palette().highlight())
painter.drawRect(QtCore.QRectF(0, 0, image.width(), image.height()))
painter.setFont(font)
painter.setPen(QtCore.Qt.black)
painter.drawText(QtCore.QRect(QtCore.QPoint(2,2), size), QtCore.Qt.AlignLeft | QtCore.Qt.TextSingleLine, drag_str)
painter.end()
pixmap = QtGui.QPixmap.fromImage(image)
# Create drag action
mimeData = QtCore.QMimeData()
portspec = inputWidget.descriptor.get_port_spec('value', 'output')
mimeData.variableData = (portspec, var_uuid, var_name)
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.setHotSpot(pixmap.rect().bottomRight())
drag.setPixmap(pixmap)
drag.start(QtCore.Qt.MoveAction)
class QHoverVariableLabel(QtGui.QLabel):
"""
QHoverVariableLabel is a QLabel that supports hover actions similar
to a hot link
"""
def __init__(self, var_name='', parent=None):
""" QHoverVariableLabel(var_name:str,
parent: QWidget) -> QHoverVariableLabel
Initialize the label with a variable name
"""
QtGui.QLabel.__init__(self, parent)
self.var_name = var_name
self.setText(var_name)
self.setAttribute(QtCore.Qt.WA_Hover)
self.setCursor(QtCore.Qt.PointingHandCursor)
self.setToolTip('Click to rename')
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_DEFAULT_COLOR)
def event(self, event):
""" event(event: QEvent) -> Event Result
Override to handle hover enter and leave events for hot links
"""
if event.type()==QtCore.QEvent.HoverEnter:
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_SELECT_COLOR)
if event.type()==QtCore.QEvent.HoverLeave:
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_DEFAULT_COLOR)
return QtGui.QLabel.event(self, event)
def mousePressEvent(self, event):
""" mousePressEvent(event: QMouseEvent) -> None
If mouse click on the label, show up a dialog to change/add
the variable name
"""
if event.button()==QtCore.Qt.LeftButton:
inputWidget = self.parent().parent()
orig_var_name = inputWidget.var_name
(text, ok) = QtGui.QInputDialog.getText(self,
'Set New Variable Name',
'Enter the new variable name',
QtGui.QLineEdit.Normal,
orig_var_name)
var_name = str(text).strip()
while ok and self.parent().parent().check_variable(var_name):
msg =" This variable name is already being used.\
Please enter a different variable name "
(text, ok) = QtGui.QInputDialog.getText(self,
'Set New Variable Name',
msg,
QtGui.QLineEdit.Normal,
text)
var_name = str(text).strip()
if ok and var_name != orig_var_name:
self.setText(var_name)
inputWidget.renameVariable(var_name)
| bsd-3-clause |
fishjord/gsutil | gslib/tests/test_signurl.py | 2 | 15810 | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for signurl command."""
from datetime import datetime
from datetime import timedelta
import pkgutil
import gslib.commands.signurl
from gslib.commands.signurl import HAVE_OPENSSL
from gslib.exception import CommandException
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import unittest
# pylint: disable=protected-access
@unittest.skipUnless(HAVE_OPENSSL, 'signurl requires pyopenssl.')
@SkipForS3('Signed URLs are only supported for gs:// URLs.')
class TestSignUrl(testcase.GsUtilIntegrationTestCase):
"""Integration tests for signurl command."""
def _GetJSONKsFile(self):
if not hasattr(self, 'json_ks_file'):
# Dummy json keystore constructed from test.p12.
contents = pkgutil.get_data('gslib', 'tests/test_data/test.json')
self.json_ks_file = self.CreateTempFile(contents=contents)
return self.json_ks_file
def _GetKsFile(self):
if not hasattr(self, 'ks_file'):
# Dummy pkcs12 keystore generated with the command
# openssl req -new -passout pass:notasecret -batch \
# -x509 -keyout signed_url_test.key -out signed_url_test.pem \
# -subj '/CN=test.apps.googleusercontent.com'
# &&
# openssl pkcs12 -export -passin pass:notasecret \
# -passout pass:notasecret -inkey signed_url_test.key \
# -in signed_url_test.pem -out test.p12
# &&
# rm signed_url_test.key signed_url_test.pem
contents = pkgutil.get_data('gslib', 'tests/test_data/test.p12')
self.ks_file = self.CreateTempFile(contents=contents)
return self.ks_file
def testSignUrlOutputP12(self):
"""Tests signurl output of a sample object with pkcs12 keystore."""
self._DoTestSignUrlOutput(self._GetKsFile())
def testSignUrlOutputJSON(self):
"""Tests signurl output of a sample object with json keystore."""
self._DoTestSignUrlOutput(self._GetJSONKsFile(), json_keystore=True)
def _DoTestSignUrlOutput(self, ks_file, json_keystore=False):
"""Tests signurl output of a sample object."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents='z')
cmd_base = ['signurl'] if json_keystore else ['signurl', '-p', 'notasecret']
stdout = self.RunGsUtil(cmd_base + ['-m', 'PUT', ks_file, suri(object_uri)],
return_stdout=True)
self.assertIn(
'x-goog-credential=test%40developer.gserviceaccount.com', stdout)
self.assertIn('x-goog-expires=3600', stdout)
self.assertIn('%2Fus-central1%2F', stdout)
self.assertIn('\tPUT\t', stdout)
def testSignUrlWithURLEncodeRequiredChars(self):
objs = ['gs://example.org/test 1', 'gs://example.org/test/test 2',
'gs://example.org/Аудиоарi хив']
expected_partial_urls = [
'https://storage.googleapis.com/example.org/test%201?x-goog-signature=',
('https://storage.googleapis.com/example.org/test/test%202'
'?x-goog-signature='),
('https://storage.googleapis.com/example.org/%D0%90%D1%83%D0%B4%D0%B8%D'
'0%BE%D0%B0%D1%80i%20%D1%85%D0%B8%D0%B2?x-goog-signature=')
]
self.assertEquals(len(objs), len(expected_partial_urls))
cmd_args = ['signurl', '-m', 'PUT', '-p', 'notasecret',
'-r', 'us', self._GetKsFile()]
cmd_args.extend(objs)
stdout = self.RunGsUtil(cmd_args, return_stdout=True)
lines = stdout.split('\n')
# Header, signed urls, trailing newline.
self.assertEquals(len(lines), len(objs) + 2)
# Strip the header line to make the indices line up.
lines = lines[1:]
for obj, line, partial_url in zip(objs, lines, expected_partial_urls):
self.assertIn(obj, line)
self.assertIn(partial_url, line)
self.assertIn('x-goog-credential=test%40developer.gserviceaccount.com',
line)
self.assertIn('%2Fus%2F', stdout)
def testSignUrlWithWildcard(self):
objs = ['test1', 'test2', 'test3']
obj_urls = []
bucket = self.CreateBucket()
for obj_name in objs:
obj_urls.append(self.CreateObject(bucket_uri=bucket,
object_name=obj_name, contents=''))
stdout = self.RunGsUtil(['signurl', '-p',
'notasecret', self._GetKsFile(),
suri(bucket) + '/*'], return_stdout=True)
# Header, 3 signed urls, trailing newline
self.assertEquals(len(stdout.split('\n')), 5)
for obj_url in obj_urls:
self.assertIn(suri(obj_url), stdout)
def testSignUrlOfNonObjectUrl(self):
"""Tests the signurl output of a non-existent file."""
self.RunGsUtil(['signurl', self._GetKsFile(), 'gs://'],
expected_status=1, stdin='notasecret')
self.RunGsUtil(['signurl', 'file://tmp/abc'], expected_status=1)
@unittest.skipUnless(HAVE_OPENSSL, 'signurl requires pyopenssl.')
class UnitTestSignUrl(testcase.GsUtilUnitTestCase):
"""Unit tests for the signurl command."""
def setUp(self):
super(UnitTestSignUrl, self).setUp()
ks_contents = pkgutil.get_data('gslib', 'tests/test_data/test.p12')
self.key, self.client_email = gslib.commands.signurl._ReadKeystore(
ks_contents, 'notasecret')
def fake_now():
return datetime(1900, 01, 01, 00, 05, 55)
gslib.commands.signurl._NowUTC = fake_now
def testDurationSpec(self):
tests = [('1h', timedelta(hours=1)),
('2d', timedelta(days=2)),
('5D', timedelta(days=5)),
('35s', timedelta(seconds=35)),
('1h', timedelta(hours=1)),
('33', timedelta(hours=33)),
('22m', timedelta(minutes=22)),
('3.7', None),
('27Z', None),
]
for inp, expected in tests:
try:
td = gslib.commands.signurl._DurationToTimeDelta(inp)
self.assertEquals(td, expected)
except CommandException:
if expected is not None:
self.fail('{0} failed to parse')
def testSignPut(self):
"""Tests the _GenSignedUrl function with a PUT method."""
expected = ('https://storage.googleapis.com/test/test.txt?x-goog-signature='
'8c4d7226d8db1c939381d421c422c8724a762250d7ab9f79eaf943f8c0d05e'
'8eac43ef94cec44d8ab3f15d0f0243ad07bb1de470cc31099bdcbdf5555e1c'
'41d060fca84ea64681d7a926b5e2faafac97cf1bbb1d66f0167fc7144566a2'
'5fe2f5a708961046d6b195ba08a04b501d8b014f4fa203a5ac3d6c5effc5ea'
'549a68c9f353b050d5ea23786845307512bc051424151d2f515391ade2304d'
'db5bb44146ac83b89850b77ffeedbdd0682c9a1d1ae2e8dd75ad43c8263e35'
'8592c84f879fdb8b733feec0b516963bd17990d0e89a306744ca1de6d6fbaa'
'16ca9e82aacd1f64f2d43ae261ada2104ff481a1754b6f357d2c54fc2d127f'
'0b0bbe0f300776d0&x-goog-algorithm=GOOG4-RSA-SHA256&x-goog-cred'
'ential=test%40developer.gserviceaccount.com%2F19000101%2Fus-ea'
'st%2Fstorage%2Fgoog4_request&x-goog-date=19000101T000555Z&x-go'
'og-expires=3600&x-goog-signedheaders=host%3Bx-goog-resumable')
duration = timedelta(seconds=3600)
with SetBotoConfigForTest([
('Credentials', 'gs_host', 'storage.googleapis.com')]):
signed_url = gslib.commands.signurl._GenSignedUrl(
self.key,
client_id=self.client_email,
method='RESUMABLE',
gcs_path='test/test.txt',
duration=duration,
logger=self.logger,
region='us-east',
content_type='')
self.assertEquals(expected, signed_url)
def testSignResumable(self):
"""Tests the _GenSignedUrl function with a RESUMABLE method."""
expected = ('https://storage.googleapis.com/test/test.txt?x-goog-signature='
'8c4d7226d8db1c939381d421c422c8724a762250d7ab9f79eaf943f8c0d05e'
'8eac43ef94cec44d8ab3f15d0f0243ad07bb1de470cc31099bdcbdf5555e1c'
'41d060fca84ea64681d7a926b5e2faafac97cf1bbb1d66f0167fc7144566a2'
'5fe2f5a708961046d6b195ba08a04b501d8b014f4fa203a5ac3d6c5effc5ea'
'549a68c9f353b050d5ea23786845307512bc051424151d2f515391ade2304d'
'db5bb44146ac83b89850b77ffeedbdd0682c9a1d1ae2e8dd75ad43c8263e35'
'8592c84f879fdb8b733feec0b516963bd17990d0e89a306744ca1de6d6fbaa'
'16ca9e82aacd1f64f2d43ae261ada2104ff481a1754b6f357d2c54fc2d127f'
'0b0bbe0f300776d0&x-goog-algorithm=GOOG4-RSA-SHA256&x-goog-cred'
'ential=test%40developer.gserviceaccount.com%2F19000101%2Fus-ea'
'st%2Fstorage%2Fgoog4_request&x-goog-date=19000101T000555Z&x-go'
'og-expires=3600&x-goog-signedheaders=host%3Bx-goog-resumable')
class MockLogger(object):
def __init__(self):
self.warning_issued = False
def warn(self, unused_msg):
self.warning_issued = True
mock_logger = MockLogger()
duration = timedelta(seconds=3600)
with SetBotoConfigForTest([
('Credentials', 'gs_host', 'storage.googleapis.com')]):
signed_url = gslib.commands.signurl._GenSignedUrl(
self.key,
client_id=self.client_email,
method='RESUMABLE',
gcs_path='test/test.txt',
duration=duration,
logger=mock_logger,
region='us-east',
content_type='')
self.assertEquals(expected, signed_url)
# Resumable uploads with no content-type should issue a warning.
self.assertTrue(mock_logger.warning_issued)
mock_logger2 = MockLogger()
with SetBotoConfigForTest([
('Credentials', 'gs_host', 'storage.googleapis.com')]):
signed_url = gslib.commands.signurl._GenSignedUrl(
self.key,
client_id=self.client_email,
method='RESUMABLE',
gcs_path='test/test.txt',
duration=duration,
logger=mock_logger2,
region='us-east',
content_type='image/jpeg')
# No warning, since content type was included.
self.assertFalse(mock_logger2.warning_issued)
def testSignurlPutContentype(self):
"""Tests the _GenSignedUrl function a PUT method and content type."""
expected = ('https://storage.googleapis.com/test/test.txt?x-goog-signature='
'590b52cb0be515032578f372029a72dd7fc253ceb1b50b8cd5761af835b119'
'2d461adbb16b6d292e48a5b17f9d4078327a7f1ceed7fa3e15155c1d251398'
'a445b6346075a22bf7a6250264c983503e819eada2a3895213439ce3c9f590'
'564e54cbca436e1bcd677c36ec33224c1a074c376953fcd7514a6a7ea93cde'
'2dd698e9b461a697c9e4e30539cd5c3bd88172797c867955b388bc28e60d6b'
'b8a7fb302d2eb988ef5056843c2105f177c44fc98c202ece26bf288c02ded4'
'e7cdb85cb29584879e9765027a8ce99a4fedfda995d5e035114c5f8a8bfa94'
'8c438b2714e4a128dc46986336573139d4009f3a75fdbbb757603cff491c0b'
'014698ce171c9fe9&x-goog-algorithm=GOOG4-RSA-SHA256&x-goog-cred'
'ential=test%40developer.gserviceaccount.com%2F19000101%2Feu%2F'
'storage%2Fgoog4_request&x-goog-date=19000101T000555Z&x-goog-ex'
'pires=3600&x-goog-signedheaders=content-type%3Bhost')
duration = timedelta(seconds=3600)
with SetBotoConfigForTest([
('Credentials', 'gs_host', 'storage.googleapis.com')]):
signed_url = gslib.commands.signurl._GenSignedUrl(
self.key,
client_id=self.client_email,
method='PUT',
gcs_path='test/test.txt',
duration=duration,
logger=self.logger,
region='eu',
content_type='text/plain')
self.assertEquals(expected, signed_url)
def testSignurlGet(self):
"""Tests the _GenSignedUrl function with a GET method."""
expected = ('https://storage.googleapis.com/test/test.txt?x-goog-signature='
'2ed227f18d31cdf2b01da7cd4fcea45330fbfcc0dda1d327a8c27124a276ee'
'e0de835e9cd4b0bee609d6b4b21a88a8092a9c089574a300243dde38351f0d'
'183df007211ded41f2f0854290b995be6c9d0367d9c00976745ba27740238b'
'0dd49fee7c41e7ed1569bbab8ffbb00a2078e904ebeeec2f8e55e93d4baba1'
'3db5dc670b1b16183a15d5067f1584db88b3dc55e3edd3c97c0f31fec99ea4'
'ce96ddb8235b0352c9ce5110dad1a580072d955fe9203b6701364ddd85226b'
'55bec84ac46e48cd324fd5d8d8ad264d1aa0b7dbad3ac04b87b2a6c2c8ef95'
'3285cbe3b431e5def84552e112899459fcb64d2d84320c06faa1e8efa26eca'
'cce2eff41f2d2364&x-goog-algorithm=GOOG4-RSA-SHA256&x-goog-cred'
'ential=test%40developer.gserviceaccount.com%2F19000101%2Fasia%'
'2Fstorage%2Fgoog4_request&x-goog-date=19000101T000555Z&x-goog-'
'expires=0&x-goog-signedheaders=host')
duration = timedelta(seconds=0)
with SetBotoConfigForTest([
('Credentials', 'gs_host', 'storage.googleapis.com')]):
signed_url = gslib.commands.signurl._GenSignedUrl(
self.key,
client_id=self.client_email,
method='GET',
gcs_path='test/test.txt',
duration=duration,
logger=self.logger,
region='asia',
content_type='')
self.assertEquals(expected, signed_url)
def testSignurlGetWithJSONKey(self):
"""Tests _GenSignedUrl with a GET method and the test JSON private key."""
expected = ('https://storage.googleapis.com/test/test.txt?x-goog-signature='
'2ed227f18d31cdf2b01da7cd4fcea45330fbfcc0dda1d327a8c27124a276ee'
'e0de835e9cd4b0bee609d6b4b21a88a8092a9c089574a300243dde38351f0d'
'183df007211ded41f2f0854290b995be6c9d0367d9c00976745ba27740238b'
'0dd49fee7c41e7ed1569bbab8ffbb00a2078e904ebeeec2f8e55e93d4baba1'
'3db5dc670b1b16183a15d5067f1584db88b3dc55e3edd3c97c0f31fec99ea4'
'ce96ddb8235b0352c9ce5110dad1a580072d955fe9203b6701364ddd85226b'
'55bec84ac46e48cd324fd5d8d8ad264d1aa0b7dbad3ac04b87b2a6c2c8ef95'
'3285cbe3b431e5def84552e112899459fcb64d2d84320c06faa1e8efa26eca'
'cce2eff41f2d2364&x-goog-algorithm=GOOG4-RSA-SHA256&x-goog-cred'
'ential=test%40developer.gserviceaccount.com%2F19000101%2Fasia%'
'2Fstorage%2Fgoog4_request&x-goog-date=19000101T000555Z&x-goog-'
'expires=0&x-goog-signedheaders=host')
json_contents = pkgutil.get_data('gslib', 'tests/test_data/test.json')
key, client_email = gslib.commands.signurl._ReadJSONKeystore(
json_contents)
duration = timedelta(seconds=0)
with SetBotoConfigForTest([
('Credentials', 'gs_host', 'storage.googleapis.com')]):
signed_url = gslib.commands.signurl._GenSignedUrl(
key,
client_id=client_email,
method='GET',
gcs_path='test/test.txt',
duration=duration,
logger=self.logger,
region='asia',
content_type='')
self.assertEquals(expected, signed_url)
| apache-2.0 |
Stane1983/u-boot | tools/buildman/cmdline.py | 6 | 7614 | # SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2014 Google, Inc
#
from optparse import OptionParser
def ParseArgs():
"""Parse command line arguments from sys.argv[]
Returns:
tuple containing:
options: command line options
args: command lin arguments
"""
parser = OptionParser()
parser.add_option('-A', '--print-prefix', action='store_true',
help='Print the tool-chain prefix for a board (CROSS_COMPILE=)')
parser.add_option('-b', '--branch', type='string',
help='Branch name to build, or range of commits to build')
parser.add_option('-B', '--bloat', dest='show_bloat',
action='store_true', default=False,
help='Show changes in function code size for each board')
parser.add_option('--boards', type='string', action='append',
help='List of board names to build separated by comma')
parser.add_option('-c', '--count', dest='count', type='int',
default=-1, help='Run build on the top n commits')
parser.add_option('-C', '--force-reconfig', dest='force_reconfig',
action='store_true', default=False,
help='Reconfigure for every commit (disable incremental build)')
parser.add_option('-d', '--detail', dest='show_detail',
action='store_true', default=False,
help='Show detailed size delta for each board in the -S summary')
parser.add_option('-D', '--config-only', action='store_true', default=False,
help="Don't build, just configure each commit")
parser.add_option('-e', '--show_errors', action='store_true',
default=False, help='Show errors and warnings')
parser.add_option('-E', '--warnings-as-errors', action='store_true',
default=False, help='Treat all compiler warnings as errors')
parser.add_option('-f', '--force-build', dest='force_build',
action='store_true', default=False,
help='Force build of boards even if already built')
parser.add_option('-F', '--force-build-failures', dest='force_build_failures',
action='store_true', default=False,
help='Force build of previously-failed build')
parser.add_option('--fetch-arch', type='string',
help="Fetch a toolchain for architecture FETCH_ARCH ('list' to list)."
' You can also fetch several toolchains separate by comma, or'
" 'all' to download all")
parser.add_option('-g', '--git', type='string',
help='Git repo containing branch to build', default='.')
parser.add_option('-G', '--config-file', type='string',
help='Path to buildman config file', default='')
parser.add_option('-H', '--full-help', action='store_true', dest='full_help',
default=False, help='Display the README file')
parser.add_option('-i', '--in-tree', dest='in_tree',
action='store_true', default=False,
help='Build in the source tree instead of a separate directory')
# -I will be removed after April 2021
parser.add_option('-I', '--incremental', action='store_true',
default=False, help='Deprecated, does nothing. See -m')
parser.add_option('-j', '--jobs', dest='jobs', type='int',
default=None, help='Number of jobs to run at once (passed to make)')
parser.add_option('-k', '--keep-outputs', action='store_true',
default=False, help='Keep all build output files (e.g. binaries)')
parser.add_option('-K', '--show-config', action='store_true',
default=False, help='Show configuration changes in summary (both board config files and Kconfig)')
parser.add_option('--preserve-config-y', action='store_true',
default=False, help="Don't convert y to 1 in configs")
parser.add_option('-l', '--list-error-boards', action='store_true',
default=False, help='Show a list of boards next to each error/warning')
parser.add_option('--list-tool-chains', action='store_true', default=False,
help='List available tool chains (use -v to see probing detail)')
parser.add_option('-m', '--mrproper', action='store_true',
default=False, help="Run 'make mrproper before reconfiguring")
parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run',
default=False, help="Do a dry run (describe actions, but do nothing)")
parser.add_option('-N', '--no-subdirs', action='store_true', dest='no_subdirs',
default=False, help="Don't create subdirectories when building current source for a single board")
parser.add_option('-o', '--output-dir', type='string', dest='output_dir',
help='Directory where all builds happen and buildman has its workspace (default is ../)')
parser.add_option('-O', '--override-toolchain', type='string',
help="Override host toochain to use for sandbox (e.g. 'clang-7')")
parser.add_option('-Q', '--quick', action='store_true',
default=False, help='Do a rough build, with limited warning resolution')
parser.add_option('-p', '--full-path', action='store_true',
default=False, help="Use full toolchain path in CROSS_COMPILE")
parser.add_option('-P', '--per-board-out-dir', action='store_true',
default=False, help="Use an O= (output) directory per board rather than per thread")
parser.add_option('-s', '--summary', action='store_true',
default=False, help='Show a build summary')
parser.add_option('-S', '--show-sizes', action='store_true',
default=False, help='Show image size variation in summary')
parser.add_option('--skip-net-tests', action='store_true', default=False,
help='Skip tests which need the network')
parser.add_option('--step', type='int',
default=1, help='Only build every n commits (0=just first and last)')
parser.add_option('-t', '--test', action='store_true', dest='test',
default=False, help='run tests')
parser.add_option('-T', '--threads', type='int',
default=None, help='Number of builder threads to use')
parser.add_option('-u', '--show_unknown', action='store_true',
default=False, help='Show boards with unknown build result')
parser.add_option('-U', '--show-environment', action='store_true',
default=False, help='Show environment changes in summary')
parser.add_option('-v', '--verbose', action='store_true',
default=False, help='Show build results while the build progresses')
parser.add_option('-V', '--verbose-build', action='store_true',
default=False, help='Run make with V=1, logging all output')
parser.add_option('-w', '--work-in-output', action='store_true',
default=False, help='Use the output directory as the work directory')
parser.add_option('-W', '--ignore-warnings', action='store_true',
default=False, help='Return success even if there are warnings')
parser.add_option('-x', '--exclude', dest='exclude',
type='string', action='append',
help='Specify a list of boards to exclude, separated by comma')
parser.add_option('-y', '--filter-dtb-warnings', action='store_true',
default=False,
help='Filter out device-tree-compiler warnings from output')
parser.add_option('-Y', '--filter-migration-warnings', action='store_true',
default=False,
help='Filter out migration warnings from output')
parser.usage += """ [list of target/arch/cpu/board/vendor/soc to build]
Build U-Boot for all commits in a branch. Use -n to do a dry run"""
return parser.parse_args()
| gpl-2.0 |
selboo/flask | scripts/flaskext_compat.py | 6 | 5023 | # -*- coding: utf-8 -*-
"""
flaskext_compat
~~~~~~~~~~~~~~~
Implements the ``flask.ext`` virtual package for versions of Flask
older than 0.7. This module is a noop if Flask 0.8 was detected.
Usage::
import flaskext_compat
flaskext_compat.activate()
from flask.ext import foo
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import imp
class ExtensionImporter(object):
    """This importer redirects imports from this submodule to other locations.
    This makes it possible to transition from the old flaskext.name to the
    newer flask_name without people having a hard time.
    """
    def __init__(self, module_choices, wrapper_module):
        # module_choices: '%s'-style name templates tried in order (e.g.
        # ['flask_%s', 'flaskext.%s']); wrapper_module: the virtual package
        # ('flask.ext') whose submodule imports get redirected.
        self.module_choices = module_choices
        self.wrapper_module = wrapper_module
        self.prefix = wrapper_module + '.'
        self.prefix_cutoff = wrapper_module.count('.') + 1
    def __eq__(self, other):
        # Equality is by class identity (module + name) plus configuration,
        # so install() can remove equivalent importers from sys.meta_path.
        return self.__class__.__module__ == other.__class__.__module__ and \
               self.__class__.__name__ == other.__class__.__name__ and \
               self.wrapper_module == other.wrapper_module and \
               self.module_choices == other.module_choices
    def __ne__(self, other):
        return not self.__eq__(other)
    def install(self):
        # Drop any equivalent importer already registered, then append self,
        # keeping exactly one instance of this hook on sys.meta_path.
        sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
    def find_module(self, fullname):
        # PEP 302 finder: only claim imports under the 'flask.ext.' prefix.
        if fullname.startswith(self.prefix):
            return self
    def load_module(self, fullname):
        # PEP 302 loader: alias flask.ext.<name> to the first module from
        # module_choices that imports successfully.
        if fullname in sys.modules:
            return sys.modules[fullname]
        modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
        for path in self.module_choices:
            realname = path % modname
            try:
                __import__(realname)
            except ImportError:
                exc_type, exc_value, tb = sys.exc_info()
                # since we only establish the entry in sys.modules at the
                # end this seems to be redundant, but if recursive imports
                # happen we will call into the move import a second time.
                # On the second invocation we still don't have an entry for
                # fullname in sys.modules, but we will end up with the same
                # fake module name and that import will succeed since this
                # one already has a temporary entry in the modules dict.
                # Since this one "succeeded" temporarily that second
                # invocation now will have created a fullname entry in
                # sys.modules which we have to kill.
                sys.modules.pop(fullname, None)
                # If it's an important traceback we reraise it, otherwise
                # we swallow it and try the next choice. The skipped frame
                # is the one from __import__ above which we don't care about.
                # NOTE: Python 2 three-argument raise; re-raises with the
                # __import__ frame stripped from the traceback.
                if self.is_important_traceback(realname, tb):
                    raise exc_type, exc_value, tb.tb_next
                continue
            module = sys.modules[fullname] = sys.modules[realname]
            if '.' not in modname:
                # Top-level extension: also expose it as an attribute of the
                # wrapper package so 'flask.ext.foo' attribute access works.
                setattr(sys.modules[self.wrapper_module], modname, module)
            return module
        raise ImportError('No module named %s' % fullname)
    def is_important_traceback(self, important_module, tb):
        """Walks a traceback's frames and checks if any of the frames
        originated in the given important module. If that is the case then we
        were able to import the module itself but apparently something went
        wrong when the module was imported. (Eg: import of an import failed).
        """
        while tb is not None:
            if self.is_important_frame(important_module, tb):
                return True
            tb = tb.tb_next
        return False
    def is_important_frame(self, important_module, tb):
        """Checks a single frame if it's important."""
        g = tb.tb_frame.f_globals
        if '__name__' not in g:
            return False
        module_name = g['__name__']
        # Python 2.7 Behavior. Modules are cleaned up late so the
        # name shows up properly here. Success!
        if module_name == important_module:
            return True
        # Some python versions will clean up modules so early that the
        # module name at that point is no longer set. Try guessing from
        # the filename then.
        filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
        test_string = os.path.sep + important_module.replace('.', os.path.sep)
        return test_string + '.py' in filename or \
               test_string + os.path.sep + '__init__.py' in filename
def activate():
    """Install the ``flask.ext`` virtual package and its import hook."""
    import flask
    # Fabricate an empty package object to act as the 'flask.ext' namespace.
    virtual_pkg = imp.new_module('flask.ext')
    virtual_pkg.__path__ = []
    flask.ext = virtual_pkg
    sys.modules['flask.ext'] = virtual_pkg
    # Redirect 'flask.ext.foo' to 'flask_foo', falling back to the legacy
    # 'flaskext.foo' location.
    ExtensionImporter(['flask_%s', 'flaskext.%s'], 'flask.ext').install()
| bsd-3-clause |
CMRemix/trltetmo-dok2 | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
# Module-level connection; /dev/shm keeps the database in RAM (see note above).
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None  # autocommit mode: no implicit transactions
def trace_begin():
    """Perf start-of-trace hook: announce start and create the two tables."""
    print "In trace_begin:\n"
    #
    # Will create several tables at the start, pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general event.
    #
    con.execute("""
            create table if not exists gen_events (
                name text,
                symbol text,
                comm text,
                dso text
            );""")
    con.execute("""
            create table if not exists pebs_ll (
                name text,
                symbol text,
                comm text,
                dso text,
                flags integer,
                ip integer,
                status integer,
                dse integer,
                dla integer,
                lat integer
            );""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
    """Perf per-sample hook: build an event object and store it in the DB.

    param_dict is supplied by perf's python scripting engine; the "dso" and
    "symbol" keys are optional because they are not always resolved.
    """
    event_attr = param_dict["attr"]
    sample = param_dict["sample"]
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]
    # Symbol and dso info are not always resolved; fall back to placeholders.
    # dict.get() is equivalent to the old has_key() checks and, unlike
    # has_key(), works on both Python 2 and Python 3.
    dso = param_dict.get("dso", "Unknown_dso")
    symbol = param_dict.get("symbol", "Unknown_symbol")
    # Create the event object and insert it to the right table in database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)
def insert_db(event):
    """Store *event* in the table matching its ev_type (silently ignores
    any other event type)."""
    ev_type = event.ev_type
    if ev_type == EVTYPE_GENERIC:
        row = (event.name, event.symbol, event.comm, event.dso)
        con.execute("insert into gen_events values(?, ?, ?, ?)", row)
    elif ev_type == EVTYPE_PEBS_LL:
        # Mask down to 63 bits (clears the top bit) before storing addresses.
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        row = (event.name, event.symbol, event.comm, event.dso, event.flags,
               event.ip, event.status, event.dse, event.dla, event.lat)
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", row)
def trace_end():
    """Perf end-of-trace hook: print summaries and close the database."""
    print "In trace_end:\n"
    # We show the basic info for the 2 type of event classes
    show_general_events()
    show_pebs_ll()
    con.close()
#
# As the event number may be very big, so we can't use linear way
# to show the histogram in real number, but use a log2 algorithm.
#
def num2sym(num):
    """Render *num* as a '#' histogram bar of length floor(log2(num)) + 1.

    Every positive count gets at least one '#'; the bar grows
    logarithmically so huge counts stay printable.
    """
    bar_len = int(math.log(num, 2) + 1)
    return '#' * bar_len
def show_general_events():
    """Print log2 histograms of gen_events grouped by comm, symbol and dso."""
    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print "There is %d records in gen_events table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the general events grouped by thread/symbol/dso: \n"
    # Group by thread
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dso
    print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
    """Print log2 histograms of pebs_ll grouped by comm/symbol/dse/latency."""
    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print "There is %d records in pebs_ll table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
    for row in dseq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by latency
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
    for row in latq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
    """Fallback hook: dump the raw key=value fields of unhandled events."""
    print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
mitya57/django | tests/generic_views/test_detail.py | 31 | 8975 | import datetime
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectTemplateResponseMixin
from django.views.generic.edit import ModelFormMixin
from .models import Artist, Author, Book, Page
@override_settings(ROOT_URLCONF='generic_views.urls')
class DetailViewTest(TestCase):
    """Exercise DetailView object lookup (pk, slug, custom fields), template
    selection, and context-variable naming through the generic_views URLconf.
    """
    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: one artist, two authors, two books, one page.
        cls.artist1 = Artist.objects.create(name='Rene Magritte')
        cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
        cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
        cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
        cls.book1.authors.add(cls.author1)
        cls.book2 = Book.objects.create(
            name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
        )
        cls.page1 = Page.objects.create(
            content='I was once bitten by a moose.', template='generic_views/page_template.html'
        )
    def test_simple_object(self):
        res = self.client.get('/detail/obj/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], {'foo': 'bar'})
        self.assertIsInstance(res.context['view'], View)
        self.assertTemplateUsed(res, 'generic_views/detail.html')
    def test_detail_by_pk(self):
        res = self.client.get('/detail/author/%s/' % self.author1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], self.author1)
        self.assertEqual(res.context['author'], self.author1)
        self.assertTemplateUsed(res, 'generic_views/author_detail.html')
    def test_detail_missing_object(self):
        res = self.client.get('/detail/author/500/')
        self.assertEqual(res.status_code, 404)
    def test_detail_object_does_not_exist(self):
        with self.assertRaises(ObjectDoesNotExist):
            self.client.get('/detail/doesnotexist/1/')
    def test_detail_by_custom_pk(self):
        res = self.client.get('/detail/author/bycustompk/%s/' % self.author1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], self.author1)
        self.assertEqual(res.context['author'], self.author1)
        self.assertTemplateUsed(res, 'generic_views/author_detail.html')
    def test_detail_by_slug(self):
        res = self.client.get('/detail/author/byslug/scott-rosenberg/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg'))
        self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg'))
        self.assertTemplateUsed(res, 'generic_views/author_detail.html')
    def test_detail_by_custom_slug(self):
        res = self.client.get('/detail/author/bycustomslug/scott-rosenberg/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg'))
        self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg'))
        self.assertTemplateUsed(res, 'generic_views/author_detail.html')
    def test_detail_by_pk_ignore_slug(self):
        res = self.client.get('/detail/author/bypkignoreslug/%s-roberto-bolano/' % self.author1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], self.author1)
        self.assertEqual(res.context['author'], self.author1)
        self.assertTemplateUsed(res, 'generic_views/author_detail.html')
    def test_detail_by_pk_ignore_slug_mismatch(self):
        # When the view ignores the slug, a wrong slug still resolves by pk.
        res = self.client.get('/detail/author/bypkignoreslug/%s-scott-rosenberg/' % self.author1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], self.author1)
        self.assertEqual(res.context['author'], self.author1)
        self.assertTemplateUsed(res, 'generic_views/author_detail.html')
    def test_detail_by_pk_and_slug(self):
        res = self.client.get('/detail/author/bypkandslug/%s-roberto-bolano/' % self.author1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], self.author1)
        self.assertEqual(res.context['author'], self.author1)
        self.assertTemplateUsed(res, 'generic_views/author_detail.html')
    def test_detail_by_pk_and_slug_mismatch_404(self):
        # When both pk and slug are checked, a mismatching slug is a 404.
        res = self.client.get('/detail/author/bypkandslug/%s-scott-rosenberg/' % self.author1.pk)
        self.assertEqual(res.status_code, 404)
    def test_verbose_name(self):
        res = self.client.get('/detail/artist/%s/' % self.artist1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], self.artist1)
        self.assertEqual(res.context['artist'], self.artist1)
        self.assertTemplateUsed(res, 'generic_views/artist_detail.html')
    def test_template_name(self):
        res = self.client.get('/detail/author/%s/template_name/' % self.author1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], self.author1)
        self.assertEqual(res.context['author'], self.author1)
        self.assertTemplateUsed(res, 'generic_views/about.html')
    def test_template_name_suffix(self):
        res = self.client.get('/detail/author/%s/template_name_suffix/' % self.author1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], self.author1)
        self.assertEqual(res.context['author'], self.author1)
        self.assertTemplateUsed(res, 'generic_views/author_view.html')
    def test_template_name_field(self):
        res = self.client.get('/detail/page/%s/field/' % self.page1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], self.page1)
        self.assertEqual(res.context['page'], self.page1)
        self.assertTemplateUsed(res, 'generic_views/page_template.html')
    def test_context_object_name(self):
        res = self.client.get('/detail/author/%s/context_object_name/' % self.author1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], self.author1)
        self.assertEqual(res.context['thingy'], self.author1)
        self.assertNotIn('author', res.context)
        self.assertTemplateUsed(res, 'generic_views/author_detail.html')
    def test_duplicated_context_object_name(self):
        res = self.client.get('/detail/author/%s/dupe_context_object_name/' % self.author1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], self.author1)
        self.assertNotIn('author', res.context)
        self.assertTemplateUsed(res, 'generic_views/author_detail.html')
    def test_custom_detail(self):
        """
        AuthorCustomDetail overrides get() and ensures that
        SingleObjectMixin.get_context_object_name() always uses the obj
        parameter instead of self.object.
        """
        res = self.client.get('/detail/author/%s/custom_detail/' % self.author1.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['custom_author'], self.author1)
        self.assertNotIn('author', res.context)
        self.assertNotIn('object', res.context)
        self.assertTemplateUsed(res, 'generic_views/author_detail.html')
    def test_deferred_queryset_template_name(self):
        # A deferred proxy instance must still resolve the real model's
        # default template name.
        class FormContext(SingleObjectTemplateResponseMixin):
            request = RequestFactory().get('/')
            model = Author
            object = Author.objects.defer('name').get(pk=self.author1.pk)
        self.assertEqual(FormContext().get_template_names()[0], 'generic_views/author_detail.html')
    def test_deferred_queryset_context_object_name(self):
        # A deferred proxy instance must still use the real model's name as
        # the context variable.
        class FormContext(ModelFormMixin):
            request = RequestFactory().get('/')
            model = Author
            object = Author.objects.defer('name').get(pk=self.author1.pk)
            fields = ('name',)
        form_context_data = FormContext().get_context_data()
        self.assertEqual(form_context_data['object'], self.author1)
        self.assertEqual(form_context_data['author'], self.author1)
    def test_invalid_url(self):
        with self.assertRaises(AttributeError):
            self.client.get('/detail/author/invalid/url/')
    def test_invalid_queryset(self):
        with self.assertRaises(ImproperlyConfigured):
            self.client.get('/detail/author/invalid/qs/')
    def test_non_model_object_with_meta(self):
        res = self.client.get('/detail/nonmodel/1/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'].id, "non_model_1")
| bsd-3-clause |
Petraea/jsonbot | jsb/lib/less.py | 1 | 1782 | # jsb/less.py
#
#
""" maintain bot output cache. """
# jsb imports
from jsb.utils.exception import handle_exception
from jsb.utils.limlist import Limlist
from jsb.lib.cache import get, set, delete
## basic imports
import logging
## Less class
class Less(object):
    """ output cache .. caches upto <nr> item of txt lines per channel.

    Entries live in the jsb cache under the key u"outcache-<channel>"; the
    trailing 3600 passed to set() is the cache expiry argument (presumably
    seconds -- confirm in jsb.lib.cache).
    """
    def clear(self, channel):
        """ clear outcache of channel. """
        channel = unicode(channel).lower()
        try: delete(u"outcache-" + channel)
        except KeyError: pass
    def add(self, channel, listoftxt):
        """ append listoftxt to channel's cached output. """
        channel = unicode(channel).lower()
        data = get("outcache-" + channel)
        if not data: data = []
        data.extend(listoftxt)
        set(u"outcache-" + channel, data, 3600)
    def set(self, channel, listoftxt):
        """ replace channel's cached output with listoftxt. """
        channel = unicode(channel).lower()
        # 'set' here resolves to the module-level cache set(), not this method.
        set(u"outcache-" + channel, listoftxt, 3600)
    def get(self, channel):
        """ pop 1 line from the cache; return (line_or_None, remaining). """
        channel = unicode(channel).lower()
        # NOTE(review): redundant -- 'get' is never assigned locally, so it
        # would resolve to the module-level cache get() anyway.
        global get
        data = get(u"outcache-" + channel)
        if not data: txt = None
        else:
            try: txt = data.pop(0) ; set(u"outcache-" + channel, data, 3600)
            except (KeyError, IndexError): txt = None
        if data: size = len(data)
        else: size = 0
        return (txt, size)
    def copy(self, channel):
        """ return the full cached list for channel without popping. """
        channel = unicode(channel).lower()
        global get
        return get(u"outcache-" + channel)
    def more(self, channel):
        """ pop the next cached line; alias for get(). """
        return self.get(channel)
outcache = Less() | mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.