code
stringlengths 1
199k
|
|---|
import sys
import weechat

SCRIPT_NAME = "fullwidth"
SCRIPT_AUTHOR = "GermainZ <germanosz@gmail.com>"
SCRIPT_VERSION = "0.1"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = ("Convert text to its fullwidth equivalent and send it "
               "to buffer.")

# Use the structured version tuple instead of comparing version strings:
# string comparison is fragile (lexicographic, not numeric).
PY3 = sys.version_info[0] >= 3
if PY3:
    unichr = chr

    def send(buf, text):
        """Send *text* to buffer *buf* as if typed by the user."""
        weechat.command(buf, "/input send {}".format(text))
else:
    def send(buf, text):
        """Send *text* to buffer *buf*, encoded for Python 2's byte API."""
        weechat.command(buf, "/input send {}".format(text.encode("utf-8")))


def cb_fullwidth_cmd(data, buf, args):
    """Callback for ``/fullwidth``, convert and send the given text.

    Printable ASCII 0x21-0x7E is shifted by 0xFEE0 into the Unicode
    fullwidth block (U+FF01-U+FF5E). Bug fix: a plain space (0x20) was
    previously shifted to the unassigned codepoint U+FF00; the correct
    fullwidth counterpart of space is the ideographic space U+3000.
    Any other character passes through unchanged.
    """
    if not PY3:
        args = args.decode("utf-8")
    chars = []
    for char in args:
        ord_char = ord(char)
        if ord_char == 32:
            char = unichr(0x3000)
        elif 33 <= ord_char <= 126:
            char = unichr(ord_char + 65248)
        chars.append(char)
    send(buf, ''.join(chars))
    return weechat.WEECHAT_RC_OK


if __name__ == "__main__":
    weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
                     SCRIPT_LICENSE, SCRIPT_DESC, '', '')
    weechat.hook_command("fullwidth", SCRIPT_DESC, "<text>", '', '',
                         "cb_fullwidth_cmd", '')
|
import numpy as np
def Bootstrap(chi0_wGG, Nw, Kc_GG, printtxt, print_bootstrap, world):
    """Self-consistent Bootstrap exchange-correlation kernel (arXiv:1107.0199).

    Iterates fxc = alpha * Kc until the inverse dielectric matrix stops
    changing, then gathers the head of the dielectric function over ranks.

    :param chi0_wGG: local (Nw_local, npw, npw) non-interacting response.
    :param Nw: total number of frequency points across all ranks.
    :param Kc_GG: (npw, npw) Coulomb kernel.
    :param printtxt: logging callable taking a single string.
    :param print_bootstrap: if True, dump df_w to a file each scf step.
    :param world: MPI-like communicator (rank, broadcast, sum, all_gather,
        barrier) -- assumed GPAW-style; TODO confirm exact interface.
    :returns: complex array df_w of length Nw (gathered epsilon head).
    """
    Nw_local = chi0_wGG.shape[0]
    npw = chi0_wGG.shape[1]
    fxc_GG = np.zeros((npw, npw), dtype=complex)
    tmp_GG = np.eye(npw, npw)  # identity, reused every iteration
    dminv_wGG = np.zeros((Nw_local, npw, npw), dtype=complex)
    dflocal_w = np.zeros(Nw_local, dtype=complex)
    df_w = np.zeros(Nw, dtype=complex)
    for iscf in range(120):
        dminvold_wGG = dminv_wGG.copy()
        Kxc_GG = Kc_GG + fxc_GG
        for iw in range(Nw_local):
            # chi = chi0 (1 - Kxc chi0)^-1  (Dyson equation)
            chi_GG = np.dot(chi0_wGG[iw],
                            np.linalg.inv(tmp_GG - np.dot(Kxc_GG, chi0_wGG[iw])))
            dminv_wGG[iw] = tmp_GG + np.dot(Kc_GG, chi_GG)
        if world.rank == 0:
            # Bootstrap update: scale the Coulomb kernel by the static head.
            alpha = dminv_wGG[0, 0, 0] / (Kc_GG[0, 0] * chi0_wGG[0, 0, 0])
            fxc_GG = alpha * Kc_GG
        world.broadcast(fxc_GG, 0)
        error = np.abs(dminvold_wGG - dminv_wGG).sum()
        if world.sum(error) < 0.1:
            printtxt('Self consistent fxc finished in %d iterations ! ' % (iscf))
            break
        if iscf > 100:
            printtxt('Too many fxc scf steps !')
        if print_bootstrap:
            for iw in range(Nw_local):
                dflocal_w[iw] = np.linalg.inv(dminv_wGG[iw])[0, 0]
            world.all_gather(dflocal_w, df_w)
            if world.rank == 0:
                # Bug fix: the original used the Python-2-only statement
                # ``print >> f, ...`` (a SyntaxError on Python 3); write the
                # same "re im" pairs with a context-managed file instead.
                with open('df_scf%d' % (iscf), 'w') as f:
                    for iw in range(Nw):
                        f.write('%s %s\n' % (np.real(df_w[iw]), np.imag(df_w[iw])))
            world.barrier()
    for iw in range(Nw_local):
        dflocal_w[iw] = np.linalg.inv(dminv_wGG[iw])[0, 0]
    world.all_gather(dflocal_w, df_w)
    return df_w
|
from collections import namedtuple
import itertools
from ..state import AddOnState
class AddOnStopper:
    """Stop a set of add-ons together with everything that depends on them.

    Builds the reverse-dependency closure of the requested add-ons and
    stops dependants before the add-ons they rely on.
    """

    class __StopErrorException(Exception):
        """Internal signal: an add-on is (or entered) the error state."""
        pass

    # One add-on paired with the recursively computed add-ons that depend
    # on one of its provisions and must therefore stop first.
    __AddonWithDep = namedtuple('AddonDependency', ['addon', 'dependencies'])

    def __init__(self, manager, *addons):
        """:param manager: iterable of all known add-ons.
        :param addons: the add-ons the caller wants stopped.
        """
        self.__manager = manager
        self.__stopped = False
        self.__has_error = False
        # Map each provided requirement name to the add-ons requiring it.
        self.__reverse_dependencies = {}
        for addon in self.__manager:
            for requirement in addon.requirements:
                self.__reverse_dependencies.setdefault(requirement, []).append(addon)
        self.__addons = self.__recursive_compute_deps(addons)

    def __recursive_compute_deps(self, addons):
        """Build the tree of add-ons to stop, skipping already-stopped ones."""
        ret = []
        for addon in addons:
            if addon.state in (AddOnState.stopped, AddOnState.none):
                continue
            deps = itertools.chain(*(self.__reverse_dependencies.get(provision, ())
                                     for provision in addon.provisions))
            ret.append(
                self.__AddonWithDep(addon, self.__recursive_compute_deps(deps))
            )
        return ret

    @property
    def finished(self):
        # True once do() managed to stop every add-on in the tree.
        return self.__stopped

    @property
    def has_error(self):
        return self.__has_error

    def addons(self):
        """Return the set of all add-ons involved in this stop operation.

        Bug fix: the original called ``set(*generator)``, which unpacks
        the yielded add-ons as positional arguments to ``set`` -- a
        TypeError for more than one add-on, and for exactly one add-on it
        would try to iterate the add-on itself.
        """
        return set(self.__iterate_addons(self.__addons))

    def __iterate_addons(self, addons):
        for addon_with_dep in addons:
            yield addon_with_dep.addon
            yield from self.__iterate_addons(addon_with_dep.dependencies)

    def do(self):
        """Attempt to stop every add-on; record overall success/error."""
        all_stopped = True
        for addon in self.__addons:
            try:
                stopped = self.__recursive_stop(addon)
                all_stopped = all_stopped and stopped
            except AddOnStopper.__StopErrorException:
                self.__has_error = True
        self.__stopped = all_stopped

    def __recursive_stop(self, addon_with_dep):
        """Stop dependants first; only stop this add-on if they all stopped."""
        if addon_with_dep.addon.state != AddOnState.started:
            if addon_with_dep.addon.state == AddOnState.error:
                raise AddOnStopper.__StopErrorException()
            return addon_with_dep.addon.state in (AddOnState.stopped, AddOnState.error)
        dependencies_stopped = True
        for dep in addon_with_dep.dependencies:
            stopped = self.__recursive_stop(dep)
            dependencies_stopped = dependencies_stopped and stopped
        if dependencies_stopped:
            addon_with_dep.addon._stop()
            return addon_with_dep.addon.state in (AddOnState.stopped, AddOnState.error)
        else:
            return False
|
import Adafruit_BMP.BMP085 as BMP085
class BMP180:
    """Wrapper around the Adafruit BMP085/BMP180 driver that formats
    sensor readings as human-readable strings."""

    def __init__(self):
        self.sensor = BMP085.BMP085()

    @property
    def temperature(self):
        """Temperature reading formatted like ``'21.50 *C'``."""
        temp = self.sensor.read_temperature()
        return '{0:0.2f} *C'.format(temp)

    @property
    def pressure(self):
        """Pressure reading in pascals formatted like ``'101325.00 Pa'``."""
        value = self.sensor.read_pressure()
        return '{0:0.2f} Pa'.format(value)

    # Backward-compatible alias: the attribute was originally published
    # under the misspelled name "presure"; keep it working for callers.
    presure = pressure

    @property
    def altitude(self):
        """Altitude estimate in metres formatted like ``'10.00 m'``."""
        altitude = self.sensor.read_altitude()
        return '{0:0.2f} m'.format(altitude)
|
from pychess.Utils.const import KING, QUEEN, ROOK, BISHOP, KNIGHT, PAWN
from pychess.Utils.repr import reprSign, reprColor, reprPiece
class Piece:
    """A chess piece: a colour plus a piece type, with optional board
    coordinates and a rendering opacity."""

    def __init__(self, color, piece):
        self.color = color
        self.piece = piece
        self.opacity = 1.0
        # Board coordinates; None until the piece is placed/animated.
        self.x = None
        self.y = None

    # "sign" is a deprecated synonym for "piece"; kept for old callers.
    def _set_sign(self, sign):
        self.piece = sign

    def _get_sign(self):
        return self.piece

    sign = property(_get_sign, _set_sign)

    def __repr__(self):
        represen = "<%s %s" % (reprColor[self.color], reprPiece[self.piece])
        if self.opacity != 1.0:
            represen += " Op:%0.1f" % self.opacity
        # Idiom fix: compare against None with "is"/"is not", not "!=",
        # so pieces at coordinate 0.0 are still reported correctly even
        # if a coordinate type overloads equality.
        if self.x is not None or self.y is not None:
            if self.x is not None:
                represen += " X:%0.1f" % self.x
            else:
                represen += " X:None"
            if self.y is not None:
                represen += " Y:%0.1f" % self.y
            else:
                represen += " Y:None"
        represen += ">"
        return represen
|
from __future__ import print_function
import os
import codecs
import sys
import argparse
import bisect
import re
import fnmatch
import pkg_resources
from collections import OrderedDict
import enchant
from enchant.tokenize import get_tokenizer, URLFilter, WikiWordFilter, Filter
import pygments
from pygments import lexers
from pygments.filters import TokenMergeFilter
from pygments.token import Comment, String, Token, Generic, Literal
from colorama import Fore, Back, Style, init
try:
import magic
except ImportError:
magic = None
# Platform-specific single-character input: Windows has msvcrt.getch;
# POSIX needs the terminal switched to raw mode for an unbuffered read.
if sys.platform == "win32":
    import msvcrt
    getchar = msvcrt.getch
else: # POSIX platforms
    import tty
    import termios
    def getchar():
        """Gets a character from stdin without waiting
        for a newline.
        :returns: A single character from stdin.
        """
        fd = sys.stdin.fileno()
        old = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the saved terminal attributes, even if the
            # read is interrupted.
            termios.tcsetattr(fd, termios.TCSADRAIN, old)
        return ch
NAME = 'SourceSpell'
DESCRIPTION = "%s - Command line spellchecker for source code files." % NAME

# Control-flow and error-signalling exceptions used throughout the checker.
class EmptyFileError(Exception):
    """Error thrown for empty files."""
    pass

class ParseError(Exception):
    """Error thrown for Pygments lexer errors."""
    pass

class NextFile(Exception):
    """Trigger to advance to the next file."""
    pass
class HashBangFilter(Filter):
    """Filter skipping over the hashbang in executable scripts.
    Taken from: https://github.com/htgoebel/pysource-spellchecker
    """
    _pattern = re.compile(r"^#!/.+$")

    def _skip(self, word):
        # Skip any token that is a whole "#!/..." interpreter line.
        return self._pattern.match(word) is not None
class MyWikiWordFilter(WikiWordFilter):
    """WikiWordFilter that actually honours the base-class verdict.

    Bug fix: the original printed every token (a debugging leftover) and
    dropped the base-class return value, implicitly returning None, so
    no WikiWord was ever skipped.
    """

    def _skip(self, text):
        return WikiWordFilter._skip(self, text)
class EmailFilter(enchant.tokenize.EmailFilter):
    """Override the :class:`enchant.tokenize.EmailFilter` to filter out
    addresses enclosed in angle brackets, for example:
    <joe.bloggs@example.com>
    """
    # Looser than the base-class pattern: tolerates one trailing
    # non-word character (e.g. the closing '>').
    _pattern = re.compile(r"^.+@[^\.].*\.[a-z]{2,}\W?$")
class SpellingCorrection(object):
    """Object to store information for a spelling
    error.
    :param filename: File path, relative to the base directory.
    :param word: The word being checked.
    :param index: The file index at the start of the word.
    :param line_no: The 1-indexed line number.
    :param column: The column index.
    :param dictionary: Reference to the dictionary object.
    :type dictionary: :class:`enchant.Dict`
    :param line_content: The contents of the line containing the error.
    """

    def __init__(self, filename, word, index, line_no, column, dictionary, line_content):
        self.filename = filename
        self.word = word
        self.index = index
        self.line_no = line_no
        self.column = column
        self.dictionary = dictionary
        # Trailing whitespace (including the newline) is irrelevant for display.
        self.line_content = line_content.rstrip()

    def __str__(self):
        """Return a string representation of the error, including
        the filename, line and column numbers.
        """
        location = (self.filename, self.line_no, self.column, self.word)
        return "%s - Ln %s Col %s: %s" % location

    @property
    def suggestions(self):
        """The :class:`list` of suggested corrections."""
        return self.dictionary.suggest(self.word)

    def prompt(self):
        """Generate a prompt listing the available corrections.
        """
        start = self.column - 1
        before = self.line_content[:start]
        after = self.line_content[start + len(self.word):]
        # At most ten numbered suggestions, separated by pipes.
        numbered = ['%s: %s' % (idx, suggest)
                    for idx, suggest in enumerate(self.suggestions[:10])]
        suggestions = ' | '.join(numbered)
        return '%s%s%s%s%s\n\n%s' % (
            before, Back.RED, self.word,
            Style.RESET_ALL, after, suggestions
        )
def merge_tokens(stream):
    """Merge runs of consecutive same-type tokens from Pygments.
    Adapted from :class:`pygments.filters.TokenMergeFilter`
    """
    pending = None  # (index, type, accumulated value) of the open run
    for index, ttype, value in stream:
        if pending is not None and ttype is pending[1]:
            # Same token type: extend the current run.
            pending = (pending[0], pending[1], pending[2] + value)
        else:
            if pending is not None:
                yield pending
            pending = (index, ttype, value)
    if pending is not None:
        yield pending
class SourceFile(object):
    """Interface for checking for spelling errors in a
    single source file.
    :param filename: Absolute path to the file.
    :param dictionary: Enchant dictionary.
    :type dictionary: :class:`enchant.Dict`
    :param tokeniser: Enchant tokeniser from :func:`get_tokenizer`
    :type tokeniser: :class:`enchant.tokenize.Tokenizer`
    :param base_dir: Base directory path.
    :param encoding: Character set encoding to read files with.
    """
    # Matches the opening of a Python raw-string literal, e.g. r"..." / r'...'
    _rawstring_re = re.compile(r'^r["\']')

    def __init__(self, filename, dictionary, tokeniser, base_dir, encoding='utf-8'):
        self.base_dir = base_dir
        self.filename = filename
        self.dict = dictionary
        # Cumulative end index of each line; used to translate a flat file
        # index into (line, column) by bisection.
        self.line_idxs = []
        try:
            with codecs.open(self.filename, 'r', encoding) as src_file:
                self.content = src_file.read()
            line_lengths = [len(line) for line in self.content.splitlines(True)]
            if len(line_lengths) > 0:
                count = 0
                for length in line_lengths:
                    self.line_idxs.append(length + count)
                    count += length
            else:
                raise EmptyFileError("%s: File empty." % self.relname)
        except UnicodeDecodeError:
            print(
                "%s: Couldn't decode with '%s' codec." % (self.relname, encoding),
                file=sys.stderr
            )
            raise
        self.code_lexer = self._get_lexer()
        self.tokeniser = tokeniser

    def _get_lexer(self):
        """Initialise the Pygments lexer.

        Tries, in order: filename match, libmagic mimetype (if available),
        and finally content-based guessing on the first 512 characters.
        :raises pygments.util.ClassNotFound: if no lexer can be determined.
        """
        # TODO: Improve the lexer selection since Jinja and other template languages are
        # often saved with .html template.
        lexer = None
        try:
            lexer = lexers.get_lexer_for_filename(self.filename)
        except pygments.util.ClassNotFound:
            pass
        if magic is not None and lexer is None:
            # Fallback to mimetype detection
            try:
                mimetype = magic.from_file(self.filename, mime=True)
                lexer = lexers.get_lexer_for_mimetype(mimetype)
            except pygments.util.ClassNotFound:
                pass
        if lexer is None:
            try:
                # If all else fails use the guess_lexer method
                lexer = lexers.guess_lexer(self.content[:512])
            except pygments.util.ClassNotFound:
                print("No lexer found for: %s" % self.relname, file=sys.stderr)
                raise
        return lexer

    @property
    def relname(self):
        """Returns the name of the file relative to
        the base directory being checked.
        """
        return self.filename[len(self.base_dir) + 1:]

    def _index_to_col_lineno(self, index):
        """Calculates the line and column index from the
        file index.
        :param index: The file index.
        :returns: A tuple of line number and column index.
        :rtype: :class:`tuple` of (int, int)
        """
        line = bisect.bisect_right(self.line_idxs, index)
        column = index if line == 0 else index - self.line_idxs[line - 1]
        # Note: line and column numbers are 1-indexed
        return (line + 1, column + 1)

    def _filter_code_tokens(self, stream):
        """Filter the token stream based on token type and
        the name of the lexer.
        :raises ParseError: when the lexer emits a Token.Error.
        """
        for index, tokentype, value in merge_tokens(stream):
            # Handle token errors
            if tokentype is Token.Error:
                (line, _) = self._index_to_col_lineno(index)
                raise ParseError('%s: Parse error at line %s.' % (self.relname, line))
            # Lex python doc strings with the reStructuredText lexer.
            if tokentype is String.Doc and self.code_lexer.name == 'Python':
                sub_lexer = lexers.get_lexer_by_name('reStructuredText')
                sub_stream = merge_tokens(sub_lexer.get_tokens_unprocessed(value))
                for sub_index, tktype, value in sub_stream:
                    # Bug fix: the original tested the *outer* token type
                    # (String.Doc) here instead of the sub-token type, so
                    # no docstring sub-token was ever selected and the whole
                    # sub-lexing branch was dead code.
                    if self._select_token(tktype, sub_lexer.name, value):
                        yield (index + sub_index, value)
            else:
                if self._select_token(tokentype, self.code_lexer.name, value):
                    yield (index, value)

    def _select_token(self, tokentype, name, value):
        """Return ``True`` if the token should be used, ``False`` otherwise."""
        # TODO: Make min length configurable.
        MIN_LENGTH = 10
        return (
            (tokentype in Comment and tokentype not in Comment.Preproc) or
            (tokentype in Token.Text) or
            (tokentype in Generic.Emph) or
            (tokentype in Generic.Strong) or
            # Ignore string literals in reStructuredText since
            # these are used class and function references.
            (tokentype in Literal.String and
             len(value) > MIN_LENGTH and
             name != 'reStructuredText' and not
             self._is_rawstring(value))  # Ignore Python raw-string literals
        )

    def _is_rawstring(self, value):
        """Return ``True`` if value is a Python raw-string literal,
        ``False`` otherwise.
        """
        return self._rawstring_re.match(value) is not None

    def errors(self):
        """Generator that yields :class:`SpellingCorrection` objects for the current
        source file.
        """
        stream = self.code_lexer.get_tokens_unprocessed(self.content)
        for index, value in self._filter_code_tokens(stream):
            for word, token_index in self.tokeniser(value):
                if not self.dict.check(word):
                    line, column = self._index_to_col_lineno(index + token_index)
                    # Get line content
                    lo = 0 if line == 1 else self.line_idxs[line - 2]
                    line_content = self.content[lo:self.line_idxs[line - 1]]
                    yield SpellingCorrection(
                        self.relname, word, index + token_index,
                        line, column, self.dict, line_content
                    )
class BaseChecker(object):
    """Common functionality for all checker classes.
    :param base_dir: The path to the base directory.
    :param ignore_patterns: List of glob ignore patterns to skip.
    :param language: ISO language code, e.g. 'en_GB' or 'en_US'
    :param project_dict: Path to the project dictionary for excluded words.
    :param encoding: Character set encoding to use reading / writing files.
    """

    def __init__(self, base_dir='.', ignore_patterns=None, language='en_GB',
                 project_dict=None, encoding='utf-8'):
        self.base_dir = os.path.realpath(base_dir)
        # Ignore common binary file formats and hidden files
        self.ignore_patterns = [
            '*.gif', '*.jpeg', '*.jpg', '*.bmp', '*.png',
            '*.exe', '*.dll', '*.webp', '*.pyc', '*.zip',
            '*.gz', '*/.*'
        ]
        # Bug fix: project_dict defaults to None and os.path.isabs(None)
        # raises TypeError; only resolve the path when one was supplied.
        if project_dict is not None and not os.path.isabs(project_dict):
            project_dict = os.path.abspath(os.path.join(base_dir, project_dict))
        if ignore_patterns is not None:
            self.ignore_patterns.extend(
                [os.path.join(self.base_dir, pattern) for pattern in ignore_patterns]
            )
        self.dictionary = enchant.DictWithPWL(language, project_dict)
        self.ret_code = 0
        self.encoding = encoding
        # TODO: Consider breaking apart WikiWords instead of filtering them out.
        self.tokeniser = get_tokenizer(
            self.dictionary.tag, [EmailFilter, URLFilter, WikiWordFilter, HashBangFilter]
        )

    def _search_files(self):
        """Generator function which returns files to be checked."""
        for root, dirs, files in os.walk(self.base_dir):
            for name in files:
                filename = os.path.join(root, name)
                if any([fnmatch.fnmatch(filename, i) for i in self.ignore_patterns]):
                    continue
                yield filename

    def _process_file(self, src_file):
        """Called from run for each source file
        under the base directory.
        :param src_file: The source file being checked.
        :type src_file: :class:`SourceFile`
        """
        raise NotImplementedError

    def run(self):
        """Runs the checker.
        :returns: The script exit code.
        :rtype: int
        """
        for name in self._search_files():
            try:
                self._process_file(
                    SourceFile(name, self.dictionary, self.tokeniser, self.base_dir, self.encoding)
                )
            except pygments.util.ClassNotFound:
                # No lexer could be found: record failure, keep going.
                self.ret_code = 1
                continue
            except ParseError as e:
                print(e, file=sys.stderr)
                self.ret_code = 1
                continue
            except UnicodeDecodeError:
                self.ret_code = 1
                continue
            except (EmptyFileError, NextFile):
                continue  # Skip empty files
            except StopIteration:  # User quit.
                break
        return self.ret_code
class SpellChecker(BaseChecker):
    """Non-Interactive spell checker. Prints a list of
    all spelling errors found.
    """

    def _process_file(self, src_file):
        """Report every spelling error on stderr; a non-zero exit code
        records that at least one error was found."""
        for spelling_error in src_file.errors():
            self.ret_code = 1
            print(spelling_error, file=sys.stderr)
class InteractiveChecker(BaseChecker):
    """Interactive spellchecker. Allows the user
    to quickly fix spelling errors and add words to
    the excluded words dictionary.
    """

    def _print_options(self):
        """Prints the list of keyboard options."""
        codes = OrderedDict([
            ('0-9', 'Use the numbered suggestion.'),
            ('a', 'Ignore the error and add to the excluded words.'),
            ('n', 'Go to the next file, save existing changes.'),
            ('q', 'Exit immediately, discards changes in the current file.')
        ])
        print()
        # "description" rather than "help" so the builtin is not shadowed.
        for code, description in codes.items():
            print("%s - %s" % (code, description))
        print("To skip to the next error, press any other key.")

    def _handle_response(self, src_map, error):
        """Handle the user response. Return True
        if a correction was made.
        :param src_map: The map of indexes to tokens.
        :type src_map: :class:`collections.OrderedDict`
        :param error: The spelling correction data.
        :type error: :class:`SpellingCorrection`
        """
        correction = False
        print("--->", end=" ")
        response = getchar()
        # Echo response
        print(response)
        # Correct with the numbered correction
        if response.isdigit():
            try:
                src_map[error.index] = error.suggestions[int(response)]
                correction = True
            except IndexError:
                print("%sInvalid selection, please try again.%s" % (Back.RED, Style.RESET_ALL))
                # Bug fix: the retry call was missing the src_map argument,
                # which raised TypeError instead of re-prompting.
                return self._handle_response(src_map, error)
        # Add word to the excluded words list
        elif response == "a":
            self.dictionary.add(error.word)
        # Next file
        elif response == "n":
            raise NextFile()
        # Stop spellchecking
        elif response == "q":
            raise StopIteration()
        # Any other key: ignore the current error.
        return correction

    def _process_file(self, src_file):
        """For each error in the file. Prompt the
        user for the action to take.
        :param src_file: Source file being checked.
        :type src_file: :class:`SourceFile`
        """
        write_file = False
        src_map = self._get_source_map(src_file.content)
        for idx, error in enumerate(src_file.errors()):
            if idx == 0:
                print("\n%s%s:%s\n" % (Fore.GREEN, src_file.relname, Style.RESET_ALL))
            print(error.prompt())
            self._print_options()
            write_file |= self._handle_response(src_map, error)
        if write_file:
            # Re-assemble the file from the (possibly corrected) token map.
            with codecs.open(src_file.filename, 'w', self.encoding) as out_file:
                out_file.write(u''.join(src_map.values()))

    def _get_source_map(self, contents):
        """Creates a map of index, token pairs from the source
        file to handle spelling replacements.
        :param contents: The contents of the source file.
        :returns: The generated map.
        :rtype: :class:`collections.OrderedDict`
        """
        src_map = OrderedDict()
        offset = 0
        # Split on non-word runs but keep them (capturing group) so that
        # concatenating all tokens reproduces the file exactly.
        for token in re.split(r'(\W+)', contents):
            if token == '':
                continue
            src_map[offset] = token
            offset += len(token)
        return src_map
def get_parser(description=''):
    """Initialise the command line argument parsing.
    :returns: The argument parser.
    :rtype: :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Table-driven registration keeps the flag definitions in one place.
    flag_specs = (
        (('--directory', '-d'),
         dict(default='.', help='Base directory to search from')),
        (('--interactive', '-i'),
         dict(default=False, action='store_true',
              help='Run the interactive checker')),
        (('--ignore-patterns', '-I'),
         dict(nargs='+', default=None,
              help='List of glob patterns to ignore')),
        (('--language', '-l'),
         dict(default='en_GB', help='Language to use')),
        (('--excluded-words', '-e'),
         dict(default='.excluded-words',
              help='Path to excluded words list')),
        (('--encoding', '-E'),
         dict(default='utf-8', help='Character encoding to use')),
        (('--version', '-v'),
         dict(default=False, action='store_true', help='Print version')),
    )
    for names, options in flag_specs:
        parser.add_argument(*names, **options)
    return parser
def main():
    """Main entry point."""
    args = get_parser(DESCRIPTION).parse_args()
    if args.version:
        print(DESCRIPTION)
        print("Version: %s" % _get_version())
        return
    # Initialise colorama so ANSI colours also work on Windows.
    init()
    checker_class = InteractiveChecker if args.interactive else SpellChecker
    checker = checker_class(args.directory, args.ignore_patterns, args.language,
                            args.excluded_words, args.encoding)
    sys.exit(checker.run())
def _get_version():
    """Read the version from the package metadata."""
    try:
        return pkg_resources.get_distribution(NAME).version
    except pkg_resources.DistributionNotFound:
        # Not installed: tell the user how to fix it and abort.
        print("Could not find the distribution information!")
        print(r"The project must be built, and installed with 'pip install' or 'setup.py develop'.")
        sys.exit(1)
if __name__ == "__main__":
main()
|
from . import controllers
from . import models
from . import partner
|
(S'9364dda56f0ec0ea32d748574b517361'
p1
(ihappydoclib.parseinfo.moduleinfo
ModuleInfo
p2
(dp3
S'_namespaces'
p4
((dp5
S'Error'
p6
(ihappydoclib.parseinfo.classinfo
ClassInfo
p7
(dp8
g4
((dp9
(dp10
tp11
sS'_filename'
p12
S'../python/frowns/Depict/GraphViz.py'
p13
sS'_docstring'
p14
S''
sS'_class_member_info'
p15
(lp16
sS'_name'
p17
g6
sS'_parent'
p18
g2
sS'_comment_info'
p19
(dp20
sS'_base_class_info'
p21
(lp22
S'Exception'
p23
asS'_configuration_values'
p24
(dp25
sS'_class_info'
p26
g9
sS'_function_info'
p27
g10
sS'_comments'
p28
S''
sbs(dp29
S'addCoords'
p30
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p31
(dp32
g4
((dp33
(dp34
tp35
sS'_exception_info'
p36
(dp37
S'Error( "Unable to locate neato" )'
p38
NsS'Error( "neato failed depiction" )'
p39
NssS'_parameter_names'
p40
(S'mol'
p41
tp42
sS'_parameter_info'
p43
(dp44
g41
(NNNtp45
ssg12
g13
sg14
S"(mol) Assign x,y coordinates to the atoms of a molecule\n using AT&T's graph layout algorithm. This technique is\n very fast but doesn't look very good so use with\n caution.\n\n The executable 'neato' must exist in the search path"
p46
sg17
g30
sg18
g2
sg19
g20
sg24
(dp47
sg26
g33
sg27
g34
sg28
S''
sbstp48
sS'_import_info'
p49
(ihappydoclib.parseinfo.imports
ImportInfo
p50
(dp51
S'_named_imports'
p52
(dp53
sS'_straight_imports'
p54
(lp55
S'os'
p56
aS're'
p57
asbsg12
g13
sg14
S'"""Render a molecule using AT&T\'s GraphViz.\n\nThis is a poor man\'s rendering package and should be used\naccordingly. It is intended merely as a debugging tool\nto see if molecules are formed somewhat correctly as\ndebugging smiles strings tends to make my brain wilt.\n\nThat being said, it\'s pretty cool :) Be aware that while\nGraphViz is opensourced it is not free for commercial\napplications and any such use must be cleared with AT&T.\n\nYou may find GraphViz here\nhttp://www.research.att.com/sw/tools/graphviz/\nbut please read their FAQ and license before downloading.\n\nThis package must be able to find the \'neato\' executable\nin the search path.\n"""'
p58
sg17
S'GraphViz'
p59
sg18
Nsg19
g20
sg24
(dp60
S'include_comments'
p61
I1
sS'cacheFilePrefix'
p62
S'.happydoc.'
p63
sS'useCache'
p64
I1
sS'docStringFormat'
p65
S'StructuredText'
p66
ssg26
g5
sg27
g29
sg28
S''
sbt.
|
"""Unit test utilities for Google C++ Testing and Mocking Framework."""
import os
import sys
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
IS_OS2 = os.name == 'os2'
import atexit
import shutil
import tempfile
import unittest as _test_module
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
# Snapshot of the process environment that tests mutate via SetEnvVar();
# working on a copy avoids clobbering the parent environment.
environ = os.environ.copy()

def SetEnvVar(env_var, value):
    """Sets/unsets an environment variable to a given value."""
    if value is None:
        environ.pop(env_var, None)
    else:
        environ[env_var] = value
TestCase = _test_module.TestCase # pylint: disable=C6409
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
    """Parses and strips Google Test flags from argv. This is idempotent."""
    # Suppresses the lint complaint about a global variable since we need it
    # here to maintain module-wide state.
    global _gtest_flags_are_parsed  # pylint: disable=W0603
    if _gtest_flags_are_parsed:
        return
    _gtest_flags_are_parsed = True
    for flag in _flag_map:
        # The environment variable overrides the default value.
        if flag.upper() in os.environ:
            _flag_map[flag] = os.environ[flag.upper()]
        # The command line flag overrides the environment variable.
        # argv is mutated in place so the stripped flags never reach
        # unittest's own argument parsing.
        i = 1  # Skips the program name.
        while i < len(argv):
            prefix = '--' + flag + '='
            if argv[i].startswith(prefix):
                _flag_map[flag] = argv[i][len(prefix):]
                del argv[i]
                break
            else:
                # We don't increment i in case we just found a --gtest_* flag
                # and removed it from argv.
                i += 1
def GetFlag(flag):
    """Returns the value of the given flag."""
    # In case GetFlag() is called before Main(), we always call
    # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
    # are parsed.
    _ParseAndStripGTestFlags(sys.argv)
    return _flag_map[flag]

def GetSourceDir():
    """Returns the absolute path of the directory where the .py files are."""
    return os.path.abspath(GetFlag('source_dir'))

def GetBuildDir():
    """Returns the absolute path of the directory where the test binaries are."""
    return os.path.abspath(GetFlag('build_dir'))
# Lazily-created scratch directory shared by the whole test module.
_temp_dir = None

def _RemoveTempDir():
    # Best-effort cleanup at interpreter exit.
    if _temp_dir:
        shutil.rmtree(_temp_dir, ignore_errors=True)

atexit.register(_RemoveTempDir)

def GetTempDir():
    """Returns the shared temporary directory, creating it on first use."""
    global _temp_dir
    if not _temp_dir:
        _temp_dir = tempfile.mkdtemp()
    return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
    """Returns the absolute path of the test binary given its name.
    The function will print a message and abort the program if the resulting file
    doesn't exist.
    Args:
      executable_name: name of the test binary that the test script runs.
      build_dir: directory where to look for executables, by default
                 the result of GetBuildDir().
    Returns:
      The absolute path of the test binary.
    """
    path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                        executable_name))
    # Windows-family platforms name executables with an .exe suffix.
    if (IS_WINDOWS or IS_CYGWIN or IS_OS2) and not path.endswith('.exe'):
        path += '.exe'
    if not os.path.exists(path):
        message = (
            'Unable to find the test binary "%s". Please make sure to provide\n'
            'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
            'environment variable.' % path)
        print(message, file=sys.stderr)
        sys.exit(1)
    return path
def GetExitStatus(exit_code):
    """Returns the argument to exit(), or -1 if exit() wasn't called.
    Args:
      exit_code: the result value of os.system(command).
    """
    if os.name == 'nt':
        # On Windows os.system() already returns the exit() argument
        # directly; os.WEXITSTATUS() doesn't work there.
        return exit_code
    # On Unix the 16-bit wait status must be decoded.
    if os.WIFEXITED(exit_code):
        return os.WEXITSTATUS(exit_code)
    return -1
class Subprocess:
    """Runs a child process and records its combined output and exit status."""

    def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
        """Changes into a specified directory, if provided, and executes a command.
        Restores the old directory afterwards.
        Args:
          command: The command to run, in the form of sys.argv.
          working_dir: The directory to change into.
          capture_stderr: Determines whether to capture stderr in the output member
                          or to discard it.
          env: Dictionary with environment to pass to the subprocess.
        Returns:
          An object that represents outcome of the executed process. It has the
          following attributes:
            terminated_by_signal True if and only if the child process has been
                                 terminated by a signal.
            exited               True if and only if the child process exited
                                 normally.
            exit_code            The code with which the child process exited.
            output               Child process's stdout and stderr output
                                 combined in a string.
        """
        # The subprocess module is the preferrable way of running programs
        # since it is available and behaves consistently on all platforms,
        # including Windows. But it is only available starting in python 2.4.
        # In earlier python versions, we revert to the popen2 module, which is
        # available in python 2.0 and later but doesn't provide required
        # functionality (Popen4) under Windows. This allows us to support Mac
        # OS X 10.4 Tiger, which has python 2.3 installed.
        if _SUBPROCESS_MODULE_AVAILABLE:
            if capture_stderr:
                stderr = subprocess.STDOUT
            else:
                stderr = subprocess.PIPE
            p = subprocess.Popen(command,
                                 stdout=subprocess.PIPE, stderr=stderr,
                                 cwd=working_dir, universal_newlines=True, env=env)
            # communicate returns a tuple with the file object for the child's
            # output.
            self.output = p.communicate()[0]
            self._return_code = p.returncode
        else:
            # Legacy popen2 fallback (Python < 2.4).
            old_dir = os.getcwd()

            def _ReplaceEnvDict(dest, src):
                # Changes made by os.environ.clear are not inheritable by child
                # processes until Python 2.6. To produce inheritable changes we have
                # to delete environment items with the del statement.
                for key in dest.keys():
                    del dest[key]
                dest.update(src)

            # When 'env' is not None, backup the environment variables and replace
            # them with the passed 'env'. When 'env' is None, we simply use the
            # current 'os.environ' for compatibility with the subprocess.Popen
            # semantics used above.
            if env is not None:
                old_environ = os.environ.copy()
                _ReplaceEnvDict(os.environ, env)
            try:
                if working_dir is not None:
                    os.chdir(working_dir)
                if capture_stderr:
                    p = popen2.Popen4(command)
                else:
                    p = popen2.Popen3(command)
                p.tochild.close()
                self.output = p.fromchild.read()
                ret_code = p.wait()
            finally:
                os.chdir(old_dir)
                # Restore the old environment variables
                # if they were replaced.
                if env is not None:
                    _ReplaceEnvDict(os.environ, old_environ)
            # Converts ret_code to match the semantics of
            # subprocess.Popen.returncode.
            if os.WIFSIGNALED(ret_code):
                self._return_code = -os.WTERMSIG(ret_code)
            else:  # os.WIFEXITED(ret_code) should return True here.
                self._return_code = os.WEXITSTATUS(ret_code)
        # A negative returncode (signal death) has its sign bit set when
        # masked to 32 bits; that is how both branches are distinguished here.
        if bool(self._return_code & 0x80000000):
            self.terminated_by_signal = True
            self.exited = False
        else:
            self.terminated_by_signal = False
            self.exited = True
            self.exit_code = self._return_code
def Main():
    """Runs the unit test."""
    # We must call _ParseAndStripGTestFlags() before calling
    # unittest.main(). Otherwise the latter will be confused by the
    # --gtest_* flags.
    _ParseAndStripGTestFlags(sys.argv)
    # The tested binaries should not be writing XML output files unless the
    # script explicitly instructs them to.
    if GTEST_OUTPUT_VAR_NAME in os.environ:
        del os.environ[GTEST_OUTPUT_VAR_NAME]
    _test_module.main()
|
import sys, os, traceback, telepot, time, json, random, pprint
import tool, auth, log, mmctool, mmcdb, mmcDefauV, mmcAnali, mmcSachi
from libmsgMmc import msgMain, mainShort, msgCreo, msgOuto, msgInco, msgTran
from libmsgMmc import msgDefSet, msgList, msgEdit, msgAnali, msgSachi
from telepot.delegate import per_chat_id, create_open, pave_event_space
"""Command list
help - Show command list
whats_now - Show current unsaved work
new - Create new record
list - Show previous record
statics - View statistics card
start - Welcome and Introduction
setting - View setting card
exit - Close conversation
"""
class User(telepot.helper.ChatHandler):
    def __init__(self, *args, **kwargs):
        """Per-chat conversation state for the Telegram bot handler."""
        super(User, self).__init__(*args, **kwargs)
        self._vez = 0            # message counter fed to mmctool.printvez/finvez
        self._keywo = ""
        self._keys = ""
        self._mod = ['']         # stack of active modes; presumably '' means idle -- verify
        self._sumo = ''
        # Default value templates loaded from mmcDefauV.
        self._temra = mmcDefauV.keywo('temra')
        self._recom = {}
        self._defSett = {}
        self._statics = mmcDefauV.keywo('statics')
        self._list = mmcDefauV.keywo('list')
        self._setting = mmcDefauV.keywo('setting')
        self._karatio = {}
        self._rawdb = {}
        self._keydb = {}
#
def printbug(self,text,usrid):
filla = open(tool.path('log/mmcbot',usrid=auth.id())+tool.date(modde=5),'a')
print("---"+text+"---")
filla.write('\n--- pri: ' + text + '---\n')
filla.write('Time: ' + tool.date(modde=2) + '\n')
filla.write('User: ' + str(auth.id()) + '\n')
filla.write('keywo: ' + pprint.pformat(self._keywo) + '\n')
filla.write('keys: ' + pprint.pformat(self._keys) + '\n')
filla.write('mod: ' + pprint.pformat(self._mod) + '\n')
filla.write('temra: ' + pprint.pformat(self._temra) + '\n')
filla.write('recom: ' + pprint.pformat(self._recom) + '\n')
filla.write('defSett: ' + pprint.pformat(self._defSett) + '\n')
filla.write('setting: ' + pprint.pformat(self._setting) + '\n')
filla.write('--- pri fin ---\n')
filla.close()
def sending(self,wuerd,modda=0):
lingua = self._setting['lingua']
if len(wuerd) >=4069:
parta = [ wuerd[i:i+4000] for i in range(0, len(wuerd), 4000) ]
for numo in range(0,len(parta)):
if numo == 0:
self.sender.sendMessage(parta[numo]+mainShort.woood(lingua,'spitpost'))
elif numo == len(parta) - 1:
self.sender.sendMessage(mainShort.woood(lingua,'spitpre')+parta[numo])
else:
self.sender.sendMessage(mainShort.woood(lingua,'spitpre')+parta[numo]+mainShort.woood(lingua,'spitpost'))
if modda == 1:
self._vez=mmctool.finvez(self._vez)
else:
self._vez=mmctool.printvez(self._vez)
time.sleep(1)
else:
self.sender.sendMessage(wuerd)
if modda == 1:
self._vez=mmctool.finvez(self._vez)
else:
self._vez=mmctool.printvez(self._vez)
time.sleep(1)
def stacksend(self,staak,modda=0):
lingua = self._setting['lingua']
for wuerd in staak:
sending(wuerd)
if modda == 1:
self._vez=mmctool.finvez(self._vez)
    def comme(self, msg):
        """Dispatch a slash-command message.

        The first group of branches handles commands that are valid in any
        mode (/start, /help, /setting, /modify_Setting, /exit, /new, /list,
        /Edit, /statics).  The remaining branches dispatch on the current
        mode, i.e. the top of the ``self._mod`` stack: '' (idle), 'list',
        'statics', 'creo'/'edit' (record entry/editing) and 'defSett'
        (settings).  Mode transitions go through ``mmctool.apmod`` /
        ``mmctool.popmod``.
        """
        content_type, chat_type, chat_id = telepot.glance(msg)
        text = msg['text']
        lingua = self._setting['lingua']
        # --- commands available in any mode -------------------------------
        if "/start" in text:
            # When idle, append the sign-off marker and close the session.
            if self._mod[-1] == '':
                tasStart = msgMain.start() + mainShort.woood(lingua, 'cof')
                finno = 1
            else:
                tasStart = msgMain.start()
                finno = 0
            self.sending(tasStart, modda=finno)
            if self._mod[-1] == '':
                self.close()
        elif "/help" in text:
            if self._mod[-1] == '':
                tasHelp = msgMain.help() + mainShort.woood(lingua, 'cof')
                finno = 1
            else:
                tasHelp = msgMain.help()
                finno = 0
            self.sending(tasHelp, modda=finno)
            if self._mod[-1] == '':
                self.close()
        elif "/setting" in text:
            self.sending(msgDefSet.main(self._setting))
        elif "/modify_Setting" in text:
            # Refresh the per-chat database, then enter 'defSett' mode.
            self.sending(mainShort.woood(lingua, 'refeson'))
            mmcdb.refesdb(chat_id)
            self._rawdb = mmcdb.opendb(chat_id)['raw']
            self._keydb = mmcdb.opendb(chat_id)['key']
            if self._mod[-1] == '':
                self._mod = mmctool.apmod(self._mod, 'defSett')
            else:
                if self._mod[-1] != 'defSett':
                    self._mod = mmctool.apmod(self._mod, 'defSett')
            tasDeSet = mainShort.woood(lingua, 'refesfin') + msgDefSet.lista(self._setting)
            self.sending(tasDeSet)
            # One-time warning on first use of the settings editor.
            if self._setting['defSettWarn'] == 0:
                self.sending(msgDefSet.warn())
                self._setting['defSettWarn'] = 1
                mmcdb.changeSetting(self._setting, chat_id)
        elif "/exit" in text:
            self.sending(mainShort.woood(lingua, 'bye'), modda=1)
            self.close()
        elif "/new" in text:
            # Start a new expense ('outo') record pre-filled from settings.
            self.sending(mainShort.woood(lingua, 'refeson'))
            mmcdb.refesdb(chat_id)
            self._rawdb = mmcdb.opendb(chat_id)['raw']
            self._keydb = mmcdb.opendb(chat_id)['key']
            if self._mod[-1] == '':
                self._temra["datte"] = tool.date(modde=1)
                self._temra['fromm'] = self._setting['dexpe']
                self._temra['toooo'] = self._setting['ovede']
                self._temra['karen'] = self._setting['karen']
                self._temra['tkare'] = self._setting['karen']
            self._sumo = 'outo'
            if self._keywo != "":
                if '/' not in self._keywo:
                    tasOut = mainShort.woood(lingua, 'refesfin') + msgCreo.keyword(self._keywo)
                    self.sending(tasOut)
            else:
                tasOut = mainShort.woood(lingua, 'refesfin') + msgOuto.main(self._temra) + mainShort.woood(lingua, 'rekeswd')
                self.sending(tasOut)
            self._mod = mmctool.popmod(self._mod)
            self._mod = mmctool.apmod(self._mod, 'creo')
        elif "/list" in text:
            # Enter 'list' mode, defaulting to the most recent date key.
            self.sending(mainShort.woood(lingua, 'refeson'))
            mmcdb.refesdb(chat_id)
            self._rawdb = mmcdb.opendb(chat_id)['raw']
            self._keydb = mmcdb.opendb(chat_id)['key']
            self._sumo = ''
            if self._mod[-1] == '':
                lastdate = list(self._keydb['datte'])
                lastdate.sort()
                try:
                    self._list.update({'datte': lastdate[-1]})
                except IndexError:
                    # Empty database: no dates yet.
                    self._list.update({'datte': ''})
            tasList = mainShort.woood(lingua, 'refesfin') + msgList.main(lingua, self._list.get('datte', ''), mmcdb.listList(self._list.get('datte', ''), chat_id))
            self.sending(tasList)
            self._mod = mmctool.popmod(self._mod)
            self._mod = mmctool.apmod(self._mod, "list")
        elif "/Edit" in text:
            # Only valid from 'list' mode with a record already selected.
            if self._mod[-1] == "list":
                uuid = self._list.get('uuid', '')
                if uuid != '':
                    self._mod = mmctool.apmod(self._mod, 'edit')
                    self._keywo = ''
                    self._temra.update(self._rawdb.get(uuid, ''))
                    self.sending(msgEdit.main(self._temra, uuid) + mainShort.woood(lingua, 'rekeswd'))
                    self._vez = mmctool.printvez(self._vez)
                else:
                    self.sending(msgList.main(lingua, self._list.get('datte', ''), mmcdb.listList(self._list.get('datte', ''), chat_id)))
                    self._vez = mmctool.printvez(self._vez)
            else:
                self.sending(msgMain.keywo('whatsnow'))
                self._vez = mmctool.printvez(self._vez)
        elif "/statics" in text:
            # Enter 'statics' (analysis) mode; may prompt for currency data.
            self.sending(mainShort.woood(lingua, 'refeson'))
            mmcdb.refesdb(chat_id)
            self._rawdb = mmcdb.opendb(chat_id)['raw']
            self._keydb = mmcdb.opendb(chat_id)['key']
            self._sumo = ''
            tasPoce = mainShort.woood(lingua, 'refesfin')
            resKa = mmcdb.getKaratio(self._keydb)
            if resKa:
                tasPoce = tasPoce + mainShort.woood(lingua, 'refka')
                self.sending(tasPoce)
                tasPoce = ''
            else:
                tasPoce = tasPoce + mainShort.woood(lingua, 'kaDatte') + tool.acedate('momoco', 'karen')
                tasPoce = tasPoce + mainShort.woood(lingua, 'cband')
            self._karatio = mmcdb.openKaratio()
            tasList = tasPoce + msgAnali.chooseMode(lingua)
            self.sending(tasList)
            self._mod = mmctool.popmod(self._mod)
            self._mod = mmctool.apmod(self._mod, "statics")
        # --- mode-specific dispatch ---------------------------------------
        elif self._mod[-1] == '':
            # Unknown command while idle: reply and end the conversation.
            self.sending(msgMain.bored() + mainShort.woood(lingua, 'cof'), modda=1)
            self.close()
        elif self._mod[-1] == "list":
            if self._sumo == 'sachi':
                # Search sub-mode within the list view.
                if "/Back" in text:
                    self._sumo = ''
                    self._list.update({'uuid': ''})
                    self.sending(msgList.main(lingua, self._list.get('datte', ''), mmcdb.listList(self._list.get('datte', ''), chat_id)))
                elif "/Search" in text:
                    self._statics.update({'mode': 'sachi'})
                    if mmcAnali.check(self._statics):
                        self.sending(mainShort.woood(lingua, 'analiWarn'))
                    else:
                        self._sumo = ''
                        melib = mmcSachi.sachi(chat_id, self._statics)
                        tasta = mmcSachi.listSachi(chat_id, melib)
                        self.sending(msgList.main(lingua, ' - ', tasta))
                elif '/change_cokas' in text:
                    skdic = mmcDefauV.keywo('transle')
                    keywo = 'cokas'
                    titil = skdic.get(keywo, '')
                    self.sending(msgMain.selection(mmcAnali.listClass(keywo, lingua=lingua), titil))
                elif '/set_cokas_as_' in text:
                    self._statics.update({'cokas': text.replace('/set_cokas_as_', '')})
                    self.sending(msgSachi.listMain(lingua, self._statics))
                elif '/set_as_' in text:
                    # Bind the previously typed keyword to a search field.
                    if '/set_as_btempo' in text:
                        self._statics.update({'btempo': self._keywo})
                    elif '/set_as_ftempo' in text:
                        self._statics.update({'ftempo': self._keywo})
                    elif '/set_as_keywo' in text:
                        self._statics.update({'keywo': self._keywo})
                    self.sending(msgSachi.listMain(lingua, self._statics))
            else:
                if "/Back" in text:
                    self._list.update({'uuid': ''})
                    self.sending(msgList.main(lingua, self._list.get('datte', ''), mmcdb.listList(self._list.get('datte', ''), chat_id)))
                elif "/Search" in text:
                    self._sumo = 'sachi'
                    self.sending(msgSachi.listMain(lingua, self._statics))
                if "/whats_now" in text:
                    self.sending(msgList.main(lingua, self._list.get('datte', ''), mmcdb.listList(self._list.get('datte', ''), chat_id)))
                elif "/Close" in text:
                    self.sending(msgList.disca(lingua) + mainShort.woood(lingua, 'cof'), modda=1)
                    self._mod = []
                    self.close()
                elif "/uuid_" in text:
                    print('uuid')
                    for sette in text.split(' '):
                        if "/uuid_" in sette:
                            self._list.update({'uuid': sette.replace('/uuid_', '')})
                    self.sending(msgList.single(lingua, self._list.get('uuid', ''), chat_id, self._rawdb))
                elif "/Choose_" in text:
                    for sette in text.split(' '):
                        if "/Choose_" in sette:
                            keywo = sette.replace("/Choose_", '').replace('_', '-')
                    setta = mmctool.filteDate(list(self._keydb['datte']), keywo)
                    testa = mmctool.cmdzDate(setta)
                    self.sending(msgList.change(lingua, keywo, testa))
                elif "/ch_" in text:
                    self._sumo = ''
                    tasta = ''
                    for takso in text.split(' '):
                        if '/ch_' in takso:
                            tasta = takso.replace('/ch_', '').replace('_', '-')
                    self._list.update({'datte': tasta})
                    self.sending(msgList.main(lingua, self._list.get('datte', ''), mmcdb.listList(self._list.get('datte', ''), chat_id)))
        elif self._mod[-1] == "statics":
            if "/Analysis" in text:
                if self._statics['mode'] != '':
                    print('statics : ' + pprint.pformat(self._statics, compact=True))
                    if mmcAnali.check(self._statics):
                        self.sending(mainShort.woood(lingua, 'analiWarn'))
                    else:
                        # Run the selected analysis and send its sections.
                        if self._statics['mode'] == 'abratio':
                            medio = mmcAnali.abratio(chat_id, self._statics)
                            secto = msgAnali.abratioResut(lingua, medio)
                        elif self._statics['mode'] == 'atren':
                            medio = mmcAnali.atren(chat_id, self._statics)
                            secto = msgAnali.atrenResut(lingua, medio)
                        elif self._statics['mode'] == 'akaun':
                            medio = mmcAnali.akaun(chat_id, self._statics)
                            secto = msgAnali.akaunResut(lingua, medio)
                        for lun in secto:
                            self.sending(lun)
                else:
                    self.sending(msgMain.keywo('whatsnow'))
            elif "/whats_now" in text:
                self.sending(msgMain.keywo('whatsnow'))
            elif "/Back" in text:
                if self._statics['mode'] != '':
                    if self._statics['mode'] == 'abratio':
                        self.sending(msgAnali.chooseMode(lingua))
                        self._statics['mode'] = ''
                    elif self._statics['mode'] == 'atren':
                        self.sending(msgAnali.chooseMode(lingua))
                        self._statics['mode'] = ''
                    elif self._statics['mode'] == 'akaun':
                        self.sending(msgAnali.chooseMode(lingua))
                        self._statics['mode'] = ''
                else:
                    self._statics = mmcDefauV.keywo('statics')
                    self.sending(msgAnali.chooseMode(lingua))
            elif "/Close" in text:
                self.sending(msgAnali.disca(lingua) + mainShort.woood(lingua, 'cof'), modda=1)
                self._mod = []
                self.close()
            elif "/Discard" in text:
                if self._statics['mode'] == 'abratio':
                    self.sending(msgAnali.abratioMain(lingua, self._statics))
                elif self._statics['mode'] == 'atren':
                    self.sending(msgAnali.atrenMain(lingua, self._statics))
                elif self._statics['mode'] == 'akaun':
                    self.sending(msgAnali.akaunMain(lingua, self._statics))
            elif '/set_Mode_as_' in text:
                if '/set_Mode_as_abratio' in text:
                    self._statics['mode'] = 'abratio'
                    self.sending(msgAnali.abratioMain(lingua, self._statics))
                elif '/set_Mode_as_atren' in text:
                    self._statics['mode'] = 'atren'
                    self.sending(msgAnali.atrenMain(lingua, self._statics))
                elif '/set_Mode_as_akaun' in text:
                    self._statics['mode'] = 'akaun'
                    self.sending(msgAnali.akaunMain(lingua, self._statics))
            elif '/change_acuno' in text:
                self._recom = mmcdb.listAcc('ch', 'chs', 'acuno', chat_id)
                self.sending(msgMain.selection(self._recom[1], mmcDefauV.keywo('transle', lingua=lingua).get('acuno', 'acuno')))
            elif '/change_' in text:
                skdic = mmcDefauV.keywo('transle')
                if '/change_cokas' in text:
                    keywo = 'cokas'
                elif '/change_targe' in text:
                    keywo = 'targe'
                titil = skdic.get(keywo, '')
                self.sending(msgMain.selection(mmcAnali.listClass(keywo, lingua=lingua), titil))
            elif '/set_cokas_as_' in text:
                self._statics.update({'cokas': text.replace('/set_cokas_as_', '')})
                if self._statics['mode'] == 'abratio':
                    self.sending(msgAnali.abratioMain(lingua, self._statics))
                elif self._statics['mode'] == 'atren':
                    self.sending(msgAnali.atrenMain(lingua, self._statics))
                elif self._statics['mode'] == 'akaun':
                    self.sending(msgAnali.akaunMain(lingua, self._statics))
            elif '/set_targe_as_' in text:
                self._statics.update({'targe': text.replace('/set_targe_as_', '')})
                if self._statics['mode'] == 'abratio':
                    self.sending(msgAnali.abratioMain(lingua, self._statics))
                elif self._statics['mode'] == 'atren':
                    self.sending(msgAnali.atrenMain(lingua, self._statics))
            elif '/set_Leve_in_' in text:
                # 'leve' is a date-prefix length: 10=day, 7=month, 4=year.
                if '/set_Leve_in_Day' in text:
                    self._statics.update({'leve': 10})
                elif '/set_Leve_in_Month' in text:
                    self._statics.update({'leve': 7})
                elif '/set_Leve_in_Year' in text:
                    self._statics.update({'leve': 4})
                if self._statics['mode'] == 'abratio':
                    self.sending(msgAnali.abratioMain(lingua, self._statics))
                elif self._statics['mode'] == 'atren':
                    self.sending(msgAnali.atrenMain(lingua, self._statics))
            elif '/set_as_' in text:
                if '/set_as_btempo' in text:
                    self._statics.update({'btempo': self._keywo})
                elif '/set_as_ftempo' in text:
                    self._statics.update({'ftempo': self._keywo})
                elif '/set_as_cokey' in text:
                    self._statics.update({'cokey': self._keywo})
                elif '/set_as_targe' in text:
                    self._statics.update({'targe': self._keywo})
                elif '/set_as_balan' in text:
                    self._statics.update({'balan': self._keywo})
                if self._statics['mode'] == 'abratio':
                    self.sending(msgAnali.abratioMain(lingua, self._statics))
                elif self._statics['mode'] == 'atren':
                    self.sending(msgAnali.atrenMain(lingua, self._statics))
                elif self._statics['mode'] == 'akaun':
                    self.sending(msgAnali.akaunMain(lingua, self._statics))
            elif "/ch" in text:
                for sette in text.split(" "):
                    if "/chs_" in sette:
                        try:
                            wahfu = sette.split('_')
                            self._keys = wahfu[1]
                            self._keywo = self._recom[2][wahfu[2]]
                            self._statics.update({self._keys: self._keywo})
                            if self._statics['mode'] == 'akaun':
                                self.sending(msgAnali.akaunMain(lingua, self._statics))
                        except KeyError:
                            print("KeyError : Doesn't Exist or Expired")
                            if self._statics['mode'] == 'akaun':
                                self.sending(mainShort.woood(lingua, 'rgsWarn') + msgAnali.akaunMain(lingua, self._statics) + mainShort.woood(lingua, 'rekeswd'))
                    elif "/ch_" in sette:
                        # NOTE(review): ``wahfu`` is only assigned inside the
                        # "/chs_" branch above; a bare "/ch_..." token raises
                        # NameError here — confirm and fix upstream.
                        self._statics.update({wahfu[1]: wahfu[2]})
                        if self._statics['mode'] == 'akaun':
                            self.sending(msgAnali.akaunMain(lingua, self._statics))
        elif self._mod[-1] in ['creo', 'edit']:
            if "/Discard" in text:
                # Wipe the working record and leave the current mode.
                self._keywo = ''
                for key in self._temra.keys():
                    self._temra.update({key: ''})
                if self._mod[-1] == 'edit':
                    mmctool.printbug("Discard editing\n mod", self._mod, chat_id)
                    self.sending(msgEdit.discar() + mainShort.woood(lingua, 'cof'))
                else:
                    mmctool.printbug("Discard record\n mod", self._mod, chat_id)
                    self.sending(msgCreo.discard() + mainShort.woood(lingua, 'cof'), modda=1)
                self._mod = mmctool.popmod(self._mod)
                mmctool.printbug("Changed back mode\n mod", self._mod, chat_id)
                if self._mod[-1] != 'edit':
                    self.close()
            elif "/Save" in text:
                # Persist the working record; mirror price/currency fields
                # depending on the record type before writing.
                if self._mod[-1] == 'edit':
                    uuid = self._list.get('uuid', '')
                    record = mmcdb.chRaw(self._temra, uuid, chat_id)
                elif self._sumo == 'inco':
                    self._temra['price'] = self._temra['tpric']
                    self._temra['karen'] = self._temra['tkare']
                    self._temra['fromm'] = self._setting['genis']
                    record = mmcdb.addRaw(chat_id, self._temra)
                elif self._sumo == 'outo':
                    self._temra['tpric'] = self._temra['price']
                    self._temra['tkare'] = self._temra['karen']
                    self._temra['toooo'] = self._setting['ovede']
                    record = mmcdb.addRaw(chat_id, self._temra)
                else:
                    record = mmcdb.addRaw(chat_id, self._temra)
                if self._sumo == 'outo':
                    self.sending(msgOuto.finis(self._temra) + mainShort.woood(lingua, 'cof'), modda=1)
                elif self._sumo == 'inco':
                    self.sending(msgInco.finis(self._temra) + mainShort.woood(lingua, 'cof'), modda=1)
                elif self._sumo == 'tran':
                    self.sending(msgTran.finis(self._temra) + mainShort.woood(lingua, 'cof'), modda=1)
                elif self._mod[-1] == 'edit':
                    self.sending(msgEdit.fin(uuid, chat_id, record) + mainShort.woood(lingua, 'cof'))
                if self._mod[-1] == 'edit':
                    self._mod = mmctool.popmod(self._mod)
                    mmctool.printbug("Changed back mode\n mod", self._mod, chat_id)
                else:
                    self.close()
            elif "/set_as" in text:
                # Bind the previously typed free-text keyword to a record field.
                self._keywo = self._keywo.replace(" ", "_")
                if "/set_as_Date" in text:
                    self._temra.update({'datte': self._keywo})
                    self._keys = 'datte'
                elif "/set_as_Item" in text:
                    self._temra.update({'namma': self._keywo})
                    self._keys = 'namma'
                elif "/set_as_Remind" in text:
                    self._temra.update({'namma': self._keywo})
                    self._keys = 'namma'
                elif "/set_as_Category" in text:
                    self._temra.update({'klass': self._keywo})
                    self._keys = 'klass'
                elif "/set_as_Seller" in text:
                    self._temra.update({'shoop': self._keywo})
                    self._keys = 'shoop'
                elif "/set_as_Agent" in text:
                    self._temra.update({'shoop': self._keywo})
                    self._keys = 'shoop'
                elif "/set_as_Account_From" in text:
                    self._temra.update({'fromm': self._keywo})
                    self._keys = 'fromm'
                elif "/set_as_Account_To" in text:
                    self._temra.update({'toooo': self._keywo})
                    self._keys = 'toooo'
                elif "/set_as_Account" in text:
                    self._temra.update({'fromm': self._keywo})
                    self._keys = 'fromm'
                elif "/set_as_Price" in text:
                    self._temra.update({'price': self._keywo})
                    self._keys = 'price'
                elif "/set_as_Notes" in text:
                    self._temra.update({'desci': self._keywo})
                    self._keys = 'desci'
                elif "/set_as_Income" in text:
                    self._temra.update({'tpric': self._keywo})
                    self._keys = 'tpric'
                elif "/set_as_Amount_From" in text:
                    self._temra.update({'price': self._keywo})
                    self._keys = 'price'
                elif "/set_as_Amount_To" in text:
                    self._temra.update({'tpric': self._keywo})
                    self._keys = 'tpric'
                elif "/set_as_Currency_Source" in text:
                    self._temra.update({'karen': self._keywo})
                    self._keys = 'karen'
                elif "/set_as_Currency_Target" in text:
                    self._temra.update({'tkare': self._keywo})
                    self._keys = 'tkare'
                elif "/set_as_Currency" in text:
                    self._temra.update({'karen': self._keywo})
                    self._keys = 'karen'
                # Re-render the working record, with recommendations when the
                # field participates in the 'recset' recommendation set.
                tasRef = ''
                if self._sumo == 'outo':
                    tasRef = msgOuto.main(self._temra)
                elif self._sumo == 'inco':
                    tasRef = msgInco.main(self._temra)
                elif self._sumo == 'tran':
                    tasRef = msgTran.main(self._temra)
                elif self._mod[-1] == 'edit':
                    tasRef = msgEdit.main(self._temra, self._list.get('uuid', ''))
                if self._keys in mmcDefauV.keywo('recset'):
                    self._recom = mmcdb.recomtxt(self._temra, self._recom, self._keys, self._keywo, mmcDefauV.keywo('recset'), chat_id)
                    if self._recom[1] != "":
                        self.sending(tasRef)
                        self.sending(msgCreo.recom(self._recom[1], self._keywo))
                    else:
                        self.sending(tasRef + mainShort.woood(lingua, 'rekeswd'))
                else:
                    self.sending(tasRef + mainShort.woood(lingua, 'rekeswd'))
            elif "/rg" in text:
                # "/rgs_XX_NN": pick recommendation NN for field XX;
                # "/rg_XX_value": set field XX directly from the token.
                for sette in text.split(" "):
                    if "/rgs_" in sette:
                        try:
                            self._keys = mmcDefauV.keywo('sf')[sette[5:7]]
                            self._keywo = self._recom[2][sette[8:len(sette)]]
                            self._temra.update({self._keys: self._keywo})
                            if self._sumo == 'outo':
                                tasRg = msgOuto.main(self._temra)
                            elif self._sumo == 'inco':
                                tasRg = msgInco.main(self._temra)
                            elif self._sumo == 'tran':
                                tasRg = msgTran.main(self._temra)
                            elif self._mod[-1] == 'edit':
                                tasRg = msgEdit.main(self._temra, self._list.get('uuid', ''))
                        except KeyError:
                            print("KeyError : Doesn't Exist or Expired")
                            if self._sumo == 'outo':
                                tasRg = mainShort.woood(lingua, 'rgsWarn') + msgOuto.main(self._temra) + mainShort.woood(lingua, 'rekeswd')
                            elif self._sumo == 'inco':
                                tasRg = mainShort.woood(lingua, 'rgsWarn') + msgInco.main(self._temra) + mainShort.woood(lingua, 'rekeswd')
                            elif self._sumo == 'tran':
                                tasRg = mainShort.woood(lingua, 'rgsWarn') + msgTran.main(self._temra) + mainShort.woood(lingua, 'rekeswd')
                            elif self._mod[-1] == 'edit':
                                tasRg = mainShort.woood(lingua, 'rgsWarn') + msgEdit.main(self._temra, self._list.get('uuid', '')) + mainShort.woood(lingua, 'rekeswd')
                        if self._keys in mmcDefauV.keywo('recset'):
                            self._recom = mmcdb.recomtxt(self._temra, self._recom, self._keys, self._keywo, mmcDefauV.keywo('recset'), chat_id)
                            if self._recom[1] != "":
                                self.sending(tasRg)
                                self.sending(msgCreo.recom(self._recom[1], self._keywo))
                            else:
                                self.sending(tasRg)
                        else:
                            self.sending(tasRg)
                    elif "/rg_" in sette:
                        self._temra.update({mmcDefauV.keywo('sf')[sette[4:6]]: sette[7:len(sette)]})
                        if self._sumo == 'outo':
                            tasRg = msgOuto.main(self._temra)
                        elif self._sumo == 'inco':
                            tasRg = msgInco.main(self._temra)
                        elif self._sumo == 'tran':
                            tasRg = msgTran.main(self._temra)
                        elif self._mod[-1] == 'edit':
                            tasRg = msgEdit.main(self._temra, self._list.get('uuid', ''))
                        if self._keys in mmcDefauV.keywo('recset'):
                            self._recom = mmcdb.recomtxt(self._temra, self._recom, self._keys, self._keywo, mmcDefauV.keywo('recset'), chat_id)
                            if self._recom[1] != "":
                                self.sending(tasRg)
                                self.sending(msgCreo.recom(self._recom[1], self._keywo))
                            else:
                                self.sending(tasRg + mainShort.woood(lingua, 'rekeswd'))
                        else:
                            self.sending(tasRg + mainShort.woood(lingua, 'rekeswd'))
            elif "/change" in text:
                if "/change_to_" in text:
                    # Convert the working record between income/transfer/expense,
                    # re-seeding the account/currency defaults from settings.
                    if '/change_to_Income' in text:
                        self._temra['fromm'] = self._setting['genis']
                        self._temra['toooo'] = self._setting['dinco']
                        self._temra['shoop'] = ''
                        self._temra['klass'] = self._setting['incom']
                        self._temra['karen'] = self._setting['karen']
                        self._temra['tkare'] = self._setting['karen']
                        self._temra.update({'tpric': self._temra.get('price', '')})
                        self.sending(msgInco.main(self._temra) + mainShort.woood(lingua, 'rekeswd'))
                        # self._mod=mmctool.popmod(self._mod)
                        # self._mod=mmctool.apmod(self._mod,"inco")
                        self._sumo = 'inco'
                    elif '/change_to_Transfer' in text:
                        self._temra['fromm'] = self._setting['dinco']
                        self._temra['toooo'] = self._setting['dexpe']
                        self._temra['shoop'] = ''
                        self._temra['klass'] = self._setting['tanfe']
                        self._temra['karen'] = self._setting['karen']
                        self._temra['tkare'] = self._setting['karen']
                        self._temra.update({'tpric': self._temra.get('price', '')})
                        self.sending(msgTran.main(self._temra) + mainShort.woood(lingua, 'rekeswd'))
                        # self._mod=mmctool.popmod(self._mod)
                        # self._mod=mmctool.apmod(self._mod,"tran")
                        self._sumo = 'tran'
                    elif '/change_to_Expense' in text:
                        self._temra['fromm'] = self._setting['dexpe']
                        self._temra['toooo'] = self._setting['ovede']
                        self._temra['klass'] = ''
                        self._temra['karen'] = self._setting['karen']
                        self._temra['tkare'] = self._setting['karen']
                        self.sending(msgOuto.main(self._temra) + mainShort.woood(lingua, 'rekeswd'))
                        # self._mod=mmctool.popmod(self._mod)
                        # self._mod=mmctool.apmod(self._mod,"outo")
                        self._sumo = 'outo'
                else:
                    # Offer a pick-list for the requested field.
                    self._recom = {}
                    if "/change_Currency_To" in text:
                        keywo = 'tk'
                        self._recom = mmcdb.listKen('rg', 'rgs', keywo, chat_id)
                        self.sending(msgMain.selection(self._recom[1], 'Currency (To)'))
                    elif "/change_Currency" in text:
                        keywo = 'kr'
                        self._recom = mmcdb.listKen('rg', 'rgs', keywo, chat_id)
                        self.sending(msgMain.selection(self._recom[1], 'Currency'))
                    elif "/change_Acc_From" in text:
                        keywo = 'fr'
                        self._recom = mmcdb.listAcc('rg', 'rgs', keywo, chat_id)
                        self.sending(msgMain.selection(self._recom[1], 'Account (From)'))
                    elif "/change_Acc_To" in text:
                        keywo = 'to'
                        self._recom = mmcdb.listAcc('rg', 'rgs', keywo, chat_id)
                        self.sending(msgMain.selection(self._recom[1], 'Account (To)'))
                    elif text.replace('/change_', '') in ['Seller', 'Agent']:
                        keywo = 'sh'
                        self._recom = mmcdb.listSeller(self._temra.get('klass', ''), 'rg', 'rgs', keywo, chat_id)
                        if self._recom[1] != '':
                            self.sending(msgMain.selection(self._recom[1], text.replace('/change_', '')))
                        else:
                            self.sending(mainShort.woood(lingua, 'emptylist') + mainShort.woood(lingua, 'rekeswd'))
            elif "/whats_now" in text:
                if self._sumo == 'outo':
                    self.sending(msgOuto.main(self._temra) + mainShort.woood(lingua, 'rekeswd'))
                elif self._sumo == 'inco':
                    self.sending(msgInco.main(self._temra) + mainShort.woood(lingua, 'rekeswd'))
                elif self._sumo == 'tran':
                    self.sending(msgTran.main(self._temra) + mainShort.woood(lingua, 'rekeswd'))
                elif self._mod[-1] == 'edit':
                    self.sending(msgEdit.main(self._temra, self._list.get('uuid', '')) + mainShort.woood(lingua, 'rekeswd'))
            elif "/Back" in text:
                if self._sumo == 'outo':
                    self.sending(msgOuto.main(self._temra) + mainShort.woood(lingua, 'rekeswd'))
                elif self._sumo == 'inco':
                    self.sending(msgInco.main(self._temra) + mainShort.woood(lingua, 'rekeswd'))
                elif self._sumo == 'tran':
                    self.sending(msgTran.main(self._temra) + mainShort.woood(lingua, 'rekeswd'))
                elif self._mod[-1] == 'edit':
                    self.sending(msgEdit.main(self._temra, self._list.get('uuid', '')) + mainShort.woood(lingua, 'rekeswd'))
        elif self._mod[-1] == 'defSett':
            if "/Discard" in text:
                self._keywo = ""
                for key in self._temra.keys():
                    self._temra[key] = ""
                mmctool.printbug("Discard Account Setting\n mod", self._mod, chat_id)
                self.sending(msgDefSet.discard())
                self._mod = mmctool.popmod(self._mod)
                mmctool.printbug("Changed back mode\n mod", self._mod, chat_id)
            elif "/Save" in text:
                mmcdb.changeSetting(self._setting, chat_id)
                self.sending(msgDefSet.fins(self._setting))
                self._mod = mmctool.popmod(self._mod)
            elif "/Explain" in text:
                self.sending(msgDefSet.warn())
            elif "/Back" in text:
                self.sending(msgDefSet.lista(self._setting))
            elif "/whats_now" in text:
                self.sending(msgDefSet.lista(self._temra))
            elif "/change_" in text:
                # Two-letter field code follows "/change_"; 'lingua' is special.
                keywo = text[8:10]
                sfdic = mmcDefauV.keywo('sf')
                self._defSett = {}
                if '/change_lingua' in text:
                    keywo = 'lingua'
                    self._defSett = mmcdb.listLigua('ch', keywo, chat_id)
                    sasak = mmcDefauV.keywo('transle')['lingua']
                elif keywo in mmcDefauV.keywo('klass')['Acc']:
                    self._defSett = mmcdb.listAcc('ch', 'chu', keywo, chat_id)
                    kenwo = sfdic[keywo]
                    sasak = mmcDefauV.keywo('transle')[kenwo]
                elif keywo in mmcDefauV.keywo('klass')['Kas']:
                    self._defSett = mmcdb.listKas('ch', 'chu', keywo, chat_id)
                    kenwo = sfdic[keywo]
                    sasak = mmcDefauV.keywo('transle')[kenwo]
                elif keywo in mmcDefauV.keywo('klass')['Ken']:
                    self._defSett = mmcdb.listKen('ch', 'chu', keywo, chat_id)
                    kenwo = sfdic[keywo]
                    sasak = mmcDefauV.keywo('transle')[kenwo]
                self.sending(msgMain.selection(self._defSett[1], sasak))
            elif "/ch" in text:
                # NOTE(review): if no "/chu_"/"/ch_" token matches below,
                # tasDeSetCha is never assigned and the concatenation raises
                # NameError — confirm whether that path can occur.
                if '/ch_lingua_' in text:
                    self._setting['lingua'] = text.replace('/ch_lingua_', '')
                    tasDeSetCha = ''
                else:
                    for sette in text.split(" "):
                        if "/chu_" in sette:
                            try:
                                self._setting.update({mmcDefauV.keywo('sf')[sette[5:7]]: self._defSett[2][sette[8:len(sette)]]})
                                tasDeSetCha = ''
                            except KeyError:
                                tasDeSetCha = msgMain.keywo('rgsWarn')
                        elif "/ch_" in sette:
                            self._setting[mmcDefauV.keywo('sf')[sette[4:6]]] = sette[7:len(sette)]
                            tasDeSetCha = ''
                tasDeSetCha = tasDeSetCha + msgDefSet.lista(self._setting)
                self.sending(tasDeSetCha)
def open(self, initial_msg, seed): # Welcome Region
content_type, chat_type, chat_id = telepot.glance(initial_msg)
self.printbug("Intitial",chat_id)
mmctool.printbug("inti_msg",initial_msg,chat_id)
self._mod = ['']
self._setting = mmcdb.upgradeSetting(self._setting,chat_id)
self._karatio = mmcdb.openKaratio()
self._rawdb = mmcdb.opendb(chat_id)['raw']
self._keydb = mmcdb.opendb(chat_id)['key']
self._vez=0
open(tool.path('log/mmcbot',usrid=auth.id())+tool.date(modde=1)+'.c','a').write('\n')
if content_type != 'text':
self.sending(msgMain.error(), modda = 1)
self.close()
return
if "/" in initial_msg["text"]:
self.comme(initial_msg)
else:
if "/" not in initial_msg["text"]:
self._keywo = initial_msg["text"].replace(" ","_")
self.sending(msgMain.home(self._keywo))
return True # prevent on_message() from being called on the initial message
    def on_chat_message(self, msg):  # Each Msg
        """Handle every subsequent message in the chat.

        Messages starting with '/' go to the command dispatcher
        :meth:`comme`; anything else is stored as the pending keyword
        (``self._keywo``) and answered with a mode-specific keyword prompt.
        Non-text content ends the conversation.
        """
        content_type, chat_type, chat_id = telepot.glance(msg)
        self.printbug("Received", chat_id)
        mmctool.printbug("msg", msg, chat_id)
        lingua = self._setting['lingua']
        if content_type != 'text':
            self.sending(msgMain.error(), modda=1)
            self.close()
            return
        if msg["text"][0] == '/':
            self.comme(msg)
        else:
            # Free text: remember it, then prompt according to current mode.
            self._keywo = msg["text"].replace("/", "")
            if self._mod[-1] == '':
                self.sending(msgMain.home(self._keywo))
            elif self._mod[-1] == "list":
                if self._sumo == 'sachi':
                    self.sending(msgSachi.listKeywo(lingua, self._keywo))
                else:
                    self.sending(mainShort.woood(lingua, 'emptysachi'))
            elif self._mod[-1] == "edit":
                tasEdit = msgEdit.keyword(self._keywo)
                self.sending(tasEdit)
            elif self._mod[-1] == 'statics':
                if self._statics['mode'] == 'abratio':
                    self.sending(msgAnali.abratioKeywo(lingua, self._keywo))
                elif self._statics['mode'] == 'atren':
                    self.sending(msgAnali.atrenKeywo(lingua, self._keywo))
                elif self._statics['mode'] == 'akaun':
                    self.sending(msgAnali.akaunKeywo(lingua, self._keywo))
            elif self._mod[-1] == "creo":
                if self._sumo == 'outo':
                    tasOut = msgCreo.keyword(self._keywo)
                    self.sending(tasOut)
                elif self._sumo == 'inco':
                    tasInco = msgInco.keyword(self._keywo)
                    self.sending(tasInco)
                elif self._sumo == 'tran':
                    tasTran = msgTran.keyword(self._keywo)
                    self.sending(tasTran)
            elif self._mod[-1] == 'defSett':
                # latin-1-encodable keywords become direct "/ch_" commands;
                # anything else gets a numeric alias via "/chu_".
                numme = str(random.choice(range(10, 100)))
                self._defSett = {}
                try:
                    self._keywo.encode('latin-1')
                    self._defSett = {1: ["/ch_", "_" + self._keywo], 2: []}
                except UnicodeEncodeError:
                    self._defSett = {1: ["/chu_", "_" + numme + " " + self._keywo], 2: {numme: self._keywo}}
                self.sending(msgDefSet.setup(self._keywo, self._defSett))
def on__idle(self, event): # Timeout Region
lingua = self._setting['lingua']
self.sending(msgMain.timesout()+mainShort.woood(lingua,'cof') , modda = 1 )
self.close()
# Startup: read the bot token and run the long-polling loop.
# Fix: the key file is opened with a context manager (the handle previously
# leaked via json.load(open(...))).
with open("database/key", "r") as key_file:
    key = json.load(key_file)
TOKEN = key["momocobot"]
bot = telepot.DelegatorBot(TOKEN, [
    pave_event_space()(per_chat_id(), create_open, User, timeout=100),
])
bot.message_loop(run_forever='Listening ...')
|
from . import Connector
from .. import util
import sys
import re
class PyODBCConnector(Connector):
    """Connector mixin for the ``pyodbc`` DBAPI.

    Builds ODBC connection strings from SQLAlchemy URLs, detects the
    underlying ODBC driver (FreeTDS / EasySoft) at initialize() time, and
    derives the unicode capabilities from that detection.
    """
    driver = 'pyodbc'

    supports_sane_multi_rowcount = False

    if util.py2k:
        # PyODBC unicode is broken on UCS-4 builds
        supports_unicode = sys.maxunicode == 65535
        supports_unicode_statements = supports_unicode

    supports_native_decimal = True
    default_paramstyle = 'named'

    # for non-DSN connections, this should
    # hold the desired driver name
    pyodbc_driver_name = None

    # will be set to True after initialize()
    # if the freetds.so is detected
    freetds = False

    # will be set to the string version of
    # the FreeTDS driver if freetds is detected
    freetds_driver_version = None

    # will be set to True after initialize()
    # if the libessqlsrv.so is detected
    easysoft = False

    def __init__(self, supports_unicode_binds=None, **kw):
        """``supports_unicode_binds`` overrides the auto-detected value."""
        super(PyODBCConnector, self).__init__(**kw)
        self._user_supports_unicode_binds = supports_unicode_binds

    @classmethod
    def dbapi(cls):
        """Return the pyodbc module as the DBAPI."""
        return __import__('pyodbc')

    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into pyodbc connect arguments.

        Returns ``[[connection_string], connect_kwargs]``.  An explicit
        ``odbc_connect`` query parameter is passed through verbatim; a DSN
        is used when 'dsn' is given or when a host without a database is
        given; otherwise a full DRIVER/Server/Database string is built.
        """
        opts = url.translate_connect_args(username='user')
        opts.update(url.query)
        keys = opts
        query = url.query
        connect_args = {}
        # Boolean options handed to pyodbc.connect() as keyword arguments.
        for param in ('ansi', 'unicode_results', 'autocommit'):
            if param in keys:
                connect_args[param] = util.asbool(keys.pop(param))
        if 'odbc_connect' in keys:
            connectors = [util.unquote_plus(keys.pop('odbc_connect'))]
        else:
            dsn_connection = 'dsn' in keys or \
                ('host' in keys and 'database' not in keys)
            if dsn_connection:
                connectors = ['dsn=%s' % (keys.pop('host', '') or
                                          keys.pop('dsn', ''))]
            else:
                port = ''
                if 'port' in keys and 'port' not in query:
                    port = ',%d' % int(keys.pop('port'))
                connectors = ["DRIVER={%s}" %
                              keys.pop('driver', self.pyodbc_driver_name),
                              'Server=%s%s' % (keys.pop('host', ''), port),
                              'Database=%s' % keys.pop('database', '')]
            user = keys.pop("user", None)
            if user:
                connectors.append("UID=%s" % user)
                connectors.append("PWD=%s" % keys.pop('password', ''))
            else:
                connectors.append("Trusted_Connection=Yes")
            # if set to 'Yes', the ODBC layer will try to automagically
            # convert textual data from your database encoding to your
            # client encoding. This should obviously be set to 'No' if
            # you query a cp1253 encoded database from a latin1 client...
            if 'odbc_autotranslate' in keys:
                connectors.append("AutoTranslate=%s" %
                                  keys.pop("odbc_autotranslate"))
            # Any remaining URL query options are passed through verbatim.
            connectors.extend(['%s=%s' % (k, v) for k, v in keys.items()])
        return [[";".join(connectors)], connect_args]

    def is_disconnect(self, e, connection, cursor):
        """Return True if ``e`` indicates a dead/closed connection."""
        if isinstance(e, self.dbapi.ProgrammingError):
            return "The cursor's connection has been closed." in str(e) or \
                'Attempt to use a closed connection.' in str(e)
        elif isinstance(e, self.dbapi.Error):
            return '[08S01]' in str(e)
        else:
            return False

    def initialize(self, connection):
        """Detect the ODBC driver and set unicode capability flags."""
        # determine FreeTDS first. can't issue sql easily
        # without getting unicode_statements/binds set up.
        pyodbc = self.dbapi
        dbapi_con = connection.connection
        _sql_driver_name = dbapi_con.getinfo(pyodbc.SQL_DRIVER_NAME)
        self.freetds = bool(re.match(r".*libtdsodbc.*\.so", _sql_driver_name
                                     ))
        self.easysoft = bool(re.match(r".*libessqlsrv.*\.so", _sql_driver_name
                                      ))
        if self.freetds:
            self.freetds_driver_version = dbapi_con.getinfo(
                pyodbc.SQL_DRIVER_VER)
        self.supports_unicode_statements = (
            not util.py2k or
            (not self.freetds and not self.easysoft)
        )
        # An explicit constructor argument wins over auto-detection.
        if self._user_supports_unicode_binds is not None:
            self.supports_unicode_binds = self._user_supports_unicode_binds
        elif util.py2k:
            self.supports_unicode_binds = (
                not self.freetds or self.freetds_driver_version >= '0.91'
            ) and not self.easysoft
        else:
            self.supports_unicode_binds = True
        # run other initialization which asks for user name, etc.
        super(PyODBCConnector, self).initialize(connection)

    def _dbapi_version(self):
        """Return the pyodbc version as a comparable tuple (or ())."""
        if not self.dbapi:
            return ()
        return self._parse_dbapi_version(self.dbapi.version)

    def _parse_dbapi_version(self, vers):
        """Parse e.g. '2.1.8-beta' into (2, 1, 8, 'beta'); () if unparsable."""
        m = re.match(
            r'(?:py.*-)?([\d\.]+)(?:-(\w+))?',
            vers
        )
        if not m:
            return ()
        vers = tuple([int(x) for x in m.group(1).split(".")])
        if m.group(2):
            vers += (m.group(2),)
        return vers

    def _get_server_version_info(self, connection):
        """Return the server version reported by SQL_DBMS_VER as a tuple."""
        dbapi_con = connection.connection
        version = []
        # Fix: use a raw string — '[.\-]' in a plain string is an invalid
        # escape sequence (DeprecationWarning, a future SyntaxError).
        r = re.compile(r'[.\-]')
        for n in r.split(dbapi_con.getinfo(self.dbapi.SQL_DBMS_VER)):
            try:
                version.append(int(n))
            except ValueError:
                # Non-numeric components (e.g. build tags) are kept as-is.
                version.append(n)
        return tuple(version)
|
# Prefer an installed setuptools; otherwise bootstrap it via ez_setup.
# BUG FIX: a stray `raise` in the except clause made the whole ez_setup
# fallback below unreachable dead code.
try:
    from setuptools import setup, find_packages
    from setuptools.command.test import test
    is_setuptools = True
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages  # noqa
    from setuptools.command.test import test     # noqa
    is_setuptools = False
import os
import sys
import codecs
# Set CELERY_COMPAT_PROGRAMS=0 to skip the legacy celeryd/celerybeat scripts.
CELERY_COMPAT_PROGRAMS = int(os.environ.get('CELERY_COMPAT_PROGRAMS', 1))
if sys.version_info < (2, 6):
    raise Exception('Celery 3.1 requires Python 2.6 or higher.')
# Modules from older layouts that must be purged before installing.
downgrade_packages = [
    'celery.app.task',
]
# Temporarily drop the current directory from sys.path so the *installed*
# celery (not this source tree) is imported by the upgrade loop below;
# orig_path is restored in that loop's `finally`.
orig_path = sys.path[:]
for path in (os.path.curdir, os.getcwd()):
    if path in sys.path:
        sys.path.remove(path)
# Best-effort removal of stale package layouts from a previous install.
try:
    import imp
    import shutil
    for pkg in downgrade_packages:
        try:
            parent, module = pkg.rsplit('.', 1)
            print('- Trying to upgrade %r in %r' % (module, parent))
            parent_mod = __import__(parent, None, None, [parent])
            _, mod_path, _ = imp.find_module(module, parent_mod.__path__)
            if mod_path.endswith('/' + module):
                print('- force upgrading previous installation')
                print(' - removing {0!r} package...'.format(mod_path))
                try:
                    shutil.rmtree(os.path.abspath(mod_path))
                except Exception:
                    # BUG FIX: sys.exc_info is a function -- it was referenced
                    # without the call parentheses, so formatting the message
                    # raised a TypeError instead of reporting the error.
                    sys.stderr.write('Could not remove {0!r}: {1!r}\n'.format(
                        mod_path, sys.exc_info()[1]))
        except ImportError:
            print('- upgrade %s: no old version found.' % module)
except Exception:
    # Cleanup is purely best-effort; never let it break installation.
    pass
finally:
    # Restore the import path saved before the cleanup.
    sys.path[:] = orig_path
NAME = 'celery'
entrypoints = {}
extra = {}
# Trove classifiers, one per line; parsed into `classifiers` below.
classes = """
Development Status :: 5 - Production/Stable
License :: OSI Approved :: BSD License
Topic :: System :: Distributed Computing
Topic :: Software Development :: Object Brokering
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Programming Language :: Python :: Implementation :: Jython
Operating System :: OS Independent
Operating System :: POSIX
Operating System :: Microsoft :: Windows
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
# Platform/interpreter feature flags used when selecting requirements.
PY3 = sys.version_info[0] == 3
JYTHON = sys.platform.startswith('java')
PYPY = hasattr(sys, 'pypy_version_info')
import re
re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)')
re_vers = re.compile(r'VERSION\s*=.*?\((.*?)\)')
re_doc = re.compile(r'^"""(.+?)"""')
rq = lambda s: s.strip("\"'")
def add_default(m):
    """Handle a ``__attr__ = value`` match: yield one (name, value) pair."""
    name, value = m.groups()
    return ((name, rq(value)),)
def add_version(m):
    """Handle a VERSION tuple match: dotted numeric parts plus any tag."""
    parts = [rq(part) for part in m.groups()[0].split(', ')]
    return (('VERSION', '.'.join(parts[0:3]) + ''.join(parts[3:])),)
def add_doc(m):
    """Handle a module docstring match: yield a single ('doc', text) pair."""
    (doc,) = m.groups()
    return (('doc', doc),)
# Dispatch table: pattern -> handler producing (key, value) pairs.
pats = {re_meta: add_default,
        re_vers: add_version,
        re_doc: add_doc}
here = os.path.abspath(os.path.dirname(__file__))
# Scan celery/__init__.py up to the '# -eof meta-' sentinel, collecting
# metadata without importing the package. Using a context manager instead
# of try/finally guarantees the file is closed.
meta = {}
with open(os.path.join(here, 'celery/__init__.py')) as meta_fh:
    for line in meta_fh:
        if line.strip() == '# -eof meta-':
            break
        for pattern, handler in pats.items():
            m = pattern.match(line.strip())
            if m:
                meta.update(handler(m))
py_version = sys.version_info
def strip_comments(l):
    """Return *l* without any trailing '#' comment, whitespace-stripped."""
    code, _hash, _comment = l.partition('#')
    return code.strip()
def reqs(*f):
    """Read a requirements file under ./requirements/ and return its
    non-empty, comment-stripped lines.

    BUG FIX: the file handle was previously opened inline and never
    closed; a context manager releases it deterministically.
    """
    path = os.path.join(os.getcwd(), 'requirements', *f)
    with open(path) as fh:
        return [r for r in (strip_comments(l) for l in fh.readlines()) if r]
install_requires = reqs('default.txt')
if JYTHON:
    # Jython needs a few extra backport packages.
    install_requires.extend(reqs('jython.txt'))
tests_require = reqs('test3.txt' if PY3 else 'test.txt')
if os.path.exists('README.rst'):
    # BUG FIX: the codecs handle was opened inline and never closed.
    with codecs.open('README.rst', 'r', 'utf-8') as readme:
        long_description = readme.read()
else:
    long_description = 'See http://pypi.python.org/pypi/celery'
# Console entry points; the celeryd/celerybeat/celeryd-multi aliases are
# legacy names kept for backwards compatibility (see CELERY_COMPAT_PROGRAMS).
console_scripts = entrypoints['console_scripts'] = [
    'celery = celery.__main__:main',
]
if CELERY_COMPAT_PROGRAMS:
    console_scripts.extend([
        'celeryd = celery.__main__:_compat_worker',
        'celerybeat = celery.__main__:_compat_beat',
        'celeryd-multi = celery.__main__:_compat_multi',
    ])
if is_setuptools:
    # Each optional feature maps to a requirements file in requirements/extras/.
    extras = lambda *p: reqs('extras', *p)
    extra['extras_require'] = {
        # Celery specific
        'auth': extras('auth.txt'),
        'cassandra': extras('cassandra.txt'),
        'memcache': extras('memcache.txt'),
        'couchbase': extras('couchbase.txt'),
        'threads': extras('threads.txt'),
        'eventlet': extras('eventlet.txt'),
        'gevent': extras('gevent.txt'),
        'msgpack': extras('msgpack.txt'),
        'yaml': extras('yaml.txt'),
        'redis': extras('redis.txt'),
        'mongodb': extras('mongodb.txt'),
        'sqs': extras('sqs.txt'),
        'couchdb': extras('couchdb.txt'),
        'beanstalk': extras('beanstalk.txt'),
        'zookeeper': extras('zookeeper.txt'),
        'zeromq': extras('zeromq.txt'),
        'sqlalchemy': extras('sqlalchemy.txt'),
        'librabbitmq': extras('librabbitmq.txt'),
        'pyro': extras('pyro.txt'),
        'slmq': extras('slmq.txt'),
    }
# Final distribution definition, assembled from the metadata extracted
# out of celery/__init__.py and the requirements files above.
setup(
    name=NAME,
    version=meta['VERSION'],
    description=meta['doc'],
    author=meta['author'],
    author_email=meta['contact'],
    url=meta['homepage'],
    platforms=['any'],
    license='BSD',
    packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']),
    zip_safe=False,
    install_requires=install_requires,
    tests_require=tests_require,
    test_suite='nose.collector',
    classifiers=classifiers,
    entry_points=entrypoints,
    long_description=long_description,
    **extra)
|
from sqlalchemy import Column, Integer, String
from datetime import date
import base
from assetjet.cfg import db
from sqlalchemy.orm import sessionmaker
import json
class Asset(base.ModelBase, base.ajModel):
    """SQLAlchemy model for the 'assets' table.

    An asset is identified by its code (``cd``) and carries a display
    name and a GICS sector id.
    """
    __tablename__ = 'assets'
    cd = Column(String, primary_key=True)  # asset code (primary key)
    name = Column(String)  # human-readable asset name
    gicssectorid = Column(Integer)  # GICS sector identifier
    def __init__(self, cd, name, gicssectorid):
        self.cd = cd
        self.name = name
        self.gicssectorid = gicssectorid
    def __repr__(self):
        # JSON-encode the instance __dict__.
        # NOTE(review): on a session-attached instance __dict__ also holds
        # SQLAlchemy internals (_sa_instance_state) that are not JSON
        # serializable -- confirm this repr is only used on plain objects.
        return json.JSONEncoder().encode(self.__dict__)
        #return "<Asset('%s','%s', '%s')>" % (self.cd, self.name, self.gicssectorid)
|
import os, zipfile
def backupToZip(folder):
    """
    Create a backup archive of the entire contents of *folder*.

    The archive is named '<folder>_<N>.zip' where N is the first number
    for which no such file exists yet, so repeated runs create new
    backups instead of overwriting old ones.
    """
    folder = os.path.abspath(folder)
    # Find the first unused archive name.
    number = 1
    while True:
        zipFilename = os.path.basename(folder) + '_' + str(number) + '.zip'
        if not os.path.exists(zipFilename):
            break
        number += 1
    print('Создается файл %s...' % (zipFilename))
    # BUG FIX: use a context manager so the archive is closed (and flushed)
    # even if os.walk or write() raises midway.
    with zipfile.ZipFile(zipFilename, 'w') as backupZip:
        for foldername, subfolders, filenames in os.walk(folder):
            print('Добавления файла из папки %s...' % (foldername))
            # Add the current folder itself to the archive.
            backupZip.write(foldername)
            # Add every file from this folder.
            for filename in filenames:
                newBase = os.path.basename(folder) + '_'
                if filename.startswith(newBase) and filename.endswith('.zip'):
                    continue  # skip previously created backup archives
                backupZip.write(os.path.join(foldername, filename))
    print('Все готово!!!')
if __name__ == '__main__':
    # Interactive entry point: ask for a folder path and archive it.
    # (The prompt is in Russian, matching the script's other output.)
    n = input('Введите путь к папке для создания из нее Zip-архива \n>>>')
    backupToZip(n)
|
from twisted.internet import defer, task, reactor, protocol
from nevow import inevow, rend, tags, loaders, flat, athena, stan, guard
from twisted.web import http, resource, static, server
import time
from common import permissionDenied, uni
from corepost import Response, NotFoundException, AlreadyExistsException
from corepost.web import RESTResource, route, Http
from corepost.convert import convertForSerialization, generateXml, convertToJson
from corepost.enums import MediaType, HttpHeader
import yaml
from functools import wraps, partial
from twisted.web.http import Request
from dmlib.utils import genutils
import logging, os, sys
# Module-level logger and the directory containing the running script.
log = logging.getLogger( 'Webgui' )
curdir=os.path.abspath(os.path.dirname(sys.argv[0]))
class RestPages(rend.Page):
    """Top-level REST dispatcher: routes the /v1.0 and /v1.2 API versions."""
    def child_(self, ctx):
        # Empty child segment is never served.
        return permissionDenied()
    def childFactory(self, ctx, name):
        # Unknown children are denied rather than 404'd.
        log.debug("No child found (%s)" % name)
        return permissionDenied()
    def locateChild(self, ctx, segments):
        # Dispatch on the version segment; core and session are propagated
        # to the per-version page object.
        name=segments[0]
        session = inevow.ISession(ctx)
        if '.' in name:
            if name=='v1.0':
                r=RestV10Pages()
                r.core=self.core
                r.session=session
                return r, segments[1:]
            if name=='v1.2' and len(segments)>1:
                r=RestV12Pages()
                r.core=self.core
                r.session=session
                request = inevow.IRequest(ctx)
                # corepost routing consumes request.postpath.
                request.postpath=list(segments[1:])
                return r, segments[1:]
        # NOTE(review): the base method expects the *segments* tuple here,
        # but a single string is passed -- confirm this is intentional.
        return rend.Page.locateChild(self, ctx, name)
class ResponseConversion(object):
    """Serialize an entity into a corepost Response.

    The output format is chosen by an explicit *ctype* ('json', 'jsonp',
    'xml', 'yaml') or, failing that, negotiated from the request path
    suffix and the Accept header (JSON is the default).
    """
    def __init__(self, request, code=200, entity=None, headers=None, ctype=None):
        self.code = code
        self.entity = entity if entity is not None else ""
        # BUG FIX: the default used to be a shared mutable {} which the
        # __convertTo* helpers mutated, leaking headers between unrelated
        # responses; create a fresh dict per instance instead.
        self.headers = {} if headers is None else headers
        self.request = request
        self.ctype = ctype
        self.serialized = convertForSerialization(self.entity)
        if ctype == 'json':
            self.__convertToJson()
        elif ctype == 'jsonp':
            self.__convertToJsonp()
        elif ctype == 'xml':
            self.__convertToXml()
        elif ctype == 'yaml':
            self.__convertToYaml()
        else:
            self.__automagickConversion()
    def __convertToJson(self):
        self.headers[HttpHeader.CONTENT_TYPE] = MediaType.APPLICATION_JSON
        self.response = Response(self.code, convertToJson(self.serialized), self.headers)
    def __convertToJsonp(self):
        # JSONP: wrap the JSON payload in the caller-supplied callback name.
        self.headers[HttpHeader.CONTENT_TYPE] = MediaType.APPLICATION_JSON
        callback = ""
        if 'callback' in self.request.args.keys():
            callback = self.request.args['callback'][0]
        self.response = Response(self.code, callback + "(" + convertToJson(self.serialized) + ")", self.headers)
    def __convertToXml(self):
        self.headers[HttpHeader.CONTENT_TYPE] = MediaType.APPLICATION_XML
        self.response = Response(self.code, generateXml(self.serialized), self.headers)
    def __convertToYaml(self):
        self.headers[HttpHeader.CONTENT_TYPE] = MediaType.TEXT_YAML
        self.response = Response(self.code, yaml.dump(self.serialized), self.headers)
    def __automagickConversion(self):
        # Path suffix wins over the Accept header; JSON is the fallback.
        if self.request.path.endswith('/json'):
            self.__convertToJson()
        elif self.request.path.endswith('/jsonp'):
            self.__convertToJsonp()
        elif self.request.path.endswith('/xml'):
            self.__convertToXml()
        elif self.request.path.endswith('/yaml'):
            self.__convertToYaml()
        elif HttpHeader.ACCEPT in self.request.received_headers:
            accept = self.request.received_headers[HttpHeader.ACCEPT]
            if MediaType.APPLICATION_JSON in accept:
                self.__convertToJson()
            elif MediaType.TEXT_YAML in accept:
                self.__convertToYaml()
            elif MediaType.APPLICATION_XML in accept or MediaType.TEXT_XML in accept:
                self.__convertToXml()
            else:
                self.__convertToJson()
        else:
            self.__convertToJson()
    def getResponse(self):
        """Return the built corepost Response object."""
        return self.response
def wrapResponse(f=None, uri=False, res_filter=None, *a, **kw):
    """Decorator that wraps a handler's (possibly deferred) result in the
    standard {'result': ..., 'data': ..., 'ts': ...} envelope.

    Usable bare (``@wrapResponse``) or with arguments
    (``@wrapResponse(uri=..., res_filter=...)``). *res_filter* is a
    callable, or iterable of callables, applied to the payload before it
    is enveloped.
    """
    if f and not callable(f):
        # Called as @wrapResponse('some/uri'): the first positional
        # argument is actually the uri, not the wrapped function.
        uri = f
        f = None
    if f is None:
        # Called with arguments only: return the actual decorator.
        # BUG FIX: res_filter must be forwarded explicitly here --
        # previously it was silently dropped whenever the decorator was
        # used with arguments.
        return partial(wrapResponse, uri=uri, res_filter=res_filter, *a, **kw)
    def okResponse(res, u):
        # Success path: apply filters, then build the success envelope.
        if isinstance(res, ResponseConversion):
            entity = res.entity
            if res_filter and callable(res_filter):
                entity = res_filter(entity)
            elif res_filter and hasattr(res_filter, '__iter__'):
                for fil in res_filter:
                    if callable(fil):
                        entity = fil(entity)
            entity = {'result': 'succeed', 'data': entity, 'ts': time.time()}
            if int(res.code) >= 400:
                # HTTP error codes are reported as failures in the envelope.
                entity['result'] = 'fail'
            if uri:
                entity['uri'] = uri
            elif u:
                entity['uri'] = u
            r = ResponseConversion(res.request, res.code, entity, res.headers, res.ctype).getResponse()
        else:
            if res_filter and callable(res_filter):
                res = res_filter(res)
            elif res_filter and hasattr(res_filter, '__iter__'):
                for fil in res_filter:
                    if callable(fil):
                        res = fil(res)
            r = {'result': 'succeed', 'data': res, 'ts': time.time()}
            if uri:
                r['uri'] = uri
            elif u:
                r['uri'] = u
        return r
    def errorResponse(res, u):
        # Failure path: same envelope shape with result='fail'.
        if isinstance(res, ResponseConversion):
            entity = {'result': 'fail', 'data': res.entity, 'ts': time.time()}
            if uri:
                entity['uri'] = uri
            elif u:
                entity['uri'] = u
            r = ResponseConversion(res.request, res.code, entity, res.headers, res.ctype).getResponse()
        else:
            r = {'result': 'fail', 'data': res, 'ts': time.time()}
            if uri:
                r['uri'] = uri
            elif u:
                r['uri'] = u
        return r
    @wraps(f)
    def decorate(*a, **kw):
        # Record the request uri (if the second positional is a Request)
        # so the envelope can echo it back.
        ruri = False
        if len(a) > 1 and isinstance(a[1], Request):
            ruri = a[1].uri
        ret = defer.maybeDeferred(f, *a, **kw)
        ret.addCallback(okResponse, ruri)
        ret.addErrback(errorResponse, ruri)
        return ret
    return decorate
class RestCore(object):
    """Shared plumbing for v1.2 REST resources.

    Holds the core facade and the user session, converts deferred results
    into ResponseConversion objects, and parses request arguments from
    the query string or a JSON/XML/YAML body.
    """
    path = ""
    def __init__(self, core, session):
        self.core = core
        self.session = session
    def callbackResponse(self, d, request, search=False):
        """Chain *d* into a ResponseConversion: 200 on data, 404 on an
        empty/falsy (non-zero) result unless *search*, 500 on failure."""
        def okResponse(res):
            if (('__len__' in dir(res) and len(res) == 0) or (not res and res != 0)) and not search:
                return ResponseConversion(request, code=404, entity=res)
            return ResponseConversion(request, code=200, entity=res)
        def errorResponse(res):
            return ResponseConversion(request, code=500, entity='Server Error')
        return d.addCallbacks(okResponse, errorResponse)
    def positiveCallback(self, d, request, search=False):
        """Like callbackResponse, but a numeric zero result is also 404."""
        def okResponse(res):
            if (('__len__' in dir(res) and len(res) == 0) or not res or (genutils.is_number(res) and res == 0)) and not search:
                return ResponseConversion(request, code=404, entity=res)
            return ResponseConversion(request, code=200, entity=res)
        def errorResponse(res):
            return ResponseConversion(request, code=500, entity='Server Error')
        return d.addCallbacks(okResponse, errorResponse)
    def _getRequestArgs(self, request):
        """Merge query-string arguments with any JSON/XML/YAML body into a
        single dict; raises TypeError if the body cannot be parsed."""
        rargs = {}
        for k in request.args.keys():
            rargs[k] = request.args[k][0]
        if len(rargs.keys()) == 0 and request.method == "PUT":
            if request.content.getvalue() != "":
                try:
                    # NOTE: workaround for PUT requests arriving with empty args
                    r = http.parse_qs(request.content.getvalue(), keep_blank_values=1)
                    for k in r.keys():
                        rargs[k] = r[k][0]
                except Exception:
                    pass
        if request.method in (Http.POST, Http.PUT) and HttpHeader.CONTENT_TYPE in request.received_headers.keys():
            contentType = request.received_headers["content-type"]
            if contentType.split(";")[0] == MediaType.APPLICATION_JSON:
                try:
                    request.json = json.loads(request.data) if request.data else {}
                    try:
                        # dict.items() concatenation: Python 2 semantics.
                        r = dict(rargs.items() + request.json.items())
                    except Exception:
                        # Non-mapping JSON payload: return it as-is.
                        r = request.json
                except Exception as ex:
                    raise TypeError("Unable to parse JSON body: %s" % ex)
            elif contentType.split(";")[0] in (MediaType.APPLICATION_XML, MediaType.TEXT_XML):
                try:
                    # NOTE(review): ElementTree is not among this module's
                    # visible imports -- confirm it is in scope at runtime.
                    request.xml = ElementTree.XML(request.data)
                    try:
                        r = dict(rargs.items() + request.xml.items())
                    except Exception:
                        r = request.xml
                except Exception as ex:
                    raise TypeError("Unable to parse XML body: %s" % ex)
            elif contentType.split(";")[0] == MediaType.TEXT_YAML:
                try:
                    request.yaml = yaml.safe_load(request.data)
                    try:
                        # BUG FIX: merge the parsed YAML mapping -- this used
                        # to call request.xml.yaml(), a copy-paste from the
                        # XML branch that always raised and fell through.
                        r = dict(rargs.items() + request.yaml.items())
                    except Exception:
                        r = request.yaml
                except Exception as ex:
                    raise TypeError("Unable to parse YAML body: %s" % ex)
            else:
                r = rargs
        else:
            r = rargs
        return r
class BaseRest(RestCore):
    """Root endpoints of the v1.2 API: welcome banner and liveness checks."""
    @route("/")
    def welcome(self, request, *a, **kw):
        """Plain-text welcome banner."""
        return 'Welcome to the Domotika REST API v1.2'
    @route("/keepalive")
    @wrapResponse
    def keepAlive(self, request, *a, **kw):
        """Current server time, for client keep-alive polling."""
        now = time.time()
        return {'time': now}
    @route("/daemonstatus")
    @wrapResponse
    def daemonStatus(self, request, *a, **kw):
        """Daemon status snapshot, content-negotiated via ResponseConversion."""
        status = self.core.getDaemonStatus()
        return ResponseConversion(request, entity=status)
class BoardRest(RestCore):
    """Board management endpoints: autodetection and synchronization."""
    path = "boards"
    @route("/")
    def boardlist(self, request, *a, **kw):
        """Placeholder listing endpoint."""
        return 'boardlist'
    @route("/forceautodetect")
    @wrapResponse
    def boardForceAutodetect(self, request, *a, **kw):
        """Start a forced board auto-detection cycle."""
        self.core.startAutoDetection(True)
        ack = ResponseConversion(request, entity='OK')
        return ack
    @route("/autodetect")
    @wrapResponse
    def boardAutodetect(self, request, *a, **kw):
        """Start a normal board auto-detection cycle."""
        self.core.startAutoDetection()
        ack = ResponseConversion(request, entity='OK')
        return ack
    @route("/syncall")
    @wrapResponse
    def boardSyncAll(self, request, *a, **kw):
        """Synchronize every known board."""
        self.core.startSync()
        ack = ResponseConversion(request, entity='OK')
        return ack
    @route("/syncboardbyid/<int:boardid>")
    @wrapResponse
    def syncBoardById(self, request, boardid):
        """Synchronize a single board by id."""
        self.core.startSync(boardid)
        ack = ResponseConversion(request, entity='OK')
        return ack
    @route("/pushboardbyid/<int:boardid>")
    @wrapResponse
    def pushBoardById(self, request, boardid):
        """Push the stored configuration to a single board by id."""
        self.core.startPush(boardid)
        ack = ResponseConversion(request, entity='OK')
        return ack
class CronRest(RestCore):
    """Timer (cron) endpoints -- placeholder implementation."""
    path = "timers"
    @route("/")
    def timerlist(self, request, *a, **kw):
        """Placeholder listing endpoint."""
        return 'timerlist'
class UserRest(RestCore):
    """User endpoints: listing, lookup, and self-service profile updates."""
    path="users"
    @route("/")
    @wrapResponse
    def userlist(self, request, *a, **kw):
        """All users (search semantics: empty list is not a 404)."""
        return self.callbackResponse(self.core.getAllUsers(), request, search=True)
    @route("/userbyname/<username>/", Http.GET)
    @wrapResponse
    def userbyname(self, request=None, username='', *a, **kw):
        """Lookup of a single user by name."""
        return self.callbackResponse(self.core.getUserFromName(username), request)
    @route("/refreshme")
    @wrapResponse
    def refreshme(self, request=None,*a, **kw):
        """Reload the logged-in user from the DB and refresh the session copy."""
        def setUserSession(res):
            # Copy the freshly loaded preferences into the session's perms.
            self.session.mind.perms.gui_theme=res.gui_theme
            self.session.mind.perms.email=res.email
            self.session.mind.perms.tts=res.tts
            self.session.mind.perms.language=res.language
            self.session.mind.perms.slide=res.slide
            self.session.mind.perms.webspeech=res.webspeech
            self.session.mind.perms.speechlang=res.speechlang
            self.session.mind.perms.left_bar=res.left_bar
            self.session.mind.perms.right_bar=res.right_bar
            return res
        log.info('Refresh session for user '+str(self.session.mind.perms.username))
        d=self.core.getUserFromName(self.session.mind.perms.username).addCallback(setUserSession)
        return self.callbackResponse(d, request)
    @route("/me")
    @wrapResponse
    def getme(self, request=None,*a, **kw):
        """Profile of the currently logged-in user."""
        return self.callbackResponse(self.core.getUserFromName(self.session.mind.perms.username), request)
    @route("/me", Http.PUT)
    @wrapResponse
    def setme(self, request=None,*a, **kw):
        """Update the logged-in user's profile from the request arguments.

        Unknown/invalid values fall back to the defaults below; the update
        is only attempted when the three mandatory fields are present.
        """
        def onOk(res):
            log.info(res)
            return self.callbackResponse(self.core.getUserFromName(self.session.mind.perms.username), request)
        def onError(res):
            log.info(res)
            return ResponseConversion(request, code=404, entity="User not found")
        log.info("REST Update user "+str(self.session.mind.perms.username))
        r = self._getRequestArgs(request)
        # Defaults applied when a field is absent or fails validation.
        pwd=False
        tts=False
        lang="it"
        slide=False
        webspeech="touch"
        speechlang="it-IT"
        theme='dmblack'
        leftb='hidden-sm'
        rightb='hidden-sm'
        if 'lang' in r.keys():
            lang=r['lang']
        if 'tts' in r.keys():
            tts=True
        if 'passwd' in r.keys() and r['passwd'] != "":
            pwd=r['passwd']
        if 'slide' in r.keys():
            slide=True
        if 'gui_theme' in r.keys():
            theme=str(r['gui_theme'])
        if 'webspeech' in r.keys() and r['webspeech'] in ['no','touch','continuous']:
            webspeech=r['webspeech']
        if 'speechlang' in r.keys() and r['speechlang'] in ['it-IT','it-CH','en-US','en-GB']:
            speechlang=r['speechlang']
        if 'leftb' in r.keys() and r['leftb'] in ['all','none','visible-sm','visible-md','visible-lg','hidden-sm','hidden-md','hidden-lg']:
            leftb=str(r['leftb'])
        if 'rightb' in r.keys() and r['rightb'] in ['all','none','visible-sm','visible-md','visible-lg','hidden-sm','hidden-md','hidden-lg']:
            rightb=str(r['rightb'])
        # email, desktop_homepath and mobile_homepath are mandatory.
        if 'desktop_homepath' in r.keys() and 'mobile_homepath' in r.keys() and 'email' in r.keys():
            return self.core.updateUserData(self.session.mind.perms.username, pwd,
                r['email'], r['desktop_homepath'], r['mobile_homepath'],
                tts, lang, slide, webspeech, speechlang, theme, leftb, rightb).addCallbacks(onOk, onError)
        log.info('Erroneous request on update my userdata! ('+str(self.session.mind.perms.username)+')')
        return ResponseConversion(request, code=400, entity="Bad request - error in parameters")
class ActionRest(RestCore):
    """Action endpoints: trigger actions and feed speech-recognition text."""
    path="actions"
    @route("/speech_text", Http.POST)
    @wrapResponse
    def speechText(self, request, *a, **kw):
        """Forward recognized speech text (with optional confidence) to the core."""
        r = self._getRequestArgs(request)
        confidence = 1.0
        if 'confidence' in r.keys():
            confidence = float(r['confidence'])
        if 'text' in r.keys():
            # The deferred result is returned unchanged; the former
            # .addCallback(lambda res: res) was a no-op and has been removed.
            return self.core.voiceReceived(r['text'], confidence)
        return ResponseConversion(request, code=500, entity="No text in request")
    @route("/setbyid/<int:aid>",(Http.GET,Http.POST))
    @wrapResponse
    def setById(self, request, aid, *a, **kw):
        """Trigger the action with id *aid*."""
        return self.callbackResponse(self.core.setActionById(aid), request)
class NotifyRest(RestCore):
    """Per-user notification endpoints: list, count and mark-as-read."""
    path="notifications"
    @route("/", Http.GET)
    @wrapResponse
    def notificationList(self, request, *a, **kw):
        # All notifications for the logged-in user (empty list is not 404).
        return self.callbackResponse(self.core.getNotifications(self.session.mind.perms.username), request, search=True)
    @route("/", Http.DELETE)
    @wrapResponse
    def markAllRead(self, request, *a, **kw):
        # '*' marks every notification as read.
        return self.callbackResponse(self.core.markReadNotifications(self.session.mind.perms.username, '*'), request, search=False)
    @route("/<int:nid>", Http.DELETE)
    @wrapResponse
    def markRead(self, request, nid=0, *a, **kw):
        # positiveCallback: affecting zero rows is reported as 404.
        return self.positiveCallback(self.core.markReadNotifications(self.session.mind.perms.username, nid), request, search=False)
    @route("/after/<float:fromts>",Http.GET)
    @wrapResponse
    def notificationsFromTime(self, request, fromts=0, *a, **kw):
        # Notifications newer than the given timestamp.
        return self.callbackResponse(self.core.getNotifications(self.session.mind.perms.username, fromts), request, search=True)
    @route("/count",Http.GET)
    @wrapResponse
    def notificationCount(self, request, *a, **kw):
        # Count only (usecount=True).
        return self.callbackResponse(self.core.getNotifications(self.session.mind.perms.username, usecount=True), request)
    @route("/after/<float:fromts>/count",Http.GET)
    @wrapResponse
    def notificationsFromTimeCount(self, request, fromts=0, *a, **kw):
        # Count of notifications newer than the given timestamp.
        return self.callbackResponse(self.core.getNotifications(self.session.mind.perms.username, fromts, usecount=True), request)
class RelayRest(RestCore):
    """Relay switching endpoints: default pulse, on, off and toggle by id."""
    path = "relays"
    @route("/setbyid/<int:rid>",(Http.GET,Http.POST))
    @wrapResponse
    def setById(self, request, rid, *a, **kw):
        """Actuate relay *rid* with the default command."""
        pending = self.core.setRelayById(rid)
        return self.callbackResponse(pending, request)
    @route("/setbyid/<int:rid>/on",(Http.GET,Http.POST))
    @wrapResponse
    def setOnById(self, request, rid, *a, **kw):
        """Switch relay *rid* on."""
        pending = self.core.setRelayById(rid, 'on')
        return self.callbackResponse(pending, request)
    @route("/setbyid/<int:rid>/off",(Http.GET,Http.POST))
    @wrapResponse
    def setOffById(self, request, rid, *a, **kw):
        """Switch relay *rid* off."""
        pending = self.core.setRelayById(rid, 'off')
        return self.callbackResponse(pending, request)
    @route("/setbyid/<int:rid>/change",(Http.GET,Http.POST))
    @wrapResponse
    def setChangeById(self, request, rid, *a, **kw):
        """Toggle relay *rid*."""
        pending = self.core.setRelayById(rid, 'change')
        return self.callbackResponse(pending, request)
class ChartRest(RestCore):
    """Chart data endpoints (mostly unimplemented placeholders)."""
    path = "charts"
    @route("/", (Http.GET))
    @wrapResponse
    def charts(self, request, *a, **kw):
        """Chart listing -- not implemented yet."""
        return ResponseConversion(request, code=404, entity="Not yet implemented")
    @route("/chartbyid/<int:cid>", (Http.GET))
    @wrapResponse
    def chartById(self, request, cid, *a, **kw):
        """Chart lookup by numeric id -- not implemented yet."""
        return ResponseConversion(request, code=404, entity="Not yet implemented (chart by id)")
    @route("/chartbyname/<chartname>", (Http.GET))
    @wrapResponse
    def chartByName(self, request, chartname, *a, **kw):
        """Data series for the chart named *chartname*."""
        pending = self.core.getChartData(chartname)
        return self.callbackResponse(pending, request)
class ClimaRest(RestCore):
    """Climate-control endpoints: global status, thermostats and programs."""
    path="clima"
    @route("/status",(Http.GET))
    @wrapResponse
    def getStatus(self, request, *a, **kw):
        # Current global climate status.
        return self.callbackResponse(self.core.getClimaStatus(), request)
    @route("/status",(Http.PUT,Http.POST))
    @wrapResponse
    def setStatus(self, request, *a, **kw):
        # Requires a 'status' argument naming the new climate mode.
        r = self._getRequestArgs(request)
        if 'status' in r.keys():
            statusname=r['status']
            return self.callbackResponse(self.core.setClimaStatus(statusname), request)
        return ResponseConversion(request, code=500, entity="No status in request")
    @route("/thermostat/<thermostat>",(Http.GET))
    @wrapResponse
    def getThermostatStatus(self, request, thermostat, *a, **kw):
        return self.callbackResponse(self.core.getThermostat(thermostat), request)
    @route("/thermostat/<thermostat>",(Http.PUT,Http.POST))
    @wrapResponse
    def setThermostat(self, request, thermostat, *a, **kw):
        # Optional 'function' (manual/program) and numeric 'set' point;
        # False means "leave unchanged" for both.
        r = self._getRequestArgs(request)
        func=False
        if 'function' in r.keys() and r['function'] in ['manual','program']:
            func=r['function']
        setval=False
        if 'set' in r.keys() and genutils.is_number(r['set']):
            setval=r['set']
        return self.callbackResponse(self.core.setThermostat(thermostat, func, setval), request)
    @route("/program/<thermostat>/<climastatus>",(Http.GET))
    @wrapResponse
    def getProgram(self, request, thermostat, climastatus, *a, **kw):
        return self.callbackResponse(self.core.getThermostatProgram(thermostat, climastatus), request)
    @route("/program/<thermostat>/<climastatus>",(Http.PUT,Http.POST))
    @wrapResponse
    def setProgram(self, request, thermostat, climastatus, *a, **kw):
        # The whole argument dict is passed through as the program definition.
        r = self._getRequestArgs(request)
        return self.callbackResponse(self.core.setThermostatProgram(thermostat, climastatus, r), request)
# All resource classes exposed by the v1.2 API; instantiated per request
# in RestV12Pages.childFactory.
RESTv12LIST=(
    UserRest,
    CronRest,
    BoardRest,
    BaseRest,
    ActionRest,
    NotifyRest,
    RelayRest,
    ChartRest,
    ClimaRest,
    )
class RestV12Pages(rend.Page):
    """Nevow page adapting the corepost v1.2 resources to nevow routing."""
    def child_(self, ctx):
        # Bare /v1.2/ -> serve only the base resource.
        request = inevow.IRequest(ctx)
        request.setHeader("pragma", "no-cache")
        request.postpath=['/']
        return RESTResource((BaseRest(self.core, self.session),))
    def childFactory(self, ctx, name):
        # /v1.2/<name>/... -> rebuild postpath and hand off to corepost routing.
        request = inevow.IRequest(ctx)
        request.setHeader("pragma", "no-cache")
        request.postpath=['/',name]+request.postpath
        return RESTResource([x(self.core, self.session) for x in RESTv12LIST])
import json
import dmjson as dmj
def dbrelay(dbobj):
    """Serialize relay DB rows into a list of plain hashes."""
    return [row.toHash(['id','board_name','board_ip','outnum','outtype','ctx','act','msgtype','dynamic','relnum','domain','websection','button_name','active','position'])
            for row in dbobj]
def dbaction(dbobj):
    """Serialize action DB rows into a list of plain hashes."""
    return [row.toHash(['id','rcv_dst','rcv_msgtype','rcv_ctx','rcv_act','use_rcv_arg','rcv_arg','execute','command','ikapacket','ikap_src','ikap_dst','ikap_msgtype','ikap_ctx','ikap_act','ikap_arg','launch_sequence','launch_sequence_name','websection','button_name','local_only','active','position','ipdest'])
            for row in dbobj]
def dbinput(dbobj):
    """Serialize input DB rows into a list of plain hashes."""
    return [row.toHash(['id','board_name','board_ip','inpnum','dynamic','websection','button_name','active','position','inpname'])
            for row in dbobj]
def relstatus(dbobj, ts=True, res=None):
    """Append relay-status update commands to *res* and return it.

    A truthy *ts* first appends an 'updatets' command carrying the
    timestamp; any rows in *dbobj* are serialized into one
    'updaterelays' command.

    BUG FIX: the default used to be a mutable ``res=[]``, so results
    accumulated across successive calls sharing the default list.
    """
    if res is None:
        res = []
    if ts:
        res.append({'command': 'updatets', 'data': ts})
    if len(dbobj) > 0:
        dbret = {'command': 'updaterelays', 'data': []}
        for ret in dbobj:
            dbret['data'].append(ret.toHash(['id', 'buttonid', 'board_name','board_ip','outnum','ctx','outtype','relnum','status','lastupdate']))
        res.append(dbret)
    return res
def inputstatus(dbobj, ts=True, res=None):
    """Append input-status update commands to *res* and return it.

    Mirrors relstatus() for board inputs: a truthy *ts* appends an
    'updatets' command, then rows become one 'updateinputs' command.

    BUG FIX: the default used to be a mutable ``res=[]``, so results
    accumulated across successive calls sharing the default list.
    """
    if res is None:
        res = []
    if ts:
        res.append({'command': 'updatets', 'data': ts})
    if len(dbobj) > 0:
        dbret = {'command': 'updateinputs', 'data': []}
        for ret in dbobj:
            dbret['data'].append(ret.toHash(['id', 'buttonid', 'board_name','board_ip','inpnum','status','lastupdate']))
        res.append(dbret)
    return res
class RestV10Pages(rend.Page):
    """Legacy v1.0 REST API: nevow child_* handlers returning JSON strings.

    NOTE(review): uses the py2-only ``unicode`` builtin, so this module
    targets Python 2.
    """
    def child_(self, ctx):
        # Empty child segment is never served.
        return permissionDenied()
    def childFactory(self, ctx, name):
        # Unknown children are denied rather than 404'd.
        log.debug("No child found (%s)" % name)
        return permissionDenied()
    def child_keepAlive(self, ctx):
        # Liveness probe: current server time as a string.
        return str(time.time())
    def child_relayStatus(self, ctx):
        """Relay states changed since the client-supplied 'ts' argument."""
        ret=[]
        wts=0
        ts=int(time.time())-1
        request = inevow.IRequest(ctx)
        request.setHeader("pragma", "no-cache")
        log.debug(request)
        log.debug(request.args)
        # Side effect: fire any 'command' query parameters first.
        self.manageCommands(request)
        wts=0
        try:
            # Accept only a numeric 'ts'; otherwise fall back to 0 (all).
            if 'ts' in request.args and unicode(request.args['ts'][0]).isnumeric():
                wts=int(request.args['ts'][0])
        except:
            pass
        rs = self.core.getRelays(wts)
        return rs.addCallback(relstatus, ts, ret).addCallback(dmj.jsonize)
    def child_inputStatus(self, ctx):
        """Input states changed since the client-supplied 'ts' argument."""
        ret=[]
        wts=0
        ts=int(time.time())-1
        request = inevow.IRequest(ctx)
        request.setHeader("pragma", "no-cache")
        log.debug(request)
        log.debug(request.args)
        self.manageCommands(request)
        try:
            if 'ts' in request.args and unicode(request.args['ts'][0]).isnumeric():
                wts=int(request.args['ts'][0])
        except:
            pass
        ist=self.core.getInputs(wts)
        return ist.addCallback(inputstatus, ts, ret).addCallback(dmj.jsonize)
    def _getIOStatus(self, d, ts):
        # Second stage of child_getIOStatus: append relay status (no new
        # 'updatets' entry) and serialize the combined payload.
        rs = self.core.getRelays(ts)
        rs.addCallback(relstatus, False, d)
        return rs.addCallback(dmj.jsonize)
    def child_getIOStatus(self, ctx):
        """Combined action/input/relay status since 'ts'."""
        #ret=[]
        ret=self.core.getActionStatus()
        wts=0
        ts=int(time.time())-1
        request = inevow.IRequest(ctx)
        request.setHeader("pragma", "no-cache")
        log.debug(request)
        log.debug(request.args)
        self.manageCommands(request)
        try:
            if 'ts' in request.args and unicode(request.args['ts'][0]).isnumeric():
                wts=int(request.args['ts'][0])
        except:
            pass
        ist=self.core.getInputs(wts)
        ist.addCallback(inputstatus, ts, ret).addCallback(self._getIOStatus, wts)
        return ist
    def manageCommands(self,req):
        # Fire each 'command' query parameter as a core action (side effect).
        if 'command' in req.args:
            for cmd in req.args['command']:
                self.core.sendAction(cmd)
    def child_asteriskAction(self, ctx):
        # Trigger an Asterisk (PBX) action; returns None on success,
        # 'Error' on any failure (missing args included).
        request = inevow.IRequest(ctx)
        try:
            self.core.asteriskAction(
                request.args['ext'][0],
                request.args['context'][0])
        except:
            return 'Error'
    def child_asteriskAliases(self, ctx):
        # Lookup of Asterisk aliases for an extension/context pair.
        request = inevow.IRequest(ctx)
        try:
            return self.core.asteriskAliases(
                request.args['ext'][0],
                request.args['context'][0])
        except:
            return 'Error'
    def child_speechAction(self, ctx):
        # Feed recognized speech text (with optional confidence) to the core.
        request = inevow.IRequest(ctx)
        confidence = 1.0
        if 'confidence' in request.args.keys():
            confidence = float(request.args['confidence'][0])
        try:
            return self.core.voiceReceived(request.args['text'][0], confidence).addCallback(lambda res: res[0])
        except:
            return 'Error'
    def child_motionDetection(self, ctx):
        # Camera motion-detection event hook; plain OK/KO reply.
        request = inevow.IRequest(ctx)
        try:
            self.core.father.motionDetection(
                int(request.args['type'][0]),
                int(request.args['status'][0]),
                request.args['camera'][0],
                request.args['zone'][0]
                )
            return 'OK'
        except:
            return 'KO'
    def child_uiCommands(self, ctx):
        # Run a UI command list; implicitly returns None when absent.
        request = inevow.IRequest(ctx)
        if 'command' in request.args:
            res=self.core.uiCommand(request.args['command'])
            return dmj.jsonize({'result': res})
    def child_configuringStatus(self, ctx):
        return dmj.jsonize({'result': self.core.configuringStatus()})
    def child_getRelayList(self, ctx):
        # Full relay configuration list, serialized via dbrelay().
        rs = self.core.getRelayList()
        inevow.IRequest(ctx).setHeader("pragma", "no-cache")
        return rs.addCallback(dbrelay).addCallback(dmj.jsonize)
    def child_getActionList(self, ctx):
        rs = self.core.getActionList()
        inevow.IRequest(ctx).setHeader("pragma", "no-cache")
        return rs.addCallback(dbaction).addCallback(dmj.jsonize)
    def child_getInputList(self, ctx):
        rs = self.core.getInputList()
        inevow.IRequest(ctx).setHeader("pragma", "no-cache")
        return rs.addCallback(dbinput).addCallback(dmj.jsonize)
|
from __future__ import absolute_import, unicode_literals
import os
from setuptools import setup, find_packages
from version import get_version
version = get_version()

# Long description: README followed by the change history. Context
# managers close the files (the old inline open(...).read() leaked them).
with open("README.txt") as _readme:
    _long_description = _readme.read()
with open(os.path.join("docs", "HISTORY.txt")) as _history:
    _long_description += "\n" + _history.read()

setup(
    name='edem.sponsors',
    version=version,
    description="Provides the Sponsors Viewlet on page sidebars",
    long_description=_long_description,
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers for values
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Framework :: Zope2",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: GNU General Public License v3 or later "
        "(GPLv3+)",
        "Natural Language :: English",
        # BUG FIX: a missing comma after the Linux entry used to merge it
        # with the following line into one invalid classifier via implicit
        # string concatenation.
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    keywords='',
    author='Bill Bushey',
    author_email='bill.bushey@e-democracy.org',
    url='http://www.e-democracy.org/',
    license='GPL 3',
    packages=find_packages(exclude=['ez_setup']),
    namespace_packages=['edem'],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'setuptools',
        'edem.skin',
        'gs.viewlet',
        'gs.group.base',
        'gs.site.home',
        'Products.GSContent',
        # -*- Extra requirements: -*-
    ],
    entry_points="""
    # -*- Entry points: -*-
    """,)
|
"""
Copyright (c) 2017 Genome Research Ltd.
Author: Christopher Harrison <ch12@sanger.ac.uk>
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from typing import Callable, Dict, Hashable, Iterator, List, Mapping, Optional, Tuple, Pattern
class ParseError(Exception):
    """Raised when a parser fails to match its input."""
_ParseT = Tuple[str, str]
_Parser = Callable[[str], _ParseT]
def _terminal(pattern: Pattern) -> _Parser:
"""
Make a terminal grammar node parser
@param pattern Pattern to match
@return Parser that matches pattern
"""
def _parser(s: str) -> _ParseT:
match = pattern.match(s)
if not match:
raise ParseError()
index = match.end()
return s[:index], s[index:]
return _parser
def _conjunction(*parsers: _Parser) -> _Parser:
    """
    Make a conjunction parser from parsers

    @param   parsers  Parsers to match (at least one)
    @return  Parser that matches the conjunction
    """
    assert parsers
    def _parser(s: str) -> _ParseT:
        # Each parser consumes from where the previous one stopped; a
        # failing parser's ParseError simply propagates to the caller.
        # (The former try/except that immediately re-raised was a no-op
        # and has been removed.)
        matched = ""
        remainder = s
        for parser in parsers:
            match, remainder = parser(remainder)
            matched += match
        return matched, remainder
    return _parser
def _disjunction(*parsers: _Parser) -> _Parser:
    """
    Make a disjunction parser from parsers

    @param   parsers  Parsers to match (at least one)
    @return  Parser that matches this disjunction
    """
    assert parsers

    def _parser(s: str) -> _ParseT:
        # First alternative that matches wins; fall through on failure
        for candidate in parsers:
            try:
                return candidate(s)
            except ParseError:
                continue

        # No alternative matched
        raise ParseError()

    return _parser
def _sequence(parser: _Parser, minimum: int=0, maximum: Optional[int]=None) -> _Parser:
    """
    Make a sequence parser from parsers
    @note    _sequence(parser, maximum=1) makes an optional parser
    @param   parser   Parser to match
    @param   minimum  Minimum number of matches
    @param   maximum  Maximum number of matches (optional)
    @return  Parser that matches sequence
    """
    if maximum:
        # NOTE(review): maximum=0 is falsy, so it behaves like "no maximum"
        # rather than "at most zero matches" -- confirm that is intended
        assert maximum >= minimum

    def _parser(s: str) -> _ParseT:
        matched = ""
        remainder = s
        num_matched = 0

        while True:
            try:
                match, remainder = parser(remainder)
                matched += match
                num_matched += 1
                # Stop when the input is exhausted or the cap is reached
                if (not remainder) or (maximum and num_matched == maximum):
                    break
            except ParseError:
                # Too few repetitions is a failure; otherwise we are done
                if num_matched < minimum:
                    raise
                break

        return matched, remainder

    return _parser
# Primitive terminals: (optional) whitespace, digits, letters, punctuation.
# NOTE(review): the character classes track the HTTP header grammar
# (RFC 7230/7235 token, quoted-string and token68 productions) -- confirm
_WS = _terminal(re.compile(r"[\t ]+"))
_OWS = _terminal(re.compile(r"[\t ]*"))
_DIGIT = _terminal(re.compile(r"[0-9]"))
_ALPHA = _terminal(re.compile(r"[a-zA-Z]"))
_QUOTE = _terminal(re.compile(r"\""))
_EQUALS = _terminal(re.compile(r"="))
# A backslash followed by any printable/high-bit character
_ESCAPED = _terminal(re.compile(r"\\[\x09\x20-\x7e\x80-\xff]"))
# Any quoted-string character except '"' and '\'
_TEXT = _terminal(re.compile(r"[\x09\x20\x21\x23-\x5b\x5d-\x7e\x80-\xff]"))

# "..." with escaped characters allowed inside
_QUOTED_STRING = _conjunction(
    _QUOTE,
    _sequence(_disjunction(_TEXT, _ESCAPED)),
    _QUOTE
)

# One-or-more token characters
_TOKEN = _sequence(
    _disjunction(
        _ALPHA,
        _DIGIT,
        _terminal(re.compile(r"[!#$%&'*+.^_`|~-]"))
    ),
    minimum=1
)

# base64-ish payload: characters followed by optional '=' padding
_TOKEN68 = _conjunction(
    _sequence(
        _disjunction(
            _ALPHA,
            _DIGIT,
            _terminal(re.compile(r"[-_~+/]"))
        ),
        minimum=1
    ),
    _sequence(_EQUALS)
)

# "," with optional surrounding whitespace
_LIST_SEPARATOR = _conjunction(
    _OWS,
    _terminal(re.compile(r",")),
    _OWS
)

# "=" with optional surrounding whitespace
_PARAM_SEPARATOR = _conjunction(_OWS, _EQUALS, _OWS)

# Parameter values may be bare tokens or quoted strings
_PARAM_VALUE = _disjunction(_TOKEN, _QUOTED_STRING)
def _param(s: str) -> Tuple[str, str, str]:
    """
    Key-Value parameter parser

    @param   s  Input string
    @return  Tuple of parameter key, parameter value and remaining string
    """
    # key "=" value, where "=" may carry surrounding whitespace
    key, rest = _TOKEN(s)
    _separator, rest = _PARAM_SEPARATOR(rest)
    value, rest = _PARAM_VALUE(rest)
    return key, value, rest
def _params(s: str) -> Tuple[Dict[str, str], str]:
    """
    Parameter group parser
    @param   s  Input string
    @return  Dictionary of parameters and remaining string
    """
    parameters: Dict[str, str] = {}

    # At least one parameter is required; a ParseError here propagates
    param_key, param_value, remainder = _param(s)
    parameters[param_key] = param_value

    if remainder:
        while True:
            try:
                # Tentatively consume ", key=value"; remainder is only
                # updated once the parameter after the separator parses
                _comma, next_param = _LIST_SEPARATOR(remainder)
                param_key, param_value, remainder = _param(next_param)
                parameters[param_key] = param_value
                if not remainder:
                    break
            except ParseError:
                # No further parameters; remainder stays at the last
                # successfully parsed point (separator not consumed)
                break

    return parameters, remainder
class HTTPAuthMethod(Hashable, Mapping[str, str]):
    """ HTTP authentication method model """
    _method: str
    _payload: Optional[str]  # token68 payload; mutually exclusive with _params
    _params: Dict[str, str]

    def __init__(self, auth_method: str, *, payload: Optional[str]=None,
                 params: Optional[Dict[str, str]]=None) -> None:
        """
        Constructor
        @note    payload and params should be mutually exclusive
        @param   auth_method  Authentication method (string)
        @param   payload      Payload (string)
        @param   params       Parameters (dictionary)
        """
        assert not (payload and params)
        self._method = auth_method
        self._payload = payload
        self._params = params or {}

    def __str__(self) -> str:
        # Renders "<method> <payload>" or "<method> k=v ..."
        # NOTE(review): parameters come out space-separated here, while HTTP
        # challenge parameters are comma-separated -- confirm intended
        output = self._method
        if self._payload:
            output += f" {self._payload}"
        else:
            for k, v in self._params.items():
                output += f" {k}={v}"
        return output

    def __repr__(self) -> str:
        return f"<{self._method} Authenticator at {id(self):#x}>"

    def __hash__(self) -> int:
        # NOTE(review): hash covers method+payload+params (via str), but
        # equality is inherited from Mapping and compares parameters only,
        # so equal instances may hash differently -- confirm
        return hash(str(self))

    def __getitem__(self, param: str) -> str:
        return self._params[param]

    def __iter__(self) -> Iterator[str]:
        return iter(self._params)

    def __len__(self) -> int:
        return len(self._params)

    @property
    def auth_method(self) -> str:
        return self._method

    @property
    def payload(self) -> Optional[str]:
        return self._payload
def _auth_handler(s: str) -> Tuple[HTTPAuthMethod, str]:
    """
    Authentication handler parser
    @param   s  Input string
    @return  Tuple of authentication handler and remaining string
    """
    auth_method, remainder = _TOKEN(s)

    try:
        # Lookahead to see if we're at the end of the list item
        if remainder:
            _lookahead = _LIST_SEPARATOR(remainder)
        # End of input, or a list separator follows: bare method name
        return HTTPAuthMethod(auth_method), remainder

    except ParseError:
        # There's no list separator, so there must be a payload/parameters
        _, remainder = _WS(remainder)
        try:
            # Prefer key=value parameters...
            params, remainder = _params(remainder)
            return HTTPAuthMethod(auth_method, params=params), remainder
        except ParseError:
            # ...otherwise fall back to a token68 payload
            payload, remainder = _TOKEN68(remainder)
            return HTTPAuthMethod(auth_method, payload=payload), remainder
def auth_parser(auth_header: str) -> List[HTTPAuthMethod]:
    """
    Authentication header parser

    @param   auth_header  Authentication header (string)
    @return  List of HTTP authentication methods
    """
    methods: List[HTTPAuthMethod] = []

    method, remainder = _auth_handler(auth_header)
    methods.append(method)

    # The header is a comma-separated list; keep consuming until exhausted
    while remainder:
        _separator, remainder = _LIST_SEPARATOR(remainder)
        method, remainder = _auth_handler(remainder)
        methods.append(method)

    return methods
|
from cloudinit import helpers
from cloudinit.sources import DataSourceOpenNebula as ds
from cloudinit import util
from mocker import MockerTestCase
from ..helpers import populate_dir
from base64 import b64encode
import os
import pwd
# Expected round-trip results for context.sh parsing (see
# test_context_parser): quoting, embedded newlines and escapes must survive
TEST_VARS = {
    'VAR1': 'single',
    'VAR2': 'double word',
    'VAR3': 'multi\nline\n',
    'VAR4': "'single'",
    'VAR5': "'double word'",
    'VAR6': "'multi\nline\n'",
    'VAR7': 'single\\t',
    'VAR8': 'double\\tword',
    'VAR9': 'multi\\t\nline\n',
    'VAR10': '\\',  # expect \
    'VAR11': '\'',  # expect '
    'VAR12': '$',  # expect $
}

# context.sh content that is not valid shell
INVALID_CONTEXT = ';'
USER_DATA = '#cloud-config\napt_upgrade: true'
# %i is filled with a per-iteration counter in test_ssh_key
SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
HOSTNAME = 'foo.example.com'
PUBLIC_IP = '10.0.0.3'

# canned 'ip' command output used to derive the eth0 configuration
CMD_IP_OUT = '''\
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether 02:00:0a:12:01:01 brd ff:ff:ff:ff:ff:ff
'''
class TestOpenNebulaDataSource(MockerTestCase):
    """Exercises DataSourceOpenNebula seed-dir discovery and context parsing."""

    # set by the patched switch_user_cmd below; lets tests inspect which
    # user the datasource tried to switch to
    parsed_user = None

    def setUp(self):
        super(TestOpenNebulaDataSource, self).setUp()
        self.tmp = self.makeDir()
        self.paths = helpers.Paths({'cloud_dir': self.tmp})

        # defaults for few tests
        self.ds = ds.DataSourceOpenNebula
        self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula")
        self.sys_cfg = {'datasource': {'OpenNebula': {'dsmode': 'local'}}}

        # we don't want 'sudo' called in tests. so we patch switch_user_cmd
        def my_switch_user_cmd(user):
            self.parsed_user = user
            return []

        self.switch_user_cmd_real = ds.switch_user_cmd
        ds.switch_user_cmd = my_switch_user_cmd

    def tearDown(self):
        # restore the real switch_user_cmd patched out in setUp
        ds.switch_user_cmd = self.switch_user_cmd_real
        super(TestOpenNebulaDataSource, self).tearDown()

    def test_get_data_non_contextdisk(self):
        orig_find_devs_with = util.find_devs_with
        try:
            # dont' try to lookup for CDs
            util.find_devs_with = lambda n: []
            dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
            ret = dsrc.get_data()
            self.assertFalse(ret)
        finally:
            util.find_devs_with = orig_find_devs_with

    def test_get_data_broken_contextdisk(self):
        orig_find_devs_with = util.find_devs_with
        try:
            # dont' try to lookup for CDs
            util.find_devs_with = lambda n: []
            populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
            dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
            self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
        finally:
            util.find_devs_with = orig_find_devs_with

    def test_get_data_invalid_identity(self):
        orig_find_devs_with = util.find_devs_with
        try:
            # generate non-existing system user name
            sys_cfg = self.sys_cfg
            invalid_user = 'invalid'
            while not sys_cfg['datasource']['OpenNebula'].get('parseuser'):
                try:
                    # keep appending 'X' until the name does not resolve
                    pwd.getpwnam(invalid_user)
                    invalid_user += 'X'
                except KeyError:
                    sys_cfg['datasource']['OpenNebula']['parseuser'] = \
                        invalid_user

            # dont' try to lookup for CDs
            util.find_devs_with = lambda n: []
            populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
            dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
            self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
        finally:
            util.find_devs_with = orig_find_devs_with

    def test_get_data(self):
        orig_find_devs_with = util.find_devs_with
        try:
            # dont' try to lookup for CDs
            util.find_devs_with = lambda n: []
            populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
            dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
            ret = dsrc.get_data()
            self.assertTrue(ret)
        finally:
            util.find_devs_with = orig_find_devs_with

    def test_seed_dir_non_contextdisk(self):
        self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir,
                          self.seed_dir)

    def test_seed_dir_empty1_context(self):
        # an empty context.sh yields no userdata and empty metadata
        populate_dir(self.seed_dir, {'context.sh': ''})
        results = ds.read_context_disk_dir(self.seed_dir)

        self.assertEqual(results['userdata'], None)
        self.assertEqual(results['metadata'], {})

    def test_seed_dir_empty2_context(self):
        populate_context_dir(self.seed_dir, {})
        results = ds.read_context_disk_dir(self.seed_dir)

        self.assertEqual(results['userdata'], None)
        self.assertEqual(results['metadata'], {})

    def test_seed_dir_broken_context(self):
        populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})

        self.assertRaises(ds.BrokenContextDiskDir,
                          ds.read_context_disk_dir,
                          self.seed_dir)

    def test_context_parser(self):
        # every TEST_VARS entry must round-trip through context.sh parsing
        populate_context_dir(self.seed_dir, TEST_VARS)
        results = ds.read_context_disk_dir(self.seed_dir)

        self.assertTrue('metadata' in results)
        self.assertEqual(TEST_VARS, results['metadata'])

    def test_ssh_key(self):
        public_keys = ['first key', 'second key']
        for c in range(4):
            for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'):
                my_d = os.path.join(self.tmp, "%s-%i" % (k, c))
                populate_context_dir(my_d, {k: '\n'.join(public_keys)})
                results = ds.read_context_disk_dir(my_d)

                self.assertTrue('metadata' in results)
                self.assertTrue('public-keys' in results['metadata'])
                self.assertEqual(public_keys,
                                 results['metadata']['public-keys'])

            # grow the key list so each round checks a different size
            public_keys.append(SSH_KEY % (c + 1,))

    def test_user_data_plain(self):
        for k in ('USER_DATA', 'USERDATA'):
            my_d = os.path.join(self.tmp, k)
            populate_context_dir(my_d, {k: USER_DATA,
                                        'USERDATA_ENCODING': ''})
            results = ds.read_context_disk_dir(my_d)

            self.assertTrue('userdata' in results)
            self.assertEqual(USER_DATA, results['userdata'])

    def test_user_data_encoding_required_for_decode(self):
        # without USERDATA_ENCODING=base64 the payload must stay encoded
        b64userdata = b64encode(USER_DATA)
        for k in ('USER_DATA', 'USERDATA'):
            my_d = os.path.join(self.tmp, k)
            populate_context_dir(my_d, {k: b64userdata})
            results = ds.read_context_disk_dir(my_d)

            self.assertTrue('userdata' in results)
            self.assertEqual(b64userdata, results['userdata'])

    def test_user_data_base64_encoding(self):
        for k in ('USER_DATA', 'USERDATA'):
            my_d = os.path.join(self.tmp, k)
            populate_context_dir(my_d, {k: b64encode(USER_DATA),
                                        'USERDATA_ENCODING': 'base64'})
            results = ds.read_context_disk_dir(my_d)

            self.assertTrue('userdata' in results)
            self.assertEqual(USER_DATA, results['userdata'])

    def test_hostname(self):
        # any of these context keys may provide the local hostname
        for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
            my_d = os.path.join(self.tmp, k)
            populate_context_dir(my_d, {k: PUBLIC_IP})
            results = ds.read_context_disk_dir(my_d)

            self.assertTrue('metadata' in results)
            self.assertTrue('local-hostname' in results['metadata'])
            self.assertEqual(PUBLIC_IP, results['metadata']['local-hostname'])

    def test_network_interfaces(self):
        populate_context_dir(self.seed_dir, {'ETH0_IP': '1.2.3.4'})
        results = ds.read_context_disk_dir(self.seed_dir)

        self.assertTrue('network-interfaces' in results)

    def test_find_candidates(self):
        def my_devs_with(criteria):
            return {
                "LABEL=CONTEXT": ["/dev/sdb"],
                "LABEL=CDROM": ["/dev/sr0"],
                "TYPE=iso9660": ["/dev/vdb"],
            }.get(criteria, [])

        orig_find_devs_with = util.find_devs_with
        try:
            util.find_devs_with = my_devs_with
            # CONTEXT-labelled disks come first, then CDROM, then iso9660
            self.assertEqual(["/dev/sdb", "/dev/sr0", "/dev/vdb"],
                             ds.find_candidate_devs())
        finally:
            util.find_devs_with = orig_find_devs_with
class TestOpenNebulaNetwork(MockerTestCase):
    """Checks network config generation from 'ip' output plus context vars."""

    def setUp(self):
        super(TestOpenNebulaNetwork, self).setUp()

    def test_lo(self):
        # no interface data: only the loopback stanza is produced
        net = ds.OpenNebulaNetwork('', {})
        self.assertEqual(net.gen_conf(), u'''\
auto lo
iface lo inet loopback
''')

    def test_eth0(self):
        # address/network are derived from the MAC in CMD_IP_OUT
        net = ds.OpenNebulaNetwork(CMD_IP_OUT, {})
        self.assertEqual(net.gen_conf(), u'''\
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address 10.18.1.1
network 10.18.1.0
netmask 255.255.255.0
''')

    def test_eth0_override(self):
        # explicit ETH0_* context values take precedence over derived ones;
        # the global DNS server is listed before the interface-specific ones
        context = {
            'DNS': '1.2.3.8',
            'ETH0_IP': '1.2.3.4',
            'ETH0_NETWORK': '1.2.3.0',
            'ETH0_MASK': '255.255.0.0',
            'ETH0_GATEWAY': '1.2.3.5',
            'ETH0_DOMAIN': 'example.com',
            'ETH0_DNS': '1.2.3.6 1.2.3.7'
        }

        net = ds.OpenNebulaNetwork(CMD_IP_OUT, context)
        self.assertEqual(net.gen_conf(), u'''\
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address 1.2.3.4
network 1.2.3.0
netmask 255.255.0.0
gateway 1.2.3.5
dns-search example.com
dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7
''')
class TestParseShellConfig(MockerTestCase):
    def test_no_seconds(self):
        cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"])
        # we could test 'sleep 2', but that would make the test run slower.
        ret = ds.parse_shell_config(cfg)
        # SECONDS must be dropped from the parsed result
        self.assertEqual(ret, {"foo": "bar", "xx": "foo"})
def populate_context_dir(path, variables):
    """Write *variables* as a context.sh file under *path*.

    Keys are upper-cased; values are single-quoted with any embedded
    single quote escaped shell-style ('\'').
    """
    data = "# Context variables generated by OpenNebula\n"
    for (k, v) in variables.iteritems():  # Python 2 dict iteration
        data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''")))
    populate_dir(path, {'context.sh': data})
|
import scipy.sparse
import numpy as np
from sklearn.utils import check_random_state
from sklearn.utils.extmath import randomized_range_finder, safe_sparse_dot
from divisi2.dense import DenseMatrix
from divisi2._svdlib import svd_ndarray
def divisi_sparse_to_scipy_sparse(matrix):
    """Convert a divisi2 sparse matrix to a scipy CSC matrix.

    Builds a COO matrix from the (values, rows, cols) triplets returned by
    the matrix's find() method, then converts it to CSC form.
    """
    data, row_idx, col_idx = matrix.find()
    return scipy.sparse.coo_matrix((data, (row_idx, col_idx))).tocsc()
def randomized_svd(matrix, k, p=10, q=5, random_state=0):
    """Approximate rank-k SVD of a divisi2 sparse matrix.

    @param matrix        divisi2 sparse matrix (provides find() and labels)
    @param k             target rank
    @param p             oversampling amount
    @param q             number of power iterations
    @param random_state  seed for the randomized range finder
    @return (U, S, V) with U and V wrapped as labelled DenseMatrix objects
    """
    scipy_mat = divisi_sparse_to_scipy_sparse(matrix)
    raw_u, S, raw_v = _randomized_svd(scipy_mat, k, p, q, random_state)
    U = DenseMatrix(raw_u, matrix.row_labels, None)
    V = DenseMatrix(raw_v, matrix.col_labels, None)
    return U, S, V
def _randomized_svd(M, k, p, q, random_state):
    """Core randomized SVD (range finding + thin SVD).

    @param M  scipy sparse matrix
    @param k  target rank
    @param p  oversampling amount
    @param q  number of power iterations
    @return (U, S, V) as ndarrays; V is returned column-oriented (Vt.T)
    """
    random_state = check_random_state(random_state)
    # Pass size/n_iter by keyword: recent scikit-learn releases made these
    # parameters keyword-only, so the old positional call breaks there.
    Q = randomized_range_finder(M, size=k + p, n_iter=q,
                                random_state=random_state)
    # project M to the (k + p) dimensional space using the basis vectors
    B = safe_sparse_dot(Q.T, M)
    # compute the SVD on the thin matrix: (k + p) wide
    Uhat_t, S, Vt = svd_ndarray(B, k)
    del B
    U = np.dot(Q, Uhat_t.T)
    return U, S, Vt.T
|
"""
Pyicoteo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import os
import sys
from tempfile import gettempdir
from heapq import heappop, heappush
from itertools import islice, cycle, chain
import logging
from core import Cluster, Region, InvalidLine, InsufficientData, ConversionNotSupported
from defaults import *
from bam import BamReader, BamFetcher, BamFetcherSamtools
class OperationFailed(Exception):
    """Raised when an operation must be aborted (e.g. too many invalid lines)."""
def manage_temp_file(path, keep_temp, logger):
    """A temporary file that is no longer needed is given, and depending on the value of self.keep_temp its removed or kept"""
    if not keep_temp:
        os.remove(path)
        logger.info('Temporary file %s removed'%path)
        return
    logger.info("Temp file kept at: %s (remember cleaning them!)"%path)
def sorting_lambda(format):
    """Return a sort key for raw alignment lines of the given format.

    Keys are (reference name, start position, read length) so files sort
    by genomic coordinate.
    """
    if format == ELAND:
        # ELAND: field 6 = chromosome, field 7 = position, field 1 = sequence
        return lambda x:(x.split()[6], int(x.split()[7]), len(x.split()[1]))
    elif format == SAM or format == BAM:
        # SAM: field 2 = RNAME, field 3 = POS, field 9 = SEQ
        return lambda x:(x.split()[2], int(x.split()[3]), len(x.split()[9]))
    else: #anything else, we fall to BED like format for sorting
        return lambda x:(x.split()[0],int(x.split()[1]),int(x.split()[2]))
def get_logger(logger_name, verbose=True, debug=False):
    """
    Getting a logger in the pyicos format.
    This should substitute the 'passing down' logging capabilities of the Turbomix class.

    @param logger_name  log file path, also used as the logger's name
    @param verbose      console handler logs INFO when True, else WARNING
    @param debug        log DEBUG everywhere (takes precedence over verbose)
    """
    logging_format = "%(asctime)s (PID:%(process)s) - %(levelname)s - %(message)s"
    logging.basicConfig(filename=logger_name, format=logging_format)
    logger = logging.getLogger(logger_name)
    if debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    # Fix: only attach the console handler the first time this logger is
    # requested. Previously every call stacked a fresh StreamHandler onto
    # the same logger, duplicating each message on the console.
    if not logger.handlers:
        ch = logging.StreamHandler()
        if debug:
            ch.setLevel(logging.DEBUG)
        elif verbose:
            ch.setLevel(logging.INFO)
        else:
            ch.setLevel(logging.WARNING)
        formatter = logging.Formatter(logging_format)
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    return logger
def open_file(path, format=None, gzipped=False, logger=None):
    """To open files that are not binary format, like BAM and gzip files.

    @param path     file to open
    @param format   format constant; BAM gets a dedicated reader
    @param gzipped  unsupported -- reports and exits
    @return a BamReader for BAM, otherwise a plain binary file handle
    """
    if format == BAM:
        if logger: logger.debug("read_fetcher: Returning BAM reader")
        return BamReader(path, logger)
    elif gzipped:
        # Parenthesized print: the old "print ..." statement was Python 2
        # only; this form behaves identically on Python 2 and 3.
        print("Open Gzipped! (not implemented)")
        sys.exit(1)
    else:
        return open(path, 'rb')
def read_fetcher(file_path, experiment_format, read_half_open=False, rounding=True, cached=True, logger=None, use_samtools=False, access_sequential=True, only_counts=False):
    """Factory for region-overlap readers.

    @param experiment_format  format constant (BAM gets special handling)
    @param use_samtools       for BAM, use the samtools-backed fetcher
    @param only_counts        use the memory-light counting reader
    @return a reader object exposing overlap iteration/counting
    """
    if experiment_format == BAM: #access_sequential
        if use_samtools:
            # NOTE(review): kept positional; verify parameter order against
            # BamFetcherSamtools' definition in bam.py
            return BamFetcherSamtools(file_path, read_half_open, rounding, cached, logger)
        else:
            # Fix: pass keyword arguments. SortedFileReader declares its
            # parameters as (read_half_open, cached, rounding), so the old
            # positional call silently swapped rounding and cached.
            return SortedFileReader(file_path, experiment_format,
                                    read_half_open=read_half_open,
                                    rounding=rounding, cached=cached,
                                    logger=logger)
    #elif experiment_format == BAM:
        #return BamFetcher(file_path, read_half_open, rounding, cached, logger) #doesnt work yet
    elif only_counts:
        return SortedFileCountReader(file_path, experiment_format,
                                     read_half_open=read_half_open,
                                     rounding=rounding, cached=cached,
                                     logger=logger)
    else:
        return SortedFileReader(file_path, experiment_format,
                                read_half_open=read_half_open,
                                rounding=rounding, cached=cached,
                                logger=logger)
def add_slash_to_path(path):
    """Return *path* with a trailing '/' appended if not already present.

    Uses endswith() instead of path[-1], which raised IndexError on an
    empty string.
    """
    if not path.endswith('/'):
        path = '%s/' % path
    return path
def poisson(actual, mean):
    """Poisson probability P(X = actual) for expectation *mean*.

    From StackOverflow: This algorithm is iterative,
    to keep the components from getting too large or small.

    @return the probability, or 0 if the computation overflows
    """
    try:
        p = math.exp(-mean)
        # range() instead of the Python-2-only xrange(); behavior identical
        for i in range(actual):
            p *= mean
            p /= i+1
        return p
    except OverflowError:
        return 0
def pearson(list_one, list_two):
    """
    Accepts paired lists and returns a number between -1 and 1,
    known as Pearson's r, that indicates how closely correlated
    the two datasets are.
    A score close to one indicates a high positive correlation.
    That means that X tends to be big when Y is big.
    A score close to negative one indicates a high negative correlation.
    That means X tends to be small when Y is big.
    A score close to zero indicates little correlation between the two
    datasets.
    h3. Sources
    http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
    http://davidmlane.com/hyperstat/A56626.html
    http://www.cmh.edu/stats/definitions/correlation.htm
    http://www.amazon.com/Programming-Collective-Intelligence-Building-Applications/dp/0596529325

    @raise ValueError if the lists have different lengths
    """
    if len(list_one) != len(list_two):
        raise ValueError('The two lists you provided do not have the name number \
of entries. Pearson\'s r can only be calculated with paired data.')
    n = len(list_one)
    # Convert all of the data to floats. Materialize with list(): under
    # Python 3 a bare map() is a one-shot iterator, so the later passes
    # over the data would silently see nothing.
    list_one = list(map(float, list_one))
    list_two = list(map(float, list_two))
    # Add up the total for each
    sum_one = sum(list_one)
    sum_two = sum(list_two)
    # Sum the squares of each
    sum_of_squares_one = sum([pow(i, 2) for i in list_one])
    sum_of_squares_two = sum([pow(i, 2) for i in list_two])
    # Sum up the product of each element multiplied against its pair
    product_sum = sum([item_one * item_two for item_one, item_two in zip(list_one, list_two)])
    # Use the raw materials above to assemble the equation
    pearson_numerator = product_sum - (sum_one * sum_two / n)
    pearson_denominator = math.sqrt((sum_of_squares_one - pow(sum_one,2) / n) * (sum_of_squares_two - pow(sum_two,2) / n))
    # To avoid dividing by zero,
    # catch it early on and drop out
    if pearson_denominator == 0:
        return 0
    # Divide the equation to return the r value
    r = pearson_numerator / pearson_denominator
    return r
def list_available_formats():
    """Print the formats Pyicoteo can read and write, then exit the program."""
    # TODO this is very bad, this function is a trap. Fix
    # (it unconditionally sys.exit()s, so callers never regain control)
    # Parenthesized print: the old "print ..." statements were Python 2
    # only; this single-argument form behaves identically on 2 and 3.
    print('Formats Pyicoteo can read:')
    for format in READ_FORMATS:
        print(format)
    print('\nFormats Pyicoteo can write:')
    for format in WRITE_FORMATS:
        print(format)
    sys.exit(0)
class SafeReader:
    """Tolerant line reader: skips invalid/header lines, aborting only after
    invalid_limit of them have been seen."""
    def __init__(self, logger=None):
        self.logger = logger
        self.invalid_count = 0    # invalid lines seen so far
        self.invalid_limit = 2000  # abort threshold

    def safe_read_line(self, cluster, line):
        """Reads a line in a file safely catching the error for headers.
        Triggers OperationFailed exception if too many invalid lines are fed to the method"""
        try:
            cluster.read_line(line)
        except InvalidLine:
            if self.invalid_count > self.invalid_limit:
                if self.logger:
                    self.logger.error('Limit of invalid lines: Check the experiment, control, and region file formats, probably the error is in there. Pyicoteo by default expects bedpk files, except for region files, which are bed files')
                raise OperationFailed
            else:
                # tolerate it: log, count, and move on
                if self.logger: self.logger.debug("Skipping invalid (%s) line (or header): %s"%(cluster.reader.format, line))
                self.invalid_count += 1
class BigSort:
    """
    This class can sort huge files without loading them fully into memory.
    Based on a recipe by Tomasz Bieruta.

    The file is split into sorted temporary chunks of buffer_size lines,
    which are then k-way merged into the output file.
    """
    def __init__(self, file_format, read_half_open=False, frag_size=0, id=0, logger=True, filter_chunks=True, push_distance=0, buffer_size = 320000, temp_file_size = 8000000):
        self.logger = logger
        self.file_format = file_format
        self.frag_size = frag_size            # extend reads to this length while filtering
        self.push_distance = push_distance    # shift reads by this distance while filtering
        self.buffer_size = buffer_size        # lines per in-memory chunk
        self.temp_file_size = temp_file_size  # buffering for temp file handles
        self.filter_chunks = filter_chunks    # run filter_chunk() on every chunk
        try:
            if self.file_format:
                self.cluster = Cluster(read=self.file_format, write=self.file_format, read_half_open=read_half_open, write_half_open=read_half_open, logger=self.logger)
        except ConversionNotSupported:
            self.logger.error('')
            self.logger.error('Reading "%s" is not supported (unknown format).\n'%self.file_format)
            list_available_formats()
        self.id = id  # disambiguates temp file names between BigSort instances

    def skipHeaderLines(self, key, experiment_file):
        """Advance the file past leading header lines that the sort key
        cannot parse (at most 400 of them)."""
        validLine = False
        count = 0
        while not validLine and count < 400: #file formats with more than 400 lines of header should die anyway
            try:
                currentPos = experiment_file.tell()
                line = [experiment_file.readline()]
                # applying the key raises if the line is an unparsable header
                line.sort(key=key)
                experiment_file.seek(currentPos)
                validLine = True
            except:
                count += 1

    def remove_chunks(self, chunks):
        """Best-effort removal of the temporary chunk files."""
        for chunk in chunks:
            try:
                os.remove(chunk)
            except:
                pass

    def filter_chunk(self, chunk):
        """Quality-filter the lines of one chunk, optionally extending and/or
        pushing each read; returns the surviving, re-serialized lines."""
        filtered_chunk = []
        for line in chunk:
            if self.cluster.reader.quality_filter(line):
                self.cluster.clear()
                try:
                    self.cluster.read_line(line)
                    if self.frag_size:
                        self.cluster.extend(self.frag_size)
                    if self.push_distance:
                        self.cluster.push(self.push_distance)
                except InvalidLine:
                    if self.logger: self.logger.debug('Discarding middle invalid line: %s'%line)
                if not self.cluster.is_empty():
                    filtered_chunk.append(self.cluster.write_line())
        return filtered_chunk

    def sort(self, input, output=None, key=None, tempdirs=[]):
        """External merge sort of *input* into *output* (or a generated temp
        path); returns an open handle on the sorted file.

        NOTE(review): the mutable default tempdirs=[] is shared between
        calls, so a gettempdir() appended here persists across invocations.
        """
        if key is None: # unless explicitly specified, sort with the default lambda
            key = sorting_lambda(self.file_format)

        if not tempdirs:
            tempdirs.append(gettempdir())

        input_file = open(input,'rb',self.temp_file_size)
        self.skipHeaderLines(key, input_file)
        try:
            input_iterator = iter(input_file)
            chunks = []
            # round-robin the chunk files over the available temp dirs
            for tempdir in cycle(tempdirs):
                current_chunk = list(islice(input_iterator, self.buffer_size))
                if self.filter_chunks:
                    current_chunk = self.filter_chunk(current_chunk)
                if current_chunk:
                    if self.logger: self.logger.debug("Chunk: len current_chunk: %s chunks: %s temp_file_size %s buffer_size %s"%(len(current_chunk), len(chunks), self.temp_file_size, self.buffer_size))
                    current_chunk.sort(key=key)
                    output_chunk = open(os.path.join(tempdir,'%06i_%s_%s'%(len(chunks), os.getpid(), self.id)),'w+b',self.temp_file_size)
                    output_chunk.writelines(current_chunk)
                    output_chunk.flush()
                    output_chunk.seek(0)
                    chunks.append(output_chunk.name)
                else:
                    break
        except KeyboardInterrupt: # If there is an interruption, delete all temporary files and raise the exception for further processing.
            print 'Removing temporary files...'
            self.remove_chunks(chunks)
            raise
        finally:
            input_file.close()

        if output is None:
            output = "%s/tempsort%s_%s"%(tempdirs[0], os.getpid(), self.id)

        output_file = open(output,'wb',self.temp_file_size)
        try:
            output_file.writelines(self.merge(chunks,key))
        finally:
            self.remove_chunks(chunks)
        output_file.close()
        return open(output)

    def merge(self, chunks, key=None):
        """k-way merge of the sorted chunk files via a heap; yields lines in
        sorted order."""
        if self.logger: self.logger.info("... Merging chunks...")
        if key is None:
            key = lambda x : x

        values = []
        # prime the heap with the first line of every chunk
        for index, chunk in enumerate(chunks):
            try:
                chunk_file = open(chunk)
                iterator = iter(chunk_file)
                value = iterator.next()
            except StopIteration:
                self.remove_chunks(chunks)
                #try: chunks.remove(chunk) except: pass # maybe there is some magic here that I missed, but I think it is useless
            else:
                heappush(values,((key(value), index, value, iterator, chunk_file)))

        # repeatedly emit the smallest head and refill from its chunk
        while values:
            k, index, value, iterator, chunk = heappop(values)
            yield value
            try:
                value = iterator.next()
            except StopIteration:
                self.remove_chunks(chunks)
                # there was also remove-chunks magic here
            else:
                heappush(values,(key(value),index,value,iterator,chunk))
class DualSortedReader:
    """Given two sorted files of tags in a format supported by Pyicoteo, iterates through them returning them in order"""
    def __init__(self, file_a_path, file_b_path, format, read_half_open=False, logger=None):
        self.logger = logger
        self.file_a = open(file_a_path)
        self.file_b = open(file_b_path)
        # one cached cluster per file holds the most recently read,
        # not-yet-yielded line
        self.current_a = Cluster(cached=False, read=format, read_half_open=read_half_open, logger=self.logger)
        self.current_b = Cluster(cached=False, read=format, read_half_open=read_half_open, logger=self.logger)

    def __iter__(self):
        """Merge the two sorted files, yielding the raw line of whichever
        cached cluster sorts first."""
        stop_a = True # indicates if the exception StopIteration is raised by file a (True) or file b (False)
        safe_reader = SafeReader(self.logger)
        try:
            while 1:
                # refill whichever cache is empty
                if not self.current_a:
                    stop_a = True
                    line_a = self.file_a.next()
                    safe_reader.safe_read_line(self.current_a, line_a)
                if not self.current_b:
                    stop_a = False
                    line_b = self.file_b.next()
                    safe_reader.safe_read_line(self.current_b, line_b)
                # yield the smaller of the two cached clusters
                if self.current_a < self.current_b:
                    self.current_a.clear()
                    yield line_a
                else:
                    self.current_b.clear()
                    yield line_b
        except StopIteration: # we still need to print the reminder of the sorter file
            # one file is exhausted; drain the other
            if stop_a:
                while self.file_b:
                    yield line_b
                    line_b = self.file_b.next()
            else:
                while self.file_a:
                    yield line_a
                    line_a = self.file_a.next()
class SortedFileReader:
    """
    Holds a cursor and a file path. Given a start and an end, it iterates through the file starting on the cursor position,
    and yields the clusters that overlap with the region specified. The cursor will be left behind the position of the last region fed
    to the SortedFileReader.
    """
    def __init__(self, file_path, experiment_format, read_half_open=False, cached=True, rounding=True, logger=None):
        # stash all constructor arguments as same-named attributes
        self.__dict__.update(locals())
        self.file_iterator = open(file_path, 'rb')
        # NOTE(review): SafeReader() is built without the logger -- confirm
        self.safe_reader = SafeReader()
        self.cursor = self.file_iterator.tell()
        self.initial_tell = self.file_iterator.tell()

    def restart(self):
        """Start again reading the file from the start"""
        self.logger.debug("Rewinding to %s"%self.cursor)
        self.file_iterator.seek(self.initial_tell)
        self.cursor = self.initial_tell

    def rewind(self):
        """Move back to cursor position"""
        #print "REWIND"
        self.file_iterator.seek(self.cursor)

    def _read_line(self):
        """Reads the next line of the file. If advance, the cursor will get the position of the file"""
        self.line = self.file_iterator.readline()
        if self.line == '':
            # empty string (not even a newline) means end of file
            return True

        return False

    def _advance(self):
        # commit the current file position as the new cursor
        #print "ADVANCE"
        self.cursor = self.file_iterator.tell()

    def _get_cluster(self):
        """Returns line in a cluster ignoring the count of lines. Assumes that the cursor position exists"""
        c = Cluster(read=self.experiment_format, read_half_open=self.read_half_open, rounding=self.rounding, cached=self.cached, logger=self.logger)
        if self.line:
            self.safe_read_line(c, self.line)
        return c

    def overlapping_clusters(self, region, overlap=1):
        """Yield the clusters overlapping *region* by at least *overlap*,
        advancing the shared cursor past everything before the region."""
        if self.logger: self.logger.debug("YIELD OVER: Started...")
        self.rewind()
        if self._read_line(): return
        cached_cluster = self._get_cluster()
        # skip clusters that end before the region starts (and empty ones)
        while (cached_cluster.name < region.name) or (cached_cluster.name == region.name and region.start > cached_cluster.end) or cached_cluster.is_empty():
            #print "old:",cached_cluster.name, cached_cluster.start, cached_cluster.end
            self._advance()
            if self._read_line(): return
            cached_cluster = self._get_cluster()
        #print "NEW:", cached_cluster.name, cached_cluster.start, cached_cluster.end

        # get intersections
        while cached_cluster.start <= region.end and cached_cluster.name == region.name:
            if cached_cluster.overlap(region) >= overlap:
                if not region.strand or region.strand == cached_cluster.strand: # don't include the clusters with different strand from the region
                    yield cached_cluster
            if self._read_line(): return
            cached_cluster = self._get_cluster()

    def safe_read_line(self, cluster, line):
        # delegate header/invalid-line tolerance to SafeReader
        self.safe_reader.safe_read_line(cluster, line)
class SortedFileCountReader:
    """
    Holds a cursor and a file path. Given a start and an end, it iterates through the file starting on the cursor position,
    and retrieves the *counts* (number of reads) that overlap with the region specified. Because this class doesn't store the reads, but only counts them,
    it doesn't have memory problems when encountering huge clusters of reads.
    """
    def __init__(self, file_path, experiment_format, read_half_open=False, rounding=True, cached=True, logger=None):
        # stash all constructor arguments as same-named attributes
        self.__dict__.update(locals())
        self.file_iterator = open_file(file_path, format=experiment_format, logger=logger)
        if logger:
            self.logger.debug('Fetcher used for %s: Sequential Sorted Counts Reader'%file_path)
        self.safe_reader = SafeReader(logger=logger)
        self.__initvalues()

    def rewind(self):
        """Start again reading the file from the start"""
        self.file_iterator.seek(0)
        self.__initvalues()

    def __initvalues(self):
        # slow_seek: file offset to resume from; current_tag: last read cluster
        self.slow_seek = 0
        self.current_tag = Cluster()

    def _read_next_tag(self):
        """Loads the cache if the line read by the cursor is not there yet. If the line is empty, it means that the end of file was reached,
        so this function sends a signal for the parent function to halt. If the region is stranded, the only tags returned will be the ones of that strand"""
        try:
            line = self.file_iterator.readline()
        except StopIteration:
            return True

        if line == '':
            # end of file
            return True

        self.current_tag = Cluster(read=self.experiment_format, read_half_open=self.read_half_open, rounding=self.rounding, cached=False, logger=self.logger)
        self.safe_read_line(self.current_tag, line)
        return False

    def get_overlaping_counts(self, region, overlap=1):
        """Count the reads overlapping *region* by at least *overlap*."""
        counts = 0
        # load last seek
        self.file_iterator.seek(self.slow_seek)
        self.current_tag = Cluster()
        # advance slow seek past all tags ending before the region starts
        while (self.current_tag.name < region.name) or (self.current_tag.name == region.name and region.start > self.current_tag.end):
            self.slow_seek = self.file_iterator.tell()
            if self._read_next_tag():
                return counts
        # get intersections
        while self.current_tag.start <= region.end and self.current_tag.name == region.name:
            if self.current_tag.overlap(region) >= overlap:
                if not region.strand or region.strand == self.current_tag.strand:
                    counts += 1
            if self._read_next_tag():
                return counts
        return counts

    def safe_read_line(self, cluster, line):
        # delegate header/invalid-line tolerance to SafeReader
        self.safe_reader.safe_read_line(cluster, line)
|
import os
import os.path
import shutil
from ipapython.ipa_log_manager import root_logger
import random
import six
from six.moves.configparser import SafeConfigParser
from ipaplatform.tasks import tasks
from ipaplatform.paths import paths
if six.PY3:
unicode = str
SYSRESTORE_PATH = paths.TMP
SYSRESTORE_INDEXFILE = "sysrestore.index"
SYSRESTORE_STATEFILE = "sysrestore.state"
class FileStore:
    """Class for handling backup and restore of files"""
    def __init__(self, path = SYSRESTORE_PATH, index_file = SYSRESTORE_INDEXFILE):
        """Create a FileStore object that uses @path as the
        base directory.

        The file @path/@index_file is used to store information
        about the original location of the saved files.
        """
        self._path = path
        self._index = os.path.join(self._path, index_file)
        # Source of the random prefix used to build unique backup names.
        self.random = random.Random()
        # Maps backup filename -> "mode,uid,gid,original-path".
        self.files = {}
        self._load()
    def _load(self):
        """Load the file list from the index file. @files will
        be an empty dictionary if the file doesn't exist.
        """
        root_logger.debug("Loading Index file from '%s'", self._index)
        self.files = {}
        p = SafeConfigParser()
        # Keep option names case-sensitive (the default lower-cases them).
        p.optionxform = str
        p.read(self._index)
        for section in p.sections():
            if section == "files":
                for (key, value) in p.items(section):
                    self.files[key] = value
    def save(self):
        """Save the file list to @_index. If @files is an empty
        dict, then @_index should be removed.
        """
        root_logger.debug("Saving Index File to '%s'", self._index)
        if len(self.files) == 0:
            root_logger.debug("  -> no files, removing file")
            if os.path.exists(self._index):
                os.remove(self._index)
            return
        p = SafeConfigParser()
        p.optionxform = str
        p.add_section('files')
        for (key, value) in self.files.items():
            p.set('files', key, str(value))
        with open(self._index, "w") as f:
            p.write(f)
    def backup_file(self, path):
        """Create a copy of the file at @path - so long as a copy
        does not already exist - which will be restored to its
        original location by restore_files().
        """
        root_logger.debug("Backing up system configuration file '%s'", path)
        if not os.path.isabs(path):
            raise ValueError("Absolute path required")
        if not os.path.isfile(path):
            root_logger.debug("  -> Not backing up - '%s' doesn't exist", path)
            return
        (reldir, backupfile) = os.path.split(path)
        # Prefix the backup name with 8 random hex bytes so that distinct
        # files sharing a basename do not collide in the store.
        filename = ""
        for i in range(8):
            h = "%02x" % self.random.randint(0,255)
            filename += h
        filename += "-"+backupfile
        backup_path = os.path.join(self._path, filename)
        if os.path.exists(backup_path):
            root_logger.debug("  -> Not backing up - already have a copy of '%s'", path)
            return
        # copy2 also preserves mode and timestamps of the original.
        shutil.copy2(path, backup_path)
        stat = os.stat(path)
        # Record ownership/permissions so restore can reapply them.
        template = '{stat.st_mode},{stat.st_uid},{stat.st_gid},{path}'
        self.files[filename] = template.format(stat=stat, path=path)
        self.save()
    def has_file(self, path):
        """Checks whether file at @path was added to the file store

        Returns #True if the file exists in the file store, #False otherwise
        """
        result = False
        for (key, value) in self.files.items():
            # Index entries are "mode,uid,gid,original-path".
            (mode,uid,gid,filepath) = value.split(',', 3)
            if (filepath == path):
                result = True
                break
        return result
    def restore_file(self, path, new_path = None):
        """Restore the copy of a file at @path to its original
        location and delete the copy.

        Takes optional parameter @new_path which specifies the
        location where the file is to be restored.

        Returns #True if the file was restored, #False if there
        was no backup file to restore
        """
        if new_path is None:
            root_logger.debug("Restoring system configuration file '%s'", path)
        else:
            root_logger.debug("Restoring system configuration file '%s' to '%s'", path, new_path)
        if not os.path.isabs(path):
            raise ValueError("Absolute path required")
        if new_path is not None and not os.path.isabs(new_path):
            raise ValueError("Absolute new path required")
        mode = None
        uid = None
        gid = None
        filename = None
        # Find the index entry whose recorded original path matches.
        for (key, value) in self.files.items():
            (mode,uid,gid,filepath) = value.split(',', 3)
            if (filepath == path):
                filename = key
                break
        if not filename:
            raise ValueError("No such file name in the index")
        backup_path = os.path.join(self._path, filename)
        if not os.path.exists(backup_path):
            root_logger.debug("  -> Not restoring - '%s' doesn't exist", backup_path)
            return False
        if new_path is not None:
            path = new_path
        shutil.copy(backup_path, path) # SELinux needs copy
        os.remove(backup_path)
        # Reapply the ownership and permissions recorded at backup time.
        os.chown(path, int(uid), int(gid))
        os.chmod(path, int(mode))
        tasks.restore_context(path)
        del self.files[filename]
        self.save()
        return True
    def restore_all_files(self):
        """Restore the files in the index to their original
        location and delete the copy.

        Returns #True if the files were restored, #False if the
        index was empty
        """
        if len(self.files) == 0:
            return False
        for (filename, value) in self.files.items():
            (mode,uid,gid,path) = value.split(',', 3)
            backup_path = os.path.join(self._path, filename)
            if not os.path.exists(backup_path):
                root_logger.debug("  -> Not restoring - '%s' doesn't exist", backup_path)
                continue
            shutil.copy(backup_path, path) # SELinux needs copy
            os.remove(backup_path)
            os.chown(path, int(uid), int(gid))
            os.chmod(path, int(mode))
            tasks.restore_context(path)
        # force file to be deleted
        self.files = {}
        self.save()
        return True
    def has_files(self):
        """Return True or False if there are any files in the index

        Can be used to determine if a program is configured.
        """
        return len(self.files) > 0
    def untrack_file(self, path):
        """Remove file at path @path from list of backed up files.

        Does not remove any files from the filesystem.

        Returns #True if the file was untracked, #False if there
        was no backup file to restore
        """
        root_logger.debug("Untracking system configuration file '%s'", path)
        if not os.path.isabs(path):
            raise ValueError("Absolute path required")
        mode = None
        uid = None
        gid = None
        filename = None
        for (key, value) in self.files.items():
            (mode,uid,gid,filepath) = value.split(',', 3)
            if (filepath == path):
                filename = key
                break
        if not filename:
            raise ValueError("No such file name in the index")
        backup_path = os.path.join(self._path, filename)
        if not os.path.exists(backup_path):
            root_logger.debug("  -> Not restoring - '%s' doesn't exist", backup_path)
            return False
        try:
            os.unlink(backup_path)
        except Exception as e:
            # Best effort: still untrack the entry even if the backup copy
            # could not be removed.
            root_logger.error('Error removing %s: %s' % (backup_path, str(e)))
        del self.files[filename]
        self.save()
        return True
class StateFile:
    """A metadata file for recording system state which can
    be backed up and later restored.

    StateFile gets reloaded every time to prevent loss of information
    recorded by child processes. But we do not solve concurrency
    because there is no need for it right now.

    The format is something like:

    [httpd]
    running=True
    enabled=False
    """

    def __init__(self, path = SYSRESTORE_PATH, state_file = SYSRESTORE_STATEFILE):
        """Create a StateFile object, loading from @path.

        The dictionary @modules, a member of the returned object,
        is where the state can be modified. @modules is indexed
        using a module name to return another dictionary containing
        key/value pairs with the saved state of that module.

        The keys in these latter dictionaries are arbitrary strings
        and the values may either be strings or booleans.
        """
        self._path = os.path.join(path, state_file)
        self.modules = {}
        self._load()

    def _load(self):
        """Load the modules from the file @_path. @modules will
        be an empty dictionary if the file doesn't exist.
        """
        root_logger.debug("Loading StateFile from '%s'", self._path)
        self.modules = {}
        p = SafeConfigParser()
        # Keep option names case-sensitive (the default lower-cases them).
        p.optionxform = str
        p.read(self._path)
        for module in p.sections():
            self.modules[module] = {}
            for (key, value) in p.items(module):
                # Booleans are stored via str(); map them back to bool.
                if value == str(True):
                    value = True
                elif value == str(False):
                    value = False
                self.modules[module][key] = value

    def save(self):
        """Save the modules to @_path. If @modules is an empty
        dict, then @_path should be removed.
        """
        root_logger.debug("Saving StateFile to '%s'", self._path)
        # Drop modules that no longer carry any state.  Iterate over a
        # snapshot of the keys: deleting from a dict while iterating its
        # live view raises RuntimeError on Python 3.
        for module in list(self.modules.keys()):
            if len(self.modules[module]) == 0:
                del self.modules[module]
        if len(self.modules) == 0:
            root_logger.debug("  -> no modules, removing file")
            if os.path.exists(self._path):
                os.remove(self._path)
            return
        p = SafeConfigParser()
        p.optionxform = str
        for module in self.modules.keys():
            p.add_section(module)
            for (key, value) in self.modules[module].items():
                p.set(module, key, str(value))
        with open(self._path, "w") as f:
            p.write(f)

    def backup_state(self, module, key, value):
        """Backup an item of system state from @module, identified
        by the string @key and with the value @value. @value may be
        a string or boolean.

        The first recorded value for a given (module, key) wins:
        re-recording does not overwrite it, so restore_state() hands
        back the state as it was before the first backup.
        """
        if not isinstance(value, (str, bool, unicode)):
            raise ValueError("Only strings, booleans or unicode strings are supported")
        self._load()
        if module not in self.modules:
            self.modules[module] = {}
        # Fixed: this previously tested ``key not in self.modules`` (the
        # top-level module table), so an already-recorded value was almost
        # always overwritten, losing the original system state.
        if key not in self.modules[module]:
            self.modules[module][key] = value
        self.save()

    def get_state(self, module, key):
        """Return the value of an item of system state from @module,
        identified by the string @key.

        If the item doesn't exist, #None will be returned, otherwise
        the original string or boolean value is returned.
        """
        self._load()
        if module not in self.modules:
            return None
        return self.modules[module].get(key, None)

    def delete_state(self, module, key):
        """Delete system state from @module, identified by the string
        @key.

        If the item doesn't exist, no change is done.
        """
        self._load()
        try:
            del self.modules[module][key]
        except KeyError:
            pass
        else:
            # Only rewrite the file when something actually changed.
            self.save()

    def restore_state(self, module, key):
        """Return the value of an item of system state from @module,
        identified by the string @key, and remove it from the backed
        up system state.

        If the item doesn't exist, #None will be returned, otherwise
        the original string or boolean value is returned.
        """
        value = self.get_state(module, key)
        if value is not None:
            self.delete_state(module, key)
        return value

    def has_state(self, module):
        """Return True or False if there is any state stored for @module.

        Can be used to determine if a service is configured.
        """
        return module in self.modules
|
from functools import lru_cache
import locale
import re
from subprocess import PIPE, Popen, check_call
import sys
from vcsn.tools import _tmp_file
state_style = 'node [shape = circle, style = rounded, width = 0.5]'
state_pretty = 'node [fontsize = 12, fillcolor = cadetblue1, shape = circle, style = "filled,rounded", height = 0.4, width = 0.4, fixedsize = true]'
state_simple = 'node [fillcolor = cadetblue1, shape = circle, style = "filled,rounded", width = 0.3]'
state_point = 'node [shape = point, width = 0]'
edge_style = 'edge [arrowhead = vee, arrowsize = .6]'
def _states_as_tooltips(s):
    '''Move box-shaped state labels into node tooltips, then apply the
    pretty state styling.'''
    res = re.sub(r'label = (".*?"), shape = box', r'tooltip = \1', s)
    return _states_as_pretty(res)
def _states_as_simple(s):
    '''Render every state as a small plain circle, moving any label it
    carried into the node's tooltip.'''
    out = s.replace(state_style, state_simple)
    # Undecorated states: bare number on its own line.
    out = re.sub(r'^( *)([0-9]+)$',
                 r'\1\2 [label = "", tooltip = "\2"]',
                 out, flags=re.MULTILINE)
    # Decorated states: keep the attributes, blank the label.
    out = re.sub(r'^(\ *([0-9]+)\ *\[.*?label\ *=\ *)"(.*?)", shape = box',
                 r'\1"", tooltip = "\2: \3"',
                 out, flags=re.MULTILINE)
    return out
def _states_as_points(s):
    '''Shrink every node to a bare point so that only the transitions
    remain visible.'''
    out = s.replace(state_style, state_point)
    return out.replace(', shape = box', '')
def _states_as_pretty(s):
    '''Color the states, letting box-shaped (decorated) ones grow to fit
    their label.'''
    out = s.replace(state_style, state_pretty)
    return out.replace('shape = box', 'shape = box, fixedsize = false')
def _dot_gray_node(m):
'''Replace gray node contours by gray nodes, and apply style to
nodes with their own style.'''
node = m.group(1)
attr = m.group(2)
if ' -> ' not in node:
attr = attr.replace('color = DimGray', 'fillcolor = lightgray')
attr = re.sub(r'style = (\w+)', r'style = "\1"', attr)
attr = re.sub(r'style = "(.+?)"',
r'style = "\1,filled,rounded"', attr)
# This is really ugly... We should definitely use gvpr.
attr = re.sub(r'(?:filled,)?rounded,filled,rounded',
r'filled,rounded', attr)
return node + attr
def _dot_pretty(s, mode="pretty"):
    '''
    Improve pretty-printing in a dot source.

    Use some HTML entities instead of ugly ASCII sequences.
    Use nicer arrows.

    If `mode` is `tooltip`, convert node labels to tooltips.
    If it is `transitions`, then hide the states.
    If it is `dot`, leave the source untouched.
    '''
    if mode != 'dot':
        if mode == 'simple':
            s = _states_as_simple(s)
        elif mode == 'tooltip':
            s = _states_as_tooltips(s)
        elif mode == 'transitions':
            s = _states_as_points(s)
        else:  # mode == 'pretty'
            s = _states_as_pretty(s)
        # Useless states should be filled in gray, instead of having a
        # gray contour.  Fill with a lighter gray.  But don't change the
        # color of the arrows.
        # Raw string: the plain literal '\[' is an invalid escape and
        # triggers a warning on modern Pythons.
        s = re.sub(r'^(.*)(\[.*?\])$', _dot_gray_node, s, flags=re.MULTILINE)
    return s
@lru_cache(maxsize=32)
def _dot_to_boxart(dot):
    '''Render a Dot source as Unicode box-art via graph-easy.'''
    # graph-easy requires the digraph to be named.
    src = dot.replace('digraph', 'digraph a')
    proc = Popen(['/opt/local/libexec/perl5.16/sitebin/graph-easy',
                  '--from=graphviz', '--as=boxart'],
                 stdin=PIPE, stdout=PIPE, stderr=PIPE,
                 universal_newlines=True)
    # Feed the source and collect both streams in one go.
    out, err = proc.communicate(src)
    if proc.wait():
        raise RuntimeError('graph-easy failed: ' + err)
    if isinstance(out, bytes):
        out = out.decode('utf-8')
    return out
@lru_cache(maxsize=32)
def _dot_to_svg(dot, engine='dot', *args):
    "The conversion of a Dot source into SVG by dot."
    # http://www.graphviz.org/content/rendering-automata
    # Three-stage pipeline: the layout engine computes positions, gvpr
    # edits the laid-out graph, and neato -n2 renders it to SVG using
    # the precomputed positions.
    p1 = Popen([engine] + list(args),
               stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
    # Clears pos/lp on edges into nodes named F* (but not Fpre) —
    # presumably the hidden final states; see the URL above.
    p2 = Popen(['gvpr', '-c', 'E[head.name == "F*" && head.name != "Fpre"]{lp=pos=""}'],
               stdin=p1.stdout, stdout=PIPE, stderr=PIPE,
               universal_newlines=True)
    p3 = Popen(['neato', '-n2', '-Tsvg'],
               stdin=p2.stdout, stdout=PIPE, stderr=PIPE,
               universal_newlines=True)
    p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
    p2.stdout.close() # Allow p2 to receive a SIGPIPE if p3 exits.
    p1.stdin.write(dot)
    p1.stdin.close()
    out, err = p3.communicate()
    # Check each stage in order so the error message names the stage
    # that actually failed.
    if p1.wait():
        raise RuntimeError(engine + " failed: " + p1.stderr.read())
    if p2.wait():
        raise RuntimeError("gvpr failed: " + p2.stderr.read())
    if p3.wait():
        raise RuntimeError("neato failed: " + err)
    if isinstance(out, bytes):
        out = out.decode('utf-8')
    return out
@lru_cache(maxsize=32)
def _dot_to_svg_dot2tex(dot, engine="dot", *args):
    '''The conversion of a Dot source into SVG by dot2tex.

    Requires dot2tex, texi2pdf and pdf2svg.
    '''
    # NOTE(review): *args is accepted but not forwarded to dot2tex —
    # confirm whether that is intentional.
    with _tmp_file('tex') as tex, \
         _tmp_file('pdf') as pdf, \
         _tmp_file('svg') as svg:
        p1 = Popen(['dot2tex', '--prog', engine],
                   stdin=PIPE, stdout=tex, stderr=PIPE,
                   universal_newlines=True)
        out, err = p1.communicate(dot)
        if p1.wait():
            raise RuntimeError("dot2tex failed: " + err)
        check_call(["texi2pdf", "--batch", "--clean", "--quiet",
                    "--output", pdf.name, tex.name])
        check_call(["pdf2svg", pdf.name, svg.name])
        # Close the SVG file deterministically instead of leaking the
        # handle until garbage collection (the original used a bare
        # open(...).read()).
        with open(svg.name) as f:
            res = f.read()
        if isinstance(res, bytes):
            res = res.decode('utf-8')
        return res
class Daut:
    '''Two-way translator between Vcsn's compact "Daut" automaton syntax
    and regular Graphviz Dot.

    In Daut a transition reads ``src -> dst label``, where ``$`` (or an
    empty field) denotes the hidden pre/post (initial/final) states.
    '''

    def __init__(self):
        self.transitions = []
        # A Dot identifier: a bare word, or a double-quoted string with
        # backslash escapes.
        self.id = r'(?:\w+|"(?:[^\\"]|\\.)*")'
        # An RE that matches transitions in Daut syntax.
        self.re_daut_tr = r'^ *({id}|\$)? *-> *({id}|\$)? *(.*?)$'.format(id = self.id)

    def quote(self, s):
        '''Turn a string (label) into a string in double-quotes, escaping
        embedded quotes and backslashes.  Already-quoted strings are
        returned unchanged.'''
        if len(s) < 2 or s[0] != '"' or s[-1] != '"':
            s = '"' + re.sub(r'([\\"])', r'\\\1', s) + '"'
        return s

    def unquote(self, s):
        '''Strip double-quotes and escapes from a string.'''
        if 2 <= len(s) and s[0] == '"' and s[-1] == '"':
            s = re.sub(r'\\(.)', r'\1', s[1:-1])
        return s

    # Using split(',') is tempting, but will break strings
    # that contain commas --- e.g., [label = "a, b"].
    def attr_dot_split(self, s):
        '''Split a Dot attribute string into `name = value` chunks.'''
        attr = r'{id}(?:\s*=\s*{id})?'.format(id=self.id)
        scanner = re.Scanner([
            # Fixed: was the literal pattern ",;", which matches only a
            # comma immediately followed by a semicolon, so lone
            # separators stopped the scan after the first attribute.
            (r"[,;]", None),
            (attr, lambda scanner, tok: tok),
            (r"\s+", None),
        ])
        return scanner.scan(s)[0]

    def parse_attr_dot(self, s):
        '''Parse a Dot attribute list (`[a = b, ...]`, possibly None)
        into a list of `name = value` strings.'''
        if not s:
            return []
        # Fixed: strip()'s result was previously discarded, and a
        # bracketless non-empty input fell through to an unbound local.
        s = s.strip()
        if s.startswith('[') and s.endswith(']'):
            s = s[1:-1]
        return [a.strip() for a in self.attr_dot_split(s)]

    def attr_dot(self, attrs):
        '''Receive a Dot list of attributes, and if the first
        is not a proper assignment, consider it's the label, so
        prepend 'label=' to it.
        '''
        if attrs and not attrs[0].startswith("label"):
            attrs[0] = 'label = {}'.format(self.quote(attrs[0]))
        # Bare color names color both the stroke and the font.
        for i, a in enumerate(attrs):
            if a in ['blue', 'red', 'green']:
                attrs[i] = "color={a}, fontcolor={a}".format(a=a)
        # Join on ";" rather that ",".
        if attrs:
            return "[" + "; ".join(attrs) + "]"
        else:
            return ""

    def parse_attr_daut(self, s):
        '''Return the list of attributes in Daut syntax (semicolon
        separated), [] when there are none.'''
        if not s:
            return []
        return [a.strip() for a in s.split(';')]

    def attr_daut(self, attrs):
        '''Keep a single attribute: the label.'''
        if attrs:
            for a in attrs:
                if a.startswith('label'):
                    return self.unquote(re.sub('label *= *', '', a))
        return ''

    def transition_daut(self, s, d, a):
        '''Format a transition to Daut syntax.'''
        label = self.attr_daut(a)
        return "{} -> {}{}{}".format(s or '$', d or '$',
                                     ' ' if label else '', label)

    def prepend(self, prefix, s):
        '''Prepend `prefix` to identifier `s`, inside the quotes when
        `s` is a quoted identifier.'''
        if s.startswith('"'):
            return '"' + prefix + s[1:]
        else:
            return prefix + s

    def transition_dot(self, s, d, a):
        '''Format a transition to Dot syntax, materializing the hidden
        pre/post states (named after the adjacent real state with an
        I/F prefix) and recording them in self.hidden.'''
        if s == '' or s == '$':
            s = self.prepend('I', d)
            self.hidden.append(s)
        if d == '' or d == '$':
            d = self.prepend('F', s)
            self.hidden.append(d)
        a = self.attr_dot(a)
        return ' {} -> {}{}{}'.format(s, d, ' ' if a else '', a)

    def parse_context(self, match):
        '''Record the context; used as an re.sub replacement, so it must
        return the (empty) replacement string.'''
        self.context = match.group(1)
        # Fixed: previously returned None, which re.sub rejects with a
        # TypeError whenever a context line is present.
        return ''

    def parse_transition(self, match, format):
        '''Return (source, destination, attributes) with Daut syntax.'''
        s = match.group(1)
        if s is None or s.startswith('I') or s.startswith('"I'):
            s = '$'
        d = match.group(2)
        # Fixed: the quoted-"F test was applied to `s`, so quoted final
        # states (e.g. "F0") were never mapped back to '$'.
        if d is None or d.startswith('F') or d.startswith('"F'):
            d = '$'
        if format == "dot":
            attr = self.parse_attr_dot(match.group(3))
        else:
            attr = self.parse_attr_daut(match.group(3))
        return (s, d, attr)

    def daut_to_dot(self, s):
        '''Convert from Daut syntax to Dot.'''
        self.context = "lal_char, b"
        # The list of pre/post states.
        self.hidden = []
        s = re.sub('^ *(?:vcsn_)?(?:context|ctx) *= *"?(.*?)"?$',
                   self.parse_context, s, flags=re.MULTILINE)
        s = re.sub(self.re_daut_tr,
                   lambda m: self.transition_dot(
                       *self.parse_transition(m, "daut")),
                   s, flags=re.MULTILINE)
        return '''digraph
{{
  vcsn_context = "{context}"
  rankdir = LR
  {edge_style}
  {{
    {state_point}
    {hidden}
  }}
  {state_style}
  {transitions}
}}'''.format(context=self.context,
             transitions=s,
             state_point=state_point,
             state_style=state_style,
             edge_style=edge_style,
             hidden=" ".join(self.hidden))

    def daut_to_transitions(self, s):
        '''Extract the list of transitions (as triples) from Daut.'''
        return re.findall(self.re_daut_tr, s, flags=re.MULTILINE)

    def dot_to_daut(self, s):
        '''Convert from Dot syntax to Daut.'''
        res = []
        # The replacement lambdas must return a string: append() yields
        # None, which re.sub rejects with a TypeError (fixed with
        # ``or ''``).  They are used only to collect matches into `res`.
        s = re.sub(r'^ *vcsn_context *= *"(.*?)"$',
                   lambda m: res.append('context = "{}"'.format(m.group(1))) or '',
                   s, flags=re.MULTILINE)
        re.sub(r'^ *({id}?) *-> *({id}?) *(\[.*?\])?$'.format(id=self.id),
               lambda m: res.append(
                   self.transition_daut(*self.parse_transition(m, "dot"))) or '',
               s, flags=re.MULTILINE)
        return "\n".join(res)
def daut_to_dot(s):
    '''Read a Daut input, translate to regular Dot.'''
    return Daut().daut_to_dot(s)
def dot_to_daut(s):
    '''Read a Dot input, simplify it into Daut.'''
    return Daut().dot_to_daut(s)
def daut_to_transitions(s):
    '''From a Daut, return the list of transitions as triples
    `(src, dst, entry)`, using `$` to denote pre/post states.'''
    return Daut().daut_to_transitions(s)
|
from __future__ import division
import errno
import json
import os
import sys
import time
import traceback
from twisted.internet import defer
from twisted.python import log
from twisted.web import resource, static
import p2pool
from bitcoin import data as bitcoin_data
from . import data as p2pool_data, p2p
from util import deferral, deferred_resource, graph, math, memory, pack, variable
def _atomic_read(filename):
try:
with open(filename, 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
try:
with open(filename + '.new', 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
return None
def _atomic_write(filename, data):
with open(filename + '.new', 'wb') as f:
f.write(data)
f.flush()
try:
os.fsync(f.fileno())
except:
pass
try:
os.rename(filename + '.new', filename)
except: # XXX windows can't overwrite
os.remove(filename)
os.rename(filename + '.new', filename)
def get_web_root(wb, datadir_path, bitcoind_warning_var, stop_event=variable.Event()):
node = wb.node
start_time = time.time()
web_root = resource.Resource()
def get_users():
height, last = node.tracker.get_height_and_last(node.best_share_var.value)
weights, total_weight, donation_weight = node.tracker.get_cumulative_weights(node.best_share_var.value, min(height, 720), 65535*2**256)
res = {}
for script in sorted(weights, key=lambda s: weights[s]):
res[bitcoin_data.script2_to_address(script, node.net.PARENT)] = weights[script]/total_weight
return res
def get_current_scaled_txouts(scale, trunc=0):
txouts = node.get_current_txouts()
total = sum(txouts.itervalues())
results = dict((script, value*scale//total) for script, value in txouts.iteritems())
if trunc > 0:
total_random = 0
random_set = set()
for s in sorted(results, key=results.__getitem__):
if results[s] >= trunc:
break
total_random += results[s]
random_set.add(s)
if total_random:
winner = math.weighted_choice((script, results[script]) for script in random_set)
for script in random_set:
del results[script]
results[winner] = total_random
if sum(results.itervalues()) < int(scale):
results[math.weighted_choice(results.iteritems())] += int(scale) - sum(results.itervalues())
return results
def get_patron_sendmany(total=None, trunc='0.01'):
if total is None:
return 'need total argument. go to patron_sendmany/<TOTAL>'
###Neisklar: This should be defined in the network-settings not hardcoded...
total = int(float(total)*1e8)
trunc = int(float(trunc)*1e8)
return json.dumps(dict(
(bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8)
for script, value in get_current_scaled_txouts(total, trunc).iteritems()
if bitcoin_data.script2_to_address(script, node.net.PARENT) is not None
))
def get_global_stats():
# averaged over last hour
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
nonstale_hash_rate = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)
stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
return dict(
pool_nonstale_hash_rate=nonstale_hash_rate,
pool_hash_rate=nonstale_hash_rate/(1 - stale_prop),
pool_stale_prop=stale_prop,
min_difficulty=bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target),
)
def get_local_stats():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
my_unstale_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes)
my_orphan_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'orphan')
my_doa_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'doa')
my_share_count = my_unstale_count + my_orphan_count + my_doa_count
my_stale_count = my_orphan_count + my_doa_count
my_stale_prop = my_stale_count/my_share_count if my_share_count != 0 else None
my_work = sum(bitcoin_data.target_to_average_attempts(share.target)
for share in node.tracker.get_chain(node.best_share_var.value, lookbehind - 1)
if share.hash in wb.my_share_hashes)
actual_time = (node.tracker.items[node.best_share_var.value].timestamp -
node.tracker.items[node.tracker.get_nth_parent_hash(node.best_share_var.value, lookbehind - 1)].timestamp)
share_att_s = my_work / actual_time
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
return dict(
my_hash_rates_in_last_hour=dict(
note="DEPRECATED",
nonstale=share_att_s,
rewarded=share_att_s/(1 - global_stale_prop),
actual=share_att_s/(1 - my_stale_prop) if my_stale_prop is not None else 0, # 0 because we don't have any shares anyway
),
my_share_counts_in_last_hour=dict(
shares=my_share_count,
unstale_shares=my_unstale_count,
stale_shares=my_stale_count,
orphan_stale_shares=my_orphan_count,
doa_stale_shares=my_doa_count,
),
my_stale_proportions_in_last_hour=dict(
stale=my_stale_prop,
orphan_stale=my_orphan_count/my_share_count if my_share_count != 0 else None,
dead_stale=my_doa_count/my_share_count if my_share_count != 0 else None,
),
miner_hash_rates=miner_hash_rates,
miner_dead_hash_rates=miner_dead_hash_rates,
efficiency_if_miner_perfect=(1 - stale_orphan_shares/shares)/(1 - global_stale_prop) if shares else None, # ignores dead shares because those are miner's fault and indicated by pseudoshare rejection
efficiency=(1 - (stale_orphan_shares+stale_doa_shares)/shares)/(1 - global_stale_prop) if shares else None,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
shares=dict(
total=shares,
orphan=stale_orphan_shares,
dead=stale_doa_shares,
),
uptime=time.time() - start_time,
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
block_value=node.bitcoind_work.value['subsidy']*1e-8,
warnings=p2pool_data.get_warnings(node.tracker, node.best_share_var.value, node.net, bitcoind_warning_var.value, node.bitcoind_work.value),
donation_proportion=wb.donation_percentage/100,
version=p2pool.__version__,
protocol_version=p2p.Protocol.VERSION,
fee=wb.worker_fee,
)
class WebInterface(deferred_resource.DeferredResource):
def __init__(self, func, mime_type='application/json', args=()):
deferred_resource.DeferredResource.__init__(self)
self.func, self.mime_type, self.args = func, mime_type, args
def getChild(self, child, request):
return WebInterface(self.func, self.mime_type, self.args + (child,))
@defer.inlineCallbacks
def render_GET(self, request):
request.setHeader('Content-Type', self.mime_type)
request.setHeader('Access-Control-Allow-Origin', '*')
res = yield self.func(*self.args)
defer.returnValue(json.dumps(res) if self.mime_type == 'application/json' else res)
def decent_height():
return min(node.tracker.get_height(node.best_share_var.value), 720)
web_root.putChild('rate', WebInterface(lambda: p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, decent_height())/(1-p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, decent_height()))))
web_root.putChild('difficulty', WebInterface(lambda: bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target)))
web_root.putChild('users', WebInterface(get_users))
web_root.putChild('user_stales', WebInterface(lambda: dict((bitcoin_data.pubkey_hash_to_address(ph, node.net.PARENT), prop) for ph, prop in
p2pool_data.get_user_stale_props(node.tracker, node.best_share_var.value, node.tracker.get_height(node.best_share_var.value)).iteritems())))
web_root.putChild('fee', WebInterface(lambda: wb.worker_fee))
web_root.putChild('current_payouts', WebInterface(lambda: dict((bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8) for script, value in node.get_current_txouts().iteritems())))
web_root.putChild('patron_sendmany', WebInterface(get_patron_sendmany, 'text/plain'))
web_root.putChild('global_stats', WebInterface(get_global_stats))
web_root.putChild('local_stats', WebInterface(get_local_stats))
web_root.putChild('peer_addresses', WebInterface(lambda: ' '.join('%s%s' % (peer.transport.getPeer().host, ':'+str(peer.transport.getPeer().port) if peer.transport.getPeer().port != node.net.P2P_PORT else '') for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('peer_txpool_sizes', WebInterface(lambda: dict(('%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port), peer.remembered_txs_size) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('pings', WebInterface(defer.inlineCallbacks(lambda: defer.returnValue(
dict([(a, (yield b)) for a, b in
[(
'%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port),
defer.inlineCallbacks(lambda peer=peer: defer.returnValue(
min([(yield peer.do_ping().addCallback(lambda x: x/0.001).addErrback(lambda fail: None)) for i in xrange(3)])
))()
) for peer in list(node.p2p_node.peers.itervalues())]
])
))))
web_root.putChild('peer_versions', WebInterface(lambda: dict(('%s:%i' % peer.addr, peer.other_sub_version) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('payout_addr', WebInterface(lambda: bitcoin_data.pubkey_hash_to_address(wb.my_pubkey_hash, node.net.PARENT)))
web_root.putChild('recent_blocks', WebInterface(lambda: [dict(
ts=s.timestamp,
hash='%064x' % s.header_hash,
number=pack.IntType(24).unpack(s.share_data['coinbase'][1:4]) if len(s.share_data['coinbase']) >= 4 else None,
share='%064x' % s.hash,
) for s in node.tracker.get_chain(node.best_share_var.value, min(node.tracker.get_height(node.best_share_var.value), 24*60*60//node.net.SHARE_PERIOD)) if s.pow_hash <= s.header['bits'].target]))
web_root.putChild('uptime', WebInterface(lambda: time.time() - start_time))
web_root.putChild('stale_rates', WebInterface(lambda: p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, decent_height(), rates=True)))
new_root = resource.Resource()
web_root.putChild('web', new_root)
stat_log = []
if os.path.exists(os.path.join(datadir_path, 'stats')):
try:
with open(os.path.join(datadir_path, 'stats'), 'rb') as f:
stat_log = json.loads(f.read())
except:
log.err(None, 'Error loading stats:')
def update_stat_log():
while stat_log and stat_log[0]['time'] < time.time() - 24*60*60:
stat_log.pop(0)
lookbehind = 3600//node.net.SHARE_PERIOD
if node.tracker.get_height(node.best_share_var.value) < lookbehind:
return None
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
stat_log.append(dict(
time=time.time(),
pool_hash_rate=p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)/(1-global_stale_prop),
pool_stale_prop=global_stale_prop,
local_hash_rates=miner_hash_rates,
local_dead_hash_rates=miner_dead_hash_rates,
shares=shares,
stale_shares=stale_orphan_shares + stale_doa_shares,
stale_shares_breakdown=dict(orphan=stale_orphan_shares, doa=stale_doa_shares),
current_payout=node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
block_value=node.bitcoind_work.value['subsidy']*1e-8,
))
with open(os.path.join(datadir_path, 'stats'), 'wb') as f:
f.write(json.dumps(stat_log))
x = deferral.RobustLoopingCall(update_stat_log)
x.start(5*60)
stop_event.watch(x.stop)
new_root.putChild('log', WebInterface(lambda: stat_log))
def get_share(share_hash_str):
    """Build the JSON-friendly description of one share for the web UI.

    *share_hash_str* is the share hash as a hex string. Returns None when
    the share is not known to the tracker.
    """
    if int(share_hash_str, 16) not in node.tracker.items:
        return None
    share = node.tracker.items[int(share_hash_str, 16)]
    return dict(
        parent='%064x' % share.previous_hash,
        children=['%064x' % x for x in sorted(node.tracker.reverse.get(share.hash, set()), key=lambda sh: -len(node.tracker.reverse.get(sh, set())))], # sorted from most children to least children
        type_name=type(share).__name__,
        local=dict(
            verified=share.hash in node.tracker.verified.items,
            # time_seen == 0 apparently marks shares loaded at startup;
            # fall back to the process start time for those.
            time_first_seen=start_time if share.time_seen == 0 else share.time_seen,
            peer_first_received_from=share.peer_addr,
        ),
        share_data=dict(
            timestamp=share.timestamp,
            target=share.target,
            max_target=share.max_target,
            payout_address=bitcoin_data.script2_to_address(share.new_script, node.net.PARENT),
            # donation is stored as a fraction of 65535.
            donation=share.share_data['donation']/65535,
            stale_info=share.share_data['stale_info'],
            nonce=share.share_data['nonce'],
            desired_version=share.share_data['desired_version'],
            absheight=share.absheight,
            abswork=share.abswork,
        ),
        block=dict(
            hash='%064x' % share.header_hash,
            header=dict(
                version=share.header['version'],
                previous_block='%064x' % share.header['previous_block'],
                merkle_root='%064x' % share.header['merkle_root'],
                timestamp=share.header['timestamp'],
                target=share.header['bits'].target,
                nonce=share.header['nonce'],
            ),
            gentx=dict(
                hash='%064x' % share.gentx_hash,
                # pad to at least 2 bytes before hex-encoding (Python 2 str).
                coinbase=share.share_data['coinbase'].ljust(2, '\x00').encode('hex'),
                value=share.share_data['subsidy']*1e-8,
            ),
            txn_count=len(list(share.iter_transaction_hash_refs())),
        ),
    )
new_root.putChild('share', WebInterface(lambda share_hash_str: get_share(share_hash_str)))
new_root.putChild('heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.heads]))
new_root.putChild('verified_heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.verified.heads]))
new_root.putChild('tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.tails for x in node.tracker.reverse.get(t, set())]))
new_root.putChild('verified_tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.verified.tails for x in node.tracker.verified.reverse.get(t, set())]))
new_root.putChild('best_share_hash', WebInterface(lambda: '%064x' % node.best_share_var.value))
def get_share_data(share_hash_str):
    """Return the packed binary form of the share named by its hex hash,
    or an empty string when the share is unknown."""
    share_hash = int(share_hash_str, 16)
    if share_hash not in node.tracker.items:
        return ''
    return p2pool_data.share_type.pack(node.tracker.items[share_hash].as_share1a())
new_root.putChild('share_data', WebInterface(lambda share_hash_str: get_share_data(share_hash_str), 'application/octet-stream'))
new_root.putChild('currency_info', WebInterface(lambda: dict(
symbol=node.net.PARENT.SYMBOL,
block_explorer_url_prefix=node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX,
address_explorer_url_prefix=node.net.PARENT.ADDRESS_EXPLORER_URL_PREFIX,
)))
new_root.putChild('version', WebInterface(lambda: p2pool.__version__))
hd_path = os.path.join(datadir_path, 'graph_db')
hd_data = _atomic_read(hd_path)
hd_obj = {}
if hd_data is not None:
try:
hd_obj = json.loads(hd_data)
except Exception:
log.err(None, 'Error reading graph database:')
dataview_descriptions = {
'last_hour': graph.DataViewDescription(150, 60*60),
'last_day': graph.DataViewDescription(300, 60*60*24),
'last_week': graph.DataViewDescription(300, 60*60*24*7),
'last_month': graph.DataViewDescription(300, 60*60*24*30),
'last_year': graph.DataViewDescription(300, 60*60*24*365.25),
}
def build_desired_rates(ds_name, ds_desc, dv_name, dv_desc, obj):
    """Default-constructor for the 'desired_version_rates' data view:
    reconstruct it from the persisted 'pool_rates' and 'desired_versions'
    views when it is absent from the stored graph database *obj*."""
    if not obj:
        # Nothing persisted at all: start with empty bins.
        last_bin_end = 0
        bins = dv_desc.bin_count*[{}]
    else:
        pool_rates = obj['pool_rates'][dv_name]
        desired_versions = obj['desired_versions'][dv_name]
        def get_total_pool_rate(t):
            # Average total pool rate in the bin containing time t, or None
            # when t is outside the recorded range or the bin is empty.
            n = int((pool_rates['last_bin_end'] - t)/dv_desc.bin_width)
            if n < 0 or n >= dv_desc.bin_count:
                return None
            total = sum(x[0] for x in pool_rates['bins'][n].values())
            count = math.mean(x[1] for x in pool_rates['bins'][n].values())
            if count == 0:
                return None
            return total/count
        last_bin_end = desired_versions['last_bin_end']
        # Scale each version's fraction by the pool rate at the sampled time.
        # NOTE(review): under Python 2, (i+1/2) is integer arithmetic, so
        # 1/2 == 0 and this samples the bin's *start*, not its midpoint —
        # confirm whether that was intended.
        bins = [dict((name, (total*get_total_pool_rate(last_bin_end - (i+1/2)*dv_desc.bin_width), count)) for name, (total, count) in desired_versions['bins'][i].iteritems()) for i in xrange(dv_desc.bin_count)]
    return graph.DataView(dv_desc, ds_desc, last_bin_end, bins)
hd = graph.HistoryDatabase.from_obj({
'local_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_dead_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_share_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_dead_share_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'pool_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'current_payout': graph.DataStreamDescription(dataview_descriptions),
'current_payouts': graph.DataStreamDescription(dataview_descriptions, multivalues=True),
'incoming_peers': graph.DataStreamDescription(dataview_descriptions),
'outgoing_peers': graph.DataStreamDescription(dataview_descriptions),
'miner_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'miner_dead_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'desired_versions': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'desired_version_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True, default_func=build_desired_rates),
'traffic_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'getwork_latency': graph.DataStreamDescription(dataview_descriptions),
'memory_usage': graph.DataStreamDescription(dataview_descriptions),
}, hd_obj)
x = deferral.RobustLoopingCall(lambda: _atomic_write(hd_path, json.dumps(hd.to_obj())))
x.start(100)
stop_event.watch(x.stop)
@wb.pseudoshare_received.watch
def _(work, dead, user):
    # Record every pseudoshare: total local hash rate, the dead (rejected)
    # portion, and the same split keyed per miner username when one is known.
    t = time.time()
    hd.datastreams['local_hash_rate'].add_datum(t, work)
    if dead:
        hd.datastreams['local_dead_hash_rate'].add_datum(t, work)
    if user is not None:
        hd.datastreams['miner_hash_rates'].add_datum(t, {user: work})
        if dead:
            hd.datastreams['miner_dead_hash_rates'].add_datum(t, {user: work})
@wb.share_received.watch
def _(work, dead):
    # Track full-share hash rate, plus the dead-share portion separately.
    t = time.time()
    hd.datastreams['local_share_hash_rate'].add_datum(t, work)
    if dead:
        hd.datastreams['local_dead_share_hash_rate'].add_datum(t, work)
@node.p2p_node.traffic_happened.watch
def _(name, bytes):
    # Accumulate per-category network traffic counters for the graphs.
    hd.datastreams['traffic_rate'].add_datum(time.time(), {name: bytes})
def add_point():
    """Periodic sampler feeding the history database: pool rates, payouts,
    peer counts, desired-version statistics and memory usage. Skips
    sampling (returns None) until at least 10 shares are tracked."""
    if node.tracker.get_height(node.best_share_var.value) < 10:
        return None
    lookbehind = min(node.net.CHAIN_LENGTH, 60*60//node.net.SHARE_PERIOD, node.tracker.get_height(node.best_share_var.value))
    t = time.time()
    pool_rates = p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, lookbehind, rates=True)
    pool_total = sum(pool_rates.itervalues())
    hd.datastreams['pool_rates'].add_datum(t, pool_rates)
    current_txouts = node.get_current_txouts()
    # *1e-8 converts base units to whole coins.
    hd.datastreams['current_payout'].add_datum(t, current_txouts.get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8)
    miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
    current_txouts_by_address = dict((bitcoin_data.script2_to_address(script, node.net.PARENT), amount) for script, amount in current_txouts.iteritems())
    # Only report payouts for addresses that are mining locally.
    hd.datastreams['current_payouts'].add_datum(t, dict((user, current_txouts_by_address[user]*1e-8) for user in miner_hash_rates if user in current_txouts_by_address))
    hd.datastreams['incoming_peers'].add_datum(t, sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming))
    hd.datastreams['outgoing_peers'].add_datum(t, sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming))
    vs = p2pool_data.get_desired_version_counts(node.tracker, node.best_share_var.value, lookbehind)
    vs_total = sum(vs.itervalues())
    hd.datastreams['desired_versions'].add_datum(t, dict((str(k), v/vs_total) for k, v in vs.iteritems()))
    hd.datastreams['desired_version_rates'].add_datum(t, dict((str(k), v/vs_total*pool_total) for k, v in vs.iteritems()))
    try:
        hd.datastreams['memory_usage'].add_datum(t, memory.resident())
    except:
        # memory.resident() is platform-dependent; best-effort only.
        if p2pool.DEBUG:
            traceback.print_exc()
x = deferral.RobustLoopingCall(add_point)
x.start(5)
stop_event.watch(x.stop)
@node.bitcoind_work.changed.watch
def _(new_work):
    # Graph how long the last getwork round-trip to the coin daemon took.
    hd.datastreams['getwork_latency'].add_datum(time.time(), new_work['latency'])
new_root.putChild('graph_data', WebInterface(lambda source, view: hd.datastreams[source].dataviews[view].get_data(time.time())))
web_root.putChild('static', static.File(os.path.join(os.path.dirname(sys.argv[0]), 'web-static')))
return web_root
|
from decksite.data import query
from decksite.deck_type import DeckType
def test_decks_where_deck_type() -> None:
    """decks_where must filter on deck source for LEAGUE/TOURNAMENT and
    apply no source filter for ALL."""
    league_args = {'deckType': DeckType.LEAGUE.value}
    assert "= 'League'" in query.decks_where(league_args, 1)

    tournament_args = {'deckType': DeckType.TOURNAMENT.value}
    assert "= 'Gatherling'" in query.decks_where(tournament_args, 1)

    all_args = {'deckType': DeckType.ALL.value}
    where_clause = query.decks_where(all_args, 1)
    for fragment in ("= 'League'", "= 'Gatherling'"):
        assert fragment not in where_clause
def test_decks_where_archetype_id() -> None:
    # TODO: not yet implemented — should cover the archetypeId filtering of
    # query.decks_where, mirroring test_decks_where_deck_type.
    pass
|
import liblo, sys

# Resolve the OSC target (UDP port 1234 on localhost); Address raises
# AddressError for an invalid host/port specification.
try:
    target = liblo.Address(1234)
except liblo.AddressError as err:  # "except X as e" works on Python 2.6+ and 3
    # print() call form is valid under both Python 2 and 3.
    print(str(err))
    sys.exit()

# Fire a single "/jingles" message carrying the integer argument 0.
liblo.send(target, "/jingles", 0)
|
"""Test backwards compatibility for resource managers using register().
The transaction package supports several different APIs for resource
managers. The original ZODB3 API was implemented by ZODB.Connection.
The Connection passed persistent objects to a Transaction's register()
method. It's possible that third-party code also used this API, hence
these tests verify that the code adapting the old interface to the
current API works.
These tests use a TestConnection object that implements the old API.
They check that the right methods are called and in roughly the right
order.
Common cases
------------
First, check that a basic transaction commit works.
>>> cn = TestConnection()
>>> cn.register(Object())
>>> cn.register(Object())
>>> cn.register(Object())
>>> transaction.commit()
>>> len(cn.committed)
3
>>> len(cn.aborted)
0
>>> cn.calls
['begin', 'vote', 'finish']
Second, check that a basic transaction abort works. If the
application calls abort(), then the transaction never gets into the
two-phase commit. It just aborts each object.
>>> cn = TestConnection()
>>> cn.register(Object())
>>> cn.register(Object())
>>> cn.register(Object())
>>> transaction.abort()
>>> len(cn.committed)
0
>>> len(cn.aborted)
3
>>> cn.calls
[]
Error handling
--------------
The tricky part of the implementation is recovering from an error that
occurs during the two-phase commit. We override the commit() and
abort() methods of Object to cause errors during commit.
Note that the implementation uses lists internally, so that objects
are committed in the order they are registered. (In the presence of
multiple resource managers, objects from a single resource manager are
committed in order. I'm not sure if this is an accident of the
implementation or a feature that should be supported by any
implementation.)
The order of resource managers depends on sortKey().
>>> cn = TestConnection()
>>> cn.register(Object())
>>> cn.register(CommitError())
>>> cn.register(Object())
>>> transaction.commit()
Traceback (most recent call last):
...
RuntimeError: commit
>>> len(cn.committed)
1
>>> len(cn.aborted)
3
Clean up:
>>> transaction.abort()
"""
import doctest
import transaction
class Object(object):
    """Minimal resource stand-in: commit() and abort() succeed silently."""
    def commit(self):
        pass
    def abort(self):
        pass
class CommitError(Object):
    """Resource whose commit() always fails with RuntimeError('commit')."""
    def commit(self):
        raise RuntimeError("commit")
class AbortError(Object):
    """Resource whose abort() always fails with RuntimeError('abort')."""
    def abort(self):
        raise RuntimeError("abort")
class BothError(CommitError, AbortError):
    """Resource whose commit() and abort() both raise RuntimeError."""
    pass
class TestConnection:
    """Fake ZODB3-style Connection implementing the legacy register() API.

    Records which objects were committed/aborted and in what order the
    two-phase-commit hooks fired, so the doctests above can inspect them.
    """

    def __init__(self):
        self.committed = []   # objects whose commit() ran
        self.aborted = []     # objects whose abort() ran
        self.calls = []       # ordered tpc_* hook names

    def register(self, obj):
        """Old-style registration: mark ourselves as the object's jar and
        hand the object to the current transaction."""
        obj._p_jar = self
        transaction.get().register(obj)

    def sortKey(self):
        """Stable ordering key for resource managers (identity-based)."""
        return str(id(self))

    # --- two-phase commit hooks: just record that they happened ---

    def tpc_begin(self, txn):
        self.calls.append("begin")

    def tpc_vote(self, txn):
        self.calls.append("vote")

    def tpc_finish(self, txn):
        self.calls.append("finish")

    def tpc_abort(self, txn):
        self.calls.append("abort")

    # --- per-object operations, delegating to the object itself ---

    def commit(self, obj, txn):
        obj.commit()
        self.committed.append(obj)

    def abort(self, obj, txn):
        obj.abort()
        self.aborted.append(obj)
def test_suite():
    """Collect the doctests from this module's docstring into a suite."""
    return doctest.DocTestSuite()
# setuptools' test discovery looks for ``additional_tests``.
additional_tests = test_suite
|
import argparse
import sqlite3
import tempfile
import csv
import subprocess
from generate.schema import Box
def main():
    """Plot a stored route: read its packed path blob from SQLite, dump the
    points as TSV and hand the file to the plot_shape.R script.

    Usage: prog <sqlite_path> <route>. Raises StopIteration (pre-existing
    behaviour) when the route is not found.
    """
    parser = argparse.ArgumentParser(description="Plot a route from database")
    parser.add_argument("sqlite_path")
    parser.add_argument("route")
    args = parser.parse_args()
    with sqlite3.connect(args.sqlite_path) as conn:
        cur = conn.cursor()
        rows = cur.execute("SELECT pathblob FROM routes where route = ?", (args.route,))
        first_row = next(rows)
        blob = first_row[0]
        paths = Box().from_bytes(blob)
        # Write through the NamedTemporaryFile handle itself instead of
        # re-opening it by name: the double-open breaks on Windows, where
        # an open temp file cannot be opened a second time. newline='' is
        # the csv-module requirement for text-mode files.
        with tempfile.NamedTemporaryFile(mode="w", newline="") as temp_file:
            writer = csv.writer(temp_file, delimiter='\t')
            writer.writerow(['i', 'lat', 'lon'])
            for i, path in enumerate(paths):
                for path_tuple in path:
                    writer.writerow([i, path_tuple[0], path_tuple[1]])
            temp_file.flush()  # ensure Rscript sees every row
            subprocess.check_call(["Rscript", "plot_shape.R", temp_file.name, "out.png"])
# Standard script entry point.
if __name__ == "__main__":
    main()
|
import logging
from cStringIO import StringIO
from plugins.plugin import Plugin
from PIL import Image
class Upsidedownternet(Plugin):
    """MITM plugin that intercepts image responses and flips them 180 degrees."""
    name = "Upsidedownternet"
    optname = "upsidedownternet"
    desc = 'Flips images 180 degrees'
    has_opts = False
    implements = ["handleResponse", "handleHeader"]

    def initialize(self, options):
        """Load PIL and publish Image/ImageFile for the response handler."""
        from PIL import Image, ImageFile
        globals()['Image'] = Image
        globals()['ImageFile'] = ImageFile
        self.options = options

    def handleHeader(self, request, key, value):
        '''Kill the image skipping that's in place for speed reasons'''
        if request.isImageRequest:
            request.isImageRequest = False
            request.isImage = True
            # e.g. "image/png" -> "PNG", matching PIL's format names.
            request.imageType = value.split("/")[1].upper()

    def handleResponse(self, request, data):
        """If the response body is an image, rotate it 180 degrees in place."""
        # getattr with a default replaces the old try/except AttributeError.
        isImage = getattr(request, 'isImage', False)
        if isImage:
            try:
                image_type = request.imageType
                #For some reason more images get parsed using the parser
                #rather than a file...PIL still needs some work I guess
                p = Image.Parser()
                p.feed(data)
                im = p.close()
                im = im.transpose(Image.ROTATE_180)
                output = StringIO()
                im.save(output, format=image_type)
                data = output.getvalue()
                output.close()
                logging.info("Flipped image")
            except Exception as e:
                # Route errors through logging like the success path does,
                # instead of a bare Python 2 print statement.
                logging.error("Error: %s" % e)
        return {'request': request, 'data': data}
|
default_app_config = 'wad_Campaign.apps.wad_CampaignConfig'
|
import sys, cStringIO, re
from BeautifulSoup import BeautifulSoup
from WebCursor import WebCursor
from pdfminer.pdfparser import PDFParser, PDFDocument
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, \
process_pdf
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams
import datetime, pickle, textwrap, argparse
import os.path
import random
# Terminal width used when wrapping meal descriptions for display.
consolewidth = 79
# Output encoding; stdout has no encoding when piped, so fall back to UTF-8.
default_encoding = sys.stdout.encoding
if default_encoding is None:
    default_encoding = 'utf-8'
# Betriebsrestaurant site (serves the IPP and FMI menu PDFs).
loske_base_url = u"http://www.betriebsrestaurant-gmbh.de/"
loske_main = u"index.php?id=91"
# Studentenwerk Mensa menu page, parameterised by location id below.
mensa = \
    u"http://www.studentenwerk-muenchen.de/mensa/speiseplan/speiseplan_{0}_-de.html"
mensa_id = {"arcisstr": 421,
            "garching": 422}
# Pickled application state lives in ~/.essen
config_file = os.path.expanduser(os.path.join(u"~", u".essen"))
# Price per menu category as (student, employee, guest) tuples in euros.
mensa_price_mapping = {
    u"Tagesgericht 1" : (1.00, 1.90, 2.40),
    u"Tagesgericht 2" : (1.55, 2.20, 2.70),
    u"Tagesgericht 3" : (1.90, 2.40, 2.90),
    u"Tagesgericht 4" : (2.40, 2.80, 3.30),
    u"Biogericht 1" : (1.55, 2.20, 2.70),
    u"Biogericht 2" : (1.90, 2.40, 2.90),
    u"Biogericht 3" : (2.40, 2.80, 3.30),
    u"Biogericht 4" : (2.60, 3.00, 3.50),
    u"Biogericht 5" : (2.80, 3.20, 3.70),
    u"Biogericht 6" : (3.00, 3.40, 3.90),
    u"Biogericht 7" : (3.20, 3.60, 4.10),
    u"Biogericht 8" : (3.50, 3.90, 4.40),
    u"Biogericht 9" : (4.00, 4.40, 4.90),
    u"Biogericht 10" : (4.50, 4.90, 5.40),
    u"Aktionsessen 1" : (1.55, 2.20, 2.70),
    u"Aktionsessen 2" : (1.90, 2.40, 2.90),
    u"Aktionsessen 3" : (2.40, 2.80, 3.30),
    u"Aktionsessen 4" : (2.60, 3.00, 3.50),
    u"Aktionsessen 5" : (2.80, 3.20, 3.70),
    u"Aktionsessen 6" : (3.00, 3.40, 3.90),
    u"Aktionsessen 7" : (3.20, 3.60, 4.10),
    u"Aktionsessen 8" : (3.50, 3.90, 4.40),
    u"Aktionsessen 9" : (4.00, 4.40, 4.90),
    u"Aktionsessen 10" : (4.50, 4.90, 5.40)}
class bcolors:
    """Namespace of ANSI color escape codes used for terminal output.

    The codes are only populated on Linux; elsewhere they remain empty
    strings so the printed output carries no escape sequences.
    """
    HEADER = ''
    OKBLUE = ''
    OKGREEN = ''
    WARNING = ''
    FAIL = ''
    ENDC = ''
    # Class-body import: evaluated once at class-creation time (and note it
    # leaves ``platform`` behind as a class attribute).
    import platform
    if platform.system() == u'Linux':
        HEADER = '\033[95m'
        OKBLUE = '\033[94m'
        OKGREEN = '\033[92m'
        WARNING = '\033[93m'
        FAIL = '\033[91m'
        ENDC = '\033[0m'
    def disable(self):
        # Blanks the codes on an *instance* only; class-level lookups like
        # bcolors.FAIL are unaffected since these create instance attributes.
        self.HEADER = ''
        self.OKBLUE = ''
        self.OKGREEN = ''
        self.WARNING = ''
        self.FAIL = ''
        self.ENDC = ''
# Location type ids and the 3-letter CLI codes (-l option) mapping to them.
# TYPE_MENSA_XP is the invented "extrapolation dish" pseudo-location.
TYPE_FMI, TYPE_IPP, TYPE_MENSA, TYPE_MENSA_XP = range(4)
type_translation = {"FMI": TYPE_FMI,
                    "IPP": TYPE_IPP,
                    "MEN": TYPE_MENSA,
                    "MXP": TYPE_MENSA_XP}
# Default configuration. "meals" maps datetime.date -> [(type, text), ...];
# the sentinel date(1,1,1) means "never updated".
config = {}
config["last_update_ipp"] = datetime.date(1,1,1)
config["last_update_mensa"] = datetime.date(1,1,1)
config["last_update_fmi"] = datetime.date(1,1,1)
config["meals"] = {}
def error(string):
    """Print *string* to stderr and terminate with exit status 1."""
    # sys.stderr.write works identically on Python 2 and 3, unlike the old
    # ``print >>sys.stderr`` statement (a syntax error under Python 3).
    sys.stderr.write(string + "\n")
    sys.exit(1)
def save_config(filename):
    """Pickle the global ``config`` dict to *filename*.

    The old ``if fp`` guard was dead code — open() raises on failure rather
    than returning a falsy handle — and the handle leaked if pickling
    raised; a ``with`` block fixes both.
    """
    with open(filename, "w") as fp:
        pickle.dump(config, fp)
def load_config(filename):
    """Replace the global ``config`` with the pickled dict in *filename*.

    As in save_config, the old ``if fp`` guard was dead code and the handle
    could leak; a ``with`` block guarantees closure.
    """
    global config
    with open(filename, "r") as fp:
        config = pickle.load(fp)
def filter_meals(date):
    """Yield (type, description) meals cached for *date*, restricted to
    the locations enabled in config["locations"]."""
    enabled = config["locations"]
    for meal_type, description in config["meals"][date]:
        if meal_type in enabled:
            yield meal_type, description
def unicodewrap(string, width):
    """Wrap unicode *string* to *width* columns; return the list of lines.

    Python 2's textwrap.wrap mishandles unicode non-breakable spaces, so
    there we round-trip through UTF-8 bytes. Python 3 wraps unicode
    natively — and would crash on the bytes round-trip, so branch on the
    interpreter version.
    """
    if sys.version_info[0] >= 3:
        return textwrap.wrap(string, width)
    # textwrap.wrap handles unicode non-breakable spaces incorrectly
    # so we need to encode before and decode after textwrap.wrap
    wrapped = textwrap.wrap(string.encode('utf-8'), width)
    return [el.decode('utf-8') for el in wrapped]
def dump_all_meals():
    """Print every cached day's (location-filtered) meals to stdout,
    oldest date first."""
    dates = sorted(config["meals"].keys())
    for d in dates:
        print u"%s:" % (str(d))
        for m in filter_meals(d):
            t, s = m
            # Wrap long descriptions to the console width.
            sb = u'\n '.join(unicodewrap(s, consolewidth-7))
            # Trailing comma keeps the location tag and text on one line.
            if t is TYPE_IPP:
                print " IPP",
            elif t is TYPE_FMI:
                print " FMI",
            else:
                print " MEN",
            print "- %s" % (sb.encode(default_encoding, 'replace'))
def dump_one_day_meals(date):
    """Print the (location-filtered) meals cached for *date*, plus an
    invented "extrapolation" dish when the MXP pseudo-location is on."""
    dates = config["meals"].keys()
    for d in dates:
        if d == date:
            print u"%s:" % (str(d))
            for m in filter_meals(d):
                t, s = m
                sb = u'\n '.join(unicodewrap(s, consolewidth-7))
                # Trailing comma keeps the location tag and text on one line.
                if t is TYPE_IPP:
                    print " IPP",
                elif t is TYPE_FMI:
                    print " FMI",
                else:
                    print " MEN",
                print "- %s" % (sb.encode(default_encoding, 'replace'))
            if TYPE_MENSA_XP in config["locations"]:
                ex = extrapolationsgericht(date)
                if ex is not None:
                    sb = u'\n '.join(unicodewrap(ex, consolewidth-7))
                    print " MXP",
                    print "- %s" % (sb.encode(default_encoding, 'replace'))
def extrapolationsgericht(date):
    """Invent a plausible fake dish for *date* by recombining fragments of
    the previous workday's Mensa meals; return None when there is not
    enough material to build one."""
    # "<name> (<price>)" splitter.
    pricesplit_re = re.compile(u'(.*) (\(.*\))', re.UNICODE)
    # Splits a dish at a German connective word ("an", "mit", ...).
    mealsplit_re = re.compile(u'(.*) (an|mit|in|vom|auf) (.*)', re.IGNORECASE |
                              re.UNICODE)
    mealsplit_pseudo_re = re.compile(u'.* (".*"|all .*).*', re.IGNORECASE |
                                     re.UNICODE)
    mealsplit_pseudo1_re = re.compile(u'(.*) (".*"|all .*).*', re.IGNORECASE |
                                      re.UNICODE)
    yesterday = date - datetime.timedelta(days=1)
    # skip weekend
    while yesterday.weekday() == 6 or yesterday.weekday() == 5:
        yesterday -= datetime.timedelta(days=1)
    # Collect yesterday's Mensa meals (IPP/FMI are excluded as sources).
    dates = config["meals"].keys()
    meals = []
    for d in dates:
        if d == yesterday:
            for m in filter_meals(d):
                t,s = m
                if t is not TYPE_IPP and t is not TYPE_FMI:
                    meals.append(s)
    # Separate descriptions from their price suffixes.
    meals_split = []
    prices = []
    for m in meals:
        ret = pricesplit_re.search(m)
        if ret:
            name, price = ret.groups()
            meals_split.append(name)
            prices.append(price)
    # Break descriptions into leading part / connective / trailing part.
    firsts = []
    middles = []
    lasts = []
    for m in meals_split:
        ret1 = mealsplit_re.search(m)
        ret2 = mealsplit_pseudo_re.search(m)
        ret3 = mealsplit_pseudo1_re.search(m)
        if ret1:
            g = ret1.groups()
            firsts.append(g[0])
            middles.append(g[1])
            lasts.append(g[2])
        if ret2:
            lasts.append(ret2.groups()[0])
        if ret3:
            firsts.append(ret3.groups()[0])
    if len(firsts) < 1 or len(middles) < 1 or len(lasts) < 1:
        return None
    # Random recombination of the collected fragments.
    return u'%s %s %s %s' % (random.choice(firsts),
                             random.choice(middles),
                             random.choice(lasts),
                             random.choice(prices))
def show_last_update():
    """Report when the IPP and Mensa menus were last fetched."""
    # print() with a single argument behaves identically under Python 2
    # and 3, unlike the old ``print x`` statement form.
    print(u"ipp: %s, mensa: %s" % (str(config["last_update_ipp"]),
                                   str(config["last_update_mensa"])))
def remove_older(when):
    """Drop every cached meal entry dated strictly before *when*.

    Iterates over a snapshot of the keys: deleting from a dict while
    iterating its live key view raises RuntimeError on Python 3
    (Python 2's .keys() happened to return a copy).
    """
    for k in list(config["meals"].keys()):
        if k < when:
            del config["meals"][k]
def parse_loske_pdf(pdf, is_ipp=True):
    """Extract dated meals from a Betriebsrestaurant menu PDF.

    The PDF text is pulled out with pdfminer, normalised with regex
    heuristics, then split into per-day meal entries which are appended to
    config["meals"] tagged TYPE_IPP or TYPE_FMI according to *is_ipp*.
    """
    # Heuristics for cleaning pdfminer's raw text dump:
    stripcid_re = re.compile(u"\(cid:.*?\)", re.UNICODE)  # unmapped glyphs
    # Each weekday header starts a new logical line.
    newline_heuristic_re = re.compile(u"Montag, den |Dienstag, den |Mittwoch" \
                                      u", den |Donnerstag, den |Freitag, den ",
                                      re.IGNORECASE | re.UNICODE)
    bnw_endheuristic_re = re.compile(u"B\.n\.W\.=Beilage.*", re.UNICODE)
    dow_beginheuristic_re = re.compile(u".*?Montag, den ",
                                       re.IGNORECASE | re.UNICODE)
    # One meal entry: running number, description, price euros, price cents.
    meal_detect_re = re.compile(u"(\d\.)(.*?)(\d).(\d\d)", re.UNICODE)
    #meal_detect_re = re.compile(u"(\d\.)(\D)", re.UNICODE)
    date_re = re.compile(u"(\d{1,2})\.(\d{1,2})\.(\d{1,4})(.*)", re.UNICODE)
    # Markers stripped from descriptions: diet letters and additive numbers.
    meal_props = re.compile(ur'\b[VKRS](?:\+[VKRS])*\b\s*', re.UNICODE)
    meal_numbers = re.compile(ur'([^/]|^)\s*\b[1-6](?:,[1-6])*\b([^/]|$)',
                              re.UNICODE)
    # Standard pdfminer text-extraction pipeline.
    rsrcmgr = PDFResourceManager()
    outtxt = cStringIO.StringIO()
    device = TextConverter(rsrcmgr, outtxt)
    pdfp = PDFParser(cStringIO.StringIO(pdf))
    doc = PDFDocument()
    pdfp.set_document(doc)
    doc.set_parser(pdfp)
    doc.initialize("")  # empty password
    if not doc.is_extractable:
        print >>sys.stderr, u"PDF Document not extractable"
        sys.exit(1)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    for (pageno,page) in enumerate(doc.get_pages()):
        #print pageno
        interpreter.process_page(page)
    device.close()
    # Normalise: strip glyph junk, drop header/footer, one weekday per line.
    fulltext = outtxt.getvalue().decode('utf-8', 'replace')
    fulltext = stripcid_re.sub(u'', fulltext)
    fulltext = dow_beginheuristic_re.sub(u'', fulltext)
    fulltext = bnw_endheuristic_re.sub(u'', fulltext)
    fulltext = newline_heuristic_re.sub(u'\n', fulltext)
    lines = fulltext.split(u'\n')
    now = datetime.date(1,1,1)
    for line in lines:
        ret = date_re.search(line)
        if ret:
            day, month, year, meals = ret.groups()
            try:
                now = datetime.date(int(year), int(month), int(day))
            except ValueError:
                # some weird date in pdf (like 29.02.2013), skipping these
                # entries is the easiest solution
                continue
            #meals = meal_detect_re.sub(ur'\n\2(\3.\4 €)', meals).strip()
            meals = meal_detect_re.finditer(meals)
            for meal_match in meals:
                m = meal_match.group(2)
                m = meal_props.sub(u'', m)
                m = meal_numbers.sub(lambda x : x.group(1) + x.group(2), m)
                m = m.replace(u'*', u'')
                # Collapse whitespace and re-attach the parsed price.
                m = m.split()
                m.append(u'({0}.{1} €)'.format(meal_match.group(3),
                                               meal_match.group(4)))
                m = u' '.join(m)
                meal_type = TYPE_IPP if is_ipp else TYPE_FMI
                try:
                    tmp = config["meals"][now]
                    config["meals"][now].append((meal_type, m))
                except KeyError, e:
                    config["meals"][now] = [(meal_type, m)]
def get_new_loske(is_ipp=True):
    """Fetch and parse this week's and next week's canteen menu PDFs.

    *is_ipp* selects the IPP canteen (tables 0/1 on the overview page) or
    the FMI canteen (tables 2/3). Parsed meals are stored via
    parse_loske_pdf; the matching last-update stamp is set on success.
    """
    wc = WebCursor()
    loske_html = wc.get(loske_base_url + loske_main)
    if loske_html == "":
        sys.stderr.write(u"Could not download %s\n" % (loske_base_url + loske_main))
        sys.exit(1)
    soup = BeautifulSoup(loske_html)
    tables = soup.findAll(u'table', attrs={u'class' : u'csc-uploads csc-uploads-0'})
    # Tables 0/1 hold the IPP PDFs (this/next week), tables 2/3 the FMI
    # ones — so the FMI path needs at least four tables (the old check for
    # two let tables[2]/tables[3] raise IndexError).
    if len(tables) < (2 if is_ipp else 4):
        sys.stderr.write(u"Parse html error\n")
        sys.exit(1)
    for week_offset in (0, 1):
        table = tables[week_offset] if is_ipp else tables[week_offset + 2]
        # BUG FIX: next week's link was always taken from tables[1] (the
        # IPP table) even for FMI; use the table selected above.
        links = table.findAll('a')
        if len(links) < 1:
            sys.stderr.write(u"Parse html error\n")
            sys.exit(1)
        pdf_url = links[0]['href']
        # BUG FIX: the second iteration used to re-check the first week's
        # URL instead of the one just extracted.
        if pdf_url == "":
            sys.stderr.write(u"Parse html error\n")
            sys.exit(1)
        pdf = wc.get(loske_base_url + pdf_url)
        if pdf == "":
            sys.stderr.write(u"Could not download %s\n" % (loske_base_url + pdf_url))
            sys.exit(1)
        parse_loske_pdf(pdf, is_ipp)
    # BUG FIX: record the update against the correct source; the old code
    # always stamped last_update_ipp, so last_update_fmi never advanced.
    if is_ipp:
        config["last_update_ipp"] = datetime.date.today()
    else:
        config["last_update_fmi"] = datetime.date.today()
def dow_to_int(dow):
    """Map a German weekday name found anywhere in *dow* (case-insensitive
    substring match) to 0 (Montag) .. 4 (Freitag); return -1 otherwise.

    The original compiled five separate regexes on every call; a single
    loop over the weekday names is equivalent and shorter.
    """
    weekdays = (u"Montag", u"Dienstag", u"Mittwoch", u"Donnerstag",
                u"Freitag")
    for index, name in enumerate(weekdays):
        if re.search(name, dow, re.IGNORECASE | re.UNICODE):
            return index
    return -1
def get_new_mensa():
    """Scrape the Studentenwerk Mensa HTML menu for the configured location
    and add each day's meals — with the price for the configured person
    class — to config["meals"], then stamp last_update_mensa."""
    date_re = re.compile(u".., (\d{1,2})\.(\d{1,2})\.(\d{1,4})", re.UNICODE)
    desc_nl_re = re.compile(u"(?:(.*?)(?:<br>))*", re.UNICODE)
    desc_nl_rep_re = re.compile(u"<br>", re.UNICODE)
    # Additive/diet tags like "(1,2,v)" to strip from descriptions.
    foodtags_re = re.compile(
        ur"(?:\s*\([0-9vfSR][0-9]?(?:,[0-9vfSR][0-9]?)*\))", re.UNICODE)
    wc = WebCursor();
    mensa_url = mensa.format(mensa_id[config["mensa_location"]])
    mensa_html = wc.get(mensa_url)
    if mensa_html == "":
        print >>sys.stderr, u"Could not download" , mensa_url
        sys.exit(1)
    soup = BeautifulSoup(mensa_html)
    # One <table class="menu"> per day.
    days = soup.findAll(u"table", attrs={u"class": u"menu"})
    for d in days:
        headline = d.findAll(u"td", attrs={u"class": u"headline"})
        if len(headline) < 2:
            error("Mensa parse error.")
        headline = headline[1]
        strhl = headline.findAll(u"strong")
        if len(strhl) < 1:
            error("Mensa parse error.")
        ret = date_re.search(strhl[0].text)
        if not ret:
            error("Mensa parse error.")
        day, month, year = ret.groups()
        now = datetime.date(int(year), int(month), int(day))
        meals = d.findAll(u"tr")
        for m in meals:
            # Skip the per-day headline row.
            if len(m.findAll(u"td", attrs={u"class": u"headline"})) > 0:
                continue
            typ = m.findAll(u"td", attrs={u"class": u"gericht"})
            if len(typ) < 1:
                error("Mensa parse error.")
            # Look up the price from the category name (e.g. "Tagesgericht 1")
            # for the configured person class (student/employee/guest).
            price = None
            for match, value in mensa_price_mapping.items():
                ret = re.search(match, typ[0].text)
                if ret:
                    price = value[config["person"]]
                    break
            desc = m.findAll(u"td", attrs={u"class": u"beschreibung"})
            if len(desc) < 1:
                error("Mensa parse error.")
            desc = desc[0].findAll(u"span", attrs={u"style": u"float:left"})
            if len(desc) < 1:
                error("Mensa parse error.")
            t = desc[0].text
            t = t.strip()
            t = foodtags_re.sub(u'', t)
            t = re.sub(r'[Z|z]igeuner', u"Südländer Typ II", t)
            # Collapse whitespace and append the price, when known.
            t = t.split()
            if price is not None:
                t.append(u"(%.2f €)" % (price,))
            t = ' '.join(t)
            try:
                tmp = config["meals"][now]
                config["meals"][now].append((TYPE_MENSA, t))
            except KeyError, e:
                config["meals"][now] = [(TYPE_MENSA, t)]
    config["last_update_mensa"] = datetime.date.today()
def update_all():
    """Re-fetch every enabled menu source and persist the config.

    Clears the meal cache first, then pulls Mensa, IPP and FMI menus
    according to config["locations"].
    """
    # stderr.write works on both Python 2 and 3, unlike ``print >>``.
    sys.stderr.write(u"Updating...\n")
    config["meals"] = {}
    if TYPE_MENSA in config["locations"]:
        get_new_mensa()
    if TYPE_IPP in config["locations"]:
        get_new_loske(is_ipp=True)
    if TYPE_FMI in config["locations"]:
        get_new_loske(is_ipp=False)
    save_config(config_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Command line interface to the Mensa, Max-Planck-Institute Garching
and FMI Garching.
DATE can be a date in german format (e.g. 14.04.2011, 4.2.2010, ...)
DATE can also be a day and month (e.g. 14.4.)
DATE can also be only a day, month and year will be the current year
and month
You can also specify a german weekday (e.g. Montag or mo)
If date is 'all' then all saved meals are displayed, 'morgen' displays
the meal of the next day
'''),
epilog='Warning! Extremely hacky software!')
def is_a_date(string):
    """argparse type-checker for the DATE argument.

    Returns a datetime.date or the literal string "all"; raises
    argparse.ArgumentTypeError for anything unrecognised. Accepted forms:
    d.m.yyyy, d.m (current year), bare day-of-month, 'all', 'morgen'
    (tomorrow), or a German weekday name/abbreviation (resolved to the
    next such weekday, today included).
    """
    date_re = re.compile(u'(\d{1,2})\.(\d{1,2})\.(\d{1,4})', re.UNICODE)
    shortdate_re = re.compile(u'(\d{1,2})\.(\d{1,2})', re.UNICODE)
    day_re = re.compile(u'(mo|di|mi|do|fr|sa|so|Montag|Dienstag|Mittwoch' \
                        u'|Donnerstag|Freitag|Samstag|Sonntag)',
                        re.IGNORECASE | re.UNICODE)
    daynum_re = re.compile(u'(\d{1,2})', re.UNICODE)
    all_re = re.compile(u'all', re.IGNORECASE | re.UNICODE)
    morgen_re = re.compile(u'morgen', re.IGNORECASE | re.UNICODE)
    # (The old unused ``matched`` flag has been removed.)
    r = date_re.search(string)
    if r:
        day, month, year = r.groups()
        return datetime.date(int(year), int(month), int(day))
    r = shortdate_re.search(string)
    if r:
        day, month = r.groups()
        return datetime.date(datetime.date.today().year,
                             int(month), int(day))
    r = daynum_re.search(string)
    if r:
        day = r.groups()[0]
        return datetime.date(datetime.date.today().year,
                             datetime.date.today().month, int(day))
    r = all_re.search(string)
    if r:
        return "all"
    r = morgen_re.search(string)
    if r:
        return datetime.date.today() + datetime.timedelta(1)
    r = day_re.search(string)
    if r:
        wd = r.groups()[0].lower()
        # First two letters identify the weekday; anything else is Sonntag.
        dow_map = {"mo": 0, "di": 1, "mi": 2, "do": 3, "fr": 4, "sa": 5}
        dowint = dow_map.get(wd[:2], 6)
        ret = datetime.date.today()
        # Walk forward (today included) to the next matching weekday.
        for _ in range(7):
            if ret.weekday() == dowint:
                break
            ret += datetime.timedelta(days=1)
        return ret
    # Nothing matched: reject the argument.
    msg = u"%s is not a valid date, day of week (german) or 'all'" \
          % (string,)
    raise argparse.ArgumentTypeError(msg)
parser.add_argument('-u', action='store_true', default=False,
help='Update the database')
parser.add_argument('-p', dest='person', default='',
help="Personal status (student|employee|guest)")
parser.add_argument('--ml', dest='mensa_location', choices=mensa_id.keys(),
help="Choose your mensa location")
parser.add_argument('-l', '--locations', metavar="L1:L2:...",
help="Locations to print " \
"({0})".format('|'.join(type_translation.keys())))
parser.add_argument('--na', '--no-autoupdate', dest='autoupdate',
default=True, action='store_false',
help="Disable autoupdate (useful when no internet "
"connection is available)")
parser.add_argument('date',
metavar='DATE',
nargs='?',
type=is_a_date,
help='Lookup meals for specific date')
opts = parser.parse_args()
if os.path.isfile(config_file):
load_config(config_file)
elif not opts.u:
print >>sys.stderr, bcolors.FAIL + "No configfile found.\n" + \
bcolors.ENDC + "You need to update and perform basic " \
"setup.\nFor example if you are a student in Garching run:\n" \
+ sys.argv[0] + " -u -p student --ml garching -l MEN:IPP\n" \
"See " + sys.argv[0] + " -h for more info"
sys.exit(1)
if opts.person:
if opts.person == "student":
config["person"] = 0
elif opts.person == "employee":
config["person"] = 1
elif opts.person == "guest":
config["person"] = 2
else:
print >>sys.stderr, bcolors.FAIL + "Unknown option given to " \
"-p" + bcolors.ENDC
sys.exit(1)
if opts.mensa_location:
config["mensa_location"] = opts.mensa_location
if opts.locations:
config["locations"] = [type_translation[l] for l in \
opts.locations.split(':') \
if l in type_translation]
save_config(config_file)
if "person" not in config:
config["person"] = 0
if "mensa_location" not in config:
config["mensa_location"] = "arcisstr"
if "locations" not in config:
config["locations"] = type_translation.values()
save_config(config_file)
do_update = False
if opts.u or opts.person or opts.mensa_location:
do_update = True
current_week = datetime.date.today().isocalendar()[1]
try:
if (opts.autoupdate and
((current_week != config["last_update_mensa"].isocalendar()[1] and
TYPE_MENSA in config["locations"]) or
(current_week != config["last_update_ipp"].isocalendar()[1] and
TYPE_IPP in config["locations"]) or
(current_week != config["last_update_fmi"].isocalendar()[1] and
TYPE_FMI in config["locations"]))):
do_update = True
except KeyError:
do_update = True
if do_update:
print >>sys.stderr, (bcolors.WARNING + "Last update was not in this "
"week." + bcolors.ENDC)
update_all()
if opts.date is None:
dump_one_day_meals(datetime.date.today())
sys.exit(0)
if opts.date == "all":
dump_all_meals()
sys.exit(0)
dump_one_day_meals(opts.date)
|
from hypothesis import given, assume
from hypothesis.strategies import text, just, integers, tuples, sampled_from
import nose
from nose.tools import raises
from repeatedmistakes.strategies import *
"""
Test all of the strategies in the strategies module for common functionality
"""
# A characterset is any 2-character string; a "valid history" is that
# characterset plus two equal-length strings drawn from it.
# NOTE(review): text(min_size=2, max_size=2) can repeat a character
# (e.g. "aa"), which makes C == D — confirm the strategies tolerate that.
two_characters = text(min_size=2, max_size=2)
valid_history_strategy = two_characters.flatmap(
    lambda chars: integers(min_value=0).flatmap(
        lambda n: tuples(just(chars), text(alphabet=chars, min_size=n, max_size=n),
                 text(alphabet=chars, min_size=n, max_size=n)
        )
    )
)
@given(history = valid_history_strategy, strategy = sampled_from(strategy_list))
def test_strategy_passedCharactersetAndHistories_returnsCharacterInCharacterset(history, strategy):
    """Test that if we pass a characterset and histories to a strategy, it only returns actions in that characterset"""
    characterset, own_moves, opponent_moves = history
    own_moves = list(own_moves)
    opponent_moves = list(opponent_moves)
    # Build a strategy instance whose C/D actions come from the characterset
    player = strategy(C=characterset[0], D=characterset[1])
    player.history = own_moves
    # Whatever the strategy decides, it must be one of its two actions
    assert player.next_move(opponent_moves) in characterset
# Characterset plus two histories of *unconstrained* (possibly unequal)
# lengths, used to trigger HistoryLengthMismatch below.
# NOTE: the "mistmatch" typo in the name is kept — it is referenced by the
# @given decorator of the following test.
length_mistmatch_strategy = two_characters.flatmap(
    lambda chars: tuples(just(chars), text(alphabet=chars), text(alphabet=chars))
)
@raises(HistoryLengthMismatch)
@given(history = length_mistmatch_strategy, strategy = sampled_from(strategy_list))
def test_strategy_historyLengthMismatch_raisesHistoryLengthMistmatchException(history, strategy):
    """Test that if the history length doesn't match the opponent history length, an exception is raised"""
    characterset = history[0]
    strat_history = history[1]
    opponent_history = history[2]
    # Assume the two *histories* differ in length.  The original compared
    # len(history) — the 3-tuple, always 3 — with len(opponent_history),
    # which discarded the wrong cases and kept mismatched ones only by
    # accident; compare the actual histories instead.
    assume(len(strat_history) != len(opponent_history))
    # Create an object with the correct characterset
    test_object = strategy(C=characterset[0], D=characterset[1])
    # Set up the object's history
    test_object.history = strat_history
    # Try and get the next move which should raise an error
    test_object.next_move(opponent_history)
# Characterset, own history from that characterset, and an opponent history
# of the same length drawn from a different characterset.
# NOTE(review): `alphabet=two_characters` passes a strategy generating
# 2-character *strings* where hypothesis' text() expects single characters
# — confirm this produces the intended alphabets.
different_characterset_strategy = two_characters.flatmap(
    lambda chars: integers(min_value=0).flatmap(
        lambda n: tuples(just(chars), text(alphabet=chars, min_size=n, max_size=n),
                 text(alphabet=two_characters, min_size=n, max_size=n))
    )
)
@raises(InvalidActionError)
@given(history = different_characterset_strategy, strategy = sampled_from(strategy_list))
def test_strategy_invalidCharactersPassed_raisesInvalidActionError(history, strategy):
    """Test that if the opponent's history doesn't match the characterset, an InvalidActionError is thrown."""
    characterset, own_moves, opponent_moves = history
    # Only meaningful when the opponent's moves really use other characters
    assume(set(characterset) != set(opponent_moves))
    player = strategy(C=characterset[0], D=characterset[1])
    player.history = own_moves
    # Feeding a history in a foreign characterset must raise
    player.next_move(opponent_moves)
# Two independent charactersets: the first configures the strategy, the
# second supplies the (wrong-alphabet) history assigned to it.
different_history_characterset_strategy = tuples(two_characters, two_characters).flatmap(
    lambda tup: tuples(just(tup[0]), text(alphabet=tup[1])))
@raises(InvalidActionError)
@given(history = different_history_characterset_strategy, strategy = sampled_from(strategy_list))
def test_strategy_historyWithWrongCharacterset_raisesInvalidActionError(history, strategy):
    """Test that if you pass a history with the wrong characterset, an InvalidActionError is thrown."""
    characterset, bad_history = history[0], history[1]
    # Only meaningful when the history uses characters outside the characterset
    assume(not set(bad_history) <= set(characterset))
    player = strategy(C=characterset[0], D=characterset[1])
    # Assigning a history in the wrong characterset must raise
    player.history = bad_history
@given(history = two_characters.flatmap(lambda chars: tuples(just(chars), text(alphabet=chars))),
       strategy = sampled_from(strategy_list))
def test_strategy_passHistoryThenReset_historyIsEmpty(history, strategy):
    """Test that regardless of characterset or history, reset clears the history"""
    characterset, moves = history
    player = strategy(C=characterset[0], D=characterset[1])
    player.history = moves
    player.reset()
    # reset() must always leave an empty history
    assert player.history == []
"""
Individual strategy tests
"""
@given(valid_history_strategy)
def test_AllC_passAnyHistory_ReturnsC(s):
    """Test that AllC always returns a C regardless of history or input"""
    characterset, own_moves, opponent_moves = s
    player = AllC(C=characterset[0], D=characterset[1])
    player.history = own_moves
    # AllC cooperates unconditionally
    assert player.next_move(opponent_moves) == player.C
@given(valid_history_strategy)
def test_AllD_passAnyHistory_ReturnsD(s):
    """Test that AllD always returns a D regardless of history or input"""
    characterset, own_moves, opponent_moves = s
    player = AllD(C=characterset[0], D=characterset[1])
    player.history = own_moves
    # AllD defects unconditionally
    assert player.next_move(opponent_moves) == player.D
if __name__ == '__main__':
    # Run this module's tests through nose when executed directly.
    nose.main()
|
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.convert.azw3_output_ui import Ui_Form
from calibre.gui2.convert import Widget
font_family_model = None
class PluginWidget(Widget, Ui_Form):
    """Conversion options panel for the AZW3 output format."""

    TITLE = _('AZW3 Output')
    HELP = _('Options specific to')+' AZW3 '+_('output')
    COMMIT_NAME = 'azw3_output'
    ICON = I('mimetypes/mobi.png')

    def __init__(self, parent, get_option, get_help, db=None, book_id=None):
        # Names of the conversion options this widget exposes.
        option_names = [
            'prefer_author_sort', 'toc_title',
            'mobi_toc_at_start',
            'dont_compress', 'no_inline_toc', 'share_not_sync',
        ]
        Widget.__init__(self, parent, option_names)
        self.db = db
        self.book_id = book_id
        self.initialize_options(get_option, get_help, db, book_id)
|
def trim_cloud_list(clouds):
    """Remove every cloud fully contained in another cloud.

    Mutates and returns the same list.  After each removal the scan
    restarts from the beginning, matching the original recursive version.
    """
    restart = True
    while restart:
        restart = False
        for i in range(len(clouds)):
            for j in range(i + 1, len(clouds)):
                if clouds[i].contains_cloud(clouds[j]):
                    clouds.remove(clouds[j])
                    restart = True
                    break
                if clouds[j].contains_cloud(clouds[i]):
                    clouds.remove(clouds[i])
                    restart = True
                    break
            if restart:
                break
    return clouds
def combine_clouds(clouds):
    """Merge pairs of heavily-overlapping clouds into their bounding cloud.

    Mutates and returns the same list; restarts recursively after each
    merge, as before.

    Bug fix: the original removed ``clouds[i]`` and then ``clouds[j]`` *by
    index* after the list had already shifted (and after appending the
    merged cloud), so the second removal could delete the wrong element —
    sometimes the freshly merged cloud itself.  The pair is now captured
    before any mutation.
    """
    i = 0
    while i < len(clouds):
        j = i + 1
        while j < len(clouds):
            first, second = clouds[i], clouds[j]
            intersect_area = first.get_intersection_area(second)
            if intersect_area > 0:
                avg_area = (first.get_area() + second.get_area()) / 2
                area_threshold = avg_area * 2
                if intersect_area > area_threshold:
                    merged = get_cloud_combination(first, second)
                    clouds.remove(first)
                    clouds.remove(second)
                    clouds.append(merged)
                    return combine_clouds(clouds)
            j += 1
        i += 1
    return clouds
def get_cloud_combination(cloud1, cloud2):
    """Return a new Cloud that is the bounding box of the two input clouds."""
    left = min(cloud1.x, cloud2.x)
    top = min(cloud1.y, cloud2.y)
    right = max(cloud1.x + cloud1.w, cloud2.x + cloud2.w)
    bottom = max(cloud1.y + cloud1.h, cloud2.y + cloud2.h)
    return Cloud(left, top, right - left, bottom - top)
class Cloud:
    """Axis-aligned rectangle (x, y, w, h) used to track cloud regions."""

    # Class-level defaults kept for backward compatibility with any code
    # that reads these attributes without construction.
    x = 0
    y = 0
    w = 0
    h = 0

    def __init__(self, x, y, w, h):
        # The original class also defined a zero-argument __init__ that was
        # silently overridden by this one (dead code); it has been removed.
        self.x = x
        self.y = y
        self.w = w
        self.h = h

    def contains(self, x, y):
        """Return True if point (x, y) lies inside this cloud
        (left/top edges inclusive, right/bottom edges exclusive)."""
        return x >= self.x and y >= self.y and \
               x < self.x + self.w and y < self.y + self.h

    def contains_cloud(self, cloud):
        """Return True if `cloud` lies entirely within this cloud."""
        return self.x <= cloud.x and \
               self.y <= cloud.y and \
               self.x + self.w >= cloud.x + cloud.w and \
               self.y + self.h >= cloud.y + cloud.h

    def intersects(self, cloud):
        """Return True if a corner of either cloud lies inside the other.

        Bug fix: the bottom-right corner probes used `h` as the x offset
        (`cloud.x + cloud.h`, `self.x + self.h`); they now correctly use `w`.
        NOTE(review): corner probing still misses the "cross" overlap case
        where two rectangles intersect without either containing a corner
        of the other — confirm whether that case can occur here.
        """
        return self.contains(cloud.x, cloud.y) or \
               self.contains(cloud.x + cloud.w, cloud.y) or \
               self.contains(cloud.x, cloud.y + cloud.h) or \
               self.contains(cloud.x + cloud.w, cloud.y + cloud.h) or \
               cloud.contains(self.x, self.y) or \
               cloud.contains(self.x + self.w, self.y) or \
               cloud.contains(self.x, self.y + self.h) or \
               cloud.contains(self.x + self.w, self.y + self.h)

    def get_intersection_area(self, cloud):
        """Return 0 when the clouds do not intersect, otherwise the area of
        the *union bounding box* of the two clouds.

        NOTE(review): despite its name, this method has always computed the
        bounding-box (union) area — a true intersection area could never
        exceed combine_clouds' ``2 * avg_area`` threshold, so that caller
        depends on this behaviour and it is deliberately kept unchanged.
        """
        if not self.intersects(cloud):
            return 0
        xmin = self.x if self.x < cloud.x else cloud.x
        xmax1 = self.x + self.w
        xmax2 = cloud.x + cloud.w
        xmax = xmax1 if xmax1 > xmax2 else xmax2
        ymin = self.y if self.y < cloud.y else cloud.y
        ymax1 = self.y + self.h
        ymax2 = cloud.y + cloud.h
        ymax = ymax1 if ymax1 > ymax2 else ymax2
        w = xmax - xmin
        h = ymax - ymin
        return w * h

    def get_point(self):
        """Top-left corner as (x, y)."""
        return (self.x, self.y)

    def get_opposite_point(self):
        """Bottom-right corner as (x + w, y + h)."""
        return (self.x + self.w, self.y + self.h)

    def get_area(self):
        """Rectangle area, w * h."""
        return self.w * self.h
|
import os
import unittest
import ansibleinventorygrapher.inventory
class TestVault(unittest.TestCase):
    """Tests for reading vault-encrypted variables through the inventory manager."""

    def test_vault_password_file(self):
        """A single correct vault password file decrypts the group variable."""
        invfile = os.path.join('test', 'vault', 'inventory')
        vault_password_files = [os.path.join('test', 'vault', 'vaultpass')]
        inventory_mgr = ansibleinventorygrapher.inventory.InventoryManager(invfile, False, vault_password_files)
        hostname = "web-01"
        host = inventory_mgr.inventory.get_host(hostname)
        group = inventory_mgr.inventory.get_group("web")
        the_vars = ansibleinventorygrapher.tidy_all_the_variables(host, inventory_mgr)
        self.assertEqual(the_vars[group]["text"], "hello")

    def test_multiple_vault_password_files(self):
        """Decryption succeeds when one of several password files is correct.

        Renamed: this method was previously also called
        test_vault_password_file, which silently shadowed the test above so
        it never ran.
        """
        invfile = os.path.join('test', 'vault', 'inventory')
        vault_password_files = [os.path.join('test', 'vault', 'vaultpass'),
                                os.path.join('test', 'vault', 'notthevaultpass')]
        inventory_mgr = ansibleinventorygrapher.inventory.InventoryManager(invfile, False, vault_password_files)
        hostname = "web-01"
        host = inventory_mgr.inventory.get_host(hostname)
        group = inventory_mgr.inventory.get_group("web")
        the_vars = ansibleinventorygrapher.tidy_all_the_variables(host, inventory_mgr)
        self.assertEqual(the_vars[group]["text"], "hello")

    def test_vault_ids(self):
        """A labelled vault id ("another_vault@<file>") decrypts host vars."""
        invfile = os.path.join('test', 'vault_ids', 'inventory')
        vault_ids = ['another_vault@' + os.path.join('test', 'vault_ids', 'vaultpass')]
        inventory_mgr = ansibleinventorygrapher.inventory.InventoryManager(invfile, False, [], vault_ids)
        hostname = "web-01"
        host = inventory_mgr.inventory.get_host(hostname)
        the_vars = ansibleinventorygrapher.tidy_all_the_variables(host, inventory_mgr)
        self.assertEqual(the_vars[host]["hello"], "world")

    def test_no_vault_pass(self):
        """Without any vault secret, reading vaulted vars must raise NoVaultSecretFound."""
        invfile = os.path.join('test', 'vault', 'inventory')
        try:
            inventory_mgr = ansibleinventorygrapher.inventory.InventoryManager(invfile, False, [])
            hostname = "web-01"
            host = inventory_mgr.inventory.get_host(hostname)
            the_vars = ansibleinventorygrapher.tidy_all_the_variables(host, inventory_mgr)
        except ansibleinventorygrapher.inventory.NoVaultSecretFound:
            pass  # expected behaviour
        else:
            # Clearer than the original bare `raise RuntimeError` sentinel.
            self.fail("NoVaultSecretFound was not raised")

    def test_inline_vault_without_password(self):
        """Inline-vaulted values are skipped (not decrypted) when no secret is given."""
        invfile = os.path.join('test', 'vault', 'inventory')
        inventory_mgr = ansibleinventorygrapher.inventory.InventoryManager(invfile, False, [])
        hostname = "inline-01"
        host = inventory_mgr.inventory.get_host(hostname)
        group = inventory_mgr.inventory.get_group("inline")
        the_vars = ansibleinventorygrapher.tidy_all_the_variables(host, inventory_mgr)
        self.assertTrue('text' in the_vars[group])
        self.assertFalse('text' in the_vars[host])
|
from .solution_level_clustering import MetaboliteLevelDiseaseClustering
|
from pyramid.response import Response
from pyramid.view import view_config
from qrcode import *
@view_config(route_name='qrcode')
def qrcode(request):
    """Render a PNG QR code that links to the paste identified in the URL."""
    paste_id = request.matchdict['uri']
    code = QRCode(version=3, error_correction=ERROR_CORRECT_L)
    code.add_data(request.route_url('view_paste', pasteID=paste_id))
    code.make()  # Generate the QRCode itself
    # make_image() yields a PIL.Image.Image object
    image = code.make_image()
    response = Response()
    response.status_int = 200
    response.content_type = "image/png"
    image.save(response.body_file, 'PNG')
    return response
|
import base64
from cipher import Cipher
from Crypto import Cipher as CryptoCipher
from Crypto import Random
import hashlib
BS = 16  # AES block size in bytes


def pad(s):
    """Apply PKCS#7-style padding so that len(s) is a multiple of BS.

    (Converted from a lambda assignment — PEP 8 E731 — to a documented def;
    behaviour is unchanged.)
    """
    n = BS - len(s) % BS
    return s + n * chr(n)


def unpad(s):
    """Strip the padding applied by pad() (last char encodes the pad length)."""
    return s[:-ord(s[len(s) - 1:])]
class AES(Cipher):
    """AES-CBC cipher with a random per-message IV and base64-encoded output.

    The base Cipher class is expected to populate self.keys["priv"]["key"]
    with valid AES key material (keygen() returns a 32-byte digest, i.e.
    AES-256 material).
    """

    # Generates a key using a hash of some passphrase
    @staticmethod
    def keygen(passphrase, secure=128):
        """Derive an AES key from a passphrase.

        The `secure` argument is kept for interface compatibility but is
        currently unused: the key is always the 32-byte SHA-256 digest of
        the passphrase.  (A dead commented-out SHA-256/512 selection block
        was removed.)
        """
        return hashlib.sha256(passphrase).digest()

    def encrypt(self, raw):
        """Pad `raw`, encrypt it under a fresh random IV and return
        base64(iv + ciphertext)."""
        raw = pad(str(raw))
        iv = Random.new().read(CryptoCipher.AES.block_size)
        cipher = CryptoCipher.AES.new(
            self.keys["priv"]["key"],
            CryptoCipher.AES.MODE_CBC,
            iv)
        return base64.b64encode(iv + cipher.encrypt(raw))

    def decrypt(self, enc):
        """Invert encrypt(): base64-decode, split off the 16-byte IV,
        decrypt and unpad."""
        enc = base64.b64decode(enc)
        iv = enc[:16]
        cipher = CryptoCipher.AES.new(self.keys["priv"]["key"],
                                      CryptoCipher.AES.MODE_CBC,
                                      iv)
        return unpad(cipher.decrypt(enc[16:]))
|
"""
Often you can control monitors simply from the Monitor Center in the
PsychoPy application, but you can also create/control them using scripts.
This allow you to override certain values for the current run: :
mon = monitors.Monitor('testMonitor') # load the testMonitor
mon.setDistance(120) # change distance in this run (don't save)
Or you can load a specific calibration of that monitor:
mon.setCurrent(-1) is the last (alphabetical) calibration
mon.setCurrent('2015_05_21 11:42') # use a specific named calibration
More info is available at http: //www.psychopy.org/api/monitors.html
"""
from __future__ import absolute_import, division, print_function
from psychopy import monitors
# Print the configured viewing distance of every known monitor.
names = monitors.getAllMonitors()
for monitor_name in names:
    monitor = monitors.Monitor(monitor_name)
    print(monitor.getDistance())
|
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2016, LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@gmail.com"
import sys
def getLg(soilClass):
    '''
    Return the length "lg" beyond which soil movement can be considered
    completely uncorrelated.

    :param soilClass: soil class (A, B, C, D or E).

    Unknown soil classes are reported on stderr and fall back to 300,
    matching the original behaviour.
    '''
    lgValues = {"A": 600, "B": 500, "C": 400, "D": 300, "E": 500}
    if soilClass not in lgValues:
        sys.stderr.write("Unknown soil type: " + soilClass)
        return 300
    return lgValues[soilClass]
def getUgd(soilClass, quakeZone, bridgeClass):
    '''
    Return the design value for soil displacement (m).

    :param soilClass: A, B, C, D or E.
    :param quakeZone: "ZI", "Z2", "Z3a" or "Z3b" (the original docstring
        listed ZII/ZIIa/ZIIIb, which did not match the strings the code
        actually accepts).
    :param bridgeClass: "COI", "COII" or "COIII".

    Unknown soil classes or quake zones are reported on stderr and fall
    back to 17e-2, as before.  Bridge classes COII and COIII scale the
    result by 1.2 and 1.4 respectively.
    '''
    ugdValues = {
        "A": {"ZI": 2e-2, "Z2": 4e-2, "Z3a": 5e-2, "Z3b": 6e-2},
        "B": {"ZI": 4e-2, "Z2": 6e-2, "Z3a": 8e-2, "Z3b": 10e-2},
        "C": {"ZI": 5e-2, "Z2": 7e-2, "Z3a": 9e-2, "Z3b": 11e-2},
        "D": {"ZI": 6e-2, "Z2": 11e-2, "Z3a": 14e-2, "Z3b": 17e-2},
        "E": {"ZI": 4e-2, "Z2": 7e-2, "Z3a": 9e-2, "Z3b": 11e-2},
    }
    retval = 17e-2  # fallback when soil class or quake zone is unknown
    if soilClass not in ugdValues:
        sys.stderr.write("Unknown soil type: " + soilClass)
    elif quakeZone not in ugdValues[soilClass]:
        sys.stderr.write("Unknown quake zone: " + quakeZone)
    else:
        retval = ugdValues[soilClass][quakeZone]
    if bridgeClass == "COII":
        retval *= 1.2
    elif bridgeClass == "COIII":
        retval *= 1.4
    return retval
def getBminPontFlotant(dAbutFixedPoint,soilClass,quakeZone,bridgeClass):
    '''
    Return the minimal dimension of the abutment support needed to avoid
    the bridge deck falling during a quake (see "Évaluation parasismique
    des ponts-routes existants", Office fédéral des routes, page 48).

    dAbutFixedPoint: distance between the abutment and the fixed point.
    soilClass: A, B, C, D or E.
    quakeZone: ZI, Z2, Z3a or Z3b.
    bridgeClass: COI, COII or COIII.
    '''
    lg = getLg(soilClass)
    ugd = getUgd(soilClass, quakeZone, bridgeClass)
    # Amplification grows with distance from the fixed point, capped at 3.3.
    amplification = min(1.3 + 2 * dAbutFixedPoint / lg, 3.3)
    return 0.2 + amplification * ugd
def getBminPontAppuiFixe(l,a,soilClass,quakeZone,bridgeClass):
    '''
    Return the minimal dimension of the abutment support needed to avoid
    the bridge deck falling during a quake (see "Évaluation parasismique
    des ponts-routes existants", Office fédéral des routes, page 49).

    l: deck length (distance between free and fixed abutments).
    a: expansion joint gap.
    soilClass: A, B, C, D or E.
    quakeZone: ZI, Z2, Z3a or Z3b.
    bridgeClass: COI, COII or COIII.
    '''
    lg = getLg(soilClass)
    ugd = getUgd(soilClass, quakeZone, bridgeClass)
    # Sliding factor grows with deck length, capped at 2.
    slidingFactor = min(2 * l / lg, 2)
    return 0.2 + a + slidingFactor * ugd
|
from .pysoroban import cli
|
from BeautifulSoup import BeautifulSoup
from urllib2 import urlopen, URLError, HTTPError, HTTPCookieProcessor, build_opener, install_opener
class ParserOhio:
    """Screen-scraper for current contract listings on procure.ohio.gov
    (Python 2 / BeautifulSoup 3)."""
    def __init__(self):
        # Landing page that lists all current contracts.
        self.baseurl = 'http://procure.ohio.gov/proc/'
        self.listurl = self.baseurl + 'searchCurContracts.asp'
    def parse_list(self):
        """Walk the contract listing table and print the detail URL of each RFP row."""
        soup = BeautifulSoup(urlopen(self.listurl).read())
        # The listing is laid out with fixed-width tables; the second
        # width=590 table holds the contract rows.
        tables = soup.findAll('table', width='590')
        targetTable = tables[1]
        trs = targetTable.findAll('tr')
        # Skip the two header rows.
        for tr in trs[2:]:
            tds = tr.findAll('td')
            title = tds[0].getText()
            contract_type = tds[1].getText()
            market_type = tds[2].getText()
            index_num = tds[3].getText()
            contract_num = tds[4].getText()
            effective_date = tds[5].getText()
            expiration_date = tds[6].getText()
            vendor = tds[7].getText()
            aTag = tr.find('a')
            rfpUrl = aTag.get('href')
            # Only RFP-type contracts are of interest here.
            if contract_type.lower() != "rfp":
                continue
            print rfpUrl
            # NOTE(review): the disabled call below references self.base and
            # parser_rpf, neither of which exists (baseurl / parse_rfp) —
            # fix before re-enabling.
            #self.parser_rpf(self.base + rfpUrl)
    def parse_rfp(self, url):
        """Print every row of the first full-width table on a contract
        detail page (work in progress; tds is extracted but unused)."""
        soup = BeautifulSoup(urlopen(url).read())
        tables = soup.findAll('table', width='100%')
        targetTable = tables[0]
        trs = targetTable.findAll('tr')
        for tr in trs:
            print tr
            tds = tr.findAll('td')
if __name__ == '__main__':
    # Ad-hoc manual test: fetch one known contract detail page.
    parser = ParserOhio()
    #parser.parse_list()
    parser.parse_rfp('http://procure.ohio.gov/proc/viewContractsAwards.asp?contractID=11103')
|
import struct, imghdr
def get_image_size( fname ):
    '''Determine the image type of fname and return its (width, height).

    Supports PNG, GIF and JPEG; returns None for other formats, truncated
    files, or malformed headers.  The format is now detected directly from
    the file's magic bytes instead of via the imghdr module, which is
    deprecated since Python 3.11 and removed in 3.13.
    from draco'''
    with open(fname, 'rb') as fhandle:
        head = fhandle.read(24)
        if len(head) != 24:
            return
        if head.startswith(b'\x89PNG\r\n\x1a\n'):
            # PNG: IHDR width/height are big-endian ints at bytes 16-24.
            width, height = struct.unpack('>ii', head[16:24])
        elif head[:6] in (b'GIF87a', b'GIF89a'):
            # GIF: logical screen size, little-endian shorts at offset 6.
            width, height = struct.unpack('<HH', head[6:10])
        elif head[:2] == b'\xff\xd8':
            # JPEG: scan markers until a SOFn frame header is found.
            try:
                fhandle.seek(0)  # Read 0xff next
                size = 2
                ftype = 0
                while not 0xc0 <= ftype <= 0xcf:
                    fhandle.seek(size, 1)
                    byte = fhandle.read(1)
                    while ord(byte) == 0xff:
                        byte = fhandle.read(1)
                    ftype = ord(byte)
                    size = struct.unpack('>H', fhandle.read(2))[0] - 2
                # We are at a SOFn block
                fhandle.seek(1, 1)  # Skip `precision' byte.
                height, width = struct.unpack('>HH', fhandle.read(4))
            except Exception:  # IGNORE:W0703 — any parse failure means "unknown size"
                return
        else:
            return
    return width, height
|
"""Postprocessors about additional items in minimum needs."""
from safe.definitions import minimum_needs_fields, displaced_field
from safe.utilities.i18n import tr
from safe.definitions.concepts import concepts
from safe.definitions.fields import (
pregnant_displaced_count_field, lactating_displaced_count_field)
from safe.definitions.fields import additional_rice_count_field
from safe.definitions.post_processors.post_processor_inputs import (
constant_input_type,
field_input_type,
needs_profile_input_type)
from safe.definitions.post_processors.post_processors import (
formula_process,
multiply,
function_process)
__copyright__ = "Copyright 2017, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
# Extra energy intake needed by a pregnant or lactating woman.
EXTRA_CALORIES_NEEDED_PER_DAY = 500  # in KKal / day
DAY_IN_A_WEEK = 7  # in day / week
KG_RICE_PER_CALORIES = 0.1 / 129  # in KKal (100 gram gives 129 KKal calories)
# Post-processor definition consumed by the InaSAFE processing framework:
# additional_rice = (pregnant_displaced + lactating_displaced) *
#                   additional_rice_ratio (kg of rice per woman per week).
post_processor_additional_rice = {
    'key': 'post_processor_additional_rice',
    'name': tr(
        'Additional Weekly Rice kg for Pregnant and Lactating Women Post '
        'Processor'
    ),
    'description': tr(
        'A post processor to calculate additional rice for pregnant and '
        'lactating women who are displaced. '
        '"Displaced" is defined as: {displaced_concept}').format(
        displaced_concept=concepts['displaced_people']['description']),
    'input': {
        'pregnant_displaced': [
            {
                'value': pregnant_displaced_count_field,
                'type': field_input_type,
            }
        ],
        'lactating_displaced': [
            {
                'value': lactating_displaced_count_field,
                'type': field_input_type,
            }
        ],
        'additional_rice_ratio':
            {
                'type': constant_input_type,
                'value': (
                    EXTRA_CALORIES_NEEDED_PER_DAY *
                    DAY_IN_A_WEEK *
                    KG_RICE_PER_CALORIES),
            }
    },
    'output': {
        # The formula:
        # See: https://github.com/inasafe/inasafe/issues/3607
        # for reference
        #
        # displaced_population * (pregnant_rate + breastfeeding_rate) *
        # extra_calories_needed_per_day * day_in_week * kg_rice_per_calories
        #
        # The number:
        # displaced_population * (0.024 + 0.026) * 550 Kkal/day * 7 day/week *
        # 0.1 kg rice / 129 Kkal
        #
        # displaced_population * (0.024 + 0.026) * 550 * 7 * 0.1 / 129
        # Update, 19 May 2017, Ismail Sunni
        # Since we have pregnant and lactating displace field, we will use it
        # to replace the hard coded value.
        # NOTE(review): the worked example above uses 550 Kkal/day while
        # EXTRA_CALORIES_NEEDED_PER_DAY is 500 — confirm which is intended.
        'additional_rice': {
            'value': additional_rice_count_field,
            'type': formula_process,
            'formula': (
                '(pregnant_displaced + lactating_displaced) * '
                'additional_rice_ratio')
        },
    }
}
def initialize_minimum_needs_post_processors():
    """Generate definitions for minimum needs post processors."""
    def _processor(field):
        """Build one post-processor definition from a minimum-needs field."""
        # need_parameter is a ResourceParameter carrying the per-person
        # amount from the needs profile.
        need_parameter = field['need_parameter']
        return {
            'key': 'post_processor_{key}'.format(key=field['key']),
            'name': '{field_name} Post Processor'.format(
                field_name=field['name']),
            'description': field['description'],
            'input': {
                'population': {
                    'value': displaced_field,
                    'type': field_input_type,
                },
                'amount': {
                    'type': needs_profile_input_type,
                    'value': need_parameter.name,
                }
            },
            'output': {
                'needs': {
                    'value': field,
                    'type': function_process,
                    'function': multiply
                }
            }
        }

    return [_processor(field) for field in minimum_needs_fields]
# Materialize the post-processor list once at import time.
minimum_needs_post_processors = initialize_minimum_needs_post_processors()
|
import glob
import logging
import os
import unittest
from abstract_domains.numerical.octagon_domain import OctagonDomain
from abstract_domains.usage.usage_domains import UsedSegmentationDomain
from core.expressions import VariableIdentifier
from engine.backward import BackwardInterpreter
from engine.forward import ForwardInterpreter
from semantics.usage.usage_semantics import UsageSemantics, UsageOctagonSemantics
from unittests.generic_tests import ResultCommentsFileTestCase
logging.basicConfig(level=logging.INFO, filename='unittests.log', filemode='w')
class SegmentationTestCase(ResultCommentsFileTestCase):
    """Runs a forward octagon analysis followed by a backward usage
    segmentation analysis on one source file, then checks the expected
    results embedded as comments in that file."""
    def __init__(self, source_path):
        super().__init__(source_path)
        # Path of the Python source file under analysis.
        self._source_path = source_path
    def runTest(self):
        """Analyze self._source_path and compare against its result comments."""
        logging.info(self)
        self.render_cfg()
        # find all variables
        variable_names = self.find_variable_names()
        int_vars = []
        list_vars = []
        list_to_len_var = {}
        for name in variable_names:
            # TODO remove this name hack when type inferences work
            if name.startswith("list"):
                typ = list
                var = VariableIdentifier(typ, name)
                list_vars.append(var)
                # Every list variable gets a companion "<name>__len" int
                # variable tracked by the numerical domain.
                list_to_len_var[var] = VariableIdentifier(int, var.name + "__len")
            else:
                typ = int
                var = VariableIdentifier(typ, name)
                int_vars.append(var)
        list_len_vars = list(list_to_len_var.values())
        # print(list(map(str,variables)))
        # Run Octagonal Analysis (forward)
        forward_interpreter = ForwardInterpreter(self.cfg, UsageOctagonSemantics(), 3)
        result = forward_interpreter.analyze(OctagonDomain(int_vars + list_vars + list_len_vars))
        # ensure all results are closed for displaying
        for node in result.nodes:
            node_result_list = result.get_node_result(node)
            for octagon in node_result_list:
                octagon.close()
        self.render_result_cfg(result, "Oct")
        # Run Usage Segmentation Analysis (backwards)
        # The octagon result feeds the segmentation domain as a precondition.
        backward_interpreter = BackwardInterpreter(self.cfg, UsageSemantics(), 3)
        result = backward_interpreter.analyze(
            UsedSegmentationDomain(int_vars, list_vars, list_len_vars, list_to_len_var, result))
        self.render_result_cfg(result, "Seg")
        self.check_result_comments(result)
def suite():
    """Collect every segmentation test file into one suite and run it."""
    tests = unittest.TestSuite()
    pattern = os.getcwd() + '/segmentation/**.py'
    for path in glob.iglob(pattern):
        if os.path.basename(path) != "__init__.py":
            tests.addTest(SegmentationTestCase(path))
    unittest.TextTestRunner().run(tests)


if __name__ == '__main__':
    suite()
|
from settings_local import SUBSCRIPTION_ID, STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY, EMAIL_PASSWORD, EMAIL_USERNAME
__author__ = 'Natalie Sanders'
from azure.servicemanagement import *
from azure.storage import *
from subprocess import call
from os import chdir
import os
import socket
import zipfile
import pickle
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from email import encoders
global user_info
def delete_vm():
    """Remove this VM's role from the user's deployment.

    Deletes the whole deployment when this is its last role; a no-op when
    the hosted service has no deployments.  Uses the module-level `sms`,
    `username` and `vm_name` globals.
    """
    hosted_service = sms.get_hosted_service_properties(service_name=username, embed_detail=True)
    if not hosted_service.deployments:
        return
    deployment = sms.get_deployment_by_name(username, username)
    roles = deployment.role_list
    for role in roles:
        if role.role_name != vm_name:
            continue
        if len(roles) == 1:
            sms.delete_deployment(service_name=username, deployment_name=username)
        else:
            sms.delete_role(service_name=username, deployment_name=username, role_name=vm_name)
        break
def send_mail(send_from, send_to, subject, text, files=None, server="localhost", port=587, username='', password='', isTls=True):
    """Send an email with optional file attachments via SMTP.

    Fixes over the original: `files` defaulted to a mutable list (shared
    across calls); attachment file handles were never closed; and the SMTP
    connection was not closed if login/sendmail raised.

    :param files: iterable of file paths to attach (None for none).
    """
    msg = MIMEMultipart()
    msg['From'] = send_from
    msg['To'] = send_to
    msg['Date'] = formatdate(localtime=True)
    msg['Subject'] = subject
    msg.attach(MIMEText(text))
    for f in files or []:
        part = MIMEBase('application', "octet-stream")
        with open(f, "rb") as attachment:
            part.set_payload(attachment.read())
        encoders.encode_base64(part)
        part.add_header('Content-Disposition', 'attachment; filename="{0}"'.format(os.path.basename(f)))
        msg.attach(part)
    smtp = smtplib.SMTP(server, port)
    try:
        if isTls:
            smtp.starttls()
        smtp.login(username, password)
        smtp.sendmail(send_from, send_to, msg.as_string())
    finally:
        smtp.quit()
def upload_results():
    """Zip everything in the Output directory and upload it to the user's
    blob container under the name 'r-<vm_name>'."""
    ####### Upload Final Results ########
    # Zip output directory
    # NOTE(review): the archive is opened in the *current* working
    # directory, then chdir() moves into Output inside the loop — this
    # relies on the caller's cwd being C:/Users/Public/Sim and leaves the
    # process in the Output directory afterwards; confirm intended.
    z = zipfile.ZipFile(user_info["sim"] + '_Results.zip', "w", zipfile.ZIP_DEFLATED)
    for result in os.listdir('Output'):
        chdir("c:/Users/Public/Sim/Output")
        z.write(result)
    z.close()
    # Blob name encodes which VM produced the results.
    result = 'r-' + vm_name
    blob_service.put_block_blob_from_path(container_name, result, 'c:/Users/Public/Sim/' + user_info["sim"] + '_Results.zip')
def download_input():
    """Fetch this VM's input blob and unpack it into the simulation directory."""
    blob_service.get_blob_to_path(container_name, vm_name, 'c:/Users/Public/Sim/Inputs.zip')
    chdir("C:/Users/Public/Sim")
    with zipfile.ZipFile('Inputs.zip', 'r') as archive:
        archive.extractall()
# --- Script entry point (runs on the worker VM) ---
# Host names look like "<username>-<n>"; the service/deployment name is the
# username part and the storage container is its lowercase form.
vm_name = socket.gethostname()
split = vm_name.split('-')
username = '-'.join(split[:-1])
container_name = '-'.join(split[:-1]).lower()
subscription_id = SUBSCRIPTION_ID
certificate_path = 'CURRENT_USER\\my\\AzureCertificate'
# Import the management certificate shipped on the VM so the service
# management API calls below can authenticate.
call(['certutil', '-user', '-f', '-p', '1', '-importPFX', 'c:/temp/azure.pfx'])
sms = ServiceManagementService(subscription_id, certificate_path)
chdir('C:/Users/Public/Sim')
output = open("Output/stdout.txt", "wb")
blob_service = BlobService(
    account_name=STORAGE_ACCOUNT_NAME,
    account_key=STORAGE_ACCOUNT_KEY)
download_input()
# User metadata (email, simulation name, ...) pickled by the submitter.
# NOTE: file() is Python-2-only.
f = "C:/Users/Public/Sim/AzureUserInfo.pickle"
user_info = pickle.load(file(f))
# NOTE(review): orphaned remains of a removed try/except kept as a bare
# string literal below — the download error path is currently disabled.
'''
except:
    output.write('Could not download input from the cloud.\n')
    output.close()
    upload_results()
'''
# Run the simulation, capturing its stdout into Output/stdout.txt.
call(["openMalaria.exe", "-s", "input.xml", "-p", "schema-32"], stdout=output)
output.close()
try:
    ########### Upload Results ##########
    upload_results()
    ########### Email Results ###########
    send_mail( send_from = 'vecnet.results@gmail.com',
               send_to = user_info["email"],
               subject = 'The results for your ' + user_info["sim"] + ' simulation are ready!',
               text = 'Hi ' + user_info['username'] + ',\n\nYour ' + user_info["sim"] + ' simulation has '
                      'finished running. Look for your results below.\n\nThanks for using VecNet Azure '
                      'resources!\nThe VecNet Team',
               files = ['c:/Users/Public/Sim/' + user_info["sim"] + '_Results.zip'],
               server = "smtp.gmail.com",
               port = 587,
               username = EMAIL_USERNAME,
               password = EMAIL_PASSWORD,
               isTls = True)
finally:
    # Always tear the VM down, even if upload or mailing fails.
    delete_vm()
|
from ConfigParser import ConfigParser
import requests
import sys
import os
import json
import matplotlib.pylab as pl
# Resolve paths relative to this file: config.ini lives two levels up, the
# scraped problem-facts file location comes from its [paths] section, and
# index.txt (the solved-problem order) sits next to this script.
_cur_path = os.path.dirname(os.path.realpath(__file__))
_root_path = os.path.join(_cur_path, '../..')
config = ConfigParser()
config.read(os.path.join(_root_path, 'config.ini'))
_fact_path = os.path.join(_root_path, config.get('paths', 'fact_path'))
_index_path = os.path.join(_cur_path, 'index.txt')
if __name__ == '__main__':
    # Load the scraped facts (difficulty / acceptance rate per problem).
    with open(_fact_path) as fin:
        problems = json.load(fin)
    # Tally how many problems exist at each difficulty level.
    totalEasy = 0
    totalMedium = 0
    totalHard = 0
    for key, vals in problems.items():
        if 'easy' == vals['difficulty']:
            totalEasy += 1
        elif 'medium' == vals['difficulty']:
            totalMedium += 1
        else:
            totalHard += 1
    pl.title("Solved Problems on LeetCode OJ")
    pl.xlabel("Order")
    pl.ylabel("Difficulty based on Acceptance")
    # Running counts of solved problems by difficulty.
    easy = 0
    medium = 0
    hard = 0
    x = 0
    data = []
    # index.txt lists solved problems as "<id>\t<title>" in solve order.
    with open(_index_path) as fin:
        line = fin.readline()
        while line:
            toks = line[:-1].split('\t')
            if len(toks) < 2:
                break
            else:
                key = toks[0]
                print(line.strip())
                # Sanity-check the index against the facts file.
                assert problems[key]['title'] == toks[1]
                x += 1
                # Difficulty proxy: 100 - acceptance rate (harder = higher).
                y = 100 - float(problems[key]['accept'])
                if 'easy' == problems[key]['difficulty']:
                    easy += 1
                    pl.annotate(str(key), xy=(x, y), color='green')
                elif 'medium' == problems[key]['difficulty']:
                    medium += 1
                    pl.annotate(str(key), xy=(x, y), color='blue')
                else:
                    hard += 1
                    pl.annotate(str(key), xy=(x, y), color='red')
                data.append(y)
                line = fin.readline()
    pl.xlim((0, x+1))
    # Summary of progress per difficulty (Python 2 print statements).
    print
    print "Easy:\t"+str(easy)+"/"+str(totalEasy)+" ("+str(100.*easy/totalEasy)+"%)"
    print "Medium:\t"+str(medium)+"/"+str(totalMedium)+" ("+str(100.*medium/totalMedium)+"%)"
    print "Hard:\t"+str(hard)+"/"+str(totalHard)+" ("+str(100.*hard/totalHard)+"%)"
    print "Progress: "+str(100.*(easy+medium+hard)/(totalEasy+totalMedium+totalHard))+"%"
    pl.plot(data, color='purple')
    pl.show()
|
import os
import mozharness
from mozharness.base.script import platform_name
# Mozharness config.  Locate the bundled external_tools directory relative
# to the installed mozharness package.
external_tools_path = os.path.join(
    os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
    'external_tools',
)
# Fixed interpreter path on Windows build slaves.
PYTHON_WIN32 = 'c:/mozilla-build/python27/python.exe'
# Per-platform tool locations and environment overrides; the entry for the
# current platform is selected below via platform_name().
PLATFORM_CONFIG = {
    'linux': {
        'exes': {
            'gittool.py': os.path.join(external_tools_path, 'gittool.py'),
            'virtualenv': ['/tools/buildbot/bin/python', '/tools/misc-python/virtualenv.py'],
        },
        'env': {
            'DISPLAY': ':0',
            'PATH': '%(PATH)s:' + external_tools_path,
        }
    },
    'linux64': {
        'exes': {
            'gittool.py': os.path.join(external_tools_path, 'gittool.py'),
            'virtualenv': ['/tools/buildbot/bin/python', '/tools/misc-python/virtualenv.py'],
        },
        'env': {
            'DISPLAY': ':0',
            'PATH': '%(PATH)s:' + external_tools_path,
        }
    },
    'macosx': {
        'exes': {
            'gittool.py': os.path.join(external_tools_path, 'gittool.py'),
            'virtualenv': ['/tools/buildbot/bin/python', '/tools/misc-python/virtualenv.py'],
        },
        'env': {
            'PATH': '%(PATH)s:' + external_tools_path,
        }
    },
    'win32': {
        "exes": {
            'gittool.py': [PYTHON_WIN32, os.path.join(external_tools_path, 'gittool.py')],
            # Otherwise, depending on the PATH we can pick python 2.6 up
            'python': PYTHON_WIN32,
            'virtualenv': [PYTHON_WIN32, 'c:/mozilla-build/buildbotve/virtualenv.py'],
        }
    }
}
# Start from the current platform's settings and add platform-independent
# virtualenv/pip settings (internal Mozilla pip mirrors, no public index).
config = PLATFORM_CONFIG[platform_name()]
config.update({
    "find_links": [
        "http://pypi.pvt.build.mozilla.org/pub",
        "http://pypi.pub.build.mozilla.org/pub",
    ],
    'pip_index': False,
    'virtualenv_path': 'venv',
})
|
# -*- coding: utf-8 -*-
from hashlib import md5, sha256
import base64
import webbrowser
from Crypto import Random
from Crypto.Cipher import AES
import wx
try:
    from auth import SALT
except ImportError:
    # No local auth module available: fall back to an empty salt.
    # NOTE(review): an empty salt weakens the derived key — confirm this
    # fallback is acceptable outside development.
    SALT = ""
class LogInDialog(wx.Dialog):
    """Modal dialog that collects a BCScan user id and password.

    ``GetValue`` returns the password AES-CBC encrypted with a key derived
    from the imported SALT; ``SetValue`` pre-fills the fields.
    """

    def __init__(self, parent=None, title="", caption=""):
        super(LogInDialog, self).__init__(
            parent, wx.ID_ANY, title,
            style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
        self.Center()
        caption_label = wx.StaticText(self, wx.ID_ANY, caption)
        uid_label = wx.StaticText(self, wx.ID_ANY, "&User Name Or E-Mail Address:")
        self.UID = wx.TextCtrl(self, wx.ID_ANY, style=wx.TE_PROCESS_ENTER)
        # Pressing Enter in the user-id field moves focus to the password.
        self.UID.Bind(wx.EVT_TEXT_ENTER, lambda event: self.Password.SetFocus())
        self.UID.SetInitialSize((300, 20))
        password_label = wx.StaticText(self, wx.ID_ANY, "&Password:")
        self.Password = wx.TextCtrl(self, wx.ID_ANY, style=wx.TE_PASSWORD)
        self.Password.SetInitialSize((300, 20))
        register_button = wx.Button(self, label="&Register a new account.")
        register_button.Bind(wx.EVT_BUTTON, self.register_event)
        ok_cancel_buttons = self.CreateButtonSizer(wx.OK | wx.CANCEL)
        self.UIDBox = wx.BoxSizer(wx.HORIZONTAL)
        self.UIDBox.Add(uid_label)
        self.UIDBox.Add(self.UID, proportion=1, border=1)
        self.PasswordBox = wx.BoxSizer(wx.HORIZONTAL)
        self.PasswordBox.Add(password_label)
        self.PasswordBox.Add(self.Password, proportion=1, border=1)
        outer = wx.BoxSizer(wx.VERTICAL)
        outer.Add(caption_label, proportion=0,
                  flag=wx.TOP | wx.ALIGN_CENTER_HORIZONTAL, border=25)
        outer.Add(self.UIDBox, proportion=0,
                  flag=wx.TOP | wx.RIGHT | wx.ALIGN_RIGHT, border=40)
        outer.Add(self.PasswordBox, proportion=0,
                  flag=wx.RIGHT | wx.ALIGN_RIGHT, border=40)
        outer.Add(register_button, proportion=0,
                  flag=wx.TOP | wx.BOTTOM | wx.RIGHT | wx.ALIGN_RIGHT, border=30)
        outer.Add(ok_cancel_buttons, proportion=0, flag=wx.EXPAND | wx.ALL, border=1)
        self.SetSizerAndFit(outer)
        self.UID.SetFocus()

    def register_event(self, event):
        """Open the default web browser, and navigate to the account registration page on BCScan.com."""
        webbrowser.open("http://bcscan.com/register.php")

    def SetValue(self, uid="", password=""):
        """Pre-fill the user id and/or password fields (empty args are ignored)."""
        if uid:
            self.UID.SetValue(uid)
        if password:
            self.Password.SetValue(password)

    def GetValue(self):
        """Return ``(uid, password_token)``.

        The token is the first 22 base64 chars of a random IV followed by
        the base64 AES-CBC ciphertext of password + md5(password) padded
        with NULs to the block size; an empty/blank password yields "".
        """
        plaintext = self.Password.GetValue()
        if not plaintext.strip():
            return (self.UID.GetValue().strip(), "")
        key = sha256(SALT).digest()
        iv = Random.new().read(16)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        # Append an MD5 integrity digest, then NUL-pad to the AES block size.
        payload = plaintext + md5(plaintext).hexdigest()
        payload += (16 - len(payload) % 16) * "\0"
        token = base64.b64encode(iv)[:22] + base64.b64encode(cipher.encrypt(payload))
        return (self.UID.GetValue().strip(), token)
|
from __future__ import absolute_import
import os
import subprocess
import sys
import psutil
from distutils.util import strtobool
from distutils.version import LooseVersion
import mozpack.path as mozpath
# Minimum number of logical processors considered adequate for building.
PROCESSORS_THRESHOLD = 4
# Minimum physical memory in gigabytes.
MEMORY_THRESHOLD = 7.4
# Minimum free disk space in gigabytes on the srcdir/objdir mounts.
FREESPACE_THRESHOLD = 10
# Newest known MozillaBuild release, compared against the VERSION file.
LATEST_MOZILLABUILD_VERSION = '1.11.0'
# Prompts shown to the user when a Windows filesystem fix can be attempted.
DISABLE_8DOT3_WIN = '''
Disable 8.3 filename creation systemwide?
This increases performance but some legacy applications may not be able to find
files and directories that have long file names.
https://support.microsoft.com/kb/121007
'''
DISABLE_LASTACCESS_WIN = '''
Disable the last access time feature?
This improves the speed of file and
directory access by deferring Last Access Time modification on disk by up to an
hour. Backup programs that rely on this feature may be affected.
https://technet.microsoft.com/en-us/library/cc785435.aspx
'''
class Doctor(object):
    """Diagnose (and optionally fix) common build-environment problems.

    Each check produces one result dict (or a list of them) with:
      'status'  -- 'GOOD', 'BAD', 'UNSURE', 'SKIPPED' or a
                   'BAD, ...'/'GOOD, ...' variant describing a fix attempt
      'desc'    -- human-readable description
      'fixable' -- optional bool, a fix could be attempted
      'denied'  -- optional bool, a fix was attempted but access was denied
    """

    def __init__(self, srcdir, objdir, fix):
        """srcdir/objdir are the directories to inspect; fix enables the
        (currently disabled) automatic-fix code paths."""
        self.srcdir = mozpath.normpath(srcdir)
        self.objdir = mozpath.normpath(objdir)
        self.srcdir_mount = self.getmount(self.srcdir)
        self.objdir_mount = self.getmount(self.objdir)
        self.path_mounts = [
            ('srcdir', self.srcdir, self.srcdir_mount),
            ('objdir', self.objdir, self.objdir_mount)
        ]
        self.fix = fix
        self.results = []

    def check_all(self):
        """Run every check, print a report, and return a shell-style exit
        code: 0 when everything is GOOD, 1 otherwise."""
        checks = [
            'cpu',
            'memory',
            'storage_freespace',
            'fs_8dot3',
            'fs_lastaccess',
            'mozillabuild'
        ]
        for check in checks:
            # Each check is a property; getattr evaluates it.
            self.report(getattr(self, check))
        good = True
        fixable = False
        denied = False
        for result in self.results:
            if result.get('status') != 'GOOD':
                good = False
            if result.get('fixable', False):
                fixable = True
            if result.get('denied', False):
                denied = True
        if denied:
            print('run "mach doctor --fix" AS ADMIN to re-attempt fixing your system')
        elif False:  # elif fixable:
            print('run "mach doctor --fix" as admin to attempt fixing your system')
        return int(not good)

    def getmount(self, path):
        """Return the mount point that contains *path*."""
        while path != '/' and not os.path.ismount(path):
            path = mozpath.abspath(mozpath.join(path, os.pardir))
        return path

    def prompt_bool(self, prompt, limit=5):
        ''' Prompts the user with prompt and requires a boolean value. '''
        # NOTE(review): raw_input is Python 2 only; this module predates a
        # Python 3 port.
        valid = False
        while not valid and limit > 0:
            try:
                choice = strtobool(raw_input(prompt + '[Y/N]\n'))
                valid = True
            except ValueError:
                print("ERROR! Please enter a valid option!")
                limit -= 1
        if limit > 0:
            return choice
        else:
            raise Exception("Error! Reached max attempts of entering option.")

    def report(self, results):
        """Record *results* (a dict or a list of dicts) and print them.

        SKIPPED results are neither stored nor printed.
        """
        # Handle single dict result or list of results.
        if isinstance(results, dict):
            results = [results]
        for result in results:
            status = result.get('status', 'UNSURE')
            if status == 'SKIPPED':
                continue
            self.results.append(result)
            # Expand tabs on the message itself before printing. The old
            # code chained .expandtabs() onto print's value, which only
            # worked via Python 2's print-statement parsing.
            print(('%s...\t%s\n' % (
                result.get('desc', ''),
                status
            )).expandtabs(40))

    @property
    def platform(self):
        """sys.platform with trailing digits stripped (e.g. 'win32' ->
        'win'), cached on first access."""
        platform = getattr(self, '_platform', None)
        if not platform:
            platform = sys.platform
            while platform[-1].isdigit():
                platform = platform[:-1]
            setattr(self, '_platform', platform)
        return platform

    @property
    def cpu(self):
        """Check that enough logical processors are available."""
        cpu_count = psutil.cpu_count()
        if cpu_count < PROCESSORS_THRESHOLD:
            status = 'BAD'
            desc = '%d logical processors detected, <%d' % (
                cpu_count, PROCESSORS_THRESHOLD
            )
        else:
            status = 'GOOD'
            desc = '%d logical processors detected, >=%d' % (
                cpu_count, PROCESSORS_THRESHOLD
            )
        return {'status': status, 'desc': desc}

    @property
    def memory(self):
        """Check that enough physical memory is installed."""
        memory = psutil.virtual_memory().total
        # Convert to gigabytes.
        memory_GB = memory / 1024**3.0
        if memory_GB < MEMORY_THRESHOLD:
            status = 'BAD'
            desc = '%.1fGB of physical memory, <%.1fGB' % (
                memory_GB, MEMORY_THRESHOLD
            )
        else:
            status = 'GOOD'
            desc = '%.1fGB of physical memory, >%.1fGB' % (
                memory_GB, MEMORY_THRESHOLD
            )
        return {'status': status, 'desc': desc}

    @property
    def storage_freespace(self):
        """Check free disk space on the srcdir/objdir mount point(s).

        When both directories share a mount point it is checked once, with
        both path lines folded into a single result."""
        results = []
        desc = ''
        mountpoint_line = self.srcdir_mount != self.objdir_mount
        for (purpose, path, mount) in self.path_mounts:
            desc += '%s = %s\n' % (purpose, path)
            if not mountpoint_line:
                # Shared mount: defer the check to the second iteration.
                mountpoint_line = True
                continue
            try:
                usage = psutil.disk_usage(mount)
                freespace, size = usage.free, usage.total
                freespace_GB = freespace / 1024**3
                size_GB = size / 1024**3
                if freespace_GB < FREESPACE_THRESHOLD:
                    status = 'BAD'
                    desc += 'mountpoint = %s\n%dGB of %dGB free, <%dGB' % (
                        mount, freespace_GB, size_GB, FREESPACE_THRESHOLD
                    )
                else:
                    status = 'GOOD'
                    desc += 'mountpoint = %s\n%dGB of %dGB free, >=%dGB' % (
                        mount, freespace_GB, size_GB, FREESPACE_THRESHOLD
                    )
            except OSError:
                status = 'UNSURE'
                desc += 'path invalid'
            results.append({'status': status, 'desc': desc})
        return results

    @property
    def fs_8dot3(self):
        """Windows only: check whether 8.3 short-filename creation is
        disabled (it slows down filesystem operations)."""
        if self.platform != 'win':
            return {'status': 'SKIPPED'}
        results = []
        fixable = False
        denied = False
        # See 'fsutil behavior':
        # https://technet.microsoft.com/en-us/library/cc785435.aspx
        try:
            command = 'fsutil behavior query disable8dot3'.split(' ')
            fsutil_output = subprocess.check_output(command)
            system8dot3 = int(fsutil_output.partition(':')[2][1])
        except subprocess.CalledProcessError:
            return {'status': 'UNSURE',
                    'desc': 'unable to check 8dot3 behavior'}
        if system8dot3 == 1:
            return {'status': 'GOOD',
                    'desc': '8dot3 disabled systemwide'}
        elif system8dot3 == 0:
            if False:  # if self.fix:
                choice = self.prompt_bool(DISABLE_8DOT3_WIN)
                if not choice:
                    return {'status': 'BAD, NOT FIXED',
                            'desc': '8dot3 enabled systemwide'}
                try:
                    command = 'fsutil behavior set disable8dot3 1'.split(' ')
                    subprocess.check_output(command)
                    status = 'GOOD, FIXED'
                    desc = '8dot3 disabled systemwide'
                # "except X as e" replaces the Python-2-only "except X, e".
                except subprocess.CalledProcessError as e:
                    desc = '8dot3 enabled systemwide'
                    if e.output.find('denied') != -1:
                        status = 'BAD, FIX DENIED'
                        denied = True
                    else:
                        status = 'BAD, NOT FIXED'
            else:
                status = 'BAD, FIXABLE'
                desc = '8dot3 enabled systemwide'
                fixable = True
            return {'status': status, 'desc': desc, 'fixable': fixable,
                    'denied': denied}
        # See 'fsutil 8dot3':
        # https://technet.microsoft.com/en-us/library/ff621566.aspx
        elif system8dot3 == 2 or system8dot3 == 3:
            # 2 = Individual disk behavior respected.
            # 3 = 8dot3 disabled on all except system disk.
            # Neither is a default value; assume that it's meant to be that
            # way and don't try to fix it.
            common_mountpoint = self.srcdir_mount == self.objdir_mount
            for (purpose, path, mount) in self.path_mounts:
                results.append(self.check_disk_8dot3(mount))
                if common_mountpoint:
                    break
            return results

    def check_disk_8dot3(self, disk):
        """Check 8dot3 creation behavior for a single *disk*.

        Note: this used to take an unused extra ``path`` parameter, which
        made the single-argument call in ``fs_8dot3`` raise TypeError.
        """
        disk = disk.replace('/', '')
        try:
            command = ('fsutil behavior query disable8dot3 ' + disk).split(' ')
            fsutil_output = subprocess.check_output(command)
            (volumeLine, systemLine, emptyLine, effectLine,
             emptyLine2) = fsutil_output.split('\r\n')
            volume8dot3 = int(volumeLine.partition(':')[2][1])
            if volume8dot3 == 1:
                # Current disk has 8dot3 disabled.
                status = 'GOOD'
                desc = '%s has 8dot3 disabled' % disk
            else:
                status = 'BAD'
                # Fixed: this branch previously also claimed "disabled".
                desc = '%s has 8dot3 enabled' % disk
        except subprocess.CalledProcessError:
            status = 'UNSURE'
            desc = '%s 8dot3 behavior unknown' % disk
        return {'status': status, 'desc': desc}

    @property
    def fs_lastaccess(self):
        """Check (and on Windows optionally fix) last-access-time updates,
        which slow down file and directory access."""
        results = []
        if self.platform == 'win':
            fixable = False
            denied = False
            # See 'fsutil behavior':
            # https://technet.microsoft.com/en-us/library/cc785435.aspx
            try:
                command = 'fsutil behavior query disablelastaccess'.split(' ')
                fsutil_output = subprocess.check_output(command)
                disablelastaccess = int(fsutil_output.partition('=')[2][1])
            except subprocess.CalledProcessError:
                disablelastaccess = -1
                status = 'UNSURE'
                desc = 'unable to check lastaccess behavior'
            if disablelastaccess == 1:
                status = 'GOOD'
                desc = 'lastaccess disabled systemwide'
            elif disablelastaccess == 0:
                if False:  # if self.fix:
                    choice = self.prompt_bool(DISABLE_LASTACCESS_WIN)
                    if not choice:
                        return {'status': 'BAD, NOT FIXED',
                                'desc': 'lastaccess enabled systemwide'}
                    try:
                        command = 'fsutil behavior set disablelastaccess 1'.split(' ')
                        subprocess.check_output(command)
                        status = 'GOOD, FIXED'
                        desc = 'lastaccess disabled systemwide'
                    # "except X as e" replaces the Python-2-only form.
                    except subprocess.CalledProcessError as e:
                        desc = 'lastaccess enabled systemwide'
                        if e.output.find('denied') != -1:
                            status = 'BAD, FIX DENIED'
                            denied = True
                        else:
                            status = 'BAD, NOT FIXED'
                else:
                    status = 'BAD, FIXABLE'
                    desc = 'lastaccess enabled'
                    fixable = True
            results.append({'status': status, 'desc': desc, 'fixable': fixable,
                            'denied': denied})
        elif self.platform in ['darwin', 'freebsd', 'linux', 'openbsd']:
            common_mountpoint = self.srcdir_mount == self.objdir_mount
            for (purpose, path, mount) in self.path_mounts:
                results.append(self.check_mount_lastaccess(mount))
                if common_mountpoint:
                    break
        else:
            results.append({'status': 'SKIPPED'})
        return results

    def check_mount_lastaccess(self, mount):
        """Inspect the mount options of *mount* for atime-related flags."""
        partitions = psutil.disk_partitions()
        atime_opts = {'atime', 'noatime', 'relatime', 'norelatime'}
        option = ''
        for partition in partitions:
            if partition.mountpoint == mount:
                mount_opts = set(partition.opts.split(','))
                intersection = list(atime_opts & mount_opts)
                if len(intersection) == 1:
                    option = intersection[0]
                break
        if not option:
            status = 'BAD'
            if self.platform == 'linux':
                option = 'noatime/relatime'
            else:
                option = 'noatime'
            desc = '%s has no explicit %s mount option' % (
                mount, option
            )
        elif option == 'atime' or option == 'norelatime':
            status = 'BAD'
            desc = '%s has %s mount option' % (
                mount, option
            )
        elif option == 'noatime' or option == 'relatime':
            status = 'GOOD'
            desc = '%s has %s mount option' % (
                mount, option
            )
        return {'status': status, 'desc': desc}

    @property
    def mozillabuild(self):
        """Windows only: compare the installed MozillaBuild version to
        LATEST_MOZILLABUILD_VERSION."""
        if self.platform != 'win':
            return {'status': 'SKIPPED'}
        MOZILLABUILD = mozpath.normpath(os.environ.get('MOZILLABUILD', ''))
        if not MOZILLABUILD or not os.path.exists(MOZILLABUILD):
            return {'desc': 'not running under MozillaBuild'}
        try:
            with open(mozpath.join(MOZILLABUILD, 'VERSION'), 'r') as fh:
                version = fh.readline()
                if not version:
                    raise ValueError()
                if LooseVersion(version) < LooseVersion(LATEST_MOZILLABUILD_VERSION):
                    status = 'BAD'
                    desc = 'MozillaBuild %s in use, <%s' % (
                        version, LATEST_MOZILLABUILD_VERSION
                    )
                else:
                    status = 'GOOD'
                    desc = 'MozillaBuild %s in use' % version
        except (IOError, ValueError):
            status = 'UNSURE'
            desc = 'MozillaBuild version not found'
        return {'status': status, 'desc': desc}
|
class GEMINI(DataClassification):
    """Classification matching data from any Gemini Observatory instrument."""
    name="GEMINI"
    # this is a description of the intent of the classification
    # to what does the classification apply?
    # NOTE(review): the usage text mentions only GMOS, but the requirement
    # below covers every Gemini instrument -- confirm the intended wording.
    usage = '''
        Applies to all data from either GMOS-North or GMOS-South instruments in any mode.
        '''
    # Added the instrument names directly, so that when we get engineering data that does
    # not have telescope headers in, and thus doesn't identify as GEMINI_NORTH or _SOUTH
    # then it does identify as GEMINI, so that the gemini descriptors associate with it.
    requirement = OR(ISCLASS("GEMINI_NORTH"),
                     ISCLASS("GEMINI_SOUTH"),
                     ISCLASS("GMOS"),
                     ISCLASS("NIRI"),
                     ISCLASS("GNIRS"),
                     ISCLASS("MICHELLE"),
                     ISCLASS("NICI"),
                     ISCLASS("F2"),
                     ISCLASS("NIFS"),
                     ISCLASS("TRECS"),
                     ISCLASS("GSAOI"),
                     ISCLASS("BHROS"))
# Register the classification with the global type list.
newtypes.append( GEMINI())
|
'''Tests related to importing comments.'''
import requests
import unittest
from importer import add_comment
# Shared fixture mimicking a webcompat issue payload. Note the deliberately
# tricky values: a non-ASCII (emoji) domain in "url" and empty/None entries
# in "comments" that importers must tolerate.
json_data = {
    u"url": u"www.💩.com",
    u"title": u"Upgrade browser message",
    u"browser": u"Firefox 30",
    u"os": u"Windows 7",
    u"body": u"The site asks me to upgrade",
    u"labels": [u"contactready", u"invalid"],
    u"comments": [u"1", u"2", u"", u"3", None]
}
class TestComments(unittest.TestCase):
    """Tests related to importing comments."""

    def test_empty_comments(self):
        """add_comment must reject empty or missing comment bodies."""
        for empty_body in ("", None):
            self.assertFalse(add_comment("1", empty_body))
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
from . import models, wizard
|
import mmap
import re

from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand, CommandError

from grouprise.features.content.models import Version
from grouprise.features.contributions.models import Contribution
class Command(BaseCommand):
    """Restore tag assignments from a plain-text SQL dump (pg_dump COPY
    format) produced by grouprise 2.x."""

    # Regexes capturing the data payload of the relevant COPY blocks.
    TAGS_PATTERN = rb"COPY public\.tags_tag.*?;\s*(.*?)\\\."
    LINKS_PATTERN = rb"COPY public\.tags_tagged.*?;\s*(.*?)\\\."
    help = "Restores tags from a grouprise 2.x database dump."

    def add_arguments(self, parser):
        parser.add_argument(
            "dump_file", help="filename of version 2.x SQL database dump"
        )

    def handle(self, *args, **options):
        """Parse the dump, then re-attach each tag to its (still existing)
        target object; warnings are printed for anything unrecoverable."""
        def get_data(pattern_str):
            # Yield tab-separated rows from the COPY block matching
            # pattern_str; raise CommandError if the block is missing.
            pattern = re.compile(pattern_str, re.DOTALL | re.MULTILINE)
            match = pattern.search(mm)
            if match:
                for dataset in match[1].splitlines():
                    yield dataset.decode().split(sep="\t")
            else:
                raise CommandError("Dump file does not contain tag data")
        dump_file = options.get("dump_file")
        with open(dump_file, "r") as f:
            with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
                # Old tag id -> {"name", "slug"}.
                tags = {
                    int(data[0]): {"name": data[1], "slug": data[2]}
                    for data in get_data(self.TAGS_PATTERN)
                }
                # One entry per tag assignment in the old database.
                links = [
                    {
                        "tagged": int(data[1]),
                        "tagged_type": int(data[2]),
                        "tag": int(data[3]),
                    }
                    for data in get_data(self.LINKS_PATTERN)
                ]
        for link in links:
            content_type = ContentType.objects.get_for_id(link["tagged_type"])
            try:
                model = content_type.get_object_for_this_type(id=link["tagged"])
            # Narrowed from a bare "except:", which also swallowed
            # KeyboardInterrupt and programming errors.
            except ObjectDoesNotExist:
                print(
                    "Warning: Tagged object does no longer exist (type {}, id {})".format(
                        link["tagged_type"], link["tagged"]
                    )
                )
                continue
            # Tags on contributions/versions belong to their container.
            if isinstance(model, Contribution):
                model = model.container
            elif isinstance(model, Version):
                model = model.content
            if hasattr(model, "tags"):
                tag_name = tags[link["tag"]]["name"]
                if len(tag_name) <= 100:
                    model.tags.add(tag_name)
                else:
                    print("Warning: Tag name is too long: {}".format(tag_name))
            else:
                print(
                    "Warning: Model of type {} is not taggable".format(
                        model.__class__.__name__
                    )
                )
|
from unittest import TestCase
from ..debian_urls import wnpp_issue_url
class DebianUrlsTest(TestCase):
    """Tests for the Debian bug-URL helpers."""

    def test_wnpp_issue_url(self):
        expected = 'https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=748374'
        self.assertEqual(wnpp_issue_url(748374), expected)
|
import requests
import re
import json
from . import macro_replacement_fields
from decimal import Decimal
from superdesk.cache import cache
from flask import current_app as app
# fixer.io endpoint; formatted with the API key and a comma-separated symbol list.
RATE_SERVICE = 'http://data.fixer.io/api/latest?access_key={}&symbols={}'
# Optional magnitude suffix after an amount ("5 mln", "3bn", "2m", ...),
# optionally followed by a closing parenthesis.
SUFFIX_REGEX = r'((\s*\-?\s*)((mln)|(bln)|(bn)|([mM]illion)|([bB]illion)|[mb]))?\)?'
# Variant of the suffix pattern with the separator group outside the optional part.
SECONDARY_SUFFIX_REGEX = r'(\s*\-?\s*)((mln)|(bln)|(bn)|([mM]illion)|([bB]illion)|[mb])?\)?'
# Currencies requested from the rate service.
SYMBOLS = 'USD,AUD,CHF,NZD,CNY,GBP,EUR,JPY'
def to_currency(value, places=2, curr='', sep=',', dp='.', pos='', neg='-', trailneg=''):
    """Convert Decimal to a money formatted string.
    places: required number of places after the decimal point
    curr: optional currency symbol before the sign (may be blank)
    sep: optional grouping separator (comma, period, space, or blank)
    dp: decimal point indicator (comma or period)
    only specify as blank when places is zero
    pos: optional sign for positive numbers: '+', space or blank
    neg: optional sign for negative numbers: '-', '(', space or blank
    trailneg:optional trailing minus indicator: '-', ')', space or blank

    The doctests below previously called a nonexistent ``moneyfmt``; they
    now call this function.

    >>> d = Decimal('-1234567.8901')
    >>> to_currency(d, curr='$')
    '-$1,234,567.89'
    >>> to_currency(d, places=0, sep='.', dp='', neg='', trailneg='-')
    '1.234.568-'
    >>> to_currency(d, curr='$', neg='(', trailneg=')')
    '($1,234,567.89)'
    >>> to_currency(Decimal(123456789), sep=' ')
    '123 456 789.00'
    >>> to_currency(Decimal('-0.02'), neg='<', trailneg='>')
    '<0.02>'
    """
    quantum = Decimal(10) ** -places  # 2 places --> '0.01'
    sign, digits, exp = value.quantize(quantum).as_tuple()
    result = []
    digits = list(map(str, digits))
    # The string is assembled in reverse; renamed to avoid shadowing next().
    build, next_digit = result.append, digits.pop
    if sign:
        build(trailneg)
    for i in range(places):
        build(next_digit() if digits else '0')
    if places:
        build(dp)
    if not digits:
        build('0')
    i = 0
    while digits:
        build(next_digit())
        i += 1
        if i == 3 and digits:
            i = 0
            build(sep)
    build(curr)
    build(neg if sign else pos)
    return ''.join(reversed(result))
@cache(ttl=43200)
def get_all_rates():
    """Fetch the full rate table from the rate service (cached for 12h).

    :raises LookupError: when the service does not report success
    """
    url = RATE_SERVICE.format(app.config.get('CURRENCY_API_KEY', ''), SYMBOLS)
    response = requests.get(url, timeout=5)
    response.raise_for_status()
    payload = json.loads(response.text)
    if payload.get('success') is not True:
        raise LookupError('Failed to retrieve currency conversion rates')
    return payload
def get_rate(from_currency, to_currency):
    """Get the exchange rate."""
    payload = get_all_rates()
    if payload.get('success') is not True:
        raise LookupError('Failed to retrieve currency conversion rate')
    rates = payload.get('rates')
    return Decimal(rates.get(to_currency) / rates.get(from_currency))
def update_suffix(value, suffix, precision=0):
    """Normalise a converted amount to a human scale.

    Scales *value* down and updates *suffix* ('million'/'billion'/
    'trillion') where needed; when *precision* is 0 a display precision is
    chosen from the magnitude of the scaled value.

    :param value: Decimal amount
    :param suffix: magnitude word/abbreviation from the source text, or a
        falsy value when none was present
    :param precision: decimal places; 0 means "choose automatically"
    :return: tuple (value, suffix, precision)
    """
    thousand = Decimal(1000)
    million_suffixes = ('m', 'mln', 'million')
    billion_suffixes = ('b', 'bln', 'billion')
    if not suffix:
        # Bare number: divide down step by step, remembering the last
        # magnitude that applied.
        for threshold, word in ((Decimal(1000000000000), 'trillion'),
                                (Decimal(1000000000), 'billion'),
                                (Decimal(1000000), 'million')):
            if value >= threshold:
                value = value / threshold
                suffix = word
    # A converted amount may have crossed into the next magnitude; promote
    # the suffix accordingly.
    if value >= thousand and suffix in billion_suffixes:
        value = value / thousand
        suffix = 'trillion'
    if value >= thousand and suffix in million_suffixes:
        value = value / thousand
        suffix = 'billion'
    if precision == 0:
        if value < Decimal(1):
            precision = 2
        elif value < Decimal(10):
            precision = 1
    return value, suffix, precision
def format_output(original, converted, suffix, src_currency):
    """Return the replacement string for the given original value.

    :param original: matched source string, e.g. '$5 mln'
    :param converted: converted amount, already formatted as a string
    :param suffix: magnitude word ('million', ...) or '' when none applies
    :param src_currency: explicit source-currency symbol substituted for a
        bare '$' in *original*; ``None`` (or a symbol already present in
        *original*) leaves it untouched
    """
    original = original if src_currency is None or src_currency in original \
        else original.replace('$', src_currency)
    if suffix:
        return '{} ({} {})'.format(original, converted, suffix)
    else:
        # Fixed: a stray third argument was previously passed to this
        # two-placeholder template (silently ignored by str.format).
        return '{} ({})'.format(original, converted)
def do_conversion(item, rate, currency, search_param, match_index, value_index, suffix_index, src_currency=None):
    """
    Performs the conversion
    :param item: story
    :param rate: exchange rate
    :param currency: currency symbol or prefix to be used in the results
    :param search_param: search parameter to locate the original value. It should
    be a valid regular expression pattern, and not just an arbitrary string.
    :param match_index: int index of groups used in matching string
    :param value_index: int index of groups used in converting the value
    :param suffix_index: int index of groups used for millions or billions
    :param src_currency: optional source-currency symbol substituted for '$'
    in the reported original value
    :return: modified story
    """
    diff = {}
    def convert(match):
        # Build (and cache in ``diff``) the annotated replacement for one match.
        match_item = match.group(match_index)
        value_item = match.group(value_index)
        suffix_item = match.group(suffix_index)
        if match_item and value_item:
            if ')' in match_item and '(' not in match_item:
                # clear any trailing parenthesis
                match_item = re.sub('[)]', '', match_item)
            # Strip everything but digits and the decimal point before parsing.
            from_value = Decimal(re.sub(r'[^\d.]', '', value_item))
            precision = abs(from_value.as_tuple().exponent)
            to_value = rate * from_value
            to_value, suffix_item, precision = update_suffix(to_value, suffix_item, precision)
            converted_value = to_currency(to_value, places=precision, curr=currency)
            diff.setdefault(match_item, format_output(match_item, converted_value, suffix_item, src_currency))
            return diff[match_item]
    # if the rate is returned from the cache it's type will be float, we need to change it to Decimal
    if isinstance(rate, float):
        rate = Decimal(rate)
    for field in macro_replacement_fields:
        if item.get(field, None):
            # NOTE(review): the return value of re.sub() is discarded, so
            # item[field] is never modified here -- only ``diff`` is
            # populated. Confirm this is intentional before changing it.
            re.sub(search_param, convert, item[field])
    return (item, diff)
|
import unittest
from butler_offline.viewcore.converter import datum_from_german as datum
from butler_offline.core.database.sparen.orderdauerauftrag import OrderDauerauftrag
from butler_offline.core.database.sparen.order import Order
def test_add_should_add():
    """A newly added standing order must be stored with all columns intact."""
    component_under_test = OrderDauerauftrag()
    component_under_test.add(datum('01.01.2020'), datum('01.01.2021'), 'monatlich', '1name', '1konto', '1depotwert', 100)
    assert len(component_under_test.content) == 1
    assert component_under_test.content.Startdatum[0] == datum('01.01.2020')
    assert component_under_test.content.Endedatum[0] == datum('01.01.2021')
    assert component_under_test.content.Rhythmus[0] == 'monatlich'
    assert component_under_test.content.Name[0] == '1name'
    assert component_under_test.content.Konto[0] == '1konto'
    assert component_under_test.content.Depotwert[0] == '1depotwert'
    assert component_under_test.content.Wert[0] == 100
def test_edit_should_edit():
    """Editing by index must replace exactly that entry and keep the count."""
    component_under_test = OrderDauerauftrag()
    component_under_test.add(datum('01.01.2020'), datum('01.01.2021'), 'monatlich', '1name', '1konto', '1depotwert', 100)
    component_under_test.add(datum('02.02.2020'), datum('02.02.2021'), 'monatlich', '2name', '2konto', '2depotwert', 200)
    component_under_test.add(datum('03.03.2020'), datum('03.03.2021'), 'monatlich', '3name', '3konto', '3depotwert', 300)
    assert len(component_under_test.content) == 3
    element_before = component_under_test.get(1)
    assert element_before == {
        'index': 1,
        'Startdatum': datum('02.02.2020'),
        'Endedatum': datum('02.02.2021'),
        'Rhythmus': 'monatlich',
        'Name': '2name',
        'Konto': '2konto',
        'Depotwert': '2depotwert',
        'Wert': 200
    }
    component_under_test.edit(1, datum('03.02.2020'), datum('03.02.2021'), 'jährlich', '24name', '24konto', '24depotwert', 240)
    assert len(component_under_test.content) == 3
    element_after = component_under_test.get(1)
    assert element_after == {
        'index': 1,
        'Startdatum': datum('03.02.2020'),
        'Endedatum': datum('03.02.2021'),
        'Rhythmus': 'jährlich',
        'Name': '24name',
        'Konto': '24konto',
        'Depotwert': '24depotwert',
        'Wert': 240
    }
def test_order_until_today_with_invalid_dates_should_be_empty():
    """An end date before the start date must yield no generated orders."""
    component_under_test = OrderDauerauftrag()
    component_under_test.add(datum('01.01.2020'), datum('01.01.2019'), 'monatlich', 'invalid', '1konto', '1depotwert', 222)
    result = component_under_test.get_all_order_until_today()
    assert len(result) == 0
def test_order_until_today_with_date_in_future_should_be_empty():
    """Orders starting in the future must not be generated yet."""
    component_under_test = OrderDauerauftrag()
    component_under_test.add(datum('01.01.3020'), datum('01.01.3021'), 'monatlich', 'future', '1konto', '1depotwert', 333)
    result = component_under_test.get_all_order_until_today()
    assert len(result) == 0
def test_order_until_today():
    """A monthly order over two months expands into two concrete orders."""
    component_under_test = OrderDauerauftrag()
    component_under_test.add(datum('01.01.2020'), datum('02.02.2020'), 'monatlich', '1name', '1konto', '1depotwert', 100)
    result = component_under_test.get_all_order_until_today()
    assert len(result) == 2
    assert result.Datum[0] == datum('01.01.2020')
    assert result.Name[0] == '1name'
    assert result.Konto[0] == '1konto'
    assert result.Depotwert[0] == '1depotwert'
    assert result.Wert[0] == 100
    assert result.Datum[1] == datum('01.02.2020')
    assert result.Name[1] == '1name'
    assert result.Konto[1] == '1konto'
    assert result.Depotwert[1] == '1depotwert'
    assert result.Wert[1] == 100
def test_order_until_today_table_header_should_comply_order_table_header():
    """Generated orders must use exactly the Order table's columns."""
    component_under_test = OrderDauerauftrag()
    component_under_test.add(datum('01.01.2020'), datum('02.02.2020'), 'monatlich', '1name', '1konto', '1depotwert', 100)
    result = component_under_test.get_all_order_until_today()
    assert sorted(result.columns) == sorted(Order.TABLE_HEADER)
def test_get_past_should_only_return_past():
    """past() must exclude orders that are still running."""
    component_under_test = OrderDauerauftrag()
    component_under_test.add(
        datum('01.01.2020'), datum('02.02.2020'), 'monatlich', '1name', '1konto', '1depotwert', 100)
    component_under_test.add(
        datum('01.01.2020'), datum('02.02.2050'), 'monatlich', 'future', '1konto', '1depotwert', 100)
    result = component_under_test.past()
    assert len(result) == 1
    assert result[0]['Name'] == '1name'
def test_get_aktuelle_should_only_return_current():
    """aktuelle() must exclude both finished and not-yet-started orders."""
    component_under_test = OrderDauerauftrag()
    component_under_test.add(
        datum('01.01.2020'), datum('02.02.2020'), 'monatlich', 'past', '1konto', '1depotwert', 100)
    component_under_test.add(
        datum('01.01.2020'), datum('02.02.2050'), 'monatlich', '1name', '1konto', '1depotwert', 100)
    component_under_test.add(
        datum('01.01.2050'), datum('02.02.2050'), 'monatlich', 'future', '1konto', '1depotwert', 100)
    result = component_under_test.aktuelle()
    assert len(result) == 1
    assert result[0]['Name'] == '1name'
def test_get_future_should_only_return_future():
    """future() must exclude orders that have already started."""
    component_under_test = OrderDauerauftrag()
    component_under_test.add(
        datum('01.01.2020'), datum('02.02.2050'), 'monatlich', 'current', '1konto', '1depotwert', 100)
    component_under_test.add(
        datum('01.01.2050'), datum('02.02.2050'), 'monatlich', '1name', '1konto', '1depotwert', 100)
    result = component_under_test.future()
    assert len(result) == 1
    assert result[0]['Name'] == '1name'
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import logging
from django.contrib.auth.mixins import (
LoginRequiredMixin,
PermissionRequiredMixin
)
from django.urls import reverse_lazy
from django.utils.translation import (
ugettext as _,
ugettext_lazy
)
from django.views.generic import (
CreateView,
DeleteView,
ListView,
UpdateView
)
from wger.config.models import LanguageConfig
from wger.exercises.models import Muscle
from wger.utils.generic_views import (
WgerDeleteMixin,
WgerFormMixin
)
from wger.utils.language import load_item_languages
logger = logging.getLogger(__name__)
class MuscleListView(ListView):
    """
    Overview of all muscles and their exercises
    """
    model = Muscle
    # Bug fix: a trailing comma here previously wrapped the queryset in a
    # one-element tuple, so the view iterated the tuple instead of the
    # Muscle queryset.
    queryset = Muscle.objects.all().order_by('-is_front', 'name')
    context_object_name = 'muscle_list'
    template_name = 'muscles/overview.html'

    def get_context_data(self, **kwargs):
        """
        Send some additional data to the template
        """
        context = super(MuscleListView, self).get_context_data(**kwargs)
        context['active_languages'] = load_item_languages(LanguageConfig.SHOW_ITEM_EXERCISES)
        context['show_shariff'] = True
        return context
class MuscleAdminListView(LoginRequiredMixin, PermissionRequiredMixin, MuscleListView):
    """
    Overview of all muscles, for administration purposes
    """
    permission_required = 'exercises.change_muscle'
    # Admin list is sorted by name only, ignoring the front/back grouping
    # used in the public overview.
    queryset = Muscle.objects.order_by('name')
    template_name = 'muscles/admin-overview.html'
class MuscleAddView(WgerFormMixin, LoginRequiredMixin, PermissionRequiredMixin, CreateView):
    """
    Generic view to add a new muscle
    """
    model = Muscle
    fields = ['name', 'is_front']
    # Return to the admin list after a successful save.
    success_url = reverse_lazy('exercise:muscle:admin-list')
    title = ugettext_lazy('Add muscle')
    permission_required = 'exercises.add_muscle'
class MuscleUpdateView(WgerFormMixin, LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
    """
    Generic view to update an existing muscle
    """
    model = Muscle
    fields = ['name', 'is_front']
    success_url = reverse_lazy('exercise:muscle:admin-list')
    permission_required = 'exercises.change_muscle'

    def get_context_data(self, **kwargs):
        """
        Send some additional data to the template
        """
        context = super(MuscleUpdateView, self).get_context_data(**kwargs)
        # Page title includes the muscle being edited.
        context['title'] = _('Edit {0}').format(self.object.name)
        return context
class MuscleDeleteView(WgerDeleteMixin, LoginRequiredMixin, PermissionRequiredMixin, DeleteView):
    """
    Generic view to delete an existing muscle
    """
    model = Muscle
    # NOTE(review): ``fields`` has no effect on a DeleteView -- confirm it
    # can be removed.
    fields = ('name', 'is_front')
    success_url = reverse_lazy('exercise:muscle:admin-list')
    permission_required = 'exercises.delete_muscle'
    messages = ugettext_lazy('Successfully deleted')

    def get_context_data(self, **kwargs):
        """
        Send some additional data to the template
        """
        context = super(MuscleDeleteView, self).get_context_data(**kwargs)
        # Confirmation-page title includes the muscle being deleted.
        context['title'] = _('Delete {0}?').format(self.object.name)
        return context
|
from datetime import date
from odoo import _, api, fields, models
from odoo.exceptions import Warning as UserError
class RibaList(models.Model):
    def _compute_acceptance_move_ids(self):
        """Collect the acceptance journal entries of all slip lines."""
        for riba in self:
            move_ids = self.env["account.move"]
            for line in riba.line_ids:
                move_ids |= line.acceptance_move_id
            riba.acceptance_move_ids = move_ids
    def _compute_unsolved_move_ids(self):
        """Collect the past-due journal entries of all slip lines."""
        for riba in self:
            move_ids = self.env["account.move"]
            for line in riba.line_ids:
                move_ids |= line.unsolved_move_id
            riba.unsolved_move_ids = move_ids
    def _compute_payment_ids(self):
        """Collect the payment move lines of all slip lines."""
        for riba in self:
            move_lines = self.env["account.move.line"]
            for line in riba.line_ids:
                move_lines |= line.payment_ids
            riba.payment_ids = move_lines
_name = "riba.distinta"
_description = "C/O Slip"
_inherit = ["mail.thread"]
_order = "date_created desc"
name = fields.Char(
"Reference",
required=True,
readonly=True,
states={"draft": [("readonly", False)]},
default=(lambda self: self.env["ir.sequence"].next_by_code("riba.distinta")),
)
config_id = fields.Many2one(
"riba.configuration",
string="Configuration",
index=True,
required=True,
readonly=True,
states={"draft": [("readonly", False)]},
help="C/O configuration to be used.",
)
state = fields.Selection(
[
("draft", "Draft"),
("accepted", "Accepted"),
("accredited", "Credited"),
("paid", "Paid"),
("unsolved", "Past Due"),
("cancel", "Canceled"),
],
"State",
readonly=True,
default="draft",
)
line_ids = fields.One2many(
"riba.distinta.line",
"distinta_id",
"C/O Due Dates",
readonly=True,
states={"draft": [("readonly", False)]},
)
user_id = fields.Many2one(
"res.users",
"User",
required=True,
readonly=True,
states={"draft": [("readonly", False)]},
default=lambda self: self.env.user,
)
date_created = fields.Date(
"Creation Date",
readonly=True,
default=lambda self: fields.Date.context_today(self),
)
date_accepted = fields.Date("Acceptance Date")
date_accreditation = fields.Date("Credit Date")
date_paid = fields.Date("Payment Date", readonly=True)
date_unsolved = fields.Date("Past Due Date", readonly=True)
company_id = fields.Many2one(
"res.company",
"Company",
required=True,
readonly=True,
states={"draft": [("readonly", False)]},
default=lambda self: self.env.company,
)
acceptance_move_ids = fields.Many2many(
"account.move",
compute="_compute_acceptance_move_ids",
string="Acceptance Entries",
)
accreditation_move_id = fields.Many2one(
"account.move", "Credit Entry", readonly=True
)
payment_ids = fields.Many2many(
"account.move.line", compute="_compute_payment_ids", string="Payments"
)
unsolved_move_ids = fields.Many2many(
"account.move", compute="_compute_unsolved_move_ids", string="Past Due Entries"
)
type = fields.Selection(string="Type", related="config_id.type", readonly=True)
registration_date = fields.Date(
"Registration Date",
states={
"draft": [("readonly", False)],
"cancel": [("readonly", False)],
},
readonly=True,
required=True,
default=lambda self: fields.Date.context_today(self),
help="Keep empty to use the current date.",
)
def action_riba_export(self):
return {
"type": "ir.actions.act_window",
"name": "Issue C/O",
"res_model": "riba.file.export",
"view_mode": "form",
"target": "new",
"context": self.env.context,
}
def unlink(self):
for riba_list in self:
if riba_list.state not in ("draft", "cancel"):
raise UserError(
_(
"Slip %s is in state '%s'. You can only delete documents"
" in state 'Draft' or 'Canceled'."
)
% (riba_list.name, riba_list.state)
)
super(RibaList, self).unlink()
def confirm(self):
for distinta in self:
for line in distinta.line_ids:
line.confirm()
def riba_cancel(self):
for distinta in self:
for line in distinta.line_ids:
line.state = "cancel"
if line.acceptance_move_id:
line.acceptance_move_id.unlink()
if line.unsolved_move_id:
line.unsolved_move_id.unlink()
if distinta.accreditation_move_id:
distinta.accreditation_move_id.unlink()
distinta.state = "cancel"
def settle_all_line(self):
for riba_list in self:
for line in riba_list.line_ids:
if line.state == "accredited":
line.riba_line_settlement()
@api.onchange("date_accepted", "date_accreditation")
def _onchange_date(self):
if self.date_accepted and self.date_accreditation:
if self.date_accepted > self.date_accreditation:
raise UserError(
_("Credit date must be greater or equal to" " acceptance date.")
)
def riba_unsolved(self):
self.state = "unsolved"
self.date_unsolved = fields.Date.context_today(self)
def test_state(self, state):
for riba_list in self:
for line in riba_list.line_ids:
if line.state != state:
return False
return True
    def test_accepted(self):
        """Return True when every line is in state 'confirmed'."""
        return self.test_state("confirmed")
    def test_unsolved(self):
        """Return True when every line is in state 'unsolved'."""
        return self.test_state("unsolved")
    def test_paid(self):
        """Return True when every line is in state 'paid'."""
        return self.test_state("paid")
def action_cancel_draft(self):
for riba_list in self:
riba_list.state = "draft"
for line in riba_list.line_ids:
line.state = "draft"
class RibaListLine(models.Model):
    """A single C/O due-date line belonging to a riba.distinta slip."""
    _name = "riba.distinta.line"
    _inherit = "mail.thread"
    _description = "C/O Details"
    _rec_name = "sequence"
    def _compute_line_values(self):
        """Aggregate amount, invoice dates and invoice numbers from the
        credit move lines linked to each slip line.

        ``invoice_date`` / ``invoice_number`` become comma-separated
        lists when a line groups several invoices.
        """
        for line in self:
            line.amount = 0.0
            line.invoice_date = ""
            line.invoice_number = ""
            for move_line in line.move_line_ids:
                line.amount += move_line.amount
                # First invoice sets the value; subsequent ones append.
                if not line.invoice_date:
                    line.invoice_date = str(
                        fields.Date.from_string(
                            move_line.move_line_id.move_id.invoice_date
                        ).strftime("%d/%m/%Y")
                    )
                else:
                    line.invoice_date = "{}, {}".format(
                        line.invoice_date,
                        str(
                            fields.Date.from_string(
                                move_line.move_line_id.move_id.invoice_date
                            ).strftime("%d/%m/%Y")
                        ),
                    )
                if not line.invoice_number:
                    # NOTE(review): the chained ``move_id.move_id`` looks
                    # odd for an account.move record — confirm this field
                    # chain actually exists on the target model.
                    line.invoice_number = str(
                        move_line.move_line_id.move_id.move_id.name
                        if move_line.move_line_id.move_id.display_name == "/"
                        else move_line.move_line_id.move_id.display_name
                    )
                else:
                    line.invoice_number = "{}, {}".format(
                        line.invoice_number,
                        str(
                            move_line.move_line_id.move_id.move_id.name
                            if move_line.move_line_id.move_id.display_name == "/"
                            else move_line.move_line_id.move_id.display_name
                        ),
                    )
amount = fields.Float(compute="_compute_line_values", string="Amount")
invoice_date = fields.Char(
compute="_compute_line_values", string="Invoice Date", size=256
)
invoice_number = fields.Char(
compute="_compute_line_values", string="Invoice Number", size=256
)
cig = fields.Char(compute="_compute_cig_cup_values", string="CIG", size=256)
cup = fields.Char(compute="_compute_cig_cup_values", string="CUP", size=256)
def _compute_cig_cup_values(self):
for line in self:
line.cig = ""
line.cup = ""
for move_line in line.move_line_ids:
for (
related_document
) in move_line.move_line_id.move_id.related_documents:
if related_document.cup:
line.cup = str(related_document.cup)
if related_document.cig:
line.cig = str(related_document.cig)
    def move_line_id_payment_get(self):
        """Return the ids of the acceptance-move lines booked on this
        line's acceptance account (empty list for unsaved records)."""
        # return the move line ids with the same account as the distinta line
        if not self.id:
            return []
        query = """ SELECT l.id
            FROM account_move_line l, riba_distinta_line rdl
            WHERE rdl.id = %s AND l.move_id = rdl.acceptance_move_id
            AND l.account_id = rdl.acceptance_account_id
        """
        self._cr.execute(query, (self.id,))
        return [row[0] for row in self._cr.fetchall()]
def test_reconciled(self):
# check whether all corresponding account move lines are reconciled
line_ids = self.move_line_id_payment_get()
if not line_ids:
return False
move_lines = self.env["account.move.line"].browse(line_ids)
reconcilied = all(line.reconciled for line in move_lines)
return reconcilied
    def _compute_lines(self):
        """Collect, per slip line, the move lines that paid the
        acceptance entry (via its matched credits); past-due lines get
        an empty payment set."""
        for riba_line in self:
            payment_lines = []
            if riba_line.acceptance_move_id and not riba_line.state == "unsolved":
                for line in riba_line.acceptance_move_id.line_ids:
                    # Keep only truthy ids (skips NewId/False entries).
                    payment_lines.extend(
                        [
                            _f
                            for _f in [
                                rp.credit_move_id.id for rp in line.matched_credit_ids
                            ]
                            if _f
                        ]
                    )
            # set() deduplicates ids shared by several acceptance lines.
            riba_line.payment_ids = self.env["account.move.line"].browse(
                list(set(payment_lines))
            )
sequence = fields.Integer("Number")
move_line_ids = fields.One2many(
"riba.distinta.move.line", "riba_line_id", string="Credit Move Lines"
)
acceptance_move_id = fields.Many2one(
"account.move", string="Acceptance Entry", readonly=True
)
unsolved_move_id = fields.Many2one(
"account.move", string="Past Due Entry", readonly=True
)
acceptance_account_id = fields.Many2one(
"account.account", string="Acceptance Account"
)
bank_id = fields.Many2one("res.partner.bank", string="Debtor Bank")
iban = fields.Char(
related="bank_id.acc_number", string="IBAN", store=False, readonly=True
)
distinta_id = fields.Many2one(
"riba.distinta", string="Slip", required=True, ondelete="cascade"
)
partner_id = fields.Many2one("res.partner", string="Customer", readonly=True)
due_date = fields.Date("Due Date", readonly=True)
state = fields.Selection(
[
("draft", "Draft"),
("confirmed", "Confirmed"),
("accredited", "Credited"),
("paid", "Paid"),
("unsolved", "Past Due"),
("cancel", "Canceled"),
],
"State",
readonly=True,
tracking=True,
)
payment_ids = fields.Many2many(
"account.move.line", compute="_compute_lines", string="Payments"
)
type = fields.Selection(
string="Type", related="distinta_id.config_id.type", readonly=True
)
config_id = fields.Many2one(
string="Configuration", related="distinta_id.config_id", readonly=True
)
    def confirm(self):
        """Create and post one acceptance journal entry per line.

        For every line: one credit move line per linked invoice move
        line (on the invoice's account), plus a single debit line for
        the total on the acceptance account.  The credit lines are then
        reconciled against the original invoice lines, and the line and
        its slip advance to 'confirmed'/'accepted'.
        """
        move_model = self.env["account.move"]
        move_line_model = self.env["account.move.line"]
        for line in self:
            journal = line.distinta_id.config_id.acceptance_journal_id
            total_credit = 0.0
            move = move_model.create(
                {
                    "ref": "C/O {} - Line {}".format(
                        line.distinta_id.name, line.sequence
                    ),
                    "journal_id": journal.id,
                    "date": line.distinta_id.registration_date,
                }
            )
            to_be_reconciled = self.env["account.move.line"]
            riba_move_line_name = ""
            for riba_move_line in line.move_line_ids:
                total_credit += riba_move_line.amount
                # Build a space-separated label of invoice numbers/names,
                # skipping values already present in the label.
                if (
                    str(riba_move_line.move_line_id.move_id.sequence_number)
                    and str(riba_move_line.move_line_id.move_id.sequence_number)
                    not in riba_move_line_name
                ):
                    riba_move_line_name = " ".join(
                        [
                            riba_move_line_name,
                            str(riba_move_line.move_line_id.move_id.sequence_number),
                        ]
                    ).lstrip()
                elif (
                    riba_move_line.move_line_id.name
                    and riba_move_line.move_line_id.name not in riba_move_line_name
                ):
                    riba_move_line_name = " ".join(
                        [riba_move_line_name, riba_move_line.move_line_id.name]
                    ).lstrip()
                # check_move_validity=False: the move is unbalanced until
                # the final debit line is created below.
                move_line = move_line_model.with_context(
                    {"check_move_validity": False}
                ).create(
                    {
                        "name": (
                            riba_move_line.move_line_id.move_id
                            and riba_move_line.move_line_id.move_id.sequence_number
                            or riba_move_line.move_line_id.name
                        ),
                        "partner_id": line.partner_id.id,
                        "account_id": riba_move_line.move_line_id.account_id.id,
                        "credit": riba_move_line.amount,
                        "debit": 0.0,
                        "move_id": move.id,
                    }
                )
                to_be_reconciled |= move_line
                to_be_reconciled |= riba_move_line.move_line_id
            move_line_model.with_context({"check_move_validity": False}).create(
                {
                    "name": "C/O %s-%s Ref. %s - %s"
                    % (
                        line.distinta_id.name,
                        line.sequence,
                        riba_move_line_name,
                        line.partner_id.name,
                    ),
                    "account_id": (
                        line.acceptance_account_id.id
                        or line.distinta_id.config_id.acceptance_account_id.id
                        # this way, when the line has no acceptance account,
                        # the account set on the riba configuration is used
                    ),
                    "partner_id": line.partner_id.id,
                    "date_maturity": line.due_date,
                    "credit": 0.0,
                    "debit": total_credit,
                    "move_id": move.id,
                }
            )
            move.action_post()
            to_be_reconciled.reconcile()
            line.write(
                {
                    "acceptance_move_id": move.id,
                    "state": "confirmed",
                }
            )
            line.distinta_id.state = "accepted"
            # Stamp the acceptance date only once, on the first confirm.
            if not line.distinta_id.date_accepted:
                line.distinta_id.date_accepted = fields.Date.context_today(self)
    def riba_line_settlement(self):
        """Close the acceptance entry of a credited line by posting a
        settlement move (credit on the acceptance account, debit on the
        accreditation account) and reconciling it with the acceptance
        debit line.

        :raises UserError: when no settlement journal is configured.
        """
        for riba_line in self:
            if not riba_line.distinta_id.config_id.settlement_journal_id:
                raise UserError(_("Please define a Settlement Journal."))
            # find the move lines of the entries to be closed
            move_model = self.env["account.move"]
            move_line_model = self.env["account.move.line"]
            settlement_move_line = move_line_model.search(
                [
                    ("account_id", "=", riba_line.acceptance_account_id.id),
                    ("move_id", "=", riba_line.acceptance_move_id.id),
                    ("debit", "!=", 0),
                ]
            )
            # NOTE(review): assumes the search returns exactly one line —
            # ``.debit`` on a multi-record set would raise; confirm the
            # acceptance move always has a single debit line here.
            settlement_move_amount = settlement_move_line.debit
            move_ref = "Settlement C/O {} - {}".format(
                riba_line.distinta_id.name,
                riba_line.partner_id.name,
            )
            settlement_move = move_model.create(
                {
                    "journal_id": (
                        riba_line.distinta_id.config_id.settlement_journal_id.id
                    ),
                    "date": date.today().strftime("%Y-%m-%d"),
                    "ref": move_ref,
                }
            )
            # check_move_validity=False: move is unbalanced until the
            # matching debit line below is created.
            move_line_credit = move_line_model.with_context(
                {"check_move_validity": False}
            ).create(
                {
                    "name": move_ref,
                    "partner_id": riba_line.partner_id.id,
                    "account_id": riba_line.acceptance_account_id.id,
                    "credit": settlement_move_amount,
                    "debit": 0.0,
                    "move_id": settlement_move.id,
                }
            )
            accr_acc = riba_line.distinta_id.config_id.accreditation_account_id
            move_line_model.with_context({"check_move_validity": False}).create(
                {
                    "name": move_ref,
                    "account_id": accr_acc.id,
                    "credit": 0.0,
                    "debit": settlement_move_amount,
                    "move_id": settlement_move.id,
                }
            )
            move_line_credit.move_id.action_post()
            to_be_settled = self.env["account.move.line"]
            to_be_settled |= move_line_credit
            to_be_settled |= settlement_move_line
            to_be_settled.reconcile()
class RibaListMoveLine(models.Model):
    """Link between a slip line and one credit move line it covers,
    carrying the amount taken from that move line."""
    _name = "riba.distinta.move.line"
    _description = "C/O Details"
    _rec_name = "amount"
    amount = fields.Float("Amount", digits="Account")
    move_line_id = fields.Many2one("account.move.line", string="Credit Move Line")
    riba_line_id = fields.Many2one(
        "riba.distinta.line", string="Slip Line", ondelete="cascade"
    )
|
"""
This file blocks all the routes defined automatically by cms_form.
"""
from odoo import http
from odoo.addons.cms_form.controllers.main import \
CMSFormController, CMSWizardFormController, CMSSearchFormController
class UwantedCMSFormController(CMSFormController):
    """Disable the generic cms_form route by rendering the 404 page.
    NOTE(review): class name is missing an 'n' ("Unwanted"); renaming is
    cosmetic but could break external references — left as-is.
    """
    @http.route()
    def cms_form(self, model, model_id=None, **kw):
        # Same route as the parent, but always answers with a 404 page.
        return http.request.render('website.404')
class UnwantedCMSWizardFormController(CMSWizardFormController):
    """Disable the cms_form wizard route by rendering the 404 page."""
    @http.route()
    def cms_wiz(self, wiz_model, model_id=None, **kw):
        return http.request.render('website.404')
class UnwantedCMSSearchFormController(CMSSearchFormController):
    """Disable the cms_form search route by rendering the 404 page."""
    @http.route()
    def cms_form(self, model, **kw):
        return http.request.render('website.404')
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Makes imagechartfilter.filter nullable while keeping CASCADE deletes.
    dependencies = [
        ('dashboard_app', '0021_auto_20151207_0921'),
    ]
    operations = [
        migrations.AlterField(
            model_name='imagechartfilter',
            name='filter',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='dashboard_app.TestRunFilter'),
        ),
    ]
|
# Odoo/OpenERP module manifest for the BOM price report.
{
    'name': 'BOM price report',
    'version': '0.0.1',
    'category': 'Generic Modules/Customization',
    'author': 'Micronaet s.r.l.',
    'website': 'http://www.micronaet.it',
    'depends': [
        'base',
        'mrp',
        'bom_total_component', # for bom component
        'report_aeroo',
    ],
    'init_xml': [],  # legacy key — presumably kept for old servers; TODO confirm
    'data': [
        "security/bom_group.xml",
        "report/report_bom.xml",
        "bom_view.xml",
        "wizard/duplicate_view.xml",
    ],
    'demo_xml' : [],
    'active' : False,
    'installable' : True,
}
|
__author__ = 'lijiyang'
# Previous local/test configuration, kept for reference as a string block:
'''
host = 'localhost'
port = 27017
db = 'test_app_store'
indexfile = 'repo/index.xml'
repo_path = '/home/public/users/lijiyang/appstore/repo/'
'''
# Active MongoDB connection and repository locations.
host = 'localhost'
port = 27017
db = 'sen5_app_store'
indexfile = 'repo/index.xml'
repo_path = '/home/appstore/appstore/repo/'
|
from openerp import models, fields, api, exceptions
from openerp.tools.translate import _
import re
import logging
_logger = logging.getLogger(__name__)
class CountryStateCity(models.Model):
    """
    Model added to manipulate separately the cities on Partner address.
    """
    _description = 'Model to manipulate Cities'
    _name = 'res.country.state.city'
    # DANE (Colombian national statistics department) assigns 5-digit
    # city codes.
    code = fields.Char('City Code', size=5, help='Code DANE - 5 digits-',
                       required=True)
    name = fields.Char('City Name', size=64, required=True)
    state_id = fields.Many2one('res.country.state', 'State', required=True)
    country_id = fields.Many2one('res.country', 'Country', required=True)
    _order = 'code'
class PartnerInfoExtended(models.Model):
    """Colombian localization of res.partner: split name fields,
    identification document handling (type, number, check digit) and
    country/state/city address chaining."""
    _name = 'res.partner'
    _inherit = 'res.partner'
    # Company Name (legal name)
    companyName = fields.Char("Name of the Company")
    # Brand Name (e.g. Claro Móvil = brand, COMCEL SA = legal name)
    companyBrandName = fields.Char("Brand")
    # companyType mirrors the company_type selection below.
    companyType = fields.Selection(related='company_type')
    # Adding new name fields
    x_name1 = fields.Char("First Name")
    x_name2 = fields.Char("Second Name")
    x_lastname1 = fields.Char("Last Name")
    x_lastname2 = fields.Char("Second Last Name")
    # Document information (DIAN document-type codes)
    doctype = fields.Selection(
        [
            (1, "No identification"),
            (11, "11 - Birth Certificate"),
            (12, "12 - Identity Card"),
            (13, "13 - Citizenship Card"),
            (21, "21 - Alien Registration Card"),
            (22, "22 - Foreigner ID"),
            (31, "31 - TAX Number (NIT)"),
            (41, "41 - Passport"),
            (42, "42 - Foreign Identification Document"),
            (43, "43 - No Foreign Identification")
        ], "Type of Identification"
    )
    xidentification = fields.Char("Document Number", store=True,
                                  help="Enter the Identification Number")
    verificationDigit = fields.Integer('VD', size=2)
    # NIT rendered as xx.xxx.xxx-D (D = verification digit).
    formatedNit = fields.Char(
        string='NIT Formatted',
        compute="_compute_concat_nit",
        store=True
    )
    # Tributary regime (DIAN codes)
    x_pn_retri = fields.Selection(
        [
            (6, "Simplified"),
            (23, "Natural Person"),
            (7, "Common"),
            (11, "Great Taxpayer Autorretenedor"),
            (22, "International"),
            (25, "Common Autorretenedor"),
            (24, "Great Contributor")
        ], "Tax Regime"
    )
    # CIIU - International Standard Industrial Classification (ISIC)
    ciiu = fields.Many2one('ciiu', "ISIC Activity")
    personType = fields.Selection(
        [
            (1, "Natural"),
            (2, "Juridical")
        ],
        "Type of Person",
        default=1
    )
    # Replacing the field company_type
    company_type = fields.Selection(
        [
            ('person', 'Individual'),
            ('company', 'Company')
        ]
    )
    # Boolean if contact is a company or an individual
    is_company = fields.Boolean(string=None)
    # Verification digit
    dv = fields.Integer(string=None, store=True)
    # Country -> State -> Municipality - Logic
    country_id = fields.Many2one('res.country', "Country")
    xcity = fields.Many2one('res.country.state.city', "Municipality")
    city = fields.Char(related="xcity.name")
    # identification field has to be unique,
    # therefore a constraint will validate it:
    _sql_constraints = [
        ('ident_unique',
         'UNIQUE(doctype,xidentification)',
         "Identification number must be unique!"),
    ]
    # Check to handle change of Country, City and Municipality
    change_country = fields.Boolean(string="Change Country / Department?",
                                    default=True, store=False)
    # Name of point of sales / delivery contact
    pos_name = fields.Char("Point of Sales Name")
    # Birthday of the contact (only useful for non-company contacts)
    xbirthday = fields.Date("Birthday")
def get_doctype(self, cr, uid, context={'lang': 'es_CO'}):
result = []
for item in self.pool.get('res.partner').fields_get(cr, uid, allfields=['doctype'], context=context)['doctype']['selection']:
result.append({'id': item[0], 'name': item[1]})
return result
def get_persontype(self, cr, uid, context={'lang': 'es_CO'}):
result = []
for item in self.pool.get('res.partner').fields_get(cr, uid, allfields=['personType'], context=context)['personType']['selection']:
result.append({'id': item[0], 'name': item[1]})
return result
@api.depends('xidentification')
def _compute_concat_nit(self):
"""
Concatenating and formatting the NIT number in order to have it
consistent everywhere where it is needed
@return: void
"""
# Executing only for Document Type 31 (NIT)
for partner in self:
if partner.doctype is 31:
# First check if entered value is valid
self._check_ident()
self._check_ident_num()
# Instead of showing "False" we put en empty string
if partner.xidentification is False:
partner.xidentification = ''
else:
partner.formatedNit = ''
# Formatting the NIT: xx.xxx.xxx-x
s = str(partner.xidentification)[::-1]
newnit = '.'.join(s[i:i+3] for i in range(0, len(s), 3))
newnit = newnit[::-1]
nitList = [
newnit,
# Calling the NIT Function
# which creates the Verification Code:
self._check_dv(str(partner.xidentification))
]
formatedNitList = []
for item in nitList:
if item is not '':
formatedNitList.append(item)
partner.formatedNit = '-' .join(formatedNitList)
# Saving Verification digit in a proper field
for pnitem in self:
pnitem.dv = nitList[1]
@api.onchange('x_name1', 'x_name2', 'x_lastname1', 'x_lastname2', 'companyName',
'pos_name', 'companyBrandName')
def _concat_name(self):
"""
This function concatenates the four name fields in order to be able to
search for the entire name. On the other hand the original name field
should not be editable anymore as the new name fields should fill it up
automatically.
@return: void
"""
# Avoiding that "False" will be written into the name field
if self.x_name1 is False:
self.x_name1 = ''
if self.x_name2 is False:
self.x_name2 = ''
if self.x_lastname1 is False:
self.x_lastname1 = ''
if self.x_lastname2 is False:
self.x_lastname2 = ''
# Collecting all names in a field that will be concatenated
nameList = [
self.x_name1.encode(encoding='utf-8').strip(),
self.x_name2.encode(encoding='utf-8').strip(),
self.x_lastname1.encode(encoding='utf-8').strip(),
self.x_lastname2.encode(encoding='utf-8').strip()
]
formatedList = []
if self.companyName is False:
if self.type == 'delivery':
self.name = self.pos_name
self.x_name1 = False
self.x_name2 = False
self.x_lastname1 = False
self.x_lastname2 = False
self.doctype = 1
else:
for item in nameList:
if item is not '':
formatedList.append(item)
self.name = ' ' .join(formatedList).title()
else:
# Some Companies are know for their Brand, which could conflict from the users point of view while
# searching the company (e.j. o2 = brand, Telefonica = Company)
if self.companyBrandName is not False:
delimiter = ', '
company_list = (self.companyBrandName, self.companyName)
self.name = delimiter.join(company_list).title()
else:
self.name = self.companyName.title()
@api.onchange('name')
def onChangeName(self):
"""
The name field gets concatenated by the four name fields.
If a user enters a value anyway, the value will be deleted except first
name has no value. Reason: In certain forms of odoo it is still
possible to add value to the original name field. Therefore we have to
ensure that this field can receive values unless we offer the four name
fields.
@return: void
"""
if self.x_name1 is not False:
if len(self.x_name1) > 0:
self._concat_name()
if self.companyName is not False:
if len(self.companyName) > 0:
self._concat_name()
@api.onchange('personType')
def onChangePersonType(self):
"""
Delete entries in name and company fields once the type of person
changes. This avoids unnecessary entries in the database and makes the
contact cleaner and ready for analysis
@return: void
"""
if self.personType is 2:
self.x_name1 = ''
self.x_name2 = ''
self.x_lastname1 = ''
self.x_lastname2 = ''
self.x_pn_retri = 7
elif self.personType is 1:
self.companyName = False
self.companyBrandName = False
self.x_pn_retri = False
    @api.onchange('doctype')
    def onChangeDocumentType(self):
        """
        If Document Type changes we delete the document number as for different
        document types there are different rules that apply e.g. foreign
        documents (e.g. 21) allows letters in the value. Here we reduce the
        risk of having corrupt information about the contact.
        @return: void
        """
        # Clearing forces the user to re-enter a value valid for the new type.
        self.xidentification = False
@api.onchange('company_type')
def onChangeCompanyType(self):
"""
This function changes the person type once the company type changes.
If it is a company, document type 31 will be selected automatically as
in Colombia it's more likely that it will be chosen by the user.
@return: void
"""
if self.company_type == 'company':
self.personType = 2
self.is_company = True
self.doctype = 31
else:
self.personType = 1
self.is_company = False
self.doctype = 1
@api.onchange('is_company')
def onChangeIsCompany(self):
"""
This function changes the person type field and the company type if
checked / unchecked
@return: void
"""
if self.is_company is True:
self.personType = 2
self.company_type = 'company'
self.xbirthday = False
else:
self.is_company = False
self.company_type = 'person'
@api.onchange('change_country')
def onChangeAddress(self):
"""
This function changes the person type field and the company type if
checked / unchecked
@return: void
"""
if self.change_country is True:
self.country_id = False
self.state_id = False
self.xcity = False
    def _check_dv(self, nit):
        """
        Function to calculate the check digit (DV) of the NIT. So there is no
        need to type it manually.
        @param nit: Enter the NIT number without check digit
        @return: String
        """
        # Non-NIT documents are returned unchanged (no check digit).
        for item in self:
            if item.doctype != 31:
                return str(nit)
        # Left-pad to 15 digits, multiply by the fixed prime weights and
        # reduce modulo 11 (standard DIAN NIT check-digit algorithm).
        nitString = '0'*(15-len(nit)) + nit
        vl = list(nitString)
        result = (
            int(vl[0])*71 + int(vl[1])*67 + int(vl[2])*59 + int(vl[3])*53 +
            int(vl[4])*47 + int(vl[5])*43 + int(vl[6])*41 + int(vl[7])*37 +
            int(vl[8])*29 + int(vl[9])*23 + int(vl[10])*19 + int(vl[11])*17 +
            int(vl[12])*13 + int(vl[13])*7 + int(vl[14])*3
        ) % 11
        if result in (0, 1):
            return str(result)
        else:
            return str(11-result)
    def onchange_location(self, cr, uid, ids, country_id=False,
                          state_id=False):
        """
        This functions is a great helper when you enter the customer's
        location. It solves the problem of various cities with the same name in
        a country
        @param country_id: Country Id (ISO)
        @param state_id: State Id (ISO)
        @return: object
        """
        # Old-API onchange: narrow either the state list (when a country
        # is chosen) or the city list (when a state is chosen).
        if country_id:
            mymodel = 'res.country.state'
            filter_column = 'country_id'
            check_value = country_id
            domain = 'state_id'
        elif state_id:
            mymodel = 'res.country.state.city'
            filter_column = 'state_id'
            check_value = state_id
            domain = 'xcity'
        else:
            return {}
        obj = self.pool.get(mymodel)
        ids = obj.search(cr, uid, [(filter_column, '=', check_value)])
        # Restrict the dependent field's domain and clear its value.
        return {
            'domain': {domain: [('id', 'in', ids)]},
            'value': {domain: ''}
        }
@api.constrains('xidentification')
def _check_ident(self):
"""
This function checks the number length in the Identification field.
Min 6, Max 12 digits.
@return: void
"""
for item in self:
if item.doctype is not 1:
msg = _('Error! Number of digits in Identification number must be'
'between 2 and 12')
if len(str(item.xidentification)) < 2:
raise exceptions.ValidationError(msg)
elif len(str(item.xidentification)) > 12:
raise exceptions.ValidationError(msg)
@api.constrains('xidentification')
def _check_ident_num(self):
"""
This function checks the content of the identification fields: Type of
document and number cannot be empty.
There are two document types that permit letters in the identification
field: 21 and 41. The rest does not permit any letters
@return: void
"""
for item in self:
if item.doctype is not 1:
if item.xidentification is not False and \
item.doctype != 21 and \
item.doctype != 41:
if re.match("^[0-9]+$", item.xidentification) is None:
msg = _('Error! Identification number can only '
'have numbers')
raise exceptions.ValidationError(msg)
@api.constrains('doctype', 'xidentification')
def _checkDocType(self):
"""
This function throws and error if there is no document type selected.
@return: void
"""
if self.doctype is not 1:
if self.doctype is False:
msg = _('Error! Please choose an identification type')
raise exceptions.ValidationError(msg)
elif self.xidentification is False and self.doctype is not 43:
msg = _('Error! Identification number is mandatory')
raise exceptions.ValidationError(msg)
@api.constrains('x_name1', 'x_name2', 'companyName')
def _check_names(self):
"""
Double check: Although validation is checked within the frontend (xml)
we check it again to get sure
"""
if self.is_company is True:
if self.personType is 1:
if self.x_name1 is False or self.x_name1 == '':
msg = _('Error! Please enter the persons name')
raise exceptions.ValidationError(msg)
elif self.personType is 2:
if self.companyName is False:
msg = _('Error! Please enter the companys name')
raise exceptions.ValidationError(msg)
elif self.type == 'delivery':
if self.pos_name is False or self.pos_name == '':
msg = _('Error! Please enter the persons name')
raise exceptions.ValidationError(msg)
else:
if self.x_name1 is False or self.x_name1 == '':
msg = _('Error! Please enter the name of the person')
raise exceptions.ValidationError(msg)
@api.constrains('personType')
def _check_person_type(self):
"""
This function checks if the person type is not empty
@return: void
"""
if self.personType is False:
msg = _('Error! Please select a person type')
raise exceptions.ValidationError(msg)
|
from . import ir_mail_server
|
import account_check_duo
import account_voucher
import res_partner_bank
import wizard_third
import wizard_issued
import account
import report
import account_checkbook
import ticket_deposit
|
import datetime
import dj_database_url
from app.schedule.service.sms import FakeSMS
from .default import *
# Open hosts/CORS and database taken from DATABASE_URL (dj_database_url).
ALLOWED_HOSTS = ['*']
DATABASES = {'default': dj_database_url.config()}
CORS_ORIGIN_ALLOW_ALL = True
JWT_AUTH = {
    'JWT_EXPIRATION_DELTA': datetime.timedelta(days=10)
}
# Overrides the DJOSER dict imported from .default.
DJOSER['SEND_ACTIVATION_EMAIL'] = False
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication'
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.DjangoFilterBackend',
        'rest_framework.filters.OrderingFilter',
    ),
}
# Celery runs tasks eagerly, in-process, against an in-memory broker.
CELERY_ALWAYS_EAGER = True
CELERY_BROKER_URL = 'memory://localhost:8000//'
CELERY_EAGER_PROPAGATE = True
CELERY_TASK_MAX_RETRY = 1
# Fake external integrations so no real push notification or SMS is sent.
FIREBASE_TOKEN = 'A-FAKE-TOKEN'
MESSAGE_CLASS = FakeSMS
|
from ..base.Mineral import Mineral
from erukar.system.engine import Observation
import erukar
class Crystal(Mineral):
    """Crystal material: rare, highly desirable, fragile and expensive."""
    Probability = 0.05
    Desirability = 16.0
    PriceMultiplier = 50
    WeightMultiplier = 4.5
    DurabilityMultiplier = 0.5
    InventoryName = "Crystal"
    InventoryDescription = 'Delicate, clear, and the most expensive material known to man'
    # Entity types that may be crafted from this material.
    PermittedEntities = [
        erukar.system.Weapon,
        erukar.system.Ammunition,
        erukar.system.Armor,
    ]
|
from django import forms
from django.core.exceptions import ValidationError
from elections.uk.geo_helpers import (
BadPostcodeException,
UnknownGeoException,
get_ballots_from_postcode,
)
from people.forms.fields import CurrentUnlockedBallotsField
class PostcodeForm(forms.Form):
    """Single-field search form accepting a candidate name or postcode."""
    q = forms.CharField(
        label="Enter a candidate name or postcode",
        max_length=200,
        widget=forms.TextInput(
            attrs={"placeholder": "Enter a name or postcode"}
        ),
    )
    def clean_q(self):
        """Validate the query via the postcode-to-ballots lookup; lookup
        failures surface as form validation errors."""
        postcode = self.cleaned_data["q"]
        try:
            # Check if this postcode is valid and
            # contained in a constituency. (If it's valid then the
            # result is cached, so this doesn't cause a double lookup.)
            get_ballots_from_postcode(postcode)
        except (UnknownGeoException, BadPostcodeException) as e:
            raise ValidationError(str(e))
        return postcode
class SelectBallotForm(forms.Form):
    """
    Just a ballot picker
    """
    # Choices restricted to current, unlocked ballots by the custom field.
    ballot = CurrentUnlockedBallotsField()
|
"""
Test helpers for Comprehensive Theming.
"""
from django.test import TestCase
from mock import patch
from openedx.core.djangoapps.theming import helpers
class ThemingHelpersTests(TestCase):
    """
    Make sure some of the theming helper functions work
    """
    def test_get_value_returns_override(self):
        """
        Tests to make sure the get_value() operation returns a combined dictionary consisting
        of the base container with overridden keys from the microsite configuration
        """
        with patch('microsite_configuration.microsite.get_value') as mock_get_value:
            override_key = 'JWT_ISSUER'
            override_value = 'testing'
            # The microsite lookup is mocked so helpers.get_value() sees
            # the overridden mapping for 'JWT_AUTH'.
            mock_get_value.return_value = {override_key: override_value}
            jwt_auth = helpers.get_value('JWT_AUTH')
            self.assertEqual(jwt_auth[override_key], override_value)
|
from xmodule.modulestore import Location
from contentstore.utils import get_modulestore
from xmodule.x_module import XModuleDescriptor
from xmodule.modulestore.inheritance import own_metadata
from xblock.core import Scope
from xmodule.course_module import CourseDescriptor
import copy
class CourseMetadata(object):
    '''
    For CRUD operations on metadata fields which do not have specific editors
    on the other pages including any user generated ones.
    The objects have no predefined attrs but instead are obj encodings of the
    editable metadata.
    '''
    # Settings that have dedicated editors elsewhere and therefore must not
    # be exposed/edited through this generic metadata interface.
    FILTERED_LIST = ['xml_attributes',
                     'start',
                     'end',
                     'enrollment_start',
                     'enrollment_end',
                     'tabs',
                     'graceperiod',
                     'checklists']

    @classmethod
    def fetch(cls, course_location):
        """
        Fetch the key:value editable course details for the given course from
        persistence and return a CourseMetadata model.
        """
        if not isinstance(course_location, Location):
            course_location = Location(course_location)
        course = {}
        descriptor = get_modulestore(course_location).get_item(course_location)
        # Only Scope.settings fields outside the blacklist are editable here.
        for field in descriptor.fields + descriptor.lms.fields:
            if field.scope != Scope.settings:
                continue
            if field.name not in cls.FILTERED_LIST:
                course[field.name] = field.read_json(descriptor)
        return course

    @classmethod
    def update_from_json(cls, course_location, jsondict, filter_tabs=True):
        """
        Decode the json into CourseMetadata and save any changed attrs to the db.
        Ensures none of the fields are in the blacklist.
        """
        descriptor = get_modulestore(course_location).get_item(course_location)
        dirty = False
        # Copy the filtered list to avoid permanently changing the class attribute
        filtered_list = copy.copy(cls.FILTERED_LIST)
        # Don't filter on the tab attribute if filter_tabs is False
        if not filter_tabs:
            filtered_list.remove("tabs")
        for k, v in jsondict.iteritems():
            # should it be an error if one of the filtered list items is in the payload?
            if k in filtered_list:
                continue
            if hasattr(descriptor, k) and getattr(descriptor, k) != v:
                dirty = True
                value = getattr(CourseDescriptor, k).from_json(v)
                setattr(descriptor, k, value)
            # Bug fix: the original compared the lms attribute to the KEY
            # (``!= k``); it must compare to the incoming VALUE so changed
            # lms fields are saved and unchanged ones are skipped.
            elif hasattr(descriptor.lms, k) and getattr(descriptor.lms, k) != v:
                dirty = True
                value = getattr(CourseDescriptor.lms, k).from_json(v)
                setattr(descriptor.lms, k, value)
        if dirty:
            get_modulestore(course_location).update_metadata(course_location,
                                                             own_metadata(descriptor))
        # Could just generate and return a course obj w/o doing any db reads,
        # but I put the reads in as a means to confirm it persisted correctly
        return cls.fetch(course_location)

    @classmethod
    def delete_key(cls, course_location, payload):
        '''
        Remove the given metadata key(s) from the course. payload can be a
        single key or [key..]
        '''
        descriptor = get_modulestore(course_location).get_item(course_location)
        for key in payload['deleteKeys']:
            if hasattr(descriptor, key):
                delattr(descriptor, key)
            elif hasattr(descriptor.lms, key):
                delattr(descriptor.lms, key)
        get_modulestore(course_location).update_metadata(course_location,
                                                         own_metadata(descriptor))
        return cls.fetch(course_location)
|
from odoo import models
class ContractContract(models.Model):
    _inherit = 'contract.contract'

    def _prepare_invoice(self, date_invoice, journal=None):
        """Extend invoice preparation with the covered period dates.

        Sets ``start_date_period`` to the earliest line period start and
        ``end_date_period`` to the latest line period end among the
        lines being invoiced.
        """
        contract_lines = self._get_lines_to_invoice(date_invoice)
        line_min_fec = min(
            contract_lines, key=lambda x: x.next_period_date_start)
        # Bug fix: the original used min() here too, so end_date_period
        # carried the EARLIEST period end instead of the latest.
        line_max_fec = max(
            contract_lines, key=lambda x: x.next_period_date_end)
        invoice_vals, move_form = super(
            ContractContract, self)._prepare_invoice(
            date_invoice, journal=journal)
        invoice_vals.update(
            {'start_date_period': line_min_fec.next_period_date_start,
             'end_date_period': line_max_fec.next_period_date_end})
        return invoice_vals, move_form
|
from frappe import _
def get_data():
    """Return the Selling module's desktop layout configuration for Frappe.

    Each top-level dict describes one card on the module landing page: a
    translated ``label``, an ``icon`` name, and an ``items`` list whose
    entries link to a page, a doctype, or a (query) report.  Commented-out
    entries are links that were deliberately hidden from the UI but kept
    for reference.
    """
    return [
        {
            "label": _("Documents"),
            "icon": "icon-star",
            "items": [
                {
                    "type": "page",
                    "name": "sales-dashboard",
                    "icon": "icon-dashboard",
                    "label": _("Sales Dashboard"),
                    "link": "sales-dashboard",
                    "description": _("Sales Dashboard"),
                },
                {
                    "type": "doctype",
                    "name": "Sales Invoice",
                    "description": _("Sales Invoice."),
                },
                {
                    "type": "doctype",
                    "name": "Trials",
                    "description": _("Trials Dashboard."),
                },
                {
                    "type": "doctype",
                    "name": "Work Order",
                    "description": _("Work Order."),
                },
                {
                    "type": "doctype",
                    "name": "Stock Entry",
                    "description": _("Stock Entry."),
                },
                {
                    "type": "doctype",
                    "name": "Customer",
                    "description": _("Customer database."),
                },
                # {
                #     "type": "doctype",
                #     "name": "Opportunity",
                #     "description": _("Potential opportunities for selling."),
                # },
                # {
                #     "type": "doctype",
                #     "name": "Quotation",
                #     "description": _("Quotes to Leads or Customers."),
                # },
                # {
                #     "type": "doctype",
                #     "name": "Sales Order",
                #     "description": _("Confirmed orders from Customers."),
                # },
                {
                    "type": "doctype",
                    "name": "Contact",
                    "description": _("All Contacts."),
                },
                {
                    "type": "doctype",
                    "name": "Address",
                    "description": _("All Addresses."),
                },
                # {
                #     "type": "doctype",
                #     "name": "Item",
                #     "description": _("All Products or Services."),
                # },
            ]
        },
        {
            "label": _("Tools"),
            "icon": "icon-wrench",
            "items": [
                {
                    "type": "doctype",
                    "name": "SMS Center",
                    "description": _("Send mass SMS to your contacts"),
                },
                {
                    "type": "doctype",
                    "name": "Newsletter",
                    "description": _("Newsletters to contacts, leads."),
                },
            ]
        },
        {
            "label": _("Setup"),
            "icon": "icon-cog",
            "items": [
                {
                    "type": "doctype",
                    "name": "Selling Settings",
                    "description": _("Default settings for selling transactions.")
                },
                {
                    "type": "doctype",
                    "name": "Campaign",
                    "description": _("Sales campaigns."),
                },
                # The tree views below all share the generic "Sales Browser"
                # page; the ``link`` suffix selects which doctype's tree is
                # rendered.
                {
                    "type": "page",
                    "label": _("Customer Group"),
                    "name": "Sales Browser",
                    "icon": "icon-sitemap",
                    "link": "Sales Browser/Customer Group",
                    "description": _("Manage Customer Group Tree."),
                    "doctype": "Customer Group",
                },
                {
                    "type": "page",
                    "label": _("Territory"),
                    "name": "Sales Browser",
                    "icon": "icon-sitemap",
                    "link": "Sales Browser/Territory",
                    "description": _("Manage Territory Tree."),
                    "doctype": "Territory",
                },
                {
                    "type": "doctype",
                    "name": "Sales Partner",
                    "description": _("Manage Sales Partners."),
                },
                {
                    "type": "page",
                    "label": _("Sales Person"),
                    "name": "Sales Browser",
                    "icon": "icon-sitemap",
                    "link": "Sales Browser/Sales Person",
                    "description": _("Manage Sales Person Tree."),
                    "doctype": "Sales Person",
                },
                {
                    "type": "page",
                    "name": "Sales Browser",
                    "icon": "icon-sitemap",
                    "label": _("Item Group Tree"),
                    "link": "Sales Browser/Item Group",
                    "description": _("Tree of Item Groups."),
                    "doctype": "Item Group",
                },
                {
                    "type": "doctype",
                    "name": "Terms and Conditions",
                    "label": _("Terms and Conditions Template"),
                    "description": _("Template of terms or contract.")
                },
                {
                    "type": "doctype",
                    "name": "Sales Taxes and Charges Master",
                    "description": _("Tax template for selling transactions.")
                },
                {
                    "type": "doctype",
                    "name": "Shipping Rule",
                    "description": _("Rules for adding shipping costs.")
                },
                {
                    "type": "doctype",
                    "name": "Price List",
                    "description": _("Price List master.")
                },
                # {
                #     "type": "doctype",
                #     "name": "Item Price",
                #     "description": _("Multiple Item prices."),
                #     "route": "Report/Item Price"
                # },
                {
                    "type": "doctype",
                    "name": "Offer",
                    "description": _("Rules for applying pricing and discount.")
                },
                # {
                #     "type": "doctype",
                #     "name": "Sales BOM",
                #     "description": _("Bundle items at time of sale."),
                # },
                # {
                #     "type": "doctype",
                #     "name": "Sales Email Settings",
                #     "description": _("Setup incoming server for sales email id. (e.g. sales@example.com)")
                # },
                {
                    "type": "doctype",
                    "name": "Industry Type",
                    "description": _("Track Leads by Industry Type.")
                },
                {
                    "type": "doctype",
                    "name": "SMS Settings",
                    "description": _("Setup SMS gateway settings")
                },
            ]
        },
        {
            "label": _("Main Reports"),
            "icon": "icon-table",
            "items": [
                {
                    "type": "page",
                    "name": "sales-analytics",
                    "label": _("Sales Analytics"),
                    "icon": "icon-bar-chart",
                },
                {
                    "type": "page",
                    "name": "sales-funnel",
                    "label": _("Sales Funnel"),
                    "icon": "icon-bar-chart",
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Customer Acquisition and Loyalty",
                    "doctype": "Customer",
                    "icon": "icon-bar-chart",
                },
            ]
        },
        {
            "label": _("Standard Reports"),
            "icon": "icon-list",
            "items": [
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Lead Details",
                    "doctype": "Lead"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Customer Addresses and Contacts",
                    "doctype": "Contact"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Ordered Items To Be Delivered",
                    "doctype": "Sales Order"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Sales Person-wise Transaction Summary",
                    "doctype": "Sales Order"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Item-wise Sales History",
                    "doctype": "Item"
                },
                # These two reports need an explicit ``route`` because their
                # display names contain parentheses that differ from the
                # stored report names.
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Territory Target Variance (Item Group-Wise)",
                    "route": "query-report/Territory Target Variance Item Group-Wise",
                    "doctype": "Territory"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Sales Person Target Variance (Item Group-Wise)",
                    "route": "query-report/Sales Person Target Variance Item Group-Wise",
                    "doctype": "Sales Person",
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Customers Not Buying Since Long Time",
                    "doctype": "Sales Order"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Quotation Trends",
                    "doctype": "Quotation"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Sales Order Trends",
                    "doctype": "Sales Order"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Available Stock for Packing Items",
                    "doctype": "Item",
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Pending SO Items For Purchase Request",
                    "doctype": "Sales Order"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "icon": "icon-file-text",
                    "name": "Gift Voucher Sales Report",
                    "doctype": "Gift Voucher"
                },
            ]
        },
    ]
|
import time
from random import randint, gauss
from colorsys import hls_to_rgb
from lampapp import LampApp
from hardware.lamp import Color #FIXME odd import..
# Application wiring: a single LampApp instance drives this script.
app = LampApp()
# Declare the hardware/services this app requires before it can run.
app.need("lamp")
app.need("volume")
def randColor(light, sat, mu=0.3, sigma=0.06):
    """Return a Color whose hue is drawn from a Gaussian around *mu*.

    The hue is sampled with gauss(mu, sigma) and wrapped onto [0, 1);
    *light* and *sat* are the fixed HLS lightness and saturation.  The
    resulting RGB channels are scaled to 0-255 integers.
    """
    hue = gauss(mu, sigma) % 1
    r, g, b = hls_to_rgb(hue, light, sat)
    return Color(int(255 * r), int(255 * g), int(255 * b))
@app.setup()
def setup():
    """Initialise the shared animation state used by loop().

    Globals:
        on         -- lamp power flag (set here, toggled elsewhere if at all)
        count      -- last volume reading, drives the hue
        saturation -- fixed HLS saturation for generated colours
        light      -- fixed HLS lightness for generated colours
        colors     -- one Color per lamp pixel, seeded around hue 0
    """
    global on, count, saturation, light, colors
    on, count = False, 0
    # hls_to_rgb(h, l, s): these are the fixed l/s components.
    saturation, light = 0.7, 0.3
    colors = [randColor(light, saturation, mu=0)
              for _ in range(app.lamp.nb_pixel)]
@app.every(0.05)
def loop():
    """Animation tick (every 50 ms): map the volume level to a hue and repaint."""
    global on, count, saturation, light, colors
    count = app.volume.value
    app.lamp.turn_on(flush=False)
    # Project the volume reading onto the hue circle [0, 1).
    mu = (count / 900.) % 1
    # Re-colour zero, one or two randomly chosen pixels near the current hue.
    for _ in range(randint(0, 2)):
        pixel = randint(0, app.lamp.nb_pixel - 1)
        colors[pixel] = randColor(light, saturation, mu=mu)
    # Write every pixel (1-indexed on the lamp), then flush the frame once.
    for pixel in range(app.lamp.nb_pixel):
        app.lamp.turn_on(pixel + 1, colors[pixel], flush=False)
    app.lamp.flush()
# Start the event loop only when executed as a script (not on import).
if __name__ == "__main__":
    app.run()
|
from taiga.base.api import serializers
from taiga.base.fields import TagsField
from taiga.base.fields import PgArrayField
from taiga.base.neighbors import NeighborsSerializerMixin
from taiga.mdrender.service import render as mdrender
from taiga.projects.validators import ProjectExistsValidator
from taiga.projects.notifications.validators import WatchersValidator
from taiga.projects.serializers import BasicIssueStatusSerializer
from taiga.users.serializers import BasicInfoSerializer as UserBasicInfoSerializer
from . import models
class IssueSerializer(WatchersValidator, serializers.ModelSerializer):
    """Full read/write serializer for Issue objects.

    Mixes in WatchersValidator so watcher assignments are validated on
    write.  Several read-only presentation fields (rendered markdown,
    vote count, generated user stories) are computed per object by the
    ``get_*`` methods below.
    """

    # NOTE(review): DRF's serializer metaclass fixes output field order from
    # the declaration order here — keep it stable.
    tags = TagsField(required=False)
    external_reference = PgArrayField(required=False)
    is_closed = serializers.Field(source="is_closed")
    comment = serializers.SerializerMethodField("get_comment")
    generated_user_stories = serializers.SerializerMethodField("get_generated_user_stories")
    blocked_note_html = serializers.SerializerMethodField("get_blocked_note_html")
    description_html = serializers.SerializerMethodField("get_description_html")
    votes = serializers.SerializerMethodField("get_votes_number")
    status_extra_info = BasicIssueStatusSerializer(source="status", required=False, read_only=True)
    assigned_to_extra_info = UserBasicInfoSerializer(source="assigned_to", required=False, read_only=True)

    class Meta:
        model = models.Issue
        read_only_fields = ('id', 'ref', 'created_date', 'modified_date')

    def get_comment(self, obj):
        """Always return "" — the field only exists in the payload shape."""
        # NOTE: this method and field are necessary for historical comments
        # to work.
        return ""

    def get_generated_user_stories(self, obj):
        """Return minimal dicts (id/ref/subject) of stories generated from this issue."""
        return obj.generated_user_stories.values("id", "ref", "subject")

    def get_blocked_note_html(self, obj):
        """Render the blocked-note markdown to HTML in the project context."""
        return mdrender(obj.project, obj.blocked_note)

    def get_description_html(self, obj):
        """Render the description markdown to HTML in the project context."""
        return mdrender(obj.project, obj.description)

    def get_votes_number(self, obj):
        """Return the annotated vote count, defaulting to 0 when absent."""
        # The "votes_count" attribute is attached in the get_queryset of the viewset.
        return getattr(obj, "votes_count", 0)
class IssueListSerializer(IssueSerializer):
    """Lighter variant of IssueSerializer for list endpoints.

    Excludes the (potentially large) description fields from the payload.
    """

    class Meta:
        model = models.Issue
        read_only_fields = ('id', 'ref', 'created_date', 'modified_date')
        exclude = ("description", "description_html")
class IssueNeighborsSerializer(NeighborsSerializerMixin, IssueSerializer):
    """IssueSerializer extended with previous/next neighbour issues."""

    def serialize_neighbor(self, neighbor):
        # Neighbours are rendered with the minimal serializer to keep the
        # payload small (id/ref/subject only).
        return NeighborIssueSerializer(neighbor).data
class NeighborIssueSerializer(serializers.ModelSerializer):
    """Minimal issue representation used for neighbour links."""

    class Meta:
        model = models.Issue
        fields = ("id", "ref", "subject")
        # depth 0: serialize foreign keys as ids, no nested expansion.
        depth = 0
class IssuesBulkSerializer(ProjectExistsValidator, serializers.Serializer):
    """Input validator for bulk issue creation.

    ProjectExistsValidator checks that ``project_id`` refers to a real
    project.  ``bulk_issues`` is free-form text — presumably one issue
    subject per line; TODO confirm against the bulk-create service.
    """

    project_id = serializers.IntegerField()
    bulk_issues = serializers.CharField()
|
from . import test_biaya_jabatan, test_ptkp, test_pph_rate, test_partner_pph_21
|
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
import sys
import os
import imp
import json
from path import path
from .discussionsettings import *
from lms.lib.xblock.mixin import LmsBlockMixin
PLATFORM_NAME = "PatX"
CC_MERCHANT_NAME = PLATFORM_NAME
COURSEWARE_ENABLED = True
ENABLE_JASMINE = False
DISCUSSION_SETTINGS = {
'MAX_COMMENT_DEPTH': 2,
}
FEATURES = {
'SAMPLE': False,
'USE_DJANGO_PIPELINE': True,
'DISPLAY_DEBUG_INFO_TO_STAFF': True,
'DISPLAY_HISTOGRAMS_TO_STAFF': False, # For large courses this slows down courseware access for staff.
'REROUTE_ACTIVATION_EMAIL': False, # nonempty string = address for all activation emails
'DEBUG_LEVEL': 0, # 0 = lowest level, least verbose, 255 = max level, most verbose
## DO NOT SET TO True IN THIS FILE
## Doing so will cause all courses to be released on production
'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date
# When True, will only publicly list courses by the subdomain. Expects you
# to define COURSE_LISTINGS, a dictionary mapping subdomains to lists of
# course_ids (see dev_int.py for an example)
'SUBDOMAIN_COURSE_LISTINGS': False,
# When True, will override certain branding with university specific values
# Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the
# university to use for branding purposes
'SUBDOMAIN_BRANDING': False,
'FORCE_UNIVERSITY_DOMAIN': False, # set this to the university domain to use, as an override to HTTP_HOST
# set to None to do no university selection
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the corresponding ones in cms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True, # enables the student notes API and UI.
# discussion home panel, which includes a subscription on/off setting for discussion digest emails.
# this should remain off in production until digest notifications are online.
'ENABLE_DISCUSSION_HOME_PANEL': False,
'ENABLE_PSYCHOMETRICS': False, # real-time psychometrics (eg item response theory analysis in instructor dashboard)
'ENABLE_DJANGO_ADMIN_SITE': True, # set true to enable django's admin site, even on prod (e.g. for course ops)
'ENABLE_SQL_TRACKING_LOGS': False,
'ENABLE_LMS_MIGRATION': False,
'ENABLE_MANUAL_GIT_RELOAD': False,
'ENABLE_MASQUERADE': True, # allow course staff to change to student view of courseware
'ENABLE_SYSADMIN_DASHBOARD': True, # sysadmin dashboard, to see what courses are loaded, to delete & load courses
'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL
# extrernal access methods
'ACCESS_REQUIRE_STAFF_FOR_COURSE': False,
'AUTH_USE_OPENID': False,
'AUTH_USE_CERTIFICATES': False,
'AUTH_USE_OPENID_PROVIDER': False,
# Even though external_auth is in common, shib assumes the LMS views / urls, so it should only be enabled
# in LMS
'AUTH_USE_SHIB': False,
'AUTH_USE_CAS': False,
# This flag disables the requirement of having to agree to the TOS for users registering
# with Shib. Feature was requested by Stanford's office of general counsel
'SHIB_DISABLE_TOS': False,
# Can be turned off if course lists need to be hidden. Effects views and templates.
'COURSES_ARE_BROWSABLE': True,
# Enables ability to restrict enrollment in specific courses by the user account login method
'RESTRICT_ENROLL_BY_REG_METHOD': False,
# analytics experiments
'ENABLE_INSTRUCTOR_ANALYTICS': False,
# Enables the LMS bulk email feature for course staff
'ENABLE_INSTRUCTOR_EMAIL': True,
# If True and ENABLE_INSTRUCTOR_EMAIL: Forces email to be explicitly turned on
# for each course via django-admin interface.
# If False and ENABLE_INSTRUCTOR_EMAIL: Email will be turned on by default
# for all Mongo-backed courses.
'REQUIRE_COURSE_EMAIL_AUTH': True,
# enable analytics server.
# WARNING: THIS SHOULD ALWAYS BE SET TO FALSE UNDER NORMAL
# LMS OPERATION. See analytics.py for details about what
# this does.
'RUN_AS_ANALYTICS_SERVER_ENABLED': False,
# Flip to True when the YouTube iframe API breaks (again)
'USE_YOUTUBE_OBJECT_API': False,
# Give a UI to show a student's submission history in a problem by the
# Staff Debug tool.
'ENABLE_STUDENT_HISTORY_VIEW': True,
# segment.io for LMS--need to explicitly turn it on for production.
'SEGMENT_IO_LMS': False,
# Provide a UI to allow users to submit feedback from the LMS (left-hand help modal)
'ENABLE_FEEDBACK_SUBMISSION': False,
# Turn on a page that lets staff enter Python code to be run in the
# sandbox, for testing whether it's enabled properly.
'ENABLE_DEBUG_RUN_PYTHON': False,
# Enable URL that shows information about the status of variuous services
'ENABLE_SERVICE_STATUS': False,
# Toggle to indicate use of a custom theme
'USE_CUSTOM_THEME': False,
# Don't autoplay videos for students
'AUTOPLAY_VIDEOS': False,
# Enable instructor dash to submit background tasks
'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
# Enable instructor to assign individual due dates
'INDIVIDUAL_DUE_DATES': False,
# Enable instructor dash beta version link
'ENABLE_INSTRUCTOR_BETA_DASHBOARD': True,
# Allow use of the hint managment instructor view.
'ENABLE_HINTER_INSTRUCTOR_VIEW': False,
# for load testing
'AUTOMATIC_AUTH_FOR_TESTING': False,
# Toggle to enable chat availability (configured on a per-course
# basis in Studio)
'ENABLE_CHAT': False,
# Allow users to enroll with methods other than just honor code certificates
'MULTIPLE_ENROLLMENT_ROLES': False,
# Toggle the availability of the shopping cart page
'ENABLE_SHOPPING_CART': True,
# Toggle storing detailed billing information
'STORE_BILLING_INFO': False,
# Enable flow for payments for course registration (DIFFERENT from verified student flow)
'ENABLE_PAID_COURSE_REGISTRATION': True,
# Automatically approve student identity verification attempts
'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': False,
# Disable instructor dash buttons for downloading course data
# when enrollment exceeds this number
'MAX_ENROLLMENT_INSTR_BUTTONS': 200,
# Grade calculation started from the new instructor dashboard will write
# grades CSV files to S3 and give links for downloads.
'ENABLE_S3_GRADE_DOWNLOADS': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# Give course staff unrestricted access to grade downloads (if set to False,
# only edX superusers can perform the downloads)
'ALLOW_COURSE_STAFF_GRADE_DOWNLOADS': False,
'ENABLED_PAYMENT_REPORTS': ["refund_report", "itemized_purchase_report", "university_revenue_share", "certificate_status"],
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggle embargo functionality
'EMBARGO': False,
# Whether the Wiki subsystem should be accessible via the direct /wiki/ paths. Setting this to True means
# that people can submit content and modify the Wiki in any arbitrary manner. We're leaving this as True in the
# defaults, so that we maintain current behavior
'ALLOW_WIKI_ROOT_ACCESS': True,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Turn on third-party auth. Disabled for now because full implementations are not yet available. Remember to syncdb
# if you enable this; we don't create tables by default.
'ENABLE_THIRD_PARTY_AUTH': False,
# Toggle to enable alternate urls for marketing links
'ENABLE_MKTG_SITE': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': False,
# Turn off Advanced Security by default
'ADVANCED_SECURITY': False,
}
DEFAULT_GROUPS = []
GENERATE_PROFILE_SCORES = False
XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/lms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
COURSES_ROOT = ENV_ROOT / "data"
DATA_DIR = COURSES_ROOT
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
system_node_path = os.environ.get("NODE_PATH", REPO_ROOT / 'node_modules')
node_paths = [
COMMON_ROOT / "static/js/vendor",
COMMON_ROOT / "static/coffee/src",
system_node_path,
]
NODE_PATH = ':'.join(node_paths)
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
STATUS_MESSAGE_PATH = ENV_ROOT / "status_message.json"
OPENID_PROVIDER_TRUSTED_ROOTS = ['cs50.net', '*.cs50.net']
from tempdir import mkdtemp_clean
MAKO_MODULE_DIR = mkdtemp_clean('mako')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates']
TEMPLATE_DIRS = [
PROJECT_ROOT / "templates",
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
# Added for django-wiki
'django.core.context_processors.media',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'sekizai.context_processors.sekizai',
# Hack to get required link URLs to password reset templates
'edxmako.shortcuts.marketing_link_context_processor',
# Shoppingcart processor (detects if request.user has a cart)
'shoppingcart.context_processor.user_has_cart_context_processor',
)
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
STUDENT_FILEUPLOAD_MAX_SIZE = 4 * 1000 * 1000 # 4 MB
MAX_FILEUPLOADS_PER_INPUT = 20
LIB_URL = '/static/js/'
BOOK_URL = 'https://mitxstatic.s3.amazonaws.com/book_images/' # For AWS deploys
RSS_TIMEOUT = 600
STATIC_GRAB = False
DEV_CONTENT = True
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/accounts/login'
LOGIN_URL = EDX_ROOT_URL + '/accounts/login'
COURSE_NAME = "6.002_Spring_2012"
COURSE_NUMBER = "6.002x"
COURSE_TITLE = "Circuits and Electronics"
ENABLE_MULTICOURSE = False # set to False to disable multicourse display (see lib.util.views.edXhome)
WIKI_ENABLED = False
COURSE_DEFAULT = '6.002x_Fall_2012'
COURSE_SETTINGS = {
'6.002x_Fall_2012': {
'number': '6.002x',
'title': 'Circuits and Electronics',
'xmlpath': '6002x/',
'location': 'i4x://edx/6002xs12/course/6.002x_Fall_2012',
}
}
LMS_MIGRATION_ALLOWED_IPS = []
TRACK_MAX_EVENT = 10000
DEBUG_TRACK_LOG = False
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
if FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
TRACKING_BACKENDS.update({
'sql': {
'ENGINE': 'track.backends.django.DjangoBackend'
}
})
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
TRACKING_ENABLED = True
COURSE_LISTINGS = {}
SUBDOMAIN_BRANDING = {}
VIRTUAL_UNIVERSITIES = []
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': DATA_DIR,
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
}
}
}
CONTENTSTORE = None
DOC_STORE_CONFIG = {
'host': 'localhost',
'db': 'xmodule',
'collection': 'modulestore',
}
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin)
XBLOCK_SELECT_FUNCTION = prefer_xmodules
CODE_JAIL = {
# Path to a sandboxed Python executable. None means don't bother.
'python_bin': None,
# User to run as in the sandbox.
'user': 'sandbox',
# Configurable limits.
'limits': {
# How many CPU seconds can jailed code use?
'CPU': 1,
},
}
COURSES_WITH_UNSAFE_CODE = []
DEBUG = False
TEMPLATE_DEBUG = False
USE_TZ = True
CMS_BASE = 'localhost:8001'
SITE_ID = 1
SITE_NAME = "edx.org"
HTTPS = 'on'
ROOT_URLCONF = 'lms.urls'
# URL suffixes whose 404s should be ignored.  BUGFIX: the original value
# was ('favicon.ico') — parentheses without a trailing comma make this a
# plain string, not a one-element tuple.  str.endswith happens to accept a
# string too, but any code iterating this setting (or extending it with
# more suffixes) expects a tuple.
IGNORABLE_404_ENDS = ('favicon.ico',)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = 'registration@example.com'
DEFAULT_FEEDBACK_EMAIL = 'feedback@example.com'
SERVER_EMAIL = 'devops@example.com'
TECH_SUPPORT_EMAIL = 'technical@example.com'
CONTACT_EMAIL = 'info@example.com'
BUGS_EMAIL = 'bugs@example.com'
ADMINS = ()
MANAGERS = ADMINS
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ENV_ROOT / "staticfiles"
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
]
FAVICON_PATH = 'images/favicon.ico'
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGES = (
('en', u'English'),
('eo', u'Dummy Language (Esperanto)'), # Dummy languaged used for testing
('fake2', u'Fake translations'), # Another dummy language for testing (not pushed to prod)
('ar', u'العربية'), # Arabic
('ca', u'Català'), # Catalan
('cs', u'Čeština'), # Czech
('de-de', u'Deutsch (Deutschland)'), # German (Germany)
('en@lolcat', u'LOLCAT English'), # LOLCAT English
('en@pirate', u'Pirate English'), # Pirate English
('es-419', u'Español (Latinoamérica)'), # Spanish (Latin America)
('es-es', u'Español (España)'), # Spanish (Spain)
('fr', u'Français'), # French
('hi', u'हिन्दी'), # Hindi
('hy-am', u'Հայերէն (Հայաստանի Հանրապետութիւն)'), # Armenian (Armenia)
('id', u'Bahasa Indonesia'), # Indonesian
('it-it', u'Italiano (Italia)'), # Italian (Italy)
('ja-jp', u'日本語(日本)'), # Japanese (Japan)
('ko-kr', u'한국어(대한민국)'), # Korean (Korea)
('lt-lt', u'Lietuvių (Lietuva)'), # Lithuanian (Lithuania)
('nb', u'Norsk bokmål'), # Norwegian Bokmål
('nl-nl', u'Nederlands (Nederland)'), # Dutch (Netherlands)
('pl', u'Polski'), # Polish
('pt-br', u'Português (Brasil)'), # Portuguese (Brazil)
('sl', u'Slovenščina'), # Slovenian
('tr-tr', u'Türkçe (Türkiye)'), # Turkish (Turkey)
('uk', u'Українська'), # Ukranian
('vi', u'Tiếng Việt'), # Vietnamese
('zh-cn', u'中文(简体)'), # Chinese (China)
('zh-tw', u'中文(台灣)'), # Chinese (Taiwan)
)
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
ALLOWED_GITRELOAD_IPS = ['207.97.227.253', '50.57.128.197', '108.171.174.178']
AWS_QUERYSTRING_EXPIRE = 10 * 365 * 24 * 60 * 60 # 10 years
SIMPLE_WIKI_REQUIRE_LOGIN_EDIT = True
SIMPLE_WIKI_REQUIRE_LOGIN_VIEW = False
from course_wiki import settings as course_wiki_settings
WIKI_ACCOUNT_HANDLING = False
WIKI_EDITOR = 'course_wiki.editors.CodeMirror'
WIKI_SHOW_MAX_CHILDREN = 0 # We don't use the little menu that shows children of an article in the breadcrumb
WIKI_ANONYMOUS = False # Don't allow anonymous access until the styling is figured out
WIKI_CAN_DELETE = course_wiki_settings.CAN_DELETE
WIKI_CAN_MODERATE = course_wiki_settings.CAN_MODERATE
WIKI_CAN_CHANGE_PERMISSIONS = course_wiki_settings.CAN_CHANGE_PERMISSIONS
WIKI_CAN_ASSIGN = course_wiki_settings.CAN_ASSIGN
WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False
WIKI_LINK_LIVE_LOOKUPS = False
WIKI_LINK_DEFAULT_LEVEL = 2
FEEDBACK_SUBMISSION_EMAIL = None
ZENDESK_URL = None
ZENDESK_USER = None
ZENDESK_API_KEY = None
PAYMENT_SUPPORT_EMAIL = 'payment@example.com'
CC_PROCESSOR = {
'CyberSource': {
'SHARED_SECRET': '',
'MERCHANT_ID': '',
'SERIAL_NUMBER': '',
'ORDERPAGE_VERSION': '7',
'PURCHASE_ENDPOINT': '',
}
}
PAID_COURSE_REGISTRATION_CURRENCY = ['usd', '$']
PAYMENT_REPORT_GENERATOR_GROUP = 'shoppingcart_report_access'
OPEN_ENDED_GRADING_INTERFACE = {
'url': 'http://example.com/peer_grading',
'username': 'incorrect_user',
'password': 'incorrect_pass',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller'
}
MOCK_PEER_GRADING = False
MOCK_STAFF_GRADING = False
JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee'
WAFFLE_COOKIE = "waffle_flag_%s"
WAFFLE_MAX_AGE = 1209600
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
TEMPLATE_LOADERS = (
'edxmako.makoloader.MakoFilesystemLoader',
'edxmako.makoloader.MakoAppDirectoriesLoader',
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'microsite_configuration.middleware.MicrositeMiddleware',
'django_comment_client.middleware.AjaxExceptionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# Instead of AuthenticationMiddleware, we use a cached backed version
#'django.contrib.auth.middleware.AuthenticationMiddleware',
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
# Adds user tags to tracking events
# Must go before TrackMiddleware, to get the context set up
'user_api.middleware.UserTagsEventContextMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'splash.middleware.SplashMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Allows us to set user preferences
# should be after DarkLangMiddleware
'lang_pref.middleware.LanguagePreferenceMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_comment_client.utils.ViewNameMiddleware',
'codejail.django_integration.ConfigureCodeJailMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# For A/B testing
'waffle.middleware.WaffleMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'course_wiki.middleware.WikiAccessMiddleware',
)
X_FRAME_OPTIONS = 'ALLOW'
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
from rooted_paths import rooted_glob
courseware_js = (
[
'coffee/src/' + pth + '.js'
for pth in ['courseware', 'histogram', 'navigation', 'time']
] +
sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/modules/**/*.js'))
)
main_vendor_js = [
'js/vendor/require.js',
'js/RequireJS-namespace-undefine.js',
'js/vendor/json2.js',
'js/vendor/jquery.min.js',
'js/vendor/jquery-ui.min.js',
'js/vendor/jquery.cookie.js',
'js/vendor/jquery.qtip.min.js',
'js/vendor/swfobject/swfobject.js',
'js/vendor/jquery.ba-bbq.min.js',
'js/vendor/ova/annotator-full.js',
'js/vendor/ova/video.dev.js',
'js/vendor/ova/vjs.youtube.js',
'js/vendor/ova/rangeslider.js',
'js/vendor/ova/share-annotator.js',
'js/vendor/ova/tinymce.min.js',
'js/vendor/ova/richText-annotator.js',
'js/vendor/ova/reply-annotator.js',
'js/vendor/ova/tags-annotator.js',
'js/vendor/ova/flagging-annotator.js',
'js/vendor/ova/jquery-Watch.js',
'js/vendor/ova/ova.js',
'js/vendor/ova/catch/js/catch.js',
'js/vendor/ova/catch/js/handlebars-1.1.2.js',
'js/vendor/URI.min.js',
]
discussion_js = sorted(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/discussion/**/*.js'))
staff_grading_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/staff_grading/**/*.js'))
open_ended_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/open_ended/**/*.js'))
notes_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/notes/**/*.js'))
instructor_dash_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/instructor_dashboard/**/*.js'))
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/font-awesome.css',
'css/vendor/jquery.qtip.min.css',
'css/vendor/responsive-carousel/responsive-carousel.css',
'css/vendor/responsive-carousel/responsive-carousel.slide.css',
'css/vendor/ova/edx-annotator.css',
'css/vendor/ova/annotator.css',
'css/vendor/ova/video-js.min.css',
'css/vendor/ova/rangeslider.css',
'css/vendor/ova/share-annotator.css',
'css/vendor/ova/richText-annotator.css',
'css/vendor/ova/tags-annotator.css',
'css/vendor/ova/flagging-annotator.css',
'css/vendor/ova/ova.css',
'js/vendor/ova/catch/css/main.css'
],
'output_filename': 'css/lms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-skin.css',
},
'style-app': {
'source_filenames': [
'sass/application.css',
'sass/ie.css'
],
'output_filename': 'css/lms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/application-extend1.css',
],
'output_filename': 'css/lms-style-app-extend1.css',
},
'style-app-extend2': {
'source_filenames': [
'sass/application-extend2.css',
],
'output_filename': 'css/lms-style-app-extend2.css',
},
'style-course-vendor': {
'source_filenames': [
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/jquery.treeview.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
],
'output_filename': 'css/lms-style-course-vendor.css',
},
'style-course': {
'source_filenames': [
'sass/course.css',
'xmodule/modules.css',
],
'output_filename': 'css/lms-style-course.css',
},
}
common_js = set(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js)
project_js = set(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js)
PIPELINE_JS = {
'application': {
# Application will contain all paths not in courseware_only_js
'source_filenames': sorted(common_js) + sorted(project_js) + [
'js/form.ext.js',
'js/my_courses_dropdown.js',
'js/toggle_login_modal.js',
'js/sticky_filter.js',
'js/query-params.js',
'js/src/utility.js',
'js/src/accessibility_tools.js',
],
'output_filename': 'js/lms-application.js',
'test_order': 1,
},
'courseware': {
'source_filenames': courseware_js,
'output_filename': 'js/lms-courseware.js',
'test_order': 2,
},
'main_vendor': {
'source_filenames': main_vendor_js,
'output_filename': 'js/lms-main_vendor.js',
'test_order': 0,
},
'module-descriptor-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js'),
'output_filename': 'js/lms-module-descriptors.js',
'test_order': 8,
},
'module-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static', 'xmodule/modules/js/*.js'),
'output_filename': 'js/lms-modules.js',
'test_order': 3,
},
'discussion': {
'source_filenames': discussion_js,
'output_filename': 'js/discussion.js',
'test_order': 4,
},
'staff_grading': {
'source_filenames': staff_grading_js,
'output_filename': 'js/staff_grading.js',
'test_order': 5,
},
'open_ended': {
'source_filenames': open_ended_js,
'output_filename': 'js/open_ended.js',
'test_order': 6,
},
'notes': {
'source_filenames': notes_js,
'output_filename': 'js/notes.js',
'test_order': 7
},
'instructor_dash': {
'source_filenames': instructor_dash_js,
'output_filename': 'js/instructor_dash.js',
'test_order': 9,
},
}
PIPELINE_DISABLE_WRAPPER = True
# Pre-compile any CoffeeScript files found in per-course data directories,
# skipping files whose compiled .js output is already newer than the source.
if os.path.isdir(DATA_DIR):
    for course_dir in os.listdir(DATA_DIR):
        js_dir = DATA_DIR / course_dir / "js"
        if not os.path.isdir(js_dir):
            continue
        for filename in os.listdir(js_dir):
            if filename.endswith('coffee'):
                new_filename = os.path.splitext(filename)[0] + ".js"
                if os.path.exists(js_dir / new_filename):
                    coffee_timestamp = os.stat(js_dir / filename).st_mtime
                    js_timestamp = os.stat(js_dir / new_filename).st_mtime
                    if coffee_timestamp <= js_timestamp:
                        continue
                    # Use os.remove instead of shelling out to "rm": no
                    # subprocess, and paths containing spaces don't break.
                    os.remove(js_dir / new_filename)
                # Quote the path so course directories with spaces survive
                # the shell invocation.
                os.system('coffee -c "%s"' % (js_dir / filename))
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
STATICFILES_IGNORE_PATTERNS = (
"sass/*",
"coffee/*",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_YUI_BINARY = 'yui-compressor'
PIPELINE_COMPILE_INPLACE = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
HIGH_MEM_QUEUE = 'edx.core.high_mem'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
}
CELERYD_HIJACK_ROOT_LOGGER = False
BULK_EMAIL_DEFAULT_FROM_EMAIL = 'no-reply@example.com'
BULK_EMAIL_EMAILS_PER_TASK = 100
BULK_EMAIL_EMAILS_PER_QUERY = 1000
BULK_EMAIL_DEFAULT_RETRY_DELAY = 30
BULK_EMAIL_MAX_RETRIES = 5
BULK_EMAIL_INFINITE_RETRY_CAP = 1000
BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE
BULK_EMAIL_LOG_SENT_EMAILS = False
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = 0.02
# Endpoints used by the video player and transcript pipeline. Values are
# scheme-less; the scheme is presumably chosen at request time — confirm.
YOUTUBE = {
    # YouTube JavaScript API
    'API': 'www.youtube.com/iframe_api',
    # URL to test YouTube availability
    'TEST_URL': 'gdata.youtube.com/feeds/api/videos/',
    # Current youtube api for requesting transcripts.
    # For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
    'TEXT_API': {
        'url': 'video.google.com/timedtext',
        'params': {
            'lang': 'en',
            # Placeholder; callers substitute the real 11-character video id.
            'v': 'set_youtube_id_of_11_symbols_here',
        },
    },
}
INSTALLED_APPS = (
# Standard ones that are always installed...
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'djcelery',
'south',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
# Our courseware
'circuit',
'courseware',
'student',
'static_template_view',
'staticbook',
'track',
'eventtracking.django',
'util',
'certificates',
'dashboard',
'instructor',
'instructor_task',
'open_ended_grading',
'psychometrics',
'licenses',
'course_groups',
'bulk_email',
# External auth (OpenID, shib)
'external_auth',
'django_openid_auth',
# For the wiki
'wiki', # The new django-wiki from benjaoming
'django_notify',
'course_wiki', # Our customizations
'mptt',
'sekizai',
#'wiki.plugins.attachments',
'wiki.plugins.links',
'wiki.plugins.notifications',
'course_wiki.plugins.markdownedx',
# Foldit integration
'foldit',
# For A/B testing
'waffle',
# For testing
'django.contrib.admin', # only used in DEBUG mode
'django_nose',
'debug',
# Discussion forums
'django_comment_client',
'django_comment_common',
'notes',
# Splash screen
'splash',
# Monitoring
'datadog',
# User API
'rest_framework',
'user_api',
# Shopping cart
'shoppingcart',
# Notification preferences setting
'notification_prefs',
# Different Course Modes
'course_modes',
# Student Identity Verification
'verify_student',
# Dark-launching languages
'dark_lang',
# Microsite configuration
'microsite_configuration',
# Student Identity Reverification
'reverification',
'embargo',
# Monitoring functionality
'monitoring',
)
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
'ABOUT': 'about_edx',
'CONTACT': 'contact',
'FAQ': 'help_edx',
'COURSES': 'courses',
'ROOT': 'root',
'TOS': 'tos',
'HONOR': 'honor',
'PRIVACY': 'privacy_edx',
'JOBS': 'jobs',
'PRESS': 'press',
# Verified Certificates
'WHAT_IS_VERIFIED_CERT': 'verified-certificate',
}
VERIFY_STUDENT = {
"DAYS_GOOD_FOR": 365, # How many days is a verficiation good for?
}
FEATURES['CLASS_DASHBOARD'] = False
if FEATURES.get('CLASS_DASHBOARD'):
INSTALLED_APPS += ('class_dashboard',)
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = 'https://provide_your_cas_url_here'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
REGISTRATION_EXTRA_FIELDS = {
'level_of_education': 'optional',
'gender': 'optional',
'year_of_birth': 'optional',
'mailing_address': 'optional',
'goals': 'optional',
'honor_code': 'required',
'city': 'hidden',
'country': 'hidden',
}
CERT_NAME_SHORT = "Certificate"
CERT_NAME_LONG = "Certificate of Achievement"
GRADES_DOWNLOAD_ROUTING_KEY = HIGH_MEM_QUEUE
GRADES_DOWNLOAD = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-grades',
'ROOT_PATH': '/tmp/edx-s3/grades',
}
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
INSTALLED_APPS += ('django_openid_auth',)
INSTALLED_APPS += ('linkedin',)
LINKEDIN_API = {
'EMAIL_WHITELIST': [],
'COMPANY_ID': '2746406',
}
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
TIME_ZONE_DISPLAYED_FOR_DEADLINES = 'UTC'
ALL_LANGUAGES = (
[u"aa", u"Afar"],
[u"ab", u"Abkhazian"],
[u"af", u"Afrikaans"],
[u"ak", u"Akan"],
[u"sq", u"Albanian"],
[u"am", u"Amharic"],
[u"ar", u"Arabic"],
[u"an", u"Aragonese"],
[u"hy", u"Armenian"],
[u"as", u"Assamese"],
[u"av", u"Avaric"],
[u"ae", u"Avestan"],
[u"ay", u"Aymara"],
[u"az", u"Azerbaijani"],
[u"ba", u"Bashkir"],
[u"bm", u"Bambara"],
[u"eu", u"Basque"],
[u"be", u"Belarusian"],
[u"bn", u"Bengali"],
[u"bh", u"Bihari languages"],
[u"bi", u"Bislama"],
[u"bs", u"Bosnian"],
[u"br", u"Breton"],
[u"bg", u"Bulgarian"],
[u"my", u"Burmese"],
[u"ca", u"Catalan"],
[u"ch", u"Chamorro"],
[u"ce", u"Chechen"],
[u"zh", u"Chinese"],
[u"cu", u"Church Slavic"],
[u"cv", u"Chuvash"],
[u"kw", u"Cornish"],
[u"co", u"Corsican"],
[u"cr", u"Cree"],
[u"cs", u"Czech"],
[u"da", u"Danish"],
[u"dv", u"Divehi"],
[u"nl", u"Dutch"],
[u"dz", u"Dzongkha"],
[u"en", u"English"],
[u"eo", u"Esperanto"],
[u"et", u"Estonian"],
[u"ee", u"Ewe"],
[u"fo", u"Faroese"],
[u"fj", u"Fijian"],
[u"fi", u"Finnish"],
[u"fr", u"French"],
[u"fy", u"Western Frisian"],
[u"ff", u"Fulah"],
[u"ka", u"Georgian"],
[u"de", u"German"],
[u"gd", u"Gaelic"],
[u"ga", u"Irish"],
[u"gl", u"Galician"],
[u"gv", u"Manx"],
[u"el", u"Greek"],
[u"gn", u"Guarani"],
[u"gu", u"Gujarati"],
[u"ht", u"Haitian"],
[u"ha", u"Hausa"],
[u"he", u"Hebrew"],
[u"hz", u"Herero"],
[u"hi", u"Hindi"],
[u"ho", u"Hiri Motu"],
[u"hr", u"Croatian"],
[u"hu", u"Hungarian"],
[u"ig", u"Igbo"],
[u"is", u"Icelandic"],
[u"io", u"Ido"],
[u"ii", u"Sichuan Yi"],
[u"iu", u"Inuktitut"],
[u"ie", u"Interlingue"],
[u"ia", u"Interlingua"],
[u"id", u"Indonesian"],
[u"ik", u"Inupiaq"],
[u"it", u"Italian"],
[u"jv", u"Javanese"],
[u"ja", u"Japanese"],
[u"kl", u"Kalaallisut"],
[u"kn", u"Kannada"],
[u"ks", u"Kashmiri"],
[u"kr", u"Kanuri"],
[u"kk", u"Kazakh"],
[u"km", u"Central Khmer"],
[u"ki", u"Kikuyu"],
[u"rw", u"Kinyarwanda"],
[u"ky", u"Kirghiz"],
[u"kv", u"Komi"],
[u"kg", u"Kongo"],
[u"ko", u"Korean"],
[u"kj", u"Kuanyama"],
[u"ku", u"Kurdish"],
[u"lo", u"Lao"],
[u"la", u"Latin"],
[u"lv", u"Latvian"],
[u"li", u"Limburgan"],
[u"ln", u"Lingala"],
[u"lt", u"Lithuanian"],
[u"lb", u"Luxembourgish"],
[u"lu", u"Luba-Katanga"],
[u"lg", u"Ganda"],
[u"mk", u"Macedonian"],
[u"mh", u"Marshallese"],
[u"ml", u"Malayalam"],
[u"mi", u"Maori"],
[u"mr", u"Marathi"],
[u"ms", u"Malay"],
[u"mg", u"Malagasy"],
[u"mt", u"Maltese"],
[u"mn", u"Mongolian"],
[u"na", u"Nauru"],
[u"nv", u"Navajo"],
[u"nr", u"Ndebele, South"],
[u"nd", u"Ndebele, North"],
[u"ng", u"Ndonga"],
[u"ne", u"Nepali"],
[u"nn", u"Norwegian Nynorsk"],
[u"nb", u"Bokmål, Norwegian"],
[u"no", u"Norwegian"],
[u"ny", u"Chichewa"],
[u"oc", u"Occitan"],
[u"oj", u"Ojibwa"],
[u"or", u"Oriya"],
[u"om", u"Oromo"],
[u"os", u"Ossetian"],
[u"pa", u"Panjabi"],
[u"fa", u"Persian"],
[u"pi", u"Pali"],
[u"pl", u"Polish"],
[u"pt", u"Portuguese"],
[u"ps", u"Pushto"],
[u"qu", u"Quechua"],
[u"rm", u"Romansh"],
[u"ro", u"Romanian"],
[u"rn", u"Rundi"],
[u"ru", u"Russian"],
[u"sg", u"Sango"],
[u"sa", u"Sanskrit"],
[u"si", u"Sinhala"],
[u"sk", u"Slovak"],
[u"sl", u"Slovenian"],
[u"se", u"Northern Sami"],
[u"sm", u"Samoan"],
[u"sn", u"Shona"],
[u"sd", u"Sindhi"],
[u"so", u"Somali"],
[u"st", u"Sotho, Southern"],
[u"es", u"Spanish"],
[u"sc", u"Sardinian"],
[u"sr", u"Serbian"],
[u"ss", u"Swati"],
[u"su", u"Sundanese"],
[u"sw", u"Swahili"],
[u"sv", u"Swedish"],
[u"ty", u"Tahitian"],
[u"ta", u"Tamil"],
[u"tt", u"Tatar"],
[u"te", u"Telugu"],
[u"tg", u"Tajik"],
[u"tl", u"Tagalog"],
[u"th", u"Thai"],
[u"bo", u"Tibetan"],
[u"ti", u"Tigrinya"],
[u"to", u"Tonga (Tonga Islands)"],
[u"tn", u"Tswana"],
[u"ts", u"Tsonga"],
[u"tk", u"Turkmen"],
[u"tr", u"Turkish"],
[u"tw", u"Twi"],
[u"ug", u"Uighur"],
[u"uk", u"Ukrainian"],
[u"ur", u"Urdu"],
[u"uz", u"Uzbek"],
[u"ve", u"Venda"],
[u"vi", u"Vietnamese"],
[u"vo", u"Volapük"],
[u"cy", u"Welsh"],
[u"wa", u"Walloon"],
[u"wo", u"Wolof"],
[u"xh", u"Xhosa"],
[u"yi", u"Yiddish"],
[u"yo", u"Yoruba"],
[u"za", u"Zhuang"],
[u"zu", u"Zulu"]
)
# Apps that are enabled only when their package is importable in this
# environment; each importable entry is appended to INSTALLED_APPS below.
OPTIONAL_APPS = (
    'edx_jsdraw',
    'mentoring',
    # edx-ora2
    'submissions',
    'openassessment',
    'openassessment.assessment',
    'openassessment.workflow',
    'openassessment.xblock'
)
for app_name in OPTIONAL_APPS:
    # First attempt to only find the module rather than actually importing it,
    # to avoid circular references - only try to import if it can't be found
    # by find_module, which doesn't work with import hooks
    try:
        imp.find_module(app_name)
    except ImportError:
        try:
            __import__(app_name)
        except ImportError:
            # Neither lookup succeeded: the app isn't installed, skip it.
            continue
    INSTALLED_APPS += (app_name,)
# Third-party auth and advanced security are configured per-deployment;
# empty dicts here mean "disabled" by default.
THIRD_PARTY_AUTH = {}
ADVANCED_SECURITY_CONFIG = {}
|
from osv import fields, osv
from tools.translate import _
class receipt_receipt (osv.osv):
    """Receipt header: numbered document grouping account voucher lines."""
    _name = "receipt.receipt"
    _description = 'Receipt'
    _columns = {
        # NOTE(review): ondelete is only meaningful on relational fields, not
        # on a char column — presumably a copy/paste leftover; confirm.
        'name':fields.char(string='Receipt number',size=128, required=True, readonly=True, ondelete='set null'),
        'receiptbook_id': fields.many2one('receipt.receiptbook','ReceiptBook',readonly=True,required=True),
        'date': fields.date('Receipt Date',readonly=True),
        # NOTE(review): 'total_ammount' is misspelled, but it is a stored DB
        # column name — renaming would require a migration; left as-is.
        'total_ammount':fields.float('Total Ammount',readonly=True),
        'partner_id':fields.many2one('res.partner',string='Partner',readonly=True),
        'voucher_ids':fields.one2many('account.voucher','receipt_id',string='Vouchers Lines'),
        'receipt_type': fields.selection([('receipt','receipt'),
                        ('payment','payment')],'Receipt Type', required=1),
    }
    # Receipt numbers must be globally unique.
    _sql_constraints = [('name_uniq','unique(name)','The name must be unique!')]
    _order = "date"
#    def unlink(self, cr, uid, ids, context=None):
        #raise osv.except_osv(_('Invalid action !'), _('Cannot delete Receipt(s) !'))
        #return {'type': 'ir.actions.act_window_close'}
receipt_receipt()
class receipt_receipt_line (osv.osv):
    """One line of a receipt, linking it to an account voucher."""
    _name = "receipt.receipt.line"
    _description = 'Receipt Line'
    _columns = {
        # Deleting the receipt keeps the line but clears the link.
        'receipt_id':fields.many2one('receipt.receipt', 'Receipt', ondelete='set null'),
        'name':fields.char('Description', size=256),
        'voucher_id': fields.many2one('account.voucher', 'Voucher'),
    }
    _order = "receipt_id"
receipt_receipt_line()
|
from cybox.core import Observable, ObservableComposition
from cybox.objects.file_object import File
from cybox.objects.address_object import Address
from cybox.objects.hostname_object import Hostname
from cybox.objects.uri_object import URI
from cybox.objects.pipe_object import Pipe
from cybox.objects.mutex_object import Mutex
from cybox.objects.artifact_object import Artifact
from cybox.objects.memory_object import Memory
from cybox.objects.email_message_object import EmailMessage, EmailHeader, Attachments
from cybox.objects.domain_name_object import DomainName
from cybox.objects.win_registry_key_object import *
from cybox.common import Hash, ByteRun, ByteRuns
from cybox.objects.http_session_object import *
from cybox.objects.as_object import AutonomousSystem
from stix.extensions.test_mechanism.snort_test_mechanism import *
import ntpath, socket, sys
from stix.indicator import Indicator
# Handle to this module so handler functions can be looked up by name.
this_module = sys.modules[__name__]
# Maps each MISP attribute type to the name of the function (defined below)
# that converts it into a cybox observable.
simple_type_to_method = {}
simple_type_to_method.update(dict.fromkeys(["md5", "sha1", "sha256", "filename", "filename|md5", "filename|sha1", "filename|sha256", "malware-sample", "attachment"], "resolveFileObservable"))
simple_type_to_method.update(dict.fromkeys(["ip-src", "ip-dst"], "generateIPObservable"))
simple_type_to_method.update(dict.fromkeys(["regkey", "regkey|value"], "generateRegkeyObservable"))
simple_type_to_method.update(dict.fromkeys(["hostname", "domain", "url", "AS", "mutex", "named pipe", "link"], "generateSimpleObservable"))
simple_type_to_method.update(dict.fromkeys(["email-src", "email-dst", "email-subject"], "resolveEmailObservable"))
simple_type_to_method.update(dict.fromkeys(["http-method", "user-agent"], "resolveHTTPObservable"))
simple_type_to_method.update(dict.fromkeys(["pattern-in-file", "pattern-in-traffic", "pattern-in-memory"], "resolvePatternObservable"))
# MISP attribute type -> cybox class name, and cybox class name -> the
# attribute on that class that holds the observable's value.
misp_cybox_name = {"domain" : "DomainName", "hostname" : "Hostname", "url" : "URI", "AS" : "AutonomousSystem", "mutex" : "Mutex", "named pipe" : "Pipe", "link" : "URI"}
cybox_name_attribute = {"DomainName" : "value", "Hostname" : "hostname_value", "URI" : "value", "AutonomousSystem" : "number", "Pipe" : "name", "Mutex" : "name"}
def generateObservable(indicator, attribute):
    """Attach the observable (or test mechanism) for a MISP attribute to an indicator.

    snort/yara attributes become test mechanisms; every other known type is
    dispatched to its handler via simple_type_to_method. Unknown types are
    silently ignored.
    """
    if attribute["type"] in ("snort", "yara"):
        generateTM(indicator, attribute)
        return
    # `in dict` instead of `in dict.keys()`; getattr returns None for
    # unmapped types so they fall through harmlessly.
    action = getattr(
        this_module, simple_type_to_method.get(attribute["type"], ""), None)
    if action is not None:
        observable = action(attribute)
        indicator.add_observable(observable)
def resolveFileObservable(attribute):
    """Build a File observable from a filename and/or hash MISP attribute.

    Composite types ("filename|md5" etc. and "malware-sample") carry
    "filename|hash" in the value; plain types carry one or the other.
    """
    hashValue = ""
    filenameValue = ""
    if attribute["type"] in ("filename|md5", "filename|sha1",
                             "filename|sha256", "malware-sample"):
        values = attribute["value"].split('|')
        filenameValue = values[0]
        hashValue = values[1]
    elif attribute["type"] in ("filename", "attachment"):
        filenameValue = attribute["value"]
    else:
        hashValue = attribute["value"]
    return generateFileObservable(filenameValue, hashValue)
def generateFileObservable(filenameValue, hashValue):
    """Build a cybox File object from an optional filename/path and an optional hash.

    Either argument may be the empty string, in which case the corresponding
    field is left unset.
    """
    file_object = File()
    if filenameValue:
        if "/" in filenameValue or "\\" in filenameValue:
            # The value carries a path: split into directory and basename.
            # ntpath handles both Windows and POSIX separators.
            file_object.file_path = ntpath.dirname(filenameValue)
            file_object.file_name = ntpath.basename(filenameValue)
        else:
            file_object.file_name = filenameValue
    if hashValue:
        file_object.add_hash(Hash(hashValue))
    return file_object
def generateIPObservable(attribute):
    """Build an Address observable from an ip-src/ip-dst MISP attribute.

    Category is "cidr" when the value contains a '/', otherwise ipv4/ipv6
    depending on whether the address parses as IPv4.
    """
    address_object = Address()
    value = attribute["value"]
    cidr = "/" in value
    # Probe only the address part, without any CIDR suffix.
    ip = value.split('/')[0] if cidr else value
    try:
        socket.inet_aton(ip)
        ipv4 = True
    except socket.error:
        ipv4 = False
    if cidr:
        address_object.category = "cidr"
    elif ipv4:
        address_object.category = "ipv4-addr"
    else:
        address_object.category = "ipv6-addr"
    address_object.is_source = (attribute["type"] == "ip-src")
    # The full original value (including any CIDR suffix) is stored.
    address_object.address_value = value
    return address_object
def generateRegkeyObservable(attribute):
    """Build a WinRegistryKey observable, optionally with a value, from a MISP attribute.

    "regkey|value" attributes carry "key|data" in the value field.
    """
    regvalue = ""
    if attribute["type"] == "regkey|value":
        # Split only on the first '|' so registry data containing '|'
        # survives intact (the original split twice and kept only the
        # second segment, truncating such values).
        regkey, regvalue = attribute["value"].split('|', 1)
    else:
        regkey = attribute["value"]
    reg_object = WinRegistryKey()
    reg_object.key = regkey
    if regvalue:
        reg_value_object = RegistryValue()
        reg_value_object.data = regvalue
        reg_object.values = RegistryValues(reg_value_object)
    return reg_object
def generateSimpleObservable(attribute):
    """Instantiate the cybox type mapped to this attribute type and set its value field."""
    cyboxName = misp_cybox_name[attribute["type"]]
    cyboxClass = getattr(this_module, cyboxName, None)
    observable = cyboxClass()
    valueField = cybox_name_attribute[cyboxName]
    setattr(observable, valueField, attribute["value"])
    return observable
def generateTM(indicator, attribute):
    """Attach a snort test mechanism to the indicator and return it.

    yara rules are not yet supported by python-stix, so yara attributes
    leave the indicator untouched. The indicator is now returned on both
    paths (the original returned it only for the unsupported case).
    """
    if attribute["type"] != "snort":
        # TODO: build a yara test mechanism once python-stix supports it.
        return indicator
    tm = SnortTestMechanism()
    tm.rules = [attribute["value"]]
    indicator.test_mechanisms = [tm]
    return indicator
def resolveEmailObservable(attribute):
    """Build an EmailMessage observable with the relevant header field populated."""
    attributeType = attribute["type"]
    value = attribute["value"]
    email_header = EmailHeader()
    if attributeType == "email-src":
        email_header.from_ = value
    elif attributeType == "email-dst":
        email_header.to = value
    else:
        # remaining mapped type: email-subject
        email_header.subject = value
    message = EmailMessage()
    message.header = email_header
    return message
def resolveHTTPObservable(attribute):
    """Build an HTTPSession observable from a user-agent or http-method attribute."""
    client_request = HTTPClientRequest()
    if attribute["type"] == "user-agent":
        header_fields = HTTPRequestHeaderFields()
        header_fields.user_agent = attribute["value"]
        header = HTTPRequestHeader()
        header.parsed_header = header_fields
        client_request.http_request_header = header
    else:
        request_line = HTTPRequestLine()
        request_line.http_method = attribute["value"]
        client_request.http_request_line = request_line
    request_response = HTTPRequestResponse()
    request_response.http_client_request = client_request
    # NOTE(review): result discarded — presumably forces early
    # serialization/validation; confirm before removing.
    request_response.to_xml()
    session = HTTPSession()
    session.http_request_response = [request_response]
    return session
def resolvePatternObservable(attribute):
    """Build a File observable carrying a byte-run pattern.

    Only pattern-in-file is supported; pattern-in-memory and
    pattern-in-traffic need custom properties not yet implemented in the
    API, so they yield None.
    """
    if attribute["type"] != "pattern-in-file":
        return None
    byte_run = ByteRun()
    byte_run.byte_run_data = attribute["value"]
    pattern_file = File()
    pattern_file.byte_runs = ByteRuns(byte_run)
    return pattern_file
def createArtifactObject(indicator, attribute):
    """Wrap the attribute's raw data in an Artifact and attach it to the indicator."""
    indicator.add_observable(Artifact(data=attribute["data"]))
def returnAttachmentComposition(attribute):
    """Build an Observable for an attachment.

    When raw data is present, the file metadata and the Artifact payload are
    bundled in an ObservableComposition; otherwise the File alone is wrapped.
    """
    file_object = File()
    file_object.file_name = attribute["value"]
    if "data" in attribute:
        artifact = Artifact(data=attribute["data"])
        composed = ObservableComposition(observables=[artifact, file_object])
        observable = Observable()
        observable.observable_composition = composed
        return observable
    return Observable(file_object)
def generateEmailAttachmentObject(indicator, filename):
    """Set the indicator's observable to an email message carrying the named attachment."""
    attached_file = File()
    attached_file.file_name = filename
    email = EmailMessage()
    email.attachments = Attachments()
    # Relate the file to the message, then reference it from the
    # attachments list by its generated id.
    email.add_related(attached_file, "Contains", inline=True)
    email.attachments.append(attached_file.parent.id_)
    indicator.observable = email
|
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class StockPickingCarriageCondition(orm.Model):
    """ Model name: Carriage condition

    Extends stock.picking.carriage_condition with an accounting code used
    for export/integration purposes.
    """
    _inherit = 'stock.picking.carriage_condition'
    _columns = {
        # Two-character accounting code associated with the condition.
        'account_code': fields.char('Account code', size=2),
    }
|
from datetime import datetime, timedelta
import requests
from recall.convenience import settings
from recall import jobs, convenience, messages
_VERIFY_SSL = False
class UpstreamError(Exception):
def __init__(self, response):
Exception.__init__(self, repr(
{"message": response.text,
"status_code": response.status_code,
"form": response.request.data}))
class OurError(Exception):
def __init__(self, response):
Exception.__init__(self, repr(
{"message": response.text,
"status_code": response.status_code,
"form": response.request.data}))
def _url():
return settings["RECALL_PAYMILL_URL"]
def _auth():
return settings["RECALL_PAYMILL_PRIVATE_KEY"], "no_password"
def _handle_failure(response):
if 400 <= response.status_code <= 499:
raise OurError(response)
elif 500 <= response.status_code <= 599:
raise UpstreamError(response)
def _create_client(user):
    """Register the user as a Paymill client, returning the new client id."""
    response = requests.post(
        _url() + "clients", verify=_VERIFY_SSL, auth=_auth(),
        data={"email": user["email"]})
    _handle_failure(response)
    return response.json["data"]["id"]


def _create_credit_card(token, client_identifier):
    """Attach the card behind ``token`` to the client, returning the payment id."""
    response = requests.post(
        _url() + "payments", verify=_VERIFY_SSL, auth=_auth(),
        data={"client": client_identifier, "token": token})
    _handle_failure(response)
    return response.json["data"]["id"]


def _create_subscription(client_identifier, credit_card_identifier):
    """Subscribe the client's card to the configured offer, returning the subscription id."""
    # NOTE(review): response.json is read as an attribute throughout this
    # module, which matches requests < 1.0 — confirm the pinned version.
    response = requests.post(
        _url() + "subscriptions", verify=_VERIFY_SSL, auth=_auth(),
        data={
            "client": client_identifier,
            "offer": settings["RECALL_PAYMILL_OFFER"],
            "payment": credit_card_identifier,
        })
    _handle_failure(response)
    return response.json["data"]["id"]
def _start_billing(user, token):
    """Begin billing a user in Paymill, returning Paymill's identifiers.

    Register the user as a client in Paymill. Attach the credit card
    details the token refers to to that client. Subscribe the user to
    the offer (from settings).

    No guarantee is given that the user has been billed"""
    identifiers = {}
    identifiers["client_identifier"] = _create_client(user)
    identifiers["credit_card_identifier"] = _create_credit_card(
        token, identifiers["client_identifier"])
    identifiers["subscription_identifier"] = _create_subscription(
        identifiers["client_identifier"],
        identifiers["credit_card_identifier"])
    return identifiers
def _has_been_recently_billed(user):
    """Return True if a recent billing can be found, False otherwise."""
    response = requests.get(
        _url() + "transactions", verify=_VERIFY_SSL, auth=_auth())
    _handle_failure(response)
    wanted_client = user["paymill"]["client_identifier"]
    # Only the first (most recent) transaction for this client is examined.
    for transaction in response.json["data"]:
        if transaction["client"]["id"] == wanted_client:
            return transaction["status"] == "closed"
    return False
class StartBilling(jobs.Job):
    """Job that registers a user with Paymill and kicks off billing checks."""

    def __init__(self, email, token):
        # isinstance is the correct type check (type(x) == str rejects
        # subclasses); the assert-based failure mode is preserved.
        assert isinstance(email, str)
        assert isinstance(token, str)
        self.email = email
        self.token = token

    def do(self):
        """Start billing the user, persist the identifiers, enqueue a check."""
        user = convenience.db().users.find_one({"email": self.email})
        user["paymill"] = _start_billing(user, self.token)
        convenience.db().users.save(user, safe=True)
        self.logger.info("Started billing " + user["email"])
        jobs.enqueue(CheckBilling(user), priority=3)
class CheckBilling(jobs.Job):
    """Job that polls Paymill until billing for ``user`` is observed.

    Re-enqueues itself while billing is pending; escalates to an error log
    once more than an hour has passed since ``last_noted``.
    """

    def __init__(self, user, last_noted=None):
        # Bug fix: the original default ``last_noted=datetime.now()`` was
        # evaluated once at class-definition time and shared by every
        # instance; use a None sentinel and resolve it per instance.
        assert isinstance(user, dict)
        self.user = user
        self.last_noted = datetime.now() if last_noted is None else last_noted

    def do(self):
        """Check billing status and enqueue the appropriate follow-up job."""
        # Bug fix: ``email`` was only bound in one branch of the original,
        # causing a NameError in the final debug-log branch.
        email = self.user["email"]
        if _has_been_recently_billed(self.user):
            self.logger.info("Billing for {email} went through".format(
                email=email))
            jobs.enqueue(messages.SendInvite(self.user))
        elif self.last_noted < (datetime.now() - timedelta(hours=1)):
            # Over an hour without a billing: escalate and restart the clock.
            self.logger.error(
                "Billing for {email} has not happened in last hour".format(
                    email=email))
            jobs.enqueue(CheckBilling(self.user), priority=3)
        else:
            self.logger.debug(
                "Billing for {email} has not happened".format(
                    email=email))
            jobs.enqueue(CheckBilling(
                self.user, last_noted=self.last_noted), priority=3)
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop vote.representative_fingerprint and link proposals to
    representatives through the Vote model instead."""

    dependencies = [
        ('representatives', '0003_auto_20150702_1827'),
        ('representatives_votes', '0002_auto_20150707_1611'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='vote',
            name='representative_fingerprint',
        ),
        # M2M realized via the existing Vote rows (through model).
        migrations.AddField(
            model_name='proposal',
            name='representatives',
            field=models.ManyToManyField(
                to='representatives.Representative',
                through='representatives_votes.Vote'),
        ),
    ]
|
from medea.agnostic import freq, ticks_ms, ticks_diff
freq(160000000)
def timeit(fun):
    """Run fun(), print the elapsed wall-clock time, and return it (in ticks).

    Returning the measurement generalizes the original (which returned
    None); existing callers that ignore the return value are unaffected.
    """
    startMs = ticks_ms()
    fun()
    stopMs = ticks_ms()
    elapsed = ticks_diff(stopMs, startMs)
    print("Took:", elapsed)
    return elapsed
|
from . import medical_patient_occupation
from . import medical_patient
|
{
'name': 'Piwik for Website',
'version': '0.1',
'category': 'Website',
'complexity': "easy",
'description': """
Piwik for Website.
=================
Collects web application usage of website with Piwik.
""",
'author': 'Echoes Technologies SAS',
'website': 'https://www.echoes-tech.com',
'depends': ['website'],
'data': [
'views/website_piwik.xml',
],
'installable': True,
'active': False,
}
|
from ts_utils import decryptTsSegment
import stress_base
import http_utils
import urlparse
import commands
import tempfile
import urllib2
import hashlib
import shutil
import random
import struct
import socket
import time
import gzip
import os
import re
from hls_compare_params import *
# Tuning knobs for the HLS stream comparison.
MAX_TS_SEGMENTS_TO_COMPARE = 10
TS_PACKET_LENGTH = 188  # fixed MPEG-TS packet size in bytes
PTS_THRESHOLD = 1000 # 1/90 sec
DTS_THRESHOLD = 1000 # 1/90 sec
# ffprobe output fields whose values legitimately differ between servers.
IGNORED_PREFIXES = ['pos=', 'pts_time=', 'dts_time=']
IGNORED_PREFIXES_FULL = ['pos=', 'pts_time=', 'dts_time=', 'pts=', 'dts=']
TEMP_DIR = '/mnt/vodtest/'
RETRIES = 1
def parseTimestamp(value):
    """Convert an ffprobe timestamp field to an int, mapping 'N/A' to -1."""
    if value != 'N/A':
        return int(value)
    return -1
class TestThread(stress_base.TestThreadBase):
    """Compare the HLS output (manifests + TS segments) of two servers.

    Each test fetches the same stream uri from both servers, verifies the
    manifests agree, then compares the last MAX_TS_SEGMENTS_TO_COMPARE TS
    segments stream-by-stream via ffprobe, ignoring fields that legitimately
    differ (file positions, small pts/dts offsets).
    """

    def writeToTempFile(self, data):
        """Write data to a new file under TEMP_DIR and return its path."""
        f = tempfile.NamedTemporaryFile(delete=False, dir=TEMP_DIR)
        f.write(data)
        f.close()
        return f.name

    def shouldCompareLine(self, line):
        """Return False for ffprobe output lines expected to differ between servers."""
        for ignoredPrefix in IGNORED_PREFIXES_FULL:
            if line.startswith(ignoredPrefix):
                return False
        return True

    def writeLinesToDiff(self, lines):
        """Dump the comparable subset of lines to a temp file and return its path."""
        lines = filter(self.shouldCompareLine, lines)
        return self.writeToTempFile('\n'.join(lines))

    def compareFfprobeOutputs(self, ffprobeData1, ffprobeData2, streamType, messages):
        """Compare two ffprobe dumps line by line.

        Lines with a prefix in IGNORED_PREFIXES are skipped, and pts/dts
        values may differ by up to PTS_THRESHOLD/DTS_THRESHOLD.  Appends
        human readable errors to messages; returns False on mismatch.
        """
        lines1 = ffprobeData1.split('\n')
        lines2 = ffprobeData2.split('\n')
        if len(lines1) != len(lines2):
            fileName1 = self.writeLinesToDiff(lines1)
            fileName2 = self.writeLinesToDiff(lines2)
            # Bugfix: the temp files are created without any '.tmp' suffix, so
            # diff the paths exactly as returned by writeLinesToDiff (the old
            # '%s.tmp' form diffed files that do not exist).
            diffResult = commands.getoutput('diff -bBu %s %s < /dev/null' % (fileName1, fileName2))
            os.remove(fileName1)
            os.remove(fileName2)
            # cap the diff output so a huge mismatch does not flood the log
            diffResult = '\n'.join(diffResult.split('\n')[:1000])
            messages.append('Error: line count mismatch %s vs %s' % (len(lines1), len(lines2)))
            messages.append(diffResult)
            return False
        result = True
        for curIndex in xrange(len(lines1)):
            line1 = lines1[curIndex].strip()
            line2 = lines2[curIndex].strip()
            if line1 == line2:
                continue
            skipLine = False
            for ignoredPrefix in IGNORED_PREFIXES:
                if line1.startswith(ignoredPrefix) and line2.startswith(ignoredPrefix):
                    skipLine = True
                    break
            if skipLine:
                continue
            # pts - allow a small drift between the two servers
            if line1.startswith('pts=') and line2.startswith('pts='):
                pts1 = parseTimestamp(line1.split('=')[1])
                pts2 = parseTimestamp(line2.split('=')[1])
                curDiff = abs(pts1 - pts2)
                if curDiff > PTS_THRESHOLD:
                    messages.append('Error: pts diff exceeds threshold - pts1 %s pts2 %s streamType %s' % (pts1, pts2, streamType))
                    result = False
                continue
            # dts - same tolerance as pts
            if line1.startswith('dts=') and line2.startswith('dts='):
                dts1 = parseTimestamp(line1.split('=')[1])
                dts2 = parseTimestamp(line2.split('=')[1])
                curDiff = abs(dts1 - dts2)
                if curDiff > DTS_THRESHOLD:
                    messages.append('Error: dts diff exceeds threshold - dts1 %s dts2 %s streamType %s' % (dts1, dts2, streamType))
                    result = False
                continue
            messages.append('Error: test failed line1=%s line2=%s' % (line1, line2))
            result = False
        return result

    def runFfprobe(self, tsData, streamType):
        """Run ffprobe on tsData and return its packet dump for streamType ('a'/'v')."""
        tempFilename = self.writeToTempFile(tsData)
        ffprobeData = commands.getoutput('%s -i %s -show_packets -show_data -select_streams %s 2>/dev/null' % (FFPROBE_BIN, tempFilename, streamType))
        os.remove(tempFilename)
        return ffprobeData

    def compareStream(self, tsData1, tsData2, streamType, messages):
        """Compare a single elementary stream of two TS buffers via ffprobe."""
        ffprobeData1 = self.runFfprobe(tsData1, streamType)
        ffprobeData2 = self.runFfprobe(tsData2, streamType)
        return self.compareFfprobeOutputs(ffprobeData1, ffprobeData2, streamType, messages)

    def testContinuity(self, tsData, continuityCounters):
        """Validate per-pid continuity counters across segments.

        continuityCounters maps pid -> last seen counter and is carried over
        between consecutive segments by the caller.
        NOTE(review): adaptation-field-only packets, which legally repeat the
        counter, are not special-cased - confirm against the tested streams.
        """
        okCounters = 0
        result = True
        for curPos in xrange(0, len(tsData), TS_PACKET_LENGTH):
            pid = ((ord(tsData[curPos + 1]) & 0x1f) << 8) | ord(tsData[curPos + 2])
            cc = ord(tsData[curPos + 3]) & 0x0f
            if pid in continuityCounters:
                lastValue = continuityCounters[pid]
                expectedValue = (lastValue + 1) & 0x0f
                if cc != expectedValue:
                    self.writeOutput('Error: bad continuity counter - pos=%s pid=%d exp=%s actual=%s' %
                        (curPos, pid, expectedValue, cc))
                    result = False
                else:
                    okCounters += 1
            continuityCounters[pid] = cc
        self.writeOutput('Info: validated %s counters' % okCounters)
        return result

    def md5sum(self, data):
        """Return the hex md5 digest of data (used for logging only)."""
        m = hashlib.md5()
        m.update(data)
        return m.hexdigest()

    def retrieveUrl(self, url):
        """GET url, log timing/size/checksum, and return (status code, body)."""
        startTime = time.time()
        code, headers, data = http_utils.getUrl(url)
        if code == 0:
            # code 0 signals a transport level failure; data holds the error text
            self.writeOutput(data)
        self.writeOutput('Info: get %s took %s, size %s cksum %s' % (url, time.time() - startTime, len(data), self.md5sum(data)))
        return code, data

    def compareTsUris(self, url1, url2, segmentIndex, aesKey, continuityCounters):
        """Download one segment from both servers and compare them.

        Retries up to RETRIES times on comparison errors; returns False when
        the streams differ or the continuity check fails.
        """
        result = True
        self.writeOutput('Info: comparing %s %s' % (url1, url2))
        code1, tsData1 = self.retrieveUrl(url1)
        if code1 == 200:
            if aesKey != None:
                # Bugfix: decrypt the local variable (was self.tsData1, an
                # attribute that does not exist - it raised AttributeError for
                # every encrypted stream).
                tsData1, error = decryptTsSegment(tsData1, aesKey, segmentIndex)
                if tsData1 == None:
                    self.writeOutput(error)
                    return False
            if not self.testContinuity(tsData1, continuityCounters):
                result = False
        else:
            # segment unavailable - restart the continuity tracking
            continuityCounters.clear()
        for attempt in xrange(RETRIES):
            code2, tsData2 = self.retrieveUrl(url2)
            if code1 != code2:
                if code1 != 0 and code2 != 0:
                    self.writeOutput('Error: got different status codes %s vs %s (ts)' % (code1, code2))
                return False
            messages = []
            curResult = True
            if not self.compareStream(tsData1, tsData2, 'a', messages):
                curResult = False
            if not self.compareStream(tsData1, tsData2, 'v', messages):
                curResult = False
            if curResult or attempt + 1 >= RETRIES:
                break
            self.writeOutput('Info: got errors, retrying...')
        self.writeOutput('Info: size diff is %s' % (len(tsData2) - len(tsData1)))
        for message in messages:
            self.writeOutput(message)
        return result and curResult

    @staticmethod
    def getTsSegments(manifest):
        """Parse an m3u8 manifest into a list of (uri, duration) tuples."""
        result = []
        duration = None
        for curLine in manifest.split('\n'):
            curLine = curLine.strip()
            if curLine.startswith('#EXTINF:'):
                duration = float(curLine[len('#EXTINF:'):].split(',')[0])
            if len(curLine) > 0 and curLine[0] != '#':
                result.append((curLine, duration))
        return result

    def runTest(self, uri):
        """Entry point invoked by the stress framework for every tested uri."""
        url1 = URL1_BASE + random.choice(URL1_PREFIXES) + uri
        url2 = URL2_PREFIX + uri
        self.writeOutput('Info: testing %s %s' % (url1, url2))
        # avoid billing real partners
        url1 = re.sub('/p/\d+/sp/\d+/', '/p/%s/sp/%s00/' % (TEST_PARTNER_ID, TEST_PARTNER_ID), url1)
        url2 = re.sub('/p/\d+/sp/\d+/', '/p/%s/sp/%s00/' % (TEST_PARTNER_ID, TEST_PARTNER_ID), url2)
        # get the manifests
        code1, manifest1 = self.retrieveUrl(url1 + URL1_SUFFIX)
        code2, manifest2 = self.retrieveUrl(url2 + URL2_SUFFIX)
        if code1 != code2:
            self.writeOutput('Error: got different status codes %s vs %s (m3u8)' % (code1, code2))
            return False
        if code1 == 404 and code2 == 404:
            self.writeOutput('Notice: both servers returned 404')
            return True
        if not manifest1.startswith('#EXTM3U') or not manifest2.startswith('#EXTM3U'):
            if not manifest1.startswith('#EXTM3U') and not manifest2.startswith('#EXTM3U'):
                self.writeOutput('Notice: both servers returned invalid manifests')
                return True
            if not manifest1.startswith('#EXTM3U'):
                self.writeOutput('Error: server1 returned invalid manifest')
                self.writeOutput('manifest1=%s' % manifest1)
                return True
            if not manifest2.startswith('#EXTM3U'):
                self.writeOutput('Error: server2 returned invalid manifest')
                self.writeOutput('manifest2=%s' % manifest2)
                return True
        # extract the ts uris
        tsUris1 = self.getTsSegments(manifest1)
        tsUris2 = self.getTsSegments(manifest2)
        if len(tsUris1) != len(tsUris2):
            if len(tsUris1) < len(tsUris2) and '/clipTo/' in uri:
                clipToValue = uri.split('/clipTo/')[1].split('/')[0]
                self.writeOutput('Notice: ignoring TS count mismatch (%s vs %s) due to clipTo %s' % (len(tsUris1), len(tsUris2), clipToValue))
                tsUris2 = tsUris2[:len(tsUris1)]
            else:
                self.writeOutput('Error: TS segment count mismatch %s vs %s' % (len(tsUris1), len(tsUris2)))
        # check the durations
        minCount = min(len(tsUris1), len(tsUris2))
        for curIndex in xrange(minCount):
            duration1 = tsUris1[curIndex][1]
            duration2 = tsUris2[curIndex][1]
            if abs(duration1 - duration2) > 0.01:
                self.writeOutput('Error: TS durations mismatch at index %s - %s vs %s' % (curIndex, duration1, duration2))
                return False
        # get the encryption key, if exists
        keyUri = None
        for curLine in manifest1.split('\n'):
            if curLine.startswith('#EXT-X-KEY'):
                keyUri = curLine.split('"')[1]
        if keyUri != None:
            url = url1 + URL1_SUFFIX
            baseUrl = url[:url.rfind('/')] + '/'
            try:
                aesKey = urllib2.urlopen(baseUrl + keyUri).read()
            except urllib2.HTTPError as e:
                self.writeOutput('Error: failed to get the encryption key, code=%s' % e.getcode())
                return False
        else:
            aesKey = None
        # compare the last MAX_TS_SEGMENTS_TO_COMPARE ts segments
        result = True
        continuityCounters = {}
        self.writeOutput('Info: segmentCount=%s' % minCount)
        for curIndex in xrange(max(minCount - MAX_TS_SEGMENTS_TO_COMPARE, 0), minCount, 1):
            if os.path.exists(STOP_FILE):
                return True
            tsUrl1 = urlparse.urljoin(url1 + '/', tsUris1[curIndex][0])
            tsUrl2 = urlparse.urljoin(url2 + '/', tsUris2[curIndex][0])
            if not self.compareTsUris(tsUrl1, tsUrl2, curIndex + 1, aesKey, continuityCounters):
                self.writeOutput('Error: ts comparison failed - url1=%s, url2=%s' % (tsUrl1, tsUrl2))
                result = False
        self.writeOutput('Info: success')
        return result
if __name__ == '__main__':
    # Clear any leftover temp files from previous runs before starting.
    for name in os.listdir(TEMP_DIR):
        os.remove(os.path.join(TEMP_DIR, name))
    # Hand control to the stress framework with our comparison thread class.
    stress_base.main(TestThread, STOP_FILE)
|
import usb.core
import usb.util
import serial
import socket
from datecs import *
from exceptions import *
from time import sleep
class Usb(Datecs):
    """ Define USB printer """
    def __init__(self, idVendor, idProduct, interface=0, in_ep=0x82, out_ep=0x01):
        """
        @param idVendor : Vendor ID
        @param idProduct : Product ID
        @param interface : USB device interface
        @param in_ep : Input end point
        @param out_ep : Output end point
        """
        # Written to the device when a raw write comes up short; empty by
        # default — presumably meant to hold an error banner. TODO confirm.
        self.errorText = ""
        self.idVendor = idVendor
        self.idProduct = idProduct
        self.interface = interface
        self.in_ep = in_ep
        self.out_ep = out_ep
        self.open()
    def open(self):
        """ Search device on USB tree and set is as datecs device """
        self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct)
        if self.device is None:
            raise NoDeviceError()
        try:
            # steal the interface from the kernel driver (e.g. usblp) first
            if self.device.is_kernel_driver_active(self.interface):
                self.device.detach_kernel_driver(self.interface)
            self.device.set_configuration()
            usb.util.claim_interface(self.device, self.interface)
        except usb.core.USBError as e:
            raise HandleDeviceError(e)
    def close(self):
        """ Release the interface back to the kernel driver.

        First pass releases/reattaches/disposes; the loop then comes around
        again, sees the kernel driver active, drops our reference and returns
        True.  Transient USBErrors are retried every 0.1s, up to 10 times.
        """
        i = 0
        while True:
            try:
                if not self.device.is_kernel_driver_active(self.interface):
                    usb.util.release_interface(self.device, self.interface)
                    self.device.attach_kernel_driver(self.interface)
                    usb.util.dispose_resources(self.device)
                else:
                    self.device = None
                    return True
            except usb.core.USBError as e:
                i += 1
                if i > 10:
                    return False
                sleep(0.1)
    def _raw(self, msg):
        """ Print any command sent in raw format """
        # A short write means the printer did not accept the whole payload.
        if len(msg) != self.device.write(self.out_ep, msg, self.interface):
            self.device.write(self.out_ep, self.errorText, self.interface)
            raise TicketNotPrinted()
    def __del__(self):
        """ Release USB interface """
        if self.device:
            self.close()
            self.device = None
class Serial(Datecs):
    """ Define Serial printer """
    def __init__(self, devfile="/dev/ttyS0", baudrate=9600, bytesize=8, timeout=1):
        """
        @param devfile : Device file under dev filesystem
        @param baudrate : Baud rate for serial transmission
        @param bytesize : Serial buffer size
        @param timeout : Read/Write timeout
        """
        self.devfile = devfile
        self.baudrate = baudrate
        self.bytesize = bytesize
        self.timeout = timeout
        self.open()
    def open(self):
        """ Setup serial port and set is as datecs device """
        # dsrdtr=True enables DSR/DTR hardware handshaking — presumably what
        # the printer expects; confirm against the printer's manual.
        self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate, bytesize=self.bytesize, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=self.timeout, dsrdtr=True)
        if self.device is not None:
            print "Serial printer enabled"
        else:
            print "Unable to open serial printer on: %s" % self.devfile
    def _raw(self, msg):
        """ Print any command sent in raw format """
        self.device.write(msg)
    def __del__(self):
        """ Close Serial interface """
        if self.device is not None:
            self.device.close()
class Network(Datecs):
    """ Define Network printer """
    def __init__(self,host,port=9100):
        """
        @param host : Printer's hostname or IP address
        @param port : Port to write to (9100 is the raw/JetDirect default)
        """
        self.host = host
        self.port = port
        self.open()
    def open(self):
        """ Open TCP socket and set it as datecs device """
        self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.device.connect((self.host, self.port))
        # NOTE(review): dead check — socket.socket() never returns None and
        # connect() raises on failure, so this branch can never trigger.
        if self.device is None:
            print "Could not open socket for %s" % self.host
    def _raw(self, msg):
        """ Send raw bytes straight to the printer socket """
        self.device.send(msg)
    def __del__(self):
        """ Close TCP connection """
        self.device.close()
|
from __future__ import absolute_import
from .tests_mechanism import AbstractTestFixture, dataset
from jormungandr.street_network.asgard import Asgard
import logging
from navitiacommon import response_pb2, type_pb2
# Street network configuration that routes every mode through the MockAsgard
# class defined below instead of a real Asgard service.
MOCKED_ASGARD_CONF = [
    {
        "modes": ['walking', 'car', 'bss', 'bike', 'car_no_park'],
        "class": "tests.direct_path_asgard_integration_tests.MockAsgard",
        "args": {
            "costing_options": {"bicycle": {"bicycle_type": "Hybrid"}},
            "api_key": "",
            "asgard_socket": "bob_socket",
            "service_url": "http://bob.com",
            "timeout": 10,
        },
    }
]
# Same wiring, but backed by a mock whose responses contain no journeys.
MOCKED_ASGARD_CONF_WITH_BAD_RESPONSE = [
    {
        "modes": ['walking', 'car', 'bss', 'bike'],
        "class": "tests.direct_path_asgard_integration_tests.MockAsgardWithBadResponse",
        "args": {
            "costing_options": {"bicycle": {"bicycle_type": "Hybrid"}},
            "api_key": "",
            "asgard_socket": "bob_socket",
            "service_url": "http://bob.com",
            "timeout": 10,
        },
    }
]
s_coord = "0.0000898312;0.0000898312"  # coordinate of S in the dataset
r_coord = "0.00188646;0.00071865"  # coordinate of R in the dataset
# Default S -> R journey request shared by the tests below.
journey_basic_query = "journeys?from={from_coord}&to={to_coord}&datetime={datetime}".format(
    from_coord=s_coord, to_coord=r_coord, datetime="20120614T080000"
)
def add_cycle_path_type_in_section(section):
    """Append four 10m path items to the section, one per cycle path type.

    Only the shared/dedicated/separated items count as cycle lanes, so the
    expected cycle_lane_length of a bike section built this way is 30.
    """
    cycle_types = (
        response_pb2.NoCycleLane,
        response_pb2.SharedCycleWay,
        response_pb2.DedicatedCycleWay,
        response_pb2.SeparatedCycleWay,
    )
    for cycle_type in cycle_types:
        item = section.street_network.path_items.add()
        item.length = 10
        item.cycle_path_type = cycle_type
def route_response(mode):
    """Build a canned Asgard itinerary response for a street network mode.

    The protobuf contains a single journey with one STREET_NETWORK section
    whose duration/distance depend on the requested mode.
    """
    mode_distances = {"walking": 200, "car": 50, "bike": 100, "car_no_park": 50}
    mode_durations = {"walking": 2000, "car": 500, "bike": 1000, "car_no_park": 500}
    distance = mode_distances.get(mode)
    duration = mode_durations.get(mode)
    response = response_pb2.Response()
    response.response_type = response_pb2.ITINERARY_FOUND
    journey = response.journeys.add()
    journey.nb_transfers = 0
    journey.nb_sections = 1
    journey.departure_date_time = 1548669936
    journey.arrival_date_time = 1548670472
    journey.requested_date_time = 1548669936
    journey.duration = duration
    journey.durations.total = duration
    section = journey.sections.add()
    add_cycle_path_type_in_section(section)
    # Per-mode durations/distances and section mode.
    if mode == "walking":
        journey.durations.walking = duration
        journey.distances.walking = distance
        section.street_network.mode = response_pb2.Walking
    elif mode == "car":
        journey.durations.car = duration
        journey.distances.car = distance
        section.street_network.mode = response_pb2.Car
    else:
        # any other mode (bike, car_no_park, ...) is reported as bike
        journey.durations.bike = duration
        journey.distances.bike = distance
        section.street_network.mode = response_pb2.Bike
    section.type = response_pb2.STREET_NETWORK
    section.duration = duration
    section.length = distance
    section.id = "section"
    section.begin_date_time = 1548669936
    section.end_date_time = 1548670472
    return response
def valid_response(request):
    """Answer direct_path requests with a mocked itinerary, anything else empty."""
    if request.requested_api != type_pb2.direct_path:
        return response_pb2.Response()
    return route_response(request.direct_path.streetnetwork_params.origin_mode)
def bad_response(request):
    """Mock an Asgard failure: an empty response with no journeys, whatever the request."""
    return response_pb2.Response()
def check_journeys(resp):
    """Assert that if any journeys are present, exactly one is tagged 'best'."""
    journeys = resp.get('journeys')
    if journeys:
        best_count = sum(1 for journey in journeys if journey['type'] == "best")
        assert best_count == 1
class MockAsgard(Asgard):
    """Asgard street network service stub returning canned itineraries."""
    def __init__(
        self, instance, service_url, asgard_socket, modes=None, id=None, timeout=10, api_key=None, **kwargs
    ):
        # Forward to the real Asgard wiring with harmless defaults.
        Asgard.__init__(
            self, instance, service_url, asgard_socket, modes or [], id or 'asgard', timeout, api_key, **kwargs
        )
    def _call_asgard(self, request):
        # Short-circuit the network call: answer locally with a mocked response.
        return valid_response(request)
class MockAsgardWithBadResponse(Asgard):
    """Asgard stub that always answers with an empty (journey-less) response."""
    def __init__(
        self, instance, service_url, asgard_socket, modes=None, id=None, timeout=10, api_key=None, **kwargs
    ):
        # Forward to the real Asgard wiring with harmless defaults.
        Asgard.__init__(
            self, instance, service_url, asgard_socket, modes or [], id or 'asgard', timeout, api_key, **kwargs
        )
    def _call_asgard(self, request):
        # Simulate a failed/unusable street network computation.
        return bad_response(request)
@dataset(
    {'main_routing_test': {'scenario': 'distributed', 'instance_config': {'street_network': MOCKED_ASGARD_CONF}}}
)
class TestAsgardDirectPath(AbstractTestFixture):
    """Direct path journeys computed through the mocked Asgard service."""
    def test_journey_with_direct_path(self):
        """
        we only want direct path
        """
        # Forbid both stops so no public transport solution can exist.
        query = (
            journey_basic_query
            + "&first_section_mode[]=walking"
            + "&first_section_mode[]=bike"
            + "&first_section_mode[]=car_no_park"
            + "&forbidden_uris[]=stop_point:stopA"
            + "&forbidden_uris[]=stop_point:stopB"
        )
        response = self.query_region(query)
        check_journeys(response)
        assert len(response['journeys']) == 3
        # car direct path from asgard (values come from route_response)
        assert 'car' in response['journeys'][0]['tags']
        assert len(response['journeys'][0]['sections']) == 1
        assert response['journeys'][0]['duration'] == 500
        assert response['journeys'][0]['durations']['car'] == 500
        assert response['journeys'][0]['durations']['total'] == 500
        assert response['journeys'][0]['distances']['car'] == 50
        assert not response['journeys'][0]['sections'][0].get('cycle_lane_length')
        # bike direct path from asgard
        assert 'bike' in response['journeys'][1]['tags']
        assert len(response['journeys'][1]['sections']) == 1
        assert response['journeys'][1]['duration'] == 1000
        assert response['journeys'][1]['durations']['bike'] == 1000
        assert response['journeys'][1]['durations']['total'] == 1000
        assert response['journeys'][1]['distances']['bike'] == 100
        assert response['journeys'][1]['duration'] == 1000
        # shared + dedicated + separated path items: 3 x 10m (see add_cycle_path_type_in_section)
        assert response['journeys'][1]['sections'][0]['cycle_lane_length'] == 30
        # walking direct path from asgard
        assert 'walking' in response['journeys'][2]['tags']
        assert len(response['journeys'][2]['sections']) == 1
        assert response['journeys'][2]['duration'] == 2000
        assert response['journeys'][2]['durations']['walking'] == 2000
        assert response['journeys'][2]['durations']['total'] == 2000
        assert response['journeys'][2]['distances']['walking'] == 200
        assert response['journeys'][2]['duration'] == 2000
        assert not response['journeys'][2]['sections'][0].get('cycle_lane_length')
        assert not response.get('feed_publishers')
@dataset(
    {
        'main_routing_test': {
            'scenario': 'distributed',
            'instance_config': {'street_network': MOCKED_ASGARD_CONF_WITH_BAD_RESPONSE},
        }
    }
)
class TestAsgardDirectPathBadResponse(AbstractTestFixture):
    """Behaviour of the distributed scenario when Asgard returns no journeys.

    Renamed from ``TestAsgardDirectPath``: this module already defines a class
    with that name, so this definition silently shadowed it and the earlier
    class's tests were never collected.
    """

    def test_crowfly_replaces_section_if_street_network_failed(self):
        """
        Topic: This case arrives when the street network computation has failed.
        In this case, we replace the lost street network section by a crowfly, like in New Default
        """
        response, status = self.query_region(journey_basic_query, check=False)
        assert status == 200
        check_journeys(response)
        assert len(response['journeys']) == 1
        assert len(response['journeys'][0]['sections']) == 3
        assert (
            response['journeys'][0]['sections'][0]['type'] == 'crow_fly'
        ), "A crow fly should replace the street network"
        assert response['journeys'][0]['sections'][1]['type'] == 'public_transport'
        assert (
            response['journeys'][0]['sections'][2]['type'] == 'crow_fly'
        ), "A crow fly should replace the street network"
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()  # register every installed app's admin module

# NOTE(review): patterns('', ...) and string view references are Django <= 1.7
# idioms, removed in Django 1.10 — migrate to a plain list of url()/path()
# with callables when upgrading.
urlpatterns = patterns('',
    url(r'^$', 'primdb_app.views.index', name='index'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^mzml/$','primdb_app.views.mzml', name='mzml'),
    url(r'^pepxml/$','primdb_app.views.pepxml', name='pepxml'),
    url(r'^result/(?P<match>\d+)/(?P<tab>\d+)$', 'primdb_app.views.detail', name='detail'),
    url(r'^match/(?P<match_id>\d+)/tab/(?P<tab_id>\d+)$', 'primdb_app.views.matchdetail', name='matchdetail'),
    url(r'^result/tab/(?P<tab_id>\d+)/match/(?P<match_id>\d+)$', 'primdb_app.views.tabdetail', name='tabdetail'),
)
|
from odoo.tests.common import SavepointCase
from .. import exceptions
class UnsubscriptionCase(SavepointCase):
    """Constraints enforced when creating ``mail.unsubscription`` records."""

    def _unsubscription_vals(self):
        """Creation values shared by the tests below (no details, no reason)."""
        return {
            "email": "axelor@yourcompany.example.com",
            "mass_mailing_id": self.env.ref("mass_mailing.mass_mail_1").id,
            "unsubscriber_id": "res.partner,%d"
            % self.env.ref("base.res_partner_2").id,
        }

    def test_details_required(self):
        """Cannot create unsubscription without details when required."""
        with self.assertRaises(exceptions.DetailsRequiredError):
            vals = self._unsubscription_vals()
            vals["reason_id"] = self.env.ref(
                "mass_mailing_custom_unsubscribe.reason_other"
            ).id
            self.env["mail.unsubscription"].create(vals)

    def test_reason_required(self):
        """Cannot create unsubscription without reason when required."""
        with self.assertRaises(exceptions.ReasonRequiredError):
            self.env["mail.unsubscription"].create(self._unsubscription_vals())
|
"""
WSGI config for RapidPro project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os

# Point Django at the project's settings module before anything imports them.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "temba.settings")

# Imported after the settings bootstrap on purpose: get_wsgi_application
# reads DJANGO_SETTINGS_MODULE at import time.
from django.core.wsgi import get_wsgi_application

# WSGI servers (gunicorn, uwsgi, mod_wsgi) look for this module-level callable.
application = get_wsgi_application()
|
"""
Code which dynamically discovers comprehensive themes. Deliberately uses no Django settings,
as the discovery happens during the initial setup of Django settings.
"""
from __future__ import absolute_import
import os
from django.utils.encoding import python_2_unicode_compatible
from path import Path
def get_theme_base_dirs_from_settings(theme_dirs=None):
    """
    Return base directories that contain all the themes.
    Example:
        >> get_theme_base_dirs_from_settings('/edx/app/ecommerce/ecommerce/themes')
        ['/edx/app/ecommerce/ecommerce/themes']
    Args:
        theme_dirs (list): (optional) iterable of theme base directory paths
    Returns:
        (List of Paths): Base theme directory paths (empty when none given)
    """
    if not theme_dirs:
        return []
    return [Path(theme_dir) for theme_dir in theme_dirs]
def get_themes_unchecked(themes_dirs, project_root=None):
    """
    Returns a list of all themes known to the system.
    Args:
        themes_dirs (list): Paths to themes base directory
        project_root (str): (optional) Path to project root
    Returns:
        List of themes known to the system.
    """
    themes = []
    # Only sub-directories that actually contain theme overrides are kept
    # (files and non-theme dirs are filtered out by get_theme_dirs).
    for base_dir in (Path(themes_dir) for themes_dir in themes_dirs):
        themes.extend(
            Theme(name, name, base_dir, project_root)
            for name in get_theme_dirs(base_dir)
        )
    return themes
def get_theme_dirs(themes_dir=None):
    """
    Returns theme dirs in given dirs
    Args:
        themes_dir (Path): base dir that contains themes.
    """
    candidates = os.listdir(themes_dir)
    return [name for name in candidates if is_theme_dir(themes_dir / name)]
def is_theme_dir(_dir):
    """
    Returns true if given dir contains theme overrides.
    A theme dir must have subdirectory 'lms' or 'cms' or both.
    Args:
        _dir: directory path to check for a theme
    Returns:
        Returns true if given dir is a theme directory.
    """
    if not os.path.isdir(_dir):
        return False
    entries = set(os.listdir(_dir))
    return 'lms' in entries or 'cms' in entries
def get_project_root_name_from_settings(project_root):
    """
    Return root name for the current project
    Example:
        >> get_project_root_name()
        'lms'
        # from studio
        >> get_project_root_name()
        'cms'
    Args:
        project_root (str): Root directory of the project.
    Returns:
        (str): component name of platform e.g lms, cms
    """
    root = Path(project_root)
    # A trailing slash yields an empty name; step up to the real directory.
    return root.name if root.name else root.parent.name
@python_2_unicode_compatible
class Theme(object):
    """
    class to encapsulate theme related information.
    """
    # class-level defaults; real values are assigned in __init__
    name = ''
    theme_dir_name = ''
    themes_base_dir = None
    project_root = None
    def __init__(self, name='', theme_dir_name='', themes_base_dir=None, project_root=None):
        """
        init method for Theme
        Args:
            name: name of the theme
            theme_dir_name: directory name of the theme
            themes_base_dir: directory path of the folder that contains the theme
            project_root: (optional) path to the project root (its basename, e.g. lms/cms,
                selects which sub-directory of the theme applies)
        """
        self.name = name
        self.theme_dir_name = theme_dir_name
        self.themes_base_dir = themes_base_dir
        self.project_root = project_root
    def __eq__(self, other):
        """
        Returns True if given theme is same as the self
        Args:
            other: Theme object to compare with self
        Returns:
            (bool) True if two themes are the same else False
        """
        return (self.theme_dir_name, self.path) == (other.theme_dir_name, other.path)
    def __hash__(self):
        # must stay consistent with __eq__ (same tuple of fields)
        return hash((self.theme_dir_name, self.path))
    def __str__(self):
        # pylint: disable=line-too-long
        return u"<Theme: {name} at '{path}'>".format(name=self.name, path=self.path)  # xss-lint: disable=python-wrap-html
    def __repr__(self):
        return self.__str__()
    @property
    def path(self):
        """
        Get absolute path of the directory that contains current theme's templates, static assets etc.
        Returns:
            Path: absolute path to current theme's contents
        """
        return Path(self.themes_base_dir) / self.theme_dir_name / get_project_root_name_from_settings(self.project_root)
    @property
    def template_path(self):
        """
        Get absolute path of current theme's template directory.
        Returns:
            Path: absolute path to current theme's template directory
        """
        return Path(self.theme_dir_name) / get_project_root_name_from_settings(self.project_root) / 'templates'
    @property
    def template_dirs(self):
        """
        Get a list of all template directories for current theme.
        Returns:
            list: list of all template directories for current theme.
        """
        return [
            self.path / 'templates',
        ]
|
import pytest
from werkzeug.datastructures import Headers
from flask import jsonify, request, Response, json
import config
from skylines.api.cors import cors
from skylines.api.oauth import oauth
from skylines.app import SkyLines
from skylines.model import User
from skylines.database import db as _db
ORIGIN = 'https://www.google.com'
@pytest.fixture(scope='session')
def app():
    """Session-wide SkyLines app with oauth/cors wired and two test endpoints."""
    app = SkyLines(config_file=config.TESTING_CONF_PATH)
    # expose response bodies as parsed JSON in the tests (see ApiResponse)
    app.response_class = ApiResponse
    _db.init_app(app)
    oauth.init_app(app)
    cors.init_app(app)
    @app.route('/secrets')
    @oauth.required()
    def secrets():
        # only reachable with a valid access token
        return jsonify({'secrets': [1, 1, 2, 3, 5, 8, 13]})
    @app.route('/user')
    @oauth.optional()
    def user():
        # request.user_id is None when no (valid) token was sent
        return jsonify({'user': request.user_id})
    return app
class ApiResponse(Response):
    """Flask response subclass exposing the body parsed as JSON."""

    @property
    def json(self):
        # Decode the raw payload on every access.
        raw_body = self.data
        return json.loads(raw_body)
@pytest.fixture(scope='session')
def db(app):
    """Create all tables for the test session, drop them afterwards."""
    _db.app = app
    _db.create_all()
    yield _db
    # teardown: wipe the schema so the next session starts clean
    _db.drop_all()
@pytest.fixture(scope='session')
def test_user(db):
    """A committed user account the oauth tests can log in with."""
    user = User(email_address='test@foo.com', password='secret123', first_name='test')
    db.session.add(user)
    db.session.commit()
    return user
@pytest.fixture
def tokens(client, test_user):
    """Request a password-grant token pair and sanity-check the response.

    Also verifies that CORS headers are echoed for the configured ORIGIN.
    Returns the parsed JSON token response.
    """
    headers = Headers()
    headers.set('Origin', ORIGIN)
    response = client.post('/oauth/token', headers=headers, data={
        'grant_type': 'password',
        'username': 'test@foo.com',
        'password': 'secret123',
    })
    assert response.status_code == 200
    assert response.headers.get('Access-Control-Allow-Origin') == ORIGIN
    assert response.headers.get('Access-Control-Allow-Credentials') == 'true'
    assert response.json.get('access_token')
    assert response.json.get('expires_in')
    assert response.json.get('token_type') == 'Bearer'
    assert response.json.get('refresh_token')
    return response.json
@pytest.fixture
def access_token(tokens):
    """The bare access token string from the password-grant response."""
    return tokens.get('access_token')
@pytest.fixture
def refresh_token(tokens):
    """The bare refresh token string from the password-grant response."""
    return tokens.get('refresh_token')
|
import os.path
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# NOTE(review): NAME is empty — the sqlite3 backend needs a database file
# path here before the project can run.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'es-ES'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = ''
MEDIA_URL = ''
# Collected static files end up next to this settings module.
STATIC_ROOT = os.path.join(os.path.dirname(__file__), "static")
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# SECURITY NOTE(review): this key is committed to the repository; regenerate
# it and load it from the environment before any production deployment.
SECRET_KEY = 'hx*y68lmy92df6$j1$fbkj3+q7asx^u^!wwr$s*h5h%$c=5b2e'
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # NOTE(review): CSRF protection is disabled here — re-enable unless
    # this is intentional for this deployment.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_CONTEXT_PROCESSORS = ("django.contrib.auth.context_processors.auth",
                               "django.core.context_processors.request",
                               "django.core.context_processors.debug",
                               "django.core.context_processors.i18n",
                               "django.core.context_processors.media",
                               "django.core.context_processors.static",
                               "django.core.context_processors.tz",
                               "django.contrib.messages.context_processors.messages")
INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'djangomoqueta',
    'books',
)
# Email unhandled request errors to ADMINS when DEBUG is off.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True
        },
    }
}
|
# -*- coding: utf-8 -*-
# Package metadata consumed by setup tooling and ``druid.__version__`` users.
__author__ = "OKso <okso.me>"
__version__ = '0.2.1'

# Re-export the main entry points at package level for convenience.
from druid.druid import Druid
import druid.image as image
import druid.bootstrap as bootstrap
|
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.conf import settings
from taiga.importers.jira.agile import JiraAgileImporter
from taiga.importers.jira.normal import JiraNormalImporter
from taiga.users.models import User
import json
class Command(BaseCommand):
    """Interactive importer of Jira projects/boards into Taiga.

    Anything not supplied as a command line option is asked on stdin:
    the OAuth dance, project selection, user bindings and issue type
    bindings.
    """

    def add_arguments(self, parser):
        parser.add_argument('--token', dest="token", type=str,
                            help='Auth token')
        parser.add_argument('--server', dest="server", type=str,
                            help='Server address (default: https://jira.atlassian.com)',
                            default="https://jira.atlassian.com")
        parser.add_argument('--project-id', dest="project_id", type=str,
                            help='Project ID or full name (ex: taigaio/taiga-back)')
        parser.add_argument('--project-type', dest="project_type", type=str,
                            help='Project type in jira: project or board')
        parser.add_argument('--template', dest='template', default="scrum",
                            # fixed help text: it previously read "scrum or scrum"
                            help='template to use: scrum or kanban (default scrum)')
        parser.add_argument('--ask-for-users', dest='ask_for_users', const=True,
                            action="store_const", default=False,
                            # fixed help text: it was a copy/paste of --closed-data
                            help='Interactively bind jira users to taiga users')
        parser.add_argument('--closed-data', dest='closed_data', const=True,
                            action="store_const", default=False,
                            help='Import closed data')
        parser.add_argument('--keep-external-reference', dest='keep_external_reference', const=True,
                            action="store_const", default=False,
                            help='Store external reference of imported data')

    def handle(self, *args, **options):
        """Drive the whole import, prompting the operator where needed."""
        admin = User.objects.get(username="admin")
        server = options.get("server")
        token = self._get_token(server, options)
        project_type = self._get_project_type(options)
        if project_type not in ["project", "board"]:
            print("ERROR: Bad project type.")
            return
        if project_type == "project":
            importer = JiraNormalImporter(admin, server, token)
        else:
            importer = JiraAgileImporter(admin, server, token)
        project_id = self._get_project_id(importer, options)
        users_bindings = {}
        if options.get('ask_for_users', None):
            users_bindings = self._ask_users_bindings(importer)
        import_options = {
            "template": options.get('template'),
            "import_closed_data": options.get("closed_data", False),
            "users_bindings": users_bindings,
            "keep_external_reference": options.get('keep_external_reference'),
        }
        if project_type == "project":
            import_options["types_bindings"] = self._ask_types_bindings(importer, project_id)
        importer.import_project(project_id, import_options)

    def _get_token(self, server, options):
        """Resolve the auth token: 'anon' -> None, a JSON literal, or the OAuth dance."""
        if options.get('token', None) == "anon":
            return None
        if options.get('token', None):
            return json.loads(options.get('token'))
        (rtoken, rtoken_secret, url) = JiraNormalImporter.get_auth_url(
            server,
            settings.IMPORTERS.get('jira', {}).get('consumer_key', None),
            settings.IMPORTERS.get('jira', {}).get('cert', None),
            True
        )
        print(url)
        input("Go to the url, allow the user and get back and press enter")
        token = JiraNormalImporter.get_access_token(
            server,
            settings.IMPORTERS.get('jira', {}).get('consumer_key', None),
            settings.IMPORTERS.get('jira', {}).get('cert', None),
            rtoken,
            rtoken_secret,
            True
        )
        print("Auth token: {}".format(json.dumps(token)))
        return token

    def _get_project_type(self, options):
        """Return the project type, asking on stdin when not given as an option."""
        if options.get('project_type', None) is None:
            print("Select the type of project to import (project or board): ")
            return input("Project type: ")
        return options.get('project_type')

    def _get_project_id(self, importer, options):
        """Return the jira project id, listing the candidates when not given."""
        if options.get('project_id', None):
            return options.get('project_id')
        print("Select the project to import:")
        for project in importer.list_projects():
            print("- {}: {}".format(project['id'], project['name']))
        return input("Project id or key: ")

    def _ask_users_bindings(self, importer):
        """Map jira user keys to taiga Users, asking the operator when needed."""
        users_bindings = {}
        print("Add the username or email for next jira users:")
        for user in importer.list_users():
            try:
                users_bindings[user['key']] = User.objects.get(Q(email=user['email']))
                # Bugfix: proceed with the NEXT jira user; the original 'break'
                # aborted the whole loop after the first email match, leaving
                # every remaining user unbound.
                continue
            except User.DoesNotExist:
                pass
            while True:
                username_or_email = input("{}: ".format(user['full_name']))
                if username_or_email == "":
                    break
                try:
                    users_bindings[user['key']] = User.objects.get(Q(username=username_or_email) | Q(email=username_or_email))
                    break
                except User.DoesNotExist:
                    print("ERROR: Invalid username or email")
        return users_bindings

    def _ask_types_bindings(self, importer, project_id):
        """Bind jira issue types to taiga types; subtask types always map to task."""
        print("Bind jira issue types to (epic, us, issue)")
        types_bindings = {
            "epic": [],
            "us": [],
            "task": [],
            "issue": [],
        }
        for issue_type in importer.list_issue_types(project_id):
            while True:
                if issue_type['subtask']:
                    types_bindings['task'].append(issue_type)
                    break
                taiga_type = input("{}: ".format(issue_type['name']))
                if taiga_type not in ['epic', 'us', 'issue']:
                    print("use a valid taiga type (epic, us, issue)")
                    continue
                types_bindings[taiga_type].append(issue_type)
                break
        return types_bindings
|
__author__ = 'Iván Arias León (ivan.ariasleon@telefonica.com)'

from lettuce import world

import json
import os
import sys

# Parse the JSON configuration file located in the src folder and
# store the resulting dictionary in the lettuce world global variable.
with open("properties.json") as config_file:
    try:
        world.config = json.load(config_file)
    # "except ... as e" works on Python 2.6+ and Python 3; the original
    # "except Exception, e" form is a SyntaxError on Python 3.
    except Exception as e:
        # Parenthesized single-argument print is valid on both Python 2 and 3.
        print('Error parsing config file: %s' % (e))
        sys.exit(1)

# Make sure the logs path exists and create it otherwise.
if not os.path.exists(world.config["environment"]["logs_path"]):
    os.makedirs(world.config["environment"]["logs_path"])
|
import os
import django
from django.conf import global_settings

# Minimal Django settings module used to run the django-jet test suite.

# Repository root: two levels up from this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Test-only secret key; never reuse outside the test runner.
SECRET_KEY = '!DJANGO_JET_TESTS!'
DEBUG = True
# Legacy (pre-1.8) template debug flag, kept for old Django versions.
TEMPLATE_DEBUG = DEBUG
# Let view exceptions propagate so test failures show the real traceback.
DEBUG_PROPAGATE_EXCEPTIONS = True
ROOT_URLCONF = 'jet.tests.urls'
INSTALLED_APPS = (
    'jet.dashboard',
    'jet',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'jet.tests',
)
# Define the middleware stack under both names so the settings work with
# old-style MIDDLEWARE_CLASSES and new-style MIDDLEWARE (Django >= 1.10).
MIDDLEWARE = MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
# Django < 1.9 uses the legacy TEMPLATE_* settings; newer versions use the
# TEMPLATES list introduced in 1.8.
if django.VERSION[:2] < (1, 9):
    TEMPLATE_CONTEXT_PROCESSORS = tuple(global_settings.TEMPLATE_CONTEXT_PROCESSORS) + (
        'django.core.context_processors.request',
    )
else:
    TEMPLATES = [
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [os.path.join(BASE_DIR, 'templates')],
            'APP_DIRS': True,
            'OPTIONS': {
                'context_processors': (
                    'django.template.context_processors.debug',
                    'django.template.context_processors.request',
                    'django.contrib.auth.context_processors.auth',
                    'django.contrib.messages.context_processors.messages',
                )
            },
        },
    ]
# No NAME given: sqlite3 falls back to an in-memory database for tests.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
    }
}
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-US'
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_URL = '/static/'
# Custom dashboards exercised by the jet test suite.
JET_INDEX_DASHBOARD = 'jet.tests.dashboard.TestIndexDashboard'
JET_APP_INDEX_DASHBOARD = 'jet.tests.dashboard.TestAppIndexDashboard'
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``person`` foreign key to ``LoggedAction``.

    Links a logged action to the ``popolo.Person`` it concerns; nullable
    so existing rows and person-less actions remain valid.
    """

    dependencies = [
        ("popolo", "0002_update_models_from_upstream"),
        ("candidates", "0009_migrate_to_django_popolo"),
    ]

    operations = [
        migrations.AddField(
            model_name="loggedaction",
            name="person",
            field=models.ForeignKey(
                blank=True,
                to="popolo.Person",
                null=True,
                on_delete=models.CASCADE,
            ),
        )
    ]
|
from ckan import plugins
from ckan.logic.schema import (
default_create_package_schema,
default_update_package_schema,
default_show_package_schema,
)
# Resolve CKAN's stock validators/converters once at import time so the
# schema-building functions below can reference them directly.
ignore_missing = plugins.toolkit.get_validator('ignore_missing')
convert_to_extras = plugins.toolkit.get_converter('convert_to_extras')
convert_from_extras = plugins.toolkit.get_converter('convert_from_extras')
is_positive_integer = plugins.toolkit.get_validator('is_positive_integer')
def package_create_schema():
    """Return the default create-package schema extended with the extra
    dataset-metadata fields, each stored as a package extra."""
    schema = default_create_package_schema()

    # Free-text fields: optional, coerced to unicode, stored as extras.
    for field in ('frequency_time_modifier', 'frequency_update_period',
                  'frequency_period', 'retention_period', 'delivery_unit',
                  'service', 'next_update', 'review_date',
                  'coverage_start_date', 'coverage_end_date'):
        schema[field] = [ignore_missing, unicode, convert_to_extras]

    # Numeric fields.
    schema['frequency_count'] = [ignore_missing, convert_to_extras]
    schema['retention_count'] = [ignore_missing, is_positive_integer,
                                 convert_to_extras]
    # frequency is constructed from the other frequency_ fields
    schema['frequency'] = [ignore_missing]
    return schema
def package_update_schema():
    """Return the default update-package schema extended with the extra
    dataset-metadata fields, each stored as a package extra."""
    schema = default_update_package_schema()

    # Free-text fields: optional, coerced to unicode, stored as extras.
    for field in ('frequency_time_modifier', 'frequency_update_period',
                  'frequency_period', 'retention_period', 'delivery_unit',
                  'service', 'next_update', 'review_date',
                  'coverage_start_date', 'coverage_end_date'):
        schema[field] = [ignore_missing, unicode, convert_to_extras]

    # Numeric fields.
    schema['frequency_count'] = [ignore_missing, convert_to_extras]
    schema['retention_count'] = [ignore_missing, is_positive_integer,
                                 convert_to_extras]
    # frequency is constructed from the other frequency_ fields
    schema['frequency'] = [ignore_missing]
    return schema
def package_show_schema():
    """Return the default show-package schema extended so the extra
    dataset-metadata fields are read back out of the package extras."""
    schema = default_show_package_schema()

    # Plain fields: read from extras, optional.
    for field in ('frequency_update_period', 'frequency_period',
                  'retention_period', 'delivery_unit', 'service',
                  'next_update', 'review_date', 'coverage_start_date',
                  'coverage_end_date'):
        schema[field] = [convert_from_extras, ignore_missing]

    # Fields with an extra trailing coercion/validation step.
    schema['frequency_time_modifier'] = [convert_from_extras, ignore_missing,
                                         unicode]
    schema['frequency_count'] = [convert_from_extras, ignore_missing,
                                 is_positive_integer]
    schema['retention_count'] = [convert_from_extras, ignore_missing,
                                 is_positive_integer]
    # frequency is constructed from the other frequency_ fields
    schema['frequency'] = [collate_frequency_fields, ignore_missing]
    return schema
def collate_frequency_fields(key, converted_data, errors, context):
    '''Build the ``frequency`` value from the other frequency_ extras.

    frequency is just frequency_update_period if it exists;
    otherwise it is 'Every {0} {1}'.format(frequency_count, frequency_period),
    and the empty string when neither is present.

    NOTE: keys in ``converted_data`` are flattened-schema tuples, hence the
    ``('frequency',)`` tuple key written below.
    '''
    # convert all the extras from
    # ('extras', <int>, 'key'): 'frequency_count'
    # ('extras', <int>, 'value'): '10'
    # format into a dict
    extras = {}
    for k, v in converted_data.iteritems():
        if k[0] == 'extras' and k[-1] == 'key':
            # Pair each extras 'key' entry with its sibling 'value' entry.
            extras[v] = converted_data[k[0], k[1], 'value']
    option_one = extras.get('frequency_update_period')
    option_two = extras.get('frequency_period')
    if option_one:
        converted_data['frequency', ] = option_one
    elif option_two:
        # assumes frequency_count's extras value is a string here — TODO
        # confirm; a non-string would make ' '.join raise TypeError.
        converted_data['frequency', ] = ' '.join([
            'Every',
            extras.get('frequency_count', ''),
            option_two
        ])
    else:
        converted_data['frequency', ] = ''
|
from django import forms
from django.contrib.auth.models import User
from amcat.forms import forms
from django.contrib.auth.forms import PasswordResetForm
class UserPasswordResetForm(PasswordResetForm):
    """
    Reset Password by filling in either email address or user name.

    If email is valid, choose the email address.
    Otherwise, if the username is recognized, find the matching user.
    Send the user a password reset message (super class function).
    """
    # Extra field on top of PasswordResetForm's built-in ``email`` field.
    username = forms.CharField()

    def __init__(self, *args, **kwargs):
        super(UserPasswordResetForm, self).__init__(*args, **kwargs)
        # Use placeholders instead of labels for a compact form layout.
        self.fields['email'].widget.attrs['placeholder'] = 'Email'
        self.fields['username'].widget.attrs['placeholder'] = 'Username'
        # Filled in by clean() when the user is looked up by username.
        self.users_cache = None

    def clean(self):
        cleaned_data = super(UserPasswordResetForm, self).clean()
        email = cleaned_data.get("email")
        username = cleaned_data.get("username")
        # Only one of the two fields is required, so drop any
        # "username required" error before choosing a lookup path.
        usererror = self._errors.get("username")
        if usererror:
            del self._errors["username"]
        if email:
            # Email takes precedence when both fields are filled in.
            return self.cleaned_data
        if username:
            self.users_cache = User.objects.filter(username=username)
            if len(self.users_cache) == 0:
                msg = u"User unknown"
                self._errors["username"] = self.error_class([msg])
            else:
                # A user matched by username, so the "email required"
                # error no longer applies.
                emailerror = self._errors.get("email")
                if emailerror:
                    del self._errors["email"]
                # NOTE(review): this stores the User *object* under "email",
                # not an address string — confirm downstream expects that
                # rather than ``self.users_cache[0].email``.
                cleaned_data["email"] = self.users_cache[0]
        return self.cleaned_data
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.