import os
import sys
import copy
import pprint
import numpy as np
import tensorflow as tf
import json
import time
import gc
from memory_profiler import profile
from readers.inference_reader import InferenceReader
from readers.test_reader import TestDataReader
from models.figer_model.el_model import ELModel
from readers.config import Config
from readers.vocabloader import VocabLoader
import readers.utils as utils
np.set_printoptions(threshold=np.inf)
np.set_printoptions(precision=7)
pp = pprint.PrettyPrinter()
flags = tf.app.flags
flags.DEFINE_integer("max_steps", 32000, "Maximum of iteration [450000]")
flags.DEFINE_integer("pretraining_steps", 32000, "Number of steps to run pretraining")
flags.DEFINE_float("learning_rate", 0.005, "Learning rate of adam optimizer [0.001]")
flags.DEFINE_string("model_path", "/fs/clip-quiz/naveen/neural_el/neural-el_resources/models/CD.model", "Path to trained model")
flags.DEFINE_string("dataset", "el-figer", "The name of dataset [ptb]")
flags.DEFINE_string("checkpoint_dir", "/tmp",
"Directory name to save the checkpoints [checkpoints]")
flags.DEFINE_integer("batch_size", 1, "Batch Size for training and testing")
flags.DEFINE_integer("word_embed_dim", 300, "Word Embedding Size")
flags.DEFINE_integer("context_encoded_dim", 100, "Context Encoded Dim")
flags.DEFINE_integer("context_encoder_num_layers", 1, "Num of Layers in context encoder network")
flags.DEFINE_integer("context_encoder_lstmsize", 100, "Size of context encoder hidden layer")
flags.DEFINE_integer("coherence_numlayers", 1, "Number of layers in the Coherence FF")
flags.DEFINE_integer("jointff_numlayers", 1, "Number of layers in the Coherence FF")
flags.DEFINE_integer("num_cand_entities", 30, "Num CrossWikis entity candidates")
flags.DEFINE_float("reg_constant", 0.00, "Regularization constant for NN weight regularization")
flags.DEFINE_float("dropout_keep_prob", 0.6, "Dropout Keep Probability")
flags.DEFINE_float("wordDropoutKeep", 0.6, "Word Dropout Keep Probability")
flags.DEFINE_float("cohDropoutKeep", 0.4, "Coherence Dropout Keep Probability")
flags.DEFINE_boolean("decoder_bool", True, "Decoder bool")
flags.DEFINE_string("mode", 'inference', "Mode to run")
flags.DEFINE_boolean("strict_context", False, "Strict Context exludes mention surface")
flags.DEFINE_boolean("pretrain_wordembed", True, "Use Word2Vec Embeddings")
flags.DEFINE_boolean("coherence", True, "Use Coherence")
flags.DEFINE_boolean("typing", True, "Perform joint typing")
flags.DEFINE_boolean("el", True, "Perform joint typing")
flags.DEFINE_boolean("textcontext", True, "Use text context from LSTM")
flags.DEFINE_boolean("useCNN", False, "Use wiki descp. CNN")
flags.DEFINE_boolean("glove", True, "Use Glove Embeddings")
flags.DEFINE_boolean("entyping", False, "Use Entity Type Prediction")
flags.DEFINE_integer("WDLength", 100, "Length of wiki description")
flags.DEFINE_integer("Fsize", 5, "For CNN filter size")
flags.DEFINE_string("optimizer", 'adam', "Optimizer to use. adagrad, adadelta or adam")
flags.DEFINE_string("config", 'configs/config.ini',
"VocabConfig Filepath")
flags.DEFINE_string("test_out_fp", "", "Write Test Prediction Data")
FLAGS = flags.FLAGS
from unidecode import unidecode
prog_start = time.time()
def FLAGS_check(FLAGS):
if not (FLAGS.textcontext and FLAGS.coherence):
print("*** Local and Document context required ***")
sys.exit(0)
assert os.path.exists(FLAGS.model_path), "Model path doesn't exist."
def decrypt(s):
l = ""
i = 0
while i <len(s):
if ord(s[i])< 128:
l+=s[i]
i+=1
else:
if len(unidecode(s[i]))>0:
l+=unidecode(s[i])[0]
else:
l+="a"
i+=1
return l
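# Rough behaviour of decrypt(): ASCII characters pass through unchanged, while
# each non-ASCII character is replaced by the first character of its unidecode
# transliteration (or by "a" if unidecode yields nothing). For a hypothetical
# input, decrypt("café") -> "cafe".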
def getCurrentMemoryUsage():
''' Memory usage in kB '''
with open('/proc/self/status') as f:
memusage = f.read().split('VmRSS:')[1].split('\n')[0][:-3]
return int(memusage.strip())
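# Note: getCurrentMemoryUsage() reads the VmRSS line from /proc/self/status,
# so it only works on Linux; the returned value is the resident set size in kB.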
@profile
def main(_):
pp.pprint(flags.FLAGS.__flags)
output_file = "data/output.json"#sys.argv[2]
range_start = 0#int(sys.argv[3])
range_end = 10#int(sys.argv[4])
file_name = "data/qanta.train.2018.04.18.json"#sys.argv[1]
question_list = json.loads(open(file_name).read())["questions"]
sentences = question_list[range_start:min(range_end,len(question_list))]
FLAGS_check(FLAGS)
config = Config(FLAGS.config, verbose=False)
vocabloader = VocabLoader(config)
print("Loading in variables!")
word2idx, idx2word = vocabloader.getGloveWordVocab()
wid2WikiTitle = vocabloader.getWID2Wikititle()
crosswikis = utils.load(config.crosswikis_pruned_pkl)
word2vec = vocabloader.loadGloveVectors()
print("DONE LOADING IN VARIABLES!!!")
all_entities = []
for sent in sentences:
tf.reset_default_graph()
loc = config.test_file.replace("sampletest.txt","{}_{}.txt".format(range_start,range_end))
w = open(loc,"w")
config.test_file = loc
sent["text"] = decrypt(sent["text"].replace("\xa0"," "))
w.write(sent["text"].encode("ascii","ignore").decode("ascii"))
print(sent["text"].encode("ascii","ignore").decode("ascii"))
w.close()
FLAGS.dropout_keep_prob = 1.0
FLAGS.wordDropoutKeep = 1.0
FLAGS.cohDropoutKeep = 1.0
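# Dropout is disabled for inference: all keep probabilities are forced to 1.0
# here, regardless of the values supplied via the command-line flags above.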
start = time.time()
print("Test file {} ".format(config.test_file))
reader = InferenceReader(config=config,
vocabloader=vocabloader,
test_mens_file=config.test_file,
num_cands=FLAGS.num_cand_entities,
batch_size=FLAGS.batch_size, word2idx=word2idx, idx2word=idx2word,
wid2WikiTitle=wid2WikiTitle,crosswikis=crosswikis,word2vec=word2vec
,strict_context=FLAGS.strict_context,
pretrain_wordembed=FLAGS.pretrain_wordembed,
coherence=FLAGS.coherence)
print("Took {} time to create inference reader".format(time.time()-start))
docta = reader.ccgdoc
model_mode = 'inference'
config_proto = tf.ConfigProto()
config_proto.allow_soft_placement = True
config_proto.gpu_options.allow_growth=True
sess = tf.Session(config=config_proto)
print("COHSTR",reader.num_cohstr)
"""with sess.as_default():
start = time.time()
model = ELModel(
sess=sess, reader=reader, dataset=FLAGS.dataset,
max_steps=FLAGS.max_steps,
pretrain_max_steps=FLAGS.pretraining_steps,
word_embed_dim=FLAGS.word_embed_dim,
context_encoded_dim=FLAGS.context_encoded_dim,
context_encoder_num_layers=FLAGS.context_encoder_num_layers,
context_encoder_lstmsize=FLAGS.context_encoder_lstmsize,
coherence_numlayers=FLAGS.coherence_numlayers,
jointff_numlayers=FLAGS.jointff_numlayers,
learning_rate=FLAGS.learning_rate,
dropout_keep_prob=FLAGS.dropout_keep_prob,
reg_constant=FLAGS.reg_constant,
checkpoint_dir=FLAGS.checkpoint_dir,
optimizer=FLAGS.optimizer,
mode=model_mode,
strict=FLAGS.strict_context,
pretrain_word_embed=FLAGS.pretrain_wordembed,
typing=FLAGS.typing,
el=FLAGS.el,
coherence=FLAGS.coherence,
textcontext=FLAGS.textcontext,
useCNN=FLAGS.useCNN,
WDLength=FLAGS.WDLength,
Fsize=FLAGS.Fsize,
entyping=FLAGS.entyping)
print("Loading EL Model took {} time".format(time.time()-start))
print("Doing inference")
try:
start = time.time()
(predTypScNPmat_list,
widIdxs_list,
priorProbs_list,
textProbs_list,
jointProbs_list,
evWTs_list,
pred_TypeSetsList) = model.inference(ckptpath=FLAGS.model_path)
print("Inference took {} time".format(time.time()-start))
except:
entity_list = {'qanta_id':sent['qanta_id'],'mentions':[]}
all_entities.append(entity_list)
print("No entities")
continue
start = time.time()
numMentionsInference = len(widIdxs_list)
numMentionsReader = 0
for sent_idx in reader.sentidx2ners:
numMentionsReader += len(reader.sentidx2ners[sent_idx])
assert numMentionsInference == numMentionsReader
mentionnum = 0
entityTitleList = []
print("Tokenized sentences {}".format(reader.sentences_tokenized))
for sent_idx in reader.sentidx2ners:
nerDicts = reader.sentidx2ners[sent_idx]
sentence = ' '.join(reader.sentences_tokenized[sent_idx])
for s, ner in nerDicts:
[evWTs, evWIDS, evProbs] = evWTs_list[mentionnum]
predTypes = pred_TypeSetsList[mentionnum]
entityTitleList.append(evWTs[2])
mentionnum += 1
elview = copy.deepcopy(docta.view_dictionary['NER_CONLL'])
elview.view_name = 'ENG_NEURAL_EL'
for i, cons in enumerate(elview.cons_list):
cons['label'] = entityTitleList[i]
docta.view_dictionary['ENG_NEURAL_EL'] = elview
print("Processing took {} time".format(time.time()-start))
print("List of entities")
#print(elview.cons_list)
print("\n")
s = sent["text"]
print("New S is {}".format(s))
e = elview.cons_list
t = reader.sentences_tokenized
c = []
f = []
print(s)
#print("E {}".format(e))
print("T {}".format(t))
for i in t:
for j in i:
f.append(j)
i = 0
token_pointer = 0
while token_pointer < len(f) and i < len(s):
token_len = len(f[token_pointer])
while i+token_len<len(s) and s[i:i+token_len] != f[token_pointer]:
i+=1
c.append((i,token_len+i))
i+=1
token_pointer+=1
if len(c) != len(f):
print("ERROR in C and F")
unflattened_c = []
c_pointer = 0
for i in range(len(t)):
l = c[c_pointer:c_pointer+len(t[i])]
c_pointer+=len(t[i])
unflattened_c.append(l)
#print("C {}".format(c))
#print("F {}".format(f))
#print("Unflattened C {}".format(unflattened_c))
entity_list = {'qanta_id':sent['qanta_id'],'mentions':[]}
sentence_num = 0
UNK = "<unk_wid>"
for i in range(len(e)):
if e[i]["label"]!=UNK:
all_words = False
while not all_words and sentence_num < len(t):
all_words = True
#print(e[i])
for word in range(e[i]["start"],e[i]["end"]+1):
if len(t[sentence_num])<=word or t[sentence_num][word] not in e[i]["tokens"]:
all_words = False
if not all_words:
sentence_num+=1
if sentence_num == len(t):
print("Error with sentence_num")
else:
entity_list['mentions'].append({'entity':e[i]["label"],'span':[unflattened_c[sentence_num][e[i]['start']][0],unflattened_c[sentence_num][e[i]['end']][1]]})
#print("Entity list is {}".format(entity_list))
all_entities.append(entity_list)
local_vars = list(locals().items())
del reader
del predTypScNPmat_list
del widIdxs_list
del priorProbs_list
del textProbs_list
del jointProbs_list
del evWTs_list
del model
del pred_TypeSetsList
print("Memory usage {}".format(getCurrentMemoryUsage()))
#print("All entities are {}".format(all_entities))
del sess"""
gc.collect()
tf.reset_default_graph()
w=open(output_file,"w")
w.write(json.dumps(all_entities))
w.close()
print("Dumped JSON, all done")
print("Took {} time".format(time.time()-prog_start))
return
sys.exit()
if __name__ == '__main__':
tf.app.run()
|
from PyQt5 import QtCore, QtGui, QtWidgets
from AssignmentSectionWindow_ui import Ui_AssignmentSectionWindow
from AddAssignmentDialog import AddAssignmentDialog
class AssignmentSectionWindow(QtWidgets.QMainWindow, Ui_AssignmentSectionWindow):
def __init__(self, parent=None):
super(AssignmentSectionWindow, self).__init__(parent)
self.setupUi(self)
self.addAssignmentButton.clicked.connect(self.addAssignment)
self.assignmentLabel.setText("Assignments of xxx")
@QtCore.pyqtSlot()
def addAssignment(self):
w = AddAssignmentDialog()
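# exec_() runs the dialog modally and blocks until it is closed;
# QDialog.Accepted is returned when the user confirms the dialog.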
if w.exec_() == QtWidgets.QDialog.Accepted:
data = w.get_assignment()
print(data)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
w = AssignmentSectionWindow()
w.show()
sys.exit(app.exec_())
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running backend tests in parallel.
This should not be run directly. Instead, navigate to the oppia/ folder and
execute:
bash scripts/run_backend_tests.sh
"""
# Pylint has issues with the import order of argparse.
# pylint: disable=wrong-import-order
import argparse
import datetime
import os
import re
import subprocess
import threading
import time
# pylint: enable=wrong-import-order
COVERAGE_PATH = os.path.join(
os.getcwd(), '..', 'oppia_tools', 'coverage-4.0', 'coverage')
TEST_RUNNER_PATH = os.path.join(os.getcwd(), 'core', 'tests', 'gae_suite.py')
LOG_LOCK = threading.Lock()
ALL_ERRORS = []
# This should be the same as core.test_utils.LOG_LINE_PREFIX.
LOG_LINE_PREFIX = 'LOG_INFO_TEST: '
_LOAD_TESTS_DIR = os.path.join(os.getcwd(), 'core', 'tests', 'load_tests')
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
'--generate_coverage_report',
help='optional; if specified, generates a coverage report',
action='store_true')
_PARSER.add_argument(
'--test_target',
help='optional dotted module name of the test(s) to run',
type=str)
_PARSER.add_argument(
'--test_path',
help='optional subdirectory path containing the test(s) to run',
type=str)
_PARSER.add_argument(
'--exclude_load_tests',
help='optional; if specified, exclude load tests from being run',
action='store_true')
_PARSER.add_argument(
'-v',
'--verbose',
help='optional; if specified, display the output of the tests being run',
action='store_true')
def log(message, show_time=False):
"""Logs a message to the terminal.
If show_time is True, prefixes the message with the current time.
"""
with LOG_LOCK:
if show_time:
print datetime.datetime.utcnow().strftime('%H:%M:%S'), message
else:
print message
def run_shell_cmd(exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
"""Runs a shell command and captures the stdout and stderr output.
If the cmd fails, raises Exception. Otherwise, returns a string containing
the concatenation of the stdout and stderr logs.
"""
p = subprocess.Popen(exe, stdout=stdout, stderr=stderr)
last_stdout_str, last_stderr_str = p.communicate()
last_stdout = last_stdout_str.split('\n')
if LOG_LINE_PREFIX in last_stdout_str:
log('')
for line in last_stdout:
if line.startswith(LOG_LINE_PREFIX):
log('INFO: %s' % line[len(LOG_LINE_PREFIX): ])
log('')
result = '%s%s' % (last_stdout_str, last_stderr_str)
if p.returncode != 0:
raise Exception('Error %s\n%s' % (p.returncode, result))
return result
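# Example usage (hypothetical command, assuming it is on the PATH):
#   output = run_shell_cmd(['python', '--version'])
# A non-zero return code raises Exception with the combined stdout/stderr.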
class TaskThread(threading.Thread):
"""Runs a task in its own thread."""
def __init__(self, func, verbose, name=None):
super(TaskThread, self).__init__()
self.func = func
self.output = None
self.exception = None
self.verbose = verbose
self.name = name
self.finished = False
def run(self):
try:
self.output = self.func()
if self.verbose:
log('LOG %s:' % self.name, show_time=True)
log(self.output)
log('----------------------------------------')
log('FINISHED %s: %.1f secs' %
(self.name, time.time() - self.start_time), show_time=True)
self.finished = True
except Exception as e:
self.exception = e
if 'KeyboardInterrupt' not in str(self.exception):
log('ERROR %s: %.1f secs' %
(self.name, time.time() - self.start_time), show_time=True)
self.finished = True
class TestingTaskSpec(object):
"""Executes a set of tests given a test class name."""
def __init__(self, test_target, generate_coverage_report):
self.test_target = test_target
self.generate_coverage_report = generate_coverage_report
def run(self):
"""Runs all tests corresponding to the given test target."""
test_target_flag = '--test_target=%s' % self.test_target
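# With coverage enabled, the test runner is wrapped in `coverage run -p` so
# that each test process writes its own .coverage.* data file (parallel mode).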
if self.generate_coverage_report:
exc_list = [
'python', COVERAGE_PATH, 'run', '-p', TEST_RUNNER_PATH,
test_target_flag]
else:
exc_list = ['python', TEST_RUNNER_PATH, test_target_flag]
return run_shell_cmd(exc_list)
def _check_all_tasks(tasks):
"""Checks the results of all tasks."""
running_tasks_data = []
for task in tasks:
if task.isAlive():
running_tasks_data.append(' %s (started %s)' % (
task.name,
time.strftime('%H:%M:%S', time.localtime(task.start_time))
))
if task.exception:
ALL_ERRORS.append(task.exception)
if running_tasks_data:
log('----------------------------------------')
log('Tasks still running:')
for task_details in running_tasks_data:
log(task_details)
def _execute_tasks(tasks, batch_size=24):
"""Starts all tasks and checks the results.
Runs no more than 'batch_size' tasks at a time.
"""
remaining_tasks = [] + tasks
currently_running_tasks = set([])
while remaining_tasks or currently_running_tasks:
if currently_running_tasks:
for task in list(currently_running_tasks):
task.join(1)
if not task.isAlive():
currently_running_tasks.remove(task)
while remaining_tasks and len(currently_running_tasks) < batch_size:
task = remaining_tasks.pop()
currently_running_tasks.add(task)
task.start()
task.start_time = time.time()
time.sleep(5)
if remaining_tasks:
log('----------------------------------------')
log('Number of unstarted tasks: %s' % len(remaining_tasks))
_check_all_tasks(tasks)
log('----------------------------------------')
def _get_all_test_targets(test_path=None, include_load_tests=True):
"""Returns a list of test targets for all classes under test_path
containing tests.
"""
def _convert_to_test_target(path):
"""Remove the .py suffix and replace all slashes with periods."""
return os.path.relpath(path, os.getcwd())[:-3].replace('/', '.')
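# For example (illustrative path), 'core/domain/exp_domain_test.py' relative
# to the current working directory becomes 'core.domain.exp_domain_test'.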
base_path = os.path.join(os.getcwd(), test_path or '')
result = []
for root in os.listdir(base_path):
if any([s in root for s in ['.git', 'third_party', 'core/tests']]):
continue
if root.endswith('_test.py'):
result.append(_convert_to_test_target(
os.path.join(base_path, root)))
for subroot, _, files in os.walk(os.path.join(base_path, root)):
if _LOAD_TESTS_DIR in subroot and include_load_tests:
for f in files:
if f.endswith('_test.py'):
result.append(_convert_to_test_target(
os.path.join(subroot, f)))
for f in files:
if (f.endswith('_test.py') and
os.path.join('core', 'tests') not in subroot):
result.append(_convert_to_test_target(
os.path.join(subroot, f)))
return result
def main():
"""Run the tests."""
parsed_args = _PARSER.parse_args()
if parsed_args.test_target and parsed_args.test_path:
raise Exception('At most one of test_path and test_target '
'should be specified.')
if parsed_args.test_path and '.' in parsed_args.test_path:
raise Exception('The delimiter in test_path should be a slash (/)')
if parsed_args.test_target and '/' in parsed_args.test_target:
raise Exception('The delimiter in test_target should be a dot (.)')
if parsed_args.test_target:
all_test_targets = [parsed_args.test_target]
else:
include_load_tests = not parsed_args.exclude_load_tests
all_test_targets = _get_all_test_targets(
test_path=parsed_args.test_path,
include_load_tests=include_load_tests)
# Prepare tasks.
task_to_taskspec = {}
tasks = []
for test_target in all_test_targets:
test = TestingTaskSpec(
test_target, parsed_args.generate_coverage_report)
task = TaskThread(test.run, parsed_args.verbose, name=test_target)
task_to_taskspec[task] = test
tasks.append(task)
task_execution_failed = False
try:
_execute_tasks(tasks)
except Exception:
task_execution_failed = True
for task in tasks:
if task.exception:
log(str(task.exception))
print ''
print '+------------------+'
print '| SUMMARY OF TESTS |'
print '+------------------+'
print ''
# Check we ran all tests as expected.
total_count = 0
total_errors = 0
total_failures = 0
for task in tasks:
spec = task_to_taskspec[task]
if not task.finished:
print 'CANCELED %s' % spec.test_target
test_count = 0
elif 'No tests were run' in str(task.exception):
print 'ERROR %s: No tests found.' % spec.test_target
test_count = 0
elif task.exception:
exc_str = str(task.exception).decode('utf-8')
print exc_str[exc_str.find('=') : exc_str.rfind('-')]
tests_failed_regex_match = re.search(
r'Test suite failed: ([0-9]+) tests run, ([0-9]+) errors, '
'([0-9]+) failures',
str(task.exception))
try:
test_count = int(tests_failed_regex_match.group(1))
errors = int(tests_failed_regex_match.group(2))
failures = int(tests_failed_regex_match.group(3))
total_errors += errors
total_failures += failures
print 'FAILED %s: %s errors, %s failures' % (
spec.test_target, errors, failures)
except AttributeError:
# There was an internal error, and the tests did not run. (The
# error message did not match `tests_failed_regex_match`.)
test_count = 0
print ''
print '------------------------------------------------------'
print ' WARNING: FAILED TO RUN TESTS.'
print ''
print ' This is most likely due to an import error.'
print '------------------------------------------------------'
else:
try:
tests_run_regex_match = re.search(
r'Ran ([0-9]+) tests? in ([0-9\.]+)s', task.output)
test_count = int(tests_run_regex_match.group(1))
test_time = float(tests_run_regex_match.group(2))
print ('SUCCESS %s: %d tests (%.1f secs)' %
(spec.test_target, test_count, test_time))
except Exception:
print (
'An unexpected error occurred. '
'Task output:\n%s' % task.output)
total_count += test_count
print ''
if total_count == 0:
raise Exception('WARNING: No tests were run.')
else:
print 'Ran %s test%s in %s test class%s.' % (
total_count, '' if total_count == 1 else 's',
len(tasks), '' if len(tasks) == 1 else 'es')
if total_errors or total_failures:
print '(%s ERRORS, %s FAILURES)' % (total_errors, total_failures)
else:
print 'All tests passed.'
if task_execution_failed:
raise Exception('Task execution failed.')
elif total_errors or total_failures:
raise Exception(
'%s errors, %s failures' % (total_errors, total_failures))
if __name__ == '__main__':
main()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
from datetime import datetime
import math
from typing import Any, List, Optional
from pandas import DataFrame, Series, Timestamp
import pytest
from superset.exceptions import QueryObjectValidationError
from superset.utils import pandas_postprocessing as proc
from superset.utils.core import (
DTTM_ALIAS,
PostProcessingContributionOrientation,
PostProcessingBoxplotWhiskerType,
)
from .base_tests import SupersetTestCase
from .fixtures.dataframes import (
categories_df,
lonlat_df,
names_df,
timeseries_df,
prophet_df,
)
AGGREGATES_SINGLE = {"idx_nulls": {"operator": "sum"}}
AGGREGATES_MULTIPLE = {
"idx_nulls": {"operator": "sum"},
"asc_idx": {"operator": "mean"},
}
def series_to_list(series: Series) -> List[Any]:
"""
Converts a `Series` to a regular list, replacing NaN and inf values with
None.
:param series: Series to convert
:return: list without nan or inf
"""
return [
None
if not isinstance(val, str) and (math.isnan(val) or math.isinf(val))
else val
for val in series.tolist()
]
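# Example (sketch): series_to_list(Series([1.0, float("nan"), "a"]))
# returns [1.0, None, "a"]; strings are kept as-is, NaN/inf become None.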
def round_floats(
floats: List[Optional[float]], precision: int
) -> List[Optional[float]]:
"""
Round a list of floats to a given precision
:param floats: floats to round
:param precision: intended decimal precision
:return: rounded floats
"""
return [round(val, precision) if val else None for val in floats]
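# Example (sketch): round_floats([1.2345, None], 2) returns [1.23, None].
# Note that the truthiness check also maps 0.0 to None, which is acceptable
# for these tests but worth keeping in mind.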
class TestPostProcessing(SupersetTestCase):
def test_flatten_column_after_pivot(self):
"""
Test pivot column flattening function
"""
# single aggregate cases
self.assertEqual(
proc._flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE, column="idx_nulls",
),
"idx_nulls",
)
self.assertEqual(
proc._flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE, column=1234,
),
"1234",
)
self.assertEqual(
proc._flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE, column=Timestamp("2020-09-29T00:00:00"),
),
"2020-09-29 00:00:00",
)
self.assertEqual(
proc._flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE, column="idx_nulls",
),
"idx_nulls",
)
self.assertEqual(
proc._flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE, column=("idx_nulls", "col1"),
),
"col1",
)
self.assertEqual(
proc._flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE, column=("idx_nulls", "col1", 1234),
),
"col1, 1234",
)
# Multiple aggregate cases
self.assertEqual(
proc._flatten_column_after_pivot(
aggregates=AGGREGATES_MULTIPLE, column=("idx_nulls", "asc_idx", "col1"),
),
"idx_nulls, asc_idx, col1",
)
self.assertEqual(
proc._flatten_column_after_pivot(
aggregates=AGGREGATES_MULTIPLE,
column=("idx_nulls", "asc_idx", "col1", 1234),
),
"idx_nulls, asc_idx, col1, 1234",
)
def test_pivot_without_columns(self):
"""
Make sure pivot without columns returns correct DataFrame
"""
df = proc.pivot(df=categories_df, index=["name"], aggregates=AGGREGATES_SINGLE,)
self.assertListEqual(
df.columns.tolist(), ["name", "idx_nulls"],
)
self.assertEqual(len(df), 101)
self.assertEqual(df.sum()[1], 1050)
def test_pivot_with_single_column(self):
"""
Make sure pivot with single column returns correct DataFrame
"""
df = proc.pivot(
df=categories_df,
index=["name"],
columns=["category"],
aggregates=AGGREGATES_SINGLE,
)
self.assertListEqual(
df.columns.tolist(), ["name", "cat0", "cat1", "cat2"],
)
self.assertEqual(len(df), 101)
self.assertEqual(df.sum()[1], 315)
df = proc.pivot(
df=categories_df,
index=["dept"],
columns=["category"],
aggregates=AGGREGATES_SINGLE,
)
self.assertListEqual(
df.columns.tolist(), ["dept", "cat0", "cat1", "cat2"],
)
self.assertEqual(len(df), 5)
def test_pivot_with_multiple_columns(self):
"""
Make sure pivot with multiple columns returns correct DataFrame
"""
df = proc.pivot(
df=categories_df,
index=["name"],
columns=["category", "dept"],
aggregates=AGGREGATES_SINGLE,
)
self.assertEqual(len(df.columns), 1 + 3 * 5) # index + possible permutations
def test_pivot_fill_values(self):
"""
Make sure pivot with fill values returns correct DataFrame
"""
df = proc.pivot(
df=categories_df,
index=["name"],
columns=["category"],
metric_fill_value=1,
aggregates={"idx_nulls": {"operator": "sum"}},
)
self.assertEqual(df.sum()[1], 382)
def test_pivot_exceptions(self):
"""
Make sure pivot raises correct Exceptions
"""
# Missing index
self.assertRaises(
TypeError,
proc.pivot,
df=categories_df,
columns=["dept"],
aggregates=AGGREGATES_SINGLE,
)
# invalid index reference
self.assertRaises(
QueryObjectValidationError,
proc.pivot,
df=categories_df,
index=["abc"],
columns=["dept"],
aggregates=AGGREGATES_SINGLE,
)
# invalid column reference
self.assertRaises(
QueryObjectValidationError,
proc.pivot,
df=categories_df,
index=["dept"],
columns=["abc"],
aggregates=AGGREGATES_SINGLE,
)
# invalid aggregate options
self.assertRaises(
QueryObjectValidationError,
proc.pivot,
df=categories_df,
index=["name"],
columns=["category"],
aggregates={"idx_nulls": {}},
)
def test_aggregate(self):
aggregates = {
"asc sum": {"column": "asc_idx", "operator": "sum"},
"asc q2": {
"column": "asc_idx",
"operator": "percentile",
"options": {"q": 75},
},
"desc q1": {
"column": "desc_idx",
"operator": "percentile",
"options": {"q": 25},
},
}
df = proc.aggregate(
df=categories_df, groupby=["constant"], aggregates=aggregates
)
self.assertListEqual(
df.columns.tolist(), ["constant", "asc sum", "asc q2", "desc q1"]
)
self.assertEqual(series_to_list(df["asc sum"])[0], 5050)
self.assertEqual(series_to_list(df["asc q2"])[0], 75)
self.assertEqual(series_to_list(df["desc q1"])[0], 25)
def test_sort(self):
df = proc.sort(df=categories_df, columns={"category": True, "asc_idx": False})
self.assertEqual(96, series_to_list(df["asc_idx"])[1])
self.assertRaises(
QueryObjectValidationError, proc.sort, df=df, columns={"abc": True}
)
def test_rolling(self):
# sum rolling type
post_df = proc.rolling(
df=timeseries_df,
columns={"y": "y"},
rolling_type="sum",
window=2,
min_periods=0,
)
self.assertListEqual(post_df.columns.tolist(), ["label", "y"])
self.assertListEqual(series_to_list(post_df["y"]), [1.0, 3.0, 5.0, 7.0])
# mean rolling type with alias
post_df = proc.rolling(
df=timeseries_df,
rolling_type="mean",
columns={"y": "y_mean"},
window=10,
min_periods=0,
)
self.assertListEqual(post_df.columns.tolist(), ["label", "y", "y_mean"])
self.assertListEqual(series_to_list(post_df["y_mean"]), [1.0, 1.5, 2.0, 2.5])
# count rolling type
post_df = proc.rolling(
df=timeseries_df,
rolling_type="count",
columns={"y": "y"},
window=10,
min_periods=0,
)
self.assertListEqual(post_df.columns.tolist(), ["label", "y"])
self.assertListEqual(series_to_list(post_df["y"]), [1.0, 2.0, 3.0, 4.0])
# quantile rolling type
post_df = proc.rolling(
df=timeseries_df,
columns={"y": "q1"},
rolling_type="quantile",
rolling_type_options={"quantile": 0.25},
window=10,
min_periods=0,
)
self.assertListEqual(post_df.columns.tolist(), ["label", "y", "q1"])
self.assertListEqual(series_to_list(post_df["q1"]), [1.0, 1.25, 1.5, 1.75])
# incorrect rolling type
self.assertRaises(
QueryObjectValidationError,
proc.rolling,
df=timeseries_df,
columns={"y": "y"},
rolling_type="abc",
window=2,
)
# incorrect rolling type options
self.assertRaises(
QueryObjectValidationError,
proc.rolling,
df=timeseries_df,
columns={"y": "y"},
rolling_type="quantile",
rolling_type_options={"abc": 123},
window=2,
)
def test_select(self):
# reorder columns
post_df = proc.select(df=timeseries_df, columns=["y", "label"])
self.assertListEqual(post_df.columns.tolist(), ["y", "label"])
# one column
post_df = proc.select(df=timeseries_df, columns=["label"])
self.assertListEqual(post_df.columns.tolist(), ["label"])
# rename and select one column
post_df = proc.select(df=timeseries_df, columns=["y"], rename={"y": "y1"})
self.assertListEqual(post_df.columns.tolist(), ["y1"])
# rename one and leave one unchanged
post_df = proc.select(df=timeseries_df, rename={"y": "y1"})
self.assertListEqual(post_df.columns.tolist(), ["label", "y1"])
# drop one column
post_df = proc.select(df=timeseries_df, exclude=["label"])
self.assertListEqual(post_df.columns.tolist(), ["y"])
# rename and drop one column
post_df = proc.select(df=timeseries_df, rename={"y": "y1"}, exclude=["label"])
self.assertListEqual(post_df.columns.tolist(), ["y1"])
# invalid columns
self.assertRaises(
QueryObjectValidationError,
proc.select,
df=timeseries_df,
columns=["abc"],
rename={"abc": "qwerty"},
)
# select renamed column by new name
self.assertRaises(
QueryObjectValidationError,
proc.select,
df=timeseries_df,
columns=["label_new"],
rename={"label": "label_new"},
)
def test_diff(self):
# overwrite column
post_df = proc.diff(df=timeseries_df, columns={"y": "y"})
self.assertListEqual(post_df.columns.tolist(), ["label", "y"])
self.assertListEqual(series_to_list(post_df["y"]), [None, 1.0, 1.0, 1.0])
# add column
post_df = proc.diff(df=timeseries_df, columns={"y": "y1"})
self.assertListEqual(post_df.columns.tolist(), ["label", "y", "y1"])
self.assertListEqual(series_to_list(post_df["y"]), [1.0, 2.0, 3.0, 4.0])
self.assertListEqual(series_to_list(post_df["y1"]), [None, 1.0, 1.0, 1.0])
# look ahead
post_df = proc.diff(df=timeseries_df, columns={"y": "y1"}, periods=-1)
self.assertListEqual(series_to_list(post_df["y1"]), [-1.0, -1.0, -1.0, None])
# invalid column reference
self.assertRaises(
QueryObjectValidationError,
proc.diff,
df=timeseries_df,
columns={"abc": "abc"},
)
def test_cum(self):
# create new column (cumsum)
post_df = proc.cum(df=timeseries_df, columns={"y": "y2"}, operator="sum",)
self.assertListEqual(post_df.columns.tolist(), ["label", "y", "y2"])
self.assertListEqual(series_to_list(post_df["label"]), ["x", "y", "z", "q"])
self.assertListEqual(series_to_list(post_df["y"]), [1.0, 2.0, 3.0, 4.0])
self.assertListEqual(series_to_list(post_df["y2"]), [1.0, 3.0, 6.0, 10.0])
# overwrite column (cumprod)
post_df = proc.cum(df=timeseries_df, columns={"y": "y"}, operator="prod",)
self.assertListEqual(post_df.columns.tolist(), ["label", "y"])
self.assertListEqual(series_to_list(post_df["y"]), [1.0, 2.0, 6.0, 24.0])
# overwrite column (cummin)
post_df = proc.cum(df=timeseries_df, columns={"y": "y"}, operator="min",)
self.assertListEqual(post_df.columns.tolist(), ["label", "y"])
self.assertListEqual(series_to_list(post_df["y"]), [1.0, 1.0, 1.0, 1.0])
# invalid operator
self.assertRaises(
QueryObjectValidationError,
proc.cum,
df=timeseries_df,
columns={"y": "y"},
operator="abc",
)
def test_geohash_decode(self):
# decode lon/lat from geohash
post_df = proc.geohash_decode(
df=lonlat_df[["city", "geohash"]],
geohash="geohash",
latitude="latitude",
longitude="longitude",
)
self.assertListEqual(
sorted(post_df.columns.tolist()),
sorted(["city", "geohash", "latitude", "longitude"]),
)
self.assertListEqual(
round_floats(series_to_list(post_df["longitude"]), 6),
round_floats(series_to_list(lonlat_df["longitude"]), 6),
)
self.assertListEqual(
round_floats(series_to_list(post_df["latitude"]), 6),
round_floats(series_to_list(lonlat_df["latitude"]), 6),
)
def test_geohash_encode(self):
# encode lon/lat into geohash
post_df = proc.geohash_encode(
df=lonlat_df[["city", "latitude", "longitude"]],
latitude="latitude",
longitude="longitude",
geohash="geohash",
)
self.assertListEqual(
sorted(post_df.columns.tolist()),
sorted(["city", "geohash", "latitude", "longitude"]),
)
self.assertListEqual(
series_to_list(post_df["geohash"]), series_to_list(lonlat_df["geohash"]),
)
def test_geodetic_parse(self):
# parse geodetic string with altitude into lon/lat/altitude
post_df = proc.geodetic_parse(
df=lonlat_df[["city", "geodetic"]],
geodetic="geodetic",
latitude="latitude",
longitude="longitude",
altitude="altitude",
)
self.assertListEqual(
sorted(post_df.columns.tolist()),
sorted(["city", "geodetic", "latitude", "longitude", "altitude"]),
)
self.assertListEqual(
series_to_list(post_df["longitude"]),
series_to_list(lonlat_df["longitude"]),
)
self.assertListEqual(
series_to_list(post_df["latitude"]), series_to_list(lonlat_df["latitude"]),
)
self.assertListEqual(
series_to_list(post_df["altitude"]), series_to_list(lonlat_df["altitude"]),
)
# parse geodetic string into lon/lat
post_df = proc.geodetic_parse(
df=lonlat_df[["city", "geodetic"]],
geodetic="geodetic",
latitude="latitude",
longitude="longitude",
)
self.assertListEqual(
sorted(post_df.columns.tolist()),
sorted(["city", "geodetic", "latitude", "longitude"]),
)
self.assertListEqual(
series_to_list(post_df["longitude"]),
series_to_list(lonlat_df["longitude"]),
)
self.assertListEqual(
series_to_list(post_df["latitude"]), series_to_list(lonlat_df["latitude"]),
)
def test_contribution(self):
df = DataFrame(
{
DTTM_ALIAS: [
datetime(2020, 7, 16, 14, 49),
datetime(2020, 7, 16, 14, 50),
],
"a": [1, 3],
"b": [1, 9],
}
)
with pytest.raises(QueryObjectValidationError, match="not numeric"):
proc.contribution(df, columns=[DTTM_ALIAS])
with pytest.raises(QueryObjectValidationError, match="same length"):
proc.contribution(df, columns=["a"], rename_columns=["aa", "bb"])
# cell contribution across row
processed_df = proc.contribution(
df, orientation=PostProcessingContributionOrientation.ROW,
)
self.assertListEqual(processed_df.columns.tolist(), [DTTM_ALIAS, "a", "b"])
self.assertListEqual(processed_df["a"].tolist(), [0.5, 0.25])
self.assertListEqual(processed_df["b"].tolist(), [0.5, 0.75])
# cell contribution across column without temporal column
df.pop(DTTM_ALIAS)
processed_df = proc.contribution(
df, orientation=PostProcessingContributionOrientation.COLUMN
)
self.assertListEqual(processed_df.columns.tolist(), ["a", "b"])
self.assertListEqual(processed_df["a"].tolist(), [0.25, 0.75])
self.assertListEqual(processed_df["b"].tolist(), [0.1, 0.9])
# contribution only on selected columns
processed_df = proc.contribution(
df,
orientation=PostProcessingContributionOrientation.COLUMN,
columns=["a"],
rename_columns=["pct_a"],
)
self.assertListEqual(processed_df.columns.tolist(), ["a", "b", "pct_a"])
self.assertListEqual(processed_df["a"].tolist(), [1, 3])
self.assertListEqual(processed_df["b"].tolist(), [1, 9])
self.assertListEqual(processed_df["pct_a"].tolist(), [0.25, 0.75])
def test_prophet_valid(self):
pytest.importorskip("fbprophet")
df = proc.prophet(
df=prophet_df, time_grain="P1M", periods=3, confidence_interval=0.9
)
columns = {column for column in df.columns}
assert columns == {
DTTM_ALIAS,
"a__yhat",
"a__yhat_upper",
"a__yhat_lower",
"a",
"b__yhat",
"b__yhat_upper",
"b__yhat_lower",
"b",
}
assert df[DTTM_ALIAS].iloc[0].to_pydatetime() == datetime(2018, 12, 31)
assert df[DTTM_ALIAS].iloc[-1].to_pydatetime() == datetime(2022, 3, 31)
assert len(df) == 7
df = proc.prophet(
df=prophet_df, time_grain="P1M", periods=5, confidence_interval=0.9
)
assert df[DTTM_ALIAS].iloc[0].to_pydatetime() == datetime(2018, 12, 31)
assert df[DTTM_ALIAS].iloc[-1].to_pydatetime() == datetime(2022, 5, 31)
assert len(df) == 9
def test_prophet_missing_temporal_column(self):
df = prophet_df.drop(DTTM_ALIAS, axis=1)
self.assertRaises(
QueryObjectValidationError,
proc.prophet,
df=df,
time_grain="P1M",
periods=3,
confidence_interval=0.9,
)
def test_prophet_incorrect_confidence_interval(self):
self.assertRaises(
QueryObjectValidationError,
proc.prophet,
df=prophet_df,
time_grain="P1M",
periods=3,
confidence_interval=0.0,
)
self.assertRaises(
QueryObjectValidationError,
proc.prophet,
df=prophet_df,
time_grain="P1M",
periods=3,
confidence_interval=1.0,
)
def test_prophet_incorrect_periods(self):
self.assertRaises(
QueryObjectValidationError,
proc.prophet,
df=prophet_df,
time_grain="P1M",
periods=0,
confidence_interval=0.8,
)
def test_prophet_incorrect_time_grain(self):
self.assertRaises(
QueryObjectValidationError,
proc.prophet,
df=prophet_df,
time_grain="yearly",
periods=10,
confidence_interval=0.8,
)
def test_boxplot_tukey(self):
df = proc.boxplot(
df=names_df,
groupby=["region"],
whisker_type=PostProcessingBoxplotWhiskerType.TUKEY,
metrics=["cars"],
)
columns = {column for column in df.columns}
assert columns == {
"cars__mean",
"cars__median",
"cars__q1",
"cars__q3",
"cars__max",
"cars__min",
"cars__count",
"cars__outliers",
"region",
}
assert len(df) == 4
def test_boxplot_min_max(self):
df = proc.boxplot(
df=names_df,
groupby=["region"],
whisker_type=PostProcessingBoxplotWhiskerType.MINMAX,
metrics=["cars"],
)
columns = {column for column in df.columns}
assert columns == {
"cars__mean",
"cars__median",
"cars__q1",
"cars__q3",
"cars__max",
"cars__min",
"cars__count",
"cars__outliers",
"region",
}
assert len(df) == 4
def test_boxplot_percentile(self):
df = proc.boxplot(
df=names_df,
groupby=["region"],
whisker_type=PostProcessingBoxplotWhiskerType.PERCENTILE,
metrics=["cars"],
percentiles=[1, 99],
)
columns = {column for column in df.columns}
assert columns == {
"cars__mean",
"cars__median",
"cars__q1",
"cars__q3",
"cars__max",
"cars__min",
"cars__count",
"cars__outliers",
"region",
}
assert len(df) == 4
def test_boxplot_percentile_incorrect_params(self):
with pytest.raises(QueryObjectValidationError):
proc.boxplot(
df=names_df,
groupby=["region"],
whisker_type=PostProcessingBoxplotWhiskerType.PERCENTILE,
metrics=["cars"],
)
with pytest.raises(QueryObjectValidationError):
proc.boxplot(
df=names_df,
groupby=["region"],
whisker_type=PostProcessingBoxplotWhiskerType.PERCENTILE,
metrics=["cars"],
percentiles=[10],
)
with pytest.raises(QueryObjectValidationError):
proc.boxplot(
df=names_df,
groupby=["region"],
whisker_type=PostProcessingBoxplotWhiskerType.PERCENTILE,
metrics=["cars"],
percentiles=[90, 10],
)
with pytest.raises(QueryObjectValidationError):
proc.boxplot(
df=names_df,
groupby=["region"],
whisker_type=PostProcessingBoxplotWhiskerType.PERCENTILE,
metrics=["cars"],
percentiles=[10, 90, 10],
)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import importlib.resources
from textwrap import dedent
import pytest
from pants.backend.scala.compile.scalac import rules as scalac_rules
from pants.backend.scala.subsystems.scalatest import Scalatest
from pants.backend.scala.target_types import (
ScalaSourcesGeneratorTarget,
ScalatestTestsGeneratorTarget,
)
from pants.backend.scala.target_types import rules as scala_target_types_rules
from pants.backend.scala.target_types import rules as target_types_rules
from pants.backend.scala.test.scalatest import ScalatestTestFieldSet
from pants.backend.scala.test.scalatest import rules as scalatest_rules
from pants.build_graph.address import Address
from pants.core.goals.test import TestResult
from pants.core.target_types import FilesGeneratorTarget, FileTarget, RelocatedFiles
from pants.core.util_rules import config_files, source_files, system_binaries
from pants.engine.addresses import Addresses
from pants.engine.target import CoarsenedTargets
from pants.jvm import classpath
from pants.jvm.jdk_rules import rules as jdk_util_rules
from pants.jvm.non_jvm_dependencies import rules as non_jvm_dependencies_rules
from pants.jvm.resolve.common import Coordinate
from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules
from pants.jvm.resolve.coursier_setup import rules as coursier_setup_rules
from pants.jvm.target_types import JvmArtifactTarget
from pants.jvm.testutil import maybe_skip_jdk_test
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, QueryRule, RuleRunner
# TODO(12812): Switch tests to using parsed scalatest.xml results instead of scanning stdout strings.
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
preserve_tmpdirs=True,
rules=[
*classpath.rules(),
*config_files.rules(),
*coursier_fetch_rules(),
*coursier_setup_rules(),
*jdk_util_rules(),
*non_jvm_dependencies_rules(),
*scalac_rules(),
*scalatest_rules(),
*scala_target_types_rules(),
*scalac_rules(),
*source_files.rules(),
*system_binaries.rules(),
*target_types_rules(),
*util_rules(),
QueryRule(CoarsenedTargets, (Addresses,)),
QueryRule(TestResult, (ScalatestTestFieldSet,)),
QueryRule(Scalatest, ()),
],
target_types=[
JvmArtifactTarget,
FileTarget,
FilesGeneratorTarget,
RelocatedFiles,
ScalaSourcesGeneratorTarget,
ScalatestTestsGeneratorTarget,
],
)
rule_runner.set_options(args=[], env_inherit=PYTHON_BOOTSTRAP_ENV)
return rule_runner
@maybe_skip_jdk_test
def test_simple_success(rule_runner: RuleRunner) -> None:
scalatest_coord = Coordinate(group="org.scalatest", artifact="scalatest_2.13", version="3.2.10")
rule_runner.write_files(
{
"3rdparty/jvm/default.lock": importlib.resources.read_text(
*Scalatest.default_lockfile_resource
),
"BUILD": dedent(
f"""\
jvm_artifact(
name = 'org.scalatest_scalatest',
group = '{scalatest_coord.group}',
artifact = '{scalatest_coord.artifact}',
version = '{scalatest_coord.version}',
)
scalatest_tests(
name='example-test',
dependencies= [
':org.scalatest_scalatest',
],
)
"""
),
"SimpleSpec.scala": dedent(
"""
package org.pantsbuild.example;
import org.scalatest.funspec.AnyFunSpec
class SimpleSpec extends AnyFunSpec {
describe("Simple") {
it("should be simple") {
assert("Simple".toLowerCase == "simple")
}
}
}
"""
),
}
)
test_result = run_scalatest_test(rule_runner, "example-test", "SimpleSpec.scala")
assert test_result.exit_code == 0
assert "Tests: succeeded 1, failed 0, canceled 0, ignored 0, pending 0" in test_result.stdout
assert test_result.xml_results and test_result.xml_results.files
@maybe_skip_jdk_test
def test_file_deps_success(rule_runner: RuleRunner) -> None:
scalatest_coord = Coordinate(group="org.scalatest", artifact="scalatest_2.13", version="3.2.10")
rule_runner.write_files(
{
"3rdparty/jvm/default.lock": importlib.resources.read_text(
*Scalatest.default_lockfile_resource
),
"BUILD": dedent(
f"""\
jvm_artifact(
name = 'org.scalatest_scalatest',
group = '{scalatest_coord.group}',
artifact = '{scalatest_coord.artifact}',
version = '{scalatest_coord.version}',
)
scalatest_tests(
name='example-test',
dependencies= [
':org.scalatest_scalatest',
':ducks',
],
)
file(
name="ducks",
source="ducks.txt",
)
"""
),
"SimpleSpec.scala": dedent(
"""
package org.pantsbuild.example;
import org.scalatest.funspec.AnyFunSpec
import java.nio.file.Files
import java.nio.file.Path
class SimpleSpec extends AnyFunSpec {
describe("Ducks") {
it("should be ducks") {
assert(Files.readString(Path.of("ducks.txt")) == "lol ducks")
}
}
}
"""
),
"ducks.txt": "lol ducks",
}
)
test_result = run_scalatest_test(rule_runner, "example-test", "SimpleSpec.scala")
assert test_result.exit_code == 0
assert "Tests: succeeded 1, failed 0, canceled 0, ignored 0, pending 0" in test_result.stdout
assert test_result.xml_results and test_result.xml_results.files
def run_scalatest_test(
rule_runner: RuleRunner, target_name: str, relative_file_path: str
) -> TestResult:
tgt = rule_runner.get_target(
Address(spec_path="", target_name=target_name, relative_file_path=relative_file_path)
)
return rule_runner.request(TestResult, [ScalatestTestFieldSet.create(tgt)])
|
# Copyright 2019 Robert Bosch GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from tracetools_test.case import TraceTestCase
VERSION_REGEX = r'^[0-9]+\.[0-9]+\.[0-9]+$'
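# Matches semantic-version-style strings such as '1.2.3' (three dot-separated
# numeric components with nothing else on the line).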
class TestNode(TraceTestCase):
def __init__(self, *args) -> None:
super().__init__(
*args,
session_name_prefix='session-test-node-creation',
events_ros=[
'ros2:rcl_init',
'ros2:rcl_node_init',
],
package='test_tracetools',
nodes=['test_publisher'],
)
def test_all(self):
# Check events as set
self.assertEventsSet(self._events_ros)
# Check fields
rcl_init_events = self.get_events_with_name('ros2:rcl_init')
for event in rcl_init_events:
self.assertValidHandle(event, 'context_handle')
# TODO actually compare to version fetched from the tracetools package?
version_field = self.get_field(event, 'version')
self.assertRegex(version_field, VERSION_REGEX, 'invalid version number')
rcl_node_init_events = self.get_events_with_name('ros2:rcl_node_init')
for event in rcl_node_init_events:
self.assertValidHandle(event, ['node_handle', 'rmw_handle'])
self.assertStringFieldNotEmpty(event, 'node_name')
self.assertStringFieldNotEmpty(event, 'namespace')
# Check that the launched nodes have a corresponding rcl_node_init event
node_name_fields = [self.get_field(e, 'node_name') for e in rcl_node_init_events]
for node_name in self._nodes:
self.assertTrue(
node_name in node_name_fields,
f'cannot find node_init event for node name: {node_name} ({node_name_fields})',
)
if __name__ == '__main__':
unittest.main()
|
"""
Defines the base class for optimizations as well as a number of useful
generic optimization tools.
"""
import abc
import contextlib
import copy
import inspect
import logging
import pdb
import sys
import time
import traceback
import warnings
from collections import OrderedDict, UserList, defaultdict, deque
from collections.abc import Iterable
from functools import partial, reduce
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import aesara
from aesara.assert_op import Assert, assert_op
from aesara.configdefaults import config
from aesara.graph import destroyhandler as dh
from aesara.graph.basic import (
Apply,
Constant,
Variable,
applys_between,
io_toposort,
nodes_constructed,
)
from aesara.graph.features import Feature, NodeFinder
from aesara.graph.fg import FunctionGraph, InconsistencyError
from aesara.graph.op import Op
from aesara.graph.utils import AssocList
from aesara.misc.ordered_set import OrderedSet
from aesara.utils import flatten
_logger = logging.getLogger("aesara.graph.opt")
_optimizer_idx = [0]
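# Mutable module-level counter used by GlobalOptimizer.__hash__ below to give
# each optimizer instance a unique, stable hash the first time it is hashed.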
class LocalMetaOptimizerSkipAssertionError(AssertionError):
"""This is an AssertionError, but instead of having the
LocalMetaOptimizer print the error, it just skips that
compilation.
"""
class GlobalOptimizer(abc.ABC):
"""A optimizer that can be applied to a `FunctionGraph` in order to transform it.
It can represent an optimization or, in general, any kind of transformation
one could apply to a `FunctionGraph`.
"""
@abc.abstractmethod
def apply(self, fgraph):
"""Apply the optimization to a `FunctionGraph`.
It may use all the methods defined by the `FunctionGraph`. If the
`GlobalOptimizer` needs to use a certain tool, such as an
`InstanceFinder`, it can do so in its `add_requirements` method.
"""
raise NotImplementedError()
def optimize(self, fgraph, *args, **kwargs):
"""
This is meant as a shortcut for the following::
opt.add_requirements(fgraph)
opt.apply(fgraph)
"""
self.add_requirements(fgraph)
ret = self.apply(fgraph, *args, **kwargs)
return ret
def __call__(self, fgraph):
"""Optimize a `FunctionGraph`.
This is the same as ``self.optimize(fgraph)``.
"""
return self.optimize(fgraph)
def add_requirements(self, fgraph):
"""Add features to `fgraph` that are required to apply the optimization.
For example::
fgraph.attach_feature(History())
fgraph.attach_feature(MyFeature())
# etc.
"""
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
name = getattr(self, "name", None)
print(
f"{' ' * level}{self.__class__.__name__} {name} id={id(self)}",
file=stream,
)
@staticmethod
def print_profile(stream, prof, level=0):
if prof is not None:
raise NotImplementedError(
"The function print_profile must be overridden if the"
" optimizer return profiling information."
)
def __hash__(self):
if not hasattr(self, "_optimizer_idx"):
self._optimizer_idx = _optimizer_idx[0]
_optimizer_idx[0] += 1
return self._optimizer_idx
class FromFunctionOptimizer(GlobalOptimizer):
"""A `GlobalOptimizer` constructed from a given function."""
def __init__(self, fn, requirements=()):
self.fn = fn
self.requirements = requirements
def apply(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def add_requirements(self, fgraph):
for req in self.requirements:
req(fgraph)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print(f"{' ' * level}{self.apply} id={id(self)}", file=stream)
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def __str__(self):
return self.__name__
def optimizer(f):
"""Decorator for `FromFunctionOptimizer`."""
rval = FromFunctionOptimizer(f)
rval.__name__ = f.__name__
return rval
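# Example usage (sketch): the decorated function receives the FunctionGraph
# and mutates it in place.
#
#     @optimizer
#     def my_global_opt(fgraph):
#         ...  # inspect/rewrite fgraph here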
def inplace_optimizer(f):
"""Decorator for `FromFunctionOptimizer` that also adds the `DestroyHandler` features."""
dh_handler = dh.DestroyHandler
requirements = (lambda fgraph: fgraph.attach_feature(dh_handler()),)
rval = FromFunctionOptimizer(f, requirements)
rval.__name__ = f.__name__
return rval
class SeqOptimizer(GlobalOptimizer, UserList):
"""A `GlobalOptimizer` that applies a list of optimizers sequentially."""
@staticmethod
def warn(exc, self, optimizer):
"""Default ``failure_callback`` for `SeqOptimizer`."""
_logger.error(f"SeqOptimizer apply {optimizer}")
_logger.error("Traceback:")
_logger.error(traceback.format_exc())
if config.on_opt_error == "raise":
raise exc
elif config.on_opt_error == "pdb":
pdb.post_mortem(sys.exc_info()[2])
def __init__(self, *opts, failure_callback=None):
"""
Parameters
----------
*opts :
The list of optimizers to be applied to the `FunctionGraph`
failure_callback : callable or None
Keyword only argument. A callback used when a failure
happens during optimization.
"""
if len(opts) == 1 and isinstance(opts[0], (list, tuple)):
opts = opts[0]
super().__init__(opts)
self.failure_callback = failure_callback
def apply(self, fgraph):
"""Applies each `GlobalOptimizer` in ``self.data`` to `fgraph`."""
l = []
if fgraph.profile:
validate_before = fgraph.profile.validate_time
sub_validate_time = [validate_before]
callbacks_before = fgraph.execute_callbacks_times.copy()
else:
sub_validate_time = []
callbacks_before = []
callback_before = fgraph.execute_callbacks_time
nb_node_before = len(fgraph.apply_nodes)
sub_profs = []
nb_nodes = []
self.pre_profile = (
self,
l,
-1,
-1,
nb_node_before,
-1,
sub_profs,
sub_validate_time,
nb_nodes,
{},
)
try:
for optimizer in self.data:
try:
nb_nodes_before = len(fgraph.apply_nodes)
t0 = time.time()
sub_prof = optimizer.optimize(fgraph)
l.append(float(time.time() - t0))
sub_profs.append(sub_prof)
nb_nodes.append((nb_nodes_before, len(fgraph.apply_nodes)))
if fgraph.profile:
sub_validate_time.append(fgraph.profile.validate_time)
except AssertionError:
# do not catch Assertion failures
raise
except Exception as e:
if self.failure_callback:
self.failure_callback(e, self, optimizer)
continue
else:
raise
finally:
if fgraph.profile:
validate_time = fgraph.profile.validate_time - validate_before
callbacks_time = {}
for k, v in fgraph.execute_callbacks_times.items():
if k in callbacks_before:
t = v - callbacks_before[k]
if t > 0:
callbacks_time[k] = t
else:
callbacks_time[k] = v
else:
validate_time = None
callbacks_time = {}
callback_time = fgraph.execute_callbacks_time - callback_before
self.pre_profile = (
self,
l,
validate_time,
callback_time,
nb_node_before,
len(fgraph.apply_nodes),
sub_profs,
sub_validate_time,
nb_nodes,
callbacks_time,
)
return self.pre_profile
def __repr__(self):
return f"SeqOpt({self.data})"
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
name = getattr(self, "name", None)
print(
f"{' ' * level}{self.__class__.__name__} {name} id={id(self)}", file=stream
)
# This way, -1 will do all depth
if depth != 0:
depth -= 1
for opt in self.data:
opt.print_summary(stream, level=(level + 2), depth=depth)
@staticmethod
def print_profile(stream, prof, level=0):
(
opts,
prof,
validate_time,
callback_time,
nb_node_before,
nb_node_after,
sub_profs,
sub_validate_time,
nb_nodes,
callbacks_time,
) = prof
blanc = " " * level
print(blanc, "SeqOptimizer", end=" ", file=stream)
if hasattr(opts, "name"):
print(blanc, opts.name, end=" ", file=stream)
elif hasattr(opts, "__name__"):
print(blanc, opts.__name__, end=" ", file=stream)
print(
(
f" time {sum(prof):.3f}s for {int(nb_node_before)}/{int(nb_node_after)} nodes"
" before/after optimization"
),
file=stream,
)
print(blanc, f" {callback_time:.3f}s for callback", file=stream)
print(blanc, f" {validate_time:.3f}s for fgraph.validate()", file=stream)
if callback_time > 1:
print(blanc, " callbacks_time", file=stream)
for i in sorted(callbacks_time.items(), key=lambda a: -a[1]):
if i[1] > 0:
# We want to have the __str__ called, so we can't
# just print i.
print(blanc, " ", i[0], ",", i[1], file=stream)
if level == 0:
print(
blanc,
" time - (name, class, index, nodes before, nodes after) - validate time",
file=stream,
)
ll = []
for (opt, nb_n) in zip(opts, nb_nodes):
if hasattr(opt, "__name__"):
name = opt.__name__
else:
name = opt.name
idx = opts.index(opt)
ll.append((name, opt.__class__.__name__, idx) + nb_n)
lll = sorted(zip(prof, ll), key=lambda a: a[0])
for (t, opt) in lll[::-1]:
i = opt[2]
if sub_validate_time:
val_time = sub_validate_time[i + 1] - sub_validate_time[i]
print(
blanc,
f" {t:.6f}s - {opt} - {val_time:.3f}s",
file=stream,
)
else:
print(blanc, f" {t:.6f}s - {opt}", file=stream)
if sub_profs[i]:
opts[i].print_profile(stream, sub_profs[i], level=level + 1)
print(file=stream)
@staticmethod
def merge_profile(prof1, prof2):
"""Merge two profiles."""
new_t = [] # the time for the optimization
new_l = [] # the optimization
new_sub_profile = []
# merge common(same object) opt
for l in set(prof1[0]).intersection(set(prof2[0])):
idx1 = prof1[0].index(l)
idx2 = prof2[0].index(l)
new_t.append(prof1[1][idx1] + prof2[1][idx2])
new_l.append(l)
if hasattr(l, "merge_profile"):
assert len(prof1[6][idx1]) == len(prof2[6][idx2])
new_sub_profile.append(l.merge_profile(prof1[6][idx1], prof2[6][idx2]))
else:
new_sub_profile.append(None)
# merge not common opt
from io import StringIO
for l in set(prof1[0]).symmetric_difference(set(prof2[0])):
# The set trick above only works when both profiles refer to the same
# optimization objects; it doesn't work for merely equivalent optimizations,
# so we try to merge equivalent optimizations here.
new_l_names = [o.name for o in new_l]
if l.name in new_l_names:
idx = new_l_names.index(l.name)
io1 = StringIO()
io2 = StringIO()
l.print_summary(io1)
new_l[idx].print_summary(io2)
if io1.read() == io2.read():
if l in prof1[0]:
p = prof1
else:
p = prof2
new_t[idx] += p[1][p[0].index(l)]
if hasattr(l, "merge_profile"):
assert len(p[6][p[0].index(l)]) == len(new_sub_profile[idx])
new_sub_profile[idx] = l.merge_profile(
new_sub_profile[idx], p[6][p[0].index(l)]
)
else:
new_sub_profile[idx] = None
continue
if l in prof1[0]:
p = prof1
else:
p = prof2
new_t.append(p[1][p[0].index(l)])
idx = p[0].index(l)
new_l.append(l)
new_sub_profile.append(p[6][idx])
new_opt = SeqOptimizer(*new_l)
new_nb_nodes = []
for p1, p2 in zip(prof1[8], prof2[8]):
new_nb_nodes.append((p1[0] + p2[0], p1[1] + p2[1]))
new_nb_nodes.extend(prof1[8][len(new_nb_nodes) :])
new_nb_nodes.extend(prof2[8][len(new_nb_nodes) :])
new_callbacks_times = merge_dict(prof1[9], prof2[9])
# We need to assert based on the name as we merge also based on
# the name.
assert {l.name for l in prof1[0]}.issubset({l.name for l in new_l})
assert {l.name for l in prof2[0]}.issubset({l.name for l in new_l})
assert len(new_t) == len(new_opt) == len(new_sub_profile)
return (
new_opt,
new_t,
prof1[2] + prof2[2],
prof1[3] + prof2[3],
-1,
-1,
new_sub_profile,
[],
new_nb_nodes,
new_callbacks_times,
)
class MergeFeature(Feature):
"""Keeps track of variables in a `FunctionGraph` that cannot be merged together.
That way, the `MergeOptimizer` can remember the result of the last
merge-pass on the `FunctionGraph`.
"""
def on_attach(self, fgraph):
assert not hasattr(fgraph, "merge_feature")
fgraph.merge_feature = self
# For constants
self.seen_constants = set()
# variable -> signature (for constants)
self.const_sig = AssocList()
# signature -> variable (for constants)
self.const_sig_inv = AssocList()
# For all Apply nodes
# Set of distinct (not mergeable) nodes
self.nodes_seen = set()
# Ordered set of distinct (not mergeable) nodes without any input
self.noinput_nodes = OrderedSet()
# Each element of scheduled is a list of list of (out, new_out) pairs.
# Each list of pairs represent the substitution needed to replace all
# the outputs of a node with the outputs of a replacement candidate.
# Each node can have several candidates. For instance, if "node" has
# 2 outputs, and there are 3 replacement candidates, we will have:
# self.scheduled = [
# [[(node.out1, cand1.out1), (node.out2, cand1.out2)],
# [(node.out1, cand2.out1), (node.out2, cand2.out2)],
# [(node.out1, cand3.out1), (node.out2, cand3.out2)]]]
self.scheduled = []
# List of (node, candidate) pairs, where we tried to replace node by
# candidate, but it failed. This is used to avoid infinite loops
# during the replacement phase.
self.blacklist = []
for node in fgraph.toposort():
self.on_import(fgraph, node, "on_attach")
def on_change_input(self, fgraph, node, i, r, new_r, reason):
# If inputs to node change, it is not guaranteed that it is distinct
# from the other nodes in nodes_seen
if node in self.nodes_seen:
self.nodes_seen.discard(node)
self.process_node(fgraph, node)
# Since we are in on_change_input, node should have inputs.
if not isinstance(node, str):
assert node.inputs
if isinstance(new_r, Constant):
self.process_constant(fgraph, new_r)
def on_import(self, fgraph, node, reason):
for c in node.inputs:
if isinstance(c, Constant):
self.process_constant(fgraph, c)
self.process_node(fgraph, node)
def on_prune(self, fgraph, node, reason):
self.nodes_seen.discard(node)
if not node.inputs:
self.noinput_nodes.discard(node)
for c in node.inputs:
if isinstance(c, Constant) and (len(fgraph.clients[c]) <= 1):
# This was the last node using this constant
sig = self.const_sig[c]
self.const_sig.discard(c)
self.const_sig_inv.discard(sig)
self.seen_constants.discard(id(c))
def process_constant(self, fgraph, c):
"""Check if a constant `c` can be merged, and queue that replacement."""
if id(c) in self.seen_constants:
return
sig = c.merge_signature()
other_c = self.const_sig_inv.get(sig, None)
if other_c is not None:
# Multiple names would clobber each other,
# so by convention we keep the last name seen.
if c.name:
other_c.name = c.name
self.scheduled.append([[(c, other_c, "merge")]])
else:
# this is a new constant
self.const_sig[c] = sig
self.const_sig_inv[sig] = c
self.seen_constants.add(id(c))
def process_node(self, fgraph, node):
"""Check if a `node` can be merged, and queue that replacement."""
if node in self.nodes_seen:
return
node_has_assert = False
# These asserts ensure that the fgraph has set the clients field
# properly.
# The clients should at least contain `node` itself!
if node.inputs:
# Take the smallest clients list. Some ops, like elemwise,
# have optimizations that put constants as the first inputs.
# Since constants generally have more clients than other kinds
# of nodes, always using inputs[0] would make us look at more
# nodes than necessary, so picking the smaller clients list
# between inputs 0 and -1 speeds up the optimization.
a_clients = fgraph.clients[node.inputs[0]]
b_clients = fgraph.clients[node.inputs[-1]]
if len(a_clients) < len(b_clients):
clients = a_clients
else:
clients = b_clients
assert len(clients) > 0
merge_candidates = [c for c, i in clients if c in self.nodes_seen]
# Put all clients of Assert inputs (if any) into merge_candidates
# TODO: Deactivated for now as this causes cycles in the graph.
# (There is a second deactivation part below.)
for i in []: # node.inputs:
if i.owner and isinstance(i.owner.op, Assert):
node_has_assert = True
i_clients = fgraph.clients[i.owner.inputs[0]]
assert_clients = [c for (c, _) in i_clients if c in self.nodes_seen]
for idx in range(len(assert_clients)):
client = assert_clients[idx]
if isinstance(i.owner.op, Assert):
o_clients = fgraph.clients[client.outputs[0]]
for c in o_clients:
if c[0] in self.nodes_seen:
assert_clients.append(c[0])
merge_candidates.extend(assert_clients)
else:
# If two nodes have no input, but perform the same operation,
# they are not always constant-folded, so we want to merge them.
# In that case, the candidates are all the nodes without inputs.
merge_candidates = self.noinput_nodes
replacement_candidates = []
for candidate in merge_candidates:
if candidate is node:
continue
if len(node.inputs) != len(candidate.inputs):
continue
cand_has_assert = False
# Get input list of the candidate with assert removed
cand_inputs_assert_removed = []
# TODO: Deactivated while Assert merging is disabled. (See above and below.)
for i in []: # candidate.inputs:
if i.owner and isinstance(i.owner.op, Assert):
cand_has_assert = True
cand_inputs_assert_removed.append(i.owner.inputs[0])
else:
cand_inputs_assert_removed.append(i)
# TODO: Remove this when Assert merging is re-enabled. (See above.)
# Without Assert merging we can still look for identical Asserts,
# so we should not treat Asserts separately for now.
cand_inputs_assert_removed = candidate.inputs
# Get input list of the node with assert removed
if node_has_assert:
node_inputs_assert_removed = []
for i in node.inputs:
if i.owner and isinstance(i.owner.op, Assert):
node_inputs_assert_removed.append(i.owner.inputs[0])
else:
node_inputs_assert_removed.append(i)
else:
node_inputs_assert_removed = node.inputs
inputs_match = all(
node_in is cand_in
for node_in, cand_in in zip(
node_inputs_assert_removed, cand_inputs_assert_removed
)
)
if inputs_match and node.op == candidate.op:
if (node, candidate) in self.blacklist:
# They were already tried, and there was an error
continue
# replace node with candidate
if not (node_has_assert or cand_has_assert):
# Schedule transfer of clients from node to candidate
pairs = list(
zip(
node.outputs,
candidate.outputs,
["merge"] * len(node.outputs),
)
)
# If the current node has an assert input, it should not be
# replaced with a candidate node that has no assert input.
elif node_has_assert and not cand_has_assert:
pairs = list(
zip(
candidate.outputs,
node.outputs,
["merge"] * len(node.outputs),
)
)
else:
new_inputs = self.get_merged_assert_input(node, candidate)
new_node = node.op(*new_inputs)
pairs = list(
zip(
node.outputs,
new_node.owner.outputs,
["new_node"] * len(node.outputs),
)
) + list(
zip(
candidate.outputs,
new_node.owner.outputs,
["new_node"] * len(node.outputs),
)
)
# transfer names
for pair in pairs:
node_output, cand_output = pair[:2]
# clobber old name with new one
# it's arbitrary... one of the names has to go
if node_output.name:
cand_output.name = node_output.name
replacement_candidates.append(pairs)
if replacement_candidates:
self.scheduled.append(replacement_candidates)
else:
self.nodes_seen.add(node)
if not node.inputs:
self.noinput_nodes.add(node)
def get_merged_assert_input(self, node, candidate):
new_inputs = []
for node_i, cand_i in zip(node.inputs, candidate.inputs):
# if node_i is assert
if node_i.owner and isinstance(node_i.owner.op, Assert):
# node_i is assert, cand_i is assert
if cand_i.owner and isinstance(cand_i.owner.op, Assert):
# Here two assert nodes are merged.
# Step 1. Merge conditions of both assert nodes.
# Step 2. Make the new assert node
node_cond = node_i.owner.inputs[1:]
cand_cond = cand_i.owner.inputs[1:]
new_cond = list(set(node_cond + cand_cond))
new_inputs.append(assert_op(node_i.owner.inputs[0], *new_cond))
# node_i is assert, cand_i is not assert
else:
new_inputs.append(node_i)
else:
# if node_i is not an assert node, append cand_i
new_inputs.append(cand_i)
return new_inputs
class MergeOptimizer(GlobalOptimizer):
r"""Merges parts of the graph that are identical and redundant.
The basic principle is that if two `Apply`\s have `Op`\s that compare equal, and
identical inputs, then they do not both need to be computed. The clients of
one are transferred to the other and one of them is removed from the graph.
This procedure is carried out in input-to-output order throughout the graph.
The first step of merging is constant-merging, so that all clients of an
``int(1)`` for example, are transferred to just one particular instance of
``int(1)``.
"""
def add_requirements(self, fgraph):
if not hasattr(fgraph, "merge_feature"):
fgraph.attach_feature(MergeFeature())
def apply(self, fgraph):
# Constant and non-constant are now applied in the same phase.
# I am not sure why, but it seems to be faster this way.
sched = fgraph.merge_feature.scheduled
nb_fail = 0
t0 = time.time()
if fgraph.profile:
validate_before = fgraph.profile.validate_time
callback_before = fgraph.execute_callbacks_time
callbacks_before = fgraph.execute_callbacks_times.copy()
nb_merged = 0
nb_constant = 0
while sched:
pairs_list = sched.pop()
success = True
for pairs_ in pairs_list:
# We must check again the equivalence, as the graph
# could've changed. If so, doing the replacement can
# introduce a node that depends on itself. Doing the
# full check of such cycles every time is very time
# consuming. I think this double check is faster than
# doing the full cycle check. The full cycle check is
# skipped by validate() if the graph doesn't contain
# destroyers.
var, candidate, merge_mode = pairs_[0]
if merge_mode == "new_node" and var in fgraph.variables:
pass
elif var not in fgraph.variables or candidate not in fgraph.variables:
continue
# Keep len(item) == 2 for item in pairs
pairs = [pair[:2] for pair in pairs_]
if var.owner and candidate.owner:
node = var.owner
candidate = candidate.owner
# Get input list of the candidate node with assert
# nodes removed
cand_inputs_assert_removed = []
for i in candidate.inputs:
if i.owner and isinstance(i.owner.op, Assert):
cand_inputs_assert_removed.append(i.owner.inputs[0])
else:
cand_inputs_assert_removed.append(i)
# Get input list of the node with assert nodes removed
node_inputs_assert_removed = []
for i in node.inputs:
if i.owner and isinstance(i.owner.op, Assert):
node_inputs_assert_removed.append(i.owner.inputs[0])
else:
node_inputs_assert_removed.append(i)
if merge_mode == "new_node":
inputs_match = True
else:
inputs_match = all(
node_in is cand_in
for node_in, cand_in in zip(
node_inputs_assert_removed, cand_inputs_assert_removed
)
)
# No need to compare the op again, as it doesn't change.
if not inputs_match:
continue
if hasattr(fgraph, "destroy_handler"):
# If both nodes have clients that destroy them, we
# can't merge them.
clients = (
fgraph.clients[pairs[0][0]] + fgraph.clients[pairs[0][1]]
)
if (
sum(
[
i in flatten(c.op.destroy_map.values())
for c, i in clients
if c != "output" and c.op.destroy_map
]
)
> 1
):
continue
if len(pairs) == 1 and pairs[0][0].type != pairs[0][1].type:
res = pairs[0][0].type.convert_variable(pairs[0][1])
# Since fgraph.replace only checks convert_variable in one
# direction, swap the pair when the conversion would not be
# successful.
if not res:
pairs = [(pairs[0][1], pairs[0][0])]
try:
# If all Constants, no need to call validate.
# Only need to check one of the var of each pairs.
# If it is a Constant, the other must also be a Constant as we merge them.
if all([isinstance(old, Constant) for old, new in pairs]):
fgraph.replace_all(pairs, reason="MergeOptimizer")
else:
fgraph.replace_all_validate(pairs, reason="MergeOptimizer")
except InconsistencyError:
success = False
nb_fail += 1
fgraph.merge_feature.blacklist.append(
(pairs[0][0].owner, pairs[0][1].owner)
)
if success:
nb_merged += len(pairs)
if isinstance(pairs[0][0], Constant):
nb_constant += 1
# print pairs, pairs[0][0].type
break
if fgraph.profile:
validate_time = fgraph.profile.validate_time - validate_before
callback_time = fgraph.execute_callbacks_time - callback_before
callbacks_time = {}
for k, v in fgraph.execute_callbacks_times.items():
if k in callbacks_before:
t = v - callbacks_before[k]
if t > 0:
callbacks_time[k] = t
else:
callbacks_time[k] = v
else:
validate_time = None
callback_time = None
callbacks_time = {}
# clear blacklist
fgraph.merge_feature.blacklist = []
return (
nb_fail,
time.time() - t0,
validate_time,
callback_time,
callbacks_time,
nb_merged,
nb_constant,
)
def __str__(self):
return self.__class__.__name__
@staticmethod
def print_profile(stream, prof, level=0):
(
nb_fail,
replace_time,
validate_time,
callback_time,
callbacks_time,
nb_merged,
nb_constant,
) = prof
blanc = " " * level
print(blanc, "MergeOptimizer", file=stream)
print(
blanc,
f" nb fail={nb_fail:5d} merged={nb_merged:5d} constant={nb_constant:5d}",
file=stream,
)
print(
blanc,
f" time replace={replace_time:2.2f} validate={validate_time:2.2f} callback={callback_time:2.2f}",
file=stream,
)
if callback_time > 1:
print(blanc, " callbacks_time", file=stream)
for i in sorted(callbacks_time.items(), key=lambda a: a[1]):
if i[1] > 0:
# We want to have the __str__ called, so we can't
# just print i.
print(blanc, " ", i[0], ",", i[1], file=stream)
@staticmethod
def merge_profile(prof1, prof2):
def merge_none_number(v1, v2):
if v1 is None:
return v2
if v2 is None:
return v1
return v1 + v2
nb_fail = prof1[0] + prof2[0]
replace_time = prof1[1] + prof2[1]
validate_time = merge_none_number(prof1[2], prof2[2])
callback_time = merge_none_number(prof1[3], prof2[3])
callbacks_time = merge_dict(prof1[4], prof2[4])
nb_merged = prof1[5] + prof2[5]
nb_constant = prof1[6] + prof2[6]
return (
nb_fail,
replace_time,
validate_time,
callback_time,
callbacks_time,
nb_merged,
nb_constant,
)
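# Illustrative sketch (not part of the original module): the merge behaviour
# described in the `MergeOptimizer` docstring, shown on a tiny graph with two
# structurally identical additions.  Assumes the `aesara.tensor` API and that
# `FunctionGraph(inputs, outputs)` attaches its default features (in
# particular `ReplaceValidate`).
def _example_merge_optimizer():
    import aesara.tensor as at
    from aesara.graph.fg import FunctionGraph
    from aesara.tensor.math import add

    x = at.vector("x")
    y = at.vector("y")
    z1 = x + y
    z2 = x + y  # a second, redundant apply of the same op on the same inputs
    fgraph = FunctionGraph([x, y], [z1 * 2, z2 * 3])

    # `MergeOptimizer.apply` reads `fgraph.profile`; make sure the attribute
    # exists when running the pass outside the usual compilation pipeline.
    if not hasattr(fgraph, "profile"):
        fgraph.profile = None

    n_adds_before = sum(1 for n in fgraph.apply_nodes if n.op == add)  # 2
    opt = MergeOptimizer()
    opt.add_requirements(fgraph)  # attaches a MergeFeature
    opt.apply(fgraph)
    n_adds_after = sum(1 for n in fgraph.apply_nodes if n.op == add)  # 1
    return n_adds_before, n_adds_after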
def pre_constant_merge(fgraph, variables):
"""Merge constants in the graphs given by `variables`.
.. warning::
This changes the nodes in a graph in-place!
Parameters
----------
fgraph
A `FunctionGraph` instance in which some of these `variables` may
reside.
We want to avoid terms in `variables` that are contained in `fgraph`.
The reason for that: it will break consistency of `fgraph` and its
features (e.g. `ShapeFeature`).
variables
A list of nodes for which we want to merge constant inputs.
Notes
-----
It is used to pre-merge nodes generated inside an optimization. It is
useful if there are many such replacements to make, so that `DebugMode`
will not check each of them.
"""
seen_var = set()
# signature -> variable (for constants)
const_sig_inv = {}
if isinstance(variables, Variable):
variables = [variables]
def recursive_merge(var):
if var in seen_var:
return var
if not hasattr(var, "owner"):
return var
# We don't want to merge constants that are *within* the
# `FunctionGraph`
if var.owner in fgraph.apply_nodes:
return var
seen_var.add(var)
if isinstance(var, Constant):
sig = var.signature()
if sig in const_sig_inv:
return const_sig_inv[sig]
const_sig_inv[sig] = var
return var
if var.owner:
for idx, inp in enumerate(var.owner.inputs):
# XXX: This is changing the graph in place!
var.owner.inputs[idx] = recursive_merge(inp)
return var
return [recursive_merge(v) for v in variables]
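# Illustrative sketch (not part of the original module): `pre_constant_merge`
# on two loose output variables whose graphs each contain a separate constant
# with the same value.  The in-place warning from the docstring applies.
# Assumes `aesara.tensor.constant` and the `FunctionGraph` constructor.
def _example_pre_constant_merge():
    import numpy as np
    import aesara.tensor as at
    from aesara.graph.fg import FunctionGraph

    x = at.vector("x")
    c1 = at.constant(np.arange(3.0))
    c2 = at.constant(np.arange(3.0))  # a distinct object, equal signature
    out1 = x * c1
    out2 = x + c2

    # An unrelated fgraph: pre_constant_merge only skips variables that
    # already live inside it.
    dummy = at.vector("dummy")
    fgraph = FunctionGraph([dummy], [dummy * 2])

    merged = pre_constant_merge(fgraph, [out1, out2])
    # Both applies now reference the same constant object:
    # out1.owner.inputs[1] is out2.owner.inputs[1]
    return merged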
class LocalOptimizer(abc.ABC):
"""A node-based optimizer."""
def __hash__(self):
if not hasattr(self, "_optimizer_idx"):
self._optimizer_idx = _optimizer_idx[0]
_optimizer_idx[0] += 1
return self._optimizer_idx
def tracks(self):
"""Return the list of `Op` classes to which this optimization applies.
Returns ``None`` when the optimization applies to all nodes.
"""
return None
@abc.abstractmethod
def transform(
self, fgraph: FunctionGraph, node: Apply, *args, **kwargs
) -> Union[bool, List[Variable], Dict[Variable, Variable]]:
r"""Transform a subgraph whose output is `node`.
Subclasses should implement this function so that it returns one of the
following:
- ``False`` to indicate that no optimization can be applied to this `node`;
- A list of `Variable`\s to use in place of the `node`'s current outputs.
- A ``dict`` mapping old `Variable`\s to `Variable`\s.
Parameters
----------
fgraph :
A `FunctionGraph` containing `node`.
node :
An `Apply` node to be transformed.
"""
raise NotImplementedError()
def add_requirements(self, fgraph):
r"""Add required `Feature`\s to `fgraph`."""
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print(f"{' ' * level}{self.__class__.__name__} id={id(self)}", file=stream)
class LocalMetaOptimizer(LocalOptimizer):
r"""
Base class for meta-optimizers that try a set of `LocalOptimizer`\s
to replace a node and choose the one that executes the fastest.
If the error ``LocalMetaOptimizerSkipAssertionError`` is raised during
compilation, we will skip that function compilation and not print
the error.
"""
def __init__(self):
self.verbose = config.metaopt__verbose
self.track_dict = defaultdict(lambda: [])
self.tag_dict = defaultdict(lambda: [])
self._tracks = []
self.optimizers = []
def register(self, optimizer, tag_list):
self.optimizers.append(optimizer)
for c in optimizer.tracks():
self.track_dict[c].append(optimizer)
self._tracks.append(c)
for tag in tag_list:
self.tag_dict[tag].append(optimizer)
def tracks(self):
return self._tracks
def transform(self, fgraph, node, *args, **kwargs):
# safety check: depending on registration, tracks may have been ignored
if self._tracks is not None:
if not isinstance(node.op, tuple(self._tracks)):
return
# first, we need to provide dummy values for all inputs
# to the node that are not shared variables anyway
givens = {}
missing = set()
for input in node.inputs:
if isinstance(input, aesara.compile.SharedVariable):
pass
elif hasattr(input.tag, "test_value"):
givens[input] = aesara.shared(
input.type.filter(input.tag.test_value),
input.name,
broadcastable=input.broadcastable,
borrow=True,
)
else:
missing.add(input)
if missing:
givens.update(self.provide_inputs(node, missing))
missing.difference_update(givens.keys())
# ensure we have data for all input variables that need it
if missing:
if self.verbose > 0:
print(
f"{self.__class__.__name__} cannot meta-optimize {node}, "
f"{len(missing)} of {int(node.nin)} input shapes unknown"
)
return
# now we can apply the different optimizations in turn,
# compile the resulting subgraphs and time their execution
if self.verbose > 1:
print(
f"{self.__class__.__name__} meta-optimizing {node} ({len(self.get_opts(node))} choices):"
)
timings = []
for opt in self.get_opts(node):
outputs = opt.transform(fgraph, node, *args, **kwargs)
if outputs:
try:
fn = aesara.function(
[], outputs, givens=givens, on_unused_input="ignore"
)
fn.trust_input = True
timing = min(self.time_call(fn) for _ in range(2))
except LocalMetaOptimizerSkipAssertionError:
continue
except Exception as e:
if self.verbose > 0:
print(f"* {opt}: exception", e)
continue
else:
if self.verbose > 1:
print(f"* {opt}: {timing:.5g} sec")
timings.append((timing, outputs, opt))
else:
if self.verbose > 0:
print(f"* {opt}: not applicable")
# finally, we choose the fastest one
if timings:
timings.sort()
if self.verbose > 1:
print(f"= {timings[0][2]}")
return timings[0][1]
return
def provide_inputs(self, node, inputs):
"""Return a dictionary mapping some `inputs` to `SharedVariable` instances of with dummy values.
The `node` argument can be inspected to infer required input shapes.
"""
raise NotImplementedError()
def get_opts(self, node):
"""Return the optimizations that apply to `node`.
This uses ``self.track_dict[type(node.op)]`` by default.
"""
return self.track_dict[type(node.op)]
def time_call(self, fn):
start = time.time()
fn()
return time.time() - start
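# Illustrative sketch (not part of the original module): the
# `LocalMetaOptimizer` workflow.  A subclass supplies dummy data for inputs
# without test values, and candidate optimizers are registered so they can be
# compiled and timed against each other.  The shapes below are placeholders
# chosen for this sketch only.
def _example_local_meta_optimizer():
    import numpy as np
    import aesara

    class TimedChoiceOptimizer(LocalMetaOptimizer):
        def provide_inputs(self, node, inputs):
            givens = {}
            for inp in inputs:
                shape = (8,) * inp.type.ndim
                value = np.ones(shape, dtype=inp.dtype)
                givens[inp] = aesara.shared(value, borrow=True)
            return givens

    meta = TimedChoiceOptimizer()
    # Each candidate is a `LocalOptimizer` whose `tracks()` lists the Ops it
    # applies to; the tag list is a set of free-form labels.
    meta.register(_example_local_optimizer_subclass(), ["default"])
    return meta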
class FromFunctionLocalOptimizer(LocalOptimizer):
"""A `LocalOptimizer` constructed from a function."""
def __init__(self, fn, tracks=None, requirements=()):
self.fn = fn
self._tracks = tracks
self.requirements = requirements
def transform(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def add_requirements(self, fgraph):
for req in self.requirements:
req(fgraph)
def tracks(self):
return self._tracks
def __str__(self):
return getattr(self, "__name__", "<FromFunctionLocalOptimizer instance>")
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print(f"{' ' * level}{self.transform} id={id(self)}", file=stream)
def local_optimizer(
tracks: Optional[List[Union[Op, type]]],
inplace: bool = False,
requirements: Optional[Tuple[type, ...]] = (),
):
r"""A decorator used to construct `FromFunctionLocalOptimizer` instances.
Parameters
----------
tracks :
The `Op` types or instances to which this optimization applies.
inplace :
A boolean indicating whether or not the optimization works in-place.
If ``True``, a `DestroyHandler` `Feature` is automatically added
to the `FunctionGraph`\s this optimization is applied to.
requirements :
`Feature` types required by this optimization.
"""
if requirements is None:
requirements = ()
def decorator(f):
if tracks is not None:
if len(tracks) == 0:
raise ValueError(
"Use None instead of an empty list to apply to all nodes.",
f.__module__,
f.__name__,
)
for t in tracks:
if not (isinstance(t, Op) or issubclass(t, Op)):
raise ValueError(
"Tracks are op classes or instances", f.__module__, f.__name__
)
req = requirements
if inplace:
dh_handler = dh.DestroyHandler
req = tuple(requirements) + (
lambda fgraph: fgraph.attach_feature(dh_handler()),
)
rval = FromFunctionLocalOptimizer(f, tracks, req)
rval.__name__ = f.__name__
return rval
return decorator
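# Illustrative sketch (not part of the original module): the same kind of
# node-level rewrite as above, expressed with the `local_optimizer` decorator.
# Assumes `exp` and `log` are the Elemwise `Op` instances from
# `aesara.tensor.math`.
def _example_local_optimizer_decorator():
    from aesara.tensor.math import exp, log

    @local_optimizer([exp])
    def local_exp_log_remover(fgraph, node):
        """Rewrite ``exp(log(x))`` into ``x``."""
        inner = node.inputs[0]
        if inner.owner is not None and inner.owner.op == log:
            return [inner.owner.inputs[0]]
        return False

    return local_exp_log_remover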
class LocalOptGroup(LocalOptimizer):
r"""An optimizer that applies a list of `LocalOptimizer`\s to a node.
Parameters
----------
optimizers :
A list of optimizers to be applied to nodes.
apply_all_opts : bool (Default False)
If ``False``, return the replacement produced by the first optimizer
that applies. Otherwise, keep optimizing the new node until no
further optimization applies.
profile :
Whether or not to profile the optimizations.
Attributes
----------
reentrant : bool
Some global optimizers, like `NavigatorOptimizer`, use this value to
decide whether to ignore new nodes during a pass over the graph
(see ``ignore_newtrees``).
retains_inputs : bool
States whether or not the inputs of a transformed node are transferred
to the outputs.
"""
def __init__(self, *optimizers, apply_all_opts=False, profile=False):
if len(optimizers) == 1 and isinstance(optimizers[0], list):
# This happens when created by LocalGroupDB.
optimizers = tuple(optimizers[0])
self.opts = optimizers
assert isinstance(self.opts, tuple)
self.reentrant = any(getattr(opt, "reentrant", True) for opt in optimizers)
self.retains_inputs = all(
getattr(opt, "retains_inputs", False) for opt in optimizers
)
self.apply_all_opts = apply_all_opts
self.profile = profile
self.track_map = defaultdict(lambda: [])
if self.profile:
self.time_opts = {}
self.process_count = {}
self.applied_true = {}
self.node_created = {}
for o in self.opts:
if self.profile:
self.time_opts.setdefault(o, 0)
self.process_count.setdefault(o, 0)
self.applied_true.setdefault(o, 0)
self.node_created.setdefault(o, 0)
tracks = o.tracks()
if tracks is None:
self.track_map[None].append(o)
else:
for c in tracks:
self.track_map[c].append(o)
def __str__(self):
return getattr(
self,
"__name__",
f"LocalOptGroup({','.join([str(o) for o in self.opts])})",
)
def tracks(self):
t = []
for l in self.opts:
aet = l.tracks()
if aet:
t.extend(aet)
return t
def transform(self, fgraph, node):
if len(self.opts) == 0:
return
repl = None
while True:
opts = (
self.track_map[type(node.op)]
+ self.track_map[node.op]
+ self.track_map[None]
)
new_repl = None
for opt in opts:
opt_start = time.time()
new_repl = opt.transform(fgraph, node)
opt_finish = time.time()
if self.profile:
self.time_opts[opt] += opt_finish - opt_start
self.process_count[opt] += 1
if not new_repl:
continue
if isinstance(new_repl, (tuple, list)):
new_vars = new_repl
else: # It must be a dict
new_vars = list(new_repl.values())
if self.profile:
self.node_created[opt] += len(
list(applys_between(fgraph.variables, new_vars))
)
self.applied_true[opt] += 1
break # break from the for loop over optimization.
if not new_repl: # No optimization applied in the last iteration
return repl
# only 1 iteration
if not self.apply_all_opts:
return new_repl
if not new_vars[0].owner:
# We are at the start of the graph.
return new_repl
if len(new_repl) > 1:
s = {v.owner for v in new_repl}
assert len(s) == 1
repl = new_repl
node = new_vars[0].owner
@staticmethod
def print_profile(stream, prof, level=0):
(time_opts, process_count, applied_true, node_created, profile) = prof
if not profile:
return
blanc = " " * int(level)
print(blanc, "LocalOptGroup", file=stream)
print(blanc, "---------------------", file=stream)
count_opt = []
not_used = []
not_used_time = 0
for o, count in process_count.items():
if count > 0:
count_opt.append(
(time_opts[o], applied_true[o], count, o, node_created[o])
)
else:
not_used.append((time_opts[o], o))
not_used_time += time_opts[o]
if count_opt:
print(
blanc,
" time taken - times applied - times tried - name - node_created:",
file=stream,
)
count_opt.sort()
for (t, a_t, count, o, n_c) in count_opt[::-1]:
print(
blanc,
f" {t:.3f}s - {int(a_t)} - {int(count)} - {o} - {int(n_c)}",
file=stream,
)
print(
blanc,
f" {not_used_time:.3f}s - in {len(not_used)} optimization that were not used (display those with runtime greater than 0)",
file=stream,
)
not_used.sort(key=lambda nu: (nu[0], str(nu[1])))
for (t, o) in not_used[::-1]:
if t > 0:
# Skip opts with zero time; they probably weren't even tried.
print(blanc + " ", f" {t:.3f}s - {o}", file=stream)
else:
print(blanc, " The optimizer wasn't successful ", file=stream)
print(file=stream)
def merge_profile(prof1, prof2):
raise NotImplementedError
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print(f"{' ' * level}{self.__class__.__name__} id={id(self)}", file=stream)
if depth != 0:
depth -= 1
for lopt in self.opts:
lopt.print_summary(stream, level=(level + 2), depth=depth)
def add_requirements(self, fgraph):
for opt in self.opts:
opt.add_requirements(fgraph)
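# Illustrative sketch (not part of the original module): bundling the two
# sketch optimizers defined above into a `LocalOptGroup`, so that both are
# tried on each node they track.
def _example_local_opt_group():
    group = LocalOptGroup(
        _example_local_optimizer_subclass(),
        _example_local_optimizer_decorator(),
    )
    # `tracks()` is the union of the members' tracks, so a navigator-style
    # optimizer only offers `log` and `exp` nodes to this group.
    return group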
class GraphToGPULocalOptGroup(LocalOptGroup):
"""This is the equivalent of `LocalOptGroup` for `GraphToGPU`.
The main difference is that the local optimizers use the `GraphToGPU`
signature instead of the usual `LocalOptimizer` signature.
``apply_all_opts=True`` is not supported.
"""
def __init__(self, *optimizers, **kwargs):
super().__init__(*optimizers, **kwargs)
assert self.apply_all_opts is False
def transform(self, fgraph, op, context_name, inputs, outputs):
if len(self.opts) == 0:
return
opts = self.track_map[type(op)] + self.track_map[op] + self.track_map[None]
for opt in opts:
opt_start = time.time()
new_repl = opt.transform(fgraph, op, context_name, inputs, outputs)
opt_finish = time.time()
if self.profile:
self.time_opts[opt] += opt_finish - opt_start
self.process_count[opt] += 1
if not new_repl:
continue
if self.profile:
self.node_created[opt] += len(
list(applys_between(fgraph.variables, new_repl))
)
self.applied_true[opt] += 1
return new_repl
class OpSub(LocalOptimizer):
"""
Replaces the application of a certain `Op` by the application of
another `Op` that takes the same inputs as what it is replacing.
Parameters
----------
op1, op2
``op1.make_node`` and ``op2.make_node`` must take the same number of
inputs and have the same number of outputs.
Examples
--------
OpSub(add, sub) ==>
add(div(x, y), add(y, x)) -> sub(div(x, y), sub(y, x))
"""
# an OpSub does not apply to the nodes it produces
reentrant = False
# all the inputs of the original node are transferred to the outputs
retains_inputs = True
def __init__(self, op1, op2, transfer_tags=True):
self.op1 = op1
self.op2 = op2
self.transfer_tags = transfer_tags
def op_key(self):
return self.op1
def tracks(self):
return [self.op1]
def transform(self, fgraph, node):
if node.op != self.op1:
return False
repl = self.op2.make_node(*node.inputs)
if self.transfer_tags:
repl.tag = copy.copy(node.tag)
for output, new_output in zip(node.outputs, repl.outputs):
new_output.tag = copy.copy(output.tag)
return repl.outputs
def __str__(self):
return f"{self.op1} -> {self.op2}"
class OpRemove(LocalOptimizer):
"""
Removes all applications of an `Op` by transferring each of its
outputs to the corresponding input.
"""
reentrant = False # no nodes are added at all
def __init__(self, op):
self.op = op
def op_key(self):
return self.op
def tracks(self):
return [self.op]
def transform(self, fgraph, node):
if node.op != self.op:
return False
return node.inputs
def __str__(self):
return f"{self.op}(x) -> x"
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print(
f"{' ' * level}{self.__class__.__name__}(self.op) id={id(self)}",
file=stream,
)
class PatternSub(LocalOptimizer):
"""
@todo update
Replaces all occurrences of the input pattern by the output pattern:
input_pattern ::= (op, <sub_pattern1>, <sub_pattern2>, ...)
input_pattern ::= dict(pattern = <input_pattern>,
constraint = <constraint>)
sub_pattern ::= input_pattern
sub_pattern ::= string
sub_pattern ::= a Constant instance
sub_pattern ::= int
sub_pattern ::= float
constraint ::= lambda fgraph, expr: additional matching condition
output_pattern ::= (op, <output_pattern1>, <output_pattern2>, ...)
output_pattern ::= string
output_pattern ::= int
output_pattern ::= float
Each string in the input pattern is a variable that will be set to
whatever expression is found in its place. If the same string is
used more than once, the same expression must be found in those
places. If a string used in the input pattern is used in the
output pattern, the matching expression will be inserted in its
place. The input pattern cannot just be a string but the output
pattern can.
If you put a constant variable in the input pattern, there will be a
match iff a constant variable with the same value and the same type
is found in its place.
You can add a constraint to the match by using the ``dict(...)`` form
described above with a ``'constraint'`` key. The constraint must be a
function that takes the fgraph and the current Variable that we are
trying to match and returns True or False according to an
arbitrary criterion.
The constructor creates a `PatternSub` that replaces occurrences of
`in_pattern` by occurrences of `out_pattern`.
Parameters
----------
in_pattern :
The input pattern that we want to replace.
out_pattern :
The replacement pattern.
allow_multiple_clients : bool
If False, the pattern matching will fail if one of the subpatterns has
more than one client.
skip_identities_fn : TODO
name :
Allows overriding this optimizer's name.
pdb : bool
If True, we invoke pdb when the first node in the pattern matches.
tracks : optional
The values that :meth:`self.tracks` will return. Useful to speed up
optimization sometimes.
get_nodes : optional
If you provide `tracks`, you must provide this parameter. It must be a
function that takes the tracked node and returns a list of nodes on
which we will try this optimizer.
Notes
-----
`tracks` and `get_nodes` can be used to make this optimizer track a less
frequent `Op`, so that the optimizer is tried less often.
Examples
--------
PatternSub((add, 'x', 'y'), (add, 'y', 'x'))
PatternSub((multiply, 'x', 'x'), (square, 'x'))
PatternSub((subtract, (add, 'x', 'y'), 'y'), 'x')
PatternSub((power, 'x', Constant(double, 2.0)), (square, 'x'))
PatternSub((boggle, {'pattern': 'x',
'constraint': lambda expr: expr.type == scrabble}),
(scrabble, 'x'))
"""
def __init__(
self,
in_pattern,
out_pattern,
allow_multiple_clients=False,
skip_identities_fn=None,
name=None,
pdb=False,
tracks=(),
get_nodes=None,
values_eq_approx=None,
):
self.in_pattern = in_pattern
self.out_pattern = out_pattern
self.values_eq_approx = values_eq_approx
if isinstance(in_pattern, (list, tuple)):
self.op = self.in_pattern[0]
elif isinstance(in_pattern, dict):
self.op = self.in_pattern["pattern"][0]
else:
raise TypeError(
"The pattern to search for must start with a specific Op instance."
)
self.__doc__ = (
self.__class__.__doc__ + "\n\nThis instance does: " + str(self) + "\n"
)
self.allow_multiple_clients = allow_multiple_clients
self.skip_identities_fn = skip_identities_fn
if name:
self.__name__ = name
self.pdb = pdb
self._tracks = tracks
self.get_nodes = get_nodes
if tracks != ():
assert get_nodes
def op_key(self):
return self.op
def tracks(self):
if self._tracks != ():
return self._tracks
return [self.op]
def transform(self, fgraph, node, get_nodes=True):
"""Check if the graph from node corresponds to ``in_pattern``.
If it does, it constructs ``out_pattern`` and performs the replacement.
"""
from aesara.graph import unify
if get_nodes and self.get_nodes is not None:
for real_node in self.get_nodes(fgraph, node):
if real_node == "output":
continue
ret = self.transform(fgraph, real_node, get_nodes=False)
if ret is not False and ret is not None:
return dict(zip(real_node.outputs, ret))
if node.op != self.op:
return False
# TODO: if we remove pdb, would this speed things up?
def match(pattern, expr, u, allow_multiple_clients=False, pdb=False):
# TODO move outside match
def retry_with_equiv():
if not self.skip_identities_fn:
return False
expr_equiv = self.skip_identities_fn(expr)
if expr_equiv is None:
return False
# TODO: Not sure how to handle multiple_clients flag
return match(
pattern,
expr_equiv,
u,
allow_multiple_clients=allow_multiple_clients,
)
if isinstance(pattern, (list, tuple)):
if expr.owner is None:
return False
if not (expr.owner.op == pattern[0]) or (
not allow_multiple_clients and len(fgraph.clients[expr]) > 1
):
return retry_with_equiv()
if len(pattern) - 1 != len(expr.owner.inputs):
return retry_with_equiv()
for p, v in zip(pattern[1:], expr.owner.inputs):
u = match(p, v, u, self.allow_multiple_clients)
if not u:
return False
elif isinstance(pattern, dict):
try:
real_pattern = pattern["pattern"]
except KeyError:
raise KeyError(
f"Malformed pattern: {pattern} (expected key 'pattern')"
)
constraint = pattern.get("constraint", lambda expr: True)
if constraint(expr):
return match(
real_pattern,
expr,
u,
pattern.get("allow_multiple_clients", allow_multiple_clients),
)
else:
return retry_with_equiv()
elif isinstance(pattern, str):
v = unify.Var(pattern)
if u[v] is not v and u[v] is not expr:
return retry_with_equiv()
else:
u = u.merge(expr, v)
elif isinstance(pattern, (int, float)) and isinstance(expr, Constant):
if np.all(aesara.tensor.constant(pattern).value == expr.value):
return u
else:
return retry_with_equiv()
elif (
isinstance(pattern, Constant)
and isinstance(expr, Constant)
and pattern.equals(expr)
):
return u
else:
return retry_with_equiv()
if pdb:
import pdb
pdb.set_trace()
return u
u = match(self.in_pattern, node.out, unify.Unification(), True, self.pdb)
if not u:
return False
def build(pattern, u):
if isinstance(pattern, (list, tuple)):
args = [build(p, u) for p in pattern[1:]]
return pattern[0](*args)
elif isinstance(pattern, str):
return u[unify.Var(pattern)]
elif isinstance(pattern, (int, float)):
return pattern
else:
return pattern.clone()
ret = build(self.out_pattern, u)
if isinstance(ret, (int, float)):
# TODO: Should we convert these to constants explicitly?
return [ret]
if self.values_eq_approx:
ret.tag.values_eq_approx = self.values_eq_approx
if ret.owner:
if [out.type for out in ret.owner.outputs] != [
out.type for out in node.outputs
]:
return False
else:
# ret is just an input variable
assert len(node.outputs) == 1
if ret.type != node.outputs[0].type:
return False
return [ret]
def __str__(self):
if getattr(self, "__name__", None):
return self.__name__
def pattern_to_str(pattern):
if isinstance(pattern, (list, tuple)):
return "{}({})".format(
str(pattern[0]),
", ".join([pattern_to_str(p) for p in pattern[1:]]),
)
elif isinstance(pattern, dict):
return "{} subject to {}".format(
pattern_to_str(pattern["pattern"]),
str(pattern.get("constraint", "no conditions")),
)
else:
return str(pattern)
return "{} -> {}".format(
pattern_to_str(self.in_pattern),
pattern_to_str(self.out_pattern),
)
def __repr__(self):
return str(self)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
name = getattr(self, "__name__", getattr(self, "name", None))
print(
f"{' ' * level}{self.__class__.__name__} {name}({self.in_pattern}, {self.out_pattern}) id={id(self)}",
file=stream,
)
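# Illustrative sketch (not part of the original module): two rewrites
# expressed with the pattern grammar documented above.  Assumes `mul` and
# `neg` are the Elemwise `Op` instances from `aesara.tensor.math`.
def _example_pattern_sub():
    from aesara.tensor.math import mul, neg

    # neg(neg(x)) -> x
    double_neg = PatternSub((neg, (neg, "x")), "x")
    # x * 1.0 -> x  (a float in the pattern matches an equal constant)
    mul_by_one = PatternSub((mul, "x", 1.0), "x")
    # Either can be applied graph-wide, e.g. via
    # ``in2out(double_neg, mul_by_one, name="simplify")`` defined below.
    return [double_neg, mul_by_one]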
class Updater(Feature):
def __init__(self, importer, pruner, chin, name=None):
self.importer = importer
self.pruner = pruner
self.chin = chin
self.name = name
def __str__(self):
return "Updater{%s}" % str(self.name)
def on_import(self, fgraph, node, reason):
if self.importer:
self.importer(node)
def on_prune(self, fgraph, node, reason):
if self.pruner:
self.pruner(node)
def on_change_input(self, fgraph, node, i, r, new_r, reason):
if self.chin:
self.chin(node, i, r, new_r, reason)
def on_detach(self, fgraph):
# To allow pickling this object
self.importer = None
self.pruner = None
self.chin = None
class NavigatorOptimizer(GlobalOptimizer):
r"""An optimizer that applies a `LocalOptimizer` with considerations for the new nodes it creates.
This optimizer also allows the `LocalOptimizer` to use a special ``"remove"`` value
in the ``dict``\s returned by :meth:`LocalOptimizer.transform`. `Variable`\s mapped to this
value are removed from the `FunctionGraph`.
Parameters
----------
local_opt :
A `LocalOptimizer` to apply over a `FunctionGraph` (or ``None``).
ignore_newtrees :
- ``True``: new subgraphs returned by an optimization are not
candidates for further optimization.
- ``False``: new subgraphs returned by an optimization are
candidates for further optimization.
- ``'auto'``: let the `local_opt` set this parameter via its :attr:`reentrant`
attribute.
failure_callback
A function with the signature ``(exception, navigator, [(old, new),
(old,new),...])`` that is called when there's an exception.
If the exception is raised in ``local_opt.transform``, the ``new`` variables
will be ``None``.
If the exception is raised during validation (e.g. the new types don't
match) then the new variables will be the ones created by ``self.transform``.
If this parameter is ``None``, then exceptions are not caught here and
are raised normally.
"""
@staticmethod
def warn(exc, nav, repl_pairs, local_opt, node):
"""A failure callback that prints a traceback."""
if config.on_opt_error != "ignore":
_logger.error(f"Optimization failure due to: {local_opt}")
_logger.error(f"node: {node}")
_logger.error("TRACEBACK:")
_logger.error(traceback.format_exc())
if config.on_opt_error == "pdb":
pdb.post_mortem(sys.exc_info()[2])
elif isinstance(exc, AssertionError) or config.on_opt_error == "raise":
# We always crash on AssertionError because something may be
# seriously wrong if such an exception is raised.
raise exc
@staticmethod
def warn_inplace(exc, nav, repl_pairs, local_opt, node):
r"""A failure callback that ignores ``InconsistencyError``\s and prints a traceback.
If the error occurred during replacement, ``repl_pairs`` is set;
otherwise, its value is ``None``.
"""
if isinstance(exc, InconsistencyError):
return
return NavigatorOptimizer.warn(exc, nav, repl_pairs, local_opt, node)
@staticmethod
def warn_ignore(exc, nav, repl_pairs, local_opt, node):
"""A failure callback that ignores all errors."""
def __init__(self, local_opt, ignore_newtrees="auto", failure_callback=None):
self.local_opt = local_opt
if ignore_newtrees == "auto":
self.ignore_newtrees = not getattr(local_opt, "reentrant", True)
else:
self.ignore_newtrees = ignore_newtrees
self.failure_callback = failure_callback
def attach_updater(self, fgraph, importer, pruner, chin=None, name=None):
r"""Install `FunctionGraph` listeners to help the navigator deal with the ``ignore_trees``-related functionality.
Parameters
----------
importer :
Function that will be called whenever optimizations add stuff
to the graph.
pruner :
Function to be called when optimizations remove stuff
from the graph.
chin :
"on change input" called whenever a node's inputs change.
name :
name of the ``Updater`` to attach.
Returns
-------
The `FunctionGraph` plugin that handles the three tasks.
Keep this around so that `Feature`\s can be detached later.
"""
if self.ignore_newtrees:
importer = None
if importer is None and pruner is None:
return None
u = Updater(importer, pruner, chin, name=name)
fgraph.attach_feature(u)
return u
def detach_updater(self, fgraph, u):
"""Undo the work of ``attach_updater``.
Parameters
----------
fgraph
The `FunctionGraph`.
u
A return-value of ``attach_updater``.
Returns
-------
None
"""
if u is not None:
fgraph.remove_feature(u)
def process_node(self, fgraph, node, lopt=None):
r"""Apply `lopt` to `node`.
The :meth:`lopt.transform` method will return either ``False`` or a
list of `Variable`\s that are intended to replace :attr:`node.outputs`.
If the `fgraph` accepts the replacement, then the optimization is
successful, and this function returns ``True``.
If there are no replacement candidates or the `fgraph` rejects the
replacements, this function returns ``False``.
Parameters
----------
fgraph :
A `FunctionGraph`.
node :
An `Apply` instance in `fgraph`
lopt :
A `LocalOptimizer` instance that may have a better idea for
how to compute node's outputs.
Returns
-------
bool
``True`` iff the `node`'s outputs were replaced in the `fgraph`.
"""
lopt = lopt or self.local_opt
try:
replacements = lopt.transform(fgraph, node)
except Exception as e:
if self.failure_callback is not None:
self.failure_callback(
e, self, [(x, None) for x in node.outputs], lopt, node
)
return False
else:
raise
if replacements is False or replacements is None:
return False
old_vars = node.outputs
remove = []
if isinstance(replacements, dict):
if "remove" in replacements:
remove = replacements.pop("remove")
old_vars = list(replacements.keys())
replacements = list(replacements.values())
elif not isinstance(replacements, (tuple, list)):
raise TypeError(
f"Optimizer {lopt} gave wrong type of replacement. "
f"Expected list or tuple. Got {replacements}"
)
if len(old_vars) != len(replacements):
raise ValueError(f"Optimizer {lopt} gave wrong number of replacements")
# None in the replacements means that this variable isn't used
# and we want to remove it
for r, rnew in zip(old_vars, replacements):
if rnew is None and len(fgraph.clients[r]) > 0:
raise ValueError(
"A local optimizer tried to remove a Variable that is used"
)
# If an output would be replaced by itself, no need to perform
# the replacement
repl_pairs = [
(r, rnew)
for r, rnew in zip(old_vars, replacements)
if rnew is not r and rnew is not None
]
if len(repl_pairs) == 0:
return False
try:
fgraph.replace_all_validate_remove(repl_pairs, reason=lopt, remove=remove)
return True
except Exception as e:
# This means the replacements were rejected by the fgraph.
#
# This is not supposed to happen. The default failure_callback
# will print a traceback as a warning.
if self.failure_callback is not None:
self.failure_callback(e, self, repl_pairs, lopt, node)
return False
else:
raise
def add_requirements(self, fgraph):
super().add_requirements(fgraph)
# Added by default
# fgraph.attach_feature(ReplaceValidate())
if self.local_opt:
self.local_opt.add_requirements(fgraph)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print(f"{' ' * level}{self.__class__.__name__} id={id(self)}", file=stream)
if depth != 0:
self.local_opt.print_summary(stream, level=(level + 2), depth=(depth - 1))
class TopoOptimizer(NavigatorOptimizer):
"""An optimizer that applies a single `LocalOptimizer` to each node in topological order (or reverse)."""
def __init__(
self, local_opt, order="in_to_out", ignore_newtrees=False, failure_callback=None
):
if order not in ["out_to_in", "in_to_out"]:
raise ValueError("order must be 'out_to_in' or 'in_to_out'")
self.order = order
super().__init__(local_opt, ignore_newtrees, failure_callback)
def apply(self, fgraph, start_from=None):
if start_from is None:
start_from = fgraph.outputs
callback_before = fgraph.execute_callbacks_time
nb_nodes_start = len(fgraph.apply_nodes)
t0 = time.time()
q = deque(io_toposort(fgraph.inputs, start_from))
io_t = time.time() - t0
def importer(node):
if node is not current_node:
q.append(node)
u = self.attach_updater(
fgraph, importer, None, name=getattr(self, "name", None)
)
nb = 0
try:
t0 = time.time()
while q:
if self.order == "out_to_in":
node = q.pop()
else:
node = q.popleft()
if node not in fgraph.apply_nodes:
continue
current_node = node
nb += self.process_node(fgraph, node)
loop_t = time.time() - t0
finally:
self.detach_updater(fgraph, u)
callback_time = fgraph.execute_callbacks_time - callback_before
nb_nodes_end = len(fgraph.apply_nodes)
return (
self,
nb,
nb_nodes_start,
nb_nodes_end,
io_t,
loop_t,
callback_time,
self.local_opt,
)
@staticmethod
def print_profile(stream, prof, level=0):
blanc = " " * level
if prof is None:  # Happens when merge_profile() isn't implemented
print(blanc, "TopoOptimizer merge_profile not implemented", file=stream)
return
(
opt,
nb,
nb_nodes_start,
nb_nodes_end,
io_t,
loop_t,
callback_time,
lopt,
) = prof
print(
blanc,
"TopoOptimizer ",
getattr(opt, "name", getattr(opt, "__name__", "")),
file=stream,
)
print(
blanc,
" nb_node (start, end, changed)",
(nb_nodes_start, nb_nodes_end, nb),
file=stream,
)
print(blanc, " init io_toposort", io_t, file=stream)
print(blanc, " loop time", loop_t, file=stream)
print(blanc, " callback_time", callback_time, file=stream)
if isinstance(lopt, LocalOptGroup):
if lopt.profile:
lopt.print_profile(
stream,
(
lopt.time_opts,
lopt.process_count,
lopt.applied_true,
lopt.node_created,
lopt.profile,
),
level=level + 1,
)
def __str__(self):
return getattr(self, "__name__", "<TopoOptimizer instance>")
def topogroup_optimizer(order, *local_opts, name=None, **kwargs):
"""Apply `local_opts` from the input/output nodes to the output/input nodes of a graph.
This uses a combination of `LocalOptGroup` and `TopoOptimizer`.
"""
if len(local_opts) > 1:
# Don't wrap it uselessly if there is only one optimization.
local_opts = LocalOptGroup(*local_opts)
else:
(local_opts,) = local_opts
if not name:
name = local_opts.__name__
ret = TopoOptimizer(
local_opts,
order="in_to_out",
failure_callback=TopoOptimizer.warn_inplace,
**kwargs,
)
if name:
ret.__name__ = name
return ret
in2out = partial(topogroup_optimizer, "in_to_out")
out2in = partial(topogroup_optimizer, "out_to_in")
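# Illustrative sketch (not part of the original module): turning a local
# rewrite into a graph-wide pass with `in2out` and running it on a tiny
# graph.  Assumes the `aesara.tensor` API and that
# `FunctionGraph(inputs, outputs)` attaches `ReplaceValidate` by default.
def _example_in2out():
    import aesara.tensor as at
    from aesara.graph.fg import FunctionGraph
    from aesara.tensor.math import neg

    x = at.vector("x")
    fgraph = FunctionGraph([x], [neg(neg(x)) + 1.0])

    double_neg = PatternSub((neg, (neg, "x")), "x")
    opt = in2out(double_neg, name="remove_double_neg")
    opt.add_requirements(fgraph)
    opt.apply(fgraph)
    # Both `neg` applies are gone; the remaining graph computes ``x + 1.0``.
    return fgraph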
class OpKeyOptimizer(NavigatorOptimizer):
r"""An optimizer that applies a `LocalOptimizer` to specific `Op`\s.
The `Op`\s are provided by a :meth:`LocalOptimizer.op_key` method (either
as a list of `Op`\s or a single `Op`), and discovered within a
`FunctionGraph` using the `NodeFinder` `Feature`.
This is similar to the ``tracks`` feature used by other optimizers.
"""
def __init__(self, local_opt, ignore_newtrees=False, failure_callback=None):
if not hasattr(local_opt, "op_key"):
raise TypeError(f"{local_opt} must have an `op_key` method.")
super().__init__(local_opt, ignore_newtrees, failure_callback)
def apply(self, fgraph):
op = self.local_opt.op_key()
if isinstance(op, (list, tuple)):
q = reduce(list.__iadd__, map(fgraph.get_nodes, op))
else:
q = list(fgraph.get_nodes(op))
def importer(node):
if node is not current_node:
if node.op == op:
q.append(node)
u = self.attach_updater(
fgraph, importer, None, name=getattr(self, "name", None)
)
try:
while q:
node = q.pop()
if node not in fgraph.apply_nodes:
continue
current_node = node
self.process_node(fgraph, node)
finally:
self.detach_updater(fgraph, u)
def add_requirements(self, fgraph):
super().add_requirements(fgraph)
fgraph.attach_feature(NodeFinder())
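# Illustrative sketch (not part of the original module): wrapping the
# docstring's own ``OpSub(add, sub)`` example in an `OpKeyOptimizer`, which
# locates `add` nodes through the `NodeFinder` feature instead of walking the
# whole graph.  Assumes `add` and `sub` are the Elemwise `Op` instances from
# `aesara.tensor.math`.
def _example_op_key_optimizer():
    import aesara.tensor as at
    from aesara.graph.fg import FunctionGraph
    from aesara.tensor.math import add, sub

    x = at.vector("x")
    y = at.vector("y")
    fgraph = FunctionGraph([x, y], [x + y])

    opt = OpKeyOptimizer(OpSub(add, sub))
    opt.add_requirements(fgraph)  # attaches the NodeFinder feature
    opt.apply(fgraph)  # every add(...) apply is now sub(...)
    return fgraph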
class ChangeTracker(Feature):
def __init__(self):
self.changed = False
self.nb_imported = 0
def on_import(self, fgraph, node, reason):
self.nb_imported += 1
self.changed = True
def on_change_input(self, fgraph, node, i, r, new_r, reason):
self.changed = True
def reset(self):
self.changed = False
def on_attach(self, fgraph):
fgraph.change_tracker = self
def on_detach(self, fgraph):
del fgraph.change_tracker
def merge_dict(d1, d2):
r"""Merge two ``dict``\s by adding their values."""
d = d1.copy()
for k, v in d2.items():
if k in d:
d[k] += v
else:
d[k] = v
return d
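# Illustrative sketch (not part of the original module): `merge_dict` adds
# the values of shared keys, which is how per-optimizer timing dictionaries
# from two profiles are combined.
def _example_merge_dict():
    d1 = {"opt_a": 0.5, "opt_b": 1.0}
    d2 = {"opt_b": 0.25, "opt_c": 2.0}
    assert merge_dict(d1, d2) == {"opt_a": 0.5, "opt_b": 1.25, "opt_c": 2.0}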
class EquilibriumOptimizer(NavigatorOptimizer):
"""An optimizer that applies an optimization until a fixed-point/equilibrium is reached.
Parameters
----------
optimizers : list or set
Local or global optimizations to apply until equilibrium.
The global optimizer will be run at the start of each iteration before
the local optimizer.
max_use_ratio : int or float
Each optimizer can be applied at most ``(size of graph * this number)``
times.
ignore_newtrees :
See :attr:`EquilibriumDB.ignore_newtrees`.
final_optimizers :
Global optimizers that will be run after each iteration.
cleanup_optimizers :
Global optimizers that apply a list of pre-determined optimizations.
They must not traverse the graph, as they are called very frequently.
The `MergeOptimizer` is one example of an optimization that respects this.
They are applied after all global optimizers, after each local
optimizer application, and after all final optimizers.
"""
def __init__(
self,
optimizers,
failure_callback=None,
ignore_newtrees=True,
tracks_on_change_inputs=False,
max_use_ratio=None,
final_optimizers=None,
cleanup_optimizers=None,
):
super().__init__(
None, ignore_newtrees=ignore_newtrees, failure_callback=failure_callback
)
self.local_optimizers_map = OrderedDict()
self.local_optimizers_all = []
self.global_optimizers = []
self.final_optimizers = []
self.cleanup_optimizers = []
self.tracks_on_change_inputs = tracks_on_change_inputs
for opt in optimizers:
if isinstance(opt, LocalOptimizer):
if opt.tracks() is None:
self.local_optimizers_all.append(opt)
else:
for c in opt.tracks():
self.local_optimizers_map.setdefault(c, []).append(opt)
else:
self.global_optimizers.append(opt)
if final_optimizers:
self.final_optimizers = final_optimizers
if cleanup_optimizers:
self.cleanup_optimizers = cleanup_optimizers
self.max_use_ratio = max_use_ratio
assert self.max_use_ratio is not None, "max_use_ratio has to be a number"
def get_local_optimizers(self):
for opt in self.local_optimizers_all:
yield opt
# if repeat is not a problem we can drop the set
s = set()
for lopt in self.local_optimizers_map.values():
for opt in lopt:
if opt not in s:
yield opt
s.add(opt)
def add_requirements(self, fgraph):
super().add_requirements(fgraph)
for opt in self.get_local_optimizers():
opt.add_requirements(fgraph)
for opt in self.global_optimizers:
opt.add_requirements(fgraph)
for opt in self.final_optimizers:
opt.add_requirements(fgraph)
for opt in self.cleanup_optimizers:
opt.add_requirements(fgraph)
def apply(self, fgraph, start_from=None):
change_tracker = ChangeTracker()
fgraph.attach_feature(change_tracker)
if start_from is None:
start_from = fgraph.outputs
else:
for node in start_from:
assert node in fgraph.outputs
changed = True
max_use_abort = False
opt_name = None
global_process_count = {}
start_nb_nodes = len(fgraph.apply_nodes)
max_nb_nodes = len(fgraph.apply_nodes)
max_use = max_nb_nodes * self.max_use_ratio
loop_timing = []
loop_process_count = []
global_opt_timing = []
time_opts = {}
io_toposort_timing = []
nb_nodes = []
node_created = {}
global_sub_profs = []
final_sub_profs = []
cleanup_sub_profs = []
for opt in (
self.global_optimizers
+ list(self.get_local_optimizers())
+ self.final_optimizers
+ self.cleanup_optimizers
):
global_process_count.setdefault(opt, 0)
time_opts.setdefault(opt, 0)
node_created.setdefault(opt, 0)
def apply_cleanup(profs_dict):
changed = False
for copt in self.cleanup_optimizers:
change_tracker.reset()
nb = change_tracker.nb_imported
t_opt = time.time()
sub_prof = copt.apply(fgraph)
time_opts[copt] += time.time() - t_opt
profs_dict[copt].append(sub_prof)
if change_tracker.changed:
process_count.setdefault(copt, 0)
process_count[copt] += 1
global_process_count[copt] += 1
changed = True
node_created[copt] += change_tracker.nb_imported - nb
return changed
while changed and not max_use_abort:
process_count = {}
t0 = time.time()
changed = False
iter_cleanup_sub_profs = {}
for copt in self.cleanup_optimizers:
iter_cleanup_sub_profs[copt] = []
# apply global optimizers
sub_profs = []
for gopt in self.global_optimizers:
change_tracker.reset()
nb = change_tracker.nb_imported
t_opt = time.time()
sub_prof = gopt.apply(fgraph)
time_opts[gopt] += time.time() - t_opt
sub_profs.append(sub_prof)
if change_tracker.changed:
process_count.setdefault(gopt, 0)
process_count[gopt] += 1
global_process_count[gopt] += 1
changed = True
node_created[gopt] += change_tracker.nb_imported - nb
if global_process_count[gopt] > max_use:
max_use_abort = True
opt_name = getattr(gopt, "name", None) or getattr(
gopt, "__name__", ""
)
global_sub_profs.append(sub_profs)
global_opt_timing.append(float(time.time() - t0))
# Apply cleanup, as the global opts may have made changes
# that require it.
changed |= apply_cleanup(iter_cleanup_sub_profs)
# apply local optimizer
topo_t0 = time.time()
q = deque(io_toposort(fgraph.inputs, start_from))
io_toposort_timing.append(time.time() - topo_t0)
nb_nodes.append(len(q))
max_nb_nodes = max(max_nb_nodes, len(q))
max_use = max_nb_nodes * self.max_use_ratio
def importer(node):
if node is not current_node:
q.append(node)
chin = None
if self.tracks_on_change_inputs:
def chin(node, i, r, new_r, reason):
if node is not current_node and not isinstance(node, str):
q.append(node)
u = self.attach_updater(
fgraph, importer, None, chin=chin, name=getattr(self, "name", None)
)
try:
while q:
node = q.pop()
if node not in fgraph.apply_nodes:
continue
current_node = node
for lopt in (
self.local_optimizers_all
+ self.local_optimizers_map.get(type(node.op), [])
+ self.local_optimizers_map.get(node.op, [])
):
nb = change_tracker.nb_imported
t_opt = time.time()
lopt_change = self.process_node(fgraph, node, lopt)
time_opts[lopt] += time.time() - t_opt
if not lopt_change:
continue
process_count.setdefault(lopt, 0)
process_count[lopt] += 1
global_process_count[lopt] += 1
changed = True
node_created[lopt] += change_tracker.nb_imported - nb
changed |= apply_cleanup(iter_cleanup_sub_profs)
if global_process_count[lopt] > max_use:
max_use_abort = True
opt_name = getattr(lopt, "name", None) or getattr(
lopt, "__name__", ""
)
if node not in fgraph.apply_nodes:
# go to next node
break
finally:
self.detach_updater(fgraph, u)
# Apply final optimizers
sub_profs = []
t_before_final_opt = time.time()
for gopt in self.final_optimizers:
change_tracker.reset()
nb = change_tracker.nb_imported
t_opt = time.time()
sub_prof = gopt.apply(fgraph)
time_opts[gopt] += time.time() - t_opt
sub_profs.append(sub_prof)
if change_tracker.changed:
process_count.setdefault(gopt, 0)
process_count[gopt] += 1
global_process_count[gopt] += 1
changed = True
node_created[gopt] += change_tracker.nb_imported - nb
if global_process_count[gopt] > max_use:
max_use_abort = True
opt_name = getattr(gopt, "name", None) or getattr(
gopt, "__name__", ""
)
final_sub_profs.append(sub_profs)
global_opt_timing[-1] += time.time() - t_before_final_opt
# Apply cleanup, as the final opts may have made changes
# that require it.
changed |= apply_cleanup(iter_cleanup_sub_profs)
# merge clean up profiles during that iteration.
c_sub_profs = []
for copt, sub_profs in iter_cleanup_sub_profs.items():
sub_prof = sub_profs[0]
for s_p in sub_profs[1:]:
sub_prof = copt.merge_profile(sub_prof, s_p)
c_sub_profs.append(sub_prof)
cleanup_sub_profs.append(c_sub_profs)
loop_process_count.append(process_count)
loop_timing.append(float(time.time() - t0))
end_nb_nodes = len(fgraph.apply_nodes)
if max_use_abort:
msg = (
f"EquilibriumOptimizer max'ed out by '{opt_name}'"
+ ". You can safely raise the current threshold of "
+ "{config.optdb__max_use_ratio:f} with the aesara flag 'optdb__max_use_ratio'."
)
if config.on_opt_error == "raise":
raise AssertionError(msg)
else:
_logger.error(msg)
fgraph.remove_feature(change_tracker)
assert len(loop_process_count) == len(loop_timing)
assert len(loop_process_count) == len(global_opt_timing)
assert len(loop_process_count) == len(nb_nodes)
assert len(loop_process_count) == len(io_toposort_timing)
assert len(loop_process_count) == len(global_sub_profs)
assert len(loop_process_count) == len(final_sub_profs)
assert len(loop_process_count) == len(cleanup_sub_profs)
return (
self,
loop_timing,
loop_process_count,
(start_nb_nodes, end_nb_nodes, max_nb_nodes),
global_opt_timing,
nb_nodes,
time_opts,
io_toposort_timing,
node_created,
global_sub_profs,
final_sub_profs,
cleanup_sub_profs,
)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
name = getattr(self, "name", None)
print(
f"{' ' * level}{self.__class__.__name__} {name} id={id(self)}", file=stream
)
if depth != 0:
for lopt in self.get_local_optimizers():
lopt.print_summary(stream, level=(level + 2), depth=(depth - 1))
@staticmethod
def print_profile(stream, prof, level=0):
(
opt,
loop_timing,
loop_process_count,
(start_nb_nodes, end_nb_nodes, max_nb_nodes),
global_opt_timing,
nb_nodes,
time_opts,
io_toposort_timing,
node_created,
global_sub_profs,
final_sub_profs,
cleanup_sub_profs,
) = prof
blanc = " " * level
print(blanc, "EquilibriumOptimizer", end=" ", file=stream)
print(blanc, getattr(opt, "name", getattr(opt, "__name__", "")), file=stream)
print(
blanc,
f" time {sum(loop_timing):.3f}s for {len(loop_timing)} passes",
file=stream,
)
print(
blanc,
f" nb nodes (start, end, max) {int(start_nb_nodes)} {int(end_nb_nodes)} {int(max_nb_nodes)}",
file=stream,
)
print(blanc, f" time io_toposort {sum(io_toposort_timing):.3f}s", file=stream)
s = sum([time_opts[o] for o in opt.get_local_optimizers()])
print(blanc, f" time in local optimizers {s:.3f}s", file=stream)
s = sum([time_opts[o] for o in opt.global_optimizers])
print(blanc, f" time in global optimizers {s:.3f}s", file=stream)
s = sum([time_opts[o] for o in opt.final_optimizers])
print(blanc, f" time in final optimizers {s:.3f}s", file=stream)
s = sum([time_opts[o] for o in opt.cleanup_optimizers])
print(blanc, f" time in cleanup optimizers {s:.3f}s", file=stream)
for i in range(len(loop_timing)):
lopt = ""
if loop_process_count[i]:
d = list(
reversed(sorted(loop_process_count[i].items(), key=lambda a: a[1]))
)
lopt = " ".join([str((str(k), v)) for k, v in d[:5]])
if len(d) > 5:
lopt += " ..."
print(
blanc,
(
f" {int(i):2d} - {loop_timing[i]:.3f}s {int(sum(loop_process_count[i].values()))} ({global_opt_timing[i]:.3f}s in global opts, "
f"{io_toposort_timing[i]:.3f}s io_toposort) - {int(nb_nodes[i])} nodes - {lopt}"
),
file=stream,
)
count_opt = []
not_used = []
not_used_time = 0
process_count = {}
for o in (
opt.global_optimizers
+ list(opt.get_local_optimizers())
+ list(opt.final_optimizers)
+ list(opt.cleanup_optimizers)
):
process_count.setdefault(o, 0)
for count in loop_process_count:
for o, v in count.items():
process_count[o] += v
for o, count in process_count.items():
if count > 0:
count_opt.append((time_opts[o], count, node_created[o], o))
else:
not_used.append((time_opts[o], o))
not_used_time += time_opts[o]
if count_opt:
print(
blanc, " times - times applied - nb node created - name:", file=stream
)
count_opt.sort()
for (t, count, n_created, o) in count_opt[::-1]:
print(
blanc,
f" {t:.3f}s - {int(count)} - {int(n_created)} - {o}",
file=stream,
)
print(
blanc,
f" {not_used_time:.3f}s - in {len(not_used)} optimization that were not used (display only those with a runtime > 0)",
file=stream,
)
not_used.sort(key=lambda nu: (nu[0], str(nu[1])))
for (t, o) in not_used[::-1]:
if t > 0:
                # Skip opts with 0 time; they probably weren't even tried.
print(blanc + " ", f" {t:.3f}s - {o}", file=stream)
print(file=stream)
gf_opts = [
o
for o in (
opt.global_optimizers
+ list(opt.final_optimizers)
+ list(opt.cleanup_optimizers)
)
if o.print_profile.__code__ is not GlobalOptimizer.print_profile.__code__
]
if not gf_opts:
return
print(blanc, "Global, final and clean up optimizers", file=stream)
for i in range(len(loop_timing)):
print(blanc, f"Iter {int(i)}", file=stream)
for o, prof in zip(opt.global_optimizers, global_sub_profs[i]):
try:
o.print_profile(stream, prof, level + 2)
except NotImplementedError:
print(blanc, "merge not implemented for ", o)
for o, prof in zip(opt.final_optimizers, final_sub_profs[i]):
try:
o.print_profile(stream, prof, level + 2)
except NotImplementedError:
print(blanc, "merge not implemented for ", o)
for o, prof in zip(opt.cleanup_optimizers, cleanup_sub_profs[i]):
try:
o.print_profile(stream, prof, level + 2)
except NotImplementedError:
print(blanc, "merge not implemented for ", o)
@staticmethod
def merge_profile(prof1, prof2):
# (opt, loop_timing, loop_process_count, max_nb_nodes,
# global_opt_timing, nb_nodes, time_opts, io_toposort_timing) = prof1
local_optimizers = OrderedSet(prof1[0].get_local_optimizers()).union(
prof2[0].get_local_optimizers()
)
global_optimizers = OrderedSet(prof1[0].global_optimizers).union(
prof2[0].global_optimizers
)
final_optimizers = list(
OrderedSet(prof1[0].final_optimizers).union(prof2[0].final_optimizers)
)
cleanup_optimizers = list(
OrderedSet(prof1[0].cleanup_optimizers).union(prof2[0].cleanup_optimizers)
)
new_opt = EquilibriumOptimizer(
local_optimizers.union(global_optimizers),
max_use_ratio=1,
final_optimizers=final_optimizers,
cleanup_optimizers=cleanup_optimizers,
)
def add_append_list(l1, l2):
l = copy.copy(l1)
for idx, nb in enumerate(l2):
if idx < len(l):
l[idx] += nb
else:
l.append(nb)
return l
loop_timing = add_append_list(prof1[1], prof2[1])
loop_process_count = list(prof1[2])
global_sub_profs = []
final_sub_profs = []
cleanup_sub_profs = []
for i in range(min(len(loop_process_count), len(prof2[2]))):
process_count = loop_process_count[i]
for process, count in prof2[2][i].items():
if process in process_count:
process_count[process] += count
else:
process_count[process] = count
def merge(opts, attr, idx):
tmp = []
for opt in opts:
o1 = getattr(prof1[0], attr)
o2 = getattr(prof2[0], attr)
if opt in o1 and opt in o2:
p1 = prof1[idx][i][o1.index(opt)]
p2 = prof2[idx][i][o2.index(opt)]
m = None
if hasattr(opt, "merge_profile"):
m = opt.merge_profile(p1, p2)
elif opt in o1:
m = prof1[idx][i][o1.index(opt)]
else:
m = prof2[idx][i][o2.index(opt)]
tmp.append(m)
return tmp
global_sub_profs.append(merge(global_optimizers, "global_optimizers", 9))
final_sub_profs.append(merge(final_optimizers, "final_optimizers", 10))
cleanup_sub_profs.append(
merge(cleanup_optimizers, "cleanup_optimizers", 11)
)
# Add the iteration done by only one of the profile.
loop_process_count.extend(prof1[2][len(loop_process_count) :])
global_sub_profs.extend(prof1[9][len(global_sub_profs) :])
final_sub_profs.extend(prof1[10][len(final_sub_profs) :])
cleanup_sub_profs.extend(prof1[11][len(cleanup_sub_profs) :])
global_sub_profs.extend(prof2[9][len(loop_process_count) :])
final_sub_profs.extend(prof2[10][len(loop_process_count) :])
cleanup_sub_profs.extend(prof2[11][len(loop_process_count) :])
max_nb_nodes = max(prof1[3], prof2[3])
global_opt_timing = add_append_list(prof1[4], prof2[4])
nb_nodes = add_append_list(prof1[5], prof2[5])
time_opts = merge_dict(prof1[6], prof2[6])
io_toposort_timing = add_append_list(prof1[7], prof2[7])
assert (
len(loop_timing)
== len(global_opt_timing)
== len(global_sub_profs)
== len(io_toposort_timing)
== len(nb_nodes)
)
assert len(loop_timing) == max(len(prof1[1]), len(prof2[1]))
node_created = merge_dict(prof1[8], prof2[8])
return (
new_opt,
loop_timing,
loop_process_count,
max_nb_nodes,
global_opt_timing,
nb_nodes,
time_opts,
io_toposort_timing,
node_created,
global_sub_profs,
final_sub_profs,
cleanup_sub_profs,
)
def _check_chain(r, chain):
"""
WRITEME
"""
chain = list(reversed(chain))
while chain:
elem = chain.pop()
if elem is None:
if r.owner is not None:
return False
elif r.owner is None:
return False
elif isinstance(elem, Op):
if not r.owner.op == elem:
return False
else:
try:
if issubclass(elem, Op) and not isinstance(r.owner.op, elem):
return False
except TypeError:
return False
if chain:
r = r.owner.inputs[chain.pop()]
# print 'check_chain', _check_chain.n_calls
# _check_chain.n_calls += 1
# The return value will be used as a Boolean, but some Variables cannot
# be used as Booleans (the results of comparisons, for instance)
return r is not None
def check_chain(r, *chain):
"""
WRITEME
"""
if isinstance(r, Apply):
r = r.outputs[0]
return _check_chain(r, reduce(list.__iadd__, ([x, 0] for x in chain)))
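# Illustrative note (not part of the original module): ``check_chain`` flattens
# its ``*chain`` arguments into the alternating [op, input_index, ...] form that
# ``_check_chain`` expects, always using input index 0.  With two hypothetical
# ops ``op_a`` and ``op_b``:
#
#     check_chain(r, op_a, op_b)
#     # is equivalent to _check_chain(r, [op_a, 0, op_b, 0]), i.e. "r was made
#     # by op_a, and op_a's first input was made by op_b".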
def pre_greedy_local_optimizer(fgraph, optimizations, out):
"""Apply local optimizations to a graph.
This function traverses the computation graph in the graph before the
variable `out` but that are not in the `fgraph`. It applies
`optimizations` to each variable on the traversed graph.
.. warning::
This changes the nodes in a graph in-place.
Its main use is to apply locally constant folding when generating
the graph of the indices of a subtensor.
Changes should not be applied to nodes that are in an `fgraph`,
so we use `fgraph` to prevent that.
Notes
-----
This doesn't do an equilibrium optimization, so, if there is an
optimization--like `local_upcast_elemwise_constant_inputs`--in the list
that adds additional nodes to the inputs of the node, it might be necessary
to call this function multiple times.
Parameters
----------
fgraph : FunctionGraph
The graph used to avoid/filter nodes.
optimizations : list of LocalOptimizer
The list of local optimizations to apply
out : Variable
A `Variable` specifying the graph to optimize.
"""
def local_recursive_function(list_opt, out, optimized_vars, depth):
if not getattr(out, "owner", None):
return [out], optimized_vars
node = out.owner
if node in fgraph.apply_nodes:
return node.outputs, optimized_vars
# Walk up the graph via the node's inputs
for idx, inp in enumerate(node.inputs):
if inp in optimized_vars:
nw_in = optimized_vars[inp]
else:
if inp.owner:
outs, optimized_vars = local_recursive_function(
list_opt, inp, optimized_vars, depth + 1
)
for k, v in zip(inp.owner.outputs, outs):
optimized_vars[k] = v
nw_in = outs[inp.owner.outputs.index(inp)]
else:
nw_in = inp
optimized_vars[inp] = inp
# XXX: An in-place change
node.inputs[idx] = nw_in
# Apply the optimizations
results = node.outputs
for opt in list_opt:
ret = opt.transform(fgraph, node)
if ret is not False and ret is not None:
assert len(ret) == len(node.outputs), opt
for k, v in zip(node.outputs, ret):
optimized_vars[k] = v
results = ret
if ret[0].owner:
node = out.owner
else:
break
return results, optimized_vars
if out.owner:
out_index = out.owner.outputs.index(out)
else:
out_index = 0
final_outs, optimized_nodes = local_recursive_function(optimizations, out, {}, 0)
return final_outs[out_index]
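# Usage sketch (hedged, not from the original source): assuming ``fgraph`` is a
# FunctionGraph, ``index_variable`` is a Variable not yet in ``fgraph``, and
# ``some_local_opts`` is a hypothetical list of LocalOptimizer instances (e.g.
# constant folding), the index graph could be simplified in-place with:
#
#     simplified = pre_greedy_local_optimizer(fgraph, some_local_opts, index_variable)
#
# Only nodes outside ``fgraph.apply_nodes`` are rewritten.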
def copy_stack_trace(from_var, to_var):
r"""Copy the stack traces from `from_var` to `to_var`.
Parameters
----------
from_var :
`Variable` or list `Variable`\s to copy stack traces from.
to_var :
`Variable` or list `Variable`\s to copy stack traces to.
Notes
-----
The stacktrace is assumed to be of the form of a list of lists
of tuples. Each tuple contains the filename, line number, function name
    and so on. Each list of tuples contains the tuples belonging to a
particular `Variable`.
"""
# Store stack traces from from_var
tr = []
if isinstance(from_var, Iterable) and not isinstance(from_var, Variable):
# If from_var is a list, store concatenated stack traces
for v in from_var:
tr += getattr(v.tag, "trace", [])
else:
# If from_var is not a list, it must be a single tensor variable,
# so just store that particular stack trace
tr = getattr(from_var.tag, "trace", [])
if tr and isinstance(tr[0], tuple):
# There was one single stack trace, we encapsulate it in a list
tr = [tr]
# Copy over stack traces to to_var
if isinstance(to_var, Iterable) and not isinstance(to_var, Variable):
# Copy over stack traces from from_var to each variable in
# to_var, including the stack_trace of the to_var before
for v in to_var:
v.tag.trace = getattr(v.tag, "trace", []) + tr
else:
# Copy over stack traces from from_var to each variable to
# to_var, including the stack_trace of the to_var before
to_var.tag.trace = getattr(to_var.tag, "trace", []) + tr
return to_var
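# Usage sketch (hedged, assuming Aesara is importable; ``x`` and ``y`` are
# placeholder variables, not from the original source):
#
#     import aesara.tensor as at
#     x = at.scalar("x")
#     y = x + 1
#     # Propagate any stack trace recorded on ``x`` onto ``y`` (keeping whatever
#     # trace ``y`` already had), so debugging info survives a graph rewrite.
#     copy_stack_trace(x, y)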
@contextlib.contextmanager
def inherit_stack_trace(from_var):
"""
A context manager that copies the stack trace from one or more variable nodes to all
variable nodes constructed in the body. ``new_nodes`` is the list of all the newly created
variable nodes inside an optimization that is managed by ``graph.nodes_constructed``.
Parameters
----------
from_var :
`Variable` node or a list of `Variable` nodes to copy stack traces from.
"""
with nodes_constructed() as new_nodes:
yield
copy_stack_trace(from_var, new_nodes)
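# Usage sketch (hedged; ``node`` and ``build_replacement`` are placeholders):
# inside a local optimization, wrap construction of the replacement graph so all
# newly created variables inherit the stack trace of the node being replaced:
#
#     with inherit_stack_trace(node.outputs):
#         new_out = build_replacement(node)  # hypothetical helper
#     return [new_out]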
def check_stack_trace(f_or_fgraph, ops_to_check="last", bug_print="raise"):
r"""Checks if the outputs of specific `Op`\s have a stack trace.
Parameters
----------
f_or_fgraph : Function or FunctionGraph
The compiled function or the function graph to be analysed.
ops_to_check
This value can be of four different types:
- classes or instances inheriting from `Op`
- tuple/list of classes or instances inheriting from `Op`
- string
- function returning a boolean and taking as input an instance of `Op`
- if `ops_to_check` is a string, it should be either ``'last'`` or ``'all'``.
``'last'`` will check only the last `Op` of the graph while ``'all'`` will
check all the `Op`\s of the graph.
- if `ops_to_check` is an `Op` or a tuple/list of `Op`\s, the function will
check that all the outputs of their occurrences in the graph have a
stack trace.
- if `ops_to_check` is a function, it should take as input a
`Op` and return a boolean indicating if the input `Op` should
be checked or not.
bug_print
This value is a string belonging to ``{'raise', 'warn', 'ignore'}``.
You can specify the behaviour of the function when the specified
`ops_to_check` are not in the graph of `f_or_fgraph`: it can either raise
an exception, write a warning or simply ignore it.
Returns
-------
boolean
``True`` if the outputs of the specified ops have a stack, ``False``
otherwise.
"""
if isinstance(f_or_fgraph, aesara.compile.function.types.Function):
fgraph = f_or_fgraph.maker.fgraph
elif isinstance(f_or_fgraph, aesara.graph.fg.FunctionGraph):
fgraph = f_or_fgraph
else:
raise ValueError("The type of f_or_fgraph is not supported")
if isinstance(ops_to_check, Op) or (
inspect.isclass(ops_to_check) and issubclass(ops_to_check, Op)
):
ops_to_check = (ops_to_check,)
# if ops_to_check is a string
if isinstance(ops_to_check, str):
if ops_to_check == "last":
apply_nodes_to_check = [
fgraph.outputs[i].owner for i in range(len(fgraph.outputs))
]
elif ops_to_check == "all":
apply_nodes_to_check = fgraph.apply_nodes
else:
raise ValueError("The string ops_to_check is not recognised")
# if ops_to_check is a list/tuple of ops
elif isinstance(ops_to_check, (tuple, list)):
# Separate classes from instances in ops_to_check
op_instances = []
op_classes = []
for obj in ops_to_check:
if isinstance(obj, Op):
op_instances.append(obj)
else:
op_classes.append(obj)
op_classes = tuple(op_classes)
apply_nodes_to_check = [
node for node in fgraph.apply_nodes if node.op in ops_to_check
] + [
node
for node in fgraph.apply_nodes
if isinstance(node.op, op_classes)
or (
hasattr(node.op, "scalar_op")
and isinstance(node.op.scalar_op, op_classes)
)
]
# if ops_to_check is a function
elif callable(ops_to_check):
apply_nodes_to_check = [
node for node in fgraph.apply_nodes if ops_to_check(node)
]
else:
raise ValueError("ops_to_check does not have the right type")
if not apply_nodes_to_check:
msg = (
"Provided op instances/classes are not in the graph or the "
"graph is empty"
)
if bug_print == "warn":
warnings.warn(msg)
elif bug_print == "raise":
raise Exception(msg)
elif bug_print == "ignore":
pass
else:
raise ValueError("The string bug_print is not recognised")
for node in apply_nodes_to_check:
for output in node.outputs:
if not hasattr(output.tag, "trace") or not output.tag.trace:
return False
return True
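# Usage sketch (hedged): given a compiled Aesara function ``fn`` (a placeholder
# name), typical checks would look like:
#
#     check_stack_trace(fn, ops_to_check="last")       # only the output nodes
#     check_stack_trace(fn, ops_to_check="all")        # every node in the graph
#     check_stack_trace(fn, ops_to_check=SomeOpClass)  # hypothetical Op subclass
#
# Each call returns True only if every checked output carries a non-empty
# ``tag.trace``.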
class CheckStackTraceFeature(Feature):
def on_import(self, fgraph, node, reason):
# In optdb we only register the CheckStackTraceOptimization when
# config.check_stack_trace is not off but we also double check here.
if config.check_stack_trace != "off" and not check_stack_trace(fgraph, "all"):
if config.check_stack_trace == "raise":
raise AssertionError(
"Empty stack trace! The optimization that inserted this variable is "
+ str(reason)
)
elif config.check_stack_trace in ["log", "warn"]:
apply_nodes_to_check = fgraph.apply_nodes
for node in apply_nodes_to_check:
for output in node.outputs:
if not hasattr(output.tag, "trace") or not output.tag.trace:
output.tag.trace = [
[
(
"",
0,
"Empty stack trace! The optimization that"
+ "inserted this variable is "
+ str(reason),
"",
)
]
]
if config.check_stack_trace == "warn":
warnings.warn(
"Empty stack trace! The optimization that inserted this variable is"
+ str(reason)
)
class CheckStackTraceOptimization(GlobalOptimizer):
"""Optimizer that serves to add `CheckStackTraceOptimization` as a feature."""
def add_requirements(self, fgraph):
if not hasattr(fgraph, "CheckStackTraceFeature"):
fgraph.attach_feature(CheckStackTraceFeature())
def apply(self, fgraph):
pass
|
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for smbios_validation_tool.matcher."""
import os
import dmiparse
from smbios_validation_tool import constants
from smbios_validation_tool import matcher
from google3.pyglib import resources
from google3.testing.pybase import googletest
TEST_PATH = 'google3/third_party/py/smbios_validation_tool/test_data'
class MatcherTest(googletest.TestCase):
def setUp(self):
super(MatcherTest, self).setUp()
data_path = os.path.join(TEST_PATH, 'less_compliant_smbios_records.txt')
data_file = resources.GetResourceFilename(data_path)
self.records, _ = dmiparse.DmiParser(data_file).parse()
self.assertLen(self.records, 294)
def testRecordTypeMatcherMatchesSingleRecord(self):
matchers = matcher.Matcher(
[matcher.RecordTypeMatcher(constants.RecordType.BIOS_RECORD)])
matched_records = []
for _, record in self.records.items():
if matchers.is_matched_record(record):
matched_records.append(record)
self.assertLen(matched_records, 1)
def testRecordTypeMatcherMatchesMultipleRecords(self):
matchers = matcher.Matcher([
matcher.RecordTypeMatcher(
constants.RecordType.GROUP_ASSOCIATIONS_RECORD)
])
matched_records = []
for _, record in self.records.items():
if matchers.is_matched_record(record):
matched_records.append(record)
self.assertLen(matched_records, 3)
if __name__ == '__main__':
googletest.main()
|
import os
import json
from rs_grading import do_autograde, do_calculate_totals
# db and settings are assumed to be provided by the surrounding web2py environment.
userinfo = json.loads(os.environ["RSM_USERINFO"])
# print(userinfo['course'], userinfo['pset'])
# # print(db.keys())
# print(settings)
assignmentid = userinfo["pset"]
assignment = db(db.assignments.id == assignmentid).select().first()
course = db(db.courses.course_name == userinfo["course"]).select().first()
do_autograde(
assignment,
course.id,
course.course_name,
sid=None,
student_rownum=None,
question_name=None,
enforce_deadline=userinfo["enforce_deadline"],
# I don't know what this is for, but if you want to set this to Michigan timezone offset, it should be 4
# not 5.
timezoneoffset=240,
db=db,
settings=settings,
)
do_calculate_totals(
assignment, course.id, course.course_name, sid=None, db=db, settings=settings
)
|
"""empty message
Revision ID: 7361ebe97e4b
Revises:
Create Date: 2020-07-28 17:14:05.345504
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7361ebe97e4b'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
op.create_table('measurement',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('date', sa.Date(), nullable=True),
sa.Column('weight', sa.Float(), nullable=True),
sa.Column('bmi', sa.Float(), nullable=True),
sa.Column('body_fat', sa.Float(), nullable=True),
sa.Column('muscle', sa.Float(), nullable=True),
sa.Column('rm_kcal', sa.Float(), nullable=True),
sa.Column('visceral_fat', sa.Float(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_measurement_date'), 'measurement', ['date'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_measurement_date'), table_name='measurement')
op.drop_table('measurement')
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
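# Note (not part of the auto-generated migration): with a configured alembic.ini
# pointing at the target database, this revision can be applied and rolled back
# from the command line, e.g.:
#
#     alembic upgrade 7361ebe97e4b   # or: alembic upgrade head
#     alembic downgrade -1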
|
# chordNode.py>
import hashlib
import logging
EXTRA_STR = "String to help push hashed values further from one another."
MAX_HASH = 2**30
def chord_hash(key: str):
hash_hex = hashlib.sha256(key.encode('utf-8')).hexdigest()
return (int(hash_hex, 16) % MAX_HASH)
class ChordNode:
def __init__(self, key: str):
self.key = key
self.hash_id = chord_hash(key)
def __str__(self) -> str:
return 'Node(' + self.key + ',' + str(self.hash_id) + ')'
# Create a new chord ring from an array of broker addresses
def create_chord_ring(brokers):
# build chord nodes from broker addresses
chord_ring = []
for address in brokers:
node = ChordNode(address)
chord_ring.append(node)
sortChordRing(chord_ring)
return chord_ring
def find_chord_successor(key: str, chord_ring, index: int = None):
"""Find the successor in the ring for the key.
Parameters:
- key: string to be hashed an sought after in the chord ring
- chord_ring: array of ChordNode (assumed that this array is sorted by hash index)
- index: optional value for when you know a Broker's index and can get a faster result
Returns: tuple (ChordNode, index) chord node representing the successor and an
int value for its index. Could return (None, -1) is the chord_ring is empty.
"""
if len(chord_ring) == 0:
return None, -1
    if index is not None:
succ_index = successor_index(index, len(chord_ring))
return chord_ring[succ_index], succ_index
# calculate hash of key
id = chord_hash(key)
# Slow Linear version of finding the Identifier
for i in range(1, len(chord_ring)):
        # Current range is [chord_ring[i-1].hash_id, chord_ring[i].hash_id)
lower = chord_ring[i-1].hash_id
upper = chord_ring[i].hash_id
if id >= lower and id < upper:
return chord_ring[i], i
# Exiting the for loop means that the ID didn't belong
# to any of the ranges we checked through. So it must belong
# to Node[0] => either ID <= Node[0].ID OR ID > Node[last].ID
return chord_ring[0], 0
def find_chord_predecessor(key: str, chord_ring, index: int = None):
"""Find the predecessor in the ring for the key.
Parameters:
- key: string to be hashed an sought after in the chord ring
- chord_ring: array of ChordNode (assumed that this array is sorted by hash index)
- index: optional value for when you know a Broker's index and can get a faster result
Returns: tuple (ChordNode, index) chord node representing the predecessor and an
int value for its index. Could return (None, -1) is the chord_ring is empty.
"""
if len(chord_ring) == 0:
return None, -1
    if index is not None and index >= 0:
pred_index = predecessor_index(index, len(chord_ring))
return chord_ring[pred_index], pred_index
# calculate hash of key
id = chord_hash(key)
# Slow Linear version of finding the Identifier
for i in range(1, len(chord_ring)):
        # Current range is (chord_ring[i-1].hash_id, chord_ring[i].hash_id]
lower = chord_ring[i-1].hash_id
upper = chord_ring[i].hash_id
if id > lower and id <= upper:
return chord_ring[i-1], i-1
# Exiting the for loop means that the ID didn't belong
# to any of the ranges we checked through. So it must belong
# to Node[0] => either ID <= Node[0].ID OR ID > Node[last].ID
last = len(chord_ring) - 1
return chord_ring[last], last
def find_prime_chord_segment(address: str, chord_ring):
"""Find address tuple (start, end), both inclusive values, that represent
the start and end of the primary responsibility segment of this address.
- Tip: address does not need to belong to an existing broker on the ring, but
it may.
- If chord_ring is empty, then returned segment will be entire chord ring,
which is [0, MAX_HASH]
"""
pred_node, pred_index = find_chord_predecessor(address, chord_ring)
    if pred_node is None:
return 0, MAX_HASH
start = pred_node.hash_id + 1
end = chord_hash(address)
return start, end
def find_repl_chord_segment(address: str, chord_ring):
    """Find the (start, end) replication segment for this address: the union of
    the primary segments of its two immediate predecessors on the ring.
    Returns (-1, -1) when the ring contains no predecessor other than the
    address itself.
    """
    # pred1 is immediate predecessor of address
pred1, pred1_index = find_chord_predecessor(address, chord_ring)
    # pred2 is the predecessor of pred1
pred2, pred2_index = find_chord_predecessor(pred1.key, chord_ring, pred1_index)
repl_start = -1
repl_end = -1
if pred1.key != address:
repl_start, repl_end = find_prime_chord_segment(pred1.key, chord_ring)
if pred2.key != address:
repl2_start, repl2_end = find_prime_chord_segment(pred2.key, chord_ring)
repl_start = repl2_start
return repl_start, repl_end
# Detect what part of the chord ring changed
# Return Boolean if your predecessor changed.
def check_if_new_leader(new_ring, old_ring, my_address):
# determine indices of this Broker in both rings
new_index = my_ring_index(new_ring, my_address)
if new_index == -1:
logging.warning("Addr {} was not found on the new chord ring".format(my_address))
return False
old_index = my_ring_index(old_ring, my_address)
if old_index == -1:
return True
# determine indices of predecessors on both rings
new_pred = predecessor_index(new_index, len(new_ring))
old_pred = predecessor_index(old_index, len(old_ring))
if new_ring[new_pred].key != old_ring[old_pred].key and new_index < old_index:
return True
else:
return False
def segment_range(start, end):
if start == -1 and end == -1:
return 0
elif start <= end:
return (end - start + 1)
else:
return (MAX_HASH - start) + (end + 1)
def in_segment_range(value, start, end):
if start <= end:
return value >= start and value <= end
else:
return value >= start or value <= end
# Retrieve the Index of Broker in the Chord Ring
def my_ring_index(chord_ring, my_address):
    for i, node in enumerate(chord_ring):
if node.key == my_address:
return i
return -1
def predecessor_index(my_index, ring_length):
if my_index > 0:
return my_index - 1
else:
return ring_length - 1
def successor_index(my_index, ring_length):
return (my_index + 1) % ring_length
def customChordSort(node: ChordNode):
return node.hash_id
# sortChordRing will modify your array in memory. It will not return
# a new array.
def sortChordRing(node_array):
node_array.sort(key=customChordSort)
return
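if __name__ == "__main__":
    # Small self-contained demo (not part of the original module): build a ring
    # from a few made-up broker addresses and exercise the lookup helpers
    # documented above.
    brokers = ["10.0.0.1:9092", "10.0.0.2:9092", "10.0.0.3:9092"]
    ring = create_chord_ring(brokers)
    for node in ring:
        print(node)
    # Route an arbitrary key to its successor and predecessor on the ring.
    succ, succ_idx = find_chord_successor("some-topic-key", ring)
    pred, pred_idx = find_chord_predecessor("some-topic-key", ring)
    print("successor:", succ, "predecessor:", pred)
    # Primary segment owned by one broker, and its wrap-around-aware size.
    start, end = find_prime_chord_segment(brokers[0], ring)
    print("primary segment of", brokers[0], "=", (start, end),
          "size:", segment_range(start, end))
    print("key in segment?", in_segment_range(chord_hash("some-topic-key"), start, end))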
|
import pygame as pg
from classes.game import Screen
from classes.player import Player
from classes.fruit import Fruit
from joblib import dump, load
from sklearn.neural_network import MLPClassifier
import pandas as pd
import sys
def main():
df = pd.read_csv("data.csv")
df.drop_duplicates()
y = df.id_move
df = df.drop("id_move", axis=1)
x = df
clf = MLPClassifier().fit(x, y)
dump(clf, "mlpc.model")
pg.init()
game = Screen(pg)
player = Player()
fruit = Fruit()
game.spriters.append(player)
game.spriters.append(fruit)
running = True
clock = game.pg.time.Clock()
while running:
clock.tick(60)
game.update()
for event in game.pg.event.get():
if event.type == game.pg.QUIT:
running = False
pressed = game.pg.key.get_pressed()
if pressed[game.pg.K_UP]:
player.move_up(game)
if pressed[game.pg.K_DOWN]:
player.move_down(game)
if pressed[game.pg.K_LEFT]:
player.move_left(game)
if pressed[game.pg.K_RIGHT]:
player.move_right(game)
# player.next_move(fruit, game)
player.select_move(clf.predict([[player.x, fruit.x, player.y, fruit.y]])[0], game)
# Colision
if player.x < fruit.x + fruit.width:
if player.x + player.width > fruit.x:
if player.y < fruit.y + fruit.height:
if player.y + player.height > fruit.y:
fruit.reposition()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""microcms package, minimalistic flatpage enhancement.
THIS SOFTWARE IS UNDER BSD LICENSE.
Copyright (c) 2010-2012 Daniele Tricoli <eriol@mornie.org>
Read LICENSE for more informations.
"""
VERSION = (0, 2, 0)
|
from website.conferences.model import DEFAULT_FIELD_NAMES
def serialize_meeting(meeting):
is_meeting = True
if hasattr(meeting, 'is_meeting') and meeting.is_meeting is not None:
is_meeting = meeting.is_meeting
return {
'endpoint': meeting.endpoint,
'name': meeting.name,
'info_url': meeting.info_url,
'homepage_link_text': meeting.field_names.get('homepage_link_text', DEFAULT_FIELD_NAMES.get('homepage_link_text', '')),
'logo_url': meeting.logo_url,
'active': meeting.active,
'admins': meeting.admins.all().values_list('username', flat=True),
'public_projects': meeting.public_projects,
'poster': meeting.poster,
'talk': meeting.talk,
'num_submissions': meeting.num_submissions,
'location': meeting.location,
'start_date': meeting.start_date,
'end_date': meeting.end_date,
'submission1': meeting.field_names.get('submission1', DEFAULT_FIELD_NAMES.get('submission1', '')),
'submission2': meeting.field_names.get('submission2', DEFAULT_FIELD_NAMES.get('submission2', '')),
'submission1_plural': meeting.field_names.get('submission1_plural', DEFAULT_FIELD_NAMES.get('submission1_plural', '')),
'submission2_plural': meeting.field_names.get('submission2_plural', DEFAULT_FIELD_NAMES.get('submission2_plural', '')),
'meeting_title_type': meeting.field_names.get('meeting_title_type', DEFAULT_FIELD_NAMES.get('meeting_title_type', '')),
'add_submission': meeting.field_names.get('add_submission', DEFAULT_FIELD_NAMES.get('add_submission', '')),
'mail_subject': meeting.field_names.get('mail_subject', DEFAULT_FIELD_NAMES.get('mail_subject', '')),
'mail_message_body': meeting.field_names.get('mail_message_body', DEFAULT_FIELD_NAMES.get('mail_message_body', '')),
'mail_attachment': meeting.field_names.get('mail_attachment', DEFAULT_FIELD_NAMES.get('mail_attachment', '')),
'is_meeting': is_meeting,
}
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
class FairseqOptimizer(object):
def __init__(self, args):
super().__init__()
self.args = args
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
pass
@property
def optimizer(self):
"""Return a torch.optim.optimizer.Optimizer instance."""
if not hasattr(self, '_optimizer'):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError('_optimizer must be an instance of torch.optim.Optimizer')
return self._optimizer
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
raise NotImplementedError
@property
def params(self):
"""Return an iterable of the parameters held by the optimizer."""
for param_group in self.optimizer.param_groups:
for p in param_group['params']:
yield p
def __getstate__(self):
return self._optimizer.__getstate__()
def get_lr(self):
"""Return the current learning rate."""
return self.optimizer.param_groups[0]['lr']
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def state_dict(self):
"""Return the optimizer's state dict."""
return self.optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
self.optimizer.load_state_dict(state_dict)
if optimizer_overrides is not None and len(optimizer_overrides) > 0:
# override learning rate, momentum, etc. with latest values
for group in self.optimizer.param_groups:
group.update(optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves."""
loss.backward()
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
for p in self.params:
if p.grad is not None:
p.grad.data.mul_(c)
def clip_grad_norm(self, max_norm):
"""Clips gradient norm."""
if max_norm > 0:
return torch.nn.utils.clip_grad_norm_(self.params, max_norm)
else:
return math.sqrt(sum(p.grad.data.norm()**2 for p in self.params if p.grad is not None))
def step(self, closure=None):
"""Performs a single optimization step."""
self.optimizer.step(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.params:
p.grad = None
self.optimizer.zero_grad()
@property
def supports_memory_efficient_fp16(self):
if hasattr(self.optimizer, 'supports_memory_efficient_fp16'):
return self.optimizer.supports_memory_efficient_fp16
return False
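# A minimal subclass sketch (hedged, not from the original source) showing how a
# concrete optimizer is expected to plug into this wrapper: build the underlying
# torch.optim instance in __init__ and expose its kwargs through
# optimizer_config so a checkpoint can be resumed with new hyperparameters.
# ``args.lr`` and ``args.momentum`` are assumed attributes on the args namespace.
class SimpleSGDOptimizer(FairseqOptimizer):
    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = torch.optim.SGD(
            params, lr=args.lr, momentum=args.momentum)
    @property
    def optimizer_config(self):
        # These values override whatever was stored in a checkpoint when
        # load_state_dict is called with optimizer_overrides.
        return {
            'lr': self.args.lr,
            'momentum': self.args.momentum,
        }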
|
import sys
import base64
import logging
import json
import gzip
import inspect
import collections
from copy import deepcopy
from datetime import datetime
import pytest
from pytest import fixture
import hypothesis.strategies as st
from hypothesis import given, assume
import six
from chalice import app
from chalice import NotFoundError
from chalice.test import Client
from chalice.app import (
APIGateway,
Request,
Response,
handle_extra_types,
MultiDict,
WebsocketEvent,
BadRequestError,
WebsocketDisconnectedError,
WebsocketEventSourceHandler,
ConvertToMiddleware,
WebsocketAPI,
ChaliceUnhandledError,
)
from chalice import __version__ as chalice_version
from chalice.deploy.validate import ExperimentalFeatureError
from chalice.deploy.validate import validate_feature_flags
# These are used to generate sample data for hypothesis tests.
STR_MAP = st.dictionaries(st.text(), st.text())
STR_TO_LIST_MAP = st.dictionaries(
st.text(),
st.lists(elements=st.text(), min_size=1, max_size=5)
)
HTTP_METHOD = st.sampled_from(['GET', 'POST', 'PUT', 'PATCH',
'OPTIONS', 'HEAD', 'DELETE'])
PATHS = st.sampled_from(['/', '/foo/bar'])
HTTP_BODY = st.none() | st.text()
HTTP_REQUEST = st.fixed_dictionaries({
'query_params': STR_TO_LIST_MAP,
'headers': STR_MAP,
'uri_params': STR_MAP,
'method': HTTP_METHOD,
'body': HTTP_BODY,
'context': STR_MAP,
'stage_vars': STR_MAP,
'is_base64_encoded': st.booleans(),
'path': PATHS,
})
HTTP_REQUEST = st.fixed_dictionaries({
'multiValueQueryStringParameters': st.fixed_dictionaries({}),
'headers': STR_MAP,
'pathParameters': STR_MAP,
'requestContext': st.fixed_dictionaries({
'httpMethod': HTTP_METHOD,
'resourcePath': PATHS,
}),
'body': HTTP_BODY,
'stageVariables': STR_MAP,
'isBase64Encoded': st.booleans(),
})
BINARY_TYPES = APIGateway().binary_types
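# Note (hedged, illustrative only): the strategies above are intended to feed
# hypothesis-driven tests elsewhere in this module, along the lines of:
#
#     @given(http_request=HTTP_REQUEST)
#     def test_can_parse_arbitrary_event(http_request):
#         Request(http_request, FakeLambdaContext())
#
# ``test_can_parse_arbitrary_event`` is a placeholder name, not necessarily a
# real test in this file.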
class FakeLambdaContextIdentity(object):
def __init__(self, cognito_identity_id, cognito_identity_pool_id):
self.cognito_identity_id = cognito_identity_id
self.cognito_identity_pool_id = cognito_identity_pool_id
class FakeLambdaContext(object):
def __init__(self):
self.function_name = 'test_name'
self.function_version = 'version'
self.invoked_function_arn = 'arn'
self.memory_limit_in_mb = 256
self.aws_request_id = 'id'
self.log_group_name = 'log_group_name'
self.log_stream_name = 'log_stream_name'
self.identity = FakeLambdaContextIdentity('id', 'id_pool')
# client_context is set by the mobile SDK and wont be set for chalice
self.client_context = None
def get_remaining_time_in_millis(self):
return 500
def serialize(self):
serialized = {}
serialized.update(vars(self))
serialized['identity'] = vars(self.identity)
return serialized
class FakeGoneException(Exception):
pass
class FakeExceptionFactory(object):
def __init__(self):
self.GoneException = FakeGoneException
class FakeClient(object):
def __init__(self, errors=None, infos=None):
if errors is None:
errors = []
if infos is None:
infos = []
self._errors = errors
self._infos = infos
self.calls = collections.defaultdict(lambda: [])
self.exceptions = FakeExceptionFactory()
def post_to_connection(self, ConnectionId, Data):
self._call('post_to_connection', ConnectionId, Data)
def delete_connection(self, ConnectionId):
self._call('close', ConnectionId)
def get_connection(self, ConnectionId):
self._call('info', ConnectionId)
if self._infos is not None:
info = self._infos.pop()
return info
def _call(self, name, *args):
self.calls[name].append(tuple(args))
if self._errors:
error = self._errors.pop()
raise error
class FakeSession(object):
def __init__(self, client=None, region_name='us-west-2'):
self.calls = []
self._client = client
self.region_name = region_name
def client(self, name, endpoint_url=None):
self.calls.append((name, endpoint_url))
return self._client
@pytest.fixture
def view_function():
    def _func():
        return {"hello": "world"}
    return _func
def create_request_with_content_type(content_type):
body = '{"json": "body"}'
event = {
'multiValueQueryStringParameters': '',
'headers': {'Content-Type': content_type},
'pathParameters': {},
'requestContext': {
'httpMethod': 'GET',
'resourcePath': '/',
},
'body': body,
'stageVariables': {},
'isBase64Encoded': False,
}
return app.Request(event, FakeLambdaContext())
def assert_response_body_is(response, body):
assert json.loads(response['body']) == body
def json_response_body(response):
return json.loads(response['body'])
def assert_requires_opt_in(app, flag):
with pytest.raises(ExperimentalFeatureError):
validate_feature_flags(app)
# Now ensure if we opt in to the feature, we don't
# raise an exception.
app.experimental_feature_flags.add(flag)
try:
validate_feature_flags(app)
except ExperimentalFeatureError:
raise AssertionError(
"Opting in to feature %s still raises an "
"ExperimentalFeatureError." % flag
)
def websocket_handler_for_route(route, app):
fn = app.websocket_handlers[route].handler_function
handler = WebsocketEventSourceHandler(
fn, WebsocketEvent, app.websocket_api)
return handler
@fixture
def sample_app():
demo = app.Chalice('demo-app')
@demo.route('/index', methods=['GET'])
def index():
return {'hello': 'world'}
@demo.route('/name/{name}', methods=['GET'])
def name(name):
return {'provided-name': name}
return demo
@fixture
def sample_app_with_cors():
demo = app.Chalice('demo-app')
@demo.route('/image', methods=['POST'], cors=True,
content_types=['image/gif'])
def image():
return {'image': True}
return demo
@fixture
def sample_app_with_default_cors():
demo = app.Chalice('demo-app')
demo.api.cors = True
@demo.route('/on', methods=['POST'],
content_types=['image/gif'])
def on():
return {'image': True}
@demo.route('/off', methods=['POST'], cors=False,
content_types=['image/gif'])
def off():
return {'image': True}
@demo.route('/default', methods=['POST'], cors=None,
content_types=['image/gif'])
def default():
return {'image': True}
return demo
@fixture
def sample_websocket_app():
demo = app.Chalice('app-name')
demo.websocket_api.session = FakeSession()
calls = []
@demo.on_ws_connect()
def connect(event):
demo.websocket_api.send(event.connection_id, 'connected')
calls.append(('connect', event))
@demo.on_ws_disconnect()
def disconnect(event):
demo.websocket_api.send(event.connection_id, 'message')
calls.append(('disconnect', event))
@demo.on_ws_message()
def message(event):
demo.websocket_api.send(event.connection_id, 'disconnected')
calls.append(('default', event))
return demo, calls
@fixture
def sample_middleware_app():
demo = app.Chalice('app-name')
demo.calls = []
@demo.middleware('all')
def mymiddleware(event, get_response):
demo.calls.append({'type': 'all',
'event': event.__class__.__name__})
return get_response(event)
@demo.middleware('s3')
def mymiddleware_s3(event, get_response):
demo.calls.append({'type': 's3',
'event': event.__class__.__name__})
return get_response(event)
@demo.middleware('sns')
def mymiddleware_sns(event, get_response):
demo.calls.append({'type': 'sns',
'event': event.__class__.__name__})
return get_response(event)
@demo.middleware('http')
def mymiddleware_http(event, get_response):
demo.calls.append({'type': 'http',
'event': event.__class__.__name__})
return get_response(event)
@demo.middleware('websocket')
def mymiddleware_websocket(event, get_response):
demo.calls.append({'type': 'websocket',
'event': event.__class__.__name__})
return get_response(event)
@demo.middleware('pure_lambda')
def mymiddleware_pure_lambda(event, get_response):
demo.calls.append({'type': 'pure_lambda',
'event': event.__class__.__name__})
return get_response(event)
@demo.route('/')
def index():
return {}
@demo.on_s3_event(bucket='foo')
def s3_handler(event):
pass
@demo.on_sns_message(topic='foo')
def sns_handler(event):
pass
@demo.on_sqs_message(queue='foo')
def sqs_handler(event):
pass
@demo.lambda_function()
def lambda_handler(event, context):
pass
@demo.on_ws_message()
def ws_handler(event):
pass
return demo
@fixture
def auth_request():
method_arn = (
"arn:aws:execute-api:us-west-2:123:rest-api-id/dev/GET/needs/auth")
request = app.AuthRequest('TOKEN', 'authtoken', method_arn)
return request
@pytest.mark.skipif(sys.version[0] == '2',
reason=('Test is irrelevant under python 2, since str and '
'bytes are interchangeable.'))
def test_invalid_binary_response_body_throws_value_error(sample_app):
response = app.Response(
status_code=200,
body={'foo': 'bar'},
headers={'Content-Type': 'application/octet-stream'}
)
with pytest.raises(ValueError):
response.to_dict(sample_app.api.binary_types)
def test_invalid_JSON_response_body_throws_type_error(sample_app):
response = app.Response(
status_code=200,
body={'foo': object()},
headers={'Content-Type': 'application/json'}
)
with pytest.raises(TypeError):
response.to_dict()
def test_can_encode_binary_body_as_base64(sample_app):
response = app.Response(
status_code=200,
body=b'foobar',
headers={'Content-Type': 'application/octet-stream'}
)
encoded_response = response.to_dict(sample_app.api.binary_types)
assert encoded_response['body'] == 'Zm9vYmFy'
def test_can_return_unicode_body(sample_app):
unicode_data = u'\u2713'
response = app.Response(
status_code=200,
body=unicode_data
)
encoded_response = response.to_dict()
assert encoded_response['body'] == unicode_data
def test_can_encode_binary_body_with_header_charset(sample_app):
response = app.Response(
status_code=200,
body=b'foobar',
headers={'Content-Type': 'application/octet-stream; charset=binary'}
)
encoded_response = response.to_dict(sample_app.api.binary_types)
assert encoded_response['body'] == 'Zm9vYmFy'
def test_can_encode_binary_json(sample_app):
sample_app.api.binary_types.extend(['application/json'])
response = app.Response(
status_code=200,
body={'foo': 'bar'},
headers={'Content-Type': 'application/json'}
)
encoded_response = response.to_dict(sample_app.api.binary_types)
assert encoded_response['body'] == 'eyJmb28iOiJiYXIifQ=='
def test_wildcard_accepts_with_native_python_types_serializes_json(
sample_app, create_event):
sample_app.api.binary_types = ['*/*']
@sample_app.route('/py-dict')
def py_dict():
return {'foo': 'bar'}
event = create_event('/py-dict', 'GET', {})
event['headers']['Accept'] = '*/*'
response = sample_app(event, context=None)
# In this case, they've return a native python dict type, which should
# be serialized to JSON and returned back to the user as JSON. Because
# we also have ``*/*`` as a binary type, we'll return the response
# as a binary response type.
assert base64.b64decode(response['body']) == b'{"foo":"bar"}'
assert response['isBase64Encoded']
def test_wildcard_accepts_with_response_class(
sample_app, create_event):
sample_app.api.binary_types = ['*/*']
@sample_app.route('/py-dict')
def py_dict():
return Response(body=json.dumps({'foo': 'bar'}).encode('utf-8'),
headers={'Content-Type': 'application/json'},
status_code=200)
event = create_event('/py-dict', 'GET', {})
event['headers']['Accept'] = '*/*'
response = sample_app(event, context=None)
# Because our binary types is '*/*' we should be returning this
# content as binary.
assert base64.b64decode(response['body']) == b'{"foo": "bar"}'
assert response['isBase64Encoded']
def test_can_parse_route_view_args():
entry = app.RouteEntry(lambda: {"foo": "bar"}, 'view-name',
'/foo/{bar}/baz/{qux}', method='GET')
assert entry.view_args == ['bar', 'qux']
def test_can_route_single_view():
demo = app.Chalice('app-name')
@demo.route('/index')
def index_view():
return {}
assert demo.routes['/index']['GET'] == app.RouteEntry(
index_view, 'index_view', '/index', 'GET',
content_types=['application/json'])
def test_can_handle_multiple_routes():
demo = app.Chalice('app-name')
@demo.route('/index')
def index_view():
return {}
@demo.route('/other')
def other_view():
return {}
assert len(demo.routes) == 2, demo.routes
assert '/index' in demo.routes, demo.routes
assert '/other' in demo.routes, demo.routes
assert demo.routes['/index']['GET'].view_function == index_view
assert demo.routes['/other']['GET'].view_function == other_view
def test_error_on_unknown_event(sample_app):
bad_event = {'random': 'event'}
raw_response = sample_app(bad_event, context=None)
assert raw_response['statusCode'] == 500
assert json_response_body(raw_response)['Code'] == 'InternalServerError'
def test_can_route_api_call_to_view_function(sample_app, create_event):
event = create_event('/index', 'GET', {})
response = sample_app(event, context=None)
assert_response_body_is(response, {'hello': 'world'})
def test_can_call_to_dict_on_current_request(sample_app, create_event):
@sample_app.route('/todict')
def todict():
return sample_app.current_request.to_dict()
event = create_event('/todict', 'GET', {})
response = json_response_body(sample_app(event, context=None))
assert isinstance(response, dict)
# The dict can change over time so we'll just pick
# out a few keys as a basic sanity test.
assert response['method'] == 'GET'
# We also want to verify that to_dict() is always
# JSON serializable so we check we can roundtrip
# the data to/from JSON.
assert isinstance(json.loads(json.dumps(response)), dict)
def test_can_call_to_dict_on_request_with_querystring(sample_app,
create_event):
@sample_app.route('/todict')
def todict():
return sample_app.current_request.to_dict()
event = create_event('/todict', 'GET', {})
event['multiValueQueryStringParameters'] = {
'key': ['val1', 'val2'],
'key2': ['val']
}
response = json_response_body(sample_app(event, context=None))
assert isinstance(response, dict)
# The dict can change over time so we'll just pick
# out a few keys as a basic sanity test.
assert response['method'] == 'GET'
assert response['query_params'] is not None
assert response['query_params']['key'] == 'val2'
assert response['query_params']['key2'] == 'val'
# We also want to verify that to_dict() is always
# JSON serializable so we check we can roundtrip
# the data to/from JSON.
assert isinstance(json.loads(json.dumps(response)), dict)
def test_request_to_dict_does_not_contain_internal_attrs(sample_app,
create_event):
@sample_app.route('/todict')
def todict():
return sample_app.current_request.to_dict()
event = create_event('/todict', 'GET', {})
response = json_response_body(sample_app(event, context=None))
internal_attrs = [key for key in response if key.startswith('_')]
assert not internal_attrs
def test_will_pass_captured_params_to_view(sample_app, create_event):
event = create_event('/name/{name}', 'GET', {'name': 'james'})
response = sample_app(event, context=None)
response = json_response_body(response)
assert response == {'provided-name': 'james'}
def test_error_on_unsupported_method(sample_app, create_event):
event = create_event('/name/{name}', 'POST', {'name': 'james'})
raw_response = sample_app(event, context=None)
assert raw_response['statusCode'] == 405
assert raw_response['headers']['Allow'] == 'GET'
assert json_response_body(raw_response)['Code'] == 'MethodNotAllowedError'
def test_error_on_unsupported_method_gives_feedback_on_method(sample_app,
create_event):
method = 'POST'
event = create_event('/name/{name}', method, {'name': 'james'})
raw_response = sample_app(event, context=None)
assert 'POST' in json_response_body(raw_response)['Message']
def test_error_contains_cors_headers(sample_app_with_cors, create_event):
event = create_event('/image', 'POST', {'not': 'image'})
raw_response = sample_app_with_cors(event, context=None)
assert raw_response['statusCode'] == 415
assert 'Access-Control-Allow-Origin' in raw_response['headers']
class TestDefaultCORS(object):
def test_cors_enabled(self, sample_app_with_default_cors, create_event):
event = create_event('/on', 'POST', {'not': 'image'})
raw_response = sample_app_with_default_cors(event, context=None)
assert raw_response['statusCode'] == 415
assert 'Access-Control-Allow-Origin' in raw_response['headers']
def test_cors_none(self, sample_app_with_default_cors, create_event):
event = create_event('/default', 'POST', {'not': 'image'})
raw_response = sample_app_with_default_cors(event, context=None)
assert raw_response['statusCode'] == 415
assert 'Access-Control-Allow-Origin' in raw_response['headers']
def test_cors_disabled(self, sample_app_with_default_cors, create_event):
event = create_event('/off', 'POST', {'not': 'image'})
raw_response = sample_app_with_default_cors(event, context=None)
assert raw_response['statusCode'] == 415
assert 'Access-Control-Allow-Origin' not in raw_response['headers']
def test_can_access_context(create_event):
demo = app.Chalice('app-name')
@demo.route('/index')
def index_view():
serialized = demo.lambda_context.serialize()
return serialized
event = create_event('/index', 'GET', {})
lambda_context = FakeLambdaContext()
result = demo(event, lambda_context)
result = json_response_body(result)
serialized_lambda_context = lambda_context.serialize()
assert result == serialized_lambda_context
def test_can_access_raw_body(create_event):
demo = app.Chalice('app-name')
@demo.route('/index')
def index_view():
return {'rawbody': demo.current_request.raw_body.decode('utf-8')}
event = create_event('/index', 'GET', {})
event['body'] = '{"hello": "world"}'
result = demo(event, context=None)
result = json_response_body(result)
assert result == {'rawbody': '{"hello": "world"}'}
def test_raw_body_cache_returns_same_result(create_event):
demo = app.Chalice('app-name')
@demo.route('/index')
def index_view():
# The first raw_body decodes base64,
# the second value should return the cached value.
# Both should be the same value
return {'rawbody': demo.current_request.raw_body.decode('utf-8'),
'rawbody2': demo.current_request.raw_body.decode('utf-8')}
event = create_event('/index', 'GET', {})
event['base64-body'] = base64.b64encode(
b'{"hello": "world"}').decode('ascii')
result = demo(event, context=None)
result = json_response_body(result)
assert result['rawbody'] == result['rawbody2']
def test_can_have_views_of_same_route_but_different_methods(create_event):
demo = app.Chalice('app-name')
@demo.route('/index', methods=['GET'])
def get_view():
return {'method': 'GET'}
@demo.route('/index', methods=['PUT'])
def put_view():
return {'method': 'PUT'}
assert demo.routes['/index']['GET'].view_function == get_view
assert demo.routes['/index']['PUT'].view_function == put_view
event = create_event('/index', 'GET', {})
result = demo(event, context=None)
assert json_response_body(result) == {'method': 'GET'}
event = create_event('/index', 'PUT', {})
result = demo(event, context=None)
assert json_response_body(result) == {'method': 'PUT'}
def test_error_on_duplicate_route_methods():
demo = app.Chalice('app-name')
@demo.route('/index', methods=['PUT'])
def index_view():
return {'foo': 'bar'}
with pytest.raises(ValueError):
@demo.route('/index', methods=['PUT'])
def index_view_dup():
return {'foo': 'bar'}
def test_json_body_available_with_right_content_type(create_event):
demo = app.Chalice('demo-app')
@demo.route('/', methods=['POST'])
def index():
return demo.current_request.json_body
event = create_event('/', 'POST', {})
event['body'] = json.dumps({'foo': 'bar'})
result = demo(event, context=None)
result = json_response_body(result)
assert result == {'foo': 'bar'}
def test_json_body_none_with_malformed_json(create_event):
demo = app.Chalice('demo-app')
@demo.route('/', methods=['POST'])
def index():
return demo.current_request.json_body
event = create_event('/', 'POST', {})
event['body'] = '{"foo": "bar"'
result = demo(event, context=None)
assert result['statusCode'] == 400
assert json_response_body(result)['Code'] == 'BadRequestError'
def test_cant_access_json_body_with_wrong_content_type(create_event):
demo = app.Chalice('demo-app')
@demo.route('/', methods=['POST'], content_types=['application/xml'])
def index():
return (demo.current_request.json_body,
demo.current_request.raw_body.decode('utf-8'))
event = create_event('/', 'POST', {}, content_type='application/xml')
event['body'] = '<Message>hello</Message>'
response = json_response_body(demo(event, context=None))
json_body, raw_body = response
assert json_body is None
assert raw_body == '<Message>hello</Message>'
def test_json_body_available_on_multiple_content_types(create_event_with_body):
demo = app.Chalice('demo-app')
@demo.route('/', methods=['POST'],
content_types=['application/xml', 'application/json'])
def index():
return (demo.current_request.json_body,
demo.current_request.raw_body.decode('utf-8'))
event = create_event_with_body('<Message>hello</Message>',
content_type='application/xml')
response = json_response_body(demo(event, context=None))
json_body, raw_body = response
assert json_body is None
assert raw_body == '<Message>hello</Message>'
# Now if we create an event with JSON, we should be able
# to access .json_body as well.
event = create_event_with_body({'foo': 'bar'},
content_type='application/json')
response = json_response_body(demo(event, context=None))
json_body, raw_body = response
assert json_body == {'foo': 'bar'}
assert raw_body == '{"foo": "bar"}'
def test_json_body_available_with_lowercase_content_type_key(
create_event_with_body):
demo = app.Chalice('demo-app')
@demo.route('/', methods=['POST'])
def index():
return (demo.current_request.json_body,
demo.current_request.raw_body.decode('utf-8'))
event = create_event_with_body({'foo': 'bar'})
del event['headers']['Content-Type']
event['headers']['content-type'] = 'application/json'
json_body, raw_body = json_response_body(demo(event, context=None))
assert json_body == {'foo': 'bar'}
assert raw_body == '{"foo": "bar"}'
def test_content_types_must_be_lists():
demo = app.Chalice('app-name')
with pytest.raises(ValueError):
@demo.route('/index', content_types='application/not-a-list')
def index_post():
return {'foo': 'bar'}
def test_content_type_validation_raises_error_on_unknown_types(create_event):
demo = app.Chalice('demo-app')
@demo.route('/', methods=['POST'], content_types=['application/xml'])
def index():
return "success"
bad_content_type = 'application/bad-xml'
event = create_event('/', 'POST', {}, content_type=bad_content_type)
event['body'] = 'Request body'
json_response = json_response_body(demo(event, context=None))
assert json_response['Code'] == 'UnsupportedMediaType'
assert 'application/bad-xml' in json_response['Message']
def test_content_type_with_charset(create_event):
demo = app.Chalice('demo-app')
@demo.route('/', content_types=['application/json'])
def index():
return {'foo': 'bar'}
event = create_event('/', 'GET', {}, 'application/json; charset=utf-8')
response = json_response_body(demo(event, context=None))
assert response == {'foo': 'bar'}
def test_can_return_response_object(create_event):
demo = app.Chalice('app-name')
@demo.route('/index')
def index_view():
return app.Response(
status_code=200,
body={'foo': 'bar'},
headers={
'Content-Type': 'application/json',
'Set-Cookie': ['key=value', 'foo=bar'],
},
)
event = create_event('/index', 'GET', {})
response = demo(event, context=None)
assert response == {
'statusCode': 200,
'body': '{"foo":"bar"}',
'headers': {'Content-Type': 'application/json'},
'multiValueHeaders': {'Set-Cookie': ['key=value', 'foo=bar']},
}
def test_headers_have_basic_validation(create_event):
demo = app.Chalice('app-name')
@demo.route('/index')
def index_view():
return app.Response(
status_code=200, body='{}',
headers={'Invalid-Header': 'foo\nbar'})
event = create_event('/index', 'GET', {})
response = demo(event, context=None)
assert response['statusCode'] == 500
assert 'Invalid-Header' not in response['headers']
assert json.loads(response['body'])['Code'] == 'InternalServerError'
def test_empty_headers_have_basic_validation(create_empty_header_event):
demo = app.Chalice('app-name')
@demo.route('/index')
def index_view():
return app.Response(
status_code=200, body='{}', headers={})
event = create_empty_header_event('/index', 'GET', {})
response = demo(event, context=None)
assert response['statusCode'] == 200
def test_no_content_type_is_still_allowed(create_event):
# When the content type validation happens in API gateway, it appears
# to assume a default of application/json, so the chalice handler needs
# to emulate that behavior.
demo = app.Chalice('demo-app')
@demo.route('/', methods=['POST'], content_types=['application/json'])
def index():
return {'success': True}
event = create_event('/', 'POST', {})
del event['headers']['Content-Type']
json_response = json_response_body(demo(event, context=None))
assert json_response == {'success': True}
@pytest.mark.parametrize('content_type,accept', [
('application/octet-stream', 'application/octet-stream'),
(
'application/octet-stream', (
'text/html,application/xhtml+xml,application/xml'
';q=0.9,image/webp,*/*;q=0.8'
)
),
('image/gif', 'text/html,image/gif'),
('image/gif', 'text/html ,image/gif'),
('image/gif', 'text/html, image/gif'),
('image/gif', 'text/html;q=0.8, image/gif ;q=0.5'),
('image/gif', 'text/html,image/png'),
('image/png', 'text/html,image/gif'),
])
def test_can_base64_encode_binary_multiple_media_types(
create_event, content_type, accept):
demo = app.Chalice('demo-app')
@demo.route('/index')
def index_view():
return app.Response(
status_code=200,
body=u'\u2713'.encode('utf-8'),
headers={'Content-Type': content_type})
event = create_event('/index', 'GET', {})
event['headers']['Accept'] = accept
response = demo(event, context=None)
assert response['statusCode'] == 200
assert response['isBase64Encoded'] is True
assert response['body'] == '4pyT'
assert response['headers']['Content-Type'] == content_type
def test_can_return_text_even_with_binary_content_type_configured(
create_event):
demo = app.Chalice('demo-app')
@demo.route('/index')
def index_view():
return app.Response(
status_code=200,
body='Plain text',
headers={'Content-Type': 'text/plain'})
event = create_event('/index', 'GET', {})
event['headers']['Accept'] = 'application/octet-stream'
response = demo(event, context=None)
assert response['statusCode'] == 200
assert response['body'] == 'Plain text'
assert response['headers']['Content-Type'] == 'text/plain'
def test_route_equality(view_function):
a = app.RouteEntry(
view_function,
view_name='myview', path='/',
method='GET',
api_key_required=True,
content_types=['application/json'],
)
b = app.RouteEntry(
view_function,
view_name='myview', path='/',
method='GET',
api_key_required=True,
content_types=['application/json'],
)
assert a == b
def test_route_inequality(view_function):
a = app.RouteEntry(
view_function,
view_name='myview', path='/',
method='GET',
api_key_required=True,
content_types=['application/json'],
)
b = app.RouteEntry(
view_function,
view_name='myview', path='/',
method='GET',
api_key_required=True,
# Different content types
content_types=['application/xml'],
)
assert not a == b
def test_exceptions_raised_as_chalice_errors(sample_app, create_event):
@sample_app.route('/error')
def raise_error():
raise TypeError("Raising arbitrary error, should never see.")
event = create_event('/error', 'GET', {})
# This is intentional behavior. If we're not in debug mode
# we don't want to surface internal errors that get raised.
# We should reply with a general internal server error.
raw_response = sample_app(event, context=None)
response = json_response_body(raw_response)
assert response['Code'] == 'InternalServerError'
assert raw_response['statusCode'] == 500
def test_original_exception_raised_in_debug_mode(sample_app, create_event):
sample_app.debug = True
@sample_app.route('/error')
def raise_error():
raise ValueError("You will see this error")
event = create_event('/error', 'GET', {})
response = sample_app(event, context=None)
# In debug mode, we let the original exception propagate.
# This includes the original type as well as the message.
assert response['statusCode'] == 500
assert 'ValueError' in response['body']
assert 'You will see this error' in response['body']
def test_chalice_view_errors_propagate_in_non_debug_mode(sample_app,
create_event):
@sample_app.route('/notfound')
def notfound():
raise NotFoundError("resource not found")
event = create_event('/notfound', 'GET', {})
raw_response = sample_app(event, context=None)
assert raw_response['statusCode'] == 404
assert json_response_body(raw_response)['Code'] == 'NotFoundError'
def test_chalice_view_errors_propagate_in_debug_mode(sample_app, create_event):
@sample_app.route('/notfound')
def notfound():
raise NotFoundError("resource not found")
sample_app.debug = True
event = create_event('/notfound', 'GET', {})
raw_response = sample_app(event, context=None)
assert raw_response['statusCode'] == 404
assert json_response_body(raw_response)['Code'] == 'NotFoundError'
def test_case_insensitive_mapping():
mapping = app.CaseInsensitiveMapping({'HEADER': 'Value'})
assert mapping['hEAdEr']
assert mapping.get('hEAdEr')
assert 'hEAdEr' in mapping
assert repr({'header': 'Value'}) in repr(mapping)
def test_unknown_kwargs_raise_error(sample_app, create_event):
with pytest.raises(TypeError):
@sample_app.route('/foo', unknown_kwargs='foo')
def badkwargs():
pass
def test_name_kwargs_does_not_raise_error(sample_app):
try:
@sample_app.route('/foo', name='foo')
def name_kwarg():
pass
except TypeError:
pytest.fail('route name kwarg should not raise TypeError.')
def test_default_logging_handlers_created():
handlers_before = logging.getLogger('log_app').handlers[:]
# configure_logs = True is the default, but we're
# being explicit here.
app.Chalice('log_app', configure_logs=True)
handlers_after = logging.getLogger('log_app').handlers[:]
new_handlers = set(handlers_after) - set(handlers_before)
# Should have added a new handler
assert len(new_handlers) == 1
def test_default_logging_only_added_once():
# And creating the same app object means we shouldn't
# configure logging again.
handlers_before = logging.getLogger('added_once').handlers[:]
app.Chalice('added_once', configure_logs=True)
    # With the same app name, we should still only configure
    # logs once.
app.Chalice('added_once', configure_logs=True)
handlers_after = logging.getLogger('added_once').handlers[:]
new_handlers = set(handlers_after) - set(handlers_before)
# Should have added a new handler
assert len(new_handlers) == 1
def test_logs_can_be_disabled():
handlers_before = logging.getLogger('log_app').handlers[:]
app.Chalice('log_app', configure_logs=False)
handlers_after = logging.getLogger('log_app').handlers[:]
new_handlers = set(handlers_after) - set(handlers_before)
assert len(new_handlers) == 0
@pytest.mark.parametrize('content_type,is_json', [
('application/json', True),
('application/json;charset=UTF-8', True),
('application/notjson', False),
])
def test_json_body_available_when_content_type_matches(content_type, is_json):
request = create_request_with_content_type(content_type)
if is_json:
assert request.json_body == {'json': 'body'}
else:
assert request.json_body is None
def test_can_receive_binary_data(create_event_with_body):
content_type = 'application/octet-stream'
demo = app.Chalice('demo-app')
@demo.route('/bincat', methods=['POST'], content_types=[content_type])
def bincat():
raw_body = demo.current_request.raw_body
return app.Response(
raw_body,
headers={'Content-Type': content_type},
status_code=200)
body = 'L3UyNzEz'
event = create_event_with_body(body, '/bincat', 'POST', content_type)
event['headers']['Accept'] = content_type
event['isBase64Encoded'] = True
response = demo(event, context=None)
assert response['statusCode'] == 200
assert response['body'] == body
def test_cannot_receive_base64_string_with_binary_response(
create_event_with_body):
content_type = 'application/octet-stream'
demo = app.Chalice('demo-app')
@demo.route('/bincat', methods=['GET'], content_types=[content_type])
def bincat():
return app.Response(
status_code=200,
body=u'\u2713'.encode('utf-8'),
headers={'Content-Type': content_type})
event = create_event_with_body('', '/bincat', 'GET', content_type)
response = demo(event, context=None)
assert response['statusCode'] == 400
def test_can_serialize_cognito_auth():
auth = app.CognitoUserPoolAuthorizer(
'Name', provider_arns=['Foo'], header='Authorization')
assert auth.to_swagger() == {
'in': 'header',
'type': 'apiKey',
'name': 'Authorization',
'x-amazon-apigateway-authtype': 'cognito_user_pools',
'x-amazon-apigateway-authorizer': {
'type': 'cognito_user_pools',
'providerARNs': ['Foo'],
}
}
def test_can_serialize_iam_auth():
auth = app.IAMAuthorizer()
assert auth.to_swagger() == {
'in': 'header',
'type': 'apiKey',
'name': 'Authorization',
'x-amazon-apigateway-authtype': 'awsSigv4',
}
def test_typecheck_list_type():
with pytest.raises(TypeError):
app.CognitoUserPoolAuthorizer('Name', 'Authorization',
provider_arns='foo')
def test_can_serialize_custom_authorizer():
auth = app.CustomAuthorizer(
'Name', 'myuri', ttl_seconds=10, header='NotAuth',
invoke_role_arn='role-arn'
)
assert auth.to_swagger() == {
'in': 'header',
'type': 'apiKey',
'name': 'NotAuth',
'x-amazon-apigateway-authtype': 'custom',
'x-amazon-apigateway-authorizer': {
'type': 'token',
'authorizerUri': 'myuri',
'authorizerResultTtlInSeconds': 10,
'authorizerCredentials': 'role-arn',
}
}
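# Illustrative sketch (not part of the test suite): attaching one of the
# authorizer objects serialized above to a route via the same authorizer=
# keyword used with the built-in authorizer tests further down. The pool name
# and ARN are placeholders made up for illustration.
def _sketch_route_with_authorizer():
    sketch = app.Chalice('auth-sketch')
    cognito = app.CognitoUserPoolAuthorizer(
        'SketchPool', provider_arns=['arn:aws:cognito-idp:example'],
        header='Authorization')

    @sketch.route('/private', authorizer=cognito)
    def private():
        return {'secure': True}
    return sketch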
class TestCORSConfig(object):
def test_eq(self):
cors_config = app.CORSConfig()
other_cors_config = app.CORSConfig()
assert cors_config == other_cors_config
def test_not_eq_different_type(self):
cors_config = app.CORSConfig()
different_type_obj = object()
assert not cors_config == different_type_obj
def test_not_eq_differing_configurations(self):
cors_config = app.CORSConfig()
differing_cors_config = app.CORSConfig(
allow_origin='https://foo.example.com')
assert cors_config != differing_cors_config
def test_eq_non_default_configurations(self):
custom_cors = app.CORSConfig(
allow_origin='https://foo.example.com',
allow_headers=['X-Special-Header'],
max_age=600,
expose_headers=['X-Special-Header'],
allow_credentials=True
)
same_custom_cors = app.CORSConfig(
allow_origin='https://foo.example.com',
allow_headers=['X-Special-Header'],
max_age=600,
expose_headers=['X-Special-Header'],
allow_credentials=True
)
assert custom_cors == same_custom_cors
def test_can_handle_builtin_auth():
demo = app.Chalice('builtin-auth')
@demo.authorizer()
def my_auth(auth_request):
pass
@demo.route('/', authorizer=my_auth)
def index_view():
return {}
assert len(demo.builtin_auth_handlers) == 1
authorizer = demo.builtin_auth_handlers[0]
assert isinstance(authorizer, app.BuiltinAuthConfig)
assert authorizer.name == 'my_auth'
assert authorizer.handler_string == 'app.my_auth'
def test_builtin_auth_can_transform_event():
event = {
'type': 'TOKEN',
'authorizationToken': 'authtoken',
'methodArn': 'arn:aws:execute-api:...:foo',
}
auth_app = app.Chalice('builtin-auth')
request = []
@auth_app.authorizer()
def builtin_auth(auth_request):
request.append(auth_request)
builtin_auth(event, None)
assert len(request) == 1
transformed = request[0]
assert transformed.auth_type == 'TOKEN'
assert transformed.token == 'authtoken'
assert transformed.method_arn == 'arn:aws:execute-api:...:foo'
def test_can_return_auth_dict_directly():
# A user can bypass our AuthResponse and return the auth response
# dict that API gateway expects.
event = {
'type': 'TOKEN',
'authorizationToken': 'authtoken',
'methodArn': 'arn:aws:execute-api:...:foo',
}
auth_app = app.Chalice('builtin-auth')
response = {
'context': {'foo': 'bar'},
'principalId': 'user',
'policyDocument': {
'Version': '2012-10-17',
'Statement': []
}
}
@auth_app.authorizer()
def builtin_auth(auth_request):
return response
actual = builtin_auth(event, None)
assert actual == response
def test_can_specify_extra_auth_attributes():
auth_app = app.Chalice('builtin-auth')
@auth_app.authorizer(ttl_seconds=10, execution_role='arn:my-role')
def builtin_auth(auth_request):
pass
handler = auth_app.builtin_auth_handlers[0]
assert handler.ttl_seconds == 10
assert handler.execution_role == 'arn:my-role'
def test_validation_raised_on_unknown_kwargs():
auth_app = app.Chalice('builtin-auth')
with pytest.raises(TypeError):
@auth_app.authorizer(this_is_an_unknown_kwarg=True)
def builtin_auth(auth_request):
pass
def test_can_return_auth_response():
event = {
'type': 'TOKEN',
'authorizationToken': 'authtoken',
'methodArn': 'arn:aws:execute-api:us-west-2:1:id/dev/GET/a',
}
auth_app = app.Chalice('builtin-auth')
response = {
'context': {},
'principalId': 'principal',
'policyDocument': {
'Version': '2012-10-17',
'Statement': [
{'Action': 'execute-api:Invoke',
'Effect': 'Allow',
'Resource': [
'arn:aws:execute-api:us-west-2:1:id/dev/*/a'
]}
]
}
}
@auth_app.authorizer()
def builtin_auth(auth_request):
return app.AuthResponse(['/a'], 'principal')
actual = builtin_auth(event, None)
assert actual == response
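# Illustrative sketch (not part of the test suite): a built-in authorizer that
# inspects the incoming token and returns an AuthResponse, which Chalice then
# serializes into the policy document asserted above. The token value and the
# route list are made up for illustration; an empty route list denies access.
def _sketch_builtin_authorizer():
    sketch = app.Chalice('authorizer-sketch')

    @sketch.authorizer()
    def my_auth(auth_request):
        if auth_request.token == 'allow-me':
            return app.AuthResponse(routes=['/a'], principal_id='user')
        return app.AuthResponse(routes=[], principal_id='user')
    return sketch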
def test_auth_response_serialization():
method_arn = (
"arn:aws:execute-api:us-west-2:123:rest-api-id/dev/GET/needs/auth")
request = app.AuthRequest('TOKEN', 'authtoken', method_arn)
response = app.AuthResponse(routes=['/needs/auth'], principal_id='foo')
response_dict = response.to_dict(request)
expected = [method_arn.replace('GET', '*')]
assert response_dict == {
'policyDocument': {
'Version': '2012-10-17',
'Statement': [
{
'Action': 'execute-api:Invoke',
'Resource': expected,
'Effect': 'Allow'
}
]
},
'context': {},
'principalId': 'foo',
}
def test_auth_response_can_include_context(auth_request):
response = app.AuthResponse(['/foo'], 'principal', {'foo': 'bar'})
serialized = response.to_dict(auth_request)
assert serialized['context'] == {'foo': 'bar'}
def test_can_use_auth_routes_instead_of_strings(auth_request):
expected = [
"arn:aws:execute-api:us-west-2:123:rest-api-id/dev/GET/a",
"arn:aws:execute-api:us-west-2:123:rest-api-id/dev/GET/a/b",
"arn:aws:execute-api:us-west-2:123:rest-api-id/dev/POST/a/b",
]
response = app.AuthResponse(
[app.AuthRoute('/a', ['GET']),
app.AuthRoute('/a/b', ['GET', 'POST'])],
'principal')
serialized = response.to_dict(auth_request)
assert serialized['policyDocument'] == {
'Version': '2012-10-17',
'Statement': [{
'Action': 'execute-api:Invoke',
'Effect': 'Allow',
'Resource': expected,
}]
}
def test_auth_response_wildcard(auth_request):
response = app.AuthResponse(
routes=[app.AuthRoute(path='*', methods=['*'])],
principal_id='user')
serialized = response.to_dict(auth_request)
assert serialized['policyDocument'] == {
'Statement': [
{'Action': 'execute-api:Invoke',
'Effect': 'Allow',
'Resource': [
'arn:aws:execute-api:us-west-2:123:rest-api-id/dev/*/*']}],
'Version': '2012-10-17'
}
def test_auth_response_wildcard_string(auth_request):
response = app.AuthResponse(
routes=['*'], principal_id='user')
serialized = response.to_dict(auth_request)
assert serialized['policyDocument'] == {
'Statement': [
{'Action': 'execute-api:Invoke',
'Effect': 'Allow',
'Resource': [
'arn:aws:execute-api:us-west-2:123:rest-api-id/dev/*/*']}],
'Version': '2012-10-17'
}
def test_can_mix_auth_routes_and_strings(auth_request):
expected = [
'arn:aws:execute-api:us-west-2:123:rest-api-id/dev/*/a',
'arn:aws:execute-api:us-west-2:123:rest-api-id/dev/GET/a/b',
]
response = app.AuthResponse(
['/a', app.AuthRoute('/a/b', ['GET'])],
'principal')
serialized = response.to_dict(auth_request)
assert serialized['policyDocument'] == {
'Version': '2012-10-17',
'Statement': [{
'Action': 'execute-api:Invoke',
'Effect': 'Allow',
'Resource': expected,
}]
}
def test_root_resource(auth_request):
auth_request.method_arn = (
"arn:aws:execute-api:us-west-2:123:rest-api-id/dev/GET/")
expected = [
"arn:aws:execute-api:us-west-2:123:rest-api-id/dev/GET/"
]
response = app.AuthResponse(
[app.AuthRoute('/', ['GET'])],
'principal')
serialized = response.to_dict(auth_request)
assert serialized['policyDocument'] == {
'Version': '2012-10-17',
'Statement': [{
'Action': 'execute-api:Invoke',
'Effect': 'Allow',
'Resource': expected,
}]
}
def test_can_register_scheduled_event_with_str(sample_app):
@sample_app.schedule('rate(1 minute)')
def foo(event):
pass
assert len(sample_app.event_sources) == 1
event_source = sample_app.event_sources[0]
assert event_source.name == 'foo'
assert event_source.schedule_expression == 'rate(1 minute)'
assert event_source.handler_string == 'app.foo'
def test_can_register_scheduled_event_with_rate(sample_app):
@sample_app.schedule(app.Rate(value=2, unit=app.Rate.HOURS))
def foo(event):
pass
# We don't convert the rate down to its string form until
# we actually deploy.
assert len(sample_app.event_sources) == 1
expression = sample_app.event_sources[0].schedule_expression
# We already check the event source in the test above, so we're
# only interested in the schedule expression here.
assert expression.value == 2
assert expression.unit == app.Rate.HOURS
def test_can_register_scheduled_event_with_event(sample_app):
@sample_app.schedule(app.Cron(0, 10, '*', '*', '?', '*'))
def foo(event):
pass
assert len(sample_app.event_sources) == 1
expression = sample_app.event_sources[0].schedule_expression
assert expression.minutes == 0
assert expression.hours == 10
assert expression.day_of_month == '*'
assert expression.month == '*'
assert expression.day_of_week == '?'
assert expression.year == '*'
@pytest.mark.parametrize('value,unit,expected', [
(1, app.Rate.MINUTES, 'rate(1 minute)'),
(2, app.Rate.MINUTES, 'rate(2 minutes)'),
(1, app.Rate.HOURS, 'rate(1 hour)'),
(2, app.Rate.HOURS, 'rate(2 hours)'),
(1, app.Rate.DAYS, 'rate(1 day)'),
(2, app.Rate.DAYS, 'rate(2 days)'),
])
def test_rule_object_converts_to_str(value, unit, expected):
assert app.Rate(value=value, unit=unit).to_string() == expected
@pytest.mark.parametrize(('minutes,hours,day_of_month,month,'
'day_of_week,year,expected'), [
# These are taken from the scheduled events docs page.
    # Invoke a Lambda function at 10:00am (UTC) every day
(0, 10, '*', '*', '?', '*', 'cron(0 10 * * ? *)'),
    # Invoke a Lambda function at 12:15pm (UTC) every day
(15, 12, '*', '*', '?', '*', 'cron(15 12 * * ? *)'),
# Invoke a Lambda function at 06:00pm (UTC) every Mon-Fri
(0, 18, '?', '*', 'MON-FRI', '*', 'cron(0 18 ? * MON-FRI *)'),
    # Invoke a Lambda function at 8:00am (UTC) on the first day of every month
(0, 8, 1, '*', '?', '*', 'cron(0 8 1 * ? *)'),
# Invoke a Lambda function every 10 min Mon-Fri
('0/10', '*', '?', '*', 'MON-FRI', '*', 'cron(0/10 * ? * MON-FRI *)'),
# Invoke a Lambda function every 5 minutes Mon-Fri between 8:00am and
# 5:55pm (UTC)
('0/5', '8-17', '?', '*', 'MON-FRI', '*', 'cron(0/5 8-17 ? * MON-FRI *)'),
# Invoke a Lambda function at 9 a.m. (UTC) the first Monday of each month
(0, 9, '?', '*', '2#1', '*', 'cron(0 9 ? * 2#1 *)'),
])
def test_cron_expression_converts_to_str(minutes, hours, day_of_month, month,
day_of_week, year, expected):
assert app.Cron(
minutes=minutes,
hours=hours,
day_of_month=day_of_month,
month=month,
day_of_week=day_of_week,
year=year,
).to_string() == expected
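# Illustrative sketch (not part of the test suite): registering one of the cron
# expressions from the table above with the schedule decorator, the same way
# app.Cron is used in the registration test earlier. The handler body is made
# up for illustration.
def _sketch_cron_schedule():
    sketch = app.Chalice('cron-sketch')

    # Every weekday at 6:00pm UTC -> cron(0 18 ? * MON-FRI *)
    @sketch.schedule(app.Cron(0, 18, '?', '*', 'MON-FRI', '*'))
    def weekday_job(event):
        return event.to_dict()
    return sketch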
def test_can_map_schedule_event_dict_to_object(sample_app):
@sample_app.schedule('rate(1 hour)')
def handler(event):
return event
# This is the event dict that lambda provides
# to the lambda handler
lambda_event = {
"version": "0",
"account": "123456789012",
"region": "us-west-2",
"detail": {},
"detail-type": "Scheduled Event",
"source": "aws.events",
"time": "1970-01-01T00:00:00Z",
"id": "event-id",
"resources": [
"arn:aws:events:us-west-2:123456789012:rule/my-schedule"
]
}
event_object = handler(lambda_event, context=None)
assert event_object.version == '0'
assert event_object.event_id == 'event-id'
assert event_object.source == 'aws.events'
assert event_object.account == '123456789012'
assert event_object.time == '1970-01-01T00:00:00Z'
assert event_object.region == 'us-west-2'
assert event_object.resources == [
"arn:aws:events:us-west-2:123456789012:rule/my-schedule"
]
assert event_object.detail == {}
assert event_object.detail_type == "Scheduled Event"
# This is meant as a fall back in case you need access to
# the raw lambda event dict.
assert event_object.to_dict() == lambda_event
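# Illustrative sketch (not part of the test suite): what a scheduled handler can
# do with the mapped event object whose attributes are asserted above. The log
# message and return value are made up for illustration.
def _sketch_scheduled_handler():
    sketch = app.Chalice('schedule-sketch')

    @sketch.schedule('rate(1 hour)')
    def hourly(event):
        # event.resources/event.time come straight from the mapped event above.
        sketch.log.info('Rule %s fired at %s', event.resources, event.time)
        return {'source': event.source, 'region': event.region}
    return sketch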
def test_can_create_cwe_event_handler(sample_app):
@sample_app.on_cw_event({'source': ['aws.ec2']})
def handler(event):
pass
assert len(sample_app.event_sources) == 1
event = sample_app.event_sources[0]
assert event.name == 'handler'
assert event.event_pattern == {'source': ['aws.ec2']}
assert event.handler_string == 'app.handler'
def test_can_map_cwe_event_dict_to_object(sample_app):
@sample_app.on_cw_event({'source': ['aws.ec2']})
def handler(event):
return event
lambda_event = {
"version": 0,
"id": "7bf73129-1428-4cd3-a780-95db273d1602",
"detail-type": "EC2 Instance State-change Notification",
"source": "aws.ec2",
"account": "123456789012",
"time": "2015-11-11T21:29:54Z",
"region": "us-east-1",
"resources": [
"arn:aws:ec2:us-east-1:123456789012:instance/i-abcd1111"
],
"detail": {
"instance-id": "i-abcd1111",
"state": "pending"
}
}
event_object = handler(lambda_event, context=None)
assert event_object.detail_type == "EC2 Instance State-change Notification"
assert event_object.account == '123456789012'
assert event_object.region == 'us-east-1'
assert event_object.detail == {
'instance-id': 'i-abcd1111',
'state': 'pending'
}
def test_pure_lambda_function_direct_mapping(sample_app):
@sample_app.lambda_function()
def handler(event, context):
return event, context
return_value = handler({'fake': 'event'}, {'fake': 'context'})
assert return_value[0] == {'fake': 'event'}
assert return_value[1] == {'fake': 'context'}
def test_pure_lambda_functions_are_registered_in_app(sample_app):
@sample_app.lambda_function()
def handler(event, context):
pass
assert len(sample_app.pure_lambda_functions) == 1
lambda_function = sample_app.pure_lambda_functions[0]
assert lambda_function.name == 'handler'
assert lambda_function.handler_string == 'app.handler'
def test_aws_execution_env_set():
env = {'AWS_EXECUTION_ENV': 'AWS_Lambda_python2.7'}
app.Chalice('app-name', env=env)
assert env['AWS_EXECUTION_ENV'] == (
'AWS_Lambda_python2.7 aws-chalice/%s' % chalice_version
)
def test_can_use_out_of_order_args(create_event):
demo = app.Chalice('demo-app')
# Note how the url params and function args are out of order.
@demo.route('/{a}/{b}', methods=['GET'])
def index(b, a):
return {'a': a, 'b': b}
event = create_event('/{a}/{b}', 'GET', {'a': 'first', 'b': 'second'})
response = demo(event, context=None)
response = json_response_body(response)
assert response == {'a': 'first', 'b': 'second'}
def test_ensure_debug_mode_is_false_by_default():
# These logger tests need to each have a unique name because the Chalice
    # app creates a logger with its name. If these tests are run in a batch
# the logger names will overlap in the logging module and cause test
# failures.
test_app = app.Chalice('logger-test-1')
assert test_app.debug is False
assert test_app.log.getEffectiveLevel() == logging.ERROR
def test_can_explicitly_set_debug_false_in_initializer():
test_app = app.Chalice('logger-test-2', debug=False)
assert test_app.debug is False
assert test_app.log.getEffectiveLevel() == logging.ERROR
def test_can_set_debug_mode_in_initializer():
test_app = app.Chalice('logger-test-3', debug=True)
assert test_app.debug is True
assert test_app.log.getEffectiveLevel() == logging.DEBUG
def test_debug_mode_changes_log_level():
test_app = app.Chalice('logger-test-4', debug=False)
test_app.debug = True
assert test_app.debug is True
assert test_app.log.getEffectiveLevel() == logging.DEBUG
def test_internal_exception_debug_false(capsys, create_event):
test_app = app.Chalice('logger-test-5', debug=False)
@test_app.route('/error')
def error():
raise Exception('Something bad happened')
event = create_event('/error', 'GET', {})
test_app(event, context=None)
out, err = capsys.readouterr()
assert 'logger-test-5' in out
assert 'Caught exception' in out
assert 'Something bad happened' in out
def test_raw_body_is_none_if_body_is_none():
event = {
'body': None,
'multiValueQueryStringParameters': '',
'headers': {},
'pathParameters': {},
'requestContext': {
'httpMethod': 'GET',
'resourcePath': '/',
},
'stageVariables': {},
'isBase64Encoded': False,
}
request = app.Request(event, FakeLambdaContext())
assert request.raw_body == b''
@given(http_request_event=HTTP_REQUEST)
def test_http_request_to_dict_is_json_serializable(http_request_event):
    # We have to do some slight preprocessing here
# to maintain preconditions. If the
# is_base64_encoded arg is True, we'll
# base64 encode the body. We assume API Gateway
# upholds this precondition.
is_base64_encoded = http_request_event['isBase64Encoded']
if is_base64_encoded:
# Confirmed that if you send an empty body,
# API Gateway will always say the body is *not*
# base64 encoded.
assume(http_request_event['body'] is not None)
body = base64.b64encode(
http_request_event['body'].encode('utf-8'))
http_request_event['body'] = body.decode('ascii')
request = Request(http_request_event, FakeLambdaContext())
assert isinstance(request.raw_body, bytes)
request_dict = request.to_dict()
# We should always be able to dump the request dict
# to JSON.
assert json.dumps(request_dict, default=handle_extra_types)
@given(body=st.text(), headers=STR_MAP,
status_code=st.integers(min_value=200, max_value=599))
def test_http_response_to_dict(body, headers, status_code):
r = Response(body=body, headers=headers, status_code=status_code)
serialized = r.to_dict()
assert 'headers' in serialized
assert 'statusCode' in serialized
assert 'body' in serialized
assert isinstance(serialized['body'], six.string_types)
@given(body=st.binary(), content_type=st.sampled_from(BINARY_TYPES))
def test_handles_binary_responses(body, content_type):
r = Response(body=body, headers={'Content-Type': content_type})
serialized = r.to_dict(BINARY_TYPES)
# A binary response should always result in the
# response being base64 encoded.
assert serialized['isBase64Encoded']
assert isinstance(serialized['body'], six.string_types)
assert isinstance(base64.b64decode(serialized['body']), bytes)
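# Illustrative sketch (not part of the test suite): serializing a binary
# Response by hand, matching the property asserted above -- a bytes body with a
# binary content type comes back base64 encoded. The payload is made up, and it
# assumes application/octet-stream is among the configured binary types, as in
# the tests above.
def _sketch_binary_response_serialization():
    payload = b'\x00\x01\x02binary'
    r = Response(body=payload,
                 headers={'Content-Type': 'application/octet-stream'})
    serialized = r.to_dict(BINARY_TYPES)
    assert serialized['isBase64Encoded']
    return base64.b64decode(serialized['body']) == payload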
def test_can_create_s3_event_handler(sample_app):
@sample_app.on_s3_event(bucket='mybucket')
def handler(event):
pass
assert len(sample_app.event_sources) == 1
event = sample_app.event_sources[0]
assert event.name == 'handler'
assert event.bucket == 'mybucket'
assert event.events == ['s3:ObjectCreated:*']
assert event.handler_string == 'app.handler'
def test_can_map_to_s3_event_object(sample_app):
@sample_app.on_s3_event(bucket='mybucket')
def handler(event):
return event
s3_event = {
'Records': [
{'awsRegion': 'us-west-2',
'eventName': 'ObjectCreated:Put',
'eventSource': 'aws:s3',
'eventTime': '2018-05-22T04:41:23.823Z',
'eventVersion': '2.0',
'requestParameters': {'sourceIPAddress': '174.127.235.55'},
'responseElements': {
'x-amz-id-2': 'request-id-2',
'x-amz-request-id': 'request-id-1'},
's3': {
'bucket': {
'arn': 'arn:aws:s3:::mybucket',
'name': 'mybucket',
'ownerIdentity': {
'principalId': 'ABCD'
}
},
'configurationId': 'config-id',
'object': {
'eTag': 'd41d8cd98f00b204e9800998ecf8427e',
'key': 'hello-world.txt',
'sequencer': '005B039F73C627CE8B',
'size': 0
},
's3SchemaVersion': '1.0'
},
'userIdentity': {'principalId': 'AWS:XYZ'}
}
]
}
actual_event = handler(s3_event, context=None)
assert actual_event.bucket == 'mybucket'
assert actual_event.key == 'hello-world.txt'
assert actual_event.to_dict() == s3_event
def test_s3_event_urldecodes_keys():
s3_event = {
'Records': [
{'s3': {
'bucket': {
'arn': 'arn:aws:s3:::mybucket',
'name': 'mybucket',
},
'object': {
'key': 'file+with+spaces',
'sequencer': '005B039F73C627CE8B',
'size': 0
},
}},
]
}
event = app.S3Event(s3_event, FakeLambdaContext())
# We should urldecode the key name.
assert event.key == 'file with spaces'
# But the key should remain unchanged in to_dict().
assert event.to_dict() == s3_event
def test_s3_event_urldecodes_unicode_keys():
s3_event = {
'Records': [
{'s3': {
'bucket': {
'arn': 'arn:aws:s3:::mybucket',
'name': 'mybucket',
},
'object': {
# This is u'\u2713'
'key': '%E2%9C%93',
'sequencer': '005B039F73C627CE8B',
'size': 0
},
}},
]
}
event = app.S3Event(s3_event, FakeLambdaContext())
# We should urldecode the key name.
assert event.key == u'\u2713'
assert event.bucket == u'mybucket'
# But the key should remain unchanged in to_dict().
assert event.to_dict() == s3_event
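# Illustrative sketch (not part of the test suite): an S3 handler that relies on
# the url-decoded .key shown above, while to_dict() keeps the raw record. The
# bucket name and handler body are made up for illustration.
def _sketch_s3_handler():
    sketch = app.Chalice('s3-sketch')

    @sketch.on_s3_event(bucket='sketch-bucket')
    def on_object_created(event):
        # event.key is already url-decoded ('file with spaces', u'\u2713', ...)
        return {'bucket': event.bucket, 'key': event.key}
    return sketch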
def test_can_create_sns_handler(sample_app):
@sample_app.on_sns_message(topic='MyTopic')
def handler(event):
pass
assert len(sample_app.event_sources) == 1
event = sample_app.event_sources[0]
assert event.name == 'handler'
assert event.topic == 'MyTopic'
assert event.handler_string == 'app.handler'
def test_can_map_sns_event(sample_app):
@sample_app.on_sns_message(topic='MyTopic')
def handler(event):
return event
sns_event = {'Records': [{
'EventSource': 'aws:sns',
'EventSubscriptionArn': 'arn:subscription-arn',
'EventVersion': '1.0',
'Sns': {
'Message': 'This is a raw message',
'MessageAttributes': {
'AttributeKey': {
'Type': 'String',
'Value': 'AttributeValue'
}
},
'MessageId': 'abcdefgh-51e4-5ae2-9964-b296c8d65d1a',
'Signature': 'signature',
'SignatureVersion': '1',
'SigningCertUrl': 'https://sns.us-west-2.amazonaws.com/cert.pen',
'Subject': 'ThisIsTheSubject',
'Timestamp': '2018-06-26T19:41:38.695Z',
'TopicArn': 'arn:aws:sns:us-west-2:12345:ConsoleTestTopic',
'Type': 'Notification',
'UnsubscribeUrl': 'https://unsubscribe-url/'}}]}
lambda_context = FakeLambdaContext()
actual_event = handler(sns_event, context=lambda_context)
assert actual_event.message == 'This is a raw message'
assert actual_event.subject == 'ThisIsTheSubject'
assert actual_event.to_dict() == sns_event
assert actual_event.context == lambda_context
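# Illustrative sketch (not part of the test suite): an SNS handler using the
# .message and .subject attributes asserted above. The topic name is made up
# for illustration.
def _sketch_sns_handler():
    sketch = app.Chalice('sns-sketch')

    @sketch.on_sns_message(topic='SketchTopic')
    def on_message(event):
        return {'subject': event.subject, 'message': event.message}
    return sketch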
def test_can_create_sqs_handler(sample_app):
@sample_app.on_sqs_message(queue='MyQueue', batch_size=200)
def handler(event):
pass
assert len(sample_app.event_sources) == 1
event = sample_app.event_sources[0]
assert event.queue == 'MyQueue'
assert event.batch_size == 200
assert event.handler_string == 'app.handler'
def test_can_set_sqs_handler_name(sample_app):
@sample_app.on_sqs_message(queue='MyQueue', name='sqs_handler')
def handler(event):
pass
assert len(sample_app.event_sources) == 1
event = sample_app.event_sources[0]
assert event.name == 'sqs_handler'
def test_can_map_sqs_event(sample_app):
@sample_app.on_sqs_message(queue='queue-name')
def handler(event):
return event
sqs_event = {'Records': [{
'attributes': {
'ApproximateFirstReceiveTimestamp': '1530576251596',
'ApproximateReceiveCount': '1',
'SenderId': 'sender-id',
'SentTimestamp': '1530576251595'
},
'awsRegion': 'us-west-2',
'body': 'queue message body',
'eventSource': 'aws:sqs',
'eventSourceARN': 'arn:aws:sqs:us-west-2:12345:queue-name',
'md5OfBody': '754ac2f7a12df38320e0c5eafd060145',
'messageAttributes': {},
'messageId': 'message-id',
'receiptHandle': 'receipt-handle'
}]}
lambda_context = FakeLambdaContext()
actual_event = handler(sqs_event, context=lambda_context)
records = list(actual_event)
assert len(records) == 1
first_record = records[0]
assert first_record.body == 'queue message body'
assert first_record.receipt_handle == 'receipt-handle'
assert first_record.to_dict() == sqs_event['Records'][0]
assert actual_event.to_dict() == sqs_event
assert actual_event.context == lambda_context
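# Illustrative sketch (not part of the test suite): an SQS handler iterating the
# mapped event, using the per-record attributes asserted above. The queue name
# is made up for illustration.
def _sketch_sqs_handler():
    sketch = app.Chalice('sqs-sketch')

    @sketch.on_sqs_message(queue='sketch-queue')
    def on_queue_message(event):
        return [{'body': record.body,
                 'receipt_handle': record.receipt_handle}
                for record in event]
    return sketch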
def test_can_create_kinesis_handler(sample_app):
@sample_app.on_kinesis_record(stream='MyStream',
batch_size=1,
starting_position='TRIM_HORIZON')
def handler(event):
pass
assert len(sample_app.event_sources) == 1
config = sample_app.event_sources[0]
assert config.stream == 'MyStream'
assert config.batch_size == 1
assert config.starting_position == 'TRIM_HORIZON'
def test_can_map_kinesis_event(sample_app):
@sample_app.on_kinesis_record(stream='MyStream')
def handler(event):
return event
kinesis_event = {
"Records": [
{
"kinesis": {
"kinesisSchemaVersion": "1.0",
"partitionKey": "1",
"sequenceNumber": "12345",
"data": "SGVsbG8sIHRoaXMgaXMgYSB0ZXN0Lg==",
"approximateArrivalTimestamp": 1545084650.987
},
"eventSource": "aws:kinesis",
"eventVersion": "1.0",
"eventID": "shardId-000000000006:12345",
"eventName": "aws:kinesis:record",
"invokeIdentityArn": "arn:aws:iam::123:role/lambda-role",
"awsRegion": "us-east-2",
"eventSourceARN": (
"arn:aws:kinesis:us-east-2:123:stream/lambda-stream"
)
},
{
"kinesis": {
"kinesisSchemaVersion": "1.0",
"partitionKey": "1",
"sequenceNumber": "12346",
"data": "VGhpcyBpcyBvbmx5IGEgdGVzdC4=",
"approximateArrivalTimestamp": 1545084711.166
},
"eventSource": "aws:kinesis",
"eventVersion": "1.0",
"eventID": "shardId-000000000006:12346",
"eventName": "aws:kinesis:record",
"invokeIdentityArn": "arn:aws:iam::123:role/lambda-role",
"awsRegion": "us-east-2",
"eventSourceARN": (
"arn:aws:kinesis:us-east-2:123:stream/lambda-stream"
)
}
]
}
lambda_context = FakeLambdaContext()
actual_event = handler(kinesis_event, context=lambda_context)
records = list(actual_event)
assert len(records) == 2
assert records[0].data == b'Hello, this is a test.'
assert records[0].sequence_number == "12345"
assert records[0].partition_key == "1"
assert records[0].schema_version == "1.0"
assert records[0].timestamp == datetime(2018, 12, 17, 22, 10, 50, 987000)
assert records[1].data == b'This is only a test.'
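# Illustrative sketch (not part of the test suite): a Kinesis handler reading
# the already base64-decoded .data bytes plus the metadata asserted above. The
# stream name is made up, and decoding as UTF-8 assumes text payloads like the
# sample records in the test.
def _sketch_kinesis_handler():
    sketch = app.Chalice('kinesis-sketch')

    @sketch.on_kinesis_record(stream='sketch-stream')
    def on_record(event):
        return [{'partition_key': record.partition_key,
                 'sequence_number': record.sequence_number,
                 'data': record.data.decode('utf-8')}
                for record in event]
    return sketch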
def test_can_create_ddb_handler(sample_app):
@sample_app.on_dynamodb_record(
stream_arn='arn:aws:dynamodb:...:stream', batch_size=10,
starting_position='TRIM_HORIZON')
def handler(event):
pass
assert len(sample_app.event_sources) == 1
config = sample_app.event_sources[0]
assert config.stream_arn == 'arn:aws:dynamodb:...:stream'
assert config.batch_size == 10
assert config.starting_position == 'TRIM_HORIZON'
def test_can_map_ddb_event(sample_app):
@sample_app.on_dynamodb_record(stream_arn='arn:aws:...:stream')
def handler(event):
return event
ddb_event = {
'Records': [
{'awsRegion': 'us-west-2',
'dynamodb': {'ApproximateCreationDateTime': 1601317140.0,
'Keys': {'PK': {'S': 'foo'}, 'SK': {'S': 'bar'}},
'NewImage': {'PK': {'S': 'foo'}, 'SK': {'S': 'bar'}},
'SequenceNumber': '1700000000020701978607',
'SizeBytes': 20,
'StreamViewType': 'NEW_AND_OLD_IMAGES'},
'eventID': 'da037887f71a88a1f6f4cfd149709d5a',
'eventName': 'INSERT',
'eventSource': 'aws:dynamodb',
'eventSourceARN': (
'arn:aws:dynamodb:us-west-2:12345:table/MyTable/stream/'
'2020-09-28T16:49:14.209'
),
'eventVersion': '1.1'}
]
}
lambda_context = FakeLambdaContext()
actual_event = handler(ddb_event, context=lambda_context)
records = list(actual_event)
assert len(records) == 1
assert records[0].timestamp == datetime(2020, 9, 28, 18, 19)
assert records[0].keys == {'PK': {'S': 'foo'}, 'SK': {'S': 'bar'}}
assert records[0].new_image == {'PK': {'S': 'foo'}, 'SK': {'S': 'bar'}}
assert records[0].old_image is None
assert records[0].sequence_number == '1700000000020701978607'
assert records[0].size_bytes == 20
assert records[0].stream_view_type == 'NEW_AND_OLD_IMAGES'
# Mapping from top level keys in a record.
assert records[0].aws_region == 'us-west-2'
assert records[0].event_id == 'da037887f71a88a1f6f4cfd149709d5a'
assert records[0].event_name == 'INSERT'
assert records[0].event_source_arn == (
'arn:aws:dynamodb:us-west-2:12345:table/MyTable/stream/'
'2020-09-28T16:49:14.209')
# Computed value.
assert records[0].table_name == 'MyTable'
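# Illustrative sketch (not part of the test suite): a DynamoDB stream handler
# using the per-record attributes asserted above, including the computed
# .table_name. The stream ARN placeholder mirrors the one in the test.
def _sketch_dynamodb_handler():
    sketch = app.Chalice('ddb-sketch')

    @sketch.on_dynamodb_record(stream_arn='arn:aws:dynamodb:...:stream')
    def on_change(event):
        return [{'table': record.table_name,
                 'event': record.event_name,
                 'keys': record.keys,
                 'new_image': record.new_image,
                 'old_image': record.old_image}
                for record in event]
    return sketch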
def test_bytes_when_binary_type_is_application_json():
demo = app.Chalice('demo-app')
demo.api.binary_types.append('application/json')
@demo.route('/compress_response')
def index():
blob = json.dumps({'hello': 'world'}).encode('utf-8')
payload = gzip.compress(blob)
custom_headers = {
'Content-Type': 'application/json',
'Content-Encoding': 'gzip'
}
return Response(body=payload, status_code=200, headers=custom_headers)
return demo
def test_can_register_blueprint_on_app():
myapp = app.Chalice('myapp')
foo = app.Blueprint('foo')
@foo.route('/foo')
def first():
pass
myapp.register_blueprint(foo)
assert sorted(list(myapp.routes.keys())) == ['/foo']
def test_can_combine_multiple_blueprints_in_single_app():
myapp = app.Chalice('myapp')
foo = app.Blueprint('foo')
bar = app.Blueprint('bar')
@foo.route('/foo')
def myfoo():
pass
@bar.route('/bar')
def mybar():
pass
myapp.register_blueprint(foo)
myapp.register_blueprint(bar)
assert sorted(list(myapp.routes)) == ['/bar', '/foo']
def test_can_preserve_signature_on_blueprint():
myapp = app.Chalice('myapp')
foo = app.Blueprint('foo')
@foo.lambda_function()
def first(event, context):
return {'foo': 'bar'}
myapp.register_blueprint(foo)
# The handler string given to a blueprint
# is the "module.function_name" so we have
# to ensure we can continue to invoke the
# function with its expected signature.
assert first({}, None) == {'foo': 'bar'}
def test_doc_saved_on_route():
myapp = app.Chalice('myapp')
@myapp.route('/')
def index():
"""My index docstring."""
pass
assert index.__doc__ == 'My index docstring.'
def test_blueprint_docstring_is_preserved():
foo = app.Blueprint('foo')
@foo.route('/foo')
def first():
"""Blueprint docstring."""
assert first.__doc__ == 'Blueprint docstring.'
def test_can_mount_apis_at_url_prefix():
myapp = app.Chalice('myapp')
foo = app.Blueprint('foo')
@foo.route('/foo')
def myfoo():
pass
@foo.route('/bar')
def mybar():
pass
myapp.register_blueprint(foo, url_prefix='/myprefix')
assert list(sorted(myapp.routes)) == ['/myprefix/bar', '/myprefix/foo']
def test_can_mount_root_url_in_blueprint():
myapp = app.Chalice('myapp')
foo = app.Blueprint('foo')
root = app.Blueprint('root')
@root.route('/')
def myroot():
pass
@foo.route('/')
def myfoo():
pass
@foo.route('/bar')
def mybar():
pass
myapp.register_blueprint(foo, url_prefix='/foo')
myapp.register_blueprint(root)
assert list(sorted(myapp.routes)) == ['/', '/foo', '/foo/bar']
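# Illustrative sketch (not part of the test suite): how the url_prefix used in
# the tests above composes with blueprint routes. Names are made up; by the same
# rule as '/myprefix/foo' above, the resulting route table would contain
# '/api/users' and '/api/users/{id}'.
def _sketch_blueprint_mounting():
    myapp = app.Chalice('blueprint-sketch')
    users = app.Blueprint('app.chalicelib.blueprints.users')

    @users.route('/users')
    def list_users():
        return []

    @users.route('/users/{id}')
    def get_user(id):
        return {'id': id}

    myapp.register_blueprint(users, url_prefix='/api')
    return sorted(myapp.routes)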
def test_can_combine_lambda_functions_and_routes_in_blueprints():
myapp = app.Chalice('myapp')
foo = app.Blueprint('app.chalicelib.blueprints.foo')
@foo.route('/foo')
def myfoo():
pass
@foo.lambda_function()
def myfunction(event, context):
pass
myapp.register_blueprint(foo)
assert len(myapp.pure_lambda_functions) == 1
lambda_function = myapp.pure_lambda_functions[0]
assert lambda_function.name == 'myfunction'
assert lambda_function.handler_string == (
'app.chalicelib.blueprints.foo.myfunction')
assert list(myapp.routes) == ['/foo']
def test_can_mount_lambda_functions_with_name_prefix():
myapp = app.Chalice('myapp')
foo = app.Blueprint('app.chalicelib.blueprints.foo')
@foo.lambda_function()
def myfunction(event, context):
return event
myapp.register_blueprint(foo, name_prefix='myprefix_')
assert len(myapp.pure_lambda_functions) == 1
lambda_function = myapp.pure_lambda_functions[0]
assert lambda_function.name == 'myprefix_myfunction'
assert lambda_function.handler_string == (
'app.chalicelib.blueprints.foo.myfunction')
with Client(myapp) as c:
response = c.lambda_.invoke(
'myprefix_myfunction', {'foo': 'bar'}
)
assert response.payload == {'foo': 'bar'}
def test_can_mount_event_sources_with_blueprint():
myapp = app.Chalice('myapp')
foo = app.Blueprint('app.chalicelib.blueprints.foo')
@foo.schedule('rate(5 minutes)')
def myfunction(event):
return event
myapp.register_blueprint(foo, name_prefix='myprefix_')
assert len(myapp.event_sources) == 1
event_source = myapp.event_sources[0]
assert event_source.name == 'myprefix_myfunction'
assert event_source.schedule_expression == 'rate(5 minutes)'
assert event_source.handler_string == (
'app.chalicelib.blueprints.foo.myfunction')
def test_can_mount_all_decorators_in_blueprint():
myapp = app.Chalice('myapp')
foo = app.Blueprint('app.chalicelib.blueprints.foo')
@foo.route('/foo')
def routefoo():
pass
@foo.lambda_function(name='mylambdafunction')
def mylambda(event, context):
pass
@foo.schedule('rate(5 minutes)')
def bar(event):
pass
@foo.on_s3_event('MyBucket')
def on_s3(event):
pass
@foo.on_sns_message('MyTopic')
def on_sns(event):
pass
@foo.on_sqs_message('MyQueue')
def on_sqs(event):
pass
myapp.register_blueprint(foo, name_prefix='myprefix_', url_prefix='/bar')
event_sources = myapp.event_sources
assert len(event_sources) == 4
lambda_functions = myapp.pure_lambda_functions
assert len(lambda_functions) == 1
    # Handles the name prefix and the name= override in the decorator.
assert lambda_functions[0].name == 'myprefix_mylambdafunction'
assert list(myapp.routes) == ['/bar/foo']
def test_can_call_current_request_on_blueprint_when_mounted(create_event):
myapp = app.Chalice('myapp')
bp = app.Blueprint('app.chalicelib.blueprints.foo')
@bp.route('/todict')
def todict():
return bp.current_request.to_dict()
myapp.register_blueprint(bp)
event = create_event('/todict', 'GET', {})
response = json_response_body(myapp(event, context=None))
assert isinstance(response, dict)
assert response['method'] == 'GET'
def test_can_call_current_app_on_blueprint_when_mounted(create_event):
myapp = app.Chalice('myapp')
bp = app.Blueprint('app.chalicelib.blueprints.foo')
@bp.route('/appname')
def appname():
return {'name': bp.current_app.app_name}
myapp.register_blueprint(bp)
event = create_event('/appname', 'GET', {})
response = json_response_body(myapp(event, context=None))
assert response == {'name': 'myapp'}
def test_can_call_lambda_context_on_blueprint_when_mounted(create_event):
myapp = app.Chalice('myapp')
bp = app.Blueprint('app.chalicelib.blueprints.foo')
@bp.route('/context')
def context():
return bp.lambda_context
myapp.register_blueprint(bp)
event = create_event('/context', 'GET', {})
response = json_response_body(myapp(event, context={'context': 'foo'}))
assert response == {'context': 'foo'}
def test_can_access_log_when_mounted(create_event):
myapp = app.Chalice('myapp')
bp = app.Blueprint('app.chalicelib.blueprints.foo')
@bp.route('/log')
def log_message():
# We shouldn't get an error because we've registered it to
# an app.
bp.log.info("test log message")
return {}
myapp.register_blueprint(bp)
event = create_event('/log', 'GET', {})
response = json_response_body(myapp(event, context={'context': 'foo'}))
assert response == {}
def test_can_add_authorizer_with_url_prefix_and_routes():
myapp = app.Chalice('myapp')
foo = app.Blueprint('app.chalicelib.blueprints.foo')
@foo.authorizer()
def myauth(event):
pass
@foo.route('/foo', authorizer=myauth)
def routefoo():
pass
myapp.register_blueprint(foo, url_prefix='/bar')
assert len(myapp.builtin_auth_handlers) == 1
authorizer = myapp.builtin_auth_handlers[0]
assert isinstance(authorizer, app.BuiltinAuthConfig)
assert authorizer.name == 'myauth'
assert authorizer.handler_string == 'app.chalicelib.blueprints.foo.myauth'
def test_runtime_error_if_current_request_access_on_non_registered_blueprint():
bp = app.Blueprint('app.chalicelib.blueprints.foo')
with pytest.raises(RuntimeError):
bp.current_request
def test_every_decorator_added_to_blueprint():
def is_public_method(obj):
return inspect.isfunction(obj) and not obj.__name__.startswith('_')
public_api = inspect.getmembers(
app.DecoratorAPI,
predicate=is_public_method
)
blueprint_api = [
i[0] for i in
inspect.getmembers(app.Blueprint, predicate=is_public_method)
]
for method_name, _ in public_api:
assert method_name in blueprint_api
@pytest.mark.parametrize('input_dict', [
{},
{'key': []}
])
def test_multidict_raises_keyerror(input_dict):
d = MultiDict(input_dict)
with pytest.raises(KeyError):
val = d['key']
assert val is val
def test_multidict_pop_raises_del_error():
d = MultiDict({})
with pytest.raises(KeyError):
del d['key']
def test_multidict_getlist_does_raise_keyerror():
d = MultiDict({})
with pytest.raises(KeyError):
d.getlist('key')
@pytest.mark.parametrize('input_dict', [
{'key': ['value']},
{'key': ['']},
{'key': ['value1', 'value2', 'value3']},
{'key': ['value1', 'value2', None]}
])
def test_multidict_returns_lastvalue(input_dict):
d = MultiDict(input_dict)
assert d['key'] == input_dict['key'][-1]
@pytest.mark.parametrize('input_dict', [
{'key': ['value']},
{'key': ['']},
{'key': ['value1', 'value2', 'value3']},
{'key': ['value1', 'value2', None]}
])
def test_multidict_returns_all_values(input_dict):
d = MultiDict(input_dict)
assert d.getlist('key') == input_dict['key']
@pytest.mark.parametrize('input_dict', [
{'key': ['value']},
{'key': ['']},
{'key': ['value1', 'value2', 'value3']},
{'key': ['value1', 'value2', None]}
])
def test_multidict_list_wont_change_source(input_dict):
d = MultiDict(input_dict)
dict_copy = deepcopy(input_dict)
d.getlist('key')[0] = 'othervalue'
assert d.getlist('key') == dict_copy['key']
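# Illustrative sketch (not part of the test suite): the MultiDict semantics the
# parametrized tests above pin down -- item access returns the last value,
# getlist() returns every value, and the list getlist() returns is a copy, so
# mutating it does not change the mapping. The key and values are made up.
def _sketch_multidict_semantics():
    d = MultiDict({'key': ['first', 'second']})
    assert d['key'] == 'second'
    assert d.getlist('key') == ['first', 'second']
    values_copy = d.getlist('key')
    values_copy.append('third')
    # The source mapping is unchanged by mutating the returned list.
    assert d.getlist('key') == ['first', 'second']
    return d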
@pytest.mark.parametrize('input_dict,key,popped,leftover', [
(
{'key': ['value'], 'key2': [[]]},
'key',
'value',
{'key2': []},
),
(
{'key': [''], 'key2': [[]]},
'key',
'',
{'key2': []},
),
(
{'key': ['value1', 'value2', 'value3'],
'key2': [[]]},
'key',
'value3',
{'key2': []},
),
])
def test_multidict_list_can_pop_value(input_dict, key, popped, leftover):
d = MultiDict(input_dict)
pop_result = d.pop(key)
assert popped == pop_result
assert leftover == {key: d[key] for key in d}
def test_multidict_assignment():
d = MultiDict({})
d['key'] = 'value'
assert d['key'] == 'value'
def test_multidict_get_reassigned_value():
d = MultiDict({})
d['key'] = 'value'
assert d['key'] == 'value'
assert d.get('key') == 'value'
assert d.getlist('key') == ['value']
def test_multidict_get_list_wraps_key():
d = MultiDict({})
d['key'] = ['value']
assert d.getlist('key') == [['value']]
def test_multidict_repr():
d = MultiDict({
'foo': ['bar', 'baz'],
'buz': ['qux'],
})
rep = repr(d)
assert rep.startswith('MultiDict({')
assert "'foo': ['bar', 'baz']" in rep
assert "'buz': ['qux']" in rep
def test_multidict_str():
d = MultiDict({
'foo': ['bar', 'baz'],
'buz': ['qux'],
})
rep = str(d)
assert rep.startswith('MultiDict({')
assert "'foo': ['bar', 'baz']" in rep
assert "'buz': ['qux']" in rep
def test_can_configure_websockets(sample_websocket_app):
demo, _ = sample_websocket_app
assert len(demo.websocket_handlers) == 3, demo.websocket_handlers
assert '$connect' in demo.websocket_handlers, demo.websocket_handlers
assert '$disconnect' in demo.websocket_handlers, demo.websocket_handlers
assert '$default' in demo.websocket_handlers, demo.websocket_handlers
def test_websocket_event_json_body_available(sample_websocket_app,
create_websocket_event):
demo = app.Chalice('demo-app')
called = {'wascalled': False}
@demo.on_ws_message()
def message(event):
called['wascalled'] = True
assert event.json_body == {'foo': 'bar'}
        # Second access hits the cache; verify the cached value works as well.
assert event.json_body == {'foo': 'bar'}
event = create_websocket_event('$default', body='{"foo": "bar"}')
handler = websocket_handler_for_route('$default', demo)
handler(event, context=None)
assert called['wascalled'] is True
def test_websocket_event_json_body_can_raise_error(sample_websocket_app,
create_websocket_event):
demo = app.Chalice('demo-app')
called = {'wascalled': False}
@demo.on_ws_message()
def message(event):
called['wascalled'] = True
with pytest.raises(BadRequestError):
event.json_body
event = create_websocket_event('$default', body='{"foo": "bar"')
handler = websocket_handler_for_route('$default', demo)
handler(event, context=None)
assert called['wascalled'] is True
def test_can_route_websocket_connect_message(sample_websocket_app,
create_websocket_event):
demo, calls = sample_websocket_app
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
event = create_websocket_event('$connect')
handler = websocket_handler_for_route('$connect', demo)
response = handler(event, context=None)
assert response == {'statusCode': 200}
assert len(calls) == 1
assert calls[0][0] == 'connect'
event = calls[0][1]
assert isinstance(event, WebsocketEvent)
assert event.domain_name == 'abcd1234.execute-api.us-west-2.amazonaws.com'
assert event.stage == 'api'
assert event.connection_id == 'ABCD1234='
def test_can_route_websocket_disconnect_message(sample_websocket_app,
create_websocket_event):
demo, calls = sample_websocket_app
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
event = create_websocket_event('$disconnect')
handler = websocket_handler_for_route('$disconnect', demo)
response = handler(event, context=None)
assert response == {'statusCode': 200}
assert len(calls) == 1
assert calls[0][0] == 'disconnect'
event = calls[0][1]
assert isinstance(event, WebsocketEvent)
assert event.domain_name == 'abcd1234.execute-api.us-west-2.amazonaws.com'
assert event.stage == 'api'
assert event.connection_id == 'ABCD1234='
def test_can_route_websocket_default_message(sample_websocket_app,
create_websocket_event):
demo, calls = sample_websocket_app
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
event = create_websocket_event('$default', body='foo bar')
handler = websocket_handler_for_route('$default', demo)
response = handler(event, context=None)
assert response == {'statusCode': 200}
assert len(calls) == 1
assert calls[0][0] == 'default'
event = calls[0][1]
assert isinstance(event, WebsocketEvent)
assert event.domain_name == 'abcd1234.execute-api.us-west-2.amazonaws.com'
assert event.stage == 'api'
assert event.connection_id == 'ABCD1234='
assert event.body == 'foo bar'
def test_can_configure_client_on_connect(sample_websocket_app,
create_websocket_event):
demo, calls = sample_websocket_app
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
event = create_websocket_event('$connect')
handler = websocket_handler_for_route('$connect', demo)
handler(event, context=None)
assert demo.websocket_api.session.calls == [
('apigatewaymanagementapi',
'https://abcd1234.execute-api.us-west-2.amazonaws.com/api'),
]
def test_uses_api_id_not_domain_name(sample_websocket_app,
create_websocket_event):
demo, calls = sample_websocket_app
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
event = create_websocket_event('$connect')
# If you configure a custom domain name, we should still use the
# original domainName generated from API gateway when configuring
# the apigatewaymanagementapi client.
event['requestContext']['domainName'] = 'api.custom-domain-name.com'
handler = websocket_handler_for_route('$connect', demo)
handler(event, context=None)
assert demo.websocket_api.session.calls == [
('apigatewaymanagementapi',
'https://abcd1234.execute-api.us-west-2.amazonaws.com/api'),
]
def test_fallsback_to_session_if_needed(sample_websocket_app,
create_websocket_event):
demo, calls = sample_websocket_app
client = FakeClient()
demo.websocket_api = WebsocketAPI(env={})
demo.websocket_api.session = FakeSession(client, region_name='us-east-2')
event = create_websocket_event('$connect')
# If you configure a custom domain name, we should still use the
# original domainName generated from API gateway when configuring
# the apigatewaymanagementapi client.
event['requestContext']['domainName'] = 'api.custom-domain-name.com'
handler = websocket_handler_for_route('$connect', demo)
handler(event, context=None)
assert demo.websocket_api.session.calls == [
('apigatewaymanagementapi',
'https://abcd1234.execute-api.us-east-2.amazonaws.com/api'),
]
def test_can_configure_client_on_disconnect(sample_websocket_app,
create_websocket_event):
demo, calls = sample_websocket_app
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
event = create_websocket_event('$disconnect')
handler = websocket_handler_for_route('$disconnect', demo)
handler(event, context=None)
assert demo.websocket_api.session.calls == [
('apigatewaymanagementapi',
'https://abcd1234.execute-api.us-west-2.amazonaws.com/api'),
]
def test_can_configure_client_on_message(sample_websocket_app,
create_websocket_event):
demo, calls = sample_websocket_app
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
event = create_websocket_event('$default', body='foo bar')
handler = websocket_handler_for_route('$default', demo)
handler(event, context=None)
assert demo.websocket_api.session.calls == [
('apigatewaymanagementapi',
'https://abcd1234.execute-api.us-west-2.amazonaws.com/api'),
]
def test_does_only_configure_client_once(sample_websocket_app,
create_websocket_event):
demo, calls = sample_websocket_app
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
event = create_websocket_event('$default', body='foo bar')
handler = websocket_handler_for_route('$default', demo)
handler(event, context=None)
handler(event, context=None)
assert demo.websocket_api.session.calls == [
('apigatewaymanagementapi',
'https://abcd1234.execute-api.us-west-2.amazonaws.com/api'),
]
def test_cannot_configure_client_without_session(sample_websocket_app,
create_websocket_event):
demo, calls = sample_websocket_app
demo.websocket_api.session = None
event = create_websocket_event('$default', body='foo bar')
handler = websocket_handler_for_route('$default', demo)
with pytest.raises(ValueError) as e:
handler(event, context=None)
assert str(e.value) == (
'Assign app.websocket_api.session to a boto3 session before using '
'the WebsocketAPI'
)
def test_cannot_send_websocket_message_without_configure(
sample_websocket_app, create_websocket_event):
demo = app.Chalice('app-name')
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
@demo.on_ws_message()
def message_handler(event):
demo.websocket_api.send('connection_id', event.body)
event = create_websocket_event('$default', body='foo bar')
event_obj = WebsocketEvent(event, None)
handler = demo.websocket_handlers['$default'].handler_function
with pytest.raises(ValueError) as e:
handler(event_obj)
assert str(e.value) == (
'WebsocketAPI.configure must be called before using the WebsocketAPI'
)
def test_can_close_websocket_connection(create_websocket_event):
demo = app.Chalice('app-name')
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
@demo.on_ws_message()
def message_handler(event):
demo.websocket_api.close('connection_id')
event = create_websocket_event('$default', body='foo bar')
handler = websocket_handler_for_route('$default', demo)
handler(event, context=None)
calls = client.calls['close']
assert len(calls) == 1
call = calls[0]
connection_id = call[0]
assert connection_id == 'connection_id'
def test_close_does_fail_if_already_disconnected(create_websocket_event):
demo = app.Chalice('app-name')
client = FakeClient(errors=[FakeGoneException])
demo.websocket_api.session = FakeSession(client)
@demo.on_ws_message()
def message_handler(event):
with pytest.raises(WebsocketDisconnectedError) as e:
demo.websocket_api.close('connection_id')
assert e.value.connection_id == 'connection_id'
event = create_websocket_event('$default', body='foo bar')
handler = websocket_handler_for_route('$default', demo)
handler(event, context=None)
calls = client.calls['close']
assert len(calls) == 1
call = calls[0]
connection_id = call[0]
assert connection_id == 'connection_id'
def test_info_does_fail_if_already_disconnected(create_websocket_event):
demo = app.Chalice('app-name')
client = FakeClient(errors=[FakeGoneException])
demo.websocket_api.session = FakeSession(client)
@demo.on_ws_message()
def message_handler(event):
with pytest.raises(WebsocketDisconnectedError) as e:
demo.websocket_api.info('connection_id')
assert e.value.connection_id == 'connection_id'
event = create_websocket_event('$default', body='foo bar')
handler = websocket_handler_for_route('$default', demo)
handler(event, context=None)
calls = client.calls['info']
assert len(calls) == 1
call = calls[0]
connection_id = call[0]
assert connection_id == 'connection_id'
def test_can_get_info_about_websocket_connection(create_websocket_event):
demo = app.Chalice('app-name')
client = FakeClient(infos=[{'foo': 'bar'}])
demo.websocket_api.session = FakeSession(client)
closure = {}
@demo.on_ws_message()
def message_handler(event):
closure['info'] = demo.websocket_api.info('connection_id')
event = create_websocket_event('$default', body='foo bar')
handler = websocket_handler_for_route('$default', demo)
handler(event, context=None)
assert closure['info'] == {'foo': 'bar'}
calls = client.calls['info']
assert len(calls) == 1
call = calls[0]
connection_id = call[0]
assert connection_id == 'connection_id'
def test_can_send_websocket_message(create_websocket_event):
demo = app.Chalice('app-name')
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
@demo.on_ws_message()
def message_handler(event):
demo.websocket_api.send('connection_id', event.body)
event = create_websocket_event('$default', body='foo bar')
handler = websocket_handler_for_route('$default', demo)
handler(event, context=None)
calls = client.calls['post_to_connection']
assert len(calls) == 1
call = calls[0]
connection_id, message = call
assert connection_id == 'connection_id'
assert message == 'foo bar'
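# Illustrative sketch (not part of the test suite): an echo handler combining
# the pieces exercised above -- the connection id carried on the event plus
# websocket_api.send(). As the configuration tests above require, a boto3
# session must be assigned to app.websocket_api.session before send() is used.
# The app name is made up for illustration.
def _sketch_websocket_echo():
    sketch = app.Chalice('ws-echo-sketch')

    @sketch.on_ws_message()
    def echo(event):
        sketch.websocket_api.send(event.connection_id, event.body)
    return sketch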
def test_does_raise_on_send_to_bad_websocket(create_websocket_event):
demo = app.Chalice('app-name')
client = FakeClient(errors=[FakeGoneException])
demo.websocket_api.session = FakeSession(client)
@demo.on_ws_message()
def message_handler(event):
with pytest.raises(WebsocketDisconnectedError) as e:
demo.websocket_api.send('connection_id', event.body)
assert e.value.connection_id == 'connection_id'
event = create_websocket_event('$default', body='foo bar')
handler = websocket_handler_for_route('$default', demo)
handler(event, context=None)
def test_does_reraise_on_websocket_send_error(create_websocket_event):
class SomeOtherError(Exception):
pass
demo = app.Chalice('app-name')
fake_418_error = SomeOtherError()
fake_418_error.response = {'ResponseMetadata': {'HTTPStatusCode': 418}}
client = FakeClient(errors=[fake_418_error])
demo.websocket_api.session = FakeSession(client)
@demo.on_ws_message()
def message_handler(event):
with pytest.raises(SomeOtherError):
demo.websocket_api.send('connection_id', event.body)
event = create_websocket_event('$default', body='foo bar')
handler = websocket_handler_for_route('$default', demo)
handler(event, context=None)
def test_does_reraise_on_other_send_exception(create_websocket_event):
demo = app.Chalice('app-name')
fake_500_error = Exception()
fake_500_error.response = {'ResponseMetadata': {'HTTPStatusCode': 500}}
fake_500_error.key = 'foo'
client = FakeClient(errors=[fake_500_error])
demo.websocket_api.session = FakeSession(client)
@demo.on_ws_message()
def message_handler(event):
with pytest.raises(Exception) as e:
demo.websocket_api.send('connection_id', event.body)
assert e.value.key == 'foo'
event = create_websocket_event('$default', body='foo bar')
demo(event, context=None)
def test_cannot_send_message_on_unconfigured_app():
demo = app.Chalice('app-name')
demo.websocket_api.session = None
with pytest.raises(ValueError) as e:
demo.websocket_api.send('connection_id', 'body')
assert str(e.value) == (
'Assign app.websocket_api.session to a boto3 session before '
'using the WebsocketAPI'
)
def test_cannot_re_register_websocket_handlers(create_websocket_event):
demo = app.Chalice('app-name')
@demo.on_ws_message()
def message_handler(event):
pass
with pytest.raises(ValueError) as e:
@demo.on_ws_message()
def message_handler_2(event):
pass
assert str(e.value) == (
"Duplicate websocket handler: 'on_ws_message'. There can only be one "
"handler for each websocket decorator."
)
@demo.on_ws_connect()
def connect_handler(event):
pass
with pytest.raises(ValueError) as e:
@demo.on_ws_connect()
def connect_handler_2(event):
pass
assert str(e.value) == (
"Duplicate websocket handler: 'on_ws_connect'. There can only be one "
"handler for each websocket decorator."
)
@demo.on_ws_disconnect()
def disconnect_handler(event):
pass
with pytest.raises(ValueError) as e:
@demo.on_ws_disconnect()
def disconnect_handler_2(event):
pass
assert str(e.value) == (
"Duplicate websocket handler: 'on_ws_disconnect'. There can only be "
"one handler for each websocket decorator."
)
def test_can_parse_json_websocket_body(create_websocket_event):
demo = app.Chalice('app-name')
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
@demo.on_ws_message()
def message(event):
assert event.json_body == {'foo': 'bar'}
event = create_websocket_event('$default', body='{"foo": "bar"}')
demo(event, context=None)
def test_can_access_websocket_json_body_twice(create_websocket_event):
demo = app.Chalice('app-name')
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
@demo.on_ws_message()
def message(event):
assert event.json_body == {'foo': 'bar'}
assert event.json_body == {'foo': 'bar'}
event = create_websocket_event('$default', body='{"foo": "bar"}')
demo(event, context=None)
def test_does_raise_on_invalid_json_websocket_body(create_websocket_event):
demo = app.Chalice('app-name')
client = FakeClient()
demo.websocket_api.session = FakeSession(client)
@demo.on_ws_message()
def message(event):
with pytest.raises(BadRequestError) as e:
event.json_body
assert 'Error Parsing JSON' in str(e.value)
event = create_websocket_event('$default', body='foo bar')
demo(event, context=None)
class TestMiddleware:
def test_middleware_basic_api(self):
demo = app.Chalice('app-name')
called = []
@demo.middleware('all')
def myhandler(event, get_response):
called.append({'name': 'myhandler', 'bucket': event.bucket})
return get_response(event)
@demo.middleware('all')
def myhandler2(event, get_response):
called.append({'name': 'myhandler2', 'bucket': event.bucket})
return get_response(event)
@demo.on_s3_event('mybucket')
def handler(event):
called.append({'name': 'main', 'bucket': event.bucket})
return {'bucket': event.bucket}
with Client(demo) as c:
response = c.lambda_.invoke(
'handler', c.events.generate_s3_event('mybucket', 'key')
)
assert response.payload == {'bucket': 'mybucket'}
assert called == [
{'name': 'myhandler', 'bucket': 'mybucket'},
{'name': 'myhandler2', 'bucket': 'mybucket'},
{'name': 'main', 'bucket': 'mybucket'},
]
def test_can_access_original_event_and_context_in_http(self):
demo = app.Chalice('app-name')
called = []
@demo.middleware('http')
def myhandler(event, get_response):
called.append({'event': event})
return get_response(event)
@demo.route('/')
def index():
return {'hello': 'world'}
with Client(demo) as c:
response = c.http.get('/')
assert response.json_body == {'hello': 'world'}
actual_event = called[0]['event']
assert actual_event.path == '/'
assert actual_event.lambda_context.function_name == 'api_handler'
assert actual_event.to_original_event()[
'requestContext']['resourcePath'] == '/'
def test_can_short_circuit_response(self):
demo = app.Chalice('app-name')
called = []
@demo.middleware('all')
def myhandler(event, get_response):
called.append({'name': 'myhandler', 'bucket': event.bucket})
return {'short-circuit': True}
@demo.middleware('all')
def myhandler2(event, get_response):
called.append({'name': 'myhandler2', 'bucket': event.bucket})
return get_response(event)
@demo.on_s3_event('mybucket')
def handler(event):
called.append({'name': 'main', 'bucket': event.bucket})
return {'bucket': event.bucket}
with Client(demo) as c:
response = c.lambda_.invoke(
'handler', c.events.generate_s3_event('mybucket', 'key')
)
assert response.payload == {'short-circuit': True}
assert called == [
{'name': 'myhandler', 'bucket': 'mybucket'},
]
def test_can_alter_response(self):
demo = app.Chalice('app-name')
called = []
@demo.middleware('all')
def myhandler(event, get_response):
called.append({'name': 'myhandler', 'bucket': event.bucket})
response = get_response(event)
response['myhandler'] = True
return response
@demo.middleware('all')
def myhandler2(event, get_response):
called.append({'name': 'myhandler2', 'bucket': event.bucket})
response = get_response(event)
response['myhandler2'] = True
return response
@demo.on_s3_event('mybucket')
def handler(event):
called.append({'name': 'main', 'bucket': event.bucket})
return {'bucket': event.bucket}
with Client(demo) as c:
response = c.lambda_.invoke(
'handler', c.events.generate_s3_event('mybucket', 'key')
)
assert response.payload == {
'bucket': 'mybucket',
'myhandler': True,
'myhandler2': True,
}
assert called == [
{'name': 'myhandler', 'bucket': 'mybucket'},
{'name': 'myhandler2', 'bucket': 'mybucket'},
{'name': 'main', 'bucket': 'mybucket'},
]
def test_can_change_order_of_definitions(self):
demo = app.Chalice('app-name')
called = []
@demo.on_s3_event('mybucket')
def handler(event):
called.append({'name': 'main', 'bucket': event.bucket})
return {'bucket': event.bucket}
@demo.middleware('all')
def myhandler(event, get_response):
called.append({'name': 'myhandler', 'bucket': event.bucket})
response = get_response(event)
response['myhandler'] = True
return response
@demo.middleware('all')
def myhandler2(event, get_response):
called.append({'name': 'myhandler2', 'bucket': event.bucket})
response = get_response(event)
response['myhandler2'] = True
return response
with Client(demo) as c:
response = c.lambda_.invoke(
'handler', c.events.generate_s3_event('mybucket', 'key')
)
assert response.payload == {
'bucket': 'mybucket',
'myhandler': True,
'myhandler2': True,
}
assert called == [
{'name': 'myhandler', 'bucket': 'mybucket'},
{'name': 'myhandler2', 'bucket': 'mybucket'},
{'name': 'main', 'bucket': 'mybucket'},
]
def test_can_use_middleware_for_pure_lambda(self):
demo = app.Chalice('app-name')
called = []
@demo.middleware('all')
def mymiddleware(event, get_response):
called.append({'name': 'mymiddleware', 'event': event.to_dict()})
return get_response(event)
@demo.lambda_function()
def myfunction(event, context):
called.append({'name': 'myfunction', 'event': event})
return {'foo': 'bar'}
with Client(demo) as c:
response = c.lambda_.invoke(
'myfunction', {'input-event': True}
)
assert response.payload == {'foo': 'bar'}
assert called == [
{'name': 'mymiddleware', 'event': {'input-event': True}},
{'name': 'myfunction', 'event': {'input-event': True}},
]
def test_can_use_for_websocket_handlers(self):
demo = app.Chalice('app-name')
called = []
@demo.middleware('all')
def mymiddleware(event, get_response):
called.append({'name': 'mymiddleware', 'event': event.to_dict()})
return get_response(event)
@demo.on_ws_message()
def myfunction(event):
called.append({'name': 'myfunction', 'event': event.to_dict()})
return {'foo': 'bar'}
with Client(demo) as c:
event = {
'requestContext': {
'domainName': 'example.com',
'stage': 'dev',
'connectionId': 'abcd',
'apiId': 'abcd1234',
},
'body': "body"
}
response = c.lambda_.invoke('myfunction', event)
assert response.payload == {'statusCode': 200}
assert called == [
{'name': 'mymiddleware', 'event': event},
{'name': 'myfunction', 'event': event},
]
def test_can_use_rest_api_for_middleware(self):
demo = app.Chalice('app-name')
called = []
@demo.middleware('all')
def mymiddleware(event, get_response):
called.append({'name': 'mymiddleware', 'method': event.method})
response = get_response(event)
response.status_code = 201
return response
@demo.route('/')
def index():
called.append({'url': '/'})
return {'index': True}
@demo.route('/hello')
def hello():
called.append({'url': '/hello'})
return {'hello': True}
with Client(demo) as c:
assert c.http.get('/').json_body == {'index': True}
response = c.http.get('/hello')
assert response.json_body == {'hello': True}
# Verify middleware can alter the response.
assert response.status_code == 201
assert called == [
{'name': 'mymiddleware', 'method': 'GET'},
{'url': '/'},
{'name': 'mymiddleware', 'method': 'GET'},
{'url': '/hello'},
]
def test_error_handler_rest_api_untouched(self):
demo = app.Chalice('app-name')
@demo.middleware('all')
def mymiddleware(event, get_response):
return get_response(event)
@demo.route('/error')
def index():
raise NotFoundError("resource not found")
with Client(demo) as c:
response = c.http.get('/error')
assert response.status_code == 404
assert response.json_body == {
'Code': 'NotFoundError',
'Message': 'NotFoundError: resource not found'
}
def test_unhandled_error_not_caught(self):
demo = app.Chalice('app-name')
@demo.middleware('all')
def mymiddleware(event, get_response):
try:
return get_response(event)
except ChaliceUnhandledError:
return Response(body={'foo': 'bar'}, status_code=200)
@demo.route('/error')
def index():
raise ChaliceUnhandledError("unhandled")
with Client(demo) as c:
response = c.http.get('/error')
assert response.status_code == 200
assert response.json_body == {'foo': 'bar'}
def test_middleware_errors_return_500_still_caught(self):
demo = app.Chalice('app-name')
@demo.middleware('all')
def mymiddleware(event, get_response):
return get_response(event)
@demo.route('/error')
def index():
raise ChaliceUnhandledError("unhandled")
with Client(demo) as c:
# An uncaught ChaliceUnhandledError should still result
# in the standard error handler processing for REST APIs
# if the exception propagates out of the middleware stack.
response = c.http.get('/error')
assert response.status_code == 500
assert response.json_body == {
'Code': 'InternalServerError',
'Message': 'An internal server error occurred.'
}
def test_middleware_errors_result_in_500(self):
demo = app.Chalice('app-name')
@demo.middleware('all')
def mymiddleware(event, get_response):
raise Exception("Error from middleware.")
@demo.route('/')
def index():
return {}
with Client(demo) as c:
response = c.http.get('/')
assert response.status_code == 500
assert response.json_body['Code'] == 'InternalServerError'
def test_can_filter_middleware_registration(self, sample_middleware_app):
with Client(sample_middleware_app) as c:
c.http.get('/')
assert sample_middleware_app.calls == [
{'type': 'all', 'event': 'Request'},
{'type': 'http', 'event': 'Request'},
]
sample_middleware_app.calls[:] = []
c.lambda_.invoke(
's3_handler', c.events.generate_s3_event('bucket', 'key'))
assert sample_middleware_app.calls == [
{'type': 'all', 'event': 'S3Event'},
{'type': 's3', 'event': 'S3Event'},
]
sample_middleware_app.calls[:] = []
c.lambda_.invoke(
'sns_handler', c.events.generate_sns_event('topic', 'message'))
assert sample_middleware_app.calls == [
{'type': 'all', 'event': 'SNSEvent'},
{'type': 'sns', 'event': 'SNSEvent'},
]
sample_middleware_app.calls[:] = []
c.lambda_.invoke(
'sqs_handler', c.events.generate_sns_event('queue', 'message'))
# There is no SQS-specific middleware.
assert sample_middleware_app.calls == [
{'type': 'all', 'event': 'SQSEvent'},
]
sample_middleware_app.calls[:] = []
c.lambda_.invoke('lambda_handler', {})
assert sample_middleware_app.calls == [
{'type': 'all', 'event': 'LambdaFunctionEvent'},
{'type': 'pure_lambda', 'event': 'LambdaFunctionEvent'},
]
sample_middleware_app.calls[:] = []
c.lambda_.invoke('ws_handler', {
'requestContext': {
'domainName': 'example.com',
'stage': 'dev',
'connectionId': 'abcd',
'apiId': 'abcd1234',
},
'body': "body"
})
assert sample_middleware_app.calls == [
{'type': 'all', 'event': 'WebsocketEvent'},
{'type': 'websocket', 'event': 'WebsocketEvent'},
]
def test_can_register_middleware_on_blueprints(self):
demo = app.Chalice('app-name')
bp = app.Blueprint('bpmiddleware')
called = []
@demo.middleware('all')
def mymiddleware(event, get_response):
called.append({'name': 'fromapp', 'bucket': event.bucket})
return get_response(event)
@bp.middleware('all')
def bp_middleware(event, get_response):
called.append({'name': 'frombp', 'bucket': event.bucket})
return get_response(event)
@bp.on_s3_event('mybucket')
def bp_handler(event):
called.append({'name': 'bp_handler', 'bucket': event.bucket})
return {'bucket': event.bucket}
@bp.route('/')
def index():
pass
@demo.on_s3_event('mybucket')
def handler(event):
called.append({'name': 'main', 'bucket': event.bucket})
return {'bucket': event.bucket}
demo.register_blueprint(bp)
with Client(demo) as c:
# The order is particular here. When we're invoking the lambda
# function from the "app" (demo) object, we expect
# the order to be mymiddleware, bp_middleware because mymiddleware
# is registered before the .register_blueprint().
response = c.lambda_.invoke(
'handler', c.events.generate_s3_event('mybucket', 'key')
)
assert response.payload == {'bucket': 'mybucket'}
assert called == [
{'name': 'fromapp', 'bucket': 'mybucket'},
{'name': 'frombp', 'bucket': 'mybucket'},
{'name': 'main', 'bucket': 'mybucket'},
]
called[:] = []
response = c.lambda_.invoke(
'bp_handler', c.events.generate_s3_event('mybucket', 'key')
)
assert response.payload == {'bucket': 'mybucket'}
assert called == [
{'name': 'fromapp', 'bucket': 'mybucket'},
{'name': 'frombp', 'bucket': 'mybucket'},
{'name': 'bp_handler', 'bucket': 'mybucket'},
]
def test_blueprint_gets_middleware_added(self):
demo = app.Chalice('app-name')
bp = app.Blueprint('bpmiddleware')
called = []
@bp.middleware('all')
def bp_middleware(event, get_response):
called.append({'name': 'frombp', 'bucket': 'mybucket'})
return get_response(event)
@demo.on_s3_event('mybucket')
def handler(event):
called.append({'name': 'main', 'bucket': event.bucket})
return {'bucket': event.bucket}
demo.register_blueprint(bp)
with Client(demo) as c:
response = c.lambda_.invoke(
'handler', c.events.generate_s3_event('mybucket', 'key')
)
assert response.payload == {'bucket': 'mybucket'}
assert called == [
{'name': 'frombp', 'bucket': 'mybucket'},
{'name': 'main', 'bucket': 'mybucket'},
]
def test_can_register_middleware_without_decorator(self):
demo = app.Chalice('app-name')
called = []
def mymiddleware(event, get_response):
called.append({'name': 'mymiddleware', 'event': event.to_dict()})
return get_response(event)
@demo.lambda_function()
def myfunction(event, context):
called.append({'name': 'myfunction', 'event': event})
return {'foo': 'bar'}
demo.register_middleware(mymiddleware, 'all')
with Client(demo) as c:
response = c.lambda_.invoke(
'myfunction', {'input-event': True}
)
assert response.payload == {'foo': 'bar'}
assert called == [
{'name': 'mymiddleware', 'event': {'input-event': True}},
{'name': 'myfunction', 'event': {'input-event': True}},
]
def test_can_convert_existing_lambda_decorator_to_middleware(self):
demo = app.Chalice('app-name')
called = []
def mydecorator(func):
def _wrapped(event, context):
called.append({'name': 'wrapped', 'event': event})
return func(event, context)
return _wrapped
@demo.middleware('all')
def second_middleware(event, get_response):
called.append({'name': 'second', 'event': event.to_dict()})
return get_response(event)
@demo.lambda_function()
def myfunction(event, context):
called.append({'name': 'myfunction', 'event': event})
return {'foo': 'bar'}
demo.register_middleware(ConvertToMiddleware(mydecorator))
with Client(demo) as c:
response = c.lambda_.invoke(
'myfunction', {'input-event': True}
)
assert response.payload == {'foo': 'bar'}
assert called == [
{'name': 'second', 'event': {'input-event': True}},
{'name': 'wrapped', 'event': {'input-event': True}},
{'name': 'myfunction', 'event': {'input-event': True}},
]
|
# coding=utf8
#
# (C) 2015-2016, MIT License
'''
Helper utilities to make processing easier.
'''
import sys
from .constants import KATAKANA, HIRAGANA
# Set the correct code point function based on whether we're on Python 2 or 3.
if sys.version_info < (3, 0):
chr = unichr
# The start and end offsets of the hiragana and katakana Unicode blocks.
# The ranges are inclusive and include only printable kana characters,
# e.g. あ, ぃ, ヸ, etc.
offsets = {
KATAKANA: {
'start': 0x30A0,
'range': [0x30A1, 0x30FA],
'direction': 1
},
HIRAGANA: {
'start': 0x3040,
'range': [0x3041, 0x3096],
'direction': -1
}
}
# The total distance between both blocks.
block_offset = offsets[KATAKANA]['start'] - offsets[HIRAGANA]['start']
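# With the offsets above this works out to 0x60, i.e. 96 code points.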
def in_range(offset, target=''):
'''
Returns whether a particular offset is within the range of printable
kana characters.
'''
range = offsets[target]['range']
return range[0] <= offset <= range[1]
def switch_charset(characters, target=''):
'''
Transforms an iterable of kana characters to its opposite script.
For example, it can turn [u'あ', u'い'] into [u'ア', u'イ'],
or {u'ホ': u'ボ'} into {u'ほ': u'ぼ'}.
There are no safety checks: the input is assumed to already be in the script
opposite to `target`, otherwise the resulting characters will be garbled.
'''
if isinstance(characters, dict):
return _switch_charset_dict(characters, target)
else:
return _switch_charset_list(characters, target)
def _switch_charset_dict(characters, target=''):
'''
Switches the character set of the key/value pairs in a dictionary.
'''
offset_characters = {}
offset = block_offset * offsets[target]['direction']
for char in characters:
offset_key = chr(ord(char) + offset)
offset_value = chr(ord(characters[char]) + offset)
offset_characters[offset_key] = offset_value
return offset_characters
def _switch_charset_list(characters, target=''):
'''
Switches the character set of a list. If a character does not have
an equivalent in the target script (e.g. ヹ when converting to hiragana),
the original character is kept.
'''
# Copy the list to avoid modifying the existing one.
characters = characters[:]
offset = block_offset * offsets[target]['direction']
for n in range(len(characters)):
chars = list(characters[n])
for m in range(len(chars)):
char = chars[m]
char_offset = ord(char) + offset
# Verify that the offset character is within the valid range.
if in_range(char_offset, target):
chars[m] = chr(char_offset)
else:
chars[m] = char
characters[n] = ''.join(chars)
return characters
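# Example usage (a minimal sketch; KATAKANA and HIRAGANA come from .constants
# and name the script being converted *to*):
#
#     switch_charset([u'あ', u'い'], KATAKANA)    # -> [u'ア', u'イ']
#     switch_charset({u'ホ': u'ボ'}, HIRAGANA)    # -> {u'ほ': u'ぼ'}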
def merge_dicts(*dicts):
'''
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
Taken from an answer by Aaron Hall on Stack Overflow:
<http://stackoverflow.com/a/26853961>.
'''
result = {}
for dictionary in dicts:
result.update(dictionary)
return result
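# Example: merge_dicts({'a': 1}, {'a': 2, 'b': 3}) returns {'a': 2, 'b': 3},
# since key/value pairs in later dicts take precedence.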
def kana_romaji_lt(romaji, *kana):
'''
Generates a lookup table with the kana characters on the left side
and their rōmaji equivalents as the values.
For the consonant-vowel (cv) characters, we'll generate:
{u'か': ('ka', 'k', 'k', 'ā'),
u'が': ('ga', 'g', 'g', 'ā'),
[...]
Multiple kana character sets can be passed as rest arguments.
'''
lt = {}
for kana_set in kana:
for n in range(len(romaji)):
ro = romaji[n]
ka = kana_set[n]
lt[ka] = ro
return lt
def fw_romaji_lt(full, regular):
'''
Generates a lookup table with the fullwidth rōmaji characters
on the left side, and the regular rōmaji characters as the values.
'''
lt = {}
for n in range(len(full)):
fw = full[n]
reg = regular[n]
lt[fw] = reg
return lt
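# Example usage (a minimal sketch; assumes the romaji and kana arguments are
# parallel, index-aligned sequences, as both functions above expect):
#
#     kana_romaji_lt(['a', 'i'], u'あい', u'アイ')
#     # -> {u'あ': 'a', u'い': 'i', u'ア': 'a', u'イ': 'i'}
#     fw_romaji_lt(u'ａｂ', 'ab')
#     # -> {u'ａ': 'a', u'ｂ': 'b'}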
|
from setuptools import setup
setup(
name='SimsvcClient',
version='0.2.0',
description='Python client for distributed simulation service',
packages=['simclient', 'multiobjtools'],
author='Hannu Rummukainen',
author_email='hannu.rummukainen@vtt.fi',
keywords=['simulation'],
python_requires=">= 3.6",
install_requires=["requests", "python-socketio", "numpy"],
extras_require={"bo": ["dragonfly", "pandas", "pyyaml"]}
)
|
"""Model module."""
import json
from assemblyai.config import ASSEMBLYAI_URL
from assemblyai.exceptions import handle_warnings
import requests
class Model(object):
"""Custom model object."""
def __init__(self, client, phrases=None, name=None):
self.headers = client.headers
self.api = client.api
self.phrases = phrases
self.name = name
self.id = None
self.log = client.log
self.status = None
self.warning = None
self.dict = None
def __repr__(self):
return 'Model(id=%s, status=%s)' % (self.id, self.status)
def props(self):
return [i for i in self.__dict__.keys() if i[:1] != '_']
def reset(self, id=None):
if id:
self.id = id
self.status = None
self.name = None
self.phrases = None
self.warning = None
self.dict = None
def create(self):
data = {}
data["phrases"] = self.phrases # TODO validate phrases
if self.name:
data['name'] = self.name
payload = json.dumps(data)
url = ASSEMBLYAI_URL + '/model'
response = requests.post(url, data=payload, headers=self.headers)
self.warning = handle_warnings(response, 'model', self.log)
response = response.json()['model']
self.id, self.status = response['id'], response['status']
self.log.debug('Model %s %s' % (self.id, self.status))
return self
def get(self, id=None):
"""Get a custom model."""
self.reset(id)
url = ASSEMBLYAI_URL + '/model/' + str(self.id)
response = requests.get(url, headers=self.headers)
self.warning = handle_warnings(response, 'model', self.log)
response = response.json()['model']
# self.phrases = response['phrases']
self.dict = response
self.status = response['status']
self.log.debug('Model %s %s' % (self.id, self.status))
return self
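# Example usage (a minimal sketch; assumes `client` is an assemblyai client
# exposing `headers`, `api`, and `log`, and the phrases/name values below are
# purely illustrative):
#
#     model = Model(client, phrases=['assemblyai', 'speech'], name='demo')
#     model.create()          # POST <ASSEMBLYAI_URL>/model
#     model.get(model.id)     # GET <ASSEMBLYAI_URL>/model/<id>, refreshes status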
|
""" Tests the ins and outs of automatic unit conversion in OpenMDAO."""
import unittest
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal, assert_warning
from openmdao.test_suite.components.unit_conv import UnitConvGroup, SrcComp, TgtCompC, TgtCompF, \
TgtCompK, SrcCompFD, TgtCompCFD, TgtCompFFD, TgtCompKFD, TgtCompFMulti
class SpeedComp(om.ExplicitComponent):
"""Simple speed computation from distance and time with unit conversations."""
def setup(self):
self.add_input('distance', val=1.0, units='km')
self.add_input('time', val=1.0, units='h')
self.add_output('speed', val=1.0, units='km/h')
def compute(self, inputs, outputs):
outputs['speed'] = inputs['distance'] / inputs['time']
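# Worked example (exercised in test_speed below): inputs of 1 m and 1 s are
# converted here to 1.0e-3 km and 1/3600 h, giving speed = 3.6 km/h.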
class TestUnitConversion(unittest.TestCase):
""" Testing automatic unit conversion."""
def test_basic_dense_jac(self):
"""Test that output values and total derivatives are correct."""
prob = om.Problem(model=UnitConvGroup(assembled_jac_type='dense'))
prob.model.linear_solver = om.DirectSolver(assemble_jac=True)
# Check the outputs after running to test the unit conversions
prob.setup(check=False, mode='fwd')
prob.run_model()
assert_near_equal(prob['src.x2'], 100.0, 1e-6)
assert_near_equal(prob['tgtF.x3'], 212.0, 1e-6)
assert_near_equal(prob['tgtC.x3'], 100.0, 1e-6)
assert_near_equal(prob['tgtK.x3'], 373.15, 1e-6)
# Check the total derivatives in forward mode
wrt = ['px1.x1']
of = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['tgtF.x3', 'px1.x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['tgtC.x3', 'px1.x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['tgtK.x3', 'px1.x1'][0][0], 1.0, 1e-6)
# Check the total derivatives in reverse mode
prob.setup(check=False, mode='rev')
prob.run_model()
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['tgtF.x3', 'px1.x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['tgtC.x3', 'px1.x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['tgtK.x3', 'px1.x1'][0][0], 1.0, 1e-6)
def test_dangling_input_w_units(self):
prob = om.Problem()
prob.model.add_subsystem('C1', om.ExecComp('y=x', x={'units': 'ft'}, y={'units': 'm'}))
prob.setup()
prob.run_model()
# this test passes as long as it doesn't raise an exception
def test_speed(self):
import openmdao.api as om
from openmdao.core.tests.test_units import SpeedComp
prob = om.Problem()
prob.model.add_subsystem('c1', SpeedComp())
prob.model.add_subsystem('c2', om.ExecComp('f=speed',speed={'units': 'm/s'}))
prob.model.set_input_defaults('c1.distance', val=1., units='m')
prob.model.set_input_defaults('c1.time', val=1., units='s')
prob.model.connect('c1.speed', 'c2.speed')
prob.setup()
prob.run_model()
assert_near_equal(prob.get_val('c1.distance'), 1.e-3) # units: km
assert_near_equal(prob.get_val('c1.time'), 1./3600.) # units: h
assert_near_equal(prob.get_val('c1.speed'), 3.6) # units: km/h
assert_near_equal(prob.get_val('c2.f'), 1.0) # units: m/s
def test_basic(self):
"""Test that output values and total derivatives are correct."""
prob = om.Problem(model=UnitConvGroup())
# Check the outputs after running to test the unit conversions
prob.setup(check=False, mode='fwd')
prob.run_model()
assert_near_equal(prob['src.x2'], 100.0, 1e-6)
assert_near_equal(prob['tgtF.x3'], 212.0, 1e-6)
assert_near_equal(prob['tgtC.x3'], 100.0, 1e-6)
assert_near_equal(prob['tgtK.x3'], 373.15, 1e-6)
# Check the total derivatives in forward mode
wrt = ['px1.x1']
of = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['tgtF.x3', 'px1.x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['tgtC.x3', 'px1.x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['tgtK.x3', 'px1.x1'][0][0], 1.0, 1e-6)
# Check the total derivatives in reverse mode
prob.setup(check=False, mode='rev')
prob.run_model()
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['tgtF.x3', 'px1.x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['tgtC.x3', 'px1.x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['tgtK.x3', 'px1.x1'][0][0], 1.0, 1e-6)
# Make sure check partials handles conversion
data = prob.check_partials(out_stream=None)
for key1, val1 in data.items():
for key2, val2 in val1.items():
assert_near_equal(val2['abs error'][0], 0.0, 1e-6)
assert_near_equal(val2['rel error'][0], 0.0, 1e-6)
def test_basic_apply(self):
"""Test that output values and total derivatives are correct."""
class SrcCompa(om.ExplicitComponent):
"""Source provides degrees Celsius."""
def setup(self):
self.add_input('x1', 100.0)
self.add_output('x2', 100.0, units='degC')
def compute(self, inputs, outputs):
""" Pass through."""
outputs['x2'] = inputs['x1']
def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
""" Derivative is 1.0"""
if mode == 'fwd':
d_outputs['x2'] += d_inputs['x1']
else:
d_inputs['x1'] += d_outputs['x2']
class TgtCompFa(om.ExplicitComponent):
"""Target expressed in degrees F."""
def setup(self):
self.add_input('x2', 100.0, units='degF')
self.add_output('x3', 100.0)
def compute(self, inputs, outputs):
""" Pass through."""
outputs['x3'] = inputs['x2']
def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
""" Derivative is 1.0"""
if mode == 'fwd':
d_outputs['x3'] += d_inputs['x2']
else:
d_inputs['x2'] += d_outputs['x3']
prob = om.Problem()
model = prob.model
model.add_subsystem('px1', om.IndepVarComp('x1', 100.0))
model.add_subsystem('src', SrcCompa())
model.add_subsystem('tgtF', TgtCompFa())
model.connect('px1.x1', 'src.x1')
model.connect('src.x2', 'tgtF.x2')
# Check the outputs after running to test the unit conversions
prob.setup(check=False, mode='fwd')
prob.run_model()
assert_near_equal(prob['src.x2'], 100.0, 1e-6)
assert_near_equal(prob['tgtF.x3'], 212.0, 1e-6)
# Check the total derivatives in forward mode
wrt = ['px1.x1']
of = ['tgtF.x3']
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['tgtF.x3', 'px1.x1'][0][0], 1.8, 1e-6)
# Check the total derivatives in reverse mode
prob.setup(check=False, mode='rev')
prob.run_model()
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['tgtF.x3', 'px1.x1'][0][0], 1.8, 1e-6)
def test_basic_fd_comps(self):
prob = om.Problem()
prob.model.add_subsystem('px1', om.IndepVarComp('x1', 100.0), promotes=['x1'])
prob.model.add_subsystem('src', SrcCompFD())
prob.model.add_subsystem('tgtF', TgtCompFFD())
prob.model.add_subsystem('tgtC', TgtCompCFD())
prob.model.add_subsystem('tgtK', TgtCompKFD())
prob.model.connect('x1', 'src.x1')
prob.model.connect('src.x2', 'tgtF.x2')
prob.model.connect('src.x2', 'tgtC.x2')
prob.model.connect('src.x2', 'tgtK.x2')
prob.setup()
prob.run_model()
assert_near_equal(prob['src.x2'], 100.0, 1e-6)
assert_near_equal(prob['tgtF.x3'], 212.0, 1e-6)
assert_near_equal(prob['tgtC.x3'], 100.0, 1e-6)
assert_near_equal(prob['tgtK.x3'], 373.15, 1e-6)
indep_list = ['x1']
unknown_list = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']
J = prob.compute_totals(of=unknown_list, wrt=indep_list, return_format='dict')
assert_near_equal(J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
prob.setup(check=False, mode='rev')
prob.run_model()
J = prob.compute_totals(of=unknown_list, wrt=indep_list, return_format='dict')
assert_near_equal(J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
prob.model.approx_totals(method='fd')
prob.setup(check=False, mode='rev')
prob.run_model()
J = prob.compute_totals(of=unknown_list, wrt=indep_list, return_format='dict')
assert_near_equal(J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
# Make sure check partials handles conversion
data = prob.check_partials(out_stream=None)
for key1, val1 in data.items():
for key2, val2 in val1.items():
assert_near_equal(val2['abs error'][0], 0.0, 1e-6)
assert_near_equal(val2['rel error'][0], 0.0, 1e-6)
def test_bad_units(self):
"""Test error handling when invalid units are declared."""
class Comp1(om.ExplicitComponent):
def setup(self):
self.add_input('x', 0.0, units='junk')
class Comp2(om.ExplicitComponent):
def setup(self):
self.add_output('x', 0.0, units='junk')
with self.assertRaises(Exception) as cm:
prob = om.Problem(model=Comp1())
prob.setup()
expected_msg = "The units 'junk' are invalid"
self.assertTrue(expected_msg in str(cm.exception))
with self.assertRaises(Exception) as cm:
prob = om.Problem(model=Comp2())
prob.setup()
expected_msg = "The units 'junk' are invalid"
self.assertTrue(expected_msg in str(cm.exception))
def test_incompatible_units(self):
"""Test error handling when only one of src and tgt have units."""
prob = om.Problem()
prob.model.add_subsystem('px1', om.IndepVarComp('x1', 100.0), promotes_outputs=['x1'])
prob.model.add_subsystem('src', SrcComp(), promotes_inputs=['x1'])
prob.model.add_subsystem('tgt', om.ExecComp('yy=xx', xx={'value': 0.0, 'units': None}))
prob.model.connect('src.x2', 'tgt.xx')
msg = "<model> <class Group>: Output 'src.x2' with units of 'degC' is connected to input 'tgt.xx' which has no units."
with assert_warning(UserWarning, msg):
prob.setup()
def test_basic_implicit_conn(self):
"""Test units with all implicit connections."""
prob = om.Problem()
prob.model.add_subsystem('px1', om.IndepVarComp('x1', 100.0), promotes_outputs=['x1'])
prob.model.add_subsystem('src', SrcComp(), promotes_inputs=['x1'], promotes_outputs=['x2'])
prob.model.add_subsystem('tgtF', TgtCompF(), promotes_inputs=['x2'])
prob.model.add_subsystem('tgtC', TgtCompC(), promotes_inputs=['x2'])
prob.model.add_subsystem('tgtK', TgtCompK(), promotes_inputs=['x2'])
# Check the outputs after running to test the unit conversions
prob.setup()
prob.run_model()
assert_near_equal(prob['x2'], 100.0, 1e-6)
assert_near_equal(prob['tgtF.x3'], 212.0, 1e-6)
assert_near_equal(prob['tgtC.x3'], 100.0, 1e-6)
assert_near_equal(prob['tgtK.x3'], 373.15, 1e-6)
# Check the total derivatives in forward mode
wrt = ['x1']
of = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['tgtF.x3', 'x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['tgtC.x3', 'x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['tgtK.x3', 'x1'][0][0], 1.0, 1e-6)
# Check the total derivatives in reverse mode
prob.setup(check=False, mode='rev')
prob.run_model()
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['tgtF.x3', 'x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['tgtC.x3', 'x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['tgtK.x3', 'x1'][0][0], 1.0, 1e-6)
def test_basic_grouped(self):
prob = om.Problem()
prob.model.add_subsystem('px1', om.IndepVarComp('x1', 100.0), promotes=['x1'])
sub1 = prob.model.add_subsystem('sub1', om.Group())
sub2 = prob.model.add_subsystem('sub2', om.Group())
sub1.add_subsystem('src', SrcComp())
sub2.add_subsystem('tgtF', TgtCompF())
sub2.add_subsystem('tgtC', TgtCompC())
sub2.add_subsystem('tgtK', TgtCompK())
prob.model.connect('x1', 'sub1.src.x1')
prob.model.connect('sub1.src.x2', 'sub2.tgtF.x2')
prob.model.connect('sub1.src.x2', 'sub2.tgtC.x2')
prob.model.connect('sub1.src.x2', 'sub2.tgtK.x2')
prob.setup()
prob.run_model()
assert_near_equal(prob['sub1.src.x2'], 100.0, 1e-6)
assert_near_equal(prob['sub2.tgtF.x3'], 212.0, 1e-6)
assert_near_equal(prob['sub2.tgtC.x3'], 100.0, 1e-6)
assert_near_equal(prob['sub2.tgtK.x3'], 373.15, 1e-6)
wrt = ['x1']
of = ['sub2.tgtF.x3', 'sub2.tgtC.x3', 'sub2.tgtK.x3']
J = prob.compute_totals(of=of, wrt=wrt, return_format='dict')
assert_near_equal(J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
# Check the total derivatives in reverse mode
prob.setup(check=False, mode='rev')
prob.run_model()
J = prob.compute_totals(of=of, wrt=wrt, return_format='dict')
assert_near_equal(J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
def test_basic_grouped_bug_from_pycycle(self):
prob = om.Problem()
root = prob.model
prob.model.add_subsystem('px1', om.IndepVarComp('x1', 100.0), promotes=['x1'])
sub1 = prob.model.add_subsystem('sub1', om.Group(), promotes=['x2'])
sub1.add_subsystem('src', SrcComp(), promotes=['x2'])
root.add_subsystem('tgtF', TgtCompFMulti())
root.add_subsystem('tgtC', TgtCompC())
root.add_subsystem('tgtK', TgtCompK())
prob.model.connect('x1', 'sub1.src.x1')
prob.model.connect('x2', 'tgtF.x2')
prob.model.connect('x2', 'tgtC.x2')
prob.model.connect('x2', 'tgtK.x2')
prob.setup()
prob.run_model()
assert_near_equal(prob['x2'], 100.0, 1e-6)
assert_near_equal(prob['tgtF.x3'], 212.0, 1e-6)
assert_near_equal(prob['tgtC.x3'], 100.0, 1e-6)
assert_near_equal(prob['tgtK.x3'], 373.15, 1e-6)
wrt = ['x1']
of = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']
J = prob.compute_totals(of=of, wrt=wrt, return_format='dict')
assert_near_equal(J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
# Check the total derivatives in reverse mode
prob.setup(check=False, mode='rev')
prob.run_model()
J = prob.compute_totals(of=of, wrt=wrt, return_format='dict')
assert_near_equal(J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_near_equal(J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_near_equal(J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
#def test_basic_grouped_grouped_implicit(self):
#prob = om.Problem()
#root = prob.model
#sub1 = prob.model.add_subsystem('sub1', om.Group(), promotes=['x2'])
#sub2 = prob.model.add_subsystem('sub2', om.Group(), promotes=['x2'])
#sub1.add_subsystem('src', SrcComp(), promotes = ['x2'])
#sub2.add_subsystem('tgtF', TgtCompFMulti(), promotes=['x2'])
#sub2.add_subsystem('tgtC', TgtCompC(), promotes=['x2'])
#sub2.add_subsystem('tgtK', TgtCompK(), promotes=['x2'])
#prob.model.add_subsystem('px1', om.IndepVarComp('x1', 100.0), promotes=['x1'])
#prob.model.connect('x1', 'sub1.src.x1')
#prob.setup()
#prob.run_model()
#assert_near_equal(prob['x2'], 100.0, 1e-6)
#assert_near_equal(prob['sub2.tgtF.x3'], 212.0, 1e-6)
#assert_near_equal(prob['sub2.tgtC.x3'], 100.0, 1e-6)
#assert_near_equal(prob['sub2.tgtK.x3'], 373.15, 1e-6)
#indep_list = ['x1']
#unknown_list = ['sub2.tgtF.x3', 'sub2.tgtC.x3', 'sub2.tgtK.x3']
#J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
#return_format='dict')
#assert_near_equal(J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
#assert_near_equal(J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
#assert_near_equal(J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
#J = prob.calc_gradient(indep_list, unknown_list, mode='rev',
#return_format='dict')
#assert_near_equal(J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
#assert_near_equal(J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
#assert_near_equal(J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
#J = prob.calc_gradient(indep_list, unknown_list, mode='fd',
#return_format='dict')
#assert_near_equal(J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
#assert_near_equal(J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
#assert_near_equal(J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
#def test_apply_linear_adjoint(self):
## Make sure we can index into dinputs
#class Attitude_Angular(Component):
#""" Calculates angular velocity vector from the satellite's orientation
#matrix and its derivative.
#"""
#def __init__(self, n=2):
#super().__init__()
#self.n = n
## Inputs
#self.add_param('O_BI', np.zeros((3, 3, n)), units="ft",
#desc="Rotation matrix from body-fixed frame to Earth-centered "
#"inertial frame over time")
#self.add_param('Odot_BI', np.zeros((3, 3, n)), units="km",
#desc="First derivative of O_BI over time")
## Outputs
#self.add_output('w_B', np.zeros((3, n)), units="1/s",
#desc="Angular velocity vector in body-fixed frame over time")
#self.dw_dOdot = np.zeros((n, 3, 3, 3))
#self.dw_dO = np.zeros((n, 3, 3, 3))
#def solve_nonlinear(self, inputs, outputs, resids):
#""" Calculate output. """
#O_BI = inputs['O_BI']
#Odot_BI = inputs['Odot_BI']
#w_B = outputs['w_B']
#for i in range(0, self.n):
#w_B[0, i] = np.dot(Odot_BI[2, :, i], O_BI[1, :, i])
#w_B[1, i] = np.dot(Odot_BI[0, :, i], O_BI[2, :, i])
#w_B[2, i] = np.dot(Odot_BI[1, :, i], O_BI[0, :, i])
#def linearize(self, inputs, outputs, resids):
#""" Calculate and save derivatives. (i.e., Jacobian) """
#O_BI = inputs['O_BI']
#Odot_BI = inputs['Odot_BI']
#for i in range(0, self.n):
#self.dw_dOdot[i, 0, 2, :] = O_BI[1, :, i]
#self.dw_dO[i, 0, 1, :] = Odot_BI[2, :, i]
#self.dw_dOdot[i, 1, 0, :] = O_BI[2, :, i]
#self.dw_dO[i, 1, 2, :] = Odot_BI[0, :, i]
#self.dw_dOdot[i, 2, 1, :] = O_BI[0, :, i]
#self.dw_dO[i, 2, 0, :] = Odot_BI[1, :, i]
#def apply_linear(self, inputs, outputs, dinputs, doutputs, dresids, mode):
#""" Matrix-vector product with the Jacobian. """
#dw_B = dresids['w_B']
#if mode == 'fwd':
#for k in range(3):
#for i in range(3):
#for j in range(3):
#if 'O_BI' in dinputs:
#dw_B[k, :] += self.dw_dO[:, k, i, j] * \
#dinputs['O_BI'][i, j, :]
#if 'Odot_BI' in dinputs:
#dw_B[k, :] += self.dw_dOdot[:, k, i, j] * \
#dinputs['Odot_BI'][i, j, :]
#else:
#for k in range(3):
#for i in range(3):
#for j in range(3):
#if 'O_BI' in dinputs:
#dinputs['O_BI'][i, j, :] += self.dw_dO[:, k, i, j] * \
#dw_B[k, :]
#if 'Odot_BI' in dinputs:
#dinputs['Odot_BI'][i, j, :] -= -self.dw_dOdot[:, k, i, j] * \
#dw_B[k, :]
#prob = om.Problem()
#root = prob.model
#prob.model.add_subsystem('comp', Attitude_Angular(n=5), promotes=['*'])
#prob.model.add_subsystem('p1', om.IndepVarComp('O_BI', np.ones((3, 3, 5))), promotes=['*'])
#prob.model.add_subsystem('p2', om.IndepVarComp('Odot_BI', np.ones((3, 3, 5))), promotes=['*'])
#prob.setup()
#prob.run_model()
#indep_list = ['O_BI', 'Odot_BI']
#unknown_list = ['w_B']
#Jf = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
#return_format='dict')
#indep_list = ['O_BI', 'Odot_BI']
#unknown_list = ['w_B']
#Jr = prob.calc_gradient(indep_list, unknown_list, mode='rev',
#return_format='dict')
#for key, val in Jr.items():
#for key2 in val:
#diff = abs(Jf[key][key2] - Jr[key][key2])
#assert_near_equal(diff, 0.0, 1e-10)
def test_incompatible_connections(self):
class BadComp(om.ExplicitComponent):
def setup(self):
self.add_input('x2', 100.0, units='m')
self.add_output('x3', 100.0)
# Explicit Connection
prob = om.Problem()
prob.model.add_subsystem('src', SrcComp())
prob.model.add_subsystem('dest', BadComp())
prob.model.connect('src.x2', 'dest.x2')
with self.assertRaises(Exception) as cm:
prob.setup()
expected_msg = "<model> <class Group>: Output units of 'degC' for 'src.x2' are incompatible with input units of 'm' for 'dest.x2'."
self.assertEqual(expected_msg, str(cm.exception))
# Implicit Connection
prob = om.Problem()
prob.model.add_subsystem('src', SrcComp(), promotes=['x2'])
prob.model.add_subsystem('dest', BadComp(),promotes=['x2'])
with self.assertRaises(Exception) as cm:
prob.setup()
expected_msg = "<model> <class Group>: Output units of 'degC' for 'src.x2' are incompatible with input units of 'm' for 'dest.x2'."
self.assertEqual(expected_msg, str(cm.exception))
#def test_nested_relevancy_base(self):
## This one actually has nothing to do with units, but it tests the
## "rest" of the problem that the others are testing, namely that
## outscope vars could sometimes cause a problem even absent any units.
#prob = om.Problem()
#root = prob.model
#root.add_subsystem('p1', om.IndepVarComp('xx', 3.0))
#root.add_subsystem('c1', om.ExecComp(['y1=0.5*x + 0.1*xx', 'y2=0.3*x - 1.0*xx']))
#root.add_subsystem('c2', om.ExecComp(['y=0.5*x']))
#sub = root.add_subsystem('sub', om.Group())
#sub.add_subsystem('cc1', om.ExecComp(['y=0.1*x1 + 0.01*x2']))
#sub.add_subsystem('cc2', om.ExecComp(['y=0.1*x']))
#root.connect('p1.xx', 'c1.xx')
#root.connect('c1.y1', 'c2.x')
##root.connect('c2.y', 'c1.x')
#root.connect('c1.y2', 'sub.cc1.x1')
#root.connect('sub.cc1.y', 'sub.cc2.x')
#root.connect('sub.cc2.y', 'sub.cc1.x2')
#root.nonlinear_solver = Newton()
#root.linear_solver = ScipyKrylov()
#sub.nonlinear_solver = Newton()
#sub.linear_solver = om.DirectSolver()
#prob.driver.add_desvar('p1.xx')
#prob.driver.add_objective('c1.y2')
#prob.setup()
#prob.run_model()
## Pollute the dpvec
#sub.dpmat[None]['cc1.x1'] = 1e10
## Make sure we can calculate a good derivative in the presence of pollution
#sub.linear_solver.rel_inputs = ['sub.cc2.x', 'sub.cc1.x2']
#rhs_buf = {None : np.array([3.5, 1.7])}
#sol_buf = sub.linear_solver.solve(rhs_buf, sub, mode='fwd')[None]
#assert_near_equal(sol_buf[0], -3.52052052, 1e-3)
#assert_near_equal(sol_buf[1], -2.05205205, 1e-3)
#def test_nested_relevancy(self):
## This test is just to make sure that values in the dp vector from
## higher scopes aren't sitting there converting themselves during sub
## iterations.
#prob = om.Problem()
#root = prob.model
#root.add_subsystem('p1', om.IndepVarComp('xx', 3.0))
#root.add_subsystem('c1', om.ExecComp(['y1=0.5*x + 1.0*xx', 'y2=0.3*x - 1.0*xx'], units={'y2' : 'km'}))
#root.add_subsystem('c2', om.ExecComp(['y=0.5*x']))
#sub = root.add_subsystem('sub', om.Group())
#sub.add_subsystem('cc1', om.ExecComp(['y=1.01*x1 + 1.01*x2'], units={'x1' : 'nm'}))
#sub.add_subsystem('cc2', om.ExecComp(['y=1.01*x']))
#root.connect('p1.xx', 'c1.xx')
#root.connect('c1.y1', 'c2.x')
#root.connect('c2.y', 'c1.x')
#root.connect('c1.y2', 'sub.cc1.x1')
#root.connect('sub.cc1.y', 'sub.cc2.x')
#root.connect('sub.cc2.y', 'sub.cc1.x2')
#root.nonlinear_solver = Newton()
#root.nonlinear_solver.options['maxiter'] = 1
#root.linear_solver = ScipyKrylov()
#root.linear_solver.options['maxiter'] = 1
#sub.nonlinear_solver = Newton()
#sub.linear_solver = om.DirectSolver()
#prob.driver.add_desvar('p1.xx')
#prob.driver.add_objective('sub.cc2.y')
#prob.setup()
#prob.run_model()
#self.assertTrue(not np.isnan(prob['sub.cc2.y']))
#def test_nested_relevancy_adjoint(self):
## This test is just to make sure that values in the dp vector from
## higher scopes aren't sitting there converting themselves during sub
## iterations.
#prob = om.Problem()
#root = prob.model
#root.add_subsystem('p1', om.IndepVarComp('xx', 3.0))
#root.add_subsystem('c1', om.ExecComp(['y1=0.5*x + 1.0*xx', 'y2=0.3*x - 1.0*xx'], units={'y2' : 'km'}))
#root.add_subsystem('c2', om.ExecComp(['y=0.5*x']))
#sub = root.add_subsystem('sub', om.Group())
#sub.add_subsystem('cc1', om.ExecComp(['y=1.01*x1 + 1.01*x2'], units={'x1' : 'nm'}))
#sub.add_subsystem('cc2', om.ExecComp(['y=1.01*x']))
#root.connect('p1.xx', 'c1.xx')
#root.connect('c1.y1', 'c2.x')
#root.connect('c2.y', 'c1.x')
#root.connect('c1.y2', 'sub.cc1.x1')
#root.connect('sub.cc1.y', 'sub.cc2.x')
#root.connect('sub.cc2.y', 'sub.cc1.x2')
#root.nonlinear_solver = Newton()
#root.nonlinear_solver.options['maxiter'] = 1
#root.linear_solver = ScipyKrylov()
#root.linear_solver.options['maxiter'] = 1
#root.linear_solver.options['mode'] = 'rev'
#sub.nonlinear_solver = Newton()
#sub.linear_solver = om.DirectSolver()
#prob.driver.add_desvar('p1.xx')
#prob.driver.add_objective('sub.cc2.y')
#prob.setup()
#prob.run_model()
#self.assertTrue(not np.isnan(prob['sub.cc2.y']))
#def test_nested_relevancy_adjoint_apply_linear(self):
## This test is just to make sure that values in the dp vector from
## higher scopes aren't sitting there converting themselves during sub
## iterations.
#class TestComp(Component):
#def __init__(self):
#super().__init__()
## Params
#self.add_param('x1', 1.0, units='mm')
#self.add_param('x2', 1.0)
## Unknowns
#self.add_output('y', 1.0)
#self.dx1count = 0
#self.dx2count = 0
#def solve_nonlinear(self, inputs, outputs, resids):
#x1 = inputs['x1']
#x2 = inputs['x2']
#outputs['y'] = 1.01*(x1 + x2)
#def apply_linear(self, inputs, outputs, dinputs, doutputs, dresids,
#mode):
#"""Returns the product of the incoming vector with the Jacobian."""
#if mode == 'fwd':
#if 'x1' in dinputs:
#dresids['y'] += 1.01*dinputs['x1']
#self.dx1count += 1
#if 'x2' in dinputs:
#dresids['y'] += 1.01*dinputs['x2']
#self.dx2count += 1
#elif mode == 'rev':
#if 'x1' in dinputs:
#dinputs['x1'] = 1.01*dresids['y']
#self.dx1count += 1
#if 'x2' in dinputs:
#dinputs['x2'] = 1.01*dresids['y']
#self.dx2count += 1
#prob = om.Problem()
#root = prob.model
#root.add_subsystem('p1', om.IndepVarComp('xx', 3.0))
#root.add_subsystem('c1', om.ExecComp(['y1=0.5*x + 1.0*xx', 'y2=0.3*x - 1.0*xx'], units={'y2' : 'km'}))
#root.add_subsystem('c2', om.ExecComp(['y=0.5*x']))
#sub = root.add_subsystem('sub', om.Group())
#sub.add_subsystem('cc1', TestComp())
#sub.add_subsystem('cc2', om.ExecComp(['y=1.01*x']))
#root.connect('p1.xx', 'c1.xx')
#root.connect('c1.y1', 'c2.x')
#root.connect('c2.y', 'c1.x')
#root.connect('c1.y2', 'sub.cc1.x1')
#root.connect('sub.cc1.y', 'sub.cc2.x')
#root.connect('sub.cc2.y', 'sub.cc1.x2')
#root.nonlinear_solver = Newton()
#root.nonlinear_solver.options['maxiter'] = 1
#root.linear_solver = ScipyKrylov()
#root.linear_solver.options['maxiter'] = 1
#root.linear_solver.options['mode'] = 'rev'
#sub.nonlinear_solver = Newton()
#sub.linear_solver = om.DirectSolver()
#prob.driver.add_desvar('p1.xx')
#prob.driver.add_objective('sub.cc2.y')
#prob.setup()
#prob.run_model()
## x1 deriv code should be called less if the dinputs vec only
## considers sub relevancy
#self.assertTrue(sub.cc1.dx1count < sub.cc1.dx2count)
#def test_nested_relevancy_gmres(self):
## This test is just to make sure that values in the dp vector from
## higher scopes aren't sitting there converting themselves during sub
## iterations.
#prob = om.Problem()
#root = prob.model
#root.add_subsystem('p1', om.IndepVarComp('xx', 3.0))
#root.add_subsystem('c1', om.ExecComp(['y1=0.5*x + 1.0*xx', 'y2=0.3*x - 1.0*xx'], units={'y2' : 'km'}))
#root.add_subsystem('c2', om.ExecComp(['y=0.5*x']))
#sub = root.add_subsystem('sub', om.Group())
#sub.add_subsystem('cc1', om.ExecComp(['y=1.01*x1 + 1.01*x2'], units={'x1' : 'fm'}))
#sub.add_subsystem('cc2', om.ExecComp(['y=1.01*x']))
#root.connect('p1.xx', 'c1.xx')
#root.connect('c1.y1', 'c2.x')
#root.connect('c2.y', 'c1.x')
#root.connect('c1.y2', 'sub.cc1.x1')
#root.connect('sub.cc1.y', 'sub.cc2.x')
#root.connect('sub.cc2.y', 'sub.cc1.x2')
#root.nonlinear_solver = Newton()
#root.nonlinear_solver.options['maxiter'] = 1
#root.linear_solver = ScipyKrylov()
#root.linear_solver.options['maxiter'] = 1
#sub.nonlinear_solver = Newton()
#sub.linear_solver = ScipyKrylov()
#prob.driver.add_desvar('p1.xx')
#prob.driver.add_objective('sub.cc2.y')
#prob.setup()
#prob.run_model()
## GMRES doesn't cause a successive build-up in the value of an out-of
## scope param, but the linear solver doesn't converge. We can test to
## make sure it does.
#iter_count = sub.linear_solver.iter_count
#self.assertTrue(iter_count < 20)
#self.assertTrue(not np.isnan(prob['sub.cc2.y']))
#def test_nested_relevancy_gmres_precon(self):
## Make sure preconditioners also work
#prob = om.Problem()
#root = prob.model
#root.add_subsystem('p1', om.IndepVarComp('xx', 3.0))
#root.add_subsystem('c1', om.ExecComp(['y1=0.5*x + 1.0*xx', 'y2=0.3*x - 1.0*xx'], units={'y2' : 'km'}))
#root.add_subsystem('c2', om.ExecComp(['y=0.5*x']))
#sub = root.add_subsystem('sub', om.Group())
#sub.add_subsystem('cc1', om.ExecComp(['y=1.01*x1 + 1.01*x2'], units={'x1' : 'fm'}))
#sub.add_subsystem('cc2', om.ExecComp(['y=1.01*x']))
#root.connect('p1.xx', 'c1.xx')
#root.connect('c1.y1', 'c2.x')
#root.connect('c2.y', 'c1.x')
#root.connect('c1.y2', 'sub.cc1.x1')
#root.connect('sub.cc1.y', 'sub.cc2.x')
#root.connect('sub.cc2.y', 'sub.cc1.x2')
#root.nonlinear_solver = Newton()
#root.nonlinear_solver.options['maxiter'] = 1
#root.linear_solver = ScipyKrylov()
#root.linear_solver.options['maxiter'] = 1
#sub.nonlinear_solver = Newton()
#sub.linear_solver = ScipyKrylov()
#sub.linear_solver.precon = om.DirectSolver()
#prob.driver.add_desvar('p1.xx')
#prob.driver.add_objective('sub.cc2.y')
#prob.setup()
#prob.run_model()
## GMRES doesn't cause a successive build-up in the value of an out-of
## scope param, but the linear solver doesn't converge. We can test to
## make sure it does.
#iter_count = sub.linear_solver.iter_count
#self.assertTrue(iter_count < 20)
#self.assertTrue(not np.isnan(prob['sub.cc2.y']))
def test_promotes_equivalent_units(self):
# multiple Group.set_input_defaults calls at same tree level with conflicting units args
p = om.Problem()
g1 = p.model.add_subsystem("G1", om.Group(), promotes_inputs=['x'])
g1.add_subsystem("C1", om.ExecComp("y = 2. * x * z",
x={'value': 5.0, 'units': 'm/s/s'},
y={'value': 1.0, 'units': None},
z={'value': 1.0, 'units': 'W'}),
promotes_inputs=['x', 'z'])
g1.add_subsystem("C2", om.ExecComp("y = 3. * x * z",
x={'value': 5.0, 'units': 'm/s**2'},
y={'value': 1.0, 'units': None},
z={'value': 1.0, 'units': 'J/s'}),
promotes_inputs=['x', 'z'])
# converting m/s/s to m/s**2 is allowed
p.setup()
def test_promotes_non_equivalent_units(self):
# multiple Group.set_input_defaults calls at same tree level with conflicting units args
p = om.Problem()
g1 = p.model.add_subsystem("G1", om.Group(), promotes_inputs=['x'])
g1.add_subsystem("C1", om.ExecComp("y = 2. * x * z",
x={'value': 5.0, 'units': 'J/s/s'},
y={'value': 1.0, 'units': None},
z={'value': 1.0, 'units': 'W'}),
promotes_inputs=['x', 'z'])
g1.add_subsystem("C2", om.ExecComp("y = 3. * x * z",
x={'value': 5.0, 'units': 'm/s**2'},
y={'value': 1.0, 'units': None},
z={'value': 1.0, 'units': 'J/s'}),
promotes_inputs=['x', 'z'])
# trying to convert J/s/s to m/s**2 should raise a TypeError for incompatible units
with self.assertRaises(TypeError) as e:
p.setup()
self.assertEqual(str(e.exception), "Units 'm/s**2' and 'J/s**2' are incompatible.")
def test_input_defaults_unit_compat(self):
p = om.Problem()
p.model.add_subsystem('comp', om.ExecComp('y=2*x', units='inch'))
with self.assertRaises(ValueError) as cm:
p.model.set_input_defaults('comp.x', val=2., units='in**2')
msg = ("Group: The units 'in**2' are invalid.")
self.assertEqual(cm.exception.args[0], msg)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
import os
import sys
import subprocess
from dataclasses import dataclass
@dataclass
class Settings:
kadmin_bin: str
service: str
realm: str
keytab_file: str
def delete_user(username):
subprocess.run(
[
settings.kadmin_bin,
'-r', settings.realm,
'-p', settings.service,
'-kt', settings.keytab_file,
'delete_principal', username,
],
timeout=1,
check=True,
)
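# For the defaults configured in the __main__ block below, the call above is
# roughly equivalent to running:
#   kadmin -r EXAMPLE.COM -p http/devapp@EXAMPLE.COM -kt /etc/krb5.keytab \
#       delete_principal <username>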
def main():
print('Username: ', end='', flush=True)
username = sys.stdin.readline().strip()
delete_user(username)
print(f'Deleted user: {username}')
if __name__ == '__main__':
settings = Settings(
kadmin_bin='kadmin',
service=os.getenv('AUTH_SERVICE_NAME', 'http/devapp@EXAMPLE.COM'),
realm=os.getenv('AUTH_SERVICE_REALM', 'EXAMPLE.COM'),
keytab_file=os.getenv('KRB5_KTNAME', '/etc/krb5.keytab'),
)
main()
|
# MIT License
# Copyright (c) 2020 Anil Chauhan // This file is part of ZeroTsu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class String:
START_MESSAGE = ("Hello {},\n\n"
"My name is Zero Tsū. I'm built using python3 & powerd by [NEKOS API.](https://nekos.life/)\n"
"I can supply you loads of anime wallpapers and hentai images for free!\n"
"Click the button below to get started with the list of possible commands.\n\n"
"Made with ❤️ by [anii](https://t.me/meanii) on python-telegram-bot.")
SUPPORT_MESSAGE = ("We have support group now, head over @nina77chat in case you have any issues.\n"
"Channel: @spookyanii")
SOURCE_MESSAGE = ("Here is the source: https://github.com/gizmostuffin/ZeroTsu\n"
"[Zero Tsū](https://github.com/gizmostuffin/ZeroTsu/blob/main/LICENSE) is licensed under the **MIT License**\n\n"
"Copyright (c) 2020 Anil Chauhan (@meanii)")
HELP_MESSAGE = ("This is a list of available commands you can try.\n\n"
"• Commands to get photos:\n"
"/feet: Sends Random Anime Feet Images.\n"
"/yuri: Sends Random Yuri source Images.\n"
"/trap: Sends Random Trap source Images.\n"
"/futanari: Sends Random Futanari source Images.\n"
"/hololewd: Sends Random Holo Lewds.\n"
"/lewdkemo: Sends Random Kemo Lewds.\n"
"/sologif: Sends Random Solo GIFs.\n"
"/cumgif: Sends Random Cum GIFs.\n"
"/erokemo: Sends Random Ero-Kemo Images.\n"
"/lesbian: Sends Random Les Source Images.\n"
"/lewdk: Sends Random Kitsune Lewds.\n"
"/ngif: Sends Random Neko GIFs.\n"
"/tickle: Sends Random Tickle GIFs.\n"
"/lewd: Sends Random Lewds.\n"
"/feed: Sends Random Feeding GIFs.\n"
"/eroyuri: Sends Random Ero-Yuri source Images.\n"
"/eron: Sends Random Ero-Neko source Images.\n"
"/cum: Sends Random Cum Images.\n"
"/bjgif: Sends Random Blow Job GIFs.\n"
"/bj: Sends Random Blow Job source Images.\n"
"/nekonsfw: Sends Random NSFW Neko source Images.\n"
"/solo: Sends Random NSFW Neko GIFs.\n"
"/kemonomimi: Sends Random KemonoMimi source Images.\n"
"/avatarlewd: Sends Random Avater Lewd Stickers.\n"
"/gasm: Sends Random Orgasm Stickers.\n"
"/poke: Sends Random Poke GIFs.\n"
"/anal: Sends Random Anal GIFs.\n"
"/hentai: Sends Random Hentai source Images.\n"
"/avatar: Sends Random Avatar Stickers.\n"
"/erofeet: Sends Random Ero-Feet source Images.\n"
"/holo: Sends Random Holo source Images.\n"
"/tits: Sends Random Tits source Images.\n"
"/pussygif: Sends Random Pussy GIFs.\n"
"/holoero: Sends Random Ero-Holo source Images.\n"
"/pussy: Sends Random Pussy source Images.\n"
"/hentaigif: Sends Random Hentai GIFs.\n"
"/classic: Sends Random Classic Hentai GIFs.\n"
"/kuni: Sends Random Pussy Lick GIFs.\n"
"/waifu: Sends Random Waifu Stickers.\n"
"/kiss: Sends Random Kissing GIFs.\n"
"/femdom: Sends Random Femdom source Images.\n"
"/cuddle: Sends Random Cuddle GIFs.\n"
"/erok: Sends Random Ero-Kitsune source Images.\n"
"/foxgirl: Sends Random FoxGirl source Images.\n"
"/titsgif: Sends Random Tits GIFs.\n"
"/ero: Sends Random Ero source Images.\n"
"/smug: Sends Random Smug GIFs.\n"
"/baka: Sends Random Baka Shout GIFs.\n"
"/dva: Sends Random D.VA source Images.\n\n"
"• Commands to get wallpapers:\n"
"/neko: Sends Random SFW Neko source Images.\n"
"/wallpaper: Sends Random Wallpapers.")
|
import os
from contextlib import contextmanager
import sqlalchemy as db
from dagster import StringSource, check
from dagster.core.storage.sql import (
check_alembic_revision,
create_engine,
get_alembic_config,
handle_schema_errors,
run_alembic_downgrade,
run_alembic_upgrade,
stamp_alembic_rev,
)
from dagster.core.storage.sqlite import create_db_conn_string
from dagster.serdes import ConfigurableClass, ConfigurableClassData
from dagster.seven import urljoin, urlparse
from dagster.utils import mkdir_p
from sqlalchemy.pool import NullPool
from ..schema import RunStorageSqlMetadata, RunTagsTable, RunsTable
from ..sql_run_storage import SqlRunStorage
class SqliteRunStorage(SqlRunStorage, ConfigurableClass):
"""SQLite-backed run storage.
Users should not directly instantiate this class; it is instantiated by internal machinery when
``dagit`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in
``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.
This is the default run storage when none is specified in the ``dagster.yaml``.
To explicitly specify SQLite for run storage, you can add a block such as the following to your
``dagster.yaml``:
.. code-block:: YAML
run_storage:
module: dagster.core.storage.runs
class: SqliteRunStorage
config:
base_dir: /path/to/dir
The ``base_dir`` param tells the run storage where on disk to store the database.
"""
def __init__(self, conn_string, inst_data=None):
check.str_param(conn_string, "conn_string")
self._conn_string = conn_string
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {"base_dir": StringSource}
@staticmethod
def from_config_value(inst_data, config_value):
return SqliteRunStorage.from_local(inst_data=inst_data, **config_value)
@staticmethod
def from_local(base_dir, inst_data=None):
check.str_param(base_dir, "base_dir")
mkdir_p(base_dir)
conn_string = create_db_conn_string(base_dir, "runs")
engine = create_engine(conn_string, poolclass=NullPool)
engine.execute("PRAGMA journal_mode=WAL;")
RunStorageSqlMetadata.create_all(engine)
alembic_config = get_alembic_config(__file__)
connection = engine.connect()
db_revision, head_revision = check_alembic_revision(alembic_config, connection)
if not (db_revision and head_revision):
stamp_alembic_rev(alembic_config, engine)
return SqliteRunStorage(conn_string, inst_data)
@contextmanager
def connect(self):
engine = create_engine(self._conn_string, poolclass=NullPool)
conn = engine.connect()
try:
with handle_schema_errors(
conn, get_alembic_config(__file__), msg="Sqlite run storage requires migration",
):
yield conn
finally:
conn.close()
def _alembic_upgrade(self, rev="head"):
alembic_config = get_alembic_config(__file__)
with self.connect() as conn:
run_alembic_upgrade(alembic_config, conn, rev=rev)
def _alembic_downgrade(self, rev="head"):
alembic_config = get_alembic_config(__file__)
with self.connect() as conn:
run_alembic_downgrade(alembic_config, conn, rev=rev)
def upgrade(self):
self._check_for_version_066_migration_and_perform()
self._alembic_upgrade()
# In version 0.6.6, we changed the layout of the sqlite dbs on disk
# to move from the root of DAGSTER_HOME/runs.db to DAGSTER_HOME/history/runs.db
# This function checks for that condition and does the move
def _check_for_version_066_migration_and_perform(self):
old_conn_string = "sqlite://" + urljoin(urlparse(self._conn_string).path, "../runs.db")
path_to_old_db = urlparse(old_conn_string).path
# sqlite URLs look like `sqlite:///foo/bar/baz` on Unix/Mac, but on Windows they look like
# `sqlite:///D:/foo/bar/baz` (or `sqlite:///D:\foo\bar\baz`)
if os.name == "nt":
path_to_old_db = path_to_old_db.lstrip("/")
if os.path.exists(path_to_old_db):
old_storage = SqliteRunStorage(old_conn_string)
old_runs = old_storage.get_runs()
for run in old_runs:
self.add_run(run)
os.unlink(path_to_old_db)
def delete_run(self, run_id):
""" Override the default sql delete run implementation until we can get full
support on cascading deletes """
check.str_param(run_id, "run_id")
remove_tags = db.delete(RunTagsTable).where(RunTagsTable.c.run_id == run_id)
remove_run = db.delete(RunsTable).where(RunsTable.c.run_id == run_id)
with self.connect() as conn:
conn.execute(remove_tags)
conn.execute(remove_run)
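# Hedged usage sketch (added for illustration, not part of the original
# module): `from_local` is the simplest entry point, creating the SQLite
# database under a base directory just as the `dagster.yaml` block in the
# class docstring would. The path below is an arbitrary example value.
if __name__ == "__main__":
    _storage = SqliteRunStorage.from_local("/tmp/dagster_home/history")
    print("run storage connection string:", _storage._conn_string)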
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script."""
import os
from setuptools import find_packages
from setuptools import setup
version = '0.0.1'
def _get_requirements():
"""Parses requirements.txt file."""
install_requires_tmp = []
dependency_links_tmp = []
with open(
os.path.join(os.path.dirname(__file__), './requirements.txt'), 'r') as f:
for line in f:
package_name = line.strip()
# Skip empty line or comments starting with "#".
if not package_name or package_name[0] == '#':
continue
if package_name.startswith('-e '):
dependency_links_tmp.append(package_name[3:].strip())
else:
install_requires_tmp.append(package_name)
return install_requires_tmp, dependency_links_tmp
install_requires, dependency_links = _get_requirements()
install_requires.append('tf-nightly')
install_requires.append('tensorflow-datasets')
setup(
name='keras-cv',
version=version,
description='Keras Computer Vision Library',
url='https://github.com/keras-team/keras-cv',
author='The Keras authors',
author_email='keras-team@google.com',
license='Apache License 2.0',
install_requires=install_requires,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Software Development'
],
packages=find_packages(exclude=('tests',)),
exclude_package_data={'': ['*_test.py',],},
dependency_links=dependency_links,
python_requires='>=3.6',
)
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('add_image/', views.add_image, name='add_image'),
path('<int:image_id>/', views.image_detail, name='image_detail')
]
|
#%%
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy import text as sql_text
db = SQLAlchemy()
class User(db.Model):
__tablename__ = "users"
id = db.Column(UUID(as_uuid=True), primary_key=True, server_default=sql_text("uuid_generate_v4()"), nullable=False)
public_id = db.Column(UUID(as_uuid=True), server_default=sql_text("uuid_generate_v4()"), nullable=False)
email = db.Column(db.String, nullable=False)
class Note(db.Model):
__tablename__ = "notes"
id = db.Column(UUID(as_uuid=True), primary_key=True, server_default=sql_text("uuid_generate_v4()"), nullable=False)
title = db.Column(db.Text, nullable = False)
text = db.Column(db.Text, nullable = False)
date = db.Column(db.Text, nullable = False)
userId = db.Column(UUID(as_uuid=True), db.ForeignKey("users.id"), nullable=False)
createdAt = db.Column(db.DateTime, nullable=False)
updatedAt = db.Column(db.DateTime, nullable=False)
def __repr__(self):
return f"noteid:{self.id}\ntitele:{self.title}\ntext:{self.text}\ndate:{self.date}\ncreatedAt:{self.createdAt}\nupdatedAt:{self.updatedAt}\nuserId:{self.userId}"
# %%
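# Hedged wiring sketch (added for illustration, not part of the original
# module). The Flask app factory, the database URI, and the extension note
# below are assumptions: the uuid_generate_v4() server defaults only work if
# the "uuid-ossp" extension is installed in the target Postgres database.
from flask import Flask

def create_app(database_uri="postgresql://localhost/notes_dev"):
    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = database_uri
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    db.init_app(app)
    with app.app_context():
        # Assumes: CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; has been run.
        db.create_all()
    return app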
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
from .se_net import senet50_256
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rsqrt,
without which any other models than torchvision.models.resnet[18,34,50,101]
produce nans.
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def forward(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
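# Added commentary (not from the original source): the usual batch-norm
# formula (x - running_mean) / sqrt(running_var + eps) * weight + bias is
# refactored below into a single multiply-add, x * scale + bias, with
# scale and bias precomputed from the frozen buffers.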
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = 1e-5
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
class BackboneBase(nn.Module):
def __init__(
self,
backbone: nn.Module,
train_backbone: bool,
num_channels: int,
return_interm_layers: bool,
opt_name=None,
):
super().__init__()
for name, parameter in backbone.named_parameters():
if (
not train_backbone
or "layer2" not in name
and "layer3" not in name
and "layer4" not in name
):
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
if opt_name is None:
return_layers = {"layer4": "0"}
else:
return_layers = None
self.body = backbone
if return_layers is not None:
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(
self,
name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool,
):
if name not in ["senet256"]:
optname = None
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=is_main_process(),
norm_layer=FrozenBatchNorm2d,
)
num_channels = 512 if name in ("resnet18", "resnet34") else 2048
else:
optname = "feat_extract"
backbone = senet50_256(pretrained=is_main_process())
num_channels = 2048
super().__init__(
backbone, train_backbone, num_channels, return_interm_layers, optname
)
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor, targets=None):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
# position encoding
pos.append(self[1](x, targets).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = args.masks
backbone = Backbone(
args.backbone, train_backbone, return_interm_layers, args.dilation
)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
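# Hedged sanity-check sketch (added for illustration, not part of the original
# module): with freshly initialised buffers (weight=1, bias=0, mean=0, var=1),
# FrozenBatchNorm2d should match an eval-mode nn.BatchNorm2d, since both apply
# (x - mean) / sqrt(var + 1e-5) * weight + bias with fixed statistics.
if __name__ == "__main__":
    _x = torch.randn(2, 8, 4, 4)
    _frozen = FrozenBatchNorm2d(8)
    _reference = nn.BatchNorm2d(8).eval()
    print(torch.allclose(_frozen(_x), _reference(_x), atol=1e-6))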
|
#!/usr/bin/env python
from distutils.core import setup
from Cython.Distutils import build_ext
from distutils.extension import Extension
import numpy
# from numpy.distutils.core import Extension
cy_mod = Extension("inside_polygon",
sources=["inside_polygon.pyx", "InsidePolygonWithBounds.c"],
include_dirs=[numpy.get_include()],
language="c")
setup(ext_modules=[cy_mod],
cmdclass={'build_ext': build_ext})
|
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
class UnalignedDataset(BaseDataset):
"""
This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB'
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
if self.opt.serial_batches:   # make sure index is within the range
index_B = index % self.B_size
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = Image.open(A_path).convert('RGB')
B_img = Image.open(B_path).convert('RGB')
# apply image transformation
A = self.transform_A(A_img)
B = self.transform_B(B_img)
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
"""Return the total number of images in the dataset.
As we have two datasets with potentially different number of images,
we take a maximum of
"""
return max(self.A_size, self.B_size)
|
pairs = {
'(': ')',
'[': ']',
'{': '}',
'<': '>'
}
completion_scores = {
')': 1,
']': 2,
'}': 3,
'>': 4
}
syntax_scores = {
')': 3,
']': 57,
'}': 1197,
'>': 25137
}
class ClosingCharacterError(Exception):
def __init__(self, character: str, *args: object) -> None:
super().__init__(*args)
self.character = character
def read_file(filename):
with open(filename) as file:
for line in file:
line = line.strip()
if line:
yield line
def parse(line):
stack = []
for c in line:
if c in pairs:
stack.append(c)
elif pairs[stack[-1]] == c:
stack.pop()
else:
raise ClosingCharacterError(c)
return stack
def syntax_score(line):
try:
parse(line)
return 0
except ClosingCharacterError as e:
return syntax_scores[e.character]
def completion_score(line):
score = 0
try:
stack = parse(line)
while len(stack) > 0:
score = score * 5 + completion_scores[pairs[stack.pop()]]
except ClosingCharacterError:
pass
return score
def part_one(filename):
return sum(syntax_score(line) for line in read_file(filename))
def part_two(filename):
scores = [completion_score(line) for line in read_file(filename)]
scores = [score for score in scores if score > 0]
scores.sort()
return scores[len(scores) // 2]
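# Hedged worked example (added, not from the original file): for the AoC 2021
# day 10 sample line below, parse() leaves the stack "[({([[{{", so the
# completion closers are "}}]])})]" and repeatedly applying
# score = score * 5 + value (values 3,3,2,2,1,3,1,2) gives 288957.
assert completion_score("[({(<(())[]>[[{[]{<()<>>") == 288957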
print(part_two('day10/input'))
|
from __future__ import absolute_import
import os
import shutil
import subprocess
import time
import tempfile
from . import constants
from .config import get_config_value
def parent_dir(path):
"""Return the parent directory of a file or directory.
This is commonly useful for creating parent directories
prior to creating a file."""
return os.path.split(path)[0]
def vm_cp_path(app_or_service_name):
return os.path.join(constants.VM_CP_DIR, app_or_service_name)
def vm_command_files_path(app_or_service_name):
return os.path.join(constants.VM_COMMAND_FILES_DIR, app_or_service_name)
def dir_modified_time(path):
return time.ctime(os.path.getmtime(path))
def set_mac_user_ownership(path):
command = "chown -R {} {}".format(get_config_value(constants.CONFIG_MAC_USERNAME_KEY), path).split()
subprocess.check_call(command)
def case_insensitive_rename(src, dst):
"""A hack to allow us to rename paths in a case-insensitive filesystem like HFS."""
temp_dir = tempfile.mkdtemp()
shutil.rmtree(temp_dir)
shutil.move(src, temp_dir)
shutil.move(temp_dir, dst)
|
import mimetypes
import os
import urlparse
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.files.uploadedfile import UploadedFile
from django.core.files.uploadhandler import FileUploadHandler, \
StopFutureHandlers
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.utils.encoding import smart_str, force_unicode, filepath_to_uri
from google.appengine.api import files
from google.appengine.api.images import get_serving_url, NotImageError, \
TransformationError, BlobKeyRequiredError
from google.appengine.ext.blobstore import BlobInfo, BlobKey, delete, \
create_upload_url, BLOB_KEY_HEADER, BLOB_RANGE_HEADER, BlobReader
def prepare_upload(request, url, **kwargs):
return create_upload_url(url), {}
def serve_file(request, file, save_as, content_type, **kwargs):
if hasattr(file, 'file') and hasattr(file.file, 'blobstore_info'):
blobkey = file.file.blobstore_info.key()
elif hasattr(file, 'blobstore_info'):
blobkey = file.blobstore_info.key()
else:
raise ValueError("The provided file can't be served via the "
"Google App Engine Blobstore.")
response = HttpResponse(content_type=content_type)
response[BLOB_KEY_HEADER] = str(blobkey)
response['Accept-Ranges'] = 'bytes'
http_range = request.META.get('HTTP_RANGE')
if http_range is not None:
response[BLOB_RANGE_HEADER] = http_range
if save_as:
response['Content-Disposition'] = smart_str(
u'attachment; filename=%s' % save_as)
if file.size is not None:
response['Content-Length'] = file.size
return response
class BlobstoreStorage(Storage):
"""Google App Engine Blobstore storage backend."""
def _open(self, name, mode='rb'):
return BlobstoreFile(name, mode, self)
def _save(self, name, content):
name = name.replace('\\', '/')
if hasattr(content, 'file') and \
hasattr(content.file, 'blobstore_info'):
data = content.file.blobstore_info
elif hasattr(content, 'blobstore_info'):
data = content.blobstore_info
elif isinstance(content, File):
guessed_type = mimetypes.guess_type(name)[0]
file_name = files.blobstore.create(mime_type=guessed_type or 'application/octet-stream',
_blobinfo_uploaded_filename=name)
with files.open(file_name, 'a') as f:
for chunk in content.chunks():
f.write(chunk)
files.finalize(file_name)
data = files.blobstore.get_blob_key(file_name)
else:
raise ValueError("The App Engine storage backend only supports "
"BlobstoreFile instances or File instances.")
if isinstance(data, (BlobInfo, BlobKey)):
# We change the file name to the BlobKey's str() value.
if isinstance(data, BlobInfo):
data = data.key()
return '%s/%s' % (data, name.lstrip('/'))
else:
raise ValueError("The App Engine Blobstore only supports "
"BlobInfo values. Data can't be uploaded "
"directly. You have to use the file upload "
"handler.")
def delete(self, name):
delete(self._get_key(name))
def exists(self, name):
return self._get_blobinfo(name) is not None
def size(self, name):
return self._get_blobinfo(name).size
def url(self, name):
try:
return get_serving_url(self._get_blobinfo(name))
except (NotImageError, TransformationError):
return None
def created_time(self, name):
return self._get_blobinfo(name).creation
def get_valid_name(self, name):
return force_unicode(name).strip().replace('\\', '/')
def get_available_name(self, name):
return name.replace('\\', '/')
def _get_key(self, name):
return BlobKey(name.split('/', 1)[0])
def _get_blobinfo(self, name):
return BlobInfo.get(self._get_key(name))
class DevBlobstoreStorage(BlobstoreStorage):
def url(self, name):
try:
return super(DevBlobstoreStorage, self).url(name)
except BlobKeyRequiredError:
return urlparse.urljoin(settings.MEDIA_URL, filepath_to_uri(name))
class BlobstoreFile(File):
def __init__(self, name, mode, storage):
self.name = name
self._storage = storage
self._mode = mode
self.blobstore_info = storage._get_blobinfo(name)
@property
def size(self):
return self.blobstore_info.size
def write(self, content):
raise NotImplementedError()
@property
def file(self):
if not hasattr(self, '_file'):
self._file = BlobReader(self.blobstore_info.key())
return self._file
class BlobstoreFileUploadHandler(FileUploadHandler):
"""
File upload handler for the Google App Engine Blobstore.
"""
def new_file(self, *args, **kwargs):
super(BlobstoreFileUploadHandler, self).new_file(*args, **kwargs)
blobkey = self.content_type_extra.get('blob-key')
self.active = blobkey is not None
if self.active:
self.blobkey = BlobKey(blobkey)
raise StopFutureHandlers()
def receive_data_chunk(self, raw_data, start):
"""
Ignore the chunk if this handler is active (the Blobstore already stored
the data); otherwise pass it on to the next upload handler.
"""
if not self.active:
return raw_data
def file_complete(self, file_size):
"""
Return a file object if we're activated.
"""
if not self.active:
return
return BlobstoreUploadedFile(
blobinfo=BlobInfo(self.blobkey),
charset=self.charset)
class BlobstoreUploadedFile(UploadedFile):
"""
A file uploaded to the App Engine Blobstore, read back via BlobReader.
"""
def __init__(self, blobinfo, charset):
super(BlobstoreUploadedFile, self).__init__(
BlobReader(blobinfo.key()), blobinfo.filename,
blobinfo.content_type, blobinfo.size, charset)
self.blobstore_info = blobinfo
def open(self, mode=None):
pass
def chunks(self, chunk_size=1024 * 128):
self.file.seek(0)
while True:
content = self.read(chunk_size)
if not content:
break
yield content
def multiple_chunks(self, chunk_size=1024 * 128):
return True
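# Hedged configuration sketch (added, not part of the original module): Django
# would typically be pointed at these classes via settings. The dotted path
# "myproject.storage" is a placeholder assumption for wherever this module
# lives in a real project.
#
# DEFAULT_FILE_STORAGE = "myproject.storage.BlobstoreStorage"
# FILE_UPLOAD_HANDLERS = (
#     "myproject.storage.BlobstoreFileUploadHandler",
#     "django.core.files.uploadhandler.MemoryFileUploadHandler",
#     "django.core.files.uploadhandler.TemporaryFileUploadHandler",
# )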
|
"""
"""
import weppRun
import subprocess
import threading
import time
import random
import mx.DateTime
import logging
import pg
wblog = logging.getLogger("wblog")
wblog.setLevel(logging.DEBUG)
fh = logging.FileHandler("wb.log", "w")
fh.setLevel(logging.DEBUG)
wblog.addHandler(fh)
sts = mx.DateTime.DateTime(1997,1,1)
ets = mx.DateTime.now() + mx.DateTime.RelativeDateTime(days=-1,hour=0,minute=0,second=0)
wbfindline = int((ets - sts).days + 20.0)
class WeppThread( threading.Thread ):
""" I am a processing thread that will do some work """
chunksize = 1000
def run( self ):
self.dbconn = pg.connect('wepp', 'iemdb')
good = True
while (good):
sts = mx.DateTime.now()
good = self.chunk()
ets = mx.DateTime.now()
rt = (ets - sts).seconds
print "%s Processed %s runs in %.1fs [%.3f runs/s]" % (
ets.strftime("%H:%M"), self.chunksize,
rt, self.chunksize / rt )
def chunk( self ):
# First, let's request a request ID from the server
rs = self.dbconn.query("SELECT \
nextval('job_queue_request_id'::text) as next").dictresult()
requestID = rs[0]["next"]
# Send a request to the server for 1000 combos to make
self.dbconn.query("UPDATE job_queue SET request_id = %s \
WHERE id in (SELECT id from job_queue \
WHERE request_id IS NULL LIMIT %s) " % \
(requestID, self.chunksize) )
# Now, ask again for runs that we can make
sql = "select c.mkrun as mkrun, c.id as cid, c.hrap_i, \
n.steep, n.len, \
n.soil_id, c.model_twp, c.nri_id::text as nri_id, \
n.man_id, m.name FROM \
combos c, nri n, managements m, job_queue q \
WHERE q.request_id = %s and q.combo_id = c.id \
and c.nri_id = n.id and n.man_id = m.man_id" % (requestID)
rs = self.dbconn.query(sql).dictresult()
if (len(rs) == 0):
return False
for i in range(len(rs)):
if (rs[i]['mkrun'] == 'f'):
continue
runwepp(rs[i])
return True
def runwepp(row):
hrap_i = int(row['hrap_i'])
cid = int(row['cid'])
wr = weppRun.weppRun(cid)
wr.model_twp = row['model_twp']
wr.nri_id = str(row['nri_id'])
wr.hrap_i = hrap_i
wr.mfile = row['name']
if (wr.mfile == "fallow"):
return
wr.sid = str(row['soil_id'])
wr.s_length = row['len']
wr.s_steep = float(row['steep']) + 0.01
#wr.buildSlope()
#wr.buildSoil(mydb)
wr.buildRun()
if (wr.error > 0):
return
proc = subprocess.Popen("./wepp < runfiles/wepp.%s" % (cid,), shell=True,
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
r = proc.stdout.read()
saveo = open('output/%s.txt'%(cid,),'w')
saveo.write( r )
saveo.close()
if r[-13:-1] != "SUCCESSFULLY":
e = open('error/%s.txt'%(cid,),'w')
e.write( r )
e.close()
return
for linenum, line in enumerate(open('wb/%s.wb' % (cid,))):
if linenum == wbfindline:
wblog.debug("%s %s" % (cid, line.strip()) )
break
for x in range(3):
if x > 0:
time.sleep( random.random() * 10 ) # Initial jitter
WeppThread().start()
|
"""Example of using custom_loss() with an imitation learning loss.
The default input file is too small to learn a good policy, but you can
generate new experiences for IL training as follows:
To generate experiences:
$ ./train.py --run=PG --config='{"output": "/tmp/cartpole"}' --env=CartPole-v0
To train on experiences with joint PG + IL loss:
$ python custom_loss.py --input-files=/tmp/cartpole
"""
import argparse
from pathlib import Path
import os
import ray
from ray import tune
from ray.rllib.models import Model, ModelCatalog
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.models.tf.fcnet_v1 import FullyConnectedNetwork
from ray.rllib.models.model import restore_original_dimensions
from ray.rllib.offline import JsonReader
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--iters", type=int, default=200)
parser.add_argument(
"--input-files",
type=str,
default=os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../tests/data/cartpole_small"))
class CustomLossModel(Model):
"""Custom model that adds an imitation loss on top of the policy loss."""
def _build_layers_v2(self, input_dict, num_outputs, options):
self.obs_in = input_dict["obs"]
with tf.variable_scope("shared", reuse=tf.AUTO_REUSE):
self.fcnet = FullyConnectedNetwork(input_dict, self.obs_space,
self.action_space, num_outputs,
options)
return self.fcnet.outputs, self.fcnet.last_layer
def custom_loss(self, policy_loss, loss_inputs):
# create a new input reader per worker
reader = JsonReader(self.options["custom_options"]["input_files"])
input_ops = reader.tf_input_ops()
# define a secondary loss by building a graph copy with weight sharing
obs = tf.cast(input_ops["obs"], tf.float32)
logits, _ = self._build_layers_v2({
"obs": restore_original_dimensions(obs, self.obs_space)
}, self.num_outputs, self.options)
# You can also add self-supervised losses easily by referencing tensors
# created during _build_layers_v2(). For example, an autoencoder-style
# loss can be added as follows:
# ae_loss = squared_diff(
# loss_inputs["obs"], Decoder(self.fcnet.last_layer))
print("FYI: You can also use these tensors: {}, ".format(loss_inputs))
# compute the IL loss
action_dist = Categorical(logits, self.options)
self.policy_loss = policy_loss
self.imitation_loss = tf.reduce_mean(
-action_dist.logp(input_ops["actions"]))
return policy_loss + 10 * self.imitation_loss
def custom_stats(self):
return {
"policy_loss": self.policy_loss,
"imitation_loss": self.imitation_loss,
}
if __name__ == "__main__":
ray.init()
args = parser.parse_args()
# Bazel makes it hard to find files specified in `args` (and `data`).
# Look for them here.
if not os.path.exists(args.input_files):
# This script runs in the ray/rllib/examples dir.
rllib_dir = Path(__file__).parent.parent
input_dir = rllib_dir.absolute().joinpath(args.input_files)
args.input_files = str(input_dir)
ModelCatalog.register_custom_model("custom_loss", CustomLossModel)
tune.run(
"PG",
stop={
"training_iteration": args.iters,
},
config={
"env": "CartPole-v0",
"num_workers": 0,
"model": {
"custom_model": "custom_loss",
"custom_options": {
"input_files": args.input_files,
},
},
},
)
|
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class GetCameraFovNode(Node, ArmLogicTreeNode):
'''Get camera FOV node'''
bl_idname = 'LNGetCameraFovNode'
bl_label = 'Get Camera FOV'
bl_icon = 'GAME'
def init(self, context):
self.inputs.new('ArmNodeSocketObject', 'Object')
self.outputs.new('NodeSocketFloat', 'FOV')
add_node(GetCameraFovNode, category='Value')
|
import py
from rpython.jit.metainterp import compile
from rpython.jit.metainterp.history import (TargetToken, JitCellToken,
TreeLoop, Const)
from rpython.jit.metainterp.optimizeopt.util import equaloplists
from rpython.jit.metainterp.optimizeopt.vector import (Pack,
NotAProfitableLoop, VectorizingOptimizer)
from rpython.jit.metainterp.optimizeopt.dependency import (Node,
DependencyGraph, IndexVar)
from rpython.jit.metainterp.optimizeopt.guard import (GuardStrengthenOpt,
Guard)
from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin
from rpython.jit.metainterp.optimizeopt.test.test_schedule import SchedulerBaseTest
from rpython.jit.metainterp.optimizeopt.test.test_vecopt import (FakeMetaInterpStaticData,
FakeJitDriverStaticData, FakeLoopInfo)
from rpython.jit.metainterp.resoperation import (rop,
ResOperation, InputArgInt)
from rpython.jit.tool.oparser_model import get_model
class FakeMemoryRef(object):
def __init__(self, array, iv):
self.index_var = iv
self.array = array
def is_adjacent_to(self, other):
if self.array is not other.array:
return False
iv = self.index_var
ov = other.index_var
val = (int(str(ov.var)[1:]) - int(str(iv.var)[1:]))
# i0 and i1 are adjacent
# i1 and i0 ...
# but not i0, i2
# ...
return abs(val) == 1
class FakeOp(object):
def __init__(self, cmpop):
self.boolinverse = ResOperation(cmpop, [box(0), box(0)], None).boolinverse
self.cmpop = cmpop
def getopnum(self):
return self.cmpop
def getarg(self, index):
if index == 0:
return 'lhs'
elif index == 1:
return 'rhs'
else:
assert 0
class FakeResOp(object):
def __init__(self, opnum):
self.opnum = opnum
def getopnum(self):
return self.opnum
def box(value):
return InputArgInt(value)
def const(value):
return Const._new(value)
def iv(value, coeff=(1,1,0)):
var = IndexVar(value)
var.coefficient_mul = coeff[0]
var.coefficient_div = coeff[1]
var.constant = coeff[2]
return var
def guard(opnum):
def guard_impl(cmpop, lhs, rhs):
guard = Guard(0, FakeResOp(opnum), FakeOp(cmpop), {'lhs': lhs, 'rhs': rhs})
return guard
return guard_impl
guard_true = guard(rop.GUARD_TRUE)
guard_false = guard(rop.GUARD_FALSE)
del guard
class GuardBaseTest(SchedulerBaseTest):
def optguards(self, loop, user_code=False):
info = FakeLoopInfo(loop)
info.snapshot(loop)
for op in loop.operations:
if op.is_guard():
op.setdescr(compile.CompileLoopVersionDescr())
dep = DependencyGraph(loop)
opt = GuardStrengthenOpt(dep.index_vars)
opt.propagate_all_forward(info, loop, user_code)
return opt
def assert_guard_count(self, loop, count):
guard = 0
for op in loop.operations + loop.prefix:
if op.is_guard():
guard += 1
if guard != count:
self.debug_print_operations(loop)
assert guard == count
def assert_contains_sequence(self, loop, instr):
class Glob(object):
next = None
prev = None
def __repr__(self):
return '*'
from rpython.jit.tool.oparser import OpParser, default_fail_descr
parser = OpParser(instr, self.cpu, self.namespace, None, default_fail_descr, True, None)
parser.vars = { arg.repr_short(arg._repr_memo) : arg for arg in loop.inputargs}
operations = []
last_glob = None
prev_op = None
for line in instr.splitlines():
line = line.strip()
if line.startswith("#") or \
line == "":
continue
if line.startswith("..."):
last_glob = Glob()
last_glob.prev = prev_op
operations.append(last_glob)
continue
op = parser.parse_next_op(line)
if last_glob is not None:
last_glob.next = op
last_glob = None
operations.append(op)
def check(op, candidate, rename):
m = 0
if isinstance(candidate, Glob):
if candidate.next is None:
return 0 # consumes the rest
if op.getopnum() != candidate.next.getopnum():
return 0
m = 1
candidate = candidate.next
if op.getopnum() == candidate.getopnum():
for i,arg in enumerate(op.getarglist()):
oarg = candidate.getarg(i)
if arg in rename:
assert rename[arg].same_box(oarg)
else:
rename[arg] = oarg
if not op.returns_void():
rename[op] = candidate
m += 1
return m
return 0
j = 0
rename = {}
ops = loop.finaloplist()
for i, op in enumerate(ops):
candidate = operations[j]
j += check(op, candidate, rename)
if isinstance(operations[-1], Glob):
assert j == len(operations)-1, self.debug_print_operations(loop)
else:
assert j == len(operations), self.debug_print_operations(loop)
def test_basic(self):
loop1 = self.parse_trace("""
i10 = int_lt(i1, 42)
guard_true(i10) []
i101 = int_add(i1, 1)
i102 = int_lt(i101, 42)
guard_true(i102) []
""")
opt = self.optguards(loop1)
self.assert_guard_count(loop1, 1)
self.assert_contains_sequence(loop1, """
...
i101 = int_add(i1, 1)
i12 = int_lt(i101, 42)
guard_true(i12) []
...
""")
def test_basic_sub(self):
loop1 = self.parse_trace("""
i10 = int_gt(i1, 42)
guard_true(i10) []
i101 = int_sub(i1, 1)
i12 = int_gt(i101, 42)
guard_true(i12) []
""")
opt = self.optguards(loop1)
self.assert_guard_count(loop1, 1)
self.assert_contains_sequence(loop1, """
...
i101 = int_sub(i1, 1)
i12 = int_gt(i101, 42)
guard_true(i12) []
...
""")
def test_basic_mul(self):
loop1 = self.parse_trace("""
i10 = int_mul(i1, 4)
i20 = int_lt(i10, 42)
guard_true(i20) []
i12 = int_add(i10, 1)
i13 = int_lt(i12, 42)
guard_true(i13) []
""")
opt = self.optguards(loop1)
self.assert_guard_count(loop1, 1)
self.assert_contains_sequence(loop1, """
...
i101 = int_mul(i1, 4)
i12 = int_add(i101, 1)
i13 = int_lt(i12, 42)
guard_true(i13) []
...
""")
def test_compare(self):
key = box(1)
incomparable = (False, 0)
# const const
assert iv(const(42)).compare(iv(const(42))) == (True, 0)
assert iv(const(-400)).compare(iv(const(-200))) == (True, -200)
assert iv(const(0)).compare(iv(const(-1))) == (True, 1)
# var const
assert iv(key, coeff=(1,1,0)).compare(iv(const(42))) == incomparable
assert iv(key, coeff=(5,70,500)).compare(iv(const(500))) == incomparable
# var var
assert iv(key, coeff=(1,1,0)).compare(iv(key,coeff=(1,1,0))) == (True, 0)
assert iv(key, coeff=(1,7,0)).compare(iv(key,coeff=(1,7,0))) == (True, 0)
assert iv(key, coeff=(4,7,0)).compare(iv(key,coeff=(3,7,0))) == incomparable
assert iv(key, coeff=(14,7,0)).compare(iv(key,coeff=(2,1,0))) == (True, 0)
assert iv(key, coeff=(14,7,33)).compare(iv(key,coeff=(2,1,0))) == (True, 33)
assert iv(key, coeff=(15,5,33)).compare(iv(key,coeff=(3,1,33))) == (True, 0)
def test_imply_basic(self):
key = box(1)
# if x < 42 <=> x < 42
g1 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,0)), iv(const(42)))
g2 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,0)), iv(const(42)))
assert g1.implies(g2)
assert g2.implies(g1)
# if x+1 < 42 => x < 42
g1 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,1)), iv(const(42)))
g2 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,0)), iv(const(42)))
assert g1.implies(g2)
assert not g2.implies(g1)
# if x+2 < 42 => x < 39
# counter: 39+2 < 42 => 39 < 39
g1 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,2)), iv(const(42)))
g2 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,0)), iv(const(39)))
assert not g1.implies(g2)
assert not g2.implies(g1)
# if x+2 <= 42 => x <= 43
g1 = guard_true(rop.INT_LE, iv(key, coeff=(1,1,2)), iv(const(42)))
g2 = guard_true(rop.INT_LE, iv(key, coeff=(1,1,0)), iv(const(43)))
assert g1.implies(g2)
assert not g2.implies(g1)
# if x*13/3+1 <= 0 => x*13/3 <= -1
# is true, but the implies method is not smart enough
g1 = guard_true(rop.INT_LE, iv(key, coeff=(13,3,1)), iv(const(0)))
g2 = guard_true(rop.INT_LE, iv(key, coeff=(13,3,0)), iv(const(-1)))
assert not g1.implies(g2)
assert not g2.implies(g1)
# > or >=
# if x > -55 => x*2 > -44
# counter: -44 > -55 (True) => -88 > -44 (False)
g1 = guard_true(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(const(-55)))
g2 = guard_true(rop.INT_GT, iv(key, coeff=(2,1,0)), iv(const(-44)))
assert not g1.implies(g2)
assert not g2.implies(g1)
# if x*2/2 > -44 => x*2/2 > -55
g1 = guard_true(rop.INT_GE, iv(key, coeff=(2,2,0)), iv(const(-44)))
g2 = guard_true(rop.INT_GE, iv(key, coeff=(2,2,0)), iv(const(-55)))
assert g1.implies(g2)
assert not g2.implies(g1)
def test_imply_coeff(self):
key = box(1)
key2 = box(2)
# if x > y * 9/3 => x > y
# counter: x = -2, y = -1, -2 > -3 => -2 > -1, True => False
g1 = guard_true(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(box(1),coeff=(9,3,0)))
g2 = guard_true(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(box(1),coeff=(1,1,0)))
assert not g1.implies(g2)
assert not g2.implies(g1)
# if x > y * 15/5 <=> x > y * 3
g1 = guard_true(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(key2,coeff=(15,5,0)))
g2 = guard_true(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(key2,coeff=(3,1,0)))
assert g1.implies(g2)
assert g2.implies(g1)
# x >= y => x*3-5 >= y
# counter: 1 >= 0 => 1*3-5 >= 0 == -2 >= 0, True => False
g1 = guard_true(rop.INT_GE, iv(key, coeff=(1,1,0)), iv(key2))
g2 = guard_true(rop.INT_GE, iv(key, coeff=(3,1,-5)), iv(key2))
assert not g1.implies(g2)
assert not g2.implies(g1)
# guard false inverts >= to <
# x < y => x*3-5 < y
# counter: 3 < 4 => 3*3-5 < 4 == 4 < 4, True => False
g1 = guard_false(rop.INT_GE, iv(key, coeff=(1,1,0)), iv(key2))
g2 = guard_false(rop.INT_GE, iv(key, coeff=(3,1,-5)), iv(key2))
assert not g1.implies(g2)
assert not g2.implies(g1)
# x <= y => x*3-5 > y
# counter: 3 < 4 => 3*3-5 < 4 == 4 < 4, True => False
g1 = guard_false(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(key2))
g2 = guard_true(rop.INT_GT, iv(key, coeff=(3,1,-5)), iv(key2))
assert not g1.implies(g2)
assert not g2.implies(g1)
def test_collapse(self):
loop1 = self.parse_trace("""
i10 = int_gt(i1, 42)
guard_true(i10) []
i11 = int_add(i1, 1)
i12 = int_gt(i11, i2)
guard_true(i12) []
""")
opt = self.optguards(loop1, True)
self.assert_guard_count(loop1, 2)
self.assert_contains_sequence(loop1, """
...
i100 = int_ge(42, i2)
guard_true(i100) []
...
i40 = int_gt(i1, 42)
guard_true(i40) []
...
""")
class Test(GuardBaseTest, LLtypeMixin):
pass
|
import tensorflow as tf
import multiprocessing
import os
import nibabel as nib
import numpy as np
import subprocess
import json
from sklearn.utils.validation import check_is_fitted
from abc import ABC, abstractmethod
from sklearn.base import BaseEstimator, TransformerMixin
from termcolor import cprint
from modules.models.utils import (print, save_fibers,
np_placeholder, make_hdr, flat_percentile, map_to_90deg_range)
from modules.models.example_loader import PointExamples, aff_to_rot
from modules.models.input_fn import input, is_nifti, is_mask
from modules.hooks import write_smt_num, write_smt_txt
from tensorflow.python.estimator.export.export import (
build_raw_serving_input_receiver_fn as input_receiver_fn)
from tensorflow.python.platform import tf_logging as logging
class BaseTF(ABC, BaseEstimator, TransformerMixin):
"""docstring for BaseTF"""
lock = multiprocessing.Lock()
num_instances = 0
def __init__(self, input_fn_config, config, params):
super(BaseTF, self).__init__()
self.input_fn_config = input_fn_config
self.config = config
self.params = params
self._restore_path = None
with BaseTF.lock:
self.instance_id = BaseTF.num_instances
BaseTF.num_instances += 1
def fit(self, X, y):
with BaseTF.lock:
config = self.config
if BaseTF.num_instances > 1:
config["model_dir"] = os.path.join(
config["model_dir"],
"inst-" + str(self.instance_id))
input_fn, self.feature_spec, train_size = (
input(X, y, self.input_fn_config))
self.estimator = tf.estimator.Estimator(
model_fn=self.model_fn,
params={**self.params,
**self.input_fn_config,
"train_size": train_size},
config=tf.estimator.RunConfig(**config))
tf.logging.set_verbosity(tf.logging.INFO)
try:
self.estimator.train(input_fn=input_fn)
except KeyboardInterrupt:
print("\nEarly stop of training, saving model...")
self.export_estimator()
return self
else:
self.export_estimator()
return self
def predict(self, X, head="predictions"):
check_is_fitted(self, ["_restore_path"])
predictor = tf.contrib.predictor.from_saved_model(self._restore_path)
if isinstance(X, np.ndarray):
return predictor({"X": X})[head]
elif isinstance(X, dict):
return predictor(X)[head]
def predictor(self):
if self._restore_path is not None:
return tf.contrib.predictor.from_saved_model(self._restore_path)
elif self.estimator.latest_checkpoint() is not None:
return tf.contrib.predictor.from_estimator(
self.estimator,
input_receiver_fn(self.feature_spec)
)
else:
print("Neither _restore_path nor latest_checkpoint set. "
"Returning None, no predictor.")
return None
def predict_proba(self, X):
return self.predict(X, head="probabs")
def set_save_path(self, save_path):
self.save_path = save_path
if self._restore_path is None:
self.config["model_dir"] = save_path
def export_estimator(self):
receiver_fn = input_receiver_fn(self.feature_spec)
self._restore_path = self.estimator.export_savedmodel(
self.save_path,
receiver_fn)
print("Model saved to {}".format(self._restore_path))
@abstractmethod
def score(self, X, y):
pass
@abstractmethod
def model_fn(self, features, labels, mode, params, config):
pass
def __getstate__(self):
state = self.__dict__.copy()
def remove_tensorflow(state):
for key, val in list(state.items()):
if "tensorflow" in getattr(val, "__module__", "None"):
del state[key]
elif isinstance(val, dict):
remove_tensorflow(val)
remove_tensorflow(state)
return state
def __setstate__(self, state):
self.__dict__.update(state)
class BaseTracker(BaseTF):
"""Exension to BaseTF to enable fiber tracking.
Extend this class to implement the methods and use for tracking.
"""
def predict(self, X, args):
"""Generate the tracktography with the current model on the given brain."""
# Check model
check_is_fitted(self, ["params"])
assert isinstance(X, list)
assert len(X) == 2
assert all(is_nifti(x) for x in X)
self.args = args
# Get brain information
if is_mask(X[0]):
wm_mask = nib.load(X[0]).get_data()
brain_data = nib.load(X[1]).get_data()
header = make_hdr(X[1])
else:
brain_data = nib.load(X[0]).get_data()
header = make_hdr(X[0])
wm_mask = nib.load(X[1]).get_data()
if 'max_length' in args:
self.max_length = args.max_length
else:
self.max_length = 100
# If no seeds are specified, build them from the wm mask
if 'seeds' not in self.args:
if "threshold" in self.args:
seeds = self._seeds_from_wm_mask(wm_mask, self.args.threshold)
else:
seeds = self._seeds_from_wm_mask(wm_mask)
else:
seeds = self.args.seeds
seeds = nib.load(seeds).get_data()
seeds = np.where(seeds > 0)
seeds = np.dstack(seeds)[0]
seeds = [[seed] for seed in seeds]
predictor = self.predictor()
if predictor is not None:
tracks, scalars = self._generate_masked_tractography(
brain_data,
wm_mask,
seeds,
affine=header["vox_to_ras"],
predictor=predictor)
if "file_name" in args:
if len(args.file_name) >= 4 and args.file_name[-4:] == ".trk":
fiber_path = os.path.join(
self.save_path, args.file_name)
else:
fiber_path = os.path.join(
self.save_path, args.file_name + ".trk")
else:
fiber_path = os.path.join(self.save_path, "fibers.trk")
if "min_length" in self.args:
min_length = self.args.min_length
else:
min_length = 2
save_fibers(tracks,
header,
fiber_path,
scalars=scalars,
min_length=min_length)
@staticmethod
def assert_equal_length(tracks, scalars):
for key in scalars.keys():
if len(tracks) != len(scalars[key]):
raise ValueError("Length tracks != Length "
"{} ({} != {})".format(key,
len(tracks),
len(scalars[key])))
for idx, track in enumerate(tracks):
if len(track) != len(scalars[key][idx]):
raise ValueError("Length of track does not match length of "
"scalar {} ({} != {})".format(key,
len(track),
len(scalars[key][idx]))
)
def _generate_masked_tractography(
self,
brain_data,
wm_mask,
seeds,
affine=None,
predictor=None):
"""Generate the tractography using the white matter mask."""
tracks = seeds
scalars = {"concentration": [[] for _ in range(len(seeds))],
"fvm_probab": [[] for _ in range(len(seeds))],
"angles": [[] for _ in range(len(seeds))],
"inverted": [[] for _ in range(len(seeds))],
"probab": []}
ongoing_idx = np.arange(len(seeds))
while len(ongoing_idx) > 0:
ongoing_fibers = np.asarray([tracks[i] for i in ongoing_idx])
predictions = predictor(self._build_next_X(brain_data, ongoing_fibers, affine))
if "use_mean" in self.args:
directions = self.get_directions_from_predictions(predictions,
affine,
use_mean=self.args.use_mean)
else:
directions = self.get_directions_from_predictions(predictions,
affine)
next_pts = ongoing_fibers[:, -1, :] + directions * self.args.step_size
if ongoing_fibers.shape[1] == 1:
flip = list(map(lambda x: self._is_border(x, wm_mask), next_pts))
next_pts[flip] = next_pts[flip] - 2 * directions[flip] * self.args.step_size
not_terminal = list(map(lambda x: not self._is_border(x, wm_mask), next_pts))
for i in range(len(ongoing_idx)):
# Note: no need to normalize here.
v = directions[i] # Affine already applied
mu = self.apply_affine(predictions["mean"][i], affine)
k = predictions["concentration"][i][0]
scalars["concentration"][ongoing_idx[i]].append(k)
inner_prod = np.clip(np.inner(mu, v), -1.0, 1.0)
mu = np.sign(inner_prod)*mu
scalars["inverted"][ongoing_idx[i]].append(1 if inner_prod < 0 else 0)
fvm_probab = np.log(self.fvm_probab(v, mu, k) + 10**-12)
scalars["fvm_probab"][ongoing_idx[i]].append(fvm_probab)
angle = map_to_90deg_range(np.rad2deg(np.arccos(inner_prod)))
scalars["angles"][ongoing_idx[i]].append(angle)
if ongoing_fibers.shape[1] >= self.max_length:
for i, fvm_probab in enumerate(scalars["fvm_probab"]):
probab = np.sum(fvm_probab)
scalars["probab"].append([probab] * len(fvm_probab))
self.assert_equal_length(tracks, scalars)
return tracks, scalars
for i, next_pt in enumerate(next_pts):
if not_terminal[i]:
tracks[ongoing_idx[i]].append(next_pt)
ongoing_idx = ongoing_idx[not_terminal]
cprint(
"{:6d} / {} fibers going on.".format(len(ongoing_idx),
len(seeds)),
"red", "on_grey",
end="\r", flush=True)
for i, fvm_probab in enumerate(scalars["fvm_probab"]):
probab = np.sum(fvm_probab)
scalars["probab"].append([probab] * len(fvm_probab))
self.assert_equal_length(tracks, scalars)
return tracks, scalars
def _build_next_X(self, brain_data, ongoing_fibers, affine):
"""Builds the next X-batch to be fed to the model.
The X-batch created continues the streamline based on the outgoing directions obtained at
the previous step.
Returns:
next_X: The next batch of point values ('blocks' and 'incoming').
"""
label_type = "point"
X = {
'incoming': [],
'blocks': []
}
for fiber in ongoing_fibers:
center_point = fiber[-1]
incoming_point = np.zeros((self.input_fn_config["n_incoming"], 3))
outgoing = np.zeros(3)
for i in range(min(self.input_fn_config["n_incoming"], len(fiber)-1)):
incoming_point[i] = fiber[-i - 2]
sample = PointExamples.build_datablock(brain_data,
self.input_fn_config["block_size"],
center_point,
incoming_point,
outgoing,
label_type,
affine)
X_sample = {
'incoming': sample['incoming'].reshape(-1, 3),
'blocks': sample['data_block']
}
# Add example to examples by appending individual lists
for key, cur_list in X.items():
cur_list.append(X_sample[key])
for key, _ in X.items():
X[key] = np.array(X[key])
return X
@abstractmethod
def get_directions_from_predictions(self, predictions, affine):
"""Computes fiber directions form the predictions of the network.
Method to be extended in subclasses. By extending this the outputs of
different types of networks can be used in the same way.
Args:
predictions: The output of the neural network model.
affine: The affine transformation for the voxel space.
Returns:
directions: The fiber directions corresponding to the predictions.
"""
pass
def _seeds_from_wm_mask(self, wm_mask, threshold=0.5):
"""Compute the seeds for the streamlining from the white matter mask.
This is invoked only if no seeds are specified.
The seeds are selected on the interface between white and gray matter, i.e. they are the
white matter voxels that have at least one gray matter neighboring voxel.
These points are furthermore perturbed with some gaussian noise to have a wider range of
starting points.
Returns:
seeds: The list of voxel that are seeds.
"""
# Take the border voxels as seeds
seeds = self._find_borders(wm_mask, threshold)
print("Number of seeds on the white matter mask:", len(seeds))
print("Number of requested seeds:", self.args.n_fibers)
new_idxs = np.random.choice(len(seeds), self.args.n_fibers, replace=True)
new_seeds = [[seeds[i] + np.clip(np.random.normal(0, 0.25, 3), -0.5, 0.5)]
for i in new_idxs]
return new_seeds
def _find_borders(self, wm_mask, threshold=0.5, order=1):
"""Find the wm-gm interface points.
Args:
order: How far from the center voxel to look for different voxels. Default 1.
Return:
seeds: The seeds generated from the white matter mask
"""
dim = wm_mask.shape
borders = []
if wm_mask.dtype != 'int' or wm_mask.dtype == 'int64':
# If float, check if it is really not boolean
if np.where(np.abs(wm_mask - 0.5) < 0.5)[0].shape[0] > 0:
cprint("WARNING: The mask in use might not be binary. \
Thresholding at {} will be applied.".format(threshold))
for x in range(order, dim[0] - order):
for y in range(order, dim[1] - order):
for z in range(order, dim[2] - order):
if wm_mask[x, y, z] > threshold: # Careful if using non-binary mask!
window = wm_mask[x - order:x + 1 + order,
y - order:y + 1 + order,
z - order:z + 1 + order]
if not np.all(window):
borders.append(np.array([x, y, z]))
return borders
def _is_border(self, coord, wm_mask):
"""Check if the voxel is on the white matter border.
Args:
coord: Numpy ndarray containing the [x, y, z] coordinates of the point.
Returns:
True if the [x, y, z] point is on the border.
"""
coord = np.round(coord).astype(int)
lowerbound_condition = coord[0] < 0 or coord[1] < 0 or coord[2] < 0
upperbound_condition = coord[0] >= wm_mask.shape[0] or \
coord[1] >= wm_mask.shape[1] or \
coord[2] >= wm_mask.shape[2]
# Check if out of image dimensions
if lowerbound_condition or upperbound_condition:
return True
# Check if out of white matter area
return np.isclose(wm_mask[coord[0], coord[1], coord[2]], 0.0)
@staticmethod
def apply_affine(directions, affine):
return aff_to_rot(affine).dot(directions.T).T
class DeterministicTracker(BaseTracker):
"""Base model for deterministic tracking.
A model does deterministic tracking when its output is the direction of the
fiber (given the possibly different inputs), not a probability distribution
(see ProbabilisticTracker).
"""
def get_directions_from_predictions(self, predictions, affine, use_mean=False):
"""Compute the direction of the fibers from the deterministic predict.
"""
return self.apply_affine(predictions["directions"], affine)
class ProbabilisticTracker(BaseTracker):
"""Base model for probabilistic tracking.
This model assumes that the network does not output a direction but a
probability distribution over the possible directions. In this case, the
distribution is the Fisher-Von Mises distribution, with parameters [mu, k],
where mu is the 3-dimensional mean-direction vector and k is the
concentration parameter.
"""
def score(self, trk_file=None, y=None, args=None):
if isinstance(trk_file, list):
trk_file = trk_file[0]
if trk_file is None:
TM_DATA=["/local/entrack/data/tractometer/125mm/FODl4.nii.gz",
"/local/entrack/data/tractometer/125mm/wm_aligned.nii.gz"]
args.file_name = "tm_fibers.trk"
self.predict(TM_DATA, args)
trk_file = os.path.join(self.save_path, args.file_name)
TM_PATH = ("./.tractometer/ismrm_2015_tractography_challenge_scoring/"
"score_tractogram.py")
SCORING_DATA = ("./.tractometer/ismrm_2015_tractography_challenge_"
"scoring/scoring_data/")
scoring_cmd = "python {command} {tracts} {base} {out}".format(
command=TM_PATH,
tracts=trk_file,
base=SCORING_DATA,
out=self.save_path)
subprocess.run(["bash", "-c", "source activate entrack_tm && {}"
.format(scoring_cmd)])
eval_path = os.path.join(self.save_path, "scores", "tm_fibers.json")
eval_data = json.load(open(eval_path))
for metric in ["mean_OL", "mean_OR", "VC", "NC", "IC", "VB", "IB", "mean_F1"]:
write_smt_txt(eval_data[metric],
self.save_path,
metric=metric,
inline=True)
if "score" in args:
return eval_data[args.score]
def get_directions_from_predictions(self, predictions, affine, use_mean=False):
mu = predictions['mean']
k = predictions['concentration']
if not use_mean:
directions = ProbabilisticTracker.sample_vMF(mu, k)
else:
directions = mu
return self.apply_affine(directions, affine)
def save_scalars(self,
trk_file,
nii_file,
min_pts_per_fiber=2,
every_n_fibers=1,
file_name="scalars.trk"):
scalars, tracks, trk_hdr = self.fvm_scalars(trk_file,
nii_file,
min_pts_per_fiber,
every_n_fibers)
for metric in scalars.keys():
for q in [25, 50, 75]:
write_smt_txt(flat_percentile(scalars[metric], q),
self.save_path,
metric=metric + "_" + str(q),
inline=q>25)
save_fibers(tracks,
trk_hdr,
os.path.join(self.save_path, file_name),
scalars=scalars)
def fvm_scalars(self, trk_file, nii_file, min_pts_per_fiber=2, every_n_fibers=1):
"""Produce trk file marked with concentration and fvm_probab.
fvm_scalars produces concentration and fvm_probab scalars that
should be passed on to utils.save_fibers
Args:
trk_file (str): Path to trk file that contains the fibers to be marked.
nii_file (str): Path to nifti file with corresponding diffusion data.
Returns:
scalars (dict): Dict mapping scalar names to lists of shape (n_tracks, ).
Keys are "concentration", "fvm_probab", "angles", "inverted" and "probab".
all_tracks (list): List of unmarked tracks of shape (n_tracks, ).
trk_hdr: Header of trk_file.
TODO:
* Add some kind of skip parameter to reduce computation time.
* Add min_length parameter.
* Make fvm_scalars usable with trained model and not only during
training.
"""
tracks, trk_hdr = nib.trackvis.read(trk_file, points_space="voxel")
trk_aff = nib.trackvis.aff_from_hdr(trk_hdr)
all_tracks = []
for i, track in enumerate(tracks):
if len(track[0]) >= min_pts_per_fiber and i % every_n_fibers == 0:
all_tracks.append(track[0])
tracks = all_tracks[:]
nii_file = nib.load(nii_file)
nii_data = nii_file.get_data()
nii_hdr = nii_file.header
nii_aff = nii_file.affine
assert np.allclose(nii_aff, trk_aff)
block_size = self.input_fn_config["block_size"]
n_incoming = self.input_fn_config["n_incoming"]
n_tracks = len(all_tracks)
predictor = self.predictor()
ongoing_idx = list(range(n_tracks))
track_lengths = list(map(lambda track: len(track), tracks))
scalars = {"concentration": [[] for _ in range(n_tracks)],
"fvm_probab": [[] for _ in range(n_tracks)],
"angles": [[] for _ in range(n_tracks)],
"inverted": [[] for _ in range(n_tracks)],
"probab": []}
cprint("Marking {} fibers...".format(n_tracks), "green", "on_grey", flush=True)
while len(ongoing_idx) > 0:
ongoing_tracks = [tracks[i] for i in ongoing_idx]
n_ongoing = len(ongoing_tracks)
batch = self._build_next_X(nii_data, ongoing_tracks, nii_aff)
predictions = predictor(batch)
for i, kappa in enumerate(predictions["concentration"]):
scalars["concentration"][ongoing_idx[i]].append(kappa[0])
for i in range(n_ongoing):
k = predictions["concentration"][i][0]
mu = self.apply_affine(predictions["mean"][i], nii_aff)
ongoing_len = len(ongoing_tracks[i])
assert ongoing_len >= 1
if ongoing_len == len(all_tracks[ongoing_idx[i]]):
v = PointExamples.points_to_relative(
all_tracks[ongoing_idx[i]][-2],
all_tracks[ongoing_idx[i]][-1]
)
else:
v = PointExamples.points_to_relative(
all_tracks[ongoing_idx[i]][ongoing_len - 1],
all_tracks[ongoing_idx[i]][ongoing_len]
)
v = self.apply_affine(v, nii_aff)
inner_prod = np.clip(np.inner(mu, v), -1.0, 1.0)
mu = np.sign(inner_prod)*mu
scalars["inverted"][ongoing_idx[i]].append(1 if inner_prod < 0 else 0)
fvm_probab = np.log(self.fvm_probab(v, mu, k) + 10**-12)
scalars["fvm_probab"][ongoing_idx[i]].append(fvm_probab)
angle = map_to_90deg_range(np.rad2deg(np.arccos(inner_prod)))
scalars["angles"][ongoing_idx[i]].append(angle)
tracks = list(map(lambda x: x[:-1], tracks))
ongoing_idx = list(filter(lambda i: len(tracks[i]) > 0, ongoing_idx))
cprint(
"{:6d} / {} fibers going on.".format(len(ongoing_idx),
n_tracks),
"red", "on_grey",
end="\r", flush=True)
for i, fvm_probab in enumerate(scalars["fvm_probab"]):
probab = np.sum(fvm_probab)
scalars["probab"].append([probab] * track_lengths[i])
self.assert_equal_length(all_tracks, scalars)
return scalars, all_tracks, trk_hdr
def transform(self, X, args=None):
assert isinstance(X, list)
assert len(X) == 2
if args is not None:
args = vars(args)
else:
args = {}
if is_nifti(X[0]):
self.save_scalars(nii_file=X[0],
trk_file=X[1],
**args)
else:
self.save_scalars(nii_file=X[1],
trk_file=X[0],
**args)
@staticmethod
def fvm_probab(v, mu, k, eps=10**-12):
if k < eps:
return 1 / (4 * np.pi)
C_k = k / (2 * np.pi * (1 - np.exp(-2 * k)) + eps)
return C_k * np.exp(k * (np.inner(mu, v) - 1))
@staticmethod
def sample_vMF(mu, k):
"""Sampe from the von Mises-Fisher distribution.
See "Numerically stable sampling of the von Mises Fisher distribution
onS2 (and other tricks)".
https://www.mendeley.com/viewer/?fileId=1d3bb1ab-8211-60fb-218c-f11e1638
0bde&documentId=7eb942de-6dd9-3af7-b36c-8a9c37b6b6a6
Args:
mu: Mean of the distribution. Shape (N, 3).
            k: Concentration of the distribution. Shape (N, 1).
Returns:
samples: Samples from the specified vMF distribution. Ndarray of
shape (N, 3), where N is the number of different distributions.
A row of the matrix of index j is a sample from the vMF with
mean mu[j] and concentration k[j].
"""
mu = np.asarray(mu)
k = np.asarray(k)
# Assert 2D vectors
assert len(k.shape) == 2
assert len(mu.shape) == 2
# Assert correct axis orientation
assert k.shape[1] == 1
assert mu.shape[1] == 3
# Assert same number of samples
assert mu.shape[0] == k.shape[0]
# Assert all unit vectors
assert np.allclose(
np.linalg.norm(mu, axis=1),
np.ones(mu.shape[0])
)
n_samples = mu.shape[0]
V = ProbabilisticTracker._sample_unif_unit_circle(n_samples)
W = ProbabilisticTracker._sample_W_values(k)
omega = np.multiply(np.sqrt(1 - np.square(W)), V)
omega = np.hstack((omega, W))
# Now apply the rotation to change the mean
# i.e. rotate from the direction of the z-axis to the mean direction
reference = np.asarray([[0, 0, 1]] * omega.shape[0])
rotation = ProbabilisticTracker._rotation_matrices(reference, mu)
samples = np.matmul(rotation, omega[:, :, np.newaxis])[:, :, 0]
return samples
@staticmethod
def _rotation_matrices(vectors, references):
"""Compute all the rotation matrices from the vectors to the references.
Args:
vectors: Array of vectors that have to be rotated to match the
references.
references: Array of reference vectors.
Returns:
            rotations: Array of matrices. Each matrix is the rotation from the
vector of corresponding index to its reference.
"""
# TODO: Fix the inefficiency of the for loop to compute the rotation
# matrices
rotations_list = []
for idx in range(vectors.shape[0]):
rot_mat = ProbabilisticTracker._to_rotation(vectors[idx, :],
references[idx, :])
rotations_list.append(rot_mat)
rotations = np.asarray(rotations_list)
return np.asarray(rotations)
@staticmethod
    def _to_cross_skew_symmetric(vec, ref):
"""Finds the skew-symmetric cross-product matrix."""
v = np.cross(vec, ref)
cross_mat = np.zeros(shape=(3, 3))
cross_mat[[0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1]] = [-v[2],
v[1],
v[2],
-v[0],
-v[1],
v[0]]
return cross_mat
@staticmethod
def _to_rotation(vec, ref):
"""Compute rotation matrix from vec to ref.
NOTE: There must be a better way to do this.
"""
        cross = ProbabilisticTracker._to_cross_skew_symmetric(vec, ref)
c = np.reshape(np.asarray(np.dot(vec, ref)), newshape=-1)
square = np.dot(cross, cross)
R = np.eye(3) + cross + square * (1 / (1 + c))
return R
@staticmethod
def _sample_unif_unit_circle(n_samples):
"""Sample form the uniform distribution on the unit circle.
Args:
n_samples: Number of samples required.
Returns:
samples: (n_samples,2) ndarray.
"""
angles = np.random.uniform(high=2*np.pi, size=(n_samples, 1))
samples_on_unit_circle = np.hstack((np.cos(angles), np.sin(angles)))
return samples_on_unit_circle
@staticmethod
def _sample_W_values(k):
"""Sample the values of W."""
n_samples = k.shape[0]
unif = np.random.uniform(size=(n_samples, 1))
return 1 + np.reciprocal(k) * np.log(unif + (1 - unif) * np.exp(-2 * k))
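# A minimal usage sketch of the sampling helper above (illustrative only; it assumes
# numpy is imported as np and the ProbabilisticTracker class defined above is available
# in this module). It draws one sample from each of two von Mises-Fisher distributions.
if __name__ == "__main__":
    example_mu = np.asarray([[0.0, 0.0, 1.0],
                             [1.0, 0.0, 0.0]])  # unit mean directions, shape (2, 3)
    example_k = np.asarray([[5.0], [50.0]])     # concentrations, shape (2, 1); larger k -> samples closer to mu
    example_samples = ProbabilisticTracker.sample_vMF(example_mu, example_k)
    print(example_samples)                      # one unit vector per row, shape (2, 3)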
|
import mmcv
from mmcv import Config
import mmpose
from mmpose.apis import (inference_top_down_pose_model, init_pose_model,train_model,
vis_pose_result, process_mmdet_results)
from mmdet.apis import inference_detector, init_detector
from mmpose.datasets import build_dataset
from mmpose.models import build_posenet
import cv2
import os
import tempfile
import os.path as osp
from tqdm import tqdm
class MMPose:
def __init__(self,
backbone_det='FasterRCNN-pose',
backbone = 'HrNet32',
                 # note: for inference, the HrNet (64) backbone may be needed
dataset_path = None
):
        # Get the absolute path related to the externally executed .py script
        self.cwd = os.path.dirname(os.getcwd())
        # Get the absolute path of the current file
        self.save_fold = None
        self.file_dirname = os.path.dirname(os.path.abspath(__file__))
self.backbone_det = backbone_det
backbone_det_path = os.path.join(self.file_dirname, 'models', self.backbone_det)
ckpt_cfg_list = list(os.listdir(backbone_det_path))
for item in ckpt_cfg_list:
if item[-1] == 'y':
self.det_config = os.path.join(backbone_det_path, item)
elif item[-1] == 'h':
self.det_checkpoint = os.path.join(backbone_det_path, item)
else:
print("Warning!!! There is an unrecognized file in the backbone folder.")
self.backbone = backbone
backbone_path = os.path.join(self.file_dirname, 'models', self.backbone)
ckpt_cfg_list = list(os.listdir(backbone_path))
for item in ckpt_cfg_list:
if item[-1] == 'y':
self.pose_config = os.path.join(backbone_path, item)
elif item[-1] == 'h':
self.pose_checkpoint = os.path.join(backbone_path, item)
self.cfg_det = Config.fromfile(self.det_config)
self.cfg = Config.fromfile(self.pose_config)
self.dataset_path = dataset_path
return None
def train(self, random_seed=0, save_fold=None, checkpoint = None, distributed=False, validate=True,
metric='PCK', save_best = 'PCK',optimizer="Adam", epochs=100, lr=5e-4,
resume_from = None,
eval_interval = 10,
log_interval = 5,
):
print("========= begin training ==========")
        # If save_fold was not specified on the instance
        if not self.save_fold:
            # If no save_fold was passed in either, use the default path
            if not save_fold:
                self.save_fold = os.path.join(self.cwd, 'checkpoints/pose_model')
            # If save_fold was passed in, use that value
            else:
                self.save_fold = save_fold
# self.cfg = Config.fromfile(self.backbonedict[self.backbone])
# print(self.cfg.pretty_text)
self.cfg.gpu_ids = range(1)
self.cfg.work_dir = self.save_fold
self.cfg.load_from = checkpoint
self.cfg.resume_from = resume_from
self.cfg.seed = random_seed
self.cfg.evaluation.interval = eval_interval
        self.cfg.evaluation.metric = metric # evaluation metric
        self.cfg.evaluation.save_best = save_best # metric used to save the best checkpoint
# self.cfg.model.backbone.frozen_stages = Frozen_stages
# set log interval
self.cfg.log_config.interval = log_interval
        self.cfg.total_epochs = epochs # maximum number of training epochs
        self.cfg.optimizer.lr = lr # learning rate
        self.cfg.optimizer.type = optimizer # optimizer type
datasets = [build_dataset(self.cfg.data.train)]
# build model
model = build_posenet(self.cfg.model)
# create work_dir
mmcv.mkdir_or_exist(self.cfg.work_dir)
# train model
train_model(
model, datasets, self.cfg, distributed=distributed, validate=validate, meta=dict())
print("========= finish training ==========")
return None
def _inference(self,det_model,pose_model,img,work_dir,name,show,i):
mmdet_results = inference_detector(det_model, img)
person_results = process_mmdet_results(mmdet_results, cat_id=1)
pose_results, returned_outputs = inference_top_down_pose_model(pose_model,
img,
person_results,
bbox_thr=0.3,
format='xyxy',
dataset=pose_model.cfg.data.test.type)
vis_result = vis_pose_result(pose_model,
img,
pose_results,
dataset=pose_model.cfg.data.test.type,
show=show)
with tempfile.TemporaryDirectory() as tmpdir:
            if not os.path.exists(work_dir): # os.path.exists returns True if the directory exists
os.makedirs(work_dir)
file_name = osp.join(work_dir, name+str(i)+'.png')
cv2.imwrite(file_name, vis_result)
return pose_results
def inference(self,
device='cuda:0',
is_trained=False,
pretrain_model='./checkpoints/pose_model/latest.pth',
img=None,
show=False,
work_dir='./img_result/',
name='pose_result'):
"""
params:
device: 推理设备,可选参数: ('cuda:int','cpu')
is_trained: 是否使用本地预训练的其他模型进行训练
pretrain_model: 如果使用其他模型,则传入模型路径
img: 推理图片的路径或文件夹名
show: 是否对推理结果进行显示
work_dir: 推理结果图片的保存文件夹
name: 推理结果保存的名字
return:
pose_results: 推理的结果数据,一个列表,其中包含若干个字典,每个字典存储对应检测的人体数据。
"""
if not pretrain_model:
pretrain_model = os.path.join(self.cwd, 'checkpoints/pose_model/latest.pth')
print("========= begin inference ==========")
if is_trained == True:
self.pose_checkpoint = pretrain_model
# initialize pose model
pose_model = init_pose_model(self.pose_config, self.pose_checkpoint,device = device)
# initialize detector
det_model = init_detector(self.det_config, self.det_checkpoint,device=device)
# inference img
if img[-1:] != '/':
pose_results = self._inference(det_model,pose_model,img,work_dir,name,show,0)
            print('Image result is saved as %s.png' % (name))
else:
# inference directory
img_dir = img
print("inference for directory: %s \n" % (img_dir))
for i,img in enumerate(tqdm(os.listdir(img_dir))):
pose_results = self._inference(det_model,pose_model,img_dir+img,work_dir,name,show,i)
            print('Finished! Image results are saved in %s \n' % (work_dir))
return pose_results
def load_dataset(self, path):
self.dataset_path = path
        # The dataset must be organized as: images/  train.json  val.json
# cfg.data_root = 'data/coco_tiny'
self.cfg.data.train.type = 'PoseDataset'
self.cfg.data.train.ann_file = os.path.join(self.dataset_path, 'train.json')
self.cfg.data.train.img_prefix = os.path.join(self.dataset_path, 'images/')
self.cfg.data.val.type = 'PoseDataset'
self.cfg.data.val.ann_file = os.path.join(self.dataset_path, 'val.json')
self.cfg.data.val.img_prefix = os.path.join(self.dataset_path, 'images/')
self.cfg.data.test.type = 'PoseDataset'
self.cfg.data.test.ann_file = os.path.join(self.dataset_path, 'val.json')
self.cfg.data.test.img_prefix = os.path.join(self.dataset_path, 'images/')
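# A minimal usage sketch (illustrative only): it assumes the backbone folders exist under
# ./models next to this file, and 'data/coco_tiny' and './demo_images/' are hypothetical
# paths for a dataset laid out as images/ + train.json + val.json and a folder of test images.
if __name__ == "__main__":
    pose = MMPose(backbone='HrNet32')
    pose.load_dataset('data/coco_tiny')
    pose.train(epochs=10, validate=True)
    pose.inference(is_trained=True, img='./demo_images/', show=False)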
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Vidar Tonaas Fauske, Sebastian Koch.
# Distributed under the terms of the Modified BSD License.
from .example import ExampleWidget
from ._version import __version__, version_info
from .nbextension import _jupyter_nbextension_paths
|
import copy
import os
from .exceptions import (
CannotLoadConfiguration,
InvalidConfiguration,
NotRootException,
UnknownConfigurationOptions,
)
from .include import Include
_USER_CONFIG_FILE_PATH = os.path.expanduser("~/.dwightrc")
_USER_CONFIG_FILE_TEMPLATE = """# AUTOGENERATED DEFAULT CONFIG
# ROOT_IMAGE = "/some/path/here"
# INCLUDES = [Include("/proc", "/proc/"),
# Include("/dev", "/dev/"),
# Include("/dev/pts", "/dev/pts"),
# Include("/run", "/run"),
# Include("/sys", "/sys"),
# Include("/home", "/home/"),
# Include("/etc/passwd", "/etc/passwd"),
# Include("/etc/group", "/etc/group")]
# ENVIRON = {}
# UID = None # None means taking the uid from SUDO_UID
# PWD = os.path.abspath(".")
# NUM_LOOP_DEVICES = 64 # The number of loop devices to ensure exist before chrooting
"""
class DwightConfiguration(object):
def __init__(self):
super(DwightConfiguration, self).__init__()
self._config = dict(
ROOT_IMAGE = None,
INCLUDES = [],
ENVIRON = {},
GIDS = None,
UID = None,
PWD = os.path.abspath("."),
NUM_LOOP_DEVICES = None,
MAX_CACHE_SIZE = None,
)
self._known_keys = set(self._config)
def __getitem__(self, key):
return self._config[key]
def __setitem__(self, key, value):
if key not in self._known_keys:
raise UnknownConfigurationOptions("Unknown configuration option: {0!r}".format(key))
self._config[key] = value
def process_user_config_file(self, user_config_file_path=_USER_CONFIG_FILE_PATH):
if not os.path.isfile(user_config_file_path):
self._ensure_user_config_file(user_config_file_path)
with open(user_config_file_path) as user_config_file:
self.load_from_string(user_config_file.read())
def _ensure_user_config_file(self, user_config_file_path):
if not os.path.isdir(os.path.dirname(user_config_file_path)):
os.makedirs(os.path.dirname(user_config_file_path))
with open(user_config_file_path, "w") as user_config_file:
user_config_file.write(_USER_CONFIG_FILE_TEMPLATE)
def load_from_string(self, s):
d = copy.deepcopy(self._config)
try:
exec(s, {"Include" : Include}, d)
except Exception as e:
raise CannotLoadConfiguration("Cannot load configuration ({0})".format(e))
for key in list(d):
if key.startswith("_") or not key[0].isupper():
d.pop(key)
self._check_unknown_parameters(d)
self._config.update(d)
def _check_unknown_parameters(self, d):
unknown = set(d) - self._known_keys
if unknown:
raise UnknownConfigurationOptions("Unknown configuration options: {0}".format(", ".join(map(repr, unknown))))
def check(self):
if self._config.get("ROOT_IMAGE", None) is None:
raise InvalidConfiguration("ROOT_IMAGE option is not set")
|
from __future__ import absolute_import
import unittest
from testutils import harbor_server
from testutils import TEARDOWN
from testutils import ADMIN_CLIENT
from library.system import System
from library.project import Project
from library.user import User
from library.repository import Repository
from library.repository import push_image_to_project
from library.artifact import Artifact
from library.scanner import Scanner
class TestProjects(unittest.TestCase):
@classmethod
def setUp(self):
self.system = System()
self.project= Project()
self.user= User()
self.artifact = Artifact()
self.repo = Repository()
self.scanner = Scanner()
@classmethod
def tearDown(self):
print("Case completed")
@unittest.skipIf(TEARDOWN == False, "Test data won't be erased.")
def test_ClearData(self):
#1. Delete Alice's repository and Luca's repository;
self.repo.delete_repoitory(TestProjects.project_Alice_name, TestProjects.repo_Alice_name.split('/')[1], **ADMIN_CLIENT)
self.repo.delete_repoitory(TestProjects.project_Luca_name, TestProjects.repo_Luca_name.split('/')[1], **ADMIN_CLIENT)
#2. Delete Alice's project and Luca's project;
self.project.delete_project(TestProjects.project_Alice_id, **ADMIN_CLIENT)
self.project.delete_project(TestProjects.project_Luca_id, **ADMIN_CLIENT)
#3. Delete user Alice and Luca.
self.user.delete_user(TestProjects.user_Alice_id, **ADMIN_CLIENT)
self.user.delete_user(TestProjects.user_Luca_id, **ADMIN_CLIENT)
def testSystemLevelScanALL(self):
"""
Test case:
System level Scan All
Test step and expected result:
1. Create user Alice and Luca;
2. Create 2 new private projects project_Alice and project_Luca;
            3. Push an image to project_Alice and push another image to project_Luca;
4. Trigger scan all event;
5. Check if image in project_Alice and another image in project_Luca were both scanned.
Tear down:
1. Delete Alice's repository and Luca's repository;
2. Delete Alice's project and Luca's project;
3. Delete user Alice and Luca.
"""
url = ADMIN_CLIENT["endpoint"]
user_common_password = "Aa123456"
#1. Create user Alice and Luca;
TestProjects.user_Alice_id, user_Alice_name = self.user.create_user(user_password = user_common_password, **ADMIN_CLIENT)
TestProjects.user_Luca_id, user_Luca_name = self.user.create_user(user_password = user_common_password, **ADMIN_CLIENT)
USER_ALICE_CLIENT=dict(endpoint = url, username = user_Alice_name, password = user_common_password, with_scan_overview = True)
USER_LUCA_CLIENT=dict(endpoint = url, username = user_Luca_name, password = user_common_password, with_scan_overview = True)
#2. Create 2 new private projects project_Alice and project_Luca;
TestProjects.project_Alice_id, TestProjects.project_Alice_name = self.project.create_project(metadata = {"public": "false"}, **USER_ALICE_CLIENT)
TestProjects.project_Luca_id, TestProjects.project_Luca_name = self.project.create_project(metadata = {"public": "false"}, **USER_LUCA_CLIENT)
        #3. Push an image to project_Alice and push another image to project_Luca;
        #Note: Please make sure that this image has never been pulled before by any other cases,
        # so it is a not-yet-scanned image right after repository creation.
#image = "tomcat"
image_a = "mariadb"
src_tag = "latest"
        #3.1 Push an image to project_Alice;
TestProjects.repo_Alice_name, tag_Alice = push_image_to_project(TestProjects.project_Alice_name, harbor_server, user_Alice_name, user_common_password, image_a, src_tag)
        #Note: Please make sure that this image has never been pulled before by any other cases,
        # so it is a not-yet-scanned image right after repository creation.
image_b = "httpd"
src_tag = "latest"
#3.2 push another image to project_Luca;
TestProjects.repo_Luca_name, tag_Luca = push_image_to_project(TestProjects.project_Luca_name, harbor_server, user_Luca_name, user_common_password, image_b, src_tag)
#4. Trigger scan all event;
self.system.scan_now(**ADMIN_CLIENT)
#5. Check if image in project_Alice and another image in project_Luca were both scanned.
self.artifact.check_image_scan_result(TestProjects.project_Alice_name, image_a, tag_Alice, **USER_ALICE_CLIENT)
self.artifact.check_image_scan_result(TestProjects.project_Luca_name, image_b, tag_Luca, **USER_LUCA_CLIENT)
        #6. Switch Scanner;
uuid = self.scanner.scanners_get_uuid(**ADMIN_CLIENT)
self.scanner.scanners_registration_id_patch(uuid, **ADMIN_CLIENT)
#7. Trigger scan all event;
self.system.scan_now(**ADMIN_CLIENT)
#8. Check if image in project_Alice and another image in project_Luca were both scanned.
self.artifact.check_image_scan_result(TestProjects.project_Alice_name, image_a, tag_Alice, **USER_ALICE_CLIENT)
self.artifact.check_image_scan_result(TestProjects.project_Luca_name, image_b, tag_Luca, **USER_LUCA_CLIENT)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class SogouSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class SogouDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
r_gt_i = msat_make_gt(menv, r, i)
n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l2", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(2, mgr.Not(x_inc_i))
loc2 = Location(env, mgr.Not(inc_i))
loc2.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1, loc2])
res.append(h_inc)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(2, mgr.Equals(x_l, l))
loc2 = Location(env, mgr.GE(l, n0))
loc2.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l4", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1, loc2])
res.append(h_l)
loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l3", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r2", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc = Location(env, mgr.LE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Minus(l, n1)))
h_l = Hint("h_l1", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc = Location(env, mgr.GE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
h_l = Hint("h_l0", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc = Location(env, mgr.GE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r0", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.LE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Minus(i, n1)))
h_i = Hint("h_i1", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i3", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i2", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
h_i = Hint("h_i0", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(2, mgr.Equals(x_i, i))
loc2 = Location(env, mgr.GE(i, n0))
loc2.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i4", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1, loc2])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r3", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc = Location(env, mgr.Not(inc_i))
loc.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc = Location(env, mgr.LE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1)))
h_r = Hint("h_r1", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
return frozenset(res)
|
import os
from glob import glob
import cv2
from sklearn.utils import shuffle
from utils.general import find_class, append_path
def load_data(train_base, val_base):
cls = find_class(train_base)
train_paths = _find_files(train_base, cls)
val_paths = _find_files(val_base, cls)
return train_paths, val_paths
def _find_files(basepath, cls):
rst = []
count = []
for c in sorted(cls):
files = glob(os.path.join(basepath, c, '*'))
count += [len(files)]
minimum = min(count)
for c in sorted(cls):
files = shuffle(glob(os.path.join(basepath, c, '*')), random_state=1024)[:minimum]
append_files = append_path(basepath, c)(files)
for p in append_files[minimum:]:
os.remove(p)
rst += append_files[:minimum].tolist()
return rst
if __name__ == '__main__':
train_base = '../k-fashion/training'
val_base = '../k-fashion/validation'
train_paths, val_paths = load_data(train_base, val_base)
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/koobitstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *koobit_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("koobit-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
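# Example invocation (illustrative; the source file globs depend on the project layout;
# the script name is taken from the generated header comment above):
#   XGETTEXT=xgettext python extract_strings.py src/*.cpp src/qt/*.cpp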
|
#https://www.codewars.com/kata/integers-recreation-one
CACHE = {}
def squared_cache(number):
if number not in CACHE:
divisors = [x for x in range(1, number + 1) if number % x == 0]
CACHE[number] = sum([x * x for x in divisors])
return CACHE[number]
return CACHE[number]
def list_squared(m, n):
ret = []
for number in range(m, n + 1):
divisors_sum = squared_cache(number)
if (divisors_sum ** 0.5).is_integer():
ret.append([number, divisors_sum])
return ret
############################################
from math import floor, sqrt, pow
def sum_squared_factors(n):
s, res, i = 0, [], 1
while (i <= floor(sqrt(n))):
if (n % i == 0):
s += i * i
nf = n // i
if (nf != i):
s += nf * nf
i += 1
if (pow(int(sqrt(s)), 2) == s):
res.append(n)
res.append(s)
return res
else:
return None
def list_squared(m, n):
res, i = [], m
while (i <= n):
r = sum_squared_factors(i)
if (r != None):
res.append(r);
i += 1
return res
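# Quick sanity check for the implementation above (the second list_squared definition
# shadows the first); the expected values come from the kata examples.
if __name__ == "__main__":
    print(list_squared(1, 250))  # expected: [[1, 1], [42, 2500], [246, 84100]]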
|
# Inspired by https://machinelearningmastery.com/how-to-develop-a-convolutional-neural-network-from-scratch-for-mnist-handwritten-digit-classification/
# and https://www.kaggle.com/ashwani07/mnist-classification-using-random-forest
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import SGD
from Preprocessing import Preprocessing
tf.random.set_seed(42)
import random
random.seed(42)
def get_baseline(pp):
"""Run the random forest baseline and calculate accuracy
Parameters
==========
pp : Preprocessor
Used to get the original MNIST data
Returns
=======
    accuracy: float
The accuracy of the RF baseline
"""
# get MNIST data
x_train, y_train, x_test, y_test = (
pp.getMNISTTrainData(),
pp.getMNISTTrainLabel(),
pp.getMNISTTestData(),
pp.getMNISTTestLabel(),
)
# define Random forest and fit it
rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(x_train.reshape(x_train.shape[0], 28 * 28), y_train)
# run prediction and return accuracy
pred = rf.predict(x_test.reshape(x_test.shape[0], 28 * 28))
return accuracy_score(y_test, pred)
def define_model():
"""Define the sequential layers of the CNN model
Returns
=======
model: Sequential keras model
The CNN model
"""
model = Sequential()
model.add(
Conv2D(
32,
(3, 3),
activation="relu",
kernel_initializer="he_uniform",
input_shape=(28, 28, 1),
)
)
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(100, activation="relu", kernel_initializer="he_uniform"))
model.add(Dense(10, activation="softmax"))
# compile model
opt = SGD(learning_rate=0.01, momentum=0.9)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])
return model
def runProcessor(full=True):
"""Run the CNN model and the RF and determines predictions and accuracies
Parameters
==========
full : boolean
Default value is True
Determines if all the MNIST records should be considered or only
a subset shall be used for testing.
Returns
=======
acc_baseline: float
The accuracy of the RF baseline
acc_CNN: float
The accuracy of the CNN model
y_test: numpy.ndarray
The ground truth test labels
y_pred: numpy.ndarray
The predicted test labels
"""
pp = Preprocessing.Preprocessing(full)
# Get RF baseline
acc_baseline = get_baseline(pp)
# get preprocessed data
x_train, y_train, x_test, y_test = (
pp.getMNISTPreprocessedTrainData(),
pp.getMNISTPreprocessedTrainLabel(),
pp.getMNISTPreprocessedTestData(),
pp.getMNISTPreprocessedTestLabel(),
)
# define and train the CNN model
model = define_model()
model.fit(x_train, y_train, epochs=10, batch_size=32, verbose=0)
# evaluate model on test dataset
_, acc_CNN = model.evaluate(x_test, y_test, verbose=0)
y_pred = model.predict(x_test)
return acc_baseline, acc_CNN, y_test, y_pred
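# A minimal usage sketch (illustrative only; it assumes the Preprocessing package imported
# above is available). full=False restricts the run to a subset of MNIST for a quick test.
if __name__ == "__main__":
    acc_baseline, acc_CNN, y_test, y_pred = runProcessor(full=False)
    print("RF baseline accuracy: {:.4f}".format(acc_baseline))
    print("CNN accuracy:         {:.4f}".format(acc_CNN))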
|
from importlib import import_module
from django.conf import settings
# noinspection PyUnresolvedReferences
from api.system.tasks import * # noqa: F401,F403
# noinspection PyUnresolvedReferences
from api.system.update.tasks import * # noqa: F401,F403
# noinspection PyUnresolvedReferences
from api.task.tasks import * # noqa: F401,F403
# noinspection PyUnresolvedReferences
from api.vm.tasks import * # noqa: F401,F403
# noinspection PyUnresolvedReferences
from api.node.tasks import * # noqa: F401,F403
# noinspection PyUnresolvedReferences
from api.image.tasks import * # noqa: F401,F403
# noinspection PyUnresolvedReferences
from api.mon.tasks import * # noqa: F401,F403
for third_party_app in settings.THIRD_PARTY_APPS:
# noinspection PyBroadException
try:
# noinspection PyShadowingBuiltins
module = import_module(third_party_app + '.tasks')
globals().update({name: module.__dict__[name] for name in module.__all__})
except Exception:
pass
|
#!/usr/bin/env python3
import setuptools
readme = 'README.md'
with open(readme) as f:
long_description = f.read()
setuptools.setup(
name='scrape-blog',
version='0.0.1',
author='Nick Ludwig',
author_email='nick.b.ludwig@gmail.com',
description='Tool to scrape SSC and maybe other simple blogs.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/nludwig/scrape-blog',
packages=setuptools.find_packages(),
install_requires=[
'beautifulsoup4',
'python-docx',
'requests',
],
entry_points={
'console_scripts': ['scrape-ssc = scrape_blog.scrape_ssc:main'],
},
)
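# After installing this package (for example with `pip install .` in this directory),
# the entry point above exposes a `scrape-ssc` command on the PATH.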
|
from django import setup
def pytest_configure():
setup()
|
from pathlib import Path
import typer
import mossel
def main():
commands_conf = mossel.load_conf(Path("."))
mossel.prompt()
typer.echo(f"{commands_conf=}")
if __name__ == "__main__":
typer.run(main)
|
import pytest
try:
import pytest_timeout
except ImportError:
pytest_timeout = None
import time
import ray
import ray.ray_constants
import ray._private.gcs_utils as gcs_utils
from ray._private.test_utils import (wait_for_condition, convert_actor_state,
make_global_state_accessor)
# TODO(rliaw): The proper way to do this is to have the pytest config setup.
@pytest.mark.skipif(
pytest_timeout is None,
reason="Timeout package not installed; skipping test that may hang.")
@pytest.mark.timeout(30)
def test_replenish_resources(ray_start_regular):
cluster_resources = ray.cluster_resources()
available_resources = ray.available_resources()
assert cluster_resources == available_resources
@ray.remote
def cpu_task():
pass
ray.get(cpu_task.remote())
resources_reset = False
while not resources_reset:
available_resources = ray.available_resources()
resources_reset = (cluster_resources == available_resources)
assert resources_reset
@pytest.mark.skipif(
pytest_timeout is None,
reason="Timeout package not installed; skipping test that may hang.")
@pytest.mark.timeout(30)
def test_uses_resources(ray_start_regular):
cluster_resources = ray.cluster_resources()
@ray.remote
def cpu_task():
time.sleep(1)
cpu_task.remote()
resource_used = False
while not resource_used:
available_resources = ray.available_resources()
resource_used = available_resources.get(
"CPU", 0) == cluster_resources.get("CPU", 0) - 1
assert resource_used
@pytest.mark.skipif(
pytest_timeout is None,
reason="Timeout package not installed; skipping test that may hang.")
@pytest.mark.timeout(120)
def test_add_remove_cluster_resources(ray_start_cluster_head):
"""Tests that Global State API is consistent with actual cluster."""
cluster = ray_start_cluster_head
assert ray.cluster_resources()["CPU"] == 1
nodes = []
nodes += [cluster.add_node(num_cpus=1)]
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 2
cluster.remove_node(nodes.pop())
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 1
for i in range(5):
nodes += [cluster.add_node(num_cpus=1)]
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 6
def test_global_state_actor_table(ray_start_regular):
@ray.remote
class Actor:
def ready(self):
pass
# actor table should be empty at first
assert len(ray.state.actors()) == 0
# actor table should contain only one entry
a = Actor.remote()
ray.get(a.ready.remote())
assert len(ray.state.actors()) == 1
# actor table should contain only this entry
# even when the actor goes out of scope
del a
def get_state():
return list(ray.state.actors().values())[0]["State"]
dead_state = convert_actor_state(gcs_utils.ActorTableData.DEAD)
for _ in range(10):
if get_state() == dead_state:
break
else:
time.sleep(0.5)
assert get_state() == dead_state
def test_global_state_worker_table(ray_start_regular):
# Get worker table from gcs.
workers_data = ray.state.workers()
assert len(workers_data) == 1
def test_global_state_actor_entry(ray_start_regular):
@ray.remote
class Actor:
def ready(self):
pass
# actor table should be empty at first
assert len(ray.state.actors()) == 0
a = Actor.remote()
b = Actor.remote()
ray.get(a.ready.remote())
ray.get(b.ready.remote())
assert len(ray.state.actors()) == 2
a_actor_id = a._actor_id.hex()
b_actor_id = b._actor_id.hex()
assert ray.state.actors(actor_id=a_actor_id)["ActorID"] == a_actor_id
assert ray.state.actors(
actor_id=a_actor_id)["State"] == convert_actor_state(
gcs_utils.ActorTableData.ALIVE)
assert ray.state.actors(actor_id=b_actor_id)["ActorID"] == b_actor_id
assert ray.state.actors(
actor_id=b_actor_id)["State"] == convert_actor_state(
gcs_utils.ActorTableData.ALIVE)
@pytest.mark.parametrize("max_shapes", [0, 2, -1])
def test_load_report(shutdown_only, max_shapes):
resource1 = "A"
resource2 = "B"
cluster = ray.init(
num_cpus=1,
resources={resource1: 1},
_system_config={
"max_resource_shapes_per_load_report": max_shapes,
})
global_state_accessor = make_global_state_accessor(cluster)
@ray.remote
def sleep():
time.sleep(1000)
sleep.remote()
for _ in range(3):
sleep.remote()
sleep.options(resources={resource1: 1}).remote()
sleep.options(resources={resource2: 1}).remote()
class Checker:
def __init__(self):
self.report = None
def check_load_report(self):
message = global_state_accessor.get_all_resource_usage()
if message is None:
return False
resource_usage = gcs_utils.ResourceUsageBatchData.FromString(
message)
self.report = \
resource_usage.resource_load_by_shape.resource_demands
if max_shapes == 0:
return True
elif max_shapes == 2:
return len(self.report) >= 2
else:
return len(self.report) >= 3
# Wait for load information to arrive.
checker = Checker()
wait_for_condition(checker.check_load_report)
# Check that we respect the max shapes limit.
if max_shapes != -1:
assert len(checker.report) <= max_shapes
print(checker.report)
if max_shapes > 0:
# Check that we differentiate between infeasible and ready tasks.
for demand in checker.report:
if resource2 in demand.shape:
assert demand.num_infeasible_requests_queued > 0
assert demand.num_ready_requests_queued == 0
else:
assert demand.num_ready_requests_queued > 0
assert demand.num_infeasible_requests_queued == 0
global_state_accessor.disconnect()
def test_placement_group_load_report(ray_start_cluster):
cluster = ray_start_cluster
# Add a head node that doesn't have gpu resource.
cluster.add_node(num_cpus=4)
global_state_accessor = make_global_state_accessor(
ray.init(address=cluster.address))
class PgLoadChecker:
def nothing_is_ready(self):
resource_usage = self._read_resource_usage()
if not resource_usage:
return False
if resource_usage.HasField("placement_group_load"):
pg_load = resource_usage.placement_group_load
return len(pg_load.placement_group_data) == 2
return False
def only_first_one_ready(self):
resource_usage = self._read_resource_usage()
if not resource_usage:
return False
if resource_usage.HasField("placement_group_load"):
pg_load = resource_usage.placement_group_load
return len(pg_load.placement_group_data) == 1
return False
def two_infeasible_pg(self):
resource_usage = self._read_resource_usage()
if not resource_usage:
return False
if resource_usage.HasField("placement_group_load"):
pg_load = resource_usage.placement_group_load
return len(pg_load.placement_group_data) == 2
return False
def _read_resource_usage(self):
message = global_state_accessor.get_all_resource_usage()
if message is None:
return False
resource_usage = gcs_utils.ResourceUsageBatchData.FromString(
message)
return resource_usage
checker = PgLoadChecker()
# Create 2 placement groups that are infeasible.
pg_feasible = ray.util.placement_group([{"A": 1}])
pg_infeasible = ray.util.placement_group([{"B": 1}])
_, unready = ray.wait(
[pg_feasible.ready(), pg_infeasible.ready()], timeout=0)
assert len(unready) == 2
wait_for_condition(checker.nothing_is_ready)
# Add a node that makes pg feasible. Make sure load include this change.
cluster.add_node(resources={"A": 1})
ray.get(pg_feasible.ready())
wait_for_condition(checker.only_first_one_ready)
# Create one more infeasible pg and make sure load is properly updated.
pg_infeasible_second = ray.util.placement_group([{"C": 1}])
_, unready = ray.wait([pg_infeasible_second.ready()], timeout=0)
assert len(unready) == 1
wait_for_condition(checker.two_infeasible_pg)
global_state_accessor.disconnect()
def test_backlog_report(shutdown_only):
cluster = ray.init(
num_cpus=1,
_system_config={
"max_pending_lease_requests_per_scheduling_category": 1
})
global_state_accessor = make_global_state_accessor(cluster)
@ray.remote(num_cpus=1)
def foo(x):
print(".")
time.sleep(x)
return None
def backlog_size_set():
message = global_state_accessor.get_all_resource_usage()
if message is None:
return False
resource_usage = gcs_utils.ResourceUsageBatchData.FromString(message)
aggregate_resource_load = \
resource_usage.resource_load_by_shape.resource_demands
if len(aggregate_resource_load) == 1:
backlog_size = aggregate_resource_load[0].backlog_size
print(backlog_size)
            # Ideally we'd want to assert backlog_size == 8, but guaranteeing
            # the order in which submissions will occur is too hard/flaky.
return backlog_size > 0
return False
# We want this first task to finish
refs = [foo.remote(0.5)]
# These tasks should all start _before_ the first one finishes.
refs.extend([foo.remote(1000) for _ in range(9)])
# Now there's 1 request running, 1 queued in the raylet, and 8 queued in
# the worker backlog.
ray.get(refs[0])
# First request finishes, second request is now running, third lease
# request is sent to the raylet with backlog=7
wait_for_condition(backlog_size_set, timeout=2)
global_state_accessor.disconnect()
def test_heartbeat_ip(shutdown_only):
cluster = ray.init(num_cpus=1)
global_state_accessor = make_global_state_accessor(cluster)
self_ip = ray.util.get_node_ip_address()
def self_ip_is_set():
message = global_state_accessor.get_all_resource_usage()
if message is None:
return False
resource_usage = gcs_utils.ResourceUsageBatchData.FromString(message)
resources_data = resource_usage.batch[0]
return resources_data.node_manager_address == self_ip
wait_for_condition(self_ip_is_set, timeout=2)
global_state_accessor.disconnect()
def test_next_job_id(ray_start_regular):
job_id_1 = ray.state.next_job_id()
job_id_2 = ray.state.next_job_id()
assert job_id_1.int() + 1 == job_id_2.int()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
from PIL import Image
from PIL import ImageOps
import numpy as np
from scipy import ndimage
class PicEditor:
    # Returns all sub-images that might contain a digit -> ALL digit images found in the input image.
def getAll(self, original):
img = original.copy() # we don't change the original, please.
#Let's darken it again, but make the "background" white
px = img.load()
for i in range(img.size[0]):
for j in range(img.size[1]):
if (px[i,j]) < 225:
px[i,j] = int(px[i,j] - 0.5*px[i,j])
else:
px[i,j] = 255
img = self.removeWhites(img)
splitted, right, left = self.split(img)
if not splitted:
img = self.removeWhites(img)
img = self.resize(img)
img = self.recenter(img)
#Now we can recontrast it (for real)
img = self.recolor(img)
img.show()
#But we have to do the other things again.
img = self.removeWhites(img) #Height-diff, again (maybe)
img = self.resize(img)
img = self.recenter(img)
return (False, [img]) #Let's pretend there is a need for a list - Otherwise we have to recode other things...
img = left
img = self.resize(img)
recentered = self.recenter(img)
images = [recentered]
while splitted:
splitted, right, left = self.split(right)
if splitted:
img = left
img = self.removeWhites(img)
img = self.resize(img)
img = self.recenter(img)
#Now we can recontrast it (for real)
img = self.recolor(left)
#But we have to do the other things again.
img = self.removeWhites(img) #Height-diff, again (maybe)
img = self.resize(img)
img = self.recenter(img)
images.append(img)
else:
                # nothing was split, so this is the last piece.
right = self.removeWhites(right)
right = self.resize(right)
right = self.recenter(right)
images.append(right)
return (True, images)
    # checks whether the image can be split at a fully white column (scanning from left to right)
def split(self, img):
img = self.removeWhites(img)
px = np.array(img)
found = False
for x in range(len(px[0])):
if (min(px[:,x]) == 255):
found = True
px2 = np.delete(px,[x for x in range(0, x)],1)
px = np.delete(px,[x for x in range(x-1, len(px[0])-1)],1)
break
if (found):
img = Image.fromarray(px)
next = Image.fromarray(px2)
return (True, next, img)
else:
return (False, img, None)
def recolor(self, img):
"""
px = img.load()
for i in range(img.size[0]):
for j in range(img.size[1]):
if (px[i,j]) < 225:
px[i,j] = int(px[i,j] - 0.5*px[i,j])
else:
px[i,j] = 255
"""
        img = ImageOps.autocontrast(img)
        return img
def removeWhites(self, img):
px = np.array(img)
#left:
while min(px[:,0]) == 255:
px = np.delete(px,0,1)
#right:
while min(px[:,-1]) == 255:
px = np.delete(px,-1,1)
#top:
while min(px[0]) == 255:
px = px[1:]
#bottom:
while min(px[-1]) == 255:
px = px[:-1]
img = Image.fromarray(px)
return img
def resize(self, img):
width, height = img.size
#Exactly the same sizes? - NICE!
if (width == height):
img = img.resize((20,20), resample=Image.BICUBIC)
elif (width < height):
ratio = width / height
newWidth = round(20*ratio)
if (newWidth == 0): #Could happen. Somehow...
newWidth = 1
img = img.resize((newWidth,20), resample=Image.BICUBIC)
else:
ratio = height / width
newHeight = round(20*ratio)
if newHeight == 0:
newHeight = 1
img = img.resize((20, newHeight), resample=Image.BICUBIC)
return img
def recenter(self, img):
px = img.load()
newImg = Image.new("L", (28,28), "white") #We will put "our" picture into this bad boy (later)
pixels = newImg.load()
cy, cx = ndimage.measurements.center_of_mass(np.array(img))
cy,cx = round(cy), round(cx)
for i in range(img.size[0]):
for j in range(img.size[1]):
if (i+14-cx >= 0) and (j+14-cy >= 0) and (i+14-cx < newImg.size[0]) and (j+14-cy < newImg.size[1]):
pixels[i+14-cx,j+14-cy] = px[i,j]
img = newImg
return img
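# A minimal usage sketch (illustrative only; 'digits.png' is a hypothetical grayscale
# image containing one or more handwritten digits separated by white columns).
if __name__ == "__main__":
    editor = PicEditor()
    source = Image.open("digits.png").convert("L")
    was_split, digit_images = editor.getAll(source)
    print("found {} candidate digit image(s)".format(len(digit_images)))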
|
import json
from django.contrib.auth.models import Permission
from django.urls import reverse
from core.tests import BaseAPITestCase
from snippets.models import Snippet, SnippetFavorite
class SnippetFavoriteListAPIViewTestCase(BaseAPITestCase):
url = reverse("snippetfavorite-list")
def setUp(self):
super().setUp()
self.user1.user_permissions.add(
Permission.objects.get(codename="view_snippetfavorite"),
)
def test_user_snippet_favorite(self):
"""
User can see his own snippet favorites
"""
snippet_count = Snippet.objects.count()
snippet = Snippet.objects.create(user=self.user1, title="Python snippet")
SnippetFavorite.objects.create(user=self.user1, snippet=snippet)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)), snippet_count + 1)
def test_foreign_snippet_favorite(self):
"""
User cannot see snippet favorites of other users
"""
snippet_count = Snippet.objects.count()
snippet = Snippet.objects.create(user=self.user2, title="Python snippet")
SnippetFavorite.objects.create(user=self.user2, snippet=snippet)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)), snippet_count)
def test_no_permission(self):
self.api_authentication(self.token2)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
class SnippetFavoriteListAPICreateTestCase(BaseAPITestCase):
url = reverse("snippetfavorite-list")
def setUp(self):
super().setUp()
self.snippet1 = Snippet.objects.create(user=self.user1, title="Snippet user 1")
self.snippet2 = Snippet.objects.create(user=self.user2, title="Snippet user 2")
self.user1.user_permissions.add(
Permission.objects.get(codename="view_snippet"),
Permission.objects.get(codename="add_snippetfavorite"),
)
def test_user_snippet_favorite(self):
"""
        User should be able to create a new snippet favorite since he has received the required permissions.
Logged in user will be assigned automatically
"""
response = self.client.post(
self.url,
{
"snippet": self.snippet1.pk,
},
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data["snippet"], self.snippet1.pk)
# Test duplicate
response = self.client.post(
self.url,
{
"snippet": self.snippet1.pk,
},
)
self.assertEqual(response.status_code, 400)
def test_foreign_user_snippet(self):
"""
User should not be able to create favorites on snippets of other users
"""
response = self.client.post(
self.url,
{
"snippet": self.snippet2.pk,
},
)
self.assertEqual(response.status_code, 400)
def test_no_permission(self):
self.api_authentication(self.token2)
response = self.client.post(
self.url,
{
"snippet": self.snippet1.pk,
},
)
self.assertEqual(response.status_code, 403)
class SnippetFavoriteDetailAPIVBaseTestCase(BaseAPITestCase):
def setUp(self):
super().setUp()
self.snippet = Snippet.objects.create(user=self.user1, title="Python snippet")
self.snippet_favorite = SnippetFavorite.objects.create(user=self.user1, snippet=self.snippet)
self.url = reverse("snippetfavorite-detail", kwargs={"pk": self.snippet.pk})
self.snippet_favorite_count = Snippet.objects.count()
class SnippetFavoriteDetailAPIViewTestCase(SnippetFavoriteDetailAPIVBaseTestCase):
def setUp(self):
super().setUp()
self.user1.user_permissions.add(
Permission.objects.get(codename="view_snippetfavorite"),
)
def test_user_snippet_favorite(self):
"""
User should see snippet favorites that are assigned to him
"""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(SnippetFavorite.objects.count(), self.snippet_favorite_count)
self.assertEqual(response.data["snippet"], self.snippet.pk)
def test_foreign_user_snippet_favorite(self):
"""
User should not see snippet favorites that are not assigned to him
"""
self.snippet_favorite.user = self.user2
self.snippet_favorite.save()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
self.assertEqual(SnippetFavorite.objects.count(), self.snippet_favorite_count)
def test_no_permission(self):
self.api_authentication(self.token2)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
self.assertEqual(SnippetFavorite.objects.count(), self.snippet_favorite_count)
class SnippetFavoriteDetailAPIUpdateTestCase(SnippetFavoriteDetailAPIVBaseTestCase):
def setUp(self):
super().setUp()
self.user1.user_permissions.add(
Permission.objects.get(codename="change_snippetfavorite"),
)
    def test_user_snippet_favorite(self):
"""
Update own snippet favorites is not allowed
"""
response = self.client.put(
self.url,
{
"snippet": self.snippet.pk,
},
)
self.assertEqual(response.status_code, 403)
self.assertEqual(SnippetFavorite.objects.count(), self.snippet_favorite_count)
def test_foreign_user_snippet_favorite(self):
"""
Update foreign snippet favorites is not allowed
"""
self.snippet_favorite.user = self.user2
self.snippet_favorite.save()
response = self.client.put(
self.url,
{
"snippet": self.snippet.pk,
},
)
self.assertEqual(response.status_code, 404)
self.assertEqual(SnippetFavorite.objects.count(), self.snippet_favorite_count)
def test_no_permission(self):
self.api_authentication(self.token2)
response = self.client.put(self.url, {})
self.assertEqual(response.status_code, 403)
self.assertEqual(SnippetFavorite.objects.count(), self.snippet_favorite_count)
response = self.client.patch(self.url, {})
self.assertEqual(response.status_code, 403)
self.assertEqual(SnippetFavorite.objects.count(), self.snippet_favorite_count)
class SnippetFavoriteDetailAPIDeleteTestCase(SnippetFavoriteDetailAPIVBaseTestCase):
def setUp(self):
super().setUp()
self.user1.user_permissions.add(
Permission.objects.get(codename="view_snippetfavorite"),
Permission.objects.get(codename="delete_snippetfavorite"),
)
def test_user_snippet_favorite(self):
response = self.client.delete(self.url)
self.assertEqual(response.status_code, 204)
self.assertEqual(SnippetFavorite.objects.count(), self.snippet_favorite_count - 1)
def test_foreign_user_snippet_favorite(self):
self.snippet_favorite.user = self.user2
self.snippet_favorite.save()
response = self.client.delete(self.url)
self.assertEqual(response.status_code, 404)
self.assertEqual(SnippetFavorite.objects.count(), self.snippet_favorite_count)
def test_no_permission(self):
self.api_authentication(self.token2)
response = self.client.delete(self.url)
self.assertEqual(response.status_code, 403)
self.assertEqual(SnippetFavorite.objects.count(), self.snippet_favorite_count)
|
# current exe version: 2020.12.29.0000.0000
# @category __UserScripts
# @menupath Tools.Scripts.ffxiv_idarename
from __future__ import print_function
import os
import yaml
try:
from typing import Any, Dict, List, Optional, Union # noqa
except ImportError:
pass
import sys
import itertools
from abc import abstractmethod
from collections import deque
if sys.version_info[0] >= 3:
long = int
# region Api
class BaseApi(object):
@property
@abstractmethod
def data_file_path(self):
"""
Get the Path to the data.yml File
:return: Path to the data.yml File
:rtype: str
"""
@abstractmethod
def get_image_base(self):
"""
Get the image base ea
:return: Image base ea
:rtype: int
"""
@abstractmethod
def is_offset(self, ea):
"""
Is the given address an offset to something else
:param ea: Effective address
:type ea: int
:return: Is offset or not
:rtype: bool
"""
@abstractmethod
def xrefs_to(self, ea):
"""
Retrieve all xrefs to the given address
:param ea: Effective address
:type ea: int
:return: List of addresses
:rtype: List[int]
"""
@abstractmethod
def get_qword(self, ea):
"""
Read a qword of data from an address
:param ea: Effective address
:type ea: int
:return: 64bits of data
:rtype: int
"""
@abstractmethod
def get_addr_name(self, ea):
"""
Get the name of a given address
:param ea: Effective address
:type ea: int
:return: Name or empty
:rtype: str
"""
@abstractmethod
def set_addr_name(self, ea, name):
"""
Set the name of a given address (function)
:param ea: Effective address
:type ea: int
:param name: Name to set
:type name: str
:return: Success/failure
:rtype: bool
"""
@abstractmethod
def get_comment(self, ea):
"""
Get the comment at an address
:param ea: Effective address
:type ea: int
:return: Comment
:rtype: str
"""
@abstractmethod
def set_comment(self, ea, comment):
"""
Add a comment to an address
:param ea: Effective address
:type ea: int
:param comment: Comment
:type comment: str
:return: None
"""
def format_class_name_for_vtbl(self, class_name):
"""
Format a class name for special representation in the vtbl
By default, looks similar to vtbl_Component::Gui::AtkResNode
:param class_name: Name as contained in data.yml
:type class_name: str
:return: Formatted class name
:rtype: str
"""
return "vtbl_{0}".format(class_name)
def format_class_name(self, class_name):
"""
Format a class name for representation in the toolset
By default, looks similar to Component::Gui::AtkResNode
:param class_name: Name as contained in data.yml
:type class_name: str
:return: Formatted class name
:rtype: str
"""
return class_name
@abstractmethod
def format_vfunc_name(self, ea, current_func_name, proposed_func_name, class_name, parent_class_names):
"""
Name a function (vfunc) based on how it is currently named, what it is proposed to be named,
and the names of the current and parent classes. If None is returned, it is assumed
that the current func was named something unexpectedly and will be warned about. Return
an empty string "" if no renaming should take place.
:param ea: Func effective address
:type ea: int
:param current_func_name: Current func name as in the disassembler
:type current_func_name: str
:param proposed_func_name: Either a name from data.yml or vfX where X is the vfunc index
:type proposed_func_name: str
:param class_name: Class name
:type class_name: str
:param parent_class_names: Parent class names
:type parent_class_names: List[str]
:return: Formatted vfunc name
:rtype: Optional[str]
"""
@abstractmethod
def format_func_name(self, ea, current_func_name, proposed_func_name, class_name):
"""
Name a function based on how it is currently named, what it is proposed to be named,
and the name of the current class. If None is returned, it is assumed that the current
func was named something unexpectedly and will be warned about. Return an empty string ""
if no renaming should take place.
:param ea: Func effective address
:type ea: int
:param current_func_name: Current func name as in the disassembler
:type current_func_name: str
:param proposed_func_name: A name from data.yml
:type proposed_func_name: str
:param class_name: Class name
:type class_name: str
:return: Formatted func name
:rtype: Optional[str]
"""
@abstractmethod
def write_vtbl_struct(self, struct_name, struct_member_names):
"""
Write a vtbl struct for use in decompiled source code, should not be applied to the vtbl itself.
:param struct_name: Struct name, this will be the output of format_class_name_for_vtbl
:type struct_name: str
:param struct_member_names: List of struct names, no missing indexes
:type struct_member_names: List[str]
:return: None
:rtype: None
"""
api = None
# region IDA Api
if api is None:
try:
import idaapi # noqa
import idc # noqa
import idautils # noqa
except ImportError:
print("Warning: Unable to load IDA")
else:
# noinspection PyUnresolvedReferences
class IdaApi(BaseApi):
@property
def data_file_path(self):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "data.yml")
def get_image_base(self):
return idaapi.get_imagebase()
def is_offset(self, ea):
return idc.is_off0(idc.get_full_flags(ea))
def xrefs_to(self, ea):
return [xref.to for xref in idautils.XrefsTo(ea)]
def get_qword(self, ea):
return idc.get_qword(ea)
def get_addr_name(self, ea):
return idc.get_name(ea)
def set_addr_name(self, ea, name):
result = idc.set_name(ea, name)
return bool(result)
def get_comment(self, ea):
                return idc.get_cmt(ea, False)
def set_comment(self, ea, comment):
idc.set_cmt(ea, comment, False)
def format_vfunc_name(self, ea, current_func_name, proposed_func_name, class_name, parent_class_names):
if current_func_name.startswith("j_"): # jump
current_func_name = current_func_name.lstrip("j_")
if current_func_name.startswith("qword_"):
idc.auto_mark_range(ea, ea + 1, idc.AU_CODE)
idc.create_insn(ea)
idc.add_func(ea)
current_func_name = api.get_addr_name(ea)
print("Info: qword in vtbl of {1} at 0x{0:X}, it may be an offset to undefined code".format(ea, class_name))
# Previously renamed as a vfunc
if current_func_name.startswith(class_name):
# return the proposed func name in case it was updated since last run
current_class_name = current_func_name.rsplit(".", 1)[0]
return "{0}.{1}".format(current_class_name, proposed_func_name)
# This should have been handled in the parent class
if any(current_func_name.startswith(name) for name in parent_class_names):
return ""
if current_func_name.startswith("sub_"):
return "{0}.{1}".format(class_name, proposed_func_name)
if current_func_name.startswith("nullsub_"):
return "{0}.{1}_nullsub".format(class_name, proposed_func_name)
if current_func_name.startswith("loc_"):
return "{0}.{1}_loc".format(class_name, proposed_func_name)
if current_func_name.startswith("locret_"):
return "{0}.{1}_locret".format(class_name, proposed_func_name)
# Name it later in a child class when it gets overridden
if current_func_name == "_purecall":
return ""
# Mangled func names, thanks IDA
if current_func_name.startswith("?") or current_func_name.startswith("_"):
return "{0}.{1}".format(class_name, proposed_func_name)
return None
def format_func_name(self, ea, current_func_name, proposed_func_name, class_name):
if current_func_name.startswith("j_"): # jump
current_func_name = current_func_name.lstrip("j_")
proposed_qualified_func_name = "{0}.{1}".format(class_name, proposed_func_name)
if current_func_name == proposed_qualified_func_name:
return ""
if any(current_func_name.startswith(prefix) for prefix in ("sub_", "nullsub_", "loc_", "qword_")):
return proposed_qualified_func_name
return None
def write_vtbl_struct(self, vtbl_name, struct_member_names):
struct_name = "{0}_struct".format(vtbl_name)
sid = idc.get_struc_id(struct_name)
if sid == idc.BADADDR:
# Doesn't exist
sid = idc.add_struc(-1, struct_name, is_union=0)
else:
# Clear existing
member_offset = idc.get_first_member(sid)
while member_offset != idc.BADADDR:
idc.del_struc_member(sid, member_offset)
member_offset = idc.get_first_member(sid)
for member_name in struct_member_names:
idc.add_struc_member(sid, member_name, offset=-1, flag=idc.FF_DATA | idc.FF_QWORD, typeid=-1, nbytes=8, reftype=idc.REF_OFF64)
member_offset = idc.get_last_member(sid)
member_id = idc.get_member_id(sid, member_offset)
idc.SetType(member_id, "void*")
api = IdaApi()
# endregion
# region Ghidra Api
if api is None:
try:
import ghidra
from ghidra.program.model.data import CategoryPath # noqa
from ghidra.program.model.data import StructureDataType # noqa
from ghidra.program.model.data import PointerDataType # noqa
from ghidra.program.model.symbol import SourceType # noqa
except ImportError:
print("Warning: Unable to load Ghidra")
else:
# noinspection PyUnresolvedReferences
class GhidraApi(BaseApi):
@property
def data_file_path(self):
return os.path.join(os.path.dirname(str(sourceFile)), "data.yml")
def get_image_base(self):
return currentProgram.getImageBase().getOffset()
def is_offset(self, ea):
data = getDataAt(toAddr(ea))
if not data: return False
return data.isPointer()
def xrefs_to(self, ea):
return [xref.getFromAddress().getOffset() for xref in getReferencesTo(toAddr(ea))]
def get_qword(self, ea):
return getLong(toAddr(ea))
def get_addr_name(self, ea):
sym = getSymbolAt(toAddr(ea))
if not sym:
return ""
return sym.getName(True)
def set_addr_name(self, ea, name):
createLabel(toAddr(ea), name, True, SourceType.ANALYSIS)
return True
# return createLabel(toAddr(ea), name, True).checkIsValid()
def get_comment(self, ea):
return getEOLComment(toAddr(ea))
def set_comment(self, ea, comment):
if getEOLComment(toAddr(ea)) is None:
setEOLComment(toAddr(ea), comment)
def format_vfunc_name(self, ea, current_func_name, proposed_func_name, class_name, parent_class_names):
if current_func_name.startswith("thunk_"): # jump
current_func_name = current_func_name.lstrip("thunk_")
# Previously renamed as a vfunc
if current_func_name.startswith(class_name):
# return the proposed func name in case it was updated since last run
current_class_name = current_func_name.rsplit(".", 1)[0]
return "{0}.{1}".format(current_class_name, proposed_func_name)
# This should have been handled in the parent class
if any(current_func_name.startswith(name) for name in parent_class_names):
return ""
if any(current_func_name.startswith(prefix) for prefix in ("FUN_", "LAB_", "SUB_", "LOC_", "DAT_")):
return "{0}.{1}".format(class_name, proposed_func_name)
if current_func_name == "_purecall":
return ""
return None
def format_func_name(self, ea, current_func_name, proposed_func_name, class_name):
if current_func_name.startswith("thunk_"): # jump
current_func_name = current_func_name.lstrip("thunk_")
proposed_qualified_func_name = "{0}.{1}".format(class_name, proposed_func_name)
if current_func_name == proposed_qualified_func_name:
return ""
if any(current_func_name.startswith(prefix) for prefix in ("FUN_", "LAB_", "SUB_", "LOC_", "DAT_")):
return proposed_qualified_func_name
return None
def write_vtbl_struct(self, vtbl_name, struct_member_names):
pass
# def get_struct_id(self, name):
# gdt = currentProgram.getDataTypeManager()
# struct = gdt.getDataType(CategoryPath("/___vftables"), name.replace("_struct", ""))
# if struct: return gdt.getID(struct)
# return -1
# def create_struct(self, name):
# structName = name.replace("_struct", "")
# structPath = CategoryPath("/___vftables")
# gdt = currentProgram.getDataTypeManager()
# struct = gdt.getDataType(structPath, structName)
# if not struct:
# struct = StructureDataType(structPath, structName, 0, gdt)
# struct.deleteAll()
# dt = gdt.addDataType(struct, None)
# return gdt.getID(dt)
# def add_struct_member(self, sid, name):
# gdt = currentProgram.getDataTypeManager()
# struct = gdt.getDataType(sid)
# if not struct:
# return False
# member = struct.add(PointerDataType(), 8, name, None)
# return True
# def clear_struct(self, sid):
# gdt = currentProgram.getDataTypeManager()
# struct = gdt.getDataType(sid)
# if not struct:
# return False
# struct.deleteAll()
# return True
api = GhidraApi()
# endregion
if api is None:
raise Exception("Unable to load IDA or Ghidra")
# endregion
def load_data():
with open(api.data_file_path, "r") as fd:
data = yaml.safe_load(fd)
for ea, name in data["globals"].items():
if not isinstance(ea, (int, long)):
print('Warning: {0} has an invalid address {1}'.format(name, ea))
continue
api.set_addr_name(ea, name)
for ea, name in data["functions"].items():
if not isinstance(ea, (int, long)):
print('Warning: {0} has an invalid address {1}'.format(name, ea))
continue
api.set_addr_name(ea, name)
factory = FfxivClassFactory()
for class_name, class_data in data["classes"].items():
if not class_data:
class_data = {}
vtbl_ea = class_data.pop("vtbl", 0x0)
parent_class_name = class_data.pop("inherits_from", "")
vfuncs = class_data.pop("vfuncs", {})
funcs = class_data.pop("funcs", {})
for leftover in class_data:
print("Warning: Extra key \"{0}\" present in {1}".format(leftover, class_name))
factory.register(
class_name=class_name, parent_class_name=parent_class_name,
vtbl_ea=vtbl_ea, vfuncs=vfuncs, funcs=funcs)
factory.finalize()
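# Illustrative sketch of the data.yml layout that load_data() expects. Only the
# keys ("globals", "functions", "classes", "vtbl", "inherits_from", "vfuncs",
# "funcs") are taken from the code above; the addresses and names below are
# hypothetical placeholders.
#
# globals:
#   0x14168E720: g_SomeGlobal
# functions:
#   0x140059470: SomeFreeFunction
# classes:
#   Component::Gui::AtkResNode:
#     vtbl: 0x141818C78
#     inherits_from: Client::System::SomeBaseClass
#     vfuncs:
#       1: Destroy
#     funcs:
#       0x1404D2A50: Init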
class FfxivClassFactory:
_vtbl_addresses = [] # type: List[int]
_classes = {} # type: Dict[str, FfxivClass]
def register(self, class_name, parent_class_name="", vtbl_ea=0x0, vfuncs=None, funcs=None):
"""
Register a class
:param class_name: Class name
:type class_name: str
:param parent_class_name: Parent class
:type parent_class_name: str
:param vtbl_ea: Vtable effective address
:type vtbl_ea: int
        :param vfuncs: Mapping of vtbl index to func names
        :type vfuncs: Dict[int, str]
        :param funcs: Mapping of effective addresses to func names
        :type funcs: Dict[int, str]
:return: None
:rtype: None
"""
if not vfuncs:
vfuncs = {}
if not funcs:
funcs = {}
if vtbl_ea != 0x0 and vtbl_ea in self._vtbl_addresses:
print("Error: Multiple vtables are defined at 0x{0:X}".format(vtbl_ea))
return
if class_name in self._classes:
print("Error: Multiple classes are registered with the name \"{0}\"".format(class_name))
return
self._vtbl_addresses.append(vtbl_ea)
self._classes[class_name] = FfxivClass(
class_name=class_name, parent_name=parent_class_name,
vtbl_ea=vtbl_ea, vfuncs=vfuncs, funcs=funcs)
def finalize(self):
"""
Perform the class naming
:return: None
:rtype: None
"""
self._resolve_parent_classes()
# We write the vtbl names first for an added check so that
# the size finder will not advance past a named offset.
for cls in self._classes.values():
if cls.vtbl_ea != 0:
cls.write_vtbl_name()
for cls in self._classes.values():
self._finalize_class(cls)
def _resolve_parent_classes(self):
"""
Set FfxivClass.parent_class to _classes[FfxivClass.parent_class_name]
If missing, warn the user and add a stub entry.
:return: None
"""
for class_name, cls in list(self._classes.items()):
if cls.parent_class is None and cls.parent_name:
if cls.parent_name not in self._classes:
print("Warning: Inherited class \"{0}\" is not documented, add a placeholder entry".format(cls.parent_name))
self.register(class_name=cls.parent_name)
cls.parent_class = self._classes[cls.parent_name]
_finalize_stack = deque()
def _finalize_class(self, cls):
"""
Perform a single class naming
:param cls: Class object
:type cls: FfxivClass
:return: None
:rtype: None
"""
if cls in self._finalize_stack:
names = [c.name for c in self._finalize_stack] + [cls.name]
names = "\n".join([" - {0}".format(name) for name in names])
raise ValueError("Inheritance cycle detected: \n{0}".format(names))
if not cls.finalized:
self._finalize_stack.append(cls)
if cls.parent_class and not cls.parent_class.finalized:
self._finalize_class(cls.parent_class)
cls.finalize()
self._finalize_stack.pop()
class FfxivClass:
STANDARD_IMAGE_BASE = 0x140000000
# This is set when the factory is finalized
parent_class = None # type: FfxivClass
def __init__(self, class_name, parent_name, vtbl_ea, vfuncs, funcs):
"""
Object representing a class
:param class_name: Class name
:type class_name: str
:param parent_name: Parent class
:type parent_name: str
:param vtbl_ea: Vtable effective address
:type vtbl_ea: int
        :param vfuncs: Mapping of vtbl index to func names
        :type vfuncs: Dict[int, str]
:param funcs: Mapping of effective addresses to func names
:type funcs: Dict[int, str]
"""
self.name = class_name
self.parent_name = parent_name
self.vtbl_ea = vtbl_ea
self.vfuncs = vfuncs
self.funcs = funcs
# Offset the vtbl and funcs if the program has been rebased
current_image_base = api.get_image_base()
if self.STANDARD_IMAGE_BASE != current_image_base:
rebase_offset = current_image_base - self.STANDARD_IMAGE_BASE
if self.vtbl_ea != 0x0:
self.vtbl_ea += rebase_offset
for ea in list(funcs.keys()):
funcs[ea + rebase_offset] = funcs.pop(ea)
# region parent_class_names
_parent_names = None
@property
def parent_names(self):
"""
Get the class names of the entire hierarchy as a flat list
:return: List of parent names
:rtype: List[str]
"""
if self._parent_names is None:
self._parent_names = []
current_class = self.parent_class
while current_class:
self._parent_names.append(current_class.name)
current_class = current_class.parent_class
return self._parent_names
# endregion
# region vtbl_size
_vtbl_size = 0
@property
def vtbl_size(self):
"""
Iterate from the vtbl start until a non-offset or xref is encountered.
This strategy implies that the only xref in a vtbl is the first vfunc.
:return: VTable func count
:rtype: int
"""
if self.vtbl_ea == 0x0:
return self._vtbl_size
if self._vtbl_size == 0:
self._vtbl_size = 1 # Set to 1, skip the first entry
for ea in itertools.count(self.vtbl_ea + 8, 8):
if api.get_addr_name(ea) != '':
break
if api.is_offset(ea) and api.xrefs_to(ea) == []:
self._vtbl_size += 1
else:
break
if self.parent_class and self.vtbl_size < self.parent_class.vtbl_size:
print("Error: The sum of \"{0}\"'s parent vtbl sizes ({1}) is greater than the actual class itself ({2})".format(self.name, self.parent_class.vtbl_size, self.vtbl_size))
return self._vtbl_size
# endregion
# region finalized
_finalized = False
@property
def finalized(self):
"""
Has this class and its hierarchy been written out or not
:return: bool yes/no
:rtype: bool
"""
if self.parent_class:
return self._finalized and self.parent_class.finalized
else:
return self._finalized
@finalized.setter
def finalized(self, value):
"""
Set if finalized or not
:param value: Finalized state
:type value: bool
:return: None
:rtype: None
"""
self._finalized = value
# endregion
# region finalize
def finalize(self):
"""
Write out this class
:return: None
:rtype: None
"""
self._inherit_func_names_from_parent()
self._comment_vtbl_with_inheritance_tree()
builder, struct_members = self._build_vtbl()
self._write_vtbl(builder)
self._write_funcs()
vtbl_name = api.format_class_name_for_vtbl(self.name)
api.write_vtbl_struct(vtbl_name, struct_members)
self.finalized = True
def _inherit_func_names_from_parent(self):
"""
A parent is guaranteed to be finalized before the child,
so a parent has all the vfunc names of its parent already.
:return: None
:rtype: None
"""
if self.parent_class:
for idx, parent_vfunc_name in self.parent_class.vfuncs.items():
if idx in self.vfuncs:
print("Warning: 0x{0:X} \"{1}\" overwrites the name of inherited function \"{2}\"".format(self.vtbl_ea, self.name, parent_vfunc_name))
pass
else:
self.vfuncs[idx] = parent_vfunc_name
def _comment_vtbl_with_inheritance_tree(self):
"""
Adds the inheritance tree as a comment to the start of the vtbl.
grandparent_name
parent_name
self_name
:return: None
:rtype: None
"""
comment = api.get_comment(self.vtbl_ea) or ""
indent = 0
for class_name in self.parent_names[-1::-1] + [self.name]:
if comment:
comment += "\n"
comment += (" " * indent) + api.format_class_name_for_vtbl(class_name)
indent += 4
api.set_comment(self.vtbl_ea, comment)
def _build_vtbl(self):
"""
Build a list of (ea, func_name) tuples to be written
:return: List of (ea, func_name) tuples and struct names
:rtype: Tuple[List[Tuple[int, str]], List[str]]
"""
vtbl_builder = []
struct_names = []
# Iterate through each offset
for idx in range(0, self.vtbl_size):
vtbl_vfunc_ea = self.vtbl_ea + idx * 8
vfunc_ea = api.get_qword(vtbl_vfunc_ea) # type: int
current_func_name = api.get_addr_name(vfunc_ea) # type: str
proposed_func_name = self.vfuncs.get(idx, "vf{0}".format(idx))
formatted_class_name = api.format_class_name(self.name)
formatted_parent_class_names = [api.format_class_name(name) for name in self.parent_names]
func_name = api.format_vfunc_name(vfunc_ea, current_func_name, proposed_func_name, formatted_class_name, formatted_parent_class_names)
struct_names.append(proposed_func_name)
if func_name == "":
pass
elif func_name is None:
print("Error: Function at 0x{0:X} had unexpected name \"{1}\" during naming of {2}.{3} (vtbl[{4}])".format(vfunc_ea, current_func_name, self.name, proposed_func_name, idx))
else:
vtbl_builder.append((vfunc_ea, func_name))
return vtbl_builder, struct_names
def write_vtbl_name(self):
"""
Write out the vtbl name.
:return: None
"""
api.set_addr_name(self.vtbl_ea, api.format_class_name_for_vtbl(self.name))
def _write_vtbl(self, builder):
"""
Write out the vtbl as defined by _build_vtbl
:param builder: List of (ea, func_name) tuples
:type builder: List[Tuple[int, str]]
:return: None
:rtype: None
"""
for (func_ea, func_name) in builder:
api.set_addr_name(func_ea, func_name)
def _write_funcs(self):
"""
Write the names of all non-vtbl funcs
:return: None
"""
for func_ea, proposed_func_name in self.funcs.items():
current_func_name = api.get_addr_name(func_ea) # type: str
func_name = api.format_func_name(func_ea, current_func_name, proposed_func_name, self.name)
if func_name == "":
pass
elif func_name is None:
print("Error: Function at 0x{0:X} had unexpected name \"{1}\" during naming of {2}.{3}".format(func_ea, current_func_name, self.name, proposed_func_name))
else:
api.set_addr_name(func_ea, func_name)
# endregion
def __repr__(self):
return "<{0}(\"{1}\")>".format(self.__class__.__name__, self.name)
# endregion
print("Executing")
load_data()
print("Done")
|
"""Adds workflow table.
Revision ID: 7351fa734e2a
Revises: ecdb4e7566f2
Create Date: 2020-09-23 11:42:36.889418
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = "7351fa734e2a"
down_revision = "ecdb4e7566f2"
branch_labels = None
depends_on = None
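# Illustrative usage (assumes a standard Alembic setup for this project):
#   alembic upgrade 7351fa734e2a    # apply this revision
#   alembic downgrade ecdb4e7566f2  # revert to the previous revision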
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"workflow",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(), nullable=True),
sa.Column("description", sa.String(), nullable=True),
sa.Column("is_active", sa.Boolean(), nullable=True),
sa.Column("parameters", sa.JSON(), nullable=True),
sa.Column("resource_id", sa.String(), nullable=True),
sa.Column("plugin_slug", sa.String(), nullable=True),
sa.Column("search_vector", sqlalchemy_utils.types.ts_vector.TSVectorType(), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"ix_workflow_search_vector",
"workflow",
["search_vector"],
unique=False,
postgresql_using="gin",
)
op.create_table(
"workflow_instance",
sa.Column("resource_type", sa.String(), nullable=True),
sa.Column("resource_id", sa.String(), nullable=True),
sa.Column("weblink", sa.String(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("workflow_id", sa.Integer(), nullable=True),
sa.Column("creator_id", sa.Integer(), nullable=True),
sa.Column("status", sa.String(), nullable=True),
sa.Column("incident_id", sa.Integer(), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["creator_id"],
["participant.id"],
),
sa.ForeignKeyConstraint(
["incident_id"],
["incident.id"],
),
sa.ForeignKeyConstraint(
["workflow_id"],
["workflow.id"],
),
sa.PrimaryKeyConstraint("id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("workflow_instance")
op.drop_index("ix_workflow_search_vector", table_name="workflow")
op.drop_table("workflow")
# ### end Alembic commands ###
|
# -*- coding: utf-8 -*-
# sneaky
# author - Quentin Ducasse
# https://github.com/QDucasse
# quentin.ducasse@ensta-bretagne.org
from abc import abstractmethod, ABC
import unicodedata
from sneaky import BaseItem
from scrapy import Spider
from scrapy.http import Request
from scrapy.crawler import CrawlerProcess
class BaseSpider(Spider):
    def __init__(self, base_url, item, pagination,
                 products, product_name, product_price, product_link):
self.base_url = base_url
self.pagination_path = pagination
self.products_path = products
self.product_name_path = product_name
self.product_price_path = product_price
self.product_link_path = product_link
self.item = item
def parse(self, response):
# Scrape the items in the response
for item in self.scrape(response):
yield item
# Scrape the next page url
next_page_url = response.xpath(self.pagination_path)
# If the url of the next page is found, it is concatenated to the base
# one, displayed then fed as a new Request to the crawler
if next_page_url:
next_page = self.handle_next_page(response,next_page_url)
print("Found url: {}".format(next_page))
yield Request(next_page, callback=self.parse)
def scrape(self,response):
# Looks for the products
products = response.xpath(self.products_path)
        # For each one of the products, the name, price and link are extracted
for product in products:
item = BaseItem()
item['name'] = product.xpath(self.product_name_path).get().strip()
scrapped_price = product.xpath(self.product_price_path).get().strip()
item['price'] = self.handle_price(scrapped_price)
scrapped_link = product.xpath(self.product_link_path).get().strip()
item['link'] = self.handle_link(scrapped_link)
yield item
@abstractmethod
def handle_next_page(self,response,scrapped_url):
pass
@abstractmethod
def handle_link(self,scrapped_link):
pass
def handle_price(self,scrapped_price):
return unicodedata.normalize("NFKD", scrapped_price)
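# Illustrative sketch (not part of sneaky itself): a concrete spider built on
# BaseSpider. The site URL and all XPath expressions below are hypothetical
# placeholders; only the BaseSpider/BaseItem interfaces come from this module.
class ExampleShopSpider(BaseSpider):
    name = "example_shop"
    start_urls = ["https://example.com/catalogue/page-1.html"]

    def __init__(self):
        super(ExampleShopSpider, self).__init__(
            base_url="https://example.com",
            item=BaseItem,
            pagination="//li[@class='next']/a/@href",
            products="//article[@class='product']",
            product_name="./h3/a/@title",
            product_price="./p[@class='price']/text()",
            product_link="./h3/a/@href",
        )

    def handle_next_page(self, response, scrapped_url):
        # Join the relative pagination link with the current page URL
        return response.urljoin(scrapped_url.get())

    def handle_link(self, scrapped_link):
        # Prefix relative product links with the base URL
        return self.base_url + scrapped_link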
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Contains code for parsing and building a dictionary from text.
"""
from parlai.core.opt import Opt
from parlai.core.build_data import modelzoo_path
from parlai.utils.bpe import bpe_factory, BPEHelper
from .agents import Agent
from .build_data import make_dir
from collections import defaultdict
import codecs
import copy
import numpy as np
import os
import json
import re
RETOK = re.compile(r'\w+|[^\w\s]|\n', re.UNICODE)
def escape(s):
r"""
Replace potential special characters with escaped version.
For example, \n => \\n and \t => \\t
:param s:
string to escape
"""
return s.replace('\n', '\\n').replace('\t', '\\t').replace('\r', '\\r')
def unescape(s):
r"""
Revert escaped characters back to their special version.
For example, \\n => \n and \\t => \t
:param s:
string to unescape
"""
return s.replace('\\n', '\n').replace('\\t', '\t').replace('\\r', '\r')
def find_ngrams(token_dict, text, n):
"""
Break text into ngrams that appear in ``token_dict``.
:param token_dict:
``dict`` to check for ngrams
:param text:
``str`` to look for ngrams in
:param n:
``int`` max size of ngrams
"""
# base case
if n <= 1:
return text
# tokens committed to output
saved_tokens = []
# tokens remaining to be searched in sentence
search_tokens = text[:]
# tokens stored until next ngram found
next_search = []
while len(search_tokens) >= n:
ngram = ' '.join(search_tokens[:n])
if ngram in token_dict:
# first, search previous unmatched words for smaller ngrams
sub_n = min(len(next_search), n - 1)
saved_tokens.extend(find_ngrams(token_dict, next_search, sub_n))
next_search.clear()
# then add this ngram
saved_tokens.append(ngram)
# then pop this ngram from the remaining words to search
search_tokens = search_tokens[n:]
else:
next_search.append(search_tokens.pop(0))
remainder = next_search + search_tokens
sub_n = min(len(remainder), n - 1)
saved_tokens.extend(find_ngrams(token_dict, remainder, sub_n))
return saved_tokens
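# Illustrative example: with a dictionary that contains the bigram 'new york',
#   find_ngrams({'new york': 1}, ['i', 'love', 'new', 'york'], 2)
# returns ['i', 'love', 'new york'] -- unmatched words are kept as-is and the
# known ngram is merged into a single token.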
class DictionaryAgent(Agent):
"""
Builds and/or loads a dictionary.
The dictionary provides access to the frequency of each token, functions to
translate sentences from tokens to their vectors (list of ints, each int is the
index of a token in the dictionary) and back from vectors to tokenized text.
"""
default_lang = 'english'
default_maxngram = -1
default_minfreq = 0
default_maxtokens = -1
default_null = '__null__'
default_start = '__start__'
default_end = '__end__'
default_unk = '__unk__'
default_tok = 're'
default_lower = False
default_textfields = 'text,labels'
@staticmethod
def add_cmdline_args(argparser):
"""
Add commandline arguments related to the dictionary.
"""
dictionary = argparser.add_argument_group('Dictionary Arguments')
dictionary.add_argument(
'-df',
'--dict-file',
help='path to dictionary file. defaults to [model_file].dict if '
'not set and model_file is set.',
hidden=True,
)
dictionary.add_argument(
'--dict-initpath',
hidden=True,
help='path to a saved dictionary to load tokens / counts from to '
'seed the dictionary with initial tokens and/or frequencies',
)
dictionary.add_argument(
'--dict-language',
default=DictionaryAgent.default_lang,
hidden=True,
help='sets language for the punkt sentence tokenizer',
)
dictionary.add_argument(
'--dict-max-ngram-size',
type=int,
hidden=True,
default=DictionaryAgent.default_maxngram,
help='looks for ngrams of up to this size. this is ignored when '
'building the dictionary. note: this takes approximate '
'runtime of len(sentence)^max_ngram_size',
)
dictionary.add_argument(
'--dict-minfreq',
default=DictionaryAgent.default_minfreq,
type=int,
help='minimum frequency of words to include them in sorted '
'dict or minimum frequency of bpe codecs',
hidden=True,
)
dictionary.add_argument(
'--dict-maxtokens',
default=DictionaryAgent.default_maxtokens,
type=int,
help='max number of tokens to include in dictionary or bpe codecs',
hidden=True,
)
dictionary.add_argument(
'--dict-nulltoken',
default=DictionaryAgent.default_null,
hidden=True,
help='empty token, can be used for padding or just empty values',
)
dictionary.add_argument(
'--dict-starttoken',
default=DictionaryAgent.default_start,
hidden=True,
help='token for starting sentence generation, if needed',
)
dictionary.add_argument(
'--dict-endtoken',
default=DictionaryAgent.default_end,
hidden=True,
help='token for end of sentence markers, if needed',
)
dictionary.add_argument(
'--dict-unktoken',
default=DictionaryAgent.default_unk,
hidden=True,
help='token to return for unavailable words',
)
dictionary.add_argument(
'-tok',
'--dict-tokenizer',
default=DictionaryAgent.default_tok,
help='Which tokenizer to use. Defaults to "split", which splits '
'on whitespace as well as recognizing basic punctuation. '
'Other options include nltk, gpt2 and bytelevelbpe.',
hidden=True,
)
dictionary.add_argument(
'--dict-lower',
default=DictionaryAgent.default_lower,
type='bool',
help='Whether or not to lowercase all text seen.',
hidden=True,
)
dictionary.add_argument(
'--bpe-debug',
action='store_true',
hidden=True,
help='Leave BPE tokens untouched in output. Useful for debugging.',
)
dictionary.add_argument(
'--dict-textfields',
default=DictionaryAgent.default_textfields,
hidden=True,
help='Observation fields which dictionary learns vocabulary from. '
'Tasks with additional fields may add to this list to handle '
'any extra vocabulary.',
)
dictionary = BPEHelper.add_cmdline_args(dictionary)
return dictionary
def __init__(self, opt: Opt, shared=None):
"""
Initialize DictionaryAgent.
"""
self.opt = copy.deepcopy(opt)
self.minfreq = opt.get('dict_minfreq', DictionaryAgent.default_minfreq)
self.null_token = opt.get('dict_nulltoken', DictionaryAgent.default_null)
self.end_token = opt.get('dict_endtoken', DictionaryAgent.default_end)
self.unk_token = opt.get('dict_unktoken', DictionaryAgent.default_unk)
self.start_token = opt.get('dict_starttoken', DictionaryAgent.default_start)
self.max_ngram_size = opt.get(
'dict_max_ngram_size', DictionaryAgent.default_maxngram
)
self.tokenizer = opt.get('dict_tokenizer', DictionaryAgent.default_tok)
self.lower = opt.get('dict_lower', DictionaryAgent.default_lower)
self.maxtokens = opt.get('dict_maxtokens', DictionaryAgent.default_maxtokens)
self.textfields = opt.get(
'dict_textfields', DictionaryAgent.default_textfields
).split(",")
try:
self.tokenizer_fun = getattr(self, self.tokenizer + '_tokenize')
except AttributeError:
raise AttributeError(
'tokenizer type {} not yet supported'.format(self.tokenizer)
)
if shared:
self.freq = shared.get('freq', {})
self.tok2ind = shared.get('tok2ind', {})
self.ind2tok = shared.get('ind2tok', {})
else:
self.freq = defaultdict(int)
self.tok2ind = {}
self.ind2tok = {}
if self.null_token:
self.add_token(self.null_token)
if self.start_token:
# set special start of sentence word token
self.add_token(self.start_token)
if self.end_token:
# set special end of sentence word token
self.add_token(self.end_token)
if self.unk_token:
# set special unknown word token
self.add_token(self.unk_token)
loaded = False
# If data built via pytorch data teacher, we need to load prebuilt dict
if opt.get('dict_file'):
opt['dict_file'] = modelzoo_path(opt.get('datapath'), opt['dict_file'])
if os.path.isfile(opt['dict_file']):
# load pre-existing dictionary
self.load(opt['dict_file'])
loaded = True
if not loaded and opt.get('dict_initpath'):
# load seed dictionary
opt['dict_initpath'] = modelzoo_path(
opt.get('datapath'), opt['dict_initpath']
)
# don't check isfile first, should fail if file not found
self.load(opt['dict_initpath'])
opt['dict_loaded'] = loaded
# cache unk token for later
self._unk_token_idx = self.tok2ind.get(self.unk_token)
# initialize tokenizers
if self.tokenizer == 'nltk':
try:
import nltk
except ImportError:
raise ImportError('Please install nltk (pip install nltk)')
# nltk-specific setup
st_path = 'tokenizers/punkt/{0}.pickle'.format(opt['dict_language'])
try:
self.sent_tok = nltk.data.load(st_path)
except LookupError:
nltk.download('punkt')
self.sent_tok = nltk.data.load(st_path)
self.word_tok = nltk.tokenize.treebank.TreebankWordTokenizer()
elif self.tokenizer in ['bpe', 'gpt2', 'bytelevelbpe', 'slow_bytelevel_bpe']:
self.bpe = bpe_factory(opt, shared)
self.bpe.sync_with_dict(self)
if not shared:
if self.null_token:
# fix count for null token to one billion and three
self.freq[self.null_token] = 1000000003
if self.start_token:
# fix count for start of sentence token to one billion and two
self.freq[self.start_token] = 1000000002
if self.end_token:
# fix count for end of sentence token to one billion and one
self.freq[self.end_token] = 1000000001
if self.unk_token:
# fix count for unknown token to one billion
self.freq[self.unk_token] = 1000000000
if opt.get('dict_file'):
self.save_path = opt['dict_file']
def add_token(self, word):
"""
Add a single token to the dictionary.
"""
if word not in self.tok2ind:
index = len(self.tok2ind)
self.tok2ind[word] = index
self.ind2tok[index] = word
def __contains__(self, key):
"""
Return if the dictionary contains the key.
If key is an int, returns whether the key is in the indices. If key is a str,
return if the token is in the dict of tokens.
"""
if type(key) == int:
return key in self.ind2tok
elif type(key) == str:
return key in self.tok2ind
def _word_lookup(self, key):
# return index from token, or unk_token's index, or None
return self.tok2ind.get(key, self._unk_token_idx)
def _index_lookup(self, key):
# return token from index, or unk_token
return self.ind2tok.get(key, self.unk_token)
def __getitem__(self, key):
"""
Lookup the word or ID.
If key is an int, returns the corresponding token. If it does not exist, return
the unknown token. If key is a str, return the token's index. If the token is
not in the dictionary, return the index of the unknown token. If there is no
unknown token, return ``None``.
"""
if type(key) == str:
return self._word_lookup(key)
if type(key) == int:
return self._index_lookup(key)
def __len__(self):
return len(self.tok2ind)
def __setitem__(self, key, value):
"""
Set the frequency for a word to a value.
If the key is not in the dictionary, add it to the dictionary and set its
frequency to value.
"""
key = str(key)
if self.lower:
key = key.lower()
self.freq[key] = int(value)
self.add_token(key)
def keys(self):
"""
Return all the words in the dictionary.
"""
return self.tok2ind.keys()
def nltk_tokenize(self, text, building=False):
"""
Tokenize using NLTK PunktTokenizer.
Uses nltk-trained PunktTokenizer for sentence tokenization and Treebank Word
Tokenizer for tokenizing words within sentences.
"""
return (
token
for sent in self.sent_tok.tokenize(text)
for token in self.word_tok.tokenize(sent)
)
def gpt2_tokenize(self, text):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def slow_bytelevel_bpe_tokenize(self, text):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def bytelevelbpe_tokenize(self, text):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
@staticmethod
def re_tokenize(text):
r"""
Tokenize using a liberal regular expression.
Find boundaries between word characters, newlines, and non-word
non-whitespace tokens ``(r'[\\w\\n]+ | [^\\w\\s] | \\n')``.
This splits along whitespace and punctuation and keeps the newline as
a token in the returned list.
"""
return RETOK.findall(text)
@staticmethod
def split_tokenize(text):
"""
Tokenize on whitespace and some limited punctuation.
Splits tokens based on whitespace after adding whitespace around
punctuation.
Use re_tokenize if you want more robust handling of punctuation.
"""
return (
text.replace('.', ' . ')
.replace(',', ' , ')
.replace(';', ' ; ')
.replace(':', ' : ')
.replace('!', ' ! ')
.replace('?', ' ? ')
.split()
)
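        # Illustrative example: split_tokenize("hello, world!") returns
        # ['hello', ',', 'world', '!'].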
@staticmethod
def space_tokenize(text):
"""
Tokenize exactly on spaces.
Useful when text is pre-tokenized.
"""
return text.strip().split(' ')
def span_tokenize(self, text):
"""
Tokenize and find starting index of each token in the original string.
"""
tokens = self.tokenize(text)
curr_idx = 0
indices = []
for t in tokens:
while text[curr_idx] != t[0]:
curr_idx += 1
indices.append((curr_idx, curr_idx + len(t)))
curr_idx += len(t)
return tokens, indices
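        # Illustrative example (with the default 're' tokenizer):
        # span_tokenize("hello world") returns
        # (['hello', 'world'], [(0, 5), (6, 11)]).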
def tokenize(self, text, building=False):
"""
Return a sequence of tokens from the iterable.
"""
if self.lower:
text = text.lower()
# calls the selected tokenizer function e.g. 're' => re_tokenize(text)
word_tokens = self.tokenizer_fun(text)
if not building and self.max_ngram_size > 1:
# search for ngrams during parse-time
# TODO(ahm): support build-time ngrams using word2vec heuristic?
word_tokens = find_ngrams(self.tok2ind, word_tokens, self.max_ngram_size)
return word_tokens
def bpe_tokenize(self, text):
"""
Return a sequence of BPE-tokens from the text.
"""
return self.bpe.encode(text)
def add_to_dict(self, tokens):
"""
Build dictionary from the list of provided tokens.
"""
self.built = False
for token in tokens:
self.add_token(token)
self.freq[token] += 1
def remove_tail(self, min_freq):
"""
Remove elements below the frequency cutoff from the dictionary.
"""
to_remove = []
for token, freq in self.freq.items():
if freq < min_freq:
# queue up removals since can't mutate dict during iteration
to_remove.append(token)
for token in to_remove:
del self.freq[token]
idx = self.tok2ind.pop(token)
del self.ind2tok[idx]
def _remove_non_bpe(self):
"""
Set the dictionary vocab to the bpe vocab, merging counts.
"""
to_remove = []
to_add = []
for token, freq in self.freq.items():
tokens = self.bpe_tokenize(token)
if len(tokens) != 1:
for t in tokens:
to_add.append((t, freq))
to_remove.append(token)
for token in to_remove:
del self.freq[token]
idx = self.tok2ind.pop(token)
del self.ind2tok[idx]
for token, freq in to_add:
self.add_token(token)
self.freq[token] += freq
def resize_to_max(self, maxtokens):
"""
Trims the dictionary to the maximum number of tokens.
"""
if maxtokens >= 0 and len(self.tok2ind) > maxtokens:
for k in range(maxtokens, len(self.ind2tok)):
v = self.ind2tok[k]
del self.ind2tok[k]
del self.tok2ind[v]
del self.freq[v]
def load(self, filename):
"""
Load pre-existing dictionary in 'token[<TAB>count]' format.
Initialize counts from other dictionary, or 0 if they aren't included.
"""
print('Dictionary: loading dictionary from {}'.format(filename))
lower_special = self.null_token == self.null_token.lower()
SPECIAL_TOKENS = {'__UNK__', '__NULL__', '__END__', '__START__'}
with codecs.open(filename, 'r', encoding='utf-8', errors='ignore') as read:
for line in read:
split = line.strip().split('\t')
token = unescape(split[0])
if lower_special and token in SPECIAL_TOKENS:
token = token.lower()
cnt = int(split[1]) if len(split) > 1 else 0
self.freq[token] = cnt
self.add_token(token)
print('[ num words = %d ]' % len(self))
def save(self, filename=None, append=False, sort=True):
"""
Save dictionary to file.
Format is 'token<TAB>count' for every token in the dictionary, sorted
by count with the most frequent words first.
If ``append`` (default ``False``) is set to ``True``, appends instead of
overwriting.
If ``sort`` (default ``True``), then first sort the dictionary before saving.
"""
filename = self.opt['dict_file'] if filename is None else filename
if self.tokenizer in ['bpe', 'gpt2', 'bytelevelbpe', 'slow_bytelevel_bpe']:
needs_removal = self.bpe.finalize(
self.freq, num_symbols=self.maxtokens, minfreq=self.minfreq
)
if needs_removal:
self._remove_non_bpe()
elif filename != self.opt.get('dict_file'):
# need to copy over the old codecs file
self.bpe.copy_codecs_file(filename + '.codecs')
if sort and self.bpe.should_sort():
self.sort(trim=False)
elif sort:
self.sort(trim=True)
print('Dictionary: saving dictionary to {}'.format(filename))
make_dir(os.path.dirname(filename))
mode = 'a' if append else 'w'
with open(filename, mode, encoding='utf-8') as write:
for i in self.ind2tok.keys():
tok = self.ind2tok[i]
cnt = self.freq[tok]
write.write('{tok}\t{cnt}\n'.format(tok=escape(tok), cnt=cnt))
# save opt file
with open(filename + '.opt', 'w', encoding='utf-8') as handle:
json.dump(self.opt, handle, indent=4)
# save the byte level bpe model file as well
if self.tokenizer == 'bytelevelbpe':
# This saves filename-vocab.json and filename-merges.txt as
# hugging face tokenizer does
self.bpe.save(os.path.dirname(filename), os.path.basename(filename))
def sort(self, trim=True):
"""
Sort the dictionary.
Inline operation. Rearranges the dictionary so that the elements with
the lowest index have the highest counts. This reindexes the dictionary
according to the sorted frequencies, breaking ties alphabetically by
token.
:param bool trim:
If True, truncate the dictionary based on minfreq and maxtokens.
"""
if trim and self.tokenizer == 'gpt2':
raise RuntimeError("You should not trim the dictionary when using gpt-2.")
if trim and self.tokenizer == 'bytelevelbpe':
raise RuntimeError(
"You should not trim the dictionary when using bytelevelbpe."
)
# sort first by count, then alphabetically
if trim:
self.remove_tail(self.minfreq)
sorted_pairs = sorted(self.freq.items(), key=lambda x: (-x[1], x[0]))
new_tok2ind = {}
new_ind2tok = {}
for i, (tok, _) in enumerate(sorted_pairs):
new_tok2ind[tok] = i
new_ind2tok[i] = tok
self.tok2ind = new_tok2ind
self.ind2tok = new_ind2tok
if trim:
self.resize_to_max(self.maxtokens)
assert len(self.freq) == len(self.ind2tok) == len(self.tok2ind)
return sorted_pairs
def parse(self, txt_or_vec, vec_type=list):
"""
Parse either text or a vector of indices.
        Calls `~txt2vec` if `txt_or_vec` is a string, or `~vec2txt` otherwise.
:param vec_type:
type of the returned vector if the input is a string.
"""
# TODO: try to deprecate this, preferring straight txt2vec
if type(txt_or_vec) == str:
return self.txt2vec(txt_or_vec, vec_type)
else:
return self.vec2txt(txt_or_vec)
def txt2vec(self, text, vec_type=list):
"""
Convert a string to a vector (list of ints).
First runs a sentence tokenizer, then a word tokenizer.
:param type vec_type:
The type of the returned vector if the input is a string. Suggested
``list``, ``tuple``, ``set``, or ``np.ndarray``.
"""
itr = (self._word_lookup(token) for token in self.tokenize(str(text)))
if vec_type == list or vec_type == tuple or vec_type == set:
res = vec_type(itr)
elif vec_type == np.ndarray:
res = np.fromiter(itr, np.int)
else:
raise RuntimeError('Type {} not supported by dict'.format(vec_type))
return res
def vec2txt(self, vector, delimiter=' '):
"""
Convert a vector of IDs to a string.
Converts a vector (iterable of ints) into a string, with each token separated by
the delimiter (default ``' '``).
"""
tokens = [self[int(idx)] for idx in vector]
if self.tokenizer in ['gpt2', 'bpe', 'slow_bytelevel_bpe']:
# if we used a BPE tokenizer we need to rejoin the encodings
text = self.bpe.decode(tokens, vector, delimiter)
elif self.tokenizer == 'bytelevelbpe':
# We add special tokens in the beginning of ParlAI dict but in the
# end of Hugging Face dict,there is an offset of 4 between them.
vector = [
idx + len(self.tok2ind) - 4 if idx < 4 else idx - 4 for idx in vector
]
tokens = [self[int(idx)] for idx in vector]
text = self.bpe.decode(tokens, vector, delimiter)
else:
text = delimiter.join(self[int(idx)] for idx in vector)
return text
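        # Illustrative round trip, where d is a DictionaryAgent instance
        # (the actual indices depend on the loaded dictionary):
        #   vec = d.txt2vec('hello world')   # e.g. [5, 6]
        #   d.vec2txt(vec)                   # 'hello world'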
def act(self):
"""
Add words in the last observation to the dictionary.
This checks any fields in the message present in the --dict-textfields argument
(e.g. "text,labels").
"""
for textfield in self.textfields:
source = self.observation.get(textfield)
if source is None:
continue
# fields may be singleton strings or lists of strings.
# wrap the singleton strings in a list to iterate over them
if type(source) is str:
source = [source]
for text in source:
if text:
self.add_to_dict(self.tokenize(text))
return {'id': 'Dictionary'}
def share(self):
"""
Share internal dicts.
"""
shared = super().share()
shared['freq'] = self.freq
shared['tok2ind'] = self.tok2ind
shared['ind2tok'] = self.ind2tok
return shared
def shutdown(self):
"""
Save on shutdown if ``save_path`` is set.
"""
if hasattr(self, 'save_path'):
self.save(self.save_path)
def __str__(self):
"""
Return string representation of frequencies in dictionary.
"""
return str(self.freq)
|
from __future__ import print_function as _
import os as _os
import sys as _sys
import json
import dash as _dash
# noinspection PyUnresolvedReferences
from ._imports_ import *
from ._imports_ import __all__
if not hasattr(_dash, 'development'):
print('Dash was not successfully imported. '
'Make sure you don\'t have a file '
'named \n"dash.py" in your current directory.', file=_sys.stderr)
_sys.exit(1)
_basepath = _os.path.dirname(__file__)
_filepath = _os.path.abspath(_os.path.join(_basepath, 'package-info.json'))
with open(_filepath) as f:
package = json.load(f)
package_name = package['name'].replace(' ', '_').replace('-', '_')
__version__ = package['version']
_current_path = _os.path.dirname(_os.path.abspath(__file__))
_this_module = _sys.modules[__name__]
_js_dist = [
{
'relative_package_path': 'dash_react_datepicker.min.js',
'external_url': 'https://unpkg.com/{0}@{2}/{1}/{1}.min.js'.format(
package_name, __name__, __version__),
'namespace': package_name
},
{
'relative_package_path': 'dash_react_datepicker.min.js.map',
'external_url': 'https://unpkg.com/{0}@{2}/{1}/{1}.min.js.map'.format(
package_name, __name__, __version__),
'namespace': package_name,
'dynamic': True
}
]
_css_dist = []
for _component in __all__:
setattr(locals()[_component], '_js_dist', _js_dist)
setattr(locals()[_component], '_css_dist', _css_dist)
|
import os, datetime
import numpy as np
import tensorflow as tf
from DataLoader import *
# Dataset Parameters
batch_size = 200
load_size = 256
fine_size = 224
c = 3
data_mean = np.asarray([0.45834960097,0.44674252445,0.41352266842])
# Training Parameters
learning_rate = 0.001
dropout = 0.5 # Dropout, probability to keep units
training_iters = 100000
step_display = 50
step_save = 10000
path_save = 'alexnet'
start_from = ''
import inspect
import os
import numpy as np
import tensorflow as tf
import time
VGG_MEAN = [103.939, 116.779, 123.68]
class VGG16:
def __init__(self, vgg16_npy_path=None):
if vgg16_npy_path is None:
            path = inspect.getfile(VGG16)
path = os.path.abspath(os.path.join(path, os.pardir))
path = os.path.join(path, "vgg16.npy")
vgg16_npy_path = path
print(path)
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
# Change output layer to have 100 classes instead of 1000.
modified_fc8_weights = self.data_dict['fc8'][0][:,:100]
modified_fc8_biases = self.data_dict['fc8'][1][0:100]
self.data_dict['fc8'][0] = modified_fc8_weights
self.data_dict['fc8'][1] = modified_fc8_biases
print('weights:', self.data_dict['fc8'][0].shape)
print('biases:', self.data_dict['fc8'][1].shape)
print("Loaded in weights from .npy file.")
def build(self, rgb):
"""
load variable from npy to build the VGG
:param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
"""
start_time = time.time()
print("Started building model...")
rgb_scaled = rgb * 255.0
# Convert RGB to BGR
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
assert red.get_shape().as_list()[1:] == [224, 224, 1]
assert green.get_shape().as_list()[1:] == [224, 224, 1]
assert blue.get_shape().as_list()[1:] == [224, 224, 1]
bgr = tf.concat(axis=3, values=[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
self.conv1_1 = self.conv_layer(bgr, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
self.pool3 = self.max_pool(self.conv3_3, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.pool4 = self.max_pool(self.conv4_3, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.pool5 = self.max_pool(self.conv5_3, 'pool5')
self.fc6 = self.fc_layer(self.pool5, "fc6")
assert self.fc6.get_shape().as_list()[1:] == [4096]
self.relu6 = tf.nn.relu(self.fc6)
self.fc7 = self.fc_layer(self.relu6, "fc7")
self.relu7 = tf.nn.relu(self.fc7)
self.fc8 = self.fc_layer(self.relu7, "fc8")
self.prob = tf.nn.softmax(self.fc8, name="prob")
print(("build model finished: %ds" % (time.time() - start_time))) # self.data_dict = None
return self.prob
def forward(self, rgb):
""" Feed inputs through the network. """
return self.build(rgb)
def avg_pool(self, bottom, name):
return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def max_pool(self, bottom, name):
return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def conv_layer(self, bottom, name):
with tf.variable_scope(name):
filt = self.get_conv_filter(name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name)
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
return relu
def fc_layer(self, bottom, name):
with tf.variable_scope(name):
shape = bottom.get_shape().as_list()
dim = 1
for d in shape[1:]:
dim *= d
x = tf.reshape(bottom, [-1, dim])
weights = self.get_fc_weight(name)
biases = self.get_bias(name)
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
return fc
def get_conv_filter(self, name, trainable=True):
if trainable:
return tf.Variable(self.data_dict[name][0], name="filter_" + name)
else:
return tf.constant(self.data_dict[name][0], name="filter_" + name)
def get_bias(self, name, trainable=True):
if trainable:
return tf.Variable(self.data_dict[name][1], name="biases_" + name)
else:
return tf.constant(self.data_dict[name][1], name="biases_" + name)
def get_fc_weight(self, name, trainable=True):
if trainable:
return tf.Variable(self.data_dict[name][0], name="weights_" + name)
else:
return tf.constant(self.data_dict[name][0], name="weights_" + name)
# Construct dataloader
opt_data_train = {
#'data_h5': 'miniplaces_256_train.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/train.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': True
}
opt_data_val = {
#'data_h5': 'miniplaces_256_val.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/val.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': False
}
loader_train = DataLoaderDisk(**opt_data_train)
loader_val = DataLoaderDisk(**opt_data_val)
#loader_train = DataLoaderH5(**opt_data_train)
#loader_val = DataLoaderH5(**opt_data_val)
# tf Graph input
x = tf.placeholder(tf.float32, [None, fine_size, fine_size, c])
y = tf.placeholder(tf.int64, None)
keep_dropout = tf.placeholder(tf.float32)
# Construct model
vgg = VGG16(vgg16_npy_path='./vgg16.npy')
logits = vgg.forward(x) # returns the output layer of the network
print('logits:', logits)
# Define loss and optimizer
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# Evaluate model
accuracy1 = tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, y, 1), tf.float32))
accuracy5 = tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, y, 5), tf.float32))
# define initialization
init = tf.global_variables_initializer()
# define saver
saver = tf.train.Saver()
# define summary writer
#writer = tf.train.SummaryWriter('.', graph=tf.get_default_graph())
# Launch the graph
with tf.Session() as sess:
# Initialization
if len(start_from)>1:
saver.restore(sess, start_from)
else:
sess.run(init)
step = 0
while step < training_iters:
# Load a batch of training data
images_batch, labels_batch = loader_train.next_batch(batch_size)
if step % step_display == 0:
print('[%s]:' %(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
# Calculate batch loss and accuracy on training set
l, acc1, acc5 = sess.run([loss, accuracy1, accuracy5], feed_dict={x: images_batch, y: labels_batch, keep_dropout: 1.})
print("-Iter " + str(step) + ", Training Loss= " + \
"{:.4f}".format(l) + ", Accuracy Top1 = " + \
"{:.2f}".format(acc1) + ", Top5 = " + \
"{:.2f}".format(acc5))
# Calculate batch loss and accuracy on validation set
images_batch_val, labels_batch_val = loader_val.next_batch(batch_size)
l, acc1, acc5 = sess.run([loss, accuracy1, accuracy5], feed_dict={x: images_batch_val, y: labels_batch_val, keep_dropout: 1.})
print("-Iter " + str(step) + ", Validation Loss= " + \
"{:.4f}".format(l) + ", Accuracy Top1 = " + \
"{:.2f}".format(acc1) + ", Top5 = " + \
"{:.2f}".format(acc5))
# Run optimization op (backprop)
sess.run(train_optimizer, feed_dict={x: images_batch, y: labels_batch, keep_dropout: dropout})
step += 1
# Save model
if step % step_save == 0:
saver.save(sess, path_save, global_step=step)
print("Model saved at Iter %d !" %(step))
print("Optimization Finished!")
# Evaluate on the whole validation set
print('Evaluation on the whole validation set...')
    num_batch = loader_val.size() // batch_size
acc1_total = 0.
acc5_total = 0.
loader_val.reset()
for i in range(num_batch):
images_batch, labels_batch = loader_val.next_batch(batch_size)
acc1, acc5 = sess.run([accuracy1, accuracy5], feed_dict={x: images_batch, y: labels_batch, keep_dropout: 1.})
acc1_total += acc1
acc5_total += acc5
print("Validation Accuracy Top1 = " + \
"{:.2f}".format(acc1) + ", Top5 = " + \
"{:.2f}".format(acc5))
acc1_total /= num_batch
acc5_total /= num_batch
print('Evaluation Finished! Accuracy Top1 = ' + "{:.4f}".format(acc1_total) + ", Top5 = " + "{:.4f}".format(acc5_total))
|
#!/usr/bin/env python
# The following license does not apply to controller.png
#
# Copyright (c) 2011, Thiago C. (tncardoso.com)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Thiago C. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Thiago C. BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
import sys
import threading
import pygame
import serial
import ctypes
from datetime import datetime, timedelta
class Dummy:
'''Dummy for testing without using serial communication'''
def write(self, txt):
pass
class MasterController:
'''Handle serial communication with arduino'''
def __init__(self):
self._serial = serial.Serial('/dev/ttyACM0', 9600)
#self._serial = Dummy()
# functions programed in the arduino side
# one byte is written to the serial port. The interpretation
# of this byte is coded in the arduino side (loop function).
def pushButtonLeft(self): self._serial.write(chr(1))
def releaseButtonLeft(self): self._serial.write(chr(2))
def pushButtonRight(self): self._serial.write(chr(3))
def releaseButtonRight(self): self._serial.write(chr(4))
def pushButtonDown(self): self._serial.write(chr(5))
def releaseButtonDown(self): self._serial.write(chr(6))
def pushButtonUp(self): self._serial.write(chr(7))
def releaseButtonUp(self): self._serial.write(chr(8))
def pushButtonA(self): self._serial.write(chr(9))
def releaseButtonA(self): self._serial.write(chr(10))
def pushButtonB(self): self._serial.write(chr(11))
def releaseButtonB(self): self._serial.write(chr(12))
class DrawThread(threading.Thread):
'''Thread for drawing the canvas. This task is done in a separate
thread in order to process events with little delay'''
def __init__ (self, gui):
threading.Thread.__init__(self)
self._gui = gui
def run(self):
clock = pygame.time.Clock()
while not self._gui._finished:
self._gui._screen.blit(self._gui._controller, self._gui._controller.get_rect())
self.drawButtonA()
self.drawButtonB()
self.drawButtonUp()
self.drawButtonDown()
self.drawButtonLeft()
self.drawButtonRight()
pygame.display.flip()
clock.tick(30)
# functions for rendering pressed keys
def drawButtonA(self):
if self._gui._button_a:
pygame.draw.circle(self._gui._screen, pygame.Color('red'), (271,149), 20, 6)
def drawButtonB(self):
if self._gui._button_b:
pygame.draw.circle(self._gui._screen, pygame.Color('red'), (331,149), 20, 6)
def drawButtonUp(self):
if self._gui._button_up:
pygame.draw.rect(self._gui._screen, pygame.Color('red'), pygame.Rect((110,95,8,16)))
def drawButtonDown(self):
if self._gui._button_down:
pygame.draw.rect(self._gui._screen, pygame.Color('red'), pygame.Rect((110,153,8,16)))
def drawButtonLeft(self):
if self._gui._button_left:
pygame.draw.rect(self._gui._screen, pygame.Color('red'), pygame.Rect((77,127,16,8)))
def drawButtonRight(self):
if self._gui._button_right:
pygame.draw.rect(self._gui._screen, pygame.Color('red'), pygame.Rect((135,127,16,8)))
class Gui:
'''Graphical interface used to control the arduino'''
def __init__(self, replay=None, record=None):
# init pygame lib
pygame.init()
# check if events should be recorded
if record is None: self._record = None
else: self._record = open(record, 'w')
# check if events should be replayed
if replay is None: self._replay = None
else: self._replay = open(replay, 'r')
self._atime = datetime.now()
# set window size and get screen
self._width = 400
self._height = 270
self._screen = pygame.display.set_mode((self._width, self._height))
self._finished = False
# load resources
self._controller = pygame.image.load('controller.png')
# start serial connection
self._serial = MasterController()
# drawing thread
self._dthread = DrawThread(self)
# set controller state
self._button_a = False
self._button_b = False
self._button_up = False
self._button_down = False
self._button_left = False
self._button_right = False
# actions
# maps event to key to action: e.g. actions[KEYUP][K_z]
self._actions = { pygame.KEYDOWN: {}, pygame.KEYUP: {} }
self._actions[pygame.KEYDOWN][pygame.K_z] = self.pushButtonA
self._actions[pygame.KEYUP][pygame.K_z] = self.releaseButtonA
self._actions[pygame.KEYDOWN][pygame.K_x] = self.pushButtonB
self._actions[pygame.KEYUP][pygame.K_x] = self.releaseButtonB
self._actions[pygame.KEYDOWN][pygame.K_UP] = self.pushButtonUp
self._actions[pygame.KEYUP][pygame.K_UP] = self.releaseButtonUp
self._actions[pygame.KEYDOWN][pygame.K_DOWN] = self.pushButtonDown
self._actions[pygame.KEYUP][pygame.K_DOWN] = self.releaseButtonDown
self._actions[pygame.KEYDOWN][pygame.K_LEFT] = self.pushButtonLeft
self._actions[pygame.KEYUP][pygame.K_LEFT] = self.releaseButtonLeft
self._actions[pygame.KEYDOWN][pygame.K_RIGHT] = self.pushButtonRight
self._actions[pygame.KEYUP][pygame.K_RIGHT] = self.releaseButtonRight
# force arduino release
for key, func in self._actions[pygame.KEYUP].iteritems():
func()
def __del__(self):
if self._record is not None: self._record.close()
def run(self):
self._dthread.start()
# last action time
self._atime = datetime.now()
# check if it is a playback
if self._replay is not None:
self.replay()
while not self._finished:
# only process events in the main thread
for event in pygame.event.get():
if event.type == pygame.QUIT:
self._finished = True
return
elif event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:
self.checkKeyEvent(event.type, event.key)
pygame.time.wait(10)
def replay(self):
# load all replay data into memory and
# prepare execution tuples
print 'loading replay data'
actions = []
for action in self._replay.readlines():
spt = action.split()
if spt[2] == 'P': action = pygame.KEYDOWN
else: action = pygame.KEYUP
if spt[3] == 'A': button = pygame.K_z
elif spt[3] == 'B': button = pygame.K_x
elif spt[3] == 'U': button = pygame.K_UP
elif spt[3] == 'D': button = pygame.K_DOWN
elif spt[3] == 'L': button = pygame.K_LEFT
else: button = pygame.K_RIGHT
sec = int(spt[0])
nsec = int(spt[1])
print 'appending action= %s button= %s sec= %s nsec=%s'%(action,
button, sec, nsec)
actions.append((sec, nsec, action, button))
# check events
for event in pygame.event.get():
if event.type == pygame.QUIT:
self._finished = True
return
# execute replay loop
for sec, nsec, state, button in actions:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self._finished = True
return
nanosleep(sec, nsec)
self.checkKeyEvent(state, button)
def checkKeyEvent(self, state, key):
# not good to use try/except for normal behavior
# but this is faster than using find every time
try:
f = self._actions[state][key]
f()
except KeyError:
# key not mapped
pass
def total_seconds(self, td):
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
def recordAction(self, state, button):
'''Check if action should be recorded'''
if self._record is not None:
now = datetime.now()
td = now - self._atime
self._atime = now
self._record.write('%d %s %s %s\n'%(td.seconds,
td.microseconds * (10**3), state, button))
def pushButtonA(self):
self._serial.pushButtonA()
self._button_a = True
self.recordAction('P','A')
def releaseButtonA(self):
self._serial.releaseButtonA()
self._button_a = False
self.recordAction('R','A')
def pushButtonB(self):
self._serial.pushButtonB()
self._button_b = True
self.recordAction('P','B')
def releaseButtonB(self):
self._serial.releaseButtonB()
self._button_b = False
self.recordAction('R','B')
def pushButtonUp(self):
self._serial.pushButtonUp()
self._button_up = True
self.recordAction('P','U')
def releaseButtonUp(self):
self._serial.releaseButtonUp()
self._button_up = False
self.recordAction('R','U')
def pushButtonDown(self):
self._serial.pushButtonDown()
self._button_down = True
self.recordAction('P','D')
def releaseButtonDown(self):
self._serial.releaseButtonDown()
self._button_down = False
self.recordAction('R','D')
def pushButtonLeft(self):
self._serial.pushButtonLeft()
self._button_left = True
self.recordAction('P','L')
def releaseButtonLeft(self):
self._serial.releaseButtonLeft()
self._button_left = False
self.recordAction('R','L')
def pushButtonRight(self):
self._serial.pushButtonRight()
self._button_right = True
self.recordAction('P','R')
def releaseButtonRight(self):
self._serial.releaseButtonRight()
self._button_right = False
self.recordAction('R','R')
# nanosleep function for better time resolution
libc = ctypes.CDLL('libc.so.6')
class timespec(ctypes.Structure):
# fields: time_t sec and long nsec
_fields_ = [('sec', ctypes.c_long), ('nsec', ctypes.c_long)]
libc.nanosleep.argtypes = [ctypes.POINTER(timespec), ctypes.POINTER(timespec)]
def nanosleep(sec, nsec):
req = timespec()
req.sec = sec
req.nsec = nsec
rem = timespec()
# nanosleep returns -1 in case of interruption and writes the
# remaining time into rem
while libc.nanosleep(req, rem) == -1:
req = rem
rem = timespec()
if __name__ == '__main__':
if len(sys.argv) < 3:
gui = Gui()
else:
if sys.argv[1] == 'record':
gui = Gui(record=sys.argv[2])
elif sys.argv[1] == 'replay':
gui = Gui(replay=sys.argv[2])
gui.run()
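# Hedged note on the replay file format, inferred from recordAction()/replay()
# above: each line is "<seconds> <nanoseconds> <P|R> <A|B|U|D|L|R>", where the
# first two fields are the delay since the previous action, P/R marks press or
# release, and the final letter selects the button. For example:
#   0 500000000 P A   -> wait 0.5 s, then press A
#   1 0 R A           -> wait 1 s more, then release A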
|
# -*- coding: utf-8 -*-
# @Time : 2019/1/3 17:40
# data config
exp_name = "msra/msra_pseudo"
msra_path = '/home/xjc/Dataset/MSRA-TD500/'
hust_path = '/home/xjc/Dataset/HUST-TR400/'
workspace_dir = '/home/xjc/Desktop/CVPR_SemiText/SemiText/PSENet_box_supervision/workspace/'
workspace = ""
gt_name = "msra_gt.zip"
data_shape = 640
# train config
gpu_id = '0'
workers = 10
start_epoch = 0
epochs = 600
train_batch_size = 8
lr = 1e-4
end_lr = 1e-7
lr_gamma = 0.1
lr_decay_step = [100,200]
weight_decay = 5e-4
warm_up_epoch = 6
warm_up_lr = lr * lr_gamma
display_input_images = False
display_output_images = False
visualization = False
is_box_pseudo = True
display_interval = 10
show_images_interval = 50
save_interval=5
pretrained = True
restart_training = True
checkpoint = ''
# net config
backbone = 'resnet50'
Lambda = 0.7
kernel_num = 6
min_scale = 0.5
OHEM_ratio = 3
scale = 1
# random seed
seed = 2
# note: intentionally shadows the built-in print within this config module;
# it returns a pretty-printed dump of all module-level settings
def print():
from pprint import pformat
tem_d = {}
for k, v in globals().items():
if not k.startswith('_') and not callable(v):
tem_d[k] = v
return pformat(tem_d)
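# Hedged usage sketch (assumes this file is importable as a module named `config`,
# which is an assumption, not something stated in this file):
#   import config
#   print(config.print())   # dumps every module-level setting via pprint.pformat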
|
"""
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100], "gamma": [0.01, 0.1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=outer_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print(
"Average difference of {:6f} with std. dev. of {:6f}.".format(
score_difference.mean(), score_difference.std()
)
)
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
(non_nested_scores_line,) = plt.plot(non_nested_scores, color="r")
(nested_line,) = plt.plot(nested_scores, color="b")
plt.ylabel("score", fontsize="14")
plt.legend(
[non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, 0.4, 0.5, 0),
)
plt.title(
"Non-Nested and Nested Cross Validation on Iris Dataset",
x=0.5,
y=1.1,
fontsize="15",
)
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend(
[difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, 0.8, 0),
)
plt.ylabel("score difference", fontsize="14")
plt.show()
|
# -*- coding: utf-8 -*-
"""Subclass of InteractiveShell for terminal based frontends."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import bdb
import os
import re
import sys
import textwrap
# We need to use nested to support python 2.6, once we move to >=2.7, we can
# use the with keyword's new builtin support for nested managers
try:
from contextlib import nested
except ImportError:
from IPython.utils.nested_context import nested
from IPython.core.error import TryNext, UsageError
from IPython.core.usage import interactive_usage, default_banner
from IPython.core.inputsplitter import IPythonInputSplitter
from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils.encoding import get_stream_enc
from IPython.utils import py3compat
from IPython.utils.terminal import toggle_set_term_title, set_term_title
from IPython.utils.process import abbrev_cwd
from IPython.utils.warn import warn, error
from IPython.utils.text import num_ini_spaces, SList, strip_email_quotes
from IPython.utils.traitlets import Integer, CBool, Unicode
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def get_default_editor():
try:
ed = os.environ['EDITOR']
except KeyError:
if os.name == 'posix':
ed = 'vi' # the only one guaranteed to be there!
else:
ed = 'notepad' # same in Windows!
return ed
def get_pasted_lines(sentinel, l_input=py3compat.input):
""" Yield pasted lines until the user enters the given sentinel value.
"""
print("Pasting code; enter '%s' alone on the line to stop or use Ctrl-D." \
% sentinel)
while True:
try:
l = l_input(':')
if l == sentinel:
return
else:
yield l
except EOFError:
print('<EOF>')
return
#------------------------------------------------------------------------
# Terminal-specific magics
#------------------------------------------------------------------------
@magics_class
class TerminalMagics(Magics):
def __init__(self, shell):
super(TerminalMagics, self).__init__(shell)
self.input_splitter = IPythonInputSplitter(input_mode='line')
def cleanup_input(self, block):
"""Apply all possible IPython cleanups to an input block.
This means:
- remove any global leading whitespace (dedent)
- remove any email quotes ('>') if they are present in *all* lines
- apply all static inputsplitter transforms and break into sub-blocks
- apply prefilter() to each sub-block that is a single line.
Parameters
----------
block : str
A possibly multiline input string of code.
Returns
-------
transformed block : str
The input, with all transformations above applied.
"""
# We have to effectively implement client-side the loop that is done by
# the terminal frontend, and furthermore do it on a block that can
# possibly contain multiple statements pasted in one go.
# First, run the input through the block splitting code. We should
# eventually make this a self-contained method in the inputsplitter.
isp = self.input_splitter
isp.reset()
b = textwrap.dedent(block)
# Remove email quotes first. These must be consistently applied to
# *all* lines to be removed
b = strip_email_quotes(b)
# Split the input into independent sub-blocks so we can later do
# prefiltering (which must be done *only* to single-line inputs)
blocks = []
last_block = []
for line in b.splitlines():
isp.push(line)
last_block.append(line)
if not isp.push_accepts_more():
blocks.append(isp.source_reset())
last_block = []
if last_block:
blocks.append('\n'.join(last_block))
# Now, apply prefiltering to any one-line block to match the behavior
# of the interactive terminal
final_blocks = []
for block in blocks:
lines = block.splitlines()
if len(lines) == 1:
final_blocks.append(self.shell.prefilter(lines[0]))
else:
final_blocks.append(block)
# We now have the final version of the input code as a list of blocks,
# with all inputsplitter transformations applied and single-line blocks
# run through prefilter. For further processing, turn into a single
# string as the rest of our apis use string inputs.
return '\n'.join(final_blocks)
def store_or_execute(self, block, name):
""" Execute a block, or store it in a variable, per the user's request.
"""
b = self.cleanup_input(block)
if name:
# If storing it for further editing
self.shell.user_ns[name] = SList(b.splitlines())
print("Block assigned to '%s'" % name)
else:
self.shell.user_ns['pasted_block'] = b
self.shell.using_paste_magics = True
try:
self.shell.run_cell(b)
finally:
self.shell.using_paste_magics = False
def rerun_pasted(self, name='pasted_block'):
""" Rerun a previously pasted command.
"""
b = self.shell.user_ns.get(name)
# Sanity checks
if b is None:
raise UsageError('No previous pasted block available')
if not isinstance(b, basestring):
raise UsageError(
"Variable 'pasted_block' is not a string, can't execute")
print("Re-executing '%s...' (%d chars)"% (b.split('\n',1)[0], len(b)))
self.shell.run_cell(b)
@line_magic
def autoindent(self, parameter_s = ''):
"""Toggle autoindent on/off (if available)."""
self.shell.set_autoindent()
print("Automatic indentation is:",['OFF','ON'][self.shell.autoindent])
@skip_doctest
@line_magic
def cpaste(self, parameter_s=''):
"""Paste & execute a pre-formatted code block from clipboard.
You must terminate the block with '--' (two minus-signs) or Ctrl-D
alone on the line. You can also provide your own sentinel with '%paste
-s %%' ('%%' is the new sentinel for this operation)
The block is dedented prior to execution to enable execution of method
definitions. '>' and '+' characters at the beginning of a line are
ignored, to allow pasting directly from e-mails, diff files and
doctests (the '...' continuation prompt is also stripped). The
executed block is also assigned to variable named 'pasted_block' for
later editing with '%edit pasted_block'.
You can also pass a variable name as an argument, e.g. '%cpaste foo'.
This assigns the pasted block to variable 'foo' as string, without
dedenting or executing it (preceding >>> and + is still stripped)
'%cpaste -r' re-executes the block previously entered by cpaste.
Do not be alarmed by garbled output on Windows (it's a readline bug).
Just press enter and type -- (and press enter again) and the block
will be what was just pasted.
IPython statements (magics, shell escapes) are not supported (yet).
See also
--------
paste: automatically pull code from clipboard.
Examples
--------
::
In [8]: %cpaste
Pasting code; enter '--' alone on the line to stop.
:>>> a = ["world!", "Hello"]
:>>> print " ".join(sorted(a))
:--
Hello world!
"""
opts, name = self.parse_options(parameter_s, 'rs:', mode='string')
if 'r' in opts:
self.rerun_pasted()
return
sentinel = opts.get('s', '--')
block = '\n'.join(get_pasted_lines(sentinel))
self.store_or_execute(block, name)
@line_magic
def paste(self, parameter_s=''):
"""Paste & execute a pre-formatted code block from clipboard.
The text is pulled directly from the clipboard without user
intervention and printed back on the screen before execution (unless
the -q flag is given to force quiet mode).
The block is dedented prior to execution to enable execution of method
definitions. '>' and '+' characters at the beginning of a line are
ignored, to allow pasting directly from e-mails, diff files and
doctests (the '...' continuation prompt is also stripped). The
executed block is also assigned to variable named 'pasted_block' for
later editing with '%edit pasted_block'.
You can also pass a variable name as an argument, e.g. '%paste foo'.
This assigns the pasted block to variable 'foo' as string, without
executing it (preceding >>> and + is still stripped).
Options
-------
-r: re-executes the block previously entered by cpaste.
-q: quiet mode: do not echo the pasted text back to the terminal.
IPython statements (magics, shell escapes) are not supported (yet).
See also
--------
cpaste: manually paste code into terminal until you mark its end.
"""
opts, name = self.parse_options(parameter_s, 'rq', mode='string')
if 'r' in opts:
self.rerun_pasted()
return
try:
block = self.shell.hooks.clipboard_get()
except TryNext as clipboard_exc:
message = getattr(clipboard_exc, 'args')
if message:
error(message[0])
else:
error('Could not get text from the clipboard.')
return
# By default, echo back to terminal unless quiet mode is requested
if 'q' not in opts:
write = self.shell.write
write(self.shell.pycolorize(block))
if not block.endswith('\n'):
write('\n')
write("## -- End pasted text --\n")
self.store_or_execute(block, name)
# Class-level: add a '%cls' magic only on Windows
if sys.platform == 'win32':
@line_magic
def cls(self, s):
"""Clear screen.
"""
os.system("cls")
#-----------------------------------------------------------------------------
# Main class
#-----------------------------------------------------------------------------
class TerminalInteractiveShell(InteractiveShell):
autoedit_syntax = CBool(False, config=True,
help="auto editing of files with syntax errors.")
banner = Unicode('')
banner1 = Unicode(default_banner, config=True,
help="""The part of the banner to be printed before the profile"""
)
banner2 = Unicode('', config=True,
help="""The part of the banner to be printed after the profile"""
)
confirm_exit = CBool(True, config=True,
help="""
Set to confirm when you try to exit IPython with an EOF (Control-D
in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
you can force a direct exit without any confirmation.""",
)
# This display_banner only controls whether or not self.show_banner()
# is called when mainloop/interact are called. The default is False
# because for the terminal based application, the banner behavior
# is controlled by Global.display_banner, which IPythonApp looks at
# to determine if *it* should call show_banner() by hand or not.
display_banner = CBool(False) # This isn't configurable!
embedded = CBool(False)
embedded_active = CBool(False)
editor = Unicode(get_default_editor(), config=True,
help="Set the editor used by IPython (default to $EDITOR/vi/notepad)."
)
pager = Unicode('less', config=True,
help="The shell program to be used for paging.")
screen_length = Integer(0, config=True,
help=
"""Number of lines of your screen, used to control printing of very
long strings. Strings longer than this number of lines will be sent
through a pager instead of directly printed. The default value for
this is 0, which means IPython will auto-detect your screen size every
time it needs to print certain potentially long strings (this doesn't
change the behavior of the 'print' keyword, it's only triggered
internally). If for some reason this isn't working well (it needs
curses support), specify it yourself. Otherwise don't change the
default.""",
)
term_title = CBool(False, config=True,
help="Enable auto setting the terminal title."
)
# This `using_paste_magics` is used to detect whether the code is being
# executed via paste magics functions
using_paste_magics = CBool(False)
# In the terminal, GUI control is done via PyOS_InputHook
@staticmethod
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
"""
# Deferred import
from IPython.lib.inputhook import enable_gui as real_enable_gui
return real_enable_gui(gui, app)
def __init__(self, config=None, ipython_dir=None, profile_dir=None,
user_ns=None, user_module=None, custom_exceptions=((),None),
usage=None, banner1=None, banner2=None, display_banner=None):
super(TerminalInteractiveShell, self).__init__(
config=config, ipython_dir=ipython_dir, profile_dir=profile_dir, user_ns=user_ns,
user_module=user_module, custom_exceptions=custom_exceptions
)
# use os.system instead of utils.process.system by default,
# because piped system doesn't make sense in the Terminal:
self.system = self.system_raw
self.init_term_title()
self.init_usage(usage)
self.init_banner(banner1, banner2, display_banner)
#-------------------------------------------------------------------------
# Overrides of init stages
#-------------------------------------------------------------------------
def init_display_formatter(self):
super(TerminalInteractiveShell, self).init_display_formatter()
# terminal only supports plaintext
self.display_formatter.active_types = ['text/plain']
#-------------------------------------------------------------------------
# Things related to the terminal
#-------------------------------------------------------------------------
@property
def usable_screen_length(self):
if self.screen_length == 0:
return 0
else:
num_lines_bot = self.separate_in.count('\n')+1
return self.screen_length - num_lines_bot
def init_term_title(self):
# Enable or disable the terminal title.
if self.term_title:
toggle_set_term_title(True)
set_term_title('IPython: ' + abbrev_cwd())
else:
toggle_set_term_title(False)
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
# The parent class defines aliases that can be safely used with any
# frontend.
super(TerminalInteractiveShell, self).init_alias()
# Now define aliases that only make sense on the terminal, because they
# need direct access to the console in a way that we can't emulate in
# GUI or web frontend
if os.name == 'posix':
aliases = [('clear', 'clear'), ('more', 'more'), ('less', 'less'),
('man', 'man')]
elif os.name == 'nt':
aliases = [('cls', 'cls')]
for name, cmd in aliases:
self.alias_manager.define_alias(name, cmd)
#-------------------------------------------------------------------------
# Things related to the banner and usage
#-------------------------------------------------------------------------
def _banner1_changed(self):
self.compute_banner()
def _banner2_changed(self):
self.compute_banner()
def _term_title_changed(self, name, new_value):
self.init_term_title()
def init_banner(self, banner1, banner2, display_banner):
if banner1 is not None:
self.banner1 = banner1
if banner2 is not None:
self.banner2 = banner2
if display_banner is not None:
self.display_banner = display_banner
self.compute_banner()
def show_banner(self, banner=None):
if banner is None:
banner = self.banner
self.write(banner)
def compute_banner(self):
self.banner = self.banner1
if self.profile and self.profile != 'default':
self.banner += '\nIPython profile: %s\n' % self.profile
if self.banner2:
self.banner += '\n' + self.banner2
def init_usage(self, usage=None):
if usage is None:
self.usage = interactive_usage
else:
self.usage = usage
#-------------------------------------------------------------------------
# Mainloop and code execution logic
#-------------------------------------------------------------------------
def mainloop(self, display_banner=None):
"""Start the mainloop.
If an optional banner argument is given, it will override the
internally created default banner.
"""
with nested(self.builtin_trap, self.display_trap):
while 1:
try:
self.interact(display_banner=display_banner)
#self.interact_with_readline()
# XXX for testing of a readline-decoupled repl loop, call
# interact_with_readline above
break
except KeyboardInterrupt:
# this should not be necessary, but KeyboardInterrupt
# handling seems rather unpredictable...
self.write("\nKeyboardInterrupt in interact()\n")
def _replace_rlhist_multiline(self, source_raw, hlen_before_cell):
"""Store multiple lines as a single entry in history"""
# do nothing without readline or disabled multiline
if not self.has_readline or not self.multiline_history:
return hlen_before_cell
# windows rl has no remove_history_item
if not hasattr(self.readline, "remove_history_item"):
return hlen_before_cell
# skip empty cells
if not source_raw.rstrip():
return hlen_before_cell
# if nothing changed, do nothing, e.g. when rl removes consecutive dups
hlen = self.readline.get_current_history_length()
if hlen == hlen_before_cell:
return hlen_before_cell
for i in range(hlen - hlen_before_cell):
self.readline.remove_history_item(hlen - i - 1)
stdin_encoding = get_stream_enc(sys.stdin, 'utf-8')
self.readline.add_history(py3compat.unicode_to_str(source_raw.rstrip(),
stdin_encoding))
return self.readline.get_current_history_length()
def interact(self, display_banner=None):
"""Closely emulate the interactive Python console."""
# batch run -> do not interact
if self.exit_now:
return
if display_banner is None:
display_banner = self.display_banner
if isinstance(display_banner, basestring):
self.show_banner(display_banner)
elif display_banner:
self.show_banner()
more = False
if self.has_readline:
self.readline_startup_hook(self.pre_readline)
hlen_b4_cell = self.readline.get_current_history_length()
else:
hlen_b4_cell = 0
# exit_now is set by a call to %Exit or %Quit, through the
# ask_exit callback.
while not self.exit_now:
self.hooks.pre_prompt_hook()
if more:
try:
prompt = self.prompt_manager.render('in2')
except:
self.showtraceback()
if self.autoindent:
self.rl_do_indent = True
else:
try:
prompt = self.separate_in + self.prompt_manager.render('in')
except:
self.showtraceback()
try:
line = self.raw_input(prompt)
if self.exit_now:
# quick exit on sys.std[in|out] close
break
if self.autoindent:
self.rl_do_indent = False
except KeyboardInterrupt:
#double-guard against keyboardinterrupts during kbdint handling
try:
self.write('\nKeyboardInterrupt\n')
source_raw = self.input_splitter.source_raw_reset()[1]
hlen_b4_cell = \
self._replace_rlhist_multiline(source_raw, hlen_b4_cell)
more = False
except KeyboardInterrupt:
pass
except EOFError:
if self.autoindent:
self.rl_do_indent = False
if self.has_readline:
self.readline_startup_hook(None)
self.write('\n')
self.exit()
except bdb.BdbQuit:
warn('The Python debugger has exited with a BdbQuit exception.\n'
'Because of how pdb handles the stack, it is impossible\n'
'for IPython to properly format this particular exception.\n'
'IPython will resume normal operation.')
except:
# exceptions here are VERY RARE, but they can be triggered
# asynchronously by signal handlers, for example.
self.showtraceback()
else:
self.input_splitter.push(line)
more = self.input_splitter.push_accepts_more()
if (self.SyntaxTB.last_syntax_error and
self.autoedit_syntax):
self.edit_syntax_error()
if not more:
source_raw = self.input_splitter.source_raw_reset()[1]
self.run_cell(source_raw, store_history=True)
hlen_b4_cell = \
self._replace_rlhist_multiline(source_raw, hlen_b4_cell)
# Turn off the exit flag, so the mainloop can be restarted if desired
self.exit_now = False
def raw_input(self, prompt=''):
"""Write a prompt and read a line.
The returned line does not include the trailing newline.
When the user enters the EOF key sequence, EOFError is raised.
Optional inputs:
- prompt(''): a string to be printed to prompt the user.
- continue_prompt(False): whether this line is the first one or a
continuation in a sequence of inputs.
"""
# Code run by the user may have modified the readline completer state.
# We must ensure that our completer is back in place.
if self.has_readline:
self.set_readline_completer()
# raw_input expects str, but we pass it unicode sometimes
prompt = py3compat.cast_bytes_py2(prompt)
try:
line = py3compat.str_to_unicode(self.raw_input_original(prompt))
except ValueError:
warn("\n********\nYou or a %run:ed script called sys.stdin.close()"
" or sys.stdout.close()!\nExiting IPython!\n")
self.ask_exit()
return ""
# Try to be reasonably smart about not re-indenting pasted input more
# than necessary. We do this by trimming out the auto-indent initial
# spaces, if the user's actual input started itself with whitespace.
if self.autoindent:
if num_ini_spaces(line) > self.indent_current_nsp:
line = line[self.indent_current_nsp:]
self.indent_current_nsp = 0
return line
#-------------------------------------------------------------------------
# Methods to support auto-editing of SyntaxErrors.
#-------------------------------------------------------------------------
def edit_syntax_error(self):
"""The bottom half of the syntax error handler called in the main loop.
Loop until syntax error is fixed or user cancels.
"""
while self.SyntaxTB.last_syntax_error:
# copy and clear last_syntax_error
err = self.SyntaxTB.clear_err_state()
if not self._should_recompile(err):
return
try:
# may set last_syntax_error again if a SyntaxError is raised
self.safe_execfile(err.filename,self.user_ns)
except:
self.showtraceback()
else:
try:
f = open(err.filename)
try:
# This should be inside a display_trap block and I
# think it is.
sys.displayhook(f.read())
finally:
f.close()
except:
self.showtraceback()
def _should_recompile(self,e):
"""Utility routine for edit_syntax_error"""
if e.filename in ('<ipython console>','<input>','<string>',
'<console>','<BackgroundJob compilation>',
None):
return False
try:
if (self.autoedit_syntax and
not self.ask_yes_no('Return to editor to correct syntax error? '
'[Y/n] ','y')):
return False
except EOFError:
return False
def int0(x):
try:
return int(x)
except TypeError:
return 0
# always pass integer line and offset values to editor hook
try:
self.hooks.fix_error_editor(e.filename,
int0(e.lineno),int0(e.offset),e.msg)
except TryNext:
warn('Could not open editor')
return False
return True
#-------------------------------------------------------------------------
# Things related to exiting
#-------------------------------------------------------------------------
def ask_exit(self):
""" Ask the shell to exit. Can be overiden and used as a callback. """
self.exit_now = True
def exit(self):
"""Handle interactive exit.
This method calls the ask_exit callback."""
if self.confirm_exit:
if self.ask_yes_no('Do you really want to exit ([y]/n)?','y'):
self.ask_exit()
else:
self.ask_exit()
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
super(TerminalInteractiveShell, self).init_magics()
self.register_magics(TerminalMagics)
def showindentationerror(self):
super(TerminalInteractiveShell, self).showindentationerror()
if not self.using_paste_magics:
print("If you want to paste code into IPython, try the "
"%paste and %cpaste magic functions.")
InteractiveShellABC.register(TerminalInteractiveShell)
|
# -*- coding: utf-8 -*-
import jwt
import datetime
class Tokenizer():
def __init__(self, key):
self.secretKey = key
# 👇 DIFFERENT STRATEGIES POSSIBLE 👇
def createToken(self, username):
# define content as a mix of username and expiration date
tokenExpiry = self.setupExpiry()
tokenContent = {
'user': username,
'expiration': tokenExpiry
}
# 'crypt' it this way:
fullToken = jwt.encode(tokenContent, self.secretKey, algorithm='HS256')
return fullToken
# returns a decoded token
def decodeToken(self, rawToken):
output = jwt.decode(rawToken, self.secretKey, algorithms=['HS256'])
return output
# 👇 DIFFERENT STRATEGIES POSSIBLE 👇
def setupExpiry(self):
# sets token expiration to 30 minutes from now
return str(datetime.datetime.utcnow() + datetime.timedelta(minutes=30))
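# Hedged usage sketch (assumes a PyJWT-compatible `jwt` package; the secret and
# username below are placeholders). Note that the expiry is kept under a custom
# 'expiration' key as a string, so PyJWT's automatic expiry check -- which looks
# for a numeric 'exp' claim -- does not apply here, and callers would need to
# compare the stored date themselves.
if __name__ == '__main__':
    tokenizer = Tokenizer('change-me-secret')
    token = tokenizer.createToken('alice')
    print(tokenizer.decodeToken(token))  # e.g. {'user': 'alice', 'expiration': '...'}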
|
# -*- coding: utf-8 -*-
from zerver.lib.test_classes import WebhookTestCase
class OpsGenieHookTests(WebhookTestCase):
STREAM_NAME = 'opsgenie'
URL_TEMPLATE = "/api/v1/external/opsgenie?&api_key={api_key}&stream={stream}"
FIXTURE_DIR_NAME = 'opsgenie'
def test_acknowledge_alert(self) -> None:
expected_subject = u"Integration1"
expected_message = (u"**OpsGenie: [Alert for Integration1.](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c)**\n"
u"Type: *Acknowledge*\n"
u"Message: *test alert*\n"
u"`tag1` `tag2`"
)
self.send_and_test_stream_message('acknowledge', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_addnote_alert(self) -> None:
expected_subject = u"Integration1"
expected_message = (u"**OpsGenie: [Alert for Integration1.](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c)**\n"
u"Type: *AddNote*\n"
u"Note: *note to test alert*\n"
u"Message: *test alert*\n"
u"`tag1` `tag2`"
)
self.send_and_test_stream_message('addnote', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_addrecipient_alert(self) -> None:
expected_subject = u"Integration1"
expected_message = (u"**OpsGenie: [Alert for Integration1.](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c)**\n"
u"Type: *AddRecipient*\n"
u"Recipient: *team2_escalation*\n"
u"Message: *test alert*\n"
u"`tag1` `tag2`"
)
# uses the 'addrecipient' fixture
self.send_and_test_stream_message('addrecipient', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_addtags_alert(self) -> None:
expected_subject = u"Integration1"
expected_message = (u"**OpsGenie: [Alert for Integration1.](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c)**\n"
u"Type: *AddTags*\n"
u"Added tags: *tag1,tag2,tag3*\n"
u"Message: *test alert*\n"
u"`tag1` `tag2` `tag3`"
)
self.send_and_test_stream_message('addtags', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_addteam_alert(self) -> None:
expected_subject = u"Integration1"
expected_message = (u"**OpsGenie: [Alert for Integration1.](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c)**\n"
u"Type: *AddTeam*\n"
u"Added team: *team2*\n"
u"Message: *test alert*\n"
u"`tag1` `tag2`"
)
self.send_and_test_stream_message('addteam', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_assignownership_alert(self) -> None:
expected_subject = u"Integration1"
expected_message = (u"**OpsGenie: [Alert for Integration1.](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c)**\n"
u"Type: *AssignOwnership*\n"
u"Assigned owner: *user2@ifountain.com*\n"
u"Message: *test alert*\n"
u"`tag1` `tag2`"
)
self.send_and_test_stream_message('assignownership', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_close_alert(self) -> None:
expected_subject = u"Integration1"
expected_message = (u"**OpsGenie: [Alert for Integration1.](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c)**\n"
u"Type: *Close*\n"
u"Message: *test alert*"
)
self.send_and_test_stream_message('close', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_create_alert(self) -> None:
expected_subject = u"Webhook"
expected_message = (u"**OpsGenie: [Alert for Webhook.](https://app.opsgenie.com/alert/V2#/show/ec03dad6-62c8-4c94-b38b-d88f398e900f)**\n"
u"Type: *Create*\n"
u"Message: *another alert*\n"
u"`vip`"
)
self.send_and_test_stream_message('create', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_customaction_alert(self) -> None:
expected_subject = u"Integration1"
expected_message = (u"**OpsGenie: [Alert for Integration1.](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c)**\n"
u"Type: *TestAction*\n"
u"Message: *test alert*\n"
u"`tag1` `tag2`"
)
self.send_and_test_stream_message('customaction', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_delete_alert(self) -> None:
expected_subject = u"Integration1"
expected_message = (u"**OpsGenie: [Alert for Integration1.](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c)**\n"
u"Type: *Delete*\n"
u"Message: *test alert*"
)
self.send_and_test_stream_message('delete', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_escalate_alert(self) -> None:
expected_subject = u"Webhook_Test"
expected_message = (u"**OpsGenie: [Alert for Webhook_Test.](https://app.opsgenie.com/alert/V2#/show/7ba97e3a-d328-4b5e-8f9a-39e945a3869a)**\n"
u"Type: *Escalate*\n"
u"Escalation: *test_esc*"
)
self.send_and_test_stream_message('escalate', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_removetags_alert(self) -> None:
expected_subject = u"Integration1"
expected_message = (u"**OpsGenie: [Alert for Integration1.](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c)**\n"
u"Type: *RemoveTags*\n"
u"Removed tags: *tag3*\n"
u"Message: *test alert*\n"
u"`tag1` `tag2`"
)
self.send_and_test_stream_message('removetags', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_takeownership_alert(self) -> None:
expected_subject = u"Webhook"
expected_message = (u"**OpsGenie: [Alert for Webhook.](https://app.opsgenie.com/alert/V2#/show/8a745a79-3ed3-4044-8427-98e067c0623c)**\n"
u"Type: *TakeOwnership*\n"
u"Message: *message test*\n"
u"`tag1` `tag2`"
)
self.send_and_test_stream_message('takeownership', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_unacknowledge_alert(self) -> None:
expected_subject = u"Integration1"
expected_message = (u"**OpsGenie: [Alert for Integration1.](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c)**\n"
u"Type: *UnAcknowledge*\n"
u"Message: *test alert*\n"
u"`tag1` `tag2`"
)
self.send_and_test_stream_message('unacknowledge', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data("opsgenie", fixture_name, file_type="json")
|
def parse(in_string, rule_set):
out_string = ''
for char in in_string:
if char in rule_set:
out_string += rule_set[char]
else:
out_string += char
return out_string
def l_system(axiom, rule_set, iterations):
curr_string = axiom
for i in range(iterations):
print(curr_string)
curr_string = parse(curr_string, rule_set)
print(curr_string)
return curr_string
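# Hedged usage sketch: Lindenmayer's algae system (A -> AB, B -> A). With 4
# iterations this prints A, AB, ABA, ABAAB and finally ABAABABA.
if __name__ == '__main__':
    algae_rules = {'A': 'AB', 'B': 'A'}
    l_system('A', algae_rules, 4)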
|
import pandas as pd
import sys
import time
from application import scrape_company_data, application_methods
def main():
print('Once the window opens, please load the stock ticker file.')
time.sleep(1)
stocks = application_methods.load_input_data()
print('Once the window opens, please load the output directory.\n')
time.sleep(1)
output_directory = application_methods.select_output_directory()
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:77.0) Gecko/20100101 Firefox/77.0'}
for i in range(len(stocks)):
stock = stocks[i]
# --- Class Instance Creation --- #
run = scrape_company_data(stock, headers)
# --- Scrape Methods Execution --- #
summary_data = run.scrape_summary_data()
profile_data = run.scrape_profile_data()
income_statement = run.scrape_income_statement_data()
balance_sheet = run.scrape_balance_sheet_data()
cash_flow = run.scrape_cash_flow_data()
valuation_measures = run.scrape_valuation_measures_data()
misc_data = run.scrape_highlights_and_trading_data()
highlight_data = misc_data['financial']
trading_data = misc_data['trading']
# --- Record Data to XLSX File --- #
application_methods.write_xlsx_file(stock,
output_directory,
summary_data, profile_data,
income_statement,
balance_sheet,
cash_flow,
valuation_measures,
highlight_data,
trading_data)
print(stock + ' Report Written')
print('------------------------------------')
time.sleep(2)
if __name__ == "__main__":
start_time = time.time()
main()
elapsed_time = time.time() - start_time
print('Script executed successfully.')
print('Duration: ' + str(round(elapsed_time, 2)) + ' Seconds')
|
import onnxruntime as rt
import onnx.utils
import onnx
import sys
sys.path.append("../lib")
from config import update_config
from config import cfg
import torch
import models
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg', type=str, required=True)
parser.add_argument('opts',
help='Modify config options using the command-line',
default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
# args expected by supporting codebase
args.modelDir = ''
args.logDir = ''
args.dataDir = ''
args.prevModelDir = ''
return args
args = parse_args()
update_config(cfg, args)
pose_model = models.pose_hrnet.get_pose_net(
cfg, is_train=False
)
pose_model.load_state_dict(torch.load('model/pose_hrnet_w32_256x192.pth'),
strict=False)
onnx_file_name = 'pose_hrnet_w32_256x192.onnx'
batch_size = 10
x = torch.randn((batch_size, 3, 256, 192), requires_grad=True)
print('Export the onnx model ...')
torch.onnx.export(pose_model,
x,
onnx_file_name,
export_params=True,
opset_version=11,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}}
)
print("done exporting; load onnx model and optimize it")
model = onnx.load(onnx_file_name)
onnx.checker.check_model(model)
sess_op_cpu = rt.SessionOptions()
sess_op_cpu.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
sess_op_cpu.intra_op_num_threads = 1
sess_op_cpu.execution_mode = rt.ExecutionMode.ORT_SEQUENTIAL
providers = ['CPUExecutionProvider']
sess_op_cpu.optimized_model_filepath = "cpu_pose_hrnet_w32_256x192.onnx"
sess_cpu = rt.InferenceSession(onnx_file_name, providers=providers,
sess_options=sess_op_cpu)
sess_op_gpu = rt.SessionOptions()
sess_op_gpu.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
sess_op_gpu.intra_op_num_threads = 1
sess_op_gpu.execution_mode = rt.ExecutionMode.ORT_SEQUENTIAL
providers = ['CUDAExecutionProvider']
sess_op_gpu.optimized_model_filepath = "gpu_pose_hrnet_w32_256x192.onnx"
sess_gpu = rt.InferenceSession(onnx_file_name, providers=providers,
sess_options=sess_op_gpu)
model = onnx.load("cpu_pose_hrnet_w32_256x192.onnx")
onnx.checker.check_model(model)
model = onnx.load("gpu_pose_hrnet_w32_256x192.onnx")
onnx.checker.check_model(model)
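# Hedged sanity check (assumes the export above succeeded): run the CPU session
# on a random batch and inspect the output shape. 'input'/'output' are the names
# passed to torch.onnx.export above; the joint count depends on the HRNet config.
import numpy as np
dummy = np.random.randn(2, 3, 256, 192).astype(np.float32)
heatmaps = sess_cpu.run(['output'], {'input': dummy})[0]
print('heatmap shape:', heatmaps.shape)  # e.g. (2, num_joints, 64, 48)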
|
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The ``spyne.model.complex`` module contains
:class:`spyne.model.complex.ComplexBase` class and its helper objects and
subclasses. These are mainly container classes for other simple or
complex objects -- they don't carry any data by themselves.
"""
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
import decimal
import traceback
from copy import copy
from weakref import WeakKeyDictionary
from collections import deque, OrderedDict
from inspect import isclass
from itertools import chain
from spyne import const
from spyne.const.xml import PREFMAP
from spyne.model import Point, Unicode, PushBase, ModelBase
from spyne.model._base import PSSM_VALUES, apply_pssm
from spyne.model.primitive import NATIVE_MAP
from spyne.util import six, memoize, memoize_id, sanitize_args, \
memoize_ignore_none
from spyne.util.color import YEL
from spyne.util.meta import Prepareable
from spyne.util.odict import odict
from spyne.util.six import add_metaclass, with_metaclass, string_types
# FIXME: for backwards compatibility, to be removed in Spyne 3
# noinspection PyUnresolvedReferences
from spyne.model import json, jsonb, xml, msgpack, table
def _get_flat_type_info(cls, retval):
assert isinstance(retval, TypeInfo)
parent = getattr(cls, '__extends__', None)
if not (parent is None):
_get_flat_type_info(parent, retval)
retval.update(cls._type_info)
retval.alt.update(cls._type_info_alt) # FIXME: move to cls._type_info.alt
retval.attrs.update({k: v for (k, v) in cls._type_info.items()
if issubclass(v, XmlAttribute)})
return retval
class TypeInfo(odict):
def __init__(self, *args, **kwargs):
super(TypeInfo, self).__init__(*args, **kwargs)
self.attributes = {}
self.alt = OrderedDict()
self.attrs = OrderedDict()
def __setitem__(self, key, val):
assert isinstance(key, string_types)
super(TypeInfo, self).__setitem__(key, val)
class _SimpleTypeInfoElement(object):
__slots__ = ['path', 'parent', 'type', 'is_array', 'can_be_empty']
def __init__(self, path, parent, type_, is_array, can_be_empty):
self.path = path
self.parent = parent
self.type = type_
self.is_array = is_array
self.can_be_empty = can_be_empty
def __repr__(self):
return "SimpleTypeInfoElement(path=%r, parent=%r, type=%r, is_array=%r)" \
% (self.path, self.parent, self.type, self.is_array)
class XmlModifier(ModelBase):
def __new__(cls, type, ns=None):
retval = cls.customize()
retval.type = type
retval.Attributes = type.Attributes
retval._ns = ns
if type.__type_name__ is ModelBase.Empty:
retval.__type_name__ = ModelBase.Empty
return retval
@staticmethod
def resolve_namespace(cls, default_ns, tags=None):
cls.type.resolve_namespace(cls.type, default_ns, tags)
cls.__namespace__ = cls._ns
if cls.__namespace__ is None:
cls.__namespace__ = cls.type.get_namespace()
if cls.__namespace__ in PREFMAP:
cls.__namespace__ = default_ns
@classmethod
def _fill_empty_type_name(cls, parent_ns, parent_tn, k):
cls.__namespace__ = parent_ns
tn = "%s_%s%s" % (parent_tn, k, const.TYPE_SUFFIX)
child_v = cls.type
child_v.__type_name__ = tn
cls._type_info = TypeInfo({tn: child_v})
cls.__type_name__ = '%s%s%s' % (const.ARRAY_PREFIX, tn,
const.ARRAY_SUFFIX)
extends = child_v.__extends__
while extends is not None and extends.get_type_name() is cls.Empty:
extends._fill_empty_type_name(parent_ns, parent_tn,
k + const.PARENT_SUFFIX)
extends = extends.__extends__
class XmlData(XmlModifier):
"""Items which are marshalled as data of the parent element."""
@classmethod
def marshall(cls, prot, name, value, parent_elt):
if value is not None:
if len(parent_elt) == 0:
parent_elt.text = prot.to_bytes(cls.type, value)
else:
parent_elt[-1].tail = prot.to_bytes(cls.type, value)
@classmethod
def get_type_name(cls):
return cls.type.get_type_name()
@classmethod
def get_type_name_ns(cls, interface):
return cls.type.get_type_name_ns(interface)
@classmethod
def get_namespace(cls):
return cls.type.get_namespace()
@classmethod
def get_element_name(cls):
return cls.type.get_element_name()
@classmethod
def get_element_name_ns(cls, interface):
return cls.type.get_element_name_ns(interface)
class XmlAttribute(XmlModifier):
"""Items which are marshalled as attributes of the parent element."""
def __new__(cls, type_, use=None, ns=None):
retval = super(XmlAttribute, cls).__new__(cls, type_, ns)
retval._use = use
if retval.type.Attributes.min_occurs > 0 and retval._use is None:
retval._use = 'required'
return retval
class XmlAttributeRef(XmlAttribute):
"""Reference to an Xml attribute."""
def __init__(self, ref, use=None):
self._ref = ref
self._use = use
def describe(self, name, element, app):
element.set('ref', self._ref)
if self._use:
element.set('use', self._use)
class SelfReference(object):
"""Use this as a placeholder type in classes that contain themselves. See
:func:`spyne.test.model.test_complex.TestComplexModel.test_self_reference`.
"""
customize_args = []
customize_kwargs = {}
__orig__ = None
def __init__(self):
raise NotImplementedError()
@classmethod
def customize(cls, *args, **kwargs):
args = list(chain(args, cls.customize_args))
kwargs = dict(chain(kwargs.items(), cls.customize_kwargs.items()))
if cls.__orig__ is None:
cls.__orig__ = cls
return type("SelfReference", (cls,), {
'customize_args': args,
'customize_kwargs': kwargs,
})
def _get_spyne_type(cls_name, k, v):
try:
v = NATIVE_MAP.get(v, v)
except TypeError:
return
try:
subc = issubclass(v, ModelBase) or issubclass(v, SelfReference)
except:
subc = False
if subc:
if issubclass(v, Array) and len(v._type_info) != 1:
raise Exception("Invalid Array definition in %s.%s."% (cls_name, k))
elif issubclass(v, Point) and v.Attributes.dim is None:
raise Exception("Please specify the number of dimensions")
return v
def _join_args(x, y):
if x is None:
return y
if y is None:
return x
xa, xk = sanitize_args(x)
ya, yk = sanitize_args(y)
xk = dict(xk)
xk.update(yk)
return xa + ya, xk
def _gen_attrs(cls_bases, cls_dict):
attrs = cls_dict.get('Attributes', None)
if attrs is None:
for b in cls_bases:
if hasattr(b, 'Attributes'):
class Attributes(b.Attributes):
pass
attrs = cls_dict['Attributes'] = Attributes
break
else:
raise Exception("No ModelBase subclass in bases? Huh?")
return attrs
def _get_type_info(cls, cls_name, cls_bases, cls_dict, attrs):
base_type_info = TypeInfo()
mixin = TypeInfo()
extends = cls_dict.get('__extends__', None)
# user did not specify explicit base class so let's try to derive it from
# the actual class hierarchy
if extends is None:
# we don't want origs end up as base classes
orig = cls_dict.get("__orig__", None)
if orig is None:
orig = getattr(cls, '__orig__', None)
if orig is not None:
bases = orig.__bases__
logger.debug("Got bases for %s from orig: %r", cls_name, bases)
else:
bases = cls_bases
logger.debug("Got bases for %s from meta: %r", cls_name, bases)
for b in bases:
base_types = getattr(b, "_type_info", None)
# we don't care about non-ComplexModel bases
if base_types is None:
continue
# mixins are simple
if getattr(b, '__mixin__', False) == True:
logger.debug("Adding fields from mixin %r to '%s'", b, cls_name)
mixin.update(b.get_flat_type_info(b))
if '__mixin__' not in cls_dict:
cls_dict['__mixin__'] = False
continue
if not (extends in (None, b)):
raise Exception("Spyne objects do not support multiple "
"inheritance. Use mixins if you need to reuse "
"fields from multiple classes.")
if len(base_types) > 0 and issubclass(b, ModelBase):
extends = cls_dict["__extends__"] = b
assert extends.__orig__ is None, "You can't inherit from a " \
"customized class. You should first get your class " \
"hierarchy right, then start customizing classes."
b.get_subclasses.memo.clear()
logger.debug("Registering %r as base of '%s'", b, cls_name)
if not ('_type_info' in cls_dict):
cls_dict['_type_info'] = _type_info = TypeInfo()
_type_info.update(base_type_info)
class_fields = []
for k, v in cls_dict.items():
if k.startswith('_'):
continue
if isinstance(v, tuple) and len(v) == 1 and \
_get_spyne_type(cls_name, k, v[0]) is not None:
logger.warning(YEL("There seems to be a stray comma in the"
"definition of '%s.%s'.", cls_name, k))
v = _get_spyne_type(cls_name, k, v)
if v is None:
continue
class_fields.append((k, v))
_type_info.update(class_fields)
else:
_type_info = cls_dict['_type_info']
if not isinstance(_type_info, TypeInfo):
_type_info = cls_dict['_type_info'] = TypeInfo(_type_info)
for k, v in reversed(mixin.items()):
_type_info.insert(0, (k, v))
return _type_info
class _MethodsDict(dict):
def __init__(self, *args, **kwargs):
super(_MethodsDict, self).__init__(*args, **kwargs)
self._processed = False
def _gen_methods(cls, cls_dict):
methods = _MethodsDict()
for k, v in cls_dict.items():
if not k.startswith('_') and hasattr(v, '_is_rpc'):
logger.debug("Registering %s as member method for %r", k, cls)
assert cls is not None
# generate method descriptor from information in the decorator
descriptor = v(_default_function_name=k, _self_ref_replacement=cls)
# strip the decorator and put the original function in the class
setattr(cls, k, descriptor.function)
# modify the descriptor with user-supplied class
if cls.Attributes.method_config_do is not None:
descriptor = cls.Attributes.method_config_do(descriptor)
methods[k] = descriptor
return methods
def _get_ordered_attributes(cls_name, cls_dict, attrs):
if not isinstance(cls_dict, odict):
# FIXME: Maybe add a warning here?
return cls_dict
SUPPORTED_ORDERS = ('random', 'declared')
if (attrs.declare_order is not None and
not attrs.declare_order in SUPPORTED_ORDERS):
msg = "The declare_order attribute value %r is invalid in %s"
raise Exception(msg % (attrs.declare_order, cls_name))
declare_order = attrs.declare_order or const.DEFAULT_DECLARE_ORDER
if declare_order is None or declare_order == 'random':
# support old behaviour
cls_dict = dict(cls_dict)
return cls_dict
def _sanitize_sqlalchemy_parameters(cls_dict, attrs):
table_name = cls_dict.get('__tablename__', None)
if attrs.table_name is None:
attrs.table_name = table_name
_cls_table = cls_dict.get('__table__', None)
if attrs.sqla_table is None:
attrs.sqla_table = _cls_table
metadata = cls_dict.get('__metadata__', None)
if attrs.sqla_metadata is None:
attrs.sqla_metadata = metadata
margs = cls_dict.get('__mapper_args__', None)
attrs.sqla_mapper_args = _join_args(attrs.sqla_mapper_args, margs)
targs = cls_dict.get('__table_args__', None)
attrs.sqla_table_args = _join_args(attrs.sqla_table_args, targs)
def _sanitize_type_info(cls_name, _type_info, _type_info_alt):
"""Make sure _type_info contents are sane"""
for k, v in _type_info.items():
if not isinstance(k, six.string_types):
raise ValueError("Invalid class key", k)
if not isclass(v):
raise ValueError(v)
if issubclass(v, SelfReference):
continue
elif not issubclass(v, ModelBase):
v = _get_spyne_type(cls_name, k, v)
if v is None:
raise ValueError( (cls_name, k, v) )
_type_info[k] = v
elif issubclass(v, Array) and len(v._type_info) != 1:
raise Exception("Invalid Array definition in %s.%s." %
(cls_name, k))
sub_ns = v.Attributes.sub_ns
sub_name = v.Attributes.sub_name
if sub_ns is None and sub_name is None:
pass
elif sub_ns is not None and sub_name is not None:
key = "{%s}%s" % (sub_ns, sub_name)
if key in _type_info:
raise Exception("%r is already defined: %r" %
(key, _type_info[key]))
_type_info_alt[key] = v, k
elif sub_ns is None:
key = sub_name
if key in _type_info:
raise Exception("%r is already defined: %r" %
(key, _type_info[key]))
_type_info_alt[key] = v, k
elif sub_name is None:
key = "{%s}%s" % (sub_ns, k)
if key in _type_info:
raise Exception("%r is already defined: %r" %
(key, _type_info[key]))
_type_info_alt[key] = v, k
D_EXC = dict(exc=True)
def _process_child_attrs(cls, retval, kwargs):
child_attrs = copy(kwargs.get('child_attrs', None))
child_attrs_all = kwargs.get('child_attrs_all', None)
child_attrs_noexc = copy(kwargs.get('child_attrs_noexc', None))
# add exc=False to child_attrs_noexc
if child_attrs_noexc is not None:
# if there is _noexc, make sure that child_attrs_all is also used to
# exclude everything else first
if child_attrs_all is None:
child_attrs_all = D_EXC
else:
if 'exc' in child_attrs_all and child_attrs_all['exc'] != D_EXC:
logger.warning("Overriding child_attrs_all['exc'] to True "
"for %r", cls)
child_attrs_all.update(D_EXC)
# update child_attrs_noexc with exc=False
for k, v in child_attrs_noexc.items():
if 'exc' in v:
logger.warning("Overriding 'exc' for %s.%s from "
"child_attrs_noexc with False", cls.get_type_name(), k)
v['exc'] = False
# update child_attrs with data from child_attrs_noexc
if child_attrs is None:
child_attrs = child_attrs_noexc
else:
# merge child_attrs_noexc (already forced to exc=False) into child_attrs
if child_attrs is None:
child_attrs = dict()
for k, v in child_attrs_noexc.items():
if k in child_attrs:
logger.warning("Overriding child_attrs for %s.%s from "
"child_attrs_noexc", cls.get_type_name(), k)
child_attrs[k] = v
if child_attrs_all is not None:
ti = retval._type_info
logger.debug("processing child_attrs_all for %r", cls)
for k, v in ti.items():
logger.debug(" child_attrs_all set %r=%r", k, child_attrs_all)
ti[k] = ti[k].customize(**child_attrs_all)
if retval.__extends__ is not None:
retval.__extends__ = retval.__extends__.customize(
child_attrs_all=child_attrs_all)
retval.Attributes._delayed_child_attrs_all = child_attrs_all
if child_attrs is not None:
ti = retval._type_info
logger.debug("processing child_attrs for %r", cls)
for k, v in list(child_attrs.items()):
if k in ti:
logger.debug(" child_attr set %r=%r", k, v)
ti[k] = ti[k].customize(**v)
del child_attrs[k]
base_fti = {}
if retval.__extends__ is not None:
retval.__extends__ = retval.__extends__.customize(
child_attrs=child_attrs)
base_fti = retval.__extends__.get_flat_type_info(retval.__extends__)
for k, v in child_attrs.items():
if k not in base_fti:
logger.debug(" child_attr delayed %r=%r", k, v)
retval.Attributes._delayed_child_attrs[k] = v
def recust_selfref(selfref, cls):
if len(selfref.customize_args) > 0 or len(selfref.customize_kwargs) > 0:
logger.debug("Replace self reference with %r with *%r and **%r",
cls, selfref.customize_args, selfref.customize_kwargs)
return cls.customize(*selfref.customize_args,
**selfref.customize_kwargs)
logger.debug("Replace self reference with %r", cls)
return cls
def _set_member_default(inst, key, cls, attr):
def_val = attr.default
def_fac = attr.default_factory
if def_fac is None and def_val is None:
return False
if def_fac is not None:
if six.PY2 and hasattr(def_fac, 'im_func'):
# unbound-method error workaround. huh.
def_fac = def_fac.im_func
dval = def_fac()
# should not check for read-only for default values
setattr(inst, key, dval)
return True
if def_val is not None:
# should not check for read-only for default values
setattr(inst, key, def_val)
return True
assert False, "Invalid application state"
def _is_sqla_array(cls, attr):
# inner object is complex
ret1 = issubclass(cls, Array) and \
hasattr(cls.get_inner_type(), '_sa_class_manager')
# inner object is primitive
ret2 = issubclass(cls, Array) and attr.store_as is not None
# object is a bare array
ret3 = attr.max_occurs > 1 and hasattr(cls, '_sa_class_manager')
return ret1 or ret2 or ret3
def _init_member(inst, key, cls, attr):
cls_getattr_ret = getattr(inst.__class__, key, None)
if isinstance(cls_getattr_ret, property) and cls_getattr_ret.fset is None:
return # we skip read-only properties
if _set_member_default(inst, key, cls, attr):
return
# sqlalchemy objects do their own init.
if _is_sqla_array(cls, attr):
# except the attributes that sqlalchemy doesn't know about
if attr.exc_db:
setattr(inst, key, None)
elif attr.store_as is None:
setattr(inst, key, None)
return
# sqlalchemy objects do their own init.
if hasattr(inst.__class__, '_sa_class_manager'):
# except the attributes that sqlalchemy doesn't know about
if attr.exc_db:
setattr(inst, key, None)
elif issubclass(cls, ComplexModelBase) and attr.store_as is None:
setattr(inst, key, None)
return
setattr(inst, key, None)
class ComplexModelMeta(with_metaclass(Prepareable, type(ModelBase))):
"""This metaclass sets ``_type_info``, ``__type_name__`` and ``__extends__``
which are going to be used for (de)serialization and schema generation.
"""
def __new__(cls, cls_name, cls_bases, cls_dict):
"""This function initializes the class and registers attributes."""
attrs = _gen_attrs(cls_bases, cls_dict)
assert issubclass(attrs, ComplexModelBase.Attributes), \
("%r must be a ComplexModelBase.Attributes subclass" % attrs)
cls_dict = _get_ordered_attributes(cls_name, cls_dict, attrs)
type_name = cls_dict.get("__type_name__", None)
if type_name is None:
cls_dict["__type_name__"] = cls_name
_type_info = _get_type_info(cls, cls_name, cls_bases, cls_dict, attrs)
# used for sub_name and sub_ns
_type_info_alt = cls_dict['_type_info_alt'] = TypeInfo()
for b in cls_bases:
if hasattr(b, '_type_info_alt'):
_type_info_alt.update(b._type_info_alt)
_sanitize_type_info(cls_name, _type_info, _type_info_alt)
_sanitize_sqlalchemy_parameters(cls_dict, attrs)
return super(ComplexModelMeta, cls).__new__(cls,
cls_name, cls_bases, cls_dict)
def __init__(self, cls_name, cls_bases, cls_dict):
type_info = self._type_info
extends = self.__extends__
if extends is not None and self.__orig__ is None:
eattr = extends.Attributes
if eattr._subclasses is None:
eattr._subclasses = []
eattr._subclasses.append(self)
if self.Attributes._subclasses is eattr._subclasses:
self.Attributes._subclasses = None
# sanitize fields
for k, v in type_info.items():
# replace bare SelfReference
if issubclass(v, SelfReference):
self._replace_field(k, recust_selfref(v, self))
# cache XmlData for easier access
elif issubclass(v, XmlData):
if self.Attributes._xml_tag_body_as is None:
self.Attributes._xml_tag_body_as = [(k, v)]
else:
self.Attributes._xml_tag_body_as.append((k, v))
# replace SelfReference in arrays
elif issubclass(v, Array):
v2, = v._type_info.values()
while issubclass(v2, Array):
v = v2
v2, = v2._type_info.values()
if issubclass(v2, SelfReference):
v._set_serializer(recust_selfref(v2, self))
# apply field order
# FIXME: Implement this better
new_type_info = []
for k, v in self._type_info.items():
if v.Attributes.order is None:
new_type_info.append(k)
for k, v in self._type_info.items():
if v.Attributes.order is not None:
new_type_info.insert(v.Attributes.order, k)
assert len(self._type_info) == len(new_type_info)
self._type_info.keys()[:] = new_type_info
# install checkers for validation on assignment
for k, v in self._type_info.items():
if not v.Attributes.validate_on_assignment:
continue
def _get_prop(self):
return self.__dict__[k]
def _set_prop(self, val):
if not (val is None or isinstance(val, v.Value)):
raise ValueError("Invalid value %r, "
"should be an instance of %r" % (val, v.Value))
self.__dict__[k] = val
setattr(self, k, property(_get_prop, _set_prop))
# process member rpc methods
methods = _gen_methods(self, cls_dict)
if len(methods) > 0:
self.Attributes.methods = methods
# finalize sql table mapping
tn = self.Attributes.table_name
meta = self.Attributes.sqla_metadata
t = self.Attributes.sqla_table
# For spyne objects reflecting an existing db table
if tn is None:
if t is not None:
self.Attributes.sqla_metadata = t.metadata
from spyne.store.relational import gen_spyne_info
gen_spyne_info(self)
# For spyne objects being converted to a sqlalchemy table
elif meta is not None and (tn is not None or t is not None) and \
len(self._type_info) > 0:
from spyne.store.relational import gen_sqla_info
gen_sqla_info(self, cls_bases)
super(ComplexModelMeta, self).__init__(cls_name, cls_bases, cls_dict)
#
# We record the order fields are defined into ordered dict, so we can
# declare them in the same order in the WSDL.
#
# For Python 3 __prepare__ works out of the box, see PEP 3115.
# But we use `Preparable` metaclass for both Python 2 and Python 3 to
# support six.add_metaclass decorator
#
@classmethod
def __prepare__(mcs, name, bases, **kwds):
return odict()
_is_array = lambda v: issubclass(v, Array) or (v.Attributes.max_occurs > 1)
class ComplexModelBase(ModelBase):
"""If you want to make a better class type, this is what you should inherit
from.
"""
__mixin__ = False
class Attributes(ModelBase.Attributes):
"""ComplexModel-specific attributes"""
store_as = None
"""Method for serializing to persistent storage. One of %r. It makes
sense to specify this only when this object is a child of another
ComplexModel subclass.""" % (PSSM_VALUES,)
sqla_metadata = None
"""None or :class:`sqlalchemy.MetaData` instance."""
sqla_table_args = None
"""A dict that will be passed to :class:`sqlalchemy.schema.Table`
constructor as ``**kwargs``.
"""
sqla_mapper_args = None
"""A dict that will be passed to :func:`sqlalchemy.orm.mapper`
constructor as ``**kwargs``.
"""
sqla_table = None
"""The sqlalchemy table object"""
sqla_mapper = None
"""The sqlalchemy mapper object"""
validate_freq = True
"""When ``False``, soft validation ignores missing mandatory attributes.
"""
child_attrs = None
"""Customize child attributes in one go. It's a dict of dicts. This is
ignored unless used via explicit customization."""
child_attrs_all = None
"""Customize all child attributes. It's a dict. This is ignored unless
used via explicit customization. `child_attrs` always take precedence.
"""
declare_order = None
"""The order fields of the :class:``ComplexModel`` are to be declared
in the SOAP WSDL. If this is left as None or explicitly set to
``'random'`` declares then the fields appear in whatever order the
Python's hash map implementation seems fit in the WSDL. This randomised
order can change every time the program is run. This is what Spyne <2.11
did if you didn't set _type_info as an explicit sequence (e.g. using a
list, odict, etc.). It means that clients who are manually complied or
generated from the WSDL will likely need to be recompiled every time it
changes. The string ``name`` means the field names are alphabetically
sorted in the WSDL declaration. The string ``declared`` means in the
order the field type was declared in Python 2, and the order the
field was declared in Python 3.
In order to get declared field order in Python 2, the
:class:`spyne.util.meta.Preparable` class inspects the frame stack in
order to locate the class definition, re-parses it to get declaration
order from the AST and uses that information to order elements.
It's a horrible hack that we tested to work with CPython 2.6 through 3.3
and PyPy. It breaks in Nuitka as Nuitka does away with code objects.
Other platforms were not tested.
It is not recommended to set this to ``'declared'`` in Python 2
unless you're sure you fully understand the consequences.
"""
parent_variant = None
"""FIXME: document me yo."""
methods = None
"""A dict of member RPC methods (typically marked with @mrpc)."""
method_config_do = None
"""When not None, it's a callable that accepts a ``@mrpc`` method
descriptor and returns a modified version."""
_variants = None
_xml_tag_body_as = None
_delayed_child_attrs = None
_delayed_child_attrs_all = None
_subclasses = None
def __init__(self, *args, **kwargs):
cls = self.__class__
cls_attr = cls.Attributes
fti = cls.get_flat_type_info(cls)
if cls.__orig__ is not None:
logger.warning("%r(0x%X) seems to be a customized class. It is not "
"supposed to be instantiated. You have been warned.",
cls, id(cls))
logger.debug("".join(traceback.format_stack()))
if cls_attr._xml_tag_body_as is not None:
for arg, (xtba_key, xtba_type) in \
zip(args, cls_attr._xml_tag_body_as):
if xtba_key is not None and len(args) == 1:
attr = xtba_type.Attributes
_init_member(self, xtba_key, xtba_type, attr)
self._safe_set(xtba_key, arg, xtba_type,
xtba_type.Attributes)
elif len(args) > 0:
raise TypeError(
"Positional argument is only for ComplexModels "
"with XmlData field. You must use keyword "
"arguments in any other case.")
for k, v in fti.items():
attr = v.Attributes
if not k in self.__dict__:
_init_member(self, k, v, attr)
if k in kwargs:
self._safe_set(k, kwargs[k], v, attr)
def __len__(self):
return len(self._type_info)
def __getitem__(self, i):
if isinstance(i, slice):
retval = []
for key in self._type_info.keys()[i]:
retval.append(getattr(self, key, None))
else:
retval = getattr(self, self._type_info.keys()[i], None)
return retval
def __repr__(self):
return "%s(%s)" % (self.get_type_name(), ', '.join(
['%s=%r' % (k, self.__dict__.get(k))
for k in self.__class__.get_flat_type_info(self.__class__)
if self.__dict__.get(k, None) is not None]))
def _safe_set(self, key, value, t, attrs):
if attrs.read_only:
return False
try:
setattr(self, key, value)
except AttributeError as e:
logger.exception(e)
raise AttributeError("can't set %r attribute %s to %r" %
(self.__class__, key, value))
return True
@classmethod
def get_identifiers(cls):
for k, v in cls.get_flat_type_info(cls).items():
if getattr(v.Attributes, 'primary_key', None):
yield k, v
@classmethod
def get_primary_keys(cls):
return cls.get_identifiers()
def as_dict(self):
"""Represent object as dict.
Null values are omitted from dict representation to support optional
not nullable attributes.
"""
return dict((
(k, getattr(self, k)) for k in self.get_flat_type_info(self.__class__)
if getattr(self, k) is not None
))
@classmethod
def get_serialization_instance(cls, value):
"""Returns the native object corresponding to the serialized form passed
in the ``value`` argument.
:param value: This argument can be:
* A list or tuple of native types aligned with cls._type_info.
* A dict of native types.
* The native type itself.
If the value type is not a ``list``, ``tuple`` or ``dict``, the
value is returned untouched.
"""
# if the instance is a list, convert it to a cls instance.
# this is only useful when deserializing method arguments for a client
# request which is the only time when the member order is not arbitrary
# (as the members are declared and passed around as sequences of
# arguments, unlike dictionaries in a regular class definition).
if isinstance(value, list) or isinstance(value, tuple):
keys = cls.get_flat_type_info(cls).keys()
if not len(value) <= len(keys):
logger.error("\n\tcls: %r" "\n\tvalue: %r" "\n\tkeys: %r",
cls, value, keys)
raise ValueError("Impossible sequence to instance conversion")
cls_orig = cls
if cls.__orig__ is not None:
cls_orig = cls.__orig__
try:
inst = cls_orig()
except Exception as e:
logger.error("Error instantiating %r: %r", cls_orig, e)
raise
for i in range(len(value)):
setattr(inst, keys[i], value[i])
elif isinstance(value, dict):
cls_orig = cls
if cls.__orig__ is not None:
cls_orig = cls.__orig__
inst = cls_orig()
for k in cls.get_flat_type_info(cls):
setattr(inst, k, value.get(k, None))
else:
inst = value
return inst
@classmethod
def get_deserialization_instance(cls, ctx):
"""Get an empty native type so that the deserialization logic can set
its attributes.
"""
if cls.__orig__ is None:
return cls()
return cls.__orig__()
@classmethod
@memoize_id
def get_subclasses(cls):
retval = []
subca = cls.Attributes._subclasses
if subca is not None:
retval.extend(subca)
for subc in subca:
retval.extend(subc.get_subclasses())
return retval
@staticmethod
@memoize_ignore_none
def get_flat_type_info(cls):
"""Returns a _type_info dict that includes members from all base
classes.
It's called a "flat" dict because it flattens all members from the
inheritance hierarchy into one dict.
"""
return _get_flat_type_info(cls, TypeInfo())
@classmethod
def get_orig(cls):
return cls.__orig__ or cls
@staticmethod
@memoize
def get_simple_type_info(cls, hier_delim="."):
"""Returns a _type_info dict that includes members from all base classes
and whose types are only primitives. It will prefix field names in
non-top-level complex objects with field name of its parent.
For example, given hier_delim='_'; the following hierarchy: ::
{'some_object': [{'some_string': ['abc']}]}
would be transformed to: ::
{'some_object_some_string': ['abc']}
:param hier_delim: String that will be used as delimiter between field
names. Default is ``'_'``.
"""
fti = cls.get_flat_type_info(cls)
retval = TypeInfo()
tags = set()
queue = deque([(k, v, (k,), (_is_array(v),), cls)
for k,v in fti.items()])
tags.add(cls)
while len(queue) > 0:
k, v, prefix, is_array, parent = queue.popleft()
if issubclass(v, Array) and v.Attributes.max_occurs == 1:
v, = v._type_info.values()
key = hier_delim.join(prefix)
if issubclass(v, ComplexModelBase):
retval[key] = _SimpleTypeInfoElement(path=tuple(prefix),
parent=parent, type_=v, is_array=tuple(is_array),
can_be_empty=True)
if not (v in tags):
tags.add(v)
queue.extend([
(k2, v2, prefix + (k2,),
is_array + (v.Attributes.max_occurs > 1,), v)
for k2, v2 in v.get_flat_type_info(v).items()])
else:
value = retval.get(key, None)
if value is not None:
raise ValueError("%r.%s conflicts with %r" %
(cls, k, value.path))
retval[key] = _SimpleTypeInfoElement(path=tuple(prefix),
parent=parent, type_=v, is_array=tuple(is_array),
can_be_empty=False)
return retval
@staticmethod
def resolve_namespace(cls, default_ns, tags=None):
if tags is None:
tags = set()
elif cls in tags:
return False
if not ModelBase.resolve_namespace(cls, default_ns, tags):
return False
for k, v in cls._type_info.items():
if v is None:
continue
if v.__type_name__ is ModelBase.Empty:
v._fill_empty_type_name(cls.get_namespace(),
cls.get_type_name(), k)
v.resolve_namespace(v, default_ns, tags)
if cls._force_own_namespace is not None:
for c in cls._force_own_namespace:
c.__namespace__ = cls.get_namespace()
ComplexModel.resolve_namespace(c, cls.get_namespace(), tags)
assert not (cls.__namespace__ is ModelBase.Empty)
assert not (cls.__type_name__ is ModelBase.Empty)
return True
@staticmethod
def produce(namespace, type_name, members):
"""Lets you create a class programmatically."""
return ComplexModelMeta(type_name, (ComplexModel,), odict({
'__namespace__': namespace,
'__type_name__': type_name,
'_type_info': TypeInfo(members),
}))
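# Illustrative sketch (editor's addition, not part of the original module):
# produce() builds a ComplexModel subclass at runtime, roughly equivalent to
# writing the class by hand, e.g.
#   DynUser = ComplexModel.produce('some.ns', 'DynUser',
#                                  {'user_name': Unicode, 'email': Unicode})
# The namespace, type name and field names above are hypothetical.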
@classmethod
def customize(cls, **kwargs):
"""Duplicates cls and overwrites the values in ``cls.Attributes`` with
``**kwargs`` and returns the new class.
Because each class is registered as a variant of the original (__orig__)
class, using this function to generate classes dynamically on-the-fly
could cause memory leaks. You have been warned.
"""
store_as = apply_pssm(kwargs.get('store_as', None))
if store_as is not None:
kwargs['store_as'] = store_as
cls_name, cls_bases, cls_dict = cls._s_customize(**kwargs)
cls_dict['__module__'] = cls.__module__
if '__extends__' not in cls_dict:
cls_dict['__extends__'] = cls.__extends__
retval = type(cls_name, cls_bases, cls_dict)
retval._type_info = TypeInfo(cls._type_info)
retval.__type_name__ = cls.__type_name__
retval.__namespace__ = cls.__namespace__
retval.Attributes.parent_variant = cls
dca = retval.Attributes._delayed_child_attrs
if retval.Attributes._delayed_child_attrs is None:
retval.Attributes._delayed_child_attrs = {}
else:
retval.Attributes._delayed_child_attrs = dict(dca.items())
tn = kwargs.get("type_name", None)
if tn is not None:
retval.__type_name__ = tn
ns = kwargs.get("namespace", None)
if ns is not None:
retval.__namespace__ = ns
if cls is not ComplexModel:
cls._process_variants(retval)
_process_child_attrs(cls, retval, kwargs)
# we could be smarter, but customize is supposed to be called only
# during daemon initialization, so it's not really necessary.
ComplexModelBase.get_subclasses.memo.clear()
ComplexModelBase.get_flat_type_info.memo.clear()
ComplexModelBase.get_simple_type_info.memo.clear()
return retval
@classmethod
def _process_variants(cls, retval):
orig = getattr(retval, '__orig__', None)
if orig is not None:
if orig.Attributes._variants is None:
orig.Attributes._variants = WeakKeyDictionary()
orig.Attributes._variants[retval] = True
# _variants is only for the root class.
retval.Attributes._variants = None
@classmethod
def _append_field_impl(cls, field_name, field_type):
assert isinstance(field_name, string_types)
dcaa = cls.Attributes._delayed_child_attrs_all
if dcaa is not None:
field_type = field_type.customize(**dcaa)
dca = cls.Attributes._delayed_child_attrs
if dca is not None:
d_cust = dca.get(field_name, None)
if d_cust is not None:
field_type = field_type.customize(**d_cust)
cls._type_info[field_name] = field_type
ComplexModelBase.get_flat_type_info.memo.clear()
ComplexModelBase.get_simple_type_info.memo.clear()
@classmethod
def _append_to_variants(cls, field_name, field_type):
if cls.Attributes._variants is not None:
for c in cls.Attributes._variants:
c.append_field(field_name, field_type)
@classmethod
def append_field(cls, field_name, field_type):
cls._append_field_impl(field_name, field_type)
cls._append_to_variants(field_name, field_type)
@classmethod
def _insert_to_variants(cls, index, field_name, field_type):
if cls.Attributes._variants is not None:
for c in cls.Attributes._variants:
c.insert_field(index, field_name, field_type)
@classmethod
def _insert_field_impl(cls, index, field_name, field_type):
assert isinstance(index, int)
assert isinstance(field_name, string_types)
dcaa = cls.Attributes._delayed_child_attrs_all
if dcaa is not None:
field_type = field_type.customize(**dcaa)
dca = cls.Attributes._delayed_child_attrs
if dca is not None:
if field_name in dca:
d_cust = dca.pop(field_name)
field_type = field_type.customize(**d_cust)
cls._type_info.insert(index, (field_name, field_type))
ComplexModelBase.get_flat_type_info.memo.clear()
ComplexModelBase.get_simple_type_info.memo.clear()
@classmethod
def insert_field(cls, index, field_name, field_type):
cls._insert_field_impl(index, field_name, field_type)
cls._insert_to_variants(index, field_name, field_type)
@classmethod
def _replace_in_variants(cls, field_name, field_type):
if cls.Attributes._variants is not None:
for c in cls.Attributes._variants:
c._replace_field(field_name, field_type)
@classmethod
def _replace_field_impl(cls, field_name, field_type):
assert isinstance(field_name, string_types)
cls._type_info[field_name] = field_type
ComplexModelBase.get_flat_type_info.memo.clear()
ComplexModelBase.get_simple_type_info.memo.clear()
@classmethod
def _replace_field(cls, field_name, field_type):
cls._replace_field_impl(field_name, field_type)
cls._replace_in_variants(field_name, field_type)
@classmethod
def store_as(cls, what):
return cls.customize(store_as=what)
@classmethod
def novalidate_freq(cls):
return cls.customize(validate_freq=False)
@classmethod
def init_from(cls, other, **kwargs):
retval = (cls if cls.__orig__ is None else cls.__orig__)()
for k, v in cls._type_info.items():
try:
if k in kwargs:
retval._safe_set(k, kwargs[k], v, v.Attributes)
elif hasattr(other, k):
retval._safe_set(k, getattr(other, k), v, v.Attributes)
except AttributeError as e:
logger.warning("Error setting %s: %r", k, e)
return retval
@classmethod
def __respawn__(cls, ctx=None, filters=None):
if ctx is not None and ctx.in_object is not None and \
len(ctx.in_object) > 0:
retval = next(iter(ctx.in_object))
if retval is not None:
return retval
if ctx.descriptor.default_on_null:
return cls.get_deserialization_instance(ctx)
@add_metaclass(ComplexModelMeta)
class ComplexModel(ComplexModelBase):
"""The general complexType factory. The __call__ method of this class will
return instances, contrary to primitives where the same call will result in
customized duplicates of the original class definition.
Those who'd like to customize the class should use the customize method
(see :class:``spyne.model.ModelBase``).
"""
@add_metaclass(ComplexModelMeta)
class Array(ComplexModelBase):
"""This class generates a ComplexModel child that has one attribute that has
the same name as the serialized class. It's contained in a Python list.
"""
class Attributes(ComplexModelBase.Attributes):
_wrapper = True
def __new__(cls, serializer, member_name=None, wrapped=True, **kwargs):
if not wrapped:
if serializer.Attributes.max_occurs == 1:
kwargs['max_occurs'] = 'unbounded'
return serializer.customize(**kwargs)
retval = cls.customize(**kwargs)
_serializer = _get_spyne_type(cls.__name__, '__serializer__', serializer)
if _serializer is None:
raise ValueError("serializer=%r is not a valid spyne type" % serializer)
if issubclass(_serializer, SelfReference):
# hack to make sure the array passes ComplexModel sanity checks
# that are there to prevent empty arrays.
retval._type_info = {'_bogus': _serializer}
else:
retval._set_serializer(_serializer, member_name)
tn = kwargs.get("type_name", None)
if tn is not None:
retval.__type_name__ = tn
return retval
@classmethod
def _fill_empty_type_name(cls, parent_ns, parent_tn, k):
cls.__namespace__ = parent_ns
tn = "%s_%s%s" % (parent_tn, k, const.TYPE_SUFFIX)
child_v, = cls._type_info.values()
child_v.__type_name__ = tn
cls._type_info = TypeInfo({tn: child_v})
cls.__type_name__ = '%s%s%s' % (const.ARRAY_PREFIX, tn,
const.ARRAY_SUFFIX)
extends = child_v.__extends__
while extends is not None and extends.get_type_name() is cls.Empty:
extends._fill_empty_type_name(parent_ns, parent_tn,
k + const.PARENT_SUFFIX)
extends = extends.__extends__
@classmethod
def customize(cls, **kwargs):
serializer_attrs = kwargs.get('serializer_attrs', None)
if serializer_attrs is None:
return super(Array, cls).customize(**kwargs)
del kwargs['serializer_attrs']
logger.debug('Pass serializer attrs %r', serializer_attrs)
serializer, = cls._type_info.values()
return cls(serializer.customize(**serializer_attrs)).customize(**kwargs)
@classmethod
def _set_serializer(cls, serializer, member_name=None):
if serializer.get_type_name() is ModelBase.Empty: # A customized class
member_name = "OhNoes"
# mark array type name as "to be resolved later".
cls.__type_name__ = ModelBase.Empty
else:
if member_name is None:
member_name = serializer.get_type_name()
cls.__type_name__ = '%s%s%s' % (const.ARRAY_PREFIX,
serializer.get_type_name(),
const.ARRAY_SUFFIX)
# hack to default to unbounded arrays when the user didn't specify
# max_occurs.
if serializer.Attributes.max_occurs == 1:
serializer = serializer.customize(max_occurs=decimal.Decimal('inf'))
assert isinstance(member_name, string_types), member_name
cls._type_info = TypeInfo({member_name: serializer})
# the array belongs to its child's namespace, it doesn't have its own
# namespace.
@staticmethod
def resolve_namespace(cls, default_ns, tags=None):
(serializer,) = cls._type_info.values()
serializer.resolve_namespace(serializer, default_ns, tags)
if cls.__namespace__ is None:
cls.__namespace__ = serializer.get_namespace()
if cls.__namespace__ in PREFMAP:
cls.__namespace__ = default_ns
return ComplexModel.resolve_namespace(cls, default_ns, tags)
@classmethod
def get_serialization_instance(cls, value):
inst = ComplexModel.__new__(Array)
(member_name,) = cls._type_info.keys()
setattr(inst, member_name, value)
return inst
@classmethod
def get_deserialization_instance(cls, ctx):
return []
@classmethod
def get_inner_type(cls):
return next(iter(cls._type_info.values()))
class Iterable(Array):
"""This class generates a ``ComplexModel`` child that has one attribute that
has the same name as the serialized class. It's contained in a Python
iterable. The distinction from ``Array`` is made in the protocol
implementation; this class is just a marker.
Whenever you return a generator instead of a list, use this type: it tells
the intermediate machinery to NEVER actually try to iterate over the value.
An ``Array``, by contrast, may be iterated over, e.g. for logging purposes.
"""
class Attributes(Array.Attributes):
logged = False
class Push(PushBase):
"""The push interface to the `Iterable`.
Anything append()'ed to a `Push` instance is serialized and written to
outgoing stream immediately.
When using Twisted, Push callbacks are called from the reactor thread if
the instantiation is done in a reactor thread. Otherwise, callbacks are
called by `deferToThread`. Make sure to avoid relying on thread-local
stuff as `deferToThread` is not guaranteed to restore original thread
context.
"""
pass
def TTableModelBase():
from spyne.store.relational import add_column
class TableModelBase(ComplexModelBase):
@classmethod
def append_field(cls, field_name, field_type):
cls._append_field_impl(field_name, field_type)
# There could have been changes to field_type in ComplexModel so we
# should not use field_type directly from above
if cls.__table__ is not None:
add_column(cls, field_name, cls._type_info[field_name])
cls._append_to_variants(field_name, field_type)
@classmethod
def replace_field(cls, field_name, field_type):
raise NotImplementedError()
@classmethod
def insert_field(cls, index, field_name, field_type):
cls._insert_field_impl(index, field_name, field_type)
# There could have been changes to field_type in ComplexModel so we
# should not use field_type directly from above
if cls.__table__ is not None:
add_column(cls, field_name, cls._type_info[field_name])
cls._insert_to_variants(index, field_name, field_type)
return TableModelBase
# this has docstring repeated in the documentation at reference/model/complex.rst
def TTableModel(metadata=None, base=None, metaclass=None):
"""A TableModel template that generates a new TableModel class for each
call. If metadata is not supplied, a new one is instantiated.
"""
from sqlalchemy import MetaData
if base is None:
base = TTableModelBase()
if metaclass is None:
metaclass = ComplexModelMeta
@add_metaclass(metaclass)
class TableModel(base):
class Attributes(ComplexModelBase.Attributes):
sqla_metadata = metadata if metadata is not None else MetaData()
return TableModel
def Mandatory(cls, **_kwargs):
"""Customizes the given type to be a mandatory one. Has special cases for
:class:`spyne.model.primitive.Unicode` and
:class:`spyne.model.complex.Array`\\.
"""
kwargs = dict(min_occurs=1, nillable=False)
if cls.get_type_name() is not cls.Empty:
kwargs['type_name'] = '%s%s%s' % (const.MANDATORY_PREFIX,
cls.get_type_name(), const.MANDATORY_SUFFIX)
kwargs.update(_kwargs)
if issubclass(cls, Unicode):
kwargs.update(dict(min_len=1))
elif issubclass(cls, Array):
(k,v), = cls._type_info.items()
if v.Attributes.min_occurs == 0:
cls._type_info[k] = Mandatory(v)
return cls.customize(**kwargs)
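# Illustrative usage sketch (editor's addition, not part of the original
# module). It shows how ComplexModel subclasses collect their declared fields
# into _type_info and how Array() and Mandatory() derive customized variants.
# The class and field names below are hypothetical.
if __name__ == '__main__':
    class Address(ComplexModel):
        street = Unicode
        city = Unicode

    class Person(ComplexModel):
        name = Unicode
        addresses = Array(Address)

    # Declared fields are reachable through _type_info / get_flat_type_info().
    print(list(Person.get_flat_type_info(Person).keys()))
    # Mandatory() returns a customized variant with min_occurs=1, nillable=False.
    print(Mandatory(Person).Attributes.min_occurs)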
|
"""Generates useful information to include when reporting a bug in a library.
"""
__version__ = "0.1"
__author__ = "Steve Dower <steve.dower@python.org>"
import getpass
import hashlib
import importlib
import inspect
import os
import platform
import socket
import sys
import traceback
import unicodedata
from datetime import datetime
def from_namespace(ns, *exclude):
return {
k: repr(getattr(ns, k))
for k in dir(ns)
if k not in exclude and not (k.startswith("__") and k.endswith("__"))
}
def join(values, sep=None):
return (sep or ", ").join(
s if isinstance(s, str) else "None" if s is None else repr(s) for s in values
)
def join_paths(values):
return join(values, os.path.pathsep)
def collect_from_sys():
data = {
"prefix": sys.prefix,
"executable": sys.executable,
"argv": join(map(repr, sys.argv), sep=" "),
"platform": sys.platform,
}
data["implementation"] = from_namespace(
getattr(sys, "implementation", None), "version"
)
data["path"] = join_paths(sys.path)
return data
def collect_from_platform():
data = {
"os": os.name,
"platform": platform.platform(),
"build": platform.python_build()[0],
"build_date": platform.python_build()[1],
"version": platform.python_version(),
"architecture": platform.architecture()[0],
"machine": platform.machine(),
}
return data
def collect_from_module(module_name, extra_args):
try:
data = {}
module = importlib.import_module(module_name)
data.update(
{
k: getattr(module, k)
for k in [
"__file__",
"version",
"__version__",
"VERSION",
"__VERSION__",
]
if hasattr(module, k)
}
)
info = getattr(module, "_reportabug_info", None)
if info:
try:
data.update(info(extra_args))
except Exception as ex:
data["_error_type"] = type(ex).__name__
data["_error_full"] = str(ex)
return data
except Exception as ex:
return {"_error_type": type(ex).__name__, "_error_full": str(ex)}
def collect_from_environ():
data = {
k: os.environ.get(k)
for k in [
"PYTHONPATH",
"PYTHONHOME",
"PYTHONSTARTUP",
"PYTHONCASEOK",
"PYTHONIOENCODING",
"PYTHONFAULTHANDLER",
"PYTHONHASHSEED",
"PYTHONMALLOC",
"PYTHONCOERCECLOCALE",
"PYTHONBREAKPOINT",
"PYTHONDEVMODE",
"PATH",
]
if k in os.environ
}
data["cwd"] = os.getcwd()
return data
def collect_from_sys_path():
data = {}
for i, path in enumerate(sys.path):
try:
data[str(i)] = join_paths(sorted(os.listdir(path)))
except OSError:
data[str(i)] = "(unreadable)"
return data
def censor_word(word):
md5 = hashlib.md5()
md5.update(word.encode("utf-8"))
cats = " ".join(sorted(set(map(unicodedata.category, word))))
return "md5=`{}`, Unicode=`{}`".format(md5.hexdigest(), cats)
def censor(data, bad_words):
if data is None:
return data
if isinstance(data, str):
for k, v in bad_words:
data = data.replace(k, v)
return data
if isinstance(data, list):
return [censor(k, bad_words) for k in data]
if isinstance(data, dict):
return {k: censor(v, bad_words) for k, v in data.items()}
return data
def flatten_dict(data, prefix=""):
if data is None:
return
if not isinstance(data, dict):
yield prefix, data
return
for k in sorted(data):
v = data[k]
p = str(k) if not prefix else "{}.{}".format(prefix, k)
yield from flatten_dict(v, p)
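# Illustrative sketch (editor's addition): flatten_dict() yields dotted keys
# for nested dicts, e.g.
#   list(flatten_dict({'a': {'b': 1}, 'c': 2})) == [('a.b', 1), ('c', 2)]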
def format_markdown(data):
print("# ReportABug Summary")
print()
print(
"Generated",
datetime.now(),
"with arguments [{}]".format(", ".join("`{!r}`".format(a) for a in sys.argv)),
)
print()
print("* Python", data["platform"]["version"], data["platform"]["architecture"])
print("* Platform", data["platform"]["platform"], data["platform"]["machine"])
modules = data["module_info"]
for k in sorted(modules):
mod = modules[k]
if "summary" in mod:
print("* `{}` {}".format(k, mod["summary"]))
print()
print("# Module info")
for k in sorted(modules):
print("<details><summary>{}</summary></p>".format(k))
print()
print("```python")
for k2, v2 in flatten_dict(modules[k]):
print("{} = {!r}".format(k2, v2))
print("```")
print()
print("</p></details>")
print()
print("<details><summary>sys</summary><p>")
print()
print("```python")
for k in sorted(data["sys"]):
if k == "path":
print("path = [")
for p in data["sys"]["path"].split(os.pathsep):
print(" {!r},".format(p))
print("]")
else:
print("{} = {!r}".format(k, data["sys"][k]))
print("```")
print()
print("</p></details>")
print()
print("<details><summary>platform</summary><p>")
print()
print("```python")
for k in sorted(data["platform"]):
print("{} = {!r}".format(k, data["platform"][k]))
print("```")
print()
print("</p></details>")
print()
print("## Environment")
print("<details><summary>Detail</summary><p>")
print()
print("```python")
for k in sorted(data["environ"]):
if k.lower() == "path":
prefix = "PATH ="
for p in data["environ"][k].split(os.path.pathsep):
print(prefix, repr(p))
prefix = " "
else:
print(k, "=", repr(data["environ"][k]))
print("```")
print()
print("</p></details>")
print()
print("## Censored words")
print("<details><summary>Detail</summary><p>")
print()
print(" Key | Info")
print("-----|-----")
for k in sorted(data["censored"]):
print(k, "|", data["censored"][k])
print()
print("</p></details>")
print()
def format_raw(data):
lines = list(flatten_dict(data))
max_key = max(len(k) for k, _ in lines)
if max_key > 40:
max_key = 40
for k, v in lines:
print(k.ljust(max_key), v)
def main():
module_info = {}
censored = {
"$USER": censor_word(getpass.getuser()),
"$HOST": censor_word(socket.gethostname()),
}
bad_words = [(getpass.getuser(), "$USER"), (socket.gethostname(), "$HOST")]
data = {
"sys": collect_from_sys(),
"platform": collect_from_platform(),
"environ": collect_from_environ(),
"PYTHONPATH": collect_from_sys_path(),
"module_info": module_info,
"censored": censored,
}
# For now, assume args without leading '-' is module name
for a in sys.argv[1:]:
if a[0] != "-":
module_info[a] = collect_from_module(a, None)
data = censor(data, bad_words)
format_markdown(data)
# format_raw(data)
if __name__ == "__main__":
sys.exit(int(main() or 0))
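# Illustrative sketch (editor's addition): collect_from_module() above looks
# for an optional module-level `_reportabug_info(extra_args)` hook and merges
# the dict it returns into the report. The fake module below is hypothetical
# and only demonstrates the hook's shape.
def _demo_reportabug_hook():
    import types

    fake = types.ModuleType("fakelib")
    fake.__version__ = "1.2.3"
    fake._reportabug_info = lambda extra_args: {"summary": "fakelib demo build"}
    sys.modules["fakelib"] = fake
    # importlib.import_module() returns the entry already present in
    # sys.modules, so collect_from_module() picks up __version__ and the hook.
    return collect_from_module("fakelib", None)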
|
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
"""Music - Common Methods"""
from oslo_log import log as logging
from conductor.common import db_backend
from conductor.common.music import api
LOG = logging.getLogger(__name__)
def music_api(configuration):
"""Create or return a Music API instance"""
configuration = dict(configuration)
kwargs = {
'host': configuration.get('host'),
'port': configuration.get('port'),
'version': configuration.get('version'),
'replication_factor': configuration.get('replication_factor'),
}
api_instance = db_backend.get_client(**kwargs)
# Create the keyspace if necessary
# TODO(jdandrea): Use oslo.config with a [music] section
# keyspace = conf.music.get('keyspace')
# api_instance.create_keyspace(keyspace)
return api_instance
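# Illustrative usage sketch (editor's addition): music_api() only forwards
# host/port/version/replication_factor from the given mapping to
# db_backend.get_client(). The values below are hypothetical.
def _example_music_api_usage():
    return music_api({
        'host': 'localhost',
        'port': 8080,
        'version': 'v2',
        'replication_factor': 1,
    })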
|
"""
NLP Sandbox Date Annotator API
# Overview The OpenAPI specification implemented by NLP Sandbox Annotators. # noqa: E501
The version of the OpenAPI document: 1.0.2
Contact: thomas.schaffter@sagebionetworks.org
Generated by: https://openapi-generator.tech
"""
import unittest
from unittest.mock import patch
import nlpsandbox
from nlpsandbox.api.text_person_name_annotation_api import TextPersonNameAnnotationApi # noqa: E501
class TestTextPersonNameAnnotationApi(unittest.TestCase):
"""TextPersonNameAnnotationApi unit test stubs"""
def setUp(self):
self.api = TextPersonNameAnnotationApi() # noqa: E501
self.patcher = patch('nlpsandbox.api_client.ApiClient.call_api')
self.mock_foo = self.patcher.start()
def tearDown(self):
self.patcher.stop()
def test_create_text_person_name_annotations(self):
"""Test case for create_text_person_name_annotations
Annotate person names in a clinical note # noqa: E501
"""
self.api.create_text_person_name_annotations(
text_person_name_annotation_request={
"note": {
"identifier": "note-1",
"type": "note-type",
"text": "my text here",
"patient_id": "patient-1"
}
}
)
if __name__ == '__main__':
unittest.main()
|
# Contest scoring: each of the 5 problems is worth marks[i] points, decayed
# linearly with submission time (but never below 30% of its value) and
# reduced by 50 points per wrong submission; successful hacks add 100 points
# each and unsuccessful ones cost 50 each.
marks = [500, 1000, 1500, 2000, 2500]
time = list(map(int, input().split()))   # submission time per problem
wrong = list(map(int, input().split()))  # wrong submissions per problem
h, u = map(int, input().split())         # successful / unsuccessful hacks
ans = 0
for i in range(5):
    ans = ans + max([0.3 * marks[i], ((1 - time[i] / 250) * marks[i]) - (50 * wrong[i])])
ans = ans + h * 100 - 50 * u
print(int(ans))
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path
from spack import *
class Libfabric(AutotoolsPackage):
"""The Open Fabrics Interfaces (OFI) is a framework focused on exporting
fabric communication services to applications."""
homepage = "https://libfabric.org/"
url = "https://github.com/ofiwg/libfabric/releases/download/v1.6.1/libfabric-1.6.1.tar.gz"
git = "https://github.com/ofiwg/libfabric.git"
version('develop', branch='master')
version('1.9.0', sha256='559bfb7376c38253c936d0b104591c3394880376d676894895706c4f5f88597c',
url='https://github.com/ofiwg/libfabric/releases/download/v1.9.0/libfabric-1.9.0.tar.bz2')
version('1.8.1', sha256='3c560b997f9eafd89f961dd8e8a29a81aad3e39aee888e3f3822da419047dc88',
url='https://github.com/ofiwg/libfabric/releases/download/v1.8.1/libfabric-1.8.1.tar.bz2')
version('1.8.0', sha256='c4763383a96af4af52cd81b3b094227f5cf8e91662f861670965994539b7ee37',
url='https://github.com/ofiwg/libfabric/releases/download/v1.8.0/libfabric-1.8.0.tar.bz2')
version('1.7.1', sha256='312e62c57f79b7274f89c41823932c00b15f1cc8de9c1f8dce17cd7fdae66fa1')
version('1.7.0', sha256='9d7059e2ef48341f967f2a20ee215bc50f9079b32aad485f654098f83040e4be')
version('1.6.2', sha256='b1a9cf8c47189a1c918f8b5710d05cb50df6b47a1c9b2ba51d927e97503b4df0')
version('1.6.1', sha256='ac85f18bbf09226e868d72771ecba39cfdb7915aab3aeb855c95f8be7817f8bc')
version('1.6.0', sha256='cd7d4543cf706820e4a33003457eff97336b5160f35d0e8b001aea18b5470423')
version('1.5.3', sha256='770e505185074b4c66a0c33ac2155670142746a71a6299c286f6d5cd220cbff8')
version('1.5.0', sha256='f62709e70fab6abea719402da854f3c6ab60369be6b1e31e4f77554c7454da28')
version('1.4.2', sha256='858e30d92b69ee5e47ac10a8ac0c731b491d75a6e28267a128f3d6eb43f940a1')
fabrics = ('psm',
'psm2',
'sockets',
'verbs',
'usnic',
'gni',
'xpmem',
'udp',
'rxm',
'rxd',
'mlx',
'tcp',
'efa',
'mrail',
'shm')
variant('fabrics',
default='sockets,tcp,udp',
description='A list of enabled fabrics',
values=fabrics,
multi=True)
# NOTE: the 'kdreg' variant enables use of the special /dev/kdreg file to
# assist in memory registration caching in the GNI provider. This
# device file can only be opened once per process, however, and thus it
# frequently conflicts with MPI.
variant('kdreg', default=False,
description='Enable kdreg on supported Cray platforms')
# For version 1.9.0:
# headers: fix forward-declaration of enum fi_collective_op with C++
patch('https://github.com/ofiwg/libfabric/commit/2e95b0efd85fa8a3d814128e34ec57ffd357460e.patch',
sha256='71f06e8bf0adeccd425b194ac524e4d596469e9dab9e7a4f8bb209e6b9a454f4',
when='@1.9.0')
depends_on('rdma-core', when='fabrics=verbs')
depends_on('opa-psm2', when='fabrics=psm2')
depends_on('psm', when='fabrics=psm')
depends_on('ucx', when='fabrics=mlx')
depends_on('m4', when='@develop', type='build')
depends_on('autoconf', when='@develop', type='build')
depends_on('automake', when='@develop', type='build')
depends_on('libtool', when='@develop', type='build')
resource(name='fabtests',
url='https://github.com/ofiwg/libfabric/releases/download/v1.9.0/fabtests-1.9.0.tar.bz2',
sha256='60cc21db7092334904cbdafd142b2403572976018a22218e7c453195caef366e',
placement='fabtests', when='@1.9.0')
resource(name='fabtests',
url='https://github.com/ofiwg/libfabric/releases/download/v1.8.0/fabtests-1.8.0.tar.gz',
sha256='4b9af18c9c7c8b28eaeac4e6e9148bd2ea7dc6b6f00f8e31c90a6fc536c5bb6c',
placement='fabtests', when='@1.8.0')
resource(name='fabtests',
url='https://github.com/ofiwg/libfabric/releases/download/v1.7.0/fabtests-1.7.0.tar.gz',
sha256='ebb4129dc69dc0e1f48310ce1abb96673d8ddb18166bc595312ebcb96e803de9',
placement='fabtests', when='@1.7.0')
resource(name='fabtests',
url='https://github.com/ofiwg/fabtests/releases/download/v1.6.1/fabtests-1.6.1.tar.gz',
sha256='d357466b868fdaf1560d89ffac4c4e93a679486f1b4221315644d8d3e21174bf',
placement='fabtests', when='@1.6.1')
resource(name='fabtests',
url='https://github.com/ofiwg/fabtests/releases/download/v1.6.0/fabtests-1.6.0.tar.gz',
sha256='dc3eeccccb005205017f5af60681ede15782ce202a0103450a6d56a7ff515a67',
placement='fabtests', when='@1.6.0')
resource(name='fabtests',
url='https://github.com/ofiwg/fabtests/releases/download/v1.5.3/fabtests-1.5.3.tar.gz',
sha256='3835b3bf86cd00d23df0ddba8bf317e4a195e8d5c3c2baa918b373d548f77f29',
placement='fabtests', when='@1.5.3')
resource(name='fabtests',
url='https://github.com/ofiwg/fabtests/releases/download/v1.5.0/fabtests-1.5.0.tar.gz',
sha256='1dddd446c3f1df346899f9a8636f1b4265de5b863103ae24876e9f0c1e40a69d',
placement='fabtests', when='@1.5.0')
resource(name='fabtests',
url='https://github.com/ofiwg/fabtests/releases/download/v1.4.2/fabtests-1.4.2.tar.gz',
sha256='3b78d0ca1b223ff21b7f5b3627e67e358e3c18b700f86b017e2233fee7e88c2e',
placement='fabtests', when='@1.4.2')
def setup_build_environment(self, env):
if self.run_tests:
env.prepend_path('PATH', self.prefix.bin)
@when('@develop')
def autoreconf(self, spec, prefix):
bash = which('bash')
bash('./autogen.sh')
if self.run_tests:
with working_dir('fabtests'):
bash('./autogen.sh')
def configure_args(self):
args = []
if '+kdreg' in self.spec:
args.append('--with-kdreg=yes')
else:
args.append('--with-kdreg=no')
for fabric in self.fabrics:
if 'fabrics=' + fabric in self.spec:
args.append('--enable-{0}=yes'.format(fabric))
else:
args.append('--enable-{0}=no'.format(fabric))
return args
def install(self, spec, prefix):
# Call main install method
super(Libfabric, self).install(spec, prefix)
# Build and install fabtests, if available
if not os.path.isdir('fabtests'):
return
with working_dir('fabtests'):
configure = Executable('./configure')
configure('--prefix={0}'.format(self.prefix),
'--with-libfabric={0}'.format(self.prefix))
make()
make('install')
def installcheck(self):
fi_info = Executable(self.prefix.bin.fi_info)
fi_info()
# Run fabtests test suite if available
if not os.path.isdir('fabtests'):
return
if self.spec.satisfies('@1.8.0,1.9.0'):
# make test seems broken.
return
with working_dir('fabtests'):
make('test')
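# Illustrative note (editor's addition): the 'fabrics' multi-valued variant
# and the 'kdreg' flag above map directly onto configure arguments in
# configure_args(). For example, a spec such as
#   libfabric fabrics=verbs,tcp,udp +kdreg
# (spec string is illustrative) would produce --with-kdreg=yes plus
# --enable-verbs=yes, --enable-tcp=yes, --enable-udp=yes and
# --enable-<fabric>=no for every other fabric in the list.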
|
import unittest
from src.assignments.invoice import Invoice
from src.assignments.invoice_item import InvoiceItem
class Test_Assign9(unittest.TestCase):
invoice_items = [] #list of Invoice Item instance objects
def test_invoice_item_extended_cost_w_qty_10_cost_5(self):
'''
Create an Invoice item instance with argument values: 'Widget1', 10, and 5
The extended cost result should be 50;
'''
invoice_item = InvoiceItem('Widget1', 10, 5)
self.assertEqual(50, invoice_item.get_extended_cost())
#create the assert code
def test_invoice__w_3_invoice_items(self):
'''
Create an Invoice instance with argument values: 'ABC company', '03282018'
Three invoice items as follows argument values:
'Widget1', 10, and 5
'Widget2', 7, and 8
'Widget3', 20, and 10
Get Extended result should be 50 + 56 + 200 = 306
'''
invoice = Invoice('ABC Company', '03282018')
invoice.add_invoice_item(InvoiceItem('Widget1', 10, 5))
invoice.add_invoice_item(InvoiceItem('Widget2', 7, 8))
invoice.add_invoice_item(InvoiceItem('Widget3', 20, 10))
self.assertEqual(306, invoice.get_invoice_total())
#create the assert equal code
if __name__ == '__main__':
unittest.main(verbosity=2)
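# Illustrative sketch (editor's addition): a minimal shape for the classes
# under test, inferred only from the assertions above. The real
# src.assignments.invoice modules may differ; these names are hypothetical
# and deliberately do not shadow the imported classes.
class _SketchInvoiceItem:
    def __init__(self, name, quantity, unit_cost):
        self.name = name
        self.quantity = quantity
        self.unit_cost = unit_cost

    def get_extended_cost(self):
        # e.g. 10 units at unit cost 5 -> 50, as in the first test above
        return self.quantity * self.unit_cost

class _SketchInvoice:
    def __init__(self, customer, date):
        self.customer = customer
        self.date = date
        self._items = []

    def add_invoice_item(self, item):
        self._items.append(item)

    def get_invoice_total(self):
        # 50 + 56 + 200 = 306 for the three items in the second test above
        return sum(item.get_extended_cost() for item in self._items)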
|
import os
log_path = os.getcwd()
client_id = ''
secret_key = ''
redirect_url = ''
username = ''
password = ''
pin1 = ''
pin2 = ''
pin3 = ''
pin4 = ''
response_type = "code"
grant_type = "authorization_code"
|
import collections
import errno
if not hasattr(errno, 'ECANCELED'):
errno.ECANCELED = 125 # 2.7 errno doesn't define this, so guess.
import os
import sys
import unittest
import uuid
# This is an ugly hack but it works; you have to say "-v -v", not "-vv".
verbose = sys.argv.count('-v') + sys.argv.count('--verbose')
verbose += int(os.environ.get('TEST_VERBOSE', 0))
if verbose > 1:
import logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(name)-20s %(levelname)-8s %(message)s')
if verbose < 3:
logging.getLogger('nvm.pmemobj.trace').setLevel(logging.WARNING)
class TestCase(unittest.TestCase):
# XXX I'm not sure how one gets a real pmem file, so keep this factored.
def _test_fn(self):
fn = "{}.pmem".format(uuid.uuid4())
self.addCleanup(lambda: os.remove(fn) if os.path.exists(fn) else None)
return fn
if sys.version_info[0] < 3:
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
assertCountEqual = unittest.TestCase.assertItemsEqual
def parameterize(cls):
"""A test method parameterization class decorator.
Parameters are specified as the value of a class attribute that ends with
the string '_params'. Call the portion before '_params' the prefix. Then
a method to be parameterized must have the same prefix, the string
'_as_', and an arbitrary suffix.
The value of the _params attribute may be either a dictionary or a list.
The values in the dictionary and the elements of the list may either be
single values, or a list. If single values, they are turned into single
element tuples. However derived, the resulting sequence is passed via
*args to the parameterized test function.
In a _params dictionary, the keys become part of the name of the generated
tests. In a _params list, each element is converted into a string by joining
the string values of its items with '_' and replacing any blanks with '_'s,
and this becomes part of the name.
The full name of a generated test is a 'test_' prefix, the portion of the
test function name after the '_as_' separator, plus an '_', plus the name
derived as explained above.
For example, if we have:
count_params = range(3)
def count_as_foo_arg(self, foo):
self.assertEqual(foo+1, myfunc(foo))
we will get parameterized test methods named:
test_foo_arg_0
test_foo_arg_1
test_foo_arg_2
Or we could have:
example_params = {'foo': ('bar', 1), 'bing': ('bang', 2)}
def example_as_myfunc_input(self, name, count):
self.assertEqual(name+str(count), myfunc(name, count))
and get:
test_myfunc_input_foo
test_myfunc_input_bing
Note: if and only if the generated test name is a valid identifier can it
be used to select the test individually from the unittest command line.
"""
paramdicts = {}
testers = collections.defaultdict(list)
for name, attr in cls.__dict__.items():
if name.endswith('_params'):
if not hasattr(attr, 'keys'):
d = {}
for x in attr:
if not hasattr(x, '__iter__') or hasattr(x, 'encode'):
x = (x,)
n = '_'.join(str(v) for v in x).replace(' ', '_')
d[n] = x
attr = d
paramdicts[name[:-7] + '_as_'] = attr
if '_as_' in name:
testers[name.split('_as_')[0] + '_as_'].append(name)
testfuncs = {}
for name in paramdicts:
if name not in testers:
raise ValueError("No tester found for {}".format(name))
for name in testers:
if name not in paramdicts:
raise ValueError("No params found for {}".format(name))
for name, attr in cls.__dict__.items():
for paramsname, paramsdict in paramdicts.items():
if name.startswith(paramsname):
testnameroot = 'test_' + name[len(paramsname):]
for paramname, params in paramsdict.items():
if (not hasattr(params, '__iter__')
or hasattr(params, 'encode')):
params = (params,)
test = (lambda self, name=name, params=params:
getattr(self, name)(*params))
testname = testnameroot + '_' + paramname
test.__name__ = testname
testfuncs[testname] = test
for key, value in testfuncs.items():
setattr(cls, key, value)
return cls
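# Illustrative usage sketch (editor's addition): applying parameterize() to a
# TestCase with a hypothetical `count_params` / `count_as_*` pair generates
# test_increment_0, test_increment_1 and test_increment_2.
@parameterize
class ParameterizeExampleTests(TestCase):
    count_params = range(3)

    def count_as_increment(self, n):
        # trivial check, only here to show the generated method signature
        self.assertEqual(n + n, 2 * n)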
|
from cache_any_client.operation_enum import OperationEnum
class Statement:
def __init__(self, jsonAsString=None):
self.id: str = None
self.value: any = None
if jsonAsString is None:
return
if 'id' in jsonAsString and jsonAsString['id'] is not None:
self.id = jsonAsString['id']
if 'value' in jsonAsString and jsonAsString['value'] is not None:
self.value = jsonAsString['value']
class Operation:
def __init__(self, jsonAsString=None):
self.operation: OperationEnum = None
self.statement:Statement = None
if jsonAsString is None:
return
if 'operation' in jsonAsString and jsonAsString['operation'] is not None:
self.operation = OperationEnum(jsonAsString['operation'])
if 'statement' in jsonAsString and jsonAsString['statement'] is not None:
self.statement = Statement(jsonAsString['statement'])
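# Illustrative usage sketch (editor's addition): despite the parameter name
# jsonAsString, both constructors expect an already-parsed dict, e.g. the
# result of json.loads(). The keys/values below are hypothetical and the
# 'operation' value must be a valid OperationEnum member.
def _example_operation_from_dict(raw):
    # raw looks like:
    #   {'operation': <OperationEnum value>,
    #    'statement': {'id': 'user:42', 'value': {'name': 'Ada'}}}
    op = Operation(raw)
    return op.operation, (op.statement.id if op.statement else None)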
|
"""
MDES Digital Enablement API
These APIs are designed as RPC style stateless web services where each API endpoint represents an operation to be performed. All request and response payloads are sent in the JSON (JavaScript Object Notation) data-interchange format. Each endpoint in the API specifies the HTTP Method used to access it. All strings in request and response objects are to be UTF-8 encoded. Each API URI includes the major and minor version of API that it conforms to. This will allow multiple concurrent versions of the API to be deployed simultaneously. <br><br> **Authentication** <br><br> Mastercard uses OAuth 1.0a with body hash extension for authenticating the API clients. This requires every request that you send to Mastercard to be signed with an RSA private key. A private-public RSA key pair must be generated consisting of: <br><br> 1. A private key for the OAuth signature for API requests. It is recommended to keep the private key in a password-protected or hardware keystore. <br> 2. A public key is shared with Mastercard during the project setup process through either a certificate signing request (CSR) or the API Key Generator. Mastercard will use the public key to verify the OAuth signature that is provided on every API call.<br> An OAUTH1.0a signer library is available on [GitHub](https://github.com/Mastercard/oauth1-signer-java) <br><br> **Encryption** <br><br> All communications between Issuer web service and the Mastercard gateway is encrypted using TLS. <br><br> **Additional Encryption of Sensitive Data** <br><br> In addition to the OAuth authentication, when using MDES Digital Enablement Service, any PCI sensitive and all account holder Personally Identifiable Information (PII) data must be encrypted. This requirement applies to the API fields containing encryptedData. Sensitive data is encrypted using a symmetric session (one-time-use) key. The symmetric session key is then wrapped with an RSA Public Key supplied by Mastercard during API setup phase (the Customer Encryption Key). <br> Java Client Encryption Library available on [GitHub](https://github.com/Mastercard/client-encryption-java) # noqa: E501
The version of the OpenAPI document: 1.3.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from openapi_client.exceptions import ApiAttributeError
def lazy_import():
from openapi_client.model.funding_account_data import FundingAccountData
globals()['FundingAccountData'] = FundingAccountData
class FundingAccountInfoEncryptedPayload(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('public_key_fingerprint',): {
'max_length': 64,
},
('encrypted_key',): {
'max_length': 512,
},
('iv',): {
'max_length': 32,
'min_length': 32,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'public_key_fingerprint': (str,), # noqa: E501
'encrypted_key': (str,), # noqa: E501
'oaep_hashing_algorithm': (str,), # noqa: E501
'iv': (str,), # noqa: E501
'encrypted_data': (FundingAccountData,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'public_key_fingerprint': 'publicKeyFingerprint', # noqa: E501
'encrypted_key': 'encryptedKey', # noqa: E501
'oaep_hashing_algorithm': 'oaepHashingAlgorithm', # noqa: E501
'iv': 'iv', # noqa: E501
'encrypted_data': 'encryptedData', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""FundingAccountInfoEncryptedPayload - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
public_key_fingerprint (str): The fingerprint of the public key used to encrypt the ephemeral AES key. . [optional] # noqa: E501
encrypted_key (str): One-time use AES key encrypted by the MasterCard public key (as identified by publicKeyFingerprint) using the OAEP or PKCS#1 v1.5 scheme (depending on the value of oaepHashingAlgorithm. . [optional] # noqa: E501
oaep_hashing_algorithm (str): Hashing algorithm used with the OAEP scheme. Must be either SHA256 or SHA512. . [optional] # noqa: E501
iv (str): The initialization vector used when encrypting data using the one-time use AES key. Must be exactly 16 bytes (32 character hex string) to match the block size. If not present, an IV of zero is assumed. . [optional] # noqa: E501
encrypted_data (FundingAccountData): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""FundingAccountInfoEncryptedPayload - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
public_key_fingerprint (str): The fingerprint of the public key used to encrypt the ephemeral AES key. . [optional] # noqa: E501
encrypted_key (str): One-time use AES key encrypted by the MasterCard public key (as identified by publicKeyFingerprint) using the OAEP or PKCS#1 v1.5 scheme (depending on the value of oaepHashingAlgorithm. . [optional] # noqa: E501
oaep_hashing_algorithm (str): Hashing algorithm used with the OAEP scheme. Must be either SHA256 or SHA512. . [optional] # noqa: E501
iv (str): The initialization vector used when encrypting data using the one-time use AES key. Must be exactly 16 bytes (32 character hex string) to match the block size. If not present, an IV of zero is assumed. . [optional] # noqa: E501
encrypted_data (FundingAccountData): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
from typing import List
import matplotlib.pyplot as plt
class Bar:
def __init__(self, x: List[float], y: List[float]) -> None:
"""
Constructor for the bar class. Serves as a wrapper around
matplotlib's bar charts.
Args:
x (List[float]): The values to be plotted with
respect to the x-axis.
y (List[float]): The values to be plotted with
respect to the y-axis.
"""
self.x = x
self.y = y
def plot(self) -> None:
"""
Renders the bar chart using matplotlib api.
"""
# TODO: Pretty barebones in terms of flexibility.
# What else should go here? :,)
plt.figure()
plt.bar(self.x, self.y, width=0.1, color='black')
plt.xlabel('m/z', fontweight='bold', color='black',
fontsize='13', horizontalalignment='center')
plt.ylabel('Signal Intensity', fontweight='bold',
color='black', fontsize='13', horizontalalignment='center')
plt.show()
class Bar3D:
def __init__(self, x: List[float], y: List[float], z: List[float]) -> None:
"""
Constructor for the Bar3D class. Serves as a wrapper
around matplotlib's 3d bar projection.
Args:
x (List[float]): The values to be plotted with respect
to the x-axis.
y (List[float]): The values to be plotted with respect
to the y-axis.
z (List[float]): The values to be plotted with respect
to the z-axis.
"""
self.x = x
self.y = y
self.z = z
def plot(self) -> None:
"""
Renders the 3D bar chart projection.
"""
# Pretty barebones in terms of flexibility
# What else do we need here??
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
zpos = [0 for i in self.x]
dx = [1 for i in self.x]
dy = [1 for i in self.x]
ax.bar3d(self.x, self.y, zpos, dx, dy, self.z, color='#FFADF5')
ax.set_xlabel('m/z')
ax.set_ylabel('Retention Time')
ax.set_zlabel('Signal Intensity')
plt.show()
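# Hedged usage sketch (not in the original file): render a toy spectrum with the
# Bar and Bar3D wrappers. The m/z, retention time and intensity values are made up.
if __name__ == '__main__':
    Bar(x=[100.0, 150.5, 210.2], y=[0.4, 1.0, 0.7]).plot()
    Bar3D(x=[100.0, 150.5], y=[1.2, 3.4], z=[0.4, 1.0]).plot()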
|
import os
from base64 import b64decode, b64encode
from flask import Flask, Blueprint, render_template, request, redirect, jsonify
from logging import getLogger
import jsonrpclib
app = Flask(__name__)
app.config['DEBUG'] = False
app.config['LOG_DIR'] = '/tmp/'
if os.environ.get('HSELING_WEB_POEM_GENERATOR_SETTINGS'):
app.config.from_envvar('HSELING_WEB_POEM_GENERATOR_SETTINGS')
app.config['HSELING_API_ENDPOINT'] = os.environ.get('HSELING_API_ENDPOINT')
app.config['HSELING_RPC_ENDPOINT'] = os.environ.get('HSELING_RPC_ENDPOINT')
print(app.config)
def get_server_endpoint():
HSELING_RPC_ENDPOINT = app.config.get('HSELING_RPC_ENDPOINT')
return HSELING_RPC_ENDPOINT
def get_jsonrpc_server():
jsonrpc_endpoint = get_server_endpoint()
return jsonrpclib.Server(jsonrpc_endpoint)
if not app.debug:
import logging
from logging.handlers import TimedRotatingFileHandler
# https://docs.python.org/3.6/library/logging.handlers.html#timedrotatingfilehandler
file_handler = TimedRotatingFileHandler(os.path.join(app.config['LOG_DIR'], 'hseling_web_poem_generator.log'), 'midnight')
file_handler.setLevel(logging.WARNING)
file_handler.setFormatter(logging.Formatter('<%(asctime)s> <%(levelname)s> %(message)s'))
app.logger.addHandler(file_handler)
log = getLogger(__name__)
@app.route('/web/healthz')
def healthz():
app.logger.info('Health checked')
return jsonify({"status": "ok", "message": "hseling-web-poem-generator"})
@app.route('/web/')
def index():
a = int(request.args.get('a', 1))
b = int(request.args.get('b', 2))
server = get_jsonrpc_server()
try:
result = server.add(a, b)
except ConnectionRefusedError:
result = None
return render_template('index.html.j2', result=result)
@app.route('/web/test')
def index_test():
return render_template('index.html.j2', result="This is a string!")
@app.route('/')
def index_redirect():
return redirect('/web/')
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True, port=8000)
__all__ = ['app']
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from xml.dom import minidom
from grit.format.policy_templates.writers import xml_formatted_writer
def GetWriter(config):
  '''Factory method for instantiating the ADMLWriter. Every Writer needs a
GetWriter method because the TemplateFormatter uses this method to
instantiate a Writer.
'''
return ADMLWriter(['win'], config)
class ADMLWriter(xml_formatted_writer.XMLFormattedWriter):
''' Class for generating an ADML policy template. It is used by the
PolicyTemplateGenerator to write the ADML file.
'''
# DOM root node of the generated ADML document.
_doc = None
# The string-table contains all ADML "string" elements.
_string_table_elem = None
# The presentation-table is the container for presentation elements, that
# describe the presentation of Policy-Groups and Policies.
_presentation_table_elem = None
def _AddString(self, parent, id, text):
''' Adds an ADML "string" element to the passed parent. The following
ADML snippet contains an example:
<string id="$(id)">$(text)</string>
Args:
parent: Parent element to which the new "string" element is added.
id: ID of the newly created "string" element.
text: Value of the newly created "string" element.
'''
string_elem = self.AddElement(parent, 'string', {'id': id})
string_elem.appendChild(self._doc.createTextNode(text))
def WritePolicy(self, policy):
'''Generates the ADML elements for a Policy.
<stringTable>
...
<string id="$(policy_group_name)">$(caption)</string>
<string id="$(policy_group_name)_Explain">$(description)</string>
</stringTable>
<presentationTables>
...
<presentation id=$(policy_group_name)/>
</presentationTables>
Args:
policy: The Policy to generate ADML elements for.
'''
policy_type = policy['type']
policy_name = policy['name']
if 'caption' in policy:
policy_caption = policy['caption']
else:
policy_caption = policy_name
if 'desc' in policy:
policy_description = policy['desc']
else:
policy_description = policy_name
if 'label' in policy:
policy_label = policy['label']
else:
policy_label = policy_name
self._AddString(self._string_table_elem, policy_name, policy_caption)
self._AddString(self._string_table_elem, policy_name + '_Explain',
policy_description)
presentation_elem = self.AddElement(
self._presentation_table_elem, 'presentation', {'id': policy_name})
if policy_type == 'main':
pass
elif policy_type in ('string', 'dict'):
# 'dict' policies are configured as JSON-encoded strings on Windows.
textbox_elem = self.AddElement(presentation_elem, 'textBox',
{'refId': policy_name})
label_elem = self.AddElement(textbox_elem, 'label')
label_elem.appendChild(self._doc.createTextNode(policy_label))
elif policy_type == 'int':
textbox_elem = self.AddElement(presentation_elem, 'decimalTextBox',
{'refId': policy_name})
textbox_elem.appendChild(self._doc.createTextNode(policy_label + ':'))
elif policy_type in ('int-enum', 'string-enum'):
for item in policy['items']:
self._AddString(self._string_table_elem, item['name'], item['caption'])
dropdownlist_elem = self.AddElement(presentation_elem, 'dropdownList',
{'refId': policy_name})
dropdownlist_elem.appendChild(self._doc.createTextNode(policy_label))
elif policy_type == 'list':
self._AddString(self._string_table_elem,
policy_name + 'Desc',
policy_caption)
listbox_elem = self.AddElement(presentation_elem, 'listBox',
{'refId': policy_name + 'Desc'})
listbox_elem.appendChild(self._doc.createTextNode(policy_label))
elif policy_type == 'group':
pass
else:
raise Exception('Unknown policy type %s.' % policy_type)
def BeginPolicyGroup(self, group):
'''Generates ADML elements for a Policy-Group. For each Policy-Group two
ADML "string" elements are added to the string-table. One contains the
caption of the Policy-Group and the other a description. A Policy-Group also
requires an ADML "presentation" element that must be added to the
presentation-table. The "presentation" element is the container for the
    elements that define the visual presentation of the Policy-Group's Policies.
The following ADML snippet shows an example:
Args:
group: The Policy-Group to generate ADML elements for.
'''
# Add ADML "string" elements to the string-table that are required by a
# Policy-Group.
self._AddString(self._string_table_elem, group['name'] + '_group',
group['caption'])
def _AddBaseStrings(self, string_table_elem, build):
''' Adds ADML "string" elements to the string-table that are referenced by
the ADMX file but not related to any specific Policy-Group or Policy.
'''
self._AddString(string_table_elem, self.config['win_supported_os'],
self.messages['win_supported_winxpsp2']['text'])
recommended_name = '%s (%s)' % \
(self.config['app_name'], self.messages['doc_recommended']['text'])
if build == 'chrome':
self._AddString(string_table_elem,
self.config['win_mandatory_category_path'][0],
'Google')
self._AddString(string_table_elem,
self.config['win_mandatory_category_path'][1],
self.config['app_name'])
self._AddString(string_table_elem,
self.config['win_recommended_category_path'][1],
recommended_name)
elif build == 'chromium':
self._AddString(string_table_elem,
self.config['win_mandatory_category_path'][0],
self.config['app_name'])
self._AddString(string_table_elem,
self.config['win_recommended_category_path'][0],
recommended_name)
def BeginTemplate(self):
dom_impl = minidom.getDOMImplementation('')
self._doc = dom_impl.createDocument(None, 'policyDefinitionResources',
None)
policy_definitions_resources_elem = self._doc.documentElement
policy_definitions_resources_elem.attributes['revision'] = '1.0'
policy_definitions_resources_elem.attributes['schemaVersion'] = '1.0'
self.AddElement(policy_definitions_resources_elem, 'displayName')
self.AddElement(policy_definitions_resources_elem, 'description')
resources_elem = self.AddElement(policy_definitions_resources_elem,
'resources')
self._string_table_elem = self.AddElement(resources_elem, 'stringTable')
self._AddBaseStrings(self._string_table_elem, self.config['build'])
self._presentation_table_elem = self.AddElement(resources_elem,
'presentationTable')
def GetTemplateText(self):
# Using "toprettyxml()" confuses the Windows Group Policy Editor
# (gpedit.msc) because it interprets whitespace characters in text between
# the "string" tags. This prevents gpedit.msc from displaying the category
# names correctly.
# TODO(markusheintz): Find a better formatting that works with gpedit.
return self._doc.toxml()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayDataAiserviceCloudbusMetrodetailQueryModel(object):
def __init__(self):
self._app_version = None
self._city_code = None
self._dest_geo = None
self._end_date = None
self._is_out = None
self._partner_id = None
self._start_date = None
self._station_id = None
self._type = None
@property
def app_version(self):
return self._app_version
@app_version.setter
def app_version(self, value):
self._app_version = value
@property
def city_code(self):
return self._city_code
@city_code.setter
def city_code(self, value):
self._city_code = value
@property
def dest_geo(self):
return self._dest_geo
@dest_geo.setter
def dest_geo(self, value):
self._dest_geo = value
@property
def end_date(self):
return self._end_date
@end_date.setter
def end_date(self, value):
self._end_date = value
@property
def is_out(self):
return self._is_out
@is_out.setter
def is_out(self, value):
self._is_out = value
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
@property
def start_date(self):
return self._start_date
@start_date.setter
def start_date(self, value):
self._start_date = value
@property
def station_id(self):
return self._station_id
@station_id.setter
def station_id(self, value):
self._station_id = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.app_version:
if hasattr(self.app_version, 'to_alipay_dict'):
params['app_version'] = self.app_version.to_alipay_dict()
else:
params['app_version'] = self.app_version
if self.city_code:
if hasattr(self.city_code, 'to_alipay_dict'):
params['city_code'] = self.city_code.to_alipay_dict()
else:
params['city_code'] = self.city_code
if self.dest_geo:
if hasattr(self.dest_geo, 'to_alipay_dict'):
params['dest_geo'] = self.dest_geo.to_alipay_dict()
else:
params['dest_geo'] = self.dest_geo
if self.end_date:
if hasattr(self.end_date, 'to_alipay_dict'):
params['end_date'] = self.end_date.to_alipay_dict()
else:
params['end_date'] = self.end_date
if self.is_out:
if hasattr(self.is_out, 'to_alipay_dict'):
params['is_out'] = self.is_out.to_alipay_dict()
else:
params['is_out'] = self.is_out
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
if self.start_date:
if hasattr(self.start_date, 'to_alipay_dict'):
params['start_date'] = self.start_date.to_alipay_dict()
else:
params['start_date'] = self.start_date
if self.station_id:
if hasattr(self.station_id, 'to_alipay_dict'):
params['station_id'] = self.station_id.to_alipay_dict()
else:
params['station_id'] = self.station_id
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayDataAiserviceCloudbusMetrodetailQueryModel()
if 'app_version' in d:
o.app_version = d['app_version']
if 'city_code' in d:
o.city_code = d['city_code']
if 'dest_geo' in d:
o.dest_geo = d['dest_geo']
if 'end_date' in d:
o.end_date = d['end_date']
if 'is_out' in d:
o.is_out = d['is_out']
if 'partner_id' in d:
o.partner_id = d['partner_id']
if 'start_date' in d:
o.start_date = d['start_date']
if 'station_id' in d:
o.station_id = d['station_id']
if 'type' in d:
o.type = d['type']
return o
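# Hedged round-trip sketch, not part of the SDK module: hydrate the model from a
# plain dict and serialize it back with to_alipay_dict(). All field values are
# illustrative placeholders.
if __name__ == '__main__':
    query = AlipayDataAiserviceCloudbusMetrodetailQueryModel.from_alipay_dict({
        'city_code': '330100',
        'start_date': '2021-01-01',
        'end_date': '2021-01-07',
    })
    print(query.to_alipay_dict())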
|
import matplotlib
matplotlib.use('nbagg')
import sys
sys.path.append('../../')
from modelinter.models.utils import view_code
|
# encoding: utf-8
import datetime
import logging
from django.core.management.base import BaseCommand
from django_yubin.management.commands import create_handler
from django_yubin.models import Message
class Command(BaseCommand):
help = 'Delete the mails created before -d days (default 90)'
def add_arguments(self, parser):
parser.add_argument(
'-d',
'--days',
dest='days',
type=int,
default=90,
help="Cleanup mails older than this many days, defaults to 90.",
)
def handle(self, verbosity, **options):
        # Delete mails and their related logs and queued messages created before X days
logger = logging.getLogger('django_yubin')
handler = create_handler(verbosity)
logger.addHandler(handler)
today = datetime.date.today()
cutoff_date = today - datetime.timedelta(options['days'])
count = Message.objects.filter(date_created__lt=cutoff_date).count()
if count:
Message.objects.filter(date_created__lt=cutoff_date).delete()
logger.info("Deleted %s mails created before %s " %
(count, cutoff_date))
|
""" documentation goes here """
|
"""
Train MattingBase
You can download pretrained DeepLabV3 weights from <https://github.com/VainF/DeepLabV3Plus-Pytorch>
Example:
CUDA_VISIBLE_DEVICES=0 python V2/train_base.py \
--dataset-name photomatte85 \
--model-backbone resnet50 \
--model-name custom \
--model-last-checkpoint "/eva_data/kie/research/pretrained/V2-model.pth" \
--model-pretrain-initialization "/home/kie/research/pretrained/best_deeplabv3_resnet50_voc_os16.pth" \
--epoch-end 10
"""
import argparse
import kornia
import torch
import os
import random
from torch import nn
from torch.nn import functional as F
from torch.cuda.amp import autocast, GradScaler
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch.optim import Adam
from torchvision.utils import make_grid
from tqdm import tqdm
from torchvision import transforms as T
from PIL import Image
from data_path import DATA_PATH
from dataset import ImagesDataset, ZipDataset, VideoDataset, SampleDataset
from dataset import augmentation as A
from model import MattingBase
from model.utils import load_matched_state_dict
# --------------- Arguments ---------------
parser = argparse.ArgumentParser()
parser.add_argument('--dataset-name', type=str, required=True, choices=DATA_PATH.keys())
parser.add_argument('--model-backbone', type=str, required=True, choices=['resnet101', 'resnet50', 'mobilenetv2'])
parser.add_argument('--model-name', type=str, required=True)
parser.add_argument('--model-pretrain-initialization', type=str, default=None)
parser.add_argument('--model-last-checkpoint', type=str, default=None)
parser.add_argument('--batch-size', type=int, default=8)
parser.add_argument('--num-workers', type=int, default=16)
parser.add_argument('--epoch-start', type=int, default=0)
parser.add_argument('--epoch-end', type=int, required=True)
parser.add_argument('--log-train-loss-interval', type=int, default=10)
parser.add_argument('--log-train-images-interval', type=int, default=2000)
parser.add_argument('--log-valid-interval', type=int, default=5000)
parser.add_argument('--checkpoint-interval', type=int, default=5000)
args = parser.parse_args()
# --------------- Loading ---------------
def train():
# Training DataLoader
dataset_train = ZipDataset([
ZipDataset([
ImagesDataset(DATA_PATH[args.dataset_name]['train']['pha'], mode='L'),
ImagesDataset(DATA_PATH[args.dataset_name]['train']['fgr'], mode='RGB'),
], transforms=A.PairCompose([
A.PairRandomAffineAndResize((512, 512), degrees=(-5, 5), translate=(0.1, 0.1), scale=(0.4, 1), shear=(-5, 5)),
A.PairRandomHorizontalFlip(),
A.PairRandomBoxBlur(0.1, 5),
A.PairRandomSharpen(0.1),
A.PairApplyOnlyAtIndices([1], T.ColorJitter(0.15, 0.15, 0.15, 0.05)),
A.PairApply(T.ToTensor())
]), assert_equal_length=True),
ImagesDataset(DATA_PATH['backgrounds']['train'], transforms=T.Compose([
A.RandomAffineAndResize((512, 512), degrees=(-5, 5), translate=(0.1, 0.1), scale=(1, 2), shear=(-5, 5)),
T.RandomHorizontalFlip(),
A.RandomBoxBlur(0.1, 5),
A.RandomSharpen(0.1),
T.ColorJitter(0.15, 0.15, 0.15, 0.05),
T.ToTensor()
])),
])
dataloader_train = DataLoader(dataset_train,
shuffle=True,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=False)
# Validation DataLoader
dataset_valid = ZipDataset([
ZipDataset([
ImagesDataset(DATA_PATH[args.dataset_name]['valid']['pha'], mode='L'),
ImagesDataset(DATA_PATH[args.dataset_name]['valid']['fgr'], mode='RGB')
], transforms=A.PairCompose([
A.PairRandomAffineAndResize((512, 512), degrees=(-5, 5), translate=(0.1, 0.1), scale=(0.3, 1), shear=(-5, 5)),
A.PairApply(T.ToTensor())
]), assert_equal_length=True),
ImagesDataset(DATA_PATH['backgrounds']['valid'], mode='RGB', transforms=T.Compose([
A.RandomAffineAndResize((512, 512), degrees=(-5, 5), translate=(0.1, 0.1), scale=(1, 1.2), shear=(-5, 5)),
T.ToTensor()
])),
])
dataset_valid = SampleDataset(dataset_valid, 50)
dataloader_valid = DataLoader(dataset_valid,
pin_memory=True,
batch_size=args.batch_size,
num_workers=args.num_workers)
# Model
model = MattingBase(args.model_backbone).cuda()
if args.model_last_checkpoint is not None:
load_matched_state_dict(model, torch.load(args.model_last_checkpoint))
elif args.model_pretrain_initialization is not None:
model.load_pretrained_deeplabv3_state_dict(torch.load(args.model_pretrain_initialization)['model_state'])
optimizer = Adam([
{'params': model.backbone.parameters(), 'lr': 1e-4},
{'params': model.aspp.parameters(), 'lr': 5e-4},
{'params': model.decoder.parameters(), 'lr': 5e-4}
])
scaler = GradScaler()
# Logging and checkpoints
if not os.path.exists(f'checkpoint/{args.model_name}'):
os.makedirs(f'checkpoint/{args.model_name}')
writer = SummaryWriter(f'log/{args.model_name}')
# Run loop
for epoch in range(args.epoch_start, args.epoch_end):
for i, ((true_pha, true_fgr), true_bgr) in enumerate(tqdm(dataloader_train)):
step = epoch * len(dataloader_train) + i
true_pha = true_pha.cuda(non_blocking=True)
true_fgr = true_fgr.cuda(non_blocking=True)
true_bgr = true_bgr.cuda(non_blocking=True)
true_pha, true_fgr, true_bgr = random_crop(true_pha, true_fgr, true_bgr)
true_src = true_bgr.clone()
# Augment with shadow
aug_shadow_idx = torch.rand(len(true_src)) < 0.3
if aug_shadow_idx.any():
aug_shadow = true_pha[aug_shadow_idx].mul(0.3 * random.random())
aug_shadow = T.RandomAffine(degrees=(-5, 5), translate=(0.2, 0.2), scale=(0.5, 1.5), shear=(-5, 5))(aug_shadow)
aug_shadow = kornia.filters.box_blur(aug_shadow, (random.choice(range(20, 40)),) * 2)
true_src[aug_shadow_idx] = true_src[aug_shadow_idx].sub_(aug_shadow).clamp_(0, 1)
del aug_shadow
del aug_shadow_idx
# Composite foreground onto source
true_src = true_fgr * true_pha + true_src * (1 - true_pha)
# Augment with noise
aug_noise_idx = torch.rand(len(true_src)) < 0.4
if aug_noise_idx.any():
true_src[aug_noise_idx] = true_src[aug_noise_idx].add_(torch.randn_like(true_src[aug_noise_idx]).mul_(0.03 * random.random())).clamp_(0, 1)
true_bgr[aug_noise_idx] = true_bgr[aug_noise_idx].add_(torch.randn_like(true_bgr[aug_noise_idx]).mul_(0.03 * random.random())).clamp_(0, 1)
del aug_noise_idx
# Augment background with jitter
aug_jitter_idx = torch.rand(len(true_src)) < 0.8
if aug_jitter_idx.any():
true_bgr[aug_jitter_idx] = kornia.augmentation.ColorJitter(0.18, 0.18, 0.18, 0.1)(true_bgr[aug_jitter_idx])
del aug_jitter_idx
# Augment background with affine
aug_affine_idx = torch.rand(len(true_bgr)) < 0.3
if aug_affine_idx.any():
true_bgr[aug_affine_idx] = T.RandomAffine(degrees=(-1, 1), translate=(0.01, 0.01))(true_bgr[aug_affine_idx])
del aug_affine_idx
with autocast():
pred_pha, pred_fgr, pred_err = model(true_src, true_bgr)[:3]
loss = compute_loss(pred_pha, pred_fgr, pred_err, true_pha, true_fgr)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
if (i + 1) % args.log_train_loss_interval == 0:
writer.add_scalar('loss', loss, step)
if (i + 1) % args.log_train_images_interval == 0:
writer.add_image('train_pred_pha', make_grid(pred_pha, nrow=5), step)
writer.add_image('train_pred_fgr', make_grid(pred_fgr, nrow=5), step)
writer.add_image('train_pred_com', make_grid(pred_fgr * pred_pha, nrow=5), step)
writer.add_image('train_pred_err', make_grid(pred_err, nrow=5), step)
writer.add_image('train_true_src', make_grid(true_src, nrow=5), step)
writer.add_image('train_true_bgr', make_grid(true_bgr, nrow=5), step)
del true_pha, true_fgr, true_bgr
del pred_pha, pred_fgr, pred_err
if (i + 1) % args.log_valid_interval == 0:
valid(model, dataloader_valid, writer, step)
if (step + 1) % args.checkpoint_interval == 0:
torch.save(model.state_dict(), f'checkpoint/{args.model_name}/epoch-{epoch}-iter-{step}.pth')
torch.save(model.state_dict(), f'checkpoint/{args.model_name}/epoch-{epoch}.pth')
# --------------- Utils ---------------
def compute_loss(pred_pha, pred_fgr, pred_err, true_pha, true_fgr):
true_err = torch.abs(pred_pha.detach() - true_pha)
true_msk = true_pha != 0
return F.l1_loss(pred_pha, true_pha) + \
F.l1_loss(kornia.sobel(pred_pha), kornia.sobel(true_pha)) + \
F.l1_loss(pred_fgr * true_msk, true_fgr * true_msk) + \
F.mse_loss(pred_err, true_err)
def random_crop(*imgs):
w = random.choice(range(256, 512))
h = random.choice(range(256, 512))
results = []
for img in imgs:
img = kornia.resize(img, (max(h, w), max(h, w)))
img = kornia.center_crop(img, (h, w))
results.append(img)
return results
def valid(model, dataloader, writer, step):
model.eval()
loss_total = 0
loss_count = 0
with torch.no_grad():
for (true_pha, true_fgr), true_bgr in dataloader:
batch_size = true_pha.size(0)
true_pha = true_pha.cuda(non_blocking=True)
true_fgr = true_fgr.cuda(non_blocking=True)
true_bgr = true_bgr.cuda(non_blocking=True)
true_src = true_pha * true_fgr + (1 - true_pha) * true_bgr
pred_pha, pred_fgr, pred_err = model(true_src, true_bgr)[:3]
loss = compute_loss(pred_pha, pred_fgr, pred_err, true_pha, true_fgr)
loss_total += loss.cpu().item() * batch_size
loss_count += batch_size
writer.add_scalar('valid_loss', loss_total / loss_count, step)
model.train()
# --------------- Start ---------------
if __name__ == '__main__':
train()
|
import re
from google.appengine.ext import db
from bloghandler import BlogHandler
from models import User
# Validation of information
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
PASS_RE = re.compile(r"^.{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
def valid_password(password):
return password and PASS_RE.match(password)
def valid_email(email):
return not email or EMAIL_RE.match(email)
class Signup(BlogHandler):
"""Page for creating a user account"""
def get(self):
self.render("signup.html")
def post(self):
have_error = False
self.username = self.request.get('username')
self.password = self.request.get('password')
self.verify = self.request.get('verify')
self.email = self.request.get('email')
params = dict(username=self.username,
email=self.email)
if not valid_username(self.username):
params['error_username'] = "Invalid username."
have_error = True
if not valid_password(self.password):
params['error_password'] = "Invalid password."
have_error = True
elif self.password != self.verify:
params['error_verify'] = "Your passwords didn't match."
have_error = True
if not valid_email(self.email):
params['error_email'] = "Invalid email."
have_error = True
if have_error:
self.render('signup.html', **params)
else:
self.done()
def done(self, *a, **kw):
u = User.by_name(self.username)
if u:
msg = 'This user already exists'
self.render('signup.html', error_username=msg)
else:
u = User.register(self.username, self.password, self.email)
u.put()
self.set_login_cookie(u)
self.redirect('/')
|
#!/usr/bin/env python
import os
import sys
from distutils.core import setup
VERSION = "0.4.0"
if __name__ == "__main__":
if "--format=msi" in sys.argv or "bdist_msi" in sys.argv:
# hack the version name to a format msi doesn't have trouble with
VERSION = VERSION.replace("-alpha", "a")
VERSION = VERSION.replace("-beta", "b")
VERSION = VERSION.replace("-rc", "r")
fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
readme = open(fname, "r")
long_desc = readme.read()
readme.close()
setupdata = {
"name": "python-openal",
"version": VERSION,
"description": "Python OpenAL bindings",
"long_description": long_desc,
"author": "Marcus von Appen",
"author_email": "marcus@sysfault.org",
"license": "Public Domain / zlib",
"url": "http://bitbucket.org/marcusva/py-al",
"packages": ["openal",
"openal.loaders",
"openal.test",
"openal.test.util"
],
"package_data": {"openal.test": ["resources/*.*"]},
"classifiers": [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: Public Domain",
"License :: OSI Approved :: zlib/libpng License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: IronPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Multimedia :: Sound/Audio",
"Topic :: Software Development :: Libraries :: Python Modules",
],
}
setup(**setupdata)
|
#!/usr/bin/env python3
# Smith-Waterman Algorithm (DNA Alignment AI)
# Written by CoolCat467 07/02/2020
NAME = 'Smith-Waterman Algorithm'
__version__ = '0.0.1'
class SMAlgorythm(object):
def __init__(self, sequence1, sequence2):
self.seqA = str(sequence1).upper()
self.seqB = str(sequence2).upper()
self.n = len(self.seqA)
self.m = len(self.seqB)
self.ntides = ['A', 'T', 'G', 'C']
self.subMatrix = self.getAllignmentDictionary()
self.S = lambda a, b: self.subMatrix[(a, b)]
# Linear Width
self.w1 = 2
self.W = lambda k: k*self.w1
# Affine Width
        self.v = 5  # Opening gap penalty
        self.u = 1  # Gap extension penalty
## self.W = lambda k: (self.u*k) + self.v if self.v > 0 and self.u > 0 else 999999
self.scoreMatrix = [[0 for i in range(self.m+1)] for i in range(self.n+1)]
def __repr__(self):
return "SMAlgorythm()"
def getAllignmentDictionary(self):
shift = lambda x: x[-1:] + x[:-1]
x = list(self.ntides)
tides = list(x)
for i in range(len(self.ntides)-1):
x = shift(x)
tides += x
isSame = lambda x, y: 3 if x==y else -3
return {t:isSame(*t) for t in zip(tides, self.ntides*len(self.ntides))}
def score(self):
a = self.seqA
b = self.seqB
H = list(self.scoreMatrix)
for k in range(self.n):
for l in range(self.m):
H[k][0] = 0
H[0][l] = 0
# main
for i in range(1, self.n):
for j in range(1, self.m):
                H[i][j] = max(H[i-1][j-1] + self.S(a[i], b[j]),  # Aligning score
H[i-k][j] - self.W(k),#Score if A is at the end of a gap of length k
H[i][j-l] - self.W(l),#Score if B is at the end of a gap of length l
0)
self.scoreMatrix = H
self.k = k
self.l = l
def getStartIdx(self):
cmax = 0
idx = None
for i in range(self.n+1):
for ii in range(self.m+1):
if self.scoreMatrix[i][ii] > cmax:
cmax = self.scoreMatrix[i][ii]
idx = [i, ii]
self.startIdx = tuple(idx)
def getPath(self):
i, j = tuple(self.startIdx)
path = [tuple(self.startIdx)]
while self.scoreMatrix[i][j] != 0:
            frm = (self.scoreMatrix[i-1][j-1] + self.S(self.seqA[i], self.seqB[j]),  # Aligning score
self.scoreMatrix[i-self.k][j] - self.W(self.k),#Score if A is at the end of a gap of length k
self.scoreMatrix[i][j-self.l] - self.W(self.l),#Score if B is at the end of a gap of length l
0)
idxs = {frm[0]:(i-1, j-1),
frm[1]:(i-self.k, j),
frm[2]:(i, j-self.l)}
m = max(frm)
if m != 0:
i, j = idxs[m]
path.append(idxs[m])
continue
break
self.path = [[self.seqA[i], self.seqB[j]] for i, j in reversed(path)]
@classmethod
def pathToString(cls, path):
relative = []
for i, ii in path:
relative.append([i, '|' if i == ii else ' ', ii])
data = ['', '', '']
for r in range(len(relative)):
i, rel, ii = relative[r]
data[0] += i
data[1] += rel
data[2] += ii
return ''.join([''.join(data[i])+'\n' for i in range(3)])[:-1]
def allign(self, toString=False):
self.score()
self.getStartIdx()
self.getPath()
if toString:
return self.__class__.pathToString(self.path)
return self.path
pass
if __name__ == '__main__':
seq1 = 'catatgcgcattatgat'
## seq1 = seq1 + ''.join([i for i in reversed(seq1)])
seq2 = 'gatatgatcattatgct'
print(seq1)
print(seq2)
print(SMAlgorythm(seq1, seq2).allign(True))
|
countries.loc['United Kingdom', 'capital'] = 'Cambridge'
countries
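# Hedged illustration, not in the original cell: DataFrame.loc assigns by label,
# so the same pattern works for any (row label, column) pair. The small
# DataFrame below is made up for demonstration.
import pandas as pd
capitals = pd.DataFrame({'capital': ['Paris', 'Berlin']}, index=['France', 'Germany'])
capitals.loc['France', 'capital'] = 'Lyon'
capitals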
|
import unittest
from typing import List, Tuple
from unittest.mock import MagicMock, Mock, patch
from numpy import power
from components.ai import BaseAI, HostileEnemy, ConfusedEnemy
from components.equipment import Equipment
from components.fighter import Fighter
from components.inventory import Inventory
from components.level import Level
from entity import Entity, Actor
from game_map import GameMap
from engine import Engine
from actions import MeleeAction, MovementAction, WaitAction
import tile_types
class Test_BaseAI(unittest.TestCase):
def test_entity_set(self):
'''
tests that the entity can be set correctly
'''
actor = Actor(ai_cls=BaseAI, equipment=Equipment(), fighter=Fighter(
hp=10, base_defense=10, base_power=10), inventory=Inventory(capacity=5),
level=Level())
self.assertEqual(actor, actor.ai.entity)
ai = BaseAI(actor)
ai.entity = actor
self.assertEqual(actor, ai.entity)
def test_perform(self):
'''
all perform of this AI will be handled
elsewhere
'''
ent = Entity()
ai = BaseAI(entity=ent)
with self.assertRaises(NotImplementedError):
ai.perform()
def test_get_path_to_straight_line(self):
'''
test get_path_to function
1. test that it returns a short path in a straight line
'''
ent = Entity(x=0, y=0)
eng = Engine(player=ent)
gm = GameMap(engine=eng, width=10, height=10)
# GameMaps are initialized as all wall, convert to floor
gm.tiles[:, :] = tile_types.floor
ent.parent = gm
ai = BaseAI(entity=ent)
# ent starts at 0,0 - get the path straight down to 0,9 (bottom of map)
path = ai.get_path_to(dest_x=0, dest_y=9)
# path should be...
# (0,1)...(0,9)
path_should_be: List[Tuple[int, int]] = [
(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9)]
self.assertEqual(path, path_should_be)
def test_get_path_to_avoid_wall(self):
'''
test get_path_to function
2. test that it avoids walls
'''
ent = Entity(x=0, y=0)
eng = Engine(player=ent)
gm = GameMap(engine=eng, width=10, height=10)
# GameMaps are initialized as all wall, convert to floor
gm.tiles[:, :] = tile_types.floor
# change a couple of the path tiles to a wall (unwalkable)
gm.tiles[0, 5] = tile_types.wall
gm.tiles[1, 5] = tile_types.wall
ent.parent = gm
ai = BaseAI(entity=ent)
# ent starts at 0,0 - get the path straight down to 0,9 (bottom of map)
path = ai.get_path_to(dest_x=0, dest_y=9)
# straight path should be (0,1)...(0,9)
# but since (0,5) and (1, 5) are unwalkable,
# the path increases to go around them (diagonal to (1,4) and (2,5) then
# back to (1,6) before straightening out)
path_should_be: List[Tuple[int, int]] = [
(0, 1), (0, 2), (0, 3), (1, 4), (2, 5), (1, 6), (0, 7), (0, 8), (0, 9)]
self.assertEqual(path, path_should_be)
def test_get_path_to_avoid_entities(self):
'''
test get_path_to function
3. test that it avoids blocking entities
'''
ent = Entity(x=0, y=0)
eng = Engine(player=ent)
gm = GameMap(engine=eng, width=10, height=10)
# GameMaps are initialized as all wall, convert to floor
gm.tiles[:, :] = tile_types.floor
# add a couple entities to the gamemap that are in the way
ent1 = Entity(x=0, y=5, blocks_movement=True)
ent2 = Entity(x=1, y=5, blocks_movement=True)
gm.entities = {ent1, ent2}
ent.parent = gm
ai = BaseAI(entity=ent)
# ent starts at 0,0 - get the path straight down to 0,9 (bottom of map)
path = ai.get_path_to(dest_x=0, dest_y=9)
# straight path should be (0,1)...(0,9)
# but since (0,5) and (1, 5) have blocking entities,
# the path increases to go around them (diagonal to (1,4) and (2,5) then
# back to (1,6) before straightening out)
path_should_be: List[Tuple[int, int]] = [
(0, 1), (0, 2), (0, 3), (1, 4), (2, 5), (1, 6), (0, 7), (0, 8), (0, 9)]
self.assertEqual(path, path_should_be)
class TestConfusedEnemy(unittest.TestCase):
def test_init(self):
'''
        test that the confused enemy can be initialized without issues
'''
actor = Actor(ai_cls=HostileEnemy, equipment=Equipment(), fighter=Fighter(
hp=10, base_defense=10, base_power=10), inventory=Inventory(capacity=5),
level=Level())
ai = ConfusedEnemy(
entity=actor, previous_ai=HostileEnemy, turns_remaining=5)
self.assertEqual(ai.previous_ai, HostileEnemy)
self.assertEqual(ai.turns_remaining, 5)
def test_perform_switch_ai(self):
'''
        test that with 0 or fewer turns remaining the ai switches back
'''
actor = Actor(ai_cls=HostileEnemy, equipment=Equipment(), fighter=Fighter(
hp=10, base_defense=10, base_power=10), inventory=Inventory(capacity=5),
level=Level())
ai = ConfusedEnemy(
entity=actor, previous_ai=HostileEnemy(entity=actor), turns_remaining=0)
actor.ai = ai
eng = Engine(player=actor)
gm = GameMap(engine=eng, width=10, height=10)
actor.parent = gm
self.assertIsInstance(actor.ai, ConfusedEnemy)
actor.ai.perform()
self.assertIsInstance(actor.ai, HostileEnemy)
def test_perform_bump_action(self):
'''
test that with turns remaining the ai will perform a bump action
in a random direction and reduce the number of turns remaining
'''
actor = Actor(ai_cls=HostileEnemy, equipment=Equipment(), fighter=Fighter(
hp=10, base_defense=10, base_power=10), inventory=Inventory(capacity=5),
level=Level())
ai = ConfusedEnemy(
entity=actor, previous_ai=HostileEnemy(entity=actor), turns_remaining=5)
actor.ai = ai
eng = Engine(player=actor)
gm = GameMap(engine=eng, width=10, height=10)
actor.parent = gm
with patch('actions.BumpAction.perform') as patch_perform:
ai.perform()
self.assertEqual(ai.turns_remaining, 4)
patch_perform.assert_called_once()
class TestHostileEnemy(unittest.TestCase):
def test_init(self):
'''
test that the hostile enemy class can be initialized without issues
'''
# instantiating an actor will automatically instantiate the ai class under actor.ai
actor = Actor(ai_cls=HostileEnemy, equipment=Equipment(), fighter=Fighter(
hp=10, base_defense=10, base_power=10), inventory=Inventory(capacity=5),
level=Level())
self.assertEqual(actor, actor.ai.entity)
self.assertEqual([], actor.ai.path)
def test_perform_wait(self):
'''
test that perform() with no path will call WaitAction.perform
'''
# run the setup code
player = Entity(x=0, y=0)
eng = Engine(player=player)
gm = GameMap(engine=eng, width=10, height=10)
gm.tiles[:, :] = tile_types.floor
gm.entities.add(player)
hostile_ent = Actor(x=0, y=2, ai_cls=HostileEnemy, equipment=Equipment(), fighter=Fighter(
hp=10, base_defense=10, base_power=10), inventory=Inventory(capacity=5),
level=Level())
gm.entities.add(hostile_ent)
player.parent = gm
hostile_ent.parent = gm
eng.game_map = gm
# since update_fov has not been run, there is no 'visible' tiles
# for the player to see the hostile entity
# eng.update_fov()
# patch the class we want to test for then run the code
with patch('actions.WaitAction.perform') as mock_WaitAction_perform:
hostile_ent.ai.perform()
# verify the WaitAction.perform was called
mock_WaitAction_perform.assert_called()
def test_perform_movement(self):
'''
test that perform() with a path will call MovementAction.perform
'''
# run the setup code
player = Entity(x=0, y=0)
eng = Engine(player=player)
gm = GameMap(engine=eng, width=10, height=10)
gm.tiles[:, :] = tile_types.floor
gm.entities.add(player)
# hostile entity is 2 spaces away
hostile_ent = Actor(x=0, y=2, ai_cls=HostileEnemy, equipment=Equipment(), fighter=Fighter(
hp=10, base_defense=10, base_power=10), inventory=Inventory(capacity=5),
level=Level())
gm.entities.add(hostile_ent)
player.parent = gm
hostile_ent.parent = gm
eng.game_map = gm
# run update_fov to update the visible tiles
eng.update_fov()
# patch the class we want to test for then run the code
with patch('actions.MovementAction.perform') as mock_MovementAction_perform:
hostile_ent.ai.perform()
            # verify that MovementAction.perform was called
mock_MovementAction_perform.assert_called()
def test_perform_melee(self):
'''
test that perform() while next to the player object will
        perform a melee action
'''
# run the setup code
player = Entity(x=0, y=0)
eng = Engine(player=player)
gm = GameMap(engine=eng, width=10, height=10)
gm.tiles[:, :] = tile_types.floor
gm.entities.add(player)
# hostile entity is 1 spaces away
hostile_ent = Actor(x=0, y=1, ai_cls=HostileEnemy, equipment=Equipment(), fighter=Fighter(
hp=10, base_defense=10, base_power=10), inventory=Inventory(capacity=5),
level=Level())
gm.entities.add(hostile_ent)
player.parent = gm
hostile_ent.parent = gm
eng.game_map = gm
# run update_fov to update the visible tiles
eng.update_fov()
# patch the class we want to test for then run the code
with patch('actions.MeleeAction.perform') as mock_MeleeAction_perform:
hostile_ent.ai.perform()
            # verify that MeleeAction.perform was called
mock_MeleeAction_perform.assert_called()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Utility functions for creating and analyzing spectra."""
import copy
import textwrap
from enum import Enum
from os import path
import numpy as np
from matplotlib import pyplot as plt
from scipy.integrate import simpson
from pypython import _AttributeDict, _cleanup_root, get_xy_subset, smooth_array
from pypython.constants import PARSEC
from pypython.physics import angstrom_to_hz
from pypython import plot  # used below as plot.set_axes_labels, plot.add_line_ids, plot.common_lines
from pypython.plot import finish_figure, set_axes_scales
# Units enumerators ------------------------------------------------------------
class SpectrumThing(Enum):
frequency = "Hz"
wavelength = "Angstrom"
class SpectrumUnits(Enum):
"""Possible units for the spectra created in Python.
Note the typo in the per wavelength units. This is due to a typo in
Python.
"""
l_nu = "erg/s/Hz"
l_lm = "erg/s/A"
f_nu = "erg/s/cm^-2/Hz"
f_lm = "erg/s/cm^-2/A"
unknown = "unknown"
# Spectrum class ---------------------------------------------------------------
class Spectrum:
"""A class to store PYTHON .spec and .log_spec files.
The Python spectra are read in and stored within a dict of dicts,
where each column name is the spectrum name and the columns in that
dict are the names of the columns in the spectrum file. The data is
stored as numpy arrays.
"""
def __init__(self, root, fp=".", log_spec=True, smooth=None, distance=None, default=None, delim=None):
"""Create the Spectrum object.
Construct the file path of the spectrum files given the
root, directory and whether the logarithmic spectrum is used or not.
The different spectra are then read in, with either the .spec or the
first spectrum file read in being the default index choice.
Parameters
----------
root: str
The root name of the model.
fp: str [optional]
The directory containing the model.
default: str [optional]
The default spectrum to make the available spectrum for indexing.
log_spec: bool [optional]
Read in the logarithmic version of the spectra.
smooth: int [optional]
The amount of smoothing to use.
distance: float [optional]
The distance of the spectrum flux, in units of parsec.
delim: str [optional]
The deliminator in the spectrum file.
"""
root, fp = _cleanup_root(root, fp)
self.root = root
self.fp = path.expanduser(fp)
if self.fp[-1] != "/":
self.fp += "/"
self.pf = self.fp + self.root + ".pf"
self.log_spec = log_spec
if default and self.log_spec:
if not default.startswith("log_") and default != "spec_tau":
default = "log_" + default
# Initialize the important members
# Each spectrum is stored as a key in a dictionary. Each dictionary
# contains keys for each column in the spectrum file, as well as other
# meta info such as the distance of the spectrum
self.spectra = _AttributeDict({})
self._original_spectra = None
self.available = []
# Now we can read in the spectra and set the default/target spectrum
# for the object. We can also re-scale to a different distance.
self.get_spectra(delim)
if default:
self.current = self._get_spec_key(default)
else:
self.current = self.available[0]
self.set(self.current)
if distance:
self.set_distance(distance)
# Smooth all the spectra. A copy of the unsmoothed spectra is kept
# in the member self.original.
if smooth:
self.smooth(smooth)
# Private methods ----------------------------------------------------------
def _get_spec_key(self, name):
"""Get the key depending on if the log or linear version of a spectrum
was read in.
Parameters
----------
name: str
The name of the spectrum to get the key for.
Returns
-------
name: str
The key for the spectrum requested.
"""
if self.log_spec and not name.startswith("log_") and name != "spec_tau":
name = "log_" + name
return name
def _plot_observer_spectrum(self, label_lines=False):
"""Plot the spectrum components and observer spectra on a 1x2 panel
plot. The left panel has the components, whilst the right panel has the
observer spectrum.
Parameters
----------
label_lines: bool
Plot line IDs.
"""
name = self._get_spec_key("spec")
if name not in self.available:
raise IOError("A .spec/.log_spec file was not read in, cannot use this function")
fig, ax = plt.subplots(1, 2, figsize=(12, 5), sharey="row")
# Plot the components of the observer spectrum, i.e Emitted, Created,
# Disc, etc.
for component in self.columns[:-self.n_inclinations]:
if component in ["Lambda", "Freq."]:
continue
ax[0] = self._plot_thing(component, name, label_lines=label_lines, ax_update=ax[0])
for line in ax[0].get_lines(): # Set the different spectra to have a transparency
line.set_alpha(0.7)
ax[0].legend(ncol=2, loc="upper right").set_zorder(0)
# Now plot the observer spectra
for inclination in self.inclinations:
ax[1] = self._plot_thing(inclination, name, label_lines=label_lines, ax_update=ax[1])
for label, line in zip(self.inclinations, ax[1].get_lines()):
line.set_alpha(0.7)
line.set_label(str(label) + r"$^{\circ}$")
ax[1].set_ylabel("")
ax[1].legend(ncol=2, loc="upper right").set_zorder(0)
# Final clean up to make a nice spectrum
ax[0].set_title("Components")
ax[1].set_title("Observer spectra")
fig = finish_figure(fig, wspace=0)
return fig, ax
def _plot_thing(self, thing, spec_type, scale="loglog", label_lines=False, ax_update=None):
"""Plot a specific column in a spectrum file.
Parameters
----------
thing: str
The name of the thing to be plotted.
scale: str
The scale of the axes, i.e. loglog, logx, logy, linlin.
label_lines: bool
Plot line IDs.
ax_update: plt.Axes
An plt.Axes object to update, i.e. to plot on.
"""
if ax_update:
ax = ax_update
else:
fig, ax = plt.subplots(figsize=(9, 5))
if spec_type:
key = spec_type
else:
key = self.current
units = self.spectra[key].units
distance = self.spectra[key].distance
ax = set_axes_scales(ax, scale)
ax = plot.set_axes_labels(ax, units=units, distance=distance)
# How things are plotted depends on the units of the spectrum
if units == SpectrumUnits.f_lm or units == SpectrumUnits.l_lm:
x_thing = "Lambda"
else:
x_thing = "Freq."
label = thing
if thing.isdigit():
label += r"$^{\circ}$"
ax.plot(self.spectra[key][x_thing], self.spectra[key][thing], label=label, zorder=0)
if label_lines:
ax = plot.add_line_ids(ax, plot.common_lines(units), linestyle="none", fontsize=10)
if ax_update:
return ax
else:
fig = finish_figure(fig)
return fig, ax
# Methods ------------------------------------------------------------------
def convert_flux_to_luminosity(self):
"""Convert the spectrum from flux into luminosity units.
This is easily done using the relationship F = L / (4 pi d^2). This
method is applied to all the spectra currently loaded in the class,
but only if the units are already a flux.
"""
for spectrum in self.available:
if spectrum == "spec_tau":
continue
distance = self.spectra[spectrum]["distance"]
units = self.spectra[spectrum]["units"]
if units in [SpectrumUnits.l_lm, SpectrumUnits.l_nu]:
continue
for column in self.spectra[spectrum].columns:
self.spectra[spectrum][column] *= 4 * np.pi * (distance * PARSEC)**2
if units == SpectrumUnits.f_nu:
self.spectra[spectrum].units = SpectrumUnits.l_nu
else:
self.spectra[spectrum].units = SpectrumUnits.l_lm
def convert_luminosity_to_flux(self, distance):
"""Convert the spectrum from luminosity into flux units.
This is easily done by using the relationship F = L / (4 pi d^2). This
method is applied to all the spectra currently loaded in the class,
but only if the units are not a flux.
"""
for spectrum in self.available:
if spectrum == "spec_tau":
continue
distance = self.spectra[spectrum]["distance"]
units = self.spectra[spectrum]["units"]
if units in [SpectrumUnits.f_lm, SpectrumUnits.f_nu]:
continue
for column in self.spectra[spectrum].columns:
self.spectra[spectrum][column] /= 4 * np.pi * (distance * PARSEC)**2
if units == SpectrumUnits.l_nu:
self.spectra[spectrum].units = SpectrumUnits.f_nu
else:
self.spectra[spectrum].units = SpectrumUnits.f_lm
def get_spectra(self, delim=None):
"""Read in a spectrum file given in self.filepath. The spectrum is
stored as a dictionary in self.spectra where each key is the name of
the columns.
Parameters
----------
delim: str [optional]
            A custom delimiter, useful for reading in files which are sometimes
            delimited with commas instead of spaces.
"""
n_read = 0
files_to_read = ["spec", "spec_tot", "spec_tot_wind", "spec_wind", "spec_tau"]
# Read in each spec file type, and store each spectrum as a key in
# self.avail_spec, etc.
for spec_type in files_to_read:
fp = self.fp + self.root + "."
if self.log_spec and spec_type != "spec_tau":
spec_type = "log_" + spec_type
fp += spec_type
if not path.exists(fp):
continue
n_read += 1
self.spectra[spec_type] = _AttributeDict({
"units": SpectrumUnits.unknown,
})
with open(fp, "r") as f:
spectrum_file = f.readlines()
# Read in the spectrum file. Ignore empty lines and lines which have
# been commented out by #
spectrum = []
for line in spectrum_file:
line = line.strip()
if delim:
line = line.split(delim)
else:
line = line.split()
if "Units:" in line:
self.spectra[spec_type]["units"] = SpectrumUnits(line[4][1:-1])
if self.spectra[spec_type]["units"] in [SpectrumUnits.f_lm, SpectrumUnits.f_nu]:
self.spectra[spec_type]["distance"] = float(line[6])
else:
self.spectra[spec_type]["distance"] = 0
if len(line) == 0 or line[0] == "#":
continue
spectrum.append(line)
# Extract the header columns of the spectrum. This assumes the first
# read line in the spectrum is the header.
header = [] # wish this was short enough to do in a list comprehension
for i, column_name in enumerate(spectrum[0]):
if column_name[0] == "A":
j = column_name.find("P")
column_name = column_name[1:j].lstrip("0")  # remove leading 0's, e.g. for 01 degrees
header.append(column_name)
columns = [column for column in header if column not in ["Freq.", "Lambda"]]
spectrum = np.array(spectrum[1:], dtype=np.float64)
# Add the spectrum to self.avail_spectrum[spec_type]. The keys of
# the dictionary are the column names in the spectrum, i.e. what
# is in the header
for i, column_name in enumerate(header):
self.spectra[spec_type][column_name] = spectrum[:, i]
inclinations = [] # this could almost be a list comprehension...
for col in header:
if col.isdigit() and col not in inclinations:
inclinations.append(col)
self.spectra[spec_type]["columns"] = tuple(columns)
self.spectra[spec_type]["inclinations"] = tuple(inclinations)
self.spectra[spec_type]["n_inclinations"] = len(inclinations)
if n_read == 0:
raise IOError(f"Unable to open any spectrum files for {self.root} in {self.fp}")
self.available = tuple(self.spectra.keys())
def plot(self, names=None, spec_type=None, scale="loglog", label_lines=False):
"""Plot the spectra or a single component in a single figure. By
default this creates a 1 x 2 of the components on the left and the
observer spectra on the right. Useful for when in an interactive
session.
Parameters
----------
names: str or list
The name(s) of the thing(s) to plot.
spec_type: str
The spectrum the thing to plot belongs in.
scale: str
The scale of the axes, i.e. loglog, logx, logy or linlin.
label_lines: bool
Plot line IDs.
"""
# If name is given, then plot that column of the spectrum. Otherwise
# assume we just want to plot all columns in the spec file
if names:
if type(names) is not list:
names = [names]
fig, ax = self._plot_thing(str(names[0]), spec_type, scale, label_lines)
if len(names) > 1:
for name in names[1:]:
ax = self._plot_thing(str(name), spec_type, scale, label_lines, ax_update=ax)
ax.legend()
else:
fig, ax = self._plot_observer_spectrum(label_lines)
return fig, ax
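# A minimal usage sketch for plot(); the root name "model" and the
# inclination columns "45"/"60" are illustrative assumptions:
#
#     s = Spectrum("model")
#     fig, ax = s.plot()                                # components + observer spectra
#     fig, ax = s.plot(["45", "60"], label_lines=True)  # specific inclinations only
#     s.show()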
def set(self, name):
"""Set a spectrum as the default.
Sets a different spectrum to be the currently available spectrum for
indexing.
Parameters
----------
name: str
The name of the spectrum, i.e. log_spec or spec_tot, etc. The
available spectrum types are stored in self.available.
"""
name = self._get_spec_key(name)
if name not in self.available:
raise IndexError(f"spectrum {name} is not available: available are {self.available}")
self.current = name
def set_distance(self, distance):
"""Rescale the flux to the given distance.
Parameters
----------
distance: float or int
The distance to scale the flux to.
"""
if type(distance) is not float and type(distance) is not int:
raise ValueError("distance is not a float or integer")
for spectrum in self.available:
if spectrum == "spec_tau":
continue
if self.spectra[spectrum].units == SpectrumUnits.l_nu:
continue
for key in self.spectra[spectrum].columns:
if key in ["Lambda", "Freq."]:
continue
self.spectra[spectrum][key] *= \
(self.spectra[spectrum].distance * PARSEC) ** 2 / (distance * PARSEC) ** 2
self.spectra[spectrum].distance = distance
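# The rescaling above follows the inverse-square law: a flux spectrum stored
# at distance d_old is multiplied by (d_old / d_new) ** 2. For example,
# moving a spectrum from 100 pc to 200 pc divides every flux column by 4.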
@staticmethod
def show(block=True):
"""Show a plot which has been created.
Wrapper around pyplot.show().
Parameters
----------
block: bool
Use blocking or non-blocking figure display.
"""
plt.show(block=block)
def smooth(self, width=5):
"""Smooth the spectrum flux/luminosity bins.
If this is used after the spectrum has already been smoothed, then the
"original" is copied back into the spectrum before smoothing again. This
way the function does not smooth an already smoothed spectrum.
Parameters
----------
width: int [optional]
The width of the boxcar filter (in bins).
"""
if self._original_spectra is None:
self._original_spectra = copy.deepcopy(self.spectra)
else:
self.spectra = copy.deepcopy(self._original_spectra)
# Loop over each available spectrum and smooth it
for spectrum in self.available:
if spectrum == "spec_tau": # todo: cleaner way to skip spec_tau
continue
for thing_to_smooth in self.spectra[spectrum].columns:
try:
self.spectra[spectrum][thing_to_smooth] = smooth_array(self.spectra[spectrum][thing_to_smooth],
width)
except KeyError:
pass # some spectra do not have the inclination angles...
def restore_original_spectra(self):
"""Restore the spectrum to its original unsmoothed form."""
self.spectra = copy.deepcopy(self._original_spectra)
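# Example of the smooth/restore workflow on a Spectrum instance `s`
# (the boxcar widths are illustrative):
#
#     s.smooth(15)                  # smooth every loaded spectrum with a 15-bin boxcar
#     s.smooth(51)                  # re-smooths the original data, not the 15-bin result
#     s.restore_original_spectra()  # return to the unsmoothed spectra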
# Built in stuff -----------------------------------------------------------
def __getattr__(self, key):
return self.spectra[self.current][key]
def __getitem__(self, key):
if self._get_spec_key(key) in self.available:
return self.spectra[self._get_spec_key(key)]
else:
return self.spectra[self.current][key]
# def __setattr__(self, name, value):
# self.spectra[self.current][name] = value
def __setitem__(self, key, value):
if self._get_spec_key(key) in self.available:
self.spectra[self._get_spec_key(key)] = value
else:
self.spectra[self.current][key] = value
def __str__(self):
msg = f"Spectrum for the model {self.root}\n"
msg += f"\nDirectory {self.fp}"
msg += f"\nAvailable spectra {self.available}"
msg += f"\nCurrent spectrum {self.current}"
if "spec" in self.available or "log_spec" in self.available:
if self.log_spec:
key = "log_spec"
else:
key = "spec"
msg += f"\nSpectrum inclinations: {self.spectra[key].inclinations}"
if "tau_spec" in self.available:
msg += f"\nOptical depth inclinations {self.spectra.tau_spec.inclinations}"
return textwrap.dedent(msg)
# Functions --------------------------------------------------------------------
def integrate(spectrum, name, xmin, xmax, spec_type=None):
"""Integrate a sub-range of a spectrum.
By integrating a spectrum in luminosity units between [xmin, xmax], it
is possible to calculate the total luminosity of a given wavelength band.
For example, by using xmin, xmax = 3000, 8000 Angstroms, the total optical
luminosity can be estimated.
This function uses Simpson's rule to approximate the integral given the
wavelength/frequency bins (used as the sample points) and the luminosity
bins.
Parameters
----------
spectrum: pypython.Spectrum
The spectrum class containing the spectrum to integrate.
name: str
The name of the spectrum to integrate, i.e. "60", "Emitted".
xmin: float
The lower integration bound, in Angstroms.
xmax: float
The upper integration bound, in Angstroms.
spec_type: str [optional]
The spectrum type to use. If this is None, then spectrum.current is
used.
Returns
-------
The integral of the spectrum between xmin and xmax.
"""
if spec_type:
key = spec_type
else:
key = spectrum.current
if spectrum[key].units == SpectrumUnits.l_lm or spectrum[key].units == SpectrumUnits.f_lm:
sample_points = spectrum[key]["Lambda"]
else:
sample_points = spectrum[key]["Freq."]
tmp = xmin
xmin = angstrom_to_hz(xmax)
xmax = angstrom_to_hz(tmp)
sample_points, y = get_xy_subset(sample_points, spectrum[key][name], xmin, xmax)
return simpson(y, sample_points)
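# A usage sketch for integrate(); the root name and the 3000-8000 Angstrom
# optical band are illustrative assumptions:
#
#     s = Spectrum("model")
#     s.convert_flux_to_luminosity()
#     l_optical = integrate(s, "60", 3000, 8000)  # erg/s for an l_lm spectrum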
# This is placed here due to a circular dependency -----------------------------
from pypython.spectrum import create, plot, lines
|
"""
Test lldb data formatter subsystem.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
USE_LIBSTDCPP = "USE_LIBSTDCPP"
USE_LIBCPP = "USE_LIBCPP"
class GenericMultiSetDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
self.namespace = 'std'
def findVariable(self, name):
var = self.frame().FindVariable(name)
self.assertTrue(var.IsValid())
return var
def getVariableType(self, name):
var = self.findVariable(name)
return var.GetType().GetDisplayTypeName()
def check(self, var_name, size):
var = self.findVariable(var_name)
self.assertEqual(var.GetNumChildren(), size)
children = []
for i in range(size):
child = var.GetChildAtIndex(i)
children.append(ValueCheck(value=child.GetValue()))
self.expect_var_path(var_name, type=self.getVariableType(var_name), children=children)
def do_test_with_run_command(self, stdlib_type):
"""Test that that file and class static variables display correctly."""
self.build(dictionary={stdlib_type: "1"})
(self.target, process, _, bkpt) = lldbutil.run_to_source_breakpoint(
self, "Set break point at this line.", lldb.SBFileSpec("main.cpp", False))
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
self.runCmd(
"settings set target.max-children-count 256",
check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
ii_type = self.getVariableType("ii")
self.assertTrue(ii_type.startswith(self.namespace + "::multiset"),
"Type: " + ii_type)
self.expect("frame variable ii", substrs=["size=0", "{}"])
lldbutil.continue_to_breakpoint(process, bkpt)
self.expect(
"frame variable ii",
substrs=[
"size=6",
"[0] = 0",
"[1] = 1",
"[2] = 2",
"[3] = 3",
"[4] = 4",
"[5] = 5"])
lldbutil.continue_to_breakpoint(process, bkpt)
self.check("ii", 7)
lldbutil.continue_to_breakpoint(process, bkpt)
self.expect("frame variable ii", substrs=["size=0", "{}"])
self.check("ii", 0)
lldbutil.continue_to_breakpoint(process, bkpt)
self.expect("frame variable ii", substrs=["size=0", "{}"])
ss_type = self.getVariableType("ss")
self.assertTrue(ss_type.startswith(self.namespace + "::multiset"),
"Type: " + ss_type)
self.expect("frame variable ss", substrs=["size=0", "{}"])
self.check("ss", 0)
lldbutil.continue_to_breakpoint(process, bkpt)
self.expect(
"frame variable ss",
substrs=[
"size=2",
'[0] = "a"',
'[1] = "a very long string is right here"'])
self.check("ss", 2)
lldbutil.continue_to_breakpoint(process, bkpt)
self.expect(
"frame variable ss",
substrs=[
"size=4",
'[0] = "a"',
'[1] = "a very long string is right here"',
'[2] = "b"',
'[3] = "c"',
])
self.check("ss", 4)
self.expect(
"p ss",
substrs=[
"size=4",
'[0] = "a"',
'[1] = "a very long string is right here"',
'[2] = "b"',
'[3] = "c"',
])
self.expect("frame variable ss[2]", substrs=[' = "b"'])
lldbutil.continue_to_breakpoint(process, bkpt)
self.expect(
"frame variable ss",
substrs=[
"size=3",
'[0] = "a"',
'[1] = "a very long string is right here"',
'[2] = "c"'])
def do_test_ref_and_ptr(self, stdlib_type):
"""Test that the data formatters work on ref and ptr."""
self.build(dictionary={stdlib_type: "1"})
(self.target, process, _, bkpt) = lldbutil.run_to_source_breakpoint(
self, "Stop here to check by ref and ptr.",
lldb.SBFileSpec("main.cpp", False))
# The reference should print just like the value:
self.check("ref", 7)
self.expect("frame variable ptr",
substrs=["ptr =", "size=7"])
self.expect("expr ptr",
substrs=["size=7"])
@add_test_categories(["libstdcxx"])
def test_with_run_command_libstdcpp(self):
self.do_test_with_run_command(USE_LIBSTDCPP)
@add_test_categories(["libc++"])
def test_with_run_command_libcpp(self):
self.do_test_with_run_command(USE_LIBCPP)
@add_test_categories(["libstdcxx"])
def test_ref_and_ptr_libstdcpp(self):
self.do_test_ref_and_ptr(USE_LIBSTDCPP)
@add_test_categories(["libc++"])
def test_ref_and_ptr_libcpp(self):
self.do_test_ref_and_ptr(USE_LIBCPP)
|
from .ChangeOneMutator import ChangeOneMutator
from .DiscreteMutator import DiscreteMutator
from .SwapMutator import SwapMutator
|
from .graph import (
GraphPlotStorer,
)
|
import numpy as np
import torchpruner.mask_utils as mask_utils
from collections import OrderedDict
from . import operator
import copy
def mask_mapping(
node, mask, operator, defined_dict={}, masks=None, return_origin=False
):
in_or_out, rank = operator.rank(node)
node_key = in_or_out + "_" + str(rank)
if masks is None:
masks = create_masks(operator)
if node_key not in defined_dict:
raise RuntimeError("The " + node_key + " cut is not defined")
dim_dict = defined_dict[node_key]
indexs, dims = mask.indexs(return_dims=True)
shape_length = len(indexs)
for dim in dims:
mapping_list = None
if dim in dim_dict.keys():
mapping_list = dim_dict[dim]
elif dim - shape_length in dim_dict.keys():
mapping_list = dim_dict[dim - shape_length]
else:
if "any" in dim_dict.keys():
mapping_list = dim_dict["any"]
if mapping_list is None:
continue
for item in mapping_list:
current_in_or_out = "in"
if item[0].startswith("in"):
nodes = operator.in_data
else:
nodes = operator.out_data
current_in_or_out = "out"
node_rank = int(item[0].split("_")[1])
if node_rank >= len(nodes):
continue
current_node = nodes[node_rank]
c_mask = mask_utils.Mask(current_node.size())
if current_in_or_out in masks.keys():
if node_rank < len(masks[current_in_or_out]):
c_mask = masks[current_in_or_out][node_rank]
cut_dim = item[1]
if cut_dim == "any":
if len(item) == 2:
cut_dim = dim
else:
cut_dim = dim + item[2]
c_mask.set_mask(indexs=[indexs[dim]], dims=[cut_dim])
if return_origin:
masks[in_or_out][rank] = mask
return masks
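# `defined_dict` maps a node key ("in_<rank>" or "out_<rank>") to a
# per-dimension list of (target node key, target dimension) pairs which
# describe where a cut must be propagated. An illustrative mapping for a
# simple element-wise operator with one input and one output:
#
#     example_mapping = {
#         "in_0": {"any": [("out_0", "any")]},
#         "out_0": {"any": [("in_0", "any")]},
#     }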
# just one to one
def set_reduce_masks(node, mask, masks, operator, dims=None, keepdims=False):
in_or_out, rank = operator.rank(node)
if dims is None and keepdims is False:
raise RuntimeError("unsupport axis is None and keepdims is 0")
if in_or_out == "in":
masks["out"][0] = mask.reduce(dims, keepdims)
if in_or_out == "out":
masks["in"][0] = mask.copy()
if not keepdims:
dims = sorted(dims)
for dim in dims:
masks["in"][0] = masks["in"][0].expand_dim(dim)
return masks
def mask_list_to_name(operator, masks):
return_dict = OrderedDict()
items = ["in", "out"]
for item in items:
data_list = None
if item == "in":
data_list = operator.in_data
mask_list = masks["in"]
if item == "out":
data_list = operator.out_data
mask_list = masks["out"]
for i in range(0, len(data_list)):
if mask_list[i] is None or mask_list[i].no_cut():
continue
return_dict[data_list[i].name] = mask_list[i]
return return_dict
def create_masks(operator):
masks = OrderedDict()
masks["in"] = []
masks["out"] = []
items = ["in", "out"]
for item in items:
data_list = None
if item == "in":
data_list = operator.in_data
mask_list = masks["in"]
if item == "out":
data_list = operator.out_data
mask_list = masks["out"]
for i in range(0, len(data_list)):
mask_list.append(mask_utils.Mask(data_list[i].size()))
return masks
# apply the broadcast here
class onnx_Gemm(operator.OperatorNode):
def __init__(self, node):
super(onnx_Gemm, self).__init__(node)
self.index_mapping = {
"in_0": {0: [("in_2", 0), ("out_0", 0)], 1: [("in_1", 0)]},
"in_1": {0: [("in_0", 1)], 1: [("in_2", 1), ("out_0", 1)]},
"in_2": {0: [("in_0", 0), ("out_0", 0)], 1: [("in_1", 1), ("out_0", 1)]},
"out_0": {0: [("in_0", 0), ("in_2", 0)], 1: [("in_1", 1), ("in_2", 1)]},
}
def analysis(self, node, mask):
transA = 0
if "transA" in self.params.keys():
transA = self.params["transA"]
transB = 0
if "transB" in self.params.keys():
transB = self.params["transB"]
masks = create_masks(self)
in_or_out, rank = self.rank(node)
shrink_size = None
if transA == 1:
masks["in"][0] = masks["in"][0].transpose([1, 0])
if transB == 1:
masks["in"][1] = masks["in"][1].transpose([1, 0])
if len(masks["in"]) == 3:
shink_size = masks["in"][2].shape
masks["in"][2] = masks["in"][2].boardcast(masks["out"][0].shape)
if in_or_out == "in" and rank == 0 and transA == 1:
mask = mask.transpose([1, 0])
if in_or_out == "in" and rank == 1 and transB == 1:
mask = mask.transpose([1, 0])
if in_or_out == "in" and rank == 2:
mask = mask.boardcast(masks["out"][0].shape)
masks = mask_mapping(node, mask, self, self.index_mapping, masks)
if shrink_size is not None:
masks["in"][2] = masks["in"][2].shrinkcast(shrink_size)
if transA == 1:
masks["in"][0] = masks["in"][0].transpose([1, 0])
if transB == 1:
masks["in"][1] = masks["in"][1].transpose([1, 0])
return mask_list_to_name(self, masks), None
def flops(self):
shape1 = self.in_data[0].size()
shape2 = self.in_data[1].size()
flops_matrix = 0
if shape2[0] in shape1:
flops_matrix = shape1[0] * shape1[1] * shape2[1]
else:
flops_matrix = shape1[0] * shape1[1] * shape2[0]
if len(self.in_data) == 3:
out_data_shape = self.out_data[0].size()
flops_matrix += out_data_shape[0] * out_data_shape[1]
return flops_matrix
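# i.e. for an (M, K) x (K, N) product the estimate is M * K * N multiply-adds,
# plus M * N for the optional bias term.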
# the last one should be changed to mask type
class onnx_Concat(operator.OperatorNode):
def __init__(self, node):
super(onnx_Concat, self).__init__(node)
def analysis(self, node, mask):
params = self.params
axis = params["axis"]
in_or_out, rank = self.rank(node)
masks = create_masks(self)
# if out, don't have to do this
if in_or_out == "in":
indexs, dims = mask.indexs(return_dims=True)
del indexs[axis]
if axis in dims:
dims.remove(axis)
for i in range(0, len(masks["in"])):
masks["in"][i].set_mask(indexs, dims)
masks["in"][rank] = mask
masks["out"][0] = mask_utils.concatenate(masks["in"], axis)
masks["in"][rank] = None
return mask_list_to_name(self, masks), None
else:
masks["out"][0] = mask
begin = 0
for i in range(0, len(masks["in"])):
length = masks["in"][i].shape[axis]
masks["in"][i] = masks["out"][0].slice(begin, begin + length, axis)
begin += length
masks["out"][0] = None
return mask_list_to_name(self, masks), None
class onnx_Split(operator.OperatorNode):
def __init__(self, node):
super(onnx_Split, self).__init__(node)
def analysis(self, node, mask):
params = self.params
axis = params["axis"]
in_or_out, rank = self.rank(node)
masks = create_masks(self)
# if out, don't have to do this
if in_or_out == "out":
indexs, dims = mask.indexs(return_dims=True)
del indexs[axis]
dims.remove(axis)
for i in range(0, len(masks["out"])):
masks["out"][i].set_mask(indexs, dims)
masks["out"][rank] = mask
masks["in"][0] = mask_utils.concatenate(masks["out"], axis)
masks["out"][rank] = None
return mask_list_to_name(self, masks), None
else:
masks["in"][0] = mask
begin = 0
for i in range(0, len(masks["out"])):
length = masks["out"][i].shape[axis]
masks["out"][i] = masks["in"][0].slice(begin, begin + length, axis)
begin += length
masks["in"][0] = None
return mask_list_to_name(self, masks), None
## the following is the point wise one to one condition
# Abs Acos Acosh Asin Asinh Atan Atanh Cast Ceil Reciprocal
# Clip Cos Cosh DequantizeLinear Dropout DynamicQuantizeLinear Elu Erf Exp
# Floor HardSigmoid Hardmax Identity IsInf IsNaN LeakyRelu Log
# LogSoftmax Neg NonZero Not Relu Round Selu Shrink Sigmoid Sign Sin
# Sinh Softmax Softplus Softsign Sqrt Tan Tanh ThresholdedRelu
# Max Mean Min LpNormalization LRN MeanVarianceNormalization
## the PRelu and the ThresholdedRelu are special
#####################################################
class onnx_pw(operator.OperatorNode):
def __init__(self, node):
super(onnx_pw, self).__init__(node)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
for i in range(0, len(masks["in"])):
if in_or_out == "in" and rank == i:
continue
masks["in"][i] = mask.copy()
for i in range(0, len(masks["out"])):
if in_or_out == "out" and rank == i:
continue
masks["out"][i] = mask.copy()
return mask_list_to_name(self, masks), None
def flops(self):
shape = self.in_data[0].size()
flops = 1
for size in shape:
flops = flops * size
return flops
## broadcast and pointwise
## only supports n inputs to 1 output
# Add And BitShift Div Equal Greater Less Mod Mul Sub
# Or Pow Sum Xor
# PRelu ThresholdedRelu
#######################################################
class onnx_bc_pw(operator.OperatorNode):
def __init__(self, node):
super(onnx_bc_pw, self).__init__(node)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
# broadcast the mask
boardcast_shape = self.out_data[0].size()
mask = mask.boardcast(boardcast_shape)
# pointwise assign
for i in range(0, len(masks["in"])):
if in_or_out == "in" and rank == i:
continue
masks["in"][i] = mask.copy()
for i in range(0, len(masks["out"])):
if in_or_out == "out" and rank == i:
continue
masks["out"][i] = mask.copy()
# shrink the mask
for i in range(0, len(masks["in"])):
masks["in"][i] = masks["in"][i].shrinkcast(self.in_data[i].size())
for i in range(0, len(masks["out"])):
masks["out"][i] = masks["out"][i].shrinkcast(self.out_data[i].size())
return mask_list_to_name(self, masks), None
def flops(self):
input_num = len(self.in_data)
shape = self.out_data[0].size()
flops = 1
for size in shape:
flops = flops * size
return flops * (input_num - 1)
## reduce type operator, reduce the dims
# ArgMax ArgMin GlobalAveragePool GlobalLpPool OneHot Squeeze
# ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp ReduceMax ReduceMean ReduceMin
# ReduceProd ReduceSum ReduceSumSquare
# ordinary reduce
# ArgMax ArgMin ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
# ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
class onnx_reduce(operator.OperatorNode):
def __init__(self, parameters={}):
super(onnx_reduce, self).__init__(parameters)
def get_axis(self):
axis = None
if "axes" in self.params:
axis = self.params["axes"]
else:
axis = self.params["axis"]
if not isinstance(axis, (list, tuple)):
axis = [axis]
keepdims = self.params["keepdims"]
if keepdims == 1:
keepdims = True
else:
keepdims = False
return axis, keepdims
def analysis(self, node, mask):
axis, keepdims = self.get_axis()
masks = create_masks(self)
masks = set_reduce_masks(node, mask, masks, self, dims=axis, keepdims=keepdims)
return mask_list_to_name(self, masks), None
# Global reduce
# GlobalLpPool GlobalAveragePool GlobalMaxPool
class onnx_GlobalPool(onnx_reduce):
def __init__(self, node):
super(onnx_GlobalPool, self).__init__(node)
def get_axis(self):
in_data = self.in_data[0]
if len(in_data.size()) <= 2:
raise RuntimeError("Wrong demision")
keepdims = True
axis = list(range(2, len(in_data.size())))
return axis, keepdims
# Squeeze
class onnx_Squeeze(onnx_reduce):
def __init__(self, node):
super(onnx_Squeeze, self).__init__(node)
def get_axis(self):
axis = self.params["axes"]
return axis, False
# UnSqueeze
class onnx_Unsqueeze(operator.OperatorNode):
def __init__(self, parameters={}):
super(onnx_Unsqueeze, self).__init__(parameters)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
axes = self.params["axes"]
if in_or_out == "out":
masks["in"][0] = mask.reduce(axes, False)
if in_or_out == "in":
dims = list(axes).sort()
for dim in dims:
masks["out"][0] = masks["out"][0].expand_dim(dim)
return mask_list_to_name(self, masks), None
# no action operation
# Constant RandomNormal RandomNormalLike RandomUniform
# RandomUniformLike Range Shape Size NonMaxSuppression
# Gather GatherElements GatherND
class onnx_no_action(operator.OperatorNode):
def __init__(self, node):
super(onnx_no_action, self).__init__(node)
def analysis(self, node, mask):
return OrderedDict(), None
# unsupported operation
# means the op does not support cutting
class onnx_unsupport(operator.OperatorNode):
def __init__(self, node):
super(onnx_unsupport, self).__init__(node)
def analysis(self, node, mask):
raise RuntimeError("The opeartor doesn't support cutting")
# Conv like operation
# ConvInteger ConvTranspose Conv
# In_0: B G C
# In_1: G W C
# In_2: G W
# Out_0:B G W
class onnx_conv(operator.OperatorNode):
def __init__(self, node):
super(onnx_conv, self).__init__(node)
self.index_mapping = {
# input
"in_0": {
0: [("out_0", 0)],
1: [("in_1", 0), ("in_2", 0), ("out_0", 1)],
2: [("in_1", 2)],
},
"in_1": {
0: [("in_2", 0), ("out_0", 1), ("in_0", 1)],
1: [("in_2", 1), ("out_0", 2)],
2: [("in_0", 2)],
},
"in_2": {
0: [("in_1", 0), ("out_0", 1), ("in_0", 1)],
1: [("in_1", 1), ("out_0", 2)],
},
"out_0": {
0: [("in_0", 0)],
1: [("in_1", 0), ("in_2", 0), ("in_0", 1)],
2: [("in_1", 1), ("in_2", 1)],
},
}
def analysis(self, node, mask):
params = self.params
group = params["group"]
in_or_out, rank = self.rank(node)
masks = create_masks(self)
# divide in data
for i in range(0, len(masks["in"])):
if i == 0:
masks["in"][0] = masks["in"][0].divide_dim(1, (group, -1))
if i == 1 or i == 2:
masks["in"][i] = masks["in"][i].divide_dim(0, (group, -1))
# for ConvTranspose condition
if self.type == "ConvTranspose":
dims = list(range(0, len(masks["in"][1].shape)))
dims[1] = 2
dims[2] = 1
masks["in"][1] = masks["in"][1].transpose(dims=dims)
# divide out data
for i in range(0, len(masks["out"])):
if i == 0:
masks["out"][0] = masks["out"][0].divide_dim(1, (group, -1))
if in_or_out == "in":
if rank == 0:
mask = mask.divide_dim(1, (group, -1))
if rank == 1 or rank == 2:
mask = mask.divide_dim(0, (group, -1))
if in_or_out == "out":
mask = mask.divide_dim(1, (group, -1))
if in_or_out == "in" and rank == 1 and self.type == "ConvTranspose":
dims = list(range(0, len(mask.shape)))
dims[1] = 2
dims[2] = 1
mask = mask.transpose(dims=dims)
operator_dict = None
group_cut = 0
# sync the new_mask
if in_or_out == "in":
if rank == 0:
mask = mask.trim(2)
indexs = mask.indexs()
group_cut = len(indexs[1])
if rank == 1 or rank == 2:
mask = mask.trim(1)
indexs = mask.indexs()
group_cut = len(indexs[0])
if in_or_out == "out":
mask = mask.trim(2)
indexs = mask.indexs()
group_cut = len(indexs[1])
if group_cut >= 1:
operator_dict = {"group": group_cut}
# get result_dict
masks = mask_mapping(node, mask, self, self.index_mapping, masks, True)
# for ConvTranspose condition
if self.type == "ConvTranspose":
dims = list(range(0, len(masks["in"][1].shape)))
dims[1] = 2
dims[2] = 1
masks["in"][1] = masks["in"][1].transpose(dims=dims)
# restore the normal (undivided) shape
for i in range(0, len(masks["in"])):
if i == 0:
masks["in"][0] = masks["in"][0].combine_dim([1, 2])
if i == 1 or i == 2:
masks["in"][i] = masks["in"][i].combine_dim([0, 1])
# restore the out data shape
for i in range(0, len(masks["out"])):
if i == 0:
masks["out"][0] = masks["out"][0].combine_dim([1, 2])
# print(masks)
return mask_list_to_name(self, masks), operator_dict
def flops(self):
kernel_flops = 1
kernel_shape = self.params["kernel_shape"]
for size in kernel_shape:
kernel_flops = kernel_flops * size
single_flops = (
kernel_flops
* self.in_data[0].size(1)
* self.out_data[0].size(1)
/ self.params["group"]
)
element_size = 1
if self.type == "Conv":
shape = self.out_data[0].size()
for i in range(2, len(shape)):
element_size = element_size * shape[i]
if self.type == "ConvTranspose":
shape = self.in_data[0].size()
for i in range(2, len(shape)):
element_size = element_size * shape[i]
element_count = self.in_data[0].size(0) * element_size
conv_flops = single_flops * element_count
# bias
bias_size = 0
if len(self.in_data) == 3:
shape = self.in_data[2].size()
bias_size = 1
for i in range(0, len(shape)):
bias_size = bias_size * shape[i]
bias_flops = bias_size * element_count
return conv_flops + bias_flops
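# i.e. FLOPs ~= N * prod(output spatial dims) * (C_in * C_out / group) * prod(kernel_shape)
# plus the bias contribution, with the input spatial dims used instead for ConvTranspose.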
# Expand
class onnx_Expand(operator.OperatorNode):
def __init__(self, parameters={}):
super(onnx_Expand, self).__init__(parameters)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
if in_or_out == "in" and rank == 1:
raise RuntimeError("The shape is not cuttable")
if in_or_out == "in" and rank == 0:
masks["out"][0] = mask.boardcast(list(self.out_data[0].size()))
if in_or_out == "out" and rank == 0:
masks["in"][0] = mask.shrinkcast(list(self.in_data[0].size()))
return mask_list_to_name(self, masks), None
##onnx mapping
# BatchNormalization GRU InstanceNormalization
# LRN LSTM MatMul MatMulInteger
# Matmul QuantizeLinear AveragePool MaxPool MaxUnPool
class onnx_mapping(operator.OperatorNode):
def __init__(self, node):
super(onnx_mapping, self).__init__(node)
if "strides" in self.params:
if isinstance(self.params["strides"], list):
if len(self.params["strides"]) == 0:
self.params["strides"] = copy.deepcopy(self.params["kernel_shape"])
self.index_mapping = {}
def analysis(self, node, mask):
return (
mask_list_to_name(self, mask_mapping(node, mask, self, self.index_mapping)),
None,
)
# GRU complex
# implement later
class onnx_GRU(operator.OperatorNode):
def __init__(self, node):
super(onnx_GRU, self).__init__(node)
self.index_mapping = {
"in_0": {
2: [("in_1", 3)],
},
"in_1": {
3: [("in_0", 2)],
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("out_0", 3),
("out_1", 2),
],
},
"in_2": {
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("out_0", 3),
("out_1", 2),
],
3: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("out_0", 3),
("out_1", 2),
],
},
"in_3": {
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("out_0", 3),
("out_1", 2),
]
},
"in_5": {
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("out_0", 3),
("out_1", 2),
]
},
"out_0": {
3: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("out_0", 3),
("out_1", 2),
]
},
"out_1": {
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("out_0", 3),
("out_1", 2),
]
},
}
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
# split the stacked gate dimension of the weight and bias inputs
masks["in"][1] = masks["in"][1].divide_dim(1, (4, -1))
masks["in"][2] = masks["in"][2].divide_dim(1, (4, -1))
masks["in"][3] = masks["in"][3].divide_dim(1, (8, -1))
# get result_dict
masks = mask_mapping(node, mask, self, self.index_mapping, masks, True)
# recombine the gate dimension and record the new hidden size
operator_dict = {"hidden_size": len(masks["in"][1].indexs()[2])}
masks["in"][1] = masks["in"][1].combine_dim([1, 2])
masks["in"][2] = masks["in"][2].combine_dim([1, 2])
masks["in"][3] = masks["in"][3].combine_dim([1, 2])
return mask_list_to_name(self, masks), operator_dict
# LSTM complex
class onnx_LSTM(operator.OperatorNode):
def __init__(self, node):
super(onnx_LSTM, self).__init__(node)
self.index_mapping = {
"in_0": {
2: [("in_1", 3)],
},
"in_1": {
3: [("in_0", 2)],
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("in_6", 2),
("in_7", 2),
("out_0", 3),
("out_1", 2),
("out_2", 2),
],
},
"in_2": {
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("in_6", 2),
("in_7", 2),
("out_0", 3),
("out_1", 2),
("out_2", 2),
],
3: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("in_6", 2),
("in_7", 2),
("out_0", 3),
("out_1", 2),
("out_2", 2),
],
},
"in_3": {
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("in_6", 2),
("in_7", 2),
("out_0", 3),
("out_1", 2),
("out_2", 2),
]
},
"in_5": {
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("in_6", 2),
("in_7", 2),
("out_0", 3),
("out_1", 2),
("out_2", 2),
]
},
"in_6": {
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("in_6", 2),
("in_7", 2),
("out_0", 3),
("out_1", 2),
("out_2", 2),
]
},
"in_7": {
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("in_6", 2),
("in_7", 2),
("out_0", 3),
("out_1", 2),
("out_2", 2),
]
},
"out_0": {
3: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("in_6", 2),
("in_7", 2),
("out_0", 3),
("out_1", 2),
("out_2", 2),
]
},
"out_1": {
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("in_6", 2),
("in_7", 2),
("out_0", 3),
("out_1", 2),
("out_2", 2),
]
},
"out_2": {
2: [
("in_1", 2),
("in_2", 2),
("in_2", 3),
("in_3", 2),
("in_5", 2),
("in_6", 2),
("in_7", 2),
("out_0", 3),
("out_1", 2),
("out_2", 2),
]
},
}
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
# split the stacked gate dimension of the weight, bias and peephole inputs
masks["in"][1] = masks["in"][1].divide_dim(1, (4, -1))
masks["in"][2] = masks["in"][2].divide_dim(1, (4, -1))
masks["in"][3] = masks["in"][3].divide_dim(1, (8, -1))
masks["in"][7] = masks["in"][7].divide_dim(1, (3, -1))
# get result_dict
masks = mask_mapping(node, mask, self, self.index_mapping, masks, True)
# recombine the gate dimension and record the new hidden size
operator_dict = {"hidden_size": len(masks["in"][1].indexs()[2])}
masks["in"][1] = masks["in"][1].combine_dim([1, 2])
masks["in"][2] = masks["in"][2].combine_dim([1, 2])
masks["in"][3] = masks["in"][3].combine_dim([1, 2])
masks["in"][7] = masks["in"][7].combine_dim([1, 2])
return mask_list_to_name(self, masks), operator_dict
# RNN complex
class onnx_RNN(operator.OperatorNode):
def __init__(self, node):
super(onnx_RNN, self).__init__(node)
self.index_mapping = {
"in_0": {
2: [("in_1", 2)],
},
"in_1": {
2: [("in_0", 2)],
1: [
("in_2", 1),
("in_2", 2),
("in_3", 2),
("in_5", 2),
("out_0", 3),
("out_1", 2),
],
},
"in_2": {
1: [
("in_1", 1),
("in_2", 2),
("in_3", 2),
("in_5", 2),
("out_0", 3),
("out_1", 2),
],
2: [
("in_1", 1),
("in_2", 1),
("in_3", 2),
("in_5", 2),
("out_0", 3),
("out_1", 2),
],
},
"in_3": {
2: [
("in_1", 1),
("in_2", 1),
("in_2", 2),
("in_5", 2),
("out_0", 3),
("out_1", 2),
],
},
"in_5": {
2: [
("in_1", 1),
("in_2", 1),
("in_2", 2),
("in_3", 2),
("out_0", 3),
("out_1", 2),
],
},
"out_0": {
3: [
("in_1", 1),
("in_2", 1),
("in_2", 2),
("in_3", 2),
("in_5", 2),
("out_1", 2),
],
},
"out_1": {
2: [
("in_1", 1),
("in_2", 1),
("in_2", 2),
("in_3", 2),
("in_5", 2),
("out_0", 3),
],
},
}
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
# divide in_5
masks["in"][5] = masks["in"][5].divide_dim(2, (2, -1))
# get result_dict
masks = mask_mapping(node, mask, self, self.index_mapping, masks, True)
# recombine the divided dimension and record the new hidden size
operator_dict = {"hidden_size": len(masks["in"][1].indexs()[1])}
masks["in"][5] = masks["in"][5].combine_dim([2, 3])
return mask_list_to_name(self, masks), operator_dict
# InstanceNormalization
class onnx_InstanceNormalization(onnx_mapping):
def __init__(self, node):
super(onnx_InstanceNormalization, self).__init__(node)
self.index_mapping = {
"in_0": {
0: [("out_0", 0)],
1: [("in_1", 0), ("in_2", 0), ("out_0", 1)],
"any": [("out_0", "any")],
},
"in_1": {0: [("in_0", 1), ("in_2", 0), ("out_0", 1)]},
"in_2": {0: [("in_0", 1), ("in_1", 0), ("out_0", 1)]},
"out_0": {
0: [("in_0", 0)],
1: [("in_1", 0), ("in_2", 0), ("in_0", 1)],
"any": [("in_0", "any")],
},
}
def flops(self):
shape = self.in_data[0].size()
flops = 1
for size in shape:
flops = flops * size
return 2 * flops
class onnx_BatchNormalization(onnx_mapping):
def __init__(self, node):
super(onnx_BatchNormalization, self).__init__(node)
self.index_mapping = {
"in_0": {
0: [("out_0", 0)],
1: [
("out_0", 1),
("out_1", 0),
("out_2", 0),
("in_1", 0),
("in_2", 0),
("in_3", 0),
("in_4", 0),
],
"any": [("out_0", "any")],
},
"out_0": {
0: [("in_0", 0)],
1: [
("in_0", 1),
("out_1", 0),
("out_2", 0),
("in_1", 0),
("in_2", 0),
("in_3", 0),
("in_4", 0),
],
"any": [("in_0", "any")],
},
"in_1": {
0: [
("out_0", 1),
("out_1", 0),
("out_2", 0),
("in_0", 1),
("in_2", 0),
("in_3", 0),
("in_4", 0),
]
},
"in_2": {
0: [
("out_0", 1),
("out_1", 0),
("out_2", 0),
("in_0", 1),
("in_1", 0),
("in_3", 0),
("in_4", 0),
]
},
"in_3": {
0: [
("out_0", 1),
("out_1", 0),
("out_2", 0),
("in_0", 1),
("in_1", 0),
("in_2", 0),
("in_4", 0),
]
},
"in_4": {
0: [
("out_0", 1),
("out_1", 0),
("out_2", 0),
("in_0", 1),
("in_1", 0),
("in_2", 0),
("in_3", 0),
]
},
"out_1": {
0: [
("out_0", 1),
("out_2", 0),
("in_0", 1),
("in_1", 0),
("in_2", 0),
("in_3", 0),
("in_4", 0),
]
},
"out_2": {
0: [
("out_0", 1),
("out_1", 0),
("in_0", 1),
("in_1", 0),
("in_2", 0),
("in_3", 0),
("in_4", 0),
]
},
}
def flops(self):
shape = self.in_data[0].size()
flops = 1
for size in shape:
flops = flops * size
return flops * 2
# MatMul
class onnx_MatMul(onnx_mapping):
def __init__(self, node):
super(onnx_MatMul, self).__init__(node)
self.index_mapping = {
"in_0": {
-2: [("out_0", -2)],
-1: [("in_1", -2)],
"any": [("in_1", "any"), ("out_0", "any")],
},
"in_1": {
-2: [("in_0", -1)],
-1: [("out_0", -1)],
"any": [("in_0", "any"), ("out_0", "any")],
},
"out_0": {
-2: [("in_0", -2)],
-1: [("in_1", -1)],
"any": [("in_0", "any"), ("in_1", "any")],
},
}
def flops(self):
base = 1
in_shape = self.in_data[0].size()
out_shape = self.out_data[0].size()
for i in range(0, len(in_shape) - 2):
base = base * in_shape[i]
single_flops = in_shape[-1] * out_shape[-2] * out_shape[-1]
return base * single_flops
# MaxRoiPool
class onnx_MaxRoiPool(onnx_mapping):
def __init__(self, node):
super(onnx_MaxRoiPool, self).__init__(node)
self.index_mapping = {"in_0": {0: [("out_0", 1)]}, "out_0": {1: [("in_0", 0)]}}
# RoiAlign
class onnx_RoiAlign(onnx_mapping):
def __init__(self, node):
super(onnx_RoiAlign, self).__init__(node)
self.index_mapping = {"in_0": {0: [("out_0", 1)]}, "out_0": {1: [("in_0", 0)]}}
# AveragePool MaxPool MaxUnPool LpPool
class onnx_local_pool(onnx_mapping):
def __init__(self, node):
super(onnx_local_pool, self).__init__(node)
self.index_mapping = {
"in_0": {0: [("out_0", 0)], 1: [("out_0", 1)]},
"out_0": {0: [("in_0", 0)], 1: [("in_0", 1)]},
}
def flops(self):
element_shape = None
if self.type == "MaxUnPool":
element_shape = self.in_data[0].size()
else:
element_shape = self.out_data[0].size()
element_size = 1
for size in element_shape:
element_size = element_size * size
kernel_shape = self.params["kernel_shape"]
k_size = 1
for size in kernel_shape:
k_size = k_size * size
return element_size * k_size
# MatMulInteger
class onnx_MatMulInteger(onnx_mapping):
def __init__(self, node):
super(onnx_MatMulInteger, self).__init__(node)
raise NotImplementedError()
def flops(self):
raise NotImplementedError("The MatMulInteger is not implemented")
# Flatten
class onnx_Flatten(operator.OperatorNode):
def __init__(self, node):
super(onnx_Flatten, self).__init__(node)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
axis = self.params["axis"]
if in_or_out == "in":
masks["out"][0] = mask.combine_dim(dims=list(range(axis, len(mask.shape))))
if in_or_out == "out":
masks["in"][0] = mask.divide_dim(
dim=axis, size=self.in_data[0].size()[axis:]
)
masks["in"][0] = masks["in"][0].trim(axis)
masks["out"][0] = masks["in"][0].combine_dim(
dims=list(range(axis, len(mask.shape)))
)
return mask_list_to_name(self, masks), None
# Reshape
class onnx_Reshape(operator.OperatorNode):
def __init__(self, parameters={}):
super(onnx_Reshape, self).__init__(parameters)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
shape = self.in_data[1].data.tolist()
out_shape = shape
in_shape = self.in_data[0].size()
if in_or_out == "in":
masks["out"][0] = mask.combine_dim()
masks["out"][0] = masks["out"][0].divide_dim(0, out_shape)
max_value = max(masks["out"][0]._narray.shape)
masks["out"][0] = masks["out"][0].trim(
list(masks["out"][0]._narray.shape[:]).index(max_value)
)
masks["in"][0] = masks["out"][0].combine_dim()
masks["in"][0] = masks["in"][0].divide_dim(0, in_shape)
if in_or_out == "out":
masks["in"][0] = mask.combine_dim()
masks["in"][0] = masks["in"][0].divide_dim(0, in_shape)
max_value = max(masks["in"][0]._narray.shape)
masks["in"][0] = masks["in"][0].trim(
list(masks["in"][0]._narray.shape[:]).index(max_value)
)
masks["out"][0] = masks["in"][0].combine_dim()
masks["out"][0] = masks["out"][0].divide_dim(0, out_shape)
return mask_list_to_name(self, masks), None
# Slice
class onnx_Slice(operator.OperatorNode):
def __init__(self, node):
super(onnx_Slice, self).__init__(node)
def analysis(self, node, mask):
in_or_out, rank = node.rank(self)
masks = create_masks(self)
starts = list(self.in_data[1].data)
ends = list(self.in_data[2].data)
axes = None
if len(self.in_data) >= 4:
axes = list(self.in_data[3].data)
else:
axes = range(0, len(starts))
steps = None
if len(self.in_data) >= 5:
steps = list(self.in_data[4].data)
else:
steps = list(np.ones(len(starts), dtype=int))
if in_or_out == "in" and rank == 0:
masks["out"][0] = mask.copy()
for i in range(0, len(axes)):
masks["out"][0] = mask.slice(starts[i], ends[i], axes[i], steps[i])
if in_or_out == "out" and rank == 0:
axes.sort()
mask_slice = []
for i in range(0, len(self.in_data[0].shape)):
if i not in axes:
mask_slice.append(slice(None))
else:
index = axes.index(i)
mask_slice.append(slice(starts[index], ends[index], steps[index]))
masks["in"][0][mask_slice] = mask
return mask_list_to_name(self, masks), None
# Resize
class onnx_Resize(operator.OperatorNode):
def __init__(self, node):
super(onnx_Resize, self).__init__(node)
self.index_mapping = {
"in_0": {"any": [("out_0", "any")]},
"out_0": {"any": [("in_0", "any")]},
}
def analysis(self, node, mask):
masks = create_masks(self)
indexs, dims = mask.indexs(return_dims=True)
masks = mask_mapping(node, mask, self, self.index_mapping, masks)
return mask_list_to_name(self, masks), None
# Pad
class onnx_Pad(operator.OperatorNode):
def __init__(self, node):
super(onnx_Pad, self).__init__(node)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
pads = list(self.in_data[1].data)
if in_or_out == "in":
if rank == 1 or rank == 2:
raise RuntimeError("Unsupport cut the pads or value")
indexs, dims = mask.indexs(return_dims=True)
for dim in dims:
if len(indexs[dim]) == 0:
continue
indexs[dim] = list(np.array(indexs[dim]) + pads[dim * 2])
masks["out"][0].set_mask([indexs[dim]], [dim])
return mask_list_to_name(self, masks), None
if in_or_out == "out":
in_shape = self.in_data[0].size()
indexs, dims = mask.indexs(return_dims=True)
for dim in dims:
if len(indexs[dim]) == 0:
continue
indexs[dim] = list(np.array(indexs[dim]) - pads[dim * 2])
indexs[dim] = list(
filter(lambda x: x >= 0 and x < in_shape[dim], indexs[dim])
)
masks["in"][0].set_mask([indexs[dim]], [dim])
return mask_list_to_name(self, masks), None
# Transpose
class onnx_Transpose(operator.OperatorNode):
def __init__(self, node):
super(onnx_Transpose, self).__init__(node)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
perm = list(self.params["perm"])
if in_or_out == "in":
masks["out"][0] = mask.transpose(perm)
else:
masks["in"][0] = mask.transpose(perm)
return mask_list_to_name(self, masks), None
# TopK
class onnx_TopK(operator.OperatorNode):
def __init__(self, node):
super(onnx_TopK, self).__init__(node)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
indexs, dims = mask.indexs(return_dims=True)
dim = self.params["axis"]
k = self.in_data[1].data[0]
if in_or_out == "out":
raise RuntimeError("The topK output is not cuttable")
if in_or_out == "in":
if dim in dims:
if len(indexs[dim]) < k:
raise RuntimeError("The input is less than k")
return mask_list_to_name(self, masks), None
# Compress
class onnx_Compress(operator.OperatorNode):
def __init__(self, node):
super(onnx_Compress, self).__init__(node)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
axis = None
masks = create_masks(self)
if "axis" in self.params:
axis = self.params["axis"]
condition = list(self.in_data[1].data)
if in_or_out == "in":
if axis is None:
mask = mask.combine_dim()
mask = mask[condition]
masks["out"][0] = mask
return mask_list_to_name(self, masks), None
else:
mask = mask[mask_utils.dim_slice(condition, axis)]
masks["out"][0] = mask
return mask_list_to_name(self, masks), None
if in_or_out == "out":
if axis is not None:
masks["in"][0][mask_utils.dim_slice(condition, axis)] = mask
else:
narray = masks["in"][0].to_array(full=True)
narray[condition] = mask.to_array(full=True)
masks["in"][0] = mask_utils.from_array(narray, masks["in"][0].shape)
return mask_list_to_name(self, masks), None
class onnx_SpaceToDepth(operator.OperatorNode):
def __init__(self, node):
super(onnx_SpaceToDepth, self).__init__(node)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
blocksize = self.params["blocksize"]
if in_or_out == "in":
indexs, dims = mask.indexs(return_dims=True)
if max(dims) >= 2:
raise RuntimeError("Just support cut the batch and channels")
for dim in dims:
index = indexs[dim]
index = np.expand_dims(index, 1)
index = np.repeat(index, blocksize * blocksize, 1)
index = index * blocksize * blocksize
padding = np.arange(0, blocksize * blocksize)
index = index + padding
index = np.reshape(index, (-1))
index = np.sort(index, 0)
indexs[dim] = index
masks["out"][0].set_mask(indexs, dims)
return mask_list_to_name(self, masks), None
if in_or_out == "out":
indexs, dims = mask.indexs(return_dims=True)
if max(dims) >= 2:
raise RuntimeError("Just support cut the batch and channels")
mask = mask.divide_dim(1, (-1, blocksize * blocksize))
mask = mask.trim(1)
indexs, dims = mask.indexs(return_dims=True)
indexs = indexs[:2]
dims = dims[:2]
masks["in"][0] = masks["in"][0].set_mask(indexs, dims)
mask = mask.combine_dim([1, 2])
masks["out"][0] = mask
return mask_list_to_name(self, masks), None
# the opposite of SpaceToDepth
class onnx_DepthToSpace(operator.OperatorNode):
def __init__(self, node):
super(onnx_DepthToSpace, self).__init__(node)
def analysis(self, node, mask):
in_or_out, rank = self.rank(node)
masks = create_masks(self)
blocksize = self.params["blocksize"]
if in_or_out == "out":
indexs, dims = mask.indexs(return_dims=True)
if max(dims) >= 2:
raise RuntimeError("Just support cut the batch and channels")
for dim in dims:
index = indexs[dim]
index = np.expand_dims(index, 1)
index = np.repeat(index, blocksize * blocksize, 1)
index = index * blocksize * blocksize
padding = np.arange(0, blocksize * blocksize)
index = index + padding
index = np.reshape(index, (-1))
index = np.sort(index, 0)
indexs[dim] = index
masks["in"][0].set_mask(indexs, dims)
return mask_list_to_name(self, masks), None
if in_or_out == "in":
indexs, dims = mask.indexs(return_dims=True)
if max(dims) >= 2:
raise RuntimeError("Just support cut the batch and channels")
mask = mask.divide_dim(1, (-1, blocksize * blocksize))
mask = mask.trim(1)
indexs, dims = mask.indexs(return_dims=True)
indexs = indexs[:2]
dims = dims[:2]
masks["out"][0] = masks["out"][0].set_mask(indexs, dims)
mask = mask.combine_dim([1, 2])
masks["in"][0] = mask
return mask_list_to_name(self, masks), None
# onnx QDQ
# just for version 10
class onnx_QDQ(onnx_mapping):
def __init__(self, node):
super(onnx_QDQ, self).__init__(node)
self.index_mapping = {
"in_0": {"any": [("out_0", "any")]},
"out_0": {"any": [("in_0", "any")]},
}
class onnx_where(onnx_mapping):
def __init__(self, node):
super(onnx_where, self).__init__(node)
self.index_mapping = {
"in_0": {},
"in_1": {"any": [("in_2", "any"), ("out_0", "any")]},
"int_2": {"any": [("in_1", "any"), ("out_0", "any")]},
"out_0": {"any": [("in_1", "any"), ("in_2", "any")]},
}
class onnx_ScatterElements(onnx_mapping):
def __init__(self, node):
super(onnx_ScatterElements, self).__init__(node)
self.index_mapping = {
"in_0": {"any": [("out_0", "any")]},
"out_0": {"any": [("in_0", "any")]},
}
|
#!/usr/bin/env python3
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import subprocess
import time
class WatchdogTimer(FileSystemEventHandler):
proc = None
cmd = ["python", "-m", "hbi.server.grpc_server"]
def __init__(self):
self.restart()
self.last = 0
def restart(self):
if self.proc:
print("Restarting...")
self.proc.kill()
self.proc.wait()
self.proc = subprocess.Popen(self.cmd)
def dispatch(self, event):
now = time.time()
if now > (self.last + 5):
self.restart()
self.last = now
w = WatchdogTimer()
o = Observer()
o.schedule(w, "hbi")
o.start()
try:
while True:
time.sleep(15000)
except KeyboardInterrupt:
w.proc.kill()
print("Killed")
|
# -*- coding: utf-8 -*-
""" Sahana Eden Inventory Model
@copyright: 2009-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("InvWarehouseModel",
"InventoryModel",
"InventoryTrackingModel",
"InventoryAdjustModel",
"inv_adj_rheader",
"inv_item_total_weight",
"inv_item_total_volume",
"inv_recv_crud_strings",
"inv_recv_rheader",
"inv_rfooter",
"inv_rheader",
"inv_send_controller",
"inv_send_process",
"inv_send_rheader",
"inv_ship_status",
"inv_stock_movements",
"inv_tabs",
"inv_tracking_status",
"inv_InvItemRepresent",
"depends",
)
import datetime
from collections import OrderedDict
from gluon import *
from gluon.sqlhtml import RadioWidget
from gluon.storage import Storage
from ..core import *
from s3layouts import S3PopupLink
# Compact JSON encoding
SEPARATORS = (",", ":")
# Dependency list for translate-module
depends = ["supply"]
# =============================================================================
SHIP_STATUS_IN_PROCESS = 0
SHIP_STATUS_RECEIVED = 1
SHIP_STATUS_SENT = 2
SHIP_STATUS_CANCEL = 3
SHIP_STATUS_RETURNING = 4
# Dict to lookup a status by name
inv_ship_status = {"IN_PROCESS" : SHIP_STATUS_IN_PROCESS,
"SENT" : SHIP_STATUS_SENT,
"RECEIVED" : SHIP_STATUS_RECEIVED,
"CANCEL" : SHIP_STATUS_CANCEL,
"RETURNING" : SHIP_STATUS_RETURNING,
}
def inv_shipment_status_labels():
T = current.T
return OrderedDict({SHIP_STATUS_IN_PROCESS: T("In Process"),
SHIP_STATUS_SENT: T("Sent"),
SHIP_STATUS_RECEIVED: T("Received"),
SHIP_STATUS_CANCEL: T("Canceled"),
SHIP_STATUS_RETURNING: T("Returning"),
})
SHIP_DOC_PENDING = 0
SHIP_DOC_COMPLETE = 1
# =============================================================================
TRACK_STATUS_UNKNOWN = 0
TRACK_STATUS_PREPARING = 1
TRACK_STATUS_TRANSIT = 2
TRACK_STATUS_UNLOADING = 3
TRACK_STATUS_ARRIVED = 4
TRACK_STATUS_CANCELED = 5
TRACK_STATUS_RETURNING = 6
# Dict to lookup a status by name
inv_tracking_status = {"UNKNOWN" : TRACK_STATUS_UNKNOWN,
"IN_PROCESS" : TRACK_STATUS_PREPARING,
"SENT" : TRACK_STATUS_TRANSIT,
"UNLOADING" : TRACK_STATUS_UNLOADING,
"RECEIVED" : TRACK_STATUS_ARRIVED,
"CANCEL" : TRACK_STATUS_CANCELED,
"RETURNING" : TRACK_STATUS_RETURNING,
}
# =============================================================================
def inv_itn_label():
# Overwrite the label until we have a better way to do this
#return current.T("Item Source Tracking Number")
return current.T("CTN")
# =============================================================================
class InvWarehouseModel(DataModel):
names = ("inv_warehouse",
"inv_warehouse_type",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
messages = current.messages
NONE = messages["NONE"]
OBSOLETE = messages.OBSOLETE
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
settings = current.deployment_settings
super_link = self.super_link
organisation_id = self.org_organisation_id
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
# ---------------------------------------------------------------------
# Warehouse Types
#
org_dependent_wh_types = settings.get_inv_org_dependent_warehouse_types()
tablename = "inv_warehouse_type"
define_table(tablename,
Field("name", length=128, notnull=True,
label = T("Name"),
requires = IS_LENGTH(128),
),
organisation_id(default = root_org if org_dependent_wh_types else None,
readable = is_admin if org_dependent_wh_types else False,
writable = is_admin if org_dependent_wh_types else False,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_WAREHOUSE_TYPE = T("Create Warehouse Type")
crud_strings[tablename] = Storage(
label_create = ADD_WAREHOUSE_TYPE,
title_display = T("Warehouse Type Details"),
title_list = T("Warehouse Types"),
title_update = T("Edit Warehouse Type"),
label_list_button = T("List Warehouse Types"),
label_delete_button = T("Delete Warehouse Type"),
msg_record_created = T("Warehouse Type added"),
msg_record_modified = T("Warehouse Type updated"),
msg_record_deleted = T("Warehouse Type deleted"),
msg_list_empty = T("No Warehouse Types currently registered"))
represent = S3Represent(lookup=tablename, translate=True)
warehouse_type_id = S3ReusableField("warehouse_type_id", "reference %s" % tablename,
label = T("Warehouse Type"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_warehouse_type.id",
represent,
filterby="organisation_id",
filter_opts=filter_opts,
sort=True
)),
sortby = "name",
comment = S3PopupLink(c = "inv",
f = "warehouse_type",
label = ADD_WAREHOUSE_TYPE,
title = T("Warehouse Type"),
tooltip = T("If you don't see the Type in the list, you can add a new one by clicking link 'Create Warehouse Type'."),
),
)
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
)
# Tags as component of Warehouse Types
#self.add_components(tablename,
# inv_warehouse_type_tag={"name": "tag",
# "joinby": "warehouse_type_id",
# }
# )
# ---------------------------------------------------------------------
# Warehouses
#
if settings.get_inv_warehouse_code_unique():
code_requires = IS_EMPTY_OR([IS_LENGTH(10),
IS_NOT_IN_DB(db, "inv_warehouse.code"),
])
else:
code_requires = IS_LENGTH(10)
tablename = "inv_warehouse"
define_table(tablename,
super_link("pe_id", "pr_pentity"),
super_link("site_id", "org_site"),
super_link("doc_id", "doc_entity"),
Field("name", notnull=True,
length=64, # Mayon Compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
Field("code", length=10, # Mayon compatibility
label = T("Code"),
represent = lambda v: v or NONE,
requires = code_requires,
),
organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
warehouse_type_id(),
self.gis_location_id(),
Field("capacity", "integer",
label = T("Capacity (m3)"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, None)
),
),
Field("free_capacity", "integer",
label = T("Free Capacity (m3)"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, None)
),
),
Field("contact",
label = T("Contact"),
represent = lambda v: v or NONE,
),
Field("phone1",
label = T("Phone"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI())
),
Field("phone2",
label = T("Phone 2"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI())
),
Field("email",
label = T("Email"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(IS_EMAIL())
),
Field("fax", label = T("Fax"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI())
),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
represent = lambda opt: OBSOLETE if opt else NONE,
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Warehouse"),
title_display = T("Warehouse Details"),
title_list = T("Warehouses"),
title_update = T("Edit Warehouse"),
title_upload = T("Import Warehouses"),
title_map = T("Map of Warehouses"),
label_list_button = T("List Warehouses"),
label_delete_button = T("Delete Warehouse"),
msg_record_created = T("Warehouse added"),
msg_record_modified = T("Warehouse updated"),
msg_record_deleted = T("Warehouse deleted"),
msg_list_empty = T("No Warehouses currently registered"))
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
list_fields = ["name",
"organisation_id", # Filtered in Component views
"warehouse_type_id",
]
text_fields = ["name",
"code",
"comments",
"organisation_id$name",
"organisation_id$acronym",
]
#report_fields = ["name",
# "organisation_id",
# ]
for level in levels:
lfield = "location_id$%s" % level
list_fields.append(lfield)
#report_fields.append(lfield)
text_fields.append(lfield)
list_fields += [#(T("Address"), "location_id$addr_street"),
"phone1",
"email",
]
# Filter widgets
filter_widgets = [
TextFilter(text_fields,
label = T("Search"),
#_class="filter-search",
),
OptionsFilter("organisation_id",
#hidden=True,
#label=T("Organization"),
# Doesn't support l10n
#represent="%(name)s",
),
LocationFilter("location_id",
#hidden=True,
#label=T("Location"),
levels=levels,
),
]
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.inv_warehouse_onaccept,
realm_components = ("contact_emergency",
"physical_description",
"config",
"image",
"req",
"send",
"human_resource_site",
"note",
"contact",
"role",
"asset",
"commit",
"inv_item",
"document",
"recv",
"address",
),
super_entity = ("pr_pentity", "org_site"),
update_realm = True,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return None
# -------------------------------------------------------------------------
@staticmethod
def inv_warehouse_onaccept(form):
"""
Update Affiliation, record ownership and component ownership
"""
current.s3db.org_update_affiliations("inv_warehouse", form.vars)
# =============================================================================
class InventoryModel(DataModel):
"""
Inventory Management
Record inventories of items at sites :
Warehouses, Offices, Shelters, Hospitals, etc
"""
names = ("inv_inv_item",
"inv_remove",
"inv_item_id",
"inv_item_represent",
"inv_prep",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
organisation_id = self.org_organisation_id
#messages = current.messages
NONE = current.messages["NONE"]
settings = current.deployment_settings
direct_stock_edits = settings.get_inv_direct_stock_edits()
track_pack_values = settings.get_inv_track_pack_values()
WAREHOUSE = T(settings.get_inv_facility_label())
inv_source_type = {0: None,
1: T("Donated"),
2: T("Procured"),
}
# =====================================================================
# Inventory Item
#
# Stock in a warehouse or other site's inventory store.
#
# ondelete references have been set to RESTRICT because the inv. items
# should never be automatically deleted
inv_item_status_opts = settings.get_inv_item_status()
tablename = "inv_inv_item"
self.define_table(tablename,
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
self.super_link("site_id", "org_site",
default = auth.user.site_id if auth.is_logged_in() else None,
empty = False,
label = WAREHOUSE,
ondelete = "RESTRICT",
represent = self.org_site_represent,
readable = True,
writable = True,
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3SiteAutocompleteWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (WAREHOUSE,
# messages.AUTOCOMPLETE_HELP)),
),
self.supply_item_entity_id(),
self.supply_item_id(ondelete = "RESTRICT",
required = True,
),
self.supply_item_pack_id(ondelete = "RESTRICT",
required = True,
),
Field("quantity", "double", notnull=True,
default = 0.0,
label = T("Quantity"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
requires = IS_FLOAT_AMOUNT(minimum=0.0),
writable = direct_stock_edits,
),
Field("bin", length=16,
label = T("Bin"),
represent = lambda v: v or NONE,
requires = IS_LENGTH(16),
),
# e.g.: Allow items to be marked as 'still on the shelf but allocated to an outgoing shipment'
Field("status", "integer",
default = 0, # Only Items with this Status can be allocated to Outgoing Shipments
label = T("Status"),
represent = represent_option(inv_item_status_opts),
requires = IS_EMPTY_OR(
IS_IN_SET(inv_item_status_opts)
),
),
s3_date("purchase_date",
label = T("Purchase Date"),
),
s3_date("expiry_date",
label = T("Expiry Date"),
represent = inv_expiry_date_represent,
),
Field("pack_value", "double",
label = T("Value per Pack"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
readable = track_pack_values,
writable = track_pack_values,
),
# @ToDo: Move this into a Currency Widget for the pack_value field
s3_currency(readable = track_pack_values,
writable = track_pack_values,
),
Field("item_source_no", length=16,
label = inv_itn_label(),
represent = lambda v: v or NONE,
requires = IS_LENGTH(16),
),
# Organisation that owns this item
organisation_id("owner_org_id",
label = T("Owned By (Organization/Branch)"),
ondelete = "SET NULL",
),
# Original donating Organisation
organisation_id("supply_org_id",
label = T("Supplier/Donor"),
ondelete = "SET NULL",
),
Field("source_type", "integer",
default = 0,
label = T("Type"),
represent = represent_option(inv_source_type),
requires = IS_EMPTY_OR(
IS_IN_SET(inv_source_type)
),
writable = False,
),
Field.Method("total_value",
self.inv_item_total_value),
Field.Method("pack_quantity",
self.supply_item_pack_quantity(tablename=tablename)),
s3_comments(),
*s3_meta_fields())
# CRUD strings
INV_ITEM = T("Warehouse Stock")
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Stock to Warehouse"),
title_display = T("Warehouse Stock Details"),
title_list = T("Stock in Warehouse"),
title_update = T("Edit Warehouse Stock"),
title_report = T("Warehouse Stock Report"),
title_upload = T("Import Warehouse Stock"),
label_list_button = T("List Stock in Warehouse"),
label_delete_button = T("Remove Stock from Warehouse"),
msg_record_created = T("Stock added to Warehouse"),
msg_record_modified = T("Warehouse Stock updated"),
msg_record_deleted = T("Stock removed from Warehouse"),
msg_list_empty = T("No Stock currently registered in this Warehouse"))
# Reusable Field
inv_item_represent = inv_InvItemRepresent()
inv_item_id = S3ReusableField("inv_item_id", "reference %s" % tablename,
label = INV_ITEM,
ondelete = "CASCADE",
represent = inv_item_represent,
requires = IS_ONE_OF(db, "inv_inv_item.id",
inv_item_represent,
orderby = "inv_inv_item.id",
sort = True,
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (INV_ITEM,
T("Select Stock from this Warehouse"))),
script = '''
$.filterOptionsS3({
'trigger':'inv_item_id',
'target':'item_pack_id',
'lookupResource':'item_pack',
'lookupPrefix':'supply',
'lookupURL':S3.Ap.concat('/inv/inv_item_packs.json/'),
'msgNoRecords':i18n.no_packs,
'fncPrep':S3.supply.fncPrepItem,
'fncRepresent':S3.supply.fncRepresentItem
})''')
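# The $.filterOptionsS3 script above re-populates the item_pack_id dropdown
# whenever inv_item_id changes, looking up the packs of the selected stock
# item via /inv/inv_item_packs.json.
# Illustrative usage of the reusable field (hedged sketch; the table and
# field names below are hypothetical, for illustration only):
#
#   inv_item_id = current.s3db.inv_item_id
#   define_table("inv_example_allocation",
#                inv_item_id(),
#                Field("allocated_quantity", "double"),
#                *s3_meta_fields())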
# Filter widgets
filter_widgets = [
TextFilter(["item_id$name",
"item_id$code",
"item_id$brand_id",
"item_id$model",
"item_id$comments",
"item_pack_id$name",
],
label = T("Search"),
#comment=T("Search for items with this text in the name."),
),
OptionsFilter("site_id",
#label=T("Facility"),
cols = 2,
hidden = True,
),
OptionsFilter("status",
#label=T("Status"),
cols = 2,
hidden = True,
),
RangeFilter("quantity",
label = T("Quantity Range"),
comment = T("Include only items where quantity is in this range."),
ge = 10,
hidden = True,
),
DateFilter("purchase_date",
#label = T("Purchase Date"),
comment = T("Include only items purchased within the specified dates."),
hidden = True,
),
DateFilter("expiry_date",
#label = T("Expiry Date"),
comment = T("Include only items that expire within the specified dates."),
hidden = True,
),
OptionsFilter("owner_org_id",
label = T("Owning Organization"),
comment = T("Search for items by owning organization."),
represent = "%(name)s",
#cols = 2,
hidden = True,
),
OptionsFilter("supply_org_id",
label = T("Donating Organization"),
comment = T("Search for items by donating organization."),
represent = "%(name)s",
#cols = 2,
hidden = True,
),
]
# Report options
if track_pack_values:
rows = ["item_id", "item_id$item_category_id"]
cols = ["site_id", "owner_org_id", "supply_org_id"]
fact = ["quantity", (T("Total Value"), "total_value")]
else:
rows = ["item_id", "item_id$item_category_id"]
cols = ["site_id", "owner_org_id", "supply_org_id"]
fact = ["quantity"]
report_options = Storage(rows = rows,
cols = cols,
fact = fact,
methods = ["sum"],
defaults = Storage(rows = "item_id",
cols = "site_id",
fact = "sum(quantity)",
),
groupby = self.inv_inv_item.site_id,
hide_comments = True,
)
# List fields
if track_pack_values:
list_fields = ["site_id",
"item_id",
"item_id$code",
"item_id$item_category_id",
"quantity",
"expiry_date",
"owner_org_id",
"pack_value",
(T("Total Value"), "total_value"),
"currency",
"bin",
"supply_org_id",
"status",
]
else:
list_fields = ["site_id",
"item_id",
"item_id$code",
"item_id$item_category_id",
"quantity",
"bin",
"owner_org_id",
"supply_org_id",
"status",
]
# Configuration
self.configure(tablename,
# Lock the record so that it can't be meddled with
# - unless explicitly told to allow this
create = direct_stock_edits,
deletable = direct_stock_edits,
editable = direct_stock_edits,
listadd = direct_stock_edits,
context = {"location": "site_id$location_id",
},
deduplicate = self.inv_item_duplicate,
extra_fields = ["quantity",
"pack_value",
"item_pack_id",
],
filter_widgets = filter_widgets,
list_fields = list_fields,
onvalidation = self.inv_inv_item_onvalidate,
report_options = report_options,
super_entity = "supply_item_entity",
grouped = {
"default": {
"title": T("Warehouse Stock Report"),
"fields": [(T("Warehouse"), "site_id$name"),
"item_id$item_category_id",
"bin",
"item_id$name",
"quantity",
"pack_value",
"total_value",
],
"groupby": ["site_id",
],
"orderby": ["site_id$name",
"item_id$name",
],
"aggregate": [("sum", "quantity"),
("sum", "total_value"),
],
},
},
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"inv_item_id": inv_item_id,
"inv_item_represent": inv_item_represent,
"inv_remove": self.inv_remove,
"inv_prep": self.inv_prep,
}
# -------------------------------------------------------------------------
@staticmethod
def inv_item_total_value(row):
""" Total value of an inventory item """
if hasattr(row, "inv_inv_item"):
row = row.inv_inv_item
try:
value = row.quantity * row.pack_value
except (AttributeError, TypeError):
# not available
return current.messages["NONE"]
else:
return round(value, 2)
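# Worked example (hypothetical values): for a stock row with quantity=40 packs
# and pack_value=2.5, the virtual "total_value" resolves to
# round(40 * 2.5, 2) = 100.0; if either value is missing, the NONE message is
# returned instead.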
# -------------------------------------------------------------------------
@staticmethod
def inv_inv_item_onvalidate(form):
"""
When an inv_inv_item record is created with a source number,
the source number needs to be unique within the
organisation.
"""
item_source_no = form.vars.item_source_no
if not item_source_no:
return
if hasattr(form, "record"):
record = form.record
if record and \
record.item_source_no and \
record.item_source_no == item_source_no:
# The tracking number hasn't changed so no validation needed
return
itable = current.s3db.inv_inv_item
# Was: "track_org_id" - but inv_inv_item has no "track_org_id"!
org_field = "owner_org_id"
query = (itable[org_field] == form.vars[org_field]) & \
(itable.item_source_no == item_source_no)
record = current.db(query).select(itable[org_field],
limitby = (0, 1)
).first()
if record:
org = current.response.s3 \
.org_organisation_represent(record[org_field])
form.errors.item_source_no = current.T("The Tracking Number %s "
"is already used by %s.") % \
(item_source_no, org)
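# Example (hypothetical data): if Org A already holds stock recorded with
# item_source_no "TN-123", creating or importing another inv_inv_item for
# Org A with the same number fails validation with
# "The Tracking Number TN-123 is already used by <Org A>".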
# -------------------------------------------------------------------------
@staticmethod
def inv_remove(inv_rec,
required_total,
required_pack_value = 1,
current_track_total = 0,
update = True,
):
"""
Check whether required_total can be removed from inv_rec:
if there is insufficient stock, cap the amount at what is in stock,
otherwise use the required total.
If the update flag is True, the stock level is reduced accordingly
(the record is marked deleted when emptied).
current_track_total is what has already been removed for this transaction.
"""
db = current.db
inv_item_table = db.inv_inv_item
siptable = db.supply_item_pack
inv_p_qnty = db(siptable.id == inv_rec.item_pack_id).select(siptable.quantity,
limitby = (0, 1)
).first().quantity
inv_qnty = inv_rec.quantity * inv_p_qnty
cur_qnty = current_track_total * inv_p_qnty
req_qnty = required_total * required_pack_value
# It already matches so no change required
if cur_qnty == req_qnty:
return required_total
if inv_qnty + cur_qnty > req_qnty:
send_item_quantity = req_qnty
new_qnty = (inv_qnty + cur_qnty - req_qnty) / inv_p_qnty
else:
send_item_quantity = inv_qnty + cur_qnty
new_qnty = 0
send_item_quantity = send_item_quantity / inv_p_qnty
if update:
# Update the levels in stock
if new_qnty:
db(inv_item_table.id == inv_rec.id).update(quantity = new_qnty)
else:
db(inv_item_table.id == inv_rec.id).update(deleted = True)
return send_item_quantity
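# Worked example (hypothetical figures): with a pack size of 10 units,
# inv_rec.quantity=5 packs (50 units), required_total=3 packs at
# required_pack_value=10 (30 units) and nothing tracked yet:
#   inv_qnty=50, cur_qnty=0, req_qnty=30 -> 30 units can be sent (3 packs)
#   and, with update=True, the stock record is reduced to (50-30)/10 = 2 packs.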
# -------------------------------------------------------------------------
@staticmethod
def inv_prep(r):
"""
Used in site REST controllers to Filter out items which are
already in this inventory
"""
if r.component:
if r.component.name == "inv_item":
db = current.db
table = db.inv_inv_item
# Filter out items which are already in this inventory
query = (table.site_id == r.record.site_id) & \
(table.deleted == False)
inv_item_rows = db(query).select(table.item_id)
item_ids = [row.item_id for row in inv_item_rows]
# Ensure that the current item CAN be selected
if r.method == "update":
item = db(table.id == r.args[2]).select(table.item_id,
limitby = (0, 1)
).first()
item_ids.remove(item.item_id)
table.item_id.requires.set_filter(not_filterby = "id",
not_filter_opts = item_ids)
elif r.component.name == "send":
# Default to the Search tab in the location selector widget
current.response.s3.gis.tab = "search"
#if current.request.get_vars.get("select", "sent") == "incoming":
# # Display only incoming shipments which haven't been received yet
# filter = (current.s3db.inv_send.status == SHIP_STATUS_SENT)
# r.resource.add_component_filter("send", filter)
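# Example (hedged): on a site's inv_item create form, items already stocked at
# that site are excluded from the item dropdown via
# set_filter(not_filterby="id", not_filter_opts=item_ids); when updating an
# existing row, its own item remains selectable.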
# -------------------------------------------------------------------------
@staticmethod
def inv_item_duplicate(item):
"""
Update detection for inv_inv_item
Args:
item: the ImportItem
"""
table = item.table
data = item.data
data_get = data.get
site_id = data_get("site_id")
item_id = data_get("item_id")
pack_id = data_get("item_pack_id")
owner_org_id = data_get("owner_org_id")
supply_org_id = data_get("supply_org_id")
pack_value = data_get("pack_value")
currency = data_get("currency")
item_bin = data_get("bin")
# Must match all of these exactly
query = (table.site_id == site_id) & \
(table.item_id == item_id) & \
(table.item_pack_id == pack_id) & \
(table.owner_org_id == owner_org_id) & \
(table.supply_org_id == supply_org_id) & \
(table.pack_value == pack_value) & \
(table.currency == currency) & \
(table.bin == item_bin)
duplicate = current.db(query).select(table.id,
table.quantity,
limitby = (0, 1)
).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# If the import item has a quantity of 0 (e.g. when imported
# implicitly through inv_track_item), retain the stock quantity
if "quantity" in data and data.quantity == 0:
item.data.quantity = duplicate.quantity
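# Example (hedged): an import row matching an existing record on site, item,
# pack, owner org, supplier org, pack value, currency and bin becomes an
# UPDATE of that record; an imported quantity of 0 keeps the stored quantity.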
# =============================================================================
class InventoryTrackingModel(DataModel):
"""
A module to manage the shipment of inventory items
- Sent Items
- Received Items
- And audit trail of the shipment process
"""
names = ("inv_send",
"inv_send_represent",
"inv_send_ref_represent",
"inv_send_onaccept",
"inv_recv",
"inv_recv_id",
"inv_recv_represent",
"inv_recv_ref_represent",
"inv_kitting",
"inv_kitting_item",
"inv_track_item",
"inv_track_item_deleting",
"inv_track_item_onaccept",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
person_id = self.pr_person_id
organisation_id = self.org_organisation_id
item_id = self.supply_item_id
inv_item_id = self.inv_item_id
item_pack_id = self.supply_item_pack_id
req_item_id = self.req_item_id
req_ref = self.req_req_ref
shipment_status = inv_shipment_status_labels()
tracking_status = {TRACK_STATUS_UNKNOWN: T("Unknown"),
TRACK_STATUS_PREPARING: T("In Process"),
TRACK_STATUS_TRANSIT: T("In transit"),
TRACK_STATUS_UNLOADING: T("Unloading"),
TRACK_STATUS_ARRIVED: T("Arrived"),
TRACK_STATUS_CANCELED: T("Canceled"),
TRACK_STATUS_RETURNING: T("Returning"),
}
NONE = current.messages["NONE"]
SITE_LABEL = settings.get_org_site_label()
show_org = settings.get_inv_send_show_org()
show_transport = settings.get_inv_send_show_mode_of_transport()
show_req_ref = settings.get_req_use_req_number()
type_default = settings.get_inv_send_type_default()
time_in = settings.get_inv_send_show_time_in()
recv_shortname = settings.get_inv_recv_shortname()
itn_label = inv_itn_label()
document_filing = settings.get_inv_document_filing()
is_logged_in = auth.is_logged_in
user = auth.user
org_site_represent = self.org_site_represent
s3_string_represent = lambda v: v if v else NONE
send_ref = S3ReusableField("send_ref",
label = T(settings.get_inv_send_ref_field_name()),
represent = self.inv_send_ref_represent,
writable = False,
)
recv_ref = S3ReusableField("recv_ref",
label = T("%(GRN)s Number") % {"GRN": recv_shortname},
represent = self.inv_recv_ref_represent,
writable = False,
)
ship_doc_status = {SHIP_DOC_PENDING : T("Pending"),
SHIP_DOC_COMPLETE : T("Complete"),
}
radio_widget = lambda field, value: \
RadioWidget().widget(field, value, cols = 2)
# ---------------------------------------------------------------------
# Send (Outgoing / Dispatch / etc)
#
send_type_opts = settings.get_inv_shipment_types()
# @ToDo: When is this actually wanted?
#send_type_opts.update(self.inv_item_status_opts)
send_type_opts.update(settings.get_inv_send_types())
site_types = auth.org_site_types
tablename = "inv_send"
define_table(tablename,
# Instance
super_link("doc_id", "doc_entity"),
send_ref(),
req_ref(readable = show_req_ref,
writable = show_req_ref,
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
default = user.site_id if is_logged_in() else None,
empty = False,
instance_types = site_types,
label = T("From %(site)s") % {"site": SITE_LABEL},
not_filterby = "obsolete",
not_filter_opts = (True,),
readable = True,
writable = True,
represent = org_site_represent,
updateable = True,
#widget = S3SiteAutocompleteWidget(),
),
Field("type", "integer",
default = type_default,
label = T("Shipment Type"),
represent = represent_option(send_type_opts),
requires = IS_IN_SET(send_type_opts),
readable = not type_default,
writable = not type_default,
),
# This is a reference, not a super_link, so we can override
Field("to_site_id", self.org_site,
label = T("To %(site)s") % {"site": SITE_LABEL},
ondelete = "SET NULL",
represent = org_site_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_site.site_id",
org_site_represent,
instance_types = site_types,
sort = True,
not_filterby = "obsolete",
not_filter_opts = (True,),
)),
),
organisation_id(
label = T("To Organization"),
readable = show_org,
writable = show_org,
),
person_id("sender_id",
default = auth.s3_logged_in_person(),
label = T("Sent By"),
ondelete = "SET NULL",
comment = self.pr_person_comment(child="sender_id"),
),
person_id("recipient_id",
label = T("To Person"),
ondelete = "SET NULL",
represent = self.pr_PersonRepresentContact(),
comment = self.pr_person_comment(child="recipient_id"),
),
Field("transport_type",
label = T("Type of Transport"),
readable = show_transport,
writable = show_transport,
represent = s3_string_represent,
),
Field("transported_by",
label = T("Transported by"),
readable = show_transport,
writable = show_transport,
represent = s3_string_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Transported by"),
T("Freight company or organisation providing transport"))),
),
Field("transport_ref",
label = T("Transport Reference"),
readable = show_transport,
writable = show_transport,
represent = s3_string_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Transport Reference"),
T("Consignment Number, Tracking Number, etc"))),
),
Field("driver_name",
label = T("Name of Driver"),
represent = s3_string_represent,
),
Field("driver_phone",
label = T("Driver Phone Number"),
represent = lambda v: v or "",
requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),
),
Field("vehicle_plate_no",
label = T("Vehicle Plate Number"),
represent = s3_string_represent,
),
Field("time_in", "time",
label = T("Time In"),
represent = s3_string_represent,
readable = time_in,
writable = time_in,
),
Field("time_out", "time",
label = T("Time Out"),
represent = s3_string_represent,
),
s3_datetime(label = T("Date Sent"),
# Not always sent straight away
#default = "now",
represent = "date",
writable = False,
),
s3_datetime("delivery_date",
label = T("Estimated Delivery Date"),
represent = "date",
writable = False,
),
Field("status", "integer",
default = SHIP_STATUS_IN_PROCESS,
label = T("Status"),
represent = represent_option(shipment_status),
requires = IS_EMPTY_OR(
IS_IN_SET(shipment_status)
),
writable = False,
),
Field("filing_status", "integer",
requires = IS_EMPTY_OR(
IS_IN_SET(ship_doc_status)
),
represent = represent_option(ship_doc_status),
default = SHIP_DOC_PENDING,
widget = radio_widget,
label = T("Filing Status"),
comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Filing Status"),
T("Have all the signed documents for this shipment been filed?"),
"* %s|* %s" % (T("Requisition"), T("Waybill")),
)),
readable = document_filing,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# Filter Widgets
filter_widgets = [
TextFilter(["sender_id$first_name",
"sender_id$middle_name",
"sender_id$last_name",
"comments",
"site_id$name",
"send_ref",
"recipient_id$first_name",
"recipient_id$middle_name",
"recipient_id$last_name",
],
label = T("Search"),
comment = T("Search for an item by text."),
),
OptionsFilter("to_site_id",
label = T("To Organization"),
comment = T("If none are selected, then all are searched."),
cols = 2,
hidden = True,
),
TextFilter("type",
label = T("Shipment Type"),
hidden = True,
),
TextFilter("transport_type",
label = T("Type of Transport"),
hidden = True,
),
DateFilter("date",
label = T("Date Sent"),
comment = T("Search for a shipment sent between these dates."),
hidden = True,
),
DateFilter("delivery_date",
label = T("Estimated Delivery Date"),
comment = T("Search for a shipment which has an estimated delivery between these dates."),
hidden = True,
),
]
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Send New Shipment"),
title_display = T("Sent Shipment Details"),
title_list = T("Sent Shipments"),
title_update = T("Shipment to Send"),
label_list_button = T("List Sent Shipments"),
label_delete_button = T("Delete Sent Shipment"),
msg_record_created = T("Shipment Created"),
msg_record_modified = T("Sent Shipment updated"),
msg_record_deleted = T("Sent Shipment canceled"),
msg_list_empty = T("No Sent Shipments"))
# Reusable Field
send_id = S3ReusableField("send_id", "reference %s" % tablename,
label = T("Send Shipment"),
ondelete = "RESTRICT",
represent = self.inv_send_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_send.id",
self.inv_send_represent,
orderby = "inv_send.date",
sort = True,
)),
sortby = "date",
)
# Components
add_components(tablename,
inv_track_item = "send_id",
)
# Custom methods
# Generate Consignment Note
set_method("inv_send",
method = "form",
action = self.inv_send_form)
set_method("inv_send",
method = "timeline",
action = self.inv_timeline)
# Redirect to the Items tabs after creation
if current.request.controller == "req":
c = "req"
else:
c = "inv"
send_item_url = URL(c=c, f="send",
args = ["[id]",
"track_item",
],
)
list_fields = ["id",
"send_ref",
"req_ref",
"sender_id",
"site_id",
"date",
"recipient_id",
"delivery_date",
"to_site_id",
"status",
"driver_name",
"driver_phone",
"vehicle_plate_no",
"time_out",
"comments",
]
if time_in:
list_fields.insert(12, "time_in")
if show_transport:
list_fields.insert(10, "transport_type")
configure(tablename,
create_next = send_item_url,
update_next = send_item_url,
# It shouldn't be possible for the user to delete a send item
# unless *maybe* if it is pending and has no items referencing it
deletable = False,
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.inv_send_onaccept,
onvalidation = self.inv_send_onvalidation,
orderby = "inv_send.date desc",
sortby = [[5, "desc"], [1, "asc"]],
super_entity = ("doc_entity",),
)
# ---------------------------------------------------------------------
# Received (In/Receive / Donation / etc)
#
recv_type_opts = settings.get_inv_shipment_types()
recv_type_opts.update(settings.get_inv_recv_types())
tablename = "inv_recv"
define_table(tablename,
# Instance
super_link("doc_id", "doc_entity"),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
default = user.site_id if is_logged_in() else None,
empty = False,
label = T("%(site)s (Recipient)") % {"site": SITE_LABEL},
ondelete = "SET NULL",
instance_types = site_types,
not_filterby = "obsolete",
not_filter_opts = (True,),
readable = True,
writable = True,
represent = org_site_represent,
updateable = True,
#widget = S3SiteAutocompleteWidget(),
),
Field("type", "integer",
requires = IS_IN_SET(recv_type_opts),
represent = represent_option(recv_type_opts),
label = T("Shipment Type"),
default = 0,
),
organisation_id(label = T("Organization/Supplier"), # From Organization/Supplier
),
# This is a reference, not a super_link, so we can override
Field("from_site_id", "reference org_site",
label = T("From %(site)s") % {"site": SITE_LABEL},
ondelete = "SET NULL",
#widget = S3SiteAutocompleteWidget(),
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_site.site_id",
org_site_represent,
instance_types = site_types,
not_filterby = "obsolete",
not_filter_opts = (True,),
sort = True,
)),
represent = org_site_represent
),
s3_date("eta",
label = T("Date Expected"),
writable = False),
s3_datetime(label = T("Date Received"),
represent = "date",
# Can also be set manually (when catching up with backlog of paperwork)
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Date Received"),
# T("Will be filled automatically when the Shipment has been Received"))),
),
send_ref(),
recv_ref(),
Field("purchase_ref",
label = T("%(PO)s Number") % {"PO": settings.get_proc_shortname()},
represent = s3_string_represent,
),
req_ref(readable = show_req_ref,
writable = show_req_ref
),
person_id(name = "sender_id",
label = T("Sent By Person"),
ondelete = "SET NULL",
comment = self.pr_person_comment(child="sender_id"),
),
person_id(name = "recipient_id",
label = T("Received By"),
ondelete = "SET NULL",
default = auth.s3_logged_in_person(),
comment = self.pr_person_comment(child="recipient_id")),
Field("transport_type",
label = T("Type of Transport"),
# Enable in template as-required
readable = False,
writable = False,
represent = s3_string_represent,
),
Field("status", "integer",
requires = IS_EMPTY_OR(
IS_IN_SET(shipment_status)
),
represent = represent_option(shipment_status),
default = SHIP_STATUS_IN_PROCESS,
label = T("Status"),
writable = False,
),
Field("grn_status", "integer",
requires = IS_EMPTY_OR(
IS_IN_SET(ship_doc_status)
),
represent = represent_option(ship_doc_status),
default = SHIP_DOC_PENDING,
widget = radio_widget,
label = T("%(GRN)s Status") % {"GRN": recv_shortname},
comment = DIV(_class="tooltip",
_title="%s|%s" % \
(T("%(GRN)s Status") % {"GRN": recv_shortname},
T("Has the %(GRN)s (%(GRN_name)s) form been completed?") % \
{"GRN": recv_shortname,
"GRN_name": settings.get_inv_recv_form_name()
})),
),
Field("cert_status", "integer",
requires = IS_EMPTY_OR(
IS_IN_SET(ship_doc_status)
),
represent = represent_option(ship_doc_status),
default = SHIP_DOC_PENDING,
widget = radio_widget,
label = T("Certificate Status"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Certificate Status"),
T("Has the Certificate for receipt of the shipment been given to the sender?"))),
),
Field("filing_status", "integer",
requires = IS_EMPTY_OR(
IS_IN_SET(ship_doc_status)
),
represent = represent_option(ship_doc_status),
default = SHIP_DOC_PENDING,
widget = radio_widget,
label = T("Filing Status"),
comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Filing Status"),
T("Have all the signed documents for this shipment been filed?"),
"* %s|* %s|* %s" % (T("Requisition"), T("Waybill"), T("GRN")),
)),
readable = document_filing,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
inv_recv_crud_strings()
if settings.get_inv_shipment_name() == "order":
recv_id_label = T("Order")
else:
recv_id_label = T("Receive Shipment")
# Reusable Field
inv_recv_represent = self.inv_recv_represent
recv_id = S3ReusableField("recv_id", "reference %s" % tablename,
label = recv_id_label,
ondelete = "RESTRICT",
represent = inv_recv_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_recv.id",
inv_recv_represent,
orderby = "inv_recv.date",
sort = True,
)),
sortby = "date",
)
# Filter Widgets
if settings.get_inv_shipment_name() == "order":
recv_search_comment = T("Search for an order by looking for text in any field.")
recv_search_date_field = "eta"
recv_search_date_comment = T("Search for an order expected between these dates")
else:
recv_search_comment = T("Search for a shipment by looking for text in any field.")
recv_search_date_field = "date"
recv_search_date_comment = T("Search for a shipment received between these dates")
filter_widgets = [
TextFilter(["sender_id$first_name",
"sender_id$middle_name",
"sender_id$last_name",
"comments",
"from_site_id$name",
"recipient_id$first_name",
"recipient_id$middle_name",
"recipient_id$last_name",
"site_id$name",
"recv_ref",
"send_ref",
"purchase_ref",
],
label = T("Search"),
comment = recv_search_comment,
),
DateFilter(recv_search_date_field,
# This will be the default
#label = table[recv_search_date_field].label,
comment = recv_search_date_comment,
hidden = True,
),
OptionsFilter("site_id",
label = SITE_LABEL,
cols = 2,
hidden = True,
),
OptionsFilter("status",
label = T("Status"),
cols = 2,
hidden = True,
),
#OptionsFilter("grn_status",
#label = T("GRN Status"),
#cols = 2,
#hidden = True,
#),
#OptionsFilter("cert_status",
#label = T("Certificate Status"),
#cols = 2,
#hidden = True,
#),
]
# Redirect to the Items tabs after creation
recv_item_url = URL(c="inv", f="recv",
args = ["[id]",
"track_item",
],
)
configure(tablename,
create_next = recv_item_url,
update_next = recv_item_url,
# It shouldn't be possible for the user to delete a received shipment
deletable = False,
filter_widgets = filter_widgets,
list_fields = ["id",
"recv_ref",
"send_ref",
"purchase_ref",
"recipient_id",
"organisation_id",
"from_site_id",
"site_id",
"date",
"type",
"status",
"req_ref",
"sender_id",
"comments"
],
mark_required = ("from_site_id", "organisation_id"),
onaccept = self.inv_recv_onaccept,
onvalidation = self.inv_recv_onvalidation,
orderby = "inv_recv.date desc",
sortby = [[6, "desc"], [1, "asc"]],
super_entity = ("doc_entity",),
)
# Components
add_components(tablename,
inv_track_item = "recv_id",
)
# Custom methods
# Print Forms
set_method("inv_recv",
method = "form",
action = self.inv_recv_form)
set_method("inv_recv",
method = "cert",
action = self.inv_recv_donation_cert)
set_method("inv_recv",
method = "timeline",
action = self.inv_timeline)
# ---------------------------------------------------------------------
# Kittings
# - process for creating Kits from component items
#
tablename = "inv_kitting"
define_table(tablename,
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
default = user.site_id if is_logged_in() else None,
empty = False,
label = T("By %(site)s") % {"site": SITE_LABEL},
instance_types = site_types,
not_filterby = "obsolete",
not_filter_opts = (True,),
readable = True,
writable = True,
represent = org_site_represent,
updateable = True,
#widget = S3SiteAutocompleteWidget(),
),
item_id(label = T("Kit"),
requires = IS_ONE_OF(db, "supply_item.id",
self.supply_item_represent,
filterby="kit",
filter_opts = (True,),
sort = True
),
widget = S3AutocompleteWidget("supply", "item",
filter = "item.kit=1"),
# Needs better workflow as no way to add the Kit Items
#comment = S3PopupLink(c = "supply",
# f = "item",
# label = T("Create Kit"),
# title = T("Kit"),
# tooltip = T("Type the name of an existing catalog kit OR Click 'Create Kit' to add a kit which is not in the catalog."),
# ),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Kit"),
T("Type the name of an existing catalog kit"))),
),
item_pack_id(),
Field("quantity", "double",
label = T("Quantity"),
represent = lambda v, row=None: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
requires = IS_FLOAT_AMOUNT(minimum=1.0),
),
s3_date(comment = DIV(_class="tooltip",
_title="%s|%s" % \
(T("Date Repacked"),
T("Will be filled automatically when the Item has been Repacked")))
),
req_ref(writable = True),
person_id("repacked_id",
default = auth.s3_logged_in_person(),
label = T("Repacked By"),
ondelete = "SET NULL",
#comment = self.pr_person_comment(child="repacked_id")),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Kitting"),
title_display = T("Kitting Details"),
title_list = T("Kittings"),
title_update = T("Kitting"),
label_list_button = T("List Kittings"),
label_delete_button = T("Delete Kitting"),
msg_record_created = T("Kitting completed"),
msg_record_modified = T("Kitting updated"),
msg_record_deleted = T("Kitting canceled"),
msg_list_empty = T("No Kittings"))
# Components
add_components(tablename,
inv_kitting_item = {"name": "item",
"joinby": "kitting_id",
},
)
# Resource configuration
configure(tablename,
create_next = URL(c="inv", f="kitting",
args = ["[id]", "item"],
),
create_onaccept = self.inv_kitting_onaccept,
list_fields = ["site_id",
"req_ref",
"quantity",
"date",
"repacked_id",
],
onvalidation = self.inv_kitting_onvalidate,
)
# ---------------------------------------------------------------------
# Kitting Items
# - Component items of Kits which can be used to build a pick-list
#
tablename = "inv_kitting_item"
define_table(tablename,
Field("site_id", "reference org_site",
readable = False,
writable = False,
),
Field("kitting_id", "reference inv_kitting",
readable = False,
writable = False,
),
item_id(writable = False),
item_pack_id(writable = False),
Field("quantity", "double",
label = T("Quantity"),
represent = lambda v, row=None: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
writable = False,
),
Field("bin", length=16,
label = T("Bin"),
represent = s3_string_represent,
requires = IS_LENGTH(16),
writable = False,
),
Field("item_source_no", length=16,
label = itn_label,
represent = s3_string_represent,
requires = IS_LENGTH(16),
),
inv_item_id(ondelete = "RESTRICT",
readable = False,
writable = False,
),
#s3_comments(),
*s3_meta_fields())
# Resource configuration
configure(tablename,
deletable = False,
editable = False,
insertable = False,
)
# ---------------------------------------------------------------------
# Tracking Items
#
inv_item_status_opts = settings.get_inv_item_status()
tablename = "inv_track_item"
define_table(tablename,
organisation_id("track_org_id",
label = T("Shipping Organization"),
ondelete = "SET NULL",
readable = False,
writable = False
),
inv_item_id("send_inv_item_id",
ondelete = "RESTRICT",
# Local Purchases don't have this available
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_inv_item.id",
self.inv_item_represent,
orderby="inv_inv_item.id",
sort=True)),
script = '''
$.filterOptionsS3({
'trigger':'send_inv_item_id',
'target':'item_pack_id',
'lookupResource':'item_pack',
'lookupPrefix':'supply',
'lookupURL':S3.Ap.concat('/inv/inv_item_packs.json/'),
'msgNoRecords':i18n.no_packs,
'fncPrep':S3.supply.fncPrepItem,
'fncRepresent':S3.supply.fncRepresentItem
})'''),
item_id(ondelete = "RESTRICT"),
item_pack_id(ondelete = "SET NULL"),
# Now done as a VirtualField instead (looks better & updates closer to real-time, so less of a race condition)
#Field("req_quantity", "double",
# # This isn't the Quantity requested, but rather the quantity still needed
# label = T("Quantity Needed"),
# readable = False,
# writable = False),
Field("quantity", "double", notnull=True,
label = T("Quantity Sent"),
requires = IS_NOT_EMPTY(),
),
Field("recv_quantity", "double",
label = T("Quantity Received"),
represent = self.qnty_recv_repr,
readable = False,
writable = False,
),
Field("return_quantity", "double",
label = T("Quantity Returned"),
represent = self.qnty_recv_repr,
readable = False,
writable = False,
),
Field("pack_value", "double",
label = T("Value per Pack"),
),
s3_currency(),
s3_date("expiry_date",
label = T("Expiry Date"),
represent = inv_expiry_date_represent,
),
# The bin at origin
Field("bin", length=16,
label = T("Bin"),
represent = s3_string_represent,
requires = IS_LENGTH(16),
),
inv_item_id("recv_inv_item_id",
label = T("Receiving Inventory"),
ondelete = "RESTRICT",
required = False,
readable = False,
writable = False,
),
# The bin at destination
Field("recv_bin", length=16,
label = T("Add to Bin"),
represent = s3_string_represent,
requires = IS_LENGTH(16),
readable = False,
writable = False,
# Nice idea but not working properly
#widget = S3InvBinWidget("inv_track_item"),
comment = DIV(_class="tooltip",
_title="%s|%s" % \
(T("Bin"),
T("The Bin in which the Item is being stored (optional)."))),
),
Field("item_source_no", length=16,
label = itn_label,
represent = s3_string_represent,
requires = IS_LENGTH(16),
),
# Organisation which originally supplied/donated item(s)
organisation_id("supply_org_id",
label = T("Supplier/Donor"),
ondelete = "SET NULL",
),
# Organisation that owns item(s)
organisation_id("owner_org_id",
label = T("Owned By (Organization/Branch)"),
ondelete = "SET NULL",
),
Field("inv_item_status", "integer",
default = 0,
label = T("Item Status"),
represent = represent_option(inv_item_status_opts),
requires = IS_EMPTY_OR(
IS_IN_SET(inv_item_status_opts)
),
),
Field("status", "integer",
default = 1,
label = T("Item Tracking Status"),
represent = represent_option(tracking_status),
required = True,
requires = IS_IN_SET(tracking_status),
writable = False,
),
self.inv_adj_item_id(ondelete = "RESTRICT"), # any adjustment record
# send record
send_id(),
# receive record
recv_id(),
req_item_id(readable = False,
writable = False,
),
Field.Method("total_value",
self.inv_track_item_total_value,
),
Field.Method("pack_quantity",
self.supply_item_pack_quantity(tablename=tablename),
),
Field.Method("total_volume",
self.inv_track_item_total_volume,
),
Field.Method("total_weight",
self.inv_track_item_total_weight,
),
Field.Method("total_recv_volume",
lambda row: \
self.inv_track_item_total_volume(row, received=True),
),
Field.Method("total_recv_weight",
lambda row: \
self.inv_track_item_total_weight(row, received=True),
),
s3_comments(),
*s3_meta_fields()
)
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add Item to Shipment"),
title_display = T("Shipment Item Details"),
title_list = T("Shipment Items"),
title_update = T("Edit Shipment Item"),
label_list_button = T("List Shipment Items"),
label_delete_button = T("Delete Shipment Item"),
msg_record_created = T("Item Added to Shipment"),
msg_record_modified = T("Shipment Item updated"),
msg_record_deleted = T("Shipment Item deleted"),
msg_list_empty = T("No Shipment Items"))
# Filter Widgets
filter_widgets = [
TextFilter(["item_source_no",
"item_id$name",
"send_id$site_id$name",
"recv_id$site_id$name",
"recv_id$purchase_ref",
"recv_id$recv_ref",
"recv_id$req_ref",
"recv_id$send_ref",
"send_id$req_ref",
"send_id$send_ref",
"supply_org_id",
"owner_org_id",
],
label = T("Search"),
#comment = recv_search_comment,
),
DateFilter("send_id$date",
label = T("Sent date"),
hidden = True,
),
DateFilter("recv_id$date",
label = T("Received date"),
hidden = True,
),
]
# Resource configuration
configure(tablename,
extra_fields = ["quantity",
"recv_quantity",
"pack_value",
"item_id$volume",
"item_id$weight",
"item_pack_id$quantity",
],
filter_widgets = filter_widgets,
list_fields = ["id",
"status",
"item_source_no",
"item_id",
"item_pack_id",
"send_id",
"recv_id",
"quantity",
(T("Total Weight (kg)"), "total_weight"),
(T("Total Volume (m3)"), "total_volume"),
"currency",
"pack_value",
"bin",
"return_quantity",
"recv_quantity",
"recv_bin",
"owner_org_id",
"supply_org_id",
],
onaccept = self.inv_track_item_onaccept,
onvalidation = self.inv_track_item_onvalidate,
)
#----------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"inv_recv_id": recv_id,
"inv_send_onaccept": self.inv_send_onaccept,
"inv_track_item_deleting": self.inv_track_item_deleting,
"inv_track_item_onaccept": self.inv_track_item_onaccept,
}
# -------------------------------------------------------------------------
def defaults(self):
"""
Safe defaults for model-global names in case module is disabled
"""
return {"inv_recv_id": S3ReusableField.dummy("recv_id"),
}
# -------------------------------------------------------------------------
@staticmethod
def inv_track_item_total_value(row):
""" Total value of a track item """
try:
inv_track_item = getattr(row, "inv_track_item")
except AttributeError:
inv_track_item = row
try:
quantity = inv_track_item.quantity
pack_value = inv_track_item.pack_value
except AttributeError:
# Need to reload the track item
# Avoid this by adding to extra_fields
ttable = current.s3db.inv_track_item
query = (ttable.id == inv_track_item.id)
inv_track_item = current.db(query).select(ttable.quantity,
ttable.pack_value,
limitby = (0, 1),
).first()
quantity = inv_track_item.quantity
pack_value = inv_track_item.pack_value
if quantity and pack_value:
return round(quantity * pack_value, 2)
else:
# Item lacks quantity, or value per pack, or both
return current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def inv_track_item_total_volume(row, received=False):
""" Total volume of a track item """
try:
inv_track_item = getattr(row, "inv_track_item")
except AttributeError:
inv_track_item = row
try:
supply_item = getattr(row, "supply_item")
volume = supply_item.volume
except AttributeError:
# Need to load the supply item
# Avoid this by adding to extra_fields
ttable = current.s3db.inv_track_item
stable = current.s3db.supply_item
query = (ttable.id == inv_track_item.id) & \
(ttable.item_id == stable.id)
supply_item = current.db(query).select(stable.volume,
limitby = (0, 1),
).first()
volume = supply_item.volume if supply_item else None
if volume is None:
return current.messages["NONE"]
if received:
qfield = "recv_quantity"
else:
qfield = "quantity"
try:
quantity = inv_track_item[qfield]
except KeyError:
# Need to reload the track item
# Avoid this by adding to extra_fields
ttable = current.s3db.inv_track_item
query = (ttable.id == inv_track_item.id)
inv_track_item = current.db(query).select(ttable[qfield],
limitby = (0, 1),
).first()
quantity = inv_track_item[qfield]
try:
supply_item_pack = getattr(row, "supply_item_pack")
pack_quantity = supply_item_pack.quantity
except AttributeError:
# Need to load the supply item pack
# Avoid this by adding to extra_fields
ttable = current.s3db.inv_track_item
ptable = current.s3db.supply_item_pack
query = (ttable.id == inv_track_item.id) & \
(ttable.item_pack_id == ptable.id)
supply_item_pack = current.db(query).select(ptable.quantity,
limitby = (0, 1),
).first()
pack_quantity = supply_item_pack.quantity
return round(quantity * pack_quantity * volume, 3)
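# Worked example (hypothetical values): 4 packs of 6 units each, with a
# supply_item volume of 0.02 m3 per unit ->
# round(4 * 6 * 0.02, 3) = 0.48 m3; total_weight below follows the same
# pattern using the item weight, rounded to 2 decimal places.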
# -------------------------------------------------------------------------
@staticmethod
def inv_track_item_total_weight(row, received=False):
""" Total weight of a track item """
try:
inv_track_item = getattr(row, "inv_track_item")
except AttributeError:
inv_track_item = row
try:
supply_item = getattr(row, "supply_item")
weight = supply_item.weight
except AttributeError:
# Need to load the supply item
# Avoid this by adding to extra_fields
ttable = current.s3db.inv_track_item
stable = current.s3db.supply_item
query = (ttable.id == inv_track_item.id) & \
(ttable.item_id == stable.id)
supply_item = current.db(query).select(stable.weight,
limitby = (0, 1),
).first()
weight = supply_item.weight if supply_item else None
if weight is None:
return current.messages["NONE"]
if received:
qfield = "recv_quantity"
else:
qfield = "quantity"
try:
quantity = inv_track_item[qfield]
except KeyError:
# Need to reload the track item
# Avoid this by adding to extra_fields
ttable = current.s3db.inv_track_item
query = (ttable.id == inv_track_item.id)
inv_track_item = current.db(query).select(ttable[qfield],
limitby = (0, 1),
).first()
quantity = inv_track_item[qfield]
try:
supply_item_pack = getattr(row, "supply_item_pack")
pack_quantity = supply_item_pack.quantity
except AttributeError:
# Need to load the supply item pack
# Avoid this by adding to extra_fields
ttable = current.s3db.inv_track_item
ptable = current.s3db.supply_item_pack
query = (ttable.id == inv_track_item.id) & \
(ttable.item_pack_id == ptable.id)
supply_item_pack = current.db(query).select(ptable.quantity,
limitby = (0, 1),
).first()
pack_quantity = supply_item_pack.quantity
return round(quantity * pack_quantity * weight, 2)
# -------------------------------------------------------------------------
@staticmethod
def inv_send_represent(record_id, row=None, show_link=True):
"""
Represent a Sent Shipment
@ToDo: S3Represent
"""
if row:
record_id = row.id
table = current.db.inv_send
elif not record_id:
return current.messages["NONE"]
else:
db = current.db
table = db.inv_send
row = db(table.id == record_id).select(table.date,
table.send_ref,
table.to_site_id,
limitby=(0, 1),
).first()
try:
send_ref_string = table.send_ref.represent(row.send_ref,
show_link = False,
)
to_string = table.to_site_id.represent(row.to_site_id,
show_link = False,
)
date_string = table.date.represent(row.date)
except AttributeError:
# Record not found, or not all required fields in row
return current.messages.UNKNOWN_OPT
else:
T = current.T
represent = "%s (%s: %s %s %s)" % (send_ref_string,
T("To"),
to_string,
T("on"),
date_string,
)
if show_link:
return A(represent,
_href = URL(c="inv", f="send", args=[record_id]))
else:
return represent
# -------------------------------------------------------------------------
@staticmethod
def inv_send_onaccept(form):
"""
When an inv_send record is created, generate the send_ref
(and, for special shipment types, add the matching stock items to the shipment).
"""
db = current.db
formvars = form.vars
record_id = formvars.id
shipment_type = formvars.type
if shipment_type:
# Add all inv_items with status matching the send shipment type
# eg. Items for Dump, Sale, Reject, Surplus
inv_track_item_onaccept = current.s3db.inv_track_item_onaccept
site_id = formvars.site_id
itable = db.inv_inv_item
tracktable = db.inv_track_item
query = (itable.site_id == site_id) & \
(itable.status == int(shipment_type))
rows = db(query).select()
for row in rows:
if row.quantity != 0:
# Insert inv_item to inv_track_item
inv_track_id = tracktable.insert(send_id = record_id,
send_inv_item_id = row.id,
item_id = row.item_id,
quantity = row.quantity,
currency = row.currency,
pack_value = row.pack_value,
expiry_date = row.expiry_date,
owner_org_id = row.owner_org_id,
supply_org_id = row.supply_org_id,
item_source_no = row.item_source_no,
item_pack_id = row.item_pack_id,
inv_item_status = row.status,
#status = TRACK_STATUS_PREPARING,
)
# Construct form.vars for inv_track_item_onaccept
formvars = Storage()
formvars.id = inv_track_id
formvars.quantity = row.quantity
formvars.item_pack_id = row.item_pack_id
formvars.send_inv_item_id = row.id
# Call inv_track_item_onaccept to remove inv_item from stock
inv_track_item_onaccept(Storage(vars=formvars))
stable = db.inv_send
# If the send_ref is None then set it up
record = stable[record_id]
if not record.send_ref:
code = current.s3db.supply_get_shipping_code(
current.deployment_settings.get_inv_send_shortname(),
record.site_id,
stable.send_ref,
)
db(stable.id == record_id).update(send_ref=code)
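# For special shipment types (e.g. Dump, Sale, Reject, Surplus) every
# non-empty stock row at the site with a matching status is copied into
# inv_track_item and removed from stock via inv_track_item_onaccept;
# the send_ref is then generated through supply_get_shipping_code()
# whenever it has not been set manually.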
# -------------------------------------------------------------------------
@staticmethod
def inv_send_form(r, **attr):
"""
Generate a PDF of a Waybill
"""
db = current.db
table = db.inv_send
tracktable = db.inv_track_item
table.date.readable = True
record = db(table.id == r.id).select(table.send_ref,
limitby = (0, 1)
).first()
send_ref = record.send_ref
# hide the inv_item field
tracktable.send_inv_item_id.readable = False
tracktable.recv_inv_item_id.readable = False
T = current.T
list_fields = [(T("Item Code"), "item_id$code"),
"item_id",
(T("Weight (kg)"), "item_id$weight"),
(T("Volume (m3)"), "item_id$volume"),
"bin",
"item_source_no",
"item_pack_id",
"quantity",
]
settings = current.deployment_settings
if r.record.req_ref:
# This Shipment relates to a request
# - show the req_item comments
list_fields.append("req_item_id$comments")
if settings.get_inv_track_pack_values():
list_fields.extend(("currency",
"pack_value",
))
from core import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request = r,
method = "list",
pdf_componentname = "track_item",
pdf_title = settings.get_inv_send_form_name(),
pdf_filename = send_ref,
list_fields = list_fields,
pdf_hide_comments = True,
pdf_header_padding = 12,
pdf_footer = inv_send_pdf_footer,
pdf_orientation = "Landscape",
pdf_table_autogrow = "B",
**attr
)
# -------------------------------------------------------------------------
@staticmethod
def inv_recv_represent(record_id, row=None, show_link=True):
"""
Represent a Received Shipment
@ToDo: S3Represent
"""
if row:
record_id = row.id
table = current.db.inv_recv
elif not record_id:
return current.messages["NONE"]
else:
db = current.db
table = db.inv_recv
row = db(table.id == record_id).select(table.date,
table.recv_ref,
table.from_site_id,
table.organisation_id,
limitby = (0, 1),
).first()
recv_ref_string = table.recv_ref.represent(row.recv_ref,
show_link = False,
)
if row.from_site_id:
from_string = table.from_site_id.represent(row.from_site_id,
show_link = False,
)
else:
from_string = table.organisation_id.represent(row.organisation_id,
show_link = False,
)
date_string = table.date.represent(row.date)
T = current.T
represent = "%s (%s: %s %s %s)" % (recv_ref_string,
T("From"),
from_string,
T("on"),
date_string,
)
if show_link:
return A(represent,
_href = URL(c="inv", f="recv", args=[record_id]),
)
else:
return represent
# -------------------------------------------------------------------------
@staticmethod
def inv_recv_onaccept(form):
"""
When an inv_recv record is created, generate the recv_ref.
"""
db = current.db
rtable = db.inv_recv
# If the recv_ref is None then set it up
record_id = form.vars.id
record = rtable[record_id]
if not record.recv_ref:
# AR Number
code = current.s3db.supply_get_shipping_code(
current.deployment_settings.get_inv_recv_shortname(),
record.site_id,
rtable.recv_ref,
)
db(rtable.id == record_id).update(recv_ref = code)
# -------------------------------------------------------------------------
@staticmethod
def inv_send_onvalidation(form):
"""
Check that at least one of to_site_id or organisation_id is filled
"""
form_vars = form.vars
if not form_vars.to_site_id and not form_vars.organisation_id:
error = current.T("Please enter a %(site)s OR an Organization") % \
{"site": current.deployment_settings.get_org_site_label()}
errors = form.errors
errors.to_site_id = error
errors.organisation_id = error
# -------------------------------------------------------------------------
@staticmethod
def inv_recv_onvalidation(form):
"""
Check that from_site_id or organisation_id is filled, as appropriate for the shipment type
@ToDo: lookup the type values from s3cfg.py instead of hardcoding them
"""
form_vars_get = form.vars.get
shipment_type = form_vars_get("type")
if shipment_type is None:
return
shipment_type = int(shipment_type)
if shipment_type == 11 and not form_vars_get("from_site_id"):
# Internal Shipment needs from_site_id
form.errors.from_site_id = current.T("Please enter a %(site)s") % \
{"site": current.deployment_settings.get_org_site_label()}
elif shipment_type >= 32 and not form_vars_get("organisation_id"):
# Shipment from an external Organization/Supplier needs organisation_id
form.errors.organisation_id = current.T("Please enter an Organization/Supplier")
# ---------------------------------------------------------------------
@staticmethod
def inv_recv_form(r, **attr):
"""
Generate a PDF of a GRN (Goods Received Note)
"""
T = current.T
db = current.db
table = db.inv_recv
track_table = db.inv_track_item
table.date.readable = True
table.site_id.readable = True
track_table.recv_quantity.readable = True
table.site_id.label = T("By %(site)s") % {"site": T(current.deployment_settings.get_inv_facility_label())}
table.site_id.represent = current.s3db.org_site_represent
record = table[r.id]
recv_ref = record.recv_ref
list_fields = ["item_id",
(T("Weight (kg)"), "item_id$weight"),
(T("Volume (m3)"), "item_id$volume"),
"item_source_no",
"item_pack_id",
"quantity",
"recv_quantity",
"currency",
"pack_value",
"bin"
]
from core import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request = r,
method = "list",
pdf_title = T(current.deployment_settings.get_inv_recv_form_name()),
pdf_filename = recv_ref,
list_fields = list_fields,
pdf_hide_comments = True,
pdf_componentname = "track_item",
pdf_header_padding = 12,
pdf_footer = inv_recv_pdf_footer,
pdf_table_autogrow = "B",
pdf_orientation = "Landscape",
**attr
)
# -------------------------------------------------------------------------
@staticmethod
def inv_recv_donation_cert(r, **attr):
"""
Generate a PDF of a Donation certificate
"""
T = current.T
db = current.db
table = db.inv_recv
table.date.readable = True
table.type.readable = False
field = table.site_id
field.readable = True
field.label = T("By %(site)s") % \
{"site": T(current.deployment_settings.get_inv_facility_label())}
field.represent = current.s3db.org_site_represent
record = table[r.id]
site_id = record.site_id
site = field.represent(site_id, False)
from core import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request = r,
method = "list",
pdf_title = "Donation Certificate",
pdf_filename = "DC-%s" % site,
pdf_hide_comments = True,
pdf_componentname = "track_item",
**attr
)
# -------------------------------------------------------------------------
@staticmethod
def qnty_recv_repr(value):
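"""
Represent a received/returned quantity:
None is shown as a bold "None", zero is shown in bold to highlight it,
other values are shown as-is
"""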
if value is None:
reprstr = B(current.T("None"))
else:
reprstr = value if value else B(value)
return reprstr
# ---------------------------------------------------------------------
@staticmethod
def inv_send_ref_represent(value, show_link=True):
"""
Represent for the Tally Out / Waybill number;
if show_link is True then it will generate a link to the PDF
"""
if value:
if show_link:
db = current.db
table = db.inv_send
row = db(table.send_ref == value).select(table.id,
limitby = (0, 1)
).first()
if row:
return A(value,
_href = URL(c = "inv",
f = "send",
args = [row.id, "form"],
extension = "",
),
)
else:
return value
else:
return value
else:
return current.messages["NONE"]
# ---------------------------------------------------------------------
@staticmethod
def inv_recv_ref_represent(value, show_link=True):
"""
Represent for the Goods Received Note;
if show_link is True then it will generate a link to the PDF
"""
if value:
if show_link:
db = current.db
table = db.inv_recv
recv_row = db(table.recv_ref == value).select(table.id,
limitby = (0, 1)
).first()
return A(value,
_href = URL(c = "inv",
f = "recv",
args = [recv_row.id, "form"],
extension = "",
),
)
else:
return B(value)
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def inv_track_item_onvalidate(form):
"""
When a track item record is being created with a tracking number,
the tracking number needs to be unique within the organisation.
If the inv. item is coming out of a warehouse, the inv. item details
need to be copied across (org, expiry etc).
If the inv. item is being received, there might be a selected bin:
ensure that the correct bin is selected and save those details.
"""
form_vars = form.vars
send_inv_item_id = form_vars.send_inv_item_id
if send_inv_item_id:
# Copy the data from the sent inv_item
db = current.db
itable = db.inv_inv_item
query = (itable.id == send_inv_item_id)
record = db(query).select(limitby=(0, 1)).first()
form_vars.item_id = record.item_id
form_vars.item_source_no = record.item_source_no
form_vars.expiry_date = record.expiry_date
form_vars.bin = record.bin
form_vars.owner_org_id = record.owner_org_id
form_vars.supply_org_id = record.supply_org_id
form_vars.pack_value = record.pack_value
form_vars.currency = record.currency
form_vars.inv_item_status = record.status
# Save the organisation from where this tracking originates
stable = current.s3db.org_site
query = query & (itable.site_id == stable.id)
record = db(query).select(stable.organisation_id,
limitby=(0, 1)).first()
form_vars.track_org_id = record.organisation_id
if not form_vars.recv_quantity and "quantity" in form_vars:
# If we have no send_id and no recv_quantity then
# copy the quantity sent directly into the received field
# This is for when there is no related send record
# The Quantity received ALWAYS defaults to the quantity sent
# (Please do not change this unless there is a specific user requirement)
#db.inv_track_item.recv_quantity.default = form_vars.quantity
form_vars.recv_quantity = form_vars.quantity
recv_bin = form_vars.recv_bin
if recv_bin:
# If there is a receiving bin then select the right one
if isinstance(recv_bin, list):
if recv_bin[1] != "":
recv_bin = recv_bin[1]
else:
recv_bin = recv_bin[0]
# -------------------------------------------------------------------------
@staticmethod
def inv_kitting_onvalidate(form):
"""
Check that we have sufficient inv_item in stock to build the kits
"""
form_vars = form.vars
item_id = form_vars.item_id
item_pack_id = form_vars.item_pack_id
quantity = form_vars.quantity
site_id = form_vars.site_id
db = current.db
s3db = current.s3db
ktable = s3db.supply_kit_item
ptable = db.supply_item_pack
iitable = s3db.inv_inv_item
# Get contents of this kit
query = (ktable.parent_item_id == item_id)
rows = db(query).select(ktable.item_id,
ktable.quantity,
ktable.item_pack_id,
)
# How many kits are we building?
p_id_field = ptable.id
p_qty_field = ptable.quantity
pack_qty = db(p_id_field == item_pack_id).select(p_qty_field,
limitby = (0, 1)
).first().quantity
quantity = quantity * pack_qty
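# quantity is now expressed in base kit units, e.g. (illustrative) 3 packs of 10 kits => 30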
max_kits = None
ii_pack_field = iitable.item_pack_id
ii_qty_field = iitable.quantity
ii_expiry_field = iitable.expiry_date
# Base Query: The Facility at which we're building these kits
# Filter out Stock which is in Bad condition or Expired
squery = (iitable.site_id == site_id) & \
(iitable.deleted == False) & \
((ii_expiry_field >= current.request.now) | ((ii_expiry_field == None))) & \
(iitable.status == 0)
# Loop through each supply_item in the kit
for record in rows:
# How much of this supply_item is required per kit?
pack_qty = db(p_id_field == record.item_pack_id).select(p_qty_field,
limitby = (0, 1)
).first().quantity
one_kit = record.quantity * pack_qty
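# one_kit = base units of this component needed to build a single kit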
# How much of this supply_item do we have in stock?
stock_amount = 0
query = squery & (iitable.item_id == record.item_id)
wh_items = db(query).select(#iitable.id,
ii_qty_field,
ii_pack_field,
)
for wh_item in wh_items:
pack_qty = db(p_id_field == wh_item.item_pack_id).select(p_qty_field,
limitby = (0, 1)
).first().quantity
amount = wh_item.quantity * pack_qty
stock_amount += amount
# How many Kits can we create based on this item?
kits = stock_amount / one_kit
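# e.g. (illustrative) 100 base units in stock / 20 units per kit => 5.0 kits possible from this component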
if max_kits is None:
# 1st run so this item starts the list
max_kits = kits
else:
# Reduce the total possible if less than for previous items
if kits < max_kits:
max_kits = kits
# @ToDo: Save the results for the onaccept?
if max_kits is None:
form.errors.item_id = current.T("This kit hasn't got any Kit Items defined")
elif max_kits < quantity:
form.errors.quantity = current.T("You can only make %d kit(s) with the available stock") % \
int(max_kits)
# -------------------------------------------------------------------------
@staticmethod
def inv_kitting_onaccept(form):
"""
Adjust the Inventory stocks
reduce the components & increase the kits
- picks items which have an earlier expiry_date where they have them,
earlier purchase_date otherwise
Provide a pick list to ensure that the right stock items are used
to build the kits: inv_kitting_item
"""
form_vars = form.vars
kitting_id = form_vars.id
item_id = form_vars.item_id
item_pack_id = form_vars.item_pack_id
quantity = form_vars.quantity
site_id = form_vars.site_id
db = current.db
s3db = current.s3db
ktable = s3db.supply_kit_item
ptable = db.supply_item_pack
iitable = s3db.inv_inv_item
insert = s3db.inv_kitting_item.insert
inv_remove = s3db.inv_remove
# Get contents of this kit
query = (ktable.parent_item_id == item_id)
rows = db(query).select(ktable.item_id,
ktable.quantity,
ktable.item_pack_id,
)
# How many kits are we building?
p_id_field = ptable.id
p_qty_field = ptable.quantity
pack_qty = db(p_id_field == item_pack_id).select(p_qty_field,
limitby = (0, 1)
).first().quantity
quantity = quantity * pack_qty
ii_id_field = iitable.id
ii_bin_field = iitable.bin
ii_pack_field = iitable.item_pack_id
ii_qty_field = iitable.quantity
ii_expiry_field = iitable.expiry_date
ii_purchase_field = iitable.purchase_date
ii_src_field = iitable.item_source_no
# Match Stock based on oldest expiry date or purchase date
orderby = ii_expiry_field | ii_purchase_field
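# PyDAL's '|' on Fields builds a combined ORDER BY: oldest expiry date first, then oldest purchase date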
# We set expiry date of the kit to the oldest expiry date of the components
expiry_date = None
# Base Query: The Facility at which we're building these kits
# Filter out Stock which is in Bad condition or Expired
squery = (iitable.site_id == site_id) & \
(iitable.deleted == False) & \
((ii_expiry_field >= current.request.now) | ((ii_expiry_field == None))) & \
(iitable.status == 0)
# Loop through each supply_item in the kit
for record in rows:
# How much of this supply_item is required per kit?
pack_qty = db(p_id_field == record.item_pack_id).select(p_qty_field,
limitby = (0, 1)
).first().quantity
one_kit = record.quantity * pack_qty
# How much is required for all Kits?
required = one_kit * quantity
# List of what we have available in stock
ritem_id = record.item_id
query = squery & (iitable.item_id == ritem_id)
wh_items = db(query).select(ii_id_field,
ii_qty_field,
ii_expiry_field,
ii_purchase_field, # Included just for orderby on Postgres
ii_pack_field,
ii_bin_field,
ii_src_field,
orderby = orderby,
)
for wh_item in wh_items:
# Get the pack_qty
pack_qty = db(p_id_field == wh_item.item_pack_id).select(p_qty_field,
limitby = (0, 1)
).first().quantity
# How many of this item can we use for these kits?
amount = wh_item.quantity * pack_qty
# How many of this item will we use for the kits?
if amount > required:
# Use only what is required
amount = required
#else:
# # We use all
if wh_item.expiry_date:
if expiry_date is None:
# No expiry date set so this item starts the list
expiry_date = wh_item.expiry_date
else:
# Shorten the expiry date if less than for previous items
if wh_item.expiry_date < expiry_date:
expiry_date = wh_item.expiry_date
# @ToDo: Record which components are to be used for the kits
# Store results in a table?
# Remove from stock
inv_remove(wh_item, amount)
# Add to Pick List
insert(site_id = site_id,
kitting_id = kitting_id,
item_id = ritem_id,
item_pack_id = wh_item.item_pack_id,
bin = wh_item.bin,
item_source_no = wh_item.item_source_no,
quantity = amount,
inv_item_id = wh_item.id,
)
# Update how much is still required
required -= amount
if not required:
# No more required: move on to the next component
break
# Add Kits to Stock
# @ToDo: Keep track of Donor? Owner?
# @ToDo: Update Pack Value
new_id = iitable.insert(site_id = site_id,
item_id = item_id,
item_pack_id = item_pack_id,
quantity = quantity,
expiry_date = expiry_date,
)
s3db.update_super(iitable, {"id": new_id})
# -------------------------------------------------------------------------
@staticmethod
def inv_track_item_onaccept(form):
"""
When a track item record is created and it is linked to an inv_item
then the inv_item quantity will be reduced.
"""
db = current.db
s3db = current.s3db
tracktable = db.inv_track_item
inv_item_table = db.inv_inv_item
stable = db.inv_send
rtable = db.inv_recv
siptable = db.supply_item_pack
supply_item_add = s3db.supply_item_add
form_vars = form.vars
record_id = form_vars.id
record = form.record
if form_vars.send_inv_item_id:
stock_item = db(inv_item_table.id == form_vars.send_inv_item_id).select(inv_item_table.id,
inv_item_table.quantity,
inv_item_table.item_pack_id,
limitby = (0, 1)
).first()
elif record:
stock_item = record.send_inv_item_id
else:
# will get here for a recv (from external donor / local supplier)
stock_item = None
# Modify the original inv. item total only if we have a quantity on the form
# and a stock item to take it from.
# There will not be a quantity if it is being received since by then it is read only
# It will be there on an import and so the value will be deducted correctly
if form_vars.quantity and stock_item:
stock_quantity = stock_item.quantity
stock_pack = db(siptable.id == stock_item.item_pack_id).select(siptable.quantity,
limitby = (0, 1)
).first().quantity
if record:
if record.send_inv_item_id != None:
# Items have already been removed from stock, so first put them back
old_track_pack_quantity = db(siptable.id == record.item_pack_id).select(
siptable.quantity,
limitby = (0, 1),
).first().quantity
stock_quantity = supply_item_add(stock_quantity,
stock_pack,
record.quantity,
old_track_pack_quantity
)
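# supply_item_add (assumed semantics, see the supply model): combines quantities given in
# different pack sizes and returns the total expressed in the first pack's units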
try:
new_track_pack_quantity = db(siptable.id == form_vars.item_pack_id).select(
siptable.quantity,
limitby = (0, 1)
).first().quantity
except AttributeError:
new_track_pack_quantity = record.item_pack_id.quantity
new_total = supply_item_add(stock_quantity,
stock_pack,
- float(form_vars.quantity),
new_track_pack_quantity
)
db(inv_item_table.id == stock_item).update(quantity = new_total)
if form_vars.send_id and form_vars.recv_id:
send_ref = db(stable.id == form_vars.send_id).select(stable.send_ref,
limitby = (0, 1)
).first().send_ref
db(rtable.id == form_vars.recv_id).update(send_ref = send_ref)
rrtable = s3db.table("req_req")
if rrtable:
use_req = True
ritable = s3db.req_req_item
else:
# Req module deactivated
use_req = False
# If this item is linked to a request, then copy the req_ref to the send item
if use_req and record and record.req_item_id:
req_id = db(ritable.id == record.req_item_id).select(ritable.req_id,
limitby = (0, 1)
).first().req_id
req_ref = db(rrtable.id == req_id).select(rrtable.req_ref,
limitby = (0, 1)
).first().req_ref
db(stable.id == form_vars.send_id).update(req_ref = req_ref)
if form_vars.recv_id:
db(rtable.id == form_vars.recv_id).update(req_ref = req_ref)
# If the status is 'unloading':
# Move all the items into the site, update any request & make any adjustments
# Finally change the status to 'arrived'
if record and record.status == TRACK_STATUS_UNLOADING and \
record.recv_quantity:
# Look for the item in the site already
recv_rec = db(rtable.id == record.recv_id).select(rtable.site_id,
rtable.type,
limitby = (0, 1)
).first()
recv_site_id = recv_rec.site_id
query = (inv_item_table.site_id == recv_site_id) & \
(inv_item_table.item_id == record.item_id) & \
(inv_item_table.item_pack_id == record.item_pack_id) & \
(inv_item_table.currency == record.currency) & \
(inv_item_table.status == record.inv_item_status) & \
(inv_item_table.pack_value == record.pack_value) & \
(inv_item_table.expiry_date == record.expiry_date) & \
(inv_item_table.bin == record.recv_bin) & \
(inv_item_table.owner_org_id == record.owner_org_id) & \
(inv_item_table.item_source_no == record.item_source_no) & \
(inv_item_table.supply_org_id == record.supply_org_id)
inv_item_row = db(query).select(inv_item_table.id,
limitby = (0, 1)
).first()
if inv_item_row:
# Update the existing item
inv_item_id = inv_item_row.id
db(inv_item_table.id == inv_item_id).update(quantity = inv_item_table.quantity + record.recv_quantity)
else:
# Add a new item
source_type = 0
if form_vars.send_inv_item_id:
source_type = db(inv_item_table.id == form_vars.send_inv_item_id).select(inv_item_table.source_type,
limitby = (0, 1)
).first().source_type
else:
if recv_rec.type == 2:
source_type = 1 # Donation
else:
source_type = 2 # Procured
inv_item = {"site_id": recv_site_id,
"item_id": record.item_id,
"item_pack_id": record.item_pack_id,
"currency": record.currency,
"pack_value": record.pack_value,
"expiry_date": record.expiry_date,
"bin": record.recv_bin,
"owner_org_id": record.owner_org_id,
"supply_org_id": record.supply_org_id,
"quantity": record.recv_quantity,
"item_source_no": record.item_source_no,
"source_type": source_type,
"status": record.inv_item_status,
}
inv_item_id = inv_item_table.insert(**inv_item)
inv_item["id"] = inv_item_id
realm_entity = current.auth.get_realm_entity(inv_item_table, inv_item)
db(inv_item_table.id == inv_item_id).update(realm_entity = realm_entity)
# If this item is linked to a request, then update the quantity fulfil
if use_req and record.req_item_id:
req_item = db(ritable.id == record.req_item_id).select(ritable.quantity_fulfil,
ritable.item_pack_id,
limitby = (0, 1)
).first()
req_quantity = req_item.quantity_fulfil
req_pack_quantity = db(siptable.id == req_item.item_pack_id).select(siptable.quantity,
limitby = (0, 1)
).first().quantity
track_pack_quantity = db(siptable.id == record.item_pack_id).select(siptable.quantity,
limitby = (0, 1)
).first().quantity
quantity_fulfil = supply_item_add(req_quantity,
req_pack_quantity,
record.recv_quantity,
track_pack_quantity
)
db(ritable.id == record.req_item_id).update(quantity_fulfil = quantity_fulfil)
s3db.req_update_status(req_id)
db(tracktable.id == record_id).update(recv_inv_item_id = inv_item_id,
status = TRACK_STATUS_ARRIVED,
)
# If the receive quantity doesn't equal the sent quantity
# then an adjustment needs to be set up
if record.quantity != record.recv_quantity:
# Do we have an adjustment record?
# (which might have been created for another item in this shipment)
query = (tracktable.recv_id == record.recv_id) & \
(tracktable.adj_item_id != None)
adj_rec = db(query).select(tracktable.adj_item_id,
limitby = (0, 1)).first()
adjitemtable = s3db.inv_adj_item
if adj_rec:
adj_id = db(adjitemtable.id == adj_rec.adj_item_id).select(adjitemtable.adj_id,
limitby = (0, 1)
).first().adj_id
# If we don't yet have an adj record then create it
else:
adjtable = s3db.inv_adj
irtable = s3db.inv_recv
recv_rec = db(irtable.id == record.recv_id).select(irtable.recipient_id,
irtable.site_id,
irtable.comments,
limitby = (0, 1)
).first()
adj_id = adjtable.insert(adjuster_id = recv_rec.recipient_id,
site_id = recv_rec.site_id,
adjustment_date = current.request.now.date(),
category = 0,
status = 1,
comments = recv_rec.comments,
)
# Now create the adj item record
adj_item_id = adjitemtable.insert(reason = 0,
adj_id = adj_id,
inv_item_id = record.send_inv_item_id, # original source inv_item
item_id = record.item_id, # the supply item
item_pack_id = record.item_pack_id,
old_quantity = record.quantity,
new_quantity = record.recv_quantity,
currency = record.currency,
old_pack_value = record.pack_value,
new_pack_value = record.pack_value,
expiry_date = record.expiry_date,
bin = record.recv_bin,
comments = record.comments,
)
# Copy the adj_item_id to the tracking record
db(tracktable.id == record_id).update(adj_item_id = adj_item_id)
# -------------------------------------------------------------------------
@staticmethod
def inv_track_item_deleting(record_id):
"""
A track item can only be deleted if the status is Preparing.
When a track item record is deleted and it is linked to an inv_item,
the quantity it represents is returned to that inv_item (stock).
"""
db = current.db
s3db = current.s3db
tracktable = db.inv_track_item
inv_item_table = db.inv_inv_item
ritable = s3db.req_req_item
siptable = db.supply_item_pack
record = tracktable[record_id]
if record.status != 1:
return False
# if this is linked to a request
# then remove these items from the quantity in transit
if record.req_item_id:
req_id = record.req_item_id
req_item = ritable[req_id]
req_quantity = req_item.quantity_transit
req_pack_quantity = siptable[req_item.item_pack_id].quantity
track_pack_quantity = siptable[record.item_pack_id].quantity
quantity_transit = s3db.supply_item_add(req_quantity,
req_pack_quantity,
- record.quantity,
track_pack_quantity
)
db(ritable.id == req_id).update(quantity_transit = quantity_transit)
s3db.req_update_status(req_id)
# Check that we have a link to a warehouse
if record.send_inv_item_id:
track_total = record.quantity
# Remove the total from this record and place it back in the warehouse
db(inv_item_table.id == record.send_inv_item_id).update(quantity = inv_item_table.quantity + track_total)
db(tracktable.id == record_id).update(quantity = 0,
comments = "%sQuantity was: %s" % \
(inv_item_table.comments,
track_total,
),
)
return True
# -------------------------------------------------------------------------
@staticmethod
def inv_timeline(r, **attr):
"""
Display the Shipments on a Simile Timeline
http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline
@ToDo: Play button
http://www.simile-widgets.org/wiki/Timeline_Moving_the_Timeline_via_Javascript
"""
resource_name = r.name
if r.representation != "html" or resource_name not in ("recv", "send"):
r.error(405, current.ERROR.BAD_METHOD)
T = current.T
db = current.db
response = current.response
s3 = response.s3
s3_include_simile()
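# Load the Simile Timeline client-side assets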
# Add our data
# @ToDo: Make this the initial data & then collect extra via REST with a stylesheet
# add in JS using S3.timeline.eventSource.addMany(events) where events is a []
if r.record:
# Single record
rows = [r.record]
else:
# Multiple records
# @ToDo: Load all records & sort to closest in time
# http://stackoverflow.com/questions/7327689/how-to-generate-a-sequence-of-future-datetimes-in-python-and-determine-nearest-d
fields = ["id",
"date",
"send_ref",
"comments",
]
if resource_name == "recv":
fields.append("recv_ref")
rows = r.resource.select(fields,
limit = 2000,
virtual = False,
as_rows = True,
)
# We need to link these records to the other end, which can only be done by send_ref
send_refs = [row.send_ref for row in rows if row.send_ref is not None]
data = {"dateTimeFormat": "iso8601",
}
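# Simile Timeline expects ISO 8601 formatted dates (see the .isoformat() calls below)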
now = r.utcnow
tl_start = tl_end = now
events = []
eappend = events.append
if resource_name == "send":
table = db.inv_recv
query = (table.deleted == False) & \
current.auth.s3_accessible_query("read", table) & \
(table.send_ref.belongs(send_refs)) & \
(table.date != None)
recv_rows = db(query).select(table.date,
table.send_ref,
#table.comments,
)
for row in rows:
send_date = row.date
if send_date is None:
# Can't put on Timeline
continue
send_ref = row.send_ref
if send_ref is not None:
recv_row = recv_rows.find(lambda rrow: rrow.send_ref == send_ref).first()
if recv_row is None:
recv_date = send_date
else:
recv_date = recv_row.date
else:
recv_date = send_date
if send_date < tl_start:
tl_start = send_date
if recv_date > tl_end:
tl_end = recv_date
send_date = send_date.isoformat()
recv_date = recv_date.isoformat()
# @ToDo: Build better Caption rather than just using raw Comments
caption = description = row.comments or ""
link = URL(args = [row.id])
# Append to events
eappend({"start": send_date,
"end": recv_date,
"title": send_ref,
"caption": caption,
"description": description or "",
"link": link,
# @ToDo: Colour based on Category (More generically: Resource or Resource Type)
# "color" : "blue",
})
else:
table = db.inv_send
query = (table.deleted == False) & \
current.auth.s3_accessible_query("read", table) & \
(table.send_ref.belongs(send_refs)) & \
(table.date != None)
send_rows = db(query).select(table.date,
table.send_ref,
#table.comments,
)
for row in rows:
recv_date = row.date
if recv_date is None:
# Can't put on Timeline
continue
send_ref = row.send_ref
if send_ref is not None:
send_row = send_rows.find(lambda srow: srow.send_ref == send_ref).first()
if send_row is None:
send_date = recv_date
else:
send_date = send_row.date
else:
send_date = recv_date
send_ref = row.recv_ref
if send_date < tl_start:
tl_start = send_date
if recv_date > tl_end:
tl_end = recv_date
send_date = send_date.isoformat()
recv_date = recv_date.isoformat()
# @ToDo: Build better Caption rather than just using raw Comments
caption = description = row.comments or ""
link = URL(args = [row.id])
# Append to events
eappend({"start": send_date,
"end": recv_date,
"title": send_ref,
"caption": caption,
"description": description or "",
"link": link,
# @ToDo: Colour based on Category (More generically: Resource or Resource Type)
# "color" : "blue",
})
if len(events) == 0:
response.warning = T("No suitable data found")
data["events"] = events
data = json.dumps(data, separators=SEPARATORS)
code = "".join((
'''S3.timeline.data=''', data, '''
S3.timeline.tl_start="''', tl_start.isoformat(), '''"
S3.timeline.tl_end="''', tl_end.isoformat(), '''"
S3.timeline.now="''', now.isoformat(), '''"
'''))
# Configure our code in static/scripts/S3/s3.timeline.js
s3.js_global.append(code)
# Create the DIV
item = DIV(_id = "s3timeline",
_class = "s3-timeline",
)
output = {"item": item}
# Maintain RHeader for consistency
if "rheader" in attr:
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = T("Shipments Timeline")
response.view = "timeline.html"
return output
# =============================================================================
def inv_tabs(r):
"""
Add an expandable set of Tabs for a Site's Inventory Tasks
@ToDo: Make these Expand/Contract without a server-side call
"""
settings = current.deployment_settings
if settings.get_org_site_inv_req_tabs():
if settings.has_module("inv") and \
current.auth.s3_has_permission("read", "inv_inv_item", c="inv"):
T = current.T
s3 = current.session.s3
collapse_tabs = settings.get_inv_collapse_tabs()
tablename = s3_rheader_resource(r)[0]
if collapse_tabs and not (tablename == "inv_warehouse"):
# Test if the tabs are collapsed
show_collapse = True
show_inv = r.get_vars.show_inv
if show_inv == "True":
show_inv = True
elif show_inv == "False":
show_inv = False
else:
show_inv = None
if show_inv == True or show_inv == False:
if not s3.show_inv:
s3.show_inv = Storage()
s3.show_inv["%s_%s" % (r.name, r.id)] = show_inv
elif s3.show_inv:
show_inv = s3.show_inv.get("%s_%s" % (r.name, r.id))
else:
show_inv = False
else:
show_inv = True
show_collapse = False
if show_inv:
recv_label = settings.get_inv_recv_tab_label()
send_label = settings.get_inv_send_tab_label()
tabs = [(T("Stock"), "inv_item"),
#(T("Incoming"), "incoming/"),
(T(recv_label), "recv"),
(T(send_label), "send"),
]
if settings.has_module("proc"):
tabs.append((T("Planned Procurements"), "plan"))
if show_collapse:
tabs.append(("- %s" % T("Warehouse"), None, {"show_inv": "False"}))
else:
tabs = [("+ %s" % T("Warehouse"), "inv_item", {"show_inv": "True"}),
]
return tabs
return []
# =============================================================================
def inv_rheader(r):
""" Resource Header for Warehouses and Inventory Items """
if r.representation != "html" or r.method == "import":
# RHeaders only used in interactive views
return None
# Need to use this format as otherwise req_match?viewing=org_office.x
# doesn't have an rheader
tablename, record = s3_rheader_resource(r)
if not record:
# List or Create form: rheader makes no sense here
return None
T = current.T
s3db = current.s3db
table = s3db.table(tablename)
rheader = None
if tablename == "inv_warehouse":
# Tabs
tabs = [(T("Basic Details"), None),
#(T("Contact Data"), "contact"),
]
permit = current.auth.s3_has_permission
settings = current.deployment_settings
if settings.has_module("hrm"):
STAFF = settings.get_hrm_staff_label()
tabs.append((STAFF, "human_resource"))
if permit("create", "hrm_human_resource_site") and \
permit("update", tablename, r.id):
tabs.append((T("Assign %(staff)s") % {"staff": STAFF}, "assign"))
if settings.has_module("asset") and permit("read", "asset_asset"):
tabs.insert(6, (T("Assets"), "asset"))
tabs = tabs + inv_tabs(r)
if settings.has_module("req"):
tabs = tabs + s3db.req_tabs(r)
tabs.append((T("Attachments"), "document"))
# Fields
rheader_fields = [["name", "organisation_id", "email"],
["location_id", "phone1"],
]
rheader = S3ResourceHeader(rheader_fields, tabs)
rheader_fields, rheader_tabs = rheader(r, table=table, record=record)
# Inject logo
logo = s3db.org_organisation_logo(record.organisation_id)
if logo:
rheader = DIV(TABLE(TR(TD(logo),
TD(rheader_fields)
)))
else:
rheader = DIV(rheader_fields)
rheader.append(rheader_tabs)
elif tablename == "inv_inv_item":
# Tabs
tabs = [(T("Details"), None),
(T("Track Shipment"), "track_movement/"),
]
rheader_tabs = DIV(s3_rheader_tabs(r, tabs))
# Header
rheader = DIV(
TABLE(
TR(TH("%s: " % table.item_id.label),
table.item_id.represent(record.item_id),
TH("%s: " % table.item_pack_id.label),
table.item_pack_id.represent(record.item_pack_id),
),
TR(TH("%s: " % table.site_id.label),
TD(table.site_id.represent(record.site_id),
_colspan = 3,
),
),
), rheader_tabs)
elif tablename == "inv_kitting":
# Tabs
tabs = [(T("Details"), None),
(T("Pick List"), "item"),
]
rheader_tabs = DIV(s3_rheader_tabs(r, tabs))
# Header
rheader = DIV(
TABLE(
TR(TH("%s: " % table.req_ref.label),
TD(table.req_ref.represent(record.req_ref),
_colspan = 3,
),
),
TR(TH("%s: " % table.item_id.label),
table.item_id.represent(record.item_id),
TH("%s: " % table.item_pack_id.label),
table.item_pack_id.represent(record.item_pack_id),
TH("%s: " % table.quantity.label),
table.quantity.represent(record.quantity),
),
TR(TH("%s: " % table.site_id.label),
TD(table.site_id.represent(record.site_id),
_colspan = 3,
),
),
TR(TH("%s: " % table.repacked_id.label),
TD(table.repacked_id.represent(record.repacked_id),
_colspan = 3,
),
),
TR(TH("%s: " % table.date.label),
TD(table.date.represent(record.date),
_colspan = 3,
),
),
), rheader_tabs)
elif tablename == "inv_track_item":
# Tabs
tabs = [(T("Details"), None),
(T("Track Shipment"), "inv_item/"),
]
rheader_tabs = DIV(s3_rheader_tabs(r, tabs))
# Get site data
table = s3db.inv_inv_item
irecord = current.db(table.id == record.send_inv_item_id).select(
table.site_id,
limitby = (0, 1)
).first()
# Header
if irecord:
rheader = DIV(
TABLE(
TR(TH("%s: " % table.item_id.label),
table.item_id.represent(record.item_id),
TH("%s: " % table.item_pack_id.label),
table.item_pack_id.represent(record.item_pack_id),
),
TR(TH( "%s: " % table.site_id.label),
TD(table.site_id.represent(irecord.site_id),
_colspan = 3,
),
),
), rheader_tabs)
else:
rheader = DIV(
TABLE(
TR(TH("%s: " % table.item_id.label),
table.item_id.represent(record.item_id),
TH("%s: " % table.item_pack_id.label),
table.item_pack_id.represent(record.item_pack_id),
),
), rheader_tabs)
# Build footer
inv_rfooter(r, record)
return rheader
# =============================================================================
def inv_rfooter(r, record):
""" Resource Footer for Warehouses and Inventory Items """
if "site_id" not in record:
return
if (r.component and r.component.name == "inv_item"):
T = current.T
rfooter = TAG[""]()
component_id = r.component_id
if not current.deployment_settings.get_inv_direct_stock_edits() and \
current.auth.s3_has_permission("update", "inv_warehouse", r.id):
if component_id:
asi_btn = A(T("Adjust Stock Item"),
_href = URL(c = "inv",
f = "adj",
args = ["create"],
vars = {"site": record.site_id,
"item": component_id,
},
),
_class = "action-btn"
)
rfooter.append(asi_btn)
else:
as_btn = A(T("Adjust Stock"),
_href = URL(c = "inv",
f = "adj",
args = ["create"],
vars = {"site": record.site_id},
),
_class = "action-btn"
)
rfooter.append(as_btn)
if component_id:
ts_btn = A(T("Track Shipment"),
_href = URL(c = "inv",
f = "track_movement",
vars = {"viewing": "inv_inv_item.%s" % component_id},
),
_class = "action-btn"
)
rfooter.append(ts_btn)
current.response.s3.rfooter = rfooter
# =============================================================================
def inv_recv_crud_strings():
"""
CRUD Strings for inv_recv which need to be visible to menus without a
model load
"""
T = current.T
if current.deployment_settings.get_inv_shipment_name() == "order":
#recv_id_label = T("Order")
ADD_RECV = T("Add Order")
current.response.s3.crud_strings["inv_recv"] = Storage(
label_create = ADD_RECV,
title_display = T("Order Details"),
title_list = T("Orders"),
title_update = T("Edit Order"),
label_list_button = T("List Orders"),
label_delete_button = T("Delete Order"),
msg_record_created = T("Order Created"),
msg_record_modified = T("Order updated"),
msg_record_deleted = T("Order canceled"),
msg_list_empty = T("No Orders registered")
)
else:
#recv_id_label = T("Receive Shipment")
ADD_RECV = T("Receive New Shipment")
current.response.s3.crud_strings["inv_recv"] = Storage(
label_create = ADD_RECV,
title_display = T("Received Shipment Details"),
title_list = T("Received/Incoming Shipments"),
title_update = T("Shipment to Receive"),
label_list_button = T("List Received/Incoming Shipments"),
label_delete_button = T("Delete Received Shipment"),
msg_record_created = T("Shipment Created"),
msg_record_modified = T("Received Shipment updated"),
msg_record_deleted = T("Received Shipment canceled"),
msg_list_empty = T("No Received Shipments")
)
return
# =============================================================================
def inv_send_rheader(r):
""" Resource Header for Send """
if r.representation == "html" and r.name == "send":
record = r.record
if record:
db = current.db
s3db = current.s3db
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
tabs = [(T("Edit Details"), None),
(T("Items"), "track_item"),
]
if settings.get_inv_document_filing():
tabs.append((T("Documents"), "document"))
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
stable = s3db.org_site
send_id = record.id
status = record.status
site_id = record.site_id
if site_id:
site = db(stable.site_id == site_id).select(stable.organisation_id,
stable.instance_type,
limitby = (0, 1)
).first()
org_id = site.organisation_id
logo = s3db.org_organisation_logo(org_id) or ""
instance_table = s3db[site.instance_type]
if "phone1" in instance_table.fields:
site = db(instance_table.site_id == site_id).select(instance_table.phone1,
instance_table.phone2,
limitby = (0, 1)
).first()
phone1 = site.phone1
phone2 = site.phone2
else:
phone1 = None
phone2 = None
else:
org_id = None
logo = ""
phone1 = None
phone2 = None
to_site_id = record.to_site_id
if to_site_id:
site = db(stable.site_id == to_site_id).select(stable.location_id,
limitby = (0, 1)
).first()
address = s3db.gis_LocationRepresent(address_only=True)(site.location_id)
else:
address = current.messages["NONE"]
shipment_details = TABLE(
TR(TD(T(settings.get_inv_send_form_name().upper()),
_colspan=2, _class="pdf_title"),
TD(logo, _colspan=2),
),
TR(TH("%s: " % table.status.label),
table.status.represent(status),
),
TR(TH("%s: " % table.send_ref.label),
TD(table.send_ref.represent(record.send_ref)),
TH("%s: " % table.req_ref.label),
TD(table.req_ref.represent(record.req_ref)),
),
TR(TH("%s: " % table.date.label),
table.date.represent(record.date),
TH("%s: " % table.delivery_date.label),
table.delivery_date.represent(record.delivery_date),
),
TR(TH("%s: " % table.to_site_id.label),
table.to_site_id.represent(record.to_site_id),
TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id),
),
TR(TH("%s: " % T("Address")),
TD(address, _colspan=3),
),
TR(TH("%s: " % table.transported_by.label),
table.transported_by.represent(record.transported_by),
TH("%s: " % table.transport_ref.label),
table.transport_ref.represent(record.transport_ref),
),
TR(TH("%s: " % table.sender_id.label),
table.sender_id.represent(record.sender_id),
TH("%s: " % table.recipient_id.label),
table.recipient_id.represent(record.recipient_id),
),
TR(TH("%s: " % T("Complete? Please call")),
phone1 or "",
TH("%s: " % T("Problems? Please call")),
phone2 or phone1 or "",
),
TR(TH("%s: " % table.comments.label),
TD(record.comments or "", _colspan=3)
)
)
# Find out how many inv_track_items we have for this send record
tracktable = s3db.inv_track_item
query = (tracktable.send_id == send_id) & \
(tracktable.deleted == False)
#cnt = db(query).count()
cnt = db(query).select(tracktable.id,
limitby = (0, 1)
).first()
if cnt:
cnt = 1
else:
cnt = 0
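# cnt is only used as an existence flag (0/1) here, hence the limitby rather than a full count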
action = DIV()
#rSubdata = TABLE()
rfooter = TAG[""]()
if status == SHIP_STATUS_IN_PROCESS:
if current.auth.s3_has_permission("update",
"inv_send",
record_id = record.id):
if cnt > 0:
action.append(A(T("Send Shipment"),
_href = URL(f = "send_process",
args = [record.id]
),
_id = "send_process",
_class = "action-btn",
)
)
s3.jquery_ready.append('''S3.confirmClick("#send_process","%s")''' \
% T("Do you want to send this shipment?"))
#if not r.component and not r.method == "form":
# ritable = s3db.req_req_item
# rcitable = s3db.req_commit_item
# query = (tracktable.send_id == record.id) & \
# (rcitable.req_item_id == tracktable.req_item_id) & \
# (tracktable.req_item_id == ritable.id) & \
# (tracktable.deleted == False)
# records = db(query).select()
# for record in records:
# rSubdata.append(TR(TH("%s: " % ritable.item_id.label),
# ritable.item_id.represent(record.req_req_item.item_id),
# TH("%s: " % rcitable.quantity.label),
# record.req_commit_item.quantity,
# ))
elif status == SHIP_STATUS_RETURNING:
if cnt > 0:
action.append(A(T("Complete Returns"),
_href = URL(c = "inv",
f = "return_process",
args = [record.id]
),
_id = "return_process",
_class = "action-btn"
)
)
s3.jquery_ready.append('''S3.confirmClick("#return_process","%s")''' \
% T("Do you want to complete the return process?") )
else:
msg = T("You need to check all item quantities before you can complete the return process")
rfooter.append(SPAN(msg))
elif status != SHIP_STATUS_CANCEL:
if status == SHIP_STATUS_SENT:
jappend = s3.jquery_ready.append
s3_has_permission = current.auth.s3_has_permission
if s3_has_permission("update",
"inv_send",
record_id=record.id):
action.append(A(T("Manage Returns"),
_href = URL(c = "inv",
f = "send_returns",
args = [record.id],
vars = None,
),
_id = "send-return",
_class = "action-btn",
_title = T("Only use this button to accept back into stock some items that were returned from a delivery to beneficiaries who do not record the shipment details directly into the system")
)
)
jappend('''S3.confirmClick("#send-return","%s")''' % \
T("Confirm that some items were returned from a delivery to beneficiaries and they will be accepted back into stock."))
action.append(A(T("Confirm Shipment Received"),
_href = URL(f = "send",
args = [record.id],
vars = {"received": 1},
),
_id = "send-receive",
_class = "action-btn",
_title = T("Only use this button to confirm that the shipment has been received by a destination which will not record the shipment directly into the system")
)
)
jappend('''S3.confirmClick("#send-receive","%s")''' % \
T("Confirm that the shipment has been received by a destination which will not record the shipment directly into the system and confirmed as received.") )
if s3_has_permission("delete",
"inv_send",
record_id=record.id):
action.append(A(T("Cancel Shipment"),
_href = URL(c = "inv",
f = "send_cancel",
args = [record.id]
),
_id = "send-cancel",
_class = "action-btn"
)
)
jappend('''S3.confirmClick("#send-cancel","%s")''' \
% T("Do you want to cancel this sent shipment? The items will be returned to the Warehouse. This action CANNOT be undone!") )
if not r.method == "form":
# msg = ""
# if cnt == 1:
# msg = T("One item is attached to this shipment")
# elif cnt > 1:
# msg = T("%s items are attached to this shipment") % cnt
# shipment_details.append(TR(TH(action, _colspan=2), TD(msg)))
shipment_details.append(TR(TH(action, _colspan=2)))
s3.rfooter = rfooter
rheader = DIV(shipment_details,
rheader_tabs,
#rSubdata
)
return rheader
return None
# ---------------------------------------------------------------------
def inv_send_pdf_footer(r):
"""
Footer for the Waybill
"""
if r.record:
T = current.T
footer = DIV(TABLE(TR(TH(T("Commodities Loaded")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
TH(T("Location (Site)")),
TH(T("Condition")),
),
TR(TD(T("Loaded By")),
TD(),
TD(),
TD(),
TD(),
TD(),
TD(),
),
TR(TD(T("Transported By")),
TD(),
TD(),
TD(),
TD(),
TD(),
TD(),
),
TR(TH(T("Reception")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
TH(T("Location (Site)")),
TH(T("Condition")),
),
TR(TD(T("Received By")),
TD(),
TD(),
TD(),
TD(),
TD(),
TD(),
),
))
return footer
return None
# =============================================================================
def inv_recv_rheader(r):
""" Resource Header for Receiving """
if r.representation == "html" and r.name == "recv":
record = r.record
if record:
T = current.T
s3db = current.s3db
settings = current.deployment_settings
tabs = [(T("Edit Details"), None),
(T("Items"), "track_item"),
]
if settings.get_inv_document_filing():
tabs.append((T("Documents"), "document"))
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
tracktable = s3db.inv_track_item
recv_id = record.id
site_id = record.site_id
stable = s3db.org_site
site = current.db(stable.site_id == site_id).select(stable.organisation_id,
limitby = (0, 1)
).first()
try:
org_id = site.organisation_id
except AttributeError:
org_id = None
logo = s3db.org_organisation_logo(org_id)
shipment_details = TABLE(
TR(TD(T(settings.get_inv_recv_form_name()),
_colspan = 2,
_class = "pdf_title",
),
TD(logo, _colspan=2),
),
TR(TH("%s: " % table.recv_ref.label),
TD(table.recv_ref.represent(record.recv_ref))
),
TR(TH("%s: " % table.status.label),
table.status.represent(record.status),
),
TR(TH("%s: " % table.eta.label),
table.eta.represent(record.eta),
TH("%s: " % table.date.label),
table.date.represent(record.date),
),
TR(TH("%s: " % table.from_site_id.label),
table.from_site_id.represent(record.from_site_id),
TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id),
),
TR(TH("%s: " % table.sender_id.label),
s3_fullname(record.sender_id),
TH("%s: " % table.recipient_id.label),
s3_fullname(record.recipient_id),
),
TR(TH("%s: " % table.send_ref.label),
table.send_ref.represent(record.send_ref),
TH("%s: " % table.recv_ref.label),
table.recv_ref.represent(record.recv_ref),
),
TR(TH("%s: " % table.comments.label),
TD(record.comments or "", _colspan=3),
),
)
rfooter = TAG[""]()
action = DIV()
# Find out how many inv_track_items we have for this recv record
query = (tracktable.recv_id == recv_id) & \
(tracktable.deleted == False)
cnt = current.db(query).count()
if record.status == SHIP_STATUS_SENT or \
record.status == SHIP_STATUS_IN_PROCESS:
if current.auth.s3_has_permission("update",
"inv_recv",
record_id = record.id):
if cnt > 0:
action.append(A(T("Receive Shipment"),
_href = URL(c = "inv",
f = "recv_process",
args = [record.id]
),
_id = "recv_process",
_class = "action-btn"
))
recv_btn_confirm = SCRIPT("S3.confirmClick('#recv_process', '%s')"
% T("Do you want to receive this shipment?") )
rfooter.append(recv_btn_confirm)
else:
msg = T("You need to check all item quantities and allocate to bins before you can receive the shipment")
rfooter.append(SPAN(msg))
# FB: Removed as serves no useful purpose & AusRC complained about it
#else:
# if record.status == SHIP_STATUS_RECEIVED:
# if current.auth.s3_has_permission("delete",
# "inv_recv",
# record_id=record.id):
# action.append(A(T("Cancel Shipment"),
# _href = URL(c = "inv",
# f = "recv_cancel",
# args = [record.id]
# ),
# _id = "recv_cancel",
# _class = "action-btn"
# ))
# cancel_btn_confirm = SCRIPT("S3.confirmClick('#recv_cancel', '%s')"
# % T("Do you want to cancel this received shipment? The items will be removed from the Warehouse. This action CANNOT be undone!") )
# rfooter.append(cancel_btn_confirm)
msg = ""
if cnt == 1:
msg = T("This shipment contains one line item")
elif cnt > 1:
msg = T("This shipment contains %s items") % cnt
shipment_details.append(TR(TH(action, _colspan=2), TD(msg)))
current.response.s3.rfooter = rfooter
rheader = DIV(shipment_details,
rheader_tabs,
)
return rheader
return None
# ---------------------------------------------------------------------
def inv_recv_pdf_footer(r):
"""
"""
record = r.record
if record:
T = current.T
footer = DIV(TABLE(TR(TH(T("Delivered By")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
),
TR(TD(),
TD(),
TD(),
TD(),
TD(),
),
TR(TH(T("Received By")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature / Stamp")),
),
TR(TD(),
TD(),
TD(),
TD(),
TD(),
),
))
return footer
return None
# =============================================================================
class InventoryAdjustModel(DataModel):
"""
A model to manage Adjustments to Inventory stock levels
- Stock Adjustments / Stock Counts
- Adjustment Items
"""
names = ("inv_adj",
"inv_adj_item",
"inv_adj_item_id",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
settings = current.deployment_settings
track_pack_values = settings.get_inv_track_pack_values()
organisation_id = self.org_organisation_id
org_site_represent = self.org_site_represent
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# ---------------------------------------------------------------------
# Adjustments
#
adjust_type = {0 : T("Shipment"),
1 : T("Inventory"),
}
adjust_status = {0 : T("In Process"),
1 : T("Complete"),
}
tablename = "inv_adj"
define_table(tablename,
super_link("doc_id", "doc_entity"),
self.pr_person_id(name = "adjuster_id",
label = T("Actioning officer"),
ondelete = "RESTRICT",
default = auth.s3_logged_in_person(),
comment = self.pr_person_comment(child="adjuster_id")
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
default = auth.user.site_id if auth.is_logged_in() else None,
empty = False,
instance_types = auth.org_site_types,
label = T(current.deployment_settings.get_inv_facility_label()),
not_filterby = "obsolete",
not_filter_opts = (True,),
readable = True,
writable = True,
represent = org_site_represent,
updateable = True,
#widget = S3SiteAutocompleteWidget(),
),
s3_date("adjustment_date",
default = "now",
writable = False
),
Field("status", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(adjust_status)),
represent = represent_option(adjust_status),
default = 0,
label = T("Status"),
writable = False
),
Field("category", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(adjust_type)),
represent = represent_option(adjust_type),
default = 1,
label = T("Type"),
writable = False,
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
super_entity = "doc_entity",
onaccept = self.inv_adj_onaccept,
create_next = URL(args = ["[id]", "adj_item"]),
)
# Components
self.add_components(tablename,
inv_adj_item = "adj_id",
)
# Reusable Field
adj_id = S3ReusableField("adj_id", "reference %s" % tablename,
label = T("Inventory Adjustment"),
ondelete = "RESTRICT",
represent = self.inv_adj_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_adj.id",
self.inv_adj_represent,
orderby = "inv_adj.adjustment_date",
sort = True,
)),
sortby = "date",
)
adjust_reason = {0 : T("Unknown"),
1 : T("None"),
2 : T("Lost"),
3 : T("Damaged"),
4 : T("Expired"),
5 : T("Found"),
6 : T("Transfer Ownership"),
7 : T("Issued without Record"),
8 : T("Distributed without Record"),
}
# CRUD strings
if settings.get_inv_stock_count():
crud_strings["inv_adj"] = Storage(
label_create = T("New Stock Count"),
title_display = T("Stock Count Details"),
title_list = T("Stock Counts"),
title_update = T("Edit Stock Count"),
label_list_button = T("List Stock Counts"),
label_delete_button = T("Delete Stock Count"),
msg_record_created = T("Stock Count created"),
msg_record_modified = T("Stock Count modified"),
msg_record_deleted = T("Stock Count deleted"),
msg_list_empty = T("No stock counts have been done"))
else:
crud_strings["inv_adj"] = Storage(
label_create = T("New Stock Adjustment"),
title_display = T("Stock Adjustment Details"),
title_list = T("Stock Adjustments"),
title_update = T("Edit Adjustment"),
label_list_button = T("List Stock Adjustments"),
label_delete_button = T("Delete Stock Adjustment"),
msg_record_created = T("Adjustment created"),
msg_record_modified = T("Adjustment modified"),
msg_record_deleted = T("Adjustment deleted"),
msg_list_empty = T("No stock adjustments have been done"))
# ---------------------------------------------------------------------
# Adjustment Items
#
inv_item_status_opts = settings.get_inv_item_status()
tablename = "inv_adj_item"
define_table(tablename,
# Original inventory item
self.inv_item_id(ondelete = "RESTRICT",
readable = False,
writable = False),
self.supply_item_id(
ondelete = "RESTRICT"
),
self.supply_item_pack_id(
ondelete = "SET NULL"
),
Field("old_quantity", "double", notnull=True,
default = 0,
label = T("Original Quantity"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
writable = False,
),
Field("new_quantity", "double",
label = T("Revised Quantity"),
represent = self.qnty_adj_repr,
requires = IS_FLOAT_AMOUNT(minimum=0.0),
),
Field("reason", "integer",
default = 1,
label = T("Reason"),
represent = represent_option(adjust_reason),
requires = IS_IN_SET(adjust_reason),
writable = False,
),
Field("old_pack_value", "double",
label = T("Original Value per Pack"),
readable = track_pack_values,
writable = track_pack_values,
),
Field("new_pack_value", "double",
label = T("Revised Value per Pack"),
readable = track_pack_values,
writable = track_pack_values,
),
s3_currency(readable = track_pack_values,
writable = track_pack_values),
Field("old_status", "integer",
default = 0,
label = T("Current Status"),
represent = represent_option(inv_item_status_opts),
requires = IS_EMPTY_OR(IS_IN_SET(inv_item_status_opts)),
writable = False,
),
Field("new_status", "integer",
default = 0,
label = T("Revised Status"),
represent = represent_option(inv_item_status_opts),
requires = IS_EMPTY_OR(IS_IN_SET(inv_item_status_opts)),
),
s3_date("expiry_date",
label = T("Expiry Date"),
),
Field("bin", length=16,
label = T("Bin"),
requires = IS_LENGTH(16),
# @ToDo:
#widget = S3InvBinWidget("inv_adj_item")
),
# Organisation that owned this item before
organisation_id("old_owner_org_id",
label = T("Current Owned By (Organization/Branch)"),
ondelete = "SET NULL",
writable = False,
comment = None,
),
# Organisation that owns this item now
organisation_id("new_owner_org_id",
label = T("Transfer Ownership To (Organization/Branch)"),
ondelete = "SET NULL",
),
adj_id(),
s3_comments(),
*s3_meta_fields())
# Reusable Field
adj_item_id = S3ReusableField("adj_item_id", "reference %s" % tablename,
label = T("Inventory Adjustment Item"),
ondelete = "RESTRICT",
represent = self.inv_adj_item_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_adj_item.id",
self.inv_adj_item_represent,
orderby = "inv_adj_item.item_id",
sort = True,
)
),
sortby = "item_id",
)
# CRUD strings
crud_strings["inv_adj_item"] = Storage(
label_create = T("Add Item to Stock"),
title_display = T("Item Details"),
title_list = T("Items in Stock"),
title_update = T("Adjust Item Quantity"),
label_list_button = T("List Items in Stock"),
#label_delete_button = T("Remove Item from Stock"), # This should be forbidden - set qty to zero instead
msg_record_created = T("Item added to stock adjustment"),
msg_record_modified = T("Item quantity adjusted"),
#msg_record_deleted = T("Item removed from Stock"), # This should be forbidden - set qty to zero instead
msg_list_empty = T("No items currently in stock"))
return {"inv_adj_item_id": adj_item_id,
}
# -------------------------------------------------------------------------
@staticmethod
def qnty_adj_repr(value):
"""
Make unadjusted quantities show up in bold
"""
if value is None:
# We want the word "None" here, not just a bold dash
return B(T("None"))
else:
return IS_FLOAT_AMOUNT.represent(value, precision=2)
# ---------------------------------------------------------------------
@staticmethod
def inv_adj_onaccept(form):
"""
When an adjustment record is created and it is of type inventory
then an adj_item record for each inv_inv_item in the site will be
created. If needed, extra adj_item records can be created later.
"""
record_id = form.vars.id
db = current.db
inv_item_table = db.inv_inv_item
adjitemtable = db.inv_adj_item
adjtable = db.inv_adj
adj_rec = adjtable[record_id]
if adj_rec.category == 1:
site_id = form.vars.site_id
# Only get inv. item with a positive quantity
query = (inv_item_table.site_id == site_id) & \
(inv_item_table.quantity > 0) & \
(inv_item_table.deleted == False)
row = db(query).select()
for inv_item in row:
# add an adjustment item record
adjitemtable.insert(reason = 0,
adj_id = record_id,
inv_item_id = inv_item.id, # original source inv_item
item_id = inv_item.item_id, # the supply item
item_pack_id = inv_item.item_pack_id,
old_quantity = inv_item.quantity,
currency = inv_item.currency,
old_status = inv_item.status,
new_status = inv_item.status,
old_pack_value = inv_item.pack_value,
new_pack_value = inv_item.pack_value,
expiry_date = inv_item.expiry_date,
bin = inv_item.bin,
old_owner_org_id = inv_item.owner_org_id,
new_owner_org_id = inv_item.owner_org_id,
)
# ---------------------------------------------------------------------
@staticmethod
def inv_adj_represent(record_id, row=None, show_link=True):
"""
Represent an Inventory Adjustment
"""
if row:
table = current.db.inv_adj
elif not record_id:
return current.messages["NONE"]
else:
db = current.db
table = db.inv_adj
row = db(table.id == record_id).select(table.adjustment_date,
table.adjuster_id,
limitby = (0, 1),
).first()
try:
reprstr = "%s - %s" % (
table.adjuster_id.represent(row.adjuster_id),
table.adjustment_date.represent(row.adjustment_date),
)
except AttributeError:
# Record not found, or not all required fields in row
return current.messages.UNKNOWN_OPT
else:
if show_link:
return SPAN(reprstr)
else:
return reprstr
# ---------------------------------------------------------------------
@staticmethod
def inv_adj_item_represent(record_id, row=None, show_link=True):
"""
Represent an Inventory Adjustment Item
"""
if row:
table = current.db.inv_adj_item
elif not record_id:
return current.messages["NONE"]
else:
db = current.db
table = db.inv_adj_item
row = db(table.id == record_id).select(table.item_id,
table.old_quantity,
table.new_quantity,
table.item_pack_id,
limitby = (0, 1),
).first()
changed_quantity = 0
try:
if row.new_quantity and row.old_quantity:
changed_quantity = row.new_quantity - row.old_quantity
item_id = row.item_id
pack_id = row.item_pack_id
except AttributeError:
# Item not found, or not all required fields in row
return current.messages.UNKNOWN_OPT
else:
reprstr = "%s:%s %s" % (
table.item_id.represent(item_id,
show_link = show_link,
),
changed_quantity,
table.item_pack_id.represent(pack_id),
)
if show_link:
return SPAN(reprstr)
else:
return reprstr
# =============================================================================
def inv_item_total_weight(row):
"""
Compute the total weight of an inventory item (Field.Method)
Args:
row: the Row
"""
try:
inv_item = getattr(row, "inv_inv_item")
except AttributeError:
inv_item = row
try:
supply_item = getattr(row, "supply_item")
weight = supply_item.weight
except AttributeError:
# Need to load the supply item
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
stable = current.s3db.supply_item
query = (itable.id == inv_item.id) & \
(itable.item_id == stable.id)
supply_item = current.db(query).select(stable.weight,
limitby = (0, 1),
).first()
weight = supply_item.weight if supply_item else None
if weight is None:
return current.messages["NONE"]
try:
quantity = inv_item.quantity
except AttributeError:
# Need to reload the inv item
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
query = (itable.id == inv_item.id)
inv_item = current.db(query).select(itable.quantity,
limitby = (0, 1),
).first()
quantity = inv_item.quantity
try:
supply_item_pack = getattr(row, "supply_item_pack")
pack_quantity = supply_item_pack.quantity
except AttributeError:
# Need to load the supply item pack
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
ptable = current.s3db.supply_item_pack
query = (itable.id == inv_item.id) & \
(itable.item_pack_id == ptable.id)
supply_item_pack = current.db(query).select(ptable.quantity,
limitby = (0, 1),
).first()
pack_quantity = supply_item_pack.quantity
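# Total weight = number of packs in stock * units per pack * weight per unit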
return round(quantity * pack_quantity * weight, 3)
# -----------------------------------------------------------------------------
def inv_item_total_volume(row):
"""
Compute the total volume of an inventory item (Field.Method)
Args:
row: the Row
"""
try:
inv_item = getattr(row, "inv_inv_item")
except AttributeError:
inv_item = row
try:
supply_item = getattr(row, "supply_item")
volume = supply_item.volume
except AttributeError:
# Need to load the supply item
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
stable = current.s3db.supply_item
query = (itable.id == inv_item.id) & \
(itable.item_id == stable.id)
supply_item = current.db(query).select(stable.volume,
limitby = (0, 1),
).first()
volume = supply_item.volume if supply_item else None
if volume is None:
return current.messages["NONE"]
try:
quantity = inv_item.quantity
except AttributeError:
# Need to reload the inv item
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
query = (itable.id == inv_item.id)
inv_item = current.db(query).select(itable.quantity,
limitby = (0, 1),
).first()
quantity = inv_item.quantity
try:
supply_item_pack = getattr(row, "supply_item_pack")
pack_quantity = supply_item_pack.quantity
except AttributeError:
# Need to load the supply item pack
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
ptable = current.s3db.supply_item_pack
query = (itable.id == inv_item.id) & \
(itable.item_pack_id == ptable.id)
supply_item_pack = current.db(query).select(ptable.quantity,
limitby = (0, 1),
).first()
pack_quantity = supply_item_pack.quantity
return round(quantity * pack_quantity * volume, 2)
# -----------------------------------------------------------------------------
def inv_stock_movements(resource, selectors, orderby):
"""
Extraction method for stock movements report
Args:
resource: the CRUDResource (inv_inv_item)
selectors: the field selectors
orderby: orderby expression
Note:
transactions can be filtered by earliest/latest date
using a DateFilter with selector="_transaction.date"
TODO does not take manual stock adjustments into account
TODO does not represent sites or Waybill/GRN as
links (breaks PDF export, but otherwise it's useful)
"""
# Extract the stock item data
selectors = ["id",
"site_id",
"site_id$name",
"item_id$item_category_id",
"bin",
"item_id$name",
"quantity",
]
data = resource.select(selectors,
limit = None,
orderby = orderby,
raw_data = True,
represent = True,
)
# Get all stock item IDs
inv_item_ids = [row["_row"]["inv_inv_item.id"] for row in data.rows]
# Earliest and latest date of the report (read from filter)
convert = S3TypeConverter.convert
request = current.request
get_vars_get = request.get_vars.get
dtstr = get_vars_get("_transaction.date__ge")
earliest = convert(datetime.datetime, dtstr) if dtstr else None
dtstr = get_vars_get("_transaction.date__le")
latest = convert(datetime.datetime, dtstr) if dtstr else request.utcnow
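# Default the report end date to 'now' when no upper date filter is set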
def item_dict():
""" Stock movement data per inventory item """
return {# Quantity in/out between earliest and latest date
"quantity_in": 0,
"quantity_out": 0,
# Quantity in/out after latest date
"quantity_in_after": 0,
"quantity_out_after": 0,
# Origin/destination sites
"sites": [],
# GRN/Waybill numbers
"documents": [],
}
# Dict to collect stock movement data
movements = {}
# Set of site IDs for bulk representation
all_sites = set()
s3db = current.s3db
# Incoming shipments
query = (FS("recv_inv_item_id").belongs(inv_item_ids))
if earliest:
query &= (FS("recv_id$date") >= earliest)
incoming = s3db.resource("inv_track_item", filter=query)
transactions = incoming.select(["recv_id$date",
"recv_id$from_site_id",
"recv_id$recv_ref",
"recv_inv_item_id",
"recv_quantity",
],
limit = None,
raw_data = True,
represent = True,
)
for transaction in transactions.rows:
raw = transaction["_row"]
inv_item_id = raw["inv_track_item.recv_inv_item_id"]
# Get the movement data dict for this item
if inv_item_id in movements:
item_data = movements[inv_item_id]
else:
movements[inv_item_id] = item_data = item_dict()
# Incoming quantities
quantity_in = raw["inv_track_item.recv_quantity"]
if quantity_in:
if raw["inv_recv.date"] > latest:
item_data["quantity_in_after"] += quantity_in
continue
else:
item_data["quantity_in"] += quantity_in
# Origin sites
sites = item_data["sites"]
from_site = raw["inv_recv.from_site_id"]
if from_site and from_site not in sites:
all_sites.add(from_site)
sites.append(from_site)
# GRN numbers
if raw["inv_recv.recv_ref"]:
documents = item_data["documents"]
documents.append(raw["inv_recv.recv_ref"])
# Outgoing shipments
query = (FS("send_inv_item_id").belongs(inv_item_ids))
if earliest:
query &= (FS("send_id$date") >= earliest)
outgoing = s3db.resource("inv_track_item", filter=query)
transactions = outgoing.select(["send_id$date",
"send_id$to_site_id",
"send_id$send_ref",
"send_inv_item_id",
"quantity",
],
limit = None,
raw_data = True,
represent = True,
)
for transaction in transactions.rows:
raw = transaction["_row"]
inv_item_id = raw["inv_track_item.send_inv_item_id"]
# Get the movement data dict for this item
if inv_item_id in movements:
item_data = movements[inv_item_id]
else:
movements[inv_item_id] = item_data = item_dict()
# Outgoing quantities
quantity_out = raw["inv_track_item.quantity"]
if quantity_out:
send_date = raw["inv_send.date"]
if send_date and send_date > latest:
item_data["quantity_out_after"] += quantity_out
continue
else:
item_data["quantity_out"] += quantity_out
# Destination sites
sites = item_data["sites"]
to_site = raw["inv_send.to_site_id"]
if to_site and to_site not in sites:
all_sites.add(to_site)
sites.append(to_site)
# Waybill numbers
if raw["inv_send.send_ref"]:
documents = item_data["documents"]
documents.append(raw["inv_send.send_ref"])
# Bulk-represent sites (stores the representations in represent)
represent = s3db.inv_inv_item.site_id.represent
represent.bulk(list(all_sites))
# Extend the original rows in the data dict
for row in data.rows:
raw = row["_row"]
inv_item_id = raw["inv_inv_item.id"]
if inv_item_id in movements:
item_data = movements[inv_item_id]
else:
item_data = item_dict()
# Compute original and final quantity
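# - final_quantity: stock at the latest report date (current stock with
#   post-window movements rolled back)
# - original_quantity: stock at the earliest report date (final stock with
#   in-window movements rolled back)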
total_in = item_data["quantity_in"]
total_out = item_data["quantity_out"]
current_quantity = raw["inv_inv_item.quantity"]
final_quantity = current_quantity - \
item_data["quantity_in_after"] + \
item_data["quantity_out_after"]
original_quantity = final_quantity - total_in + total_out
# Write into raw data (for aggregation)
raw["inv_inv_item.quantity"] = final_quantity
raw["inv_inv_item.quantity_in"] = total_in
raw["inv_inv_item.quantity_out"] = total_out
raw["inv_inv_item.original_quantity"] = original_quantity
# Copy into represented data (for rendering)
row["inv_inv_item.quantity"] = final_quantity
row["inv_inv_item.quantity_in"] = total_in
row["inv_inv_item.quantity_out"] = total_out
row["inv_inv_item.original_quantity"] = original_quantity
# Add sites
row["inv_inv_item.sites"] = represent.multiple(item_data["sites"],
show_link = False,
)
# Add GRN/Waybill numbers
row["inv_inv_item.documents"] = ", ".join(item_data["documents"])
# Return to S3GroupedItemsReport
return data.rows
# =============================================================================
def inv_track_item_quantity_needed(row):
"""
Quantity still needed for a track item - used in Inv Send
when an Item has come from a Request
"""
if hasattr(row, "inv_track_item"):
row = row.inv_track_item
try:
req_item_id = row.req_item_id
except AttributeError:
# not available
req_item_id = None
if not req_item_id:
return current.messages["NONE"]
s3db = current.s3db
ritable = s3db.req_req_item
siptable = s3db.supply_item_pack
query = (ritable.id == req_item_id) & \
(ritable.item_pack_id == siptable.id)
row = current.db(query).select(ritable.quantity,
ritable.quantity_transit,
ritable.quantity_fulfil,
siptable.quantity
).first()
if row:
rim = row.req_req_item
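# Needed = requested quantity minus whichever of transit/fulfilled is
# larger, scaled by the request item's pack quantity (base units)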
quantity_shipped = max(rim.quantity_transit,
rim.quantity_fulfil)
quantity_needed = (rim.quantity - quantity_shipped) * \
row.supply_item_pack.quantity
else:
return current.messages["NONE"]
return quantity_needed
# =============================================================================
def inv_send_controller():
"""
RESTful CRUD controller for inv_send
"""
T = current.T
db = current.db
s3db = current.s3db
sendtable = s3db.inv_send
tracktable = s3db.inv_track_item
iitable = s3db.inv_inv_item
request = current.request
response = current.response
s3 = response.s3
# Limit site_id to sites the user has permissions for
error_msg = T("You do not have permission for any facility to send a shipment.")
current.auth.permitted_facilities(table=sendtable, error_msg=error_msg)
# Set Validator for checking against the number of items in the warehouse
req_vars = request.vars
send_inv_item_id = req_vars.send_inv_item_id
if send_inv_item_id:
if not req_vars.item_pack_id:
req_vars.item_pack_id = db(iitable.id == send_inv_item_id).select(
iitable.item_pack_id,
limitby = (0, 1)
).first().item_pack_id
tracktable.quantity.requires = IS_AVAILABLE_QUANTITY(send_inv_item_id,
req_vars.item_pack_id,
)
def set_send_attr(status):
sendtable.send_ref.writable = False
if status == SHIP_STATUS_IN_PROCESS:
sendtable.send_ref.readable = False
else:
# Make all fields read-only
for field in sendtable.fields:
sendtable[field].writable = False
def set_track_attr(status):
# By default, make all fields read-only
for field in tracktable.fields:
tracktable[field].writable = False
# Hide some fields
tracktable.send_id.readable = False
tracktable.recv_id.readable = False
tracktable.bin.readable = False
tracktable.item_id.readable = False
tracktable.recv_quantity.readable = False
tracktable.return_quantity.readable = False
tracktable.expiry_date.readable = False
tracktable.owner_org_id.readable = False
tracktable.supply_org_id.readable = False
tracktable.adj_item_id.readable = False
if status == TRACK_STATUS_PREPARING:
# Show some fields
tracktable.send_inv_item_id.writable = True
tracktable.item_pack_id.writable = True
tracktable.quantity.writable = True
#tracktable.req_quantity.readable = True
tracktable.comments.writable = True
# Hide some fields
tracktable.currency.readable = False
tracktable.pack_value.readable = False
tracktable.item_source_no.readable = False
tracktable.inv_item_status.readable = False
elif status == TRACK_STATUS_ARRIVED:
# Shipment arrived display some extra fields at the destination
tracktable.item_source_no.readable = True
tracktable.recv_quantity.readable = True
tracktable.return_quantity.readable = True
tracktable.recv_bin.readable = True
tracktable.currency.readable = True
tracktable.pack_value.readable = True
elif status == TRACK_STATUS_RETURNING:
tracktable.return_quantity.readable = True
tracktable.return_quantity.writable = True
tracktable.currency.readable = False
tracktable.pack_value.readable = False
def prep(r):
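# Customise the request:
# - lock shipments (and their track items) once they are no longer in process
# - adjust track_item fields/list_fields according to the shipment status
# - handle the ?received confirmation for incoming shipments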
record = db(sendtable.id == r.id).select(sendtable.status,
sendtable.req_ref,
limitby = (0, 1)
).first()
if record:
status = record.status
if status != SHIP_STATUS_IN_PROCESS:
# Now that the shipment has been sent,
# lock the record so that it can't be meddled with
s3db.configure("inv_send",
create = False,
deletable = False,
editable = False,
listadd = False,
)
s3db.configure("inv_track_item",
create = False,
deletable = False,
editable = False,
listadd = False,
)
if r.component:
if r.component_name == "document":
# Simplify a little
table = s3db.doc_document
table.file.required = True
table.url.readable = table.url.writable = False
table.date.readable = table.date.writable = False
elif r.component_name == "track_item":
record = r.record
values = current.deployment_settings.get_inv_track_pack_values()
if status in (SHIP_STATUS_RECEIVED, SHIP_STATUS_CANCEL):
list_fields = ["status",
"item_id",
"item_pack_id",
"bin",
"quantity",
"recv_quantity",
"return_quantity",
"owner_org_id",
"supply_org_id",
"inv_item_status",
"comments",
]
if values:
list_fields.insert(6, "pack_value")
list_fields.insert(6, "currency")
elif status == SHIP_STATUS_RETURNING:
list_fields = ["status",
"item_id",
"item_pack_id",
"quantity",
"return_quantity",
"bin",
"owner_org_id",
"supply_org_id",
"inv_item_status",
]
if values:
list_fields.insert(4, "pack_value")
list_fields.insert(4, "currency")
else:
list_fields = ["status",
"item_id",
"item_pack_id",
"quantity",
"bin",
"owner_org_id",
"supply_org_id",
"inv_item_status",
]
if values:
list_fields.insert(5, "pack_value")
list_fields.insert(5, "currency")
if record.req_ref and r.interactive:
s3db.configure("inv_track_item",
extra_fields = ["req_item_id"],
)
tracktable.quantity_needed = \
Field.Method("quantity_needed",
inv_track_item_quantity_needed
)
list_fields.insert(3, (T("Quantity Needed"),
"quantity_needed"))
s3db.configure("inv_track_item",
list_fields = list_fields,
)
# Can only create or delete track items for a send record if the status is preparing
method = r.method
if method in ("create", "delete"):
if status != SHIP_STATUS_IN_PROCESS:
return False
if method == "delete":
return s3db.inv_track_item_deleting(r.component_id)
# Filter out Items which have Quantity 0, are Expired or in Bad condition
query = (iitable.quantity > 0) & \
((iitable.expiry_date >= r.now) | (iitable.expiry_date == None)) & \
(iitable.status == 0)
if record.get("site_id"):
# Restrict to items from this facility only
query &= (iitable.site_id == record.site_id)
tracktable.send_inv_item_id.requires = IS_ONE_OF(db(query), "inv_inv_item.id",
s3db.inv_item_represent,
#not_filterby = "quantity",
#not_filter_opts = (0,),
orderby = "inv_inv_item.id",
sort = True,
)
# Hide the values that will be copied from the inv_inv_item record
if r.component_id:
track_record = db(tracktable.id == r.component_id).select(tracktable.req_item_id,
tracktable.send_inv_item_id,
tracktable.item_pack_id,
tracktable.status,
tracktable.quantity,
limitby = (0, 1)
).first()
set_track_attr(track_record.status)
# If the track record is linked to a request item then
# the stock item has already been selected so make it read only
if track_record and track_record.get("req_item_id"):
tracktable.send_inv_item_id.writable = False
tracktable.item_pack_id.writable = False
stock_qnty = track_record.quantity
tracktable.quantity.comment = T("%(quantity)s in stock") % {"quantity": stock_qnty}
tracktable.quantity.requires = IS_AVAILABLE_QUANTITY(
track_record.send_inv_item_id,
track_record.item_pack_id,
)
# Hide the item id
tracktable.item_id.readable = False
else:
set_track_attr(TRACK_STATUS_PREPARING)
if r.interactive:
crud_strings = s3.crud_strings.inv_send
if record.status == SHIP_STATUS_IN_PROCESS:
crud_strings.title_update = \
crud_strings.title_display = T("Process Shipment to Send")
elif "site_id" in req_vars and status == SHIP_STATUS_SENT:
crud_strings.title_update = \
crud_strings.title_display = T("Review Incoming Shipment to Receive")
else:
if r.id and request.get_vars.get("received"):
# "received" must not propagate:
del request.get_vars["received"]
# Set the items to being received
# @ToDo: Check Permissions & Avoid DB updates in GETs
db(sendtable.id == r.id).update(status = SHIP_STATUS_RECEIVED)
db(tracktable.send_id == r.id).update(status = TRACK_STATUS_ARRIVED)
req_ref = record.req_ref
if req_ref:
# Update the Request Status
rtable = s3db.req_req
req_row = db(rtable.req_ref == req_ref).select(rtable.id,
limitby = (0, 1)
).first()
req_id = req_row.id if req_row else None
# Get the full list of items in the request
ritable = s3db.req_req_item
query = (ritable.req_id == req_id) & \
(ritable.deleted == False)
ritems = db(query).select(ritable.id,
ritable.item_pack_id,
ritable.quantity,
# Virtual Field
#ritable.pack_quantity,
)
# Get all Received Shipments in-system for this request
query = (sendtable.status == SHIP_STATUS_RECEIVED) & \
(sendtable.req_ref == req_ref) & \
(tracktable.send_id == r.id) & \
(tracktable.deleted == False)
sitems = db(query).select(tracktable.item_pack_id,
tracktable.quantity,
# Virtual Field
#tracktable.pack_quantity,
)
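# Sum the received quantities per pack, scaled to base units via the
# pack_quantity virtual method, for comparison against the request items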
fulfil_qty = {}
for item in sitems:
item_pack_id = item.item_pack_id
if item_pack_id in fulfil_qty:
fulfil_qty[item_pack_id] += (item.quantity * item.pack_quantity())
else:
fulfil_qty[item_pack_id] = (item.quantity * item.pack_quantity())
# The request is complete only if every requested item is fully fulfilled
complete = True
for item in ritems:
if item.item_pack_id in fulfil_qty:
quantity_fulfil = fulfil_qty[item.item_pack_id]
db(ritable.id == item.id).update(quantity_fulfil=quantity_fulfil)
req_quantity = item.quantity * item.pack_quantity()
if quantity_fulfil < req_quantity:
complete = False
else:
complete = False
# Update overall Request Status
if complete:
# REQ_STATUS_COMPLETE
db(rtable.id == req_id).update(fulfil_status=2)
else:
# REQ_STATUS_PARTIAL
db(rtable.id == req_id).update(fulfil_status=1)
response.confirmation = T("Shipment received")
# else set the inv_send attributes
elif r.id:
record = db(sendtable.id == r.id).select(sendtable.status,
limitby = (0, 1)
).first()
set_send_attr(record.status)
else:
set_send_attr(SHIP_STATUS_IN_PROCESS)
sendtable.send_ref.readable = False
return True
args = request.args
if len(args) > 1 and args[1] == "track_item":
# Shouldn't fail, but the send id comes from the URL args and could be
# invalid, so wrap the lookup in a try
try:
status = db(sendtable.id == args[0]).select(sendtable.status,
limitby = (0, 1),
).first().status
except AttributeError:
status = None
if status:
editable = False
if status == SHIP_STATUS_RETURNING:
editable = True
# remove CRUD generated buttons in the tabs
s3db.configure("inv_track_item",
create = False,
deletable = False,
editable = editable,
listadd = False,
)
s3.prep = prep
return current.crud_controller("inv", "send",
rheader = inv_send_rheader,
)
# =============================================================================
def inv_send_process():
"""
Process a Shipment
"""
request = current.request
try:
send_id = request.args[0]
except IndexError:
redirect(URL(f="send"))
T = current.T
auth = current.auth
db = current.db
s3db = current.s3db
stable = s3db.inv_send
session = current.session
if not auth.s3_has_permission("update", stable, record_id=send_id):
session.error = T("You do not have permission to send this shipment.")
send_record = db(stable.id == send_id).select(stable.status,
stable.sender_id,
stable.send_ref,
stable.req_ref,
stable.site_id,
stable.delivery_date,
stable.recipient_id,
stable.to_site_id,
stable.transport_type,
stable.comments,
limitby = (0, 1)
).first()
if send_record.status != SHIP_STATUS_IN_PROCESS:
session.error = T("This shipment has already been sent.")
tracktable = s3db.inv_track_item
siptable = s3db.supply_item_pack
rrtable = s3db.req_req
ritable = s3db.req_req_item
# Get the track items that are part of this shipment
query = (tracktable.send_id == send_id) & \
(tracktable.deleted == False)
track_items = db(query).select(tracktable.req_item_id,
tracktable.quantity,
tracktable.item_pack_id)
if not track_items:
session.error = T("No items have been selected for shipping.")
if session.error:
redirect(URL(f = "send",
args = [send_id]))
# Update Send record & lock for editing
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
db(stable.id == send_id).update(date = request.utcnow,
status = SHIP_STATUS_SENT,
owned_by_user = None,
owned_by_group = ADMIN)
# If this is linked to a request then update the quantity in transit
req_ref = send_record.req_ref
req_rec = db(rrtable.req_ref == req_ref).select(rrtable.id,
limitby = (0, 1)
).first()
if req_rec:
req_id = req_rec.id
for track_item in track_items:
req_item_id = track_item.req_item_id
if req_item_id:
req_pack_id = db(ritable.id == req_item_id).select(ritable.item_pack_id,
limitby = (0, 1)
).first().item_pack_id
req_p_qnty = db(siptable.id == req_pack_id).select(siptable.quantity,
limitby = (0, 1)
).first().quantity
t_qnty = track_item.quantity
t_pack_id = track_item.item_pack_id
inv_p_qnty = db(siptable.id == t_pack_id).select(siptable.quantity,
limitby = (0, 1)
).first().quantity
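# Convert the shipped quantity from the inventory item's pack into the
# request item's pack, so quantity_transit is counted in request units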
transit_quantity = t_qnty * inv_p_qnty / req_p_qnty
db(ritable.id == req_item_id).update(quantity_transit = ritable.quantity_transit + transit_quantity)
s3db.req_update_status(req_id)
# Create a Receive record
rtable = s3db.inv_recv
recv_item = {"sender_id": send_record.sender_id,
"send_ref": send_record.send_ref,
"req_ref": req_ref,
"from_site_id": send_record.site_id,
"eta": send_record.delivery_date,
"recipient_id": send_record.recipient_id,
"site_id": send_record.to_site_id,
"transport_type": send_record.transport_type,
"comments": send_record.comments,
"status": SHIP_STATUS_SENT,
"type": 1, # 1:"Another Inventory"
}
recv_id = rtable.insert(**recv_item)
recv_item["id"] = recv_id
auth.s3_set_record_owner(rtable, recv_id)
# Change the status for all track items in this shipment to In transit
# and link to the receive record
db(tracktable.send_id == send_id).update(status = 2,
recv_id = recv_id,
)
session.confirmation = T("Shipment Items sent from Warehouse")
if req_rec:
session.confirmation = T("Request Status updated")
redirect(URL(f = "send",
args = [send_id, "track_item"]
))
# =============================================================================
def inv_adj_rheader(r):
""" Resource Header for Inventory Adjustments """
if r.representation == "html" and r.name == "adj":
record = r.record
if record:
T = current.T
tabs = [(T("Edit Details"), None),
(T("Items"), "adj_item"),
(T("Photos"), "image"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(
TR(TH("%s: " % table.adjuster_id.label),
table.adjuster_id.represent(record.adjuster_id),
TH("%s: " % table.adjustment_date.label),
table.adjustment_date.represent(record.adjustment_date),
),
TR(TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id),
TH("%s: " % table.category.label),
table.category.represent(record.category),
),
))
if record.status == 0: # In process
if current.auth.s3_has_permission("update", "inv_adj",
record_id = record.id):
# aitable = current.s3db.inv_adj_item
# query = (aitable.adj_id == record.id) & \
# (aitable.new_quantity == None)
# row = current.db(query).select(aitable.id,
# limitby=(0, 1)).first()
# if row == None:
close_btn = A(T("Complete Adjustment"),
_href = URL(c = "inv",
f = "adj_close",
args = [record.id]
),
_id = "adj_close",
_class = "action-btn"
)
close_btn_confirm = SCRIPT("S3.confirmClick('#adj_close', '%s')"
% T("Do you want to complete & close this adjustment?"))
rheader.append(close_btn)
rheader.append(close_btn_confirm)
rheader.append(rheader_tabs)
# else:
# msg = T("You need to check all the revised quantities before you can close this adjustment")
# rfooter.append(SPAN(msg))
return rheader
return None
# =============================================================================
def inv_expiry_date_represent(date):
"""
Show Expired Dates in Red
"""
dtstr = S3DateTime.date_represent(date, utc=True)
if date and datetime.datetime(date.year, date.month, date.day) < current.request.now:
return SPAN(dtstr, _class="expired")
else:
return dtstr
# =============================================================================
class inv_InvItemRepresent(S3Represent):
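# Renders inv_inv_item records as "item name - expiry - source no - org - bin".
# A minimal usage sketch (assuming a foreign key field referencing inv_inv_item):
#   field.represent = inv_InvItemRepresent()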
def __init__(self):
super(inv_InvItemRepresent, self).__init__(lookup = "inv_inv_item")
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
Args:
key: the key Field
values: the values
fields: unused (retained for API compatibility)
"""
s3db = current.s3db
itable = s3db.inv_inv_item
stable = s3db.supply_item
left = stable.on(stable.id == itable.item_id)
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(itable.id,
stable.name,
stable.um,
itable.item_source_no,
itable.bin,
itable.expiry_date,
itable.owner_org_id,
left = left
)
self.queries += 1
# Bulk-represent owner_org_ids
organisation_id = str(itable.owner_org_id)
organisation_ids = [row[organisation_id] for row in rows]
if organisation_ids:
itable.owner_org_id.represent.bulk(organisation_ids)
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
Args:
row: the Row
"""
itable = current.s3db.inv_inv_item
iitem = row.inv_inv_item
sitem = row.supply_item
stringify = lambda string: string if string else ""
ctn = stringify(iitem.item_source_no)
org = itable.owner_org_id.represent(iitem.owner_org_id)
item_bin = stringify(iitem.bin)
expires = iitem.expiry_date
if expires:
expires = "expires: %s" % \
S3DateTime.date_represent(expires, utc=True)
else:
expires = ""
NONE = current.messages["NONE"]
items = []
append = items.append
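# Build "name - expires - ctn - org - bin", skipping empty parts;
# the trailing separator is dropped via items[:-1] below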
for string in [sitem.name, expires, ctn, org, item_bin]:
if string and string != NONE:
append(string)
append(" - ")
return TAG[""](items[:-1])
# END =========================================================================